# NOTE: the two stray dataset-export header lines ("CombinedText stringlengths
# 4 3.42M |" / "|---|") were extraction artifacts, not part of this source.
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""*Experimental* support for running Keras models on the TPU.
To use, wrap your model with the `keras_support.tpu_model` function.
Example usage:
```
image = tf.keras.layers.Input(shape=(28, 28, 3), name='image')
c1 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3))( image)
flattened = tf.keras.layers.Flatten()(c1)
logits = tf.keras.layers.Dense(10, activation='softmax')(flattened)
model = tf.keras.Model(inputs=[image], outputs=[logits])
resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu=tpu_name)
strategy = keras_support.TPUDistributionStrategy(resolver)
model = keras_support.tpu_model(model, strategy=strategy)
# Only TF optimizers are currently supported.
model.compile(optimizer=tf.train.AdamOptimizer(), ...)
# `images` and `labels` should be Numpy arrays. Support for tensor input
# (e.g. datasets) is planned.
model.fit(images, labels)
```
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import re
import sys
import time
import numpy as np
import six
from tensorflow.contrib.cluster_resolver.python.training import tpu_cluster_resolver as tpu_cluster_resolver_lib
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.proto import compilation_result_pb2 as tpu_compilation_result
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import keras_tpu_variables
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers as keras_optimizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.keras.utils.generic_utils import make_batches
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
# TODO(b/114775106): temporary shim to optionally initialize the TPU
# This increases the odds our session is initialized, but shouldn't be needed.
# Cached probe op (tuple of rewrite outputs) used to check whether the TPU
# system behind the current default graph has already been initialized.
_TEST_REWRITE_OP = None


def _maybe_initialize_tpu(session):
  """Initialize the TPU if it has not already been initialized."""
  global _TEST_REWRITE_OP
  try:
    # Reuse the cached probe op when it belongs to the current default graph;
    # this avoids another round of graph optimization on every call.
    rewrite_op = _TEST_REWRITE_OP
    if rewrite_op is None or rewrite_op[0].graph != ops.get_default_graph():

      def test_op():
        return constant_op.constant(1) + constant_op.constant(1)

      rewrite_op = tpu.rewrite(test_op)
      _TEST_REWRITE_OP = rewrite_op

    session.run(rewrite_op)
  except errors.FailedPreconditionError:
    # The probe failing indicates the TPU system was never initialized.
    session.run(tpu.initialize_system())
@contextlib.contextmanager
def _tpu_session_context():
  """Initialize the TPU and cleans cache entries for bad sessions."""
  try:
    _maybe_initialize_tpu(K.get_session())
    yield
  except (errors.FailedPreconditionError, errors.AbortedError) as e:
    # The remote session is unusable; drop it so the next call starts fresh.
    K.clear_session()
    message = """
An error occurred connecting or initializing your TPU.
The session has been reset. re-run keras_to_tpu_model to create a new session.
"""
    raise Exception(message + str(e))
def setup_tpu_session(cluster_resolver):
  """Construct or return a `tf.Session` connected to the given cluster."""
  master = cluster_resolver.master()

  # Reuse the current Keras session if it already targets this TPU and has
  # been initialized. N.B. K.get_session() is a non-trivial operation and may
  # fail if the remote session has been reset.
  try:
    existing_session = K.get_session()
    if (existing_session._target == master and
        getattr(existing_session, '_tpu_initialized', None)):
      return
  except errors.AbortedError:
    # We lost the remote session and need to re-initialize.
    logging.warning('Lost remote session: creating a new session.')

  config = config_pb2.ConfigProto(isolate_session_state=True)
  cluster_spec = cluster_resolver.cluster_spec()
  if cluster_spec:
    config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())

  tpu_session = tf_session.Session(target=master, config=config)
  tpu_session.run(tpu.initialize_system())
  # Mark the session so later calls can recognize it as already initialized.
  tpu_session._tpu_initialized = True

  # N.B. We have to call `K.set_session()` AND set our session as the
  # TF default. `K.get_session()` surprisingly does not return the value
  # supplied by K.set_session otherwise.
  K.set_session(tpu_session)
try:
from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
except ImportError:
issparse = None
def get_tpu_system_metadata(tpu_cluster_resolver):
  """Retrieves TPU system metadata given a TPUClusterResolver."""
  master = tpu_cluster_resolver.master()

  # pylint: disable=protected-access
  cluster_spec = tpu_cluster_resolver.cluster_spec()
  cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None
  return tpu_system_metadata_lib._query_tpu_system_metadata(
      master, cluster_def=cluster_def, query_topology=False)
class TPUDistributionStrategy(object):
  """The strategy to run Keras model on TPU."""

  def __init__(self, tpu_cluster_resolver=None, using_single_core=False):
    """Construct a TPUDistributionStrategy.

    Args:
      tpu_cluster_resolver: Any instance of `TPUClusterResolver`. If None, will
        create one with '' as master address.
      using_single_core: Bool. A debugging option that may be removed once
        model replication is mature enough. If `False` (the default), the
        system automatically finds the best configuration, in terms of number
        of TPU cores, for the model replication — typically all available TPU
        cores. If `True`, model replication is forced onto a single core,
        i.e., no replication.

    Raises:
      Exception: No TPU Found on the given worker.
    """
    if tpu_cluster_resolver is None:
      tpu_cluster_resolver = tpu_cluster_resolver_lib.TPUClusterResolver('')

    metadata = get_tpu_system_metadata(tpu_cluster_resolver)
    self._tpu_metadata = metadata
    self._tpu_cluster_resolver = tpu_cluster_resolver
    self._num_cores = 1 if using_single_core else metadata.num_cores

    # Scan the device list for the first TPU device; its job name identifies
    # the worker hosting the enqueue/dequeue operations.
    job_pattern = re.compile('/job:([^/]+)')
    for device in metadata.devices:
      if 'TPU:0' in device.name:
        self._worker_name = job_pattern.search(device.name).group(1)
        return
    raise Exception('No TPU found on given worker.')

  def _make_assignment_for_model(self, cpu_model):
    """Makes a `TPUAssignment` for the passed in `cpu_model`."""
    num_cores = self._num_cores
    if num_cores > 1 and cpu_model.stateful:
      logging.warning(
          'Model replication does not currently support stateful models. '
          'Degrading to a single core.')
      num_cores = 1

    return TPUAssignment(worker_name=self._worker_name, num_cores=num_cores)
class TPUAssignment(object):
  """Holds the TPU resource assignment for a concrete model.

  `TPUDistributionStrategy` is responsible for creating instances of
  `TPUAssignment`, so it can dynamically adjust the `num_cores` to use based
  on the model and input batch sizes.
  """

  def __init__(self, worker_name, num_cores):
    self._worker_name = worker_name
    self._num_cores = num_cores

  @property
  def worker_name(self):
    """Name of the worker job hosting the TPU enqueue/dequeue ops."""
    return self._worker_name

  @property
  def num_towers(self):
    # TODO(xiejw): Support automatically assign num_cores based on inputs.
    return self._num_cores
class TPUEmbedding(embeddings.Embedding):
  """TPU compatible embedding layer.

  The default Keras layer is not TPU compatible. This layer is a drop-in
  replacement: it has the same behavior and will work on CPU and GPU devices.
  """

  def build(self, input_shape):
    # TPU compilation needs a statically known leading dimension.
    if input_shape[0] is None:
      raise ValueError(
          'TPUEmbeddings must have a fixed input_length or input shape.')
    return super(TPUEmbedding, self).build(input_shape)

  def call(self, inputs):
    ids = inputs
    if K.dtype(ids) != 'int32':
      ids = math_ops.cast(ids, 'int32')
    # Lookup via one-hot matmul — a TPU-friendly equivalent of gather.
    one_hot_ids = array_ops.one_hot(ids, self.input_dim)
    return math_ops.tensordot(one_hot_ids, self.embeddings, 1)
def _cross_replica_concat(tensor, core_id, num_cores, name):
  """Concatenate `tensor` across cores.

  Args:
    tensor: The tensor to be concatenated. Must be [int32 and float32].
    core_id: Tensor indicating the current TPU core.
    num_cores: Python int. The total number of TPU cores in the system.
    name: The string name to print for debugging.

  Returns:
    The same concatenated Tensor on each core.
  """
  input_dtype = tensor.dtype
  if input_dtype not in [dtypes.bfloat16, dtypes.float32, dtypes.int32]:
    raise TypeError('For model replication, only (bfloat16, float32 and int32) '
                    'is supported for model outputs and targets. Got {} for '
                    '{}.'.format(input_dtype, name))

  batch_size = tensor.shape[0]
  # One-hot mask selecting this core's slot out of `num_cores`.
  mask = math_ops.to_float(
      math_ops.equal(np.arange(num_cores, dtype=np.int32), core_id))
  mask = array_ops.reshape(mask, [num_cores] + [1] * tensor.shape.ndims)
  # Place the local values in this core's slot; other slots are zero ("holes").
  result = mask * math_ops.to_float(tensor)
  local_tensor_with_holes = array_ops.reshape(result,
                                              [-1] + result.shape.as_list()[2:])
  # Summing across replicas fills every core's holes with the other cores'
  # values, yielding the full concatenation on each core.
  concat_tensor = tpu_ops.cross_replica_sum(local_tensor_with_holes)
  concat_tensor.set_shape((num_cores * batch_size,) + tuple(tensor.shape[1:]))

  # BUG FIX: the original wrote `if concat_tensor != input_dtype:`, comparing
  # the Tensor itself against a DType instead of checking its dtype, so the
  # intended cast back to the input dtype was unreliable. Compare dtypes so
  # int32 inputs are cast back after the float accumulation above.
  if concat_tensor.dtype != input_dtype:
    concat_tensor = math_ops.cast(concat_tensor, input_dtype)
  return concat_tensor
class KerasCrossShardOptimizer(keras_optimizers.Optimizer):
  """An optimizer that averages gradients across TPU shards."""

  def __init__(self, opt, name='KerasCrossShardOptimizer'):
    """Construct a new cross-shard optimizer.

    Args:
      opt: An existing `Optimizer` to encapsulate.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "KerasCrossShardOptimizer".

    Raises:
      ValueError: If reduction is not a valid cross-shard reduction.
    """
    super(KerasCrossShardOptimizer, self).__init__()
    self._name = name
    self._opt = opt
    logging.info('KerasCrossShard: %s %s', self._opt, self._opt.weights)

  def get_updates(self, loss, params):
    """Delegate update computation to the wrapped optimizer.

    The wrapped optimizer's `get_gradients` is monkey-patched first so that
    its update rule consumes the cross-shard-averaged gradients below.
    """
    self._opt.get_gradients = self.get_gradients
    return self._opt.get_updates(loss, params)

  def get_gradients(self, loss, params):
    """Compute gradients and average them across all TPU shards."""
    num_shards = tpu_function.get_tpu_context().number_of_shards
    grads = super(KerasCrossShardOptimizer, self).get_gradients(loss, params)
    # cross_replica_sum adds the per-shard gradients; dividing by the shard
    # count turns the sum into a cross-shard mean.
    return [tpu_ops.cross_replica_sum(grad) / num_shards for grad in grads]

  def get_weights(self):
    return self._opt.get_weights()

  def get_config(self):
    return self._opt.get_config()

  # Defer remaining operations to the underlying optimizer
  def __getattr__(self, key):
    # __getattr__ only fires for names missing from the instance, so `_opt`
    # itself (set in __init__) resolves normally and does not recurse.
    return getattr(self._opt, key)
class TPUModelOp(
    collections.namedtuple('TPUModelOp', [
        'compile_op', 'execute_op', 'infeed_tensors', 'infeed_op', 'outfeed_op'
    ])):
  """Bundle of ops/tensors driving one compiled TPU model instantiation."""
def _valid_name(tensor_name):
"""Return a valid tensor name (strips '/', ':', etc)."""
return re.sub('[^a-zA-Z0-9_-]+', '', tensor_name)
def _replicated_optimizer(opt):
  """Wrap the optimizer `opt` with CrossShardOptimizer if applicable."""
  # Wrapping happens even on a single core: it ensures Keras properly tracks
  # and initializes optimizer variables.
  if isinstance(opt, keras_optimizers.TFOptimizer):
    return tpu_optimizer.CrossShardOptimizer(opt.optimizer)
  return KerasCrossShardOptimizer(opt)
def _clone_optimizer(optimizer, config=None, worker_name=None):
  """Returns a cloned optimizer with the provided optimizer.config or config."""
  if not isinstance(optimizer, keras_optimizers.Optimizer):
    # In the first call to tpu_model(model), Keras may not have wrapped the TF
    # optimizer in the TFOptimizer helper, e.g., the given model isn't compiled
    # or optimizer isn't set, and later generated tpu_model compiles with a TF
    # optimizer.
    return optimizer

  if isinstance(optimizer, keras_optimizers.TFOptimizer):
    return keras_optimizers.TFOptimizer(optimizer.optimizer)

  if config is None:
    config = optimizer.get_config()
  logging.info('Cloning %s %s', optimizer.__class__.__name__, config)

  # Explicitly put the optimizer's parameter variables on the TPU worker CPU.
  device_str = '%s/device:CPU:0' % (
      '/job:%s' % worker_name if worker_name else '')
  with ops.device(device_str):
    return optimizer.__class__.from_config(config)
class TPURewriteContext(object):
  """Prepare the environment for a Keras model during `tpu.rewrite`.

  This overrides the default placeholder behaviour to instead refer to a preset
  input mapping. Placeholders are unsupported in TPU compiled code, and must
  be replaced with explicit inputs or values from the infeed queue.

  Instead of explicitly threading inputs all the way through the Keras codebase,
  we override the behavior of the placeholder while compiling and inject the
  Tensors from the infeed in place of the placeholder.

  Similarly, as we compile a new sub-graph for each unique shape and execution
  mode, we need to override the behavior of an embedded `name_scope` call in
  the base Keras layer code. This allows us to re-use the same weights across
  many compiles and share a single session/graph.
  """

  def __init__(self, input_map):
    # Maps placeholder name -> infeed Tensor to substitute during rewrite.
    self._input_map = input_map
    self._default_placeholder = None
    self._default_name_scope = None

  def __enter__(self):

    def _placeholder(dtype, shape=None, name=None):  # pylint: disable=unused-argument
      """Return the mapped infeed tensor for `name`, else a real placeholder."""
      logging.info('Remapping placeholder for %s', name)
      if name in self._input_map:
        return self._input_map[name]
      else:
        logging.info('Default: %s', name)
        return self._default_placeholder(dtype, shape, name)

    def _name_scope(name, default_name=None, values=None):
      """Use an AUTO_REUSE variable scope when called from a Keras layer.

      This lets repeated compiles of the same model share weights instead of
      creating new variables per compile.
      """
      # Inspect the caller's frame to detect whether a Keras layer invoked us.
      caller_frame = sys._getframe().f_back
      caller_obj = caller_frame.f_locals.get('self')
      if (caller_obj is not None and
          isinstance(caller_obj, base_layer.Layer) and name is not None):
        return variable_scope.variable_scope(
            name, default_name, values, reuse=variable_scope.AUTO_REUSE)
      return self._default_name_scope(name, default_name, values)

    # Save the originals so __exit__ can restore them.
    self._default_placeholder = array_ops.placeholder
    self._default_name_scope = ops.name_scope
    self._default_make_variable = base_layer_utils.make_variable
    self._default_random_normal = random_ops.random_normal
    self._default_qr = gen_linalg_ops.qr
    array_ops.placeholder = _placeholder

    # Replace random_ops.random_normal with a dummy function because
    # `random_normal` isn't yet implemented on the TPU. Because these
    # initialized values are overwritten by the CPU values, this is okay.
    def random_normal(shape,
                      mean=0.0,
                      stddev=1.0,
                      dtype=dtypes.float32,
                      seed=None,
                      name=None):
      del mean
      del stddev
      del seed
      return array_ops.zeros(shape, dtype=dtype, name=name)

    random_ops.random_normal = random_normal

    # Replace gen_linalg_ops.qr because QR decomposition is not yet implemented.
    # TODO(saeta): Remove qr override once we confirm the qr implementation is
    # ok.
    # pylint: disable=redefined-builtin
    def qr(input, full_matrices=False, name=None):
      """Dummy implementation of qr decomposition."""
      # NOTE(review): returns (r, q) zeros of these shapes; values are dummies
      # and presumably overwritten by CPU-computed values — confirm.
      del full_matrices  # TODO(saeta): Properly handle the full matrix case.
      input_shape = input.shape
      if len(input_shape) < 2:
        raise ValueError('Invalid shape passed to qr: %s' % input_shape)
      p = min(input_shape[-1], input_shape[-2])
      if len(input_shape) == 2:
        q = array_ops.zeros((p, p), name=name)
        r = array_ops.zeros(input_shape, name=name)
        return (r, q)
      elif len(input_shape) == 3:
        n = input_shape[0]
        q = array_ops.zeros((n, p, p), name=name)
        r = array_ops.zeros(input_shape, name=name)
        return (r, q)
      else:
        raise ValueError('Invalid shape passed to qr: %s' % input_shape)

    gen_linalg_ops.qr = qr
    ops.name_scope = _name_scope
    base_layer_utils.make_variable = variable_scope.get_variable
    logging.info('Overriding default placeholder.')
    return

  def __exit__(self, exc_type, exc_val, exc_tb):
    # Restore every global patched in __enter__.
    array_ops.placeholder = self._default_placeholder
    ops.name_scope = self._default_name_scope
    base_layer_utils.make_variable = self._default_make_variable
    random_ops.random_normal = self._default_random_normal
    gen_linalg_ops.qr = self._default_qr
class SizedInfeed(
    collections.namedtuple('SizedInfeed',
                           ['sharded_infeed_tensors', 'infeed_ops'])):
  """Represents an instantiation of the infeed ops for a concrete input shape.

  sharded_infeed_tensors: A data structure of Tensors used to represent the
    placeholder tensors that must be fed when using feed_dicts.
  infeed_ops: the set of ops that will be run to drive infeed for a single step.
  """
class TPUInfeedInstance(object):
  """TPUInfeedInstance represents the logic to manage feeding in a single step.

  See the comments on the `TPUInfeedManager` for a description for how infeed
  is managed.

  NOTE(review): this class lacks an ABCMeta metaclass, so the
  @abc.abstractmethod below is not actually enforced on subclasses.
  """

  @abc.abstractmethod
  def make_input_specs(self, input_tensors):
    """Constructs the infeed_specs for the given Infeed instance.

    Args:
      input_tensors: The inputs to the model.

    Returns:
      A list of input specs (presumably `TensorSpec`s, judging from the
      concrete implementations in this file — the original docstring was
      truncated here; confirm).
    """
    pass

  def make_feed_dict(self, tpu_model_op):
    """Constructs a feed_dict for this instance, given the tpu_model_op.

    Args:
      tpu_model_op: A `TPUModelOp` representing the TPU Model for this
        instance's input spec.

    Returns:
      A dictionary to use as the feed_dict of a `session.run` call.
    """
    pass
@six.add_metaclass(abc.ABCMeta)
class TPUInfeedManager(object):
  """TPUInfeedManager manages the data infeeding of data to a TPU computation.

  Because there are multiple data sources (e.g. in-memory NumPy arrays,
  `tf.data.Dataset`s), we abstract the different logic behind a single
  interface: the `TPUInfeedManager`.

  (1) A `TPUFunction` is called with a set of inputs. Based on the inputs,
  `TPUFunction` retrieves the corresponding `TPUInfeedManager` (or constructs a
  new one if required).

  (2) The `TPUFunction` calls `make_infeed_instance` on the `TPUInfeedManager`
  which returns a `TPUInfeedInstance`.

  (3) The `TPUFunction` checks in the shape cache for a pre-compiled instance of
  the model based on the returned `input_specs` from `TPUInfeedInstance`.

  (4) [Optional.] If the model has not already been instantiated for the given
  input spec, the `TPUFunction` compiles the model for the input spec (using the
  `TPUInfeedManager`).

  (5) The `TPUInfeedInstance` constructs the session.run's feed_dict given the
  compiled model instance corresponding to its shape.
  """

  @abc.abstractmethod
  def make_infeed_instance(self, inputs):
    """Given a single step's input, construct a `TPUInfeedInstance`.

    Args:
      inputs: The inputs to a given step.

    Returns:
      A subclass of `TPUInfeedInstance`.
    """
    pass

  @abc.abstractmethod
  def build_infeed_from_input_specs(self, input_specs, execution_mode):
    """For a given input specification (size, type), construct the infeed ops.

    This is called only once for a given input specification and builds the
    graph ops. It does not have a pointer to the actual infeed data.

    Args:
      input_specs: Presumably a list of `TensorSpec`s describing one shard's
        inputs, as produced by `TPUInfeedInstance.make_input_specs` — confirm
        with callers. (Original docstring said "TODO: Document me!".)
      execution_mode: A mode string used by implementations to name the
        enqueue ops. (Original docstring said "TODO: Document me!".)

    Returns:
      A `SizedInfeed` instance.
    """
    pass
class TPUNumpyInfeedManager(TPUInfeedManager):
  """TPU Infeed manager for Numpy inputs."""

  class NumpyInfeedInstance(TPUInfeedInstance):
    """Infeed instance for Numpy inputs."""

    def __init__(self, sharded_inputs):
      # One entry per shard; each entry is the list of Numpy arrays to feed
      # that shard for this step.
      self._sharded_inputs = sharded_inputs

    def make_input_specs(self, input_tensors):
      # Compute an input specification (used to generate infeed enqueue and
      # dequeue operations). We use the shape from our input array and the
      # dtype from our model. A user may pass in a float64 for a float32
      # input: for model compatibility we still must generate a float32 infeed.
      input_specs = []
      # We use the shape and dtype from the first shard to compute the input
      # metadata (`input_specs`); all replicas have the same type and shape.
      for tensor, ary in zip(input_tensors, self._sharded_inputs[0]):
        input_specs.append(
            tensor_spec.TensorSpec(ary.shape, tensor.dtype,
                                   _valid_name(tensor.name)))
      return input_specs

    def make_feed_dict(self, tpu_model_op):
      """Map each shard's infeed placeholders to its Numpy arrays."""
      infeed_dict = {}
      for infeed_tensors, inputs in zip(tpu_model_op.infeed_tensors,
                                        self._sharded_inputs):
        for tensor, value in zip(infeed_tensors, inputs):
          infeed_dict[tensor] = value
      return infeed_dict

  def __init__(self, tpu_assignment):
    # `tpu_assignment` supplies num_towers and the worker name for placement.
    self._tpu_assignment = tpu_assignment

  def _split_tensors(self, inputs):
    """Split input data across shards.

    Each input is sliced along the batch axis.

    Args:
      inputs: List of Numpy arrays to run on the TPU.

    Returns:
      List of lists containing the input to feed to each TPU shard.
    """
    if self._tpu_assignment.num_towers == 1:
      return [inputs]

    batch_size = inputs[0].shape[0]
    assert batch_size % self._tpu_assignment.num_towers == 0, (
        'batch_size must be divisible by the number of TPU cores in use (%s '
        'vs %s)' % (batch_size, self._tpu_assignment.num_towers))

    shard_size = batch_size // self._tpu_assignment.num_towers
    input_list = []
    for index in range(self._tpu_assignment.num_towers):
      shard_inputs = [
          x[index * shard_size:(index + 1) * shard_size] for x in inputs
      ]
      input_list.append(shard_inputs)
    return input_list

  def make_infeed_instance(self, inputs):
    sharded_inputs = self._split_tensors(inputs)
    return self.NumpyInfeedInstance(sharded_inputs)

  def build_infeed_from_input_specs(self, input_specs, execution_mode):
    """Build per-shard placeholders and infeed enqueue ops for `input_specs`."""
    infeed_op = []
    shard_infeed_tensors = []

    for shard_id in range(self._tpu_assignment.num_towers):
      # Placeholders and enqueue ops are placed on the worker CPU; the inner
      # TPU device scope pins the enqueue to a specific core.
      with ops.device(
          '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
        infeed_tensors = []
        with ops.device('/device:TPU:%d' % shard_id):
          for spec in input_specs:
            # Construct placeholders for each of the inputs.
            infeed_tensors.append(
                array_ops.placeholder(
                    dtype=spec.dtype,
                    shape=spec.shape,
                    name='infeed-enqueue-%s-%d' % (spec.name, shard_id)))
        shard_infeed_tensors.append(infeed_tensors)

        infeed_op.append(
            tpu_ops.infeed_enqueue_tuple(
                infeed_tensors, [spec.shape for spec in input_specs],
                name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),
                device_ordinal=shard_id))
    return SizedInfeed(
        infeed_ops=infeed_op, sharded_infeed_tensors=shard_infeed_tensors)
class TPUDatasetInfeedManager(TPUInfeedManager):
  """Manages infeed for a `tf.data.Dataset` into a TPU computation.
  """

  class DatasetInfeedInstance(TPUInfeedInstance):
    """An instance of the TPU infeed."""

    def __init__(self, input_specs):
      self._input_specs = input_specs

    def make_input_specs(self, input_tensors):
      # TODO(saeta): Do error checking here!
      return self._input_specs

    def make_feed_dict(self, tpu_model_op):
      # TODO(saeta): Verify tpu_model_op is as expected!
      # Dataset-backed infeed reads directly from iterator ops, so there is
      # nothing to feed.
      return {}

  # pylint: disable=redefined-outer-name
  def __init__(self, dataset, tpu_assignment, mode):
    """Constructs a TPUDatasetInfeedManager.

    Args:
      dataset: A `tf.data.Dataset` to infeed.
      tpu_assignment: The `TPUAssignment` used to configure the
        Keras TPU model.
      mode: ModeKeys enum.
    """
    self._verify_dataset_shape(dataset)

    self._dataset = dataset
    self._tpu_assignment = tpu_assignment
    # The dummy arrays' batch dimension covers all towers combined; they stand
    # in for real data during Keras' shape checking (see below).
    dummy_x_shape = dataset.output_shapes[0].as_list()
    dummy_x_shape[0] *= tpu_assignment.num_towers
    dummy_y_shape = dataset.output_shapes[1].as_list()
    dummy_y_shape[0] *= tpu_assignment.num_towers
    self._iterator = dataset_ops.make_initializable_iterator(dataset)
    K.get_session().run(self._iterator.initializer)

    # One get_next op per tower, chained with control dependencies so towers
    # consume dataset elements in a deterministic order.
    self._get_next_ops = []
    ctrl_deps = []
    for i in range(tpu_assignment.num_towers):
      with ops.control_dependencies(ctrl_deps):  # Ensure deterministic
        # TODO(saeta): Ensure correct placement!
        get_next_op = self._iterator.get_next()
        self._get_next_ops.append(get_next_op)
        ctrl_deps.extend(get_next_op)

    # Use dummy numpy inputs for the rest of Keras' shape checking. We
    # intercept them when building the model.
    self._dummy_x = np.zeros(
        dummy_x_shape, dtype=dataset.output_types[0].as_numpy_dtype)
    self._dummy_y = np.zeros(
        dummy_y_shape, dtype=dataset.output_types[1].as_numpy_dtype)

    # Build TensorSpecs from the iterator's static shapes/types; the iterator
    # may report either a tuple of shapes or a single TensorShape.
    input_specs = []
    if isinstance(self._iterator.output_shapes, tuple):
      assert isinstance(self._iterator.output_types, tuple)
      assert len(self._iterator.output_shapes) == len(
          self._iterator.output_types)
      for i in range(len(self._iterator.output_shapes)):
        spec = tensor_spec.TensorSpec(self._iterator.output_shapes[i],
                                      self._iterator.output_types[i])
        input_specs.append(spec)
    elif isinstance(self._iterator.output_shapes, tensor_shape.TensorShape):
      spec = tensor_spec.TensorSpec(self._iterator.output_shapes,
                                    self._iterator.output_types)
      input_specs.append(spec)

    # Pre-process the inputs and get_next_ops before caching.
    input_specs, self._get_next_ops = (
        _inject_tpu_inputs_for_dataset(
            tpu_assignment, mode, input_specs, self._get_next_ops))
    self._infeed_instance = self.DatasetInfeedInstance(input_specs)

  def _verify_dataset_shape(self, dataset):
    """Verifies a dataset is of an appropriate shape for TPUs."""
    if not isinstance(dataset, dataset_ops.DatasetV2):
      raise ValueError('The function passed as the `x` parameter did not '
                       'return a `tf.data.Dataset`.')
    if not isinstance(dataset.output_classes, tuple):
      raise ValueError('The dataset must return a tuple of tf.Tensors, '
                       'instead it returns: %s' % dataset.output_classes)
    if len(dataset.output_classes) != 2:
      raise ValueError('The dataset must return a 2-element tuple, got '
                       '%s output classes instead.' % (dataset.output_classes,))
    for i, cls in enumerate(dataset.output_classes):
      if cls != ops.Tensor:
        raise ValueError('The dataset returned a non-Tensor type (%s) at '
                         'index %d.' % (cls, i))
    for i, shape in enumerate(dataset.output_shapes):
      if not shape:
        raise ValueError('The dataset returns a scalar tensor in '
                         'tuple index %d. Did you forget to batch? '
                         '(Output shapes: %s).' % (i, dataset.output_shapes))
      # All dimensions must be statically known for TPU compilation.
      for j, dim in enumerate(shape):
        if dim.value is None:
          if j == 0:
            hint = (' Hint: did you use `ds.batch(BATCH_SIZE, '
                    'drop_remainder=True)`?')
          else:
            hint = ''
          raise ValueError(
              'The Keras-TPU integration for `tf.data` '
              'currently requires static shapes. The provided '
              'dataset only has a partially defined shape. '
              '(Dimension %d of output tensor %d is not statically known '
              'for output shapes: %s.%s)' % (j, i, dataset.output_shapes, hint))

  @property
  def dummy_x(self):
    # Zero-filled stand-in for features, used only for Keras' shape checks.
    return self._dummy_x

  @property
  def dummy_y(self):
    # Zero-filled stand-in for labels, used only for Keras' shape checks.
    return self._dummy_y

  def make_infeed_instance(self, inputs):
    # TODO(saeta): Verify inputs is as expected.
    return self._infeed_instance

  def build_infeed_from_input_specs(self, input_specs, execution_mode):
    """Build enqueue ops that feed directly from the iterator's get_next ops."""
    shard_infeed_tensors = self._get_next_ops
    assert len(shard_infeed_tensors) == self._tpu_assignment.num_towers
    infeed_ops = []
    for shard_id in range(self._tpu_assignment.num_towers):
      with ops.device(
          '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
        infeed_ops.append(
            tpu_ops.infeed_enqueue_tuple(
                shard_infeed_tensors[shard_id],
                [spec.shape for spec in input_specs],
                name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),
                device_ordinal=shard_id))
    return SizedInfeed(
        infeed_ops=infeed_ops, sharded_infeed_tensors=shard_infeed_tensors)
def _inject_tpu_inputs_for_dataset(tpu_assignment, mode,
                                   input_specs, get_next_ops):
  """Append core information to the set of dataset inputs."""
  # This is used during compilation to identify the current TPU core and enable
  # concatenation operations across cores.
  if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:
    return input_specs, get_next_ops

  # Dataset inputs operate on per core basis.
  per_core_batch_size = input_specs[0].shape.as_list()[0]

  # Insert, at head, the tensor for core_id.
  assert len(get_next_ops) == tpu_assignment.num_towers
  for i in range(tpu_assignment.num_towers):
    # Each core gets its own id repeated across the per-core batch dimension.
    # NOTE(review): 'cord_id_constant' looks like a typo for
    # 'core_id_constant', but renaming would change graph node names.
    core_id_constant = constant_op.constant(
        np.array([i] * per_core_batch_size).astype('int32'),
        dtype=dtypes.int32,
        name='cord_id_constant')
    # N.B. `get_next_ops` is mutated in place here as well as being returned.
    get_next_ops[i] = [core_id_constant] + list(get_next_ops[i])

  # Insert the input spec at head also.
  input_specs = [tensor_spec.TensorSpec([per_core_batch_size], dtypes.int32)
                ] + input_specs

  return input_specs, get_next_ops
def _inject_tpu_inputs_for_infeed(tpu_assignment, mode,
                                  core_id_place_holder, input_tensors, inputs):
  """Append core information to the set of inputs."""
  # Core ids are only needed when outputs are concatenated across cores,
  # which happens for training and evaluation.
  if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:
    return input_tensors, inputs

  # Prepend the placeholder that will receive the core ids.
  input_tensors = [core_id_place_holder] + input_tensors

  # Fill in the core ids. For `num_cores` = 2, `batch_size` = 8 this yields
  # [0, 0, 0, 0, 1, 1, 1, 1], so each core sees its own id (duplicated).
  tower_count = tpu_assignment.num_towers
  per_core_batch_size = inputs[0].shape[0] // tower_count
  core_ids = np.repeat(np.arange(tower_count), per_core_batch_size)
  inputs = [core_ids] + inputs
  return input_tensors, inputs
def _read_tpu_coreid_from_infeed(mode, infeed_tensors):
  """Popping out the core ids from infeed."""
  if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:
    # No core id was injected for this mode; pass the tensors through.
    return None, infeed_tensors

  if len(infeed_tensors) <= 1:
    raise RuntimeError(
        'The infeed tensors on TPU core has only {} tensors. '
        'This is not expected. Please report a bug.\nTensors: {}'.format(
            len(infeed_tensors), infeed_tensors))

  # The first infeed tensor holds the replicated core id; take the scalar
  # version and return the remaining tensors unchanged.
  core_id, rest = infeed_tensors[0][0], infeed_tensors[1:]
  return core_id, rest
class TPUFunction(object):
  """K.function compatible interface for invoking a TPU compiled function.

  Recompilation is triggered on-demand for each set of new inputs shapes: the
  results are cached for future execution. We expect most computations will
  be dominated by a standard batch-size, followed by a straggler batch for
  the end of training or evaluation.

  All `inputs` and `outputs` will be loaded via the infeed and outfeed queues
  instead of being injected as `feed_dict` items or fetches.
  """

  def __init__(self, model, execution_mode, tpu_assignment):
    """Builds a TPU-callable function for `model`.

    Args:
      model: The `KerasTPUModel` this function executes.
      execution_mode: One of `model_fn_lib.ModeKeys` (TRAIN/EVAL/PREDICT);
        fixed for the lifetime of this function.
      tpu_assignment: Assignment object providing `num_towers` and
        `worker_name` for device placement.
    """
    self.model = model
    self.execution_mode = execution_mode
    self._tpu_assignment = tpu_assignment
    # Maps a tuple of input shapes to the `TPUModelOp` compiled for those
    # shapes (see `_tpu_model_ops_for_input_specs`).
    self._compilation_cache = {}
    # Both are populated lazily inside `_specialize_model`.
    self._cloned_model = None
    self._cloned_optimizer = None
    # Create a placeholder for the TPU core ID. Cache the placeholder to avoid
    # modifying the graph for every batch.
    self._core_id_place_holder = array_ops.placeholder(
        dtype=dtypes.int32, shape=[1], name='core_id')

  def _specialize_model(self, input_specs, infeed_manager):
    """Specialize `self.model` (a Keras model) for the given input shapes.

    Args:
      input_specs: `TensorSpec`s (dtype and shape) for the per-core infeed
        tensors; a new specialization is built for each unique shape set.
      infeed_manager: The `TPUInfeedManager` used to build the CPU-side
        enqueue ops for these specs.

    Returns:
      A `TPUModelOp` bundling the XLA compile op, the replicated execute op,
      and the CPU-side infeed/outfeed ops.
    """
    # Re-create our input and output layers inside our subgraph. They will be
    # attached to the true computation when we clone our model in `tpu_fn`.
    K.set_learning_phase(self.execution_mode == model_fn_lib.ModeKeys.TRAIN)

    # functools.partial and callable objects are not supported by tpu.rewrite
    def _model_fn():
      """Compute fit/eval/predict for the TPU."""
      is_training = self.execution_mode == model_fn_lib.ModeKeys.TRAIN
      is_test = self.execution_mode == model_fn_lib.ModeKeys.EVAL
      is_predict = self.execution_mode == model_fn_lib.ModeKeys.PREDICT

      # During train/eval, we infeed our features as well as labels.
      if is_training or is_test:
        infeed_layers = self.model._input_layers + self.model._output_layers
      else:
        infeed_layers = self.model._input_layers

      # Generate our infeed operation to read features & labels.
      infeed_tensors = tpu_ops.infeed_dequeue_tuple(
          dtypes=[spec.dtype for spec in input_specs],
          shapes=[spec.shape for spec in input_specs],
          name='infeed-%s' % self.execution_mode)

      # In TRAIN/EVAL the first infeed tensor is the per-core id injected on
      # the CPU side; strip it off before matching tensors to layers.
      core_id, infeed_tensors = (
          _read_tpu_coreid_from_infeed(
              mode=self.execution_mode, infeed_tensors=infeed_tensors))

      assert len(infeed_tensors) == len(infeed_layers), (
          'Infeed inputs did not match model: %s vs %s' % (infeed_layers,
                                                           infeed_tensors))

      tpu_targets = []
      tpu_input_map = {}

      # Sort infeed outputs into inputs and labels for calling our Keras model.
      for tensor, layer in zip(infeed_tensors, infeed_layers):
        if layer in self.model._input_layers:
          tpu_input_map[layer.name] = tensor
        if layer in self.model._output_layers:
          tpu_targets.append(tensor)

      # Clone our CPU model, running within the TPU device context.
      #
      # We use the id of the original model as a key to avoid weight collisions
      # (if a user re-runs the same model multiple times, in e.g. Colab).
      with TPURewriteContext(tpu_input_map):
        with variable_scope.variable_scope('tpu_%s' % id(self.model)):
          with keras_tpu_variables.replicated_scope(
              self._tpu_assignment.num_towers):
            if not self._cloned_optimizer:
              self._cloned_optimizer = _clone_optimizer(
                  self.model.cpu_optimizer,
                  worker_name=self._tpu_assignment.worker_name)

            self._cloned_model = models.clone_model(self.model)

        # When running on more than one core, concatenate outputs at the end
        # of processing. In backprop stage, the gradients will be
        # calculated according to the local inputs as gradient of
        # cross-replica-concat being zero for any outputs other than those
        # from the local core so the loss calculation is identical.
        num_towers = self.model._tpu_assignment.num_towers
        if num_towers > 1 and (is_training or is_test):
          new_outputs = [
              _cross_replica_concat(
                  o, core_id, num_towers,
                  name='model output ({})'.format(o.name))
              for o in self._cloned_model.outputs
          ]
          # Recast all low precision outputs back to float32 since we only
          # casted the inputs to bfloat16 and not targets. This is done so
          # that we can preserve precision when calculating the loss value.
          if new_outputs and new_outputs[0].dtype == dtypes.bfloat16:
            new_outputs = [
                math_ops.cast(o, dtypes.float32) for o in new_outputs]
          self._cloned_model.outputs = new_outputs
          tpu_targets = [
              _cross_replica_concat(
                  tensor,
                  core_id,
                  num_towers,
                  name='model target ({})'.format(tensor.name))
              for tensor in tpu_targets
          ]

        if is_training or is_test:
          with variable_scope.variable_scope(
              'metrics', reuse=variable_scope.AUTO_REUSE):
            self._cloned_model.compile(
                optimizer=_replicated_optimizer(self._cloned_optimizer),
                loss=self.model.loss,
                loss_weights=self.model.loss_weights,
                metrics=metrics_module.clone_metrics(
                    self.model._compile_metrics),
                weighted_metrics=metrics_module.clone_metrics(
                    self.model._compile_weighted_metrics),
                target_tensors=tpu_targets,
            )

      # Compute our outfeed depending on the execution mode
      if is_training:
        if not isinstance(self._cloned_optimizer, keras_optimizers.TFOptimizer):
          # For Keras optimizer, we try to place the variable weights on the TPU
          # device. Keras creates optimizer variables (e.g. momentum values for
          # the Momentum optimizer) when _make_train_function is invoked.
          with keras_tpu_variables.replicated_variable_for_optimizer(
              self._tpu_assignment.num_towers):
            self._cloned_model._make_fit_function()
        else:
          self._cloned_model._make_fit_function()

        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in self._cloned_model._fit_function.outputs
        ]
        return [
            self._cloned_model._fit_function.updates_op,
            tpu_ops.outfeed_enqueue_tuple(
                self._cloned_model._fit_function.outputs,
                name='outfeed-enqueue-train')
        ]
      elif is_test:
        self._cloned_model._make_eval_function()
        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in self._cloned_model._eval_function.outputs
        ]
        return [
            tpu_ops.outfeed_enqueue_tuple(
                self._cloned_model._eval_function.outputs,
                name='outfeed-enqueue-test')
        ]
      elif is_predict:
        self._cloned_model._make_predict_function()
        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in self._cloned_model.predict_function.outputs
        ]
        return [
            tpu_ops.outfeed_enqueue_tuple(
                self._cloned_model.predict_function.outputs,
                name='outfeed-enqueue-predict',
            )
        ]
      else:
        assert False, 'Unexpected execution mode: %s' % self.execution_mode

    # Capture outfeed metadata computed during the rewrite.
    self._outfeed_spec = None

    # Generate our TPU operations using `tpu.split_compile_and_replicate`.
    # `compile_op` can be used to test the TPU model compiles before execution.
    # `execute_op` replicates `_model_fn` `num_replicas` times, with each shard
    # running on a different logical core.
    compile_op, execute_op = tpu.split_compile_and_replicate(
        _model_fn, inputs=[[] for _ in range(self._tpu_assignment.num_towers)])

    # Generate CPU side operations to enqueue features/labels and dequeue
    # outputs from the model call.
    sized_infeed = infeed_manager.build_infeed_from_input_specs(
        input_specs, self.execution_mode)
    # Build output ops.
    outfeed_op = []
    for shard_id in range(self._tpu_assignment.num_towers):
      with ops.device(
          '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
        outfeed_op.extend(
            tpu_ops.outfeed_dequeue_tuple(
                dtypes=[spec.dtype for spec in self._outfeed_spec],
                shapes=[spec.shape for spec in self._outfeed_spec],
                name='outfeed-dequeue-%s-%d' % (self.execution_mode, shard_id),
                device_ordinal=shard_id))

    return TPUModelOp(
        compile_op,
        execute_op,
        infeed_tensors=sized_infeed.sharded_infeed_tensors,
        infeed_op=sized_infeed.infeed_ops,
        outfeed_op=outfeed_op)

  def _test_model_compiles(self, tpu_model_ops):
    """Verifies that the given TPUModelOp can be compiled via XLA.

    Runs only the compile op and parses the returned CompilationResultProto;
    raises RuntimeError if the proto carries an error message.
    """
    logging.info('Started compiling')
    start_time = time.time()

    result = K.get_session().run(tpu_model_ops.compile_op)
    proto = tpu_compilation_result.CompilationResultProto()
    proto.ParseFromString(result)
    if proto.status_error_message:
      raise RuntimeError('Compilation failed: {}'.format(
          proto.status_error_message))

    end_time = time.time()
    logging.info('Finished compiling. Time elapsed: %s secs',
                 end_time - start_time)

  def _lookup_infeed_manager(self, inputs):
    """Return an existing manager, or construct a new InfeedManager for inputs.

    _lookup_infeed_manager will return an existing InfeedManager if one has been
    previously assigned for this model and input. If not, it will construct a
    new TPUNumpyInfeedManager.

    Args:
      inputs: A NumPy input to the model.

    Returns:
      A `TPUInfeedManager` object to manage infeeds for this input.
    """
    if inputs is None:
      return None

    # The registry keys by object identity of the first input array, since
    # numpy arrays are not hashable.
    for x, mgr in self.model._numpy_to_infeed_manager_list:
      if inputs[0] is x:
        return mgr

    return TPUNumpyInfeedManager(self.model._tpu_assignment)

  def _tpu_model_ops_for_input_specs(self, input_specs, infeed_manager):
    """Looks up the corresponding `TPUModelOp` for a given `input_specs`.

    It instantiates a new copy of the model for each unique input shape.

    Args:
      input_specs: The specification of the inputs to train on.
      infeed_manager: The infeed manager responsible for feeding in data.

    Returns:
      A `TPUModelOp` instance that can be used to execute a step of the model.
    """
    if input_specs is None or infeed_manager is None:
      # Note: this condition is possible during the prologue or epilogue of the
      # pipelined loop.
      return None

    # XLA requires every operation in the graph has a fixed shape. To
    # handle varying batch sizes we recompile a new sub-graph for each
    # unique input shape.
    shape_key = tuple([tuple(spec.shape.as_list()) for spec in input_specs])
    if shape_key not in self._compilation_cache:
      logging.info(
          'New input shapes; (re-)compiling: mode=%s '
          '(# of cores %d), %s', self.execution_mode,
          self._tpu_assignment.num_towers, input_specs)
      new_tpu_model_ops = self._specialize_model(input_specs,
                                                 infeed_manager)
      self._compilation_cache[shape_key] = new_tpu_model_ops
      # Fail fast: surface XLA compilation errors at build time, not on the
      # first training step.
      self._test_model_compiles(new_tpu_model_ops)

    return self._compilation_cache[shape_key]

  def _construct_input_tensors_and_inputs(self, inputs):
    """Returns input tensors and numpy array inputs corresponding to `inputs`.

    Args:
      inputs: NumPy inputs.

    Returns:
      A tuple of `input_tensors`, and `inputs`.
    """
    if inputs is None:
      # Note: this condition is possible during the prologue or epilogue of the
      # pipelined loop.
      return None, None

    if isinstance(inputs[-1], int):
      # Remove the learning_phase flag at the end. We currently hard code the
      # learning_phase in TPUFunction.
      inputs = inputs[:-1]

    if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN or
        self.execution_mode == model_fn_lib.ModeKeys.EVAL):
      # Strip sample weight from inputs.
      input_tensors = self.model._feed_inputs + self.model._feed_targets
    else:
      input_tensors = self.model._feed_inputs

    inputs = inputs[:len(input_tensors)]
    # Prepend the per-core id tensor/array so each core can identify itself
    # for cross-replica concatenation.
    input_tensors, inputs = (
        _inject_tpu_inputs_for_infeed(
            self._tpu_assignment, self.execution_mode,
            self._core_id_place_holder, input_tensors, inputs))
    return input_tensors, inputs

  def _process_outputs(self, outfeed_outputs):
    """Processes the outputs of a model function execution.

    Args:
      outfeed_outputs: The sharded outputs of the TPU computation.

    Returns:
      The aggregated outputs of the TPU computation to be used in the rest of
      the model execution.
    """
    # TODO(xiejw): Decide how to reduce outputs, or discard all but first.
    if self.execution_mode == model_fn_lib.ModeKeys.PREDICT:
      # Predict: each replica holds a distinct slice of the batch, so gather
      # per-output groups across replicas and concatenate them.
      outputs = [[] for _ in range(len(self._outfeed_spec))]
      outputs_per_replica = len(self._outfeed_spec)

      for i in range(self._tpu_assignment.num_towers):
        output_group = outfeed_outputs[i * outputs_per_replica:(i + 1) *
                                       outputs_per_replica]
        for j in range(outputs_per_replica):
          outputs[j].append(output_group[j])

      return [np.concatenate(group) for group in outputs]
    else:
      # Train/eval: keep only the first replica's copy of the outputs.
      return outfeed_outputs[:len(outfeed_outputs) //
                             self._tpu_assignment.num_towers]

  def __call__(self, inputs):
    """__call__ executes the function on the computational hardware.

    It handles executing infeed, and preprocessing in addition to executing the
    model on the TPU hardware.

    Note: `__call__` has a sibling method `pipeline_run` which performs the same
    operations, but with software pipelining.

    Args:
      inputs: The inputs to use to train.

    Returns:
      The output of the computation for the given mode it is executed in.

    Raises:
      RuntimeError: If there is an inappropriate use of the function.
    """
    assert isinstance(inputs, list)

    infeed_manager = self._lookup_infeed_manager(inputs)
    input_tensors, inputs = self._construct_input_tensors_and_inputs(inputs)
    infeed_instance = infeed_manager.make_infeed_instance(inputs)
    del inputs  # To avoid accident usage.
    input_specs = infeed_instance.make_input_specs(input_tensors)
    tpu_model_ops = self._tpu_model_ops_for_input_specs(input_specs,
                                                        infeed_manager)
    infeed_dict = infeed_instance.make_feed_dict(tpu_model_ops)

    # Initialize our TPU weights on the first compile.
    self.model._initialize_weights(self._cloned_model)

    # Run infeed, the TPU step, and outfeed dequeue in a single session call.
    _, _, outfeed_outputs = K.get_session().run([
        tpu_model_ops.infeed_op, tpu_model_ops.execute_op,
        tpu_model_ops.outfeed_op
    ], infeed_dict)
    return self._process_outputs(outfeed_outputs)

  def pipeline_run(self, cur_step_inputs, next_step_inputs):
    """pipeline_run executes the function on the computational hardware.

    pipeline_run performs the same computation as __call__, however it runs the
    infeed in a software pipelined fashion compared to the on-device execution.

    Note: it is the responsibility of the caller to call `pipeline_run` in the
    following sequence:
      - Once with `cur_step_inputs=None` and `next_step_inputs=list(...)`
      - `n` times with `cur_step_inputs` and `next_step_inputs` as `list`s
      - Once with `cur_step_inputs=list(...)` and `next_step_inputs=None`

    Additionally, it is the responsibility of the caller to pass
    `next_step_inputs` as `cur_step_inputs` on the next invocation of
    `pipeline_run`.

    Args:
      cur_step_inputs: The current step's inputs.
      next_step_inputs: The next step's inputs.

    Returns:
      The output of the computation for the given mode it is executed in.

    Raises:
      RuntimeError: If there is an inappropriate use of the function.
    """
    # Software pipelined case.
    next_step_infeed_manager = self._lookup_infeed_manager(next_step_inputs)
    cur_step_infeed_manager = self._lookup_infeed_manager(cur_step_inputs)

    if (next_step_infeed_manager is not None and
        cur_step_infeed_manager is not None):
      assert type(next_step_infeed_manager) is type(cur_step_infeed_manager)

    next_input_tensors, next_step_inputs = (
        self._construct_input_tensors_and_inputs(next_step_inputs))
    cur_input_tensors, cur_step_inputs = (
        self._construct_input_tensors_and_inputs(cur_step_inputs))

    cur_infeed_instance = None
    if cur_step_infeed_manager:
      cur_infeed_instance = cur_step_infeed_manager.make_infeed_instance(
          cur_step_inputs)
    next_infeed_instance = None
    if next_step_infeed_manager:
      next_infeed_instance = next_step_infeed_manager.make_infeed_instance(
          next_step_inputs)

    del cur_step_inputs  # Avoid accidental re-use.
    del next_step_inputs  # Avoid accidental re-use.

    cur_tpu_model_ops = None
    next_tpu_model_ops = None
    infeed_dict = None

    if cur_infeed_instance and cur_input_tensors and cur_step_infeed_manager:
      cur_input_specs = cur_infeed_instance.make_input_specs(cur_input_tensors)
      cur_tpu_model_ops = self._tpu_model_ops_for_input_specs(
          cur_input_specs, cur_step_infeed_manager)

    if (next_infeed_instance and next_input_tensors and
        next_step_infeed_manager):
      next_input_specs = next_infeed_instance.make_input_specs(
          next_input_tensors)
      next_tpu_model_ops = self._tpu_model_ops_for_input_specs(
          next_input_specs, next_step_infeed_manager)
      infeed_dict = next_infeed_instance.make_feed_dict(next_tpu_model_ops)

    # Initialize our TPU weights on the first compile.
    self.model._initialize_weights(self._cloned_model)

    # Steady state: enqueue the next step's infeed while executing (and
    # dequeuing outfeed for) the current step.
    if next_tpu_model_ops and cur_tpu_model_ops:
      _, _, outfeed_outputs = K.get_session().run([
          next_tpu_model_ops.infeed_op, cur_tpu_model_ops.execute_op,
          cur_tpu_model_ops.outfeed_op
      ], infeed_dict)
      return self._process_outputs(outfeed_outputs)

    # Epilogue: nothing left to enqueue, just drain the current step.
    if cur_tpu_model_ops:
      _, outfeed_outputs = K.get_session().run(
          [cur_tpu_model_ops.execute_op, cur_tpu_model_ops.outfeed_op])
      return self._process_outputs(outfeed_outputs)

    # Prologue: prime the pipeline with the first step's infeed only.
    if next_tpu_model_ops:
      K.get_session().run(next_tpu_model_ops.infeed_op, infeed_dict)
      return None
    raise RuntimeError('Internal error: both current & next tpu_model_ops '
                       'were None')
class KerasTPUModel(models.Model):
"""TPU compatible Keras model wrapper."""
  def __init__(self, cpu_model, strategy):
    """Wraps `cpu_model` for TPU execution.

    Args:
      cpu_model: A (possibly already compiled) `tf.keras.Model` built on the
        CPU; its inputs/outputs/name seed this wrapper.
      strategy: A TPU distribution strategy providing the cluster resolver
        and the model-to-core assignment.
    """
    # Deliberately skip `models.Model.__init__` and call its parent instead:
    # this wrapper manages compilation/distribution state itself below.
    super(models.Model, self).__init__(  # pylint: disable=bad-super-call
        inputs=cpu_model.inputs,
        outputs=cpu_model.outputs,
        name=cpu_model.name,
    )
    # Create a mapping from numpy arrays to infeed managers.
    # Note: uses a list of tuples instead of a map because numpy arrays are
    # not hashable.
    self._numpy_to_infeed_manager_list = []
    # Add distribution specific arguments since we don't call the Model init.
    self._distribution_strategy = None
    self._compile_distribution = None
    # Keras-internal function slots, normally created by `Model.__init__`.
    self.predict_function = None
    self.test_function = None
    self.train_function = None
    self._fit_function = None
    self._eval_function = None
    self._stateful_metric_functions = []
    cluster_resolver = strategy._tpu_cluster_resolver
    self._tpu_name_or_address = cluster_resolver.get_master()
    self._cpu_model = cpu_model
    self._tpu_assignment = strategy._make_assignment_for_model(cpu_model)
    self._tpu_model = None
    self._tpu_weights_initialized = False
    # If the input CPU model has already been compiled, compile our TPU model
    # immediately.
    if self._cpu_model.optimizer:
      self.compile(
          self._cpu_model.optimizer,
          self._cpu_model.loss,
          self._cpu_model._compile_metrics,
          self._cpu_model.loss_weights,
          self._cpu_model.sample_weight_mode,
          self._cpu_model._compile_weighted_metrics,
          self._cpu_model.target_tensors,
      )
    # This flag must be disabled upon model mutation, such as changing the model
    # layers or recompiling the model to use a different optimizer. New function
    # definitions are generated whenever this flag is disabled, ensuring that
    # internal graph functions are always using the current model structure.
    #
    # Requires declaration here because this constructor skips the
    # Model constructor.
    self._built_graph_functions = False
def get_config(self):
return {
'cpu_model': self._cpu_model,
'tpu_name_or_address': self._tpu_name_or_address,
'tpu_assignment': self._tpu_assignment,
}
def compile(self,
optimizer,
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
**kwargs):
if sample_weight_mode:
raise ValueError('sample_weight_mode not supported for TPU execution.')
if weighted_metrics:
raise ValueError('weighted_metrics not supported for TPU execution.')
if target_tensors:
raise ValueError('target_tensors is not supported for TPU execution.')
self._cpu_model.compile(
_clone_optimizer(optimizer), loss,
metrics_module.clone_metrics(metrics), loss_weights, sample_weight_mode,
metrics_module.clone_metrics(weighted_metrics), target_tensors,
**kwargs)
super(KerasTPUModel, self).compile(optimizer, loss, metrics, loss_weights,
sample_weight_mode, weighted_metrics,
target_tensors, **kwargs)
  def fit(self,
          x=None,
          y=None,
          batch_size=None,
          epochs=1,
          verbose=1,
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0,
          steps_per_epoch=None,
          validation_steps=None,
          **kwargs):
    """Trains the model on TPU.

    Mirrors `tf.keras.Model.fit`, with two TPU-specific differences: `x` and
    `validation_data` may be zero-argument callables returning a
    `tf.data.Dataset` (routed through a `TPUDatasetInfeedManager`), and a
    private `_pipeline` kwarg selects the software-pipelined training loop
    (the default) versus the plain Keras loop.
    """
    if context.executing_eagerly():
      raise EnvironmentError('KerasTPUModel currently does not support eager '
                             'mode.')
    with _tpu_session_context():
      assert not self._numpy_to_infeed_manager_list  # Ensure empty.
      infeed_managers = []  # Managers to clean up at the end of the fit call.
      if isinstance(x, dataset_ops.DatasetV2):
        # TODO(b/111413240): Support taking a tf.data.Dataset directly.
        raise ValueError(
            'Taking a Dataset directly is not yet supported. Please '
            'wrap your dataset construction code in a function and '
            'pass that to fit instead. For examples, see: '
            'https://github.com/tensorflow/tpu/tree/master/models/experimental'
            '/keras')
      if callable(x):
        # Build the dataset on the TPU worker's CPU so the infeed reads data
        # close to the device.
        with ops.device(
            '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
          dataset = x()
          if steps_per_epoch is None:
            raise ValueError('When using tf.data as input to a model, you '
                             'should specify the steps_per_epoch argument.')
          if y is not None:
            raise ValueError('When using tf.data as input to a model, y must '
                             'be None')
          infeed_manager = TPUDatasetInfeedManager(
              dataset, self._tpu_assignment, model_fn_lib.ModeKeys.TRAIN)
          # Use dummy numpy inputs for the rest of Keras' shape checking. We
          # intercept them when building the model.
          x = infeed_manager.dummy_x
          y = infeed_manager.dummy_y
          infeed_managers.append((x, infeed_manager))
      if isinstance(validation_data, dataset_ops.DatasetV2):
        # TODO(b/111413240): Support taking a tf.data.Dataset directly.
        raise ValueError(
            'Taking a Dataset directly is not yet supported. Please '
            'wrap your dataset construction code in a function and '
            'pass that to fit instead. For examples, see: '
            'https://github.com/tensorflow/tpu/tree/master/models/experimental'
            '/keras')
      if callable(validation_data):
        dataset = validation_data()
        if validation_steps is None:
          raise ValueError('When using tf.data as validation for a model, you '
                           'should specify the validation_steps argument.')
        infeed_manager = TPUDatasetInfeedManager(dataset, self._tpu_assignment,
                                                 model_fn_lib.ModeKeys.EVAL)
        # Use dummy numpy inputs for the rest of Keras' shape checking. We
        # intercept them when building the model.
        val_x = infeed_manager.dummy_x
        val_y = infeed_manager.dummy_y
        infeed_managers.append((val_x, infeed_manager))
        validation_data = (val_x, val_y)
      self._numpy_to_infeed_manager_list = infeed_managers
      try:
        # `_pipeline` is an internal escape hatch: False falls back to the
        # regular (non-pipelined) Keras fit loop.
        pipeline = kwargs.get('_pipeline', True)
        if '_pipeline' in kwargs:
          kwargs.pop('_pipeline')
        if not pipeline:
          logging.info('Running non-pipelined training loop (`_pipeline=%s`).',
                       pipeline)
          return super(KerasTPUModel, self).fit(
              x, y, batch_size, epochs, verbose, callbacks, validation_split,
              validation_data, shuffle, class_weight, sample_weight,
              initial_epoch, steps_per_epoch, validation_steps, **kwargs)
        return self._pipeline_fit(x, y, batch_size, epochs, verbose, callbacks,
                                  validation_split, validation_data, shuffle,
                                  class_weight, sample_weight, initial_epoch,
                                  steps_per_epoch, validation_steps, **kwargs)
      finally:
        # Always unregister the infeed managers, even on error, so later
        # calls start from a clean slate (see assert above).
        self._numpy_to_infeed_manager_list = []
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None):
original_numpy_to_infeed_manager_list = []
if self._numpy_to_infeed_manager_list:
# evaluate call may be executed as callbacks during the training. In this
# case, _numpy_to_infeed_manager_list is not empty, so save it for
# recovery at the end of evaluate call.
original_numpy_to_infeed_manager_list = self._numpy_to_infeed_manager_list
self._numpy_to_infeed_manager_list = []
with _tpu_session_context():
# Managers to clean up at the end of the evaluate call.
infeed_managers = []
if isinstance(x, dataset_ops.DatasetV2):
# TODO(b/111413240): Support taking a tf.data.Dataset directly.
raise ValueError(
'Taking a Dataset directly is not yet supported. Please '
'wrap your dataset construction code in a function and '
'pass that to fit instead. For examples, see: '
'https://github.com/tensorflow/tpu/tree/master/models/experimental'
'/keras')
if callable(x):
dataset = x()
if steps is None:
raise ValueError('When using tf.data as input to a model, you '
'should specify the steps argument.')
if y is not None:
raise ValueError('When using tf.data as input to a model, y must be '
'None')
infeed_manager = TPUDatasetInfeedManager(dataset, self._tpu_assignment,
model_fn_lib.ModeKeys.EVAL)
# Use dummy numpy inputs for the rest of Keras' shape checking. We
# intercept them when building the model.
x = infeed_manager.dummy_x
y = infeed_manager.dummy_y
infeed_managers.append((x, infeed_manager))
self._numpy_to_infeed_manager_list = infeed_managers
try:
return super(KerasTPUModel, self).evaluate(x, y, batch_size, verbose,
sample_weight, steps)
finally:
self._numpy_to_infeed_manager_list = (
original_numpy_to_infeed_manager_list)
def _pipeline_fit(self, x, y, batch_size, epochs, verbose, callbacks,
validation_split, validation_data, shuffle, class_weight,
sample_weight, initial_epoch, steps_per_epoch,
validation_steps, **kwargs):
# Similar to super.fit(...), but modified to support software pipelining.
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
# Legacy support
if 'nb_epoch' in kwargs:
logging.warning('The `nb_epoch` argument in `fit` has been renamed '
'`epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
# Validate and standardize user data
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps_per_epoch',
steps=steps_per_epoch,
validation_split=validation_split)
# Prepare validation data
val_x, val_y, val_sample_weights = self._prepare_validation_data(
validation_data, validation_split, validation_steps, x, y,
sample_weights, batch_size)
return self._pipeline_fit_loop(
x,
y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
  def _pipeline_fit_loop(self,
                         inputs,
                         targets,
                         sample_weights,
                         batch_size,
                         epochs,
                         verbose,
                         callbacks,
                         val_inputs,
                         val_targets,
                         val_sample_weights,
                         shuffle,
                         initial_epoch,
                         steps_per_epoch,
                         validation_steps):
    """Epoch loop for pipelined fit; dispatches to step- or sample-wise loops.

    Mirrors Keras' internal fit loop: configures callbacks, runs one of the
    two inner loops per epoch, then fires epoch-end callbacks and returns
    `self.history`.
    """
    self._make_train_function()
    sample_weights = sample_weights or []
    val_sample_weights = val_sample_weights or []
    # When the learning phase is a placeholder (not a fixed int), append the
    # training flag (1) to the feed values.
    if not isinstance(K.learning_phase(), int):
      ins = inputs + targets + sample_weights + [1]
    else:
      ins = inputs + targets + sample_weights
    do_validation = False
    if val_inputs:
      do_validation = True
      if (steps_per_epoch is None and verbose and inputs and
          hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
        print('Train on %d samples, validate on %d samples' %
              (inputs[0].shape[0], val_inputs[0].shape[0]))
    if validation_steps:
      do_validation = True
      if steps_per_epoch is None:
        raise ValueError('Can only use `validation_steps` when doing step-wise '
                         'training, i.e. `steps_per_epoch` must be set.')
    # `None` when training step-wise (no sample count is knowable).
    num_training_samples = training_utils.check_num_samples(
        ins, batch_size, steps_per_epoch, 'steps_per_epoch')
    count_mode = 'steps' if steps_per_epoch else 'samples'
    callbacks = cbks.configure_callbacks(
        callbacks,
        self,
        do_validation=do_validation,
        batch_size=batch_size,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        samples=num_training_samples,
        verbose=verbose,
        count_mode=count_mode)
    if num_training_samples is not None:
      index_array = np.arange(num_training_samples)
    # To prevent a slowdown, we find beforehand the arrays that need conversion.
    feed = self._feed_inputs + self._feed_targets + self._feed_sample_weights
    indices_for_conversion_to_dense = []
    for i in range(len(feed)):
      # scipy sparse inputs feeding dense placeholders must be densified
      # batch-by-batch in the inner loop.
      if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
        indices_for_conversion_to_dense.append(i)
    callbacks.on_train_begin()
    for epoch in range(initial_epoch, epochs):
      # Reset stateful metrics
      for m in self.metrics:
        m.reset_states()
      # Update callbacks
      callbacks.on_epoch_begin(epoch)
      epoch_logs = {}
      if steps_per_epoch is not None:
        # Step-wise fit loop.
        self._pipeline_fit_loop_step_wise(
            ins=ins,
            callbacks=callbacks,
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            do_validation=do_validation,
            val_inputs=val_inputs,
            val_targets=val_targets,
            val_sample_weights=val_sample_weights,
            validation_steps=validation_steps,
            epoch_logs=epoch_logs)
      else:
        # Sample-wise fit loop.
        self._pipeline_fit_loop_sample_wise(
            ins=ins,
            callbacks=callbacks,
            index_array=index_array,
            shuffle=shuffle,
            batch_size=batch_size,
            num_training_samples=num_training_samples,
            indices_for_conversion_to_dense=indices_for_conversion_to_dense,
            do_validation=do_validation,
            val_inputs=val_inputs,
            val_targets=val_targets,
            val_sample_weights=val_sample_weights,
            validation_steps=validation_steps,
            epoch_logs=epoch_logs)
      callbacks.on_epoch_end(epoch, epoch_logs)
      if callbacks.model.stop_training:
        break
    callbacks.on_train_end()
    return self.history
  def _pipeline_fit_loop_sample_wise(self,
                                     ins,
                                     callbacks,
                                     index_array,
                                     shuffle,
                                     batch_size,
                                     num_training_samples,
                                     indices_for_conversion_to_dense,
                                     do_validation,
                                     val_inputs,
                                     val_targets,
                                     val_sample_weights,
                                     validation_steps,
                                     epoch_logs):
    """Runs one epoch of sample-wise fit with software-pipelined infeed.

    Batch `k`'s infeed is enqueued while batch `k-1` executes, so all batch
    callbacks and logs run one step behind the infeed; the loop body handles
    batch `k-1`'s results while submitting batch `k`, and the final batch is
    drained after the loop.
    """
    f = self.train_function
    if shuffle == 'batch':
      index_array = training_utils.batch_shuffle(index_array, batch_size)
    elif shuffle:
      np.random.shuffle(index_array)
    batches = make_batches(num_training_samples, batch_size)
    ins_last_batch = None
    last_batch_logs = None
    batch_index = 0
    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      try:
        if isinstance(ins[-1], int):
          # Do not slice the training phase flag.
          ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
        else:
          ins_batch = slice_arrays(ins, batch_ids)
      except TypeError:
        raise TypeError('TypeError while preparing batch. If using HDF5 '
                        'input data, pass shuffle="batch".')
      # Pipeline batch logs
      next_batch_logs = {}
      next_batch_logs['batch'] = batch_index
      next_batch_logs['size'] = len(batch_ids)
      if batch_index > 0:
        # Callbacks operate one step behind in software pipeline.
        callbacks.on_batch_begin(batch_index - 1, last_batch_logs)
      # Densify any scipy-sparse arrays that feed dense placeholders.
      for i in indices_for_conversion_to_dense:
        ins_batch[i] = ins_batch[i].toarray()
      outs = f.pipeline_run(
          cur_step_inputs=ins_last_batch, next_step_inputs=ins_batch)
      ins_last_batch = ins_batch
      if batch_index == 0:
        # Pipeline prologue: only an infeed was submitted; no results yet.
        assert outs is None
      else:
        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(self.metrics_names, outs):
          last_batch_logs[l] = o  # pylint: disable=unsupported-assignment-operation
        callbacks.on_batch_end(batch_index - 1, last_batch_logs)
        if callbacks.model.stop_training:
          return
      last_batch_logs = next_batch_logs
    # Final batch
    callbacks.on_batch_begin(batch_index, last_batch_logs)
    # Drain the pipeline: execute the last batch without queueing another.
    outs = f.pipeline_run(cur_step_inputs=ins_last_batch, next_step_inputs=None)
    if not isinstance(outs, list):
      outs = [outs]
    for l, o in zip(self.metrics_names, outs):
      last_batch_logs[l] = o
    callbacks.on_batch_end(batch_index, last_batch_logs)
    if callbacks.model.stop_training:
      return
    if do_validation:
      val_outs = training_arrays.test_loop(
          self,
          val_inputs,
          val_targets,
          sample_weights=val_sample_weights,
          batch_size=batch_size,
          steps=validation_steps,
          verbose=0)
      if not isinstance(val_outs, list):
        val_outs = [val_outs]
      # Same labels assumed.
      for l, o in zip(self.metrics_names, val_outs):
        epoch_logs['val_' + l] = o
def _pipeline_fit_loop_step_wise(self,
ins,
callbacks,
steps_per_epoch,
epochs,
do_validation,
val_inputs,
val_targets,
val_sample_weights,
validation_steps,
epoch_logs):
f = self.train_function
# Loop prologue
try:
outs = f.pipeline_run(cur_step_inputs=None, next_step_inputs=ins)
assert outs is None # Function shouldn't return anything!
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data on the first step '
'of the epoch, preventing further training. Check to '
'make sure your paths are correct and you have '
'permissions to read the files. Skipping validation')
for step_index in range(steps_per_epoch):
batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
try:
if step_index < steps_per_epoch - 1:
next_step_inputs = ins
else:
next_step_inputs = None
outs = f.pipeline_run(
cur_step_inputs=ins, next_step_inputs=next_step_inputs)
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data; '
'interrupting training. Make sure that your '
'dataset can generate at least `steps_per_batch * '
'epochs` batches (in this case, %d batches). You '
'may need to use the repeat() function when '
'building your dataset.' % steps_per_epoch * epochs)
break
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(self.metrics_names, outs):
batch_logs[l] = o
callbacks.on_batch_end(step_index, batch_logs)
if callbacks.model.stop_training:
break
if do_validation:
val_outs = training_arrays.test_loop(
self,
val_inputs,
val_targets,
sample_weights=val_sample_weights,
steps=validation_steps,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(self.metrics_names, val_outs):
epoch_logs['val_' + l] = o
def _prepare_validation_data(self, validation_data, validation_split,
validation_steps, x, y, sample_weights,
batch_size):
"""Prepares the validation dataset.
Args:
validation_data: The validation data (if provided)
validation_split: The validation split (if provided)
validation_steps: The validation steps (if provided)
x: The main training data x (if provided)
y: The main training data y (if provided)
sample_weights: The sample weights (if provided)
batch_size: The training batch size (if provided)
Returns:
A 3-tuple of (val_x, val_y, val_sample_weights).
Raises:
ValueError: If the provided arguments are not compatible with
`KerasTPUModel`.
"""
# Note: this is similar to a section of $tf/python/keras/engine/training.py
# It differns in that tf.data objects are not allowed to be passed directly.
# Additionally, it handles validating shapes & types appropriately for use
# in TPUs.
if validation_data:
if (isinstance(validation_data, iterator_ops.Iterator) or
isinstance(validation_data, iterator_ops.EagerIterator) or
isinstance(validation_data, dataset_ops.DatasetV2)):
raise ValueError('KerasTPUModel cannot handle a Dataset or Iterator '
'for validation_data. Please instead pass a function '
'that returns a `tf.data.Dataset`.')
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError('When passing a `validation_data` argument, it must '
'contain either 2 items (x_val, y_val), or 3 items '
'(x_val, y_val, val_sample_weights). However we '
'received `validation_data=%s`' % validation_data)
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
batch_size=batch_size,
steps=validation_steps)
elif validation_split and 0. < validation_split < 1.:
if training_utils.has_symbolic_tensors(x):
raise ValueError('If your data is in the form of symbolic tensors, you '
'cannot use `validation_split`.')
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
sample_weights, val_sample_weights = (
slice_arrays(sample_weights, 0, split_at),
slice_arrays(sample_weights, split_at)
)
elif validation_steps:
val_x = []
val_y = []
val_sample_weights = []
else:
val_x = None
val_y = None
val_sample_weights = None
return val_x, val_y, val_sample_weights
def predict(self,
            x,
            batch_size=None,
            verbose=0,
            steps=None,
            max_queue_size=10,
            workers=1,
            use_multiprocessing=False):
  """Run inference inside an initialized TPU session via the base class."""
  with _tpu_session_context():
    predict_kwargs = dict(
        batch_size=batch_size,
        verbose=verbose,
        steps=steps,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing)
    return super(KerasTPUModel, self).predict(x, **predict_kwargs)
@property
def optimizer(self):
  """The active optimizer: the TPU clone's when present, else the CPU model's."""
  target_model = self._tpu_model if self._tpu_model else self._cpu_model
  return target_model.optimizer

@optimizer.setter
def optimizer(self, optimizer):
  # Note: the getter above reads the model's optimizer, not this attribute.
  self._optimizer = optimizer
@property
def metrics(self):
  """Metrics from the TPU clone when present, else the stateful metric fns."""
  return (self._tpu_model.metrics
          if self._tpu_model else self._stateful_metric_functions)

@metrics.setter
def metrics(self, metrics):
  self._stateful_metric_functions = metrics
def _make_train_function(self):
  """Build (once) and cache the TRAIN-mode `TPUFunction`."""
  if self.train_function:
    return self.train_function
  self.train_function = TPUFunction(
      self, model_fn_lib.ModeKeys.TRAIN, tpu_assignment=self._tpu_assignment)
  return self.train_function
def _make_test_function(self):
  """Build (once) and cache the EVAL-mode `TPUFunction`."""
  if self.test_function:
    return self.test_function
  self.test_function = TPUFunction(
      self, model_fn_lib.ModeKeys.EVAL, tpu_assignment=self._tpu_assignment)
  return self.test_function
def _make_fit_function(self):
  """Build (once) and cache the TRAIN-mode `TPUFunction` used for fitting."""
  if self._fit_function:
    return self._fit_function
  self._fit_function = TPUFunction(
      self, model_fn_lib.ModeKeys.TRAIN, tpu_assignment=self._tpu_assignment)
  return self._fit_function
def _make_eval_function(self):
  """Build (once) and cache the EVAL-mode `TPUFunction` used for evaluation."""
  if self._eval_function:
    return self._eval_function
  self._eval_function = TPUFunction(
      self, model_fn_lib.ModeKeys.EVAL, tpu_assignment=self._tpu_assignment)
  return self._eval_function
def _make_predict_function(self):
  """Build (once) and cache the PREDICT-mode `TPUFunction`."""
  if self.predict_function:
    return self.predict_function
  self.predict_function = TPUFunction(
      self, model_fn_lib.ModeKeys.PREDICT, tpu_assignment=self._tpu_assignment)
  return self.predict_function
def _initialize_weights(self, cloned_model):
  """Initialize TPU weights.

  This is called on the first compile of the TPU model (first call to
  fit/predict/evaluate).

  Args:
    cloned_model: `keras.Model`, TPU model to initialize.
  """
  if self._tpu_weights_initialized:
    return

  self._tpu_model = cloned_model
  self._tpu_weights_initialized = True

  # Snapshot CPU-side state before touching the TPU clone.
  weights = self._cpu_model.get_weights()
  # TFOptimizer wrappers expose no configurable hyperparameters.
  config = ({} if isinstance(self.cpu_optimizer, keras_optimizers.TFOptimizer)
            else self.cpu_optimizer.get_config())

  logging.info('Setting weights on TPU model.')
  cloned_model.set_weights(weights)
  if self._tpu_model.optimizer is None:
    # tpu_model may not be compiled, e.g., loading weights and then predict.
    return
  for key, value in six.iteritems(config):
    if key == 'name':
      continue
    target = getattr(self._tpu_model.optimizer, key)
    if not isinstance(target, variables.Variable):
      logging.warning('Cannot update non-variable config: %s', key)
      continue
    logging.info('CPU -> TPU %s: %s {%s}', key, value, K.get_value(target))
    K.get_session().run(target.assign(value))
@property
def cpu_optimizer(self):
  """The optimizer attached to the CPU-side model."""
  return self._cpu_model.optimizer
def sync_to_cpu(self):
  """Copy weights from the TPU model to the CPU model and return it.

  If TPU weights were never initialized there is nothing to copy, and the
  CPU model is returned unchanged.
  """
  if not self._tpu_weights_initialized:
    return self._cpu_model

  logging.info('Copying TPU weights to the CPU')
  tpu_weights = self._tpu_model.get_weights()

  # TFOptimizers have no configurable options
  config = ({} if isinstance(self.cpu_optimizer, keras_optimizers.TFOptimizer)
            else self._tpu_model.optimizer.get_config())

  self._cpu_model.set_weights(tpu_weights)

  for key, value in six.iteritems(config):
    logging.info('TPU -> CPU %s: %s', key, value)
    if key == 'name':
      continue
    target = getattr(self.cpu_optimizer, key)
    if not isinstance(target, variables.Variable):
      logging.warning('Cannot update non-variable config: %s', key)
      continue
    K.get_session().run(target.assign(value))

  return self._cpu_model
def get_weights(self):
  """Return weights from the CPU model after syncing it with the TPU."""
  synced_model = self.sync_to_cpu()
  return synced_model.get_weights()
def save_weights(self, *args, **kw):
  """Save weights from the CPU model after syncing it with the TPU."""
  synced_model = self.sync_to_cpu()
  return synced_model.save_weights(*args, **kw)
def save(self, *args, **kw):
  """Save the CPU model after syncing it with the TPU."""
  synced_model = self.sync_to_cpu()
  return synced_model.save(*args, **kw)
def set_weights(self, weights):
  """Set weights on the CPU model and force TPU re-initialization.

  We may not have a TPU model available if we haven't run fit/predict, so
  we can't directly set the TPU weights here. Instead, reset CPU model
  weights and force TPU re-initialization at the next call.
  """
  self._cpu_model.set_weights(weights)
  self._tpu_weights_initialized = False
def load_weights(self, filepath, by_name=False):
  """Load weights into the CPU model and force TPU re-initialization."""
  self._cpu_model.load_weights(filepath, by_name=by_name)
  self._tpu_weights_initialized = False
# pylint: disable=bad-continuation
def _validate_shapes(model):
"""Validate that all layers in `model` have constant shape."""
for layer in model.layers:
if isinstance(layer.input_shape, tuple):
input_shapes = [layer.input_shape]
else:
input_shapes = layer.input_shape
if isinstance(layer.output_shape, tuple):
output_shapes = [layer.output_shape]
else:
output_shapes = layer.output_shape
for shape in input_shapes + output_shapes:
for dim in shape[1:]:
if dim is None:
raise ValueError(
"""
Layer %(layer)s has a variable shape in a non-batch dimension. TPU models must
have constant shapes for all operations.
You may have to specify `input_length` for RNN/TimeDistributed layers.
Layer: %(layer)s
Input shape: %(input_shape)s
Output shape: %(output_shape)s
""" % {
'layer': layer,
'input_shape': layer.input_shape,
'output_shape': layer.output_shape
})
# pylint: enable=bad-continuation
@experimental
def tpu_model(model, strategy=None):
  """Copy `model` along with weights to the TPU.

  Returns a TPU model.

  Usage:
  ```
  a = Input(shape=(32,))
  b = Dense(32)(a)
  model = Model(inputs=a, outputs=b)

  # If `num_cores_per_host` is greater than one, batch parallelism will be used
  # to run on multiple TPU cores.
  strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
  model = keras_support.tpu_model(model, strategy)
  model.compile(
      optimizer=tf.train.GradientDescentOptimizer(learning_rate=1.0),
      ...)
  ```

  Args:
    model: A `tf.keras.Model` instance.
    strategy: `TPUDistributionStrategy`.  The strategy to use for replicating
      model across multiple TPU cores.

  Returns:
    A new `KerasTPUModel` instance.

  Raises:
    TypeError: If `strategy` is provided but is not a
      `TPUDistributionStrategy`.
  """
  _validate_shapes(model)
  # TODO(xiejw): Validate TPU model. TPUModel only?
  # TODO(xiejw): Validate replicas. Full or 1. Shall we allow subset?
  # TODO(xiejw): Adds reduction option.

  if strategy is None:
    strategy = TPUDistributionStrategy()
  else:
    if not isinstance(strategy, TPUDistributionStrategy):
      raise TypeError(
          '`strategy` must have type `tf.contrib.tpu.TPUDistributionStrategy`. '
          'Got: {}'.format(type(strategy)))

  # If the model has already been initialized, grab the optimizer configuration
  # and model weights before entering the TPU session.
  # This must happen *before* `setup_tpu_session` below swaps out the Keras
  # session: the existing weights/config live in the current session.
  if model.optimizer:
    if (isinstance(model.optimizer, keras_optimizers.Optimizer) and not
        isinstance(model.optimizer, keras_optimizers.TFOptimizer)):
      optimizer_config = model.optimizer.get_config()
    else:
      # TFOptimizer wrappers (and raw TF optimizers) are cloned by reference
      # in `_clone_optimizer`; no Keras config is available for them.
      optimizer_config = None
    model_weights = model.get_weights()
  else:
    model_weights = None

  setup_tpu_session(strategy._tpu_cluster_resolver)

  # Force initialization of the CPU model in the TPU session.
  cpu_model = models.clone_model(model)
  if model.optimizer:
    cpu_model.compile(
        _clone_optimizer(model.optimizer, optimizer_config),
        model.loss,
        metrics_module.clone_metrics(model._compile_metrics),
        model.loss_weights,
        model.sample_weight_mode,
        metrics_module.clone_metrics(model._compile_weighted_metrics),
    )

  if model_weights:
    # Restore the snapshot taken above into the freshly cloned model.
    cpu_model.set_weights(model_weights)
    cpu_model.reset_states()

  return KerasTPUModel(cpu_model=cpu_model, strategy=strategy)
Allow tf.contrib.tpu._prepare_validation_data to modify x,y
When the user passes a validation_split parameter, the samples allocated to the val_x, val_y populations should be removed from the training data set (x, y). In the _prepare_validation_data function, the validation-split branch already creates the correctly truncated x, y but does not return those modified x, y values back. As a result, the validation samples are present in both the training data set and the validation data set. The validation loss/metrics give artificially good results, since the model is also training on the samples in the validation set.
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""*Experimental* support for running Keras models on the TPU.
To use, wrap your model with the `keras_support.tpu_model` function.
Example usage:
```
image = tf.keras.layers.Input(shape=(28, 28, 3), name='image')
c1 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3))( image)
flattened = tf.keras.layers.Flatten()(c1)
logits = tf.keras.layers.Dense(10, activation='softmax')(flattened)
model = tf.keras.Model(inputs=[image], outputs=[logits])
resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu=tpu_name)
strategy = keras_support.TPUDistributionStrategy(resolver)
model = keras_support.tpu_model(model, strategy=strategy)
# Only TF optimizers are currently supported.
model.compile(optimizer=tf.train.AdamOptimizer(), ...)
# `images` and `labels` should be Numpy arrays. Support for tensor input
# (e.g. datasets) is planned.
model.fit(images, labels)
```
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import re
import sys
import time
import numpy as np
import six
from tensorflow.contrib.cluster_resolver.python.training import tpu_cluster_resolver as tpu_cluster_resolver_lib
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.proto import compilation_result_pb2 as tpu_compilation_result
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import keras_tpu_variables
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers as keras_optimizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.keras.utils.generic_utils import make_batches
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
# TODO(b/114775106): temporary shim to optionally initialize the TPU
# This increases the odds our session is initialized, but shouldn't be needed.
# Cached result of `tpu.rewrite` used by `_maybe_initialize_tpu` as a cheap
# probe of whether the TPU system is already initialized for the current graph.
_TEST_REWRITE_OP = None
def _maybe_initialize_tpu(session):
  """Initialize the TPU if it has not already been initialized."""
  global _TEST_REWRITE_OP
  try:
    # Reuse the cached rewritten op to avoid another round of graph
    # construction/optimization, rebuilding only when the graph changed.
    rewrite_op = _TEST_REWRITE_OP
    if (rewrite_op is None or
        rewrite_op[0].graph != ops.get_default_graph()):

      def _probe_op():
        return constant_op.constant(1) + constant_op.constant(1)

      rewrite_op = tpu.rewrite(_probe_op)
      _TEST_REWRITE_OP = rewrite_op

    session.run(rewrite_op)
  except errors.FailedPreconditionError as _:
    # The probe failing indicates the TPU system is uninitialized.
    session.run(tpu.initialize_system())
@contextlib.contextmanager
def _tpu_session_context():
  """Context manager that initializes the TPU and resets bad sessions."""
  try:
    _maybe_initialize_tpu(K.get_session())
    yield
  except (errors.FailedPreconditionError, errors.AbortedError) as e:
    # The remote session is unusable; drop it so the next call starts fresh.
    K.clear_session()
    message = """
An error occurred connecting or initializing your TPU.

The session has been reset. re-run keras_to_tpu_model to create a new session.
""" + str(e)
    raise Exception(message)
def setup_tpu_session(cluster_resolver):
  """Construct or return a `tf.Session` connected to the given cluster.

  Creates a new session targeting the cluster's master, runs TPU system
  initialization in it, and installs it as the Keras backend session. If the
  current Keras session is already connected to the same master and was
  initialized by this function, it is reused.

  Args:
    cluster_resolver: A `TPUClusterResolver` describing the TPU cluster.
  """
  master = cluster_resolver.master()

  # Use the existing session if we're already connected to this TPU
  # N.B K.get_session() is a non-trivial operation, and may fail if the remote
  # session has been reset.
  try:
    default_session = K.get_session()
    # `_tpu_initialized` is a marker attribute set on sessions created below;
    # it distinguishes a session this function initialized from an arbitrary
    # default session that merely points at the same master.
    if (default_session._target == master and
        getattr(default_session, '_tpu_initialized', None)):
      return
  except errors.AbortedError as _:
    # We lost the remote session and need to re-initialize.
    logging.warning('Lost remote session: creating a new session.')

  cluster_spec = cluster_resolver.cluster_spec()
  config = config_pb2.ConfigProto(isolate_session_state=True)
  if cluster_spec:
    config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())

  tpu_session = tf_session.Session(target=master, config=config)
  tpu_session.run(tpu.initialize_system())
  tpu_session._tpu_initialized = True

  # N.B. We have to call `K.set_session()` AND set our session as the
  # TF default. `K.get_session()` surprisingly does not return the value
  # supplied by K.set_session otherwise.
  K.set_session(tpu_session)
# SciPy is an optional dependency; `issparse` is None when it is unavailable.
# NOTE(review): presumably used elsewhere in this file to detect
# `scipy.sparse` inputs — usage is outside this chunk.
try:
  from scipy.sparse import issparse  # pylint: disable=g-import-not-at-top
except ImportError:
  issparse = None
def get_tpu_system_metadata(tpu_cluster_resolver):
  """Retrieves TPU system metadata given a TPUClusterResolver."""
  master = tpu_cluster_resolver.master()

  # pylint: disable=protected-access
  cluster_spec = tpu_cluster_resolver.cluster_spec()
  cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None
  return tpu_system_metadata_lib._query_tpu_system_metadata(
      master, cluster_def=cluster_def, query_topology=False)
class TPUDistributionStrategy(object):
  """The strategy to run Keras model on TPU."""

  def __init__(self, tpu_cluster_resolver=None, using_single_core=False):
    """Construct a TPUDistributionStrategy.

    Args:
      tpu_cluster_resolver: Any instance of `TPUClusterResolver`. If None, will
        create one with '' as master address.
      using_single_core: Bool. This is a debugging option which might be
        removed in the future once the model replication functionality is
        mature enough. If `False` (default behavior), the system automatically
        finds the best configuration, in terms of number of TPU cores, for the
        model replication, typically using all available TPU cores. If `True`,
        model replication is forced onto a single core, i.e., no replication.

    Raises:
      Exception: No TPU Found on the given worker.
    """
    if tpu_cluster_resolver is None:
      tpu_cluster_resolver = tpu_cluster_resolver_lib.TPUClusterResolver('')

    metadata = get_tpu_system_metadata(tpu_cluster_resolver)
    self._tpu_metadata = metadata
    self._tpu_cluster_resolver = tpu_cluster_resolver
    self._num_cores = 1 if using_single_core else metadata.num_cores

    # Identify the TPU worker (used for enqueue/dequeue operations) by
    # scanning the device list for the first TPU core.
    job_pattern = re.compile('/job:([^/]+)')
    tpu_device = None
    for device in metadata.devices:
      if 'TPU:0' in device.name:
        tpu_device = device
        break
    if tpu_device is None:
      raise Exception('No TPU found on given worker.')
    self._worker_name = job_pattern.search(tpu_device.name).group(1)

  def _make_assignment_for_model(self, cpu_model):
    """Makes a `TPUAssignment` for the passed in `cpu_model`."""
    num_cores = self._num_cores
    if num_cores > 1 and cpu_model.stateful:
      logging.warning(
          'Model replication does not currently support stateful models. '
          'Degrading to a single core.')
      num_cores = 1

    return TPUAssignment(worker_name=self._worker_name, num_cores=num_cores)
class TPUAssignment(object):
  """Holds the TPU resource assignment for a concrete model.

  `TPUDistributionStrategy` is responsible for creating instances of
  `TPUAssignment`, so it can dynamically adjust `num_cores` based on the
  model and input batch sizes.
  """

  def __init__(self, worker_name, num_cores):
    self._worker_name = worker_name
    self._num_cores = num_cores

  @property
  def worker_name(self):
    """Name of the TPU worker job hosting enqueue/dequeue ops."""
    return self._worker_name

  @property
  def num_towers(self):
    # TODO(xiejw): Support automatically assign num_cores based on inputs.
    return self._num_cores
class TPUEmbedding(embeddings.Embedding):
  """TPU compatible embedding layer.

  The default Keras layer is not TPU compatible. This layer is a drop-in
  replacement: it has the same behavior and will work on CPU and GPU devices.
  """

  def build(self, input_shape):
    # TPU compilation requires static shapes (see `_validate_shapes`), so a
    # fixed input length/shape is mandatory.
    if input_shape[0] is None:
      raise ValueError(
          'TPUEmbeddings must have a fixed input_length or input shape.')
    return super(TPUEmbedding, self).build(input_shape)

  def call(self, inputs):
    if K.dtype(inputs) != 'int32':
      inputs = math_ops.cast(inputs, 'int32')

    # Implement the lookup as one_hot(ids) contracted with the embedding
    # matrix rather than the base class's lookup.
    # NOTE(review): presumably because a gather-based embedding lookup is not
    # supported when compiling for TPU — confirm.
    inputs = array_ops.one_hot(inputs, self.input_dim)
    return math_ops.tensordot(inputs, self.embeddings, 1)
def _cross_replica_concat(tensor, core_id, num_cores, name):
  """Concatenate `tensor` across cores.

  Args:
    tensor: The tensor to be concatenated. Must be [int32 and float32].
    core_id: Tensor indicating the current TPU core.
    num_cores: Python int. The total number of TPU cores in the system.
    name: The string name to print for debugging.

  Returns:
    The same concatenated Tensor on each core.

  Raises:
    TypeError: If the tensor's dtype is not bfloat16, float32 or int32.
  """
  input_dtype = tensor.dtype
  if input_dtype not in [dtypes.bfloat16, dtypes.float32, dtypes.int32]:
    raise TypeError('For model replication, only (bfloat16, float32 and int32) '
                    'is supported for model outputs and targets. Got {} for '
                    '{}.'.format(input_dtype, name))

  batch_size = tensor.shape[0]
  # The mask is 1.0 at this core's index and 0.0 elsewhere, so after the
  # cross-replica sum each core's contribution lands in its own slot of the
  # concatenated result.
  mask = math_ops.to_float(
      math_ops.equal(np.arange(num_cores, dtype=np.int32), core_id))
  mask = array_ops.reshape(mask, [num_cores] + [1] * tensor.shape.ndims)
  result = mask * math_ops.to_float(tensor)
  local_tensor_with_holes = array_ops.reshape(result,
                                              [-1] + result.shape.as_list()[2:])
  concat_tensor = tpu_ops.cross_replica_sum(local_tensor_with_holes)
  concat_tensor.set_shape((num_cores * batch_size,) + tuple(tensor.shape[1:]))

  # Bug fix: compare the result's *dtype* against the input dtype. The
  # original code compared the Tensor object itself to a DType
  # (`concat_tensor != input_dtype`), which was never equal, so the cast ran
  # unconditionally.
  if concat_tensor.dtype != input_dtype:
    concat_tensor = math_ops.cast(concat_tensor, input_dtype)
  return concat_tensor
class KerasCrossShardOptimizer(keras_optimizers.Optimizer):
  """An optimizer that averages gradients across TPU shards."""

  def __init__(self, opt, name='KerasCrossShardOptimizer'):
    """Construct a new cross-shard optimizer.

    Args:
      opt: An existing `Optimizer` to encapsulate.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "KerasCrossShardOptimizer".

    Raises:
      ValueError: If reduction is not a valid cross-shard reduction.
    """
    super(KerasCrossShardOptimizer, self).__init__()
    self._name = name
    self._opt = opt
    logging.info('KerasCrossShard: %s %s', self._opt, self._opt.weights)

  def get_updates(self, loss, params):
    # Patch the wrapped optimizer so the update ops it builds consume this
    # class's cross-shard-averaged gradients (see `get_gradients` below).
    self._opt.get_gradients = self.get_gradients
    return self._opt.get_updates(loss, params)

  def get_gradients(self, loss, params):
    # Sum each gradient over all shards, then divide by the shard count to
    # obtain the cross-shard mean gradient.
    num_shards = tpu_function.get_tpu_context().number_of_shards
    grads = super(KerasCrossShardOptimizer, self).get_gradients(loss, params)
    return [tpu_ops.cross_replica_sum(grad) / num_shards for grad in grads]

  def get_weights(self):
    return self._opt.get_weights()

  def get_config(self):
    return self._opt.get_config()

  # Defer remaining operations to the underlying optimizer
  def __getattr__(self, key):
    return getattr(self._opt, key)
class TPUModelOp(
    collections.namedtuple('TPUModelOp', [
        'compile_op', 'execute_op', 'infeed_tensors', 'infeed_op', 'outfeed_op'
    ])):
  """Ops and tensors for one compiled instance of a TPU model."""
def _valid_name(tensor_name):
"""Return a valid tensor name (strips '/', ':', etc)."""
return re.sub('[^a-zA-Z0-9_-]+', '', tensor_name)
def _replicated_optimizer(opt):
  """Wrap the optimizer `opt` with CrossShardOptimizer if applicable."""
  # Wrap even on a single core so Keras consistently tracks and initializes
  # the optimizer variables.
  if isinstance(opt, keras_optimizers.TFOptimizer):
    return tpu_optimizer.CrossShardOptimizer(opt.optimizer)
  return KerasCrossShardOptimizer(opt)
def _clone_optimizer(optimizer, config=None, worker_name=None):
  """Returns a cloned optimizer with the provided optimizer.config or config."""
  if not isinstance(optimizer, keras_optimizers.Optimizer):
    # In the first call to tpu_model(model), Keras may not have wrapped the TF
    # optimizer in the TFOptimizer helper, e.g., the given model isn't compiled
    # or optimizer isn't set, and later generated tpu_model compiles with a TF
    # optimizer.
    return optimizer

  if isinstance(optimizer, keras_optimizers.TFOptimizer):
    return keras_optimizers.TFOptimizer(optimizer.optimizer)

  if config is None:
    config = optimizer.get_config()
  logging.info('Cloning %s %s', optimizer.__class__.__name__, config)
  # Explicitly put optimizer parameter variables on the TPU worker's CPU.
  job_prefix = '/job:%s' % worker_name if worker_name else ''
  with ops.device(job_prefix + '/device:CPU:0'):
    return optimizer.__class__.from_config(config)
class TPURewriteContext(object):
  """Prepare the environment for a Keras model during `tpu.rewrite`.

  This overrides the default placeholder behaviour to instead refer to a preset
  input mapping. Placeholders are unsupported in TPU compiled code, and must
  be replaced with explicit inputs or values from the infeed queue.

  Instead of explicitly threading inputs all the way through the Keras codebase,
  we override the behavior of the placeholder while compiling and inject the
  Tensors from the infeed in place of the placeholder.

  Similarly, as we compile a new sub-graph for each unique shape and execution
  mode, we need to override the behavior of an embedded `name_scope` call in
  the base Keras layer code. This allows us to re-use the same weights across
  many compiles and share a single session/graph.
  """

  def __init__(self, input_map):
    # Maps placeholder name -> infeed Tensor; consulted by the patched
    # `array_ops.placeholder` installed in `__enter__`.
    self._input_map = input_map
    self._default_placeholder = None
    self._default_name_scope = None

  def __enter__(self):
    """Monkeypatch several TF/Keras entry points for the compile scope."""

    def _placeholder(dtype, shape=None, name=None):  # pylint: disable=unused-argument
      # Substitute the mapped infeed Tensor for a placeholder of the same
      # name; otherwise fall back to the real placeholder implementation.
      logging.info('Remapping placeholder for %s', name)
      if name in self._input_map:
        return self._input_map[name]
      else:
        logging.info('Default: %s', name)
        return self._default_placeholder(dtype, shape, name)

    def _name_scope(name, default_name=None, values=None):
      # When called from within a Keras `Layer` (detected by inspecting the
      # caller's `self`), substitute a variable_scope with AUTO_REUSE so the
      # same weights are shared across repeated compiles.
      caller_frame = sys._getframe().f_back
      caller_obj = caller_frame.f_locals.get('self')
      if (caller_obj is not None and
          isinstance(caller_obj, base_layer.Layer) and name is not None):
        return variable_scope.variable_scope(
            name, default_name, values, reuse=variable_scope.AUTO_REUSE)

      return self._default_name_scope(name, default_name, values)

    # Capture the originals so `__exit__` can restore them.
    self._default_placeholder = array_ops.placeholder
    self._default_name_scope = ops.name_scope
    self._default_make_variable = base_layer_utils.make_variable
    self._default_random_normal = random_ops.random_normal
    self._default_qr = gen_linalg_ops.qr

    array_ops.placeholder = _placeholder

    # Replace random_ops.random_normal with a dummy function because
    # `random_normal` isn't yet implemented on the TPU. Because these
    # initialized values are overwritten by the CPU values, this is okay.
    def random_normal(shape,
                      mean=0.0,
                      stddev=1.0,
                      dtype=dtypes.float32,
                      seed=None,
                      name=None):
      del mean
      del stddev
      del seed
      return array_ops.zeros(shape, dtype=dtype, name=name)

    random_ops.random_normal = random_normal

    # Replace gen_linalg_ops.qr because QR decomposition is not yet implemented.
    # TODO(saeta): Remove qr override once we confirm the qr implementation is
    # ok.
    # pylint: disable=redefined-builtin
    def qr(input, full_matrices=False, name=None):
      """Dummy implementation of qr decomposition."""
      del full_matrices  # TODO(saeta): Properly handle the full matrix case.
      input_shape = input.shape
      if len(input_shape) < 2:
        raise ValueError('Invalid shape passed to qr: %s' % input_shape)
      p = min(input_shape[-1], input_shape[-2])
      if len(input_shape) == 2:
        q = array_ops.zeros((p, p), name=name)
        r = array_ops.zeros(input_shape, name=name)
        return (r, q)
      elif len(input_shape) == 3:
        n = input_shape[0]
        q = array_ops.zeros((n, p, p), name=name)
        r = array_ops.zeros(input_shape, name=name)
        return (r, q)
      else:
        raise ValueError('Invalid shape passed to qr: %s' % input_shape)

    gen_linalg_ops.qr = qr

    ops.name_scope = _name_scope
    base_layer_utils.make_variable = variable_scope.get_variable
    logging.info('Overriding default placeholder.')
    return

  def __exit__(self, exc_type, exc_val, exc_tb):
    # Restore every global patched in `__enter__`.
    array_ops.placeholder = self._default_placeholder
    ops.name_scope = self._default_name_scope
    base_layer_utils.make_variable = self._default_make_variable
    random_ops.random_normal = self._default_random_normal
    gen_linalg_ops.qr = self._default_qr
class SizedInfeed(
    collections.namedtuple('SizedInfeed',
                           ['sharded_infeed_tensors', 'infeed_ops'])):
  """An instantiation of the infeed ops for a concrete input shape.

  sharded_infeed_tensors: A data structure of Tensors used to represent the
    placeholder tensors that must be fed when using feed_dicts.
  infeed_ops: the set of ops that will be run to drive infeed for a single
    step.
  """
class TPUInfeedInstance(object):
  """TPUInfeedInstance represents the logic to manage feeding in a single step.

  See the comments on the `TPUInfeedManager` for a description for how infeed
  is managed.
  """

  # NOTE(review): `abc.abstractmethod` has no effect here because this class
  # does not use `ABCMeta` as its metaclass (unlike `TPUInfeedManager` below),
  # so subclasses are not actually forced to override this method.
  @abc.abstractmethod
  def make_input_specs(self, input_tensors):
    """Constructs the infeed_specs for the given Infeed instance.

    Args:
      input_tensors: The inputs to the model.

    Returns:
      A list of input specifications (e.g. `tf.TensorSpec`), one per input
      tensor.
    """
    pass

  def make_feed_dict(self, tpu_model_op):
    """Constructs a feed_dict for this instance, given the tpu_model_op.

    Args:
      tpu_model_op: A `TPUModelOp` representing the TPU Model for this
        instance's input spec.

    Returns:
      A dictionary to use as the feed_dict of a `session.run` call.
    """
    pass
@six.add_metaclass(abc.ABCMeta)
class TPUInfeedManager(object):
  """TPUInfeedManager manages the data infeeding of data to a TPU computation.

  Because there are multiple data sources (e.g. in-memory NumPy arrays,
  `tf.data.Dataset`s), we abstract the different logic behind a single
  interface: the `TPUInfeedManager`.

  (1) A `TPUFunction` is called with a set of inputs. Based on the inputs,
  `TPUFunction` retrieves the corresponding `TPUInfeedManager` (or constructs a
  new one if required).

  (2) The `TPUFunction` calls `make_infeed_instance` on the `TPUInfeedManager`
  which returns a `TPUInfeedInstance`.

  (3) The `TPUFunction` checks in the shape cache for a pre-compiled instance of
  the model based on the returned `input_specs` from `TPUInfeedInstance`.

  (4) [Optional.] If the model has not already been instantiated for the given
  input spec, the `TPUFunction` compiles the model for the input spec (using the
  `TPUInfeedManager`).

  (5) The `TPUInfeedInstance` constructs the session.run's feed_dict given the
  compiled model instance corresponding to its shape.
  """

  @abc.abstractmethod
  def make_infeed_instance(self, inputs):
    """Given a single step's input, construct a `TPUInfeedInstance`.

    Args:
      inputs: The inputs to a given step.

    Returns:
      A subclass of `TPUInfeedInstance`.
    """
    pass

  @abc.abstractmethod
  def build_infeed_from_input_specs(self, input_specs, execution_mode):
    """For a given input specification (size, type), construct the infeed ops.

    This is called only once for a given input specification and builds the
    graph ops. It does not have a pointer to the actual infeed data.

    Args:
      input_specs: List of tensor specs (shape/dtype/name) describing one
        step's inputs, as produced by `TPUInfeedInstance.make_input_specs`.
      execution_mode: String mode key (e.g. train/eval/predict) used to name
        the generated infeed ops.

    Returns:
      A `SizedInfeed` instance.
    """
    pass
class TPUNumpyInfeedManager(TPUInfeedManager):
  """TPU Infeed manager for Numpy inputs."""

  class NumpyInfeedInstance(TPUInfeedInstance):
    """Infeed instance for Numpy inputs."""

    def __init__(self, sharded_inputs):
      # One entry per TPU shard; each entry is the list of numpy arrays for
      # that shard's model inputs.
      self._sharded_inputs = sharded_inputs

    def make_input_specs(self, input_tensors):
      # Compute an input specification (used to generate infeed enqueue and
      # dequeue operations). The shape comes from the input array and the
      # dtype from the model: a user may pass a float64 array for a float32
      # input, and we still must generate a float32 infeed.
      #
      # The first shard is representative: every replica shares the same
      # shape and dtype.
      return [
          tensor_spec.TensorSpec(shard_array.shape, tensor.dtype,
                                 _valid_name(tensor.name))
          for tensor, shard_array in zip(input_tensors,
                                         self._sharded_inputs[0])
      ]

    def make_feed_dict(self, tpu_model_op):
      # Pair each infeed placeholder with the numpy value destined for it,
      # shard by shard.
      return {
          placeholder: value
          for shard_placeholders, shard_values in zip(
              tpu_model_op.infeed_tensors, self._sharded_inputs)
          for placeholder, value in zip(shard_placeholders, shard_values)
      }

  def __init__(self, tpu_assignment):
    self._tpu_assignment = tpu_assignment

  def _split_tensors(self, inputs):
    """Split input data across shards.

    Each input is sliced along the batch axis.

    Args:
      inputs: List of Numpy arrays to run on the TPU.

    Returns:
      List of lists containing the input to feed to each TPU shard.
    """
    num_shards = self._tpu_assignment.num_towers
    if num_shards == 1:
      return [inputs]

    batch_size = inputs[0].shape[0]
    assert batch_size % num_shards == 0, (
        'batch_size must be divisible by the number of TPU cores in use (%s '
        'vs %s)' % (batch_size, num_shards))
    shard_size = batch_size // num_shards
    return [
        [x[shard * shard_size:(shard + 1) * shard_size] for x in inputs]
        for shard in range(num_shards)
    ]

  def make_infeed_instance(self, inputs):
    return self.NumpyInfeedInstance(self._split_tensors(inputs))

  def build_infeed_from_input_specs(self, input_specs, execution_mode):
    """Builds per-shard placeholders and the matching infeed enqueue ops."""
    enqueue_ops = []
    per_shard_placeholders = []
    for shard_id in range(self._tpu_assignment.num_towers):
      with ops.device(
          '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
        with ops.device('/device:TPU:%d' % shard_id):
          # One placeholder per model input, created under this shard's
          # TPU device scope.
          placeholders = [
              array_ops.placeholder(
                  dtype=spec.dtype,
                  shape=spec.shape,
                  name='infeed-enqueue-%s-%d' % (spec.name, shard_id))
              for spec in input_specs
          ]
        per_shard_placeholders.append(placeholders)
        enqueue_ops.append(
            tpu_ops.infeed_enqueue_tuple(
                placeholders, [spec.shape for spec in input_specs],
                name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),
                device_ordinal=shard_id))
    return SizedInfeed(
        infeed_ops=enqueue_ops, sharded_infeed_tensors=per_shard_placeholders)
class TPUDatasetInfeedManager(TPUInfeedManager):
  """Manages infeed for a `tf.data.Dataset` into a TPU computation.
  """

  class DatasetInfeedInstance(TPUInfeedInstance):
    """An instance of the TPU infeed."""

    def __init__(self, input_specs):
      # Specs are computed once, up front, from the dataset's static shapes.
      self._input_specs = input_specs

    def make_input_specs(self, input_tensors):
      # TODO(saeta): Do error checking here!
      return self._input_specs

    def make_feed_dict(self, tpu_model_op):
      # TODO(saeta): Verify tpu_model_op is as expected!
      # Dataset infeed needs no feed_dict: the get_next ops pull data
      # directly from the iterator.
      return {}

  # pylint: disable=redefined-outer-name
  def __init__(self, dataset, tpu_assignment, mode):
    """Constructs a TPUDatasetInfeedManager.

    Args:
      dataset: A `tf.data.Dataset` to infeed.
      tpu_assignment: The `TPUAssignment` used to configure the
        Keras TPU model.
      mode: ModeKeys enum.
    """
    self._verify_dataset_shape(dataset)

    self._dataset = dataset
    self._tpu_assignment = tpu_assignment
    # The dataset yields per-core batches; the dummy arrays below present
    # Keras with the aggregate (num_towers x per-core) batch size.
    dummy_x_shape = dataset.output_shapes[0].as_list()
    dummy_x_shape[0] *= tpu_assignment.num_towers
    dummy_y_shape = dataset.output_shapes[1].as_list()
    dummy_y_shape[0] *= tpu_assignment.num_towers
    self._iterator = dataset_ops.make_initializable_iterator(dataset)
    K.get_session().run(self._iterator.initializer)

    # One get_next op per tower, chained by control dependencies so the
    # towers consume dataset elements in a deterministic order.
    self._get_next_ops = []
    ctrl_deps = []
    for i in range(tpu_assignment.num_towers):
      with ops.control_dependencies(ctrl_deps):  # Ensure deterministic
        # TODO(saeta): Ensure correct placement!
        get_next_op = self._iterator.get_next()
        self._get_next_ops.append(get_next_op)
        ctrl_deps.extend(get_next_op)

    # Use dummy numpy inputs for the rest of Keras' shape checking. We
    # intercept them when building the model.
    self._dummy_x = np.zeros(
        dummy_x_shape, dtype=dataset.output_types[0].as_numpy_dtype)
    self._dummy_y = np.zeros(
        dummy_y_shape, dtype=dataset.output_types[1].as_numpy_dtype)

    # Build one TensorSpec per iterator output. The tuple branch covers the
    # usual (features, labels) case; the TensorShape branch covers a single
    # un-tupled output.
    input_specs = []
    if isinstance(self._iterator.output_shapes, tuple):
      assert isinstance(self._iterator.output_types, tuple)
      assert len(self._iterator.output_shapes) == len(
          self._iterator.output_types)
      for i in range(len(self._iterator.output_shapes)):
        spec = tensor_spec.TensorSpec(self._iterator.output_shapes[i],
                                      self._iterator.output_types[i])
        input_specs.append(spec)
    elif isinstance(self._iterator.output_shapes, tensor_shape.TensorShape):
      spec = tensor_spec.TensorSpec(self._iterator.output_shapes,
                                    self._iterator.output_types)
      input_specs.append(spec)

    # Pre-process the inputs and get_next_ops before caching.
    input_specs, self._get_next_ops = (
        _inject_tpu_inputs_for_dataset(
            tpu_assignment, mode, input_specs, self._get_next_ops))
    self._infeed_instance = self.DatasetInfeedInstance(input_specs)

  def _verify_dataset_shape(self, dataset):
    """Verifies a dataset is of an appropriate shape for TPUs."""
    if not isinstance(dataset, dataset_ops.DatasetV2):
      raise ValueError('The function passed as the `x` parameter did not '
                       'return a `tf.data.Dataset`.')
    if not isinstance(dataset.output_classes, tuple):
      raise ValueError('The dataset must return a tuple of tf.Tensors, '
                       'instead it returns: %s' % dataset.output_classes)
    if len(dataset.output_classes) != 2:
      raise ValueError('The dataset must return a 2-element tuple, got '
                       '%s output classes instead.' % (dataset.output_classes,))
    for i, cls in enumerate(dataset.output_classes):
      if cls != ops.Tensor:
        raise ValueError('The dataset returned a non-Tensor type (%s) at '
                         'index %d.' % (cls, i))
    for i, shape in enumerate(dataset.output_shapes):
      if not shape:
        # A rank-0 shape indicates the dataset was not batched.
        raise ValueError('The dataset returns a scalar tensor in '
                         'tuple index %d. Did you forget to batch? '
                         '(Output shapes: %s).' % (i, dataset.output_shapes))
      for j, dim in enumerate(shape):
        if dim.value is None:
          if j == 0:
            hint = (' Hint: did you use `ds.batch(BATCH_SIZE, '
                    'drop_remainder=True)`?')
          else:
            hint = ''
          raise ValueError(
              'The Keras-TPU integration for `tf.data` '
              'currently requires static shapes. The provided '
              'dataset only has a partially defined shape. '
              '(Dimension %d of output tensor %d is not statically known '
              'for output shapes: %s.%s)' % (j, i, dataset.output_shapes, hint))

  @property
  def dummy_x(self):
    # Placeholder numpy array handed to Keras for shape checking.
    return self._dummy_x

  @property
  def dummy_y(self):
    # Placeholder numpy array handed to Keras for shape checking.
    return self._dummy_y

  def make_infeed_instance(self, inputs):
    # TODO(saeta): Verify inputs is as expected.
    return self._infeed_instance

  def build_infeed_from_input_specs(self, input_specs, execution_mode):
    # The get_next ops built in the constructor act as the per-shard infeed
    # tensors; here they are only wired into enqueue ops.
    shard_infeed_tensors = self._get_next_ops
    assert len(shard_infeed_tensors) == self._tpu_assignment.num_towers
    infeed_ops = []
    for shard_id in range(self._tpu_assignment.num_towers):
      with ops.device(
          '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
        infeed_ops.append(
            tpu_ops.infeed_enqueue_tuple(
                shard_infeed_tensors[shard_id],
                [spec.shape for spec in input_specs],
                name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),
                device_ordinal=shard_id))
    return SizedInfeed(
        infeed_ops=infeed_ops, sharded_infeed_tensors=shard_infeed_tensors)
def _inject_tpu_inputs_for_dataset(tpu_assignment, mode,
                                   input_specs, get_next_ops):
  """Append core information to the set of dataset inputs."""
  # Core ids are used during compilation to identify the current TPU core
  # and enable concatenation operations across cores. Only train/eval build
  # those ops, so other modes pass through unchanged.
  if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:
    return input_specs, get_next_ops

  # Dataset inputs operate on a per-core basis.
  per_core_batch_size = input_specs[0].shape.as_list()[0]

  # Prepend a constant carrying this core's id (replicated across the batch)
  # to each tower's get-next tensors.
  assert len(get_next_ops) == tpu_assignment.num_towers
  for core_index, tower_get_next in enumerate(get_next_ops):
    # NOTE(review): 'cord_id_constant' looks like a typo for
    # 'core_id_constant'; kept as-is so existing graph node names stay stable.
    core_id_tensor = constant_op.constant(
        np.array([core_index] * per_core_batch_size).astype('int32'),
        dtype=dtypes.int32,
        name='cord_id_constant')
    get_next_ops[core_index] = [core_id_tensor] + list(tower_get_next)

  # Mirror the change in the specs: the core-id column comes first.
  core_id_spec = tensor_spec.TensorSpec([per_core_batch_size], dtypes.int32)
  return [core_id_spec] + input_specs, get_next_ops
def _inject_tpu_inputs_for_infeed(tpu_assignment, mode,
                                  core_id_place_holder, input_tensors, inputs):
  """Append core information to the set of inputs."""
  # Core ids are used during compilation to identify the current TPU core
  # and enable concatenation operations across cores. Only train/eval build
  # those ops, so other modes pass through unchanged.
  if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:
    return input_tensors, inputs

  # The placeholder carrying the core id leads the input tensor list.
  input_tensors = [core_id_place_holder] + input_tensors

  # Fill in the matching numpy column. For `num_cores` = 2 and
  # `batch_size` = 8 the column is [0, 0, 0, 0, 1, 1, 1, 1]: each core sees
  # its own id, duplicated across its slice of the batch.
  shard_count = tpu_assignment.num_towers
  rows_per_shard = inputs[0].shape[0] // shard_count
  core_id_column = np.repeat(np.arange(shard_count), rows_per_shard)
  return input_tensors, [core_id_column] + inputs
def _read_tpu_coreid_from_infeed(mode, infeed_tensors):
  """Popping out the core ids from infeed."""
  # Core ids are only injected for train/eval (see
  # `_inject_tpu_inputs_for_infeed`); other modes receive the tensors as-is.
  if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:
    return None, infeed_tensors

  if len(infeed_tensors) <= 1:
    raise RuntimeError(
        'The infeed tensors on TPU core has only {} tensors. '
        'This is not expected. Please report a bug.\nTensors: {}'.format(
            len(infeed_tensors), infeed_tensors))

  core_id_column, remainder = infeed_tensors[0], infeed_tensors[1:]
  # Every entry of the injected column carries the same id, so element 0
  # suffices as the scalar core id.
  return core_id_column[0], remainder
class TPUFunction(object):
  """K.function compatible interface for invoking a TPU compiled function.

  Recompilation is triggered on-demand for each set of new inputs shapes: the
  results are cached for future execution. We expect most computations will
  be dominated by a standard batch-size, followed by a straggler batch for
  the end of training or evaluation.

  All `inputs` and `outputs` will be loaded via the infeed and outfeed queues
  instead of being injected as `feed_dict` items or fetches.
  """

  def __init__(self, model, execution_mode, tpu_assignment):
    self.model = model
    self.execution_mode = execution_mode
    self._tpu_assignment = tpu_assignment
    # Maps input-shape keys to compiled `TPUModelOp`s; see
    # `_tpu_model_ops_for_input_specs`.
    self._compilation_cache = {}
    self._cloned_model = None
    self._cloned_optimizer = None
    # Create a placeholder for the TPU core ID. Cache the placeholder to avoid
    # modifying the graph for every batch.
    self._core_id_place_holder = array_ops.placeholder(
        dtype=dtypes.int32, shape=[1], name='core_id')

  def _specialize_model(self, input_specs, infeed_manager):
    """Specialize `self.model` (a Keras model) for the given input shapes."""
    # Re-create our input and output layers inside our subgraph. They will be
    # attached to the true computation when we clone our model in `tpu_fn`.
    K.set_learning_phase(self.execution_mode == model_fn_lib.ModeKeys.TRAIN)

    # functools.partial and callable objects are not supported by tpu.rewrite
    def _model_fn():
      """Compute fit/eval/predict for the TPU."""
      is_training = self.execution_mode == model_fn_lib.ModeKeys.TRAIN
      is_test = self.execution_mode == model_fn_lib.ModeKeys.EVAL
      is_predict = self.execution_mode == model_fn_lib.ModeKeys.PREDICT

      # During train/eval, we infeed our features as well as labels.
      if is_training or is_test:
        infeed_layers = self.model._input_layers + self.model._output_layers
      else:
        infeed_layers = self.model._input_layers

      # Generate our infeed operation to read features & labels.
      infeed_tensors = tpu_ops.infeed_dequeue_tuple(
          dtypes=[spec.dtype for spec in input_specs],
          shapes=[spec.shape for spec in input_specs],
          name='infeed-%s' % self.execution_mode)

      # For train/eval the first infeed column is the injected core id.
      core_id, infeed_tensors = (
          _read_tpu_coreid_from_infeed(
              mode=self.execution_mode, infeed_tensors=infeed_tensors))

      assert len(infeed_tensors) == len(infeed_layers), (
          'Infeed inputs did not match model: %s vs %s' % (infeed_layers,
                                                           infeed_tensors))

      tpu_targets = []
      tpu_input_map = {}

      # Sort infeed outputs into inputs and labels for calling our Keras model.
      for tensor, layer in zip(infeed_tensors, infeed_layers):
        if layer in self.model._input_layers:
          tpu_input_map[layer.name] = tensor
        if layer in self.model._output_layers:
          tpu_targets.append(tensor)

      # Clone our CPU model, running within the TPU device context.
      #
      # We use the id of the original model as a key to avoid weight collisions
      # (if a user re-runs the same model multiple times, in e.g. Colab).
      with TPURewriteContext(tpu_input_map):
        with variable_scope.variable_scope('tpu_%s' % id(self.model)):
          with keras_tpu_variables.replicated_scope(
              self._tpu_assignment.num_towers):
            if not self._cloned_optimizer:
              self._cloned_optimizer = _clone_optimizer(
                  self.model.cpu_optimizer,
                  worker_name=self._tpu_assignment.worker_name)

            self._cloned_model = models.clone_model(self.model)

            # When running on more than one core, concatenate outputs at the
            # end of processing. In backprop stage, the gradients will be
            # calculated according to the local inputs as gradient of
            # cross-replica-concat being zero for any outputs other than those
            # from the local core so the loss calculation is identical.
            num_towers = self.model._tpu_assignment.num_towers
            if num_towers > 1 and (is_training or is_test):
              new_outputs = [
                  _cross_replica_concat(
                      o, core_id, num_towers,
                      name='model output ({})'.format(o.name))
                  for o in self._cloned_model.outputs
              ]
              # Recast all low precision outputs back to float32 since we only
              # casted the inputs to bfloat16 and not targets. This is done so
              # that we can preserve precision when calculating the loss value.
              if new_outputs and new_outputs[0].dtype == dtypes.bfloat16:
                new_outputs = [
                    math_ops.cast(o, dtypes.float32) for o in new_outputs]
              self._cloned_model.outputs = new_outputs
              tpu_targets = [
                  _cross_replica_concat(
                      tensor,
                      core_id,
                      num_towers,
                      name='model target ({})'.format(tensor.name))
                  for tensor in tpu_targets
              ]

            if is_training or is_test:
              with variable_scope.variable_scope(
                  'metrics', reuse=variable_scope.AUTO_REUSE):
                self._cloned_model.compile(
                    optimizer=_replicated_optimizer(self._cloned_optimizer),
                    loss=self.model.loss,
                    loss_weights=self.model.loss_weights,
                    metrics=metrics_module.clone_metrics(
                        self.model._compile_metrics),
                    weighted_metrics=metrics_module.clone_metrics(
                        self.model._compile_weighted_metrics),
                    target_tensors=tpu_targets,
                )

      # Compute our outfeed depending on the execution mode
      if is_training:
        if not isinstance(self._cloned_optimizer, keras_optimizers.TFOptimizer):
          # For Keras optimizer, we try to place the variable weights on the
          # TPU device. Keras creates optimizer variables (e.g. momentum values
          # for the Momentum optimizer) when _make_train_function is invoked.
          with keras_tpu_variables.replicated_variable_for_optimizer(
              self._tpu_assignment.num_towers):
            self._cloned_model._make_fit_function()
        else:
          self._cloned_model._make_fit_function()

        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in self._cloned_model._fit_function.outputs
        ]
        return [
            self._cloned_model._fit_function.updates_op,
            tpu_ops.outfeed_enqueue_tuple(
                self._cloned_model._fit_function.outputs,
                name='outfeed-enqueue-train')
        ]
      elif is_test:
        self._cloned_model._make_eval_function()
        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in self._cloned_model._eval_function.outputs
        ]
        return [
            tpu_ops.outfeed_enqueue_tuple(
                self._cloned_model._eval_function.outputs,
                name='outfeed-enqueue-test')
        ]
      elif is_predict:
        self._cloned_model._make_predict_function()
        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in self._cloned_model.predict_function.outputs
        ]
        return [
            tpu_ops.outfeed_enqueue_tuple(
                self._cloned_model.predict_function.outputs,
                name='outfeed-enqueue-predict',
            )
        ]
      else:
        assert False, 'Unexpected execution mode: %s' % self.execution_mode

    # Capture outfeed metadata computed during the rewrite.
    self._outfeed_spec = None

    # Generate out TPU operations using `tpu.split_compile_and_replicate`.
    # `compile_op` can be used to test the TPU model compiles before execution.
    # `execute op` replicates `_model_fn` `num_replicas` times, with each shard
    # running on a different logical core.
    compile_op, execute_op = tpu.split_compile_and_replicate(
        _model_fn, inputs=[[] for _ in range(self._tpu_assignment.num_towers)])

    # Generate CPU side operations to enqueue features/labels and dequeue
    # outputs from the model call.
    sized_infeed = infeed_manager.build_infeed_from_input_specs(
        input_specs, self.execution_mode)

    # Build output ops.
    outfeed_op = []
    for shard_id in range(self._tpu_assignment.num_towers):
      with ops.device(
          '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
        outfeed_op.extend(
            tpu_ops.outfeed_dequeue_tuple(
                dtypes=[spec.dtype for spec in self._outfeed_spec],
                shapes=[spec.shape for spec in self._outfeed_spec],
                name='outfeed-dequeue-%s-%d' % (self.execution_mode, shard_id),
                device_ordinal=shard_id))

    return TPUModelOp(
        compile_op,
        execute_op,
        infeed_tensors=sized_infeed.sharded_infeed_tensors,
        infeed_op=sized_infeed.infeed_ops,
        outfeed_op=outfeed_op)

  def _test_model_compiles(self, tpu_model_ops):
    """Verifies that the given TPUModelOp can be compiled via XLA."""
    logging.info('Started compiling')
    start_time = time.time()

    # Running the compile op returns a serialized CompilationResultProto
    # describing success or failure.
    result = K.get_session().run(tpu_model_ops.compile_op)
    proto = tpu_compilation_result.CompilationResultProto()
    proto.ParseFromString(result)
    if proto.status_error_message:
      raise RuntimeError('Compilation failed: {}'.format(
          proto.status_error_message))

    end_time = time.time()
    logging.info('Finished compiling. Time elapsed: %s secs',
                 end_time - start_time)

  def _lookup_infeed_manager(self, inputs):
    """Return an existing manager, or construct a new InfeedManager for inputs.

    _lookup_infeed_manager will return an existing InfeedManager if one has
    been previously assigned for this model and input. If not, it will
    construct a new TPUNumpyInfeedManager.

    Args:
      inputs: A NumPy input to the model.

    Returns:
      A `TPUInfeedManager` object to manage infeeds for this input.
    """
    if inputs is None:
      return None

    # Identity (not equality) comparison: the model stores (array, manager)
    # pairs in a list because numpy arrays are not hashable.
    for x, mgr in self.model._numpy_to_infeed_manager_list:
      if inputs[0] is x:
        return mgr

    return TPUNumpyInfeedManager(self.model._tpu_assignment)

  def _tpu_model_ops_for_input_specs(self, input_specs, infeed_manager):
    """Looks up the corresponding `TPUModelOp` for a given `input_specs`.

    It instantiates a new copy of the model for each unique input shape.

    Args:
      input_specs: The specification of the inputs to train on.
      infeed_manager: The infeed manager responsible for feeding in data.

    Returns:
      A `TPUModelOp` instance that can be used to execute a step of the model.
    """
    if input_specs is None or infeed_manager is None:
      # Note: this condition is possible during the prologue or epilogue of the
      # pipelined loop.
      return None

    # XLA requires every operation in the graph has a fixed shape. To
    # handle varying batch sizes we recompile a new sub-graph for each
    # unique input shape.
    shape_key = tuple([tuple(spec.shape.as_list()) for spec in input_specs])
    if shape_key not in self._compilation_cache:
      logging.info(
          'New input shapes; (re-)compiling: mode=%s '
          '(# of cores %d), %s', self.execution_mode,
          self._tpu_assignment.num_towers, input_specs)
      new_tpu_model_ops = self._specialize_model(input_specs,
                                                 infeed_manager)
      self._compilation_cache[shape_key] = new_tpu_model_ops
      self._test_model_compiles(new_tpu_model_ops)

    return self._compilation_cache[shape_key]

  def _construct_input_tensors_and_inputs(self, inputs):
    """Returns input tensors and numpy array inputs corresponding to `inputs`.

    Args:
      inputs: NumPy inputs.

    Returns:
      A tuple of `input_tensors`, and `inputs`.
    """
    if inputs is None:
      # Note: this condition is possible during the prologue or epilogue of the
      # pipelined loop.
      return None, None

    if isinstance(inputs[-1], int):
      # Remove the learning_phase flag at the end. We currently hard code the
      # learning_phase in TPUFunction.
      inputs = inputs[:-1]

    if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN or
        self.execution_mode == model_fn_lib.ModeKeys.EVAL):
      # Strip sample weight from inputs.
      input_tensors = self.model._feed_inputs + self.model._feed_targets
    else:
      input_tensors = self.model._feed_inputs

    inputs = inputs[:len(input_tensors)]
    # Prepend the core-id placeholder/column (train/eval only).
    input_tensors, inputs = (
        _inject_tpu_inputs_for_infeed(
            self._tpu_assignment, self.execution_mode,
            self._core_id_place_holder, input_tensors, inputs))
    return input_tensors, inputs

  def _process_outputs(self, outfeed_outputs):
    """Processes the outputs of a model function execution.

    Args:
      outfeed_outputs: The sharded outputs of the TPU computation.

    Returns:
      The aggregated outputs of the TPU computation to be used in the rest of
      the model execution.
    """
    # TODO(xiejw): Decide how to reduce outputs, or discard all but first.
    if self.execution_mode == model_fn_lib.ModeKeys.PREDICT:
      # De-interleave: group the j-th output of every replica together, then
      # concatenate each group along the batch dimension.
      outputs = [[] for _ in range(len(self._outfeed_spec))]
      outputs_per_replica = len(self._outfeed_spec)

      for i in range(self._tpu_assignment.num_towers):
        output_group = outfeed_outputs[i * outputs_per_replica:(i + 1) *
                                       outputs_per_replica]
        for j in range(outputs_per_replica):
          outputs[j].append(output_group[j])

      return [np.concatenate(group) for group in outputs]
    else:
      # Train/eval outputs were cross-replica-concatenated on device, so
      # the first replica's slice is sufficient.
      return outfeed_outputs[:len(outfeed_outputs) //
                             self._tpu_assignment.num_towers]

  def __call__(self, inputs):
    """__call__ executes the function on the computational hardware.

    It handles executing infeed, and preprocessing in addition to executing
    the model on the TPU hardware.

    Note: `__call__` has a sibling method `pipeline_run` which performs the
    same operations, but with software pipelining.

    Args:
      inputs: The inputs to use to train.

    Returns:
      The output of the computation for the given mode it is executed in.

    Raises:
      RuntimeError: If there is an inappropriate use of the function.
    """
    assert isinstance(inputs, list)

    # Resolve the infeed manager and preprocess inputs into per-shard form.
    infeed_manager = self._lookup_infeed_manager(inputs)
    input_tensors, inputs = self._construct_input_tensors_and_inputs(inputs)
    infeed_instance = infeed_manager.make_infeed_instance(inputs)
    del inputs  # To avoid accident usage.
    input_specs = infeed_instance.make_input_specs(input_tensors)
    # Look up (or compile) the model specialization for these shapes.
    tpu_model_ops = self._tpu_model_ops_for_input_specs(input_specs,
                                                        infeed_manager)
    infeed_dict = infeed_instance.make_feed_dict(tpu_model_ops)

    # Initialize our TPU weights on the first compile.
    self.model._initialize_weights(self._cloned_model)

    # Run infeed, execute and outfeed together in a single session.run call.
    _, _, outfeed_outputs = K.get_session().run([
        tpu_model_ops.infeed_op, tpu_model_ops.execute_op,
        tpu_model_ops.outfeed_op
    ], infeed_dict)
    return self._process_outputs(outfeed_outputs)

  def pipeline_run(self, cur_step_inputs, next_step_inputs):
    """pipeline_run executes the function on the computational hardware.

    pipeline_run performs the same computation as __call__, however it runs
    the infeed in a software pipelined fashion compared to the on-device
    execution.

    Note: it is the responsibility of the caller to call `pipeline_run` in the
    following sequence:
      - Once with `cur_step_inputs=None` and `next_step_inputs=list(...)`
      - `n` times with `cur_step_inputs` and `next_step_inputs` as `list`s
      - Once with `cur_step_inputs=list(...)` and `next_step_inputs=None`

    Additionally, it is the responsibility of the caller to pass
    `next_step_inputs` as `cur_step_inputs` on the next invocation of
    `pipeline_run`.

    Args:
      cur_step_inputs: The current step's inputs.
      next_step_inputs: The next step's inputs.

    Returns:
      The output of the computation for the given mode it is executed in.

    Raises:
      RuntimeError: If there is an inappropriate use of the function.
    """
    # Software pipelined case.
    next_step_infeed_manager = self._lookup_infeed_manager(next_step_inputs)
    cur_step_infeed_manager = self._lookup_infeed_manager(cur_step_inputs)

    if (next_step_infeed_manager is not None and
        cur_step_infeed_manager is not None):
      assert type(next_step_infeed_manager) is type(cur_step_infeed_manager)

    next_input_tensors, next_step_inputs = (
        self._construct_input_tensors_and_inputs(next_step_inputs))
    cur_input_tensors, cur_step_inputs = (
        self._construct_input_tensors_and_inputs(cur_step_inputs))

    cur_infeed_instance = None
    if cur_step_infeed_manager:
      cur_infeed_instance = cur_step_infeed_manager.make_infeed_instance(
          cur_step_inputs)
    next_infeed_instance = None
    if next_step_infeed_manager:
      next_infeed_instance = next_step_infeed_manager.make_infeed_instance(
          next_step_inputs)

    del cur_step_inputs  # Avoid accidental re-use.
    del next_step_inputs  # Avoid accidental re-use.

    cur_tpu_model_ops = None
    next_tpu_model_ops = None
    infeed_dict = None

    if cur_infeed_instance and cur_input_tensors and cur_step_infeed_manager:
      cur_input_specs = cur_infeed_instance.make_input_specs(cur_input_tensors)
      cur_tpu_model_ops = self._tpu_model_ops_for_input_specs(
          cur_input_specs, cur_step_infeed_manager)

    if (next_infeed_instance and next_input_tensors and
        next_step_infeed_manager):
      next_input_specs = next_infeed_instance.make_input_specs(
          next_input_tensors)
      next_tpu_model_ops = self._tpu_model_ops_for_input_specs(
          next_input_specs, next_step_infeed_manager)
      infeed_dict = next_infeed_instance.make_feed_dict(next_tpu_model_ops)

    # Initialize our TPU weights on the first compile.
    self.model._initialize_weights(self._cloned_model)

    # Steady state: overlap next step's infeed with the current step's
    # execute + outfeed.
    if next_tpu_model_ops and cur_tpu_model_ops:
      _, _, outfeed_outputs = K.get_session().run([
          next_tpu_model_ops.infeed_op, cur_tpu_model_ops.execute_op,
          cur_tpu_model_ops.outfeed_op
      ], infeed_dict)
      return self._process_outputs(outfeed_outputs)

    # Epilogue: no next step; just execute and drain the current step.
    if cur_tpu_model_ops:
      _, outfeed_outputs = K.get_session().run(
          [cur_tpu_model_ops.execute_op, cur_tpu_model_ops.outfeed_op])
      return self._process_outputs(outfeed_outputs)

    # Prologue: only the first step's infeed runs; nothing to execute yet.
    if next_tpu_model_ops:
      K.get_session().run(next_tpu_model_ops.infeed_op, infeed_dict)
      return None

    raise RuntimeError('Internal error: both current & next tpu_model_ops '
                       'were None')
class KerasTPUModel(models.Model):
"""TPU compatible Keras model wrapper."""
def __init__(self, cpu_model, strategy):
  """Wraps `cpu_model` for TPU execution.

  Args:
    cpu_model: The Keras model to wrap; may already be compiled.
    strategy: The distribution strategy providing the TPU cluster resolver
      and the model-to-core assignment.
  """
  # NOTE: `super(models.Model, self)` deliberately skips
  # `models.Model.__init__` in the MRO (hence the pylint disable); the
  # attributes it would normally set are initialized manually below.
  super(models.Model, self).__init__(  # pylint: disable=bad-super-call
      inputs=cpu_model.inputs,
      outputs=cpu_model.outputs,
      name=cpu_model.name,
  )

  # Create a mapping from numpy arrays to infeed managers.
  # Note: uses a list of tuples instead of a map because numpy arrays are
  # not hashable.
  self._numpy_to_infeed_manager_list = []

  # Add distribution specific arguments since we don't call the Model init.
  self._distribution_strategy = None
  self._compile_distribution = None

  # Attributes normally created by `Model.__init__`, which was skipped above.
  self.predict_function = None
  self.test_function = None
  self.train_function = None
  self._fit_function = None
  self._eval_function = None
  self._stateful_metric_functions = []

  cluster_resolver = strategy._tpu_cluster_resolver
  self._tpu_name_or_address = cluster_resolver.get_master()
  self._cpu_model = cpu_model
  self._tpu_assignment = strategy._make_assignment_for_model(cpu_model)
  self._tpu_model = None
  self._tpu_weights_initialized = False

  # If the input CPU model has already been compiled, compile our TPU model
  # immediately.
  if self._cpu_model.optimizer:
    self.compile(
        self._cpu_model.optimizer,
        self._cpu_model.loss,
        self._cpu_model._compile_metrics,
        self._cpu_model.loss_weights,
        self._cpu_model.sample_weight_mode,
        self._cpu_model._compile_weighted_metrics,
        self._cpu_model.target_tensors,
    )

  # This flag must be disabled upon model mutation, such as changing the model
  # layers or recompiling the model to use a different optimizer. New function
  # definitions are generated whenever this flag is disabled, ensuring that
  # internal graph functions are always using the current model structure.
  #
  # Requires declaration here because this constructor skips the
  # Model constructor.
  self._built_graph_functions = False
def get_config(self):
  """Returns the wrapper's construction state as a dictionary."""
  config = {}
  config['cpu_model'] = self._cpu_model
  config['tpu_name_or_address'] = self._tpu_name_or_address
  config['tpu_assignment'] = self._tpu_assignment
  return config
def compile(self,
            optimizer,
            loss=None,
            metrics=None,
            loss_weights=None,
            sample_weight_mode=None,
            weighted_metrics=None,
            target_tensors=None,
            **kwargs):
  """Compiles the model, mirroring the compile onto the CPU copy.

  The CPU model receives a cloned optimizer and cloned metric objects;
  arguments that TPU execution does not support are rejected up front.
  """
  # Reject Keras features the TPU path does not support (checked in the
  # same order as before).
  for unsupported_arg, message in (
      (sample_weight_mode,
       'sample_weight_mode not supported for TPU execution.'),
      (weighted_metrics,
       'weighted_metrics not supported for TPU execution.'),
      (target_tensors,
       'target_tensors is not supported for TPU execution.')):
    if unsupported_arg:
      raise ValueError(message)

  # Compile the CPU copy with a cloned optimizer and cloned metrics.
  self._cpu_model.compile(
      _clone_optimizer(optimizer), loss,
      metrics_module.clone_metrics(metrics), loss_weights, sample_weight_mode,
      metrics_module.clone_metrics(weighted_metrics), target_tensors,
      **kwargs)

  super(KerasTPUModel, self).compile(optimizer, loss, metrics, loss_weights,
                                     sample_weight_mode, weighted_metrics,
                                     target_tensors, **kwargs)
def fit(self,
        x=None,
        y=None,
        batch_size=None,
        epochs=1,
        verbose=1,
        callbacks=None,
        validation_split=0.,
        validation_data=None,
        shuffle=True,
        class_weight=None,
        sample_weight=None,
        initial_epoch=0,
        steps_per_epoch=None,
        validation_steps=None,
        **kwargs):
  """Trains the model; see `tf.keras.Model.fit` for argument semantics.

  TPU-specific behavior: `x` (and `validation_data`) may be a zero-argument
  callable returning a `tf.data.Dataset`; a raw Dataset is rejected. The
  internal `_pipeline` kwarg (default True) selects the software-pipelined
  training loop.
  """
  if context.executing_eagerly():
    raise EnvironmentError('KerasTPUModel currently does not support eager '
                           'mode.')

  with _tpu_session_context():
    assert not self._numpy_to_infeed_manager_list  # Ensure empty.

    infeed_managers = []  # Managers to clean up at the end of the fit call.
    if isinstance(x, dataset_ops.DatasetV2):
      # TODO(b/111413240): Support taking a tf.data.Dataset directly.
      raise ValueError(
          'Taking a Dataset directly is not yet supported. Please '
          'wrap your dataset construction code in a function and '
          'pass that to fit instead. For examples, see: '
          'https://github.com/tensorflow/tpu/tree/master/models/experimental'
          '/keras')
    if callable(x):
      # Build the dataset under the worker's CPU device so its ops are
      # placed alongside the infeed.
      with ops.device(
          '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
        dataset = x()
        if steps_per_epoch is None:
          raise ValueError('When using tf.data as input to a model, you '
                           'should specify the steps_per_epoch argument.')
        if y is not None:
          raise ValueError('When using tf.data as input to a model, y must '
                           'be None')
        infeed_manager = TPUDatasetInfeedManager(
            dataset, self._tpu_assignment, model_fn_lib.ModeKeys.TRAIN)
        # Use dummy numpy inputs for the rest of Keras' shape checking. We
        # intercept them when building the model.
        x = infeed_manager.dummy_x
        y = infeed_manager.dummy_y
        infeed_managers.append((x, infeed_manager))

    if isinstance(validation_data, dataset_ops.DatasetV2):
      # TODO(b/111413240): Support taking a tf.data.Dataset directly.
      raise ValueError(
          'Taking a Dataset directly is not yet supported. Please '
          'wrap your dataset construction code in a function and '
          'pass that to fit instead. For examples, see: '
          'https://github.com/tensorflow/tpu/tree/master/models/experimental'
          '/keras')
    if callable(validation_data):
      dataset = validation_data()
      if validation_steps is None:
        raise ValueError('When using tf.data as validation for a model, you '
                         'should specify the validation_steps argument.')
      infeed_manager = TPUDatasetInfeedManager(dataset, self._tpu_assignment,
                                               model_fn_lib.ModeKeys.EVAL)
      # Use dummy numpy inputs for the rest of Keras' shape checking. We
      # intercept them when building the model.
      val_x = infeed_manager.dummy_x
      val_y = infeed_manager.dummy_y
      infeed_managers.append((val_x, infeed_manager))
      validation_data = (val_x, val_y)

    self._numpy_to_infeed_manager_list = infeed_managers
    try:
      # `_pipeline` is an internal escape hatch to force the non-pipelined
      # Keras training loop.
      pipeline = kwargs.get('_pipeline', True)
      if '_pipeline' in kwargs:
        kwargs.pop('_pipeline')
      if not pipeline:
        logging.info('Running non-pipelined training loop (`_pipeline=%s`).',
                     pipeline)
        return super(KerasTPUModel, self).fit(
            x, y, batch_size, epochs, verbose, callbacks, validation_split,
            validation_data, shuffle, class_weight, sample_weight,
            initial_epoch, steps_per_epoch, validation_steps, **kwargs)
      return self._pipeline_fit(x, y, batch_size, epochs, verbose, callbacks,
                                validation_split, validation_data, shuffle,
                                class_weight, sample_weight, initial_epoch,
                                steps_per_epoch, validation_steps, **kwargs)
    finally:
      # Always restore the empty mapping so later calls start clean.
      self._numpy_to_infeed_manager_list = []
  def evaluate(self,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None):
    """Evaluates the model inside a TPU session.

    Additionally accepts a zero-argument callable returning a
    `tf.data.Dataset` as `x`; in that case `y` must be None and `steps` must
    be given. Otherwise behaves like `keras.Model.evaluate`.
    """
    original_numpy_to_infeed_manager_list = []
    if self._numpy_to_infeed_manager_list:
      # evaluate call may be executed as callbacks during the training. In this
      # case, _numpy_to_infeed_manager_list is not empty, so save it for
      # recovery at the end of evaluate call.
      original_numpy_to_infeed_manager_list = self._numpy_to_infeed_manager_list
      self._numpy_to_infeed_manager_list = []

    with _tpu_session_context():
      # Managers to clean up at the end of the evaluate call.
      infeed_managers = []
      if isinstance(x, dataset_ops.DatasetV2):
        # TODO(b/111413240): Support taking a tf.data.Dataset directly.
        raise ValueError(
            'Taking a Dataset directly is not yet supported. Please '
            'wrap your dataset construction code in a function and '
            'pass that to fit instead. For examples, see: '
            'https://github.com/tensorflow/tpu/tree/master/models/experimental'
            '/keras')

      if callable(x):
        dataset = x()
        if steps is None:
          raise ValueError('When using tf.data as input to a model, you '
                           'should specify the steps argument.')
        if y is not None:
          raise ValueError('When using tf.data as input to a model, y must be '
                           'None')
        infeed_manager = TPUDatasetInfeedManager(dataset, self._tpu_assignment,
                                                 model_fn_lib.ModeKeys.EVAL)
        # Use dummy numpy inputs for the rest of Keras' shape checking. We
        # intercept them when building the model.
        x = infeed_manager.dummy_x
        y = infeed_manager.dummy_y
        infeed_managers.append((x, infeed_manager))

      self._numpy_to_infeed_manager_list = infeed_managers
      try:
        return super(KerasTPUModel, self).evaluate(x, y, batch_size, verbose,
                                                   sample_weight, steps)
      finally:
        # Restore whatever manager list was active before this call (e.g. when
        # evaluate runs as a validation callback inside fit).
        self._numpy_to_infeed_manager_list = (
            original_numpy_to_infeed_manager_list)
  def _pipeline_fit(self, x, y, batch_size, epochs, verbose, callbacks,
                    validation_split, validation_data, shuffle, class_weight,
                    sample_weight, initial_epoch, steps_per_epoch,
                    validation_steps, **kwargs):
    """Runs `fit` with software pipelining of infeed and TPU computation.

    Mirrors `keras.Model.fit` argument normalization, then hands off to
    `_pipeline_fit_loop`.
    """
    # Similar to super.fit(...), but modified to support software pipelining.

    # Backwards compatibility
    if batch_size is None and steps_per_epoch is None:
      batch_size = 32

    # Legacy support
    if 'nb_epoch' in kwargs:
      logging.warning('The `nb_epoch` argument in `fit` has been renamed '
                      '`epochs`.')
      epochs = kwargs.pop('nb_epoch')
    if kwargs:
      raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))

    # Validate and standardize user data
    x, y, sample_weights = self._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        class_weight=class_weight,
        batch_size=batch_size,
        check_steps=True,
        steps_name='steps_per_epoch',
        steps=steps_per_epoch,
        validation_split=validation_split)

    # Prepare validation data
    x, y, val_x, val_y, val_sample_weights = self._prepare_validation_data(
        validation_data, validation_split, validation_steps, x, y,
        sample_weights, batch_size)
    return self._pipeline_fit_loop(
        x,
        y,
        sample_weights=sample_weights,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        val_inputs=val_x,
        val_targets=val_y,
        val_sample_weights=val_sample_weights,
        shuffle=shuffle,
        initial_epoch=initial_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps)
  def _pipeline_fit_loop(self,
                         inputs,
                         targets,
                         sample_weights,
                         batch_size,
                         epochs,
                         verbose,
                         callbacks,
                         val_inputs,
                         val_targets,
                         val_sample_weights,
                         shuffle,
                         initial_epoch,
                         steps_per_epoch,
                         validation_steps,):
    """Pipelined analogue of the Keras training loop.

    Each epoch is dispatched either to the step-wise loop (when
    `steps_per_epoch` is set, i.e. dataset-style input) or to the sample-wise
    loop (numpy arrays batched by `batch_size`).

    Returns:
      `self.history`, as `keras.Model.fit` does.
    """
    self._make_train_function()
    sample_weights = sample_weights or []
    val_sample_weights = val_sample_weights or []
    if not isinstance(K.learning_phase(), int):
      # Append the learning-phase flag (1 = training) to the feed values.
      ins = inputs + targets + sample_weights + [1]
    else:
      ins = inputs + targets + sample_weights

    do_validation = False
    if val_inputs:
      do_validation = True
      if (steps_per_epoch is None and verbose and inputs and
          hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
        print('Train on %d samples, validate on %d samples' %
              (inputs[0].shape[0], val_inputs[0].shape[0]))

    if validation_steps:
      do_validation = True
      if steps_per_epoch is None:
        raise ValueError('Can only use `validation_steps` when doing step-wise '
                         'training, i.e. `steps_per_epoch` must be set.')

    num_training_samples = training_utils.check_num_samples(
        ins, batch_size, steps_per_epoch, 'steps_per_epoch')
    count_mode = 'steps' if steps_per_epoch else 'samples'
    callbacks = cbks.configure_callbacks(
        callbacks,
        self,
        do_validation=do_validation,
        batch_size=batch_size,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        samples=num_training_samples,
        verbose=verbose,
        count_mode=count_mode)

    if num_training_samples is not None:
      index_array = np.arange(num_training_samples)

    # To prevent a slowdown, we find beforehand the arrays that need conversion.
    feed = self._feed_inputs + self._feed_targets + self._feed_sample_weights
    indices_for_conversion_to_dense = []
    for i in range(len(feed)):
      if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
        indices_for_conversion_to_dense.append(i)

    callbacks.on_train_begin()
    for epoch in range(initial_epoch, epochs):
      # Reset stateful metrics
      for m in self.metrics:
        m.reset_states()
      # Update callbacks
      callbacks.on_epoch_begin(epoch)
      epoch_logs = {}
      if steps_per_epoch is not None:
        # Step-wise fit loop.
        self._pipeline_fit_loop_step_wise(
            ins=ins,
            callbacks=callbacks,
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            do_validation=do_validation,
            val_inputs=val_inputs,
            val_targets=val_targets,
            val_sample_weights=val_sample_weights,
            validation_steps=validation_steps,
            epoch_logs=epoch_logs)
      else:
        # Sample-wise fit loop.
        # NOTE(review): `index_array` is only bound when num_training_samples
        # is not None; presumably check_num_samples guarantees that whenever
        # steps_per_epoch is None -- confirm.
        self._pipeline_fit_loop_sample_wise(
            ins=ins,
            callbacks=callbacks,
            index_array=index_array,
            shuffle=shuffle,
            batch_size=batch_size,
            num_training_samples=num_training_samples,
            indices_for_conversion_to_dense=indices_for_conversion_to_dense,
            do_validation=do_validation,
            val_inputs=val_inputs,
            val_targets=val_targets,
            val_sample_weights=val_sample_weights,
            validation_steps=validation_steps,
            epoch_logs=epoch_logs)

      callbacks.on_epoch_end(epoch, epoch_logs)
      if callbacks.model.stop_training:
        break
    callbacks.on_train_end()
    return self.history
  def _pipeline_fit_loop_sample_wise(self,
                                     ins,
                                     callbacks,
                                     index_array,
                                     shuffle,
                                     batch_size,
                                     num_training_samples,
                                     indices_for_conversion_to_dense,
                                     do_validation,
                                     val_inputs,
                                     val_targets,
                                     val_sample_weights,
                                     validation_steps,
                                     epoch_logs):
    """Sample-wise fit loop for one epoch with software pipelining.

    `f.pipeline_run` overlaps infeed of the next batch with computation of the
    current one, so callbacks and batch logs run one step behind the infeed:
    batch k's results become available while batch k+1 is being enqueued.
    """
    f = self.train_function
    if shuffle == 'batch':
      index_array = training_utils.batch_shuffle(index_array, batch_size)
    elif shuffle:
      np.random.shuffle(index_array)
    batches = make_batches(num_training_samples, batch_size)

    ins_last_batch = None
    last_batch_logs = None
    batch_index = 0

    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      try:
        if isinstance(ins[-1], int):
          # Do not slice the training phase flag.
          ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
        else:
          ins_batch = slice_arrays(ins, batch_ids)
      except TypeError:
        raise TypeError('TypeError while preparing batch. If using HDF5 '
                        'input data, pass shuffle="batch".')

      # Pipeline batch logs
      next_batch_logs = {}
      next_batch_logs['batch'] = batch_index
      next_batch_logs['size'] = len(batch_ids)

      if batch_index > 0:
        # Callbacks operate one step behind in software pipeline.
        callbacks.on_batch_begin(batch_index - 1, last_batch_logs)
      for i in indices_for_conversion_to_dense:
        ins_batch[i] = ins_batch[i].toarray()

      outs = f.pipeline_run(
          cur_step_inputs=ins_last_batch, next_step_inputs=ins_batch)
      ins_last_batch = ins_batch

      if batch_index == 0:
        # First call only fills the pipeline; no results yet.
        assert outs is None
      else:
        if not isinstance(outs, list):
          outs = [outs]

        for l, o in zip(self.metrics_names, outs):
          last_batch_logs[l] = o  # pylint: disable=unsupported-assignment-operation

        callbacks.on_batch_end(batch_index - 1, last_batch_logs)
        if callbacks.model.stop_training:
          return

      last_batch_logs = next_batch_logs

    # Final batch: drain the pipeline (no next_step_inputs to enqueue).
    callbacks.on_batch_begin(batch_index, last_batch_logs)
    outs = f.pipeline_run(cur_step_inputs=ins_last_batch, next_step_inputs=None)
    if not isinstance(outs, list):
      outs = [outs]

    for l, o in zip(self.metrics_names, outs):
      last_batch_logs[l] = o
    callbacks.on_batch_end(batch_index, last_batch_logs)
    if callbacks.model.stop_training:
      return

    if do_validation:
      val_outs = training_arrays.test_loop(
          self,
          val_inputs,
          val_targets,
          sample_weights=val_sample_weights,
          batch_size=batch_size,
          steps=validation_steps,
          verbose=0)
      if not isinstance(val_outs, list):
        val_outs = [val_outs]
      # Same labels assumed.
      for l, o in zip(self.metrics_names, val_outs):
        epoch_logs['val_' + l] = o
def _pipeline_fit_loop_step_wise(self,
ins,
callbacks,
steps_per_epoch,
epochs,
do_validation,
val_inputs,
val_targets,
val_sample_weights,
validation_steps,
epoch_logs):
f = self.train_function
# Loop prologue
try:
outs = f.pipeline_run(cur_step_inputs=None, next_step_inputs=ins)
assert outs is None # Function shouldn't return anything!
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data on the first step '
'of the epoch, preventing further training. Check to '
'make sure your paths are correct and you have '
'permissions to read the files. Skipping validation')
for step_index in range(steps_per_epoch):
batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
try:
if step_index < steps_per_epoch - 1:
next_step_inputs = ins
else:
next_step_inputs = None
outs = f.pipeline_run(
cur_step_inputs=ins, next_step_inputs=next_step_inputs)
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data; '
'interrupting training. Make sure that your '
'dataset can generate at least `steps_per_batch * '
'epochs` batches (in this case, %d batches). You '
'may need to use the repeat() function when '
'building your dataset.' % steps_per_epoch * epochs)
break
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(self.metrics_names, outs):
batch_logs[l] = o
callbacks.on_batch_end(step_index, batch_logs)
if callbacks.model.stop_training:
break
if do_validation:
val_outs = training_arrays.test_loop(
self,
val_inputs,
val_targets,
sample_weights=val_sample_weights,
steps=validation_steps,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(self.metrics_names, val_outs):
epoch_logs['val_' + l] = o
  def _prepare_validation_data(self, validation_data, validation_split,
                               validation_steps, x, y, sample_weights,
                               batch_size):
    """Prepares the validation dataset.

    Args:
      validation_data: The validation data (if provided)
      validation_split: The validation split (if provided)
      validation_steps: The validation steps (if provided)
      x: The main training data x (if provided)
      y: The main training data y (if provided)
      sample_weights: The sample weights (if provided)
      batch_size: The training batch size (if provided)

    Returns:
      A 5-tuple of (x, y, val_x, val_y, val_sample_weights).

    Raises:
      ValueError: If the provided arguments are not compatible with
        `KerasTPUModel`.
    """
    # Note: this is similar to a section of $tf/python/keras/engine/training.py
    # It differs in that tf.data objects are not allowed to be passed directly.
    # Additionally, it handles validating shapes & types appropriately for use
    # in TPUs.
    if validation_data:
      if (isinstance(validation_data, iterator_ops.Iterator) or
          isinstance(validation_data, iterator_ops.EagerIterator) or
          isinstance(validation_data, dataset_ops.DatasetV2)):
        raise ValueError('KerasTPUModel cannot handle a Dataset or Iterator '
                         'for validation_data. Please instead pass a function '
                         'that returns a `tf.data.Dataset`.')
      if len(validation_data) == 2:
        val_x, val_y = validation_data  # pylint: disable=unpacking-non-sequence
        val_sample_weight = None
      elif len(validation_data) == 3:
        val_x, val_y, val_sample_weight = validation_data  # pylint: disable=unpacking-non-sequence
      else:
        raise ValueError('When passing a `validation_data` argument, it must '
                         'contain either 2 items (x_val, y_val), or 3 items '
                         '(x_val, y_val, val_sample_weights). However we '
                         'received `validation_data=%s`' % validation_data)
      val_x, val_y, val_sample_weights = self._standardize_user_data(
          val_x,
          val_y,
          sample_weight=val_sample_weight,
          batch_size=batch_size,
          steps=validation_steps)
    elif validation_split and 0. < validation_split < 1.:
      # Carve the validation set out of the tail of the training arrays.
      if training_utils.has_symbolic_tensors(x):
        raise ValueError('If your data is in the form of symbolic tensors, you '
                         'cannot use `validation_split`.')
      if hasattr(x[0], 'shape'):
        split_at = int(x[0].shape[0] * (1. - validation_split))
      else:
        split_at = int(len(x[0]) * (1. - validation_split))
      x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
      y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
      sample_weights, val_sample_weights = (
          slice_arrays(sample_weights, 0, split_at),
          slice_arrays(sample_weights, split_at)
      )
    elif validation_steps:
      # Validation comes from a dataset factory handled elsewhere; use empty
      # placeholders here.
      val_x = []
      val_y = []
      val_sample_weights = []
    else:
      val_x = None
      val_y = None
      val_sample_weights = None

    return x, y, val_x, val_y, val_sample_weights
def predict(self,
x,
batch_size=None,
verbose=0,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
with _tpu_session_context():
return super(KerasTPUModel, self).predict(
x,
batch_size=batch_size,
verbose=verbose,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
@property
def optimizer(self):
if self._tpu_model:
return self._tpu_model.optimizer
return self._cpu_model.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self._optimizer = optimizer
@property
def metrics(self):
if self._tpu_model:
return self._tpu_model.metrics
return self._stateful_metric_functions
@metrics.setter
def metrics(self, metrics):
self._stateful_metric_functions = metrics
def _make_train_function(self):
if not self.train_function:
self.train_function = TPUFunction(
self,
model_fn_lib.ModeKeys.TRAIN,
tpu_assignment=self._tpu_assignment)
return self.train_function
def _make_test_function(self):
if not self.test_function:
self.test_function = TPUFunction(
self, model_fn_lib.ModeKeys.EVAL, tpu_assignment=self._tpu_assignment)
return self.test_function
def _make_fit_function(self):
if not self._fit_function:
self._fit_function = TPUFunction(
self,
model_fn_lib.ModeKeys.TRAIN,
tpu_assignment=self._tpu_assignment)
return self._fit_function
def _make_eval_function(self):
if not self._eval_function:
self._eval_function = TPUFunction(
self, model_fn_lib.ModeKeys.EVAL, tpu_assignment=self._tpu_assignment)
return self._eval_function
def _make_predict_function(self):
if not self.predict_function:
self.predict_function = TPUFunction(
self,
model_fn_lib.ModeKeys.PREDICT,
tpu_assignment=self._tpu_assignment)
return self.predict_function
  def _initialize_weights(self, cloned_model):
    """Initialize TPU weights.

    This is called on the first compile of the TPU model (first call to
    fit/predict/evaluate). Copies model weights and optimizer hyperparameter
    variables (e.g. learning rate) from the CPU model to the TPU clone.

    Args:
      cloned_model: `keras.Model`, TPU model to initialize.
    """
    if self._tpu_weights_initialized:
      return

    self._tpu_model = cloned_model
    self._tpu_weights_initialized = True

    weights = self._cpu_model.get_weights()

    if isinstance(self.cpu_optimizer, keras_optimizers.TFOptimizer):
      # TFOptimizer wrappers expose no Keras-style config to copy.
      cpu_optimizer_config = {}
    else:
      cpu_optimizer_config = self.cpu_optimizer.get_config()

    logging.info('Setting weights on TPU model.')
    cloned_model.set_weights(weights)
    if self._tpu_model.optimizer is None:
      # tpu_model may not be compiled, e.g., loading weights and then predict.
      return
    for k, v in six.iteritems(cpu_optimizer_config):
      if k == 'name':
        continue
      opt_var = getattr(self._tpu_model.optimizer, k)
      if isinstance(opt_var, variables.Variable):
        logging.info('CPU -> TPU %s: %s {%s}', k, v, K.get_value(opt_var))
        K.get_session().run(opt_var.assign(v))
      else:
        logging.warning('Cannot update non-variable config: %s', k)
  @property
  def cpu_optimizer(self):
    # Optimizer attached to the CPU clone; source of truth for hyperparameters
    # that are mirrored onto the TPU optimizer in _initialize_weights.
    return self._cpu_model.optimizer
  def sync_to_cpu(self):
    """Copy weights from the CPU, returning a synchronized CPU model.

    Mirrors both model weights and optimizer hyperparameter variables from the
    TPU clone back onto the CPU model; no-op if the TPU weights were never
    initialized.
    """
    if not self._tpu_weights_initialized:
      return self._cpu_model

    logging.info('Copying TPU weights to the CPU')
    tpu_weights = self._tpu_model.get_weights()

    # TFOptimizers have no configurable options
    if isinstance(self.cpu_optimizer, keras_optimizers.TFOptimizer):
      tpu_optimizer_config = {}
    else:
      tpu_optimizer_config = self._tpu_model.optimizer.get_config()

    self._cpu_model.set_weights(tpu_weights)
    for k, v in six.iteritems(tpu_optimizer_config):
      logging.info('TPU -> CPU %s: %s', k, v)
      if k == 'name':
        continue
      opt_var = getattr(self.cpu_optimizer, k)
      if isinstance(opt_var, variables.Variable):
        K.get_session().run(opt_var.assign(v))
      else:
        logging.warning('Cannot update non-variable config: %s', k)

    return self._cpu_model
def get_weights(self):
return self.sync_to_cpu().get_weights()
def save_weights(self, *args, **kw):
return self.sync_to_cpu().save_weights(*args, **kw)
def save(self, *args, **kw):
return self.sync_to_cpu().save(*args, **kw)
  def set_weights(self, weights):
    """Sets weights on the CPU model and schedules a TPU re-sync."""
    # We may not have a TPU model available if we haven't run fit/predict, so
    # we can't directly set the TPU weights here.
    # Instead, reset CPU model weights and force TPU re-initialization at the
    # next call.
    self._cpu_model.set_weights(weights)
    self._tpu_weights_initialized = False
  def load_weights(self, filepath, by_name=False):
    """Loads weights into the CPU model; TPU sync happens on next use."""
    self._cpu_model.load_weights(filepath, by_name)
    # Force TPU re-initialization at the next fit/predict/evaluate call.
    self._tpu_weights_initialized = False
# pylint: disable=bad-continuation
def _validate_shapes(model):
"""Validate that all layers in `model` have constant shape."""
for layer in model.layers:
if isinstance(layer.input_shape, tuple):
input_shapes = [layer.input_shape]
else:
input_shapes = layer.input_shape
if isinstance(layer.output_shape, tuple):
output_shapes = [layer.output_shape]
else:
output_shapes = layer.output_shape
for shape in input_shapes + output_shapes:
for dim in shape[1:]:
if dim is None:
raise ValueError(
"""
Layer %(layer)s has a variable shape in a non-batch dimension. TPU models must
have constant shapes for all operations.
You may have to specify `input_length` for RNN/TimeDistributed layers.
Layer: %(layer)s
Input shape: %(input_shape)s
Output shape: %(output_shape)s
""" % {
'layer': layer,
'input_shape': layer.input_shape,
'output_shape': layer.output_shape
})
# pylint: enable=bad-continuation
@experimental
def tpu_model(model, strategy=None):
  """Copy `model` along with weights to the TPU.

  Returns a TPU model.

  Usage:
  ```
  a = Input(shape=(32,))
  b = Dense(32)(a)
  model = Model(inputs=a, outputs=b)

  # If `num_cores_per_host` is greater than one, batch parallelism will be used
  # to run on multiple TPU cores.
  strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
  model = keras_support.tpu_model(model, strategy)
  model.compile(
      optimizer=tf.train.GradientDescentOptimizer(learning_rate=1.0),
      ...)
  ```

  Args:
    model: A `tf.keras.Model` instance.
    strategy: `TPUDistributionStrategy`.  The strategy to use for replicating
      model across multiple TPU cores.

  Returns:
    A new `KerasTPUModel` instance.
  """
  _validate_shapes(model)
  # TODO(xiejw): Validate TPU model. TPUModel only?
  # TODO(xiejw): Validate replicas. Full or 1. Shall we allow subset?
  # TODO(xiejw): Adds reduction option.

  if strategy is None:
    strategy = TPUDistributionStrategy()
  else:
    if not isinstance(strategy, TPUDistributionStrategy):
      raise TypeError(
          '`strategy` must have type `tf.contrib.tpu.TPUDistributionStrategy`. '
          'Got: {}'.format(type(strategy)))

  # If the model has already been initialized, grab the optimizer configuration
  # and model weights before entering the TPU session.
  if model.optimizer:
    if (isinstance(model.optimizer, keras_optimizers.Optimizer) and not
        isinstance(model.optimizer, keras_optimizers.TFOptimizer)):
      optimizer_config = model.optimizer.get_config()
    else:
      # TFOptimizer wrappers carry no copyable Keras config.
      optimizer_config = None
    model_weights = model.get_weights()
  else:
    model_weights = None

  setup_tpu_session(strategy._tpu_cluster_resolver)

  # Force initialization of the CPU model in the TPU session.
  cpu_model = models.clone_model(model)
  if model.optimizer:
    cpu_model.compile(
        _clone_optimizer(model.optimizer, optimizer_config),
        model.loss,
        metrics_module.clone_metrics(model._compile_metrics),
        model.loss_weights,
        model.sample_weight_mode,
        metrics_module.clone_metrics(model._compile_weighted_metrics),
    )

  if model_weights:
    cpu_model.set_weights(model_weights)
    # Clear any recurrent state carried over from the original model.
    cpu_model.reset_states()

  return KerasTPUModel(cpu_model=cpu_model, strategy=strategy)
|
#!/usr/bin/python
import numpy as np
import math
import sys
import os
import random
from PIL import Image
def overlapDistances(refPatch, patches):
    '''
    Compute the distance of refPatch to the corresponding overlap region of
    every sample patch.

    refPatch: array of shape (h, w, channels) with h, w <= patchSize.
    patches:  array of shape (numPatches, patchSize, patchSize, channels).
    Returns a 1-D array of length numPatches; entry k is the sum over the
    overlap pixels of the per-pixel L2 norm across channels.
    '''
    # find refPatch area of each sample patch
    ov = patches[:, :refPatch.shape[0], :refPatch.shape[1], :]
    # Broadcasting subtracts refPatch from every patch directly; the original
    # np.tile call materialized numPatches extra copies of refPatch for no
    # benefit. Result is numerically identical.
    diff = ov - refPatch
    # per-pixel L2 norm across channels, then sum over all overlap pixels
    distances = np.sqrt(np.sum(np.square(diff), axis=3))
    return np.sum(distances, axis=(1, 2))
def makePatches(img, patchSize):
    '''
    Slice img into all overlapping square patches of side patchSize.

    Returns an array of shape (numPatches, patchSize, patchSize, nChannels),
    so patches[idx] is the idx'th patch (row-major over top-left corners).

    NOTE(review): the scan ranges are shape - patchSize rather than
    shape - patchSize + 1, so patches whose corner sits at the last valid
    row/column are never generated -- presumably intentional slack, confirm.
    '''
    #check that img should have channel axis, so (x,y,channel)
    assert img.ndim == 3, "image should have channel axis"
    print "Making patches..."
    nX = img.shape[0] - patchSize
    nY = img.shape[1] - patchSize
    nChannels = img.shape[2]
    patches = np.zeros((nX*nY, patchSize, patchSize, nChannels), img.dtype)
    #iterate through all patches from img and store in patches
    k = 0
    for i in range(nX):
        for j in range(nY):
            x,X = i, i+patchSize
            y,Y = j, j+patchSize
            patches[k,:,:,:] = img[x:X,y:Y,:]
            k += 1
    return patches
def getMatchingPatch(distances, thresholdFactor):
    '''
    Given a 1-D array of patch distances, choose a random patch index whose
    distance is within thresholdFactor of the minimum distance.

    The input array is left unmodified (the original mutated it in place).
    '''
    # Work on a float copy so the caller's array stays intact and the
    # infinity sentinel below is representable.
    d = np.array(distances, dtype=np.float64)
    # Do not select the current patch itself (distance ~ 0). Use +inf rather
    # than the original 99999 sentinel: summed pixel distances easily exceed
    # 99999 for large patches, which made the sentinel the "minimum" and
    # could re-select the current patch.
    d[d < np.finfo(d.dtype).eps] = np.inf
    m = np.min(d)
    if not np.isfinite(m):
        # Every patch matched exactly; any choice is equally valid
        # (preserves the original's behavior of picking among all of them).
        indices = np.arange(len(d))
    else:
        # choose random index such that the distance is within threshold
        # factor of minimum distance
        threshold = thresholdFactor * m
        indices = np.where(d < threshold)[0]
    idx = indices[np.random.randint(0, len(indices))]
    return idx
def insert(target, patch, i, j):
    '''
    Paste patch into target with its top-left corner at (i, j), clipping the
    patch at the bottom/right edges of target.
    '''
    patchSize = patch.shape[0]
    rowEnd = min(i + patchSize, target.shape[0])
    colEnd = min(j + patchSize, target.shape[1])
    target[i:rowEnd, j:colEnd, :] = patch[:rowEnd - i, :colEnd - j, :]
def makeCostMap(img1, img2):
    '''
    Pixel-wise L2 distance (across the channel axis) between two equally
    sized overlapping image regions.
    '''
    diff = img1 - img2
    return np.sqrt((diff * diff).sum(axis=2))
def calcMinCosts(costMap):
    '''
    Dynamic program for cumulative minimum vertical-seam cost:
    cumu[r+1][c] = costMap[r+1][c] + min of the (up to 3) entries of row r
    adjacent to column c.
    '''
    nRows, nCols = costMap.shape
    # Float copy of the cost map; the original filled a np.ones array, which
    # also yields a float64 copy.
    cumu = np.array(costMap, dtype=np.float64)
    for row in range(nRows - 1):
        for col in range(nCols):
            lo = max(col - 1, 0)
            hi = min(col + 2, nCols)
            cumu[row + 1, col] += cumu[row, lo:hi].min()
    return cumu
def pathBacktrace(cumuCosts):
    '''
    Trace the minimum-cost vertical seam backwards (bottom row to top row)
    through the cumulative cost map produced by calcMinCosts.

    Returns a 0/1 array of cumuCosts.shape with exactly one 1 per row
    marking the seam column.
    '''
    nCols = cumuCosts.shape[1]
    nRows = cumuCosts.shape[0]
    pathCosts = np.zeros(cumuCosts.shape)
    minIdx = 0
    maxIdx = nCols - 1
    for row in range(nRows - 1, -1, -1):
        # Bug fix: np.argmin over the window returns an index RELATIVE to
        # minIdx; the original used it as an absolute column, which dragged
        # the seam toward column 0 after the first (bottom) row.
        i = minIdx + np.argmin(cumuCosts[row, minIdx:maxIdx + 1])
        pathCosts[row, i] = 1
        # The seam may only move one column per row.
        minIdx = max(0, i - 1)
        maxIdx = min(nCols - 1, i + 1)
    return pathCosts
def cheapVertPath(costMap):
    '''Minimum-cost vertical seam mask for costMap (DP then backtrace).'''
    return pathBacktrace(calcMinCosts(costMap))
def cheapVertCut(costMap):
    '''
    Binary mask whose 1-region covers the cheapest vertical seam and
    everything to its left; zeros to the right of the seam.
    '''
    mask = cheapVertPath(costMap)
    for row in range(mask.shape[0]):
        seamCol = np.argmax(mask[row, :])
        mask[row, :seamCol] = 1
    return mask
def cheapHorizCut(costMap):
    '''Binary mask for the cheapest horizontal seam (vertical cut of the
    transpose, transposed back).'''
    return cheapVertCut(costMap.T).T
if __name__ == "__main__":
    # Image-quilting texture synthesis driver (Python 2: print statements,
    # integer division below). Tiles a 2x-scaled canvas with sample patches
    # joined along minimum-cost seams.
    print "Starting..."
    # read in original image using Python Image Library (PIL)
    orig_img = Image.open("text.png")
    (width, height) = orig_img.size
    print width, height
    # extract list of pixels in RGB/grayscale format
    pixels = list(orig_img.getdata())
    sample_2d = np.array(pixels, np.int32)
    # NOTE(review): reshape hard-codes 3 channels even though the assert
    # below also permits single-channel images -- confirm inputs are RGB.
    sample_2d = sample_2d.reshape((height,-1,3))
    # ensure that img is either an RGB or grayscale image
    assert sample_2d.ndim == 3 and (sample_2d.shape[2] == 3 or sample_2d.shape[2] == 1), sample_2d.shape
    # choose patch from input sample by slicing
    patchSize = 30
    sl = (slice(0,patchSize), slice(0,patchSize), slice(0,3))
    # TODO: randomly select initial patch
    initialPatch = sample_2d[sl[0], sl[1], sl[2]]
    # define textureSize, tileSize and initialize blank canvas
    textureSize = (width * 2, height * 2)
    # NOTE(review): relies on Python 2 integer division (patchSize / 6).
    overlap = patchSize / 6
    tileSize = patchSize - overlap
    texture = np.zeros((textureSize[1], textureSize[0], 3), dtype=np.float32)
    # generate all sample patches
    patches = makePatches(sample_2d, patchSize)
    # number of tiles across (N) and down (M), rounded up to cover the canvas
    N = int(math.ceil(textureSize[0]/float(tileSize)))
    M = int(math.ceil(textureSize[1]/float(tileSize)))
    k = -1
    for i in range(M): # height M
        for j in range(N): # width N
            k += 1
            print "On iteration %i" % k
            # insert default initial top-left patch
            if k == 0:
                insert(texture, initialPatch, i, j)
                continue
            # which already-placed neighbors constrain this tile
            blockLeft = j>0
            blockUp = i>0
            # find reference patchs and calculate overlap distances over all sample patches
            if blockLeft:
                refPatchLeft = texture[i*tileSize:min(i*tileSize + patchSize, textureSize[1]),
                                       j*tileSize:min(j*tileSize + overlap, textureSize[0]), :]
                distLeft = overlapDistances(refPatchLeft, patches)
                d = distLeft
            if blockUp:
                refPatchUp = texture[i*tileSize:min(i*tileSize + overlap, textureSize[1]),
                                     j*tileSize:min(j*tileSize + patchSize, textureSize[0]), :]
                distUp = overlapDistances(refPatchUp, patches)
                d = distUp
            if blockLeft and blockUp:
                # corner region is counted by both distLeft and distUp;
                # subtract it once (inclusion-exclusion)
                refPatchBoth = texture[i*tileSize:min(i*tileSize + overlap, textureSize[1]),
                                       j*tileSize:min(j*tileSize + overlap, textureSize[0]), :]
                distBoth = overlapDistances(refPatchBoth, patches)
                d = distLeft + distUp - distBoth
            # finds appropriate random patch
            chosenIdx = getMatchingPatch(d, 1.1)
            chosenPatch = patches[chosenIdx, :, :, :]
            if blockLeft:
                # min-cost vertical seam between left neighbor and new patch
                costMap = makeCostMap(refPatchLeft, chosenPatch[:refPatchLeft.shape[0], :overlap, :])
                pathMaskLeft = cheapVertCut(costMap)
                overlapLeft = np.where(np.dstack([pathMaskLeft] * 3), refPatchLeft, chosenPatch[:refPatchLeft.shape[0], :overlap, :])
                # overwrite with min cut
                chosenPatch[:refPatchLeft.shape[0],:overlap,:] = overlapLeft
            if blockUp:
                # chosenSize = min(j*tileSize + patchSize, textureSize[0]) - j*tileSize
                # TODO: stupid solution; find better one
                costMap = makeCostMap(refPatchUp, chosenPatch[:overlap, :refPatchUp.shape[1], :])
                pathMaskUp = cheapHorizCut(costMap)
                overlapUp = np.where(np.dstack([pathMaskUp] * 3), refPatchUp, chosenPatch[:overlap, :refPatchUp.shape[1], :])
                # overwrite with min cut
                chosenPatch[:overlap,:refPatchUp.shape[1],:] = overlapUp
            if blockLeft and blockUp:
                # corner: union of both masks so either seam keeps old texture
                pathMaskBoth = np.zeros((refPatchUp.shape[0], refPatchLeft.shape[1]))
                for p in range(refPatchUp.shape[0]):
                    for q in range(refPatchLeft.shape[1]):
                        pathMaskBoth[p][q] = 1 - ((1-pathMaskUp[p][q]) * (1-pathMaskLeft[p][q]))
                        # pathMaskBoth[p][q] = pathMaskUp[p][q] | pathMaskLeft[p][q]
                pathMaskLeft[:pathMaskBoth.shape[0],:] = pathMaskBoth
                pathMaskUp[:,:pathMaskBoth.shape[1]] = pathMaskBoth
                overlapBothLeft = np.where(np.dstack([pathMaskLeft] * 3), refPatchLeft, chosenPatch[:refPatchLeft.shape[0], :overlap, :])
                overlapBothUp = np.where(np.dstack([pathMaskUp] * 3), refPatchUp, chosenPatch[:overlap, :refPatchUp.shape[1], :])
                # overwrite with min cut
                chosenPatch[:refPatchLeft.shape[0],:overlap,:] = overlapBothLeft
                chosenPatch[:overlap,:refPatchUp.shape[1],:] = overlapBothUp
            insert(texture, chosenPatch, i*tileSize, j*tileSize)
    # convert texture into flattened array pixels_out for exporting as PNG
    pixels_out = np.reshape(texture, (textureSize[0] * textureSize[1], 3), order='C')
    pixels_out = map(lambda x: (x[0],x[1],x[2]), pixels_out)
    img_out = Image.new(orig_img.mode, textureSize)
    img_out.putdata(pixels_out)
    img_out.save("text_generated_30.png", "png")
    img_out.show()
    print "donedonedone!"
Removed some hard-coded values in driver_serial.py.
#!/usr/bin/python
import numpy as np
import math
import sys
import os
import random
from PIL import Image
def overlapDistances(refPatch, patches):
    '''
    Compute the distance of refPatch to the corresponding overlap region of
    every sample patch.

    refPatch: array of shape (h, w, channels) with h, w <= patchSize.
    patches:  array of shape (numPatches, patchSize, patchSize, channels).
    Returns a 1-D array of length numPatches; entry k is the sum over the
    overlap pixels of the per-pixel L2 norm across channels.
    '''
    # find refPatch area of each sample patch
    ov = patches[:, :refPatch.shape[0], :refPatch.shape[1], :]
    # Broadcasting subtracts refPatch from every patch directly; the original
    # np.tile call materialized numPatches extra copies of refPatch for no
    # benefit. Result is numerically identical.
    diff = ov - refPatch
    # per-pixel L2 norm across channels, then sum over all overlap pixels
    distances = np.sqrt(np.sum(np.square(diff), axis=3))
    return np.sum(distances, axis=(1, 2))
def makePatches(img, patchSize):
    '''
    Slice img into all overlapping square patches of side patchSize.

    Returns an array of shape (numPatches, patchSize, patchSize, nChannels),
    so patches[idx] is the idx'th patch (row-major over top-left corners).

    NOTE(review): the scan ranges are shape - patchSize rather than
    shape - patchSize + 1, so patches whose corner sits at the last valid
    row/column are never generated -- presumably intentional slack, confirm.
    '''
    #check that img should have channel axis, so (x,y,channel)
    assert img.ndim == 3, "image should have channel axis"
    print "Making patches..."
    nX = img.shape[0] - patchSize
    nY = img.shape[1] - patchSize
    nChannels = img.shape[2]
    patches = np.zeros((nX*nY, patchSize, patchSize, nChannels), img.dtype)
    #iterate through all patches from img and store in patches
    k = 0
    for i in range(nX):
        for j in range(nY):
            x,X = i, i+patchSize
            y,Y = j, j+patchSize
            patches[k,:,:,:] = img[x:X,y:Y,:]
            k += 1
    return patches
def getMatchingPatch(distances, thresholdFactor):
    '''
    Given a 1-D array of patch distances, choose a random patch index whose
    distance is within thresholdFactor of the minimum distance.

    The input array is left unmodified (the original mutated it in place).
    '''
    # Work on a float copy so the caller's array stays intact and the
    # infinity sentinel below is representable.
    d = np.array(distances, dtype=np.float64)
    # Do not select the current patch itself (distance ~ 0). Use +inf rather
    # than the original 99999 sentinel: summed pixel distances easily exceed
    # 99999 for large patches, which made the sentinel the "minimum" and
    # could re-select the current patch.
    d[d < np.finfo(d.dtype).eps] = np.inf
    m = np.min(d)
    if not np.isfinite(m):
        # Every patch matched exactly; any choice is equally valid
        # (preserves the original's behavior of picking among all of them).
        indices = np.arange(len(d))
    else:
        # choose random index such that the distance is within threshold
        # factor of minimum distance
        threshold = thresholdFactor * m
        indices = np.where(d < threshold)[0]
    idx = indices[np.random.randint(0, len(indices))]
    return idx
def insert(target, patch, i, j):
    '''
    Paste patch into target with its top-left corner at (i, j), clipping the
    patch at the bottom/right edges of target.
    '''
    patchSize = patch.shape[0]
    rowEnd = min(i + patchSize, target.shape[0])
    colEnd = min(j + patchSize, target.shape[1])
    target[i:rowEnd, j:colEnd, :] = patch[:rowEnd - i, :colEnd - j, :]
def makeCostMap(img1, img2):
    '''
    Pixel-wise L2 distance (across the channel axis) between two equally
    sized overlapping image regions.
    '''
    diff = img1 - img2
    return np.sqrt((diff * diff).sum(axis=2))
def calcMinCosts(costMap):
    '''
    Dynamic program for cumulative minimum vertical-seam cost:
    cumu[r+1][c] = costMap[r+1][c] + min of the (up to 3) entries of row r
    adjacent to column c.
    '''
    nRows, nCols = costMap.shape
    # Float copy of the cost map; the original filled a np.ones array, which
    # also yields a float64 copy.
    cumu = np.array(costMap, dtype=np.float64)
    for row in range(nRows - 1):
        for col in range(nCols):
            lo = max(col - 1, 0)
            hi = min(col + 2, nCols)
            cumu[row + 1, col] += cumu[row, lo:hi].min()
    return cumu
def pathBacktrace(cumuCosts):
    '''
    Trace the minimum-cost vertical seam backwards (bottom row to top row)
    through the cumulative cost map produced by calcMinCosts.

    Returns a 0/1 array of cumuCosts.shape with exactly one 1 per row
    marking the seam column.
    '''
    nCols = cumuCosts.shape[1]
    nRows = cumuCosts.shape[0]
    pathCosts = np.zeros(cumuCosts.shape)
    minIdx = 0
    maxIdx = nCols - 1
    for row in range(nRows - 1, -1, -1):
        # Bug fix: np.argmin over the window returns an index RELATIVE to
        # minIdx; the original used it as an absolute column, which dragged
        # the seam toward column 0 after the first (bottom) row.
        i = minIdx + np.argmin(cumuCosts[row, minIdx:maxIdx + 1])
        pathCosts[row, i] = 1
        # The seam may only move one column per row.
        minIdx = max(0, i - 1)
        maxIdx = min(nCols - 1, i + 1)
    return pathCosts
def cheapVertPath(costMap):
    '''Minimum-cost vertical seam mask for costMap (DP then backtrace).'''
    return pathBacktrace(calcMinCosts(costMap))
def cheapVertCut(costMap):
    '''
    Binary mask whose 1-region covers the cheapest vertical seam and
    everything to its left; zeros to the right of the seam.
    '''
    mask = cheapVertPath(costMap)
    for row in range(mask.shape[0]):
        seamCol = np.argmax(mask[row, :])
        mask[row, :seamCol] = 1
    return mask
def cheapHorizCut(costMap):
    # A horizontal cut is simply a vertical cut of the transposed cost map.
    return cheapVertCut(costMap.T).T
# Driver: synthesize a larger texture from "text.png" by image quilting
# (Efros-Freeman style): tile random matching patches and blend the overlaps
# along minimum-cost seams.
# NOTE(review): this script is Python 2 (print statements; `patchSize / 6`
# relies on integer division). `Image` (PIL), `math`, `makePatches` and
# `overlapDistances` are not defined in this chunk — presumably imported /
# defined elsewhere in the file; confirm before running standalone.
if __name__ == "__main__":
    print "Starting..."
    # read in original image using Python Image Library (PIL)
    orig_img = Image.open("text.png")
    (width, height) = orig_img.size
    # Output texture is scaleFactor times the input in each dimension.
    vertScaleFactor = 2
    horizScaleFactor = 2
    assert vertScaleFactor >= 1 and horizScaleFactor >= 1, "cannot scale down"
    # extract list of pixels in RGB/grayscale format
    pixels = list(orig_img.getdata())
    sample_2d = np.array(pixels, np.int32)
    sample_2d = sample_2d.reshape((height,-1,3))
    # ensure that img is either an RGB or grayscale image
    assert sample_2d.ndim == 3 and (sample_2d.shape[2] == 3 or sample_2d.shape[2] == 1), sample_2d.shape
    # choose patch from input sample by slicing
    # TODO hard coded
    patchSize = 30
    sl = (slice(0,patchSize), slice(0,patchSize), slice(0,3))
    # TODO: randomly select initial patch
    initialPatch = sample_2d[sl[0], sl[1], sl[2]]
    # define textureSize, tileSize and initialize blank canvas
    textureSize = (width * horizScaleFactor, height * vertScaleFactor)
    # Adjacent tiles overlap by 1/6 of a patch; integer division under Py2.
    overlap = patchSize / 6
    tileSize = patchSize - overlap
    texture = np.zeros((textureSize[1], textureSize[0], 3), dtype=np.float32)
    # generate all sample patches
    patches = makePatches(sample_2d, patchSize)
    # Number of tiles needed to cover the output in each direction.
    N = int(math.ceil(textureSize[0]/float(tileSize)))
    M = int(math.ceil(textureSize[1]/float(tileSize)))
    k = -1
    for i in range(M): # height M
        for j in range(N): # width N
            k += 1
            print "On iteration %i" % k
            # insert default initial top-left patch (i == j == 0 here)
            if k == 0:
                insert(texture, initialPatch, i, j)
                continue
            # Which neighbours already exist and constrain this tile?
            blockLeft = j>0
            blockUp = i>0
            # find reference patchs and calculate overlap distances over all sample patches
            if blockLeft:
                refPatchLeft = texture[i*tileSize:min(i*tileSize + patchSize, textureSize[1]),
                                       j*tileSize:min(j*tileSize + overlap, textureSize[0]), :]
                distLeft = overlapDistances(refPatchLeft, patches)
                d = distLeft
            if blockUp:
                refPatchUp = texture[i*tileSize:min(i*tileSize + overlap, textureSize[1]),
                                     j*tileSize:min(j*tileSize + patchSize, textureSize[0]), :]
                distUp = overlapDistances(refPatchUp, patches)
                d = distUp
            if blockLeft and blockUp:
                # Corner region is counted twice above; subtract it once.
                refPatchBoth = texture[i*tileSize:min(i*tileSize + overlap, textureSize[1]),
                                       j*tileSize:min(j*tileSize + overlap, textureSize[0]), :]
                distBoth = overlapDistances(refPatchBoth, patches)
                d = distLeft + distUp - distBoth
            # finds appropriate random patch
            chosenIdx = getMatchingPatch(d, 1.1)
            chosenPatch = patches[chosenIdx, :, :, :]
            if blockLeft:
                # Blend the left overlap along a minimum-cost vertical seam.
                costMap = makeCostMap(refPatchLeft, chosenPatch[:refPatchLeft.shape[0], :overlap, :])
                pathMaskLeft = cheapVertCut(costMap)
                overlapLeft = np.where(np.dstack([pathMaskLeft] * 3), refPatchLeft, chosenPatch[:refPatchLeft.shape[0], :overlap, :])
                # overwrite with min cut
                chosenPatch[:refPatchLeft.shape[0],:overlap,:] = overlapLeft
            if blockUp:
                # chosenSize = min(j*tileSize + patchSize, textureSize[0]) - j*tileSize
                # TODO: stupid solution; find better one
                # Blend the top overlap along a minimum-cost horizontal seam.
                costMap = makeCostMap(refPatchUp, chosenPatch[:overlap, :refPatchUp.shape[1], :])
                pathMaskUp = cheapHorizCut(costMap)
                overlapUp = np.where(np.dstack([pathMaskUp] * 3), refPatchUp, chosenPatch[:overlap, :refPatchUp.shape[1], :])
                # overwrite with min cut
                chosenPatch[:overlap,:refPatchUp.shape[1],:] = overlapUp
            if blockLeft and blockUp:
                # Combine the two seam masks in the shared corner (logical OR
                # expressed arithmetically on the 0/1 masks).
                pathMaskBoth = np.zeros((refPatchUp.shape[0], refPatchLeft.shape[1]))
                for p in range(refPatchUp.shape[0]):
                    for q in range(refPatchLeft.shape[1]):
                        pathMaskBoth[p][q] = 1 - ((1-pathMaskUp[p][q]) * (1-pathMaskLeft[p][q]))
                        # pathMaskBoth[p][q] = pathMaskUp[p][q] | pathMaskLeft[p][q]
                pathMaskLeft[:pathMaskBoth.shape[0],:] = pathMaskBoth
                pathMaskUp[:,:pathMaskBoth.shape[1]] = pathMaskBoth
                overlapBothLeft = np.where(np.dstack([pathMaskLeft] * 3), refPatchLeft, chosenPatch[:refPatchLeft.shape[0], :overlap, :])
                overlapBothUp = np.where(np.dstack([pathMaskUp] * 3), refPatchUp, chosenPatch[:overlap, :refPatchUp.shape[1], :])
                # overwrite with min cut
                chosenPatch[:refPatchLeft.shape[0],:overlap,:] = overlapBothLeft
                chosenPatch[:overlap,:refPatchUp.shape[1],:] = overlapBothUp
            insert(texture, chosenPatch, i*tileSize, j*tileSize)
    # convert texture into flattened array pixels_out for exporting as PNG
    pixels_out = np.reshape(texture, (textureSize[0] * textureSize[1], 3), order='C')
    pixels_out = map(lambda x: (x[0],x[1],x[2]), pixels_out)
    img_out = Image.new(orig_img.mode, textureSize)
    img_out.putdata(pixels_out)
    # img_out.save("text_generated_30.png", "png")
    img_out.show()
    print "donedonedone!"
|
from mythril.rpc.client import EthJsonRpc
from mythril.ipc.client import EthIpc
from mythril.ether.ethcontract import ETHContract, InstanceList
import hashlib
import os
import persistent
import persistent.list
import transaction
from BTrees.OOBTree import BTree
import ZODB
from ZODB import FileStorage
def get_persistent_storage(db_dir = None):
    """Open the ZODB contract database (default location: ~/.mythril) and
    return the root ContractStorage object, creating it on first use."""
    if not db_dir:
        db_dir = os.path.join(os.path.expanduser('~'), ".mythril")
    if not os.path.exists(db_dir):
        os.makedirs(db_dir)
    # Open (or create) the file-backed ZODB storage.
    db_path = os.path.join(db_dir, "contractstorage.fs")
    db = ZODB.DB(FileStorage.FileStorage(db_path))
    storage_root = db.open().root()
    # Fetch the existing ContractStorage, or install a fresh one.
    try:
        contract_storage = storage_root['contractStorage']
    except KeyError:
        contract_storage = ContractStorage()
        storage_root['contractStorage'] = contract_storage
    return contract_storage
class ContractStorage(persistent.Persistent):
    """ZODB-persistent store of contract bytecode and on-chain instances."""

    def __init__(self):
        self.contracts = BTree()        # md5(bytecode) -> ETHContract
        self.instance_lists = BTree()   # md5(bytecode) -> InstanceList
        self.last_block = 0             # resume marker for initialize()

    def get_contract_by_hash(self, contract_hash):
        """Return the ETHContract stored under contract_hash (raises KeyError)."""
        return self.contracts[contract_hash]

    def initialize(self, rpchost, rpcport, rpctls, sync_all, ipc):
        """Walk the chain backwards from the newest block (or from a saved
        resume point), indexing every contract-creation transaction."""
        if ipc:
            eth = EthIpc()
        else:
            eth = EthJsonRpc(rpchost, rpcport, rpctls)
        if self.last_block:
            blockNum = self.last_block
            print("Resuming synchronization from block " + str(blockNum))
        else:
            blockNum = eth.eth_blockNumber()
            print("Starting synchronization from latest block: " + str(blockNum))
        while (blockNum > 0):
            if not blockNum % 1000:
                print("Processing block " + str(blockNum) + ", " + str(len(self.contracts.keys())) + " unique contracts in database")
            block = eth.eth_getBlockByNumber(blockNum)
            for tx in block['transactions']:
                if not tx['to']:
                    # A transaction with an empty 'to' creates a contract;
                    # its address is found in the receipt.
                    receipt = eth.eth_getTransactionReceipt(tx['hash'])
                    if receipt is not None:
                        contract_address = receipt['contractAddress']
                        contract_code = eth.eth_getCode(contract_address)
                        contract_balance = eth.eth_getBalance(contract_address)
                        # skip contracts with zero balance (disable with --sync-all)
                        # Bug fix: the original condition `not balance or
                        # sync_all` skipped EVERY contract when --sync-all
                        # was given — the opposite of its intent.
                        if not contract_balance and not sync_all:
                            continue
                        code = ETHContract(contract_code, tx['input'])
                        m = hashlib.md5()
                        m.update(contract_code.encode('UTF-8'))
                        contract_hash = m.digest()
                        try:
                            self.contracts[contract_hash]
                        except KeyError:
                            self.contracts[contract_hash] = code
                            m = InstanceList()
                            self.instance_lists[contract_hash] = m
                        self.instance_lists[contract_hash].add(contract_address, contract_balance)
                        transaction.commit()
            self.last_block = blockNum
            blockNum -= 1
        # Bug fix: once the sync reaches block 0, clear the resume marker;
        # otherwise last_block stays at 1 and a subsequent initialize() would
        # "resume" from block 1 instead of starting at the chain tip.
        self.last_block = 0
        transaction.commit()

    def search(self, expression, callback_func):
        """Invoke callback_func(hash, contract, addresses, balances) for
        every stored contract matching the given expression."""
        all_keys = list(self.contracts)
        for k in all_keys:
            if self.contracts[k].matches_expression(expression):
                m = self.instance_lists[k]
                callback_func(k.hex(), self.contracts[k], m.addresses, m.balances)
Once database initialization is complete, start from the end of the chain again
from mythril.rpc.client import EthJsonRpc
from mythril.ipc.client import EthIpc
from mythril.ether.ethcontract import ETHContract, InstanceList
import hashlib
import os
import persistent
import persistent.list
import transaction
from BTrees.OOBTree import BTree
import ZODB
from ZODB import FileStorage
def get_persistent_storage(db_dir = None):
    """Return the root ContractStorage from the ZODB database under
    db_dir (default ~/.mythril), creating directory, database and root
    object as needed."""
    directory = db_dir or os.path.join(os.path.expanduser('~'), ".mythril")
    if not os.path.exists(directory):
        os.makedirs(directory)
    storage = FileStorage.FileStorage(os.path.join(directory, "contractstorage.fs"))
    connection = ZODB.DB(storage).open()
    storage_root = connection.root()
    # Install a fresh ContractStorage on first use.
    if 'contractStorage' not in storage_root:
        storage_root['contractStorage'] = ContractStorage()
    return storage_root['contractStorage']
class ContractStorage(persistent.Persistent):
    """ZODB-persistent store of contract bytecode and on-chain instances."""

    def __init__(self):
        self.contracts = BTree()        # md5(bytecode) -> ETHContract
        self.instance_lists = BTree()   # md5(bytecode) -> InstanceList
        self.last_block = 0             # resume marker for initialize()

    def get_contract_by_hash(self, contract_hash):
        """Return the ETHContract stored under contract_hash (raises KeyError)."""
        return self.contracts[contract_hash]

    def initialize(self, rpchost, rpcport, rpctls, sync_all, ipc):
        """Walk the chain backwards from the newest block (or from a saved
        resume point), indexing every contract-creation transaction."""
        if ipc:
            eth = EthIpc()
        else:
            eth = EthJsonRpc(rpchost, rpcport, rpctls)
        if self.last_block:
            blockNum = self.last_block
            print("Resuming synchronization from block " + str(blockNum))
        else:
            blockNum = eth.eth_blockNumber()
            print("Starting synchronization from latest block: " + str(blockNum))
        while (blockNum > 0):
            if not blockNum % 1000:
                print("Processing block " + str(blockNum) + ", " + str(len(self.contracts.keys())) + " unique contracts in database")
            block = eth.eth_getBlockByNumber(blockNum)
            for tx in block['transactions']:
                if not tx['to']:
                    # A transaction with an empty 'to' creates a contract;
                    # its address is found in the receipt.
                    receipt = eth.eth_getTransactionReceipt(tx['hash'])
                    if receipt is not None:
                        contract_address = receipt['contractAddress']
                        contract_code = eth.eth_getCode(contract_address)
                        contract_balance = eth.eth_getBalance(contract_address)
                        # skip contracts with zero balance (disable with --sync-all)
                        # Bug fix: the original condition `not balance or
                        # sync_all` skipped EVERY contract when --sync-all
                        # was given — the opposite of its intent.
                        if not contract_balance and not sync_all:
                            continue
                        code = ETHContract(contract_code, tx['input'])
                        m = hashlib.md5()
                        m.update(contract_code.encode('UTF-8'))
                        contract_hash = m.digest()
                        try:
                            self.contracts[contract_hash]
                        except KeyError:
                            self.contracts[contract_hash] = code
                            m = InstanceList()
                            self.instance_lists[contract_hash] = m
                        self.instance_lists[contract_hash].add(contract_address, contract_balance)
                        transaction.commit()
            self.last_block = blockNum
            blockNum -= 1
        # If we've finished initializing the database, start over from the end of the chain if we want to initialize again
        self.last_block = 0
        transaction.commit()

    def search(self, expression, callback_func):
        """Invoke callback_func(hash, contract, addresses, balances) for
        every stored contract matching the given expression."""
        all_keys = list(self.contracts)
        for k in all_keys:
            if self.contracts[k].matches_expression(expression):
                m = self.instance_lists[k]
                callback_func(k.hex(), self.contracts[k], m.addresses, m.balances)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import os
import sys
from scipy.odr.odrpack import Model, RealData, ODR
from numpy import *
from pylab import *
# from scipy import optimize
def read_cd_data(cd_file):
    '''
    Parse a CD melt data file: keep only 5-column numeric rows, skipping
    comment lines (any line containing "$") and malformed rows.

    Returns the first three columns: (temperature, signal, error).
    '''
    rows = []
    for line in cd_file:
        if "$" in line:
            continue
        cells = line.split()
        if len(cells) != 5:
            continue
        try:
            rows.append([float(cell) for cell in cells])
        except ValueError:
            continue
    rows = array(rows)
    return rows[:, 0], rows[:, 1], rows[:, 2]
def gibbs_free_energy(h, cp, tm, t):
    '''Gibbs free energy of unfolding at temperature t (Gibbs-Helmholtz
    form, with enthalpy h, heat capacity change cp, midpoint tm).'''
    enthalpy_term = h * (1 - t / tm)
    heat_capacity_term = cp * ((tm - t) + t * log(t / tm))
    return enthalpy_term - heat_capacity_term
def fit_cd_melt(T, sig, error, show_graph = True, name = "melt"):
    """
    Fit a two-state thermal melt model to CD data by orthogonal distance
    regression and (optionally) plot the fit.

    NOTE(review): `T += 273` converts Celsius to Kelvin by mutating the
    caller's array in place — callers must not reuse T afterwards expecting
    Celsius.
    """
    R = 8.314  # gas constant, J/mol/K
    T += 273
    def k(h, cp, tm, t):
        # Equilibrium constant derived from the Gibbs free energy.
        return exp(gibbs_free_energy(h, cp, tm, t) / (R * t))
    def alpha(h, cp, tm, t):
        # Fraction in one state: K / (1 + K).
        k_calc = k(h, cp, tm, t)
        return k_calc / (1 + k_calc)
    def fit_func(h, cp, tm, sig_f, sig_u, t):
        # Observed signal interpolates between the folded (sig_f) and
        # unfolded (sig_u) baselines.
        return alpha(h, cp, tm, t) * (sig_f - sig_u) + sig_u
    # def residuals(p, t, signal):
    #     return fit_func(p[0], p[1], p[2], p[3], p[4], t) - signal
    def fit_func_2(B, t):
        # ODR packs all parameters into a single vector B.
        return fit_func(B[0], B[1], B[2], B[3], B[4], t)
    # Initial guesses: baselines from the signal extremes, Tm from the
    # temperature whose signal is closest to the midpoint.
    sig_f_guess, sig_u_guess = min(sig), max(sig)
    sig_mid = (sig_f_guess + sig_u_guess) / 2
    tm_guess = min(enumerate(T), key = lambda x: abs(sig[x[0]] - sig_mid))[1]
    guesses = [0, 0, tm_guess, sig_f_guess, sig_u_guess]
    # p = optimize.leastsq(residuals, guesses[:], args = (T, sig))[0]
    # res_var = sum(i ** 2 for i in residuals(p, T, sig)) / (len(sig) - len(guesses))
    # ss_tot = sum((sig - sig.mean()) ** 2)
    # res_var = 1 - ss_err / ss_tot
    # Instead of using least squares, use orthogonal distance regression. This
    # lets us account for errors in the measurements of the data.
    # See: http://docs.scipy.org/doc/scipy/reference/odr.html
    linear = Model(fit_func_2)
    data = RealData(T, sig, sy = error)
    odr = ODR(data, linear, beta0 = guesses)
    output = odr.run()
    if show_graph:
        temp = linspace(T.min(), T.max(), 100)
        plot(T, sig, "ro", temp, fit_func_2(output.beta, temp), "r-")
        title("Temperature melt of {}".format(name))
        xlabel("Temperature (K)")
        ylabel("CD Signal (millidegrees)")
        show()
        # NOTE(review): with many matplotlib backends show() blocks and
        # clears the figure, so savefig() afterwards may write an empty
        # image — confirm the intended ordering.
        savefig("{}.png".format(os.path.splitext(name)[0]))
    return output.beta, output.sd_beta, output.res_var
    # return p, [0.] * len(p), res_var
def main(args):
    """Fit each CD data file named on the command line and report the
    fitted thermodynamic parameters."""
    for arg in args:
        print("{}:".format(arg))
        with open(arg) as cd_input:
            T, sig, error = read_cd_data(cd_input)
        p, p_sd, res_var = fit_cd_melt(T, sig, error, name = arg)
        h, cp, tm = p[:3]
        h_sd, cp_sd, tm_sd = p_sd[:3]
        # Covert the enthalpy from J/mol to kJ/mol
        h, h_sd = h / 1000, h_sd / 1000
        # NOTE(review): .encode() returns bytes under Python 3, which would
        # print as b'...'; this is correct only under Python 2 — confirm the
        # targeted interpreter version.
        delta = u"\N{GREEK CAPITAL LETTER DELTA}".encode("utf-8")
        print("    {}H: {:.6} +/- {:.4} kJ/mol".format(delta, h, h_sd))
        print("    Cp: {:.6} +/- {:.4} J/mol/K".format(cp, cp_sd))
        print("    Tm: {:.6} +/- {:.4} K".format(tm, tm_sd))
        dg_t = 25 + 273  # report dG at 25 C (298 K)
        dg = gibbs_free_energy(h, cp, tm, dg_t) / 1000
        print("    {}G @ {} K: {:.5} kJ/mol".format(delta, dg_t, dg))
        print("    Residual variance: {:.3}".format(res_var))

if __name__ == "__main__":
    main(sys.argv[1:])
More code cleaning
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import os
import sys
from scipy.odr.odrpack import Model, RealData, ODR
from numpy import *
from pylab import *
def read_cd_data(cd_file):
    '''
    Parse a CD melt data file, skipping comment lines (containing "$") and
    malformed rows; only 5-column numeric rows are kept. Temperatures are
    converted from Celsius to Kelvin.

    Returns (T, signal, error).
    '''
    rows = []
    for line in cd_file:
        if "$" in line:
            continue
        cells = line.split()
        if len(cells) != 5:
            continue
        try:
            rows.append([float(cell) for cell in cells])
        except ValueError:
            continue
    rows = array(rows)
    return rows[:, 0] + 273, rows[:, 1], rows[:, 2]
def gibbs_free_energy(dH, C_p, T_m, t):
    '''Gibbs free energy of unfolding at temperature t (Gibbs-Helmholtz
    form, with enthalpy dH, heat capacity change C_p, midpoint T_m).'''
    enthalpy_term = dH * (1 - t / T_m)
    heat_capacity_term = C_p * ((T_m - t) + t * log(t / T_m))
    return enthalpy_term - heat_capacity_term
def k(dH, C_p, T_m, t):
    '''Equilibrium constant for unfolding at temperature t.'''
    R = 8.314  # gas constant, J/mol/K
    dG = gibbs_free_energy(dH, C_p, T_m, t)
    return exp(dG / (R * t))
def alpha(dH, C_p, T_m, t):
    '''Fraction of molecules in the unfolded state at temperature t.'''
    equilibrium = k(dH, C_p, T_m, t)
    return equilibrium / (equilibrium + 1)
def _fit_func(dH, C_p, T_m, sig_f, sig_u, t):
    '''Model CD signal: interpolate between the folded (sig_f) and
    unfolded (sig_u) baselines by the fraction alpha.'''
    frac = alpha(dH, C_p, T_m, t)
    return sig_u + frac * (sig_f - sig_u)
def _fit_func_2(B, t):
    '''ODR-compatible wrapper: all five parameters packed into vector B.'''
    dH, C_p, T_m, sig_f, sig_u = B[0], B[1], B[2], B[3], B[4]
    return _fit_func(dH, C_p, T_m, sig_f, sig_u, t)
def fit_cd_melt(T, sig, error):
    '''
    Fit the two-state melt model to a CD melt curve using orthogonal
    distance regression, which accounts for measurement error in the data.

    Returns (fitted parameters, their standard errors, residual variance).
    '''
    # Easy initial guesses: baselines from the signal extremes, T_m from
    # the temperature whose signal is closest to the midpoint.
    sig_f_guess, sig_u_guess = min(sig), max(sig)
    sig_mid = (sig_f_guess + sig_u_guess) / 2
    T_m_guess = min(enumerate(T), key=lambda x: abs(sig[x[0]] - sig_mid))[1]
    # dH and C_p start at zero.
    guesses = [0, 0, T_m_guess, sig_f_guess, sig_u_guess]
    # ODR instead of least squares:
    # http://docs.scipy.org/doc/scipy/reference/odr.html
    model = Model(_fit_func_2)
    melt_data = RealData(T, sig, sy=error)
    output = ODR(melt_data, model, beta0=guesses).run()
    return output.beta, output.sd_beta, output.res_var
def main(args, show_graph = True):
    """Fit each CD data file named on the command line, report the fitted
    thermodynamic parameters and (optionally) plot each fit."""
    for arg in args:
        print("{}:".format(arg))
        with open(arg) as cd_input:
            T, sig, error = read_cd_data(cd_input)
        p, p_sd, res_var = fit_cd_melt(T, sig, error)
        dH, C_p, T_m = p[:3]
        dH_sd, C_p_sd, T_m_sd = p_sd[:3]
        # Covert the enthalpy from J/mol to kJ/mol
        dH, dH_sd = dH / 1000, dH_sd / 1000
        # NOTE(review): .encode() returns bytes under Python 3, which would
        # print as b'...'; this is correct only under Python 2 — confirm the
        # targeted interpreter version.
        delta = u"\N{GREEK CAPITAL LETTER DELTA}".encode("utf-8")
        print("    {}H: {:.6} +/- {:.4} kJ/mol".format(delta, dH, dH_sd))
        print("    C_p: {:.6} +/- {:.4} J/mol/K".format(C_p, C_p_sd))
        print("    T_m: {:.6} +/- {:.4} K".format(T_m, T_m_sd))
        dg_t = 25 + 273  # report dG at 25 C (298 K)
        dg = gibbs_free_energy(dH, C_p, T_m, dg_t) / 1000
        print("    {}G @ {} K: {:.5} kJ/mol".format(delta, dg_t, dg))
        print("    Residual variance: {:.3}".format(res_var))
        if show_graph:
            # Overlay the fitted curve on the measured points.
            temp = linspace(T.min(), T.max(), 100)
            plot(T, sig, "ro", temp, _fit_func_2(p, temp), "k-")
            title("Temperature Melt of {}".format(arg))
            xlabel("Temperature (K)")
            ylabel("CD Signal (millidegrees)")
            show()
            # NOTE(review): with many matplotlib backends show() blocks and
            # clears the figure, so savefig() afterwards may write an empty
            # image — confirm the intended ordering.
            savefig("{}.png".format(os.path.splitext(arg)[0]))

if __name__ == "__main__":
    main(sys.argv[1:])
|
from urllib.error import URLError
import numpy
import smopy
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import math
from gtfspy.gtfs import GTFS
from gtfspy.stats import get_spatial_bounds, get_percentile_stop_bounds, get_median_lat_lon_of_stops
from gtfspy.route_types import ROUTE_TYPE_TO_COLOR, ROUTE_TYPE_TO_ZORDER, ROUTE_TYPE_TO_SHORT_DESCRIPTION
import matplotlib as mpl
from matplotlib_scalebar.scalebar import ScaleBar
from gtfspy import util
"""
This module contains functions for plotting (static) visualizations of the public transport networks using matplotlib.
"""
from gtfspy.extended_route_types import ROUTE_TYPE_CONVERSION
# Basemap styles accepted by get_smopy_map()'s map_style parameter
# (CartoDB raster tile style identifiers).
MAP_STYLES = [
    "rastertiles/voyager",
    "rastertiles/voyager_nolabels",
    "rastertiles/voyager_only_labels",
    "rastertiles/voyager_labels_under",
    "light_all",
    "dark_all",
    "light_nolabels",
    "light_only_labels",
    "dark_nolabels",
    "dark_only_labels"
]
def _get_median_centered_plot_bounds(g):
    # Symmetric bounding box centered on the median stop location, just
    # wide/tall enough to contain every stop.
    lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(g)
    lat_median, lon_median = get_median_lat_lon_of_stops(g)
    half_width = max(abs(lon_median - lon_min), abs(lon_median - lon_max))
    half_height = max(abs(lat_median - lat_min), abs(lat_median - lat_max))
    return (lon_median - half_width, lon_median + half_width,
            lat_median - half_height, lat_median + half_height)
def plot_route_network_from_gtfs(g, ax=None, spatial_bounds=None, map_alpha=0.8, scalebar=True, legend=True,
                                 return_smopy_map=False, map_style=None):
    """
    Plot the route network of a GTFS feed on top of a background map.

    Parameters
    ----------
    g: A gtfspy.gtfs.GTFS object
        Where to get the data from?
    ax: matplotlib.Axes object, optional
        If None, a new figure and an axis is created
    spatial_bounds: dict, optional
        with str keys: lon_min, lon_max, lat_min, lat_max
    return_smopy_map: bool, optional
        defaulting to false

    Returns
    -------
    ax: matplotlib.Axes
    """
    assert isinstance(g, GTFS)
    shapes = g.get_all_route_shapes()
    if spatial_bounds is None:
        spatial_bounds = get_spatial_bounds(g, as_dict=True)
    return plot_as_routes(shapes,
                          ax=ax,
                          spatial_bounds=spatial_bounds,
                          map_alpha=map_alpha,
                          plot_scalebar=scalebar,
                          legend=legend,
                          return_smopy_map=return_smopy_map,
                          map_style=map_style)
def plot_as_routes(route_shapes, ax=None, spatial_bounds=None, map_alpha=0.8, plot_scalebar=True, legend=True,
                   return_smopy_map=False, line_width_attribute=None, line_width_scale=1.0, map_style=None):
    """
    Plot the given route shapes on top of a background map.

    Parameters
    ----------
    route_shapes: list of dicts that should have the following keys
        name, type, agency, lats, lons
        with types
        list, list, str, list, list
    ax: axis object
    spatial_bounds: dict
    map_alpha:
    plot_scalebar: bool
    legend:
    return_smopy_map:
    line_width_attribute:
    line_width_scale:

    Returns
    -------
    ax: matplotlib.axes object
    """
    line_width = None  # None -> matplotlib's default line width
    lon_min = spatial_bounds['lon_min']
    lon_max = spatial_bounds['lon_max']
    lat_min = spatial_bounds['lat_min']
    lat_max = spatial_bounds['lat_max']
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    # Background map tile image covering the requested bounds.
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max, map_style=map_style)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)
    # Pixel coordinates of the bound corners (for axis limits and scalebar).
    bound_pixel_xs, bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
                                                         numpy.array([lon_min, lon_max]))
    route_types_to_lines = {}
    for shape in route_shapes:
        route_type = ROUTE_TYPE_CONVERSION[shape['type']]
        lats = numpy.array(shape['lats'])
        lons = numpy.array(shape['lons'])
        if line_width_attribute:
            # Optional per-shape line width (scaled per-route attribute).
            line_width = line_width_scale * shape[line_width_attribute]
        xs, ys = smopy_map.to_pixels(lats, lons)
        line, = ax.plot(xs, ys, linewidth=line_width, color=ROUTE_TYPE_TO_COLOR[route_type], zorder=ROUTE_TYPE_TO_ZORDER[route_type])
        # One legend entry per route type (last plotted line of a type wins).
        route_types_to_lines[route_type] = line
    if legend:
        lines = list(route_types_to_lines.values())
        labels = [ROUTE_TYPE_TO_SHORT_DESCRIPTION[route_type] for route_type in route_types_to_lines.keys()]
        ax.legend(lines, labels)
    if plot_scalebar:
        _add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())
    # Pixel y grows downward, so the y limits are inverted.
    ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())
    if return_smopy_map:
        return ax, smopy_map
    else:
        return ax
def plot_routes_as_stop_to_stop_network(from_lats, from_lons, to_lats, to_lons, attributes=None, color_attributes=None,
                                        zorders=None,
                                        line_labels=None,
                                        ax=None,
                                        spatial_bounds=None,
                                        alpha=1,
                                        map_alpha=0.8,
                                        scalebar=True,
                                        return_smopy_map=False,
                                        c=None, linewidth=None,
                                        linewidth_multiplier=1,
                                        use_log_scale=False):
    """
    Plot a stop-to-stop network as straight segments on a background map.

    The parallel iterables (from_lats, from_lons, to_lats, to_lons, and the
    optional attributes/color_attributes/zorders/line_labels) each describe
    one segment. `attributes` scales the line widths; `color_attributes`
    are route types that determine color/zorder and trigger a legend; `c`
    is the mandatory fallback color when no color_attributes are given.

    Returns ax, or (ax, smopy_map) when return_smopy_map is True.
    """
    if attributes is None:
        attributes = len(list(from_lats))*[None]
    if not linewidth:
        linewidth = 1
    if color_attributes is None:
        color_attributes = len(list(from_lats))*[None]
        # Without per-segment route types a fallback color is mandatory.
        assert c is not None
    if zorders is None:
        zorders = len(list(from_lats))*[1]
    if line_labels is None:
        line_labels = len(list(from_lats))*[None]
    if spatial_bounds is None:
        # Derive map bounds from the segment endpoints.
        lon_min = min(list(from_lons) + list(to_lons))
        lon_max = max(list(from_lons) + list(to_lons))
        lat_min = min(list(from_lats) + list(to_lats))
        lat_max = max(list(from_lats) + list(to_lats))
    else:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)
    # Pixel coordinates of the bound corners (for axis limits and scalebar).
    bound_pixel_xs, bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
                                                         numpy.array([lon_min, lon_max]))
    for from_lat, from_lon, to_lat, to_lon, attribute, color_attribute, zorder, line_label in zip(from_lats,
                                                                                                  from_lons,
                                                                                                  to_lats,
                                                                                                  to_lons,
                                                                                                  attributes,
                                                                                                  color_attributes,
                                                                                                  zorders,
                                                                                                  line_labels):
        if color_attribute is None:
            color = c
        else:
            # Route type determines both color and stacking order.
            a = ROUTE_TYPE_CONVERSION[color_attribute]
            color = ROUTE_TYPE_TO_COLOR[a]
            zorder = ROUTE_TYPE_TO_ZORDER[a]
        if not attribute:
            attribute = linewidth
        if use_log_scale:
            attribute = math.log10(attribute)
        xs, ys = smopy_map.to_pixels(numpy.array([from_lat, to_lat]), numpy.array([from_lon, to_lon]))
        ax.plot(xs, ys, color=color, linewidth=attribute*linewidth_multiplier, zorder=zorder, alpha=alpha)
        if line_label:
            ax.text(xs.mean(), ys.mean(), line_label,
                    # verticalalignment='bottom', horizontalalignment='right',
                    color='green', fontsize=15)
    # Show a legend only when per-segment route types were supplied.
    legend = True if color_attributes[0] is not None else False
    import matplotlib.lines as mlines
    if legend:
        unique_types = set(color_attributes)
        lines = []
        for i in unique_types:
            line = mlines.Line2D([], [], color=ROUTE_TYPE_TO_COLOR[i], markersize=15,
                                 label=ROUTE_TYPE_TO_SHORT_DESCRIPTION[i])
            lines.append(line)
        handles = lines
        labels = [h.get_label() for h in handles]
        ax.legend(handles=handles, labels=labels, loc=4)
    if scalebar:
        _add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())
    # Pixel y grows downward, so the y limits are inverted.
    ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())
    if return_smopy_map:
        return ax, smopy_map
    else:
        return ax
def _add_scale_bar(ax, lat, lon_min, lon_max, width_pixels):
    # The meters-per-pixel ratio along the given latitude sets the scale.
    meters = util.wgs84_distance(lat, lon_min, lat, lon_max)
    ax.add_artist(ScaleBar(meters / width_pixels))
def plot_route_network_thumbnail(g):
    """Render a fixed-size (512x300 px) thumbnail of the feed's route
    network, centered on the median stop location."""
    width = 512   # pixels
    height = 300  # pixels
    scale = 24    # meters of ground per pixel
    dpi = mpl.rcParams["figure.dpi"]
    # Convert the pixel extent to meters, then to degrees around the median.
    median_lat, median_lon = get_median_lat_lon_of_stops(g)
    dlat = util.wgs84_height(height * scale)
    dlon = util.wgs84_width(width * scale, median_lat)
    spatial_bounds = {
        "lon_min": median_lon - dlon,
        "lon_max": median_lon + dlon,
        "lat_min": median_lat - dlat,
        "lat_max": median_lat + dlat
    }
    fig = plt.figure(figsize=(width / dpi, height / dpi))
    ax = fig.add_subplot(111)
    # Let the map fill the whole figure: no margins, no decorations.
    plt.subplots_adjust(bottom=0.0, left=0.0, right=1.0, top=1.0)
    return plot_route_network_from_gtfs(g, ax, spatial_bounds, map_alpha=1.0, scalebar=False, legend=False)
def plot_stops_with_categorical_attributes(lats_list, lons_list, attributes_list, s=0.5, spatial_bounds=None, colorbar=False, ax=None, cmap=None, norm=None, alpha=None):
    """
    Scatter several groups of stops on a background map, one matplotlib
    base color per group (lats_list[i] / lons_list[i] / attributes_list[i]
    form group i).

    Returns the list of scatter artists, one per plotted group.
    """
    if not spatial_bounds:
        lon_min = min([min(x) for x in lons_list])
        lon_max = max([max(x) for x in lons_list])
        lat_min = min([min(x) for x in lats_list])
        lat_max = max([max(x) for x in lats_list])
    else:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    # Pixel-space bounding box of the four corner points of the bounds.
    min_x = max_x = min_y = max_y = None
    for lat in [lat_min, lat_max]:
        for lon in [lon_min, lon_max]:
            x, y = smopy_map.to_pixels(lat, lon)
            if min_x is None:
                # Bug fix: compare against None rather than truthiness —
                # a legitimate pixel coordinate of 0 would previously have
                # re-initialized the bounding box mid-loop.
                min_x = x
                max_x = x
                min_y = y
                max_y = y
            else:
                max_x = max(max_x, x)
                max_y = max(max_y, y)
                min_y = min(min_y, y)
                min_x = min(min_x, x)
    ax.set_xlim(min_x, max_x)
    # Pixel y grows downward, so the y limits are inverted.
    ax.set_ylim(max_y, min_y)
    ax.set_xticks([])
    ax.set_yticks([])
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    axes = []
    for lats, lons, attributes, c in zip(lats_list, lons_list, attributes_list, mcolors.BASE_COLORS):
        x, y = zip(*[smopy_map.to_pixels(lat, lon) for lat, lon in zip(lats, lons)])
        ax = plt.scatter(x, y, s=s, c=c)  # , marker=".")
        axes.append(ax)
    return axes
def plot_stops_with_attributes(lats, lons, attribute, s=0.5, spatial_bounds=None, colorbar=False, ax=None, cmap=None, norm=None, alpha=None):
    """Scatter stops on a background map, colored by `attribute`.

    Returns ax, or (ax, scatter_artist, smopy_map) when colorbar is True.
    """
    if not spatial_bounds:
        # Bounds from the data itself.
        lon_min, lon_max = min(lons), max(lons)
        lat_min, lat_max = min(lats), max(lats)
    else:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        ax = plt.figure().add_subplot(111)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    xs, ys = smopy_map.to_pixels(lats, lons)
    cax = ax.scatter(xs, ys, c=attribute, s=s, cmap=cmap, norm=norm, alpha=alpha)
    ax.set_xlim(min(xs), max(xs))
    # Pixel y grows downward, so the y limits are inverted.
    ax.set_ylim(max(ys), min(ys))
    if colorbar:
        return ax, cax, smopy_map
    return ax
def plot_all_stops(g, ax=None, scalebar=False):
    """
    Plot all stops of a GTFS feed as red dots on a background map.

    Parameters
    ----------
    g: A gtfspy.gtfs.GTFS object
    ax: matplotlib.Axes object, optional
        If None, a new figure and an axis is created, otherwise results are plotted on the axis.
    scalebar: bool, optional
        Whether to include a scalebar to the plot.
        (NOTE(review): this flag is currently unused by the implementation.)

    Returns
    -------
    ax: matplotlib.Axes
    """
    assert isinstance(g, GTFS)
    lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(g)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        ax = plt.figure().add_subplot(111)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    stops = g.stops()
    xs, ys = smopy_map.to_pixels(numpy.array(stops['lat']), numpy.array(stops['lon']))
    ax.scatter(xs, ys, color="red", s=10)
    ax.set_xlim(min(xs), max(xs))
    # Pixel y grows downward, so the y limits are inverted.
    ax.set_ylim(max(ys), min(ys))
    return ax
def get_smopy_map(lon_min, lon_max, lat_min, lat_max, z=None, map_style=None):
    """
    Fetch (and memoize) a smopy background map covering the given WGS84
    bounds.

    Parameters
    ----------
    z: int, optional
        explicit zoom level; enabling it monkey-patches smopy's zoom
        clamping (see the in-line "hack" note)
    map_style: str, optional
        one of MAP_STYLES; when given, tiles come from CartoDB

    Returns
    -------
    smopy.Map

    Raises
    ------
    RuntimeError
        when the tile server cannot be reached
    """
    ORIG_TILE_SERVER = smopy.TILE_SERVER
    if map_style is not None:
        assert map_style in MAP_STYLES, map_style + \
            " (map_style parameter) is not a valid CartoDB mapping style. Options are " + \
            str(MAP_STYLES)
        smopy.TILE_SERVER = "http://1.basemaps.cartocdn.com/" + map_style + "/{z}/{x}/{y}.png"
    args = (lat_min, lat_max, lon_min, lon_max, map_style, z)
    try:
        if args not in get_smopy_map.maps:
            kwargs = {}
            if z is not None:  # this hack may not work
                smopy.Map.get_allowed_zoom = lambda self, z: z
                kwargs['z'] = z
            try:
                get_smopy_map.maps[args] = smopy.Map((lat_min, lon_min, lat_max, lon_max), **kwargs)
            except URLError:
                raise RuntimeError("\n Could not load background map from the tile server: "
                                   + smopy.TILE_SERVER +
                                   "\n Please check that the tile server exists and "
                                   "that your are connected to the internet.")
        return get_smopy_map.maps[args]
    finally:
        # Bug fix: restore the module-global tile server on ALL exit paths.
        # Previously a failed tile fetch raised before the restore ran,
        # leaving smopy.TILE_SERVER pointing at the CartoDB URL for every
        # subsequent caller.
        smopy.TILE_SERVER = ORIG_TILE_SERVER

# Module-level memoization cache for fetched maps.
get_smopy_map.maps = {}
Fit map to axis bounds
from urllib.error import URLError
import numpy
import smopy
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import math
from gtfspy.gtfs import GTFS
from gtfspy.stats import get_spatial_bounds, get_percentile_stop_bounds, get_median_lat_lon_of_stops
from gtfspy.route_types import ROUTE_TYPE_TO_COLOR, ROUTE_TYPE_TO_ZORDER, ROUTE_TYPE_TO_SHORT_DESCRIPTION
import matplotlib as mpl
from matplotlib_scalebar.scalebar import ScaleBar
from gtfspy import util
"""
This module contains functions for plotting (static) visualizations of the public transport networks using matplotlib.
"""
from gtfspy.extended_route_types import ROUTE_TYPE_CONVERSION
# Basemap styles accepted by get_smopy_map()'s map_style parameter
# (CartoDB raster tile style identifiers).
MAP_STYLES = [
    "rastertiles/voyager",
    "rastertiles/voyager_nolabels",
    "rastertiles/voyager_only_labels",
    "rastertiles/voyager_labels_under",
    "light_all",
    "dark_all",
    "light_nolabels",
    "light_only_labels",
    "dark_nolabels",
    "dark_only_labels"
]
def _get_median_centered_plot_bounds(g):
    # Build a bounding box symmetric about the median stop, large enough
    # to cover the full stop extent in both directions.
    lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(g)
    lat_median, lon_median = get_median_lat_lon_of_stops(g)
    lon_diff = max(abs(lon_median - lon_min), abs(lon_median - lon_max))
    lat_diff = max(abs(lat_median - lat_min), abs(lat_median - lat_max))
    return (lon_median - lon_diff,
            lon_median + lon_diff,
            lat_median - lat_diff,
            lat_median + lat_diff)
def plot_route_network_from_gtfs(g, ax=None, spatial_bounds=None, map_alpha=0.8, scalebar=True, legend=True,
                                 return_smopy_map=False, map_style=None):
    """
    Plot the route network of a GTFS feed on top of a background map.

    Parameters
    ----------
    g: A gtfspy.gtfs.GTFS object
        Where to get the data from?
    ax: matplotlib.Axes object, optional
        If None, a new figure and an axis is created
    spatial_bounds: dict, optional
        with str keys: lon_min, lon_max, lat_min, lat_max
    return_smopy_map: bool, optional
        defaulting to false

    Returns
    -------
    ax: matplotlib.axes.Axes
    """
    assert isinstance(g, GTFS)
    shapes = g.get_all_route_shapes()
    if spatial_bounds is None:
        spatial_bounds = get_spatial_bounds(g, as_dict=True)
    if ax is not None:
        # Grow the bounds so their aspect ratio matches the axes' size
        # (in inches, derived from the rendered window extent).
        bbox = ax.get_window_extent().transformed(ax.figure.dpi_scale_trans.inverted())
        spatial_bounds = _expand_spatial_bounds_to_fit_axes(spatial_bounds, bbox.width, bbox.height)
    return plot_as_routes(shapes,
                          ax=ax,
                          spatial_bounds=spatial_bounds,
                          map_alpha=map_alpha,
                          plot_scalebar=scalebar,
                          legend=legend,
                          return_smopy_map=return_smopy_map,
                          map_style=map_style)
def plot_as_routes(route_shapes, ax=None, spatial_bounds=None, map_alpha=0.8, plot_scalebar=True, legend=True,
                   return_smopy_map=False, line_width_attribute=None, line_width_scale=1.0, map_style=None):
    """
    Plot the given route shapes on top of a background map.

    Parameters
    ----------
    route_shapes: list of dicts that should have the following keys
        name, type, agency, lats, lons
        with types
        list, list, str, list, list
    ax: axis object
        If None, a new figure and axis are created.
    spatial_bounds: dict
        with keys lon_min, lon_max, lat_min, lat_max
    map_alpha:
        alpha of the background map
    plot_scalebar: bool
    legend:
        whether to add a legend with one entry per route type
    return_smopy_map:
    line_width_attribute: str, optional
        per-shape key from which the line width is derived
    line_width_scale:

    Returns
    -------
    ax: matplotlib.axes object
        (or (ax, smopy_map) when return_smopy_map is True)
    """
    lon_min = spatial_bounds['lon_min']
    lon_max = spatial_bounds['lon_max']
    lat_min = spatial_bounds['lat_min']
    lat_max = spatial_bounds['lat_max']
    # Bug fix: create a fresh figure only when NO axis was supplied. The
    # previous condition (`if ax is not None`) discarded a caller-provided
    # axis and plotted onto a brand-new figure instead.
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max, map_style=map_style)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)
    # Pixel coordinates of the bound corners (for axis limits and scalebar).
    bound_pixel_xs, bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
                                                         numpy.array([lon_min, lon_max]))
    route_types_to_lines = {}
    for shape in route_shapes:
        route_type = ROUTE_TYPE_CONVERSION[shape['type']]
        lats = numpy.array(shape['lats'])
        lons = numpy.array(shape['lons'])
        if line_width_attribute:
            # Optional per-shape line width (scaled per-route attribute).
            line_width = line_width_scale * shape[line_width_attribute]
        else:
            line_width = 1
        xs, ys = smopy_map.to_pixels(lats, lons)
        line, = ax.plot(xs, ys, linewidth=line_width, color=ROUTE_TYPE_TO_COLOR[route_type], zorder=ROUTE_TYPE_TO_ZORDER[route_type])
        # One legend entry per route type (last plotted line of a type wins).
        route_types_to_lines[route_type] = line
    if legend:
        lines = list(route_types_to_lines.values())
        labels = [ROUTE_TYPE_TO_SHORT_DESCRIPTION[route_type] for route_type in route_types_to_lines.keys()]
        ax.legend(lines, labels, loc="upper left")
    if plot_scalebar:
        _add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())
    # Pixel y grows downward, so the y limits are inverted.
    ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())
    if return_smopy_map:
        return ax, smopy_map
    else:
        return ax
def plot_routes_as_stop_to_stop_network(from_lats, from_lons, to_lats, to_lons, attributes=None, color_attributes=None,
                                        zorders=None,
                                        line_labels=None,
                                        ax=None,
                                        spatial_bounds=None,
                                        alpha=1,
                                        map_alpha=0.8,
                                        scalebar=True,
                                        return_smopy_map=False,
                                        c=None, linewidth=None,
                                        linewidth_multiplier=1,
                                        use_log_scale=False):
    """
    Plot a stop-to-stop network as line segments on a smopy tile map.

    The i-th segment runs from (from_lats[i], from_lons[i]) to
    (to_lats[i], to_lons[i]).

    Parameters
    ----------
    from_lats, from_lons, to_lats, to_lons: iterables of floats
    attributes: list, optional
        Per-segment line widths (before linewidth_multiplier); falsy entries
        fall back to `linewidth`.
    color_attributes: list, optional
        Per-segment route types (keys of ROUTE_TYPE_CONVERSION). When None,
        all segments use color `c` (which must then be provided).
    zorders: list, optional
        Per-segment z-orders; overridden by the route-type z-order whenever
        a color_attribute is given for the segment.
    line_labels: list, optional
        Per-segment text labels drawn at each segment's midpoint.
    ax: matplotlib.axes.Axes, optional
    spatial_bounds: dict, optional
        Keys lon_min, lon_max, lat_min, lat_max; computed from the given
        coordinates when None.
    alpha: float
        Alpha of the plotted segments.
    map_alpha: float
        Alpha of the background map.
    scalebar: bool
    return_smopy_map: bool
    c: color, optional
    linewidth: float, optional
    linewidth_multiplier: float
    use_log_scale: bool
        If True, segment widths are log10-scaled before multiplying.

    Returns
    -------
    ax, or the tuple (ax, smopy_map) when return_smopy_map is True
    """
    if attributes is None:
        attributes = len(list(from_lats))*[None]
        if not linewidth:
            linewidth = 1
    if color_attributes is None:
        color_attributes = len(list(from_lats))*[None]
        # without per-segment route types a single fallback color is required
        assert c is not None
    if zorders is None:
        zorders = len(list(from_lats))*[1]
    if line_labels is None:
        line_labels = len(list(from_lats))*[None]
    if spatial_bounds is None:
        lon_min = min(list(from_lons) + list(to_lons))
        lon_max = max(list(from_lons) + list(to_lons))
        lat_min = min(list(from_lats) + list(to_lats))
        lat_max = max(list(from_lats) + list(to_lats))
    else:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)
    bound_pixel_xs, bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
                                                         numpy.array([lon_min, lon_max]))
    for from_lat, from_lon, to_lat, to_lon, attribute, color_attribute, zorder, line_label in zip(from_lats,
                                                                                                  from_lons,
                                                                                                  to_lats,
                                                                                                  to_lons,
                                                                                                  attributes,
                                                                                                  color_attributes,
                                                                                                  zorders,
                                                                                                  line_labels):
        if color_attribute is None:
            color = c
        else:
            # route-type color/zorder take precedence over c and zorders
            a = ROUTE_TYPE_CONVERSION[color_attribute]
            color = ROUTE_TYPE_TO_COLOR[a]
            zorder = ROUTE_TYPE_TO_ZORDER[a]
        if not attribute:
            attribute = linewidth
        if use_log_scale:
            attribute = math.log10(attribute)
        xs, ys = smopy_map.to_pixels(numpy.array([from_lat, to_lat]), numpy.array([from_lon, to_lon]))
        ax.plot(xs, ys, color=color, linewidth=attribute*linewidth_multiplier, zorder=zorder, alpha=alpha)
        if line_label:
            ax.text(xs.mean(), ys.mean(), line_label,
                    # verticalalignment='bottom', horizontalalignment='right',
                    color='green', fontsize=15)
    # a legend is drawn only when route types were used for coloring
    legend = True if color_attributes[0] is not None else False
    import matplotlib.lines as mlines
    if legend:
        unique_types = set(color_attributes)
        lines = []
        for i in unique_types:
            # proxy artists for the legend (nothing is drawn for them)
            line = mlines.Line2D([], [], color=ROUTE_TYPE_TO_COLOR[i], markersize=15,
                                 label=ROUTE_TYPE_TO_SHORT_DESCRIPTION[i])
            lines.append(line)
        handles = lines
        labels = [h.get_label() for h in handles]
        ax.legend(handles=handles, labels=labels, loc=4)
    if scalebar:
        _add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())
    ax.set_xticks([])
    ax.set_yticks([])
    # pixel y grows downwards, hence the inverted y-limits
    ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())
    ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())
    if return_smopy_map:
        return ax, smopy_map
    else:
        return ax
def _add_scale_bar(ax, lat, lon_min, lon_max, width_pixels):
    """Attach a ScaleBar artist to `ax`.

    The meters-per-pixel ratio is the WGS84 distance spanned by
    [lon_min, lon_max] at latitude `lat`, divided by the axis width
    in pixels.
    """
    meters_across = util.wgs84_distance(lat, lon_min, lat, lon_max)
    ax.add_artist(ScaleBar(meters_across / width_pixels))
def _expand_spatial_bounds_to_fit_axes(bounds, ax_width, ax_height):
    """
    Grow the spatial bounds so that their aspect ratio matches the axes'.

    Exactly one of the two dimensions (longitude or latitude span) is
    widened symmetrically around its midpoint; the other is kept as-is.

    Parameters
    ----------
    bounds: dict
        Keys lon_min, lon_max, lat_min, lat_max.
    ax_width: float
    ax_height: float

    Returns
    -------
    spatial_bounds: dict
        New bounds with the same keys.
    """
    b = bounds
    height_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_max'], b['lon_min'])
    width_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_min'], b['lon_max'])
    x_per_y_meters = width_meters / height_meters
    x_per_y_axes = ax_width / ax_height
    if x_per_y_axes > x_per_y_meters:
        # The x-axis has slack: widen the longitude span.
        width_meters_new = (height_meters * x_per_y_axes)
        d_lon_new = ((b['lon_max'] - b['lon_min']) / width_meters) * width_meters_new
        mean_lon = (b['lon_min'] + b['lon_max'])/2.
        return {
            "lon_min": mean_lon - d_lon_new / 2.,
            "lon_max": mean_lon + d_lon_new / 2.,
            "lat_min": b['lat_min'],
            "lat_max": b['lat_max']
        }
    # Otherwise the y-axis has slack: widen the latitude span.
    height_meters_new = (width_meters / x_per_y_axes)
    d_lat_new = ((b['lat_max'] - b['lat_min']) / height_meters) * height_meters_new
    mean_lat = (b['lat_min'] + b['lat_max']) / 2.
    return {
        "lon_min": b['lon_min'],
        "lon_max": b['lon_max'],
        "lat_min": mean_lat - d_lat_new / 2.,
        "lat_max": mean_lat + d_lat_new / 2.
    }
def plot_route_network_thumbnail(g):
    """Plot a fixed-size (512 x 300 px) route-network thumbnail of a feed.

    Parameters
    ----------
    g: gtfspy.gtfs.GTFS
        Feed whose stops define the thumbnail's center point.

    Returns
    -------
    ax: matplotlib.axes.Axes
        (return value of plot_route_network_from_gtfs)
    """
    width = 512  # pixels
    height = 300  # pixels
    scale = 24  # scale factor from pixels to meters (width_m = width * scale)
    dpi = mpl.rcParams["figure.dpi"]
    width_m = width * scale
    height_m = height * scale
    # center the thumbnail on the median stop location
    median_lat, median_lon = get_median_lat_lon_of_stops(g)
    dlat = util.wgs84_height(height_m)
    dlon = util.wgs84_width(width_m, median_lat)
    spatial_bounds = {
        "lon_min": median_lon - dlon,
        "lon_max": median_lon + dlon,
        "lat_min": median_lat - dlat,
        "lat_max": median_lat + dlat
    }
    fig = plt.figure(figsize=(width/dpi, height/dpi))
    ax = fig.add_subplot(111)
    # remove all figure margins so the map fills the whole canvas
    plt.subplots_adjust(bottom=0.0, left=0.0, right=1.0, top=1.0)
    return plot_route_network_from_gtfs(g, ax, spatial_bounds, map_alpha=1.0, scalebar=False, legend=False)
def plot_stops_with_categorical_attributes(lats_list, lons_list, attributes_list, s=0.5, spatial_bounds=None, colorbar=False, ax=None, cmap=None, norm=None, alpha=None):
    """
    Scatter groups of stops on a smopy map, one base color per group.

    Parameters
    ----------
    lats_list, lons_list: list of lists of floats
        One inner list of coordinates per group of stops.
    attributes_list: list
        One entry per group; currently only its length (the grouping) is
        used, the attribute values themselves are not plotted.
    s: float
        Marker size.
    spatial_bounds: dict, optional
        Keys lon_min, lon_max, lat_min, lat_max; derived from the data
        when falsy.
    colorbar, cmap, norm, alpha:
        Currently unused; kept for interface compatibility.
    ax: matplotlib.axes.Axes, optional

    Returns
    -------
    list of matplotlib.collections.PathCollection
        One scatter artist per plotted group.
    """
    if not spatial_bounds:
        lon_min = min([min(x) for x in lons_list])
        lon_max = max([max(x) for x in lons_list])
        lat_min = min([min(x) for x in lats_list])
        lat_max = max([max(x) for x in lats_list])
    else:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    # Compute the pixel bounding box of the four bound corners.
    min_x = max_x = min_y = max_y = None
    for lat in [lat_min, lat_max]:
        for lon in [lon_min, lon_max]:
            x, y = smopy_map.to_pixels(lat, lon)
            # BUG FIX: use an explicit None check; `if not min_x:` wrongly
            # re-initialised the bounds whenever min_x happened to be 0.
            if min_x is None:
                min_x = x
                max_x = x
                min_y = y
                max_y = y
            else:
                max_x = max(max_x, x)
                max_y = max(max_y, y)
                min_y = min(min_y, y)
                min_x = min(min_x, x)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(max_y, min_y)
    ax.set_xticks([])
    ax.set_yticks([])
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    scatters = []
    # Cycle through matplotlib's base colors, one per group of stops.
    for lats, lons, attributes, c in zip(lats_list, lons_list, attributes_list, mcolors.BASE_COLORS):
        x, y = zip(*[smopy_map.to_pixels(lat, lon) for lat, lon in zip(lats, lons)])
        # BUG FIX: draw on the target axis instead of pyplot's "current"
        # axes (plt.scatter), which could differ from a caller-supplied ax
        # and also clobbered the `ax` variable inside the loop.
        scatters.append(ax.scatter(x, y, s=s, c=c))  # , marker="."
    return scatters
def plot_stops_with_attributes(lats, lons, attribute, s=0.5, spatial_bounds=None, colorbar=False, ax=None, cmap=None, norm=None, alpha=None):
    """Scatter stops on a smopy map, colored by a per-stop attribute.

    Returns ax, or (ax, scatter_artist, smopy_map) when colorbar is True.
    """
    if spatial_bounds:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    else:
        # fall back to the data's own bounding box
        lon_min, lon_max = min(lons), max(lons)
        lat_min, lat_max = min(lats), max(lats)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        ax = plt.figure().add_subplot(111)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    xs, ys = smopy_map.to_pixels(lats, lons)
    cax = ax.scatter(xs, ys, c=attribute, s=s, cmap=cmap, norm=norm, alpha=alpha)
    ax.set_xlim(min(xs), max(xs))
    # inverted y-limits: pixel y grows downwards
    ax.set_ylim(max(ys), min(ys))
    if colorbar:
        return ax, cax, smopy_map
    return ax
def plot_all_stops(g, ax=None, scalebar=False):
    """
    Scatter every stop of a feed (in red) on a smopy map background.

    Parameters
    ----------
    g: A gtfspy.gtfs.GTFS object
    ax: matplotlib.Axes object, optional
        If None, a new figure and an axis is created, otherwise results are
        plotted on the axis.
    scalebar: bool, optional
        Currently unused by this function.

    Returns
    -------
    ax: matplotlib.Axes
    """
    assert(isinstance(g, GTFS))
    lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(g)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        ax = plt.figure().add_subplot(111)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    stops = g.stops()
    xs, ys = smopy_map.to_pixels(numpy.array(stops['lat']), numpy.array(stops['lon']))
    ax.scatter(xs, ys, color="red", s=10)
    ax.set_xlim(min(xs), max(xs))
    # inverted y-limits: pixel y grows downwards
    ax.set_ylim(max(ys), min(ys))
    return ax
def get_smopy_map(lon_min, lon_max, lat_min, lat_max, z=None, map_style=None):
    """
    Return a smopy map covering the given bounding box, with caching.

    Parameters
    ----------
    lon_min, lon_max, lat_min, lat_max: float
        Bounding box in WGS84 coordinates.
    z: int, optional
        Zoom level; if given, smopy's allowed-zoom check is bypassed.
    map_style: str, optional
        CartoDB basemap style name; must be one of MAP_STYLES.

    Returns
    -------
    smopy.Map

    Raises
    ------
    RuntimeError
        If the map tiles could not be downloaded.
    """
    orig_tile_server = smopy.TILE_SERVER
    if map_style is not None:
        assert map_style in MAP_STYLES, map_style + \
            " (map_style parameter) is not a valid CartoDB mapping style. Options are " + \
            str(MAP_STYLES)
        smopy.TILE_SERVER = "http://1.basemaps.cartocdn.com/" + map_style + "/{z}/{x}/{y}.png"
    args = (lat_min, lat_max, lon_min, lon_max, map_style, z)
    try:
        if args not in get_smopy_map.maps:
            kwargs = {}
            if z is not None:  # this hack may not work
                smopy.Map.get_allowed_zoom = lambda self, z: z
                kwargs['z'] = z
            try:
                get_smopy_map.maps[args] = smopy.Map((lat_min, lon_min, lat_max, lon_max), **kwargs)
            except URLError as e:
                # BUG FIX: corrected message typo ("your are" -> "you are")
                # and chain the original URLError for easier debugging.
                raise RuntimeError("\n Could not load background map from the tile server: "
                                   + smopy.TILE_SERVER +
                                   "\n Please check that the tile server exists and "
                                   "that you are connected to the internet.") from e
    finally:
        # BUG FIX: restore the module-global tile server even when map
        # creation raises; previously the modified URL leaked on failure.
        smopy.TILE_SERVER = orig_tile_server
    return get_smopy_map.maps[args]


# Module-level cache of previously constructed maps, keyed by
# (lat_min, lat_max, lon_min, lon_max, map_style, z).
get_smopy_map.maps = {}
|
#!/usr/bin/python
"""
Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os, json, h5py
import numpy as np
from dmp import dmp
class hdf5_coord:
    """
    Class related to handling the functions for interacting directly with the
    HDF5 files. All required information should be passed to this class.
    """

    # Relative path of the HDF5 file used when user_id == 'test'.
    test_file = '../sample_coords.hdf5'

    def __init__(self, user_id='test', file_id='', resolution=None):
        """
        Initialise the module and open the backing HDF5 file.

        Parameters
        ----------
        user_id : str
            Identifier to uniquely locate the users files. Can be set to
            "common" if the files can be shared between users
        file_id : str
            Location of the file in the file system
        resolution : int (Optional)
            Level of resolution. This is optional, but only the functions
            get_resolutions() and set_resolution() can be called. Once the
            resolution has been set then all functions are callable.
        """
        self.test_file = '../sample_coords.hdf5'

        # Open the hdf5 file
        if user_id == 'test':
            resource_path = os.path.join(os.path.dirname(__file__), self.test_file)
            self.f = h5py.File(resource_path, "r")
        else:
            # Resolve the user's file through the dmp data-management layer.
            cnf_loc = os.path.dirname(os.path.abspath(__file__)) + '/mongodb.cnf'
            da = dmp(cnf_loc)
            file_obj = da.get_file_by_id(user_id, file_id)
            self.f = h5py.File(file_obj['file_path'], 'r')

        self.resolution = resolution
        if self.resolution is not None:
            self.grp = self.f[str(self.resolution)]
            self.meta = self.grp['meta']
            self.mpgrp = self.meta['model_params']
            self.clusters = self.meta['clusters']
            self.centroids = self.meta['centroids']

            dset = self.grp['data']

            # Optional JSON-encoded metadata stored as HDF5 attributes on
            # the data set; default to empty containers when absent.
            if 'dependencies' in dset.attrs:
                self.dependencies = json.loads(dset.attrs['dependencies'])
            else:
                self.dependencies = []

            if 'TADbit_meta' in dset.attrs:
                self.meta_data = json.loads(dset.attrs['TADbit_meta'])
            else:
                self.meta_data = {}

            if 'hic_data' in dset.attrs:
                self.hic_data = json.loads(dset.attrs['hic_data'])
            else:
                self.hic_data = {}

            if 'restraints' in dset.attrs:
                self.restraints = json.loads(dset.attrs['restraints'])
            else:
                self.restraints = {}

    def close(self):
        """
        Tidy function to close file handles
        """
        self.f.close()

    def get_resolutions(self):
        """
        List resolutions that models have been generated for

        Returns
        -------
        list : str
            Available levels of resolution that can be set
        """
        return [res for res in self.f]

    def set_resolution(self, resolution):
        """
        Set, or change, the resolution level

        Parameters
        ----------
        resolution : int
            Level of resolution
        """
        self.resolution = resolution
        self.grp = self.f[str(resolution)]
        # BUG FIX: these assignments previously read the bare names
        # `grp`/`meta`, which are undefined here and raised a NameError.
        self.meta = self.grp['meta']
        self.mpgrp = self.meta['model_params']
        self.clusters = self.meta['clusters']
        self.centroids = self.meta['centroids']

    def get_resolution(self):
        """
        List the current level of resolution

        Returns
        -------
        resolution : int
            Current level of resolution
        """
        return self.resolution

    def get_region_order(self, chr_id=None, region=None):
        """
        List the regions on a given chromosome ID or region ID in the order
        that they are located on the chromosome

        Parameters
        ----------
        chr_id : str
            Chromosome ID
        region : str
            Region ID (takes precedence: its chromosome is used)

        Returns
        -------
        list
            region_id : str
                List of the region IDs, sorted by start position
        """
        if region is not None:
            chr_id = self.mpgrp[str(region)].attrs['chromosome']

        regions = {}
        for r in self.mpgrp:
            if self.mpgrp[str(r)].attrs['chromosome'] == chr_id:
                regions[r] = self.mpgrp[str(r)].attrs['start']
        return sorted(regions, key=lambda k: regions[k])

    def get_object_data(self, region_id):
        """
        Prepare the object header data structure ready for printing

        Parameters
        ----------
        region_id : int
            Region that is getting downloaded

        Returns
        -------
        objectdata : dict
            All headers and values required for the JSON output
            (empty dict when no resolution has been set)
        """
        if self.resolution is None:
            return {}

        mpds = self.mpgrp[str(region_id)]
        dset = self.grp['data']
        return {
            'title' : dset.attrs['title'].decode('utf-8'),
            'experimentType' : dset.attrs['experimentType'].decode('utf-8'),
            'species' : dset.attrs['species'].decode('utf-8'),
            'project' : dset.attrs['project'].decode('utf-8'),
            'identifier' : dset.attrs['identifier'].decode('utf-8'),
            'assembly' : dset.attrs['assembly'].decode('utf-8'),
            'cellType' : dset.attrs['cellType'].decode('utf-8'),
            'resolution' : dset.attrs['resolution'].decode('utf-8'),
            'datatype' : dset.attrs['datatype'].decode('utf-8'),
            'components' : dset.attrs['components'].decode('utf-8'),
            'source' : dset.attrs['source'].decode('utf-8'),
            'chromEnd' : [np.asscalar(mpds.attrs['end'])],
            'end' : np.asscalar(mpds.attrs['end']),
            'chromStart' : [np.asscalar(mpds.attrs['start'])],
            'start' : np.asscalar(mpds.attrs['start']),
            'chrom' : mpds.attrs['chromosome'].decode('utf-8'),
            'dependencies' : self.dependencies,
            'uuid' : region_id,
        }

    def get_clusters(self, region_id):
        """
        List all clusters of models

        Returns
        -------
        clusters : list
            List of models in each cluster
        """
        if self.resolution is None:
            return {}

        # Need to loop through structure
        clustersgrp = self.clusters[str(region_id)]
        clusters = []
        for i in range(len(clustersgrp)):
            clusters.append([np.asscalar(x) for x in clustersgrp[str(i)][:]])
        return clusters

    def get_centroids(self, region_id):
        """
        List the centroid models for each cluster

        Returns
        -------
        centroids : list
            List of the centroid models for each cluster
        """
        if self.resolution is None:
            return {}

        c = [np.asscalar(x) for x in self.centroids[region_id]]
        return c

    def get_chromosomes(self):
        """
        List of chromosomes that have models at a given resolution

        Returns
        -------
        chromosomes : list
            List of chromosomes at the set resolution
        """
        if self.resolution is None:
            return {}

        return list(set([self.mpgrp[region_id].attrs['chromosome'].decode('utf-8') for region_id in self.mpgrp.keys()]))

    def get_regions(self, chr_id, start, end):
        """
        List regions that are within a given range on a chromosome

        Parameters
        ----------
        chr_id : str
            Chromosome ID
        start : int
            Start position
        end : int
            Stop position

        Returns
        -------
        regions : list
            List of region IDs whose parameters match those provided
        """
        if self.resolution is None:
            return {}

        # A region overlaps [start, end) when it starts before `end` and
        # ends after `start`.
        return [region_id for region_id in self.mpgrp.keys() if self.mpgrp[region_id].attrs['start']<end and self.mpgrp[region_id].attrs['end']>start and self.mpgrp[region_id].attrs['chromosome'].decode('utf-8')==chr_id]

    def get_models(self, region_id):
        """
        List all models for a given region

        Returns
        -------
        List
            model_id : int
            cluster_id : int
        """
        if self.resolution is None:
            return {}

        model_param_ds = self.mpgrp[str(region_id)]
        return model_param_ds[:,:]

    def get_model(self, region_id, model_ids=None, page=0, mpp=10):
        """
        Get the coordinates within a defined region on a specific chromosome.
        If no model_ids are given, the centroid (consensus) models for that
        region are returned.

        Parameters
        ----------
        region_id : str
            Region ID
        model_ids : list
            List of model IDs for the models that are required, or
            ['centroids'] / ['all']. Defaults to the centroid models.
        page : int
            Page number
        mpp : int
            Number of models per page (default: 10; max: 100)

        Returns
        -------
        list
            dict
                metadata : dict
                    Relevant extra meta data added by TADbit
                object : dict
                    Key value pair of information about the region
                models : list
                    List of dictionaries for each model
                clusters : list
                    List of models for each cluster
                centroids : list
                    List of all centroid models
                restraints : list
                    List of restraints for each position
                hic_data : dict
            dict
                model_count : int
                    Count of the number of models for the defined region ID
                page_count : int
                    Number of pages
        """
        if self.resolution is None:
            return {}

        mpds = self.mpgrp[str(region_id)]
        dset = self.grp['data']

        # BUG FIX: model_ids defaults to None, so model_ids[0] raised a
        # TypeError. Fall back to the documented default (centroids).
        if not model_ids:
            model_ids = ['centroids']
        if model_ids[0] == 'centroids':
            model_ids = self.get_centroids(region_id)
        if model_ids[0] == 'all':
            model_ids = list(self.mpgrp[str(region_id)][:,0])

        model_count = len(model_ids)

        # Cap the page size at 100 models per page.
        if mpp > 100:
            mpp = 100

        model_count = len(model_ids)
        page_count = np.ceil(float(model_count)/mpp)

        model_ids.sort()
        model_pages = [model_ids[i:i+mpp] for i in range(0, len(model_ids), mpp)]

        models = []
        model_ds = dset[mpds.attrs['i']:mpds.attrs['j'], :, :]
        for mid in model_pages[page]:
            model_loc = list(mpds[:,0]).index(int(mid))

            # length x model_loc x coords
            # Using model_ds by pre-cutting then taking slices from that array
            # is much quicker as the majority of the effort is in the initial
            # slice. It is also slightly quicker for getting a single model
            model = model_ds[:, model_loc, :]
            models.append(
                {
                    "ref" : str(mid),
                    "data" : list([str(x) for coords in model for x in coords])
                }
            )

        object_data = self.get_object_data(region_id)
        clusters = self.get_clusters(region_id)
        centroids = self.get_centroids(str(region_id))

        model_json = {
            "metadata" : self.meta_data,
            "object" : object_data,
            "models" : models,
            "clusters" : clusters,
            "centroids" : centroids,
            "restraints" : self.restraints,
            "hic_data" : self.hic_data,
        }
        model_meta = {
            "model_count" : model_count,
            "page_count" : int(page_count)
        }

        return (model_json, model_meta)
Modified the code so that it is PEP8 compliant.
#!/usr/bin/python
"""
Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import json
import h5py
import numpy as np
from dmp import dmp
class hdf5_coord:
    """
    Class related to handling the functions for interacting directly with the
    HDF5 files. All required information should be passed to this class.
    """

    # Relative path of the HDF5 file used when user_id == 'test'.
    test_file = '../sample_coords.hdf5'

    def __init__(self, user_id='test', file_id='', resolution=None):
        """
        Initialise the module and set the required base parameters

        Parameters
        ----------
        user_id : str
            Identifier to uniquely locate the users files. Can be set to
            "common" if the files can be shared between users
        file_id : str
            Location of the file in the file system
        resolution : int (Optional)
            Level of resolution. This is optional, but only the functions
            get_resolutions() and set_resolution() can be called. Once the
            resolution has been set then all functions are callable.
        """
        self.test_file = '../sample_coords.hdf5'
        self.file_handle = None

        # Open the hdf5 file
        if user_id == 'test':
            resource_path = os.path.join(os.path.dirname(__file__), self.test_file)
            self.file_handle = h5py.File(resource_path, "r")
        else:
            # Resolve the user's file through the dmp data-management layer.
            cnf_loc = os.path.dirname(os.path.abspath(__file__)) + '/mongodb.cnf'
            dm_handle = dmp(cnf_loc)
            file_obj = dm_handle.get_file_by_id(user_id, file_id)
            self.file_handle = h5py.File(file_obj['file_path'], 'r')

        self.resolution = resolution
        if self.resolution is not None:
            self.grp = self.file_handle[str(self.resolution)]
            self.meta = self.grp['meta']
            self.mpgrp = self.meta['model_params']
            self.clusters = self.meta['clusters']
            self.centroids = self.meta['centroids']

            dset = self.grp['data']

            # Optional JSON-encoded metadata stored as HDF5 attributes on
            # the data set; default to empty containers when absent.
            if 'dependencies' in dset.attrs:
                self.dependencies = json.loads(dset.attrs['dependencies'])
            else:
                self.dependencies = []

            if 'TADbit_meta' in dset.attrs:
                self.meta_data = json.loads(dset.attrs['TADbit_meta'])
            else:
                self.meta_data = {}

            if 'hic_data' in dset.attrs:
                self.hic_data = json.loads(dset.attrs['hic_data'])
            else:
                self.hic_data = {}

            if 'restraints' in dset.attrs:
                self.restraints = json.loads(dset.attrs['restraints'])
            else:
                self.restraints = {}

    def close(self):
        """
        Tidy function to close file handles
        """
        self.file_handle.close()

    def get_resolutions(self):
        """
        List resolutions that models have been generated for

        Returns
        -------
        list : str
            Available levels of resolution that can be set
        """
        return [res for res in self.file_handle]

    def set_resolution(self, resolution):
        """
        Set, or change, the resolution level

        Parameters
        ----------
        resolution : int
            Level of resolution
        """
        self.resolution = resolution
        self.grp = self.file_handle[str(resolution)]
        self.meta = self.grp['meta']
        self.mpgrp = self.meta['model_params']
        self.clusters = self.meta['clusters']
        self.centroids = self.meta['centroids']

    def get_resolution(self):
        """
        List the current level of resolution

        Returns
        -------
        resolution : int
            Current level of resolution
        """
        return self.resolution

    def get_region_order(self, chr_id=None, region=None):
        """
        List the regions on a given chromosome ID or region ID in the order
        that they are located on the chromosome

        Parameters
        ----------
        chr_id : str
            Chromosome ID
        region : str
            Region ID (takes precedence: its chromosome is used)

        Returns
        -------
        list
            region_id : str
                List of the region IDs, sorted by start position
        """
        if region is not None:
            chr_id = self.mpgrp[str(region)].attrs['chromosome']

        regions = {}
        for region_id in self.mpgrp:
            if self.mpgrp[str(region_id)].attrs['chromosome'] == chr_id:
                regions[region_id] = self.mpgrp[str(region_id)].attrs['start']
        return sorted(regions, key=lambda k: regions[k])

    def get_object_data(self, region_id):
        """
        Prepare the object header data structure ready for printing

        Parameters
        ----------
        region_id : int
            Region that is getting downloaded

        Returns
        -------
        objectdata : dict
            All headers and values required for the JSON output
            (empty dict when no resolution has been set)
        """
        if self.resolution is None:
            return {}

        mpds = self.mpgrp[str(region_id)]
        dset = self.grp['data']
        return {
            'title' : dset.attrs['title'].decode('utf-8'),
            'experimentType' : dset.attrs['experimentType'].decode('utf-8'),
            'species' : dset.attrs['species'].decode('utf-8'),
            'project' : dset.attrs['project'].decode('utf-8'),
            'identifier' : dset.attrs['identifier'].decode('utf-8'),
            'assembly' : dset.attrs['assembly'].decode('utf-8'),
            'cellType' : dset.attrs['cellType'].decode('utf-8'),
            'resolution' : dset.attrs['resolution'].decode('utf-8'),
            'datatype' : dset.attrs['datatype'].decode('utf-8'),
            'components' : dset.attrs['components'].decode('utf-8'),
            'source' : dset.attrs['source'].decode('utf-8'),
            'chromEnd' : [np.asscalar(mpds.attrs['end'])],
            'end' : np.asscalar(mpds.attrs['end']),
            'chromStart' : [np.asscalar(mpds.attrs['start'])],
            'start' : np.asscalar(mpds.attrs['start']),
            'chrom' : mpds.attrs['chromosome'].decode('utf-8'),
            'dependencies' : self.dependencies,
            'uuid' : region_id,
        }

    def get_clusters(self, region_id):
        """
        List all clusters of models

        Returns
        -------
        clusters : list
            List of models in each cluster
        """
        if self.resolution is None:
            return {}

        # Need to loop through structure
        clustersgrp = self.clusters[str(region_id)]
        clusters = []
        for i in range(len(clustersgrp)):
            clusters.append([np.asscalar(x) for x in clustersgrp[str(i)][:]])
        return clusters

    def get_centroids(self, region_id):
        """
        List the centroid models for each cluster

        Returns
        -------
        centroids : list
            List of the centroid models for each cluster
        """
        if self.resolution is None:
            return {}

        centroids = [np.asscalar(x) for x in self.centroids[region_id]]
        return centroids

    def get_chromosomes(self):
        """
        List of chromosomes that have models at a given resolution

        Returns
        -------
        chromosomes : list
            List of chromosomes at the set resolution
        """
        if self.resolution is None:
            return {}

        return list(
            set(
                [self.mpgrp[region_id].attrs['chromosome'].decode('utf-8') for region_id in self.mpgrp.keys()]
            )
        )

    def get_regions(self, chr_id, start, end):
        """
        List regions that are within a given range on a chromosome

        Parameters
        ----------
        chr_id : str
            Chromosome ID
        start : int
            Start position
        end : int
            Stop position

        Returns
        -------
        regions : list
            List of region IDs whose parameters match those provided
        """
        if self.resolution is None:
            return {}

        # A region overlaps [start, end) when it starts before `end` and
        # ends after `start`.
        return [region_id for region_id in self.mpgrp.keys() if self.mpgrp[region_id].attrs['start']<end and self.mpgrp[region_id].attrs['end']>start and self.mpgrp[region_id].attrs['chromosome'].decode('utf-8')==chr_id]

    def get_models(self, region_id):
        """
        List all models for a given region

        Returns
        -------
        List
            model_id : int
            cluster_id : int
        """
        if self.resolution is None:
            return {}

        model_param_ds = self.mpgrp[str(region_id)]
        return model_param_ds[:, :]

    def get_model(self, region_id, model_ids=None, page=0, mpp=10):
        """
        Get the coordinates within a defined region on a specific chromosome.
        If no model_ids are given, the centroid (consensus) models for that
        region are returned.

        Parameters
        ----------
        region_id : str
            Region ID
        model_ids : list
            List of model IDs for the models that are required, or
            ['centroids'] / ['all']. Defaults to the centroid models.
        page : int
            Page number
        mpp : int
            Number of models per page (default: 10; max: 100)

        Returns
        -------
        list
            dict
                metadata : dict
                    Relevant extra meta data added by TADbit
                object : dict
                    Key value pair of information about the region
                models : list
                    List of dictionaries for each model
                clusters : list
                    List of models for each cluster
                centroids : list
                    List of all centroid models
                restraints : list
                    List of restraints for each position
                hic_data : dict
            dict
                model_count : int
                    Count of the number of models for the defined region ID
                page_count : int
                    Number of pages
        """
        if self.resolution is None:
            return {}

        mpds = self.mpgrp[str(region_id)]
        dset = self.grp['data']

        # BUG FIX: model_ids defaults to None, so model_ids[0] raised a
        # TypeError. Fall back to the documented default (centroids).
        if not model_ids:
            model_ids = ['centroids']
        if model_ids[0] == 'centroids':
            model_ids = self.get_centroids(region_id)
        if model_ids[0] == 'all':
            model_ids = list(self.mpgrp[str(region_id)][:, 0])

        model_count = len(model_ids)

        # Cap the page size at 100 models per page.
        if mpp > 100:
            mpp = 100

        model_count = len(model_ids)
        page_count = np.ceil(float(model_count)/mpp)

        model_ids.sort()
        model_pages = [model_ids[i:i+mpp] for i in range(0, len(model_ids), mpp)]

        models = []
        model_ds = dset[mpds.attrs['i']:mpds.attrs['j'], :, :]
        for mid in model_pages[page]:
            model_loc = list(mpds[:, 0]).index(int(mid))

            # length x model_loc x coords
            # Using model_ds by pre-cutting then taking slices from that array
            # is much quicker as the majority of the effort is in the initial
            # slice. It is also slightly quicker for getting a single model
            model = model_ds[:, model_loc, :]
            models.append(
                {
                    "ref" : str(mid),
                    "data" : list([str(x) for coords in model for x in coords])
                }
            )

        object_data = self.get_object_data(region_id)
        clusters = self.get_clusters(region_id)
        centroids = self.get_centroids(str(region_id))

        model_json = {
            "metadata" : self.meta_data,
            "object" : object_data,
            "models" : models,
            "clusters" : clusters,
            "centroids" : centroids,
            "restraints" : self.restraints,
            "hic_data" : self.hic_data,
        }
        model_meta = {
            "model_count" : model_count,
            "page_count" : int(page_count)
        }

        return (model_json, model_meta)
|
# Package version string (bumped on each release).
__version__ = "2.25.1"
class ConfigError(Exception):
    """Raised when the configuration of a tool contains some invalid values."""
    rc = 100  # process exit code, used as sys.exit(rc)
class CustomDatabaseNotFound(Exception):
    """Raised when the InputLocator can't find a user-provided database (region=='custom')"""
    rc = 101  # process exit code, used as sys.exit(rc)
class ScriptNotFoundException(Exception):
    """Raised when an invalid script name is used."""
    rc = 102  # process exit code, used as sys.exit(rc)
class MissingInputDataException(Exception):
    """Raised when a script can't run because some information is missing"""
    rc = 103  # process exit code, used as sys.exit(rc)
class InvalidOccupancyNameException(Exception):
    """Raised when the occupancy.dbf has an invalid / unknown occupancy column"""
    rc = 104  # process exit code, used as sys.exit(rc)
def suppres_3rd_party_debug_loggers():
    """Silence chatty third-party loggers (fiona, shapely, matplotlib, urllib3).

    DOC FIX: the previous docstring said the level is set to WARN, but the
    code sets it to ERROR, so WARNING messages are suppressed as well.
    """
    import logging
    loggers_to_silence = ["shapely", "Fiona", "fiona", "matplotlib", "urllib3.connectionpool"]
    for log_name in loggers_to_silence:
        log = logging.getLogger(log_name)
        log.setLevel(logging.ERROR)
Bumping version to 2.25.2.
# Package version string (bumped on each release).
__version__ = "2.25.2"
class ConfigError(Exception):
    """Raised when the configuration of a tool contains some invalid values."""
    rc = 100  # process exit code, used as sys.exit(rc)
class CustomDatabaseNotFound(Exception):
    """Raised when the InputLocator can't find a user-provided database (region=='custom')"""
    rc = 101  # process exit code, used as sys.exit(rc)
class ScriptNotFoundException(Exception):
    """Raised when an invalid script name is used."""
    rc = 102  # process exit code, used as sys.exit(rc)
class MissingInputDataException(Exception):
    """Raised when a script can't run because some information is missing"""
    rc = 103  # process exit code, used as sys.exit(rc)
class InvalidOccupancyNameException(Exception):
    """Raised when the occupancy.dbf has an invalid / unknown occupancy column"""
    rc = 104  # process exit code, used as sys.exit(rc)
def suppres_3rd_party_debug_loggers():
    """Silence chatty third-party loggers (fiona, shapely, matplotlib, urllib3).

    DOC FIX: the previous docstring said the level is set to WARN, but the
    code sets it to ERROR, so WARNING messages are suppressed as well.
    """
    import logging
    loggers_to_silence = ["shapely", "Fiona", "fiona", "matplotlib", "urllib3.connectionpool"]
    for log_name in loggers_to_silence:
        log = logging.getLogger(log_name)
        log.setLevel(logging.ERROR)
|
#!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom widgets used for Survey form fields, plus the SurveyContent form.
"""
__authors__ = [
'"Daniel Diniz" <ajaksu@gmail.com>',
'"James Levy" <jamesalexanderlevy@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from itertools import chain
import csv
import datetime
import logging
import StringIO
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
from django import forms
from django.forms import widgets
from django.forms.fields import CharField
from django.template import loader
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from soc.logic import dicts
from soc.logic.lists import Lists
from soc.models.survey import COMMENT_PREFIX
from soc.models.survey import SurveyContent
CHOICE_TYPES = set(('selection', 'pick_multi', 'choice', 'pick_quant'))
# TODO(ajaksu) add this to template
REQUIRED_COMMENT_TPL = """
<label for="required_for_{{ name }}">Required</label>
<select id="required_for_{{ name }}" name="required_for_{{ name }}">
<option value="True" {% if is_required %} selected='selected' {% endif %}
>True</option>
<option value="False" {% if not is_required %} selected='selected'
{% endif %}>False</option>
</select>
<label for="comment_for_{{ name }}">Allow Comments</label>
<select id="comment_for_{{ name }}" name="comment_for_{{ name }}">
<option value="True" {% if has_comment %} selected='selected' {% endif %}
>True</option>
<option value="False" {% if not has_comment %} selected='selected'
{% endif %}>False</option>
</select>
"""
class SurveyTakeForm(djangoforms.ModelForm):
  """SurveyContent form for recording survey answers.

  This class is used to produce survey forms for survey taking:
    - User taking survey
    - User updating already taken survey

  Using dynamic properties of the survey model (if passed as an arg) the
  survey form is dynamically formed.
  """

  def __init__(self, *args, **kwargs):
    """Store special kwargs as attributes.

    params:
      survey_content: a SurveyContent entity.
      survey_logic: instance of SurveyLogic.
      survey_record: a SurveyRecord entity.
      read_only: controls whether the survey taking UI allows data entry.
      data: dictionary mapping fields to data for validation.
    """
    self.kwargs = kwargs
    self.survey_content = self.kwargs.pop('survey_content', None)
    self.survey_logic = self.kwargs.pop('survey_logic', None)
    self.survey_record = self.kwargs.pop('survey_record', None)
    self.read_only = self.kwargs.pop('read_only', None)
    # dispatch table: question type -> method that builds its form field
    self.fields_map = dict(
        long_answer=self.addLongField,
        short_answer=self.addShortField,
        selection=self.addSingleField,
        pick_multi=self.addMultiField,
        pick_quant=self.addQuantField,
        )
    # get the POST data dict if present
    data = self.kwargs.pop('data', None)
    # set cleaner methods for fields, only needed if we have POST data
    if data:
      # prepare to render a bound, validating form
      clean_data = self.setCleaners(data)
    else:
      clean_data = self.setCleaners()
    # update with fields from subclasses (a subclass may have set self.data
    # before calling this __init__; it is consumed and removed here)
    if hasattr(self, 'data') and self.data:
      clean_data.update(self.data)
      delattr(self, 'data')
    # pass data, so form is bound
    if data:
      self.kwargs['data'] = clean_data
    super(SurveyTakeForm, self).__init__(*args, **self.kwargs)
    # replace the ModelForm-generated fields with the dynamically built,
    # ordered survey question fields
    self.fields = self.getFields(clean_data)

  def setCleaners(self, post_dict=None):
    """Set cleaner methods for dynamic fields.

    Used for storing textual input as Text instead of StringProperty. If
    passed a dict of field names/values (as the kwarg 'data' to __init__),
    it's possible to set clean_[field_id] methods for validation.

    This method populates the 'data' dict used for generating form fields.

    Args:
      post_dict: dictionary used to populate the fields
    """
    # prefix for method names
    clean = 'clean_'
    # data is passed to super's __init__ as the 'data' kwarg
    data = {}
    # flag whether we can use getlist to retrieve multiple values
    # (True for a django QueryDict, False for a plain dict)
    is_post = hasattr(post_dict, 'getlist')
    schema = {}
    if self.survey_content:
      # NOTE(review): eval() on the stored schema string executes arbitrary
      # code if the datastore value is ever tampered with; consider
      # ast.literal_eval for a pure-literal schema.
      schema = eval(self.survey_content.schema)
    for key, val in schema.items():
      if val['type'] == 'long_answer':
        # store > 500 chars per long answer; key=key binds the loop variable
        # as a default argument so each lambda keeps its own field name
        setattr(self, clean + key,
                lambda key=key: db.Text(self.cleaned_data.get(key))
                )
      if val['has_comment']:
        comment = COMMENT_PREFIX + key
        # store > 500 chars per comment field (same default-arg binding)
        setattr(self, clean + comment,
                lambda comment=comment: db.Text(self.cleaned_data.get(comment))
                )
        # put comment in self.data
        if post_dict:
          comment_val = post_dict.get(comment) or None
        else:
          comment_val = getattr(self.survey_record, comment, None)
        data[comment] = comment_val
      # put POST or record value for field in self.data
      is_multi = val['type'] == 'pick_multi'
      if post_dict:
        if is_multi and is_post:
          key_val = post_dict.getlist(key)
        else:
          key_val = post_dict.get(key)
      else:
        key_val = getattr(self.survey_record, key, None)
      if is_multi and isinstance(key_val, basestring):
        # TODO(ajaksu): find out if we still need this safety net
        key_val = key_val.split(',')
      elif not is_multi and isinstance(key_val, list):
        # old pick_multi record for a question that is now single choice
        key_val = key_val[0] if key_val else ''
      data[key] = key_val
    return data

  def getFields(self, post_dict=None):
    """Build the SurveyContent (questions) form fields.

    params:
      post_dict: dict with POST data that will be used for validation

    Populates self.survey_fields, which will be ordered in self.insertFields.

    Returns:
      the ordered fields (see insertFields), or None when there is no
      survey content to build fields from.
    """
    if not self.survey_content:
      return
    post_dict = post_dict or {}
    self.survey_fields = {}
    schema = SurveyContentSchema(self.survey_content.schema)
    attrs = {}
    # figure out whether we want a read-only view
    read_only = self.read_only
    if not read_only:
      # even without an explicit read_only flag, render disabled widgets
      # once the survey deadline has passed
      survey_content = self.survey_content
      survey_entity = self.survey_logic.getSurveyForContent(survey_content)
      deadline = survey_entity.survey_end
      read_only = deadline and (datetime.datetime.now() > deadline)
    if read_only:
      attrs['disabled'] = 'disabled'
    # add unordered fields to self.survey_fields
    for field in self.survey_content.dynamic_properties():
      value = post_dict.get(field)
      # skip comments, as they should go below their corresponding questions
      if field.startswith(COMMENT_PREFIX):
        continue
      label = schema.getLabel(field)
      if label is None:
        # we log this error in getLabel
        continue
      # find correct field type
      addField = self.fields_map[schema.getType(field)]
      # check if question is required, it's never required when editing
      required = schema.getRequired(field)
      tip = schema.getTip(field)
      kwargs = dict(label=label, req=required, tip=tip)
      # copy attrs so per-field mutations don't leak into other fields
      extra_attrs = attrs.copy()
      # add new field
      addField(field, value, extra_attrs, schema, **kwargs)
      # handle comments if question allows them
      if schema.getHasComment(field):
        comment = post_dict.get(COMMENT_PREFIX + field)
        self.addCommentField(field, comment, extra_attrs, tip='Add a comment.')
    return self.insertFields()

  def insertFields(self):
    """Add ordered fields to self.fields.

    Returns:
      a SortedDict of fields ordered by the survey's question order, with
      each question's comment field (if any) directly after it.
    """
    fields = SortedDict()
    survey_order = self.survey_content.getSurveyOrder()
    # first, insert dynamic survey fields
    for position, property in sorted(survey_order.items()):
      fields.insert(len(fields) + 1, property, self.survey_fields[property])
      # add comment if field has one and this isn't an edit view
      property = COMMENT_PREFIX + property
      if property in self.survey_fields:
        fields.insert(len(fields) + 1, property, self.survey_fields[property])
    return fields

  def addLongField(self, field, value, attrs, schema, req=True, label='',
                   tip='', comment=''):
    """Add a long answer field to this form.

    params:
      field: the current field
      value: the initial value for this field
      attrs: additional attributes for field
      schema: schema for survey
      req: required bool
      label: label for field
      tip: tooltip text for field
      comment: initial comment value for field
    """
    # fix growfield wrapping
    attrs['wrap'] = 'hard'
    widget = widgets.Textarea(attrs=attrs)
    if not tip:
      tip = 'Please provide a long answer to this question.'
    question = CharField(help_text=tip, required=req, label=label,
                         widget=widget, initial=value)
    self.survey_fields[field] = question

  def addShortField(self, field, value, attrs, schema, req=False, label='',
                    tip='', comment=''):
    """Add a short answer field to this form.

    params:
      field: the current field
      value: the initial value for this field
      attrs: additional attributes for field
      schema: schema for survey
      req: required bool
      label: label for field
      tip: tooltip text for field
      comment: initial comment value for field
    """
    attrs['class'] = "text_question"
    widget = widgets.TextInput(attrs=attrs)
    if not tip:
      tip = 'Please provide a short answer to this question.'
    # short answers are capped at 140 characters
    question = CharField(help_text=tip, required=req, label=label,
                         widget=widget, max_length=140, initial=value)
    self.survey_fields[field] = question

  def addSingleField(self, field, value, attrs, schema, req=False, label='',
                     tip='', comment=''):
    """Add a selection (single choice) field to this form.

    params:
      field: the current field
      value: the initial value for this field
      attrs: additional attributes for field
      schema: schema for survey
      req: required bool
      label: label for field
      tip: tooltip text for field
      comment: initial comment value for field
    """
    widget = PickOneSelect(attrs)
    these_choices = []
    # add all properties, but select chosen one
    # TODO(ajaksu): this breaks ordering and blocks merging choice methods
    options = getattr(self.survey_content, field)
    if self.survey_record and hasattr(self.survey_record, field):
      # put the previously recorded answer first so it renders selected
      these_choices.append((value, value))
      if value in options:
        options.remove(value)
    for option in options:
      these_choices.append((option, option))
    if not tip:
      # NOTE(review): message reads oddly ("an answer this question") --
      # kept byte-identical here; fix as a deliberate string change.
      tip = 'Please select an answer this question.'
    question = PickOneField(help_text=tip, required=req, label=label,
                            choices=tuple(these_choices), widget=widget)
    self.survey_fields[field] = question

  def addMultiField(self, field, value, attrs, schema, req=False, label='',
                    tip='', comment=''):
    """Add a pick_multi field to this form.

    params:
      field: the current field
      value: the initial value for this field
      attrs: additional attributes for field
      schema: schema for survey
      req: required bool
      label: label for field
      tip: tooltip text for field
      comment: initial comment value for field
    """
    widget = PickManyCheckbox(attrs)
    # TODO(ajaksu) need to allow checking checkboxes by default
    if self.survey_record and isinstance(value, basestring):
      # pass value as 'initial' so MultipleChoiceField renders checked boxes
      value = value.split(',')
    these_choices = [(v, v) for v in getattr(self.survey_content, field)]
    if not tip:
      tip = 'Please select one or more of these choices.'
    question = PickManyField(help_text=tip, required=req, label=label,
                             choices=tuple(these_choices), widget=widget,
                             initial=value)
    self.survey_fields[field] = question

  def addQuantField(self, field, value, attrs, schema, req=False, label='',
                    tip='', comment=''):
    """Add a pick_quant field to this form.

    params:
      field: the current field
      value: the initial value for this field
      attrs: additional attributes for field
      schema: schema for survey
      req: required bool
      label: label for field
      tip: tooltip text for field
      comment: initial comment value for field
    """
    widget = PickQuantRadio(attrs)
    if self.survey_record:
      # NOTE(review): no-op assignment kept byte-identical; only the else
      # branch (clearing the initial value) has an effect
      value = value
    else:
      value = None
    these_choices = [(v, v) for v in getattr(self.survey_content, field)]
    if not tip:
      tip = 'Please select one of these choices.'
    question = PickQuantField(help_text=tip, required=req, label=label,
                              choices=tuple(these_choices), widget=widget,
                              initial=value)
    self.survey_fields[field] = question

  def addCommentField(self, field, comment, attrs, tip):
    """Add comment field to a question.

    params:
      field: the name of the field to add the comment field to
      comment: the initial value of this field.
      attrs: the attrs for the widget
      tip: tooltip text for this field
    """
    attrs['class'] = 'comment'
    attrs['rows'] = '1'
    widget = widgets.Textarea(attrs=attrs)
    comment_field = CharField(help_text=tip, required=False,
        label='Add a Comment (optional)', widget=widget, initial=comment)
    # comment fields are keyed by the question name with a fixed prefix
    self.survey_fields[COMMENT_PREFIX + field] = comment_field

  class Meta(object):
    # exclude the raw schema string from the generated ModelForm fields
    model = SurveyContent
    exclude = ['schema']
class SurveyEditForm(djangoforms.ModelForm):
  """SurveyContent form for editing a survey.

  This class is used to produce survey forms for several circumstances:
    - Admin creating survey from scratch
    - Admin updating existing survey

  Using dynamic properties of the survey model (if passed as an arg) the
  survey form is dynamically formed.
  """

  def __init__(self, *args, **kwargs):
    """Store special kwargs as attributes.

    params:
      survey_content: a SurveyContent entity.
      survey_logic: an instance of SurveyLogic.
    """
    self.kwargs = kwargs
    self.survey_content = self.kwargs.pop('survey_content', None)
    self.survey_logic = self.kwargs.pop('survey_logic', None)
    super(SurveyEditForm, self).__init__(*args, **self.kwargs)

  def getFields(self):
    """Build the SurveyContent (questions) form fields.

    Populates self.survey_fields, which will be ordered in self.insertFields.

    Returns:
      the ordered fields from insertFields, or None when there is no
      survey content to build fields from.
    """
    if not self.survey_content:
      return
    self.survey_fields = {}
    schema = SurveyContentSchema(self.survey_content.schema)
    extra_attrs = {}
    # add unordered fields to self.survey_fields
    for field in self.survey_content.dynamic_properties():
      # use prompts set by survey creator
      value = getattr(self.survey_content, field)
      label = schema.getLabel(field)
      if label is None:
        # we log this error in getLabel
        continue
      tip = schema.getTip(field)
      kwargs = schema.getEditFieldArgs(field, value, tip, label)
      kwargs['widget'] = schema.getEditWidget(field, extra_attrs, tip)
      # add new field
      self.survey_fields[field] = schema.getEditField(field)(**kwargs)
    # TODO(ajaksu): find a new way to keep fields in order
    return self.insertFields()

  def insertFields(self):
    """Add ordered fields to self.fields.

    Returns:
      self.fields with the dynamic survey fields inserted at the
      positions given by the survey's question order.
    """
    survey_order = self.survey_content.getSurveyOrder()
    # insert dynamic survey fields
    for position, property in survey_order.items():
      self.fields.insert(position, property, self.survey_fields[property])
    return self.fields

  class Meta(object):
    # exclude the raw schema string from the generated ModelForm fields
    model = SurveyContent
    exclude = ['schema']
class SurveyContentSchema(object):
  """Abstract question metadata handling.

  Wraps the schema dictionary stored (as its string repr) on a
  SurveyContent entity and answers metadata queries per question.
  """

  def __init__(self, schema):
    """Set the dictionary that this class encapsulates.

    Args:
      schema: schema as stored in SurveyContent entity
    """
    # the schema arrives as the string repr of a dict
    self.schema = eval(schema)

  def getType(self, field):
    """Fetch question type for field e.g. short_answer, pick_multi, etc.

    Args:
      field: name of the field to get the type for
    """
    return self.schema[field]['type']

  def getTip(self, field):
    """Fetch question help text, used for tooltips.

    Args:
      field: name of the field to get the tooltip for
    """
    return self.schema[field].get('tip', '')

  def getRequired(self, field):
    """Check whether survey question is required.

    Args:
      field: name of the field to check the required property for
    """
    return self.schema[field]['required']

  def getHasComment(self, field):
    """Check whether survey question allows adding a comment.

    Args:
      field: name of the field to get the hasComment property for
    """
    return self.schema[field]['has_comment']

  def getRender(self, field):
    """Get rendering options for choice questions.

    Args:
      field: name of the field to get the rendering option for
    """
    return self.schema[field]['render']

  def getEditField(self, field):
    """For a given question kind, get the correct edit view field class."""
    return PickOneField if self.getType(field) in CHOICE_TYPES else CharField

  def getEditFieldArgs(self, field, value, tip, label):
    """Build edit view field arguments.

    params:
      field: field name
      value: field value (text for text questions, list for choice questions)
      tip: help text, to be used in a tooltip
      label: the field's question (or identifier if question is missing)
    """
    kwargs = dict(help_text=tip, required=False, label=label)
    if self.getType(field) in CHOICE_TYPES:
      kwargs['choices'] = tuple((option, option) for option in value)
    else:
      kwargs['initial'] = tip
    return kwargs

  def getEditWidget(self, field, attrs, tip):
    """Get survey editing widget for questions."""
    kind = self.getType(field)
    required = self.getRequired(field)
    commented = self.getHasComment(field)
    if kind in CHOICE_TYPES:
      # choice questions share one editor widget, parameterized by kind
      widget = UniversalChoiceEditor
      args = (kind, self.getRender(field), required, commented, tip)
    else:
      args = (required, commented)
      if kind == 'long_answer':
        widget = LongTextarea
      elif kind == 'short_answer':
        widget = ShortTextInput
    # copy before mutating so the caller's attrs dict is untouched
    widget_attrs = attrs.copy()
    widget_attrs['class'] = kind
    return widget(*args, attrs=widget_attrs)

  def getLabel(self, field):
    """Fetch the free text 'question' or use field name as label."""
    try:
      entry = self.schema[field]
    except KeyError:
      logging.error('field %s not found in schema %s' %
                    (field, str(self.schema)))
      return None
    return entry.get('question') or field
class UniversalChoiceEditor(widgets.Widget):
  """Edit interface for choice questions.

  Allows adding and removing options, re-ordering and editing option text.
  """

  def __init__(self, kind, render, is_required, has_comment, tip,
               attrs=None, choices=()):
    """
    params:
      kind: question kind (one of selection, pick_multi or pick_quant)
      render: question widget (single_select, multi_checkbox or quant_radio)
      is_required: bool, controls selection in the required_for field
      has_comments: bool, controls selection in the has_comments field
      tip: tooltip text for the question
      attrs: optional dict of extra HTML attributes
      choices: iterable of (value, label) option pairs
    """
    self.attrs = attrs or {}
    # Choices can be any iterable, but we may need to render this widget
    # multiple times. Thus, collapse it into a list so it can be consumed
    # more than once.
    self.choices = list(choices)
    self.kind = kind
    self.render_as = render
    self.is_required = is_required
    self.has_comment = has_comment
    self.tooltip_content = tip or ''

  def render(self, name, value, attrs=None, choices=()):
    """Render UCE widget.

    Option reordering, editing, addition and deletion are added here.
    """
    if value is None:
      value = ''
    # NOTE(review): final_attrs is computed but never used below -- confirm
    # whether the template was meant to receive it.
    final_attrs = self.build_attrs(attrs, name=name)
    # find out which options should be selected in type and render drop-downs.
    # str * bool yields the attribute string when True, '' when False
    selected = 'selected="selected"'
    context = dict(
        name=name,
        is_selection=selected * (self.kind == 'selection'),
        is_pick_multi=selected * (self.kind == 'pick_multi'),
        is_pick_quant=selected * (self.kind == 'pick_quant'),
        is_select=selected * (self.render_as == 'single_select'),
        is_checkboxes=selected * (self.render_as == 'multi_checkbox'),
        is_radio_buttons=selected * (self.render_as == 'quant_radio'),
        )
    # set required and has_comment selects
    context.update(dict(
        is_required = self.is_required,
        has_comment = self.has_comment,
        ))
    # NOTE(review): str_value is computed but never used -- confirm intent.
    str_value = forms.util.smart_unicode(value)  # normalize to string.
    chained_choices = enumerate(chain(self.choices, choices))
    choices = {}
    # options are escaped and keyed by their position for the template
    for i, (option_value, option_label) in chained_choices:
      option_value = escape(forms.util.smart_unicode(option_value))
      choices[i] = option_value
    context['choices'] = choices
    tooltip_content = escape(forms.util.smart_unicode(self.tooltip_content))
    context['tooltip_content'] = tooltip_content
    template = 'soc/survey/universal_choice_editor.html'
    return loader.render_to_string(template, context)
class PickOneField(forms.ChoiceField):
  """Single-choice field used when taking a survey.

  Currently a plain pass-through to ChoiceField, kept as a hook for
  later customization.
  """
  #TODO(james): Ensure that more than one option cannot be selected

  def __init__(self, *args, **kwargs):
    """Delegate construction to the parent ChoiceField."""
    super(PickOneField, self).__init__(*args, **kwargs)
class PickManyField(forms.MultipleChoiceField):
  """Multiple-choice field used when taking a survey.

  Currently a plain pass-through to MultipleChoiceField, kept as a hook
  for later customization.
  """

  def __init__(self, *args, **kwargs):
    """Delegate construction to the parent MultipleChoiceField."""
    super(PickManyField, self).__init__(*args, **kwargs)
class PickQuantField(forms.ChoiceField):
  """Quantitative (pick_quant) choice field used when taking a survey.

  Currently a plain pass-through to ChoiceField, kept as a hook for
  later customization.
  """

  def __init__(self, *args, **kwargs):
    """Delegate construction to the parent ChoiceField."""
    super(PickQuantField, self).__init__(*args, **kwargs)
class LongTextarea(widgets.Textarea):
  """Textarea for editing a long question; also renders the extra
  'required' and 'has_comment' selects.
  """

  def __init__(self, is_required, has_comment, attrs=None):
    """Initialize widget and store editing mode.

    params:
      is_required: bool, controls selection in the 'required' extra field
      has_comments: bool, controls selection in the 'has_comment' extra field
    """
    self.is_required = is_required
    self.has_comment = has_comment
    super(LongTextarea, self).__init__(attrs)

  def render(self, name, value, attrs=None):
    """Render plain textarea or widget with extra fields.

    Extra fields are 'required' and 'has_comment'.
    """
    # the plain textarea from the parent widget
    text_area = super(LongTextarea, self).render(name, value, attrs)
    # prepend the 'required' / 'has_comment' selects from the shared template
    extras = loader.get_template_from_string(REQUIRED_COMMENT_TPL).render(
        context=loader.Context(
            dict_=dict(name=name, is_required=self.is_required,
                       has_comment=self.has_comment)))
    return '<fieldset>' + extras + text_area + '</fieldset>'
class ShortTextInput(widgets.TextInput):
  """Text input for editing a short question; also renders the extra
  'required' and 'has_comment' selects.
  """

  def __init__(self, is_required, has_comment, attrs=None):
    """Initialize widget and store editing mode.

    params:
      is_required: bool, controls selection in the 'required' extra field
      has_comments: bool, controls selection in the 'has_comment' extra field
    """
    self.is_required = is_required
    self.has_comment = has_comment
    super(ShortTextInput, self).__init__(attrs)

  def render(self, name, value, attrs=None):
    """Render plain text input or widget with extra fields.

    Extra fields are 'required' and 'has_comment'.
    """
    # the plain text input from the parent widget
    text_input = super(ShortTextInput, self).render(name, value, attrs)
    # prepend the 'required' / 'has_comment' selects from the shared template
    extras = loader.get_template_from_string(REQUIRED_COMMENT_TPL).render(
        context=loader.Context(
            dict_=dict(name=name, is_required=self.is_required,
                       has_comment=self.has_comment)))
    return '<fieldset>' + extras + text_input + '</fieldset>'
class PickOneSelect(forms.Select):
  """Single-choice select widget used when taking a survey.

  Currently a plain pass-through to Select, kept as a hook for later
  customization.
  """

  def __init__(self, *args, **kwargs):
    """Delegate construction to the parent Select widget."""
    super(PickOneSelect, self).__init__(*args, **kwargs)
class PickManyCheckbox(forms.CheckboxSelectMultiple):
  """Customized multiple choice checkbox widget.

  Renders the pick_multi question for survey taking as checkboxes in a
  list wrapped in a fieldset.
  """

  def __init__(self, *args, **kwargs):
    """Delegate construction to the parent CheckboxSelectMultiple."""
    super(PickManyCheckbox, self).__init__(*args, **kwargs)

  def render(self, name, value, attrs=None, choices=()):
    """Render checkboxes as list items grouped in a fieldset.

    This is the pick_multi widget for survey taking.

    params:
      name: form field name
      value: list of currently selected option values (None means none)
      attrs: extra HTML attributes for the checkboxes
      choices: extra (value, label) pairs appended after self.choices
    """
    if value is None:
      value = []
    # 'in' replaces the Python-2-only dict.has_key() method
    has_id = attrs and 'id' in attrs
    final_attrs = self.build_attrs(attrs, name=name)
    # normalize selected values to strings for the check test
    str_values = set([forms.util.smart_unicode(v) for v in value])

    def is_checked(value):
      # check_test callback for CheckboxInput: is this option selected?
      return value in str_values

    smart_unicode = forms.util.smart_unicode
    # set container fieldset and list
    output = [u'<fieldset id="id_%s">\n <ul class="pick_multi">' % name]
    # add numbered checkboxes wrapped in list items
    chained_choices = enumerate(chain(self.choices, choices))
    for i, (option_value, option_label) in chained_choices:
      option_label = escape(smart_unicode(option_label))
      # If an ID attribute was given, add a numeric index as a suffix,
      # so that the checkboxes don't all have the same ID attribute.
      if has_id:
        final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
      cb = widgets.CheckboxInput(final_attrs, check_test=is_checked)
      rendered_cb = cb.render(name, option_value)
      cb_label = (rendered_cb, option_label)
      output.append(u' <li><label>%s %s</label></li>' % cb_label)
    output.append(u' </ul>\n</fieldset>')
    return u'\n'.join(output)

  @classmethod
  def id_for_label(cls, id_):
    """Point the label at the surrounding fieldset.

    See the comment for RadioSelect.id_for_label().
    """
    if id_:
      id_ += '_fieldset'
    return id_
class PickQuantRadioRenderer(widgets.RadioFieldRenderer):
  """Used by PickQuantRadio to enable customization of radio widgets.
  """

  def __init__(self, *args, **kwargs):
    """Delegate construction to the parent renderer."""
    super(PickQuantRadioRenderer, self).__init__(*args, **kwargs)

  def render(self):
    """Outputs set of radio fields in a div.
    """
    # one rendered radio widget per line, wrapped in a styled div
    rows = [force_unicode(widget) for widget in self]
    return mark_safe(u'<div class="quant_radio">\n%s\n</div>'
                     % u'\n'.join(rows))
class PickQuantRadio(forms.RadioSelect):
  """Radio-select widget for pick_quant questions.

  TODO(James,Ajaksu) Fix Docstring
  """
  # use the custom renderer so each radio appears in the styled div
  renderer = PickQuantRadioRenderer

  def __init__(self, *args, **kwargs):
    """Delegate construction to the parent RadioSelect."""
    super(PickQuantRadio, self).__init__(*args, **kwargs)
# in the future, we'll have more widget types here
# maps a choice question's render option to its survey-taking widget
WIDGETS = {'multi_checkbox': PickManyCheckbox,
           'single_select': PickOneSelect,
           'quant_radio': PickQuantRadio}
class HelperForm(object):
  """Thin wrapper for adding values to params['edit_form'].fields.
  """

  def __init__(self, form=None):
    """Store the wrapped edit_form class.
    """
    self.form = form

  def __call__(self, instance=None):
    """Transparently instantiate the edit_form and seed its initial values.
    """
    bound_form = self.form(instance=instance)
    # initial values are derived from the entity being edited
    initial_values = {
        'created_by': instance.author.name,
        'last_modified_by': instance.modified_by.name,
        'doc_key_name': instance.key().id_or_name(),
    }
    for field_name, initial in initial_values.items():
      bound_form.fields[field_name].initial = initial
    return bound_form
def getCSVHeader(survey_entity):
  """CSV header helper, needs support for comment lines in CSV.

  Builds a block of '#'-prefixed comment lines describing the survey's
  static properties, dynamic question prompts and schema.

  Args:
    survey_entity: Survey entity
  """
  tpl = '# %s: %s\n'
  # add static properties
  fields = ['# Melange Survey export for \n# %s\n#\n' % survey_entity.title]
  fields += [tpl % (k,v) for k,v in survey_entity.toDict().items()]
  # NOTE(review): PLAIN and FIELDS are module-level constants not visible in
  # this part of the file -- presumably whitespace-separated property-name
  # strings; confirm they are defined before this function is called.
  fields += [tpl % (f, str(getattr(survey_entity, f))) for f in PLAIN.split()]
  fields += [tpl % (f, str(getattr(survey_entity, f).link_id))
             for f in FIELDS.split()]
  fields.sort()
  # add dynamic properties
  fields += ['#\n#---\n#\n']
  dynamic = survey_entity.survey_content.dynamic_properties()
  dynamic = [(prop, getattr(survey_entity.survey_content, prop))
             for prop in dynamic]
  fields += [tpl % (k,v) for k,v in sorted(dynamic)]
  # add schema
  fields += ['#\n#---\n#\n']
  schema = survey_entity.survey_content.schema
  # reflow the schema repr so each question starts on its own comment line
  indent = '},\n#' + ' ' * 9
  fields += [tpl % ('Schema', schema.replace('},', indent)) + '#\n']
  # CSV convention: terminate lines with CRLF
  return ''.join(fields).replace('\n', '\r\n')
def getRecords(recs, props):
  """Fetch properties from SurveyRecords for CSV export.

  The first entry of props names the leading user column, which is filled
  from each record's user.link_id; the remaining names are read off the
  record with getattr (None when missing).
  """
  # skip the leading 'user' column name; it is filled from rec.user below
  prop_names = props[1:]
  rows = []
  for record in recs:
    row = (record.user.link_id,) + tuple(
        getattr(record, prop_name, None) for prop_name in prop_names)
    rows.append(row)
  return rows
def toCSV(survey_view):
  """CSV exporter.

  Args:
    survey_view: instance of the SurveyView

  Returns:
    A wrapper function that maps a survey entity to a
    (csv_text, link_id) tuple.
  """
  def wrapper(survey):
    """Wrapper function.
    """
    survey_logic = survey_view.getParams()['logic']
    record_logic = survey_logic.getRecordLogic()
    # get header and properties
    header = getCSVHeader(survey)
    leading = ['user', 'created', 'modified']
    properties = leading + survey.survey_content.orderedProperties()
    # retrieve the query of the data to export
    fields = {'survey': survey}
    record_query = record_logic.getQueryForFields(fields)
    try:
      # peek at the first record; .next() is the Python 2 iterator protocol
      first = record_query.run().next()
    except StopIteration:
      # bail out early if survey_records.run() is empty
      return header, survey.link_id
    # generate results list
    # NOTE(review): run() is deliberately executed again so the record
    # consumed by the emptiness check above is not lost from the export.
    recs = record_query.run()
    recs = getRecords(recs, properties)
    # write results to CSV
    output = StringIO.StringIO()
    writer = csv.writer(output)
    writer.writerow(properties)
    writer.writerows(recs)
    return header + output.getvalue(), survey.link_id
  return wrapper
Every radio button is now properly shown on a new line.
Update issue 655
Owner: ljvderijk
Fixes Issue 655.
--HG--
extra : rebase_source : d5338a5d0863ac4ec05e6ae8c6fb8e408c20dc8a
#!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom widgets used for Survey form fields, plus the SurveyContent form.
"""
__authors__ = [
'"Daniel Diniz" <ajaksu@gmail.com>',
'"James Levy" <jamesalexanderlevy@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from itertools import chain
import csv
import datetime
import logging
import StringIO
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
from django import forms
from django.forms import widgets
from django.forms.fields import CharField
from django.template import loader
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.utils.html import escape
from soc.logic import dicts
from soc.logic.lists import Lists
from soc.models.survey import COMMENT_PREFIX
from soc.models.survey import SurveyContent
# Question kinds whose answers come from a fixed option list (as opposed to
# free-text kinds such as short_answer / long_answer).
CHOICE_TYPES = set(('selection', 'pick_multi', 'choice', 'pick_quant'))

# TODO(ajaksu) add this to template
# Template snippet appended to question-editing widgets: renders the
# 'Required' and 'Allow Comments' drop-downs for a single question.
# Rendered with a context containing name, is_required and has_comment.
REQUIRED_COMMENT_TPL = """
<label for="required_for_{{ name }}">Required</label>
<select id="required_for_{{ name }}" name="required_for_{{ name }}">
<option value="True" {% if is_required %} selected='selected' {% endif %}
>True</option>
<option value="False" {% if not is_required %} selected='selected'
{% endif %}>False</option>
</select>
<label for="comment_for_{{ name }}">Allow Comments</label>
<select id="comment_for_{{ name }}" name="comment_for_{{ name }}">
<option value="True" {% if has_comment %} selected='selected' {% endif %}
>True</option>
<option value="False" {% if not has_comment %} selected='selected'
{% endif %}>False</option>
</select>
"""
class SurveyTakeForm(djangoforms.ModelForm):
"""SurveyContent form for recording survey answers.
This class is used to produce survey forms for survey taking:
- User taking survey
- User updating already taken survey
Using dynamic properties of the survey model (if passed as an arg) the
survey form is dynamically formed.
"""
def __init__(self, *args, **kwargs):
"""Store special kwargs as attributes.
params:
survey_content: a SuveryContent entity.
survey_logic: instance of SurveyLogic.
survey_record: a SurveyRecord entity.
read_only: controls whether the survey taking UI allows data entry.
data: dictionary mapping fields to data for validation.
"""
self.kwargs = kwargs
self.survey_content = self.kwargs.pop('survey_content', None)
self.survey_logic = self.kwargs.pop('survey_logic', None)
self.survey_record = self.kwargs.pop('survey_record', None)
self.read_only = self.kwargs.pop('read_only', None)
self.fields_map = dict(
long_answer=self.addLongField,
short_answer=self.addShortField,
selection=self.addSingleField,
pick_multi=self.addMultiField,
pick_quant=self.addQuantField,
)
# get the POST data dict if present
data = self.kwargs.pop('data', None)
# set cleaner methods for fields, only needed if we have POST data
if data:
# prepare to render a bound, validating form
clean_data = self.setCleaners(data)
else:
clean_data = self.setCleaners()
# update with fields from subclasses
if hasattr(self, 'data') and self.data:
clean_data.update(self.data)
delattr(self, 'data')
# pass data, so form is bound
if data:
self.kwargs['data'] = clean_data
super(SurveyTakeForm, self).__init__(*args, **self.kwargs)
self.fields = self.getFields(clean_data)
def setCleaners(self, post_dict=None):
"""Set cleaner methods for dynamic fields.
Used for storing textual input as Text instead of StringProperty. If
passed a dict of field names/values (as the kwarg 'data' to __init__),
it's possible to set clean_[field_id] methods for validation.
This method populates the 'data' dict used for generating form fields.
Args:
post_dict: dictionary used to populate the fields
"""
# prefix for method names
clean = 'clean_'
# data is passed to super's __init__ as the 'data' kwarg
data = {}
# flag whether we can use getlist to retrieve multiple values
is_post = hasattr(post_dict, 'getlist')
schema = {}
if self.survey_content:
schema = eval(self.survey_content.schema)
for key, val in schema.items():
if val['type'] == 'long_answer':
# store > 500 chars per long answer
setattr(self, clean + key,
lambda key=key: db.Text(self.cleaned_data.get(key))
)
if val['has_comment']:
comment = COMMENT_PREFIX + key
#store > 500 chars per comment field
setattr(self, clean + comment,
lambda comment=comment: db.Text(self.cleaned_data.get(comment))
)
# put comment in self.data
if post_dict:
comment_val = post_dict.get(comment) or None
else:
comment_val = getattr(self.survey_record, comment, None)
data[comment] = comment_val
# put POST or record value for field in self.data
is_multi = val['type'] == 'pick_multi'
if post_dict:
if is_multi and is_post:
key_val = post_dict.getlist(key)
else:
key_val = post_dict.get(key)
else:
key_val = getattr(self.survey_record, key, None)
if is_multi and isinstance(key_val, basestring):
# TODO(ajaksu): find out if we still need this safety net
key_val = key_val.split(',')
elif not is_multi and isinstance(key_val, list):
# old pick_multi record for a question that is now single choice
key_val = key_val[0] if key_val else ''
data[key] = key_val
return data
def getFields(self, post_dict=None):
"""Build the SurveyContent (questions) form fields.
params:
post_dict: dict with POST data that will be used for validation
Populates self.survey_fields, which will be ordered in self.insertFields.
"""
if not self.survey_content:
return
post_dict = post_dict or {}
self.survey_fields = {}
schema = SurveyContentSchema(self.survey_content.schema)
attrs = {}
# figure out whether we want a read-only view
read_only = self.read_only
if not read_only:
survey_content = self.survey_content
survey_entity = self.survey_logic.getSurveyForContent(survey_content)
deadline = survey_entity.survey_end
read_only = deadline and (datetime.datetime.now() > deadline)
if read_only:
attrs['disabled'] = 'disabled'
# add unordered fields to self.survey_fields
for field in self.survey_content.dynamic_properties():
value = post_dict.get(field)
# skip comments, as they should go below their corresponding questions
if field.startswith(COMMENT_PREFIX):
continue
label = schema.getLabel(field)
if label is None:
# we log this error in getLabel
continue
# find correct field type
addField = self.fields_map[schema.getType(field)]
# check if question is required, it's never required when editing
required = schema.getRequired(field)
tip = schema.getTip(field)
kwargs = dict(label=label, req=required, tip=tip)
# copy attrs
extra_attrs = attrs.copy()
# add new field
addField(field, value, extra_attrs, schema, **kwargs)
# handle comments if question allows them
if schema.getHasComment(field):
comment = post_dict.get(COMMENT_PREFIX + field)
self.addCommentField(field, comment, extra_attrs, tip='Add a comment.')
return self.insertFields()
def insertFields(self):
    """Build and return a SortedDict of fields, in stored survey order.

    Each question is followed immediately by its comment field when one
    was registered in self.survey_fields.
    """
    ordered = SortedDict()

    def _append(name):
        # an index past the current end appends the entry
        ordered.insert(len(ordered) + 1, name, self.survey_fields[name])

    positions = self.survey_content.getSurveyOrder()
    # walk the dynamic survey questions by their stored position
    for _position, prop in sorted(positions.items()):
        _append(prop)
        # a question's comment field, when present, follows it immediately
        comment_key = COMMENT_PREFIX + prop
        if comment_key in self.survey_fields:
            _append(comment_key)
    return ordered
def addLongField(self, field, value, attrs, schema, req=True, label='',
                 tip='', comment=''):
    """Register a long-answer (textarea) question on this form.

    params:
      field: the current field
      value: the initial value for this field
      attrs: additional attributes for field
      schema: schema for survey
      req: required bool
      label: label for field
      tip: tooltip text for field
      comment: initial comment value for field
    """
    # hard wrapping keeps the growfield widget from mangling long lines
    attrs['wrap'] = 'hard'
    help_text = tip or 'Please provide a long answer to this question.'
    self.survey_fields[field] = CharField(
        help_text=help_text, required=req, label=label,
        widget=widgets.Textarea(attrs=attrs), initial=value)
def addShortField(self, field, value, attrs, schema, req=False, label='',
                  tip='', comment=''):
    """Register a short-answer (single line, max 140 chars) question.

    params:
      field: the current field
      value: the initial value for this field
      attrs: additional attributes for field
      schema: schema for survey
      req: required bool
      label: label for field
      tip: tooltip text for field
      comment: initial comment value for field
    """
    attrs['class'] = "text_question"
    help_text = tip or 'Please provide a short answer to this question.'
    self.survey_fields[field] = CharField(
        help_text=help_text, required=req, label=label,
        widget=widgets.TextInput(attrs=attrs), max_length=140, initial=value)
def addSingleField(self, field, value, attrs, schema, req=False, label='',
                   tip='', comment=''):
    """Add a single-choice selection field to this form.

    params:
      field: the current field
      value: the initial value for this field
      attrs: additional attributes for field
      schema: schema for survey
      req: required bool
      label: label for field
      tip: tooltip text for field
      comment: initial comment value for field
    """
    widget = PickOneSelect(attrs)
    these_choices = []
    # add all properties, but select chosen one
    # TODO(ajaksu): this breaks ordering and blocks merging choice methods
    options = getattr(self.survey_content, field)
    if self.survey_record and hasattr(self.survey_record, field):
        # put the recorded answer first so the select shows it as chosen
        these_choices.append((value, value))
        if value in options:
            # NOTE(review): mutates the survey_content property list in
            # place; confirm the entity is not saved afterwards.
            options.remove(value)
    for option in options:
        these_choices.append((option, option))
    if not tip:
        # fixed grammar (was: 'Please select an answer this question.')
        tip = 'Please select an answer to this question.'
    question = PickOneField(help_text=tip, required=req, label=label,
                            choices=tuple(these_choices), widget=widget)
    self.survey_fields[field] = question
def addMultiField(self, field, value, attrs, schema, req=False, label='',
                  tip='', comment=''):
    """Register a pick_multi (checkbox group) question on this form.

    params:
      field: the current field
      value: the initial value for this field
      attrs: additional attributes for field
      schema: schema for survey
      req: required bool
      label: label for field
      tip: tooltip text for field
      comment: initial comment value for field
    """
    # TODO(ajaksu) need to allow checking checkboxes by default
    if self.survey_record and isinstance(value, basestring):
        # stored answers are comma-joined; MultipleChoiceField wants a list
        value = value.split(',')
    choice_pairs = tuple(
        (opt, opt) for opt in getattr(self.survey_content, field))
    help_text = tip or 'Please select one or more of these choices.'
    self.survey_fields[field] = PickManyField(
        help_text=help_text, required=req, label=label,
        choices=choice_pairs, widget=PickManyCheckbox(attrs), initial=value)
def addQuantField(self, field, value, attrs, schema, req=False, label='',
                  tip='', comment=''):
    """Add a pick_quant (radio button) field to this form.

    params:
      field: the current field
      value: the initial value for this field
      attrs: additional attributes for field
      schema: schema for survey
      req: required bool
      label: label for field
      tip: tooltip text for field
      comment: initial comment value for field
    """
    widget = PickQuantRadio(attrs)
    # only a recorded answer pre-selects a radio button; new records start
    # unselected (original had a no-op `value = value` in the other branch)
    if not self.survey_record:
        value = None
    these_choices = [(v, v) for v in getattr(self.survey_content, field)]
    if not tip:
        tip = 'Please select one of these choices.'
    question = PickQuantField(help_text=tip, required=req, label=label,
                              choices=tuple(these_choices), widget=widget,
                              initial=value)
    self.survey_fields[field] = question
def addCommentField(self, field, comment, attrs, tip):
    """Attach an optional single-row comment textarea to a question.

    params:
      field: the name of the field to add the comment field to
      comment: the initial value of this field
      attrs: the attrs for the widget
      tip: tooltip text for this field
    """
    attrs.update({'class': 'comment', 'rows': '1'})
    self.survey_fields[COMMENT_PREFIX + field] = CharField(
        help_text=tip, required=False,
        label='Add a Comment (optional)',
        widget=widgets.Textarea(attrs=attrs), initial=comment)
class Meta(object):
    # Bind this ModelForm to SurveyContent; 'schema' is managed
    # programmatically, so keep it out of the generated form.
    model = SurveyContent
    exclude = ['schema']
class SurveyEditForm(djangoforms.ModelForm):
    """SurveyContent form for editing a survey.

    This class is used to produce survey forms for several circumstances:
      - Admin creating survey from scratch
      - Admin updating existing survey

    Using dynamic properties of the survey model (if passed as an arg) the
    survey form is dynamically formed.
    """

    def __init__(self, *args, **kwargs):
        """Store special kwargs as attributes.

        params:
          survey_content: a SurveyContent entity.
          survey_logic: an instance of SurveyLogic.
        """
        self.kwargs = kwargs
        self.survey_content = self.kwargs.pop('survey_content', None)
        self.survey_logic = self.kwargs.pop('survey_logic', None)
        super(SurveyEditForm, self).__init__(*args, **self.kwargs)

    def getFields(self):
        """Build the SurveyContent (questions) form fields.

        Populates self.survey_fields, which will be ordered in
        self.insertFields.  (Docstring fixed: this method takes no
        post_dict and does not populate self.data.)
        """
        if not self.survey_content:
            return
        self.survey_fields = {}
        schema = SurveyContentSchema(self.survey_content.schema)
        extra_attrs = {}
        # add unordered fields to self.survey_fields
        for field in self.survey_content.dynamic_properties():
            # use prompts set by survey creator
            value = getattr(self.survey_content, field)
            label = schema.getLabel(field)
            if label is None:
                # unknown field; getLabel already logged the error
                continue
            tip = schema.getTip(field)
            kwargs = schema.getEditFieldArgs(field, value, tip, label)
            kwargs['widget'] = schema.getEditWidget(field, extra_attrs, tip)
            # add new field (removed unused local `from_content`)
            self.survey_fields[field] = schema.getEditField(field)(**kwargs)
        # TODO(ajaksu): find a new way to keep fields in order
        return self.insertFields()

    def insertFields(self):
        """Add ordered fields to self.fields and return them."""
        survey_order = self.survey_content.getSurveyOrder()
        # insert dynamic survey fields at their stored positions
        for position, property in survey_order.items():
            self.fields.insert(position, property, self.survey_fields[property])
        return self.fields

    class Meta(object):
        # schema is managed by the view code, not edited directly
        model = SurveyContent
        exclude = ['schema']
class SurveyContentSchema(object):
    """Abstract question metadata handling.

    Wraps the schema dict stored (as its text repr) on a SurveyContent
    entity and answers per-question metadata queries.
    """

    def __init__(self, schema):
        """Set the dictionary that this class encapsulates.

        Args:
          schema: schema as stored in SurveyConent entity
        """
        # SECURITY NOTE(review): eval() on stored text executes arbitrary
        # code if the schema string can ever be attacker-controlled;
        # consider ast.literal_eval or JSON instead.
        self.schema = eval(schema)

    def getType(self, field):
        """Fetch question type for field e.g. short_answer, pick_multi, etc.

        Args:
          field: name of the field to get the type for
        """
        return self.schema[field]["type"]

    def getTip(self, field):
        """Fetch question help text, used for tooltips.

        Args:
          field: name of the field to get the tooltip for
        """
        return self.schema[field].get('tip', '')

    def getRequired(self, field):
        """Check whether survey question is required.

        Args:
          field: name of the field to check the required property for
        """
        return self.schema[field]["required"]

    def getHasComment(self, field):
        """Check whether survey question allows adding a comment.

        Args:
          field: name of the field to get the hasComment property for
        """
        return self.schema[field]["has_comment"]

    def getRender(self, field):
        """Get rendering options for choice questions.

        Args:
          field: name of the field to get the rendering option for
        """
        return self.schema[field]["render"]

    def getEditField(self, field):
        """For a given question kind, get the correct edit view field."""
        kind = self.getType(field)
        if kind in CHOICE_TYPES:
            Field = PickOneField
        else:
            Field = CharField
        return Field

    def getEditFieldArgs(self, field, value, tip, label):
        """Build edit view field arguments.

        params:
          field: field name
          value: field value (text for text questions, list for choice questions)
          tip: help text, to be used in a tooltip
          label: the field's question (or identifier if question is missing)
        """
        kind = self.getType(field)
        kwargs = dict(help_text=tip, required=False, label=label)
        if kind in CHOICE_TYPES:
            kwargs['choices'] = tuple([(val, val) for val in value])
        else:
            # NOTE(review): initial is set to the tooltip, not to `value`
            # (the question text passed by the caller) — looks suspicious;
            # confirm whether this should be `value`.
            kwargs['initial'] = tip
        return kwargs

    def getEditWidget(self, field, attrs, tip):
        """Get survey editing widget for questions."""
        kind = self.getType(field)
        is_required = self.getRequired(field)
        has_comment = self.getHasComment(field)
        if kind in CHOICE_TYPES:
            widget = UniversalChoiceEditor
            render = self.getRender(field)
            args = kind, render, is_required, has_comment, tip
        else:
            args = is_required, has_comment
            # NOTE(review): a kind that is neither a choice type nor
            # long/short answer leaves `widget` unbound -> NameError.
            if kind == 'long_answer':
                widget = LongTextarea
            elif kind == 'short_answer':
                widget = ShortTextInput
        # copy so the caller's shared attrs dict is not mutated
        attrs = attrs.copy()
        attrs['class'] = kind
        kwargs = dict(attrs=attrs)
        return widget(*args, **kwargs)

    def getLabel(self, field):
        """Fetch the free text 'question' or use field name as label."""
        if field not in self.schema:
            logging.error('field %s not found in schema %s' %
                          (field, str(self.schema)))
            return
        else:
            label = self.schema[field].get('question') or field
        return label
class UniversalChoiceEditor(widgets.Widget):
    """Edit interface for choice questions.

    Allows adding and removing options, re-ordering and editing option text.
    """

    def __init__(self, kind, render, is_required, has_comment, tip,
                 attrs=None, choices=()):
        """
        params:
          kind: question kind (one of selection, pick_multi or pick_quant)
          render: question widget (single_select, multi_checkbox or quant_radio)
          is_required: bool, controls selection in the required_for field
          has_comment: bool, controls selection in the has_comments field
        """
        self.attrs = attrs or {}
        # choices may be a one-shot iterable, but the widget can render
        # more than once, so materialize it into a list up front
        self.choices = list(choices)
        self.kind = kind
        self.render_as = render
        self.is_required = is_required
        self.has_comment = has_comment
        self.tooltip_content = tip or ''

    def render(self, name, value, attrs=None, choices=()):
        """Render UCE widget.

        Option reordering, editing, addition and deletion are added here.
        """
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        # multiplying the marker by a bool yields '' or the marker, so
        # exactly one entry per drop-down carries selected="selected"
        marker = 'selected="selected"'
        context = dict(
            name=name,
            is_selection=marker * (self.kind == 'selection'),
            is_pick_multi=marker * (self.kind == 'pick_multi'),
            is_pick_quant=marker * (self.kind == 'pick_quant'),
            is_select=marker * (self.render_as == 'single_select'),
            is_checkboxes=marker * (self.render_as == 'multi_checkbox'),
            is_radio_buttons=marker * (self.render_as == 'quant_radio'),
            # required and has_comment selects
            is_required=self.is_required,
            has_comment=self.has_comment,
        )
        str_value = forms.util.smart_unicode(value)  # normalize to string.
        indexed_options = {}
        for idx, (option_value, _option_label) in enumerate(
                chain(self.choices, choices)):
            indexed_options[idx] = escape(forms.util.smart_unicode(option_value))
        context['choices'] = indexed_options
        context['tooltip_content'] = escape(
            forms.util.smart_unicode(self.tooltip_content))
        return loader.render_to_string(
            'soc/survey/universal_choice_editor.html', context)
class PickOneField(forms.ChoiceField):
    """Single-choice question field (stub kept for later customization)."""
    # TODO(james): Ensure that more than one option cannot be selected

    def __init__(self, *args, **kwargs):
        super(PickOneField, self).__init__(*args, **kwargs)
class PickManyField(forms.MultipleChoiceField):
    """Multiple-choice question field (stub kept for later customization)."""

    def __init__(self, *args, **kwargs):
        super(PickManyField, self).__init__(*args, **kwargs)
class PickQuantField(forms.ChoiceField):
    """Quantified-choice question field (stub kept for later customization)."""

    def __init__(self, *args, **kwargs):
        super(PickQuantField, self).__init__(*args, **kwargs)
class LongTextarea(widgets.Textarea):
    """Textarea that also renders the 'required'/'has_comment' selectors."""

    def __init__(self, is_required, has_comment, attrs=None):
        """Initialize widget and store editing mode.

        params:
          is_required: bool, preselects the 'required' extra field
          has_comment: bool, preselects the 'has_comment' extra field
        """
        self.is_required = is_required
        self.has_comment = has_comment
        super(LongTextarea, self).__init__(attrs)

    def render(self, name, value, attrs=None):
        """Render the textarea preceded by the extra fields, in a fieldset.

        Extra fields are 'required' and 'has_comment'.
        """
        base = super(LongTextarea, self).render(name, value, attrs)
        extras = loader.get_template_from_string(REQUIRED_COMMENT_TPL).render(
            context=loader.Context(
                dict_=dict(name=name, is_required=self.is_required,
                           has_comment=self.has_comment)))
        return '<fieldset>' + extras + base + '</fieldset>'
class ShortTextInput(widgets.TextInput):
    """Text input that also renders the 'required'/'has_comment' selectors."""

    def __init__(self, is_required, has_comment, attrs=None):
        """Initialize widget and store editing mode.

        params:
          is_required: bool, preselects the 'required' extra field
          has_comment: bool, preselects the 'has_comment' extra field
        """
        self.is_required = is_required
        self.has_comment = has_comment
        super(ShortTextInput, self).__init__(attrs)

    def render(self, name, value, attrs=None):
        """Render the text input preceded by the extra fields, in a fieldset.

        Extra fields are 'required' and 'has_comment'.
        """
        base = super(ShortTextInput, self).render(name, value, attrs)
        extras = loader.get_template_from_string(REQUIRED_COMMENT_TPL).render(
            context=loader.Context(
                dict_=dict(name=name, is_required=self.is_required,
                           has_comment=self.has_comment)))
        return '<fieldset>' + extras + base + '</fieldset>'
class PickOneSelect(forms.Select):
    """Single-choice select widget (stub kept for later customization)."""

    def __init__(self, *args, **kwargs):
        super(PickOneSelect, self).__init__(*args, **kwargs)
class PickManyCheckbox(forms.CheckboxSelectMultiple):
    """Customized multiple choice checkbox widget."""

    def __init__(self, *args, **kwargs):
        super(PickManyCheckbox, self).__init__(*args, **kwargs)

    def render(self, name, value, attrs=None, choices=()):
        """Render checkboxes as list items grouped in a fieldset.

        This is the pick_multi widget for survey taking.
        """
        if value is None:
            value = []
        # 'in' replaces dict.has_key (deprecated, removed in Python 3);
        # behavior is identical
        has_id = attrs and 'id' in attrs
        final_attrs = self.build_attrs(attrs, name=name)
        # normalize to strings.
        str_values = set([forms.util.smart_unicode(v) for v in value])
        is_checked = lambda value: value in str_values
        smart_unicode = forms.util.smart_unicode
        # set container fieldset and list
        output = [u'<fieldset id="id_%s">\n <ul class="pick_multi">' % name]
        # add numbered checkboxes wrapped in list items
        chained_choices = enumerate(chain(self.choices, choices))
        for i, (option_value, option_label) in chained_choices:
            option_label = escape(smart_unicode(option_label))
            # If an ID attribute was given, add a numeric index as a suffix,
            # so that the checkboxes don't all have the same ID attribute.
            if has_id:
                final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
            cb = widgets.CheckboxInput(final_attrs, check_test=is_checked)
            rendered_cb = cb.render(name, option_value)
            cb_label = (rendered_cb, option_label)
            output.append(u' <li><label>%s %s</label></li>' % cb_label)
        output.append(u' </ul>\n</fieldset>')
        return u'\n'.join(output)

    def id_for_label(self, id_):
        # see the comment for RadioSelect.id_for_label()
        if id_:
            id_ += '_fieldset'
        return id_
    id_for_label = classmethod(id_for_label)
class PickQuantRadioRenderer(widgets.RadioFieldRenderer):
    """Used by PickQuantRadio to enable customization of radio widgets."""

    def __init__(self, *args, **kwargs):
        super(PickQuantRadioRenderer, self).__init__(*args, **kwargs)

    def render(self):
        """Outputs set of radio fields in a div."""
        from django.utils.html import linebreaks
        joined = u'\n'.join(u'%s' % force_unicode(widget) for widget in self)
        return linebreaks(u'<div class="quant_radio">%s</div>' % joined)
class PickQuantRadio(forms.RadioSelect):
    """Radio-button widget for pick_quant questions."""
    # TODO(James,Ajaksu) Fix Docstring

    renderer = PickQuantRadioRenderer

    def __init__(self, *args, **kwargs):
        super(PickQuantRadio, self).__init__(*args, **kwargs)
# in the future, we'll have more widget types here
# maps a schema 'render' value to the widget used when taking the survey
WIDGETS = {'multi_checkbox': PickManyCheckbox,
           'single_select': PickOneSelect,
           'quant_radio': PickQuantRadio}
class HelperForm(object):
    """Thin wrapper for adding values to params['edit_form'].fields."""

    def __init__(self, form=None):
        """Store the edit_form."""
        self.form = form

    def __call__(self, instance=None):
        """Transparently instantiate and add initial values to the edit_form."""
        bound = self.form(instance=instance)
        # seed the read-only bookkeeping fields from the instance
        initial_values = {
            'created_by': instance.author.name,
            'last_modified_by': instance.modified_by.name,
            'doc_key_name': instance.key().id_or_name(),
        }
        for field_name, initial in initial_values.items():
            bound.fields[field_name].initial = initial
        return bound
def getCSVHeader(survey_entity):
    """CSV header helper, needs support for comment lines in CSV.

    Builds the '#'-prefixed comment preamble of a survey CSV export:
    static properties, linked entities, dynamic properties, then schema.

    Args:
      survey_entity: Survey entity
    """
    tpl = '# %s: %s\n'
    # add static properties
    fields = ['# Melange Survey export for \n# %s\n#\n' % survey_entity.title]
    fields += [tpl % (k, v) for k, v in survey_entity.toDict().items()]
    fields += [tpl % (f, str(getattr(survey_entity, f))) for f in PLAIN.split()]
    fields += [tpl % (f, str(getattr(survey_entity, f).link_id))
               for f in FIELDS.split()]
    # NOTE(review): this sorts the title banner together with the property
    # lines (all start with '# '); confirm the resulting order is intended.
    fields.sort()
    # add dynamic properties
    fields += ['#\n#---\n#\n']
    dynamic = survey_entity.survey_content.dynamic_properties()
    dynamic = [(prop, getattr(survey_entity.survey_content, prop))
               for prop in dynamic]
    fields += [tpl % (k, v) for k, v in sorted(dynamic)]
    # add schema
    fields += ['#\n#---\n#\n']
    schema = survey_entity.survey_content.schema
    # break the schema repr so each '},' starts a new comment line
    indent = '},\n#' + ' ' * 9
    fields += [tpl % ('Schema', schema.replace('},', indent)) + '#\n']
    # CRLF endings for spreadsheet compatibility
    return ''.join(fields).replace('\n', '\r\n')
def getRecords(recs, props):
    """Fetch properties from SurveyRecords for CSV export.

    The first entry of props is the user column, which is taken from
    rec.user.link_id rather than via getattr; missing properties become
    None.
    """
    attr_names = props[1:]
    rows = []
    for rec in recs:
        row = (rec.user.link_id,) + tuple(
            getattr(rec, name, None) for name in attr_names)
        rows.append(row)
    return rows
def toCSV(survey_view):
    """CSV exporter.

    Args:
      survey_view: instance of the SurveyView

    Returns a wrapper(survey) callable that produces
    (csv_text, survey.link_id).
    """
    def wrapper(survey):
        """Wrapper function."""
        survey_logic = survey_view.getParams()['logic']
        record_logic = survey_logic.getRecordLogic()
        # get header and properties
        header = getCSVHeader(survey)
        leading = ['user', 'created', 'modified']
        properties = leading + survey.survey_content.orderedProperties()
        # retrieve the query of the data to export
        fields = {'survey': survey}
        record_query = record_logic.getQueryForFields(fields)
        try:
            # Python 2 iterator protocol; probes whether any record exists
            first = record_query.run().next()
        except StopIteration:
            # bail out early if survey_records.run() is empty
            return header, survey.link_id
        # generate results list (fresh run(); the probe consumed one item)
        recs = record_query.run()
        recs = getRecords(recs, properties)
        # write results to CSV
        output = StringIO.StringIO()
        writer = csv.writer(output)
        writer.writerow(properties)
        writer.writerows(recs)
        return header + output.getvalue(), survey.link_id
    return wrapper
|
#!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon13.py measures the network traffic.
# These are all counters, therefore no averaging is needed.
import os, sys, time, math, commands
from libdaemon import Daemon
class MyDaemon(Daemon):
    # Daemon main loop: sample the network counters once per minute and
    # append the result to the report file.
    def run(self):
        sampleptr = 0
        samples = 1          # report after this many samples
        datapoints = 6       # number of values do_work() returns
        sampleTime = 60      # seconds between samples
        cycleTime = samples * sampleTime
        # sync to whole minute
        waitTime = (cycleTime + sampleTime) - (time.time() % cycleTime)
        time.sleep(waitTime)
        while True:
            startTime = time.time()
            result = do_work().split(',')
            data = map(int, result)  # Python 2: map returns a list here
            sampleptr = sampleptr + 1
            if (sampleptr == samples):
                do_report(data)
                sampleptr = 0
            # sleep for the remainder of the minute, compensating for the
            # time spent working
            waitTime = sampleTime - (time.time() - startTime) - (startTime % sampleTime)
            if (waitTime > 0):
                time.sleep(waitTime)
def do_work(raw=None):
    """Collect per-interface network traffic counters.

    Reads /proc/net/dev (or parses `raw` when given, which makes the
    function testable) and returns the cumulative in/out byte counters of
    lo, eth0 and wlan0/wlan1 as the comma-separated string
    'loIn, loOut, etIn, etOut, wlIn, wlOut'.

    params:
      raw: optional text in /proc/net/dev format; when None the file is
           read via the Python 2 `commands` module, as before.
    """
    wlIn = 0
    wlOut = 0
    etIn = 0
    etOut = 0
    loIn = 0
    loOut = 0
    if raw is None:
        import commands  # Python 2 stdlib; only needed on the live path
        raw = commands.getoutput("cat /proc/net/dev")
    # BUG FIX: on busy interfaces /proc/net/dev renders e.g.
    # 'eth0:4509833 ...' with no space after the colon, so splitting on
    # whitespace fused the device name and the first counter and the data
    # came out invalid (usually zero).  Replacing ':' with ' ' first makes
    # the columns split reliably; device names then carry no colon.
    lines = raw.replace(":", " ").splitlines()
    for line in lines[2:]:  # the first two lines are column headers
        parts = line.split()
        if not parts:
            continue
        device = parts[0]
        if device == "lo":
            loIn = parts[1]
            loOut = parts[9]
        if device == "eth0":
            etIn = parts[1]
            etOut = parts[9]
        if device == "wlan0":
            wlIn = parts[1]
            wlOut = parts[9]
        if device == "wlan1":
            wlIn = parts[1]
            wlOut = parts[9]
    return '{0}, {1}, {2}, {3}, {4}, {5}'.format(loIn, loOut, etIn, etOut, wlIn, wlOut)
def do_report(result):
    # Append one CSV line '<date>, <epoch>, <counters...>' to the logfile,
    # guarded by a simple lockfile while writing.
    # Get the time and date in human-readable form and UN*X-epoch...
    outDate = commands.getoutput("date '+%F %H:%M:%S, %s'")
    result = ', '.join(map(str, result))
    flock = '/tmp/raspdiagd-13.lock'
    lock(flock)
    # Python 2 builtin file(); append mode so history accumulates
    f = file('/tmp/13-nettraffic.csv', 'a')
    f.write('{0}, {1}\n'.format(outDate, result) )
    f.close()
    unlock(flock)
    return
def lock(fname):
    """Create the lockfile (touch); append mode never truncates."""
    with open(fname, 'a'):
        pass
def unlock(fname):
    """Remove the lockfile; a missing file is not an error."""
    if not os.path.isfile(fname):
        return
    os.remove(fname)
if __name__ == "__main__":
    # CLI dispatch (Python 2): start|stop|restart go through the daemon
    # machinery from libdaemon; 'foreground' runs the loop in this process.
    daemon = MyDaemon('/tmp/raspdiagd-13.pid')
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            daemon.start()
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart()
        elif 'foreground' == sys.argv[1]:
            # assist with debugging.
            print "Debug-mode started. Use <Ctrl>+C to stop."
            daemon.run()
        else:
            print "Unknown command"
            sys.exit(2)
        sys.exit(0)
    else:
        print "usage: %s start|stop|restart|foreground" % sys.argv[0]
        sys.exit(2)
The first number on each line of /proc/net/dev may touch the device-id:
e.g. `eth0:4509833 …` will not be split properly on whitespace, resulting
in invalid data (usually zero).
#!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon13.py measures the network traffic.
# These are all counters, therefore no averaging is needed.
import os, sys, time, math, commands
from libdaemon import Daemon
class MyDaemon(Daemon):
    # Daemon main loop: sample the network counters once per minute and
    # append the result to the report file.
    def run(self):
        sampleptr = 0
        samples = 1          # report after this many samples
        datapoints = 6       # number of values do_work() returns
        sampleTime = 60      # seconds between samples
        cycleTime = samples * sampleTime
        # sync to whole minute
        waitTime = (cycleTime + sampleTime) - (time.time() % cycleTime)
        time.sleep(waitTime)
        while True:
            startTime = time.time()
            result = do_work().split(',')
            data = map(int, result)  # Python 2: map returns a list here
            sampleptr = sampleptr + 1
            if (sampleptr == samples):
                do_report(data)
                sampleptr = 0
            # sleep for the remainder of the minute, compensating for the
            # time spent working
            waitTime = sampleTime - (time.time() - startTime) - (startTime % sampleTime)
            if (waitTime > 0):
                time.sleep(waitTime)
def do_work(raw=None):
    """Collect per-interface network traffic counters.

    Reads /proc/net/dev (or parses `raw` when given, which makes the
    function testable).  The wlan0 and wlan1 counters are accumulated into
    a single wireless total.  Returns the comma-separated string
    'loIn, loOut, etIn, etOut, wlIn, wlOut'.

    params:
      raw: optional text in /proc/net/dev format; when None the file is
           read via the Python 2 `commands` module, as before.
    """
    wlIn = 0
    wlOut = 0
    etIn = 0
    etOut = 0
    loIn = 0
    loOut = 0
    if raw is None:
        import commands  # Python 2 stdlib; only needed on the live path
        raw = commands.getoutput("cat /proc/net/dev")
    # ':' -> ' ' so fused 'eth0:4509833' columns split reliably
    lines = raw.replace(":", " ").splitlines()
    for line in lines[2:]:  # the first two lines are column headers
        parts = line.split()
        if not parts:
            continue
        device = parts[0]
        if device == "lo":
            loIn = int(parts[1])
            loOut = int(parts[9])
        if device == "eth0":
            etIn = int(parts[1])
            etOut = int(parts[9])
        # BUG FIX: the counters come out of split() as strings, so the
        # original `wlIn += parts[1]` either raised TypeError (int + str)
        # or concatenated the digit strings.  Convert to int before
        # accumulating wlan0 + wlan1.
        if device in ("wlan0", "wlan1"):
            wlIn += int(parts[1])
            wlOut += int(parts[9])
    return '{0}, {1}, {2}, {3}, {4}, {5}'.format(loIn, loOut, etIn, etOut, wlIn, wlOut)
def do_report(result):
    # Append one CSV line '<date>, <epoch>, <counters...>' to the logfile,
    # guarded by a simple lockfile while writing.
    # Get the time and date in human-readable form and UN*X-epoch...
    outDate = commands.getoutput("date '+%F %H:%M:%S, %s'")
    result = ', '.join(map(str, result))
    flock = '/tmp/raspdiagd-13.lock'
    lock(flock)
    # Python 2 builtin file(); append mode so history accumulates
    f = file('/tmp/13-nettraffic.csv', 'a')
    f.write('{0}, {1}\n'.format(outDate, result) )
    f.close()
    unlock(flock)
    return
def lock(fname):
    """Touch the lockfile into existence (append mode never truncates)."""
    with open(fname, 'a'):
        pass
def unlock(fname):
    """Delete the lockfile when it exists; otherwise do nothing."""
    if not os.path.isfile(fname):
        return
    os.remove(fname)
if __name__ == "__main__":
    # CLI dispatch (Python 2): start|stop|restart go through the daemon
    # machinery from libdaemon; 'foreground' runs the loop in this process.
    daemon = MyDaemon('/tmp/raspdiagd-13.pid')
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            daemon.start()
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart()
        elif 'foreground' == sys.argv[1]:
            # assist with debugging.
            print "Debug-mode started. Use <Ctrl>+C to stop."
            daemon.run()
        else:
            print "Unknown command"
            sys.exit(2)
        sys.exit(0)
    else:
        print "usage: %s start|stop|restart|foreground" % sys.argv[0]
        sys.exit(2)
|
##############################################################################
#
# Copyright (C) 2015 Comunitea Servicios Tecnológicos All Rights Reserved
# $Omar Castiñeira Saavedra <omar@comunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, fields, api, exceptions
from datetime import datetime
from odoo.exceptions import except_orm
from odoo.tools.translate import translate, _
class CrmClaimRma(models.Model):
    """RMA customizations on crm.claim; newest claims are listed first."""
    _inherit = 'crm.claim'
    _order = 'id desc'
@api.multi
def _has_category(self, expected_category):
    """Set bool_category_id: True when any of the partner's categories
    has expected_category as its parent."""
    self.bool_category_id = any(
        category.parent_id.id == expected_category.id
        for category in self.category_id)
@api.multi
def _check_category_id(self):
    """Compute bool_category_id for each claim against the 'Certificado'
    partner category."""
    certificado = self.env['res.partner.category'].search(
        [('name', '=', 'Certificado')])
    for claim in self:
        claim._has_category(certificado)
# --- field declarations -------------------------------------------------
name = fields.Selection([('return', 'Return'),
                         ('rma', 'RMA')], 'Claim Subject',
                        required=True, default='rma')
priority = fields.Selection(default='1', required=True, selection=[('1', 'No priority'),
                                                                   ('2', 'High'),
                                                                   ('3', 'Critical')])
# salesman and country are copied from the partner in onchange_partner_id
comercial = fields.Many2one("res.users", string="Comercial")
country = fields.Many2one("res.country", string="Country")
date = fields.Date('Claim Date', index=False,
                   default=fields.Date.context_today)
write_date = fields.Datetime("Update date", readonly=True)
date_received = fields.Date('Received Date')
category_id = fields.Many2many(related='partner_id.category_id', readonly=True)
# computed by _check_category_id: partner is in the 'Certificado' category
bool_category_id = fields.Boolean(string="Category", compute=_check_category_id)
aditional_notes = fields.Text("Aditional Notes")
claim_inv_line_ids = fields.One2many("claim.invoice.line", "claim_id")
allow_confirm_blocked = fields.Boolean('Allow confirm', copy=False)
transport_incidence = fields.Boolean('Transport incidence')
t_incidence_picking = fields.Char('Trp. inc. picking')
warehouse_location = fields.Selection([('madrid1', 'Madrid - Vicálvaro'),
                                       ('italia', 'Italia - Arcore'),
                                       ('transit', 'In transit')], "Warehouse Location")
client_ref = fields.Char('Client Ref')
warehouse_date = fields.Date('Final Received Date')
deposit_id = fields.Many2many('stock.picking', string='Deposit')
# substates that block moving a claim to the repaired stage (see write())
check_states = ['substate_received', 'substate_process', 'substate_due_receive']
@api.multi
def write(self, vals):
    """Guard stage transitions and notify the customer on reception.

    - Moving to the 'Recibido' stage requires a warehouse_location.
    - Moving to the repaired / pending-shipping stages requires every
      claim line's substate to have progressed accordingly.
    - On reception, e-mails the partner when it has an email3 address.
    """
    if 'stage_id' in vals:
        stage_ids = []
        stage_repaired_id = self.env.ref('crm_claim.stage_claim2').id
        stage_ids.append(stage_repaired_id)
        stage_pending_shipping_id = self.env.ref('crm_claim_rma_custom.stage_claim6').id
        stage_ids.append(stage_pending_shipping_id)
        # NOTE(review): stage lookup by the (translated) name 'Recibido'
        # is fragile if stages are renamed — confirm.
        stage_received_id = self.env['crm.claim.stage'].search([('name', '=', 'Recibido')]).id
        if vals['stage_id'] == stage_received_id and \
                not (self.warehouse_location or vals.get('warehouse_location', False)):
            raise exceptions.UserError(_('Please, select the warehouse location of the RMA'))
        if vals['stage_id'] in stage_ids:
            for line in self.claim_line_ids:
                # resolve the xml-id record of the line's substate so its
                # symbolic name can be compared against check_states
                line_state = self.env['ir.model.data'].search([('model', '=', 'substate.substate'),
                                                               ('module', '=', 'crm_claim_rma_custom'),
                                                               ('res_id', '=', line.substate_id.id)])
                if vals['stage_id'] == stage_repaired_id:
                    if line_state.name in self.check_states:
                        raise except_orm(_('Warning!'),
                                         _("One or more products aren't review yet!"))
                else:
                    if line_state.name != 'substate_pending_shipping':
                        raise except_orm(_('Warning!'),
                                         _("One or more products aren't pending shipping yet!"))
        if vals['stage_id'] == stage_received_id and self.partner_id.email3:
            # reception notification, rendered in the partner's language
            email_body = self.with_context(lang=self.partner_id.commercial_partner_id.lang)._("<p>Dear Customer,</p> " \
                "<p>We inform you that we have received the products corresponding to %s.</p>" \
                "<p>We will start the procedure as soon as possible.</p> " \
                "<p>Sincerely,</p>" \
                "<p>VISIOTECH</p>") % self.number
            picking_template = self.env.ref('crm_claim_rma_custom.rma_received_template')
            picking_template.with_context(lang=self.partner_id.commercial_partner_id.lang,
                                          email_rma_body=email_body).send_mail(self.id)
    return super(CrmClaimRma, self).write(vals)
def _(self, src):
    # Wrapper so code can call self._(src) (e.g. after with_context in
    # write()); the bare `_` in the body resolves to the module-level
    # odoo translate helper imported above, not to this method.
    # NOTE(review): confirm the context language set on self is actually
    # picked up by the translation machinery here.
    return _(src)
@api.model
def create(self, vals):
    """Normalize the incoming claim subject to its first
    space-separated token before creating the record."""
    subject = vals.get('name')
    if subject:
        vals['name'] = subject.split(' ')[0]
    return super(CrmClaimRma, self).create(vals)
@api.model
def _get_sequence_number(self):
    """Return the next claim number, using the supplier sequence when the
    context carries the supplier claim type; '/' as fallback."""
    sequences = self.env['ir.sequence']
    supplier_type = self.env.ref('crm_claim_type.crm_claim_type_supplier').id
    code = ('crm.claim.rma.supplier'
            if self.env.context.get('claim_type') == supplier_type
            else 'crm.claim.rma')
    return sequences.get(code) or '/'
@api.multi
def calculate_invoices(self):
    """
    Calculate invoices using data "Product Return of SAT"

    Rebuilds the claim.invoice.line records of each claim from its claim
    lines, tracking per invoice/product how many units are available, and
    raises when quantities are exceeded or a claim line has no matching
    invoice line.
    """
    for claim_obj in self:
        claim_inv_line_obj = self.env['claim.invoice.line']
        # drop previously computed lines that were never invoiced
        for invoice_line in claim_obj.claim_inv_line_ids:
            if not invoice_line.invoiced:
                invoice_line.unlink()
        # invoce_product_ids: {invoice id: {product id: invoiced qty}}
        invoce_product_ids = {}
        for c_line in claim_obj.claim_line_ids:
            if c_line.invoice_id.id not in invoce_product_ids:
                for line in c_line.invoice_id.invoice_line_ids:
                    found = False
                    if line.invoice_id.id in invoce_product_ids:
                        for k, v in invoce_product_ids.items():
                            if line.product_id.id in v.keys() and line.invoice_id.id == k:
                                # same product on several invoice lines:
                                # accumulate the quantities
                                invoce_product_ids[line.invoice_id.id][line.product_id.id] += line.quantity
                                found = True
                                break
                        if not found:
                            invoce_product_ids[line.invoice_id.id][line.product_id.id] = line.quantity
                    else:
                        invoce_product_ids[line.invoice_id.id] = {line.product_id.id: line.quantity}
        message = ""
        for claim_line in claim_obj.claim_line_ids:
            vals = {}
            taxes_ids = []
            if claim_line.invoice_id:
                # skip claim lines that already have an invoice line
                claim_inv_lines = claim_inv_line_obj.search([('claim_line_id', '=', claim_line.id)])
                if claim_inv_lines:
                    continue
                for inv_line in claim_line.invoice_id.invoice_line_ids:
                    if inv_line.product_id == claim_line.product_id:
                        if inv_line.invoice_line_tax_ids:
                            taxes_ids = \
                                [x.id for x in inv_line.invoice_line_tax_ids]
                        vals = {
                            'invoice_id': inv_line.invoice_id.id,
                            'claim_id': claim_line.claim_id.id,
                            'claim_number': claim_line.claim_id.number,
                            'claim_line_id': claim_line.id,
                            'product_id': inv_line.product_id.id,
                            'product_description': inv_line.product_id.name,
                            'discount': inv_line.discount,
                            'qty': claim_line.product_returned_quantity,
                            'price_unit': inv_line.price_unit,
                            'cost_unit': inv_line.product_id.standard_price,
                            'tax_ids': [(6, 0, taxes_ids)]
                        }
                        # not enough units left on this invoice/product?
                        if invoce_product_ids[inv_line.invoice_id.id][inv_line.product_id.id] < inv_line.claim_invoice_line_qty + claim_line.product_returned_quantity:
                            units_available = invoce_product_ids[inv_line.invoice_id.id][inv_line.product_id.id] - inv_line.claim_invoice_line_qty
                            if units_available > 0:
                                message += _("There are not enough units of this product (%s) in this invoice (%s). Only %i unit(s) left available \n") % \
                                    (inv_line.product_id.default_code, inv_line.invoice_id.number, int(units_available))
                            else:
                                message += _("All units of this product (%s) included in the indicated invoice (%s) have already been paid \n") % (
                                    inv_line.product_id.default_code, inv_line.invoice_id.number)
                        break
                if not vals:
                    raise exceptions.Warning(
                        _("There is at least one line of the claim with \
an incorrect invoice"))
                if vals:
                    claim_inv_line_obj.create(vals)
        if message:
            # aggregate all quantity problems into one warning
            raise exceptions.Warning(message)
@api.onchange('partner_id')
def onchange_partner_id(self):
result = super().onchange_partner_id()
if self.partner_id:
self.delivery_address_id = self.partner_id
self.team_id = self.partner_id.team_id # Get team_id from res.partner
self.country = self.partner_id.country_id # Get country_id from res.partner
if self.partner_id.user_id:
self.comercial = self.partner_id.user_id.id
if self.partner_id.rma_warn_msg:
self.description = self.partner_id.rma_warn_msg
return result
@api.onchange('name')
def onchange_name(self):
if self.name == 'return':
self.invoice_type = 'refund'
elif self.name == 'rma':
self.invoice_type = 'invoice'
@api.multi
def make_refund_invoice(self):
for claim_obj in self:
invoice = False
invoice_name = set()
for line in sorted(claim_obj.claim_inv_line_ids, key=lambda d: d.sequence):
if not line.invoiced:
if line.invoice_id.name:
invoice_name.add(line.invoice_id.name)
invoice = True
if not invoice:
raise exceptions.Warning(_("Any line to invoice"))
description = ' '.join(invoice_name)
# TODO-> Revisar: antes sale_refund
domain_journal = [('type', '=', 'sale')]
acc_journal_obj = self.env['account.journal']
acc_journal_ids = acc_journal_obj.search(domain_journal)
reference = claim_obj.client_ref or description
header_vals = {
'partner_id': claim_obj.partner_id.id,
'fiscal_position_id':
claim_obj.partner_id.property_account_position_id.id,
'date_invoice': datetime.now().strftime('%Y-%m-%d'),
'journal_id': acc_journal_ids[0].id,
'account_id':
claim_obj.partner_id.property_account_receivable_id.id,
'currency_id':
claim_obj.partner_id.property_product_pricelist.currency_id.id,
'company_id': claim_obj.company_id.id,
'user_id': self.env.user.id,
'team_id': claim_obj.partner_id.team_id.id,
'claim_id': claim_obj.id,
'type': 'out_refund',
'payment_term_id': False,
# Pago inmediato en rectificativas claim_obj.partner_id.property_payment_term_id.id,
'payment_mode_id':
claim_obj.partner_id.customer_payment_mode_id.id,
'mandate_id': claim_obj.partner_id.valid_mandate_id.id,
'name': reference,
'partner_shipping_id': claim_obj.delivery_address_id.id
}
if claim_obj.picking_ids:
header_vals['picking_ids'] = [(6, 0, [claim_obj.picking_ids[-1].id])]
inv_obj = self.env['account.invoice']
invoice_id = inv_obj.create(header_vals)
fp_obj = self.env['account.fiscal.position']
for line in sorted(claim_obj.claim_inv_line_ids, key=lambda d: d.sequence):
if line.invoiced:
continue
if line.product_id:
account_id = line.product_id.property_account_income_id.id
if not account_id:
account_id = \
line.product_id.categ_id. \
property_account_income_categ_id.id
else:
account_id = line.product_id. \
property_account_expense_id.id
if not account_id:
account_id = \
line.product_id.categ_id. \
property_account_expense_categ_id.id
else:
prop = self.env['ir.property'].get('property_account_income_categ_id', 'product.category')
account_id = prop and prop.id or False
account_id = fp_obj.map_account(account_id)
vals = {
'invoice_id': invoice_id.id,
'name': line.product_description,
'product_id': line.product_id.id,
'account_id': account_id,
'quantity': line.qty,
'claim_line_id': line.claim_line_id.id,
'price_unit': line.price_unit,
'cost_unit': line.cost_unit,
'uos_id': line.product_id.uom_id.id,
'discount': line.discount,
'account_analytic_id': False
}
if line.tax_ids:
taxes_ids = fp_obj.map_tax(line.tax_ids)
vals['invoice_line_tax_ids'] = [(6, 0, taxes_ids.ids)]
line_obj = self.env['account.invoice.line']
line_obj.create(vals)
line.invoiced = True
invoice_id.compute_taxes()
invoice_id.action_invoice_open()
data_pool = self.env['ir.model.data']
action_id = data_pool.xmlid_to_res_id('crm_claim_rma.act_crm_claim_rma_refunds_out')
if action_id:
action = self.env.ref('crm_claim_rma.act_crm_claim_rma_refunds_out').read()[0]
action['domain'] = "[('id','in', [" + str(invoice_id.id) + "])]"
return action
@api.multi
def resequence(self):
for claim in self:
seq = 1
for line in claim.claim_line_ids:
line.sequence = seq
seq += 1
    @api.multi
    def check_discounts(self):
        """Warn before refunding invoices that contain discount lines.

        Scans the source invoices of every claim-invoice line; if any of
        them holds a 'Discount line' product, a confirmation wizard is
        shown (which will call back make_refund_invoice on confirm).
        Otherwise the refund is created straight away.
        """
        discount_product_list = []
        has_discount = False
        for claim_obj in self:
            for line in claim_obj.claim_inv_line_ids:
                for i_line_id in line.invoice_id.invoice_line_ids:
                    # Collect each affected invoice number only once.
                    if i_line_id.product_id.name == 'Discount line' and not line.invoice_id.number in discount_product_list:
                        has_discount = True
                        discount_product_list.append(line.invoice_id.number)
        if has_discount:
            # NOTE(review): uses self.id — assumes a single-record recordset
            # in practice; confirm callers never pass a multi-record self.
            return self.env['invoice.discount.wiz'].create({
                'origin_reference': '%s,%s' % ('crm.claim', self.id),
                'continue_method': 'make_refund_invoice',
                'message': _("This orders have discounts. Do you want to proceed anyways?: %s") % ', '.join(discount_product_list)
            }).action_show()
        else:
            self.make_refund_invoice()
class ClaimInvoiceLine(models.Model):
    """A claim line prepared for refunding.

    Bridges a ``claim.line`` and the invoice line it originated from,
    carrying the price/qty/tax data needed by
    ``crm.claim.make_refund_invoice`` to build the refund invoice.
    """
    _name = 'claim.invoice.line'
    _rec_name = 'product_description'
    _order = 'sequence,id'
    sequence = fields.Integer()
    claim_id = fields.Many2one('crm.claim', 'Claim')
    claim_number = fields.Char("Claim Number")
    claim_line_id = fields.Many2one('claim.line', 'Claim lne')
    product_id = fields.Many2one("product.product", "Product Code")
    product_description = fields.Char("Product Description", required=True)
    invoice_id = fields.Many2one("account.invoice", "Invoice")
    price_unit = fields.Float("Price Unit")
    cost_unit = fields.Float("Cost Unit")
    # Computed: qty * unit price minus discount percentage (see _get_subtotal).
    price_subtotal = fields.Float("Price Subtotal", compute="_get_subtotal",
                                  readonly=True)
    tax_ids = fields.Many2many("account.tax", "claim_line_tax",
                               "claimline_id", "tax_id", string="Taxes")
    discount = fields.Float("Discount")
    qty = fields.Float("Quantity", default="1")
    # Set by make_refund_invoice once the line has been billed.
    invoiced = fields.Boolean("Invoiced")

    @api.multi
    def _get_subtotal(self):
        """Compute price_subtotal = qty * price_unit * (1 - discount%)."""
        for claim_line in self:
            claim_line.price_subtotal = claim_line.qty * claim_line.price_unit * ((100.0 - claim_line.discount) / 100.0)

    @api.onchange("product_id", "invoice_id")
    def onchange_product_id(self):
        """Fill description, price and taxes when product/invoice changes.

        With an invoice selected, price and taxes come from the matching
        invoice line (Warning if the product is not on it); otherwise the
        partner's pricelist is used and taxes are mapped via the fiscal
        position.
        """
        if self.claim_id.partner_id:
            if self.product_id:
                taxes_ids = []
                if self.invoice_id:
                    # res['value'] = {'invoice_id': self.invoice_id.id}
                    any_line = False
                    for line in self.invoice_id.invoice_line_ids:
                        if not self.product_id == line.product_id:
                            any_line = False
                        else:
                            any_line = True
                            price = line.price_unit
                            taxes_ids = line.invoice_line_tax_ids
                            break
                    if not any_line:
                        raise exceptions.Warning(_('Selected product is not \
in the invoice'))
                else:
                    pricelist_obj = \
                        self.claim_id.partner_id.property_product_pricelist
                    price = pricelist_obj.price_get(self.product_id.id, 1.0)
                    if price:
                        price = price[pricelist_obj.id]
                self.product_description = self.product_id.name
                self.qty = 1.0
                self.price_unit = price
                self.price_subtotal = price
                self.discount = 0.0
                if taxes_ids:
                    self.tax_ids = taxes_ids
                else:
                    # No invoice taxes: map the product's default taxes
                    # through the partner's fiscal position.
                    fpos = self.claim_id.partner_id.property_account_position_id
                    self.tax_ids = fpos.map_tax(self.product_id.product_tmpl_id.taxes_id)
            else:
                # No product yet: refresh the subtotal from the values
                # currently entered on the line.
                self.price_subtotal = self.discount and \
                    self.qty * self.price_unit - (self.discount *
                                                  self.price_unit / 100) or \
                    self.qty * self.price_unit
        else:
            raise exceptions.Warning(_('Partner not selected'))

    @api.onchange("qty", "price_unit", "discount")
    def onchange_values(self):
        """Validate qty against the invoice and recompute the subtotal."""
        if self.product_id and self.invoice_id:
            for line in self.invoice_id.invoice_line_ids:
                if line.product_id == self.product_id:
                    if line.quantity < self.qty:
                        raise exceptions.Warning(_('Quantity cannot be bigger than the quantity specified on invoice'))
                    # 'not_id' in context excludes this very line from the
                    # already-claimed quantity (claim_invoice_line_qty).
                    if line.quantity < line.with_context({'not_id': self._origin.id}).claim_invoice_line_qty + self.qty:
                        units_available = line.quantity - line.with_context({'not_id': self._origin.id}).claim_invoice_line_qty
                        if units_available > 0:
                            raise exceptions.Warning(_("There are not enough units of this product (%s) in this invoice (%s). Only %i unit(s) left available \n") %
                                                     (line.product_id.default_code, line.invoice_id.number, int(units_available)))
                        raise exceptions.Warning(
                            _("All units of this product (%s) included in the indicated invoice (%s) have already been paid \n") % (
                                line.product_id.default_code, line.invoice_id.number))
        price_subtotal = self.qty * self.price_unit * ((100.0 - self.discount) / 100.0)
        self.price_subtotal = price_subtotal

    @api.multi
    def unlink(self):
        """Forbid deleting lines that were already refunded."""
        for line in self:
            if line.invoiced:
                raise exceptions.Warning(_("Cannot delete an invoiced line"))
        return super(ClaimInvoiceLine, self).unlink()
class CrmClaimLine(models.Model):
    """claim.line customizations: sequencing, substate tracking, helpers."""
    _inherit = 'claim.line'
    # NOTE(review): 'String=' (capital S) is not a recognized field kwarg —
    # should presumably be 'string='; kept as-is, confirm before changing.
    comercial = fields.Many2one("res.users", String="Comercial", related="claim_id.comercial")
    date_received = fields.Date(related="claim_id.date_received")
    name = fields.Char(required=False)
    invoice_id = fields.Many2one("account.invoice", string="Invoice")
    substate_id = fields. \
        Many2one(default=lambda self: self.env.ref('crm_claim_rma_custom.substate_due_receive').id)
    claim_name = fields.Selection(related='claim_id.name', readonly=True)
    sequence = fields.Integer()
    # NOTE(review): class-level mutable dict, apparently unused — shared
    # across all instances if ever written to; candidate for removal.
    res = {}

    @api.model
    def create(self, vals):
        """Assign the next sequence within the claim and a default substate."""
        sec_list = self.env['crm.claim'].browse(vals['claim_id']).claim_line_ids.mapped('sequence')
        if sec_list:
            vals['sequence'] = max(sec_list) + 1
        else:
            vals['sequence'] = 0
        if 'substate_id' not in vals.keys():
            vals['substate_id'] = self.env.ref(
                'crm_claim_rma_custom.substate_due_receive').id
        return super(CrmClaimLine, self).create(vals)

    @api.multi
    def write(self, vals):
        """Advance the substate when a repair/refund/replacement is linked."""
        if 'repair_id' in vals.keys():
            vals['substate_id'] = self.env.ref(
                'crm_claim_rma_custom.substate_repaired').id
        if 'refund_line_id' in vals.keys():
            vals['substate_id'] = self.env.ref(
                'crm_claim_rma_custom.substate_refund').id
        if 'equivalent_product_id' in vals.keys():
            vals['substate_id'] = self.env.ref(
                'crm_claim_rma_custom.substate_replaced').id
        return super(CrmClaimLine, self).write(vals)

    @api.multi
    def action_split(self):
        """Split a multi-unit line into one line per unit, then reload."""
        for line in self:
            if line.product_returned_quantity > 1:
                # Copy the line (qty 1) once per extra unit, keep one unit
                # on the original line.
                for x in range(1, int(line.product_returned_quantity)):
                    line.copy(default={'product_returned_quantity': 1.0})
                line.product_returned_quantity = 1
        return {'type': 'ir.actions.client', 'tag': 'reload'}

    @api.multi
    def create_repair(self):
        """Launch the claim.make.repair wizard for this single line."""
        self.ensure_one()
        wzd = self.env['claim.make.repair'].create({'line_id': self.id})
        res = wzd.make()
        return res

    @api.multi
    def unlink(self):
        """Delete the lines, then renumber the affected claims' lines."""
        claims = self.mapped('claim_id')
        super().unlink()
        if claims:
            claims.resequence()
        return True
[FIX] crm_claim_rma_custom: use uom_id (instead of uos_id) on RMA refund invoice lines
##############################################################################
#
# Copyright (C) 2015 Comunitea Servicios Tecnológicos All Rights Reserved
# $Omar Castiñeira Saavedra <omar@comunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, fields, api, exceptions
from datetime import datetime
from odoo.exceptions import except_orm
from odoo.tools.translate import translate, _
class CrmClaimRma(models.Model):
    """crm.claim customizations for the RMA workflow.

    Adds commercial/location metadata, stage-transition guards, refund
    invoice generation from ``claim.invoice.line`` records and customer
    notifications on reception.
    """
    _inherit = 'crm.claim'
    _order = 'id desc'

    @api.multi
    def _has_category(self, expected_category):
        # True when any of the partner's tags hangs under expected_category.
        has_category = False
        for category in self.category_id:
            if category.parent_id.id == expected_category.id:
                has_category = True
        self.bool_category_id = has_category

    @api.multi
    def _check_category_id(self):
        """Compute bool_category_id: partner tagged under 'Certificado'."""
        expected_category = self.env['res.partner.category'].search([('name', '=', 'Certificado')])
        for claim in self:
            claim._has_category(expected_category)

    name = fields.Selection([('return', 'Return'),
                             ('rma', 'RMA')], 'Claim Subject',
                            required=True, default='rma')
    priority = fields.Selection(default='1', required=True, selection=[('1', 'No priority'),
                                                                       ('2', 'High'),
                                                                       ('3', 'Critical')])
    comercial = fields.Many2one("res.users", string="Comercial")
    country = fields.Many2one("res.country", string="Country")
    date = fields.Date('Claim Date', index=False,
                       default=fields.Date.context_today)
    write_date = fields.Datetime("Update date", readonly=True)
    date_received = fields.Date('Received Date')
    category_id = fields.Many2many(related='partner_id.category_id', readonly=True)
    bool_category_id = fields.Boolean(string="Category", compute=_check_category_id)
    aditional_notes = fields.Text("Aditional Notes")
    claim_inv_line_ids = fields.One2many("claim.invoice.line", "claim_id")
    allow_confirm_blocked = fields.Boolean('Allow confirm', copy=False)
    transport_incidence = fields.Boolean('Transport incidence')
    t_incidence_picking = fields.Char('Trp. inc. picking')
    warehouse_location = fields.Selection([('madrid1', 'Madrid - Vicálvaro'),
                                           ('italia', 'Italia - Arcore'),
                                           ('transit', 'In transit')], "Warehouse Location")
    client_ref = fields.Char('Client Ref')
    warehouse_date = fields.Date('Final Received Date')
    deposit_id = fields.Many2many('stock.picking', string='Deposit')
    # Substates that mean a line has not been reviewed yet (used by write()).
    check_states = ['substate_received', 'substate_process', 'substate_due_receive']

    @api.multi
    def write(self, vals):
        """Guard stage transitions and notify the customer on reception.

        - Moving to 'Recibido' requires a warehouse location and, when the
          partner has an email3, sends a reception email.
        - Moving to the repaired / pending-shipping stages requires every
          line's substate to have progressed far enough.
        """
        if 'stage_id' in vals:
            stage_ids = []
            stage_repaired_id = self.env.ref('crm_claim.stage_claim2').id
            stage_ids.append(stage_repaired_id)
            stage_pending_shipping_id = self.env.ref('crm_claim_rma_custom.stage_claim6').id
            stage_ids.append(stage_pending_shipping_id)
            stage_received_id = self.env['crm.claim.stage'].search([('name', '=', 'Recibido')]).id
            if vals['stage_id'] == stage_received_id and \
                    not (self.warehouse_location or vals.get('warehouse_location', False)):
                raise exceptions.UserError(_('Please, select the warehouse location of the RMA'))
            if vals['stage_id'] in stage_ids:
                for line in self.claim_line_ids:
                    # Resolve the line substate back to its XML id name so it
                    # can be compared against check_states.
                    line_state = self.env['ir.model.data'].search([('model', '=', 'substate.substate'),
                                                                   ('module', '=', 'crm_claim_rma_custom'),
                                                                   ('res_id', '=', line.substate_id.id)])
                    if vals['stage_id'] == stage_repaired_id:
                        if line_state.name in self.check_states:
                            raise except_orm(_('Warning!'),
                                             _("One or more products aren't review yet!"))
                    else:
                        if line_state.name != 'substate_pending_shipping':
                            raise except_orm(_('Warning!'),
                                             _("One or more products aren't pending shipping yet!"))
            if vals['stage_id'] == stage_received_id and self.partner_id.email3:
                email_body = self.with_context(lang=self.partner_id.commercial_partner_id.lang)._("<p>Dear Customer,</p> " \
                    "<p>We inform you that we have received the products corresponding to %s.</p>" \
                    "<p>We will start the procedure as soon as possible.</p> " \
                    "<p>Sincerely,</p>" \
                    "<p>VISIOTECH</p>") % self.number
                picking_template = self.env.ref('crm_claim_rma_custom.rma_received_template')
                picking_template.with_context(lang=self.partner_id.commercial_partner_id.lang,
                                              email_rma_body=email_body).send_mail(self.id)
        return super(CrmClaimRma, self).write(vals)

    def _(self, src):
        # NOTE(review): thin wrapper so the email body above is translated
        # after with_context(lang=...) is applied — confirm this is needed.
        return _(src)

    @api.model
    def create(self, vals):
        """Keep only the first word of the claim subject on creation."""
        if vals.get('name', False):
            vals['name'] = vals['name'].split(' ')[0]
        return super(CrmClaimRma, self).create(vals)

    @api.model
    def _get_sequence_number(self):
        """Return the next RMA number, from the supplier or customer sequence
        depending on the claim_type in context."""
        seq_obj = self.env['ir.sequence']
        supplier_type = self.env.ref('crm_claim_type.crm_claim_type_supplier').id
        if 'claim_type' in self.env.context and self.env.context['claim_type'] == supplier_type:
            res = seq_obj.get('crm.claim.rma.supplier') or '/'
        else:
            res = seq_obj.get('crm.claim.rma') or '/'
        return res

    @api.multi
    def calculate_invoices(self):
        """
        Calculate invoices using data "Product Return of SAT"
        """
        for claim_obj in self:
            claim_inv_line_obj = self.env['claim.invoice.line']
            # Drop previously generated, still-uninvoiced lines so they can
            # be rebuilt from the current claim lines.
            for invoice_line in claim_obj.claim_inv_line_ids:
                if not invoice_line.invoiced:
                    invoice_line.unlink()
            # invoce_product_ids: {invoice_id: {product_id: total invoiced qty}}
            invoce_product_ids = {}
            for c_line in claim_obj.claim_line_ids:
                if c_line.invoice_id.id not in invoce_product_ids:
                    for line in c_line.invoice_id.invoice_line_ids:
                        found = False
                        if line.invoice_id.id in invoce_product_ids:
                            for k,v in invoce_product_ids.items():
                                if line.product_id.id in v.keys() and line.invoice_id.id == k:
                                    invoce_product_ids[line.invoice_id.id][line.product_id.id] += line.quantity
                                    found = True
                                    break
                            if not found:
                                invoce_product_ids[line.invoice_id.id][line.product_id.id] = line.quantity
                        else:
                            invoce_product_ids[line.invoice_id.id] = {line.product_id.id: line.quantity}
            message = ""
            for claim_line in claim_obj.claim_line_ids:
                vals = {}
                taxes_ids = []
                if claim_line.invoice_id:
                    # Skip claim lines that already have an invoice line.
                    claim_inv_lines = claim_inv_line_obj.search([('claim_line_id', '=', claim_line.id)])
                    if claim_inv_lines:
                        continue
                    for inv_line in claim_line.invoice_id.invoice_line_ids:
                        if inv_line.product_id == claim_line.product_id:
                            if inv_line.invoice_line_tax_ids:
                                taxes_ids = \
                                    [x.id for x in inv_line.invoice_line_tax_ids]
                            vals = {
                                'invoice_id': inv_line.invoice_id.id,
                                'claim_id': claim_line.claim_id.id,
                                'claim_number': claim_line.claim_id.number,
                                'claim_line_id': claim_line.id,
                                'product_id': inv_line.product_id.id,
                                'product_description': inv_line.product_id.name,
                                'discount': inv_line.discount,
                                'qty': claim_line.product_returned_quantity,
                                'price_unit': inv_line.price_unit,
                                'cost_unit': inv_line.product_id.standard_price,
                                'tax_ids': [(6, 0, taxes_ids)]
                            }
                            # Accumulate warnings when the returned qty exceeds
                            # what is still available on the invoice.
                            if invoce_product_ids[inv_line.invoice_id.id][inv_line.product_id.id] < inv_line.claim_invoice_line_qty + claim_line.product_returned_quantity:
                                units_available = invoce_product_ids[inv_line.invoice_id.id][inv_line.product_id.id] - inv_line.claim_invoice_line_qty
                                if units_available > 0:
                                    message += _("There are not enough units of this product (%s) in this invoice (%s). Only %i unit(s) left available \n") % \
                                               (inv_line.product_id.default_code, inv_line.invoice_id.number,int(units_available))
                                else:
                                    message += _("All units of this product (%s) included in the indicated invoice (%s) have already been paid \n") % (
                                        inv_line.product_id.default_code, inv_line.invoice_id.number)
                            break
                if not vals:
                    raise exceptions.Warning(
                        _("There is at least one line of the claim with \
an incorrect invoice"))
                if vals:
                    claim_inv_line_obj.create(vals)
            if message:
                raise exceptions.Warning(message)

    @api.onchange('partner_id')
    def onchange_partner_id(self):
        """Propagate partner defaults (address, team, country, salesman,
        warning message) onto the claim."""
        result = super().onchange_partner_id()
        if self.partner_id:
            self.delivery_address_id = self.partner_id
            self.team_id = self.partner_id.team_id  # Get team_id from res.partner
            self.country = self.partner_id.country_id  # Get country_id from res.partner
            if self.partner_id.user_id:
                self.comercial = self.partner_id.user_id.id
            if self.partner_id.rma_warn_msg:
                self.description = self.partner_id.rma_warn_msg
        return result

    @api.onchange('name')
    def onchange_name(self):
        """Keep invoice_type in sync with the claim subject."""
        if self.name == 'return':
            self.invoice_type = 'refund'
        elif self.name == 'rma':
            self.invoice_type = 'invoice'

    @api.multi
    def make_refund_invoice(self):
        """Create and validate an out_refund invoice from the claim's
        pending (not yet invoiced) claim.invoice.line records, then return
        the window action showing it."""
        for claim_obj in self:
            invoice = False
            invoice_name = set()
            for line in sorted(claim_obj.claim_inv_line_ids, key=lambda d: d.sequence):
                if not line.invoiced:
                    if line.invoice_id.name:
                        invoice_name.add(line.invoice_id.name)
                    invoice = True
            if not invoice:
                raise exceptions.Warning(_("Any line to invoice"))
            description = ' '.join(invoice_name)
            # TODO-> Revisar: antes sale_refund
            domain_journal = [('type', '=', 'sale')]
            acc_journal_obj = self.env['account.journal']
            acc_journal_ids = acc_journal_obj.search(domain_journal)
            reference = claim_obj.client_ref or description
            header_vals = {
                'partner_id': claim_obj.partner_id.id,
                'fiscal_position_id':
                    claim_obj.partner_id.property_account_position_id.id,
                'date_invoice': datetime.now().strftime('%Y-%m-%d'),
                'journal_id': acc_journal_ids[0].id,
                'account_id':
                    claim_obj.partner_id.property_account_receivable_id.id,
                'currency_id':
                    claim_obj.partner_id.property_product_pricelist.currency_id.id,
                'company_id': claim_obj.company_id.id,
                'user_id': self.env.user.id,
                'team_id': claim_obj.partner_id.team_id.id,
                'claim_id': claim_obj.id,
                'type': 'out_refund',
                'payment_term_id': False,
                # Pago inmediato en rectificativas claim_obj.partner_id.property_payment_term_id.id,
                'payment_mode_id':
                    claim_obj.partner_id.customer_payment_mode_id.id,
                'mandate_id': claim_obj.partner_id.valid_mandate_id.id,
                'name': reference,
                'partner_shipping_id': claim_obj.delivery_address_id.id
            }
            if claim_obj.picking_ids:
                # Link the refund to the claim's most recent picking only.
                header_vals['picking_ids'] = [(6, 0, [claim_obj.picking_ids[-1].id])]
            inv_obj = self.env['account.invoice']
            invoice_id = inv_obj.create(header_vals)
            fp_obj = self.env['account.fiscal.position']
            for line in sorted(claim_obj.claim_inv_line_ids, key=lambda d: d.sequence):
                if line.invoiced:
                    continue
                # Resolve the account, falling back to the product category.
                if line.product_id:
                    account_id = line.product_id.property_account_income_id.id
                    if not account_id:
                        account_id = \
                            line.product_id.categ_id. \
                            property_account_income_categ_id.id
                    else:
                        account_id = line.product_id. \
                            property_account_expense_id.id
                        if not account_id:
                            account_id = \
                                line.product_id.categ_id. \
                                property_account_expense_categ_id.id
                else:
                    prop = self.env['ir.property'].get('property_account_income_categ_id', 'product.category')
                    account_id = prop and prop.id or False
                account_id = fp_obj.map_account(account_id)
                vals = {
                    'invoice_id': invoice_id.id,
                    'name': line.product_description,
                    'product_id': line.product_id.id,
                    'account_id': account_id,
                    'quantity': line.qty,
                    'claim_line_id': line.claim_line_id.id,
                    'price_unit': line.price_unit,
                    'cost_unit': line.cost_unit,
                    'uom_id': line.product_id.uom_id.id,
                    'discount': line.discount,
                    'account_analytic_id': False
                }
                if line.tax_ids:
                    taxes_ids = fp_obj.map_tax(line.tax_ids)
                    vals['invoice_line_tax_ids'] = [(6, 0, taxes_ids.ids)]
                line_obj = self.env['account.invoice.line']
                line_obj.create(vals)
                line.invoiced = True
            invoice_id.compute_taxes()
            invoice_id.action_invoice_open()
        data_pool = self.env['ir.model.data']
        action_id = data_pool.xmlid_to_res_id('crm_claim_rma.act_crm_claim_rma_refunds_out')
        if action_id:
            action = self.env.ref('crm_claim_rma.act_crm_claim_rma_refunds_out').read()[0]
            action['domain'] = "[('id','in', [" + str(invoice_id.id) + "])]"
            return action

    @api.multi
    def resequence(self):
        """Renumber each claim's lines consecutively starting at 1."""
        for claim in self:
            seq = 1
            for line in claim.claim_line_ids:
                line.sequence = seq
                seq += 1

    @api.multi
    def check_discounts(self):
        """Warn before refunding invoices containing 'Discount line'
        products; otherwise refund immediately."""
        discount_product_list = []
        has_discount = False
        for claim_obj in self:
            for line in claim_obj.claim_inv_line_ids:
                for i_line_id in line.invoice_id.invoice_line_ids:
                    if i_line_id.product_id.name == 'Discount line' and not line.invoice_id.number in discount_product_list:
                        has_discount = True
                        discount_product_list.append(line.invoice_id.number)
        if has_discount:
            return self.env['invoice.discount.wiz'].create({
                'origin_reference': '%s,%s' % ('crm.claim', self.id),
                'continue_method': 'make_refund_invoice',
                'message': _("This orders have discounts. Do you want to proceed anyways?: %s") % ', '.join(discount_product_list)
            }).action_show()
        else:
            self.make_refund_invoice()
class ClaimInvoiceLine(models.Model):
    """A claim line prepared for refunding.

    Bridges a ``claim.line`` and the invoice line it originated from,
    carrying the price/qty/tax data needed by
    ``crm.claim.make_refund_invoice`` to build the refund invoice.
    """
    _name = 'claim.invoice.line'
    _rec_name = 'product_description'
    _order = 'sequence,id'
    sequence = fields.Integer()
    claim_id = fields.Many2one('crm.claim', 'Claim')
    claim_number = fields.Char("Claim Number")
    claim_line_id = fields.Many2one('claim.line', 'Claim lne')
    product_id = fields.Many2one("product.product", "Product Code")
    product_description = fields.Char("Product Description", required=True)
    invoice_id = fields.Many2one("account.invoice", "Invoice")
    price_unit = fields.Float("Price Unit")
    cost_unit = fields.Float("Cost Unit")
    # Computed: qty * unit price minus discount percentage (see _get_subtotal).
    price_subtotal = fields.Float("Price Subtotal", compute="_get_subtotal",
                                  readonly=True)
    tax_ids = fields.Many2many("account.tax", "claim_line_tax",
                               "claimline_id", "tax_id", string="Taxes")
    discount = fields.Float("Discount")
    qty = fields.Float("Quantity", default="1")
    # Set by make_refund_invoice once the line has been billed.
    invoiced = fields.Boolean("Invoiced")

    @api.multi
    def _get_subtotal(self):
        """Compute price_subtotal = qty * price_unit * (1 - discount%)."""
        for claim_line in self:
            claim_line.price_subtotal = claim_line.qty * claim_line.price_unit * ((100.0 - claim_line.discount) / 100.0)

    @api.onchange("product_id", "invoice_id")
    def onchange_product_id(self):
        """Fill description, price and taxes when product/invoice changes.

        With an invoice selected, price and taxes come from the matching
        invoice line (Warning if the product is not on it); otherwise the
        partner's pricelist is used and taxes are mapped via the fiscal
        position.
        """
        if self.claim_id.partner_id:
            if self.product_id:
                taxes_ids = []
                if self.invoice_id:
                    # res['value'] = {'invoice_id': self.invoice_id.id}
                    any_line = False
                    for line in self.invoice_id.invoice_line_ids:
                        if not self.product_id == line.product_id:
                            any_line = False
                        else:
                            any_line = True
                            price = line.price_unit
                            taxes_ids = line.invoice_line_tax_ids
                            break
                    if not any_line:
                        raise exceptions.Warning(_('Selected product is not \
in the invoice'))
                else:
                    pricelist_obj = \
                        self.claim_id.partner_id.property_product_pricelist
                    price = pricelist_obj.price_get(self.product_id.id, 1.0)
                    if price:
                        price = price[pricelist_obj.id]
                self.product_description = self.product_id.name
                self.qty = 1.0
                self.price_unit = price
                self.price_subtotal = price
                self.discount = 0.0
                if taxes_ids:
                    self.tax_ids = taxes_ids
                else:
                    # No invoice taxes: map the product's default taxes
                    # through the partner's fiscal position.
                    fpos = self.claim_id.partner_id.property_account_position_id
                    self.tax_ids = fpos.map_tax(self.product_id.product_tmpl_id.taxes_id)
            else:
                # No product yet: refresh the subtotal from the values
                # currently entered on the line.
                self.price_subtotal = self.discount and \
                    self.qty * self.price_unit - (self.discount *
                                                  self.price_unit / 100) or \
                    self.qty * self.price_unit
        else:
            raise exceptions.Warning(_('Partner not selected'))

    @api.onchange("qty", "price_unit", "discount")
    def onchange_values(self):
        """Validate qty against the invoice and recompute the subtotal."""
        if self.product_id and self.invoice_id:
            for line in self.invoice_id.invoice_line_ids:
                if line.product_id == self.product_id:
                    if line.quantity < self.qty:
                        raise exceptions.Warning(_('Quantity cannot be bigger than the quantity specified on invoice'))
                    # 'not_id' in context excludes this very line from the
                    # already-claimed quantity (claim_invoice_line_qty).
                    if line.quantity < line.with_context({'not_id': self._origin.id}).claim_invoice_line_qty + self.qty:
                        units_available = line.quantity - line.with_context({'not_id': self._origin.id}).claim_invoice_line_qty
                        if units_available > 0:
                            raise exceptions.Warning(_("There are not enough units of this product (%s) in this invoice (%s). Only %i unit(s) left available \n") %
                                                     (line.product_id.default_code, line.invoice_id.number, int(units_available)))
                        raise exceptions.Warning(
                            _("All units of this product (%s) included in the indicated invoice (%s) have already been paid \n") % (
                                line.product_id.default_code, line.invoice_id.number))
        price_subtotal = self.qty * self.price_unit * ((100.0 - self.discount) / 100.0)
        self.price_subtotal = price_subtotal

    @api.multi
    def unlink(self):
        """Forbid deleting lines that were already refunded."""
        for line in self:
            if line.invoiced:
                raise exceptions.Warning(_("Cannot delete an invoiced line"))
        return super(ClaimInvoiceLine, self).unlink()
class CrmClaimLine(models.Model):
    """claim.line customizations: sequencing, substate tracking, helpers."""
    _inherit = 'claim.line'
    # NOTE(review): 'String=' (capital S) is not a recognized field kwarg —
    # should presumably be 'string='; kept as-is, confirm before changing.
    comercial = fields.Many2one("res.users", String="Comercial", related="claim_id.comercial")
    date_received = fields.Date(related="claim_id.date_received")
    name = fields.Char(required=False)
    invoice_id = fields.Many2one("account.invoice", string="Invoice")
    substate_id = fields. \
        Many2one(default=lambda self: self.env.ref('crm_claim_rma_custom.substate_due_receive').id)
    claim_name = fields.Selection(related='claim_id.name', readonly=True)
    sequence = fields.Integer()
    # NOTE(review): class-level mutable dict, apparently unused — shared
    # across all instances if ever written to; candidate for removal.
    res = {}

    @api.model
    def create(self, vals):
        """Assign the next sequence within the claim and a default substate."""
        sec_list = self.env['crm.claim'].browse(vals['claim_id']).claim_line_ids.mapped('sequence')
        if sec_list:
            vals['sequence'] = max(sec_list) + 1
        else:
            vals['sequence'] = 0
        if 'substate_id' not in vals.keys():
            vals['substate_id'] = self.env.ref(
                'crm_claim_rma_custom.substate_due_receive').id
        return super(CrmClaimLine, self).create(vals)

    @api.multi
    def write(self, vals):
        """Advance the substate when a repair/refund/replacement is linked."""
        if 'repair_id' in vals.keys():
            vals['substate_id'] = self.env.ref(
                'crm_claim_rma_custom.substate_repaired').id
        if 'refund_line_id' in vals.keys():
            vals['substate_id'] = self.env.ref(
                'crm_claim_rma_custom.substate_refund').id
        if 'equivalent_product_id' in vals.keys():
            vals['substate_id'] = self.env.ref(
                'crm_claim_rma_custom.substate_replaced').id
        return super(CrmClaimLine, self).write(vals)

    @api.multi
    def action_split(self):
        """Split a multi-unit line into one line per unit, then reload."""
        for line in self:
            if line.product_returned_quantity > 1:
                # Copy the line (qty 1) once per extra unit, keep one unit
                # on the original line.
                for x in range(1, int(line.product_returned_quantity)):
                    line.copy(default={'product_returned_quantity': 1.0})
                line.product_returned_quantity = 1
        return {'type': 'ir.actions.client', 'tag': 'reload'}

    @api.multi
    def create_repair(self):
        """Launch the claim.make.repair wizard for this single line."""
        self.ensure_one()
        wzd = self.env['claim.make.repair'].create({'line_id': self.id})
        res = wzd.make()
        return res

    @api.multi
    def unlink(self):
        """Delete the lines, then renumber the affected claims' lines."""
        claims = self.mapped('claim_id')
        super().unlink()
        if claims:
            claims.resequence()
        return True
|
from sorl.thumbnail.engines.base import EngineBase
from sorl.thumbnail.compat import BufferIO
try:
from PIL import Image, ImageFile, ImageDraw, ImageChops, ImageFilter
except ImportError:
import Image, ImageFile, ImageDraw, ImageChops
def round_corner(radius, fill):
    """Return an 'L'-mode mask holding one rounded (top-left) corner.

    The corner is the 180-270 degree slice of a disc of the given radius,
    drawn with ``fill`` on a black (0) background.
    """
    mask = Image.new('L', (radius, radius), 0)  # (0, 0, 0, 0))
    ImageDraw.Draw(mask).pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fill)
    return mask
def round_rectangle(size, radius, fill):
    """Return an 'L'-mode rounded-rectangle mask of the given size.

    A fully opaque (255) rectangle whose four corners are replaced by a
    quarter-disc, rotated so the curved edge faces outwards.
    """
    width, height = size
    mask = Image.new('L', size, 255)  # fill
    corner = round_corner(radius, 255)  # fill
    mask.paste(corner, (0, 0))  # top-left
    mask.paste(corner.rotate(90), (0, height - radius))  # bottom-left
    mask.paste(corner.rotate(180), (width - radius, height - radius))  # bottom-right
    mask.paste(corner.rotate(270), (width - radius, 0))  # top-right
    return mask
class GaussianBlur(ImageFilter.Filter):
    """ImageFilter wrapper delegating to PIL's C-level gaussian_blur."""
    name = "GaussianBlur"
    def __init__(self, radius=2):
        # radius: blur radius in pixels.
        self.radius = radius
    def filter(self, image):
        # 'image' here is the PIL core image object, not an Image instance.
        return image.gaussian_blur(self.radius)
class Engine(EngineBase):
    """sorl-thumbnail engine backed by PIL / Pillow."""

    def get_image(self, source):
        """Open ``source`` (a file-like with .read()) as a PIL Image."""
        buffer = BufferIO(source.read())
        return Image.open(buffer)

    def get_image_size(self, image):
        """Return (width, height)."""
        return image.size

    def get_image_info(self, image):
        """Return the image's info dict (never None)."""
        return image.info or {}

    def is_valid_image(self, raw_data):
        """Return True when ``raw_data`` decodes as an image.

        Image.verify() raises on truncated/corrupt data; any failure at
        all is treated as "not a valid image".
        """
        buffer = BufferIO(raw_data)
        try:
            trial_image = Image.open(buffer)
            trial_image.verify()
        except Exception:
            return False
        return True

    def _cropbox(self, image, x, y, x2, y2):
        """Crop to the (x, y, x2, y2) box."""
        return image.crop((x, y, x2, y2))

    def _orientation(self, image):
        """Normalize the image per its EXIF orientation tag (0x0112)."""
        try:
            exif = image._getexif()
        except (AttributeError, IOError, KeyError, IndexError):
            exif = None
        if exif:
            orientation = exif.get(0x0112)
            if orientation == 2:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 3:
                image = image.rotate(180)
            elif orientation == 4:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
            elif orientation == 5:
                image = image.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 6:
                image = image.rotate(-90)
            elif orientation == 7:
                image = image.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 8:
                image = image.rotate(90)
        return image

    def _colorspace(self, image, colorspace):
        """Convert to the requested colorspace ('RGB' or 'GRAY')."""
        if colorspace == 'RGB':
            if image.mode == 'RGBA':
                return image  # RGBA is just RGB + Alpha
            if image.mode == 'P' and 'transparency' in image.info:
                # Keep palette transparency by going through RGBA.
                return image.convert('RGBA')
            return image.convert('RGB')
        if colorspace == 'GRAY':
            return image.convert('L')
        return image

    def _scale(self, image, width, height):
        """Resize with antialiasing."""
        return image.resize((width, height), resample=Image.ANTIALIAS)

    def _crop(self, image, width, height, x_offset, y_offset):
        """Crop a width x height box starting at (x_offset, y_offset)."""
        return image.crop((x_offset, y_offset,
                           width + x_offset, height + y_offset))

    def _rounded(self, image, r):
        """Round the image corners (radius r) via an alpha mask."""
        i = round_rectangle(image.size, r, "notusedblack")
        image.putalpha(i)
        return image

    def _blur(self, image, radius):
        """Apply a gaussian blur of the given radius."""
        return image.filter(GaussianBlur(radius))

    def _padding(self, image, geometry, options):
        """Center the image on a ``geometry``-sized canvas of padding_color."""
        x_image, y_image = self.get_image_size(image)
        left = int((geometry[0] - x_image) / 2)
        top = int((geometry[1] - y_image) / 2)
        color = options.get('padding_color')
        im = Image.new(image.mode, geometry, color)
        im.paste(image, (left, top))
        return im

    def _get_raw_data(self, image, format_, quality, image_info=None, progressive=False):
        """Encode ``image`` and return the raw bytes.

        Saving optimized/progressive JPEGs requires PIL's MAXBLOCK buffer
        to hold the whole image, so we raise it — but never *lower* it
        below its current (default) value.
        """
        ImageFile.MAXBLOCK = max(ImageFile.MAXBLOCK,
                                 image.size[0] * image.size[1])
        bf = BufferIO()
        params = {
            'format': format_,
            'quality': quality,
            'optimize': 1,
        }
        # image_info defaults to None; guard against TypeError.
        params.update(image_info or {})
        if format_ == 'JPEG' and progressive:
            params['progressive'] = True
        try:
            image.save(bf, **params)
        except (IOError, OSError):
            # Some encoders cannot honour 'optimize' (save may raise either
            # IOError or OSError depending on version); retry without it.
            params.pop('optimize')
            image.save(bf, **params)
        raw_data = bf.getvalue()
        bf.close()
        return raw_data
Fix MAXBLOCK hackery in PIL engine
- Make sure we never set the buffer size lower than default.
- Remove MAXBLOCK fiddlery (we already set MAXBLOCK a few lines above)
- 'image.save()' may also raise an OSError, so catch that too.
from sorl.thumbnail.engines.base import EngineBase
from sorl.thumbnail.compat import BufferIO
try:
from PIL import Image, ImageFile, ImageDraw, ImageChops, ImageFilter
except ImportError:
import Image, ImageFile, ImageDraw, ImageChops
def round_corner(radius, fill):
    """Render the quarter-disc greyscale mask used for one rounded corner."""
    corner = Image.new('L', (radius, radius), 0)
    bbox = (0, 0, radius * 2, radius * 2)
    # The 180-270 degree slice of the circle lands inside the corner tile.
    ImageDraw.Draw(corner).pieslice(bbox, 180, 270, fill=fill)
    return corner
def round_rectangle(size, radius, fill):
    """Build an 'L'-mode mask of *size* (white) with all four corners rounded."""
    width, height = size
    mask = Image.new('L', size, 255)
    corner = round_corner(radius, 255)
    # Paste the quarter-disc into each corner, rotated into place.
    placements = (
        (0, (0, 0)),
        (90, (0, height - radius)),
        (180, (width - radius, height - radius)),
        (270, (width - radius, 0)),
    )
    for angle, position in placements:
        mask.paste(corner.rotate(angle) if angle else corner, position)
    return mask
class GaussianBlur(ImageFilter.Filter):
    """ImageFilter adapter exposing PIL's built-in gaussian blur."""
    name = "GaussianBlur"
    def __init__(self, radius=2):
        # radius: blur strength in pixels.
        self.radius = radius
    def filter(self, image):
        # *image* here is the low-level image core handed over by Image.filter().
        return image.gaussian_blur(self.radius)
class Engine(EngineBase):
    """PIL/Pillow implementation of the sorl-thumbnail engine API."""

    def get_image(self, source):
        """Open an Image from a file-like *source*."""
        buffer = BufferIO(source.read())
        return Image.open(buffer)

    def get_image_size(self, image):
        """Return (width, height)."""
        return image.size

    def get_image_info(self, image):
        """Return PIL's per-image metadata dict (never None)."""
        return image.info or {}

    def is_valid_image(self, raw_data):
        """Return True when *raw_data* parses and verifies as an image."""
        buffer = BufferIO(raw_data)
        try:
            trial_image = Image.open(buffer)
            trial_image.verify()
        except Exception:
            # PIL can raise many exception types on corrupt input;
            # treat any failure as "not an image".
            return False
        return True

    def _cropbox(self, image, x, y, x2, y2):
        """Crop to the explicit box (x, y, x2, y2)."""
        return image.crop((x, y, x2, y2))

    def _orientation(self, image):
        """Rotate/flip *image* according to its EXIF orientation tag (0x0112)."""
        try:
            exif = image._getexif()
        except (AttributeError, IOError, KeyError, IndexError):
            exif = None
        if exif:
            orientation = exif.get(0x0112)
            if orientation == 2:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 3:
                image = image.rotate(180)
            elif orientation == 4:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
            elif orientation == 5:
                image = image.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 6:
                image = image.rotate(-90)
            elif orientation == 7:
                image = image.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)
            elif orientation == 8:
                image = image.rotate(90)
        return image

    def _colorspace(self, image, colorspace):
        """Convert to 'RGB' or 'GRAY'; unknown names pass through unchanged."""
        if colorspace == 'RGB':
            if image.mode == 'RGBA':
                return image  # RGBA is just RGB + Alpha
            if image.mode == 'P' and 'transparency' in image.info:
                # Palette image with transparency: keep the alpha channel.
                return image.convert('RGBA')
            return image.convert('RGB')
        if colorspace == 'GRAY':
            return image.convert('L')
        return image

    def _scale(self, image, width, height):
        """Resize to exactly width x height with antialiasing."""
        return image.resize((width, height), resample=Image.ANTIALIAS)

    def _crop(self, image, width, height, x_offset, y_offset):
        """Crop a width x height box starting at (x_offset, y_offset)."""
        return image.crop((x_offset, y_offset,
                           width + x_offset, height + y_offset))

    def _rounded(self, image, r):
        """Round the image corners with radius *r* via an alpha mask."""
        i = round_rectangle(image.size, r, "notusedblack")
        image.putalpha(i)
        return image

    def _blur(self, image, radius):
        """Apply a gaussian blur of the given radius."""
        return image.filter(GaussianBlur(radius))

    def _padding(self, image, geometry, options):
        """Paste *image* centered onto a new canvas of size *geometry*,
        filled with options['padding_color']."""
        x_image, y_image = self.get_image_size(image)
        left = int((geometry[0] - x_image) / 2)
        top = int((geometry[1] - y_image) / 2)
        color = options.get('padding_color')
        im = Image.new(image.mode, geometry, color)
        im.paste(image, (left, top))
        return im

    def _get_raw_data(self, image, format_, quality, image_info=None, progressive=False):
        """Encode *image* and return the raw bytes.

        Bug fix: image_info defaults to None but was passed straight to
        params.update(), raising TypeError; guard with `or {}`.
        """
        # Increase (but never decrease) PIL buffer size
        ImageFile.MAXBLOCK = max(ImageFile.MAXBLOCK, image.size[0] * image.size[1])
        bf = BufferIO()
        params = {
            'format': format_,
            'quality': quality,
            'optimize': 1,
        }
        params.update(image_info or {})
        if format_ == 'JPEG' and progressive:
            params['progressive'] = True
        try:
            image.save(bf, **params)
        except (IOError, OSError):
            # Try without optimization.
            params.pop('optimize')
            image.save(bf, **params)
        raw_data = bf.getvalue()
        bf.close()
        return raw_data
|
from os import mkdir
from os import listdir
from os import link
from os import unlink
from os import symlink
from os import readlink
from os import rmdir
from os import stat
from os import chmod
from os import rename
from os import lstat
from errno import ENOENT as FileDoesNotExist
from errno import EEXIST as FileExists
from errno import EISDIR as DirectoryExists
from errno import EINVAL as InvalidArgument
from errno import EPERM as NotPermitted
from errno import EISDIR as IsADirectory
from hashlib import md5
from os.path import exists
from os.path import isabs
from os.path import isdir
from os.path import isfile, islink, sep
from os.path import normpath
from os.path import split
from os.path import stat as statc
from shutil import copyfileobj
from func_prototypes import typed, returned
from glob import fnmatch
from fnmatch import fnmatchcase
from functools import total_ordering, partial
from farmfs.util import ingest, safetype, uncurry, first, ffilter
from future.utils import python_2_unicode_compatible
from safeoutput import open as safeopen
from filetype import guess, Type
import filetype
try:
    from functools import lru_cache
    # Memoize the two hottest path-string helpers (up to 2**19 entries each).
    cached_normpath = lru_cache(maxsize=2**19)(normpath)
    cached_split = lru_cache(maxsize=2**19)(split)
except ImportError:
    # On python2, functools doesn't provide lru_cache
    cached_normpath = normpath
    cached_split = split
class XSym(Type):
    '''Implements OSX XSym link file type detector'''
    def __init__(self):
        super(XSym, self).__init__(
            mime='inode/symlink',
            extension='xsym')
    def match(self, buf):
        """Detects the MS-Dos symbolic link format from OSX.
        Format of XSym files taken from section 11.7.3 of Mac OSX Internals"""
        # Header layout: b"XSym\n" followed by a 4-digit decimal length and "\n".
        # NOTE(review): the int comparisons assume *buf* is a bytearray or py3
        # bytes; a py2 str would compare str to int and never match. The
        # Path.filetype() caller does wrap its read in bytearray().
        return (len(buf) >= 10 and
                buf[0] == 0x58 and  # X
                buf[1] == 0x53 and  # S
                buf[2] == 0x79 and  # y
                buf[3] == 0x6d and  # m
                buf[4] == 0xa and   # \n
                buf[5] >= 0x30 and buf[5] <= 0x39 and  # 0-9
                buf[6] >= 0x30 and buf[6] <= 0x39 and  # 0-9
                buf[7] >= 0x30 and buf[7] <= 0x39 and  # 0-9
                buf[8] >= 0x30 and buf[8] <= 0x39 and  # 0-9
                buf[9] == 0xa  # \n
                )
# XXX Dirty, we are touching the set of types in filetype package.
filetype.types.append(XSym())
_BLOCKSIZE = 65536  # read granularity for Path.checksum()
# File-type tags yielded by ftype()/entries()/walk().
LINK=u'link'
FILE=u'file'
DIR=u'dir'
TYPES=[LINK, FILE, DIR]
#TODO should take 1 arg, return fn.
def skip_ignored(ignored, path, ftype):
    """Return True when path._path matches any ignore glob in *ignored*."""
    return any(fnmatchcase(path._path, pattern) for pattern in ignored)
def ftype_selector(keep_types):
    """Build a filter over (path, ftype) pairs keeping only the given types."""
    def keep(p, ft):
        # p is accepted for signature parity with entries(); only ft matters.
        return ft in keep_types
    return ffilter(uncurry(keep))
@total_ordering
@python_2_unicode_compatible
class Path:
    """An absolute, normalized filesystem path with helper operations.

    Invariants: _path is an absolute, normalized safetype string; _parent
    is the string form of the containing directory.
    """
    def __init__(self, path, frame=None):
        """Build from a Path (copy), an absolute string, or a relative
        string anchored at *frame* (which must itself be a Path)."""
        if path is None:
            raise ValueError("path must be defined")
        elif isinstance(path, Path):
            # Copy constructor; a frame may not accompany a Path.
            assert frame is None
            self._path = path._path
            self._parent = path._parent
        else:
            path = ingest(path)
            if frame is None:
                assert isabs(path), "Frame is required when building relative paths: %s" % path
                self._path = cached_normpath(path)
            else:
                assert isinstance(frame, Path)
                assert not isabs(path), "path %s is required to be relative when a frame %s is provided" % (path, frame)
                self._path = frame.join(path)._path
            self._parent = first(cached_split(self._path))
        assert isinstance(self._path, safetype)
        assert isinstance(self._parent, safetype)
    def __str__(self):
        return self._path
    def __repr__(self):
        return str(self)
    def mkdir(self):
        """Create this directory; an already-existing file or dir is tolerated."""
        try:
            mkdir(self._path)
        except OSError as e:
            if e.errno == FileExists:
                pass
            elif e.errno == DirectoryExists:
                pass
            else:
                raise e
    # Returns the parent of self. If self is root ('/'), parent returns None.
    # You must check the output of parent before using the value.
    # Notice that parent of root in the shell is '/', so this is a semantic
    # difference between us and POSIX.
    def parent(self):
        if self._path == sep:
            return None
        else:
            return Path(self._parent) #TODO cache this?
    def parents(self):
        """Return the chain of ancestors ordered from root down to self."""
        # TODO turn into comprehension.
        paths = [self]
        path = self
        parent = path.parent()
        while parent is not None:
            paths.append(parent)
            path = parent
            parent = path.parent()
        return reversed(paths)
    def relative_to(self, frame):
        """Return a relative path string from *frame* to self: '.', a chain
        of '..' hops, a forward segment, or a combination of both."""
        assert isinstance(frame, Path)
        # Get the segment sequences from root to self and frame.
        self_family = iter(self.parents())
        frame_family = iter(frame.parents())
        # Find the common ancestor of self and frame.
        s = None
        f = None
        common = None
        while True:
            s = next(self_family, None)
            f = next(frame_family, None)
            if s is None and f is None:
                if common is None:
                    # common should have at least advanced to root!
                    raise ValueError("Failed to find common decendent of %s and %s" % (self, frame))
                else:
                    # self and frame exhausted at the same time. Must be the same path.
                    return SELF_STR
            elif s is None:
                # frame is a descendant of self: return enough '..' hops.
                backtracks = len(list(frame_family)) + 1
                # NOTE(review): dead assignment — immediately overwritten below.
                backtrack = [PARENT_STR] * backtracks
                backtrack = sep.join([PARENT_STR]*backtracks)
                return backtrack
            elif f is None:
                # self is a descendant of frame: return the remaining
                # segments of self past the common prefix.
                if common == ROOT:
                    return self._path[len(common._path):]
                else:
                    return self._path[len(common._path)+1:]
            elif s == f:
                # self and frame agree so far; advance the common ancestor.
                common = s
                pass
            else:
                # Diverged: back out of frame to common, then walk forward.
                backtracks = len(list(frame_family)) + 1
                # NOTE(review): dead assignment — immediately overwritten below.
                backtrack = [PARENT_STR] * backtracks
                backtrack = sep.join([PARENT_STR]*backtracks)
                if common == ROOT:
                    forward = self._path[len(common._path):]
                else:
                    forward = self._path[len(common._path)+1:]
                return backtrack + sep + forward
    def exists(self):
        """Returns true if a path exists. This includes symlinks even if they are broken."""
        return self.islink() or exists(self._path)
    def readlink(self, frame=None):
        """
        Returns the link destination if the Path is a symlink.
        If the path doesn't exist, raises FileNotFoundError
        If the path is not a symlink raises OSError Errno InvalidArgument.
        """
        return Path(readlink(self._path), frame)
    def link(self, dst):
        """
        Creates a hard link AT self pointing to dst's file.
                      dst
                 DNE Dir F SLF SLD SLB
        s   DNR   R   R  N  N   R   R
        e   Dir   R   R  R  R   R   R
        l   F     R   R  R  R   ?   ?
        f   SL    R   R  R  R   ?   ?
        R means raises.
        N means new hardlink created.
        """
        assert isinstance(dst, Path)
        link(dst._path, self._path)
    def symlink(self, dst):
        """Create a symlink AT self whose target is dst's path string."""
        assert isinstance(dst, Path)
        symlink(dst._path, self._path)
    #TODO this behavior is the opposite of what one would expect.
    def copy(self, dst):
        """Copy self's bytes to *dst*, written atomically via safeoutput."""
        assert isinstance(dst, Path)
        with open(self._path, 'rb') as src_fd:
            with safeopen(dst._path, 'wb') as dst_fd:
                copyfileobj(src_fd, dst_fd)
    def unlink(self, clean=None):
        """Remove the file (missing files tolerated). When *clean* is a Path,
        newly-empty parent dirs are pruned up to (not including) it."""
        try:
            unlink(self._path)
        except OSError as e:
            if e.errno == FileDoesNotExist:
                pass
            else:
                raise e
        if clean is not None:
            parent = self.parent()
            parent._cleanup(clean)
    def rmdir(self, clean=None):
        """Remove the (empty) directory, optionally pruning toward *clean*."""
        rmdir(self._path)
        if clean is not None:
            parent = self.parent()
            parent._cleanup(clean)
    """Called on the parent of file or directory after a removal
    (if cleanup as asked for). Recuses cleanup until it reaches terminus.
    """
    def _cleanup(self, terminus):
        assert isinstance(terminus, Path)
        assert terminus in self.parents()
        if self == terminus:
            return
        if len(list(self.dir_gen())) == 0:
            self.rmdir(terminus)
    def islink(self):
        return islink(self._path)
    def isdir(self):
        return isdir(self._path)
    def isfile(self):
        return isfile(self._path)
    def checksum(self):
        """
        If self path is a file or a symlink to a file, compute a checksum returned as a string.
        If self points to a missing file or a broken symlink, raises FileDoesNotExist.
        If self points to a directory or a symlink facing directory, raises IsADirectory.
        """
        hasher = md5()
        with self.open('rb') as fd:
            buf = fd.read(_BLOCKSIZE)
            while len(buf) > 0:
                hasher.update(buf)
                buf = fd.read(_BLOCKSIZE)
        digest = safetype(hasher.hexdigest())
        return digest
    def __cmp__(self, other):
        # py2-only comparison shim; py3 ignores __cmp__ and uses the rich
        # comparison methods below.
        return (self > other) - (self < other)
    def __eq__(self, other):
        assert isinstance(other, Path)
        return self._path == other._path
    def __ne__(self, other):
        assert isinstance(other, Path)
        return not (self == other)
    def __lt__(self, other):
        # Compare segment-wise so ordering matches tree traversal order.
        assert isinstance(other, Path)
        return self._path.split(sep) < other._path.split(sep)
    def __hash__(self):
        return hash(self._path)
    def join(self, child):
        """Return a new Path for *child* beneath self."""
        child = safetype(child)
        try:
            output = Path( self._path + sep + child)
        except UnicodeDecodeError as e:
            # NOTE(review): mixes string concatenation with a second
            # positional arg, so the resulting message formatting is odd.
            raise ValueError(str(e) + "\nself path: "+ self._path + "\nchild: ", child)
        return output
    def dir_gen(self):
        """Generates the set of Paths under this directory"""
        assert self.isdir(), "%s is not a directory" % self._path
        assert isinstance(self._path, safetype)
        names = listdir(self._path)
        for name in names:
            child = self.join(name)
            yield child
    def dir_list(self):
        """Return this directory's children as a sorted list of Paths."""
        names = sorted(listdir(self._path))
        paths = [Path(n, self) for n in names]
        return paths
    def ftype(self):
        """Classify self (without following symlinks) as LINK, FILE, or DIR."""
        st = lstat(self._path)
        if statc.S_ISLNK(st.st_mode):
            return LINK
        elif statc.S_ISREG(st.st_mode):
            return FILE
        elif statc.S_ISDIR(st.st_mode):
            return DIR
        else:
            raise ValueError("%s is not in %s" % (self, TYPES))
    def entries(self, skip=None):
        """Yield (path, ftype) for self and, for directories, all descendants
        in sorted order. *skip* is an optional (path, ftype) -> bool predicate
        pruning an entry together with its subtree."""
        t = self.ftype()
        if skip and skip(self, t):
            return
        yield (self, t)
        if t == DIR:
            children = self.dir_gen()
            for dir_entry in sorted(children):
                for x in dir_entry.entries(skip):
                    yield x
    def open(self, mode):
        return open(self._path, mode)
    def stat(self):
        # Follows symlinks; contrast with ftype(), which uses lstat.
        return stat(self._path)
    def chmod(self, mode):
        return chmod(self._path, mode)
    def rename(self, dst):
        return rename(self._path, dst._path)
    def filetype(self):
        """Return the guessed mime type from the first 256 bytes, or None."""
        # XXX Working around bug in filetype guess.
        # Duck typing checks don't work on py27, because of str bytes confusion.
        # So we read the file ourselves and put it in a bytearray.
        # Remove this when we drop support for py27.
        with self.open("rb") as fd:
            type = guess(bytearray(fd.read(256)))
        if type:
            return type.mime
        else:
            return None
@returned(Path)
def userPath2Path(arg, frame):
    """Interpret user input *arg* with POSIX semantics.

    FarmFS path construction is explicit about CWD and refuses to mix
    absolute paths with frames — good for strong guarantees, but not what
    users expect. This shim restores the POSIX expectation: absolute input
    ignores *frame* (the CWD); relative input is anchored at it.
    """
    arg = ingest(arg)
    return Path(arg) if isabs(arg) else Path(arg, frame)
#TODO this function is dangerous. Would be better if we did sorting in the snaps to ensure order of ops explicitly.
@typed(Path)
def ensure_absent(path):
    """Make sure nothing exists at *path*: files are unlinked, directories
    are emptied recursively and then removed."""
    if not path.exists():
        return  # No work to do.
    if path.isdir():
        for child in path.dir_gen():
            ensure_absent(child)
        path.rmdir()
    else:
        path.unlink()
@typed(Path)
def ensure_dir(path):
    """Make sure *path* is a directory, creating parents as needed.

    A non-directory already occupying *path* is unlinked and replaced.
    """
    if not path.exists():
        assert path != ROOT, "Path is root, which must be a directory"
        parent = path.parent()
        assert parent != path, "Path and parent were the same!"
        ensure_dir(parent)
        path.mkdir()
    elif path.isdir():
        pass  # There is nothing to do.
    else:
        path.unlink()
        path.mkdir()
@typed(Path, Path)
def ensure_link(path, orig):
    """Hard-link *path* to existing *orig*, creating path's parent dirs and
    removing anything already at *path*."""
    assert orig.exists()
    parent = path.parent()
    assert parent != path, "Path and parent were the same!"
    ensure_dir(parent)
    ensure_absent(path)
    path.link(orig)
# Permission masks: every write bit (user/group/other), and its complement.
write_mask = statc.S_IWUSR | statc.S_IWGRP | statc.S_IWOTH
read_only_mask = ~write_mask
@typed(Path)
def ensure_readonly(path):
    """Strip every write-permission bit from *path*."""
    current_mode = path.stat().st_mode
    path.chmod(current_mode & read_only_mask)
#TODO this is used only for fsck readonly check.
@typed(Path)
def is_readonly(path):
    """Return True when *path* has no write-permission bits set.

    Bug fix: the previous version returned bool(writable), i.e. True for
    WRITABLE paths — the inverse of what the function name promises.
    """
    mode = path.stat().st_mode
    writable = mode & write_mask
    return not bool(writable)
@typed(Path, Path)
def ensure_copy(dst, src):
    """Copy *src* into *dst*, creating dst's parents and clearing anything
    already at dst first. Note the (dst, src) argument order."""
    assert src.exists()
    parent = dst.parent()
    assert parent != dst, "dst and parent were the same!"
    ensure_dir(parent)
    ensure_absent(dst)
    src.copy(dst)
@typed(Path, Path)
def ensure_symlink(path, target):
    """Create/replace symlink *path* -> *target* (a Path), via the string variant."""
    ensure_symlink_unsafe(path, target._path)
@typed(Path, safetype)
def ensure_symlink_unsafe(path, orig):
    """Create/replace symlink *path* -> raw string *orig*.

    'unsafe' because *orig* is not validated and the link may dangle.
    Parent directories are created; anything already at *path* is removed.
    """
    parent = path.parent()
    assert parent != path, "Path and parent were the same!"
    ensure_dir(parent)
    ensure_absent(path)
    assert not path.exists()
    symlink(orig, path._path)
    assert path.islink()
"""
Creates/Deletes directories. Does whatever is required inorder
to make and open a file with the mode previded.
Mode settings to consider are:
O_CREAT create file if it does not exist
O_TRUNC truncate size to 0
O_EXCL error if O_CREAT and the file exists
"""
def ensure_file(path, mode):
assert isinstance(path, Path)
parent = path.parent()
assert parent != path, "Path and parent were the same!"
ensure_dir(parent)
fd = path.open(mode)
return fd
ROOT = Path(sep)  # the filesystem root as a Path
# Building blocks for relative_to() results.
PARENT_STR = safetype("..")
SELF_STR = safetype(".")
def walk(*roots, skip=None):
    """Depth-first, sorted traversal of *roots*, yielding (path, ftype).

    *skip* is an optional (path, ftype) -> bool predicate; a match prunes
    the entry and, for directories, its entire subtree.
    """
    stack = [iter(sorted(roots))]
    while stack:
        current = next(stack[-1], None)
        if current is None:
            stack.pop()
            continue
        ftype = current.ftype()
        if skip and skip(current, ftype):
            continue
        yield (current, ftype)
        if ftype is DIR:
            stack.append(iter(current.dir_list()))
Add fast Path constructor.
Useful for walking directories because we know that the enumerated
directory is the parent, and has already been normalized. The name is
known to be a single entry so frame + sep + name is known to be
normalized.
from os import mkdir
from os import listdir
from os import link
from os import unlink
from os import symlink
from os import readlink
from os import rmdir
from os import stat
from os import chmod
from os import rename
from os import lstat
from errno import ENOENT as FileDoesNotExist
from errno import EEXIST as FileExists
from errno import EISDIR as DirectoryExists
from errno import EINVAL as InvalidArgument
from errno import EPERM as NotPermitted
from errno import EISDIR as IsADirectory
from hashlib import md5
from os.path import exists
from os.path import isabs
from os.path import isdir
from os.path import isfile, islink, sep
from os.path import normpath
from os.path import split
from os.path import stat as statc
from shutil import copyfileobj
from func_prototypes import typed, returned
from glob import fnmatch
from fnmatch import fnmatchcase
from functools import total_ordering, partial
from farmfs.util import ingest, safetype, uncurry, first, ffilter
from future.utils import python_2_unicode_compatible
from safeoutput import open as safeopen
from filetype import guess, Type
import filetype
try:
    from functools import lru_cache
    # Memoize the two hottest path-string helpers (up to 2**19 entries each).
    cached_normpath = lru_cache(maxsize=2**19)(normpath)
    cached_split = lru_cache(maxsize=2**19)(split)
except ImportError:
    # On python2, functools doesn't provide lru_cache
    cached_normpath = normpath
    cached_split = split
class XSym(Type):
    '''Implements OSX XSym link file type detector'''
    def __init__(self):
        super(XSym, self).__init__(
            mime='inode/symlink',
            extension='xsym')
    def match(self, buf):
        """Detects the MS-Dos symbolic link format from OSX.
        Format of XSym files taken from section 11.7.3 of Mac OSX Internals"""
        # Header layout: b"XSym\n" followed by a 4-digit decimal length and "\n".
        # NOTE(review): the int comparisons assume *buf* is a bytearray or py3
        # bytes; a py2 str would compare str to int and never match. The
        # Path.filetype() caller does wrap its read in bytearray().
        return (len(buf) >= 10 and
                buf[0] == 0x58 and  # X
                buf[1] == 0x53 and  # S
                buf[2] == 0x79 and  # y
                buf[3] == 0x6d and  # m
                buf[4] == 0xa and   # \n
                buf[5] >= 0x30 and buf[5] <= 0x39 and  # 0-9
                buf[6] >= 0x30 and buf[6] <= 0x39 and  # 0-9
                buf[7] >= 0x30 and buf[7] <= 0x39 and  # 0-9
                buf[8] >= 0x30 and buf[8] <= 0x39 and  # 0-9
                buf[9] == 0xa  # \n
                )
# XXX Dirty, we are touching the set of types in filetype package.
filetype.types.append(XSym())
_BLOCKSIZE = 65536  # read granularity for Path.checksum()
# File-type tags yielded by ftype()/entries()/walk().
LINK=u'link'
FILE=u'file'
DIR=u'dir'
TYPES=[LINK, FILE, DIR]
#TODO should take 1 arg, return fn.
def skip_ignored(ignored, path, ftype):
    """Return True when path._path matches any ignore glob in *ignored*."""
    return any(fnmatchcase(path._path, pattern) for pattern in ignored)
def ftype_selector(keep_types):
    """Build a filter over (path, ftype) pairs keeping only the given types."""
    def keep(p, ft):
        # p is accepted for signature parity with entries(); only ft matters.
        return ft in keep_types
    return ffilter(uncurry(keep))
@total_ordering
@python_2_unicode_compatible
class Path:
    """An absolute, normalized filesystem path with helper operations.

    Invariants: _path is an absolute, normalized safetype string; _parent
    is the string form of the containing directory. The fast=True
    constructor trusts its caller and skips normalization.
    """
    def __init__(self, path, frame=None, fast=False):
        """Build from a Path (copy), an absolute string, a relative string
        anchored at *frame*, or — when *fast* is True — a single listdir()
        entry name beneath the already-normalized directory *frame*."""
        if fast:
            # Fast path is generated by walk. frame is already a Path and path is a single element from listdir.
            # No ingestion/normalization: frame + sep + name is known normalized.
            self._path = frame._path + sep + path
            self._parent = frame._path
        elif isinstance(path, Path):
            # Copy constructor from another Path.
            assert frame is None
            self._path = path._path
            self._parent = path._parent
        else:
            if path is None:
                raise ValueError("path must be defined")
            path = ingest(path)
            if frame is None:
                assert isabs(path), "Frame is required when building relative paths: %s" % path
                self._path = cached_normpath(path)
            else:
                assert isinstance(frame, Path)
                assert not isabs(path), "path %s is required to be relative when a frame %s is provided" % (path, frame)
                self._path = cached_normpath(frame._path + sep + path)
            self._parent = first(cached_split(self._path))
        assert isinstance(self._path, safetype)
        assert isinstance(self._parent, safetype), type(self._parent)
    def __str__(self):
        return self._path
    def __repr__(self):
        return str(self)
    def mkdir(self):
        """Create this directory; an already-existing file or dir is tolerated."""
        try:
            mkdir(self._path)
        except OSError as e:
            if e.errno == FileExists:
                pass
            elif e.errno == DirectoryExists:
                pass
            else:
                raise e
    # Returns the parent of self. If self is root ('/'), parent returns None.
    # You must check the output of parent before using the value.
    # Notice that parent of root in the shell is '/', so this is a semantic
    # difference between us and POSIX.
    def parent(self):
        if self._path == sep:
            return None
        else:
            return Path(self._parent) #TODO cache this?
    def parents(self):
        """Return the chain of ancestors ordered from root down to self."""
        # TODO turn into comprehension.
        paths = [self]
        path = self
        parent = path.parent()
        while parent is not None:
            paths.append(parent)
            path = parent
            parent = path.parent()
        return reversed(paths)
    def relative_to(self, frame):
        """Return a relative path string from *frame* to self: '.', a chain
        of '..' hops, a forward segment, or a combination of both."""
        assert isinstance(frame, Path)
        # Get the segment sequences from root to self and frame.
        self_family = iter(self.parents())
        frame_family = iter(frame.parents())
        # Find the common ancestor of self and frame.
        s = None
        f = None
        common = None
        while True:
            s = next(self_family, None)
            f = next(frame_family, None)
            if s is None and f is None:
                if common is None:
                    # common should have at least advanced to root!
                    raise ValueError("Failed to find common decendent of %s and %s" % (self, frame))
                else:
                    # self and frame exhausted at the same time. Must be the same path.
                    return SELF_STR
            elif s is None:
                # frame is a descendant of self: return enough '..' hops.
                backtracks = len(list(frame_family)) + 1
                # NOTE(review): dead assignment — immediately overwritten below.
                backtrack = [PARENT_STR] * backtracks
                backtrack = sep.join([PARENT_STR]*backtracks)
                return backtrack
            elif f is None:
                # self is a descendant of frame: return the remaining
                # segments of self past the common prefix.
                if common == ROOT:
                    return self._path[len(common._path):]
                else:
                    return self._path[len(common._path)+1:]
            elif s == f:
                # self and frame agree so far; advance the common ancestor.
                common = s
                pass
            else:
                # Diverged: back out of frame to common, then walk forward.
                backtracks = len(list(frame_family)) + 1
                # NOTE(review): dead assignment — immediately overwritten below.
                backtrack = [PARENT_STR] * backtracks
                backtrack = sep.join([PARENT_STR]*backtracks)
                if common == ROOT:
                    forward = self._path[len(common._path):]
                else:
                    forward = self._path[len(common._path)+1:]
                return backtrack + sep + forward
    def exists(self):
        """Returns true if a path exists. This includes symlinks even if they are broken."""
        return self.islink() or exists(self._path)
    def readlink(self, frame=None):
        """
        Returns the link destination if the Path is a symlink.
        If the path doesn't exist, raises FileNotFoundError
        If the path is not a symlink raises OSError Errno InvalidArgument.
        """
        return Path(readlink(self._path), frame)
    def link(self, dst):
        """
        Creates a hard link AT self pointing to dst's file.
                      dst
                 DNE Dir F SLF SLD SLB
        s   DNR   R   R  N  N   R   R
        e   Dir   R   R  R  R   R   R
        l   F     R   R  R  R   ?   ?
        f   SL    R   R  R  R   ?   ?
        R means raises.
        N means new hardlink created.
        """
        assert isinstance(dst, Path)
        link(dst._path, self._path)
    def symlink(self, dst):
        """Create a symlink AT self whose target is dst's path string."""
        assert isinstance(dst, Path)
        symlink(dst._path, self._path)
    #TODO this behavior is the opposite of what one would expect.
    def copy(self, dst):
        """Copy self's bytes to *dst*, written atomically via safeoutput."""
        assert isinstance(dst, Path)
        with open(self._path, 'rb') as src_fd:
            with safeopen(dst._path, 'wb') as dst_fd:
                copyfileobj(src_fd, dst_fd)
    def unlink(self, clean=None):
        """Remove the file (missing files tolerated). When *clean* is a Path,
        newly-empty parent dirs are pruned up to (not including) it."""
        try:
            unlink(self._path)
        except OSError as e:
            if e.errno == FileDoesNotExist:
                pass
            else:
                raise e
        if clean is not None:
            parent = self.parent()
            parent._cleanup(clean)
    def rmdir(self, clean=None):
        """Remove the (empty) directory, optionally pruning toward *clean*."""
        rmdir(self._path)
        if clean is not None:
            parent = self.parent()
            parent._cleanup(clean)
    """Called on the parent of file or directory after a removal
    (if cleanup as asked for). Recuses cleanup until it reaches terminus.
    """
    def _cleanup(self, terminus):
        assert isinstance(terminus, Path)
        assert terminus in self.parents()
        if self == terminus:
            return
        if len(list(self.dir_gen())) == 0:
            self.rmdir(terminus)
    def islink(self):
        return islink(self._path)
    def isdir(self):
        return isdir(self._path)
    def isfile(self):
        return isfile(self._path)
    def checksum(self):
        """
        If self path is a file or a symlink to a file, compute a checksum returned as a string.
        If self points to a missing file or a broken symlink, raises FileDoesNotExist.
        If self points to a directory or a symlink facing directory, raises IsADirectory.
        """
        hasher = md5()
        with self.open('rb') as fd:
            buf = fd.read(_BLOCKSIZE)
            while len(buf) > 0:
                hasher.update(buf)
                buf = fd.read(_BLOCKSIZE)
        digest = safetype(hasher.hexdigest())
        return digest
    def __cmp__(self, other):
        # py2-only comparison shim; py3 ignores __cmp__ and uses the rich
        # comparison methods below.
        return (self > other) - (self < other)
    def __eq__(self, other):
        assert isinstance(other, Path)
        return self._path == other._path
    def __ne__(self, other):
        assert isinstance(other, Path)
        return not (self == other)
    def __lt__(self, other):
        # Compare segment-wise so ordering matches tree traversal order.
        assert isinstance(other, Path)
        return self._path.split(sep) < other._path.split(sep)
    def __hash__(self):
        return hash(self._path)
    def join(self, child):
        """Return a new Path for *child* beneath self."""
        child = safetype(child)
        try:
            output = Path( self._path + sep + child)
        except UnicodeDecodeError as e:
            # NOTE(review): mixes string concatenation with a second
            # positional arg, so the resulting message formatting is odd.
            raise ValueError(str(e) + "\nself path: "+ self._path + "\nchild: ", child)
        return output
    def dir_gen(self):
        """Generates the set of Paths under this directory"""
        assert self.isdir(), "%s is not a directory" % self._path
        assert isinstance(self._path, safetype)
        names = listdir(self._path)
        for name in names:
            child = self.join(name)
            yield child
    def dir_list(self):
        """Return this directory's children as a sorted list of Paths.

        Uses the fast constructor: self is already normalized and each
        name is a single listdir() entry.
        """
        names = sorted(listdir(self._path))
        paths = [Path(n, self, fast=True) for n in names]
        return paths
    def ftype(self):
        """Classify self (without following symlinks) as LINK, FILE, or DIR."""
        st = lstat(self._path)
        if statc.S_ISLNK(st.st_mode):
            return LINK
        elif statc.S_ISREG(st.st_mode):
            return FILE
        elif statc.S_ISDIR(st.st_mode):
            return DIR
        else:
            raise ValueError("%s is not in %s" % (self, TYPES))
    def entries(self, skip=None):
        """Yield (path, ftype) for self and, for directories, all descendants
        in sorted order. *skip* is an optional (path, ftype) -> bool predicate
        pruning an entry together with its subtree."""
        t = self.ftype()
        if skip and skip(self, t):
            return
        yield (self, t)
        if t == DIR:
            children = self.dir_gen()
            for dir_entry in sorted(children):
                for x in dir_entry.entries(skip):
                    yield x
    def open(self, mode):
        return open(self._path, mode)
    def stat(self):
        # Follows symlinks; contrast with ftype(), which uses lstat.
        return stat(self._path)
    def chmod(self, mode):
        return chmod(self._path, mode)
    def rename(self, dst):
        return rename(self._path, dst._path)
    def filetype(self):
        """Return the guessed mime type from the first 256 bytes, or None."""
        # XXX Working around bug in filetype guess.
        # Duck typing checks don't work on py27, because of str bytes confusion.
        # So we read the file ourselves and put it in a bytearray.
        # Remove this when we drop support for py27.
        with self.open("rb") as fd:
            type = guess(bytearray(fd.read(256)))
        if type:
            return type.mime
        else:
            return None
@returned(Path)
def userPath2Path(arg, frame):
    """Interpret user input *arg* with POSIX semantics.

    FarmFS path construction is explicit about CWD and refuses to mix
    absolute paths with frames — good for strong guarantees, but not what
    users expect. This shim restores the POSIX expectation: absolute input
    ignores *frame* (the CWD); relative input is anchored at it.
    """
    arg = ingest(arg)
    return Path(arg) if isabs(arg) else Path(arg, frame)
#TODO this function is dangerous. Would be better if we did sorting in the snaps to ensure order of ops explicitly.
@typed(Path)
def ensure_absent(path):
    """Make sure nothing exists at *path*: files are unlinked, directories
    are emptied recursively and then removed."""
    if not path.exists():
        return  # No work to do.
    if path.isdir():
        for child in path.dir_gen():
            ensure_absent(child)
        path.rmdir()
    else:
        path.unlink()
@typed(Path)
def ensure_dir(path):
    """Make sure *path* is a directory, creating parents as needed.

    A non-directory already occupying *path* is unlinked and replaced.
    """
    if not path.exists():
        assert path != ROOT, "Path is root, which must be a directory"
        parent = path.parent()
        assert parent != path, "Path and parent were the same!"
        ensure_dir(parent)
        path.mkdir()
    elif path.isdir():
        pass  # There is nothing to do.
    else:
        path.unlink()
        path.mkdir()
@typed(Path, Path)
def ensure_link(path, orig):
    """Hard-link *path* to existing *orig*, creating path's parent dirs and
    removing anything already at *path*."""
    assert orig.exists()
    parent = path.parent()
    assert parent != path, "Path and parent were the same!"
    ensure_dir(parent)
    ensure_absent(path)
    path.link(orig)
# Permission masks: every write bit (user/group/other), and its complement.
write_mask = statc.S_IWUSR | statc.S_IWGRP | statc.S_IWOTH
read_only_mask = ~write_mask
@typed(Path)
def ensure_readonly(path):
    """Strip every write-permission bit from *path*."""
    current_mode = path.stat().st_mode
    path.chmod(current_mode & read_only_mask)
#TODO this is used only for fsck readonly check.
@typed(Path)
def is_readonly(path):
    """Return True when *path* has no write-permission bits set.

    Bug fix: the previous version returned bool(writable), i.e. True for
    WRITABLE paths — the inverse of what the function name promises.
    """
    mode = path.stat().st_mode
    writable = mode & write_mask
    return not bool(writable)
@typed(Path, Path)
def ensure_copy(dst, src):
    """Copy *src* into *dst*, creating dst's parents and clearing anything
    already at dst first. Note the (dst, src) argument order."""
    assert src.exists()
    parent = dst.parent()
    assert parent != dst, "dst and parent were the same!"
    ensure_dir(parent)
    ensure_absent(dst)
    src.copy(dst)
@typed(Path, Path)
def ensure_symlink(path, target):
    """Create/replace symlink *path* -> *target* (a Path), via the string variant."""
    ensure_symlink_unsafe(path, target._path)
@typed(Path, safetype)
def ensure_symlink_unsafe(path, orig):
    """Create/replace symlink *path* -> raw string *orig*.

    'unsafe' because *orig* is not validated and the link may dangle.
    Parent directories are created; anything already at *path* is removed.
    """
    parent = path.parent()
    assert parent != path, "Path and parent were the same!"
    ensure_dir(parent)
    ensure_absent(path)
    assert not path.exists()
    symlink(orig, path._path)
    assert path.islink()
"""
Creates/Deletes directories. Does whatever is required inorder
to make and open a file with the mode previded.
Mode settings to consider are:
O_CREAT create file if it does not exist
O_TRUNC truncate size to 0
O_EXCL error if O_CREAT and the file exists
"""
def ensure_file(path, mode):
assert isinstance(path, Path)
parent = path.parent()
assert parent != path, "Path and parent were the same!"
ensure_dir(parent)
fd = path.open(mode)
return fd
ROOT = Path(sep)  # the filesystem root as a Path
# Building blocks for relative_to() results.
PARENT_STR = safetype("..")
SELF_STR = safetype(".")
def walk(*roots, skip=None):
    """Depth-first, sorted traversal of *roots*, yielding (path, ftype).

    *skip* is an optional (path, ftype) -> bool predicate; a match prunes
    the entry and, for directories, its entire subtree.
    """
    stack = [iter(sorted(roots))]
    while stack:
        current = next(stack[-1], None)
        if current is None:
            stack.pop()
            continue
        ftype = current.ftype()
        if skip and skip(current, ftype):
            continue
        yield (current, ftype)
        if ftype is DIR:
            stack.append(iter(current.dir_list()))
|
#!/bin/sh
# -*- python -*-
################################################################################
# This file is python bilingual: The next line starts a comment in Python,
# and is a no-op in shell
""":"
# Find a suitable python interpreter (adapt for your specific needs)
for cmd in python3 python python2; do
command -v > /dev/null $cmd && exec $cmd $0 "$@"
done
echo "Error: Could not find a valid python interpreter --> exiting!" >&2
exit 2
":"""
################################################################################
# Git Version: @git@
#-----------------------------------------------------------------------
# XALT: A tool that tracks users jobs and environments on a cluster.
# Copyright (C) 2013-2014 University of Texas at Austin
# Copyright (C) 2013-2014 University of Tennessee
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
from __future__ import print_function
from fnmatch import fnmatch
from collections import OrderedDict
import os, sys, re, argparse
class CmdLineOptions(object):
    """Builds and runs the command-line parser for this script."""

    def __init__(self):
        """No state to initialize."""
        pass

    def execute(self):
        """Register the accepted options and return the parsed namespace."""
        parser = argparse.ArgumentParser()
        for flag, dest, helpTxt in (("--lib64", "lib64", "LIB64 install directory"),
                                    ("--base", "base", "base library"),
                                    ("--real", "real", "real library")):
            parser.add_argument(flag, dest=dest, action="store", help=helpTxt)
        return parser.parse_args()
def files_in_tree(path, pattern):
    """Return every file under *path* whose full pathname matches *pattern*.

    *path* is resolved through symlinks first; a non-directory yields [].
    """
    top = os.path.realpath(path)
    if not os.path.isdir(top):
        return []
    matches = []
    for dirName, _subdirs, fileNames in os.walk(top):
        for name in fileNames:
            fullName = os.path.join(dirName, name)
            if fnmatch(fullName, pattern):
                matches.append(fullName)
    return matches
def main():
    """Install the base library's file family and the real library into the
    --lib64 directory, recreating relative symlinks there as needed.

    Bug fixes: ``os.exit`` does not exist (AttributeError at the exact
    moment an error should be reported) -- use ``sys.exit``.  The
    always-True ``fileT[...]`` guards are dropped; ``fileT[newFn]`` raised
    KeyError whenever a link target was not itself in the collected set.
    """
    args = CmdLineOptions().execute()
    lib64_dir = args.lib64
    baseLib = args.base
    realLib = args.real

    # Collect the base library's whole family (lib.so, lib.so.1, ...) plus
    # the real library, de-duplicated via a dict.
    baseBn = os.path.basename(baseLib)
    dirNm = os.path.dirname(baseLib)
    pattern = os.path.join(dirNm, baseBn) + "*"
    fileA = files_in_tree(dirNm, pattern)
    fileA.append(realLib)
    fileT = {}
    for fn in fileA:
        fileT[fn] = True

    for fn in fileT:
        print(fn + ":")
        if os.path.islink(fn):
            newFn = os.readlink(fn)
            if newFn.find('/') == -1:
                # Relative link target: resolve against the library dir.
                newFn = os.path.join(dirNm, newFn)
            if os.path.isfile(newFn):
                # Copy the link target, then recreate the symlink in lib64.
                # NOTE(review): paths are interpolated into a shell command;
                # os.system would mis-handle metacharacters in paths.
                cmd = "cp " + newFn + " " + lib64_dir
                print(" ", cmd)
                os.system(cmd)
                cmd = "ln -sf " + os.path.basename(newFn) + " " + os.path.join(lib64_dir, os.path.basename(fn))
                print(" ", cmd)
                os.system(cmd)
            else:
                print("Cannot deal w/link: ", newFn)
                sys.exit(-1)
        else:
            cmd = "cp " + fn + " " + lib64_dir
            print(" ", cmd)
            os.system(cmd)
if ( __name__ == '__main__'): main()
Remove all of the redundant `if fileT[...]` tests (the dict values are always True, and `fileT[newFn]` could raise KeyError for link targets outside the collected set).
#!/bin/sh
# -*- python -*-
################################################################################
# This file is python bilingual: The next line starts a comment in Python,
# and is a no-op in shell
""":"
# Find a suitable python interpreter (adapt for your specific needs)
for cmd in python3 python python2; do
command -v > /dev/null $cmd && exec $cmd $0 "$@"
done
echo "Error: Could not find a valid python interpreter --> exiting!" >&2
exit 2
":"""
################################################################################
# Git Version: @git@
#-----------------------------------------------------------------------
# XALT: A tool that tracks users jobs and environments on a cluster.
# Copyright (C) 2013-2014 University of Texas at Austin
# Copyright (C) 2013-2014 University of Tennessee
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
from __future__ import print_function
from fnmatch import fnmatch
from collections import OrderedDict
import os, sys, re, argparse
class CmdLineOptions(object):
    """Command-line option handling for this installer script."""

    def __init__(self):
        """Nothing to set up."""
        pass

    def execute(self):
        """Define this script's options, then parse and return them."""
        parser = argparse.ArgumentParser()
        options = [("--lib64", "lib64", "LIB64 install directory"),
                   ("--base", "base", "base library"),
                   ("--real", "real", "real library")]
        for flag, dest, helpStr in options:
            parser.add_argument(flag, dest=dest, action="store", help=helpStr)
        return parser.parse_args()
def files_in_tree(path, pattern):
    """Walk *path* (after realpath) and return all files matching *pattern*."""
    path = os.path.realpath(path)
    if not os.path.isdir(path):
        return []
    return [os.path.join(root, name)
            for root, _dirs, files in os.walk(path)
            for name in files
            if fnmatch(os.path.join(root, name), pattern)]
def main():
    """Copy the base library family plus the real library into the --lib64
    directory given on the command line, rebuilding symlinks as it goes.

    Bug fix: ``os.exit(-1)`` raised AttributeError (``os`` has no ``exit``);
    ``sys.exit(-1)`` is the intended failure exit.
    """
    args = CmdLineOptions().execute()
    lib64_dir = args.lib64
    baseLib = args.base
    realLib = args.real

    # Everything in the base library's directory sharing its basename
    # prefix (lib.so, lib.so.1, ...), plus the real library, de-duplicated.
    baseBn = os.path.basename(baseLib)
    dirNm = os.path.dirname(baseLib)
    pattern = os.path.join(dirNm, baseBn) + "*"
    fileA = files_in_tree(dirNm, pattern)
    fileA.append(realLib)
    fileT = {}
    for fn in fileA:
        fileT[fn] = True

    for fn in fileT:
        print(fn + ":")
        if os.path.islink(fn):
            newFn = os.readlink(fn)
            if newFn.find('/') == -1:
                # Relative target: resolve against the library directory.
                newFn = os.path.join(dirNm, newFn)
            if os.path.isfile(newFn):
                # Copy the target, then rebuild the symlink inside lib64.
                # NOTE(review): paths go through a shell via os.system;
                # metacharacters in paths would be interpreted.
                cmd = "cp " + newFn + " " + lib64_dir
                print(" ", cmd)
                os.system(cmd)
                cmd = "ln -sf " + os.path.basename(newFn) + " " + os.path.join(lib64_dir, os.path.basename(fn))
                print(" ", cmd)
                os.system(cmd)
            else:
                print("Cannot deal w/link: ", newFn)
                sys.exit(-1)
        else:
            cmd = "cp " + fn + " " + lib64_dir
            print(" ", cmd)
            os.system(cmd)
if ( __name__ == '__main__'): main()
|
#!/usr/bin/env python
from __future__ import print_function, division
from fractions import gcd
import operator, cmath
import math as rmath
import random, itertools, sys, string
from types import *
from base64 import *
from copy import copy
def template_specialize(fname, *args):
    """Poor-man's function-template specialization keyed on argument types.

    Returns a decorator.  The decorated function replaces globals()[fname]
    for calls whose positional arguments are instances of *args* (zipped
    pairwise); any other call falls through to the previously registered
    implementation.
    """
    if fname not in globals():
        # First specialization for this name: install a catch-all that
        # rejects unhandled type combinations.
        def raiseError(*args, **kwargs):
            raise NotImplementedError("This type combination is unimplemented.")
        globals()[fname] = raiseError
    def template_specializer(func):
        # Chain onto whatever implementation was registered before us.
        old_func = globals()[fname]
        globals()[fname] = lambda *pargs: func(*pargs) if all(isinstance(a, t) for a, t in zip(pargs, args)) else old_func(*pargs)
        return func
    return template_specializer
phi = (1+5**.5)/2
def Fib(n):
    """n-th Fibonacci number (Fib(0)=0, Fib(1)=1), computed iteratively."""
    if n < 2:
        return n
    prev, cur = 1, 1
    for _ in range(n - 2):
        prev, cur = cur, prev + cur
    return cur
def prod(iter):
    """Product of all elements of *iter* (1 for an empty iterable).

    Bug fix: ``reduce`` is not a builtin on Python 3; import it from
    functools so the helper works on both major versions.
    """
    from functools import reduce
    return reduce(operator.mul, iter, 1)
primes = [2,3]
class MathSelector(object):
    """Callable that proxies a named function to math, falling back to cmath.

    Tries the real-math implementation first; on AttributeError (the name
    only exists in cmath) it retries with cmath.  'factorial' gets a final
    pure-Python fallback since cmath has no factorial.
    """
    def __init__(self, fn):
        # fn: attribute name looked up on rmath/cmath at call time.
        self.fn = fn
    def __call__(self, *args, **kwargs):
        try:
            return getattr(rmath,self.fn)(*args, **kwargs)
        except AttributeError:
            try:
                return getattr(cmath,self.fn)(*args, **kwargs)
            except Exception as e:
                if self.fn == 'factorial':
                    # cmath has no factorial; use the naive Python loop.
                    return naive_factorial(*args, **kwargs)
                else:
                    raise e
class Math(object):
    """Namespace that dispatches attribute lookups across math and cmath."""
    def __getattr__(self, fn):
        # Prefer cmath when the name exists there; non-callables (constants
        # such as pi/e) come straight from the real-math module.
        mathmod = cmath if hasattr(cmath,fn) else rmath
        return MathSelector(fn) if callable(getattr(mathmod,fn)) else getattr(rmath,fn)
# Module-wide 'math' is this dispatching facade, not the stdlib module.
math = Math()
class SeriousFunction(object):
    """A snippet of Serious code that can be invoked like a function."""

    def __init__(self, code):
        self.code = code

    def __call__(self, srs):
        # Run our code on the given interpreter, suppressing its final print.
        srs.eval(self.code, print_at_end=False)

    def __str__(self):
        return '{0}'.format(self.code)

    def __repr__(self):
        # Backtick-quoted, mirroring Serious source syntax for functions.
        return '`{0}`'.format(self.code)

    def __len__(self):
        return len(self.code)

    def __add__(self, other):
        # Concatenating two functions concatenates their code.
        return SeriousFunction(self.code + other.code)

    def __mul__(self, other):
        return SeriousFunction(self.code * other)

    def __mod__(self, other):
        return SeriousFunction(self.code % other)
def NinetyNineBottles():
    """Return the full lyrics of '99 Bottles of Beer' as one string.

    Each verse is two lines followed by a blank line; the final verse ends
    with the 'Go to the store' line resetting the count to 99.
    """
    total = 99
    verses = []
    for i in range(total):
        left = total - i
        first = ('{0} bottle{1} of beer on the wall, '
                 '{0} bottle{1} of beer.').format(left, 's' if left != 1 else '')
        if i == total - 1:
            second = ('Go to the store and buy some more, '
                      '{0} bottles of beer on the wall.').format(total)
        else:
            second = ('Take one down and pass it around, '
                      '{0} bottle{1} of beer on the wall.').format(
                          left - 1, 's' if left != 2 else '')
        verses.append(first + '\n' + second + '\n\n')
    return ''.join(verses)
def _sum(data, start=None):
if any([type(x) in [FloatType, ComplexType] for x in data]):
return math.fsum(data)+start
if start is None:
return sum(data)
else:
return sum(data, start)
def median(data):
    """Median of a sequence; even lengths average the two middle items
    via _sum (assumes *data* is already sorted)."""
    count = len(data)
    mid = count // 2
    if count % 2:
        return data[mid]
    return _sum(data[mid - 1:mid + 1]) / 2
def naive_factorial(x):
    """Iterative factorial fallback (used when math/cmath lack one)."""
    total = 1
    while x:
        total *= x
        x -= 1
    return total
def nCr(n, k):
    """Binomial coefficient C(n, k); 0 when k > n.

    Bug fix: the old loop accumulated with true division (this file has
    ``from __future__ import division``), so large inputs lost float
    precision before the final int().  Multiply-then-floor-divide keeps
    every intermediate value an exact integer.
    """
    if k > n:
        return 0
    if k == n:
        return 1
    res = 1
    for i in range(1, k + 1):
        # res * (n-k+i) is always divisible by i here, since the running
        # value equals C(n-k+i, i) after each step.
        res = res * (n - k + i) // i
    return res
def nPr(n, k):
    """Number of k-permutations of n items (0 when k exceeds n)."""
    return 0 if k > n else nCr(n, k) * math.factorial(k)
def is_prime(x):
    """Return 1 if x is prime, else 0, trial-dividing with the prime cache.

    Bug fix: the fallback loop over candidate divisors above the cached
    maximum never advanced ``n``, hanging forever whenever x had no small
    divisor; ``n`` now steps through the odd candidates.
    """
    global primes
    if x in primes:
        return 1
    if x < 2 or (max(primes) > x):
        # Below the cached maximum, anything not in the cache is composite
        # (the cache is filled with every prime found, in order).
        return 0
    for p in filter(lambda p: p * p <= x, primes):
        if x % p == 0:
            return 0
    # max(primes) is odd once the cache grows past [2, 3], so n+2 walks
    # the odd candidates.
    n = max(primes) + 2
    while n * n <= x:
        if x % n == 0:
            return 0
        n += 2
    return 1
def init_primes_up_to(n):
    """Extend the global prime cache with every prime strictly below n.

    No-op when the cache already reaches past n; otherwise scans the odd
    candidates just above the current maximum.
    """
    global primes
    if max(primes) > n:
        return
    # max(primes) is odd (the cache starts at [2, 3]), so stepping by 2
    # visits only odd candidates.
    x = max(primes)+2
    while x < n:
        if is_prime(x):
            primes.append(x)
        x+=2
init_primes_up_to(100)
def nth_prime(n):
    """Zero-based n-th prime, growing the cache in blocks of 100 as needed."""
    global primes
    while len(primes)<=n:
        init_primes_up_to(max(primes)+100)
    return primes[n]
def Fib_index(n):
    """Index of n in the Fibonacci sequence, or -1 when n is not a
    Fibonacci number."""
    idx = 0
    while Fib(idx) < n:
        idx += 1
    return idx if Fib(idx) == n else -1
def div_fn(srs):
a=srs.pop()
if type(a) is ListType:
srs.push(a[-1:]+a[:-1])
elif type(a) in [IntType, LongType, FloatType, ComplexType]:
b=srs.pop()
srs.push(a/b)
else:
srs.push(a)
def idiv_fn(srs):
a=srs.pop()
if type(a) is ListType:
srs.push(a[1:]+a[:1])
elif type(a) in [IntType,LongType,FloatType,ComplexType]:
b=srs.pop()
srs.push(a//b)
else:
srs.push(a)
def dupe_fn(srs):
a=srs.pop()
srs.push(a)
srs.push(copy(a))
def rot2_fn(srs):
a,b=srs.pop(),srs.pop()
srs.push(a)
srs.push(b)
def deq_fn(srs):
a=srs.pop()
if type(a) is ListType:
b=a.pop(-1)
srs.push(a)
srs.push(b)
else:
srs.push(a)
def i_fn(srs):
a=srs.pop()
if type(a) is StringType and (all([c.isdigit() or c=='.' for c in a]) and a.count('.')<2):
srs.push(float(a))
elif type(a) is ListType:
for x in a[::-1]:
srs.push(x)
else:
srs.push(a)
def to_list_fn(srs):
srs.stack = [srs.stack]
def psh_fn(srs):
a=srs.pop()
b=srs.pop()
a=[b]+a
srs.push(a)
def p_fn(srs):
a=srs.pop()
if type(a) in [IntType, LongType]:
srs.push(is_prime(a))
elif type(a) is ListType:
b=a.pop(0)
srs.push(a)
srs.push(b)
else:
srs.push(a)
def enq_fn(srs):
a,b=srs.pop(),srs.pop()
a.append(b)
srs.push(a)
def flatten(lst):
return sum(([x] if not isinstance(x, list) else flatten(x) for x in lst), [])
def flat_explode_fn(srs):
    """Explode every stack item in place: strings become character lists,
    nested lists are flattened; other values pass through unchanged.

    Bug fix: ``a.split('')`` always raises ValueError (an empty separator
    is illegal); ``list(a)`` is the way to split a string into characters.
    """
    tmp = []
    while len(srs.stack) > 0:
        a = srs.pop()
        if type(a) is StringType:
            a = list(a)
        elif type(a) is ListType:
            a = flatten(a)
        tmp.append(a)
    srs.stack = tmp[:]
def nrrot_fn(srs):
    """Rotate the whole stack by n positions (n popped from the stack).

    Bug fix: the count was popped from an undefined name ``x`` (NameError
    at runtime); it must come from ``srs``.
    """
    a = srs.pop()
    srs.stack = srs.stack[a:] + srs.stack[:a]
def nlrot_fn(srs):
    """Rotate the whole stack the other way by n (n popped from the stack).

    Bug fix: popped from the undefined name ``x`` instead of ``srs``.
    NOTE(review): stack[:a] + stack[a:] reassembles the stack unchanged;
    the intended rotation may be stack[-a:] + stack[:-a] -- confirm
    against the Seriously command spec before changing it.
    """
    a = srs.pop()
    srs.stack = srs.stack[:a] + srs.stack[a:]
def ins_top_fn(srs):
a=srs.pop()
b=srs.pop()
srs.stack=srs.stack[:a]+[b]+srs.stack[a:]
def ins_bot_fn(srs):
a=srs.pop()
b=srs.pop()
srs.stack=srs.stack[:-a]+[b]+srs.stack[-a:]
def dupe_all_fn(srs):
srs.stack=[copy(x) for x in srs.stack[:]]+srs.stack[:]
def dupe_each_fn(srs):
tmp=[]
while len(srs.stack)>0:
a=srs.pop()
tmp.append(a)
tmp.append(copy(a))
srs.stack=tmp[:]
def lr_fn(srs):
a=srs.pop()
if type(a) is StringType:
map(srs.push,a[::-1])
elif type(a) in [IntType, LongType]:
srs.push(range(a))
def s_fn(srs):
a=srs.pop()
if type(a) is StringType:
b=srs.pop()
if type(b) is ListType:
try:
b=''.join(b)
except TypeError:
b=''.join(map(repr,b))
if not type(b) in [StringType,ListType]:
b=repr(b)
srs.push([''.join(list(g)) for k,g in itertools.groupby(a,lambda x:x in b) if not k])
elif type(a) is ListType:
b=srs.pop()
if not type(b) in [StringType,ListType]:
b=[b]
srs.push([list(g) for k,g in itertools.groupby(a,lambda x:x in b) if not k])
else:
srs.push(1 if a>0 else -1 if a<0 else 0)
def if_fn(srs):
a,b,c=srs.pop(),srs.pop(),srs.pop()
srs.push(b if a else c)
def invert_fn(srs):
srs.stack=srs.stack[::-1]
def comp_fn(srs):
    """Build complex numbers: a list is consumed pairwise as (re, im)
    values; a real number is combined with the next stack item as
    complex(a, b); anything else is pushed back untouched.

    Bug fix: the odd-length check read ``a % 2`` on a list (TypeError);
    the parity of ``len(a)`` decides whether to pad a trailing 0
    imaginary part.
    """
    a = srs.pop()
    if type(a) is ListType:
        a = a + [0] if len(a) % 2 else a
        while len(a) > 0:
            r, i = a.pop(0), a.pop(0)
            srs.push(complex(r, i))
    elif type(a) in [IntType, LongType, FloatType]:
        b = srs.pop()
        srs.push(complex(a, b))
    else:
        srs.push(a)
def M_fn(srs):
a=srs.pop()
if type(a) in [StringType,ListType]:
srs.push(max(a))
else:
b=srs.pop()
res=[]
for x in b:
s = srs.make_new(x)
a(s)
res+=s.stack
srs.push(res)
def r_fn(srs):
a=srs.pop()
if isinstance(a,SeriousFunction):
b=srs.pop()
s=srs.make_new(*b)
a(s)
srs.push(s.stack)
elif type(a) in [StringType,ListType]:
srs.push(a[::-1])
else:
srs.push(range(1,a+1))
def n_fn(srs):
a,b=srs.pop(),srs.pop()
for i in range(b):
if isinstance(a, SeriousFunction):
a(srs)
else:
srs.push(a)
def full_factor(n):
global primes
init_primes_up_to(n)
res=[]
for p in filter(lambda x:x<=n,primes):
a=0
while n%p==0:
a+=1
n//=p
if a:
res.append([p,a])
return res
def factor(n):
return [a for a,b in full_factor(n)]
def mod_fn(srs):
a=srs.pop()
b=srs.pop()
if type(a) is StringType or isinstance(a,SeriousFunction):
srs.push(a%tuple(b))
else:
srs.push(a%b)
def f_fn(srs):
a=srs.pop()
if type(a) is StringType:
b=srs.pop()
srs.push(a.format(*b))
else:
srs.push(Fib_index(a))
def make_list_fn(srs):
a=srs.pop()
res=a
try:
res=list(a)
except:
res=[a]
srs.push(res)
def j_fn(srs):
a=srs.pop()
if type(a) in [ListType, StringType]:
srs.push(random.choice(a))
else:
srs.push(random.randrange(a))
def star_fn(srs):
a=srs.pop()
b=srs.pop()
if type(a) is ListType and type(b) is not ListType:
srs.push(map(lambda x:x*b,a))
elif type(b) is ListType and type(a) is not ListType:
srs.push(map(lambda x:x*a,b))
elif type(a) == type(b) == ListType:
if(len(b) > len(a)):
a,b=b,a
while len(b) < len(a):
b.append(0)
srs.push(_sum([prod(x) for x in zip(a,b)]))
else:
srs.push(a*b)
def plus_fn(srs):
a=srs.pop()
b=srs.pop()
if type(a) is ListType and type(b) in [IntType, LongType, ComplexType, FloatType]:
srs.push(map(lambda x:x+b,a))
elif type(b) is ListType and type(a) in [IntType, LongType, ComplexType, FloatType]:
srs.push(map(lambda x:x+a,b))
else:
srs.push(a+b)
def digit_to_char(digit):
if digit < 10:
return str(digit)
return chr(ord('a') + digit - 10)
def str_base(number,base):
if number < 0:
return '-' + str_base(-number, base)
(d, m) = divmod(number, base)
if d > 0:
return str_base(d, base) + digit_to_char(m)
return digit_to_char(m)
def i_mul_fn(srs):
a=srs.pop()
if type(a) is ListType:
srs.push(map(lambda x:complex(0,x),a))
else:
srs.push(complex(0,a))
def npop_list_fn(srs):
a=srs.pop()
res=[]
for _ in range(a):
res.append(srs.pop())
srs.push(res)
def E_fn(srs):
a=srs.pop()
if type(a) in [IntType,LongType,FloatType,ComplexType]:
srs.push(math.erf(a))
else:
b=srs.pop()
srs.push(a[b])
def peek_print_fn(srs):
print(' '.join(map(repr, srs.stack)))
def while_fn(srs):
f=srs.pop()
while srs.peek():
f(srs)
def dupe_each_n_fn(srs):
a=srs.pop()
tmp = []
while srs.stack:
b = srs.pop()
tmp+=[b for i in range(a)]
srs.stack=tmp[:]
def S_fn(srs):
a=srs.pop()
if type(a) is StringType:
srs.push(''.join(sorted(a)))
elif type(a) is ListType:
srs.push(sorted(a))
else:
srs.push(math.sin(a))
def print_all_fn(srs):
while srs.stack:
print(srs.pop())
def zip_fn(srs):
a=srs.pop()
if type(a) in [ListType,StringType]:
b=srs.pop()
srs.push(map(list,[filter(lambda x:x is not None,zlist) for zlist in itertools.izip_longest(a,b)]))
else:
lists = [srs.pop() for i in range(a)]
srs.push(map(list,[filter(lambda x:x is not None,zlist) for zlist in itertools.izip_longest(*lists)]))
def sum_fn(srs):
a=srs.pop()
res = _sum(a,start=type(a[0])()) if type(a[0]) is not StringType else ''.join(map(str,a))
srs.push(res)
def index_fn(srs):
b,a=srs.pop(),srs.pop()
if a in b:
srs.push(b.index(a))
else:
srs.push(-1)
def cond_quit_fn(srs):
a=srs.pop() if srs.stack else None
if a:
srs.push(a)
else:
exit()
def median_fn(srs):
a=srs.pop()
if len(a)%2:
srs.push(a[len(a)//2])
else:
if all([type(x) is StringType for x in a[len(a)//2-1:][:2]]):
med = median(map(ord,a))
srs.push(chr(med))
else:
srs.push(median(a))
def c_fn(srs):
a=srs.pop()
if type(a) in [ListType,StringType]:
b=srs.pop()
srs.push(a.count(b))
else:
srs.push(chr(a%256))
def exit_fn(srs):
exit()
# Numbered storage cells shared by the register opcodes.
registers = dict()

def get_reg(i):
    """Return the value stored in register *i* (KeyError if unset)."""
    return registers[i]

def set_reg(i, val):
    """Store *val* in register *i*."""
    registers[i] = val
def diff_fn(srs):
a,b=srs.pop(),srs.pop()
if all([type(x) in [ListType,StringType] for x in (a,b)]):
srs.push(filter(lambda x:x not in b, a))
else:
srs.push(a-b)
def m_fn(srs):
a=srs.pop()
if type(a) in [StringType,ListType]:
srs.push(min(a))
else:
srs.push(map(list,math.modf(a)))
def filter_types(iter,*types):
return filter(lambda x:type(x) in types, iter)
def inv_fil_fn(srs):
a=srs.pop()
if type(a) is ListType:
srs.push(filter_types(a,IntType,LongType,FloatType,ComplexType))
else:
srs.push(1/a)
def AE_fn(srs):
a=srs.pop()
if type(a) is ListType:
srs.push(filter_types(a,StringType))
else:
b,c=srs.pop(),srs.pop()
srs.push(a.replace(b,c))
def fn_fil_fn(srs):
a=srs.pop()
if type(a) is ListType:
srs.push(filter(lambda x:isinstance(x,SeriousFunction),a))
else:
srs.push(SeriousFunction(a))
def get_input_fn(srs):
    """Read one line of input, eval it as a Python expression when possible,
    and push the result (tuples become lists; unparsable input is pushed as
    the raw string).

    NOTE(review): eval() on raw user input executes arbitrary code; if this
    interpreter ever processes untrusted input, switch to
    ast.literal_eval.
    """
    a=raw_input()
    try:
        b = eval(a)
        b = list(b) if type(b) is TupleType else b
    except:
        b = a
    finally:
        srs.push(b)
fn_table={
0x09:lambda x:x.push(sys.stdin.read(1)),
0x0C:lambda x:x.push(sys.stdin.read()),
0x20:lambda x:x.push(len(x.stack)),
0x21:lambda x:x.push(math.factorial(x.pop())),
0x23:make_list_fn,
0x24:lambda x:x.push(str(x.pop())),
0x25:mod_fn,
0x26:lambda x:x.push(x.pop() & x.pop()),
0x28:lambda x:x.push(x.stack.pop(-1)),
0x29:lambda x:x.append(x.pop()),
0x2A:star_fn,
0x2B:plus_fn,
0x2C:get_input_fn,
0x2D:diff_fn,
0x2E:lambda x:(lambda y:print(y) if not isinstance(y,SeriousFunction) else y(x) or print(x.pop()))(x.pop()),
0x2F:div_fn,
0x3B:dupe_fn,
0x3C:lambda x:x.push(int(x.pop()<x.pop())),
0x3D:lambda x:x.push(int(x.pop()==x.pop())),
0x3E:lambda x:x.push(int(x.pop()>x.pop())),
0x3F:lambda x:x,
0x40:rot2_fn,
0x41:lambda x:x.push(abs(x.pop())),
0x42:lambda x:x.push(random.randrange(x.pop(),x.pop())),
0x43:lambda x:x.push(math.cos(x.pop())),
0x44:lambda x:x.push(x.pop()-1),
0x45:E_fn,
0x46:lambda x:x.push(Fib(x.pop())),
0x47:lambda x:x.push(random.random()),
0x48:lambda x:x.push("Hello, World!"),
0x49:if_fn,
0x4A:j_fn,
0x4B:lambda x:x.push(math.ceil(x.pop())),
0x4C:lambda x:x.push(math.floor(x.pop())),
0x4D:M_fn,
0x4E:lambda x:x.push(NinetyNineBottles()),
0x4F:lambda x:map(lambda y:map(x.push,map(ord,y)[::-1]),x.pop()[::-1]),
0x50:lambda x:x.push(nth_prime(x.pop())),
0x51:lambda x:x.push(x.code),
0x52:r_fn,
0x53:S_fn,
0x54:lambda x:x.push(math.tan(x.pop())),
0x55:lambda x:x.push(list(set(x.pop()).union(x.pop()))),
0x56:lambda x:x.push(random.uniform(x.pop(),x.pop())),
0x58:lambda x:x.pop(),
0x59:lambda x:x.push(0 if x.pop() else 1),
0x5A:zip_fn,
0x5C:idiv_fn,
0x5E:lambda x:x.push(pow(x.pop(),x.pop())),
0x5F:lambda x:x.push(math.log(x.pop())),
0x61:invert_fn,
0x62:lambda x:x.push(int(bool(x.pop()))),
0x63:c_fn,
0x64:deq_fn,
0x65:lambda x:x.push(math.exp(x.pop())),
0x66:f_fn,
0x67:lambda x:x.push(gcd(x.pop(),x.pop())),
0x68:lambda x:x.push(math.hypot(x.pop(),x.pop())),
0x69:i_fn,
0x6A:lambda x:x.push(str.join(x.pop(),map(str,x.pop()))),
0x6B:to_list_fn,
0x6C:lambda x:x.push(len(x.pop())),
0x6D:m_fn,
0x6E:n_fn,
0x6F:psh_fn,
0x70:p_fn,
0x71:enq_fn,
0x72:lr_fn,
0x73:s_fn,
0x74:flat_explode_fn,
0x75:lambda x:x.push(x.pop()+1),
0x76:lambda x:random.seed(x.pop()),
0x77:lambda x:x.push(full_factor(x.pop())),
0x78:lambda x:x.push(range(x.pop(),x.pop())),
0x79:lambda x:x.push(factor(x.pop())),
0x7A:lambda x:map(x.eval,(lambda y:['.' for _ in range(y)])(x.pop())),
0x7B:nrrot_fn,
0x7C:lambda x:x.push(x.pop() | x.pop()),
0x7D:nlrot_fn,
0x7E:lambda x:x.push(~x.pop()),
0x7F:exit_fn,
0x80:comp_fn,
0x81:print_all_fn,
0x82:lambda x:map(lambda y:x.pop(), range(len(x.stack))),
0x83:lambda x:x.push(math.asin(x.pop())),
0x84:lambda x:x.push(math.acos(x.pop())),
0x85:lambda x:x.push(math.atan(x.pop())),
0x86:lambda x:x.push(math.atan2(x.pop(),x.pop())),
0x87:lambda x:x.push(math.asinh(x.pop())),
0x88:lambda x:x.push(math.acosh(x.pop())),
0x89:lambda x:x.push(math.atanh(x.pop())),
0x8A:lambda x:x.push(repr(x.pop())),
0x8B:lambda x:x.push(complex(0,1)),
0x8C:i_mul_fn,
0x8D:inv_fil_fn,
0x8E:lambda x:x.push(math.sinh(x.pop())),
0x8F:lambda x:x.push(math.cosh(x.pop())),
0x90:lambda x:x.push(math.tanh(x.pop())),
0x91:lambda x:x.push((lambda y:mean(y) if y else 0)(x.pop())),
0x92:AE_fn,
0x93:lambda x:x.push(x.pop().strip()),
0x94:lambda x:x.push(x.pop().lstrip()),
0x95:lambda x:x.push(x.pop().rstrip()),
0x96:lambda x:x.push(x.pop().upper()),
0x97:lambda x:x.push(x.pop().lower()),
0x98:lambda x:x.push(x.pop().title()),
0x99:lambda x:x.push(x.pop().swapcase()),
0x9A:lambda x:x.push((lambda y:max(y,key=y.count))(x.pop())),
0x9B:lambda x:x.push(math.copysign(x.pop(),x.pop())),
0x9C:fn_fil_fn,
0x9D:lambda x:x.push(map(operator.add,itertools.izip_longest(x.pop(),x.pop(),fillvalue=0))),
0x9E:lambda x:x.push(cmath.phase(x.pop())),
0x9F:lambda x:x.pop()(x),
0xA0:lambda x:x.push(x.pop().conjugate()),
0xA1:index_fn,
0xA2:cond_quit_fn,
0xA3:lambda x:x.push(''.join(map(chr,range(97,122+1)))),
0xA4:lambda x:x.push(map(list,enumerate(x.pop()))),
0xA5:lambda x:x.push(filter_types(x.pop(),ListType)),
0xA6:lambda x:x.push(x.pop()**2),
0xA7:lambda x:x.push(math.degrees(x.pop())),
0xA8:lambda x:x.push(int(x.pop(),x.pop())),
0xA9:lambda x:x.push(x.pop()+2),
0xAA:lambda x:x.push(x.pop()-2),
0xAB:lambda x:x.push(x.pop()/2),
0xAC:lambda x:x.push(x.pop()/4),
0xAD:lambda x:x.push(str_base(x.pop(),x.pop())),
0xAE:ins_bot_fn,
0xAF:ins_top_fn,
0xB0:lambda x:x.push(list(itertools.compress(x.pop(),x.pop()))),
0xB1:lambda x:x.push((lambda y:sum([1 if gcd(i,y)==1 else 0 for i in range(1,y+1)]))(x.pop())),
0xB2:lambda x:x.push(sum([is_prime(i) for i in range(1,x.pop()+1)])),
0xB3:dupe_all_fn,
0xB4:lambda x:x.push(1 if gcd(x.pop(),x.pop())==1 else 0),
0xB9:lambda x:x.push((lambda y:[nCr(y,k) for k in range(y+1)])(x.pop())),
0xBA:median_fn,
0xBB:lambda x:set_reg(0,x.pop()),
0xBC:lambda x:set_reg(1,x.pop()),
0xBD:lambda x:x.push(get_reg(0)),
0xBE:lambda x:x.push(get_reg(1)),
0xBF:lambda x:set_reg(x.pop(),x.pop()),
0xC0:lambda x:x.push(get_reg(x.pop())),
0xC5:dupe_each_fn,
0xC6:dupe_each_n_fn,
0xC7:npop_list_fn,
0xCB:lambda x:x.push(math.pi),
0xCC:lambda x:x.push(math.e),
0xCE:while_fn,
0xD1:lambda x:x.push(pow(10,x.pop())),
0xD2:lambda x:x.push(math.log(x.pop(),10)),
0xD3:lambda x:x.push(pow(2,x.pop())),
0xD4:lambda x:x.push(math.log(x.pop(),2)),
0xD5:lambda x:x.push(math.log(2)),
0xDB:lambda x:x.push(nCr(x.pop(),x.pop())),
0xDC:lambda x:x.push(nPr(x.pop(),x.pop())),
0xDD:lambda x:x.push(b64decode(x.pop())),
0xDE:lambda x:x.push(b64encode(x.pop())),
0xDF:lambda x:x.push(("0123456789"+string.ascii_uppercase+string.ascii_lowercase+"+/")[:x.pop()]),
0xE2:lambda x:x.push(math.gamma(x.pop())),
0xE3:lambda x:x.push(reduce(operator.mul,x.pop(),1)),
0xE4:sum_fn,
0xE7:lambda x:x.push(x.pop()*2),
0xED:lambda x:x.push(phi),
0xEE:lambda x:x.push(""),
0xEF:lambda x:x.push(list(set(x.pop()).intersection(x.pop()))),
0xF1:lambda x:x.push(-x.pop()),
0xF2:lambda x:x.push(x.pop()>=x.pop()),
0xF3:lambda x:x.push(x.pop()<=x.pop()),
0xF7:lambda x:x.push(int(x.pop())),
0xF8:lambda x:x.push(math.radians(x.pop())),
0xFB:lambda x:x.push(x.pop()**.5),
0xFE:peek_print_fn,
}
Revert the narrowed exception handling in MathSelector: the broad `except` is needed so out-of-domain real-math errors (e.g. ValueError from sqrt(-1)) fall back to cmath.
#!/usr/bin/env python
from __future__ import print_function, division
from fractions import gcd
import operator, cmath
import math as rmath
import random, itertools, sys, string
from types import *
from base64 import *
from copy import copy
def template_specialize(fname, *args):
if fname not in globals():
def raiseError(*args, **kwargs):
raise NotImplementedError("This type combination is unimplemented.")
globals()[fname] = raiseError
def template_specializer(func):
old_func = globals()[fname]
globals()[fname] = lambda *pargs: func(*pargs) if all(isinstance(a, t) for a, t in zip(pargs, args)) else old_func(*pargs)
return func
return template_specializer
phi = (1+5**.5)/2
def Fib(n):
if n<2:
return n
a,b=1,1
while n>2:
a,b,n=b,a+b,n-1
return b
def prod(iter):
    """Product of the elements of *iter* (1 when empty).

    Bug fix: ``reduce`` is not a builtin on Python 3; pull it from
    functools (works on Python 2 as well).
    """
    from functools import reduce
    return reduce(operator.mul, iter, 1)
primes = [2,3]
class MathSelector(object):
    """Callable that proxies a named function to the real-math module,
    retrying with cmath when the real-math call fails.

    NOTE(review): the first bare ``except`` appears deliberate (see the
    revision history): rmath raises AttributeError when the name only
    exists in cmath, but also ValueError for out-of-domain arguments
    (e.g. sqrt(-1)) that cmath can handle.  'factorial' gets a final
    pure-Python fallback since cmath has no factorial.
    """
    def __init__(self, fn):
        # fn: attribute name looked up on rmath/cmath at call time.
        self.fn = fn
    def __call__(self, *args, **kwargs):
        try:
            return getattr(rmath,self.fn)(*args, **kwargs)
        except:
            try:
                return getattr(cmath,self.fn)(*args, **kwargs)
            except Exception as e:
                if self.fn == 'factorial':
                    # cmath has no factorial; use the naive Python loop.
                    return naive_factorial(*args, **kwargs)
                else:
                    raise e
class Math(object):
def __getattr__(self, fn):
mathmod = cmath if hasattr(cmath,fn) else rmath
return MathSelector(fn) if callable(getattr(mathmod,fn)) else getattr(rmath,fn)
math = Math()
class SeriousFunction(object):
def __init__(self, code):
self.code = code
def __call__(self,srs):
srs.eval(self.code,print_at_end=False)
def __str__(self):
return '%s'%self.code
def __repr__(self):
return '`%s`'%self.code
def __len__(self):
return len(self.code)
def __add__(self,other):
return SeriousFunction(self.code+other.code)
def __mul__(self,other):
return SeriousFunction(self.code*other)
def __mod__(self,other):
return SeriousFunction(self.code%other)
def NinetyNineBottles():
x = 99
res = ''
for i in range(99):
w = 'Take one down and pass it around, '+str((x-(i+1)))+' bottle{0} of beer on the wall.'.format(['s',''][x-i==2])
y = str((x-i))+' bottle{0} of beer on the wall, '+str((x-i))+' bottle{0} of beer.'
y=y.format(['s',''][x-i==1])
z = 'Go to the store and buy some more, '+str(x)+' bottles of beer on the wall.'
if i == (x-1):
res += y + '\n' + z
else:
res += y + '\n' + w
i += 1
res += '\n\n'
return res
def _sum(data, start=None):
if any([type(x) in [FloatType, ComplexType] for x in data]):
return math.fsum(data)+start
if start is None:
return sum(data)
else:
return sum(data, start)
def median(data):
n = len(data)
if n%2 == 1:
return data[n//2]
else:
i = n//2-1
return _sum(data[i:i+2])/2
def naive_factorial(x):
res = 1
while x:
res *= x
x -= 1
return res
def nCr(n, k):
    """Binomial coefficient C(n, k); 0 when k > n.

    Bug fix: the previous float-division accumulation (true division is in
    force via ``from __future__ import division``) lost precision for
    large n; exact integer multiply-then-floor-divide replaces it.
    """
    if k > n:
        return 0
    if k == n:
        return 1
    res = 1
    for i in range(1, k + 1):
        # The running value equals C(n-k+i, i), so the division is exact.
        res = res * (n - k + i) // i
    return res
def nPr(n, k):
if k > n:
return 0
return nCr(n,k)*math.factorial(k)
def is_prime(x):
    """Return 1 if x is prime, else 0, trial-dividing with the prime cache.

    Bug fix: the fallback loop over candidates above the cached maximum
    never advanced ``n`` and hung forever; it now steps through the odd
    candidates.
    """
    global primes
    if x in primes:
        return 1
    if x < 2 or (max(primes) > x):
        # Below the cached maximum, anything not cached is composite.
        return 0
    for p in filter(lambda p: p * p <= x, primes):
        if x % p == 0:
            return 0
    # max(primes) is odd once the cache grows past [2, 3].
    n = max(primes) + 2
    while n * n <= x:
        if x % n == 0:
            return 0
        n += 2
    return 1
def init_primes_up_to(n):
global primes
if max(primes) > n:
return
x = max(primes)+2
while x < n:
if is_prime(x):
primes.append(x)
x+=2
init_primes_up_to(100)
def nth_prime(n):
global primes
while len(primes)<=n:
init_primes_up_to(max(primes)+100)
return primes[n]
def Fib_index(n):
i=0
while Fib(i)<n:
i+=1
return i if Fib(i) == n else -1
def div_fn(srs):
a=srs.pop()
if type(a) is ListType:
srs.push(a[-1:]+a[:-1])
elif type(a) in [IntType, LongType, FloatType, ComplexType]:
b=srs.pop()
srs.push(a/b)
else:
srs.push(a)
def idiv_fn(srs):
a=srs.pop()
if type(a) is ListType:
srs.push(a[1:]+a[:1])
elif type(a) in [IntType,LongType,FloatType,ComplexType]:
b=srs.pop()
srs.push(a//b)
else:
srs.push(a)
def dupe_fn(srs):
a=srs.pop()
srs.push(a)
srs.push(copy(a))
def rot2_fn(srs):
a,b=srs.pop(),srs.pop()
srs.push(a)
srs.push(b)
def deq_fn(srs):
a=srs.pop()
if type(a) is ListType:
b=a.pop(-1)
srs.push(a)
srs.push(b)
else:
srs.push(a)
def i_fn(srs):
a=srs.pop()
if type(a) is StringType and (all([c.isdigit() or c=='.' for c in a]) and a.count('.')<2):
srs.push(float(a))
elif type(a) is ListType:
for x in a[::-1]:
srs.push(x)
else:
srs.push(a)
def to_list_fn(srs):
srs.stack = [srs.stack]
def psh_fn(srs):
a=srs.pop()
b=srs.pop()
a=[b]+a
srs.push(a)
def p_fn(srs):
a=srs.pop()
if type(a) in [IntType, LongType]:
srs.push(is_prime(a))
elif type(a) is ListType:
b=a.pop(0)
srs.push(a)
srs.push(b)
else:
srs.push(a)
def enq_fn(srs):
a,b=srs.pop(),srs.pop()
a.append(b)
srs.push(a)
def flatten(lst):
return sum(([x] if not isinstance(x, list) else flatten(x) for x in lst), [])
def flat_explode_fn(srs):
    """Explode every stack item in place: strings become character lists,
    nested lists are flattened; other values pass through unchanged.

    Bug fix: ``a.split('')`` always raises ValueError; ``list(a)`` splits
    a string into characters.
    """
    tmp = []
    while len(srs.stack) > 0:
        a = srs.pop()
        if type(a) is StringType:
            a = list(a)
        elif type(a) is ListType:
            a = flatten(a)
        tmp.append(a)
    srs.stack = tmp[:]
def nrrot_fn(srs):
    """Rotate the whole stack by n positions (n popped from the stack).

    Bug fix: the count was popped from an undefined name ``x``; it must
    come from ``srs``.
    """
    a = srs.pop()
    srs.stack = srs.stack[a:] + srs.stack[:a]
def nlrot_fn(srs):
    """Rotate the whole stack the other way by n (n popped from the stack).

    Bug fix: popped from the undefined name ``x`` instead of ``srs``.
    NOTE(review): stack[:a] + stack[a:] leaves the stack unchanged; the
    intended rotation may be stack[-a:] + stack[:-a] -- confirm against
    the Seriously command spec.
    """
    a = srs.pop()
    srs.stack = srs.stack[:a] + srs.stack[a:]
def ins_top_fn(srs):
a=srs.pop()
b=srs.pop()
srs.stack=srs.stack[:a]+[b]+srs.stack[a:]
def ins_bot_fn(srs):
a=srs.pop()
b=srs.pop()
srs.stack=srs.stack[:-a]+[b]+srs.stack[-a:]
def dupe_all_fn(srs):
srs.stack=[copy(x) for x in srs.stack[:]]+srs.stack[:]
def dupe_each_fn(srs):
tmp=[]
while len(srs.stack)>0:
a=srs.pop()
tmp.append(a)
tmp.append(copy(a))
srs.stack=tmp[:]
def lr_fn(srs):
a=srs.pop()
if type(a) is StringType:
map(srs.push,a[::-1])
elif type(a) in [IntType, LongType]:
srs.push(range(a))
def s_fn(srs):
a=srs.pop()
if type(a) is StringType:
b=srs.pop()
if type(b) is ListType:
try:
b=''.join(b)
except TypeError:
b=''.join(map(repr,b))
if not type(b) in [StringType,ListType]:
b=repr(b)
srs.push([''.join(list(g)) for k,g in itertools.groupby(a,lambda x:x in b) if not k])
elif type(a) is ListType:
b=srs.pop()
if not type(b) in [StringType,ListType]:
b=[b]
srs.push([list(g) for k,g in itertools.groupby(a,lambda x:x in b) if not k])
else:
srs.push(1 if a>0 else -1 if a<0 else 0)
def if_fn(srs):
a,b,c=srs.pop(),srs.pop(),srs.pop()
srs.push(b if a else c)
def invert_fn(srs):
srs.stack=srs.stack[::-1]
def comp_fn(srs):
    """Build complex numbers: a list is consumed pairwise as (re, im)
    values; a real number is combined with the next stack item as
    complex(a, b); anything else is pushed back untouched.

    Bug fix: ``a % 2`` on a list is a TypeError; the parity of ``len(a)``
    decides whether to pad a trailing 0 imaginary part.
    """
    a = srs.pop()
    if type(a) is ListType:
        a = a + [0] if len(a) % 2 else a
        while len(a) > 0:
            r, i = a.pop(0), a.pop(0)
            srs.push(complex(r, i))
    elif type(a) in [IntType, LongType, FloatType]:
        b = srs.pop()
        srs.push(complex(a, b))
    else:
        srs.push(a)
def M_fn(srs):
a=srs.pop()
if type(a) in [StringType,ListType]:
srs.push(max(a))
else:
b=srs.pop()
res=[]
for x in b:
s = srs.make_new(x)
a(s)
res+=s.stack
srs.push(res)
def r_fn(srs):
a=srs.pop()
if isinstance(a,SeriousFunction):
b=srs.pop()
s=srs.make_new(*b)
a(s)
srs.push(s.stack)
elif type(a) in [StringType,ListType]:
srs.push(a[::-1])
else:
srs.push(range(1,a+1))
def n_fn(srs):
a,b=srs.pop(),srs.pop()
for i in range(b):
if isinstance(a, SeriousFunction):
a(srs)
else:
srs.push(a)
def full_factor(n):
global primes
init_primes_up_to(n)
res=[]
for p in filter(lambda x:x<=n,primes):
a=0
while n%p==0:
a+=1
n//=p
if a:
res.append([p,a])
return res
def factor(n):
return [a for a,b in full_factor(n)]
def mod_fn(srs):
a=srs.pop()
b=srs.pop()
if type(a) is StringType or isinstance(a,SeriousFunction):
srs.push(a%tuple(b))
else:
srs.push(a%b)
def f_fn(srs):
a=srs.pop()
if type(a) is StringType:
b=srs.pop()
srs.push(a.format(*b))
else:
srs.push(Fib_index(a))
def make_list_fn(srs):
a=srs.pop()
res=a
try:
res=list(a)
except:
res=[a]
srs.push(res)
def j_fn(srs):
a=srs.pop()
if type(a) in [ListType, StringType]:
srs.push(random.choice(a))
else:
srs.push(random.randrange(a))
def star_fn(srs):
a=srs.pop()
b=srs.pop()
if type(a) is ListType and type(b) is not ListType:
srs.push(map(lambda x:x*b,a))
elif type(b) is ListType and type(a) is not ListType:
srs.push(map(lambda x:x*a,b))
elif type(a) == type(b) == ListType:
if(len(b) > len(a)):
a,b=b,a
while len(b) < len(a):
b.append(0)
srs.push(_sum([prod(x) for x in zip(a,b)]))
else:
srs.push(a*b)
def plus_fn(srs):
a=srs.pop()
b=srs.pop()
if type(a) is ListType and type(b) in [IntType, LongType, ComplexType, FloatType]:
srs.push(map(lambda x:x+b,a))
elif type(b) is ListType and type(a) in [IntType, LongType, ComplexType, FloatType]:
srs.push(map(lambda x:x+a,b))
else:
srs.push(a+b)
def digit_to_char(digit):
if digit < 10:
return str(digit)
return chr(ord('a') + digit - 10)
def str_base(number,base):
if number < 0:
return '-' + str_base(-number, base)
(d, m) = divmod(number, base)
if d > 0:
return str_base(d, base) + digit_to_char(m)
return digit_to_char(m)
def i_mul_fn(srs):
a=srs.pop()
if type(a) is ListType:
srs.push(map(lambda x:complex(0,x),a))
else:
srs.push(complex(0,a))
def npop_list_fn(srs):
a=srs.pop()
res=[]
for _ in range(a):
res.append(srs.pop())
srs.push(res)
def E_fn(srs):
a=srs.pop()
if type(a) in [IntType,LongType,FloatType,ComplexType]:
srs.push(math.erf(a))
else:
b=srs.pop()
srs.push(a[b])
def peek_print_fn(srs):
print(' '.join(map(repr, srs.stack)))
def while_fn(srs):
f=srs.pop()
while srs.peek():
f(srs)
def dupe_each_n_fn(srs):
a=srs.pop()
tmp = []
while srs.stack:
b = srs.pop()
tmp+=[b for i in range(a)]
srs.stack=tmp[:]
def S_fn(srs):
a=srs.pop()
if type(a) is StringType:
srs.push(''.join(sorted(a)))
elif type(a) is ListType:
srs.push(sorted(a))
else:
srs.push(math.sin(a))
def print_all_fn(srs):
while srs.stack:
print(srs.pop())
def zip_fn(srs):
a=srs.pop()
if type(a) in [ListType,StringType]:
b=srs.pop()
srs.push(map(list,[filter(lambda x:x is not None,zlist) for zlist in itertools.izip_longest(a,b)]))
else:
lists = [srs.pop() for i in range(a)]
srs.push(map(list,[filter(lambda x:x is not None,zlist) for zlist in itertools.izip_longest(*lists)]))
def sum_fn(srs):
    # Sum a popped list.  Non-string elements go through _sum, seeded with an
    # empty value of the first element's type (so a list of lists concatenates
    # rather than failing on 0 + []); strings are joined instead.
    # NOTE(review): assumes the list is non-empty (a[0]) -- confirm callers.
    a=srs.pop()
    res = _sum(a,start=type(a[0])()) if type(a[0]) is not StringType else ''.join(map(str,a))
    srs.push(res)
def index_fn(srs):
    """Push the first index of a within b, or -1 when absent (pops b, then a)."""
    b, a = srs.pop(), srs.pop()
    srs.push(b.index(a) if a in b else -1)
def cond_quit_fn(srs):
    """Terminate unless the (optional) top of stack is truthy; truthy values stay."""
    top = srs.pop() if srs.stack else None
    if not top:
        exit()
    srs.push(top)
def median_fn(srs):
    # Median of a popped list.  Odd length: push the middle element verbatim.
    # Even length: average the two middle values; when both middle entries are
    # strings (presumably single characters -- confirm), average their code
    # points and push the resulting character instead.
    a=srs.pop()
    if len(a)%2:
        srs.push(a[len(a)//2])
    else:
        if all([type(x) is StringType for x in a[len(a)//2-1:][:2]]):
            med = median(map(ord,a))
            srs.push(chr(med))
        else:
            srs.push(median(a))
def c_fn(srs):
    """Count occurrences of a popped needle in a list/string; else chr(a mod 256)."""
    haystack = srs.pop()
    if type(haystack) in [ListType, StringType]:
        needle = srs.pop()
        srs.push(haystack.count(needle))
    else:
        srs.push(chr(haystack % 256))
def exit_fn(srs):
    # Unconditionally terminate the interpreter (the 0x7F command).
    exit()
# Global numbered-register storage shared by the register commands below.
registers = dict()
def get_reg(i):
    """Return the value stored in register i (KeyError when never set)."""
    return registers[i]
def set_reg(i, val):
    """Store val in register i, creating the register on first use."""
    registers[i] = val
def diff_fn(srs):
    """Set-style difference (elements of a not in b) for lists/strings, else a-b."""
    a, b = srs.pop(), srs.pop()
    if all([type(operand) in [ListType, StringType] for operand in (a, b)]):
        srs.push(filter(lambda element: element not in b, a))
    else:
        srs.push(a - b)
def m_fn(srs):
    """min() for a string/list; otherwise push [fractional, integer] parts.

    Bug fix: ``math.modf`` returns a *pair of floats*, so the original
    ``map(list, math.modf(a))`` raised TypeError (a float is not iterable);
    ``list(math.modf(a))`` is the intended tuple-to-list conversion.
    """
    a = srs.pop()
    if type(a) in [StringType, ListType]:
        srs.push(min(a))
    else:
        srs.push(list(math.modf(a)))
def filter_types(iter, *types):
    """Keep only the elements of `iter` whose exact type is listed in `types`."""
    return filter(lambda element: type(element) in types, iter)
def inv_fil_fn(srs):
    """Keep only numeric elements of a list; push the reciprocal of a scalar."""
    a = srs.pop()
    if type(a) is ListType:
        srs.push(filter_types(a, IntType, LongType, FloatType, ComplexType))
    else:
        srs.push(1 / a)
def AE_fn(srs):
    """Keep only string elements of a list; otherwise pop b, c and push a.replace(b, c)."""
    a = srs.pop()
    if type(a) is ListType:
        srs.push(filter_types(a, StringType))
    else:
        old, new = srs.pop(), srs.pop()
        srs.push(a.replace(old, new))
def fn_fil_fn(srs):
    """Keep only SeriousFunction elements of a list; else wrap the value in one."""
    a = srs.pop()
    if type(a) is ListType:
        srs.push(filter(lambda element: isinstance(element, SeriousFunction), a))
    else:
        srs.push(SeriousFunction(a))
def get_input_fn(srs):
    # Read one line of input and try to eval() it as a Python expression
    # (tuples become lists); on any failure push the raw string instead.
    # SECURITY NOTE(review): eval() on stdin executes arbitrary code.  That is
    # arguably by design for this golfing-language REPL, but do not reuse this
    # pattern on untrusted input elsewhere.
    a=raw_input()
    try:
        b = eval(a)
        b = list(b) if type(b) is TupleType else b
    except:
        b = a
    finally:
        srs.push(b)
# Dispatch table mapping the Seriously language's one-byte command codes to
# their implementations.  Each value is either a lambda taking the interpreter
# state ("x"/"srs") or one of the named command functions defined above.
# NOTE(review): Python 2 code -- map/filter return lists, and several entries
# rely on pop order for their two-argument commands.
fn_table={
0x09:lambda x:x.push(sys.stdin.read(1)),
0x0C:lambda x:x.push(sys.stdin.read()),
0x20:lambda x:x.push(len(x.stack)),
0x21:lambda x:x.push(math.factorial(x.pop())),
0x23:make_list_fn,
0x24:lambda x:x.push(str(x.pop())),
0x25:mod_fn,
0x26:lambda x:x.push(x.pop() & x.pop()),
0x28:lambda x:x.push(x.stack.pop(-1)),
0x29:lambda x:x.append(x.pop()),
0x2A:star_fn,
0x2B:plus_fn,
0x2C:get_input_fn,
0x2D:diff_fn,
0x2E:lambda x:(lambda y:print(y) if not isinstance(y,SeriousFunction) else y(x) or print(x.pop()))(x.pop()),
0x2F:div_fn,
0x3B:dupe_fn,
0x3C:lambda x:x.push(int(x.pop()<x.pop())),
0x3D:lambda x:x.push(int(x.pop()==x.pop())),
0x3E:lambda x:x.push(int(x.pop()>x.pop())),
0x3F:lambda x:x,
0x40:rot2_fn,
0x41:lambda x:x.push(abs(x.pop())),
0x42:lambda x:x.push(random.randrange(x.pop(),x.pop())),
0x43:lambda x:x.push(math.cos(x.pop())),
0x44:lambda x:x.push(x.pop()-1),
0x45:E_fn,
0x46:lambda x:x.push(Fib(x.pop())),
0x47:lambda x:x.push(random.random()),
0x48:lambda x:x.push("Hello, World!"),
0x49:if_fn,
0x4A:j_fn,
0x4B:lambda x:x.push(math.ceil(x.pop())),
0x4C:lambda x:x.push(math.floor(x.pop())),
0x4D:M_fn,
0x4E:lambda x:x.push(NinetyNineBottles()),
0x4F:lambda x:map(lambda y:map(x.push,map(ord,y)[::-1]),x.pop()[::-1]),
0x50:lambda x:x.push(nth_prime(x.pop())),
0x51:lambda x:x.push(x.code),
0x52:r_fn,
0x53:S_fn,
0x54:lambda x:x.push(math.tan(x.pop())),
0x55:lambda x:x.push(list(set(x.pop()).union(x.pop()))),
0x56:lambda x:x.push(random.uniform(x.pop(),x.pop())),
0x58:lambda x:x.pop(),
0x59:lambda x:x.push(0 if x.pop() else 1),
0x5A:zip_fn,
0x5C:idiv_fn,
0x5E:lambda x:x.push(pow(x.pop(),x.pop())),
0x5F:lambda x:x.push(math.log(x.pop())),
0x61:invert_fn,
0x62:lambda x:x.push(int(bool(x.pop()))),
0x63:c_fn,
0x64:deq_fn,
0x65:lambda x:x.push(math.exp(x.pop())),
0x66:f_fn,
0x67:lambda x:x.push(gcd(x.pop(),x.pop())),
0x68:lambda x:x.push(math.hypot(x.pop(),x.pop())),
0x69:i_fn,
0x6A:lambda x:x.push(str.join(x.pop(),map(str,x.pop()))),
0x6B:to_list_fn,
0x6C:lambda x:x.push(len(x.pop())),
0x6D:m_fn,
0x6E:n_fn,
0x6F:psh_fn,
0x70:p_fn,
0x71:enq_fn,
0x72:lr_fn,
0x73:s_fn,
0x74:flat_explode_fn,
0x75:lambda x:x.push(x.pop()+1),
0x76:lambda x:random.seed(x.pop()),
0x77:lambda x:x.push(full_factor(x.pop())),
0x78:lambda x:x.push(range(x.pop(),x.pop())),
0x79:lambda x:x.push(factor(x.pop())),
0x7A:lambda x:map(x.eval,(lambda y:['.' for _ in range(y)])(x.pop())),
0x7B:nrrot_fn,
0x7C:lambda x:x.push(x.pop() | x.pop()),
0x7D:nlrot_fn,
0x7E:lambda x:x.push(~x.pop()),
0x7F:exit_fn,
0x80:comp_fn,
0x81:print_all_fn,
0x82:lambda x:map(lambda y:x.pop(), range(len(x.stack))),
# Inverse and hyperbolic trigonometry block (0x83-0x90).
0x83:lambda x:x.push(math.asin(x.pop())),
0x84:lambda x:x.push(math.acos(x.pop())),
0x85:lambda x:x.push(math.atan(x.pop())),
0x86:lambda x:x.push(math.atan2(x.pop(),x.pop())),
0x87:lambda x:x.push(math.asinh(x.pop())),
0x88:lambda x:x.push(math.acosh(x.pop())),
0x89:lambda x:x.push(math.atanh(x.pop())),
0x8A:lambda x:x.push(repr(x.pop())),
0x8B:lambda x:x.push(complex(0,1)),
0x8C:i_mul_fn,
0x8D:inv_fil_fn,
0x8E:lambda x:x.push(math.sinh(x.pop())),
0x8F:lambda x:x.push(math.cosh(x.pop())),
0x90:lambda x:x.push(math.tanh(x.pop())),
0x91:lambda x:x.push((lambda y:mean(y) if y else 0)(x.pop())),
0x92:AE_fn,
# String manipulation block (0x93-0x99).
0x93:lambda x:x.push(x.pop().strip()),
0x94:lambda x:x.push(x.pop().lstrip()),
0x95:lambda x:x.push(x.pop().rstrip()),
0x96:lambda x:x.push(x.pop().upper()),
0x97:lambda x:x.push(x.pop().lower()),
0x98:lambda x:x.push(x.pop().title()),
0x99:lambda x:x.push(x.pop().swapcase()),
0x9A:lambda x:x.push((lambda y:max(y,key=y.count))(x.pop())),
0x9B:lambda x:x.push(math.copysign(x.pop(),x.pop())),
0x9C:fn_fil_fn,
0x9D:lambda x:x.push(map(operator.add,itertools.izip_longest(x.pop(),x.pop(),fillvalue=0))),
0x9E:lambda x:x.push(cmath.phase(x.pop())),
0x9F:lambda x:x.pop()(x),
0xA0:lambda x:x.push(x.pop().conjugate()),
0xA1:index_fn,
0xA2:cond_quit_fn,
0xA3:lambda x:x.push(''.join(map(chr,range(97,122+1)))),
0xA4:lambda x:x.push(map(list,enumerate(x.pop()))),
0xA5:lambda x:x.push(filter_types(x.pop(),ListType)),
0xA6:lambda x:x.push(x.pop()**2),
0xA7:lambda x:x.push(math.degrees(x.pop())),
0xA8:lambda x:x.push(int(x.pop(),x.pop())),
0xA9:lambda x:x.push(x.pop()+2),
0xAA:lambda x:x.push(x.pop()-2),
0xAB:lambda x:x.push(x.pop()/2),
0xAC:lambda x:x.push(x.pop()/4),
0xAD:lambda x:x.push(str_base(x.pop(),x.pop())),
0xAE:ins_bot_fn,
0xAF:ins_top_fn,
0xB0:lambda x:x.push(list(itertools.compress(x.pop(),x.pop()))),
0xB1:lambda x:x.push((lambda y:sum([1 if gcd(i,y)==1 else 0 for i in range(1,y+1)]))(x.pop())),
0xB2:lambda x:x.push(sum([is_prime(i) for i in range(1,x.pop()+1)])),
0xB3:dupe_all_fn,
0xB4:lambda x:x.push(1 if gcd(x.pop(),x.pop())==1 else 0),
0xB9:lambda x:x.push((lambda y:[nCr(y,k) for k in range(y+1)])(x.pop())),
0xBA:median_fn,
# Register commands (0xBB-0xC0): two fixed registers plus indexed access.
0xBB:lambda x:set_reg(0,x.pop()),
0xBC:lambda x:set_reg(1,x.pop()),
0xBD:lambda x:x.push(get_reg(0)),
0xBE:lambda x:x.push(get_reg(1)),
0xBF:lambda x:set_reg(x.pop(),x.pop()),
0xC0:lambda x:x.push(get_reg(x.pop())),
0xC5:dupe_each_fn,
0xC6:dupe_each_n_fn,
0xC7:npop_list_fn,
0xCB:lambda x:x.push(math.pi),
0xCC:lambda x:x.push(math.e),
0xCE:while_fn,
0xD1:lambda x:x.push(pow(10,x.pop())),
0xD2:lambda x:x.push(math.log(x.pop(),10)),
0xD3:lambda x:x.push(pow(2,x.pop())),
0xD4:lambda x:x.push(math.log(x.pop(),2)),
0xD5:lambda x:x.push(math.log(2)),
0xDB:lambda x:x.push(nCr(x.pop(),x.pop())),
0xDC:lambda x:x.push(nPr(x.pop(),x.pop())),
0xDD:lambda x:x.push(b64decode(x.pop())),
0xDE:lambda x:x.push(b64encode(x.pop())),
0xDF:lambda x:x.push(("0123456789"+string.ascii_uppercase+string.ascii_lowercase+"+/")[:x.pop()]),
0xE2:lambda x:x.push(math.gamma(x.pop())),
0xE3:lambda x:x.push(reduce(operator.mul,x.pop(),1)),
0xE4:sum_fn,
0xE7:lambda x:x.push(x.pop()*2),
0xED:lambda x:x.push(phi),
0xEE:lambda x:x.push(""),
0xEF:lambda x:x.push(list(set(x.pop()).intersection(x.pop()))),
0xF1:lambda x:x.push(-x.pop()),
0xF2:lambda x:x.push(x.pop()>=x.pop()),
0xF3:lambda x:x.push(x.pop()<=x.pop()),
0xF7:lambda x:x.push(int(x.pop())),
0xF8:lambda x:x.push(math.radians(x.pop())),
0xFB:lambda x:x.push(x.pop()**.5),
0xFE:peek_print_fn,
}
|
Fix for admin error when trying to delete object with a GenericRelation
|
# -*- coding: utf-8 -*-
"""Family module for Meta Wiki."""
#
# (C) Pywikibot team, 2005-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot import family
# The meta wikimedia family
class Family(family.WikimediaOrgFamily):
    """Family class for Meta Wiki."""
    name = 'meta'
    def __init__(self):
        """Constructor."""
        super(Family, self).__init__()
        # Interlanguage links on Meta forward to Wikipedia.
        self.interwiki_forward = 'wikipedia'
        self.cross_allowed = ['meta', ]
        self.category_redirect_templates = {
            'meta': (
                'Category redirect',
            ),
        }
        # Subpages for documentation.
        # Fix: this assignment was truncated to the non-syntactic fragment
        # "self.doc_subpages for Meta-Wiki"; restore the intended mapping.
        self.doc_subpages = {
            '_default': (('/doc',), ['meta']),
        }
Change-Id: I97de4e28ad08803e34305101d7fbae66c17c99fc
# -*- coding: utf-8 -*-
"""Family module for Meta Wiki."""
#
# (C) Pywikibot team, 2005-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot import family
# The meta wikimedia family
class Family(family.WikimediaOrgFamily):
    """Family class for Meta Wiki."""
    name = 'meta'
    def __init__(self):
        """Constructor."""
        super(Family, self).__init__()
        # Interlanguage links on Meta forward to Wikipedia.
        self.interwiki_forward = 'wikipedia'
        # Wikis whose bots are allowed to operate cross-wiki here.
        self.cross_allowed = ['meta', ]
        # Templates marking soft-redirected categories on this wiki.
        self.category_redirect_templates = {
            'meta': (
                'Category redirect',
            ),
        }
        # Subpages for documentation.
        self.doc_subpages = {
            '_default': (('/doc',), ['meta']),
        }
|
import os
import guild.cli
import guild.op
import guild.project
DEFAULT_PROJECT_LOCATION = "."
def main(args):
    # Entry point for the "run" command: resolve the model/operation named by
    # args.opspec, apply NAME=VAL flag overrides from the command line, and
    # run the operation (after an interactive confirmation unless --yes).
    model_name, op_name = _parse_opspec(args.opspec)
    model = _resolve_model(model_name, args)
    project_op = _resolve_op(op_name, model)
    _apply_flags(args, project_op)
    op = guild.op.from_project_op(project_op)
    if args.yes or _confirm_op(op):
        op.run()
def _parse_opspec(spec):
parts = spec.split(":", 1)
if len(parts) == 1:
return None, parts[0]
else:
return parts
def _resolve_model(name, args):
    """Resolve `name` to a model from the local project or an installed package.

    With no name, the project's default model is used (a project is then
    required).  Bug fix: the package-missing branch passed `package` -- which
    is always None on that path -- to the error formatter, producing
    "package 'None' is not installed"; it now reports the requested `name`.
    """
    project = _project_for_args(args)
    if name is None:
        if project is None:
            _project_required_error()
        return _project_default_model(project)
    elif project is not None:
        return _project_model(name, project)
    else:
        package = _package_for_args(args)
        if package is None:
            _package_not_installed_error(name)
        return _package_model(name, package)
def _project_for_args(args):
    # Load the project from --project-location (default: current directory).
    # A missing project is fatal only when the user named a location
    # explicitly; otherwise return None so the caller can fall back to
    # package-based resolution.
    location = args.project_location or DEFAULT_PROJECT_LOCATION
    try:
        return guild.project.from_file_or_dir(location)
    except guild.project.MissingSourceError:
        if args.project_location:
            _missing_source_error(args.project_location)
        return None
def _missing_source_error(location):
    # Exit with an error: the explicitly named location has no models.
    guild.cli.error(
        "'%s' does not contain any models\n"
        "Try a different location or 'guild run --help' "
        "for more information."
        % location)
def _project_required_error():
    # Exit with an error: neither a project nor a package provides a model.
    guild.cli.error(
        "cannot find a model for this operation\n"
        "Try specifying a project, a package or 'guild run --help' "
        "for more information.")
def _project_default_model(project):
    # Return the project's default model, exiting when it defines none.
    default = project.default_model()
    if default:
        return default
    else:
        _no_models_for_project_error(project)
def _no_models_for_project_error(project):
    # Exit with an error: the project file defines no models at all.
    guild.cli.error("%s does not define any models" % project.src)
def _project_model(name, project):
    # Look up a model by name in the project, exiting when undefined.
    try:
        return project[name]
    except KeyError:
        _no_such_model_error(name, project)
def _no_such_model_error(name, project):
    # Exit with an error that tells the user how to list available models.
    guild.cli.error(
        "model '%s' is not defined in %s\n"
        "Try 'guild models%s' for a list of available models."
        % (name, project.src, _project_opt(project.src)))
def _project_opt(project_src):
relpath = os.path.relpath(project_src)
if relpath == "MODEL" or relpath == "MODELS":
return ""
else:
return " -p %s" % relpath
def _package_for_args(_args):
    # Placeholder: package-based model resolution is not implemented yet, so
    # every lookup currently reports "not installed".
    # TODO: package resolve here!
    return None
def _package_not_installed_error(name):
    """Exit with an install hint for a missing package.

    Bug fix: the format string contains two %s placeholders but was applied
    to the single value `name` rather than a tuple, which raises
    "TypeError: not enough arguments for format string" instead of printing
    the intended message.
    """
    guild.cli.error(
        "package '%s' is not installed\n"
        "Try 'guild install %s' to install it first."
        % (name, name))
def _package_model(name, package):
    # Unreachable until _package_for_args returns a package; resolving a
    # model out of an installed package is still to be implemented.
    raise AssertionError("TODO")
def _resolve_op(name, model):
    # Look up the named operation on the model, exiting when undefined.
    op = model.get_op(name)
    if op is None:
        _no_such_operation_error(name, model)
    return op
def _no_such_operation_error(name, model):
    # Exit with an error that tells the user how to list available operations.
    guild.cli.error(
        "operation '%s' is not defined for model '%s'\n"
        "Try 'guild operations %s%s' for a list of available operations."
        % (name, model.name, model.name, _project_opt(model.project.src)))
def _apply_flags(args, op):
    # Merge the command line's NAME=VAL arguments into the operation's flags.
    for arg in args.args:
        name, val = _parse_flag(arg)
        op.flags[name] = val
def _parse_flag(s):
parts = s.split("=", 1)
if len(parts) == 1:
return parts[0], None
else:
return parts
def _confirm_op(op):
    # Ask the user to confirm the run, listing any flags parsed from the
    # operation's command line; the default answer is yes.
    flags = _op_flags(op)
    if flags:
        prompt = (
            "You are about to run %s with the following flags:\n"
            "%s\n"
            "Continue?"
            % (op.name, _format_op_flags(flags)))
    else:
        prompt = (
            "You are about to run %s:\n"
            "Continue?" % op.name)
    return guild.cli.confirm(prompt, default=True)
def _op_flags(op):
flags = []
args = op.cmd_args
i = 1
while i < len(args):
cur_arg = args[i]
i = i + 1
next_arg = args[i] if i < len(args) else None
if cur_arg[0:2] == "--":
if next_arg and next_arg[0:2] != "--":
flags.append((cur_arg[2:], next_arg))
i = i + 1
else:
flags.append((cur_arg[2:], None))
return flags
def _format_op_flags(flags):
return "\n".join([" %s" % _format_flag(name, val)
for name, val in flags])
def _format_flag(name, val):
if val is None:
return "%s: (boolean switch)" % name
else:
return "%s: %s" % (name, val)
Exit the run command with the operation's exit status.
import os
import guild.cli
import guild.op
import guild.project
DEFAULT_PROJECT_LOCATION = "."
def main(args):
    # Entry point for the "run" command: resolve the model/operation named by
    # args.opspec, apply NAME=VAL flag overrides, run the operation (after a
    # confirmation unless --yes), and propagate its exit status so the CLI
    # exits non-zero on failure.
    model_name, op_name = _parse_opspec(args.opspec)
    model = _resolve_model(model_name, args)
    project_op = _resolve_op(op_name, model)
    _apply_flags(args, project_op)
    op = guild.op.from_project_op(project_op)
    if args.yes or _confirm_op(op):
        result = op.run()
        _handle_run_result(result)
def _parse_opspec(spec):
parts = spec.split(":", 1)
if len(parts) == 1:
return None, parts[0]
else:
return parts
def _resolve_model(name, args):
    """Resolve `name` to a model from the local project or an installed package.

    With no name, the project's default model is used (a project is then
    required).  Bug fix: the package-missing branch passed `package` -- which
    is always None on that path -- to the error formatter, producing
    "package 'None' is not installed"; it now reports the requested `name`.
    """
    project = _project_for_args(args)
    if name is None:
        if project is None:
            _project_required_error()
        return _project_default_model(project)
    elif project is not None:
        return _project_model(name, project)
    else:
        package = _package_for_args(args)
        if package is None:
            _package_not_installed_error(name)
        return _package_model(name, package)
def _project_for_args(args):
location = args.project_location or DEFAULT_PROJECT_LOCATION
try:
return guild.project.from_file_or_dir(location)
except guild.project.MissingSourceError:
if args.project_location:
_missing_source_error(args.project_location)
return None
def _missing_source_error(location):
guild.cli.error(
"'%s' does not contain any models\n"
"Try a different location or 'guild run --help' "
"for more information."
% location)
def _project_required_error():
guild.cli.error(
"cannot find a model for this operation\n"
"Try specifying a project, a package or 'guild run --help' "
"for more information.")
def _project_default_model(project):
default = project.default_model()
if default:
return default
else:
_no_models_for_project_error(project)
def _no_models_for_project_error(project):
guild.cli.error("%s does not define any models" % project.src)
def _project_model(name, project):
try:
return project[name]
except KeyError:
_no_such_model_error(name, project)
def _no_such_model_error(name, project):
guild.cli.error(
"model '%s' is not defined in %s\n"
"Try 'guild models%s' for a list of available models."
% (name, project.src, _project_opt(project.src)))
def _project_opt(project_src):
relpath = os.path.relpath(project_src)
if relpath == "MODEL" or relpath == "MODELS":
return ""
else:
return " -p %s" % relpath
def _package_for_args(_args):
# TODO: package resolve here!
return None
def _package_not_installed_error(name):
    """Exit with an install hint for a missing package.

    Bug fix: the format string contains two %s placeholders but was applied
    to the single value `name` rather than a tuple, which raises
    "TypeError: not enough arguments for format string" instead of printing
    the intended message.
    """
    guild.cli.error(
        "package '%s' is not installed\n"
        "Try 'guild install %s' to install it first."
        % (name, name))
def _package_model(name, package):
raise AssertionError("TODO")
def _resolve_op(name, model):
op = model.get_op(name)
if op is None:
_no_such_operation_error(name, model)
return op
def _no_such_operation_error(name, model):
guild.cli.error(
"operation '%s' is not defined for model '%s'\n"
"Try 'guild operations %s%s' for a list of available operations."
% (name, model.name, model.name, _project_opt(model.project.src)))
def _apply_flags(args, op):
for arg in args.args:
name, val = _parse_flag(arg)
op.flags[name] = val
def _parse_flag(s):
parts = s.split("=", 1)
if len(parts) == 1:
return parts[0], None
else:
return parts
def _confirm_op(op):
flags = _op_flags(op)
if flags:
prompt = (
"You are about to run %s with the following flags:\n"
"%s\n"
"Continue?"
% (op.name, _format_op_flags(flags)))
else:
prompt = (
"You are about to run %s:\n"
"Continue?" % op.name)
return guild.cli.confirm(prompt, default=True)
def _op_flags(op):
flags = []
args = op.cmd_args
i = 1
while i < len(args):
cur_arg = args[i]
i = i + 1
next_arg = args[i] if i < len(args) else None
if cur_arg[0:2] == "--":
if next_arg and next_arg[0:2] != "--":
flags.append((cur_arg[2:], next_arg))
i = i + 1
else:
flags.append((cur_arg[2:], None))
return flags
def _format_op_flags(flags):
return "\n".join([" %s" % _format_flag(name, val)
for name, val in flags])
def _format_flag(name, val):
if val is None:
return "%s: (boolean switch)" % name
else:
return "%s: %s" % (name, val)
def _handle_run_result(exit_status):
if exit_status != 0:
guild.cli.error(exit_status=exit_status)
|
from importlib import import_module
from io import BytesIO
import json
from math import ceil, sqrt
import os
from random import random, randrange
import numpy as np
from PIL import Image
from shapeworld import util
class Dataset(object):
def __init__(self, values, world_size, pixel_noise_stddev=None, vectors=None, vocabularies=None, language=None):
    """Base dataset initializer.

    Args:
        values: dict of value name -> value type exposed by this dataset
            ('alternatives', when present, must be 'int').
        world_size: int for square world images, or a 2-sequence
            (presumably (width, height) -- confirm against world_shape()).
        pixel_noise_stddev: optional stddev for truncated pixel noise.
        vectors: optional dict of value name -> int length or shape tuple.
        vocabularies: optional dict of vocab name -> word->id dict (ids must
            be a dense 0..N-1 range) or a plain word sequence, in which case
            id 0 is reserved for '' and the last id for '[UNKNOWN]'.
        language: optional natural-language identifier.
    """
    assert self.type and self.name
    assert all(value_name != 'alternatives' or value_type == 'int' for value_name, value_type in values.items())
    self.values = values
    if isinstance(world_size, int):
        self.world_size = world_size
    else:
        self.world_size = tuple(world_size)
    self.pixel_noise_stddev = pixel_noise_stddev
    # Robustness fix: `vectors` defaults to None but was dereferenced
    # unconditionally (AttributeError); treat None as "no vector values".
    self.vectors = {} if vectors is None else {value_name: shape if isinstance(shape, int) else tuple(shape) for value_name, shape in vectors.items()}
    self.vocabularies = dict()
    if vocabularies is not None:
        for name, vocabulary in vocabularies.items():
            if isinstance(vocabulary, dict):
                # Already word -> id: validate types and the dense id range.
                assert all(isinstance(word, str) and isinstance(index, int) for word, index in vocabulary.items())
                assert sorted(vocabulary.values()) == list(range(len(vocabulary)))
                self.vocabularies[name] = vocabulary
            else:
                # Word sequence: ids start at 1; 0 is padding, last is UNKNOWN.
                vocabulary = {word: index for index, word in enumerate((word for word in vocabulary if word != '' and word != '[UNKNOWN]'), 1)}
                vocabulary[''] = 0
                vocabulary['[UNKNOWN]'] = len(vocabulary)
                self.vocabularies[name] = vocabulary
    self.language = language
@staticmethod
def create(dtype=None, name=None, variant=None, language=None, config=None, **kwargs):
    """Central factory: build a Dataset from names/variants/configs.

    Handles JSON-encoded string arguments, lists of names/variants/configs
    (combined through a DatasetMixer), a directory or file of pre-generated
    data (loaded as a LoadedDataset), and plain config dicts (resolved to a
    generator class imported from shapeworld.datasets.<type>.<name>).

    Bug fix: the generated-data branch tested ``'dtype' in config`` while
    reading ``config['type']`` -- a typo for ``'type'`` that made an explicit
    'type' entry unreachable and rejected a None dtype argument even though
    the assertion above explicitly allows it.
    """
    assert variant is None or name is not None
    assert language is None or name is not None
    # Arguments may arrive JSON-encoded (e.g. from the command line).
    if isinstance(name, str):
        try:
            name = json.loads(name)
        except Exception:
            pass
    if isinstance(variant, str):
        try:
            variant = json.loads(variant)
        except Exception:
            pass
    if isinstance(config, str):
        try:
            config = json.loads(config)
        except Exception:
            pass
    # Multiple dataset names: create each and combine via DatasetMixer.
    if isinstance(name, (tuple, list)):
        try:
            if not isinstance(variant, list):
                variant = [variant for _ in name]
            if not isinstance(config, list):
                config = [config for _ in name]
            datasets = list()
            for n, v, c in zip(name, variant, config):
                # for v in vs:
                datasets.append(Dataset.create(dtype=dtype, name=n, variant=v, language=language, config=c))
            dataset = DatasetMixer(datasets=datasets, **kwargs)
            assert dtype == dataset.type
            assert language is None or language == dataset.language
            return dataset
        except TypeError:
            assert False
    # Multiple variants of one dataset: likewise mixed.
    if isinstance(variant, (tuple, list)):
        try:
            if not isinstance(config, list):
                config = [config for _ in variant]
            datasets = list()
            for v, c in zip(variant, config):
                datasets.append(Dataset.create(dtype=dtype, name=name, variant=v, language=language, config=c))
            dataset = DatasetMixer(datasets=datasets, **kwargs)
            assert dtype == dataset.type
            assert name == dataset.name
            assert language is None or language == dataset.language
            return dataset
        except TypeError:
            assert False
    # Multiple configs: likewise mixed.
    if isinstance(config, (tuple, list)):
        assert len(kwargs) == 0
        try:
            datasets = list()
            for c in config:
                # if isinstance(c, dict):
                #     c = dict(c)
                #     datasets.append(c)
                datasets.append(Dataset.create(dtype=dtype, name=name, variant=variant, language=language, config=c))
            dataset = DatasetMixer(datasets=datasets, **kwargs)
            assert dtype is None or dtype == dataset.type
            assert language is None or language == dataset.language
            return dataset
        except TypeError:
            assert False
    if config is None:
        config = dict()
    elif isinstance(config, dict):
        config = dict(config)
    elif os.path.isdir(config):
        # Directory of pre-generated data: locate its
        # "<type>-<name>[-variant][-language].json" specification and recurse.
        assert dtype is not None and name is not None
        full_name = name
        if variant is not None:
            full_name = '{}-{}'.format(full_name, variant)
        if language is not None:
            full_name = '{}-{}'.format(full_name, language)
        directory = config
        config = os.path.join(config, '{}-{}.json'.format(dtype, full_name))
        with open(config, 'r') as filehandle:
            config = json.load(fp=filehandle)
        if 'directory' not in config:
            config['directory'] = directory
        return Dataset.create(dtype=dtype, name=name, variant=variant, language=language, config=config, **kwargs)
    elif os.path.isfile(config):
        # Config file: merge its type/name/variant/language with the
        # arguments (each must agree when both are given) and recurse.
        with open(config, 'r') as filehandle:
            config = json.load(fp=filehandle)
        d = config.pop('type', None)
        if dtype is None:
            dtype = d
        else:
            assert dtype == d
        n = config.pop('name', None)
        if name is None:
            name = n
        else:
            assert name == n
        v = config.pop('variant', None)
        if variant is None:
            variant = v
        else:
            assert variant == v
        l = config.pop('language', language)
        if language is None:
            language = l
        else:
            assert language == l
        if 'config' in config:
            assert not kwargs
            kwargs = config
            config = kwargs.pop('config')
        return Dataset.create(dtype=dtype, name=name, variant=variant, language=language, config=config, **kwargs)
    else:
        raise Exception('Invalid config value: ' + str(config))
    if config.pop('generated', False):
        # Pre-generated data described inline: reconcile config entries with
        # the arguments and hand over to LoadedDataset.
        assert dtype is None or 'type' not in config or config['type'] == dtype
        assert name is None or 'name' not in config or config['name'] == name
        assert variant is None or 'variant' not in config or config['variant'] == variant
        assert language is None or 'language' not in config or config['language'] == language
        if 'type' in config:  # was: 'dtype' in config (key typo)
            assert dtype is None or dtype == config['type']
            dtype = config['type']
        else:
            assert dtype is not None
            config['type'] = dtype
        if 'name' in config:
            assert name == config['name']
            name = config['name']
        else:
            assert name is not None
            config['name'] = name
        if 'variant' in config:
            assert variant == config['variant']
            variant = config.get('variant')
        elif variant is not None:
            config['variant'] = variant
        if 'language' in config:
            assert language == config['language']
            language = config.get('language')
        elif language is not None:
            config['language'] = language
        dataset = LoadedDataset(specification=config, **kwargs)
        assert dtype == dataset.type
        assert name == dataset.name
        assert variant is None or variant == dataset.variant
        assert language is None or language == dataset.language
        return dataset
    else:
        # Plain config: import shapeworld.datasets.<type>.<name> and
        # instantiate its '<Name>Dataset' generator class.
        assert variant is None
        config.pop('directory', None)
        for key, value in kwargs.items():
            assert key not in config
            config[key] = value
        if dtype is None:
            dtype = config.pop('type')
        else:
            dtype_config = config.pop('type', dtype)
            assert dtype_config == dtype
        if name is None:
            name = config.pop('name')
        else:
            name_config = config.pop('name', name)
            assert name_config == name
        if 'language' in config:
            assert language is None or config['language'] == language
        elif language is not None:
            config['language'] = language
        module = import_module('shapeworld.datasets.{}.{}'.format(dtype, name))
        class_name = util.class_name(name) + 'Dataset'
        # Find the dataset class by name in the imported module's namespace
        # (the break leaves `module` bound to the class object).
        for key, module in module.__dict__.items():
            if key == class_name:
                break
        dataset = module(**config)
        assert dtype == dataset.type
        assert name == dataset.name
        return dataset
def __str__(self):
    """Human-readable "type name" label, with the language appended when set."""
    label = '{} {}'.format(self.type, self.name)
    if self.language is not None:
        label += ' ({})'.format(self.language)
    return label
@property
def type(self):
    # Dataset category identifier; concrete subclasses must define this.
    raise NotImplementedError
@property
def name(self):
    # Derive the dataset name from the class name: strip the mandatory
    # 'Dataset' suffix and convert via util.real_name (the inverse of the
    # util.class_name lookup used by Dataset.create).
    name = self.__class__.__name__
    assert name[-7:] == 'Dataset'
    return util.real_name(name[:-7])
def specification(self):
    """Serializable summary of this dataset's interface (values, shapes, vocabularies)."""
    spec = dict(type=self.type, name=self.name, values=self.values)
    spec['world_size'] = self.world_size if isinstance(self.world_size, int) else list(self.world_size)
    if self.vectors:
        spec['vectors'] = self.vectors
    if self.vocabularies:
        spec['vocabularies'] = self.vocabularies
    if self.language:
        spec['language'] = self.language
    return spec
def world_shape(self):
if isinstance(self.world_size, int):
return (self.world_size, self.world_size, 3)
else:
return (self.world_size[0], self.world_size[1], 3)
def vector_shape(self, value_name):
shape = self.vectors.get(value_name)
if isinstance(shape, int):
return (self.vectors.get(value_name),)
else:
return shape
def vocabulary_size(self, value_type):
if self.vocabularies is None or value_type not in self.vocabularies:
return -1
else:
return len(self.vocabularies[value_type])
def vocabulary(self, value_type):
if self.vocabularies is None or value_type not in self.vocabularies:
return None
else:
return [word for word, _ in sorted(self.vocabularies[value_type].items(), key=(lambda kv: kv[1]))]
def to_surface(self, value_type, word_ids):
id2word = self.vocabulary(value_type)
assert id2word is not None
if word_ids.ndim == 1:
return ' '.join(id2word[word_id] for word_id in word_ids)
elif word_ids.ndim == 2:
return [self.to_surface(value_type, word_ids) for word_ids in word_ids]
else:
assert False
def from_surface(self, value_type, words):
word2id = self.vocabularies.get(value_type)
assert word2id is not None
if isinstance(words, str):
return np.asarray(word2id[word] for word in words.split(' '))
elif isinstance(words, list):
if len(words) > 0 and ' ' in words[0]:
return [self.from_surface(value_type, words) for words in words]
else:
return np.asarray(word2id[word] for word in words)
else:
assert False
def apply_pixel_noise(self, world):
    # Add truncated Gaussian noise to a float world array, in place.  Samples
    # beyond +/- 2 stddev are redrawn until none remain, so the noise is
    # bounded, and the result is clipped back into [0, 1].  No-op when the
    # configured stddev is None or non-positive.
    if self.pixel_noise_stddev is not None and self.pixel_noise_stddev > 0.0:
        noise = np.random.normal(loc=0.0, scale=self.pixel_noise_stddev, size=world.shape)
        mask = (noise < -2.0 * self.pixel_noise_stddev) + (noise > 2.0 * self.pixel_noise_stddev)
        while np.any(a=mask):
            # Zero the out-of-range entries, then redraw just those.
            noise -= mask * noise
            noise += mask * np.random.normal(loc=0.0, scale=self.pixel_noise_stddev, size=world.shape)
            mask = (noise < -2.0 * self.pixel_noise_stddev) + (noise > 2.0 * self.pixel_noise_stddev)
        world += noise
        np.clip(world, a_min=0.0, a_max=1.0, out=world)
    return world
def zero_batch(self, n, include_model=False, alternatives=False):
    # Allocate an all-zero batch dict of size n matching this dataset's
    # declared values: scalar values become (n,) numpy arrays, vector/world
    # values become zero numpy arrays, and values marked as having
    # alternatives become per-instance Python lists when `alternatives` is
    # requested.  'model' entries are included only with include_model.
    batch = dict()
    for value_name, value_type in self.values.items():
        value_type, alts = util.alternatives_type(value_type=value_type)
        if alternatives and alts:
            # One (initially near-empty) list of alternatives per instance.
            if value_type == 'int':
                batch[value_name] = [[] for _ in range(n)]
            elif value_type == 'float':
                batch[value_name] = [[] for _ in range(n)]
            elif value_type == 'vector(int)' or value_type in self.vocabularies:
                batch[value_name] = [[np.zeros(shape=self.vector_shape(value_name), dtype=np.int32)] for _ in range(n)]
            elif value_type == 'vector(float)':
                batch[value_name] = [[np.zeros(shape=self.vector_shape(value_name), dtype=np.float32)] for _ in range(n)]
            elif value_type == 'world':
                batch[value_name] = [[np.zeros(shape=self.world_shape(), dtype=np.float32)] for _ in range(n)]
            elif value_type == 'model' and include_model:
                batch[value_name] = [[] for _ in range(n)]
        else:
            # Fixed-size arrays; 'alternatives' counts are only materialized
            # when the caller asked for alternatives.
            if value_type == 'int' and (value_name != 'alternatives' or alternatives):
                batch[value_name] = np.zeros(shape=(n,), dtype=np.int32)
            elif value_type == 'float':
                batch[value_name] = np.zeros(shape=(n,), dtype=np.float32)
            elif value_type == 'vector(int)' or value_type in self.vocabularies:
                batch[value_name] = np.zeros(shape=((n,) + self.vector_shape(value_name)), dtype=np.int32)
            elif value_type == 'vector(float)':
                batch[value_name] = np.zeros(shape=((n,) + self.vector_shape(value_name)), dtype=np.float32)
            elif value_type == 'world':
                batch[value_name] = np.zeros(shape=((n,) + self.world_shape()), dtype=np.float32)
            elif value_type == 'model' and include_model:
                batch[value_name] = [None] * n
    return batch
def generate(self, n, mode=None, include_model=False, alternatives=False):  # mode: None, 'train', 'validation', 'test'
    # Produce one batch of n instances; implemented by concrete subclasses.
    raise NotImplementedError
def iterate(self, n, mode=None, include_model=False, alternatives=False, iterations=None):
i = 0
while iterations is None or i < iterations:
yield self.generate(n=n, mode=mode, include_model=include_model, alternatives=alternatives)
i += 1
def get_html(self, generated, image_format='bmp', image_dir=''):
    # Optional HTML visualization of a generated batch; subclasses may
    # override.  Returning None signals "no HTML support" to serialize().
    return None
def serialize(self, path, generated, additional=None, filename=None, archive=None, html=False, numpy_formats=(), image_format='bmp', concat_worlds=False):
    # Write a generated batch to `path` (optionally inside an archive),
    # delegating per-value formatting to serialize_value.  `additional` maps
    # extra value names (disjoint from self.values) to (value, value_type)
    # pairs.  When html is requested, get_html() must produce a page.
    assert not additional or all(value_name not in self.values for value_name in additional)
    if not os.path.isdir(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))
    with util.Archive(path=path, mode='w', archive=archive) as write_file:
        for value_name, value in generated.items():
            self.serialize_value(
                path=path,
                value=value,
                value_name=value_name,
                write_file=write_file,
                numpy_format=(value_name in numpy_formats),
                image_format=image_format,
                concat_worlds=concat_worlds
            )
        if additional:
            # Extra values carry their own type, since they are not declared
            # in self.values.
            for value_name, (value, value_type) in additional.items():
                self.serialize_value(
                    path=path,
                    value=value,
                    value_name=value_name,
                    write_file=write_file,
                    value_type=value_type,
                    numpy_format=(value_name in numpy_formats),
                    image_format=image_format,
                    concat_worlds=concat_worlds
                )
        if html:
            html = self.get_html(generated=generated, image_format=image_format)
            assert html is not None
            write_file(filename='data.html', value=html)
def serialize_value(self, path, value, value_name, write_file, value_type=None, numpy_format=False, image_format='bmp', concat_worlds=False):
    # Write one value column of a batch.  Text encodings: one record per
    # line, ';' between alternatives, ',' between vector components, floats
    # rounded to 3 decimals.  Worlds become image files (or one tiled image
    # with concat_worlds), models become JSON, vocabulary-typed values become
    # word sequences.  numpy_format writes .npy files next to `path` instead.
    if value_type is None:
        value_type = self.values[value_name]
    value_type, alts = util.alternatives_type(value_type=value_type)
    if value_name == 'alternatives':
        # Per-instance alternative counts: one integer per line.
        assert value_type == 'int'
        assert not numpy_format
        value = '\n'.join(str(int(x)) for x in value) + '\n'
        write_file('alternatives.txt', value)
    elif value_type == 'int':
        assert not numpy_format
        if alts:
            value = '\n'.join(';'.join(str(x)for x in xs) for xs in value) + '\n'
        else:
            value = '\n'.join(str(x) for x in value) + '\n'
        write_file(value_name + '.txt', value)
    elif value_type == 'float':
        assert not numpy_format
        if alts:
            value = '\n'.join(';'.join(str(round(x, 3))for x in xs) for xs in value) + '\n'
        else:
            value = '\n'.join(str(round(x, 3)) for x in value) + '\n'
        write_file(value_name + '.txt', value)
    elif value_type == 'vector(int)':
        if numpy_format:
            np.save(path + '-' + value_name + '.npy', value)
        elif alts:
            value = '\n'.join(';'.join(','.join(str(x) for x in vector.flatten()) for vector in vectors) for vectors in value) + '\n'
            write_file(value_name + '.txt', value)
        else:
            value = '\n'.join(','.join(str(x) for x in vector.flatten()) for vector in value) + '\n'
            write_file(value_name + '.txt', value)
    elif value_type == 'vector(float)':
        if numpy_format:
            np.save(path + '-' + value_name + '.npy', value)
        elif alts:
            value = '\n'.join(';'.join(','.join(str(round(x, 3)) for x in vector.flatten()) for vector in vectors) for vectors in value) + '\n'
            write_file(value_name + '.txt', value)
        else:
            value = '\n'.join(','.join(str(round(x, 3)) for x in vector.flatten()) for vector in value) + '\n'
            write_file(value_name + '.txt', value)
    elif value_type == 'world':
        from shapeworld.world import World
        if numpy_format:
            np.save(path + '-' + value_name + '.npy', value)
        elif concat_worlds:
            # Tile all worlds into one near-square image, padding the final
            # row with black (zero) worlds.
            assert not alts
            size = ceil(sqrt(len(value)))
            worlds = []
            for y in range(ceil(len(value) / size)):
                if y < len(value) // size:
                    worlds.append(np.concatenate([value[y * size + x] for x in range(size)], axis=1))
                else:
                    worlds.append(np.concatenate([value[y * size + x] for x in range(len(value) % size)] + [np.zeros_like(a=value[0]) for _ in range(-len(value) % size)], axis=1))
            worlds = np.concatenate(worlds, axis=0)
            image = World.get_image(world_array=worlds)
            image_bytes = BytesIO()
            image.save(image_bytes, format=image_format)
            write_file('{}.{}'.format(value_name, image_format), image_bytes.getvalue(), binary=True)
            image_bytes.close()
        else:
            # One image per world ("name-n.ext"), or per alternative
            # ("name-n-i.ext") when alternatives are present.
            for n in range(len(value)):
                if alts:
                    for i, v in enumerate(value[n]):
                        image = World.get_image(world_array=v)
                        image_bytes = BytesIO()
                        image.save(image_bytes, format=image_format)
                        write_file('{}-{}-{}.{}'.format(value_name, n, i, image_format), image_bytes.getvalue(), binary=True)
                        image_bytes.close()
                else:
                    image = World.get_image(world_array=value[n])
                    image_bytes = BytesIO()
                    image.save(image_bytes, format=image_format)
                    write_file('{}-{}.{}'.format(value_name, n, image_format), image_bytes.getvalue(), binary=True)
                    image_bytes.close()
    elif value_type == 'model':
        assert not numpy_format
        value = json.dumps(obj=value, indent=2, sort_keys=True)
        write_file(value_name + '.json', value)
    else:
        # Vocabulary-typed value: decode ids (0 is padding and is skipped)
        # back to words, blank-line-separated per instance when alternatives.
        assert not numpy_format
        id2word = self.vocabulary(value_type=value_type)
        if alts:
            value = '\n\n'.join('\n'.join(' '.join(id2word[word_id] for word_id in words if word_id) for words in words_alts) for words_alts in value) + '\n\n'
        else:
            value = '\n'.join(' '.join(id2word[word_id] for word_id in words if word_id) for words in value) + '\n'
        write_file(value_name + '.txt', value)
def deserialize_value(self, path, value_name, read_file, value_type=None, numpy_format=False, image_format='bmp', num_concat_worlds=0):
    """Read one batch value from files; inverse of serialize_value.

    Args:
        path: Path of the current shard; only its extension-stripped stem is
            used as prefix when loading .npy files.
        value_name: Name of the value; also the base name of the input file.
        read_file: Callable(filename, binary=False) returning file content, or
            None when the file does not exist (used as end-of-data sentinel).
        value_type: Optional explicit type string; defaults to self.values[value_name].
        numpy_format: If true, load vector/world values from a single .npy file.
        image_format: Image file format of world values (e.g. 'bmp', 'png').
        num_concat_worlds: Number of worlds tiled into one combined image
            (0 means one image file per world).

    Returns:
        List of per-instance values (lists of alternatives for alternatives(...) types).
    """
    if value_type is None:
        value_type = self.values[value_name]
    # Split a potential 'alternatives(...)' wrapper into base type plus flag.
    value_type, alts = util.alternatives_type(value_type=value_type)
    if value_name == 'alternatives':
        assert value_type == 'int'
        assert not numpy_format
        value = read_file('alternatives.txt')
        return [int(x) for x in value.split('\n')[:-1]]
    elif value_type == 'int':
        assert not numpy_format
        value = read_file(value_name + '.txt')
        if alts:
            value = [[int(x) for x in xs.split(';')] for xs in value.split('\n')[:-1]]
        else:
            value = [int(x) for x in value.split('\n')[:-1]]
        return value
    elif value_type == 'float':
        assert not numpy_format
        value = read_file(value_name + '.txt')
        if alts:
            value = [[float(x) for x in xs.split(';')] for xs in value.split('\n')[:-1]]
        else:
            value = [float(x) for x in value.split('\n')[:-1]]
        return value
    elif value_type == 'vector(int)':
        if numpy_format:
            # Strip all extensions (e.g. '.tar.gz') from the shard path.
            path, extension = os.path.splitext(path)
            while extension != '':
                path, extension = os.path.splitext(path)
            value = np.load(path + '-' + value_name + '.npy')
        else:
            value = read_file(value_name + '.txt')
            shape = self.vector_shape(value_name=value_name)
            if alts:
                value = [[np.array(object=[int(x) for x in vector.split(',')], dtype=np.int32).reshape(shape) for vector in vectors.split(';')] for vectors in value.split('\n')[:-1]]
            else:
                value = [np.array(object=[int(x) for x in vector.split(',')], dtype=np.int32).reshape(shape) for vector in value.split('\n')[:-1]]
        return value
    elif value_type == 'vector(float)':
        if numpy_format:
            path, extension = os.path.splitext(path)
            while extension != '':
                path, extension = os.path.splitext(path)
            value = np.load(path + '-' + value_name + '.npy')
        else:
            value = read_file(value_name + '.txt')
            shape = self.vector_shape(value_name=value_name)
            if alts:
                value = [[np.array(object=[float(x) for x in vector.split(',')], dtype=np.float32).reshape(shape) for vector in vectors.split(';')] for vectors in value.split('\n')[:-1]]
            else:
                # Fix: previously components containing 'e' (scientific notation)
                # were replaced by 0.0, corrupting valid values and diverging from
                # the alts branch above; float() parses such notation natively.
                value = [np.array(object=[float(x) for x in vector.split(',')], dtype=np.float32).reshape(shape) for vector in value.split('\n')[:-1]]
        return value
    elif value_type == 'world':
        from shapeworld.world import World
        if numpy_format:
            path, extension = os.path.splitext(path)
            while extension != '':
                path, extension = os.path.splitext(path)
            value = np.load(path + '-' + value_name + '.npy')
        elif num_concat_worlds:
            # Split a combined grid image back into individual world arrays.
            assert not alts
            size = ceil(sqrt(num_concat_worlds))  # number of grid columns
            image_bytes = read_file('{}.{}'.format(value_name, image_format), binary=True)
            assert image_bytes is not None
            image_bytes = BytesIO(image_bytes)
            image = Image.open(image_bytes)
            worlds = World.from_image(image)
            height = worlds.shape[0] // ceil(num_concat_worlds / size)
            assert worlds.shape[0] % ceil(num_concat_worlds / size) == 0
            width = worlds.shape[1] // size
            assert worlds.shape[1] % size == 0
            value = []
            for y in range(ceil(num_concat_worlds / size)):
                for x in range(size if y < num_concat_worlds // size else num_concat_worlds % size):
                    value.append(worlds[y * height: (y + 1) * height, x * width: (x + 1) * width, :])
        else:
            # One image file per world (and per alternative); read_file returning
            # None signals that no further files exist.
            value = list()
            n = 0
            while True:
                if alts:
                    i = 0
                    v = list()
                    while True:
                        image_bytes = read_file('{}-{}-{}.{}'.format(value_name, n, i, image_format), binary=True)
                        if image_bytes is None:
                            break
                        image_bytes = BytesIO(image_bytes)
                        image = Image.open(image_bytes)
                        v.append(World.from_image(image))
                        i += 1
                    # Fix: previously the outer loop had no termination condition in
                    # the alts case and spun forever once instance n had no images;
                    # a missing first alternative means no more instances remain.
                    if i == 0:
                        break
                    value.append(v)
                else:
                    image_bytes = read_file('{}-{}.{}'.format(value_name, n, image_format), binary=True)
                    if image_bytes is None:
                        break
                    image_bytes = BytesIO(image_bytes)
                    image = Image.open(image_bytes)
                    value.append(World.from_image(image))
                n += 1
        return value
    elif value_type == 'model':
        assert not numpy_format
        value = read_file(value_name + '.json')
        value = json.loads(s=value)
        return value
    else:
        # Language-like value: words mapped to ids via the matching vocabulary.
        # NOTE(review): vocabularies.get(value_type) returns None for an unknown
        # type and would fail below — presumably the type is always registered.
        assert not numpy_format
        word2id = self.vocabularies.get(value_type)
        value = read_file(value_name + '.txt')
        if alts:
            value = [[[word2id[word] for word in words.split(' ')] for words in words_alts.split('\n')] for words_alts in value.split('\n\n')[:-1]]
        else:
            value = [[word2id[word] for word in words.split(' ')] for words in value.split('\n')[:-1]]
        return value
class LoadedDataset(Dataset):
    """Dataset backed by pre-generated shards on disk.

    The directory layout is either a flat set of shard files/directories, or
    one subdirectory per mode ('train', 'validation', 'test'), each containing
    shards. Plain shards are deserialized via deserialize_value; files ending
    in '.tfrecords.gz' are tracked separately as TF-records shards.
    """

    def __init__(self, specification, random_sampling=True, pixel_noise_stddev=None, exclude_values=()):
        """
        Args:
            specification: Dict describing the stored dataset; consumed
                destructively (keys are popped), remaining entries are kept
                and exposed via __getattr__.
            random_sampling: If true, shards and instances are sampled randomly;
                otherwise they are consumed in order.
            pixel_noise_stddev: Optional pixel noise; if given, the
                specification must not also define it.
            exclude_values: Value names to drop from the loaded dataset.
        """
        self._type = specification.pop('type')
        self._name = specification.pop('name')
        self.variant = specification.pop('variant', None)
        self.directory = specification.pop('directory')
        relative_directory = specification.get('relative_directory')
        if relative_directory is not None:
            self.directory = os.path.join(self.directory, relative_directory)
        self.archive = specification.pop('archive', None)
        self.include_model = specification.pop('include_model', False)
        self.numpy_formats = tuple(specification.pop('numpy_formats', ()))
        self.image_format = specification.pop('image_format', 'bmp')
        self.num_concat_worlds = specification.pop('num_concat_worlds', 0)
        self._specification = specification
        self.random_sampling = random_sampling
        values = specification.pop('values')
        for value in exclude_values:
            values.pop(value, None)
        if pixel_noise_stddev is None:
            pixel_noise_stddev = specification.pop('pixel_noise_stddev', None)
        else:
            # An explicit argument must not conflict with a stored setting.
            assert 'pixel_noise_stddev' not in specification
        super(LoadedDataset, self).__init__(values=values, world_size=specification.pop('world_size'), pixel_noise_stddev=pixel_noise_stddev, vectors=specification.pop('vectors', None), vocabularies=specification.pop('vocabularies', None), language=specification.pop('language', None))
        # Discover shard files/directories on disk. self.shards / records_shards
        # become lists (flat layout) or mode-keyed dicts (per-mode layout).
        self.shards = None
        self.records_shards = None
        for root, dirs, files in os.walk(self.directory):
            if root == self.directory:
                # NOTE(review): rebinding `dirs` does not prune os.walk's
                # traversal (that requires in-place mutation) — hidden/temp
                # directories are still descended into, though only the
                # train/validation/test branch below would match them.
                dirs = sorted(d for d in dirs if d[0] != '.' and not d.startswith('temp-'))
                files = sorted(f for f in files if f[0] != '.' and not f.endswith('.npy'))
                if len(dirs) == 0:
                    # Flat layout: shard files directly in the dataset directory.
                    assert all(f[:5] == 'shard' or f[:4] == 'part' for f in files)
                    if any(f[-13:] != '.tfrecords.gz' for f in files):
                        self.shards = [os.path.join(root, f) for f in files if f[-13:] != '.tfrecords.gz']
                    if any(f[-13:] == '.tfrecords.gz' for f in files):
                        self.records_shards = [os.path.join(root, f) for f in files if f[-13:] == '.tfrecords.gz']
                elif set(dirs) <= {'train', 'validation', 'test'}:
                    # Per-mode layout: shards are collected per subdirectory below.
                    assert len(files) == 0
                    self.shards = dict()
                    self.records_shards = dict()
                else:
                    # Flat layout with one directory per shard.
                    assert all(d[:5] == 'shard' or d[:4] == 'part' for d in dirs)
                    self.shards = [os.path.join(root, d) for d in dirs]
                    if len(files) > 0:
                        assert all((f[:5] == 'shard' or f[:4] == 'part') and f[-13:] == '.tfrecords.gz' for f in files)
                        self.records_shards = [os.path.join(root, f) for f in files]
            elif root[len(self.directory) + 1:] in ('train', 'validation', 'test'):
                dirs = sorted(d for d in dirs if d[0] != '.' and not d.startswith('temp-'))
                files = sorted(f for f in files if f[0] != '.' and not f.endswith('.npy'))
                mode = root[len(self.directory) + 1:]
                if len(dirs) > 0:
                    # One directory per shard inside the mode subdirectory.
                    assert all(d[:5] == 'shard' or d[:4] == 'part' for d in dirs)
                    self.shards[mode] = [os.path.join(root, d) for d in dirs]
                    if files:
                        assert all((f[:5] == 'shard' or f[:4] == 'part') and f[-13:] == '.tfrecords.gz' for f in files)
                        self.records_shards[mode] = [os.path.join(root, f) for f in files]
                else:
                    # Shard files inside the mode subdirectory.
                    assert all(f[:5] == 'shard' or f[:4] == 'part' for f in files)
                    if any(f[-13:] != '.tfrecords.gz' for f in files):
                        self.shards[mode] = [os.path.join(root, f) for f in files if f[-13:] != '.tfrecords.gz']
                    if any(f[-13:] == '.tfrecords.gz' for f in files):
                        self.records_shards[mode] = [os.path.join(root, f) for f in files if f[-13:] == '.tfrecords.gz']
        # Per-mode in-memory state used by generate():
        # loaded: mode -> value_name -> list of not-yet-consumed instances
        # shard: mode -> index of the shard loaded last
        # num_instances / num_alternatives: mode -> remaining counters
        self.loaded = dict()
        self.shard = dict()
        self.num_instances = dict()
        self.num_alternatives = dict()

    def __str__(self):
        name = '{} {}'.format(self.type, self.name)
        if self.variant is not None:
            name += '-{}'.format(self.variant)
        if self.language is not None:
            name += ' ({})'.format(self.language)
        return name

    @property
    def name(self):
        return self._name

    @property
    def type(self):
        return self._type

    def specification(self):
        # Base specification plus all entries left over from __init__.
        specification = super(LoadedDataset, self).specification()
        specification.update(self._specification)
        return specification

    def __getattr__(self, name):
        # Fall back to the remaining specification entries for unknown attributes.
        try:
            return super(LoadedDataset, self).__getattr__(name=name)
        except AttributeError:
            if name in self._specification:
                return self._specification[name]
            else:
                raise

    def get_records_paths(self, mode):
        """Return the TF-records shard paths, for a given mode if applicable."""
        if mode == 'none':
            mode = None
        # mode must be given exactly when shards are organized per mode.
        assert (mode is None) != isinstance(self.records_shards, dict)
        assert (mode is None and self.records_shards is not None) or mode in self.records_shards
        if mode is None:
            return self.records_shards
        else:
            assert mode in self.records_shards
            return self.records_shards[mode]

    def generate(self, n, mode=None, include_model=False, alternatives=False):
        """Produce a batch of n instances, loading additional shards as needed."""
        if mode == 'none':
            mode = None
        assert (mode is None) != isinstance(self.shards, dict)
        assert (mode is None and self.shards is not None) or mode in self.shards
        assert not include_model or self.include_model
        if mode is None:
            mode_shards = self.shards
        else:
            mode_shards = self.shards[mode]
        if mode not in self.loaded:
            # First use of this mode: initialize the per-mode buffers.
            self.loaded[mode] = dict()
            self.shard[mode] = -1
            self.num_instances[mode] = 0
            self.num_alternatives[mode] = 0
            for value_name, value_type in self.values.items():
                value_type, alts = util.alternatives_type(value_type=value_type)
                if value_type != 'model' or self.include_model:
                    self.loaded[mode][value_name] = list()
        # Keep loading shards until enough instances (or alternatives, when
        # consuming alternatives one by one in sequential mode) are buffered.
        while (self.num_instances[mode] < n) if (self.random_sampling or alternatives) else (self.num_alternatives[mode] < n):
            if self.random_sampling:
                # Pick a random shard different from the previous one (if possible).
                next_shard = self.shard[mode]
                while len(mode_shards) > 1 and next_shard == self.shard[mode]:
                    next_shard = randrange(len(mode_shards))
                self.shard[mode] = next_shard
            else:
                self.shard[mode] = (self.shard[mode] + 1) % len(mode_shards)
            self.num_instances[mode] = 0
            with util.Archive(path=mode_shards[self.shard[mode]], mode='r', archive=self.archive) as read_file:
                for value_name, value in self.loaded[mode].items():
                    value.extend(self.deserialize_value(
                        path=mode_shards[self.shard[mode]],
                        value_name=value_name,
                        read_file=read_file,
                        numpy_format=(value_name in self.numpy_formats),
                        image_format=self.image_format,
                        num_concat_worlds=self.num_concat_worlds
                    ))
                    if self.num_instances[mode] == 0:
                        # First value read sets the instance count...
                        self.num_instances[mode] = self.num_alternatives[mode] = len(value)
                    else:
                        # ...all other values must agree with it.
                        assert len(value) == self.num_instances[mode]
                    if value_name == 'alternatives':
                        self.num_alternatives[mode] = sum(value)
        batch = self.zero_batch(n, include_model=include_model, alternatives=alternatives)
        for i in range(n):
            if self.random_sampling:
                index = randrange(self.num_instances[mode])
            else:
                index = 0
            # Decide which alternative to take and update the remaining counters.
            # alt_index == -1 means: consume the whole instance.
            if 'alternatives' not in self.values:
                alt_index = -1
                self.num_instances[mode] -= 1
                self.num_alternatives[mode] -= 1
            elif alternatives or self.loaded[mode]['alternatives'][index] == 1:
                alt_index = -1
                self.num_instances[mode] -= 1
                self.num_alternatives[mode] -= self.loaded[mode]['alternatives'][index]
            elif self.random_sampling:
                alt_index = randrange(self.loaded[mode]['alternatives'][index])
                self.num_instances[mode] -= 1
                self.num_alternatives[mode] -= self.loaded[mode]['alternatives'][index]
            else:
                # Sequential mode: consume alternative 0 and keep the instance.
                alt_index = 0
                self.loaded[mode]['alternatives'][index] -= 1
                self.num_alternatives[mode] -= 1
            for value_name, value_type in self.values.items():
                value_type, alts = util.alternatives_type(value_type=value_type)
                if value_type == 'model' and not self.include_model:
                    continue
                if self.random_sampling or alt_index == -1:
                    # Remove the instance from the buffer entirely.
                    value = self.loaded[mode][value_name].pop(index)
                else:
                    value = self.loaded[mode][value_name][index]
                if value_type == 'model' and not include_model:
                    continue
                if not alternatives and alts:
                    value = value.pop(alt_index)
                if not alternatives and value_name == 'alternatives':
                    continue
                if value_type in self.vocabularies:
                    # Word-id sequences may be shorter than the batch vector.
                    batch[value_name][i][:len(value)] = value
                else:
                    batch[value_name][i] = value
        for value_name, value_type in self.values.items():
            value_type, _ = util.alternatives_type(value_type=value_type)
            if value_type == 'world':
                batch[value_name] = self.apply_pixel_noise(world=batch[value_name])
        return batch

    def epoch(self, n, mode=None, include_model=False, alternatives=False):
        """Yield batches of (up to) n instances covering every shard exactly once."""
        if mode == 'none':
            mode = None
        assert (mode is None) != isinstance(self.shards, dict)
        assert (mode is None and self.shards is not None) or mode in self.shards
        assert not include_model or self.include_model
        if mode is None:
            mode_shards = self.shards
        else:
            mode_shards = self.shards[mode]
        available_shards = list(range(len(mode_shards)))
        loaded = dict()
        for value_name, value_type in self.values.items():
            value_type, alts = util.alternatives_type(value_type=value_type)
            if value_type != 'model' or self.include_model:
                loaded[value_name] = list()
        num_instances = 0
        while available_shards:
            if self.random_sampling:
                shard = available_shards.pop(randrange(len(available_shards)))
            else:
                shard = available_shards.pop(0)
            num_instances = 0
            with util.Archive(path=mode_shards[shard], mode='r', archive=self.archive) as read_file:
                for value_name, value in loaded.items():
                    value.extend(self.deserialize_value(
                        path=mode_shards[shard],
                        value_name=value_name,
                        read_file=read_file,
                        numpy_format=(value_name in self.numpy_formats),
                        image_format=self.image_format,
                        num_concat_worlds=self.num_concat_worlds
                    ))
                    if num_instances == 0:
                        num_instances = num_alternatives = len(value)
                    else:
                        assert len(value) == num_instances
                    if value_name == 'alternatives':
                        num_alternatives = sum(value)
            # Emit full batches while enough data is buffered.
            while (num_instances >= n) if alternatives else (num_alternatives >= n):
                batch = self.zero_batch(n, include_model=include_model, alternatives=alternatives)
                for i in range(n):
                    if self.random_sampling:
                        index = randrange(num_instances)
                    else:
                        index = 0
                    # alt_index == -1 means: consume the whole instance.
                    if 'alternatives' not in self.values:
                        alt_index = -1
                        num_instances -= 1
                        num_alternatives -= 1
                    elif alternatives or loaded['alternatives'][index] == 1:
                        alt_index = -1
                        num_instances -= 1
                        num_alternatives -= loaded['alternatives'][index]
                    else:
                        if self.random_sampling:
                            alt_index = randrange(loaded['alternatives'][index])
                        else:
                            alt_index = 0
                        loaded['alternatives'][index] -= 1
                        num_alternatives -= 1
                    for value_name, value_type in self.values.items():
                        value_type, alts = util.alternatives_type(value_type=value_type)
                        if value_type == 'model' and not self.include_model:
                            continue
                        if alt_index == -1:
                            value = loaded[value_name].pop(index)
                            if not alternatives and alts:
                                value = value.pop(0)
                        elif alts:
                            value = loaded[value_name][index].pop(alt_index)
                        else:
                            value = loaded[value_name][index]
                        if value_type == 'model' and not include_model:
                            continue
                        if not alternatives and value_name == 'alternatives':
                            continue
                        if value_type in self.vocabularies:
                            batch[value_name][i][:len(value)] = value
                        else:
                            batch[value_name][i] = value
                for value_name, value_type in self.values.items():
                    value_type, _ = util.alternatives_type(value_type=value_type)
                    if value_type == 'world':
                        batch[value_name] = self.apply_pixel_noise(world=batch[value_name])
                yield batch
                if not available_shards:
                    # Last shard: shrink the batch size to flush the remainder.
                    if alternatives:
                        if 0 < num_instances < n:
                            n = num_instances
                    else:
                        if 0 < num_alternatives < n:
                            n = num_alternatives
        # After a full epoch every buffered instance must have been consumed.
        assert num_instances == 0 and num_alternatives == 0

    def get_html(self, generated, image_format='bmp', image_dir=''):
        # Delegate HTML rendering to the dataset class this data was generated by.
        module = import_module('shapeworld.datasets.{}.{}'.format(self.type, self.name))
        class_name = util.class_name(self.name) + 'Dataset'
        # NOTE(review): if no attribute matches class_name, the loop falls
        # through and `dclass` holds the last item of module.__dict__ — verify
        # the module always defines the expected class.
        for key, dclass in module.__dict__.items():
            if key == class_name:
                break
        return dclass.get_html(self, generated=generated, image_format=image_format, image_dir=image_dir)
class DatasetMixer(Dataset):
    """Meta-dataset mixing instances from several compatible datasets.

    Accepts Dataset instances, or config dicts/strings which are forwarded to
    Dataset.create(). All component datasets must agree on type, language,
    values and world size; vector sizes and vocabularies are merged (maximum
    vector length, union of vocabulary words).
    """

    # accepts Dataset, config, str
    def __init__(self, datasets, consistent_batches=False, distribution=None, train_distribution=None, validation_distribution=None, test_distribution=None):
        """
        Args:
            datasets: Non-empty sequence of Dataset instances or configs.
            consistent_batches: If true, each batch is drawn entirely from one
                sampled dataset; otherwise datasets are sampled per instance.
            distribution: Relative sampling weights over datasets (uniform by default).
            train_distribution / validation_distribution / test_distribution:
                Optional per-mode weights; must be given all together or not at all.
        """
        assert len(datasets) >= 1
        self.datasets = list()
        for dataset in datasets:
            if not isinstance(dataset, Dataset):
                dataset = Dataset.create(config=dataset)
            self.datasets.append(dataset)
        # All mixed datasets must be structurally compatible.
        assert all(dataset.type == self.datasets[0].type for dataset in self.datasets)
        assert all(dataset.language == self.datasets[0].language for dataset in self.datasets)
        assert all(dataset.values == self.datasets[0].values for dataset in self.datasets)
        assert all(dataset.world_size == self.datasets[0].world_size for dataset in self.datasets)
        assert all(sorted(dataset.vectors) == sorted(self.datasets[0].vectors) for dataset in self.datasets)
        assert all(sorted(dataset.vocabularies) == sorted(self.datasets[0].vocabularies) for dataset in self.datasets)
        # combine vectors and words information
        values = self.datasets[0].values
        world_size = self.datasets[0].world_size
        vectors = {value_name: max(dataset.vectors[value_name] for dataset in self.datasets) for value_name in self.datasets[0].vectors}
        vocabularies = dict()
        for name in self.datasets[0].vocabularies:
            vocabularies[name] = sorted(set(word for dataset in self.datasets for word in dataset.vocabularies[name]))
        language = self.datasets[0].language
        super(DatasetMixer, self).__init__(values=values, world_size=world_size, vectors=vectors, vocabularies=vocabularies, language=language)
        # Push the merged vectors/vocabularies back into the components, and
        # prepare id-translation tables for datasets loaded from disk.
        self.translations = list()
        for dataset in self.datasets:
            dataset.vectors = self.vectors
            dataset.vocabularies = self.vocabularies
            if isinstance(dataset, LoadedDataset):
                translation = dict()
                for name, vocabulary in dataset.vocabularies.items():
                    # Maps old word id -> id in the merged vocabulary.
                    translation[name] = np.vectorize({index: self.vocabularies[name][word] for word, index in vocabulary.items()}.__getitem__)
                self.translations.append(translation)
            else:
                self.translations.append(None)
        self.consistent_batches = consistent_batches
        assert not distribution or len(distribution) == len(self.datasets)
        distribution = util.value_or_default(distribution, [1] * len(self.datasets))
        self.distribution = util.cumulative_distribution(distribution)
        # Per-mode distributions must be given either all together or not at all.
        assert bool(train_distribution) == bool(validation_distribution) == bool(test_distribution)
        assert not train_distribution or len(train_distribution) == len(validation_distribution) == len(test_distribution) == len(self.distribution)
        self.train_distribution = util.cumulative_distribution(util.value_or_default(train_distribution, distribution))
        self.validation_distribution = util.cumulative_distribution(util.value_or_default(validation_distribution, distribution))
        self.test_distribution = util.cumulative_distribution(util.value_or_default(test_distribution, distribution))

    @property
    def type(self):
        return self.datasets[0].type

    @property
    def name(self):
        return '+'.join(dataset.name for dataset in self.datasets)

    def generate(self, n, mode=None, include_model=False, alternatives=False):
        """Generate a batch of n instances, sampling datasets per the mode's distribution."""
        if mode == 'none':
            mode = None
        # Fix: previously `distribution` was assigned only for mode None /
        # 'train' / 'validation' / 'test' (two separate if statements), so any
        # other mode string raised NameError on an unbound local. The chain
        # below is identical for all valid modes and falls back to the default
        # distribution otherwise.
        if mode == 'train':
            distribution = self.train_distribution
        elif mode == 'validation':
            distribution = self.validation_distribution
        elif mode == 'test':
            distribution = self.test_distribution
        else:
            distribution = self.distribution
        if self.consistent_batches:
            # Entire batch from one sampled dataset.
            dataset = util.sample(distribution, self.datasets)
            return dataset.generate(n=n, mode=mode, include_model=include_model, alternatives=alternatives)
        else:
            # Sample a dataset per instance and copy its single-instance batch.
            batch = self.zero_batch(n, include_model=include_model, alternatives=alternatives)
            for i in range(n):
                sample = util.sample(distribution)
                generated = self.datasets[sample].generate(n=1, mode=mode, include_model=include_model, alternatives=alternatives)
                for value_name, value in generated.items():
                    value_type = self.values[value_name]
                    if value_type in self.vocabularies:
                        # Word-id sequences may be shorter than the merged vector length.
                        batch[value_name][i][:value.shape[1]] = value[0]
                    else:
                        batch[value_name][i] = value[0]
            return batch
class ClassificationDataset(Dataset):
    """Dataset of generated worlds labeled with a classification vector.

    Subclasses implement get_classes(world) to yield the class indices of a
    world; the batch's 'classification' vector is a (multi-)one-hot encoding,
    or class counts when count_class is set.
    """

    def __init__(self, world_generator, num_classes, multi_class=False, count_class=False, pixel_noise_stddev=None):
        """
        Args:
            world_generator: Callable world generator with initialize(mode=...)
                and world_size attributes.
            num_classes: Length of the classification vector.
            multi_class: Whether an instance may belong to multiple classes.
            count_class: Whether to store class counts instead of indicators
                (only valid together with multi_class).
            pixel_noise_stddev: Optional pixel noise applied to world images.
        """
        values = dict(world='world', world_model='model', classification='vector(float)')
        vectors = dict(classification=num_classes)
        super(ClassificationDataset, self).__init__(values=values, world_size=world_generator.world_size, vectors=vectors, pixel_noise_stddev=pixel_noise_stddev)
        # Counting classes only makes sense when multiple classes are allowed.
        assert multi_class or not count_class
        self.world_generator = world_generator
        self.num_classes = num_classes
        self.multi_class = multi_class
        self.count_class = count_class

    @property
    def type(self):
        return 'classification'

    def specification(self):
        specification = super(ClassificationDataset, self).specification()
        specification['num_classes'] = self.num_classes
        specification['multi_class'] = self.multi_class
        specification['count_class'] = self.count_class
        return specification

    def get_classes(self, world):  # iterable of classes
        raise NotImplementedError

    def generate(self, n, mode=None, include_model=False, alternatives=False):
        """Generate a batch of n worlds with their classification vectors."""
        if mode == 'none':
            mode = None
        batch = self.zero_batch(n, include_model=include_model, alternatives=alternatives)
        for i in range(n):
            # Retry generator initialization until it succeeds for this mode.
            while not self.world_generator.initialize(mode=mode):
                pass
            # Retry world generation until a valid world is produced.
            while True:
                world = self.world_generator()
                if world is not None:
                    break
            batch['world'][i] = self.apply_pixel_noise(world=world.get_array(world_array=batch['world'][i]))
            if include_model:
                batch['world_model'][i] = world.model()
            c = None
            for c in self.get_classes(world):
                if self.count_class:
                    batch['classification'][i][c] += 1.0
                else:
                    batch['classification'][i][c] = 1.0
            if not self.multi_class:
                # Single-class datasets must yield at least one class.
                assert c is not None
        return batch

    def get_html(self, generated, image_format='bmp', image_dir=''):
        """Render the generated batch as a simple HTML listing of worlds and labels."""
        classifications = generated['classification']
        data_html = list()
        for n, classification in enumerate(classifications):
            data_html.append('<div class="instance"><div class="world"><img src="{image_dir}world-{world}.{format}" alt="world-{world}.{format}"></div><div class="num"><p><b>({num})</b></p></div><div class="classification"><p>'.format(image_dir=image_dir, world=n, format=image_format, num=(n + 1)))
            comma = False
            for c, count in enumerate(classification):
                if count == 0.0:
                    continue
                # Comma-separate every class entry after the first.
                if comma:
                    data_html.append(', ')
                else:
                    comma = True
                if self.count_class:
                    data_html.append('{count:.0f} × class {c}'.format(c=c, count=count))
                else:
                    data_html.append('class {c}'.format(c=c))
            data_html.append('</p></div></div>')
        html = '<!DOCTYPE html><html><head><title>{dtype} {name}</title><style>.data{{width: 100%; height: 100%;}} .instance{{width: 100%; display: flex; margin-top: 1px; margin-bottom: 1px; background-color: #DDEEFF; vertical-align: middle; align-items: center;}} .world{{height: {world_height}px; display: inline-block; flex-grow: 0; vertical-align: middle;}} .num{{width: 50px; display: inline-block; flex-grow: 0; text-align: center; vertical-align: middle; margin-left: 10px;}} .classification{{display: inline-block; flex-grow: 1; vertical-align: middle; margin-left: 10px;}}</style></head><body><div class="data">{data}</div></body></html>'.format(
            dtype=self.type,
            name=self.name,
            world_height=self.world_shape()[0],
            data=''.join(data_html)
        )
        return html
class CaptionAgreementDataset(Dataset):
GENERATOR_INIT_FREQUENCY = 25
CAPTIONER_INIT_FREQUENCY = 100
CAPTIONER_INIT_FREQUENCY2 = 5
def __init__(self, world_generator, world_captioner, caption_size, vocabulary, pixel_noise_stddev=None, caption_realizer='dmrs', language=None, worlds_per_instance=1, captions_per_instance=1, correct_ratio=0.5, train_correct_ratio=None, validation_correct_ratio=None, test_correct_ratio=None):
    """Set up a caption-agreement dataset.

    Args:
        world_generator: Callable producing worlds, with initialize(...) and
            world_size attributes.
        world_captioner: Captioner producing captions for generated worlds.
        caption_size: Maximum caption length in words (positive int).
        vocabulary: Iterable of vocabulary words; must already be sorted.
        pixel_noise_stddev: Optional pixel noise applied to world images.
        caption_realizer: CaptionRealizer instance or realizer name (str).
        language: Optional language identifier (defaults to English realizer).
        worlds_per_instance / captions_per_instance: Number of alternative
            worlds resp. captions per instance; at most one may exceed 1.
        correct_ratio: Probability of a correct (agreeing) caption.
        train_correct_ratio / validation_correct_ratio / test_correct_ratio:
            Optional per-mode overrides of correct_ratio.
    """
    # The value schema depends on whether alternatives are generated.
    if worlds_per_instance > 1 or captions_per_instance > 1:
        values = dict(agreement='alternatives(float)')
    else:
        values = dict(agreement='float')
    if worlds_per_instance > 1:
        values.update(world='alternatives(world)', world_model='alternatives(model)', alternatives='int')
    else:
        values.update(world='world', world_model='model')
    if captions_per_instance > 1:
        values.update(caption='alternatives(language)', caption_length='alternatives(int)', caption_pn='alternatives(pn)', caption_pn_length='alternatives(int)', caption_rpn='alternatives(pn)', caption_rpn_length='alternatives(int)', caption_model='alternatives(model)', alternatives='int')
    else:
        values.update(caption='language', caption_length='int', caption_pn='pn', caption_pn_length='int', caption_rpn='pn', caption_rpn_length='int', caption_model='model')
    assert isinstance(caption_size, int) and caption_size > 0
    vocabulary = list(vocabulary)
    assert len(vocabulary) > 0 and vocabulary == sorted(vocabulary), sorted(vocabulary)  # [(w1, w2) for w1, w2 in zip(vocabulary, sorted(vocabulary)) if w1 != w2]
    self.world_generator = world_generator
    self.world_captioner = world_captioner
    from shapeworld.realizers import CaptionRealizer
    if isinstance(caption_realizer, CaptionRealizer):
        self.caption_realizer = caption_realizer
    else:
        assert caption_realizer is None or isinstance(caption_realizer, str)
        self.caption_realizer = CaptionRealizer.from_name(
            name=caption_realizer,
            language=util.value_or_default(language, 'english')
        )
    self.world_captioner.set_realizer(self.caption_realizer)
    # Vector lengths: caption in words, (reverse) polish notation in symbols.
    vectors = dict(
        caption=caption_size,
        caption_pn=self.world_captioner.pn_length(),
        caption_rpn=self.world_captioner.pn_length()
    )
    vocabularies = dict(
        language=vocabulary,
        pn=sorted(self.world_captioner.pn_symbols())
    )
    super(CaptionAgreementDataset, self).__init__(
        values=values,
        world_size=world_generator.world_size,
        pixel_noise_stddev=pixel_noise_stddev,
        vectors=vectors,
        vocabularies=vocabularies,
        language=language
    )
    # At most one of the two alternative counts may exceed one.
    assert worlds_per_instance == 1 or captions_per_instance == 1
    self.worlds_per_instance = worlds_per_instance
    self.captions_per_instance = captions_per_instance
    self.correct_ratio = correct_ratio
    self.train_correct_ratio = util.value_or_default(train_correct_ratio, self.correct_ratio)
    self.validation_correct_ratio = util.value_or_default(validation_correct_ratio, self.correct_ratio)
    self.test_correct_ratio = util.value_or_default(test_correct_ratio, self.correct_ratio)
    # Arity table for polish-notation symbols; padding and unknown are unary.
    self.pn_arity = self.world_captioner.pn_arity()
    self.pn_arity[''] = 1
    self.pn_arity['[UNKNOWN]'] = 1
@property
def type(self):
    """The dataset type string, fixed to 'agreement' for this dataset family."""
    return 'agreement'
def specification(self):
    """Extend the base specification with agreement-specific settings."""
    spec = super(CaptionAgreementDataset, self).specification()
    spec['worlds_per_instance'] = self.worlds_per_instance
    spec['captions_per_instance'] = self.captions_per_instance
    spec['pn_arity'] = self.pn_arity
    return spec
def generate(self, n, mode=None, include_model=False, alternatives=False):
if mode == 'none':
mode = None
if mode == 'train':
correct_ratio = self.train_correct_ratio
elif mode == 'validation':
correct_ratio = self.validation_correct_ratio
elif mode == 'test':
correct_ratio = self.test_correct_ratio
else:
correct_ratio = self.correct_ratio
pn2id = self.vocabularies['pn']
unknown = pn2id['[UNKNOWN]']
pn_size = self.vector_shape('caption_pn')[0]
batch = self.zero_batch(n, include_model=include_model, alternatives=alternatives)
captions = list()
for i in range(n):
correct = random() < correct_ratio
# print(i, correct, flush=True)
# print(i, correct, end=', ', flush=True)
resample = 0
while True:
if resample % self.__class__.GENERATOR_INIT_FREQUENCY == 0:
if resample // self.__class__.GENERATOR_INIT_FREQUENCY >= 1:
# print(i, 'world')
pass
while not self.world_generator.initialize(mode=mode):
pass
# print(self.world_generator.model())
if resample % self.__class__.CAPTIONER_INIT_FREQUENCY == 0:
if resample // self.__class__.CAPTIONER_INIT_FREQUENCY >= 1:
# print(i, 'caption')
# print(i, resample, 'caption', correct, self.world_captioner.model())
# assert False
pass
if self.worlds_per_instance > 1:
correct = True
while not self.world_captioner.initialize(mode=mode, correct=False):
pass
else:
while not self.world_captioner.initialize(mode=mode, correct=correct):
pass
assert self.world_captioner.incorrect_possible()
# print(self.world_captioner.model(), flush=True)
resample += 1
world = self.world_generator()
if world is None:
continue
if self.worlds_per_instance > 1:
caption = self.world_captioner(world=world)
if caption is None:
continue
caption = self.world_captioner.get_correct_caption()
else:
caption = self.world_captioner(world=world)
# print('c', caption)
if caption is not None:
break
if alternatives and (self.worlds_per_instance > 1 or self.captions_per_instance > 1):
batch['agreement'][i].append(float(correct))
else:
batch['agreement'][i] = float(correct)
if alternatives and self.captions_per_instance > 1:
batch['alternatives'][i] = self.captions_per_instance
batch['caption'][i].extend(batch['caption'][i][0].copy() for _ in range(self.captions_per_instance - 1))
batch['caption_pn'][i].extend(batch['caption_pn'][i][0].copy() for _ in range(self.captions_per_instance - 1))
batch['caption_rpn'][i].extend(batch['caption_rpn'][i][0].copy() for _ in range(self.captions_per_instance - 1))
captions.append(caption)
pn = caption.polish_notation()
assert len(pn) <= pn_size, (len(pn), pn_size, pn)
for k, pn_symbol in enumerate(pn):
assert pn_symbol in pn2id, (pn_symbol, pn2id)
batch['caption_pn'][i][0][k] = pn2id.get(pn_symbol, unknown)
batch['caption_pn_length'][i].append(len(pn))
rpn = caption.polish_notation(reverse=True)
assert len(rpn) <= pn_size, (len(rpn), pn_size, rpn)
for k, pn_symbol in enumerate(rpn):
assert pn_symbol in pn2id, (pn_symbol, pn2id)
batch['caption_rpn'][i][0][k] = pn2id.get(pn_symbol, unknown)
batch['caption_rpn_length'][i].append(len(rpn))
if include_model:
batch['caption_model'][i].append(caption.model())
for j in range(1, self.captions_per_instance):
correct = random() < correct_ratio
resample = 0
while True:
if resample % self.__class__.CAPTIONER_INIT_FREQUENCY2 == 0:
if resample // self.__class__.CAPTIONER_INIT_FREQUENCY2 >= 1:
# print(i, j, '2nd caption')
# print(i, 'caption', correct, self.world_captioner.model())
pass
while not self.world_captioner.initialize(mode=mode, correct=correct):
pass
resample += 1
caption = self.world_captioner(world=world)
if caption is not None:
break
captions.append(caption)
pn = caption.polish_notation()
assert len(pn) <= pn_size, (len(pn), pn_size, pn)
for k, pn_symbol in enumerate(pn):
assert pn_symbol in pn2id, (pn_symbol, pn2id)
batch['caption_pn'][i][j][k] = pn2id.get(pn_symbol, unknown)
batch['caption_pn_length'][i].append(len(pn))
rpn = caption.polish_notation(reverse=True)
assert len(rpn) <= pn_size, (len(rpn), pn_size, rpn)
for k, pn_symbol in enumerate(rpn):
assert pn_symbol in pn2id, (pn_symbol, pn2id)
batch['caption_rpn'][i][j][k] = pn2id.get(pn_symbol, unknown)
batch['caption_rpn_length'][i].append(len(rpn))
if include_model:
batch['caption_model'][i].append(caption.model())
batch['agreement'][i].append(float(correct))
else:
captions.append(caption)
pn = caption.polish_notation()
assert len(pn) <= pn_size, (len(pn), pn_size, pn)
for k, pn_symbol in enumerate(pn):
assert pn_symbol in pn2id, (pn_symbol, pn2id)
batch['caption_pn'][i][k] = pn2id.get(pn_symbol, unknown)
batch['caption_pn_length'][i] = len(pn)
rpn = caption.polish_notation(reverse=True)
assert len(rpn) <= pn_size, (len(rpn), pn_size, rpn)
for k, pn_symbol in enumerate(rpn):
assert pn_symbol in pn2id, (pn_symbol, pn2id)
batch['caption_rpn'][i][k] = pn2id.get(pn_symbol, unknown)
batch['caption_rpn_length'][i] = len(rpn)
if include_model:
batch['caption_model'][i] = caption.model()
if alternatives and self.worlds_per_instance > 1:
from shapeworld.captions import PragmaticalPredication
batch['alternatives'][i] = self.worlds_per_instance
batch['world'][i].extend(batch['world'][i][0].copy() for _ in range(self.worlds_per_instance - 1))
batch['world'][i][0] = self.apply_pixel_noise(world=world.get_array(world_array=batch['world'][i][0]))
if include_model:
batch['world_model'][i].append(world.model())
for j in range(1, self.worlds_per_instance):
correct = random() < correct_ratio
while True:
world = self.world_generator()
if world is None:
continue
caption = self.world_captioner.get_correct_caption()
predication = PragmaticalPredication(agreeing=world.entities)
caption.apply_to_predication(predication=predication)
agreement = caption.agreement(predication=predication, world=world)
if not correct:
if agreement >= 0.0:
continue
predication = PragmaticalPredication(agreeing=world.entities)
if not self.world_captioner.incorrect(caption=caption, predication=predication, world=world):
continue
agreement = caption.agreement(predication=predication, world=world)
if agreement > 0.0:
break
batch['world'][i][j] = self.apply_pixel_noise(world=world.get_array(world_array=batch['world'][i][j]))
if include_model:
batch['world_model'][i].append(world.model())
batch['agreement'][i].append(float(correct))
else:
batch['world'][i] = self.apply_pixel_noise(world=world.get_array(world_array=batch['world'][i]))
if include_model:
batch['world_model'][i] = world.model()
word2id = self.vocabularies['language']
unknown = word2id['[UNKNOWN]']
caption_size = self.vector_shape('caption')[0]
unused_words = set(word2id) # for assert
unused_words.remove('')
unused_words.remove('[UNKNOWN]')
missing_words = set() # for assert
max_caption_size = caption_size # for assert
assert len(captions) == n * self.captions_per_instance if alternatives else len(captions) == n
captions = self.caption_realizer.realize(captions=captions)
for i, caption in enumerate(captions):
caption = util.sentence2tokens(sentence=caption)
if len(caption) > caption_size:
if len(caption) > max_caption_size:
max_caption_size = len(caption)
continue
if alternatives and self.captions_per_instance > 1:
j = i % self.captions_per_instance
i = i // self.captions_per_instance
batch['caption_length'][i].append(len(caption))
caption_array = batch['caption'][i][j]
else:
batch['caption_length'][i] = len(caption)
caption_array = batch['caption'][i]
for k, word in enumerate(caption):
if word in word2id:
unused_words.discard(word)
else:
missing_words.add(word)
caption_array[k] = word2id.get(word, unknown)
if util.debug() and len(unused_words) > 0:
print('Words unused in vocabulary: \'{}\''.format('\', \''.join(sorted(unused_words))))
if util.debug() and max_caption_size < caption_size:
print('Caption size smaller than max size: {} < {}'.format(max_caption_size, caption_size))
if len(missing_words) > 0:
print('Words missing in vocabulary: \'{}\''.format('\', \''.join(sorted(missing_words))))
if max_caption_size > caption_size:
print('Caption size exceeds max size: {} > {}'.format(max_caption_size, caption_size))
assert not missing_words, missing_words
assert max_caption_size <= caption_size, (max_caption_size, caption_size)
return batch
def get_html(self, generated, image_format='bmp', image_dir=''):
    """Render a generated batch as a single HTML overview page.

    Each instance row shows its world image(s) (expected to exist at
    '<image_dir>world-<n>[-<alt>].<image_format>', as written by serialize),
    its 1-based index, and its caption(s) decoded through the 'language'
    vocabulary. Rows and per-alternative entries are color-coded by the
    agreement value: 1.0 -> 'correct', 0.0 -> 'incorrect', else 'ambiguous'.
    """
    id2word = self.vocabulary(value_type='language')
    worlds = generated['world']
    captions = generated['caption']
    caption_lengths = generated['caption_length']
    agreements = generated['agreement']
    data_html = list()
    for n, (world, caption, caption_length, agreement) in enumerate(zip(worlds, captions, caption_lengths, agreements)):
        if self.worlds_per_instance > 1 or self.captions_per_instance > 1:
            # With alternatives, coloring is applied per world/caption below.
            data_html.append('<div class="instance">')
        else:
            # Single world and caption: color the whole row by agreement.
            if agreement == 1.0:
                agreement = 'correct'
            elif agreement == 0.0:
                agreement = 'incorrect'
            else:
                agreement = 'ambiguous'
            data_html.append('<div class="{agreement}">'.format(agreement=agreement))
        if self.worlds_per_instance > 1:
            # NOTE(review): `agreement` is assumed here to be a list with one
            # entry per alternative world; the loop rebinds the same name.
            for i, agreement in enumerate(agreement):
                if agreement == 1.0:
                    agreement = 'correct'
                elif agreement == 0.0:
                    agreement = 'incorrect'
                else:
                    agreement = 'ambiguous'
                data_html.append('<div class="{agreement}" style="padding: 5px;"><div class="world"><img src="{image_dir}world-{world}-{alt}.{format}" alt="world-{world}-{alt}.{format}"></div></div>'.format(
                    agreement=agreement,
                    image_dir=image_dir,
                    world=n,
                    format=image_format,
                    alt=i
                ))
        else:
            data_html.append('<div class="world"><img src="{image_dir}world-{world}.{format}" alt="world-{world}.{format}"></div>'.format(image_dir=image_dir, world=n, format=image_format))
        data_html.append('<div class="num"><b>({num})</b></div>'.format(num=(n + 1)))
        if self.captions_per_instance > 1:
            data_html.append('<div class="caption">')
            # NOTE(review): `caption`, `caption_length` and `agreement` are
            # assumed to be parallel per-alternative sequences here — this
            # presumes worlds_per_instance and captions_per_instance are not
            # both > 1 (otherwise `agreement` was rebound above); confirm.
            for caption, caption_length, agreement in zip(caption, caption_length, agreement):
                if agreement == 1.0:
                    agreement = 'correct'
                elif agreement == 0.0:
                    agreement = 'incorrect'
                else:
                    agreement = 'ambiguous'
                data_html.append('<div class="{agreement}">{caption}</div>'.format(
                    agreement=agreement,
                    caption=util.tokens2sentence(id2word[word] for word in caption[:caption_length])
                ))
            data_html.append('</div>')
        else:
            data_html.append('<div class="caption">{caption}</div>'.format(
                caption=util.tokens2sentence(id2word[word] for word in caption[:caption_length])
            ))
        data_html.append('</div>')
    # Page skeleton: doubled braces escape CSS blocks inside str.format.
    html = '<!DOCTYPE html><html><head><title>{dtype} {name}</title><style>.data{{width: 100%; height: 100%;}} .instance{{width: 100%; display: flex; margin-top: 1px; margin-bottom: 1px; background-color: #DDEEFF; vertical-align: middle; align-items: center;}} .world{{height: {world_height}px; display: inline-block; flex-grow: 0; vertical-align: middle;}} .num{{width: 50px; display: inline-block; flex-grow: 0; text-align: center; vertical-align: middle; margin-left: 10px;}} .caption{{display: inline-block; flex-grow: 1; vertical-align: middle; margin-left: 10px;}} .correct{{margin-top: 1px; margin-bottom: 1px; background-color: #BBFFBB;}} .incorrect{{margin-top: 1px; margin-bottom: 1px; background-color: #FFBBBB;}} .ambiguous{{margin-top: 1px; margin-bottom: 1px; background-color: #FFFFBB;}}</style></head><body><div class="data">{data}</div></body></html>'.format(
        dtype=self.type,
        name=self.name,
        world_height=self.world_shape()[0],
        data=''.join(data_html)
    )
    return html
# NOTE: stray note ("alt infinite loop fix") — commented out so the file stays parseable.
from importlib import import_module
from io import BytesIO
import json
from math import ceil, sqrt
import os
from random import random, randrange
import numpy as np
from PIL import Image
from shapeworld import util
class Dataset(object):
    """Abstract base class for ShapeWorld datasets.

    Defines the value schema (`values`), world image size, optional vector
    shapes and word vocabularies, and (de)serialization of generated batches.
    """

    def __init__(self, values, world_size, pixel_noise_stddev=None, vectors=None, vocabularies=None, language=None):
        # Subclasses must define `type` and `name` (checked via the properties).
        assert self.type and self.name
        # The special 'alternatives' value, if present, must be an int count.
        assert all(value_name != 'alternatives' or value_type == 'int' for value_name, value_type in values.items())
        self.values = values
        if isinstance(world_size, int):
            self.world_size = world_size
        else:
            self.world_size = tuple(world_size)
        self.pixel_noise_stddev = pixel_noise_stddev
        # NOTE(review): the vectors=None default would raise here (None.items());
        # callers apparently always pass a dict — confirm.
        self.vectors = {value_name: shape if isinstance(shape, int) else tuple(shape) for value_name, shape in vectors.items()}
        self.vocabularies = dict()
        if vocabularies is not None:
            for name, vocabulary in vocabularies.items():
                if isinstance(vocabulary, dict):
                    # Pre-built word->index mapping: must be a dense permutation 0..len-1.
                    assert all(isinstance(word, str) and isinstance(index, int) for word, index in vocabulary.items())
                    assert sorted(vocabulary.values()) == list(range(len(vocabulary)))
                    self.vocabularies[name] = vocabulary
                else:
                    # Word list: real words get indices from 1, '' is reserved as
                    # padding (index 0), '[UNKNOWN]' is appended with the last index.
                    vocabulary = {word: index for index, word in enumerate((word for word in vocabulary if word != '' and word != '[UNKNOWN]'), 1)}
                    vocabulary[''] = 0
                    vocabulary['[UNKNOWN]'] = len(vocabulary)
                    self.vocabularies[name] = vocabulary
        self.language = language
@staticmethod
def create(dtype=None, name=None, variant=None, language=None, config=None, **kwargs):
    """Factory: instantiate a dataset from type/name/variant/config specs.

    `name`, `variant` and `config` may each be JSON strings, lists (yielding a
    DatasetMixer over the component datasets), a config dict, or a path to a
    config file / directory containing '<dtype>-<name>[-variant][-language].json'.
    A config with 'generated': true loads a serialized dataset (LoadedDataset);
    otherwise the dataset class is imported from shapeworld.datasets.<dtype>.<name>.

    Fix: the generated-config branch previously tested `'dtype' in config`
    although the key actually read and asserted is config['type'] (matching the
    parallel 'name'/'variant'/'language' branches); it now tests `'type'`.
    """
    assert variant is None or name is not None
    assert language is None or name is not None
    # Arguments may arrive JSON-encoded (e.g. from the command line); decode
    # best-effort and fall back to the raw string.
    if isinstance(name, str):
        try:
            name = json.loads(name)
        except Exception:
            pass
    if isinstance(variant, str):
        try:
            variant = json.loads(variant)
        except Exception:
            pass
    if isinstance(config, str):
        try:
            config = json.loads(config)
        except Exception:
            pass
    if isinstance(name, (tuple, list)):
        # Multiple names: build each component dataset and mix them.
        try:
            if not isinstance(variant, list):
                variant = [variant for _ in name]
            if not isinstance(config, list):
                config = [config for _ in name]
            datasets = list()
            for n, v, c in zip(name, variant, config):
                datasets.append(Dataset.create(dtype=dtype, name=n, variant=v, language=language, config=c))
            dataset = DatasetMixer(datasets=datasets, **kwargs)
            assert dtype == dataset.type
            assert language is None or language == dataset.language
            return dataset
        except TypeError:
            assert False
    if isinstance(variant, (tuple, list)):
        # Multiple variants of the same named dataset: mix them.
        try:
            if not isinstance(config, list):
                config = [config for _ in variant]
            datasets = list()
            for v, c in zip(variant, config):
                datasets.append(Dataset.create(dtype=dtype, name=name, variant=v, language=language, config=c))
            dataset = DatasetMixer(datasets=datasets, **kwargs)
            assert dtype == dataset.type
            assert name == dataset.name
            assert language is None or language == dataset.language
            return dataset
        except TypeError:
            assert False
    if isinstance(config, (tuple, list)):
        # Multiple configs: mix the resulting datasets.
        assert len(kwargs) == 0
        try:
            datasets = list()
            for c in config:
                datasets.append(Dataset.create(dtype=dtype, name=name, variant=variant, language=language, config=c))
            dataset = DatasetMixer(datasets=datasets, **kwargs)
            assert dtype is None or dtype == dataset.type
            assert language is None or language == dataset.language
            return dataset
        except TypeError:
            assert False
    if config is None:
        config = dict()
    elif isinstance(config, dict):
        config = dict(config)
    elif os.path.isdir(config):
        # Directory: locate the dataset's JSON specification inside it.
        assert dtype is not None and name is not None
        full_name = name
        if variant is not None:
            full_name = '{}-{}'.format(full_name, variant)
        if language is not None:
            full_name = '{}-{}'.format(full_name, language)
        directory = config
        config = os.path.join(config, '{}-{}.json'.format(dtype, full_name))
        with open(config, 'r') as filehandle:
            config = json.load(fp=filehandle)
        if 'directory' not in config:
            config['directory'] = directory
        return Dataset.create(dtype=dtype, name=name, variant=variant, language=language, config=config, **kwargs)
    elif os.path.isfile(config):
        # File: load the specification and reconcile it with the arguments.
        with open(config, 'r') as filehandle:
            config = json.load(fp=filehandle)
        d = config.pop('type', None)
        if dtype is None:
            dtype = d
        else:
            assert dtype == d
        n = config.pop('name', None)
        if name is None:
            name = n
        else:
            assert name == n
        v = config.pop('variant', None)
        if variant is None:
            variant = v
        else:
            assert variant == v
        l = config.pop('language', language)
        if language is None:
            language = l
        else:
            assert language == l
        if 'config' in config:
            # Nested config: remaining top-level entries become kwargs.
            assert not kwargs
            kwargs = config
            config = kwargs.pop('config')
        return Dataset.create(dtype=dtype, name=name, variant=variant, language=language, config=config, **kwargs)
    else:
        raise Exception('Invalid config value: ' + str(config))
    if config.pop('generated', False):
        # Serialized dataset: reconcile identifiers with the config, then load.
        assert dtype is None or 'type' not in config or config['type'] == dtype
        assert name is None or 'name' not in config or config['name'] == name
        assert variant is None or 'variant' not in config or config['variant'] == variant
        assert language is None or 'language' not in config or config['language'] == language
        if 'type' in config:  # was: 'dtype' in config (never true; config carries 'type')
            assert dtype == config['type']
            dtype = config['type']
        else:
            assert dtype is not None
            config['type'] = dtype
        if 'name' in config:
            assert name == config['name']
            name = config['name']
        else:
            assert name is not None
            config['name'] = name
        if 'variant' in config:
            assert variant == config['variant']
            variant = config.get('variant')
        elif variant is not None:
            config['variant'] = variant
        if 'language' in config:
            assert language == config['language']
            language = config.get('language')
        elif language is not None:
            config['language'] = language
        dataset = LoadedDataset(specification=config, **kwargs)
        assert dtype == dataset.type
        assert name == dataset.name
        assert variant is None or variant == dataset.variant
        assert language is None or language == dataset.language
        return dataset
    else:
        # Fresh dataset: import the implementing class and instantiate it.
        assert variant is None
        config.pop('directory', None)
        for key, value in kwargs.items():
            assert key not in config
            config[key] = value
        if dtype is None:
            dtype = config.pop('type')
        else:
            dtype_config = config.pop('type', dtype)
            assert dtype_config == dtype
        if name is None:
            name = config.pop('name')
        else:
            name_config = config.pop('name', name)
            assert name_config == name
        if 'language' in config:
            assert language is None or config['language'] == language
        elif language is not None:
            config['language'] = language
        module = import_module('shapeworld.datasets.{}.{}'.format(dtype, name))
        class_name = util.class_name(name) + 'Dataset'
        # Scan the module namespace for the dataset class (rebinds `module`).
        for key, module in module.__dict__.items():
            if key == class_name:
                break
        dataset = module(**config)
        assert dtype == dataset.type
        assert name == dataset.name
        return dataset
def __str__(self):
if self.language is None:
return '{} {}'.format(self.type, self.name)
else:
return '{} {} ({})'.format(self.type, self.name, self.language)
@property
def type(self):
    # Dataset type identifier (e.g. 'agreement'); must be provided by subclasses.
    raise NotImplementedError
@property
def name(self):
    # Derive the dataset name from the class name: '<Name>Dataset' -> real name.
    name = self.__class__.__name__
    assert name[-7:] == 'Dataset'
    return util.real_name(name[:-7])
def specification(self):
specification = dict(type=self.type, name=self.name, values=self.values)
if isinstance(self.world_size, int):
specification['world_size'] = self.world_size
else:
specification['world_size'] = list(self.world_size)
if self.vectors:
specification['vectors'] = self.vectors
if self.vocabularies:
specification['vocabularies'] = self.vocabularies
if self.language:
specification['language'] = self.language
return specification
def world_shape(self):
if isinstance(self.world_size, int):
return (self.world_size, self.world_size, 3)
else:
return (self.world_size[0], self.world_size[1], 3)
def vector_shape(self, value_name):
shape = self.vectors.get(value_name)
if isinstance(shape, int):
return (self.vectors.get(value_name),)
else:
return shape
def vocabulary_size(self, value_type):
if self.vocabularies is None or value_type not in self.vocabularies:
return -1
else:
return len(self.vocabularies[value_type])
def vocabulary(self, value_type):
if self.vocabularies is None or value_type not in self.vocabularies:
return None
else:
return [word for word, _ in sorted(self.vocabularies[value_type].items(), key=(lambda kv: kv[1]))]
def to_surface(self, value_type, word_ids):
id2word = self.vocabulary(value_type)
assert id2word is not None
if word_ids.ndim == 1:
return ' '.join(id2word[word_id] for word_id in word_ids)
elif word_ids.ndim == 2:
return [self.to_surface(value_type, word_ids) for word_ids in word_ids]
else:
assert False
def from_surface(self, value_type, words):
word2id = self.vocabularies.get(value_type)
assert word2id is not None
if isinstance(words, str):
return np.asarray(word2id[word] for word in words.split(' '))
elif isinstance(words, list):
if len(words) > 0 and ' ' in words[0]:
return [self.from_surface(value_type, words) for words in words]
else:
return np.asarray(word2id[word] for word in words)
else:
assert False
def apply_pixel_noise(self, world):
    """Add truncated Gaussian pixel noise to `world` in place and clip to [0, 1].

    Noise values beyond +/- 2 stddev are rejected and redrawn until all entries
    lie within the truncation bounds. No-op when pixel_noise_stddev is unset or
    non-positive. Returns the (mutated) world array.
    """
    if self.pixel_noise_stddev is not None and self.pixel_noise_stddev > 0.0:
        noise = np.random.normal(loc=0.0, scale=self.pixel_noise_stddev, size=world.shape)
        # Boolean mask (via elementwise OR through +) of out-of-bound samples.
        mask = (noise < -2.0 * self.pixel_noise_stddev) + (noise > 2.0 * self.pixel_noise_stddev)
        while np.any(a=mask):
            # Zero out the rejected entries, then redraw just those positions.
            noise -= mask * noise
            noise += mask * np.random.normal(loc=0.0, scale=self.pixel_noise_stddev, size=world.shape)
            mask = (noise < -2.0 * self.pixel_noise_stddev) + (noise > 2.0 * self.pixel_noise_stddev)
        world += noise
        np.clip(world, a_min=0.0, a_max=1.0, out=world)
    return world
def zero_batch(self, n, include_model=False, alternatives=False):
    """Pre-allocate an all-zero batch dict for n instances.

    Values whose type is flagged as per-alternative (and alternatives=True)
    become per-instance lists (seeded with one zero array for vector/world
    types); all others become zero numpy arrays over the batch dimension.
    Model values are only allocated when include_model is set.
    """
    batch = dict()
    for value_name, value_type in self.values.items():
        value_type, alts = util.alternatives_type(value_type=value_type)
        if alternatives and alts:
            if value_type == 'int':
                batch[value_name] = [[] for _ in range(n)]
            elif value_type == 'float':
                batch[value_name] = [[] for _ in range(n)]
            elif value_type == 'vector(int)' or value_type in self.vocabularies:
                # Vocabulary-typed values are id vectors, hence int32.
                batch[value_name] = [[np.zeros(shape=self.vector_shape(value_name), dtype=np.int32)] for _ in range(n)]
            elif value_type == 'vector(float)':
                batch[value_name] = [[np.zeros(shape=self.vector_shape(value_name), dtype=np.float32)] for _ in range(n)]
            elif value_type == 'world':
                batch[value_name] = [[np.zeros(shape=self.world_shape(), dtype=np.float32)] for _ in range(n)]
            elif value_type == 'model' and include_model:
                batch[value_name] = [[] for _ in range(n)]
        else:
            # The 'alternatives' count value is only included when requested.
            if value_type == 'int' and (value_name != 'alternatives' or alternatives):
                batch[value_name] = np.zeros(shape=(n,), dtype=np.int32)
            elif value_type == 'float':
                batch[value_name] = np.zeros(shape=(n,), dtype=np.float32)
            elif value_type == 'vector(int)' or value_type in self.vocabularies:
                batch[value_name] = np.zeros(shape=((n,) + self.vector_shape(value_name)), dtype=np.int32)
            elif value_type == 'vector(float)':
                batch[value_name] = np.zeros(shape=((n,) + self.vector_shape(value_name)), dtype=np.float32)
            elif value_type == 'world':
                batch[value_name] = np.zeros(shape=((n,) + self.world_shape()), dtype=np.float32)
            elif value_type == 'model' and include_model:
                batch[value_name] = [None] * n
    return batch
def generate(self, n, mode=None, include_model=False, alternatives=False):  # mode: None, 'train', 'validation', 'test'
    """Produce a batch dict of n instances; implemented by subclasses."""
    raise NotImplementedError
def iterate(self, n, mode=None, include_model=False, alternatives=False, iterations=None):
i = 0
while iterations is None or i < iterations:
yield self.generate(n=n, mode=mode, include_model=include_model, alternatives=alternatives)
i += 1
def get_html(self, generated, image_format='bmp', image_dir=''):
    # Optional HTML visualization hook; subclasses may override (None = unsupported).
    return None
def serialize(self, path, generated, additional=None, filename=None, archive=None, html=False, numpy_formats=(), image_format='bmp', concat_worlds=False):
    """Write a generated batch (plus optional additional values) to `path`.

    Each value is serialized via serialize_value into the (possibly archived)
    shard; `additional` maps value_name -> (value, value_type) and must not
    collide with the dataset's own values. With html=True, an HTML overview
    (get_html) is also written.
    NOTE(review): the `filename` parameter is unused here — confirm intent.
    """
    assert not additional or all(value_name not in self.values for value_name in additional)
    if not os.path.isdir(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))
    with util.Archive(path=path, mode='w', archive=archive) as write_file:
        for value_name, value in generated.items():
            self.serialize_value(
                path=path,
                value=value,
                value_name=value_name,
                write_file=write_file,
                numpy_format=(value_name in numpy_formats),
                image_format=image_format,
                concat_worlds=concat_worlds
            )
        if additional:
            for value_name, (value, value_type) in additional.items():
                self.serialize_value(
                    path=path,
                    value=value,
                    value_name=value_name,
                    write_file=write_file,
                    value_type=value_type,
                    numpy_format=(value_name in numpy_formats),
                    image_format=image_format,
                    concat_worlds=concat_worlds
                )
        if html:
            html = self.get_html(generated=generated, image_format=image_format)
            assert html is not None
            write_file(filename='data.html', value=html)
def serialize_value(self, path, value, value_name, write_file, value_type=None, numpy_format=False, image_format='bmp', concat_worlds=False):
    """Serialize one named batch value into the shard via write_file.

    Depending on the (possibly per-alternative) value type this writes a .txt
    file (ints/floats/vectors/captions, ';'-separated alternatives), a .npy
    file (numpy_format), image files (worlds; optionally concatenated into a
    single grid image), or a .json file (models).
    """
    if value_type is None:
        value_type = self.values[value_name]
    value_type, alts = util.alternatives_type(value_type=value_type)
    if value_name == 'alternatives':
        assert value_type == 'int'
        assert not numpy_format
        value = '\n'.join(str(int(x)) for x in value) + '\n'
        write_file('alternatives.txt', value)
    elif value_type == 'int':
        assert not numpy_format
        if alts:
            value = '\n'.join(';'.join(str(x)for x in xs) for xs in value) + '\n'
        else:
            value = '\n'.join(str(x) for x in value) + '\n'
        write_file(value_name + '.txt', value)
    elif value_type == 'float':
        assert not numpy_format
        if alts:
            value = '\n'.join(';'.join(str(round(x, 3))for x in xs) for xs in value) + '\n'
        else:
            # Floats are rounded to 3 decimals in the text representation.
            value = '\n'.join(str(round(x, 3)) for x in value) + '\n'
        write_file(value_name + '.txt', value)
    elif value_type == 'vector(int)':
        if numpy_format:
            np.save(path + '-' + value_name + '.npy', value)
        elif alts:
            value = '\n'.join(';'.join(','.join(str(x) for x in vector.flatten()) for vector in vectors) for vectors in value) + '\n'
            write_file(value_name + '.txt', value)
        else:
            value = '\n'.join(','.join(str(x) for x in vector.flatten()) for vector in value) + '\n'
            write_file(value_name + '.txt', value)
    elif value_type == 'vector(float)':
        if numpy_format:
            np.save(path + '-' + value_name + '.npy', value)
        elif alts:
            value = '\n'.join(';'.join(','.join(str(round(x, 3)) for x in vector.flatten()) for vector in vectors) for vectors in value) + '\n'
            write_file(value_name + '.txt', value)
        else:
            value = '\n'.join(','.join(str(round(x, 3)) for x in vector.flatten()) for vector in value) + '\n'
            write_file(value_name + '.txt', value)
    elif value_type == 'world':
        from shapeworld.world import World
        if numpy_format:
            np.save(path + '-' + value_name + '.npy', value)
        elif concat_worlds:
            # Tile all worlds into one near-square grid image, zero-padding
            # the last row if the count does not divide evenly.
            assert not alts
            size = ceil(sqrt(len(value)))
            worlds = []
            for y in range(ceil(len(value) / size)):
                if y < len(value) // size:
                    worlds.append(np.concatenate([value[y * size + x] for x in range(size)], axis=1))
                else:
                    worlds.append(np.concatenate([value[y * size + x] for x in range(len(value) % size)] + [np.zeros_like(a=value[0]) for _ in range(-len(value) % size)], axis=1))
            worlds = np.concatenate(worlds, axis=0)
            image = World.get_image(world_array=worlds)
            image_bytes = BytesIO()
            image.save(image_bytes, format=image_format)
            write_file('{}.{}'.format(value_name, image_format), image_bytes.getvalue(), binary=True)
            image_bytes.close()
        else:
            # One image file per world: '<name>-<n>[-<alt>].<format>'.
            for n in range(len(value)):
                if alts:
                    for i, v in enumerate(value[n]):
                        image = World.get_image(world_array=v)
                        image_bytes = BytesIO()
                        image.save(image_bytes, format=image_format)
                        write_file('{}-{}-{}.{}'.format(value_name, n, i, image_format), image_bytes.getvalue(), binary=True)
                        image_bytes.close()
                else:
                    image = World.get_image(world_array=value[n])
                    image_bytes = BytesIO()
                    image.save(image_bytes, format=image_format)
                    write_file('{}-{}.{}'.format(value_name, n, image_format), image_bytes.getvalue(), binary=True)
                    image_bytes.close()
    elif value_type == 'model':
        assert not numpy_format
        value = json.dumps(obj=value, indent=2, sort_keys=True)
        write_file(value_name + '.json', value)
    else:
        # Vocabulary-typed value: decode ids to words (id 0 = padding, skipped).
        assert not numpy_format
        id2word = self.vocabulary(value_type=value_type)
        if alts:
            value = '\n\n'.join('\n'.join(' '.join(id2word[word_id] for word_id in words if word_id) for words in words_alts) for words_alts in value) + '\n\n'
        else:
            value = '\n'.join(' '.join(id2word[word_id] for word_id in words if word_id) for words in value) + '\n'
        write_file(value_name + '.txt', value)
def deserialize_value(self, path, value_name, read_file, value_type=None, numpy_format=False, image_format='bmp', num_concat_worlds=0):
    """Inverse of serialize_value: read one named value back from a shard.

    Returns lists (per instance, nested per alternative when applicable) of
    ints/floats/arrays/id-lists, or a numpy array when numpy_format is set.
    """
    if value_type is None:
        value_type = self.values[value_name]
    value_type, alts = util.alternatives_type(value_type=value_type)
    if value_name == 'alternatives':
        assert value_type == 'int'
        assert not numpy_format
        value = read_file('alternatives.txt')
        return [int(x) for x in value.split('\n')[:-1]]
    elif value_type == 'int':
        assert not numpy_format
        value = read_file(value_name + '.txt')
        if alts:
            value = [[int(x) for x in xs.split(';')] for xs in value.split('\n')[:-1]]
        else:
            value = [int(x) for x in value.split('\n')[:-1]]
        return value
    elif value_type == 'float':
        assert not numpy_format
        value = read_file(value_name + '.txt')
        if alts:
            value = [[float(x) for x in xs.split(';')] for xs in value.split('\n')[:-1]]
        else:
            value = [float(x) for x in value.split('\n')[:-1]]
        return value
    elif value_type == 'vector(int)':
        if numpy_format:
            # Strip all (possibly stacked) extensions to recover the shard base path.
            path, extension = os.path.splitext(path)
            while extension != '':
                path, extension = os.path.splitext(path)
            value = np.load(path + '-' + value_name + '.npy')
        else:
            value = read_file(value_name + '.txt')
            shape = self.vector_shape(value_name=value_name)
            if alts:
                value = [[np.array(object=[int(x) for x in vector.split(',')], dtype=np.int32).reshape(shape) for vector in vectors.split(';')] for vectors in value.split('\n')[:-1]]
            else:
                value = [np.array(object=[int(x) for x in vector.split(',')], dtype=np.int32).reshape(shape) for vector in value.split('\n')[:-1]]
        return value
    elif value_type == 'vector(float)':
        if numpy_format:
            path, extension = os.path.splitext(path)
            while extension != '':
                path, extension = os.path.splitext(path)
            value = np.load(path + '-' + value_name + '.npy')
        else:
            value = read_file(value_name + '.txt')
            shape = self.vector_shape(value_name=value_name)
            if alts:
                value = [[np.array(object=[float(x) for x in vector.split(',')], dtype=np.float32).reshape(shape) for vector in vectors.split(';')] for vectors in value.split('\n')[:-1]]
            else:
                # NOTE(review): values in scientific notation ('e' in x) are
                # mapped to 0.0 here, unlike the alts branch above — presumably
                # treating tiny serialized values as zero; confirm intent.
                value = [np.array(object=[0.0 if 'e' in x else float(x) for x in vector.split(',')], dtype=np.float32).reshape(shape) for vector in value.split('\n')[:-1]]
        return value
    elif value_type == 'world':
        from shapeworld.world import World
        if numpy_format:
            path, extension = os.path.splitext(path)
            while extension != '':
                path, extension = os.path.splitext(path)
            value = np.load(path + '-' + value_name + '.npy')
        elif num_concat_worlds:
            # Split the single concatenated grid image back into individual worlds.
            assert not alts
            size = ceil(sqrt(num_concat_worlds))
            image_bytes = read_file('{}.{}'.format(value_name, image_format), binary=True)
            assert image_bytes is not None
            image_bytes = BytesIO(image_bytes)
            image = Image.open(image_bytes)
            worlds = World.from_image(image)
            height = worlds.shape[0] // ceil(num_concat_worlds / size)
            assert worlds.shape[0] % ceil(num_concat_worlds / size) == 0
            width = worlds.shape[1] // size
            assert worlds.shape[1] % size == 0
            value = []
            for y in range(ceil(num_concat_worlds / size)):
                for x in range(size if y < num_concat_worlds // size else num_concat_worlds % size):
                    value.append(worlds[y * height: (y + 1) * height, x * width: (x + 1) * width, :])
        else:
            # Read numbered image files until the first missing one.
            value = list()
            n = 0
            flag = True
            while flag:
                if alts:
                    i = 0
                    v = list()
                    while True:
                        image_bytes = read_file('{}-{}-{}.{}'.format(value_name, n, i, image_format), binary=True)
                        if image_bytes is None:
                            flag = False
                            break
                        image_bytes = BytesIO(image_bytes)
                        image = Image.open(image_bytes)
                        v.append(World.from_image(image))
                        i += 1
                    # NOTE(review): the terminating iteration appends one
                    # trailing empty list here — confirm consumers tolerate it.
                    value.append(v)
                else:
                    image_bytes = read_file('{}-{}.{}'.format(value_name, n, image_format), binary=True)
                    if image_bytes is None:
                        break
                    image_bytes = BytesIO(image_bytes)
                    image = Image.open(image_bytes)
                    value.append(World.from_image(image))
                n += 1
        return value
    elif value_type == 'model':
        assert not numpy_format
        value = read_file(value_name + '.json')
        value = json.loads(s=value)
        return value
    else:
        # Vocabulary-typed value: encode words back to ids.
        assert not numpy_format
        word2id = self.vocabularies.get(value_type)
        value = read_file(value_name + '.txt')
        if alts:
            value = [[[word2id[word] for word in words.split(' ')] for words in words_alts.split('\n')] for words_alts in value.split('\n\n')[:-1]]
        else:
            value = [[word2id[word] for word in words.split(' ')] for words in value.split('\n')[:-1]]
        return value
class LoadedDataset(Dataset):
    """Dataset backed by previously generated, serialized shards on disk."""

    def __init__(self, specification, random_sampling=True, pixel_noise_stddev=None, exclude_values=()):
        # Identity and layout metadata recorded at generation time.
        self._type = specification.pop('type')
        self._name = specification.pop('name')
        self.variant = specification.pop('variant', None)
        self.directory = specification.pop('directory')
        relative_directory = specification.get('relative_directory')
        if relative_directory is not None:
            self.directory = os.path.join(self.directory, relative_directory)
        self.archive = specification.pop('archive', None)
        self.include_model = specification.pop('include_model', False)
        self.numpy_formats = tuple(specification.pop('numpy_formats', ()))
        self.image_format = specification.pop('image_format', 'bmp')
        self.num_concat_worlds = specification.pop('num_concat_worlds', 0)
        # Remaining entries stay accessible via __getattr__/specification().
        self._specification = specification
        self.random_sampling = random_sampling
        values = specification.pop('values')
        for value in exclude_values:
            values.pop(value, None)
        # Noise argument overrides the recorded setting; both is an error.
        if pixel_noise_stddev is None:
            pixel_noise_stddev = specification.pop('pixel_noise_stddev', None)
        else:
            assert 'pixel_noise_stddev' not in specification
        super(LoadedDataset, self).__init__(values=values, world_size=specification.pop('world_size'), pixel_noise_stddev=pixel_noise_stddev, vectors=specification.pop('vectors', None), vocabularies=specification.pop('vocabularies', None), language=specification.pop('language', None))
        # Discover shard layout: either flat (lists) or split into
        # train/validation/test subdirectories (dicts keyed by mode).
        self.shards = None
        self.records_shards = None
        for root, dirs, files in os.walk(self.directory):
            if root == self.directory:
                # Hidden entries, temp dirs and .npy sidecars are ignored.
                dirs = sorted(d for d in dirs if d[0] != '.' and not d.startswith('temp-'))
                files = sorted(f for f in files if f[0] != '.' and not f.endswith('.npy'))
                if len(dirs) == 0:
                    # Flat layout: shard/part files, tfrecords separated out.
                    assert all(f[:5] == 'shard' or f[:4] == 'part' for f in files)
                    if any(f[-13:] != '.tfrecords.gz' for f in files):
                        self.shards = [os.path.join(root, f) for f in files if f[-13:] != '.tfrecords.gz']
                    if any(f[-13:] == '.tfrecords.gz' for f in files):
                        self.records_shards = [os.path.join(root, f) for f in files if f[-13:] == '.tfrecords.gz']
                elif set(dirs) <= {'train', 'validation', 'test'}:
                    # Mode-split layout: per-mode entries filled in below.
                    assert len(files) == 0
                    self.shards = dict()
                    self.records_shards = dict()
                else:
                    # Flat layout with shard directories.
                    assert all(d[:5] == 'shard' or d[:4] == 'part' for d in dirs)
                    self.shards = [os.path.join(root, d) for d in dirs]
                    if len(files) > 0:
                        assert all((f[:5] == 'shard' or f[:4] == 'part') and f[-13:] == '.tfrecords.gz' for f in files)
                        self.records_shards = [os.path.join(root, f) for f in files]
            elif root[len(self.directory) + 1:] in ('train', 'validation', 'test'):
                dirs = sorted(d for d in dirs if d[0] != '.' and not d.startswith('temp-'))
                files = sorted(f for f in files if f[0] != '.' and not f.endswith('.npy'))
                mode = root[len(self.directory) + 1:]
                if len(dirs) > 0:
                    # Shard directories plus optional tfrecords files.
                    assert all(d[:5] == 'shard' or d[:4] == 'part' for d in dirs)
                    self.shards[mode] = [os.path.join(root, d) for d in dirs]
                    if files:
                        assert all((f[:5] == 'shard' or f[:4] == 'part') and f[-13:] == '.tfrecords.gz' for f in files)
                        self.records_shards[mode] = [os.path.join(root, f) for f in files]
                else:
                    assert all(f[:5] == 'shard' or f[:4] == 'part' for f in files)
                    if any(f[-13:] != '.tfrecords.gz' for f in files):
                        self.shards[mode] = [os.path.join(root, f) for f in files if f[-13:] != '.tfrecords.gz']
                    if any(f[-13:] == '.tfrecords.gz' for f in files):
                        self.records_shards[mode] = [os.path.join(root, f) for f in files if f[-13:] == '.tfrecords.gz']
        # Per-mode caches of loaded shard contents and bookkeeping counters.
        self.loaded = dict()
        self.shard = dict()
        self.num_instances = dict()
        self.num_alternatives = dict()
def __str__(self):
name = '{} {}'.format(self.type, self.name)
if self.variant is not None:
name += '-{}'.format(self.variant)
if self.language is not None:
name += ' ({})'.format(self.language)
return name
@property
def name(self):
    # Name recorded in the loaded specification (not derived from the class name).
    return self._name
@property
def type(self):
    # Dataset type recorded in the loaded specification.
    return self._type
def specification(self):
    """Base specification merged with the remaining loaded specification entries."""
    specification = super(LoadedDataset, self).specification()
    specification.update(self._specification)
    return specification
def __getattr__(self, name):
    """Fall back to entries of the loaded specification for unknown attributes."""
    try:
        # object defines no __getattr__, so this attribute access itself raises
        # AttributeError; kept for cooperative multiple inheritance.
        return super(LoadedDataset, self).__getattr__(name=name)
    except AttributeError:
        if name in self._specification:
            return self._specification[name]
        else:
            raise
def get_records_paths(self, mode):
if mode == 'none':
mode = None
assert (mode is None) != isinstance(self.records_shards, dict)
assert (mode is None and self.records_shards is not None) or mode in self.records_shards
if mode is None:
return self.records_shards
else:
assert mode in self.records_shards
return self.records_shards[mode]
def generate(self, n, mode=None, include_model=False, alternatives=False):
    """Sample a batch of n instances from the loaded shard files.

    Shards are read on demand into the per-mode buffer ``self.loaded[mode]``
    and instances are consumed (popped) from it; buffer state persists
    across calls.

    Args:
        n: Number of instances (or, without ``alternatives``, alternatives)
            to put into the batch.
        mode: One of None/'none', or a mode key of ``self.shards``.
        include_model: Whether to fill 'model'-typed values (requires the
            dataset to have been loaded with model data).
        alternatives: Whether alternative values are returned as lists
            instead of selecting a single alternative per instance.

    Returns:
        A batch dict mapping value names to batch values.
    """
    if mode == 'none':
        mode = None
    # A dict of shards means the dataset is split into modes; a plain list
    # means it is unsplit and only mode=None is valid.
    assert (mode is None) != isinstance(self.shards, dict)
    assert (mode is None and self.shards is not None) or mode in self.shards
    assert not include_model or self.include_model
    if mode is None:
        mode_shards = self.shards
    else:
        mode_shards = self.shards[mode]
    if mode not in self.loaded:
        # First use of this mode: set up the buffer and counters.
        self.loaded[mode] = dict()
        self.shard[mode] = -1
        self.num_instances[mode] = 0
        self.num_alternatives[mode] = 0
        for value_name, value_type in self.values.items():
            value_type, alts = util.alternatives_type(value_type=value_type)
            if value_type != 'model' or self.include_model:
                self.loaded[mode][value_name] = list()
    # Refill the buffer from further shards until enough instances (random
    # sampling / alternatives requested) or individual alternatives are loaded.
    while (self.num_instances[mode] < n) if (self.random_sampling or alternatives) else (self.num_alternatives[mode] < n):
        if self.random_sampling:
            # Pick a random next shard, different from the current one if possible.
            next_shard = self.shard[mode]
            while len(mode_shards) > 1 and next_shard == self.shard[mode]:
                next_shard = randrange(len(mode_shards))
            self.shard[mode] = next_shard
        else:
            # Sequential mode: cycle through shards in order.
            self.shard[mode] = (self.shard[mode] + 1) % len(mode_shards)
        self.num_instances[mode] = 0
        with util.Archive(path=mode_shards[self.shard[mode]], mode='r', archive=self.archive) as read_file:
            for value_name, value in self.loaded[mode].items():
                value.extend(self.deserialize_value(
                    path=mode_shards[self.shard[mode]],
                    value_name=value_name,
                    read_file=read_file,
                    numpy_format=(value_name in self.numpy_formats),
                    image_format=self.image_format,
                    num_concat_worlds=self.num_concat_worlds
                ))
                if self.num_instances[mode] == 0:
                    self.num_instances[mode] = self.num_alternatives[mode] = len(value)
                else:
                    # All values must agree on the number of buffered instances.
                    assert len(value) == self.num_instances[mode]
                if value_name == 'alternatives':
                    # 'alternatives' stores per-instance alternative counts.
                    self.num_alternatives[mode] = sum(value)
    batch = self.zero_batch(n, include_model=include_model, alternatives=alternatives)
    for i in range(n):
        if self.random_sampling:
            index = randrange(self.num_instances[mode])
        else:
            index = 0
        if 'alternatives' not in self.values:
            # No alternatives in this dataset: consume one whole instance.
            alt_index = -1
            self.num_instances[mode] -= 1
            self.num_alternatives[mode] -= 1
        elif alternatives or self.loaded[mode]['alternatives'][index] == 1:
            # All alternatives requested, or only one alternative left:
            # consume the whole instance.
            alt_index = -1
            self.num_instances[mode] -= 1
            self.num_alternatives[mode] -= self.loaded[mode]['alternatives'][index]
        elif self.random_sampling:
            # Consume the whole instance, but pick one random alternative below.
            alt_index = randrange(self.loaded[mode]['alternatives'][index])
            self.num_instances[mode] -= 1
            self.num_alternatives[mode] -= self.loaded[mode]['alternatives'][index]
        else:
            # Sequential mode: consume one alternative, keep the instance.
            alt_index = 0
            self.loaded[mode]['alternatives'][index] -= 1
            self.num_alternatives[mode] -= 1
        for value_name, value_type in self.values.items():
            value_type, alts = util.alternatives_type(value_type=value_type)
            if value_type == 'model' and not self.include_model:
                continue
            if self.random_sampling or alt_index == -1:
                value = self.loaded[mode][value_name].pop(index)
            else:
                value = self.loaded[mode][value_name][index]
            if value_type == 'model' and not include_model:
                continue
            if not alternatives and alts:
                # Select the requested alternative (pop(-1) when the whole
                # instance was consumed above).
                value = value.pop(alt_index)
            if not alternatives and value_name == 'alternatives':
                continue
            if value_type in self.vocabularies:
                # Sequence value: copy into the zero-padded batch row.
                batch[value_name][i][:len(value)] = value
            else:
                batch[value_name][i] = value
    for value_name, value_type in self.values.items():
        value_type, _ = util.alternatives_type(value_type=value_type)
        if value_type == 'world':
            batch[value_name] = self.apply_pixel_noise(world=batch[value_name])
    return batch
def epoch(self, n, mode=None, include_model=False, alternatives=False):
    """Yield batches of n instances covering every shard exactly once.

    Unlike generate(), this is a generator over one full epoch: each shard
    is visited once (in random or sequential order), and at the end of the
    epoch the batch size n is shrunk so the remaining instances are consumed.
    Arguments have the same meaning as in generate().
    """
    if mode == 'none':
        mode = None
    # A dict of shards means the dataset is split into modes; a plain list
    # means it is unsplit and only mode=None is valid.
    assert (mode is None) != isinstance(self.shards, dict)
    assert (mode is None and self.shards is not None) or mode in self.shards
    assert not include_model or self.include_model
    if mode is None:
        mode_shards = self.shards
    else:
        mode_shards = self.shards[mode]
    available_shards = list(range(len(mode_shards)))
    # Local (per-epoch) buffer, unlike the persistent one used by generate().
    loaded = dict()
    for value_name, value_type in self.values.items():
        value_type, alts = util.alternatives_type(value_type=value_type)
        if value_type != 'model' or self.include_model:
            loaded[value_name] = list()
    num_instances = 0
    while available_shards:
        if self.random_sampling:
            shard = available_shards.pop(randrange(len(available_shards)))
        else:
            shard = available_shards.pop(0)
        num_instances = 0
        with util.Archive(path=mode_shards[shard], mode='r', archive=self.archive) as read_file:
            for value_name, value in loaded.items():
                value.extend(self.deserialize_value(
                    path=mode_shards[shard],
                    value_name=value_name,
                    read_file=read_file,
                    numpy_format=(value_name in self.numpy_formats),
                    image_format=self.image_format,
                    num_concat_worlds=self.num_concat_worlds
                ))
                if num_instances == 0:
                    num_instances = num_alternatives = len(value)
                else:
                    # All values must agree on the number of buffered instances.
                    assert len(value) == num_instances
                if value_name == 'alternatives':
                    # 'alternatives' stores per-instance alternative counts.
                    num_alternatives = sum(value)
        # Emit full batches while the buffer holds enough data.
        while (num_instances >= n) if alternatives else (num_alternatives >= n):
            batch = self.zero_batch(n, include_model=include_model, alternatives=alternatives)
            for i in range(n):
                if self.random_sampling:
                    index = randrange(num_instances)
                else:
                    index = 0
                if 'alternatives' not in self.values:
                    # No alternatives in this dataset: consume one whole instance.
                    alt_index = -1
                    num_instances -= 1
                    num_alternatives -= 1
                elif alternatives or loaded['alternatives'][index] == 1:
                    # All alternatives requested, or only one left: consume
                    # the whole instance.
                    alt_index = -1
                    num_instances -= 1
                    num_alternatives -= loaded['alternatives'][index]
                else:
                    # Consume a single alternative, keep the instance.
                    if self.random_sampling:
                        alt_index = randrange(loaded['alternatives'][index])
                    else:
                        alt_index = 0
                    loaded['alternatives'][index] -= 1
                    num_alternatives -= 1
                for value_name, value_type in self.values.items():
                    value_type, alts = util.alternatives_type(value_type=value_type)
                    if value_type == 'model' and not self.include_model:
                        continue
                    if alt_index == -1:
                        value = loaded[value_name].pop(index)
                        if not alternatives and alts:
                            # Take the single remaining alternative.
                            value = value.pop(0)
                    elif alts:
                        value = loaded[value_name][index].pop(alt_index)
                    else:
                        value = loaded[value_name][index]
                    if value_type == 'model' and not include_model:
                        continue
                    if not alternatives and value_name == 'alternatives':
                        continue
                    if value_type in self.vocabularies:
                        # Sequence value: copy into the zero-padded batch row.
                        batch[value_name][i][:len(value)] = value
                    else:
                        batch[value_name][i] = value
            for value_name, value_type in self.values.items():
                value_type, _ = util.alternatives_type(value_type=value_type)
                if value_type == 'world':
                    batch[value_name] = self.apply_pixel_noise(world=batch[value_name])
            yield batch
            if not available_shards:
                # Last shard: shrink n so that the leftover data forms a
                # final, smaller batch instead of being dropped.
                if alternatives:
                    if 0 < num_instances < n:
                        n = num_instances
                else:
                    if 0 < num_alternatives < n:
                        n = num_alternatives
    assert num_instances == 0 and num_alternatives == 0
def get_html(self, generated, image_format='bmp', image_dir=''):
    """Delegate HTML rendering to the dataset class this data was generated by.

    Imports the matching dataset module and calls the (unbound) get_html of
    its '<Name>Dataset' class with this instance as self.
    """
    module = import_module('shapeworld.datasets.{}.{}'.format(self.type, self.name))
    class_name = util.class_name(self.name) + 'Dataset'
    # Resolve the class directly: the previous scan over module.__dict__
    # silently fell through to an arbitrary (last) module attribute when
    # class_name was missing; getattr raises AttributeError instead.
    dclass = getattr(module, class_name)
    return dclass.get_html(self, generated=generated, image_format=image_format, image_dir=image_dir)
class DatasetMixer(Dataset):
    """Meta-dataset that samples each instance (or whole batch) from one of
    several component datasets according to a (mode-specific) distribution.

    Component datasets must be structurally compatible; vectors and
    vocabularies are combined into a common superset.
    """

    # accepts Dataset, config, str
    def __init__(self, datasets, consistent_batches=False, distribution=None, train_distribution=None, validation_distribution=None, test_distribution=None):
        assert len(datasets) >= 1
        self.datasets = list()
        for dataset in datasets:
            if not isinstance(dataset, Dataset):
                dataset = Dataset.create(config=dataset)
            self.datasets.append(dataset)
        # Component datasets must agree on type, language and structure.
        assert all(dataset.type == self.datasets[0].type for dataset in self.datasets)
        assert all(dataset.language == self.datasets[0].language for dataset in self.datasets)
        assert all(dataset.values == self.datasets[0].values for dataset in self.datasets)
        assert all(dataset.world_size == self.datasets[0].world_size for dataset in self.datasets)
        assert all(sorted(dataset.vectors) == sorted(self.datasets[0].vectors) for dataset in self.datasets)
        assert all(sorted(dataset.vocabularies) == sorted(self.datasets[0].vocabularies) for dataset in self.datasets)
        # combine vectors and words information
        values = self.datasets[0].values
        world_size = self.datasets[0].world_size
        vectors = {value_name: max(dataset.vectors[value_name] for dataset in self.datasets) for value_name in self.datasets[0].vectors}
        vocabularies = dict()
        for name in self.datasets[0].vocabularies:
            vocabularies[name] = sorted(set(word for dataset in self.datasets for word in dataset.vocabularies[name]))
        language = self.datasets[0].language
        super(DatasetMixer, self).__init__(values=values, world_size=world_size, vectors=vectors, vocabularies=vocabularies, language=language)
        # Per-dataset vocabulary-index translations into the combined
        # vocabulary, for loaded datasets only.
        # NOTE(review): dataset.vocabularies is overwritten with the combined
        # vocabularies just before the translation is built from it — verify
        # the intended source vocabulary (this mirrors the original order).
        self.translations = list()
        for dataset in self.datasets:
            dataset.vectors = self.vectors
            dataset.vocabularies = self.vocabularies
            if isinstance(dataset, LoadedDataset):
                translation = dict()
                for name, vocabulary in dataset.vocabularies.items():
                    translation[name] = np.vectorize({index: self.vocabularies[name][word] for word, index in vocabulary.items()}.__getitem__)
                self.translations.append(translation)
            else:
                self.translations.append(None)
        self.consistent_batches = consistent_batches
        assert not distribution or len(distribution) == len(self.datasets)
        distribution = util.value_or_default(distribution, [1] * len(self.datasets))
        self.distribution = util.cumulative_distribution(distribution)
        # Mode-specific distributions must be given either all or none.
        assert bool(train_distribution) == bool(validation_distribution) == bool(test_distribution)
        assert not train_distribution or len(train_distribution) == len(validation_distribution) == len(test_distribution) == len(self.distribution)
        self.train_distribution = util.cumulative_distribution(util.value_or_default(train_distribution, distribution))
        self.validation_distribution = util.cumulative_distribution(util.value_or_default(validation_distribution, distribution))
        self.test_distribution = util.cumulative_distribution(util.value_or_default(test_distribution, distribution))

    @property
    def type(self):
        return self.datasets[0].type

    @property
    def name(self):
        return '+'.join(dataset.name for dataset in self.datasets)

    def generate(self, n, mode=None, include_model=False, alternatives=False):
        if mode == 'none':
            mode = None
        # Default to the general distribution. Previously `distribution` was
        # only assigned for mode in (None, 'train', 'validation', 'test'),
        # so any other mode string raised UnboundLocalError below.
        distribution = self.distribution
        if mode == 'train':
            distribution = self.train_distribution
        elif mode == 'validation':
            distribution = self.validation_distribution
        elif mode == 'test':
            distribution = self.test_distribution
        if self.consistent_batches:
            # Whole batch from a single sampled component dataset.
            dataset = util.sample(distribution, self.datasets)
            return dataset.generate(n=n, mode=mode, include_model=include_model, alternatives=alternatives)
        else:
            # Sample each instance independently.
            batch = self.zero_batch(n, include_model=include_model, alternatives=alternatives)
            for i in range(n):
                sample = util.sample(distribution)
                generated = self.datasets[sample].generate(n=1, mode=mode, include_model=include_model, alternatives=alternatives)
                for value_name, value in generated.items():
                    value_type = self.values[value_name]
                    if value_type in self.vocabularies:
                        # Sequence value: copy into the zero-padded batch row.
                        batch[value_name][i][:value.shape[1]] = value[0]
                    else:
                        batch[value_name][i] = value[0]
            return batch
class ClassificationDataset(Dataset):
    """Dataset of generated worlds labelled with one or more class indices.

    Subclasses implement get_classes() to map a generated world to its class
    indices; the 'classification' value is a float vector of length
    num_classes, filled as one/multi-hot flags or as per-class counts.
    """

    def __init__(self, world_generator, num_classes, multi_class=False, count_class=False, pixel_noise_stddev=None):
        values = dict(world='world', world_model='model', classification='vector(float)')
        vectors = dict(classification=num_classes)
        super(ClassificationDataset, self).__init__(values=values, world_size=world_generator.world_size, vectors=vectors, pixel_noise_stddev=pixel_noise_stddev)
        # Counting class occurrences only makes sense for multi-class labels.
        assert multi_class or not count_class
        self.world_generator = world_generator
        self.num_classes = num_classes
        self.multi_class = multi_class
        self.count_class = count_class

    @property
    def type(self):
        return 'classification'

    def specification(self):
        specification = super(ClassificationDataset, self).specification()
        specification['num_classes'] = self.num_classes
        specification['multi_class'] = self.multi_class
        specification['count_class'] = self.count_class
        return specification

    def get_classes(self, world):  # iterable of classes
        raise NotImplementedError

    def generate(self, n, mode=None, include_model=False, alternatives=False):
        if mode == 'none':
            mode = None
        batch = self.zero_batch(n, include_model=include_model, alternatives=alternatives)
        for i in range(n):
            # Re-initialize the generator until it accepts the mode, then
            # sample until a valid world is produced.
            while not self.world_generator.initialize(mode=mode):
                pass
            while True:
                world = self.world_generator()
                if world is not None:
                    break
            batch['world'][i] = self.apply_pixel_noise(world=world.get_array(world_array=batch['world'][i]))
            if include_model:
                batch['world_model'][i] = world.model()
            c = None
            for c in self.get_classes(world):
                if self.count_class:
                    batch['classification'][i][c] += 1.0
                else:
                    batch['classification'][i][c] = 1.0
            if not self.multi_class:
                # Single-class datasets must yield at least one class.
                assert c is not None
        return batch

    def get_html(self, generated, image_format='bmp', image_dir=''):
        classifications = generated['classification']
        data_html = list()
        for n, classification in enumerate(classifications):
            data_html.append('<div class="instance"><div class="world"><img src="{image_dir}world-{world}.{format}" alt="world-{world}.{format}"></div><div class="num"><p><b>({num})</b></p></div><div class="classification"><p>'.format(image_dir=image_dir, world=n, format=image_format, num=(n + 1)))
            comma = False
            for c, count in enumerate(classification):
                if count == 0.0:
                    continue
                if comma:
                    data_html.append(', ')
                else:
                    comma = True
                if self.count_class:
                    data_html.append('{count:.0f} × class {c}'.format(c=c, count=count))
                else:
                    data_html.append('class {c}'.format(c=c))
            data_html.append('</p></div></div>')
        html = '<!DOCTYPE html><html><head><title>{dtype} {name}</title><style>.data{{width: 100%; height: 100%;}} .instance{{width: 100%; display: flex; margin-top: 1px; margin-bottom: 1px; background-color: #DDEEFF; vertical-align: middle; align-items: center;}} .world{{height: {world_height}px; display: inline-block; flex-grow: 0; vertical-align: middle;}} .num{{width: 50px; display: inline-block; flex-grow: 0; text-align: center; vertical-align: middle; margin-left: 10px;}} .classification{{display: inline-block; flex-grow: 1; vertical-align: middle; margin-left: 10px;}}</style></head><body><div class="data">{data}</div></body></html>'.format(
            dtype=self.type,
            name=self.name,
            world_height=self.world_shape()[0],
            data=''.join(data_html)
        )
        return html
class CaptionAgreementDataset(Dataset):
    """Dataset of (world, caption, agreement) instances.

    Each instance pairs a generated world with a realized caption and a float
    'agreement' label (1.0 correct, 0.0 incorrect). Alternatively, one
    instance can hold several worlds for one caption (worlds_per_instance) or
    several captions for one world (captions_per_instance), but not both.
    Captions are additionally encoded in (reverse) polish notation.
    """

    # Re-initialization frequencies for the resample loops in generate():
    # how often (in failed attempts) the world generator / captioner are
    # re-initialized.
    GENERATOR_INIT_FREQUENCY = 25
    CAPTIONER_INIT_FREQUENCY = 100
    CAPTIONER_INIT_FREQUENCY2 = 5

    def __init__(self, world_generator, world_captioner, caption_size, vocabulary, pixel_noise_stddev=None, caption_realizer='dmrs', language=None, worlds_per_instance=1, captions_per_instance=1, correct_ratio=0.5, train_correct_ratio=None, validation_correct_ratio=None, test_correct_ratio=None):
        # Value layout depends on whether instances carry alternatives.
        if worlds_per_instance > 1 or captions_per_instance > 1:
            values = dict(agreement='alternatives(float)')
        else:
            values = dict(agreement='float')
        if worlds_per_instance > 1:
            values.update(world='alternatives(world)', world_model='alternatives(model)', alternatives='int')
        else:
            values.update(world='world', world_model='model')
        if captions_per_instance > 1:
            values.update(caption='alternatives(language)', caption_length='alternatives(int)', caption_pn='alternatives(pn)', caption_pn_length='alternatives(int)', caption_rpn='alternatives(pn)', caption_rpn_length='alternatives(int)', caption_model='alternatives(model)', alternatives='int')
        else:
            values.update(caption='language', caption_length='int', caption_pn='pn', caption_pn_length='int', caption_rpn='pn', caption_rpn_length='int', caption_model='model')
        assert isinstance(caption_size, int) and caption_size > 0
        vocabulary = list(vocabulary)
        assert len(vocabulary) > 0 and vocabulary == sorted(vocabulary), sorted(vocabulary)  # [(w1, w2) for w1, w2 in zip(vocabulary, sorted(vocabulary)) if w1 != w2]
        self.world_generator = world_generator
        self.world_captioner = world_captioner
        from shapeworld.realizers import CaptionRealizer
        if isinstance(caption_realizer, CaptionRealizer):
            self.caption_realizer = caption_realizer
        else:
            assert caption_realizer is None or isinstance(caption_realizer, str)
            self.caption_realizer = CaptionRealizer.from_name(
                name=caption_realizer,
                language=util.value_or_default(language, 'english')
            )
        self.world_captioner.set_realizer(self.caption_realizer)
        vectors = dict(
            caption=caption_size,
            caption_pn=self.world_captioner.pn_length(),
            caption_rpn=self.world_captioner.pn_length()
        )
        vocabularies = dict(
            language=vocabulary,
            pn=sorted(self.world_captioner.pn_symbols())
        )
        super(CaptionAgreementDataset, self).__init__(
            values=values,
            world_size=world_generator.world_size,
            pixel_noise_stddev=pixel_noise_stddev,
            vectors=vectors,
            vocabularies=vocabularies,
            language=language
        )
        # Multiple worlds and multiple captions per instance are exclusive.
        assert worlds_per_instance == 1 or captions_per_instance == 1
        self.worlds_per_instance = worlds_per_instance
        self.captions_per_instance = captions_per_instance
        self.correct_ratio = correct_ratio
        self.train_correct_ratio = util.value_or_default(train_correct_ratio, self.correct_ratio)
        self.validation_correct_ratio = util.value_or_default(validation_correct_ratio, self.correct_ratio)
        self.test_correct_ratio = util.value_or_default(test_correct_ratio, self.correct_ratio)
        self.pn_arity = self.world_captioner.pn_arity()
        self.pn_arity[''] = 1
        self.pn_arity['[UNKNOWN]'] = 1

    @property
    def type(self):
        return 'agreement'

    def specification(self):
        specification = super(CaptionAgreementDataset, self).specification()
        specification['worlds_per_instance'] = self.worlds_per_instance
        specification['captions_per_instance'] = self.captions_per_instance
        specification['pn_arity'] = self.pn_arity
        return specification

    def generate(self, n, mode=None, include_model=False, alternatives=False):
        """Generate a batch of n (world, caption, agreement) instances."""
        if mode == 'none':
            mode = None
        if mode == 'train':
            correct_ratio = self.train_correct_ratio
        elif mode == 'validation':
            correct_ratio = self.validation_correct_ratio
        elif mode == 'test':
            correct_ratio = self.test_correct_ratio
        else:
            correct_ratio = self.correct_ratio
        pn2id = self.vocabularies['pn']
        unknown = pn2id['[UNKNOWN]']
        pn_size = self.vector_shape('caption_pn')[0]
        batch = self.zero_batch(n, include_model=include_model, alternatives=alternatives)
        captions = list()
        for i in range(n):
            # Decide whether this instance should be a correct pair.
            correct = random() < correct_ratio
            # print(i, correct, flush=True)
            # print(i, correct, end=', ', flush=True)
            resample = 0
            # Resample world/caption until a valid pair is produced; the
            # generator and captioner are periodically re-initialized.
            while True:
                if resample % self.__class__.GENERATOR_INIT_FREQUENCY == 0:
                    if resample // self.__class__.GENERATOR_INIT_FREQUENCY >= 1:
                        # print(i, 'world')
                        pass
                    while not self.world_generator.initialize(mode=mode):
                        pass
                    # print(self.world_generator.model())
                if resample % self.__class__.CAPTIONER_INIT_FREQUENCY == 0:
                    if resample // self.__class__.CAPTIONER_INIT_FREQUENCY >= 1:
                        # print(i, 'caption')
                        # print(i, resample, 'caption', correct, self.world_captioner.model())
                        # assert False
                        pass
                    if self.worlds_per_instance > 1:
                        # Multiple worlds share one (correct) caption; the
                        # captioner is initialized in incorrect-capable mode.
                        correct = True
                        while not self.world_captioner.initialize(mode=mode, correct=False):
                            pass
                    else:
                        while not self.world_captioner.initialize(mode=mode, correct=correct):
                            pass
                    assert self.world_captioner.incorrect_possible()
                    # print(self.world_captioner.model(), flush=True)
                resample += 1
                world = self.world_generator()
                if world is None:
                    continue
                if self.worlds_per_instance > 1:
                    caption = self.world_captioner(world=world)
                    if caption is None:
                        continue
                    caption = self.world_captioner.get_correct_caption()
                else:
                    caption = self.world_captioner(world=world)
                # print('c', caption)
                if caption is not None:
                    break
            if alternatives and (self.worlds_per_instance > 1 or self.captions_per_instance > 1):
                batch['agreement'][i].append(float(correct))
            else:
                batch['agreement'][i] = float(correct)
            if alternatives and self.captions_per_instance > 1:
                # One world, several captions: extend the per-instance lists
                # and encode the first caption at sub-index 0.
                batch['alternatives'][i] = self.captions_per_instance
                batch['caption'][i].extend(batch['caption'][i][0].copy() for _ in range(self.captions_per_instance - 1))
                batch['caption_pn'][i].extend(batch['caption_pn'][i][0].copy() for _ in range(self.captions_per_instance - 1))
                batch['caption_rpn'][i].extend(batch['caption_rpn'][i][0].copy() for _ in range(self.captions_per_instance - 1))
                captions.append(caption)
                pn = caption.polish_notation()
                assert len(pn) <= pn_size, (len(pn), pn_size, pn)
                for k, pn_symbol in enumerate(pn):
                    assert pn_symbol in pn2id, (pn_symbol, pn2id)
                    batch['caption_pn'][i][0][k] = pn2id.get(pn_symbol, unknown)
                batch['caption_pn_length'][i].append(len(pn))
                rpn = caption.polish_notation(reverse=True)
                assert len(rpn) <= pn_size, (len(rpn), pn_size, rpn)
                for k, pn_symbol in enumerate(rpn):
                    assert pn_symbol in pn2id, (pn_symbol, pn2id)
                    batch['caption_rpn'][i][0][k] = pn2id.get(pn_symbol, unknown)
                batch['caption_rpn_length'][i].append(len(rpn))
                if include_model:
                    batch['caption_model'][i].append(caption.model())
                # Generate the remaining alternative captions for this world.
                for j in range(1, self.captions_per_instance):
                    correct = random() < correct_ratio
                    resample = 0
                    while True:
                        if resample % self.__class__.CAPTIONER_INIT_FREQUENCY2 == 0:
                            if resample // self.__class__.CAPTIONER_INIT_FREQUENCY2 >= 1:
                                # print(i, j, '2nd caption')
                                # print(i, 'caption', correct, self.world_captioner.model())
                                pass
                            while not self.world_captioner.initialize(mode=mode, correct=correct):
                                pass
                        resample += 1
                        caption = self.world_captioner(world=world)
                        if caption is not None:
                            break
                    captions.append(caption)
                    pn = caption.polish_notation()
                    assert len(pn) <= pn_size, (len(pn), pn_size, pn)
                    for k, pn_symbol in enumerate(pn):
                        assert pn_symbol in pn2id, (pn_symbol, pn2id)
                        batch['caption_pn'][i][j][k] = pn2id.get(pn_symbol, unknown)
                    batch['caption_pn_length'][i].append(len(pn))
                    rpn = caption.polish_notation(reverse=True)
                    assert len(rpn) <= pn_size, (len(rpn), pn_size, rpn)
                    for k, pn_symbol in enumerate(rpn):
                        assert pn_symbol in pn2id, (pn_symbol, pn2id)
                        batch['caption_rpn'][i][j][k] = pn2id.get(pn_symbol, unknown)
                    batch['caption_rpn_length'][i].append(len(rpn))
                    if include_model:
                        batch['caption_model'][i].append(caption.model())
                    batch['agreement'][i].append(float(correct))
            else:
                # Single caption per instance: encode it directly.
                captions.append(caption)
                pn = caption.polish_notation()
                assert len(pn) <= pn_size, (len(pn), pn_size, pn)
                for k, pn_symbol in enumerate(pn):
                    assert pn_symbol in pn2id, (pn_symbol, pn2id)
                    batch['caption_pn'][i][k] = pn2id.get(pn_symbol, unknown)
                batch['caption_pn_length'][i] = len(pn)
                rpn = caption.polish_notation(reverse=True)
                assert len(rpn) <= pn_size, (len(rpn), pn_size, rpn)
                for k, pn_symbol in enumerate(rpn):
                    assert pn_symbol in pn2id, (pn_symbol, pn2id)
                    batch['caption_rpn'][i][k] = pn2id.get(pn_symbol, unknown)
                batch['caption_rpn_length'][i] = len(rpn)
                if include_model:
                    batch['caption_model'][i] = caption.model()
            if alternatives and self.worlds_per_instance > 1:
                # One caption, several worlds: the first world was generated
                # above; further worlds are sampled to agree/disagree with
                # the caption according to their own 'correct' flag.
                from shapeworld.captions import PragmaticalPredication
                batch['alternatives'][i] = self.worlds_per_instance
                batch['world'][i].extend(batch['world'][i][0].copy() for _ in range(self.worlds_per_instance - 1))
                batch['world'][i][0] = self.apply_pixel_noise(world=world.get_array(world_array=batch['world'][i][0]))
                if include_model:
                    batch['world_model'][i].append(world.model())
                for j in range(1, self.worlds_per_instance):
                    correct = random() < correct_ratio
                    while True:
                        world = self.world_generator()
                        if world is None:
                            continue
                        caption = self.world_captioner.get_correct_caption()
                        predication = PragmaticalPredication(agreeing=world.entities)
                        caption.apply_to_predication(predication=predication)
                        agreement = caption.agreement(predication=predication, world=world)
                        if not correct:
                            # For an incorrect pair the world must disagree
                            # with the correct caption, and an incorrect
                            # reading must be applicable.
                            if agreement >= 0.0:
                                continue
                            predication = PragmaticalPredication(agreeing=world.entities)
                            if not self.world_captioner.incorrect(caption=caption, predication=predication, world=world):
                                continue
                            agreement = caption.agreement(predication=predication, world=world)
                        if agreement > 0.0:
                            break
                    batch['world'][i][j] = self.apply_pixel_noise(world=world.get_array(world_array=batch['world'][i][j]))
                    if include_model:
                        batch['world_model'][i].append(world.model())
                    batch['agreement'][i].append(float(correct))
            else:
                batch['world'][i] = self.apply_pixel_noise(world=world.get_array(world_array=batch['world'][i]))
                if include_model:
                    batch['world_model'][i] = world.model()
        # Realize all collected captions at once and encode them as word ids.
        word2id = self.vocabularies['language']
        unknown = word2id['[UNKNOWN]']
        caption_size = self.vector_shape('caption')[0]
        unused_words = set(word2id)  # for assert
        unused_words.remove('')
        unused_words.remove('[UNKNOWN]')
        missing_words = set()  # for assert
        max_caption_size = caption_size  # for assert
        assert len(captions) == n * self.captions_per_instance if alternatives else len(captions) == n
        captions = self.caption_realizer.realize(captions=captions)
        for i, caption in enumerate(captions):
            caption = util.sentence2tokens(sentence=caption)
            if len(caption) > caption_size:
                # Caption does not fit the vector; tracked for the assert below.
                if len(caption) > max_caption_size:
                    max_caption_size = len(caption)
                continue
            if alternatives and self.captions_per_instance > 1:
                # Flattened caption list: recover instance and sub-index.
                j = i % self.captions_per_instance
                i = i // self.captions_per_instance
                batch['caption_length'][i].append(len(caption))
                caption_array = batch['caption'][i][j]
            else:
                batch['caption_length'][i] = len(caption)
                caption_array = batch['caption'][i]
            for k, word in enumerate(caption):
                if word in word2id:
                    unused_words.discard(word)
                else:
                    missing_words.add(word)
                caption_array[k] = word2id.get(word, unknown)
        if util.debug() and len(unused_words) > 0:
            print('Words unused in vocabulary: \'{}\''.format('\', \''.join(sorted(unused_words))))
        if util.debug() and max_caption_size < caption_size:
            print('Caption size smaller than max size: {} < {}'.format(max_caption_size, caption_size))
        if len(missing_words) > 0:
            print('Words missing in vocabulary: \'{}\''.format('\', \''.join(sorted(missing_words))))
        if max_caption_size > caption_size:
            print('Caption size exceeds max size: {} > {}'.format(max_caption_size, caption_size))
        assert not missing_words, missing_words
        assert max_caption_size <= caption_size, (max_caption_size, caption_size)
        return batch

    def get_html(self, generated, image_format='bmp', image_dir=''):
        id2word = self.vocabulary(value_type='language')
        worlds = generated['world']
        captions = generated['caption']
        caption_lengths = generated['caption_length']
        agreements = generated['agreement']
        data_html = list()
        for n, (world, caption, caption_length, agreement) in enumerate(zip(worlds, captions, caption_lengths, agreements)):
            if self.worlds_per_instance > 1 or self.captions_per_instance > 1:
                data_html.append('<div class="instance">')
            else:
                if agreement == 1.0:
                    agreement = 'correct'
                elif agreement == 0.0:
                    agreement = 'incorrect'
                else:
                    agreement = 'ambiguous'
                data_html.append('<div class="{agreement}">'.format(agreement=agreement))
            if self.worlds_per_instance > 1:
                for i, agreement in enumerate(agreement):
                    if agreement == 1.0:
                        agreement = 'correct'
                    elif agreement == 0.0:
                        agreement = 'incorrect'
                    else:
                        agreement = 'ambiguous'
                    data_html.append('<div class="{agreement}" style="padding: 5px;"><div class="world"><img src="{image_dir}world-{world}-{alt}.{format}" alt="world-{world}-{alt}.{format}"></div></div>'.format(
                        agreement=agreement,
                        image_dir=image_dir,
                        world=n,
                        format=image_format,
                        alt=i
                    ))
            else:
                data_html.append('<div class="world"><img src="{image_dir}world-{world}.{format}" alt="world-{world}.{format}"></div>'.format(image_dir=image_dir, world=n, format=image_format))
            data_html.append('<div class="num"><b>({num})</b></div>'.format(num=(n + 1)))
            if self.captions_per_instance > 1:
                data_html.append('<div class="caption">')
                for caption, caption_length, agreement in zip(caption, caption_length, agreement):
                    if agreement == 1.0:
                        agreement = 'correct'
                    elif agreement == 0.0:
                        agreement = 'incorrect'
                    else:
                        agreement = 'ambiguous'
                    data_html.append('<div class="{agreement}">{caption}</div>'.format(
                        agreement=agreement,
                        caption=util.tokens2sentence(id2word[word] for word in caption[:caption_length])
                    ))
                data_html.append('</div>')
            else:
                data_html.append('<div class="caption">{caption}</div>'.format(
                    caption=util.tokens2sentence(id2word[word] for word in caption[:caption_length])
                ))
            data_html.append('</div>')
        html = '<!DOCTYPE html><html><head><title>{dtype} {name}</title><style>.data{{width: 100%; height: 100%;}} .instance{{width: 100%; display: flex; margin-top: 1px; margin-bottom: 1px; background-color: #DDEEFF; vertical-align: middle; align-items: center;}} .world{{height: {world_height}px; display: inline-block; flex-grow: 0; vertical-align: middle;}} .num{{width: 50px; display: inline-block; flex-grow: 0; text-align: center; vertical-align: middle; margin-left: 10px;}} .caption{{display: inline-block; flex-grow: 1; vertical-align: middle; margin-left: 10px;}} .correct{{margin-top: 1px; margin-bottom: 1px; background-color: #BBFFBB;}} .incorrect{{margin-top: 1px; margin-bottom: 1px; background-color: #FFBBBB;}} .ambiguous{{margin-top: 1px; margin-bottom: 1px; background-color: #FFFFBB;}}</style></head><body><div class="data">{data}</div></body></html>'.format(
            dtype=self.type,
            name=self.name,
            world_height=self.world_shape()[0],
            data=''.join(data_html)
        )
        return html
|
# -*- coding: utf-8 -*-
import inspect
import os
from django.conf import settings
from django.core.management import color
from django.core.management import BaseCommand
from django.utils import termcolors
from django.utils.encoding import smart_text
from django_extensions.compat import load_tag_library
from django_extensions.management.utils import signalcommand
def color_style():
    """Return the default management-command palette, extended with the bold
    styles used to highlight the template-tag listing output."""
    def bold(fg):
        return termcolors.make_style(fg=fg, opts=('bold',))
    style = color.color_style()
    style.FILTER = bold('yellow')
    style.MODULE_NAME = bold('green')
    style.TAG = bold('red')
    style.TAGLIB = bold('blue')
    return style
def format_block(block, nlspaces=0):
    """Format the given block of text, trimming leading/trailing
    empty lines and any leading whitespace that is common to all lines.

    The purpose is to let us list a code block as a multiline,
    triple-quoted Python string, taking care of
    indentation concerns.

    http://code.activestate.com/recipes/145672/"""
    import re

    # separate block into lines
    lines = smart_text(block).split('\n')

    # remove leading/trailing empty lines
    while lines and not lines[0]:
        del lines[0]
    while lines and not lines[-1]:
        del lines[-1]

    # look at first line to see how much indentation to trim
    # (guard against a block that is empty after trimming)
    ws = re.match(r'\s*', lines[0]).group(0) if lines else ''
    if ws:
        # Use a list comprehension, NOT `map`: on Python 3 `map` returns a
        # lazy iterator, so the `lines[0]` / `lines[-1]` trimming below
        # raised "TypeError: 'map' object is not subscriptable".
        lines = [line.replace(ws, '', 1) for line in lines]

    # remove leading/trailing blank lines (after leading ws removal)
    # we do this again in case there were pure-whitespace lines
    while lines and not lines[0]:
        del lines[0]
    while lines and not lines[-1]:
        del lines[-1]

    # account for user-specified leading spaces
    flines = ['%s%s' % (' ' * nlspaces, line) for line in lines]
    return '\n'.join(flines) + '\n'
class Command(BaseCommand):
    """List template tags and filters for every app in ADMIN_FOR settings
    (falling back to the current settings' INSTALLED_APPS)."""

    help = "Displays template tags and filters available in the current project."

    # Accumulated, already-styled output; returned from handle().
    results = ""

    def add_result(self, s, depth=0):
        # Right-justify to indent by 4 spaces per depth level.
        self.results += '%s\n' % s.rjust(depth * 4 + len(s))

    @signalcommand
    def handle(self, *args, **options):
        if args:
            appname, = args
        style = color_style()
        if getattr(settings, 'ADMIN_FOR', None):
            settings_modules = [__import__(m, {}, {}, ['']) for m in settings.ADMIN_FOR]
        else:
            settings_modules = [settings]
        for settings_mod in settings_modules:
            for app in settings_mod.INSTALLED_APPS:
                try:
                    templatetag_mod = __import__(app + '.templatetags', {}, {}, [''])
                except ImportError:
                    # App has no templatetags package; skip it.
                    continue
                mod_path = inspect.getabsfile(templatetag_mod)
                mod_files = os.listdir(os.path.dirname(mod_path))
                # Slice off the '.py' suffix explicitly: str.rstrip('.py')
                # strips a trailing *character set* ('.', 'p', 'y') and
                # mangles module names such as 'happy.py' -> 'ha'.
                tag_files = [i[:-3] for i in mod_files if i.endswith('.py') and i[0] != '_']
                app_labeled = False
                for taglib in tag_files:
                    lib = load_tag_library(taglib)
                    if lib is None:
                        continue
                    if not app_labeled:
                        # Print the app header only once, and only for apps
                        # that actually provide loadable tag libraries.
                        self.add_result('App: %s' % style.MODULE_NAME(app))
                        app_labeled = True
                    self.add_result('load: %s' % style.TAGLIB(taglib), 1)
                    libstuff = [
                        (lib.tags, 'Tag:', style.TAG),
                        (lib.filters, 'Filter:', style.FILTER)
                    ]
                    for items, label, style_func in libstuff:
                        for item in items:
                            self.add_result('%s %s' % (label, style_func(item)), 2)
                            doc = inspect.getdoc(items[item])
                            if doc:
                                self.add_result(format_block(doc, 12))
        return self.results
show_template_tags should handle AppConfig classes in INSTALLED_APPS
https://docs.djangoproject.com/en/2.0/ref/applications/#django.apps.AppConfig
# -*- coding: utf-8 -*-
import inspect
import os
from django.apps import apps
from django.core.management import color
from django.core.management import BaseCommand
from django.utils import termcolors
from django.utils.encoding import smart_text
from django_extensions.compat import load_tag_library
from django_extensions.management.utils import signalcommand
def color_style():
    """Return Django's base color style extended with bold styles for the
    template-tag report (filters, module names, tags and tag libraries)."""
    palette = (
        ('FILTER', 'yellow'),
        ('MODULE_NAME', 'green'),
        ('TAG', 'red'),
        ('TAGLIB', 'blue'),
    )
    style = color.color_style()
    for attr, fg in palette:
        setattr(style, attr, termcolors.make_style(fg=fg, opts=('bold',)))
    return style
def format_block(block, nlspaces=0):
    """Format the given block of text, trimming leading/trailing
    empty lines and any leading whitespace that is common to all lines.
    The purpose is to let us list a code block as a multiline,
    triple-quoted Python string, taking care of
    indentation concerns.
    http://code.activestate.com/recipes/145672/"""
    import re

    # separate block into lines
    lines = smart_text(block).split('\n')
    # remove leading/trailing empty lines
    while lines and not lines[0]:
        del lines[0]
    while lines and not lines[-1]:
        del lines[-1]
    # ROBUSTNESS: a block of only empty lines used to raise IndexError on
    # lines[0] below; return the same '\n' the empty-list path would yield.
    if not lines:
        return '\n'
    # look at first line to see how much indentation to trim
    ws = re.match(r'\s*', lines[0]).group(0)
    if ws:
        # BUGFIX: the original used map(), which on Python 3 returns a lazy
        # iterator -- the list trimming below then raises TypeError on
        # lines[0].  Build a real list instead.
        lines = [line.replace(ws, '', 1) for line in lines]
    # remove leading/trailing blank lines (after leading ws removal)
    # we do this again in case there were pure-whitespace lines
    while lines and not lines[0]:
        del lines[0]
    while lines and not lines[-1]:
        del lines[-1]
    # account for user-specified leading spaces
    flines = ['%s%s' % (' ' * nlspaces, line) for line in lines]
    return '\n'.join(flines) + '\n'
class Command(BaseCommand):
    help = "Displays template tags and filters available in the current project."
    results = ""

    def add_result(self, s, depth=0):
        """Append *s* to the report, right-padded four spaces per *depth* level."""
        self.results += '%s\n' % s.rjust(depth * 4 + len(s))

    @signalcommand
    def handle(self, *args, **options):
        """Scan every installed app (via AppConfig) for a ``templatetags``
        package and report the tags and filters each library provides,
        including their docstrings."""
        if args:
            appname, = args
        style = color_style()
        for app_config in apps.get_app_configs():
            app = app_config.name
            try:
                templatetag_mod = __import__(app + '.templatetags', {}, {}, [''])
            except ImportError:
                continue
            mod_path = inspect.getabsfile(templatetag_mod)
            mod_files = os.listdir(os.path.dirname(mod_path))
            # BUGFIX: the original used i.rstrip('.py'), which strips any
            # trailing run of '.', 'p' and 'y' characters (e.g.
            # 'copy.py' -> 'co'), not the literal suffix.  Slice off the
            # three-character '.py' suffix instead.
            tag_files = [i[:-3] for i in mod_files if i.endswith('.py') and i[0] != '_']
            app_labeled = False
            for taglib in tag_files:
                lib = load_tag_library(taglib)
                if lib is None:
                    continue
                if not app_labeled:
                    self.add_result('App: %s' % style.MODULE_NAME(app))
                    app_labeled = True
                self.add_result('load: %s' % style.TAGLIB(taglib), 1)
                libstuff = [
                    (lib.tags, 'Tag:', style.TAG),
                    (lib.filters, 'Filter:', style.FILTER)
                ]
                for items, label, style_func in libstuff:
                    for item in items:
                        self.add_result('%s %s' % (label, style_func(item)), 2)
                        doc = inspect.getdoc(items[item])
                        if doc:
                            self.add_result(format_block(doc, 12))
        return self.results
|
#!/usr/bin/env python3
import argparse
from collections import OrderedDict
import json
import os
def list_files(input_dir):
    """Collect the parseable JSON ``.conf`` files under *input_dir*.

    Every entry ending in ``.conf`` is opened and checked with
    ``json.load()``; files that fail to parse are reported on stdout and
    dropped.  The survivors are returned sorted, except that
    ``global.conf`` (when present) is moved to the very end.
    """
    candidates = {
        os.path.join(input_dir, name)
        for name in os.listdir(input_dir)
        if name.endswith(".conf")
    }
    valid = []
    for path in candidates:
        try:
            with open(path) as handle:
                json.load(handle)
        except ValueError as err:
            print("Got ValueError: {0}".format(err))
        else:
            valid.append(path)
    ordered = sorted(valid)
    global_conf = os.path.join(input_dir, "global.conf")
    if global_conf in ordered:
        # global.conf must come last so its settings act as the fallback.
        ordered.remove(global_conf)
        ordered.append(global_conf)
    return ordered
def load_files(input_dir):
    """Parse every file reported by list_files() in order.

    Returns an OrderedDict mapping each file path to its JSON content,
    itself decoded as an OrderedDict so key order is preserved.
    """
    parsed = OrderedDict()
    for path in list_files(input_dir):
        with open(path) as source:
            parsed[path] = json.load(source, object_pairs_hook=OrderedDict)
    return parsed
def parse_files(input_dir, output_type):
    """Assemble ssh_config-style output from the loaded .conf data.

    For every loaded file: entries under "Options" become per-file default
    option pairs, each entry under "Hosts" becomes one Host block, and any
    default option whose key does not already appear in the block is
    appended.  The result goes to stdout (output_type == "screen") or is
    appended to the module-global FILE_NAME (output_type == "file"),
    created with mode 0600 when missing.
    """
    conf_files = load_files(input_dir)
    out = list()
    host_out = list()
    # If the output_type is file
    # Try to remove the existing FILE_NAME
    # I'm intentionally not using `with suppress`
    if output_type == "file":
        try:
            os.remove(FILE_NAME)
        except OSError:
            pass
    for dict_file_key, dict_file in conf_files.items():
        default_values = set()
        out.append("# Content from {0}".format(dict_file_key))
        for inner_key, inner_value in dict_file.items():
            if inner_key == "Options":
                for elem in inner_value.items():
                    default_values.add((elem[0], elem[1]))
            elif inner_key == "Hosts":
                for host in inner_value:
                    # IDIOM FIX: the original abused a list comprehension
                    # purely for its append() side effects; a plain loop
                    # does the same work without building a throwaway list.
                    for host_data in host.items():
                        if host_data[0] == "Host":
                            host_out.append("{0} {1}".format(host_data[0], host_data[1]))
                        else:
                            host_out.append("    {0} {1}".format(host_data[0], host_data[1]))
                    if default_values:
                        for default_value in default_values:
                            # Substring test against the rendered lines, as
                            # in the original (a key that is a substring of
                            # another key would also match).
                            if not any(
                                default_value[0] in host_data for host_data in host_out
                            ):
                                host_out.append(
                                    "    {0} {1}".format(
                                        default_value[0], default_value[1]
                                    )
                                )
                    # Write out the host data for current host
                    out.extend(host_out)
                    # Clear host data after each host gets processed
                    del host_out[:]
    if output_type == "screen":
        print("{0}".format("\n".join(out)))
    elif output_type == "file":
        with os.fdopen(os.open(FILE_NAME, os.O_WRONLY | os.O_CREAT, 0o600), "a") as f:
            f.write("{0}\n".format("\n".join(out)))
def do_it():
    """Command-line entry point: parse options and run the generator."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--source-dir",
        dest="source_dir",
        help="Set the source directory from which the files \
will be loaded",
        type=str,
        action="store",
        default=os.path.expanduser("~") + "/.ssh/confs/",
    )
    parser.add_argument(
        "-o",
        "--output",
        dest="output",
        help="Set the \
output type",
        type=str,
        action="store",
        default="screen",
    )
    parser.add_argument(
        "-f",
        "--file",
        dest="file_name",
        help="Set the file \
name of the output file",
        type=str,
        action="store",
        default=os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "outfile-example"
        ),
    )
    args = parser.parse_args()
    if args.output == "file" and args.file_name:
        # parse_files() reads this as a module-level global.
        global FILE_NAME
        FILE_NAME = args.file_name
    if not args.source_dir:
        return
    # Guard-clause layout instead of the original nested if/elif chain.
    if not os.path.exists(args.source_dir):
        print("{} does not exist.".format(args.source_dir))
    elif os.path.isdir(args.source_dir):
        parse_files(args.source_dir, args.output)
    else:
        print("{} is not a directory.".format(args.source_dir))
if __name__ == "__main__":
    do_it()
Remove extra line breaks
#!/usr/bin/env python3
import argparse
from collections import OrderedDict
import json
import os
def list_files(input_dir):
    """Return sorted paths of parseable ``.conf`` files, global.conf last.

    Files whose contents are not valid JSON are reported on stdout and
    dropped from the result.
    """
    keep = []
    for entry in os.listdir(input_dir):
        if not entry.endswith(".conf"):
            continue
        path = os.path.join(input_dir, entry)
        try:
            with open(path) as source:
                json.load(source)
        except ValueError as err:
            print("Got ValueError: {0}".format(err))
        else:
            keep.append(path)
    keep.sort()
    # global.conf must come last so its settings act as the fallback.
    tail = os.path.join(input_dir, "global.conf")
    if tail in keep:
        keep.remove(tail)
        keep.append(tail)
    return keep
def load_files(input_dir):
    """Map each path returned by list_files() to its parsed JSON content,
    decoded as an OrderedDict so that key order survives the round trip."""
    loaded = OrderedDict()
    for path in list_files(input_dir):
        with open(path) as src:
            loaded[path] = json.load(src, object_pairs_hook=OrderedDict)
    return loaded
def parse_files(input_dir, output_type):
    """Assemble ssh_config-style output from the loaded .conf data.

    For every loaded file: entries under "Options" become per-file default
    option pairs, each entry under "Hosts" becomes one Host block, and any
    default option whose key does not already appear in the block is
    appended.  The result goes to stdout (output_type == "screen") or is
    appended to the module-global FILE_NAME (output_type == "file"),
    created with mode 0600 when missing.
    """
    conf_files = load_files(input_dir)
    out = list()
    host_out = list()
    # If the output_type is file
    # Try to remove the existing FILE_NAME
    # I'm intentionally not using `with suppress`
    if output_type == "file":
        try:
            os.remove(FILE_NAME)
        except OSError:
            pass
    for dict_file_key, dict_file in conf_files.items():
        default_values = set()
        out.append("# Content from {0}".format(dict_file_key))
        for inner_key, inner_value in dict_file.items():
            if inner_key == "Options":
                for elem in inner_value.items():
                    default_values.add((elem[0], elem[1]))
            elif inner_key == "Hosts":
                for host in inner_value:
                    # IDIOM FIX: the original abused a list comprehension
                    # purely for its append() side effects; a plain loop
                    # does the same work without building a throwaway list.
                    for host_data in host.items():
                        if host_data[0] == "Host":
                            host_out.append("{0} {1}".format(host_data[0], host_data[1]))
                        else:
                            host_out.append("    {0} {1}".format(host_data[0], host_data[1]))
                    if default_values:
                        for default_value in default_values:
                            # Substring test against the rendered lines, as
                            # in the original (a key that is a substring of
                            # another key would also match).
                            if not any(
                                default_value[0] in host_data for host_data in host_out
                            ):
                                host_out.append(
                                    "    {0} {1}".format(
                                        default_value[0], default_value[1]
                                    )
                                )
                    # Write out the host data for current host
                    out.extend(host_out)
                    # Clear host data after each host gets processed
                    del host_out[:]
    if output_type == "screen":
        print("{0}".format("\n".join(out)))
    elif output_type == "file":
        with os.fdopen(os.open(FILE_NAME, os.O_WRONLY | os.O_CREAT, 0o600), "a") as f:
            f.write("{0}\n".format("\n".join(out)))
def do_it():
    """Command-line entry point: declare the options table, parse it, and
    dispatch to parse_files() when the source directory checks out."""
    parser = argparse.ArgumentParser()
    # Options declared as data, registered in one loop.
    arg_specs = [
        (("-s", "--source-dir"),
         dict(dest="source_dir",
              help="Set the source directory from which the files will be loaded",
              type=str,
              action="store",
              default=os.path.expanduser("~") + "/.ssh/confs/")),
        (("-o", "--output"),
         dict(dest="output",
              help="Set the output type",
              type=str,
              action="store",
              default="screen")),
        (("-f", "--file"),
         dict(dest="file_name",
              help="Set the file name of the output file",
              type=str,
              action="store",
              default=os.path.join(
                  os.path.dirname(os.path.abspath(__file__)), "outfile-example"))),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    args = parser.parse_args()
    if args.output == "file" and args.file_name:
        # parse_files() reads this as a module-level global.
        global FILE_NAME
        FILE_NAME = args.file_name
    if not args.source_dir:
        return
    if not os.path.exists(args.source_dir):
        print("{} does not exist.".format(args.source_dir))
    elif os.path.isdir(args.source_dir):
        parse_files(args.source_dir, args.output)
    else:
        print("{} is not a directory.".format(args.source_dir))
if __name__ == "__main__":
    do_it()
|
#!/usr/bin/env python
from ..common import *
from ..extractor import VideoExtractor
from xml.dom.minidom import parseString
class YouTube(VideoExtractor):
name = "YouTube"
# YouTube media encoding options, in descending quality order.
# http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs. Retrieved July 17, 2014.
stream_types = [
{'itag': '38', 'container': 'MP4', 'video_resolution': '3072p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3.5-5', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
#{'itag': '85', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '3-4', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '46', 'container': 'WebM', 'video_resolution': '1080p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
{'itag': '37', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3-4.3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
#{'itag': '102', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
{'itag': '45', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '2', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
#{'itag': '84', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '22', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '120', 'container': 'FLV', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'Main@L3.1', 'video_bitrate': '2', 'audio_encoding': 'AAC', 'audio_bitrate': '128'}, # Live streaming only
{'itag': '44', 'container': 'WebM', 'video_resolution': '480p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '1', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '35', 'container': 'FLV', 'video_resolution': '480p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.8-1', 'audio_encoding': 'AAC', 'audio_bitrate': '128'},
#{'itag': '101', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
#{'itag': '100', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '43', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '34', 'container': 'FLV', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '128'},
#{'itag': '82', 'container': 'MP4', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '18', 'container': 'MP4', 'video_resolution': '270p/360p', 'video_encoding': 'H.264', 'video_profile': 'Baseline', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '6', 'container': 'FLV', 'video_resolution': '270p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.8', 'audio_encoding': 'MP3', 'audio_bitrate': '64'},
#{'itag': '83', 'container': 'MP4', 'video_resolution': '240p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '13', 'container': '3GP', 'video_resolution': '', 'video_encoding': 'MPEG-4 Visual', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': ''},
{'itag': '5', 'container': 'FLV', 'video_resolution': '240p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.25', 'audio_encoding': 'MP3', 'audio_bitrate': '64'},
{'itag': '36', 'container': '3GP', 'video_resolution': '240p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.175', 'audio_encoding': 'AAC', 'audio_bitrate': '36'},
{'itag': '17', 'container': '3GP', 'video_resolution': '144p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.05', 'audio_encoding': 'AAC', 'audio_bitrate': '24'},
]
    def decipher(js, s):
        """Recover the real signature from scrambled signature *s*.

        Locates the player's signature-scrambling function inside the
        player JS source *js*, crudely transliterates it (plus its
        helpers) from JavaScript to Python via regex rewriting, and
        exec()s the result.  NOTE(review): exec() runs regex-mangled
        remote code -- fragile by design, and breaks whenever YouTube
        changes the player layout.
        """
        # Examples:
        # - https://www.youtube.com/yts/jsbin/player-da_DK-vflWlK-zq/base.js
        # - https://www.youtube.com/yts/jsbin/player-vflvABTsY/da_DK/base.js
        def tr_js(code):
            # Transliterate a small JS function body into Python source:
            # keywords, Python reserved words used as JS identifiers
            # (as/if/in/is/or), '$' in identifiers, braces to colon+indent,
            # and the common Array/String method idioms.
            code = re.sub(r'function', r'def', code)
            code = re.sub(r'(\W)(as|if|in|is|or)\(', r'\1_\2(', code)
            code = re.sub(r'\$', '_dollar', code)
            code = re.sub(r'\{', r':\n\t', code)
            code = re.sub(r'\}', r'\n', code)
            code = re.sub(r'var\s+', r'', code)
            code = re.sub(r'(\w+).join\(""\)', r'"".join(\1)', code)
            code = re.sub(r'(\w+).length', r'len(\1)', code)
            code = re.sub(r'(\w+).slice\((\w+)\)', r'\1[\2:]', code)
            code = re.sub(r'(\w+).splice\((\w+),(\w+)\)', r'del \1[\2:\2+\3]', code)
            code = re.sub(r'(\w+).split\(""\)', r'list(\1)', code)
            return code
        js = js.replace('\n', ' ')
        # f1: name of the top-level scrambling function in the player JS.
        f1 = match1(js, r'\.set\(\w+\.sp,([$\w]+)\(\w+\.s\)\)') or \
            match1(js, r'"signature",([$\w]+)\(\w+\.\w+\)')
        # f1def: its source; drop any "obj." qualifier from helper calls so
        # the helpers can be emitted as plain global functions.
        f1def = match1(js, r'function %s(\(\w+\)\{[^\{]+\})' % re.escape(f1)) or \
            match1(js, r'\W%s=function(\(\w+\)\{[^\{]+\})' % re.escape(f1))
        f1def = re.sub(r'([$\w]+\.)([$\w]+\(\w+,\d+\))', r'\2', f1def)
        f1def = 'function %s%s' % (f1, f1def)
        code = tr_js(f1def)
        # Pull in every two-argument helper that f1 calls.
        f2s = set(re.findall(r'([$\w]+)\(\w+,\d+\)', f1def))
        for f2 in f2s:
            f2e = re.escape(f2)
            f2def = re.search(r'[^$\w]%s:function\((\w+,\w+)\)(\{[^\{\}]+\})' % f2e, js)
            if f2def:
                f2def = 'function {}({}){}'.format(f2e, f2def.group(1), f2def.group(2))
            else:
                # Single-parameter form: pad with an unused second argument.
                f2def = re.search(r'[^$\w]%s:function\((\w+)\)(\{[^\{\}]+\})' % f2e, js)
                f2def = 'function {}({},b){}'.format(f2e, f2def.group(1), f2def.group(2))
            f2 = re.sub(r'(\W)(as|if|in|is|or)\(', r'\1_\2(', f2)
            f2 = re.sub(r'\$', '_dollar', f2)
            code = code + 'global %s\n' % f2 + tr_js(f2def)
        # Rename f1 the same way tr_js renamed it inside the emitted code.
        f1 = re.sub(r'(as|if|in|is|or)', r'_\1', f1)
        f1 = re.sub(r'\$', '_dollar', f1)
        code = code + 'sig=%s(s)' % f1
        exec(code, globals(), locals())
        return locals()['sig']
def chunk_by_range(url, size):
urls = []
chunk_size = 10485760
start, end = 0, chunk_size - 1
urls.append('%s&range=%s-%s' % (url, start, end))
while end + 1 < size: # processed size < expected size
start, end = end + 1, end + chunk_size
urls.append('%s&range=%s-%s' % (url, start, end))
return urls
def get_url_from_vid(vid):
return 'https://youtu.be/{}'.format(vid)
def get_vid_from_url(url):
"""Extracts video ID from URL.
"""
return match1(url, r'youtu\.be/([^?/]+)') or \
match1(url, r'youtube\.com/embed/([^/?]+)') or \
match1(url, r'youtube\.com/v/([^/?]+)') or \
match1(url, r'youtube\.com/watch/([^/?]+)') or \
parse_query_param(url, 'v') or \
parse_query_param(parse_query_param(url, 'u'), 'v')
def get_playlist_id_from_url(url):
"""Extracts playlist ID from URL.
"""
return parse_query_param(url, 'list') or \
parse_query_param(url, 'p')
    def download_playlist_by_url(self, url, **kwargs):
        """Download every video of the playlist at *url*, in index order.

        Scrapes the playlist page (and its "load more" AJAX continuations)
        for /watch links that carry an explicit index, then downloads each
        video through a fresh extractor instance.
        """
        self.url = url
        playlist_id = self.__class__.get_playlist_id_from_url(self.url)
        if playlist_id is None:
            log.wtf('[Failed] Unsupported URL pattern.')
        video_page = get_content('https://www.youtube.com/playlist?list=%s' % playlist_id)
        from html.parser import HTMLParser
        # Keep only links that carry an explicit playlist index; sort by it.
        # NOTE(review): the sort key is the index as a *string*, so ordering
        # is lexicographic, not numeric -- confirm against upstream intent.
        videos = sorted([HTMLParser().unescape(video)
                         for video in re.findall(r'<a href="(/watch\?[^"]+)"', video_page)
                         if parse_query_param(video, 'index')],
                        key=lambda video: parse_query_param(video, 'index'))
        # Parse browse_ajax page for more videos to load
        load_more_href = match1(video_page, r'data-uix-load-more-href="([^"]+)"')
        while load_more_href:
            browse_ajax = get_content('https://www.youtube.com/%s' % load_more_href)
            browse_data = json.loads(browse_ajax)
            load_more_widget_html = browse_data['load_more_widget_html']
            content_html = browse_data['content_html']
            vs = set(re.findall(r'href="(/watch\?[^"]+)"', content_html))
            videos += sorted([HTMLParser().unescape(video)
                              for video in list(vs)
                              if parse_query_param(video, 'index')])
            load_more_href = match1(load_more_widget_html, r'data-uix-load-more-href="([^"]+)"')
        self.title = re.search(r'<meta name="title" content="([^"]+)"', video_page).group(1)
        self.p_playlist()
        for video in videos:
            vid = parse_query_param(video, 'v')
            index = parse_query_param(video, 'index')
            # One fresh extractor instance per video so per-video state
            # (streams, title, js) stays isolated.
            self.__class__().download_by_url(self.__class__.get_url_from_vid(vid), index=index, **kwargs)
    def prepare(self, **kwargs):
        """Populate self.streams / self.caption_tracks / self.dash_streams.

        Resolves the video id (delegating playlist URLs to
        download_playlist_by_url), fetches get_video_info and -- when
        necessary -- the watch page's ytplayer.config, then collects the
        plain streams, caption tracks and DASH streams.  Live streams are
        handed straight to ffmpeg and the process exits.
        """
        assert self.url or self.vid
        if not self.vid and self.url:
            self.vid = self.__class__.get_vid_from_url(self.url)
            if self.vid is None:
                # No single-video id in the URL: treat it as a playlist.
                self.download_playlist_by_url(self.url, **kwargs)
                exit(0)
        video_info = parse.parse_qs(get_content('https://www.youtube.com/get_video_info?video_id={}'.format(self.vid)))
        ytplayer_config = None
        if 'status' not in video_info:
            log.wtf('[Failed] Unknown status.')
        elif video_info['status'] == ['ok']:
            # Plain (non-ciphered) signature path.
            if 'use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']:
                self.title = parse.unquote_plus(video_info['title'][0])
                # Parse video page (for DASH)
                video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
                try:
                    ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))
                    self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
                    # Workaround: get_video_info returns bad s. Why?
                    stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
                except:
                    # NOTE(review): bare except -- any parse failure falls
                    # back to the get_video_info map, with no player JS.
                    stream_list = video_info['url_encoded_fmt_stream_map'][0].split(',')
                    self.html5player = None
            else:
                # Parse video page instead
                video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
                ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))
                self.title = ytplayer_config['args']['title']
                self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
                stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
        elif video_info['status'] == ['fail']:
            # Error 150: embedding/playback restricted; the watch page may
            # still expose the config.
            if video_info['errorcode'] == ['150']:
                video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
                try:
                    ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+});ytplayer', video_page).group(1))
                except:
                    msg = re.search('class="message">([^<]+)<', video_page).group(1)
                    log.wtf('[Failed] "%s"' % msg.strip())
                if 'title' in ytplayer_config['args']:
                    # 150 Restricted from playback on certain sites
                    # Parse video page instead
                    self.title = ytplayer_config['args']['title']
                    self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
                    stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
                else:
                    log.wtf('[Error] The uploader has not made this video available in your country.')
                    #self.title = re.search('<meta name="title" content="([^"]+)"', video_page).group(1)
                    #stream_list = []
            elif video_info['errorcode'] == ['100']:
                log.wtf('[Failed] This video does not exist.', exit_code=int(video_info['errorcode'][0]))
            else:
                log.wtf('[Failed] %s' % video_info['reason'][0], exit_code=int(video_info['errorcode'][0]))
        else:
            log.wtf('[Failed] Invalid status.')
        # YouTube Live
        if ytplayer_config and (ytplayer_config['args'].get('livestream') == '1' or ytplayer_config['args'].get('live_playback') == '1'):
            hlsvp = ytplayer_config['args']['hlsvp']
            if 'info_only' in kwargs and kwargs['info_only']:
                return
            else:
                # Live playback is delegated entirely to ffmpeg.
                download_url_ffmpeg(hlsvp, self.title, 'mp4')
                exit(0)
        # Collect the plain (muxed) streams keyed by itag.
        for stream in stream_list:
            metadata = parse.parse_qs(stream)
            stream_itag = metadata['itag'][0]
            self.streams[stream_itag] = {
                'itag': metadata['itag'][0],
                'url': metadata['url'][0],
                'sig': metadata['sig'][0] if 'sig' in metadata else None,
                's': metadata['s'][0] if 's' in metadata else None,
                'quality': metadata['quality'][0],
                'type': metadata['type'][0],
                'mime': metadata['type'][0].split(';')[0],
                'container': mime_to_container(metadata['type'][0].split(';')[0]),
            }
        # Prepare caption tracks
        try:
            caption_tracks = json.loads(ytplayer_config['args']['player_response'])['captions']['playerCaptionsTracklistRenderer']['captionTracks']
            for ct in caption_tracks:
                ttsurl, lang = ct['baseUrl'], ct['languageCode']
                tts_xml = parseString(get_content(ttsurl))
                transcript = tts_xml.getElementsByTagName('transcript')[0]
                texts = transcript.getElementsByTagName('text')
                # Render each timed-text element into one SRT cue.
                srt = ""; seq = 0
                for text in texts:
                    if text.firstChild is None: continue # empty element
                    seq += 1
                    start = float(text.getAttribute('start'))
                    if text.getAttribute('dur'):
                        dur = float(text.getAttribute('dur'))
                    else: dur = 1.0 # could be ill-formed XML
                    finish = start + dur
                    m, s = divmod(start, 60); h, m = divmod(m, 60)
                    start = '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')
                    m, s = divmod(finish, 60); h, m = divmod(m, 60)
                    finish = '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')
                    content = unescape_html(text.firstChild.nodeValue)
                    srt += '%s\n' % str(seq)
                    srt += '%s --> %s\n' % (start, finish)
                    srt += '%s\n\n' % content
                self.caption_tracks[lang] = srt
        except: pass
        # Prepare DASH streams
        try:
            dashmpd = ytplayer_config['args']['dashmpd']
            dash_xml = parseString(get_content(dashmpd))
            for aset in dash_xml.getElementsByTagName('AdaptationSet'):
                mimeType = aset.getAttribute('mimeType')
                if mimeType == 'audio/mp4':
                    # Remember the best ([-1]) mp4 audio rendition for muxing.
                    rep = aset.getElementsByTagName('Representation')[-1]
                    burls = rep.getElementsByTagName('BaseURL')
                    dash_mp4_a_url = burls[0].firstChild.nodeValue
                    dash_mp4_a_size = burls[0].getAttribute('yt:contentLength')
                    if not dash_mp4_a_size:
                        try: dash_mp4_a_size = url_size(dash_mp4_a_url)
                        except: continue
                elif mimeType == 'audio/webm':
                    rep = aset.getElementsByTagName('Representation')[-1]
                    burls = rep.getElementsByTagName('BaseURL')
                    dash_webm_a_url = burls[0].firstChild.nodeValue
                    dash_webm_a_size = burls[0].getAttribute('yt:contentLength')
                    if not dash_webm_a_size:
                        try: dash_webm_a_size = url_size(dash_webm_a_url)
                        except: continue
                elif mimeType == 'video/mp4':
                    for rep in aset.getElementsByTagName('Representation'):
                        w = int(rep.getAttribute('width'))
                        h = int(rep.getAttribute('height'))
                        itag = rep.getAttribute('id')
                        burls = rep.getElementsByTagName('BaseURL')
                        dash_url = burls[0].firstChild.nodeValue
                        dash_size = burls[0].getAttribute('yt:contentLength')
                        if not dash_size:
                            try: dash_size = url_size(dash_url)
                            except: continue
                        dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                        dash_mp4_a_urls = self.__class__.chunk_by_range(dash_mp4_a_url, int(dash_mp4_a_size))
                        self.dash_streams[itag] = {
                            'quality': '%sx%s' % (w, h),
                            'itag': itag,
                            'type': mimeType,
                            'mime': mimeType,
                            'container': 'mp4',
                            'src': [dash_urls, dash_mp4_a_urls],
                            'size': int(dash_size) + int(dash_mp4_a_size)
                        }
                elif mimeType == 'video/webm':
                    for rep in aset.getElementsByTagName('Representation'):
                        w = int(rep.getAttribute('width'))
                        h = int(rep.getAttribute('height'))
                        itag = rep.getAttribute('id')
                        burls = rep.getElementsByTagName('BaseURL')
                        dash_url = burls[0].firstChild.nodeValue
                        dash_size = burls[0].getAttribute('yt:contentLength')
                        if not dash_size:
                            try: dash_size = url_size(dash_url)
                            except: continue
                        dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                        dash_webm_a_urls = self.__class__.chunk_by_range(dash_webm_a_url, int(dash_webm_a_size))
                        self.dash_streams[itag] = {
                            'quality': '%sx%s' % (w, h),
                            'itag': itag,
                            'type': mimeType,
                            'mime': mimeType,
                            'container': 'webm',
                            'src': [dash_urls, dash_webm_a_urls],
                            'size': int(dash_size) + int(dash_webm_a_size)
                        }
        except:
            # VEVO
            # No MPD available: rebuild DASH streams from adaptive_fmts,
            # deciphering scrambled signatures with the player JS.
            if not self.html5player: return
            self.js = get_content(self.html5player)
            if 'adaptive_fmts' in ytplayer_config['args']:
                streams = [dict([(i.split('=')[0],
                                  parse.unquote(i.split('=')[1]))
                                 for i in afmt.split('&')])
                           for afmt in ytplayer_config['args']['adaptive_fmts'].split(',')]
                for stream in streams: # get over speed limiting
                    stream['url'] += '&ratebypass=yes'
                for stream in streams: # audio
                    if stream['type'].startswith('audio/mp4'):
                        dash_mp4_a_url = stream['url']
                        if 's' in stream:
                            sig = self.__class__.decipher(self.js, stream['s'])
                            dash_mp4_a_url += '&signature={}'.format(sig)
                        dash_mp4_a_size = stream['clen']
                    elif stream['type'].startswith('audio/webm'):
                        dash_webm_a_url = stream['url']
                        if 's' in stream:
                            sig = self.__class__.decipher(self.js, stream['s'])
                            dash_webm_a_url += '&signature={}'.format(sig)
                        dash_webm_a_size = stream['clen']
                for stream in streams: # video
                    if 'size' in stream:
                        if stream['type'].startswith('video/mp4'):
                            mimeType = 'video/mp4'
                            dash_url = stream['url']
                            if 's' in stream:
                                sig = self.__class__.decipher(self.js, stream['s'])
                                dash_url += '&signature={}'.format(sig)
                            dash_size = stream['clen']
                            itag = stream['itag']
                            dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                            dash_mp4_a_urls = self.__class__.chunk_by_range(dash_mp4_a_url, int(dash_mp4_a_size))
                            self.dash_streams[itag] = {
                                'quality': stream['size'],
                                'itag': itag,
                                'type': mimeType,
                                'mime': mimeType,
                                'container': 'mp4',
                                'src': [dash_urls, dash_mp4_a_urls],
                                'size': int(dash_size) + int(dash_mp4_a_size)
                            }
                        elif stream['type'].startswith('video/webm'):
                            mimeType = 'video/webm'
                            dash_url = stream['url']
                            if 's' in stream:
                                sig = self.__class__.decipher(self.js, stream['s'])
                                dash_url += '&signature={}'.format(sig)
                            dash_size = stream['clen']
                            itag = stream['itag']
                            audio_url = None
                            audio_size = None
                            # Prefer webm audio; fall back to mp4 audio when
                            # no webm audio rendition was seen above.
                            try:
                                audio_url = dash_webm_a_url
                                audio_size = int(dash_webm_a_size)
                            except UnboundLocalError as e:
                                audio_url = dash_mp4_a_url
                                audio_size = int(dash_mp4_a_size)
                            dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                            audio_urls = self.__class__.chunk_by_range(audio_url, int(audio_size))
                            self.dash_streams[itag] = {
                                'quality': stream['size'],
                                'itag': itag,
                                'type': mimeType,
                                'mime': mimeType,
                                'container': 'webm',
                                'src': [dash_urls, audio_urls],
                                'size': int(dash_size) + int(audio_size)
                            }
    def extract(self, **kwargs):
        """Resolve the final download URL for the selected stream.

        Picks either the caller-requested itag or the best available one,
        appends the signature (deciphering it via the player JS when it
        arrived scrambled), and fills in 'src' and 'size' for the stream.
        """
        if not self.streams_sorted:
            # No stream is available
            return
        if 'stream_id' in kwargs and kwargs['stream_id']:
            # Extract the stream
            stream_id = kwargs['stream_id']
            if stream_id not in self.streams and stream_id not in self.dash_streams:
                log.e('[Error] Invalid video format.')
                log.e('Run \'-i\' command with no specific video format to view all available formats.')
                exit(2)
        else:
            # Extract stream with the best quality
            stream_id = self.streams_sorted[0]['itag']
        # DASH streams already carry 'src'/'size'; only plain streams need
        # their URL finalized here.
        if stream_id in self.streams:
            src = self.streams[stream_id]['url']
            if self.streams[stream_id]['sig'] is not None:
                # Signature was delivered in the clear -- append directly.
                sig = self.streams[stream_id]['sig']
                src += '&signature={}'.format(sig)
            elif self.streams[stream_id]['s'] is not None:
                # Scrambled signature: needs the player JS to decipher.
                if not hasattr(self, 'js'):
                    self.js = get_content(self.html5player)
                s = self.streams[stream_id]['s']
                sig = self.__class__.decipher(self.js, s)
                src += '&signature={}'.format(sig)
            self.streams[stream_id]['src'] = [src]
            self.streams[stream_id]['size'] = urls_size(self.streams[stream_id]['src'])
# Module-level entry points expected by the extractor registry.
site = YouTube()
download = site.download_by_url
download_playlist = site.download_playlist_by_url
[youtube] handle the (0,window.encodeURIComponent) call pattern in the player's signature-setting code (fix #2652)
#!/usr/bin/env python
from ..common import *
from ..extractor import VideoExtractor
from xml.dom.minidom import parseString
class YouTube(VideoExtractor):
name = "YouTube"
# YouTube media encoding options, in descending quality order.
# http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs. Retrieved July 17, 2014.
stream_types = [
{'itag': '38', 'container': 'MP4', 'video_resolution': '3072p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3.5-5', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
#{'itag': '85', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '3-4', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '46', 'container': 'WebM', 'video_resolution': '1080p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
{'itag': '37', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3-4.3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
#{'itag': '102', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
{'itag': '45', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '2', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
#{'itag': '84', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '22', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '120', 'container': 'FLV', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'Main@L3.1', 'video_bitrate': '2', 'audio_encoding': 'AAC', 'audio_bitrate': '128'}, # Live streaming only
{'itag': '44', 'container': 'WebM', 'video_resolution': '480p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '1', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '35', 'container': 'FLV', 'video_resolution': '480p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.8-1', 'audio_encoding': 'AAC', 'audio_bitrate': '128'},
#{'itag': '101', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
#{'itag': '100', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '43', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '34', 'container': 'FLV', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '128'},
#{'itag': '82', 'container': 'MP4', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '18', 'container': 'MP4', 'video_resolution': '270p/360p', 'video_encoding': 'H.264', 'video_profile': 'Baseline', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '6', 'container': 'FLV', 'video_resolution': '270p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.8', 'audio_encoding': 'MP3', 'audio_bitrate': '64'},
#{'itag': '83', 'container': 'MP4', 'video_resolution': '240p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '13', 'container': '3GP', 'video_resolution': '', 'video_encoding': 'MPEG-4 Visual', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': ''},
{'itag': '5', 'container': 'FLV', 'video_resolution': '240p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.25', 'audio_encoding': 'MP3', 'audio_bitrate': '64'},
{'itag': '36', 'container': '3GP', 'video_resolution': '240p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.175', 'audio_encoding': 'AAC', 'audio_bitrate': '36'},
{'itag': '17', 'container': '3GP', 'video_resolution': '144p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.05', 'audio_encoding': 'AAC', 'audio_bitrate': '24'},
]
def decipher(js, s):
    """
    Decipher a scrambled stream signature.

    Locates the signature-scrambling routine in the YouTube HTML5 player
    script, translates it (and its helpers) from JavaScript into Python
    source via regex rewriting, executes the result, and returns the
    deciphered signature.

    Args:
        js (str): Full text of the player script (base.js).
        s (str): Scrambled signature to decipher.

    Returns:
        str: Deciphered signature.
    """
    # Examples:
    # - https://www.youtube.com/yts/jsbin/player-da_DK-vflWlK-zq/base.js
    # - https://www.youtube.com/yts/jsbin/player-vflvABTsY/da_DK/base.js
    # - https://www.youtube.com/yts/jsbin/player-vfls4aurX/da_DK/base.js
    def tr_js(code):
        # Crude JS -> Python source translation, covering only the small
        # subset of constructs the signature functions actually use.
        code = re.sub(r'function', r'def', code)
        # Prefix JS identifiers that collide with Python keywords.
        code = re.sub(r'(\W)(as|if|in|is|or)\(', r'\1_\2(', code)
        code = re.sub(r'\$', '_dollar', code)
        code = re.sub(r'\{', r':\n\t', code)
        code = re.sub(r'\}', r'\n', code)
        code = re.sub(r'var\s+', r'', code)
        code = re.sub(r'(\w+).join\(""\)', r'"".join(\1)', code)
        code = re.sub(r'(\w+).length', r'len(\1)', code)
        code = re.sub(r'(\w+).slice\((\w+)\)', r'\1[\2:]', code)
        code = re.sub(r'(\w+).splice\((\w+),(\w+)\)', r'del \1[\2:\2+\3]', code)
        code = re.sub(r'(\w+).split\(""\)', r'list(\1)', code)
        return code

    js = js.replace('\n', ' ')
    # Name of the top-level signature function (several player variants).
    f1 = match1(js, r'\.set\(\w+\.sp,\(0,window\.encodeURIComponent\)\(([$\w]+)') or \
        match1(js, r'\.set\(\w+\.sp,([$\w]+)\(\w+\.s\)\)') or \
        match1(js, r'"signature",([$\w]+)\(\w+\.\w+\)')
    # Its definition, either as a function statement or an assignment.
    f1def = match1(js, r'function %s(\(\w+\)\{[^\{]+\})' % re.escape(f1)) or \
        match1(js, r'\W%s=function(\(\w+\)\{[^\{]+\})' % re.escape(f1))
    # Strip object prefixes from helper calls (obj.fn(a,0) -> fn(a,0)).
    f1def = re.sub(r'([$\w]+\.)([$\w]+\(\w+,\d+\))', r'\2', f1def)
    f1def = 'function %s%s' % (f1, f1def)
    code = tr_js(f1def)
    # Helper functions referenced by the main routine (called as fn(a,N)).
    f2s = set(re.findall(r'([$\w]+)\(\w+,\d+\)', f1def))
    for f2 in f2s:
        f2e = re.escape(f2)
        f2def = re.search(r'[^$\w]%s:function\((\w+,\w+)\)(\{[^\{\}]+\})' % f2e, js)
        if f2def:
            f2def = 'function {}({}){}'.format(f2e, f2def.group(1), f2def.group(2))
        else:
            # Single-parameter variant; pad the signature to two parameters.
            f2def = re.search(r'[^$\w]%s:function\((\w+)\)(\{[^\{\}]+\})' % f2e, js)
            f2def = 'function {}({},b){}'.format(f2e, f2def.group(1), f2def.group(2))
        f2 = re.sub(r'(\W)(as|if|in|is|or)\(', r'\1_\2(', f2)
        f2 = re.sub(r'\$', '_dollar', f2)
        code = code + 'global %s\n' % f2 + tr_js(f2def)
    f1 = re.sub(r'(as|if|in|is|or)', r'_\1', f1)
    f1 = re.sub(r'\$', '_dollar', f1)
    code = code + 'sig=%s(s)' % f1
    # NOTE(review): exec() on code derived from a remote script — only as
    # trustworthy as the fetched player script itself.
    exec(code, globals(), locals())
    return locals()['sig']
def chunk_by_range(url, size, chunk_size=10485760):
    """
    Split a download URL into '&range=start-end' chunk URLs.

    The first chunk is always emitted, even when `size` is smaller than
    one chunk (the server simply returns fewer bytes).

    Args:
        url (str): Base media URL; chunk ranges are appended as a query
            parameter.
        size (int): Total expected size of the resource in bytes.
        chunk_size (int): Bytes per chunk; defaults to 10 MiB, the value
            previously hard-coded here.

    Returns:
        list[str]: URLs covering the whole resource in order.
    """
    urls = []
    start, end = 0, chunk_size - 1
    urls.append('%s&range=%s-%s' % (url, start, end))
    while end + 1 < size:  # processed size < expected size
        start, end = end + 1, end + chunk_size
        urls.append('%s&range=%s-%s' % (url, start, end))
    return urls
def get_url_from_vid(vid):
    """Build the canonical short watch URL for a YouTube video ID."""
    return 'https://youtu.be/%s' % vid
def get_vid_from_url(url):
    """Extracts video ID from URL."""
    # Try each known path-based URL shape in turn.
    path_patterns = (
        r'youtu\.be/([^?/]+)',
        r'youtube\.com/embed/([^/?]+)',
        r'youtube\.com/v/([^/?]+)',
        r'youtube\.com/watch/([^/?]+)',
    )
    for pattern in path_patterns:
        vid = match1(url, pattern)
        if vid:
            return vid
    # Fall back to query parameters: ?v=... or a nested ?u=...&v=... form.
    return parse_query_param(url, 'v') or \
        parse_query_param(parse_query_param(url, 'u'), 'v')
def get_playlist_id_from_url(url):
    """Extracts playlist ID from URL."""
    # Prefer the modern 'list' parameter; fall back to the legacy 'p'.
    playlist_id = parse_query_param(url, 'list')
    if playlist_id:
        return playlist_id
    return parse_query_param(url, 'p')
def download_playlist_by_url(self, url, **kwargs):
    """
    Download every video of a YouTube playlist.

    Scrapes the playlist page, follows the browse_ajax "load more" pages,
    and downloads each video through a fresh extractor instance.

    Args:
        url (str): Playlist URL (must contain a 'list' or 'p' parameter).
        **kwargs: Passed through to each per-video download call.
    """
    self.url = url
    playlist_id = self.__class__.get_playlist_id_from_url(self.url)
    if playlist_id is None:
        log.wtf('[Failed] Unsupported URL pattern.')
    video_page = get_content('https://www.youtube.com/playlist?list=%s' % playlist_id)
    from html.parser import HTMLParser
    # NOTE(review): 'index' is compared as a string here, so the ordering
    # is lexicographic rather than numeric — confirm whether intended.
    videos = sorted([HTMLParser().unescape(video)
                     for video in re.findall(r'<a href="(/watch\?[^"]+)"', video_page)
                     if parse_query_param(video, 'index')],
                    key=lambda video: parse_query_param(video, 'index'))
    # Parse browse_ajax page for more videos to load
    load_more_href = match1(video_page, r'data-uix-load-more-href="([^"]+)"')
    while load_more_href:
        browse_ajax = get_content('https://www.youtube.com/%s' % load_more_href)
        browse_data = json.loads(browse_ajax)
        load_more_widget_html = browse_data['load_more_widget_html']
        content_html = browse_data['content_html']
        vs = set(re.findall(r'href="(/watch\?[^"]+)"', content_html))
        # NOTE(review): unlike the initial page above, this sorted() passes
        # no key=, so AJAX-page entries are sorted by raw URL — verify.
        videos += sorted([HTMLParser().unescape(video)
                          for video in list(vs)
                          if parse_query_param(video, 'index')])
        load_more_href = match1(load_more_widget_html, r'data-uix-load-more-href="([^"]+)"')
    self.title = re.search(r'<meta name="title" content="([^"]+)"', video_page).group(1)
    self.p_playlist()
    for video in videos:
        vid = parse_query_param(video, 'v')
        index = parse_query_param(video, 'index')
        # Use a fresh extractor instance per video so per-video state
        # (title, streams, ...) does not leak between items.
        self.__class__().download_by_url(self.__class__.get_url_from_vid(vid), index=index, **kwargs)
def prepare(self, **kwargs):
    """
    Populate stream metadata for the current video.

    Resolves the video ID, queries get_video_info (falling back to the
    watch page where necessary), then fills self.streams, and — where
    available — self.caption_tracks and self.dash_streams.
    """
    assert self.url or self.vid

    if not self.vid and self.url:
        self.vid = self.__class__.get_vid_from_url(self.url)

        if self.vid is None:
            # Not a single video URL: treat it as a playlist instead.
            self.download_playlist_by_url(self.url, **kwargs)
            exit(0)

    video_info = parse.parse_qs(get_content('https://www.youtube.com/get_video_info?video_id={}'.format(self.vid)))

    ytplayer_config = None
    if 'status' not in video_info:
        log.wtf('[Failed] Unknown status.')
    elif video_info['status'] == ['ok']:
        if 'use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']:
            self.title = parse.unquote_plus(video_info['title'][0])
            # Parse video page (for DASH)
            video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
            try:
                ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))
                self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
                # Workaround: get_video_info returns bad s. Why?
                stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
            except:
                # Page config unavailable: fall back to the get_video_info map.
                stream_list = video_info['url_encoded_fmt_stream_map'][0].split(',')
                self.html5player = None
        else:
            # Parse video page instead
            video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
            ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))
            self.title = ytplayer_config['args']['title']
            self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
            stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
    elif video_info['status'] == ['fail']:
        if video_info['errorcode'] == ['150']:
            video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
            try:
                ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+});ytplayer', video_page).group(1))
            except:
                msg = re.search('class="message">([^<]+)<', video_page).group(1)
                log.wtf('[Failed] "%s"' % msg.strip())

            if 'title' in ytplayer_config['args']:
                # 150 Restricted from playback on certain sites
                # Parse video page instead
                self.title = ytplayer_config['args']['title']
                self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
                stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
            else:
                log.wtf('[Error] The uploader has not made this video available in your country.')
                #self.title = re.search('<meta name="title" content="([^"]+)"', video_page).group(1)
                #stream_list = []
        elif video_info['errorcode'] == ['100']:
            log.wtf('[Failed] This video does not exist.', exit_code=int(video_info['errorcode'][0]))
        else:
            log.wtf('[Failed] %s' % video_info['reason'][0], exit_code=int(video_info['errorcode'][0]))
    else:
        log.wtf('[Failed] Invalid status.')

    # YouTube Live
    if ytplayer_config and (ytplayer_config['args'].get('livestream') == '1' or ytplayer_config['args'].get('live_playback') == '1'):
        hlsvp = ytplayer_config['args']['hlsvp']

        if 'info_only' in kwargs and kwargs['info_only']:
            return
        else:
            # Live streams are handed straight to ffmpeg; nothing to prepare.
            download_url_ffmpeg(hlsvp, self.title, 'mp4')
            exit(0)

    # Regular (non-DASH) streams, keyed by itag.
    for stream in stream_list:
        metadata = parse.parse_qs(stream)
        stream_itag = metadata['itag'][0]
        self.streams[stream_itag] = {
            'itag': metadata['itag'][0],
            'url': metadata['url'][0],
            'sig': metadata['sig'][0] if 'sig' in metadata else None,
            's': metadata['s'][0] if 's' in metadata else None,
            'quality': metadata['quality'][0],
            'type': metadata['type'][0],
            'mime': metadata['type'][0].split(';')[0],
            'container': mime_to_container(metadata['type'][0].split(';')[0]),
        }

    # Prepare caption tracks
    try:
        caption_tracks = json.loads(ytplayer_config['args']['player_response'])['captions']['playerCaptionsTracklistRenderer']['captionTracks']
        for ct in caption_tracks:
            ttsurl, lang = ct['baseUrl'], ct['languageCode']

            tts_xml = parseString(get_content(ttsurl))
            transcript = tts_xml.getElementsByTagName('transcript')[0]
            texts = transcript.getElementsByTagName('text')
            srt = ""; seq = 0
            for text in texts:
                if text.firstChild is None: continue  # empty element
                seq += 1
                start = float(text.getAttribute('start'))
                if text.getAttribute('dur'):
                    dur = float(text.getAttribute('dur'))
                else: dur = 1.0  # could be ill-formed XML
                finish = start + dur
                # Convert seconds to SRT 'HH:MM:SS,mmm' timestamps.
                m, s = divmod(start, 60); h, m = divmod(m, 60)
                start = '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')
                m, s = divmod(finish, 60); h, m = divmod(m, 60)
                finish = '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')
                content = unescape_html(text.firstChild.nodeValue)

                srt += '%s\n' % str(seq)
                srt += '%s --> %s\n' % (start, finish)
                srt += '%s\n\n' % content

            self.caption_tracks[lang] = srt
    except: pass

    # Prepare DASH streams
    try:
        dashmpd = ytplayer_config['args']['dashmpd']
        dash_xml = parseString(get_content(dashmpd))
        for aset in dash_xml.getElementsByTagName('AdaptationSet'):
            mimeType = aset.getAttribute('mimeType')
            if mimeType == 'audio/mp4':
                # Remember the best (last-listed) MP4 audio track for muxing.
                rep = aset.getElementsByTagName('Representation')[-1]
                burls = rep.getElementsByTagName('BaseURL')
                dash_mp4_a_url = burls[0].firstChild.nodeValue
                dash_mp4_a_size = burls[0].getAttribute('yt:contentLength')
                if not dash_mp4_a_size:
                    try: dash_mp4_a_size = url_size(dash_mp4_a_url)
                    except: continue
            elif mimeType == 'audio/webm':
                # Remember the best (last-listed) WebM audio track for muxing.
                rep = aset.getElementsByTagName('Representation')[-1]
                burls = rep.getElementsByTagName('BaseURL')
                dash_webm_a_url = burls[0].firstChild.nodeValue
                dash_webm_a_size = burls[0].getAttribute('yt:contentLength')
                if not dash_webm_a_size:
                    try: dash_webm_a_size = url_size(dash_webm_a_url)
                    except: continue
            elif mimeType == 'video/mp4':
                for rep in aset.getElementsByTagName('Representation'):
                    w = int(rep.getAttribute('width'))
                    h = int(rep.getAttribute('height'))
                    itag = rep.getAttribute('id')
                    burls = rep.getElementsByTagName('BaseURL')
                    dash_url = burls[0].firstChild.nodeValue
                    dash_size = burls[0].getAttribute('yt:contentLength')
                    if not dash_size:
                        try: dash_size = url_size(dash_url)
                        except: continue
                    dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                    dash_mp4_a_urls = self.__class__.chunk_by_range(dash_mp4_a_url, int(dash_mp4_a_size))
                    self.dash_streams[itag] = {
                        'quality': '%sx%s' % (w, h),
                        'itag': itag,
                        'type': mimeType,
                        'mime': mimeType,
                        'container': 'mp4',
                        'src': [dash_urls, dash_mp4_a_urls],
                        'size': int(dash_size) + int(dash_mp4_a_size)
                    }
            elif mimeType == 'video/webm':
                for rep in aset.getElementsByTagName('Representation'):
                    w = int(rep.getAttribute('width'))
                    h = int(rep.getAttribute('height'))
                    itag = rep.getAttribute('id')
                    burls = rep.getElementsByTagName('BaseURL')
                    dash_url = burls[0].firstChild.nodeValue
                    dash_size = burls[0].getAttribute('yt:contentLength')
                    if not dash_size:
                        try: dash_size = url_size(dash_url)
                        except: continue
                    dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                    dash_webm_a_urls = self.__class__.chunk_by_range(dash_webm_a_url, int(dash_webm_a_size))
                    self.dash_streams[itag] = {
                        'quality': '%sx%s' % (w, h),
                        'itag': itag,
                        'type': mimeType,
                        'mime': mimeType,
                        'container': 'webm',
                        'src': [dash_urls, dash_webm_a_urls],
                        'size': int(dash_size) + int(dash_webm_a_size)
                    }
    except:
        # VEVO
        if not self.html5player: return
        self.js = get_content(self.html5player)

        if 'adaptive_fmts' in ytplayer_config['args']:
            # Parse the adaptive_fmts field: comma-separated streams, each a
            # &-separated list of key=value pairs.
            streams = [dict([(i.split('=')[0],
                              parse.unquote(i.split('=')[1]))
                             for i in afmt.split('&')])
                       for afmt in ytplayer_config['args']['adaptive_fmts'].split(',')]

            for stream in streams:  # get over speed limiting
                stream['url'] += '&ratebypass=yes'
            for stream in streams:  # audio
                if stream['type'].startswith('audio/mp4'):
                    dash_mp4_a_url = stream['url']
                    if 's' in stream:
                        sig = self.__class__.decipher(self.js, stream['s'])
                        dash_mp4_a_url += '&signature={}'.format(sig)
                    dash_mp4_a_size = stream['clen']
                elif stream['type'].startswith('audio/webm'):
                    dash_webm_a_url = stream['url']
                    if 's' in stream:
                        sig = self.__class__.decipher(self.js, stream['s'])
                        dash_webm_a_url += '&signature={}'.format(sig)
                    dash_webm_a_size = stream['clen']
            for stream in streams:  # video
                if 'size' in stream:
                    if stream['type'].startswith('video/mp4'):
                        mimeType = 'video/mp4'
                        dash_url = stream['url']
                        if 's' in stream:
                            sig = self.__class__.decipher(self.js, stream['s'])
                            dash_url += '&signature={}'.format(sig)
                        dash_size = stream['clen']
                        itag = stream['itag']
                        dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                        dash_mp4_a_urls = self.__class__.chunk_by_range(dash_mp4_a_url, int(dash_mp4_a_size))
                        self.dash_streams[itag] = {
                            'quality': stream['size'],
                            'itag': itag,
                            'type': mimeType,
                            'mime': mimeType,
                            'container': 'mp4',
                            'src': [dash_urls, dash_mp4_a_urls],
                            'size': int(dash_size) + int(dash_mp4_a_size)
                        }
                    elif stream['type'].startswith('video/webm'):
                        mimeType = 'video/webm'
                        dash_url = stream['url']
                        if 's' in stream:
                            sig = self.__class__.decipher(self.js, stream['s'])
                            dash_url += '&signature={}'.format(sig)
                        dash_size = stream['clen']
                        itag = stream['itag']
                        audio_url = None
                        audio_size = None
                        # Prefer a WebM audio track; fall back to MP4 audio
                        # if none was seen above (UnboundLocalError).
                        try:
                            audio_url = dash_webm_a_url
                            audio_size = int(dash_webm_a_size)
                        except UnboundLocalError as e:
                            audio_url = dash_mp4_a_url
                            audio_size = int(dash_mp4_a_size)
                        dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                        audio_urls = self.__class__.chunk_by_range(audio_url, int(audio_size))
                        self.dash_streams[itag] = {
                            'quality': stream['size'],
                            'itag': itag,
                            'type': mimeType,
                            'mime': mimeType,
                            'container': 'webm',
                            'src': [dash_urls, audio_urls],
                            'size': int(dash_size) + int(audio_size)
                        }
def extract(self, **kwargs):
    """
    Finalise the selected stream: resolve its signed URL and total size.

    Chooses either the stream requested via kwargs['stream_id'] or the
    best available one, then stores its source URL list and size back
    into self.streams.
    """
    if not self.streams_sorted:
        # No stream is available
        return

    if 'stream_id' in kwargs and kwargs['stream_id']:
        # Extract the stream
        stream_id = kwargs['stream_id']

        if stream_id not in self.streams and stream_id not in self.dash_streams:
            log.e('[Error] Invalid video format.')
            log.e('Run \'-i\' command with no specific video format to view all available formats.')
            exit(2)
    else:
        # Extract stream with the best quality
        stream_id = self.streams_sorted[0]['itag']

    if stream_id in self.streams:
        src = self.streams[stream_id]['url']

        if self.streams[stream_id]['sig'] is not None:
            # Signature was delivered in plain form; append directly.
            sig = self.streams[stream_id]['sig']
            src += '&signature={}'.format(sig)
        elif self.streams[stream_id]['s'] is not None:
            # Scrambled signature: decipher it using the player script
            # (fetched lazily and cached on self.js).
            if not hasattr(self, 'js'):
                self.js = get_content(self.html5player)
            s = self.streams[stream_id]['s']
            sig = self.__class__.decipher(self.js, s)
            src += '&signature={}'.format(sig)

        self.streams[stream_id]['src'] = [src]
        self.streams[stream_id]['size'] = urls_size(self.streams[stream_id]['src'])
# Module-level extractor instance and the callables exported as this
# extractor's entry points (presumably picked up by the downloader core —
# confirm against the registry that imports this module).
site = YouTube()
download = site.download_by_url
download_playlist = site.download_playlist_by_url
|
from asyncio import gather, sleep
from contextlib import contextmanager
from datetime import datetime, timedelta
from itertools import chain
from logging import getLogger
from discord import (
CategoryChannel, Colour, Forbidden, HTTPException,
Member, PermissionOverwrite, Role, TextChannel
)
from discord.ext.commands import bot_has_permissions, command, group, guild_only, has_permissions
from ..db import MuteGuild, MuteUser
from ..utils import maybe_send
from .basecog import BaseCog
logger = getLogger(__name__)
# Recognised duration suffixes mapped to their length in seconds.
units = {
    's': 1,
    'sec': 1,
    'secs': 1,
    'second': 1,
    'seconds': 1,
    'm': 1 * 60,
    'min': 1 * 60,
    'mins': 1 * 60,
    'minute': 1 * 60,
    'minutes': 1 * 60,
    'h': 1 * 60 * 60,
    'hour': 1 * 60 * 60,
    'hours': 1 * 60 * 60,
    'd': 1 * 60 * 60 * 24,
    'day': 1 * 60 * 60 * 24,
    'days': 1 * 60 * 60 * 24
}


def to_timedelta(arg):
    """
    Try converting a string with an optional suffix to :class:`timedelta`.

    Args:
        arg (str): String to parse.

    Returns:
        timedelta: Parsed value of the input string.

    Raises:
        ValueError: Argument could not be parsed or was not strictly positive.
    """
    if not arg:
        return None

    arg = arg.lower()
    value = None
    # Try longer suffixes first: with plain insertion order the bare 's'
    # entry would match inputs like '5mins' first, strip only the trailing
    # 's', and then fail with ValueError on float('5min').
    for unit, multiplier in sorted(units.items(), key=lambda item: len(item[0]), reverse=True):
        if not arg.endswith(unit):
            continue

        value = arg[:-len(unit)].strip()  # Remove unit and any potential whitespace
        value = float(value) * multiplier
        break

    if value is None:
        value = float(arg.strip())  # Special case: no unit; default to seconds

    if value <= 0:
        raise ValueError("Duration must be strictly positive.")

    return timedelta(seconds=value)
async def init_role(ctx, db_guild=None):
    """
    Create a mute role and register it with the database.

    Args:
        ctx (cardinal.context.Context): Context to create the role in.
        db_guild (cardinal.db.MuteGuild): Database binding for the guild if one exists.

    Returns:
        discord.Role: Newly created role with permissions already set.

    Raises:
        discord.HTTPException: Re-raised after the role has been cleaned up
            if setting up positions/overwrites fails.
    """
    mute_role = await ctx.guild.create_role(name='Muted',
                                            colour=Colour.red(),
                                            hoist=True,
                                            reason='Initialising mute role.')

    try:
        # Position mute role directly below own top role
        new_position = ctx.me.top_role.position - 1
        await mute_role.edit(position=new_position)

        new_overwrite = PermissionOverwrite(send_messages=False, speak=False)

        # Process all categories before touching specific channels
        # Makes use of permission sync
        await gather(
            *(category.set_permissions(mute_role, overwrite=new_overwrite)
              for category in ctx.guild.categories)
        )

        # Process channels unaffected by sync
        # Note: Individual channel objects aren't updated with the new overwrites
        # Only the channel lists on the guild object are updated
        def is_synced(channel):
            # A channel already denying both permissions needs no extra call.
            current_overwrite = channel.overwrites_for(mute_role)
            return current_overwrite.send_messages is False and current_overwrite.speak is False

        await gather(
            *(channel.set_permissions(mute_role, overwrite=new_overwrite)
              for channel in chain(ctx.guild.text_channels, ctx.guild.voice_channels)
              if not is_synced(channel))
        )
    except HTTPException as e:
        logger.exception(
            'Setting up mute role for guild {} failed due to HTTP error {}.'
            .format(ctx.guild, e.response.status)
        )
        # Something went wrong, clean up role before re-raising
        await mute_role.delete(reason='Internal error initialising mute role.')
        raise

    if db_guild:
        db_guild.role_id = mute_role.id
    else:
        db_guild = MuteGuild(guild_id=ctx.guild.id, role_id=mute_role.id)
        ctx.session.add(db_guild)

    ctx.session.commit()  # Ensure database entry is created/updated even if later calls fail

    return mute_role
async def unmute_member(member, mute_role, channel=None, *, delay_until=None, delay_delta=None):
    """
    Unmute a given member, optionally after a given delay.

    Args:
        member (discord.Member): Member to unmute.
        mute_role (discord.Role): Role to remove.
        channel (discord.TextChannel): Channel to send the auto-unmute message in.
        delay_until (datetime): Delay execution until a given timestamp passes.
        delay_delta (timedelta): Delay execution for a given timespan.
    """
    # Determine how long to wait; an explicit delta takes precedence over
    # a target timestamp.
    delay_seconds = 0
    if delay_until:
        delay_delta = delay_delta or (delay_until - datetime.utcnow())
    if delay_delta:
        delay_seconds = delay_delta.total_seconds()

    if delay_seconds > 0:
        await sleep(delay_seconds)

    try:
        await member.remove_roles(mute_role, reason='Mute duration ran out.')
    except Forbidden:
        # Missing permission is logged but swallowed; the DB entry is left
        # for the cleanup handlers.
        logger.warning(
            'No permission to unmute user {0} ({0.id}) on guild {0.guild} ({0.guild.id}).'
            .format(member)
        )
        return
    except HTTPException as e:
        logger.exception(
            'Failed to unmute user {0} ({0.id}) on guild {0.guild} ({0.guild.id}) '
            'due to HTTP error {1}.'
            .format(member, e.response.status)
        )
        return

    if not channel:
        return

    await maybe_send(channel, 'User {} was unmuted automatically.'.format(member.mention))
def make_lock_key(member: Member):
    """
    Construct a key tuple from a member object.

    Args:
        member (discord.Member): Member to build the key from.

    Returns:
        tuple[int, int]: Snowflake IDs of the member and the associated guild.
    """
    return member.id, member.guild.id
class Mute(BaseCog):
    """
    Mute utility commands.

    Manages a per-guild "Muted" role, persists mutes in the database, and
    automatically lifts timed mutes once they expire.
    """
    def __init__(self, bot, check_period=30):
        super().__init__(bot)
        self.check_period = check_period
        # (member_id, guild_id) -> number of currently active locks.
        # Counting (instead of the previous set) fixes a race: with a set,
        # two concurrent lock_member() uses for the same member let the
        # first exit drop the key while the second was still active, and
        # the second exit then raised KeyError on remove().
        self._locks = {}
        self.bot.loop.create_task(self.check_mute_timeouts())

    @contextmanager
    def lock_member(self, member):
        """Mark *member* as locked for the duration of the with-block."""
        key = make_lock_key(member)
        self._locks[key] = self._locks.get(key, 0) + 1
        try:
            yield
        finally:
            self._locks[key] -= 1
            if self._locks[key] <= 0:
                del self._locks[key]  # Expunge unused keys to reduce memory usage

    def member_is_locked(self, member):
        """Return True if any lock is currently held for *member*."""
        key = make_lock_key(member)
        return key in self._locks

    def _process_guild(self, db_guild):
        """Yield unmute coroutines for one guild's expiring mutes."""
        guild = self.bot.get_guild(db_guild.guild_id)
        if not guild:
            return

        mute_role = guild.get_role(db_guild.role_id)
        if not mute_role:
            return

        for db_mute in db_guild.mutes:
            member = guild.get_member(db_mute.user_id)
            if not member:
                continue

            yield unmute_member(
                member,
                mute_role,
                guild.get_channel(db_mute.channel_id),
                delay_until=db_mute.muted_until
            )

    def _get_unmutes(self):
        """Collect unmute coroutines for all mutes expiring before the next check."""
        with self.bot.session_scope() as session:
            # Query for mutes that run before the next iteration
            # No need to delete by hand, self.on_guild_member_update() will clean up
            next_iteration_timestamp = (datetime.utcnow() + timedelta(seconds=self.check_period))
            q = session.query(MuteGuild) \
                .join(MuteGuild.mutes) \
                .filter(MuteUser.muted_until.isnot(None),
                        next_iteration_timestamp >= MuteUser.muted_until)

            return chain.from_iterable(self._process_guild(db_guild) for db_guild in q)

    async def check_mute_timeouts(self):
        """Background task: periodically lift expired mutes."""
        await self.bot.wait_until_ready()

        while True:
            unmutes = self._get_unmutes()
            await gather(*unmutes)
            await sleep(self.check_period)

    @BaseCog.listener()
    async def on_guild_channel_create(self, channel):
        """Apply the guild's mute overwrites to newly created channels."""
        with self.bot.session_scope() as session:
            db_guild = session.query(MuteGuild).get(channel.guild.id)
            if not db_guild:
                return

            mute_role = channel.guild.get_role(db_guild.role_id)
            if not mute_role:
                return

            if isinstance(channel, CategoryChannel):
                await channel.set_permissions(mute_role, send_messages=False, speak=False)
            elif isinstance(channel, TextChannel):
                await channel.set_permissions(mute_role, send_messages=False)
            else:
                await channel.set_permissions(mute_role, speak=False)

    @BaseCog.listener()
    async def on_guild_role_delete(self, role):
        with self.bot.session_scope() as session:
            # Delete any bindings if the corresponding role is deleted
            # Use Query.delete() to prevent redundant SELECT
            # Role ID is indexed so delete is faster than querying by guild ID and comparing
            session.query(MuteGuild).filter_by(role_id=role.id).delete(synchronize_session=False)

    @BaseCog.listener()
    async def on_member_join(self, member):
        with self.bot.session_scope() as session:
            db_mute = session.query(MuteUser).get((member.id, member.guild.id))
            # Do not re-mute if mute should have run out already
            # Leave cleanup to self.check_mute_timeouts()
            # NOTE: muted_until is None for indefinite mutes; comparing None
            # against a datetime raised TypeError before, so guard for it —
            # indefinitely muted members must be re-muted on rejoin.
            if not db_mute or (db_mute.muted_until is not None
                               and db_mute.muted_until <= datetime.utcnow()):
                return

            role = member.guild.get_role(db_mute.guild.role_id)
            if not role:
                return

            # Re-mute people who left while muted
            await member.add_roles(role, reason='Muted member rejoined')

    @BaseCog.listener()
    async def on_member_update(self, before, after):
        if self.member_is_locked(before):
            return  # Don't touch locked members

        with self.bot.session_scope() as session:
            db_guild = session.query(MuteGuild).get(before.guild.id)
            if not db_guild:
                return

            mute_role = before.guild.get_role(db_guild.role_id)
            if not mute_role:
                return

            roles_before = set(before.roles)
            roles_after = set(after.roles)
            mute_removed = mute_role in (roles_before - roles_after)
            mute_added = mute_role in (roles_after - roles_before)

            db_mute = session.query(MuteUser).get((before.id, before.guild.id))
            if mute_removed and db_mute:
                session.delete(db_mute)

            # Check if binding exists already to prevent double create
            if mute_added and not db_mute:
                db_mute = MuteUser(user_id=before.id, guild_id=before.guild.id)
                session.add(db_mute)

    # Ensure this is neither parsed nor called for anything but the mute command itself
    @group(invoke_without_command=True, aliases=['gag'])
    @guild_only()
    @has_permissions(manage_roles=True)
    @bot_has_permissions(manage_roles=True)
    async def mute(self, ctx, member: Member, *, duration: to_timedelta = None):
        """
        Mute a user from chat, optionally specifying an automatic timeout.

        Does nothing if the user is already muted.

        Arguments:
            - member: Member to mute.
            - [optional] duration: Duration of the mute. Defaults to infinite.
              Default unit if unspecified is seconds.

        Note that it is not possible to explicitly specify infinity as a duration.

        Required context: Server

        Required permissions:
            - Manage Roles

        Required bot permissions:
            - Manage Roles
        """
        db_guild = ctx.session.query(MuteGuild).get(ctx.guild.id)

        mute_role = None
        if db_guild:
            mute_role = ctx.guild.get_role(db_guild.role_id)

        if not mute_role:
            # No role => create new and save to DB
            mute_role = await init_role(ctx, db_guild)

        if mute_role in member.roles:  # Mute role already there, nothing to do here
            return

        # Add mute to DB prior to role assigment, member update handler triggers otherwise
        is_short_mute = duration and duration.total_seconds() <= self.check_period
        if not is_short_mute:
            db_mute = MuteUser(user_id=member.id, guild_id=ctx.guild.id)
            if duration:
                db_mute.muted_until = datetime.utcnow() + duration
                db_mute.channel_id = ctx.channel.id
            ctx.session.add(db_mute)

        with self.lock_member(member):  # Lock member until command terminates
            await member.add_roles(mute_role, reason='Muted by {}.'.format(ctx.author))

        # TODO: Include duration in message
        await maybe_send(ctx, 'User {} was muted by {}.'
                         .format(member.mention, ctx.author.mention))

        # User should be muted for less than one check period
        # => queue unmute directly and don't touch DB from here
        if is_short_mute:
            await unmute_member(member, mute_role, ctx.channel, delay_delta=duration)

    @mute.command()
    @guild_only()
    @has_permissions(manage_roles=True)
    async def setrole(self, ctx, role: Role):
        """
        Set the mute role to use with mute commands and mute detection.

        Setting it via this command overwrites any existing role.
        However, this command does NOT touch permissions in any way.
        Instead, it assumes you have already set up the role's permissions in a satisfactory manner.

        If the new role is the same as the old one, this command has no effect.
        Internally, this unmarks those previously marked as muted if they do not have the new role.
        Inversely, it marks everyone having the new role as muted.

        Required context: Server

        Required permissions:
            - Manage Roles
        """
        db_guild = ctx.session.query(MuteGuild).get(ctx.guild.id)
        if not db_guild:
            db_guild = MuteGuild(guild_id=ctx.guild.id, role_id=role.id)
            ctx.session.add(db_guild)
            return

        if db_guild.role_id == role.id:
            return

        # Point the binding at the new role — previously missing, so the
        # "overwrites any existing role" promise above was never fulfilled.
        db_guild.role_id = role.id

        # Remove usages of old role from DB
        role_member_ids = {member.id for member in role.members}  # Set for later use
        ctx.session.query(MuteUser).filter(
            MuteUser.guild_id == ctx.guild.id,
            MuteUser.user_id.notin_(list(role_member_ids))  # Make list because SQLAlchemy wants one
        ).delete(synchronize_session=False)

        # Query remaining mutes
        existing_db_mute_ids = {
            db_mute.user_id
            for db_mute in ctx.session.query(MuteUser).filter_by(guild_id=ctx.guild.id)
        }

        # Make set with members that need to be added to DB
        new_mute_member_ids = role_member_ids - existing_db_mute_ids
        ctx.session.add_all(
            MuteUser(user_id=member_id, guild_id=ctx.guild.id) for member_id in new_mute_member_ids
        )

    @command()
    @guild_only()
    @has_permissions(manage_roles=True)
    @bot_has_permissions(manage_roles=True)
    async def unmute(self, ctx, member: Member):
        """
        Unmute a user. Does nothing if the user is not muted.

        Required context: Server

        Required permissions:
            - Manage Roles

        Required bot permissions:
            - Manage Roles
        """
        db_mute = ctx.session.query(MuteUser).get((member.id, ctx.guild.id))
        if not db_mute:
            return

        db_guild = db_mute.guild
        mute_role = ctx.guild.get_role(db_guild.role_id)
        if not mute_role:
            return

        # No need to manually delete DB row, member update handler will
        await member.remove_roles(mute_role, reason='Explicit unmute by {}.'.format(ctx.author))
        await maybe_send(
            ctx,
            'User {} was unmuted by {}.'.format(member.mention, ctx.author.mention)
        )
Attempt to fix possible race condition in locking
By replacing the set with a `defaultdict` that simply defaults to 0, one
can concisely track the number of active locks. However, to not waste
memory, unused keys must be manually removed.
There is currently an assertion in the code as I haven't yet had the
chance to properly test this. If I don't notice any issues, I'll
probably remove it at some point.
I also removed a leftover TODO, since I no longer consider locking a
"problem", nor am I inclined to remove locking anytime soon.
from asyncio import gather, sleep
from collections import defaultdict
from contextlib import contextmanager
from datetime import datetime, timedelta
from itertools import chain
from logging import getLogger
from discord import (
CategoryChannel, Colour, Forbidden, HTTPException,
Member, PermissionOverwrite, Role, TextChannel
)
from discord.ext.commands import bot_has_permissions, command, group, guild_only, has_permissions
from ..db import MuteGuild, MuteUser
from ..utils import maybe_send
from .basecog import BaseCog
logger = getLogger(__name__)
# Recognised duration suffixes mapped to their length in seconds.
units = {
    's': 1,
    'sec': 1,
    'secs': 1,
    'second': 1,
    'seconds': 1,
    'm': 1 * 60,
    'min': 1 * 60,
    'mins': 1 * 60,
    'minute': 1 * 60,
    'minutes': 1 * 60,
    'h': 1 * 60 * 60,
    'hour': 1 * 60 * 60,
    'hours': 1 * 60 * 60,
    'd': 1 * 60 * 60 * 24,
    'day': 1 * 60 * 60 * 24,
    'days': 1 * 60 * 60 * 24
}


def to_timedelta(arg):
    """
    Try converting a string with an optional suffix to :class:`timedelta`.

    Args:
        arg (str): String to parse.

    Returns:
        timedelta: Parsed value of the input string.

    Raises:
        ValueError: Argument could not be parsed or was not strictly positive.
    """
    if not arg:
        return None

    arg = arg.lower()
    value = None
    # Try longer suffixes first: with plain insertion order the bare 's'
    # entry would match inputs like '5mins' first, strip only the trailing
    # 's', and then fail with ValueError on float('5min').
    for unit, multiplier in sorted(units.items(), key=lambda item: len(item[0]), reverse=True):
        if not arg.endswith(unit):
            continue

        value = arg[:-len(unit)].strip()  # Remove unit and any potential whitespace
        value = float(value) * multiplier
        break

    if value is None:
        value = float(arg.strip())  # Special case: no unit; default to seconds

    if value <= 0:
        raise ValueError("Duration must be strictly positive.")

    return timedelta(seconds=value)
async def init_role(ctx, db_guild=None):
    """
    Create a mute role and register it with the database.

    Args:
        ctx (cardinal.context.Context): Context to create the role in.
        db_guild (cardinal.db.MuteGuild): Database binding for the guild if one exists.

    Returns:
        discord.Role: Newly created role with permissions already set.

    Raises:
        discord.HTTPException: Re-raised after the role has been cleaned up
            if setting up positions/overwrites fails.
    """
    mute_role = await ctx.guild.create_role(name='Muted',
                                            colour=Colour.red(),
                                            hoist=True,
                                            reason='Initialising mute role.')

    try:
        # Position mute role directly below own top role
        new_position = ctx.me.top_role.position - 1
        await mute_role.edit(position=new_position)

        new_overwrite = PermissionOverwrite(send_messages=False, speak=False)

        # Process all categories before touching specific channels
        # Makes use of permission sync
        await gather(
            *(category.set_permissions(mute_role, overwrite=new_overwrite)
              for category in ctx.guild.categories)
        )

        # Process channels unaffected by sync
        # Note: Individual channel objects aren't updated with the new overwrites
        # Only the channel lists on the guild object are updated
        def is_synced(channel):
            # A channel already denying both permissions needs no extra call.
            current_overwrite = channel.overwrites_for(mute_role)
            return current_overwrite.send_messages is False and current_overwrite.speak is False

        await gather(
            *(channel.set_permissions(mute_role, overwrite=new_overwrite)
              for channel in chain(ctx.guild.text_channels, ctx.guild.voice_channels)
              if not is_synced(channel))
        )
    except HTTPException as e:
        logger.exception(
            'Setting up mute role for guild {} failed due to HTTP error {}.'
            .format(ctx.guild, e.response.status)
        )
        # Something went wrong, clean up role before re-raising
        await mute_role.delete(reason='Internal error initialising mute role.')
        raise

    if db_guild:
        db_guild.role_id = mute_role.id
    else:
        db_guild = MuteGuild(guild_id=ctx.guild.id, role_id=mute_role.id)
        ctx.session.add(db_guild)

    ctx.session.commit()  # Ensure database entry is created/updated even if later calls fail

    return mute_role
async def unmute_member(member, mute_role, channel=None, *, delay_until=None, delay_delta=None):
    """
    Unmute a given member, optionally after a given delay.

    Args:
        member (discord.Member): Member to unmute.
        mute_role (discord.Role): Role to remove.
        channel (discord.TextChannel): Channel to send the auto-unmute message in.
        delay_until (datetime): Delay execution until a given timestamp passes.
        delay_delta (timedelta): Delay execution for a given timespan.
    """
    # An explicit timespan takes precedence over an absolute timestamp.
    if delay_until and not delay_delta:
        delay_delta = delay_until - datetime.utcnow()

    wait_seconds = delay_delta.total_seconds() if delay_delta else 0
    if wait_seconds > 0:
        await sleep(wait_seconds)

    try:
        await member.remove_roles(mute_role, reason='Mute duration ran out.')
    except Forbidden:
        logger.warning(
            'No permission to unmute user {0} ({0.id}) on guild {0.guild} ({0.guild.id}).'
            .format(member)
        )
        return
    except HTTPException as e:
        logger.exception(
            'Failed to unmute user {0} ({0.id}) on guild {0.guild} ({0.guild.id}) '
            'due to HTTP error {1}.'
            .format(member, e.response.status)
        )
        return

    # Only announce the automatic unmute when a target channel was given.
    if channel:
        await maybe_send(channel, 'User {} was unmuted automatically.'.format(member.mention))
def make_lock_key(member: Member):
    """
    Construct a key tuple from a member object.

    Args:
        member (discord.Member): Member to build the key from.

    Returns:
        tuple[int, int]: Snowflake IDs of the member and the associated guild.
    """
    return member.id, member.guild.id
class Mute(BaseCog):
    """
    Mute utility commands.
    """
    def __init__(self, bot, check_period=30):
        """
        Args:
            bot: Bot instance this cog is attached to.
            check_period (int): Interval in seconds between checks for
                expiring mutes.
        """
        super().__init__(bot)
        self.check_period = check_period
        # Reference count of active locks per (member_id, guild_id) key
        self._locks = defaultdict(int)
        self.bot.loop.create_task(self.check_mute_timeouts())

    @contextmanager
    def lock_member(self, member):
        """
        Lock a member against concurrent processing for the duration of the block.

        Args:
            member (discord.Member): Member to lock.
        """
        key = make_lock_key(member)
        self._locks[key] += 1
        try:
            yield
        finally:
            self._locks[key] -= 1
            assert self._locks[key] >= 0  # Leave this in until I know this works
            if self._locks[key] == 0:
                del self._locks[key]  # Expunge unused keys to reduce memory usage

    def member_is_locked(self, member):
        """
        Check whether a member currently holds at least one lock.

        Args:
            member (discord.Member): Member to check.

        Returns:
            bool: True if the member is locked, False otherwise.
        """
        key = make_lock_key(member)
        # Use .get() instead of indexing: defaultdict.__getitem__ would insert
        # the key with a zero count, defeating the cleanup in lock_member()
        return self._locks.get(key, 0) > 0

    def _process_guild(self, db_guild):
        """
        Yield unmute coroutines for all pending mutes of a single guild.

        Args:
            db_guild (MuteGuild): Database binding of the guild to process.

        Yields:
            Coroutines created by unmute_member().
        """
        guild = self.bot.get_guild(db_guild.guild_id)
        if not guild:
            return

        mute_role = guild.get_role(db_guild.role_id)
        if not mute_role:
            return

        for db_mute in db_guild.mutes:
            member = guild.get_member(db_mute.user_id)
            if not member:
                continue

            yield unmute_member(
                member,
                mute_role,
                guild.get_channel(db_mute.channel_id),
                delay_until=db_mute.muted_until
            )

    def _get_unmutes(self):
        """
        Collect unmute coroutines for all mutes expiring before the next check.

        Returns:
            list: Unmute coroutines ready to be awaited.
        """
        with self.bot.session_scope() as session:
            # Query for mutes that run before the next iteration
            # No need to delete by hand, self.on_guild_member_update() will clean up
            next_iteration_timestamp = (datetime.utcnow() + timedelta(seconds=self.check_period))
            q = session.query(MuteGuild) \
                .join(MuteGuild.mutes) \
                .filter(MuteUser.muted_until.isnot(None),
                        next_iteration_timestamp >= MuteUser.muted_until)

            # Materialise results while the session is still open; a lazy
            # iterable would hit the query only after session_scope() closed it
            return list(chain.from_iterable(self._process_guild(db_guild) for db_guild in q))

    async def check_mute_timeouts(self):
        """Background task that periodically lifts expired mutes."""
        await self.bot.wait_until_ready()

        while True:
            unmutes = self._get_unmutes()
            await gather(*unmutes)
            await sleep(self.check_period)

    @BaseCog.listener()
    async def on_guild_channel_create(self, channel):
        """Apply the guild's mute role overwrites to newly created channels."""
        with self.bot.session_scope() as session:
            db_guild = session.query(MuteGuild).get(channel.guild.id)
            if not db_guild:
                return

            mute_role = channel.guild.get_role(db_guild.role_id)
            if not mute_role:
                return

            if isinstance(channel, CategoryChannel):
                await channel.set_permissions(mute_role, send_messages=False, speak=False)
            elif isinstance(channel, TextChannel):
                await channel.set_permissions(mute_role, send_messages=False)
            else:
                await channel.set_permissions(mute_role, speak=False)

    @BaseCog.listener()
    async def on_guild_role_delete(self, role):
        """Drop the guild's mute binding when its mute role is deleted."""
        with self.bot.session_scope() as session:
            # Delete any bindings if the corresponding role is deleted
            # Use Query.delete() to prevent redundant SELECT
            # Role ID is indexed so delete is faster than querying by guild ID and comparing
            session.query(MuteGuild).filter_by(role_id=role.id).delete(synchronize_session=False)

    @BaseCog.listener()
    async def on_member_join(self, member):
        """Re-apply the mute role to members who rejoin while still muted."""
        with self.bot.session_scope() as session:
            db_mute = session.query(MuteUser).get((member.id, member.guild.id))
            if not db_mute:
                return

            # Do not re-mute if mute should have run out already
            # Leave cleanup to self.check_mute_timeouts()
            # Indefinite mutes store muted_until as None and never expire;
            # comparing None against a datetime would raise TypeError
            if db_mute.muted_until is not None and db_mute.muted_until <= datetime.utcnow():
                return

            role = member.guild.get_role(db_mute.guild.role_id)
            if not role:
                return

            # Re-mute people who left while muted
            await member.add_roles(role, reason='Muted member rejoined')

    @BaseCog.listener()
    async def on_member_update(self, before, after):
        """Synchronise the database with manual mute role additions/removals."""
        if self.member_is_locked(before):
            return  # Don't touch locked members

        with self.bot.session_scope() as session:
            db_guild = session.query(MuteGuild).get(before.guild.id)
            if not db_guild:
                return

            mute_role = before.guild.get_role(db_guild.role_id)
            if not mute_role:
                return

            roles_before = set(before.roles)
            roles_after = set(after.roles)
            mute_removed = mute_role in (roles_before - roles_after)
            mute_added = mute_role in (roles_after - roles_before)

            db_mute = session.query(MuteUser).get((before.id, before.guild.id))
            if mute_removed and db_mute:
                session.delete(db_mute)

            # Check if binding exists already to prevent double create
            if mute_added and not db_mute:
                db_mute = MuteUser(user_id=before.id, guild_id=before.guild.id)
                session.add(db_mute)

    # Ensure this is neither parsed nor called for anything but the mute command itself
    @group(invoke_without_command=True, aliases=['gag'])
    @guild_only()
    @has_permissions(manage_roles=True)
    @bot_has_permissions(manage_roles=True)
    async def mute(self, ctx, member: Member, *, duration: to_timedelta = None):
        """
        Mute a user from chat, optionally specifying an automatic timeout.
        Does nothing if the user is already muted.

        Arguments:
            - member: Member to mute.
            - [optional] duration: Duration of the mute. Defaults to infinite.
            Default unit if unspecified is seconds.
            Note that it is not possible to explicitly specify infinity as a duration.

        Required context: Server

        Required permissions:
            - Manage Roles

        Required bot permissions:
            - Manage Roles
        """
        db_guild = ctx.session.query(MuteGuild).get(ctx.guild.id)

        mute_role = None
        if db_guild:
            mute_role = ctx.guild.get_role(db_guild.role_id)

        if not mute_role:
            # No role => create new and save to DB
            mute_role = await init_role(ctx, db_guild)

        if mute_role in member.roles:  # Mute role already there, nothing to do here
            return

        # Add mute to DB prior to role assignment, member update handler triggers otherwise
        is_short_mute = duration and duration.total_seconds() <= self.check_period
        if not is_short_mute:
            db_mute = MuteUser(user_id=member.id, guild_id=ctx.guild.id)
            if duration:
                db_mute.muted_until = datetime.utcnow() + duration
                db_mute.channel_id = ctx.channel.id

            ctx.session.add(db_mute)

        with self.lock_member(member):  # Lock member until command terminates
            await member.add_roles(mute_role, reason='Muted by {}.'.format(ctx.author))

        # TODO: Include duration in message
        await maybe_send(ctx, 'User {} was muted by {}.'
                         .format(member.mention, ctx.author.mention))

        # User should be muted for less than one check period
        # => queue unmute directly and don't touch DB from here
        if is_short_mute:
            await unmute_member(member, mute_role, ctx.channel, delay_delta=duration)

    @mute.command()
    @guild_only()
    @has_permissions(manage_roles=True)
    async def setrole(self, ctx, role: Role):
        """
        Set the mute role to use with mute commands and mute detection.

        Setting it via this command overwrites any existing role.
        However, this command does NOT touch permissions in any way.
        Instead, it assumes you have already set up the role's permissions in a satisfactory manner.

        If the new role is the same as the old one, this command has no effect.
        Internally, this unmarks those previously marked as muted if they do not have the new role.
        Inversely, it marks everyone having the new role as muted.

        Required context: Server

        Required permissions:
            - Manage Roles
        """
        db_guild = ctx.session.query(MuteGuild).get(ctx.guild.id)
        if not db_guild:
            db_guild = MuteGuild(guild_id=ctx.guild.id, role_id=role.id)
            ctx.session.add(db_guild)
            return

        if db_guild.role_id == role.id:
            return

        # Actually record the new role on the existing binding; without this
        # the command claims to overwrite the role but never does
        db_guild.role_id = role.id

        # Remove usages of old role from DB
        role_member_ids = {member.id for member in role.members}  # Set for later use
        ctx.session.query(MuteUser).filter(
            MuteUser.guild_id == ctx.guild.id,
            MuteUser.user_id.notin_(list(role_member_ids))  # Make list because SQLAlchemy wants one
        ).delete(synchronize_session=False)

        # Query remaining mutes
        existing_db_mute_ids = {
            db_mute.user_id
            for db_mute in ctx.session.query(MuteUser).filter_by(guild_id=ctx.guild.id)
        }

        # Make set with members that need to be added to DB
        new_mute_member_ids = role_member_ids - existing_db_mute_ids
        ctx.session.add_all(
            MuteUser(user_id=member_id, guild_id=ctx.guild.id) for member_id in new_mute_member_ids
        )

    @command()
    @guild_only()
    @has_permissions(manage_roles=True)
    @bot_has_permissions(manage_roles=True)
    async def unmute(self, ctx, member: Member):
        """
        Unmute a user. Does nothing if the user is not muted.

        Required context: Server

        Required permissions:
            - Manage Roles

        Required bot permissions:
            - Manage Roles
        """
        db_mute = ctx.session.query(MuteUser).get((member.id, ctx.guild.id))
        if not db_mute:
            return

        db_guild = db_mute.guild
        mute_role = ctx.guild.get_role(db_guild.role_id)
        if not mute_role:
            return

        # No need to manually delete DB row, member update handler will
        await member.remove_roles(mute_role, reason='Explicit unmute by {}.'.format(ctx.author))
        await maybe_send(
            ctx,
            'User {} was unmuted by {}.'.format(member.mention, ctx.author.mention)
        )
|
# -*- coding: utf-8 -*-
"""
This module contains the code completion mode and the related classes.
"""
import os
try:
from future.builtins import chr
except:
pass # python 3.2 not supported
import logging
import re
import sys
import time
from pyqode.core.api.mode import Mode
from pyqode.core.backend import NotRunning
from pyqode.qt import QtWidgets, QtCore, QtGui
from pyqode.core.api.utils import DelayJobRunner, memoized, TextHelper
from pyqode.core import backend
def _logger():
return logging.getLogger(__name__)
class CodeCompletionMode(Mode, QtCore.QObject):
    """ Provides code completions when typing or when pressing Ctrl+Space.

    This mode provides a code completion system which is extensible.
    It takes care of running the completion request in a background process
    using one or more completion provider and display the results in a
    QCompleter.

    To add code completion for a specific language, you only need to
    implement a new
    :class:`pyqode.core.backend.workers.CodeCompletionWorker.Provider`

    The completion popup is shown when the user press **ctrl+space** or
    automatically while the user is typing some code (this can be configured
    using a series of properties).
    """
    @property
    def trigger_key(self):
        """
        The key that triggers code completion (Default is **Space**:
        Ctrl + Space).
        """
        return self._trigger_key

    @trigger_key.setter
    def trigger_key(self, value):
        self._trigger_key = value
        if self.editor:
            # propagate changes to every clone
            for clone in self.editor.clones:
                try:
                    clone.modes.get(CodeCompletionMode).trigger_key = value
                except KeyError:
                    # this should never happen since we're working with clones
                    pass

    @property
    def trigger_length(self):
        """
        The trigger length defines the word length required to run code
        completion.
        """
        return self._trigger_len

    @trigger_length.setter
    def trigger_length(self, value):
        self._trigger_len = value
        if self.editor:
            # propagate changes to every clone
            for clone in self.editor.clones:
                try:
                    clone.modes.get(CodeCompletionMode).trigger_length = value
                except KeyError:
                    # this should never happen since we're working with clones
                    pass

    @property
    def trigger_symbols(self):
        """
        Defines the list of symbols that immediately trigger a code completion
        request. By default, this list contains the dot character.

        For C++, we would add the '->' operator to that list.
        """
        return self._trigger_symbols

    @trigger_symbols.setter
    def trigger_symbols(self, value):
        self._trigger_symbols = value
        if self.editor:
            # propagate changes to every clone
            for clone in self.editor.clones:
                try:
                    clone.modes.get(CodeCompletionMode).trigger_symbols = value
                except KeyError:
                    # this should never happen since we're working with clones
                    pass

    @property
    def case_sensitive(self):
        """
        True to performs case sensitive completion matching.
        """
        return self._case_sensitive

    @case_sensitive.setter
    def case_sensitive(self, value):
        self._case_sensitive = value
        if self.editor:
            # propagate changes to every clone
            for clone in self.editor.clones:
                try:
                    clone.modes.get(CodeCompletionMode).case_sensitive = value
                except KeyError:
                    # this should never happen since we're working with clones
                    pass

    @property
    def completion_prefix(self):
        """
        Returns the current completion prefix
        """
        return self._helper.word_under_cursor(
            select_whole_word=False).selectedText().strip()

    def __init__(self):
        Mode.__init__(self)
        QtCore.QObject.__init__(self)
        self._current_completion = ""
        self._trigger_key = QtCore.Qt.Key_Space
        self._trigger_len = 1
        self._trigger_symbols = ['.']
        self._case_sensitive = False
        self._completer = None
        # Position of the last completion request; -1 means "no valid context"
        self._last_cursor_line = -1
        self._last_cursor_column = -1
        self._request_id = self._last_request_id = 0

    #
    # Mode interface
    #
    def on_install(self, editor):
        """Create the QCompleter and wire it up to the editor."""
        self._completer = QtWidgets.QCompleter([""], editor)
        self._completer.setCompletionMode(self._completer.PopupCompletion)
        self._completer.activated.connect(self._insert_completion)
        self._completer.highlighted.connect(
            self._on_selected_completion_changed)
        try:
            self._completer.setFilterMode(QtCore.Qt.MatchContains)
        except AttributeError:
            # not available for Qt < 5.2
            pass
        self._completer.setModel(QtGui.QStandardItemModel())
        self._helper = TextHelper(editor)
        Mode.on_install(self, editor)

    def on_uninstall(self):
        """Tear down the completer when the mode is removed."""
        Mode.on_uninstall(self)
        self._completer.popup().hide()
        self._completer = None

    def on_state_changed(self, state):
        """Connect/disconnect editor signals when the mode is (de)activated."""
        if state:
            self.editor.focused_in.connect(self._on_focus_in)
            self.editor.key_pressed.connect(self._on_key_pressed)
            self.editor.post_key_pressed.connect(self._on_key_released)
        else:
            self.editor.focused_in.disconnect(self._on_focus_in)
            self.editor.key_pressed.disconnect(self._on_key_pressed)
            self.editor.post_key_pressed.disconnect(self._on_key_released)

    #
    # Slots
    #
    def _on_key_pressed(self, event):
        """Handle navigation/accept/cancel keys while the popup is visible."""
        def _handle_completer_events():
            nav_key = self._is_navigation_key(event)
            mod = QtCore.Qt.ControlModifier
            ctrl = int(event.modifiers() & mod) == mod
            # complete
            if (event.key() == QtCore.Qt.Key_Enter or
                    event.key() == QtCore.Qt.Key_Return or
                    event.key() == QtCore.Qt.Key_Tab):
                self._insert_completion(self._current_completion)
                self._hide_popup()
                event.accept()
            # hide
            elif (event.key() == QtCore.Qt.Key_Escape or
                    event.key() == QtCore.Qt.Key_Backtab or
                    nav_key and ctrl):
                self._hide_popup()
            # move into list
            elif event.key() == QtCore.Qt.Key_Home:
                self._show_popup(index=0)
                event.accept()
            elif event.key() == QtCore.Qt.Key_End:
                self._show_popup(index=self._completer.completionCount() - 1)
                event.accept()

        _logger().debug('key pressed: %s' % event.text())
        is_shortcut = self._is_shortcut(event)
        # handle completer popup events ourselves
        if self._completer.popup().isVisible():
            _handle_completer_events()
            if is_shortcut:
                event.accept()
        if is_shortcut:
            self._force_request()
            self.request_completion()
            event.accept()

    def _on_key_released(self, event):
        """Decide whether the released key should (re)trigger completion."""
        if self._is_shortcut(event):
            return
        _logger().debug('key released:%s' % event.text())
        word = self._helper.word_under_cursor(
            select_whole_word=True).selectedText()
        _logger().debug('word: %s' % word)
        if event.text():
            if event.key() in [QtCore.Qt.Key_Backspace, QtCore.Qt.Key_Delete] \
                    and (not self._is_popup_visible() or word == ''):
                # deleting invalidated the current context
                self._last_cursor_line = -1
                self._last_cursor_column = -1
                self._hide_popup()
                return
            if event.key() == QtCore.Qt.Key_Return:
                return
            if event.text() in self._trigger_symbols:
                # symbol trigger, force request
                self._force_request()
                self.request_completion()
            elif len(word) >= self._trigger_len and event.text() not in [
                    ' ', ',', ';', ':', '=', '*', '+', '-', '/']:
                # Lenght trigger
                if int(event.modifiers()) in [
                        QtCore.Qt.NoModifier, QtCore.Qt.ShiftModifier]:
                    self.request_completion()
                else:
                    self._hide_popup()
            else:
                self._hide_popup()
        else:
            if self._is_navigation_key(event):
                if self._is_popup_visible() and word:
                    self._show_popup()
                    return
                else:
                    self._hide_popup()

    def _on_focus_in(self, event):
        """
        Resets completer's widget

        :param event: QFocusEvents
        """
        self._completer.setWidget(self.editor)

    def _on_selected_completion_changed(self, completion):
        """Remember the completion currently highlighted in the popup."""
        self._current_completion = completion

    def _insert_completion(self, completion):
        """Replace the word under the cursor with the chosen completion."""
        cursor = self._helper.word_under_cursor(select_whole_word=False)
        cursor.insertText(completion)
        self.editor.setTextCursor(cursor)

    def _on_results_available(self, results):
        """Show backend results if the cursor context has not changed since."""
        _logger().debug("completion results (completions=%r), prefix=%s",
                        results, self.completion_prefix)
        context = results[0]
        results = results[1:]
        line, column, request_id = context
        _logger().debug('request context: %r', context)
        _logger().debug('latest context: %r', (self._last_cursor_line,
                                               self._last_cursor_column,
                                               self._request_id))
        self._last_request_id = request_id
        if (line == self._last_cursor_line and
                column == self._last_cursor_column):
            if self.editor:
                all_results = []
                for res in results:
                    all_results += res
                self._show_completions(all_results)
        else:
            _logger().debug('outdated request, dropping')

    #
    # Helper methods
    #
    def _is_popup_visible(self):
        """Return True if the completion popup is currently shown."""
        return self._completer.popup().isVisible()

    def _force_request(self):
        """Invalidate the cached cursor context so the next request runs."""
        self._last_cursor_column = -1
        self._last_cursor_line = -1

    def _in_disabled_zone(self):
        """Return True if the cursor is inside a comment or string literal."""
        tc = self.editor.textCursor()
        while tc.atBlockEnd() and not tc.atBlockStart() and tc.position():
            tc.movePosition(tc.Left)
        return TextHelper(self.editor).is_comment_or_string(tc)

    def request_completion(self):
        """
        Request a code completion at the current cursor position.

        :return: True if a request was sent or cached results were shown,
            False if completion is disabled here or the backend is down.
        """
        if self._in_disabled_zone():
            return False
        line = self._helper.current_line_nbr()
        column = self._helper.current_column_nbr() - \
            len(self.completion_prefix)
        same_context = (line == self._last_cursor_line and
                        column == self._last_cursor_column)
        if same_context:
            if self._request_id - 1 == self._last_request_id:
                # context has not changed and the correct results can be
                # directly shown
                _logger().debug('request completion ignored, context has not '
                                'changed')
                self._show_popup()
            else:
                # same context but result not yet available
                pass
            return True
        else:
            _logger().debug('requesting completion')
            data = {
                'code': self.editor.toPlainText(),
                'line': line,
                'column': column,
                'path': self.editor.file.path,
                'encoding': self.editor.file.encoding,
                'prefix': self.completion_prefix,
                'request_id': self._request_id
            }
            try:
                self.editor.backend.send_request(
                    backend.CodeCompletionWorker, args=data,
                    on_receive=self._on_results_available)
            except NotRunning:
                _logger().exception('failed to send the completion request')
                return False
            else:
                _logger().debug('request sent: %r', data)
                self._last_cursor_column = column
                self._last_cursor_line = line
                self._request_id += 1
                return True

    def _is_shortcut(self, event):
        """
        Checks if the event's key and modifiers make the completion shortcut
        (Ctrl+Space)

        :param event: QKeyEvent

        :return: bool
        """
        modifier = (QtCore.Qt.MetaModifier if sys.platform == 'darwin' else
                    QtCore.Qt.ControlModifier)
        valid_modifier = int(event.modifiers() & modifier) == modifier
        valid_key = event.key() == self._trigger_key
        return valid_key and valid_modifier

    def _hide_popup(self):
        """
        Hides the completer popup
        """
        _logger().debug('hide popup')
        if (self._completer.popup() is not None and
                self._completer.popup().isVisible()):
            self._completer.popup().hide()

    def _get_popup_rect(self):
        """Compute the rectangle where the popup should be shown."""
        cursor_rec = self.editor.cursorRect()
        char_width = self.editor.fontMetrics().width('A')
        prefix_len = (len(self.completion_prefix) * char_width)
        cursor_rec.translate(
            self.editor.panels.margin_size() - prefix_len,
            self.editor.panels.margin_size(0) + 5)
        popup = self._completer.popup()
        width = popup.verticalScrollBar().sizeHint().width()
        cursor_rec.setWidth(
            self._completer.popup().sizeHintForColumn(0) + width)
        return cursor_rec

    def _show_popup(self, index=0):
        """
        Shows the popup at the specified index.

        :param index: index
        :return:
        """
        full_prefix = self._helper.word_under_cursor(
            select_whole_word=False).selectedText()
        if self._case_sensitive:
            self._completer.setCaseSensitivity(QtCore.Qt.CaseSensitive)
        else:
            self._completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        # set prefix
        self._completer.setCompletionPrefix(self.completion_prefix)
        cnt = self._completer.completionCount()
        selected = self._completer.currentCompletion()
        if (full_prefix == selected) and cnt == 1:
            _logger().debug('user already typed the only completion that we '
                            'have')
            self._hide_popup()
        else:
            # show the completion list
            if self.editor.isVisible():
                try:
                    os.environ['PYQODE_CORE_TESTSUITE']
                except KeyError:
                    pass
                else:
                    # not sure why, but this is needed when running the
                    # test suite on travis, otherwise it segfaults
                    self._completer.setWidget(self.editor)
                self._completer.complete(self._get_popup_rect())
                self._completer.popup().setCurrentIndex(
                    self._completer.completionModel().index(index, 0))
                _logger().debug("popup shown: %r" % self._completer.popup().isVisible())
            else:
                _logger().debug('cannot show popup, editor is not visible')

    def _show_completions(self, completions):
        """Rebuild the completion model and display the popup."""
        _logger().debug("showing %d completions" % len(completions))
        _logger().debug('popup state: %r', self._completer.popup().isVisible())
        t = time.time()
        self._update_model(completions)
        elapsed = time.time() - t
        _logger().debug("completion model updated: %d items in %f seconds",
                        self._completer.model().rowCount(), elapsed)
        self._show_popup()

    def _update_model(self, completions):
        """
        Creates a QStandardModel that holds the suggestion from the completion
        models for the QCompleter

        :param completions: list of completion dicts with a 'name' key and an
            optional 'icon' key
        """
        # build the completion model
        cc_model = QtGui.QStandardItemModel()
        for completion in completions:
            name = completion['name']
            item = QtGui.QStandardItem()
            item.setData(name, QtCore.Qt.DisplayRole)
            if 'icon' in completion:
                item.setData(QtGui.QIcon(completion['icon']),
                             QtCore.Qt.DecorationRole)
            cc_model.appendRow(item)
        self._completer.setModel(cc_model)
        return cc_model

    @staticmethod
    def _is_navigation_key(event):
        """Return True for keys that move the cursor or delete text."""
        return (event.key() == QtCore.Qt.Key_Backspace or
                event.key() == QtCore.Qt.Key_Back or
                event.key() == QtCore.Qt.Key_Delete or
                event.key() == QtCore.Qt.Key_Left or
                event.key() == QtCore.Qt.Key_Right or
                event.key() == QtCore.Qt.Key_Up or
                event.key() == QtCore.Qt.Key_Down or
                event.key() == QtCore.Qt.Key_Space)
Fix a few corner cases and cleanup code (PEP8, unused imports)
# -*- coding: utf-8 -*-
"""
This module contains the code completion mode and the related classes.
"""
import os
import logging
import sys
import time
from pyqode.core.api.mode import Mode
from pyqode.core.backend import NotRunning
from pyqode.qt import QtWidgets, QtCore, QtGui
from pyqode.core.api.utils import TextHelper
from pyqode.core import backend
def _logger():
return logging.getLogger(__name__)
class CodeCompletionMode(Mode, QtCore.QObject):
""" Provides code completions when typing or when pressing Ctrl+Space.
This mode provides a code completion system which is extensible.
It takes care of running the completion request in a background process
using one or more completion provider and display the results in a
QCompleter.
To add code completion for a specific language, you only need to
implement a new
:class:`pyqode.core.backend.workers.CodeCompletionWorker.Provider`
The completion popup is shown when the user press **ctrl+space** or
automatically while the user is typing some code (this can be configured
using a series of properties).
"""
@property
def trigger_key(self):
"""
The key that triggers code completion (Default is **Space**:
Ctrl + Space).
"""
return self._trigger_key
@trigger_key.setter
def trigger_key(self, value):
self._trigger_key = value
if self.editor:
# propagate changes to every clone
for clone in self.editor.clones:
try:
clone.modes.get(CodeCompletionMode).trigger_key = value
except KeyError:
# this should never happen since we're working with clones
pass
@property
def trigger_length(self):
"""
The trigger length defines the word length required to run code
completion.
"""
return self._trigger_len
@trigger_length.setter
def trigger_length(self, value):
self._trigger_len = value
if self.editor:
# propagate changes to every clone
for clone in self.editor.clones:
try:
clone.modes.get(CodeCompletionMode).trigger_length = value
except KeyError:
# this should never happen since we're working with clones
pass
@property
def trigger_symbols(self):
"""
Defines the list of symbols that immediately trigger a code completion
request. By default, this list contains the dot character.
For C++, we would add the '->' operator to that list.
"""
return self._trigger_symbols
@trigger_symbols.setter
def trigger_symbols(self, value):
self._trigger_symbols = value
if self.editor:
# propagate changes to every clone
for clone in self.editor.clones:
try:
clone.modes.get(CodeCompletionMode).trigger_symbols = value
except KeyError:
# this should never happen since we're working with clones
pass
@property
def case_sensitive(self):
"""
True to performs case sensitive completion matching.
"""
return self._case_sensitive
@case_sensitive.setter
def case_sensitive(self, value):
self._case_sensitive = value
if self.editor:
# propagate changes to every clone
for clone in self.editor.clones:
try:
clone.modes.get(CodeCompletionMode).case_sensitive = value
except KeyError:
# this should never happen since we're working with clones
pass
@property
def completion_prefix(self):
"""
Returns the current completion prefix
"""
return self._helper.word_under_cursor(
select_whole_word=False).selectedText().strip()
def __init__(self):
Mode.__init__(self)
QtCore.QObject.__init__(self)
self._current_completion = ""
self._trigger_key = QtCore.Qt.Key_Space
self._trigger_len = 1
self._trigger_symbols = ['.']
self._case_sensitive = False
self._completer = None
self._last_cursor_line = -1
self._last_cursor_column = -1
self._request_id = self._last_request_id = 0
#
# Mode interface
#
def on_install(self, editor):
self._completer = QtWidgets.QCompleter([""], editor)
self._completer.setCompletionMode(self._completer.PopupCompletion)
self._completer.activated.connect(self._insert_completion)
self._completer.highlighted.connect(
self._on_selected_completion_changed)
try:
self._completer.setFilterMode(QtCore.Qt.MatchContains)
except AttributeError:
# not available for Qt < 5.2
pass
self._completer.setModel(QtGui.QStandardItemModel())
self._helper = TextHelper(editor)
Mode.on_install(self, editor)
def on_uninstall(self):
Mode.on_uninstall(self)
self._completer.popup().hide()
self._completer = None
def on_state_changed(self, state):
if state:
self.editor.focused_in.connect(self._on_focus_in)
self.editor.key_pressed.connect(self._on_key_pressed)
self.editor.post_key_pressed.connect(self._on_key_released)
else:
self.editor.focused_in.disconnect(self._on_focus_in)
self.editor.key_pressed.disconnect(self._on_key_pressed)
self.editor.post_key_pressed.disconnect(self._on_key_released)
#
# Slots
#
def _on_key_pressed(self, event):
def _handle_completer_events():
nav_key = self._is_navigation_key(event)
mod = QtCore.Qt.ControlModifier
ctrl = int(event.modifiers() & mod) == mod
# complete
if (event.key() == QtCore.Qt.Key_Enter or
event.key() == QtCore.Qt.Key_Return or
event.key() == QtCore.Qt.Key_Tab):
self._insert_completion(self._current_completion)
self._hide_popup()
event.accept()
# hide
elif (event.key() == QtCore.Qt.Key_Escape or
event.key() == QtCore.Qt.Key_Backtab or
nav_key and ctrl):
self._hide_popup()
# move into list
elif event.key() == QtCore.Qt.Key_Home:
self._show_popup(index=0)
event.accept()
elif event.key() == QtCore.Qt.Key_End:
self._show_popup(index=self._completer.completionCount() - 1)
event.accept()
_logger().warn('key pressed: %s' % event.text())
is_shortcut = self._is_shortcut(event)
# handle completer popup events ourselves
if self._completer.popup().isVisible():
_handle_completer_events()
if is_shortcut:
event.accept()
if is_shortcut:
self._force_request()
self.request_completion()
event.accept()
def _on_key_released(self, event):
if self._is_shortcut(event):
return
_logger().warn('key released:%s' % event.text())
word = self._helper.word_under_cursor(
select_whole_word=True).selectedText()
_logger().warn('word: %s' % word)
if event.text():
if event.key() in [QtCore.Qt.Key_Backspace, QtCore.Qt.Key_Delete] \
and (not self._is_popup_visible() or word == ''):
self._force_request()
return
if event.key() == QtCore.Qt.Key_Return:
return
if event.text() in self._trigger_symbols:
# symbol trigger, force request
self._force_request()
self.request_completion()
elif len(word) >= self._trigger_len and event.text() not in [
' ', ',', ';', ':', '=', '*', '+', '-', '/',
'(', ')', '{', '}', '[', ']', '\t', '\n']:
# Lenght trigger
if int(event.modifiers()) in [
QtCore.Qt.NoModifier, QtCore.Qt.ShiftModifier]:
self.request_completion()
else:
self._hide_popup()
else:
self._hide_popup()
else:
if self._is_navigation_key(event):
if self._is_popup_visible() and word:
self._show_popup()
return
else:
self._hide_popup()
def _on_focus_in(self, event):
"""
Resets completer's widget
:param event: QFocusEvents
"""
self._completer.setWidget(self.editor)
def _on_selected_completion_changed(self, completion):
self._current_completion = completion
def _insert_completion(self, completion):
cursor = self._helper.word_under_cursor(select_whole_word=False)
cursor.insertText(completion)
self.editor.setTextCursor(cursor)
def _on_results_available(self, results):
_logger().warn("completion results (completions=%r), prefix=%s",
results, self.completion_prefix)
context = results[0]
results = results[1:]
line, column, request_id = context
_logger().warn('request context: %r', context)
_logger().warn('latest context: %r', (self._last_cursor_line,
self._last_cursor_column,
self._request_id))
self._last_request_id = request_id
if (line == self._last_cursor_line and
column == self._last_cursor_column):
if self.editor:
all_results = []
for res in results:
all_results += res
self._show_completions(all_results)
else:
_logger().warn('outdated request, dropping')
#
# Helper methods
#
def _is_popup_visible(self):
return self._completer.popup().isVisible()
def _force_request(self):
self._last_cursor_line = -1
self._last_cursor_column = -1
self._hide_popup()
def _in_disabled_zone(self):
tc = self.editor.textCursor()
while tc.atBlockEnd() and not tc.atBlockStart() and tc.position():
tc.movePosition(tc.Left)
return TextHelper(self.editor).is_comment_or_string(tc)
def request_completion(self):
if self._in_disabled_zone():
return False
line = self._helper.current_line_nbr()
column = self._helper.current_column_nbr() - \
len(self.completion_prefix)
same_context = (line == self._last_cursor_line and
column == self._last_cursor_column)
if same_context:
if self._request_id - 1 == self._last_request_id:
# context has not changed and the correct results can be
# directly shown
_logger().warn('request completion ignored, context has not '
'changed')
self._show_popup()
else:
# same context but result not yet available
pass
return True
else:
_logger().warn('requesting completion')
data = {
'code': self.editor.toPlainText(),
'line': line,
'column': column,
'path': self.editor.file.path,
'encoding': self.editor.file.encoding,
'prefix': self.completion_prefix,
'request_id': self._request_id
}
try:
self.editor.backend.send_request(
backend.CodeCompletionWorker, args=data,
on_receive=self._on_results_available)
except NotRunning:
_logger().exception('failed to send the completion request')
return False
else:
_logger().warn('request sent: %r', data)
self._last_cursor_column = column
self._last_cursor_line = line
self._request_id += 1
return True
def _is_shortcut(self, event):
"""
Checks if the event's key and modifiers make the completion shortcut
(Ctrl+Space)
:param event: QKeyEvent
:return: bool
"""
modifier = (QtCore.Qt.MetaModifier if sys.platform == 'darwin' else
QtCore.Qt.ControlModifier)
valid_modifier = int(event.modifiers() & modifier) == modifier
valid_key = event.key() == self._trigger_key
return valid_key and valid_modifier
def _hide_popup(self):
    """
    Hides the completer popup if it is currently visible.
    """
    _logger().warn('hide popup')
    popup = self._completer.popup()
    if popup is not None and popup.isVisible():
        popup.hide()
def _get_popup_rect(self):
    """Computes the rectangle where the completion popup should appear:
    at the cursor, shifted left so the popup aligns with the start of
    the completion prefix, and wide enough for the longest suggestion.
    """
    cursor_rec = self.editor.cursorRect()
    char_width = self.editor.fontMetrics().width('A')
    # Pixel width of the already-typed prefix (assumes fixed-width font).
    prefix_len = (len(self.completion_prefix) * char_width)
    # NOTE(review): the x offset uses margin_size() while the y offset
    # uses margin_size(0) + 5 -- confirm the asymmetry is intended.
    cursor_rec.translate(
        self.editor.panels.margin_size() - prefix_len,
        self.editor.panels.margin_size(0) + 5)
    popup = self._completer.popup()
    # Reserve room for the vertical scroll bar next to the suggestions.
    width = popup.verticalScrollBar().sizeHint().width()
    cursor_rec.setWidth(
        self._completer.popup().sizeHintForColumn(0) + width)
    return cursor_rec
def _show_popup(self, index=0):
    """
    Shows the popup at the specified index.

    The popup is not shown when the user already typed the only
    available completion, or when the editor widget is not visible.

    :param index: index of the completion to pre-select in the popup.
    :return:
    """
    full_prefix = self._helper.word_under_cursor(
        select_whole_word=False).selectedText()
    if self._case_sensitive:
        self._completer.setCaseSensitivity(QtCore.Qt.CaseSensitive)
    else:
        self._completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
    # set prefix
    self._completer.setCompletionPrefix(self.completion_prefix)
    cnt = self._completer.completionCount()
    selected = self._completer.currentCompletion()
    if (full_prefix == selected) and cnt == 1:
        _logger().warn('user already typed the only completion that we '
                       'have')
        self._hide_popup()
    else:
        # show the completion list
        if self.editor.isVisible():
            if self._completer.widget() != self.editor:
                # Fix: stray debug print() replaced by a logger call so
                # the message goes through the logging system instead of
                # stdout.
                _logger().warn('set widget')
                self._completer.setWidget(self.editor)
            self._completer.complete(self._get_popup_rect())
            self._completer.popup().setCurrentIndex(
                self._completer.completionModel().index(index, 0))
            # Fix: lazy %-args instead of eager string interpolation,
            # consistent with the other logging calls in this class.
            _logger().warn(
                'popup shown: %r', self._completer.popup().isVisible())
        else:
            _logger().warn('cannot show popup, editor is not visible')
def _show_completions(self, completions):
    """Updates the completion model with *completions* and shows the
    popup.

    :param completions: list of completion dicts received from the
        backend worker (each entry has at least a 'name' key).
    """
    # Fix: pass %-args lazily instead of interpolating eagerly; this is
    # cheaper when the log level is disabled and consistent with the
    # 'popup state'/'model updated' calls below, which already do so.
    _logger().warn('showing %d completions', len(completions))
    _logger().warn('popup state: %r', self._completer.popup().isVisible())
    t = time.time()
    self._update_model(completions)
    elapsed = time.time() - t
    _logger().warn("completion model updated: %d items in %f seconds",
                   self._completer.model().rowCount(), elapsed)
    self._show_popup()
def _update_model(self, completions):
    """
    Creates a QStandardItemModel that holds the suggestions from the
    completion models and attaches it to the QCompleter.

    :param completions: list of completion dicts; each entry must have
        a 'name' key and may have an 'icon' key (icon shown next to the
        suggestion).
    :return: the newly created model.
    """
    # build the completion model
    cc_model = QtGui.QStandardItemModel()
    for completion in completions:
        name = completion['name']
        item = QtGui.QStandardItem()
        item.setData(name, QtCore.Qt.DisplayRole)
        if 'icon' in completion:
            item.setData(QtGui.QIcon(completion['icon']),
                         QtCore.Qt.DecorationRole)
        cc_model.appendRow(item)
    self._completer.setModel(cc_model)
    return cc_model
@staticmethod
def _is_navigation_key(event):
    """Tells whether *event* carries one of the navigation/editing keys
    that interact with the completion popup."""
    navigation_keys = (
        QtCore.Qt.Key_Backspace,
        QtCore.Qt.Key_Back,
        QtCore.Qt.Key_Delete,
        QtCore.Qt.Key_Left,
        QtCore.Qt.Key_Right,
        QtCore.Qt.Key_Up,
        QtCore.Qt.Key_Down,
        QtCore.Qt.Key_Space,
    )
    return event.key() in navigation_keys
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from contextlib import contextmanager
import copy
import threading
import time
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
# Loss sentinels used by the TPU training loop accumulator.
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
# Name shared by the private graph collection and variable scope that
# hold TPUEstimator-internal state (e.g. the iterations-per-loop var).
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
# Key injected into `params`; reserved, so user params may not use it.
_BATCH_SIZE_KEY = 'batch_size'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY]

# TODO(b/65703635): Flip the value and remove all dead code.
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
def _create_global_step(graph):
    """Creates the global step resource variable in `graph`.

    Args:
      graph: the `Graph` to create the global step in, or None for the
        default graph.

    Returns:
      The newly created global step variable (int64 scalar, resource,
      non-trainable, registered in GLOBAL_VARIABLES and GLOBAL_STEP).

    Raises:
      ValueError: if a global step already exists in the graph.
    """
    graph = graph or ops.get_default_graph()
    if training.get_global_step(graph) is not None:
        raise ValueError('"global_step" already exists.')
    # Create in proper graph and base name_scope.
    with graph.as_default() as g, g.name_scope(None):
        return variable_scope.get_variable(
            ops.GraphKeys.GLOBAL_STEP,
            shape=[],
            dtype=dtypes.int64,
            initializer=init_ops.zeros_initializer(),
            trainable=False,
            use_resource=True,
            collections=[ops.GraphKeys.GLOBAL_VARIABLES,
                         ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
    """Returns the `iterations_per_loop` variable, creating it if needed.

    The variable lives in the private `_TPU_ESTIMATOR` collection and
    variable scope so it cannot collide with user variables, and it is
    colocated with the global step so both land on the same device.

    Raises:
      RuntimeError: if more than one such variable exists in the
        collection (should never happen).
    """
    graph = ops.get_default_graph()
    iter_vars = graph.get_collection(_TPU_ESTIMATOR)
    if len(iter_vars) == 1:
        return iter_vars[0]
    elif len(iter_vars) > 1:
        raise RuntimeError('Multiple iterations_per_loop_var in collection.')
    with ops.colocate_with(training_util.get_global_step()):
        with variable_scope.variable_scope(_TPU_ESTIMATOR,
                                           reuse=variable_scope.AUTO_REUSE):
            return variable_scope.get_variable(
                _ITERATIONS_PER_LOOP_VAR,
                initializer=init_ops.zeros_initializer(),
                shape=[],
                dtype=dtypes.int32,
                trainable=False,
                collections=[_TPU_ESTIMATOR],
                use_resource=True)
def _sync_variables_ops():
    """Returns ops reading back every trainable variable's value.

    Gets the variables back from TPU nodes. This means the variables
    updated by TPU will now be *synced* to host memory. Each read is
    wrapped in check_numerics so NaNs/Infs fail loudly.
    """
    # NOTE(review): the message says "Gradient" but the op checks the
    # variable's value, not a gradient -- confirm wording is intended.
    return [
        array_ops.check_numerics(v.read_value(),
                                 'Gradient for %s is NaN' % v.name).op
        for v in variables.trainable_variables()
    ]
def _increase_eval_step_op(iterations_per_loop):
    """Returns an op to increase the eval step for TPU evaluation.

    Args:
      iterations_per_loop: Tensor. The number of eval steps running in
        the TPU system before returning to CPU host for each
        `Session.run`.

    Returns:
      An operation
    """
    eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
    # Estimator evaluate increases 1 by default. So, we increase the difference.
    return state_ops.assign_add(
        eval_step,
        math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
        use_locking=True)
# Default TF job names used when inferring where TPU computations should
# be placed (see `_TPUContext.master_job`).
_DEFAULT_JOB_NAME = 'tpu_worker'
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
# Master addresses that denote a local (in-process) session.
_LOCAL_MASTERS = ('', 'local')
class _TPUContext(object):
    """A context holds immutable states of TPU computation.

    This immutable object holds TPUEstimator config, train/eval batch
    size, and `TPUEstimator.use_tpu`, which is expected to be passed
    around. It also provides utility functions, based on the current
    state, to determine other information commonly required by TPU
    computation, such as TPU device names, TPU hosts, shard batch size,
    etc.

    N.B. As `mode` is not immutable state in Estimator, but essential to
    distinguish between TPU training and evaluation, a common usage for
    _TPUContext with `mode` is as follows:
    ```
    with _ctx.with_mode(mode) as ctx:
      if ctx.is_running_on_cpu():
         ...
    ```
    """

    def __init__(self, config, train_batch_size, eval_batch_size, use_tpu):
        # `config` is a RunConfig-like object carrying a `tpu_config`.
        self._config = config
        self._train_batch_size = train_batch_size
        # May be None, meaning "evaluate on CPU" (see is_running_on_cpu).
        self._eval_batch_size = eval_batch_size
        self._use_tpu = use_tpu
        self._num_shards_or_none = self._config.tpu_config.num_shards
        # Only ever set on a shallow copy inside `with_mode`.
        self._mode = None

    def _assert_mode(self):
        # Returns the current mode; raises if `with_mode` was never used.
        if self._mode is None:
            raise RuntimeError(
                '`mode` needs to be set via contextmanager `with_mode`.')
        return self._mode

    @property
    def num_of_cores_per_host(self):
        # The placement helpers below assume at most 8 cores per host.
        num_cores = self.num_cores
        return min(num_cores, 8)

    @contextmanager
    def with_mode(self, mode):
        """Yields a shallow copy of this context with `mode` set."""
        new_ctx = copy.copy(self)  # Shallow copy is enough.
        new_ctx._mode = mode  # pylint: disable=protected-access
        yield new_ctx

    @property
    def mode(self):
        return self._assert_mode()

    @property
    def num_cores(self):
        # TODO(xiejw): Adds lazy num_shards initialization.
        return self._num_shards_or_none

    @property
    def num_hosts(self):
        return self.num_cores // self.num_of_cores_per_host

    @property
    def config(self):
        return self._config

    def is_input_sharded_per_core(self):
        """Return true if input_fn is invoked per-core (other than per-host)."""
        self._assert_mode()
        return (self._mode == model_fn_lib.ModeKeys.TRAIN and
                not self._config.tpu_config.per_host_input_for_training)

    def is_running_on_cpu(self):
        """Determines whether the input_fn and model_fn should be invoked on CPU."""
        mode = self._assert_mode()
        # CPU is used when TPU is disabled, for prediction, and for eval
        # when no eval batch size was provided.
        return ((not self._use_tpu) or mode == model_fn_lib.ModeKeys.PREDICT or
                (mode == model_fn_lib.ModeKeys.EVAL and
                 self._eval_batch_size is None))

    @property
    def batch_size_for_input_fn(self):
        """Returns the shard batch size for `input_fn`."""
        mode = self._assert_mode()
        # Special case for eval.
        if mode == model_fn_lib.ModeKeys.EVAL and self._eval_batch_size is None:
            return None
        if self.is_running_on_cpu():
            if mode == model_fn_lib.ModeKeys.TRAIN:
                return self._train_batch_size
            if mode == model_fn_lib.ModeKeys.EVAL:
                return self._eval_batch_size
            # Prediction on CPU: no fixed batch size.
            return None

        global_batch_size = (self._train_batch_size if
                             mode == model_fn_lib.ModeKeys.TRAIN
                             else self._eval_batch_size)
        # On TPU
        if self.is_input_sharded_per_core():
            return global_batch_size // self.num_cores
        else:
            return global_batch_size // self.num_hosts

    @property
    def batch_size_for_model_fn(self):
        """Returns the shard batch size for `model_fn`."""
        mode = self._assert_mode()
        # Special case for eval.
        if mode == model_fn_lib.ModeKeys.EVAL and self._eval_batch_size is None:
            return None
        if self.is_running_on_cpu():
            if mode == model_fn_lib.ModeKeys.TRAIN:
                return self._train_batch_size
            if mode == model_fn_lib.ModeKeys.EVAL:
                return self._eval_batch_size
            return None

        # On TPU. always sharded per core.
        if mode == model_fn_lib.ModeKeys.TRAIN:
            return self._train_batch_size // self.num_cores
        else:
            return self._eval_batch_size // self.num_cores

    @property
    def master_job(self):
        """Returns the job name to use to place TPU computations on.

        Returns:
          A string containing the job name, or None if no job should be
          specified.

        Raises:
          ValueError: If the user needs to specify a tpu_job_name,
            because we are unable to infer the job name automatically,
            or if the user-specified job names are inappropriate.
        """
        run_config = self._config
        # If the user specifies the tpu_job_name, use that.
        if run_config.tpu_config.tpu_job_name:
            return run_config.tpu_config.tpu_job_name

        # The tpu job is determined by the run_config. Right now, this method is
        # required as tpu_config is not part of the RunConfig.
        mode = self._assert_mode()
        master = (run_config.evaluation_master if mode == model_fn_lib.ModeKeys.EVAL
                  else run_config.master)
        if master in _LOCAL_MASTERS:
            return None

        if (not run_config.session_config or
                not run_config.session_config.cluster_def.job):
            return _DEFAULT_JOB_NAME
        cluster_def = run_config.session_config.cluster_def
        job_names = set([job.name for job in cluster_def.job])
        if _DEFAULT_JOB_NAME in job_names:
            # b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
            raise ValueError('Currently, tpu_worker is not an allowed job name.')
        if len(job_names) == 1:
            return cluster_def.job[0].name
        if len(job_names) == 2:
            if _DEFAULT_COORDINATOR_JOB_NAME in job_names:
                job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)
                return job_names.pop()
        # TODO(b/67716447): Include more sophisticated heuristics.
        raise ValueError(
            'Could not infer TPU job name. Please specify a tpu_job_name as part '
            'of your TPUConfig.')

    @property
    def tpu_host_placement_function(self):
        """Returns the TPU host place function."""
        master = self.master_job

        def _placement_function(_sentinal=None, core_id=None, host_id=None):  # pylint: disable=invalid-name
            # The sentinel asserts callers pass core_id/host_id by keyword.
            assert _sentinal is None
            if core_id is not None and host_id is not None:
                raise RuntimeError(
                    'core_id and host_id can have only one non-None value.')

            if master is None:
                return '/replica:0/task:0/device:CPU:0'
            else:
                # This assumes that if using more than 8 shards,
                # the job configuration varies 'task'.
                if core_id is not None:
                    # NOTE(review): `/ 8` is true division under Python 3;
                    # the '%d' below truncates it, but `// 8` would be
                    # clearer -- confirm.
                    host_id = core_id / 8
                return '/job:%s/task:%d/device:CPU:0' % (master, host_id)

        return _placement_function

    @property
    def tpu_device_placement_function(self):
        """Returns a function mapping a core index to its TPU device name."""
        master = self.master_job
        job_device = '' if master is None else ('/job:%s' % master)

        def _placement_function(i):
            # 8 cores per task: `i / 8` selects the task, `i % 8` the core.
            return '%s/task:%d/device:TPU:%d' % (job_device, i / 8, i % 8)

        return _placement_function

    @property
    def tpu_ordinal_function(self):
        """Returns the TPU ordinal fn."""

        def _tpu_ordinal_function(index):
            """Return the TPU ordinal associated with a shard.

            Required because the enqueue ops are placed on CPU.

            Args:
              index: the shard index

            Returns:
              The ordinal of the TPU device the shard's infeed should be
              placed on.
            """
            return index % 8

        return _tpu_ordinal_function
class _SIGNAL(object):
    """Signal used to control the thread of infeed/outfeed.

    All preserved signals must be negative numbers. Positive numbers
    are used to indicate the number of iterations for next
    training/evaluation loop.
    """
    # Enqueue/dequeue one more loop's worth of batches.
    NEXT_BATCH = -1
    # Terminate the worker thread.
    STOP = -2
class TPUEstimatorSpec(collections.namedtuple('TPUEstimatorSpec', [
        'mode',
        'predictions',
        'loss',
        'train_op',
        'eval_metrics',
        'export_outputs',
        'scaffold_fn'])):
    """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.

    See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`,
    and `export_outputs`.

    For evaluation, `eval_metrics` is a tuple of `metric_fn` and
    `tensors`, where `metric_fn` runs on CPU to generate metrics and
    `tensors` represents the `Tensor`s transferred from TPU system to
    CPU host and passed to `metric_fn`.

    To be precise, TPU evaluation expects a slightly different signature
    from the ${tf.estimator.Estimator}. While
    `EstimatorSpec.eval_metric_ops` expects a dict,
    `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and
    `tensors`. The `tensors` could be a list of `Tensor`s or dict of
    names to `Tensor`s. The `tensors` usually specify the model logits,
    which are transferred back from TPU system to CPU host. All tensors
    must have be batch-major, i.e., the batch size is the first
    dimension. Once all tensors are available at CPU host from all
    shards, they are concatenated (on CPU) and passed as positional
    arguments to the `metric_fn` if `tensors` is list or keyword
    arguments if `tensors` is dict. `metric_fn` takes the `tensors` and
    returns a dict from metric string name to the result of calling a
    metric function, namely a `(metric_tensor, update_op)` tuple. See
    `TPUEstimator` for MNIST example how to specify the `eval_metrics`.

    `scaffold_fn` is a function running on CPU to generate the
    `Scaffold`. This function should not capture any Tensors in
    `model_fn`.
    """

    def __new__(cls,
                mode,
                predictions=None,
                loss=None,
                train_op=None,
                eval_metrics=None,
                export_outputs=None,
                scaffold_fn=None):
        """Creates a validated `TPUEstimatorSpec` instance."""
        # Fail fast on a malformed eval_metrics tuple instead of at eval
        # time.
        if eval_metrics is not None:
            _EvalMetrics.validate(eval_metrics)
        return super(TPUEstimatorSpec, cls).__new__(cls,
                                                    mode=mode,
                                                    predictions=predictions,
                                                    loss=loss,
                                                    train_op=train_op,
                                                    eval_metrics=eval_metrics,
                                                    export_outputs=export_outputs,
                                                    scaffold_fn=scaffold_fn)

    def as_estimator_spec(self):
        """Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
        eval_metric_ops = _EvalMetrics.to_metric_metric_ops_for_cpu(
            self.eval_metrics)
        # Scaffold is built lazily through scaffold_fn, if one was given.
        scaffold = self.scaffold_fn() if self.scaffold_fn else None
        return model_fn_lib.EstimatorSpec(mode=self.mode,
                                          predictions=self.predictions,
                                          loss=self.loss,
                                          train_op=self.train_op,
                                          eval_metric_ops=eval_metric_ops,
                                          export_outputs=self.export_outputs,
                                          scaffold=scaffold)
class _InfeedOutfeedThreadBaseController(object):
    """This wraps the infeed/outfeed thread and stops when Estimator finishes."""

    def __init__(self, thd):
        """Starts *thd* as a daemon thread driven by a signal queue."""
        self._thd = thd
        self._signal_queue = Queue.Queue()
        thd.daemon = True
        thd.start()

    def block_and_get_signal(self):
        """Blocks until a signal is posted and returns it."""
        return self._signal_queue.get()

    def send_next_batch_signal(self, signal=_SIGNAL.NEXT_BATCH):
        """Posts *signal* (default: next-batch) to the worker thread."""
        self._signal_queue.put(signal)

    def join(self):
        """Asks the worker thread to stop and waits for it to exit."""
        self._signal_queue.put(_SIGNAL.STOP)
        self._thd.join()
class _OutfeedThreadController(_InfeedOutfeedThreadBaseController):
    """This wraps the outfeed thread and stops when Estimator finishes."""

    def __init__(self, session, dequeue_ops):
        dequeue_thread = threading.Thread(
            target=self._execute_dequeue_ops, args=(session, dequeue_ops))
        super(_OutfeedThreadController, self).__init__(dequeue_thread)

    def _execute_dequeue_ops(self, session, dequeue_ops):
        """Thread body: runs `dequeue_ops` for each requested loop."""
        loop_count = 0
        while True:
            signal = self.block_and_get_signal()
            if signal == _SIGNAL.STOP:
                logging.info('Stop outfeed thread.')
                return

            # Any non-STOP signal is the iteration count to dequeue.
            for step in range(signal):
                logging.debug('Outfeed dequeue for iteration (%d, %d)',
                              loop_count, step)
                session.run(dequeue_ops)
            loop_count += 1

    def join(self):
        """Logs then waits for the outfeed thread to finish."""
        logging.info('Waiting for Outfeed Thread to exit.')
        super(_OutfeedThreadController, self).join()
class _InfeedThreadController(_InfeedOutfeedThreadBaseController):
    """This wraps the infeed thread and stops when Estimator finishes."""

    def __init__(self, session, enqueue_ops, initial_infeed_sleep_secs):
        super(_InfeedThreadController, self).__init__(
            threading.Thread(
                target=self._input_thread_fn_for_loading,
                args=(session, enqueue_ops, initial_infeed_sleep_secs)))

    def _input_thread_fn_for_loading(self, session, enqueue_ops,
                                     initial_infeed_sleep_secs):
        """Thread body: runs `enqueue_ops` as directed by the signal queue.

        Args:
          session: `Session` used to run the enqueue ops.
          enqueue_ops: infeed enqueue ops to execute.
          initial_infeed_sleep_secs: optional delay before the first
            enqueue (e.g. to let the TPU system come up).
        """
        count = 0
        if initial_infeed_sleep_secs:
            logging.info('Infeed thread sleeping for %d seconds.',
                         initial_infeed_sleep_secs)
            time.sleep(initial_infeed_sleep_secs)
            logging.info('Infeed thread starting after sleep')
        try:
            while True:
                signal = self._signal_queue.get()
                if signal == _SIGNAL.STOP:
                    logging.info('Stop Infeed input thread.')
                    return

                if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
                    # Enqueue batches for next loop.
                    session.run(enqueue_ops)
                else:
                    # The signal carries the number of iterations to feed.
                    iterations = signal
                    for i in range(iterations):
                        logging.debug('Infeed enqueue for iteration (%d, %d)',
                                      count, i)
                        session.run(enqueue_ops)
                    count += 1
        except Exception:  # pylint: disable=broad-except
            # Close the session to avoid the main thread from hanging. If input
            # pipeline triggers any error, the infeed thread dies but the main
            # thread for TPU computation waits for the infeed enqueue forever.
            # Close the Session to cancel the main thread Session.run execution.
            #
            # However, sleep for 2 minutes before explicit closing to give some
            # time for the TPU compilation error, if any, propagating, from TPU
            # to CPU host. Compilation errors should be reported by the main
            # thread so that the program can be interrupted and users can take
            # action. Due to a race condition, the infeed thread might see an
            # error first. Closing the session here immediately would result in
            # a session cancellation exception in the main thread, instead of
            # the expected compile error. User code that depends on having the
            # proper exception type will therefore be confused.
            logging.error(
                'Failed running infeed, closing session.\n'
                'You may see an exception from your main session after this. '
                'Sleep for 2 minutes before close Session from infeed thread to '
                'allow the main thread returning an error first, if any.',
                exc_info=1
            )
            time.sleep(120)
            logging.error('Closing the failed session.')
            session.close()

    def join(self):
        """Logs then waits for the infeed thread to finish."""
        logging.info('Waiting for Infeed Thread to exit.')
        super(_InfeedThreadController, self).join()
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
    """A Session hook setting up the TPU initialization, infeed, and outfeed.

    This hook does two major things:
    1. initialize and shutdown TPU system.
    2. launch and join the threads for infeed enqueue and (optional)
       outfeed dequeue.
    """

    def __init__(self, ctx, enqueue_ops, dequeue_ops=None):
        """Constructor.

        Args:
          ctx: `_TPUContext` holding the TPU configuration.
          enqueue_ops: infeed enqueue ops run by the infeed thread.
          dequeue_ops: optional outfeed dequeue ops; when None, no
            outfeed thread is started.
        """
        self._master_job = ctx.master_job
        self._enqueue_ops = enqueue_ops
        self._dequeue_ops = dequeue_ops
        self._initial_infeed_sleep_secs = (
            ctx.config.tpu_config.initial_infeed_sleep_secs)

    def begin(self):
        # Graph construction: TPU init/shutdown ops plus the shared
        # iterations-per-loop variable read in before_run().
        logging.info('TPU job name %s', self._master_job)
        self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
        self._init_op = [tpu.initialize_system(job=self._master_job)]
        self._finalize_op = [tpu.shutdown_system(job=self._master_job)]

    def after_create_session(self, session, coord):
        logging.info('Init TPU system')
        # TPU system initialization can take a while; allow 5 minutes.
        session.run(self._init_op,
                    options=config_pb2.RunOptions(timeout_in_ms=5*60*1000))

        logging.info('Start infeed thread controller')
        self._infeed_thd_controller = _InfeedThreadController(
            session, self._enqueue_ops, self._initial_infeed_sleep_secs)

        if self._dequeue_ops is not None:
            logging.info('Start outfeed thread controller')
            self._outfeed_thd_controller = _OutfeedThreadController(
                session, self._dequeue_ops)

    def before_run(self, run_context):
        # Tell both worker threads how many iterations the next
        # Session.run will execute on the TPU.
        iterations = run_context.session.run(self._iterations_per_loop_var)

        logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
        self._infeed_thd_controller.send_next_batch_signal(iterations)
        if self._dequeue_ops is not None:
            # TODO(xiejw): Refactor the outfeed dequeue into tf.while_loop.
            logging.info(
                'Dequeue next (%d) batch(es) of data from outfeed.', iterations)
            self._outfeed_thd_controller.send_next_batch_signal(iterations)

    def end(self, session):
        # Join the worker threads before shutting down the TPU system.
        logging.info('Stop infeed thread controller')
        self._infeed_thd_controller.join()

        if self._dequeue_ops is not None:
            logging.info('Stop output thread controller')
            self._outfeed_thd_controller.join()

        logging.info('Shutdown TPU system.')
        session.run(self._finalize_op)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
    """Hook that requests stop at a specified step.

    This hook is similar to the `session_run_hook._StopAfterNEvalsHook`
    with following differences for TPU training:

    1. This hook sets the variable for iterations_per_loop, which is
       used by `TPUInfeedOutfeedSessionHook` to control the iterations
       for infeed/outfeed. As the hook execution order is not
       guaranteed, the variable update is handled in
       `after_create_session` and `after_run` as
       `TPUInfeedOutfeedSessionHook` reads the variable value in
       `before_run`.

    2. For each training loop (session.run), the global step could be
       increased multiple times on TPU. The global step tensor value
       will be explicitly read again in `after_run` to ensure the latest
       value is retrieved to avoid race condition.
    """

    def __init__(self, iterations, num_steps=None, last_step=None):
        """Initializes a `StopAtStepHook`.

        Args:
          iterations: The number of iterations to run optimizer per
            training loop.
          num_steps: Number of steps to execute.
          last_step: Step after which to stop.

        Raises:
          ValueError: If one of the arguments is invalid.
        """
        # Exactly one of num_steps / last_step must be given.
        if num_steps is None and last_step is None:
            raise ValueError('One of num_steps or last_step must be specified.')
        if num_steps is not None and last_step is not None:
            raise ValueError('Only one of num_steps or last_step can be specified.')
        self._num_steps = num_steps
        self._last_step = last_step
        self._iterations = iterations

    def _next_iterations(self, global_step, last_step):
        # Clamp to the remaining step gap so we never run past last_step.
        gap = last_step - global_step
        return min(gap, self._iterations)

    def begin(self):
        self._global_step_tensor = training_util.get_global_step()
        if self._global_step_tensor is None:
            raise RuntimeError('Global step should be created.')

        self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

    def after_create_session(self, session, coord):
        global_step = session.run(self._global_step_tensor)
        if self._last_step is None:
            # Resolve num_steps into an absolute last step now that the
            # starting global step is known.
            self._last_step = global_step + self._num_steps

        iterations = self._next_iterations(global_step, self._last_step)

        self._iterations_per_loop_var.load(iterations, session=session)

    def after_run(self, run_context, run_values):
        # Global step cannot be retrieved via SessionRunArgs and before_run due
        # to race condition.
        global_step = run_context.session.run(self._global_step_tensor)
        if global_step >= self._last_step:
            run_context.request_stop()
        else:
            iterations = self._next_iterations(global_step, self._last_step)
            self._iterations_per_loop_var.load(iterations,
                                               session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
    """Hook that loads `num_steps` into the iterations-per-loop variable.

    (The original docstring said "requests stop at a specified step",
    which appears copied from `_TPUStopAtStepHook`; this hook only sets
    the eval iteration count -- it never calls `request_stop`.)
    """

    def __init__(self, num_steps):
        """Initializes a `_SetEvalIterationsHook`.

        Args:
          num_steps: Number of steps to execute.
        """
        self._num_steps = num_steps

    def begin(self):
        self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

    def after_create_session(self, session, coord):
        # Run all eval steps inside a single TPU loop.
        self._iterations_per_loop_var.load(self._num_steps, session=session)
def generate_per_core_enqueue_ops_fn_for_host(
        ctx, input_fn, inputs_structure_recorder):
    """Generates infeed enqueue ops for per-core input_fn on a single host.

    Args:
      ctx: `_TPUContext` with mode set.
      input_fn: user input_fn; invoked once per core on this host.
      inputs_structure_recorder: records the features/labels structure
        so the dequeue side can restore it.

    Returns:
      A tuple `(enqueue_ops_fn, captured_infeed_queue)`; the infeed
      queue is captured lazily when `enqueue_ops_fn` is called during
      graph construction.
    """
    captured_infeed_queue = _CapturedObject()

    def enqueue_ops_fn():
        """A fn returns enqueue_ops."""
        num_cores_per_host = ctx.num_of_cores_per_host
        per_host_sharded_inputs = []
        for core_ordinal in range(num_cores_per_host):
            with ops.name_scope('ordinal_%d' % (core_ordinal)):
                # input_fn may return (features, labels) or just features.
                inputs = input_fn()
                if isinstance(inputs, tuple):
                    features, labels = inputs
                else:
                    features, labels = inputs, None
                inputs_structure_recorder.validate_and_record_structure(
                    features, labels)
                flattened_inputs = (
                    inputs_structure_recorder.flatten_features_and_labels(
                        features, labels))
                per_host_sharded_inputs.append(flattened_inputs)

        infeed_queue = tpu_feed.InfeedQueue(
            number_of_tuple_elements=len(per_host_sharded_inputs[0]))
        captured_infeed_queue.capture(infeed_queue)
        infeed_queue.set_configuration_from_sharded_input_tensors(
            per_host_sharded_inputs)

        per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
            per_host_sharded_inputs,
            tpu_ordinal_function=ctx.tpu_ordinal_function)
        return per_host_enqueue_ops

    return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
        ctx, input_fn, inputs_structure_recorder, batch_axis, device):
    """Generates infeed enqueue ops for per-host input_fn on a single host.

    Args:
      ctx: `_TPUContext` with mode set.
      input_fn: user input_fn; invoked once for the whole host, its
        output is split across the host's cores along `batch_axis`.
      inputs_structure_recorder: records the features/labels structure
        so the dequeue side can restore it.
      batch_axis: shard dimensions used to split each input tensor.
      device: host device on which the input pipeline is placed.

    Returns:
      A tuple `(enqueue_ops_fn, captured_infeed_queue)`.
    """
    captured_infeed_queue = _CapturedObject()

    def enqueue_ops_fn():
        with ops.device(device):
            num_cores_per_host = ctx.num_of_cores_per_host
            # input_fn may return (features, labels) or just features.
            inputs = input_fn()
            if isinstance(inputs, tuple):
                features, labels = inputs
            else:
                features, labels = inputs, None
            inputs_structure_recorder.validate_and_record_structure(
                features, labels)
            unsharded_tensor_list = (
                inputs_structure_recorder.flatten_features_and_labels(
                    features, labels))

            infeed_queue = tpu_feed.InfeedQueue(
                tuple_types=[t.dtype for t in unsharded_tensor_list],
                tuple_shapes=[t.shape for t in unsharded_tensor_list],
                shard_dimensions=batch_axis)
            captured_infeed_queue.capture(infeed_queue)
            infeed_queue.set_number_of_shards(num_cores_per_host)

            per_host_enqueue_ops = (
                infeed_queue.split_inputs_and_generate_enqueue_ops(
                    unsharded_tensor_list,
                    placement_function=lambda x: device))
            return per_host_enqueue_ops

    return enqueue_ops_fn, captured_infeed_queue
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in `_TPUContext`, it
invokes `input_fn` for all cores (usually multi-host TPU training) or for one
host (usually for single-host TPU evaluation), and sends all `features` and
`labels` returned by `input_fn` to TPU infeed. For per-core invocation,
`features` and `labels` are piped to infeed directly, one tuple for each
core. For per-host invocation, `features` and `labels` are split at host
(with respect to `batch_axis`) and piped to all cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separatedly to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`,
and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`.
TPU infeed/outfeed library expects flattened tensor list. So, `features` and
`labels` need to be flattened, before infeed enqueue, and the structure of
them needs to be recorded, in order to restore them after infeed dequeue.
"""
class InputsStructureRecorder(object):
    """Records the structure of `input_fn` outputs (features/labels).

    The recorded key names give a fixed ordering for flattening inputs
    before infeed enqueue and restoring them after infeed dequeue.
    """

    def __init__(self):
        # Key names of features/labels dicts (empty for single tensors)
        # and whether labels were present at all.
        self._feature_names = []
        self._label_names = []
        self._has_labels = False
        # Internal state: becomes True after the first record.
        self._initialized = False

    def has_labels(self):
        """Returns True if the recorded input_fn output included labels."""
        return self._has_labels

    def validate_and_record_structure(self, features, labels):
        """Validates and records the structure of `features` and `labels`."""

        def _extract_key_names(tensor_or_dict):
            # Fix: materialize the keys as a list. Under Python 3 the
            # bare dict view returned previously keeps the dict alive
            # and never compares equal to the initial list default,
            # which would break the structure assertions below.
            if tensor_or_dict is not None and isinstance(tensor_or_dict, dict):
                return list(tensor_or_dict.keys())
            return []

        # Extract structure.
        has_labels = labels is not None
        feature_names = _extract_key_names(features)
        label_names = _extract_key_names(labels)

        if self._initialized:
            # Verify the structure is same. The following should never happen.
            assert feature_names == self._feature_names, 'feature keys mismatched'
            assert label_names == self._label_names, 'label keys mismatched'
            assert has_labels == self._has_labels, 'label presence mismatched'
        else:
            # Record structure.
            self._initialized = True
            self._feature_names = feature_names
            self._label_names = label_names
            self._has_labels = has_labels

    def flatten_features_and_labels(self, features, labels):
        """Flattens the `features` and `labels` to a single tensor list."""
        flattened_inputs = []
        if self._feature_names:
            # We need a fixed ordering for enqueueing and dequeueing.
            flattened_inputs.extend(
                [features[name] for name in self._feature_names])
        else:
            flattened_inputs.append(features)

        if labels is not None:
            if self._label_names:
                # We need a fixed ordering for enqueueing and dequeueing.
                flattened_inputs.extend(
                    [labels[name] for name in self._label_names])
            else:
                flattened_inputs.append(labels)
        return flattened_inputs

    def unflatten_features_and_labels(self, flattened_inputs):
        """Restores the flattened inputs to original features and labels form.

        Args:
          flattened_inputs: Flattened inputs for each shard.

        Returns:
          A tuple of (`features`, `labels`), where `labels` could be
          None. Each one, if present, should have identical structure
          (single tensor vs dict) as the one returned by input_fn.

        Raises:
          ValueError: If the number of expected tensors from
            `flattened_inputs` mismatches the recorded structure.
        """
        expected_num_features = (
            len(self._feature_names) if self._feature_names else 1)
        if self._has_labels:
            expected_num_labels = (
                len(self._label_names) if self._label_names else 1)
        else:
            expected_num_labels = 0

        expected_num_tensors = expected_num_features + expected_num_labels
        if expected_num_tensors != len(flattened_inputs):
            raise ValueError(
                'The number of flattened tensors mismatches expected num. '
                'Expected {}, got {}'.format(expected_num_tensors,
                                             len(flattened_inputs)))

        if self._feature_names:
            unflattened_features = dict(
                zip(self._feature_names,
                    flattened_inputs[:expected_num_features]))
        else:
            # Single tensor case
            unflattened_features = flattened_inputs[0]

        if expected_num_labels == 0:
            unflattened_label = None
        elif self._label_names:
            unflattened_label = dict(
                zip(self._label_names,
                    flattened_inputs[expected_num_features:]))
        else:
            # Single tensor case.
            unflattened_label = flattened_inputs[expected_num_features]

        return unflattened_features, unflattened_label
def __init__(self, input_fn, batch_axis, ctx):
  """Constructor.

  Args:
    input_fn: input fn for train or eval.
    batch_axis: A python tuple of int values describing how each tensor
      produced by the Estimator `input_fn` should be split across the TPU
      compute shards.
    ctx: A `_TPUContext` instance with mode.

  Raises:
    ValueError: If both `sharded_features` and `num_cores` are `None`.
  """
  self._input_fn = input_fn
  self._batch_axis = batch_axis
  self._ctx = ctx
  # Records the input structure (dict vs single tensor, key order) so the
  # enqueue and dequeue sides stay consistent.
  self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()
  self._sharded_per_core = ctx.is_input_sharded_per_core()
  # Populated lazily once the input pipeline has been deployed.
  self._infeed_queue = None
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
  """Generates infeed enqueue ops and dequeue_fn."""
  # Invoking the input pipeline records the input structure (dict vs single
  # tensor, key order, dtypes, shapes) as a side effect; `dequeue_fn` below
  # depends on that recorded structure.
  enqueue_ops = self._invoke_input_fn_and_record_structure()
  self._validate_input_pipeline()

  def dequeue_fn():
    """Dequeues tensors from the infeed and restores their structure."""
    dequeued = self._infeed_queue.generate_dequeue_op()
    return self._inputs_structure_recorder.unflatten_features_and_labels(
        dequeued)

  return enqueue_ops, dequeue_fn
def _invoke_input_fn_and_record_structure(self):
  """Deploys the input pipeline and record input structure.

  Returns:
    A list of enqueue ops (one entry per host), either raw ops or the ops
    wrapped in a tf.while_loop, depending on _WRAP_INPUT_FN_INTO_WHILE_LOOP.
  """
  enqueue_ops = []
  infeed_queues = []
  num_hosts = self._ctx.num_hosts
  tpu_host_placement_fn = self._ctx.tpu_host_placement_function
  if self._sharded_per_core:
    # Per-Core input pipeline deployment.
    # Invoke input pipeline for each core and placed on the corresponding
    # host.
    for host_id in range(num_hosts):
      host_device = tpu_host_placement_fn(host_id=host_id)
      with ops.device(host_device):
        with ops.name_scope('input_pipeline_task%d' % (host_id)):
          enqueue_ops_fn, captured_infeed_queue = (
              generate_per_core_enqueue_ops_fn_for_host(
                  self._ctx, self._input_fn, self._inputs_structure_recorder))
          if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
            enqueue_ops.append(_wrap_computation_in_while_loop(
                device=host_device, op_fn=enqueue_ops_fn))
          else:
            enqueue_ops.append(enqueue_ops_fn())
          # Infeed_queue_getter must be called after enqueue_ops_fn is
          # called, because the queue is only captured as a side effect of
          # building the enqueue ops.
          infeed_queues.append(captured_infeed_queue.get())
  else:
    # Per-host input pipeline deployment: one input_fn invocation per host,
    # sharded across cores along `self._batch_axis`.
    for host_id in range(num_hosts):
      host_device = tpu_host_placement_fn(host_id=host_id)
      with ops.device(host_device):
        with ops.name_scope('input_pipeline_task%d' % (host_id)):
          enqueue_ops_fn, captured_infeed_queue = (
              generate_per_host_enqueue_ops_fn_for_host(
                  self._ctx, self._input_fn, self._inputs_structure_recorder,
                  self._batch_axis, host_device))
          if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
            enqueue_ops.append(_wrap_computation_in_while_loop(
                device=host_device, op_fn=enqueue_ops_fn))
          else:
            enqueue_ops.append(enqueue_ops_fn())
          # Queue capture happens while building the enqueue ops (see above).
          infeed_queues.append(captured_infeed_queue.get())
  # infeed_queue is used to generate dequeue ops. The only thing it uses for
  # dequeue is dtypes and types. So, any one can be used. Here, grab the
  # first one.
  self._infeed_queue = infeed_queues[0]
  return enqueue_ops
def _validate_input_pipeline(self):
  """Checks the input pipeline for QueueRunners and complains if found.

  QueueRunner-based pipelines could be slow and do not scale; `tf.data`
  should be used instead. When the input_fn is wrapped into a while loop,
  this is a hard error; otherwise (legacy behavior) only a warning is
  logged so existing user code is not broken.
  """
  queue_runners = ops.get_default_graph().get_collection(
      ops.GraphKeys.QUEUE_RUNNERS)
  if not queue_runners:
    return
  err_msg = ('Input pipeline contains one or more QueueRunners. '
             'It could be slow and not scalable. Please consider '
             'converting your input pipeline to use `tf.data` instead (see '
             'https://www.tensorflow.org/programmers_guide/datasets for '
             'instructions.')
  if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
    raise RuntimeError(err_msg)
  logging.warn(err_msg)
class _ModelFnWrapper(object):
  """A `model_fn` wrapper.

  This makes calling model_fn on CPU and TPU easier and more consistent and
  performs necessary check and mutation required by TPU training and
  evaluation.

  In addition, this wrapper manages converting the `model_fn` to a single TPU
  train and eval step.
  """

  def __init__(self, model_fn, config, params, ctx):
    # `ctx` is a `_TPUContext`; it supplies the mode and batch size used
    # when the wrapped `model_fn` is invoked.
    self._model_fn = model_fn
    self._config = config
    self._params = params
    self._ctx = ctx

  def call_without_tpu(self, features, labels):
    # Direct CPU invocation; no infeed/outfeed involved.
    return self._call_model_fn(features, labels)

  def convert_to_single_tpu_train_step(self, dequeue_fn):
    """Converts the user provided `model_fn` to a single train step on TPU.

    The user provided `model_fn` takes input tuple
    (features, labels) and produces the EstimatorSpec with train_op and loss
    for train `mode`. This usually represents a single train computation on
    CPU.

    For TPU training, a train (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated
    to all TPU shards. Besides the input should be taken from TPU infeed
    rather than input pipeline (input_fn) directly. To fit TPU loop and
    replicate pattern, the original train computation should be reformed,
    which is the returned `train_step`.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        TPU infeed dequeue channel.

    Returns:
      A Fn representing the train step for TPU.
    """
    captured_scaffold_fn = _CapturedObject()

    def train_step(loss):
      """Training step function for use inside a while loop."""
      del loss  # unused; required in function signature.
      features, labels = dequeue_fn()
      estimator_spec = self._verify_estimator_spec(
          self._call_model_fn(features, labels))
      loss, train_op = estimator_spec.loss, estimator_spec.train_op
      # Only `TPUEstimatorSpec` carries a scaffold_fn; capture `None`
      # otherwise so `_get_scaffold` can handle both cases uniformly.
      if isinstance(estimator_spec, TPUEstimatorSpec):
        captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
      else:
        captured_scaffold_fn.capture(None)
      # Ensure `train_op` runs before the loss is reported back to the loop.
      with ops.control_dependencies([train_op]):
        return array_ops.identity(loss)
    return train_step, captured_scaffold_fn

  def convert_to_single_tpu_eval_step(self, dequeue_fn):
    """Converts the user provided `model_fn` to a single eval step on TPU.

    Similar to training, the user provided `model_fn` takes input tuple
    (features, labels) and produces the TPUEstimatorSpec with eval_metrics
    for eval `mode`. This usually represents a single evaluation computation
    on CPU.

    For TPU evaluation, a eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated
    to all TPU shards. Besides the input and output are slightly different.
    Input, features and labels, should be taken from TPU infeed rather than
    input pipeline (input_fn) directly. Output is managed in two stages.
    First, the model outputs as the result of evaluation computation, usually
    model logits, should be transferred from TPU system to CPU. Then, all
    model outputs are concatenated first on CPU and sent to the metric_fn for
    metrics computation. To fit TPU evaluation pattern, the original eval
    computation should be reformed, which is the returned `eval_step`.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        TPU infeed dequeue channel.

    Returns:
      A tuple of eval_fn and eval_metrics. The eval_fn representing the eval
      step for TPU. and eval_metrics is an `_EvalMetrics` instance.
    """
    eval_metrics = _EvalMetrics(self._ctx)

    captured_scaffold_fn = _CapturedObject()

    def eval_step(total_loss):
      """Evaluation step function for use inside a while loop."""
      features, labels = dequeue_fn()

      tpu_estimator_spec = self._call_model_fn(features, labels)
      if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
        raise RuntimeError(
            'estimator_spec used by TPU evaluation must have type'
            '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

      loss = tpu_estimator_spec.loss
      captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
      # Record the metric tensors (so matching dequeue ops can be built on
      # the CPU side) and enqueue them on the TPU outfeed.
      eval_metrics.record(tpu_estimator_spec)
      outfeed_ops = tpu_ops.outfeed_enqueue_tuple(eval_metrics.outfeed_tensors)

      # Ensure the outfeed enqueue happens before the loss accumulation.
      with ops.control_dependencies([outfeed_ops]):
        return math_ops.add(total_loss, loss)

    return eval_step, eval_metrics, captured_scaffold_fn

  def _call_model_fn(self, features, labels):
    """Calls the model_fn with required parameters."""
    model_fn_args = util.fn_args(self._model_fn)
    kwargs = {}

    # Makes deep copy of `config` and `params` in case user mutates them.
    config = copy.deepcopy(self._config)
    params = copy.deepcopy(self._params)

    if 'labels' in model_fn_args:
      kwargs['labels'] = labels
    elif labels is not None:
      raise ValueError(
          'model_fn does not take labels, but input_fn returns labels.')
    if 'mode' in model_fn_args:
      kwargs['mode'] = self._ctx.mode
    if 'config' in model_fn_args:
      kwargs['config'] = config
    if 'params' in model_fn_args:
      kwargs['params'] = params

    if 'params' not in model_fn_args:
      raise ValueError(
          'model_fn ({}) does not include params argument, '
          'required by TPUEstimator to pass batch size as '
          'params[\'batch_size\']'.format(self._model_fn))

    batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
    if batch_size_for_model_fn is not None:
      # Overwrite the reserved `batch_size` key with the per-shard (or
      # per-host) batch size computed by the context.
      params[_BATCH_SIZE_KEY] = batch_size_for_model_fn

    estimator_spec = self._model_fn(features=features, **kwargs)
    if (self._ctx.is_running_on_cpu() and
        isinstance(estimator_spec, TPUEstimatorSpec)):
      # The estimator_spec will be passed to `Estimator` directly, which
      # expects type `EstimatorSpec`.
      return estimator_spec.as_estimator_spec()
    else:
      return estimator_spec

  def _verify_estimator_spec(self, estimator_spec):
    """Validates the estimator_spec."""
    if isinstance(estimator_spec, TPUEstimatorSpec):
      return estimator_spec

    err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
    if estimator_spec.training_chief_hooks:
      raise ValueError(err_msg.format('training_chief_hooks'))
    if estimator_spec.training_hooks:
      raise ValueError(err_msg.format('training_hooks'))
    if estimator_spec.evaluation_hooks:
      raise ValueError(err_msg.format('evaluation_hooks'))
    if estimator_spec.scaffold:
      # Scaffold on a plain EstimatorSpec is ignored (not rejected) to stay
      # backward compatible; warn the user instead.
      logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
                      'Please use TPUEstimatorSpec.')
    return estimator_spec
class _EvalMetrics(object):
  """Class wraps TPUEstimator.eval_metrics."""

  def __init__(self, ctx):
    self._ctx = ctx
    # Populated by `record`; the dtypes/shapes are needed later to build the
    # matching outfeed dequeue ops on the CPU side.
    self._metric_fn = None
    self._is_dict = False
    self._tensor_keys = []
    self._tensors = []
    self._tensor_dtypes = []
    self._tensor_shapes = []
    self._recorded = False

  @staticmethod
  def validate(eval_metrics):
    """Validates the `eval_metrics` in `TPUEstimatorSpec`."""

    if not isinstance(eval_metrics, (tuple, list)):
      raise ValueError('eval_metrics should be tuple or list')
    if len(eval_metrics) != 2:
      raise ValueError('eval_metrics should have two elements.')
    if not callable(eval_metrics[0]):
      raise TypeError('eval_metrics[0] should be callable.')
    if not isinstance(eval_metrics[1], (tuple, list, dict)):
      raise ValueError('eval_metrics[1] should be tuple or list, or dict.')

    if isinstance(eval_metrics[1], (tuple, list)):
      # For positional tensors, the arity must match the metric_fn signature.
      fn_args = util.fn_args(eval_metrics[0])
      if len(eval_metrics[1]) != len(fn_args):
        raise RuntimeError(
            'In TPUEstimatorSpec.eval_metrics, length of tensors does not '
            'match method args of metric_fn.')

  @staticmethod
  def to_metric_metric_ops_for_cpu(eval_metrics):
    """Converts `TPUEstimatorSpec.eval_metrics` to `eval_metric_ops` for CPU."""
    if not eval_metrics:
      return None

    _EvalMetrics.validate(eval_metrics)

    metric_fn, tensors = eval_metrics

    if isinstance(tensors, (tuple, list)):
      return metric_fn(*tensors)
    else:
      # Must be dict.
      try:
        return metric_fn(**tensors)
      except TypeError as e:
        logging.warning(
            'Exception while calling metric_fn for evalution: %s. '
            'It is likely the tensors (eval_metrics[1]) do not match the '
            'metric_fn arguments', e)
        raise e

  def record(self, spec):
    """Records the eval_metrics structure in `spec`."""
    if self._recorded:
      raise RuntimeError('Eval metrics have been recorded already.')

    self._metric_fn, tensor_list_or_dict = spec.eval_metrics

    if isinstance(tensor_list_or_dict, dict):
      self._is_dict = True
      # Remember the key order explicitly alongside the tensors so the
      # outfeed dequeue side can rebuild the dict deterministically.
      for (key, tensor) in six.iteritems(tensor_list_or_dict):
        self._tensor_keys.append(key)
        self._tensors.append(tensor)
        self._tensor_dtypes.append(tensor.dtype)
        self._tensor_shapes.append(tensor.shape)
    else:
      # List or tuple.
      self._is_dict = False
      self._tensors = tensor_list_or_dict
      for tensor in tensor_list_or_dict:
        self._tensor_dtypes.append(tensor.dtype)
        self._tensor_shapes.append(tensor.shape)
    self._recorded = True

  @property
  def outfeed_tensors(self):
    # The tensors to enqueue on the TPU outfeed, in recorded order.
    if not self._recorded:
      raise RuntimeError('Eval metrics have not been recorded yet')
    return self._tensors

  def to_metric_metric_ops_for_tpu(self, dummy_update_op):
    """Creates the eval_metric_ops now based on the TPU outfeed.

    `eval_metric_ops` is defined in `EstimatorSpec`. From all shards, tensors
    are dequeued from outfeed and then concatenated (along batch size
    dimension) to form global-like tensors. All global-like tensors are
    passed to the metric fn.

    Args:
      dummy_update_op: A dummy update op.

    Returns:
      A tuple of (`eval_metric_ops` and `update_ops`), where `update_ops`
      should be invoked in Outfeed dequeue thread, which drive the outfeed
      dequeue and update the state of metrics.

    Raises:
      RuntimeError: If outfeed tensor is scalar.
    """

    num_cores = self._ctx.num_cores

    # For each i, dequeue_ops[i] is a list containing the tensors from all
    # shards. This list is concatenated later.
    dequeue_ops = []
    for i in xrange(len(self._tensors)):
      dequeue_ops.append([])

    # Outfeed ops execute on each JF node.
    tpu_device_placement_fn = self._ctx.tpu_device_placement_function
    for i in xrange(num_cores):
      with ops.device(tpu_device_placement_fn(i)):
        outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
            dtypes=self._tensor_dtypes, shapes=self._tensor_shapes)
        for j, item in enumerate(outfeed_tensors):
          dequeue_ops[j].append(item)

    # It is assumed evaluation always happens on single host TPU system. So,
    # place all ops on tpu host if possible.
    with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
      for i, item in enumerate(dequeue_ops):
        if dequeue_ops[i][0].shape.ndims == 0:
          raise RuntimeError(
              'All tensors outfed from TPU should preseve batch size '
              'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
        # TODO(xiejw): Allow users to specify the axis for batch size
        # dimension.
        dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)

      if self._is_dict:
        dequeue_ops = dict(zip(self._tensor_keys, dequeue_ops))
        try:
          eval_metric_ops = self._metric_fn(**dequeue_ops)
        except TypeError as e:
          logging.warning(
              'Exception while calling metric_fn for evalution: %s. '
              'It is likely the tensors (eval_metrics[1]) do not match the '
              'metric_fn arguments', e)
          raise e
      else:
        eval_metric_ops = self._metric_fn(*dequeue_ops)

      # Replace the real update ops (which must run in the outfeed dequeue
      # thread) with the dummy op Estimator expects to call per metric.
      eval_update_ops = []
      for k, v in eval_metric_ops.items():
        eval_metric_ops[k] = (v[0], dummy_update_op)
        eval_update_ops.append(v[1])

      return eval_metric_ops, eval_update_ops
class TPUEstimator(estimator_lib.Estimator):
  """Estimator with TPU support.

  TPUEstimator handles many of the details of running on TPU devices, such as
  replicating inputs and models for each core, and returning to host
  periodically to run hooks.

  If `use_tpu` is false, all training, evaluation, and predict are executed on
  CPU.

  For training, TPUEstimator transforms a global batch size in params to a
  per-shard batch size when calling the `input_fn` and `model_fn`. Users
  should specify `train_batch_size` in constructor, and then get the batch
  size for each shard in `input_fn` and `model_fn` by `params['batch_size']`.
  If `TPUConfig.per_host_input_for_training` is `True`, `input_fn` is invoked
  per host rather than per core. In this case, a global batch size is
  transformed to a per-host batch size in params for `input_fn`, but
  `model_fn` still gets per-core batch size.

  For evaluation, if `eval_batch_size` is None, it is executed on CPU, even if
  `use_tpu` is `True`. If `eval_batch_size` is not `None`, it is executed on
  TPU, which is an experimental feature. In this case, `model_fn` should
  return `TPUEstimatorSpec` instead of `EstimatorSpec`, which expects the
  `eval_metrics` for TPU evaluation.

  `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`,
  where `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s.
  (See `TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and
  returns a dict from metric string name to the result of calling a metric
  function, namely a `(metric_tensor, update_op)` tuple.

  Current limitations:

  1. TPU evaluation only works on single host.
  2. `input_fn` for evaluation should not throw OutOfRange error for all
     evaluation steps and all batches should have the same size.

  Example (MNIST):
  ```
  # The metric Fn which runs on CPU.
  def metric_fn(labels, logits):
    predictions = tf.argmax(logits, 1)
    return {
        'accuracy': tf.metrics.precision(
            labels=labels, predictions=predictions),
    }

  # Your model Fn which runs on TPU (eval_metrics is list in this example)
  def model_fn(features, labels, mode, config, params):
    ...
    logits = ...

    if mode == tf.estimator.ModeKeys.EVAL:
      return tpu_estimator.TPUEstimatorSpec(
          mode=mode,
          loss=loss,
          eval_metrics=(metric_fn, [labels, logits]))

  # or specify the eval_metrics tensors as dict.
  def model_fn(features, labels, mode, config, params):
    ...
    final_layer_output = ...

    if mode == tf.estimator.ModeKeys.EVAL:
      return tpu_estimator.TPUEstimatorSpec(
          mode=mode,
          loss=loss,
          eval_metrics=(metric_fn, {
              'labels': labels,
              'logits': final_layer_output,
          }))
  ```

  Predict support on TPU is not yet implemented. So, `predict` and
  `export_savedmodel` are executed on CPU, even if `use_tpu` is true.
  """

  def __init__(self,
               model_fn=None,
               model_dir=None,
               config=None,
               params=None,
               use_tpu=True,
               train_batch_size=None,
               eval_batch_size=None,
               batch_axis=None):
    """Constructs an `TPUEstimator` instance.

    Args:
      model_fn: Model function as required by `Estimator`. For training, the
        returned `EstimatorSpec` cannot have hooks as it is not supported in
        `TPUEstimator`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model. If `None`, the
        model_dir in `config` will be used if set. If both are set, they must
        be same. If both are `None`, a temporary directory will be used.
      config: An `tpu_config.RunConfig` configuration object. Cannot be
        `None`.
      params: An optional `dict` of hyper parameters that will be passed into
        `input_fn` and `model_fn`. Keys are names of parameters, values are
        basic python types. There are reserved keys for `TPUEstimator`,
        including 'batch_size'.
      use_tpu: A bool indicating whether TPU support is enabled. Currently,
        - TPU training respects this bit.
        - If true, see `eval_batch_size` for evaluate support.
        - Predict still happens on CPU.
      train_batch_size: An int representing the global training batch size.
        TPUEstimator transforms this global batch size to a per-shard batch
        size, as params['batch_size'], when calling `input_fn` and
        `model_fn`. Cannot be `None` if `use_tpu` is `True`. Must be
        divisible by `config.tpu_config.num_shards`.
      eval_batch_size: An int representing the global evaluation batch size.
        Currently, if `None`, evaluation is still executed on CPU (even when
        `use_tpu` is True). In near future, `use_tpu` will be the only option
        to switch between TPU/CPU evaluation.
      batch_axis: A python tuple of int values describing how each tensor
        produced by the Estimator `input_fn` should be split across the TPU
        compute shards. For example, if your input_fn produced (images,
        labels) where the images tensor is in `HWCN` format, your shard
        dimensions would be [3, 0], where 3 corresponds to the `N` dimension
        of your images Tensor, and 0 corresponds to the dimension along which
        to split the labels to match up with the corresponding images. If
        None is supplied, and per_host_input_for_training is True, batches
        will be sharded based on the major dimension. If
        tpu_config.per_host_input_for_training is False, batch_axis is
        ignored.

    Raises:
      ValueError: `params` has reserved keys already.
    """
    if config is None or not isinstance(config, tpu_config.RunConfig):
      raise ValueError(
          '`config` must be provided with type `tpu_config.RunConfig`')

    if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
      raise ValueError(
          '{} are reserved keys but existed in params {}.'.format(
              _RESERVED_PARAMS_KEYS, params))

    if use_tpu:
      if train_batch_size is None:
        raise ValueError('`train_batch_size` cannot be `None`')
      if not isinstance(train_batch_size, int):
        raise ValueError('`train_batch_size` must be an int')
      if train_batch_size < 1:
        raise ValueError('`train_batch_size` must be positive')

      # The specified batch size is the batch size for the entire
      # computation. The input_fn and model_fn are called per-shard, so we
      # want to calculate the per-shard batch size and pass that.
      if train_batch_size % config.tpu_config.num_shards != 0:
        raise ValueError(
            'train batch size {} must be divisible by number of shards {}'
            .format(train_batch_size, config.tpu_config.num_shards))

      if eval_batch_size is not None:
        # TPU evaluation is restricted to a single host (at most 8 shards).
        if config.tpu_config.num_shards > 8:
          raise NotImplementedError(
              'TPU evaluation is only supported with one host.')

        if eval_batch_size % config.tpu_config.num_shards != 0:
          raise ValueError(
              'eval batch size {} must be divisible by number of shards {}'
              .format(eval_batch_size, config.tpu_config.num_shards))

    # Verifies the model_fn signature according to Estimator framework.
    estimator_lib._verify_model_fn_args(model_fn, params)  # pylint: disable=protected-access
    # We cannot store config and params in this constructor as parent
    # constructor might change them, such as assigning a temp dir for
    # config.model_dir.
    model_function = self._augment_model_fn(model_fn, batch_axis)

    # Passing non-None params as wrapped model_fn has it.
    params = params or {}
    super(TPUEstimator, self).__init__(
        model_fn=model_function,
        model_dir=model_dir,
        config=config,
        params=params)
    self._iterations_per_training_loop = (
        self._config.tpu_config.iterations_per_loop)

    # All properties passed to _TPUContext are immutable.
    self._ctx = _TPUContext(self._config, train_batch_size, eval_batch_size,
                            use_tpu)

  def _create_global_step(self, graph):
    """Creates a global step suitable for TPUs.

    Args:
      graph: The graph in which to create the global step.

    Returns:
      A global step `Tensor`.

    Raises:
      ValueError: if the global step tensor is already defined.
    """
    # Delegates to the module-level `_create_global_step` helper (defined
    # elsewhere in this file), overriding Estimator's default.
    return _create_global_step(graph)

  def _convert_train_steps_to_hooks(self, steps, max_steps):
    with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
      if ctx.is_running_on_cpu():
        # CPU path: defer entirely to the base Estimator behavior.
        return super(TPUEstimator, self)._convert_train_steps_to_hooks(
            steps, max_steps)

    # On TPU.
    if steps is None and max_steps is None:
      raise ValueError(
          'For TPU training, one of `steps` or `max_steps` must be set. '
          'Cannot be both `None`.')

    # Estimator.train has explicit positiveness check.
    if steps is not None:
      util_lib.check_positive_integer(steps, 'Train steps')
    if max_steps is not None:
      util_lib.check_positive_integer(max_steps, 'Train max_steps')

    return [_TPUStopAtStepHook(self._iterations_per_training_loop,
                               steps, max_steps)]

  def _convert_eval_steps_to_hooks(self, steps):
    with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
      if ctx.is_running_on_cpu():
        # CPU path: defer entirely to the base Estimator behavior.
        return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)

    if steps is None:
      raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')

    util_lib.check_positive_integer(steps, 'Eval steps')

    hooks = []
    hooks.append(evaluation._StopAfterNEvalsHook(  # pylint: disable=protected-access
        num_evals=steps))
    hooks.append(_SetEvalIterationsHook(steps))
    return hooks

  def _call_input_fn(self, input_fn, mode):
    """Calls the input function.

    Args:
      input_fn: The input function.
      mode: ModeKeys

    Returns:
      Either features or (features, labels) where features and labels are:
        features - `Tensor` or dictionary of string feature name to `Tensor`.
        labels - `Tensor` or dictionary of `Tensor` with labels.

    Raises:
      ValueError: if input_fn takes invalid arguments or does not have
        `params`.
    """
    input_fn_args = util.fn_args(input_fn)
    config = self.config  # a deep copy.
    kwargs = {}
    if 'params' in input_fn_args:
      kwargs['params'] = self.params  # a deep copy.
    else:
      raise ValueError('input_fn ({}) does not include params argument, '
                       'required by TPUEstimator to pass batch size as '
                       'params["batch_size"]'.format(input_fn))
    if 'config' in input_fn_args:
      kwargs['config'] = config

    with self._ctx.with_mode(mode) as ctx:
      # Setting the batch size in params first. This helps user to have same
      # input_fn for use_tpu=True/False.
      batch_size_for_input_fn = ctx.batch_size_for_input_fn
      if batch_size_for_input_fn is not None:
        kwargs['params'][_BATCH_SIZE_KEY] = batch_size_for_input_fn

      if ctx.is_running_on_cpu():
        with ops.device('/device:CPU:0'):
          return input_fn(**kwargs)

      # For TPU computation, input_fn should be invoked in a tf.while_loop
      # for performance. While constructing the tf.while_loop, the structure
      # of inputs returned by the `input_fn` needs to be recorded. The
      # structure includes whether features or labels is dict or single
      # Tensor, dict keys, tensor shapes, and dtypes. The recorded structure
      # is used to create the infeed dequeue ops, which must be wrapped and
      # passed as a Fn, called inside the TPU computation, as the TPU
      # computation is wrapped inside a tf.while_loop also. So, we either
      # pass input_fn to model_fn or pass dequeue_fn to model_fn. Here,
      # `input_fn` is passed directly as `features` in `model_fn` signature.
      def _input_fn():
        return input_fn(**kwargs)

      return _input_fn

  def _augment_model_fn(self, model_fn, batch_axis):
    """Returns a new model_fn, which wraps the TPU support."""

    def _model_fn(features, labels, mode, config, params):
      """A Estimator `model_fn` for TPUEstimator."""
      with self._ctx.with_mode(mode) as ctx:
        model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)

        # TODO(jhseu): Move to PREDICT to TPU.
        if ctx.is_running_on_cpu():
          logging.info('Running %s on CPU', mode)
          return model_fn_wrapper.call_without_tpu(features, labels)

        assert labels is None, '`labels` passed to `model_fn` must be `None`.'
        # TPUEstimator._call_input_fn passes `input_fn` as features to here.
        assert callable(features), '`input_fn` is not callable.'
        input_fn = features

        input_holders = _InputPipeline(input_fn, batch_axis, ctx)
        enqueue_ops, dequeue_fn = (
            input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())

        if mode == model_fn_lib.ModeKeys.TRAIN:
          loss, scaffold = (
              _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
          hooks = [
              TPUInfeedOutfeedSessionHook(ctx, enqueue_ops),
              training.LoggingTensorHook(
                  {'loss': array_ops.identity(loss),
                   'step': training.get_global_step()},
                  every_n_secs=30)
          ]
          summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
          # Make sure variables are synced back from TPU after the loss is
          # computed, before the returned train_op completes.
          with ops.control_dependencies([loss]):
            update_ops = _sync_variables_ops()

          # Validate the TPU training graph to catch basic errors
          _validate_tpu_training_graph()

          return model_fn_lib.EstimatorSpec(
              mode,
              loss=loss,
              training_hooks=hooks,
              train_op=control_flow_ops.group(*update_ops),
              scaffold=scaffold)

        # Now eval.
        total_loss, eval_metric_ops, scaffold = _eval_on_tpu_system(
            ctx, model_fn_wrapper, dequeue_fn)
        iterations_per_loop_var = _create_or_get_iterations_per_loop()
        mean_loss = math_ops.div(
            total_loss,
            math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))

        # Creates a dummy metric update_op for all metrics. Estimator
        # expects all metrics in eval_metric_ops have update_op and calls
        # them one by one. The real metric update_ops are invoked in a
        # separated thread. So, here give Estimator the dummy op for all
        # metrics.
        with ops.control_dependencies([mean_loss]):
          # After TPU evaluation computation is done (the mean_loss tensor),
          # reads all variables back from TPU and updates the eval step
          # counter properly
          internal_ops_to_run = _sync_variables_ops()
          internal_ops_to_run.append(
              _increase_eval_step_op(iterations_per_loop_var))
          with ops.control_dependencies(internal_ops_to_run):
            dummy_update_op = control_flow_ops.no_op()

        eval_metric_ops, eval_update_ops = (
            eval_metric_ops.to_metric_metric_ops_for_tpu(dummy_update_op))
        hooks = [
            TPUInfeedOutfeedSessionHook(ctx, enqueue_ops, eval_update_ops),
        ]

        return model_fn_lib.EstimatorSpec(
            mode,
            loss=mean_loss,
            evaluation_hooks=hooks,
            eval_metric_ops=eval_metric_ops,
            scaffold=scaffold)

    return _model_fn
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  iterations_per_loop_var = _create_or_get_iterations_per_loop()
  single_tpu_eval_step, eval_metric_ops, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))

  def multi_tpu_eval_steps_on_single_shard():
    # Accumulate per-step losses starting from zero.
    return training_loop.repeat(iterations_per_loop_var,
                                single_tpu_eval_step,
                                [_ZERO_LOSS],
                                name='loop')

  (loss,) = tpu.shard(multi_tpu_eval_steps_on_single_shard,
                      inputs=[],
                      num_shards=ctx.num_cores,
                      outputs_from_all_shards=False)

  scaffold = _get_scaffold(captured_scaffold_fn)
  return loss, eval_metric_ops, scaffold
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards.

  Args:
    ctx: A `_TPUContext` providing `num_cores`.
    model_fn_wrapper: A `_ModelFnWrapper` around the user `model_fn`.
    dequeue_fn: Function retrieving (features, labels) from the TPU infeed.

  Returns:
    A tuple of (loss, scaffold), where `loss` is the loss from shard 0 and
    `scaffold` is the Scaffold produced by the captured scaffold_fn (or
    None).
  """
  num_cores = ctx.num_cores
  iterations_per_loop_var = _create_or_get_iterations_per_loop()
  single_tpu_train_step, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))

  def multi_tpu_train_steps_on_single_shard():
    # NOTE: the name must be a `str` (not `bytes`): a bytes literal here is
    # rejected as a name scope under Python 3, and the eval counterpart
    # (_eval_on_tpu_system) already uses a plain string.
    return training_loop.repeat(
        iterations_per_loop_var,
        single_tpu_train_step,
        [_INITIAL_LOSS],
        name='loop')

  (loss,) = tpu.shard(multi_tpu_train_steps_on_single_shard,
                      inputs=[],
                      num_shards=num_cores,
                      outputs_from_all_shards=False)

  scaffold = _get_scaffold(captured_scaffold_fn)
  return loss, scaffold
def _wrap_computation_in_while_loop(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def computation(i):
    # Re-create the enqueue ops on each iteration, then advance the counter
    # once they have run.
    with ops.control_dependencies(op_fn()):
      return i + 1

  iterations_per_loop_var = _create_or_get_iterations_per_loop()
  with ops.device(device):
    iterations = array_ops.identity(iterations_per_loop_var)
    # parallel_iterations=1 effectively turns off parallel execution inside
    # the while_loop, so enqueues happen strictly in order.
    return control_flow_ops.while_loop(
        lambda i: i < iterations,
        computation, [constant_op.constant(0)],
        parallel_iterations=1)
def _validate_tpu_training_graph():
  """Validate graph before running distributed training.

  Raises:
    ValueError: If the graph seems invalid for running on device
  """
  # A CrossReplicaSum op should be introduced by wrapping the optimizer in
  # CrossShardOptimizer; its absence means gradients would never be
  # aggregated across shards.
  graph_operations = ops.get_default_graph().get_operations()
  if not any(o.type == _CROSS_REPLICA_SUM_OP for o in graph_operations):
    raise ValueError(
        'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can be captured only. Please file bug .')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug .')
return self._object
def _get_scaffold(captured_scaffold_fn):
  """Retrieves the Scaffold from `captured_scaffold_fn`."""
  # Run scaffold_fn inside a capturing context so any op it creates that
  # depends on a tensor inside the TPU computation raises a clear error.
  with _CapturingContext(message='Inside scaffold_fn'):
    scaffold_fn = captured_scaffold_fn.get()
    if not scaffold_fn:
      scaffold = None
    else:
      scaffold = scaffold_fn()
      if scaffold is None:
        raise ValueError(
            'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')

  if scaffold:
    # Scaffold.finalize may create ops lazily; guard it the same way.
    wrapped_finalize = scaffold.finalize

    def _finalize():
      with _CapturingContext('Inside Scaffold.finalize'):
        wrapped_finalize()

    scaffold.finalize = _finalize
  return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
  """Tracks references to Tensors defined in TPU replication."""

  def __init__(self, message):
    # `message` is prefixed to the error raised when an op illegally
    # references a tensor produced inside the TPU computation.
    control_flow_ops.ControlFlowContext.__init__(self)
    self._message = message

  def AddOp(self, op):  # pylint: disable=invalid-name
    # Called by the graph for every op created while this context is active;
    # rejects ops consuming outputs of the TPU-replicated computation.
    for c in op.inputs:
      if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr:  # pylint: disable=protected-access
        raise ValueError(
            '{}: Op {} depends on TPU computation {}, '
            'which is not allowed.'.format(self._message, op, c))

  def __enter__(self):
    # Install self as the graph's current control flow context so AddOp is
    # invoked for every op created inside the `with` block.
    # pylint: disable=protected-access
    self._g = ops.get_default_graph()
    self._old = self._g._get_control_flow_context()
    self._g._set_control_flow_context(self)
    # pylint: enable=protected-access

  def __exit__(self, _, __, ___):  # pylint: disable=invalid-name
    # Restore the previously active control flow context.
    self._g._set_control_flow_context(self._old)  # pylint: disable=protected-access
Minor change for Python 3 compatibility for TPU Estimators.
PiperOrigin-RevId: 180576479
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from contextlib import contextmanager
import copy
import threading
import time
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
# Sentinel loss values; presumably _INITIAL_LOSS seeds the training loop
# before a real loss is computed (large so it cannot be mistaken for a
# converged value) — TODO confirm against the training-loop builder.
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
# Name used both as the variable scope and the graph collection for the
# TPUEstimator-internal `iterations_per_loop` variable (see
# `_create_or_get_iterations_per_loop`).
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
# Reserved key in the `params` dict carrying the per-shard batch size.
_BATCH_SIZE_KEY = 'batch_size'
# Op type produced by CrossShardOptimizer; its presence in the graph is used
# to verify that training is TPU-aware.
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY]
# When True, the input enqueue is wrapped in a device-side while loop rather
# than being driven iteration-by-iteration from the infeed thread.
# TODO(b/65703635): Flip the value and remove all dead code.
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
def _create_global_step(graph):
  """Creates the global step variable in `graph`.

  Args:
    graph: The `Graph` to create the variable in; defaults to the current
      default graph when None.

  Returns:
    The newly created global step resource variable.

  Raises:
    ValueError: If a global step already exists in `graph`.
  """
  graph = graph or ops.get_default_graph()
  if training.get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        trainable=False,
        use_resource=True,
        initializer=init_ops.zeros_initializer(),
        collections=[ops.GraphKeys.GLOBAL_VARIABLES,
                     ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
  """Returns the singleton `iterations_per_loop` variable, creating it once."""
  graph = ops.get_default_graph()
  collected = graph.get_collection(_TPU_ESTIMATOR)
  if len(collected) > 1:
    raise RuntimeError('Multiple iterations_per_loop_var in collection.')
  if collected:
    return collected[0]

  # Not created yet: build it next to the global step so both live on the
  # same device, under an AUTO_REUSE scope.
  with ops.colocate_with(training_util.get_global_step()):
    with variable_scope.variable_scope(_TPU_ESTIMATOR,
                                       reuse=variable_scope.AUTO_REUSE):
      return variable_scope.get_variable(
          _ITERATIONS_PER_LOOP_VAR,
          shape=[],
          dtype=dtypes.int32,
          trainable=False,
          use_resource=True,
          initializer=init_ops.zeros_initializer(),
          collections=[_TPU_ESTIMATOR])
def _sync_variables_ops():
  """Returns ops which read trainable variables back to the host.

  Gets the variables back from TPU nodes. This means the variables updated
  by TPU will now be *synced* to host memory; the read is wrapped in a
  NaN check so a diverged variable fails loudly.
  """
  sync_ops = []
  for var in variables.trainable_variables():
    checked_value = array_ops.check_numerics(
        var.read_value(), 'Gradient for %s is NaN' % var.name)
    sync_ops.append(checked_value.op)
  return sync_ops
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps runnining in TPU
      system before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator evaluate increases 1 by default. So, we increase the difference.
  delta = math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype)
  return state_ops.assign_add(eval_step, delta, use_locking=True)
# Job name assumed for the TPU worker when none can be inferred from the
# cluster definition (see `_TPUContext.master_job`).
_DEFAULT_JOB_NAME = 'tpu_worker'
# Job name conventionally used by the coordinator; excluded when inferring
# the TPU worker job from a two-job cluster.
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
# Master strings that denote an in-process (local) session target.
_LOCAL_MASTERS = ('', 'local')
class _TPUContext(object):
"""A context holds immutable states of TPU computation.
This immutable object holds TPUEstimator config, train/eval batch size, and
`TPUEstimator.use_tpu`, which is expected to be passed around. It also
provides utility functions, basded on the current state, to determine other
information commonly required by TPU computation, such as TPU device names,
TPU hosts, shard batch size, etc.
N.B. As `mode` is not immutable state in Estimator, but essential to
distinguish between TPU training and evaluation, a common usage for
_TPUContext with `mode` is as follows:
```
with _ctx.with_mode(mode) as ctx:
if ctx.is_running_on_cpu():
...
```
"""
def __init__(self, config, train_batch_size, eval_batch_size, use_tpu):
self._config = config
self._train_batch_size = train_batch_size
self._eval_batch_size = eval_batch_size
self._use_tpu = use_tpu
self._num_shards_or_none = self._config.tpu_config.num_shards
self._mode = None
def _assert_mode(self):
if self._mode is None:
raise RuntimeError(
'`mode` needs to be set via contextmanager `with_mode`.')
return self._mode
@property
def num_of_cores_per_host(self):
num_cores = self.num_cores
return min(num_cores, 8)
@contextmanager
def with_mode(self, mode):
new_ctx = copy.copy(self) # Shallow copy is enough.
new_ctx._mode = mode # pylint: disable=protected-access
yield new_ctx
@property
def mode(self):
return self._assert_mode()
@property
def num_cores(self):
# TODO(xiejw): Adds lazy num_shards initialization.
return self._num_shards_or_none
@property
def num_hosts(self):
return self.num_cores // self.num_of_cores_per_host
@property
def config(self):
return self._config
def is_input_sharded_per_core(self):
"""Return true if input_fn is invoked per-core (other than per-host)."""
self._assert_mode()
return (self._mode == model_fn_lib.ModeKeys.TRAIN and
not self._config.tpu_config.per_host_input_for_training)
def is_running_on_cpu(self):
"""Determines whether the input_fn and model_fn should be invoked on CPU."""
mode = self._assert_mode()
return ((not self._use_tpu) or mode == model_fn_lib.ModeKeys.PREDICT or
(mode == model_fn_lib.ModeKeys.EVAL and
self._eval_batch_size is None))
@property
def batch_size_for_input_fn(self):
"""Returns the shard batch size for `input_fn`."""
mode = self._assert_mode()
# Special case for eval.
if mode == model_fn_lib.ModeKeys.EVAL and self._eval_batch_size is None:
return None
if self.is_running_on_cpu():
if mode == model_fn_lib.ModeKeys.TRAIN:
return self._train_batch_size
if mode == model_fn_lib.ModeKeys.EVAL:
return self._eval_batch_size
return None
global_batch_size = (self._train_batch_size if
mode == model_fn_lib.ModeKeys.TRAIN
else self._eval_batch_size)
# On TPU
if self.is_input_sharded_per_core():
return global_batch_size // self.num_cores
else:
return global_batch_size // self.num_hosts
@property
def batch_size_for_model_fn(self):
"""Returns the shard batch size for `model_fn`."""
mode = self._assert_mode()
# Special case for eval.
if mode == model_fn_lib.ModeKeys.EVAL and self._eval_batch_size is None:
return None
if self.is_running_on_cpu():
if mode == model_fn_lib.ModeKeys.TRAIN:
return self._train_batch_size
if mode == model_fn_lib.ModeKeys.EVAL:
return self._eval_batch_size
return None
# On TPU. always sharded per core.
if mode == model_fn_lib.ModeKeys.TRAIN:
return self._train_batch_size // self.num_cores
else:
return self._eval_batch_size // self.num_cores
@property
def master_job(self):
"""Returns the job name to use to place TPU computations on.
Returns:
A string containing the job name, or None if no job should be specified.
Raises:
ValueError: If the user needs to specify a tpu_job_name, because we are
unable to infer the job name automatically, or if the user-specified job
names are inappropriate.
"""
run_config = self._config
# If the user specifies the tpu_job_name, use that.
if run_config.tpu_config.tpu_job_name:
return run_config.tpu_config.tpu_job_name
# The tpu job is determined by the run_config. Right now, this method is
# required as tpu_config is not part of the RunConfig.
mode = self._assert_mode()
master = (run_config.evaluation_master if mode == model_fn_lib.ModeKeys.EVAL
else run_config.master)
if master in _LOCAL_MASTERS:
return None
if (not run_config.session_config or
not run_config.session_config.cluster_def.job):
return _DEFAULT_JOB_NAME
cluster_def = run_config.session_config.cluster_def
job_names = set([job.name for job in cluster_def.job])
if _DEFAULT_JOB_NAME in job_names:
# b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
raise ValueError('Currently, tpu_worker is not an allowed job name.')
if len(job_names) == 1:
return cluster_def.job[0].name
if len(job_names) == 2:
if _DEFAULT_COORDINATOR_JOB_NAME in job_names:
job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)
return job_names.pop()
# TODO(b/67716447): Include more sophisticated heuristics.
raise ValueError(
'Could not infer TPU job name. Please specify a tpu_job_name as part '
'of your TPUConfig.')
@property
def tpu_host_placement_function(self):
"""Returns the TPU host place function."""
master = self.master_job
def _placement_function(_sentinal=None, core_id=None, host_id=None): # pylint: disable=invalid-name
assert _sentinal is None
if core_id is not None and host_id is not None:
raise RuntimeError(
'core_id and host_id can have only one non-None value.')
if master is None:
return '/replica:0/task:0/device:CPU:0'
else:
# This assumes that if using more than 8 shards,
# the job configuration varies 'task'.
if core_id is not None:
host_id = core_id / 8
return '/job:%s/task:%d/device:CPU:0' % (master, host_id)
return _placement_function
@property
def tpu_device_placement_function(self):
master = self.master_job
job_device = '' if master is None else ('/job:%s' % master)
def _placement_function(i):
return '%s/task:%d/device:TPU:%d' % (job_device, i / 8, i % 8)
return _placement_function
@property
def tpu_ordinal_function(self):
"""Returns the TPU ordinal fn."""
def _tpu_ordinal_function(index):
"""Return the TPU ordinal associated with a shard.
Required because the enqueue ops are placed on CPU.
Args:
index: the shard index
Returns:
The ordinal of the TPU device the shard's infeed should be placed on.
"""
return index % 8
return _tpu_ordinal_function
class _SIGNAL(object):
  """Signal used to control the thread of infeed/outfeed.

  All preserved signals must be negative numbers. Positive numbers are used to
  indicate the number of iterations for next training/evaluation loop.
  """
  # Ask the worker thread to process one more batch.
  NEXT_BATCH = -1
  # Ask the worker thread to exit.
  STOP = -2
class TPUEstimatorSpec(collections.namedtuple('TPUEstimatorSpec', [
    'mode',
    'predictions',
    'loss',
    'train_op',
    'eval_metrics',
    'export_outputs',
    'scaffold_fn'])):
  """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.

  See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
  `export_outputs`.

  TPU evaluation expects a slightly different signature from the
  ${tf.estimator.Estimator}: instead of the dict `EstimatorSpec.eval_metric_ops`
  expects, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and
  `tensors`. `metric_fn` runs on CPU to generate metrics, while `tensors` are
  the `Tensor`s transferred from the TPU system to the CPU host and passed to
  `metric_fn`. `tensors` can be a list of `Tensor`s or a dict of names to
  `Tensor`s, and usually specifies the model logits. All tensors must be
  batch-major, i.e., the batch size is the first dimension. Once all tensors
  are available at the CPU host from all shards, they are concatenated (on
  CPU) and passed to `metric_fn` — as positional arguments if `tensors` is a
  list, or as keyword arguments if it is a dict. `metric_fn` takes the
  `tensors` and returns a dict from metric string name to the result of
  calling a metric function, namely a `(metric_tensor, update_op)` tuple. See
  `TPUEstimator` for a MNIST example of how to specify `eval_metrics`.

  `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
  function should not capture any Tensors in `model_fn`.
  """

  def __new__(cls,
              mode,
              predictions=None,
              loss=None,
              train_op=None,
              eval_metrics=None,
              export_outputs=None,
              scaffold_fn=None):
    """Creates a validated `TPUEstimatorSpec` instance."""
    if eval_metrics is not None:
      _EvalMetrics.validate(eval_metrics)

    return super(TPUEstimatorSpec, cls).__new__(
        cls,
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metrics=eval_metrics,
        export_outputs=export_outputs,
        scaffold_fn=scaffold_fn)

  def as_estimator_spec(self):
    """Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
    scaffold = self.scaffold_fn() if self.scaffold_fn else None
    return model_fn_lib.EstimatorSpec(
        mode=self.mode,
        predictions=self.predictions,
        loss=self.loss,
        train_op=self.train_op,
        eval_metric_ops=_EvalMetrics.to_metric_metric_ops_for_cpu(
            self.eval_metrics),
        export_outputs=self.export_outputs,
        scaffold=scaffold)
class _InfeedOutfeedThreadBaseController(object):
  """This wraps the infeed/outfeed thread and stops when Estimator finishes."""

  def __init__(self, thd):
    # The queue through which the main thread sends _SIGNAL values (or
    # positive iteration counts) to the worker thread.
    self._signal_queue = Queue.Queue()
    self._thd = thd
    # Daemonize before starting so the process can exit even if the worker
    # never sees a STOP signal.
    thd.daemon = True
    thd.start()

  def block_and_get_signal(self):
    """Blocks until a signal is available and returns it."""
    return self._signal_queue.get()

  def send_next_batch_signal(self, signal=_SIGNAL.NEXT_BATCH):
    """Posts `signal` (a _SIGNAL value or an iteration count) to the worker."""
    self._signal_queue.put(signal)

  def join(self):
    """Asks the worker thread to stop and waits for it to exit."""
    self.send_next_batch_signal(_SIGNAL.STOP)
    self._thd.join()
class _OutfeedThreadController(_InfeedOutfeedThreadBaseController):
  """This wraps the outfeed thread and stops when Estimator finishes."""

  def __init__(self, session, dequeue_ops):
    super(_OutfeedThreadController, self).__init__(
        threading.Thread(target=self._execute_dequeue_ops,
                         args=(session, dequeue_ops)))

  def _execute_dequeue_ops(self, session, dequeue_ops):
    """Thread body: drains the outfeed as directed by the signal queue."""
    loop_count = 0
    while True:
      signal = self.block_and_get_signal()
      if signal == _SIGNAL.STOP:
        logging.info('Stop outfeed thread.')
        return

      # Any non-STOP signal is the number of iterations to dequeue.
      for step in range(signal):
        logging.debug('Outfeed dequeue for iteration (%d, %d)',
                      loop_count, step)
        session.run(dequeue_ops)
      loop_count += 1

  def join(self):
    """Logs, then stops and joins the worker thread."""
    logging.info('Waiting for Outfeed Thread to exit.')
    super(_OutfeedThreadController, self).join()
class _InfeedThreadController(_InfeedOutfeedThreadBaseController):
  """This wraps the infeed thread and stops when Estimator finishes."""

  def __init__(self, session, enqueue_ops, initial_infeed_sleep_secs):
    super(_InfeedThreadController, self).__init__(
        threading.Thread(
            target=self._input_thread_fn_for_loading,
            args=(session, enqueue_ops, initial_infeed_sleep_secs)))

  def _input_thread_fn_for_loading(self, session, enqueue_ops,
                                   initial_infeed_sleep_secs):
    """Thread body: runs `enqueue_ops` as directed by the signal queue.

    Args:
      session: The `Session` used to run the enqueue ops.
      enqueue_ops: Ops that feed input data into the TPU infeed queue.
      initial_infeed_sleep_secs: Optional delay before the first enqueue.
    """
    count = 0
    if initial_infeed_sleep_secs:
      logging.info('Infeed thread sleeping for %d seconds.',
                   initial_infeed_sleep_secs)
      time.sleep(initial_infeed_sleep_secs)
      logging.info('Infeed thread starting after sleep')
    try:
      while True:
        signal = self._signal_queue.get()
        if signal == _SIGNAL.STOP:
          logging.info('Stop Infeed input thread.')
          return

        if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
          # Enqueue batches for next loop.
          session.run(enqueue_ops)
        else:
          # A positive signal carries the iteration count for the next loop.
          iterations = signal
          for i in range(iterations):
            logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
            session.run(enqueue_ops)
          count += 1
    except Exception:  # pylint: disable=broad-except
      # Close the session to avoid the main thread from hanging. If input
      # pipeline triggers any error, the infeed thread dies but the main thread
      # for TPU computation waits for the infeed enqueue forever. Close the
      # Session to cancel the main thread Session.run execution.
      #
      # However, sleep for 2 minutes before explicit closing to give some time
      # for the TPU compilation error, if any, propagating, from TPU to CPU
      # host. Compilation errors should be reported by the main thread so that
      # the program can be interrupted and users can take action. Due to a race
      # condition, the infeed thread might see an error first. Closing the
      # session here immediately would result in a session cancellation
      # exception in the main thread, instead of the expected compile error.
      # User code that depends on having the proper exception type will
      # therefore be confused.
      logging.error(
          'Failed running infeed, closing session.\n'
          'You may see an exception from your main session after this. '
          'Sleep for 2 minutes before close Session from infeed thread to '
          'allow the main thread returning an error first, if any.',
          exc_info=1
      )
      time.sleep(120)
      logging.error('Closing the failed session.')
      session.close()

  def join(self):
    """Logs, then stops and joins the worker thread."""
    logging.info('Waiting for Infeed Thread to exit.')
    super(_InfeedThreadController, self).join()
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
  """A Session hook setting up the TPU initialization, infeed, and outfeed.

  This hook does two major things:
  1. initialize and shutdown TPU system.
  2. launch and join the threads for infeed enqueue and (optional) outfeed
     dequeue.
  """

  def __init__(self, ctx, enqueue_ops, dequeue_ops=None):
    self._master_job = ctx.master_job
    self._enqueue_ops = enqueue_ops
    self._dequeue_ops = dequeue_ops
    # Optional delay (from TPUConfig) before the very first infeed enqueue.
    self._initial_infeed_sleep_secs = (
        ctx.config.tpu_config.initial_infeed_sleep_secs)

  def begin(self):
    """Builds the TPU init/shutdown ops while the graph is still mutable."""
    logging.info('TPU job name %s', self._master_job)
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
    self._init_op = [tpu.initialize_system(job=self._master_job)]
    self._finalize_op = [tpu.shutdown_system(job=self._master_job)]

  def after_create_session(self, session, coord):
    """Initializes the TPU system and starts the infeed/outfeed threads."""
    logging.info('Init TPU system')
    # TPU system initialization can be slow; cap the wait at 5 minutes.
    session.run(self._init_op,
                options=config_pb2.RunOptions(timeout_in_ms=5*60*1000))

    logging.info('Start infeed thread controller')
    self._infeed_thd_controller = _InfeedThreadController(
        session, self._enqueue_ops, self._initial_infeed_sleep_secs)

    if self._dequeue_ops is not None:
      logging.info('Start outfeed thread controller')
      self._outfeed_thd_controller = _OutfeedThreadController(
          session, self._dequeue_ops)

  def before_run(self, run_context):
    """Signals the worker threads how many iterations the next loop runs."""
    iterations = run_context.session.run(self._iterations_per_loop_var)

    logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
    self._infeed_thd_controller.send_next_batch_signal(iterations)

    if self._dequeue_ops is not None:
      # TODO(xiejw): Refactor the outfeed dequeue into tf.while_loop.
      logging.info(
          'Dequeue next (%d) batch(es) of data from outfeed.', iterations)
      self._outfeed_thd_controller.send_next_batch_signal(iterations)

  def end(self, session):
    """Joins the worker threads, then shuts down the TPU system."""
    logging.info('Stop infeed thread controller')
    self._infeed_thd_controller.join()

    if self._dequeue_ops is not None:
      logging.info('Stop output thread controller')
      self._outfeed_thd_controller.join()

    logging.info('Shutdown TPU system.')
    session.run(self._finalize_op)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
  """Hook that requests stop at a specified step.

  This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
  following differences for TPU training:

  1. This hook sets the variable for iterations_per_loop, which is used by
     `TPUInfeedOutfeedSessionHook` to control the iterations for
     infeed/outfeed. As the hook execution order is not guaranteed, the
     variable update is handled in `after_create_session` and `after_run` as
     `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.

  2. For each training loop (session.run), the global step could be increased
     multiple times on TPU. The global step tensor value will be explicitly
     read again in `after_run` to ensure the latest value is retrieved to
     avoid race condition.
  """

  def __init__(self, iterations, num_steps=None, last_step=None):
    """Initializes a `_TPUStopAtStepHook`.

    Args:
      iterations: The number of iterations to run optimizer per training loop.
      num_steps: Number of steps to execute.
      last_step: Step after which to stop.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    has_num_steps = num_steps is not None
    has_last_step = last_step is not None
    if not (has_num_steps or has_last_step):
      raise ValueError('One of num_steps or last_step must be specified.')
    if has_num_steps and has_last_step:
      raise ValueError('Only one of num_steps or last_step can be specified.')
    self._num_steps = num_steps
    self._last_step = last_step
    self._iterations = iterations

  def _next_iterations(self, global_step, last_step):
    """Iterations for the next loop, capped by the remaining step budget."""
    return min(last_step - global_step, self._iterations)

  def begin(self):
    self._global_step_tensor = training_util.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError('Global step should be created.')

    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    global_step = session.run(self._global_step_tensor)
    if self._last_step is None:
      self._last_step = global_step + self._num_steps

    self._iterations_per_loop_var.load(
        self._next_iterations(global_step, self._last_step), session=session)

  def after_run(self, run_context, run_values):
    # Global step cannot be retrieved via SessionRunArgs and before_run due to
    # race condition.
    global_step = run_context.session.run(self._global_step_tensor)
    if global_step >= self._last_step:
      run_context.request_stop()
    else:
      self._iterations_per_loop_var.load(
          self._next_iterations(global_step, self._last_step),
          session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
  """Hook that loads `num_steps` into the iterations-per-loop variable.

  NOTE(review): the original docstring ("Hook that requests stop at a
  specified step.") was a copy-paste from `_TPUStopAtStepHook`; this hook
  never requests stop — it only sets the eval iteration count.
  """

  def __init__(self, num_steps):
    """Initializes a `_SetEvalIterationsHook`.

    Args:
      num_steps: Number of steps to execute.
    """
    self._num_steps = num_steps

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    # Run all eval steps inside a single TPU loop.
    self._iterations_per_loop_var.load(self._num_steps, session=session)
def generate_per_core_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder):
  """Generates infeed enqueue ops for per-core input_fn on a single host."""
  captured_infeed_queue = _CapturedObject()

  def enqueue_ops_fn():
    """Builds and returns the per-host enqueue ops."""
    num_cores_per_host = ctx.num_of_cores_per_host
    per_host_sharded_inputs = []
    # Invoke input_fn once per core on this host; each invocation produces
    # one shard of the host's input.
    for core_ordinal in range(num_cores_per_host):
      with ops.name_scope('ordinal_%d' % (core_ordinal)):
        core_inputs = input_fn()
        if isinstance(core_inputs, tuple):
          features, labels = core_inputs
        else:
          features, labels = core_inputs, None
        inputs_structure_recorder.validate_and_record_structure(
            features, labels)
        per_host_sharded_inputs.append(
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels))

    infeed_queue = tpu_feed.InfeedQueue(
        number_of_tuple_elements=len(per_host_sharded_inputs[0]))
    captured_infeed_queue.capture(infeed_queue)
    infeed_queue.set_configuration_from_sharded_input_tensors(
        per_host_sharded_inputs)

    return infeed_queue.generate_enqueue_ops(
        per_host_sharded_inputs,
        tpu_ordinal_function=ctx.tpu_ordinal_function)

  return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, batch_axis, device):
  """Generates infeed enqueue ops for per-host input_fn on a single host."""
  captured_infeed_queue = _CapturedObject()

  def enqueue_ops_fn():
    """Builds and returns the enqueue ops for this host."""
    with ops.device(device):
      num_cores_per_host = ctx.num_of_cores_per_host
      # One input_fn invocation produces the whole host batch; it is split
      # across the host's cores along `batch_axis` below.
      host_inputs = input_fn()
      if isinstance(host_inputs, tuple):
        features, labels = host_inputs
      else:
        features, labels = host_inputs, None
      inputs_structure_recorder.validate_and_record_structure(
          features, labels)
      unsharded_tensor_list = (
          inputs_structure_recorder.flatten_features_and_labels(
              features, labels))

      infeed_queue = tpu_feed.InfeedQueue(
          tuple_types=[t.dtype for t in unsharded_tensor_list],
          tuple_shapes=[t.shape for t in unsharded_tensor_list],
          shard_dimensions=batch_axis)
      captured_infeed_queue.capture(infeed_queue)
      infeed_queue.set_number_of_shards(num_cores_per_host)

      return infeed_queue.split_inputs_and_generate_enqueue_ops(
          unsharded_tensor_list,
          placement_function=lambda x: device)

  return enqueue_ops_fn, captured_infeed_queue
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in `_TPUContext`, it
invokes `input_fn` for all cores (usually multi-host TPU training) or for one
host (usually for single-host TPU evaluation), and sends all `features` and
`labels` returned by `input_fn` to TPU infeed. For per-core invocation,
`features` and `labels` are piped to infeed directly, one tuple for each
core. For per-host invocation, `features` and `labels` are split at host
(with respect to `batch_axis`) and piped to all cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`,
and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`.
TPU infeed/outfeed library expects flattened tensor list. So, `features` and
`labels` need to be flattened, before infeed enqueue, and the structure of
them needs to be recorded, in order to restore them after infeed dequeue.
"""
class InputsStructureRecorder(object):
  """The recorder to record inputs structure."""

  def __init__(self):
    # Holds the structure of inputs
    self._feature_names = []
    self._label_names = []
    self._has_labels = False

    # Internal state.
    self._initialized = False

  def has_labels(self):
    """Returns True if the recorded input_fn produced labels."""
    return self._has_labels

  def validate_and_record_structure(self, features, labels):
    """Validates and records the structure of `features` and `labels`."""

    def _extract_key_names(tensor_or_dict):
      # Only dicts carry named sub-tensors; a plain tensor (or None) has no
      # key structure.
      if isinstance(tensor_or_dict, dict):
        return tensor_or_dict.keys()
      return []

    # Extract structure.
    has_labels = labels is not None
    feature_names = _extract_key_names(features)
    label_names = _extract_key_names(labels)

    if not self._initialized:
      # First invocation: record the structure.
      self._initialized = True
      self._feature_names = feature_names
      self._label_names = label_names
      self._has_labels = has_labels
    else:
      # Every later invocation must match. The following should never happen.
      assert feature_names == self._feature_names, 'feature keys mismatched'
      assert label_names == self._label_names, 'label keys mismatched'
      assert has_labels == self._has_labels, 'label presence mismatched'

  def flatten_features_and_labels(self, features, labels):
    """Flattens the `features` and `labels` to a single tensor list."""
    flattened = []
    if self._feature_names:
      # We need a fixed ordering for enqueueing and dequeueing.
      flattened.extend(features[name] for name in self._feature_names)
    else:
      flattened.append(features)

    if labels is not None:
      if self._label_names:
        # We need a fixed ordering for enqueueing and dequeueing.
        flattened.extend(labels[name] for name in self._label_names)
      else:
        flattened.append(labels)
    return flattened

  def unflatten_features_and_labels(self, flattened_inputs):
    """Restores the flattened inputs to original features and labels form.

    Args:
      flattened_inputs: Flattened inputs for each shard.

    Returns:
      A tuple of (`features`, `labels`), where `labels` could be None.
      Each one, if present, should have identical structure (single tensor vs
      dict) as the one returned by input_fn.

    Raises:
      ValueError: If the number of expected tensors from `flattened_inputs`
        mismatches the recorded structure.
    """
    num_features = (len(self._feature_names) if self._feature_names else 1)
    if self._has_labels:
      num_labels = (len(self._label_names) if self._label_names else 1)
    else:
      num_labels = 0

    expected_num_tensors = num_features + num_labels
    if expected_num_tensors != len(flattened_inputs):
      raise ValueError(
          'The number of flattened tensors mismatches expected num. '
          'Expected {}, got {}'.format(expected_num_tensors,
                                       len(flattened_inputs)))

    if self._feature_names:
      unflattened_features = dict(
          zip(self._feature_names, flattened_inputs[:num_features]))
    else:
      # Single tensor case.
      unflattened_features = flattened_inputs[0]

    if num_labels == 0:
      unflattened_label = None
    elif self._label_names:
      unflattened_label = dict(
          zip(self._label_names, flattened_inputs[num_features:]))
    else:
      # Single tensor case.
      unflattened_label = flattened_inputs[num_features]

    return unflattened_features, unflattened_label
def __init__(self, input_fn, batch_axis, ctx):
  """Constructor.

  Args:
    input_fn: input fn for train or eval.
    batch_axis: A python tuple of int values describing how each tensor
      produced by the Estimator `input_fn` should be split across the TPU
      compute shards.
    ctx: A `_TPUContext` instance with mode.

  Raises:
    ValueError: If both `sharded_features` and `num_cores` are `None`.
  """
  self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()

  # True when each core runs its own input_fn invocation (per-core training
  # input); False for per-host input deployment.
  self._sharded_per_core = ctx.is_input_sharded_per_core()
  self._input_fn = input_fn
  # Set by `_invoke_input_fn_and_record_structure`; later used to build the
  # dequeue ops.
  self._infeed_queue = None
  self._ctx = ctx
  self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
  """Generates infeed enqueue ops and dequeue_fn."""
  # Building the enqueue ops invokes input_fn, which records the
  # feature/label structure as a side effect — so it must run before
  # `dequeue_fn` (which relies on that recorded structure) is used.
  enqueue_ops = self._invoke_input_fn_and_record_structure()

  self._validate_input_pipeline()

  def dequeue_fn():
    """dequeue_fn is used by TPU to retrieve the tensors."""
    dequeued_values = self._infeed_queue.generate_dequeue_op()
    # The unflatten process uses the structure information recorded above.
    return self._inputs_structure_recorder.unflatten_features_and_labels(
        dequeued_values)

  return (enqueue_ops, dequeue_fn)
def _invoke_input_fn_and_record_structure(self):
  """Deploys the input pipeline and record input structure.

  Returns:
    A list of enqueue ops, one entry per host (each either the raw enqueue
    ops or a tf.while_loop wrapping them).
  """
  enqueue_ops = []
  infeed_queues = []
  num_hosts = self._ctx.num_hosts
  tpu_host_placement_fn = self._ctx.tpu_host_placement_function
  if self._sharded_per_core:
    # Per-Core input pipeline deployment.
    # Invoke input pipeline for each core and placed on the corresponding
    # host.
    for host_id in range(num_hosts):
      host_device = tpu_host_placement_fn(host_id=host_id)
      with ops.device(host_device):
        with ops.name_scope('input_pipeline_task%d' % (host_id)):
          enqueue_ops_fn, captured_infeed_queue = (
              generate_per_core_enqueue_ops_fn_for_host(
                  self._ctx, self._input_fn, self._inputs_structure_recorder))

          if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
            enqueue_ops.append(_wrap_computation_in_while_loop(
                device=host_device, op_fn=enqueue_ops_fn))
          else:
            enqueue_ops.append(enqueue_ops_fn())
          # Infeed_queue_getter must be called after enqueue_ops_fn is called.
          infeed_queues.append(captured_infeed_queue.get())
  else:
    # Per-host input pipeline deployment: one input_fn invocation per host.
    for host_id in range(num_hosts):
      host_device = tpu_host_placement_fn(host_id=host_id)
      with ops.device(host_device):
        with ops.name_scope('input_pipeline_task%d' % (host_id)):
          enqueue_ops_fn, captured_infeed_queue = (
              generate_per_host_enqueue_ops_fn_for_host(
                  self._ctx, self._input_fn, self._inputs_structure_recorder,
                  self._batch_axis, host_device))

          if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
            enqueue_ops.append(_wrap_computation_in_while_loop(
                device=host_device, op_fn=enqueue_ops_fn))
          else:
            enqueue_ops.append(enqueue_ops_fn())
          # Must also be read only after enqueue_ops_fn has run.
          infeed_queues.append(captured_infeed_queue.get())
  # infeed_queue is used to generate dequeue ops. The only thing it uses for
  # dequeue is dtypes and types. So, any one can be used. Here, grab the
  # first one.
  self._infeed_queue = infeed_queues[0]
  return enqueue_ops
def _validate_input_pipeline(self):
  """Warns or errors if the input pipeline uses QueueRunners.

  We should error out to give users a better error message. But, if
  _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot
  break user code, so only a warning is logged in that case.
  """
  graph = ops.get_default_graph()
  if not graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
    return

  err_msg = ('Input pipeline contains one or more QueueRunners. '
             'It could be slow and not scalable. Please consider '
             'converting your input pipeline to use `tf.data` instead (see '
             'https://www.tensorflow.org/programmers_guide/datasets for '
             'instructions.')
  if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
    raise RuntimeError(err_msg)
  logging.warn(err_msg)
class _ModelFnWrapper(object):
  """A `model_fn` wrapper.

  This makes calling model_fn on CPU and TPU easier and more consistent and
  performs necessary check and mutation required by TPU training and
  evaluation. In addition, this wrapper manages converting the `model_fn` to a
  single TPU train and eval step.
  """

  def __init__(self, model_fn, config, params, ctx):
    self._model_fn = model_fn
    self._config = config
    self._params = params
    self._ctx = ctx

  def call_without_tpu(self, features, labels):
    """Calls the model_fn directly (CPU path), no infeed/outfeed involved."""
    return self._call_model_fn(features, labels)

  def convert_to_single_tpu_train_step(self, dequeue_fn):
    """Converts user provided model_fn` as a single train step on TPU.

    The user provided `model_fn` takes input tuple
    (features, labels) and produces the EstimatorSpec with train_op and loss
    for train `mode`. This usually represents a single train computation on
    CPU.

    For TPU training, a train (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated
    to all TPU shards. Besides the input should be taken from TPU infeed
    rather than input pipeline (input_fn) directly. To fit TPU loop and
    replicate pattern, the original train computation should be reformed,
    which is the returned `train_step`.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        TPU infeed dequeue channel.

    Returns:
      A Fn representing the train step for TPU.
    """
    captured_scaffold_fn = _CapturedObject()

    def train_step(loss):
      """Training step function for use inside a while loop."""
      del loss  # unused; required in function signature.
      features, labels = dequeue_fn()

      estimator_spec = self._verify_estimator_spec(
          self._call_model_fn(features, labels))
      loss, train_op = estimator_spec.loss, estimator_spec.train_op

      # Capture the scaffold_fn for use outside the while_loop body.
      if isinstance(estimator_spec, TPUEstimatorSpec):
        captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
      else:
        captured_scaffold_fn.capture(None)

      # The loss carried to the next iteration must depend on the train_op
      # so the optimizer actually runs each step.
      with ops.control_dependencies([train_op]):
        return array_ops.identity(loss)

    return train_step, captured_scaffold_fn

  def convert_to_single_tpu_eval_step(self, dequeue_fn):
    """Converts user provided model_fn` as a single eval step on TPU.

    Similar to training, the user provided `model_fn` takes input tuple
    (features, labels) and produces the TPUEstimatorSpec with eval_metrics
    for eval `mode`. This usually represents a single evaluation computation
    on CPU.

    For TPU evaluation, a eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated
    to all TPU shards. Besides the input and output are slightly different.
    Input, features and labels, should be taken from TPU infeed rather than
    input pipeline (input_fn) directly. Output is managed in two stages.
    First, the model outputs as the result of evaluation computation, usually
    model logits, should be transferred from TPU system to CPU. Then, all
    model outputs are concatenated first on CPU and sent to the metric_fn for
    metrics computation. To fit TPU evaluation pattern, the original eval
    computation should be reformed, which is the returned `eval_step`.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        TPU infeed dequeue channel.

    Returns:
      A tuple of eval_fn and eval_metrics. The eval_fn representing the eval
      step for TPU. and eval_metrics is an `_EvalMetrics` instance.
    """
    eval_metrics = _EvalMetrics(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def eval_step(total_loss):
      """Evaluation step function for use inside a while loop."""
      features, labels = dequeue_fn()

      tpu_estimator_spec = self._call_model_fn(features, labels)
      if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
        raise RuntimeError(
            'estimator_spec used by TPU evaluation must have type'
            '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

      loss = tpu_estimator_spec.loss
      captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
      # Record eval_metrics structure; the tensors are shipped to CPU via
      # outfeed and concatenated there before calling metric_fn.
      eval_metrics.record(tpu_estimator_spec)
      outfeed_ops = tpu_ops.outfeed_enqueue_tuple(eval_metrics.outfeed_tensors)

      # Accumulate the loss only after the outfeed enqueue has run.
      with ops.control_dependencies([outfeed_ops]):
        return math_ops.add(total_loss, loss)

    return eval_step, eval_metrics, captured_scaffold_fn

  def _call_model_fn(self, features, labels):
    """Calls the model_fn with required parameters."""
    model_fn_args = util.fn_args(self._model_fn)
    kwargs = {}

    # Makes deep copy with `config` and params` in case user mutates them.
    config = copy.deepcopy(self._config)
    params = copy.deepcopy(self._params)

    if 'labels' in model_fn_args:
      kwargs['labels'] = labels
    elif labels is not None:
      raise ValueError(
          'model_fn does not take labels, but input_fn returns labels.')
    if 'mode' in model_fn_args:
      kwargs['mode'] = self._ctx.mode
    if 'config' in model_fn_args:
      kwargs['config'] = config
    if 'params' in model_fn_args:
      kwargs['params'] = params

    # `params` is mandatory: TPUEstimator communicates the per-shard batch
    # size through it.
    if 'params' not in model_fn_args:
      raise ValueError(
          'model_fn ({}) does not include params argument, '
          'required by TPUEstimator to pass batch size as '
          'params[\'batch_size\']'.format(self._model_fn))

    batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
    if batch_size_for_model_fn is not None:
      params[_BATCH_SIZE_KEY] = batch_size_for_model_fn

    estimator_spec = self._model_fn(features=features, **kwargs)
    if (self._ctx.is_running_on_cpu() and
        isinstance(estimator_spec, TPUEstimatorSpec)):
      # The estimator_spec will be passed to `Estimator` directly, which
      # expects type `EstimatorSpec`.
      return estimator_spec.as_estimator_spec()
    else:
      return estimator_spec

  def _verify_estimator_spec(self, estimator_spec):
    """Validates the estimator_spec.

    TPUEstimatorSpec passes through untouched; a plain EstimatorSpec must
    not carry hooks (unsupported on TPU) and its scaffold is ignored.
    """
    if isinstance(estimator_spec, TPUEstimatorSpec):
      return estimator_spec

    err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
    if estimator_spec.training_chief_hooks:
      raise ValueError(err_msg.format('training_chief_hooks'))
    if estimator_spec.training_hooks:
      raise ValueError(err_msg.format('training_hooks'))
    if estimator_spec.evaluation_hooks:
      raise ValueError(err_msg.format('evaluation_hooks'))

    if estimator_spec.scaffold:
      logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
                      'Please use TPUEstimatorSpec.')
    return estimator_spec
class _EvalMetrics(object):
  """Class wraps TPUEstimator.eval_metrics."""

  def __init__(self, ctx):
    self._ctx = ctx
    self._metric_fn = None
    self._is_dict = False
    self._tensor_keys = []
    self._tensors = []
    self._tensor_dtypes = []
    self._tensor_shapes = []
    # Guards against record() being called more than once.
    self._recorded = False

  @staticmethod
  def validate(eval_metrics):
    """Validates the `eval_metrics` in `TPUEstimatorSpec`."""

    if not isinstance(eval_metrics, (tuple, list)):
      raise ValueError('eval_metrics should be tuple or list')
    if len(eval_metrics) != 2:
      raise ValueError('eval_metrics should have two elements.')
    if not callable(eval_metrics[0]):
      raise TypeError('eval_metrics[0] should be callable.')
    if not isinstance(eval_metrics[1], (tuple, list, dict)):
      raise ValueError('eval_metrics[1] should be tuple or list, or dict.')

    # For a positional tensor list, the arity must match metric_fn's args;
    # for a dict, keyword matching is checked at call time instead.
    if isinstance(eval_metrics[1], (tuple, list)):
      fn_args = util.fn_args(eval_metrics[0])
      if len(eval_metrics[1]) != len(fn_args):
        raise RuntimeError(
            'In TPUEstimatorSpec.eval_metrics, length of tensors does not '
            'match method args of metric_fn.')

  @staticmethod
  def to_metric_metric_ops_for_cpu(eval_metrics):
    """Converts `TPUEstimatorSpec.eval_metrics` to `eval_metric_ops` for CPU."""
    if not eval_metrics:
      return None

    _EvalMetrics.validate(eval_metrics)

    metric_fn, tensors = eval_metrics

    if isinstance(tensors, (tuple, list)):
      return metric_fn(*tensors)
    else:
      # Must be dict.
      try:
        return metric_fn(**tensors)
      except TypeError as e:
        logging.warning(
            'Exception while calling metric_fn for evalution: %s. '
            'It is likely the tensors (eval_metrics[1]) do not match the '
            'metric_fn arguments', e)
        raise e

  def record(self, spec):
    """Records the eval_metrics structure in `spec`.

    Stores the metric_fn plus the dtypes/shapes of the metric tensors so the
    matching outfeed dequeue ops can be built on the CPU side.
    """
    if self._recorded:
      raise RuntimeError('Eval metrics have been recorded already.')

    self._metric_fn, tensor_list_or_dict = spec.eval_metrics

    if isinstance(tensor_list_or_dict, dict):
      self._is_dict = True
      for (key, tensor) in six.iteritems(tensor_list_or_dict):
        self._tensor_keys.append(key)
        self._tensors.append(tensor)
        self._tensor_dtypes.append(tensor.dtype)
        self._tensor_shapes.append(tensor.shape)
    else:
      # List or tuple.
      self._is_dict = False
      self._tensors = tensor_list_or_dict
      for tensor in tensor_list_or_dict:
        self._tensor_dtypes.append(tensor.dtype)
        self._tensor_shapes.append(tensor.shape)
    self._recorded = True

  @property
  def outfeed_tensors(self):
    # The flat list of tensors to enqueue on the TPU outfeed.
    if not self._recorded:
      raise RuntimeError('Eval metrics have not been recorded yet')
    return self._tensors

  def to_metric_metric_ops_for_tpu(self, dummy_update_op):
    """Creates the eval_metric_ops now based on the TPU outfeed.

    `eval_metric_ops` is defined in `EstimatorSpec`. From all shards, tensors
    are dequeued from outfeed and then concatenated (along batch size
    dimension) to form global-like tensors. All global-like tensors are
    passed to the metric fn.

    Args:
      dummy_update_op: A dummy update op.

    Returns:
      A tuple of (`eval_metric_ops` and `update_ops`), where `update_ops`
      should be invoked in Outfeed dequeue thread, which drive the outfeed
      dequeue and update the state of metrics.

    Raises:
      RuntimeError: If outfeed tensor is scalar.
    """

    num_cores = self._ctx.num_cores

    # For each i, dequeue_ops[i] is a list containing the tensors from all
    # shards. This list is concatenated later.
    dequeue_ops = []
    for i in xrange(len(self._tensors)):
      dequeue_ops.append([])

    # Outfeed ops execute on each JF node.
    tpu_device_placement_fn = self._ctx.tpu_device_placement_function
    for i in xrange(num_cores):
      with ops.device(tpu_device_placement_fn(i)):
        outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
            dtypes=self._tensor_dtypes, shapes=self._tensor_shapes)
        for j, item in enumerate(outfeed_tensors):
          dequeue_ops[j].append(item)

    # It is assumed evaluation always happens on single host TPU system. So,
    # place all ops on tpu host if possible.
    with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
      for i, item in enumerate(dequeue_ops):
        if dequeue_ops[i][0].shape.ndims == 0:
          raise RuntimeError(
              'All tensors outfed from TPU should preseve batch size '
              'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
        # TODO(xiejw): Allow users to specify the axis for batch size
        # dimension.
        dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)

      if self._is_dict:
        dequeue_ops = dict(zip(self._tensor_keys, dequeue_ops))
        try:
          eval_metric_ops = self._metric_fn(**dequeue_ops)
        except TypeError as e:
          logging.warning(
              'Exception while calling metric_fn for evalution: %s. '
              'It is likely the tensors (eval_metrics[1]) do not match the '
              'metric_fn arguments', e)
          raise e
      else:
        eval_metric_ops = self._metric_fn(*dequeue_ops)

    # Estimator calls each metric's update_op itself; the real updates run in
    # the outfeed dequeue thread, so every metric gets the dummy op here.
    eval_update_ops = []
    for k, v in eval_metric_ops.items():
      eval_metric_ops[k] = (v[0], dummy_update_op)
      eval_update_ops.append(v[1])

    return eval_metric_ops, eval_update_ops
class TPUEstimator(estimator_lib.Estimator):
  """Estimator with TPU support.

  TPUEstimator handles many of the details of running on TPU devices, such as
  replicating inputs and models for each core, and returning to host
  periodically to run hooks.

  If `use_tpu` is false, all training, evaluation, and predict are executed on
  CPU.

  For training, TPUEstimator transforms a global batch size in params to a
  per-shard batch size when calling the `input_fn` and `model_fn`. Users
  should specify `train_batch_size` in constructor, and then get the batch
  size for each shard in `input_fn` and `model_fn` by `params['batch_size']`.
  If `TPUConfig.per_host_input_for_training` is `True`, `input_fn` is invoked
  per host rather than per core. In this case, a global batch size is
  transformed to a per-host batch size in params for `input_fn`, but
  `model_fn` still gets per-core batch size.

  For evaluation, if `eval_batch_size` is None, it is executed on CPU, even
  if `use_tpu` is `True`. If `eval_batch_size` is not `None`, it is executed
  on TPU, which is an experimental feature. In this case, `model_fn` should
  return `TPUEstimatorSpec` instead of `EstimatorSpec`, which expects the
  `eval_metrics` for TPU evaluation.

  `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`,
  where `tensors` could be a list of `Tensor`s or dict of names to
  `Tensor`s. (See `TPUEstimatorSpec` for details). `metric_fn` takes the
  `tensors` and returns a dict from metric string name to the result of
  calling a metric function, namely a `(metric_tensor, update_op)` tuple.

  Current limitations:

  1. TPU evaluation only works on single host.
  2. `input_fn` for evaluation should not throw OutOfRange error for all
     evaluation steps and all batches should have the same size.

  Example (MNIST):
  ```
  # The metric Fn which runs on CPU.
  def metric_fn(labels, logits):
    predictions = tf.argmax(logits, 1)
    return {
      'accuracy': tf.metrics.precision(
          labels=labels, predictions=predictions),
    }

  # Your model Fn which runs on TPU (eval_metrics is list in this example)
  def model_fn(features, labels, mode, config, params):
    ...
    logits = ...

    if mode == tf.estimator.ModeKeys.EVAL:
      return tpu_estimator.TPUEstimatorSpec(
          mode=mode,
          loss=loss,
          eval_metrics=(metric_fn, [labels, logits]))

  # or specify the eval_metrics tensors as dict.
  def model_fn(features, labels, mode, config, params):
    ...
    final_layer_output = ...

    if mode == tf.estimator.ModeKeys.EVAL:
      return tpu_estimator.TPUEstimatorSpec(
          mode=mode,
          loss=loss,
          eval_metrics=(metric_fn, {
              'labels': labels,
              'logits': final_layer_output,
          }))
  ```

  Predict support on TPU is not yet implemented. So, `predict` and
  `export_savedmodel` are executed on CPU, even if `use_tpu` is true.
  """

  def __init__(self,
               model_fn=None,
               model_dir=None,
               config=None,
               params=None,
               use_tpu=True,
               train_batch_size=None,
               eval_batch_size=None,
               batch_axis=None):
    """Constructs an `TPUEstimator` instance.

    Args:
      model_fn: Model function as required by `Estimator`. For training, the
        returned `EstimatorSpec` cannot have hooks as it is not supported in
        `TPUEstimator`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model. If `None`, the
        model_dir in `config` will be used if set. If both are set, they must
        be same. If both are `None`, a temporary directory will be used.
      config: An `tpu_config.RunConfig` configuration object. Cannot be
        `None`.
      params: An optional `dict` of hyper parameters that will be passed into
        `input_fn` and `model_fn`. Keys are names of parameters, values are
        basic python types. There are reserved keys for `TPUEstimator`,
        including 'batch_size'.
      use_tpu: A bool indicating whether TPU support is enabled. Currently,
        - TPU training respects this bit.
        - If true, see `eval_batch_size` for evaluate support.
        - Predict still happens on CPU.
      train_batch_size: An int representing the global training batch size.
        TPUEstimator transforms this global batch size to a per-shard batch
        size, as params['batch_size'], when calling `input_fn` and
        `model_fn`. Cannot be `None` if `use_tpu` is `True`. Must be
        divisible by `config.tpu_config.num_shards`.
      eval_batch_size: An int representing the global evaluation batch size.
        Currently, if `None`, evaluation is still executed on CPU (even when
        `use_tpu` is True). In near future, `use_tpu` will be the only option
        to switch between TPU/CPU evaluation.
      batch_axis: A python tuple of int values describing how each tensor
        produced by the Estimator `input_fn` should be split across the TPU
        compute shards. For example, if your input_fn produced
        (images, labels) where the images tensor is in `HWCN` format, your
        shard dimensions would be [3, 0], where 3 corresponds to the `N`
        dimension of your images Tensor, and 0 corresponds to the dimension
        along which to split the labels to match up with the corresponding
        images. If None is supplied, and per_host_input_for_training is
        True, batches will be sharded based on the major dimension. If
        tpu_config.per_host_input_for_training is False, batch_axis is
        ignored.

    Raises:
      ValueError: `params` has reserved keys already.
    """
    if config is None or not isinstance(config, tpu_config.RunConfig):
      raise ValueError(
          '`config` must be provided with type `tpu_config.RunConfig`')

    if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
      raise ValueError(
          '{} are reserved keys but existed in params {}.'.format(
              _RESERVED_PARAMS_KEYS, params))

    if use_tpu:
      if train_batch_size is None:
        raise ValueError('`train_batch_size` cannot be `None`')
      if not isinstance(train_batch_size, int):
        raise ValueError('`train_batch_size` must be an int')
      if train_batch_size < 1:
        raise ValueError('`train_batch_size` must be positive')

      # The specified batch size is the batch size for the entire computation.
      # The input_fn and model_fn are called per-shard, so we want to
      # calculate the per-shard batch size and pass that.
      if train_batch_size % config.tpu_config.num_shards != 0:
        raise ValueError(
            'train batch size {} must be divisible by number of shards {}'
            .format(train_batch_size, config.tpu_config.num_shards))

      if eval_batch_size is not None:
        # NOTE: num_shards > 8 is used as a proxy for "more than one host".
        if config.tpu_config.num_shards > 8:
          raise NotImplementedError(
              'TPU evaluation is only supported with one host.')

        if eval_batch_size % config.tpu_config.num_shards != 0:
          raise ValueError(
              'eval batch size {} must be divisible by number of shards {}'
              .format(eval_batch_size, config.tpu_config.num_shards))

    # Verifies the model_fn signature according to Estimator framework.
    estimator_lib._verify_model_fn_args(model_fn, params)  # pylint: disable=protected-access
    # We cannot store config and params in this constructor as parent
    # constructor might change them, such as assigning a temp dir for
    # config.model_dir.
    model_function = self._augment_model_fn(model_fn, batch_axis)

    # Passing non-None params as wrapped model_fn has it.
    params = params or {}
    super(TPUEstimator, self).__init__(
        model_fn=model_function,
        model_dir=model_dir,
        config=config,
        params=params)
    self._iterations_per_training_loop = (
        self._config.tpu_config.iterations_per_loop)

    # All properties passed to _TPUContext are immutable.
    self._ctx = _TPUContext(self._config, train_batch_size, eval_batch_size,
                            use_tpu)

  def _create_global_step(self, graph):
    """Creates a global step suitable for TPUs.

    Args:
      graph: The graph in which to create the global step.

    Returns:
      A global step `Tensor`.

    Raises:
      ValueError: if the global step tensor is already defined.
    """
    return _create_global_step(graph)

  def _convert_train_steps_to_hooks(self, steps, max_steps):
    with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
      if ctx.is_running_on_cpu():
        # Fall back to the plain Estimator behavior on CPU.
        return super(TPUEstimator, self)._convert_train_steps_to_hooks(
            steps, max_steps)

    # On TPU.
    if steps is None and max_steps is None:
      raise ValueError(
          'For TPU training, one of `steps` or `max_steps` must be set. '
          'Cannot be both `None`.')

    # Estimator.train has explicit positiveness check.
    if steps is not None:
      util_lib.check_positive_integer(steps, 'Train steps')
    if max_steps is not None:
      util_lib.check_positive_integer(max_steps, 'Train max_steps')

    return [_TPUStopAtStepHook(self._iterations_per_training_loop,
                               steps, max_steps)]

  def _convert_eval_steps_to_hooks(self, steps):
    with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
      if ctx.is_running_on_cpu():
        return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)

    if steps is None:
      raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')

    util_lib.check_positive_integer(steps, 'Eval steps')

    hooks = []
    hooks.append(evaluation._StopAfterNEvalsHook(  # pylint: disable=protected-access
        num_evals=steps))
    hooks.append(_SetEvalIterationsHook(steps))
    return hooks

  def _call_input_fn(self, input_fn, mode):
    """Calls the input function.

    Args:
      input_fn: The input function.
      mode: ModeKeys

    Returns:
      Either features or (features, labels) where features and labels are:
        features - `Tensor` or dictionary of string feature name to `Tensor`.
        labels - `Tensor` or dictionary of `Tensor` with labels.

    Raises:
      ValueError: if input_fn takes invalid arguments or does not have
        `params`.
    """
    input_fn_args = util.fn_args(input_fn)
    config = self.config  # a deep copy.
    kwargs = {}
    if 'params' in input_fn_args:
      kwargs['params'] = self.params  # a deep copy.
    else:
      raise ValueError('input_fn ({}) does not include params argument, '
                       'required by TPUEstimator to pass batch size as '
                       'params["batch_size"]'.format(input_fn))
    if 'config' in input_fn_args:
      kwargs['config'] = config

    with self._ctx.with_mode(mode) as ctx:
      # Setting the batch size in params first. This helps user to have same
      # input_fn for use_tpu=True/False.
      batch_size_for_input_fn = ctx.batch_size_for_input_fn
      if batch_size_for_input_fn is not None:
        kwargs['params'][_BATCH_SIZE_KEY] = batch_size_for_input_fn

      if ctx.is_running_on_cpu():
        with ops.device('/device:CPU:0'):
          return input_fn(**kwargs)

      # For TPU computation, input_fn should be invoked in a tf.while_loop
      # for performance. While constructing the tf.while_loop, the structure
      # of inputs returned by the `input_fn` needs to be recorded. The
      # structure includes whether features or labels is dict or single
      # Tensor, dict keys, tensor shapes, and dtypes. The recorded structure
      # is used to create the infeed dequeue ops, which must be wrapped and
      # passed as a Fn, called inside the TPU computation, as the TPU
      # computation is wrapped inside a tf.while_loop also. So, we either
      # pass input_fn to model_fn or pass dequeue_fn to model_fn. Here,
      # `input_fn` is passed directly as `features` in `model_fn` signature.
      def _input_fn():
        return input_fn(**kwargs)

      return _input_fn

  def _augment_model_fn(self, model_fn, batch_axis):
    """Returns a new model_fn, which wraps the TPU support."""

    def _model_fn(features, labels, mode, config, params):
      """A Estimator `model_fn` for TPUEstimator."""
      with self._ctx.with_mode(mode) as ctx:
        model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)

        # TODO(jhseu): Move to PREDICT to TPU.
        if ctx.is_running_on_cpu():
          logging.info('Running %s on CPU', mode)
          return model_fn_wrapper.call_without_tpu(features, labels)

        assert labels is None, '`labels` passed to `model_fn` must be `None`.'
        # TPUEstimator._call_input_fn passes `input_fn` as features to here.
        assert callable(features), '`input_fn` is not callable.'
        input_fn = features

        input_holders = _InputPipeline(input_fn, batch_axis, ctx)
        enqueue_ops, dequeue_fn = (
            input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())

        if mode == model_fn_lib.ModeKeys.TRAIN:
          loss, scaffold = (
              _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
          hooks = [
              TPUInfeedOutfeedSessionHook(ctx, enqueue_ops),
              training.LoggingTensorHook(
                  {'loss': array_ops.identity(loss),
                   'step': training.get_global_step()},
                  every_n_secs=30)
          ]
          summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
          with ops.control_dependencies([loss]):
            update_ops = _sync_variables_ops()

          # Validate the TPU training graph to catch basic errors
          _validate_tpu_training_graph()

          return model_fn_lib.EstimatorSpec(
              mode,
              loss=loss,
              training_hooks=hooks,
              train_op=control_flow_ops.group(*update_ops),
              scaffold=scaffold)

        # Now eval.
        total_loss, eval_metric_ops, scaffold = _eval_on_tpu_system(
            ctx, model_fn_wrapper, dequeue_fn)
        iterations_per_loop_var = _create_or_get_iterations_per_loop()
        mean_loss = math_ops.div(
            total_loss,
            math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))

        # Creates a dummy metric update_op for all metrics. Estimator
        # expects all metrics in eval_metric_ops have update_op and calls
        # them one by one. The real metric update_ops are invoked in a
        # separated thread. So, here give Estimator the dummy op for all
        # metrics.
        with ops.control_dependencies([mean_loss]):
          # After TPU evaluation computation is done (the mean_loss tensor),
          # reads all variables back from TPU and updates the eval step
          # counter properly
          internal_ops_to_run = _sync_variables_ops()
          internal_ops_to_run.append(
              _increase_eval_step_op(iterations_per_loop_var))
          with ops.control_dependencies(internal_ops_to_run):
            dummy_update_op = control_flow_ops.no_op()

        eval_metric_ops, eval_update_ops = (
            eval_metric_ops.to_metric_metric_ops_for_tpu(dummy_update_op))
        hooks = [
            TPUInfeedOutfeedSessionHook(ctx, enqueue_ops, eval_update_ops),
        ]

        return model_fn_lib.EstimatorSpec(
            mode,
            loss=mean_loss,
            evaluation_hooks=hooks,
            eval_metric_ops=eval_metric_ops,
            scaffold=scaffold)

    return _model_fn
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  iterations_per_loop_var = _create_or_get_iterations_per_loop()
  single_tpu_eval_step, eval_metric_ops, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))

  def multi_tpu_eval_steps_on_single_shard():
    # Accumulate the per-step loss starting from zero.
    return training_loop.repeat(iterations_per_loop_var,
                                single_tpu_eval_step,
                                [_ZERO_LOSS],
                                name='loop')

  # Replicate the repeated eval loop across all shards; only the first
  # shard's loss output is kept.
  (loss,) = tpu.shard(multi_tpu_eval_steps_on_single_shard,
                      inputs=[],
                      num_shards=ctx.num_cores,
                      outputs_from_all_shards=False)

  scaffold = _get_scaffold(captured_scaffold_fn)
  return loss, eval_metric_ops, scaffold
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards.

  Args:
    ctx: A `_TPUContext` supplying `num_cores`.
    model_fn_wrapper: A `_ModelFnWrapper` providing the single train step.
    dequeue_fn: Function retrieving (features, labels) from the infeed.

  Returns:
    A `(loss, scaffold)` tuple: the final loop loss from the first shard and
    the Scaffold retrieved from the captured scaffold_fn (or None).
  """
  num_cores = ctx.num_cores
  iterations_per_loop_var = _create_or_get_iterations_per_loop()
  single_tpu_train_step, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))

  def multi_tpu_train_steps_on_single_shard():
    # BUGFIX: the name must be a str, not bytes (b'loop'); name scopes
    # require text strings, and this now matches _eval_on_tpu_system.
    return training_loop.repeat(
        iterations_per_loop_var,
        single_tpu_train_step,
        [_INITIAL_LOSS],
        name='loop')

  (loss,) = tpu.shard(multi_tpu_train_steps_on_single_shard,
                      inputs=[],
                      num_shards=num_cores,
                      outputs_from_all_shards=False)

  scaffold = _get_scaffold(captured_scaffold_fn)
  return loss, scaffold
def _wrap_computation_in_while_loop(device, op_fn):
  """Repeats the ops generated by `op_fn` inside a tf.while_loop."""

  def _body(step):
    # op_fn is re-traced every iteration; the counter only advances once its
    # ops have executed.
    with ops.control_dependencies(op_fn()):
      return step + 1

  loop_iterations_var = _create_or_get_iterations_per_loop()
  with ops.device(device):
    # By setting parallel_iterations=1, the parallel execution in while_loop
    # is basically turned off.
    total_iterations = array_ops.identity(loop_iterations_var)
    return control_flow_ops.while_loop(
        lambda step: step < total_iterations,
        _body, [constant_op.constant(0)], parallel_iterations=1)
def _validate_tpu_training_graph():
  """Validate graph before running distributed training.

  Raises:
    ValueError: If the graph seems invalid for running on device
  """
  # At least one CrossReplicaSum op must be present in the graph; it is
  # introduced by wrapping the optimizer in CrossShardOptimizer.
  graph_ops = ops.get_default_graph().get_operations()
  if not any(op.type == _CROSS_REPLICA_SUM_OP for op in graph_ops):
    raise ValueError(
        'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can be captured only. Please file bug .')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug .')
return self._object
def _get_scaffold(captured_scaffold_fn):
  """Retrieves the Scaffold from `captured_scaffold_fn`."""
  # scaffold_fn must not reference tensors from the TPU computation; the
  # capturing context raises if it does.
  with _CapturingContext(message='Inside scaffold_fn'):
    scaffold_fn = captured_scaffold_fn.get()
    if not scaffold_fn:
      scaffold = None
    else:
      scaffold = scaffold_fn()
      if scaffold is None:
        raise ValueError(
            'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')

  if scaffold:
    # Guard Scaffold.finalize under the same restriction.
    wrapped_finalize = scaffold.finalize

    def _finalize():
      with _CapturingContext('Inside Scaffold.finalize'):
        wrapped_finalize()

    scaffold.finalize = _finalize
  return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
  """Tracks references to Tensors defined in TPU replication.

  Installed as the graph's current control flow context; any op added while
  the context is active is checked for inputs produced inside a TPU
  computation, which is not allowed.
  """

  def __init__(self, message):
    control_flow_ops.ControlFlowContext.__init__(self)
    # Prefix used in the error raised by AddOp.
    self._message = message

  def AddOp(self, op):  # pylint: disable=invalid-name
    for c in op.inputs:
      # Ops marked with the TPU replicate attribute were built inside the
      # TPU computation and must not be referenced here.
      if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr:  # pylint: disable=protected-access
        raise ValueError(
            '{}: Op {} depends on TPU computation {}, '
            'which is not allowed.'.format(self._message, op, c))

  def __enter__(self):
    # pylint: disable=protected-access
    # Save the current context so it can be restored on exit.
    self._g = ops.get_default_graph()
    self._old = self._g._get_control_flow_context()
    self._g._set_control_flow_context(self)
    # pylint: enable=protected-access

  def __exit__(self, _, __, ___):  # pylint: disable=invalid-name
    self._g._set_control_flow_context(self._old)  # pylint: disable=protected-access
|
import logging
import json
from django.http import HttpResponse
from myuw_mobile.views.rest_dispatch import RESTDispatch
from myuw_mobile.dao.notice import get_notices_for_current_user
from myuw_mobile.logger.timer import Timer
from myuw_mobile.logger.logresp import log_success_response
class Notices(RESTDispatch):
    """
    Performs actions on resource at /api/v1/notices/.
    """

    def GET(self, request):
        """
        GET returns 200 with a list of notices for the current user
        """
        timer = Timer()
        logger = logging.getLogger(__name__)
        user_notices = get_notices_for_current_user()
        payload = self._get_json(user_notices)
        logger.debug(payload)
        log_success_response(logger, timer)
        return HttpResponse(json.dumps(payload))

    def _get_json(self, notices):
        """Groups notice JSON data by the notice's custom category."""
        by_category = {}
        for notice in notices:
            entry = notice.json_data()
            entry['id_hash'] = notice.id_hash
            entry['is_read'] = notice.is_read
            # Create the category bucket on first sight, then append.
            bucket = by_category.setdefault(notice.custom_category,
                                            {"notices": []})
            bucket["notices"].append(entry)
        return by_category
Restructuring JSON to match UX design
import logging
import json
from django.http import HttpResponse
from myuw_mobile.views.rest_dispatch import RESTDispatch
from myuw_mobile.dao.notice import get_notices_for_current_user
from myuw_mobile.logger.timer import Timer
from myuw_mobile.logger.logresp import log_success_response
from datetime import datetime
class Notices(RESTDispatch):
    """
    Performs actions on resource at /api/v1/notices/.
    """

    def GET(self, request):
        """
        GET returns 200 with a list of notices for the current user
        """
        timer = Timer()
        logger = logging.getLogger(__name__)
        notices = get_notices_for_current_user()
        notice_json = self._get_json(notices)
        logger.debug(notice_json)
        log_success_response(logger, timer)
        return HttpResponse(json.dumps(notice_json))

    def _get_json(self, notices):
        """Bucket notices into holds/today/week/future for the UX design.

        A notice dated today also lands in "week" (today is part of the
        current week), preserving the original overlap behaviour.
        """
        notice_json = {
            "holds": [],
            "today": [],
            "week": [],
            "future": []
        }
        today = datetime.now()
        for notice in notices:
            data = notice.json_data()
            data['id_hash'] = notice.id_hash
            data['is_read'] = notice.is_read
            data['category'] = notice.custom_category
            if notice.custom_category == "Holds":
                notice_json['holds'].append(data)
                continue
            for attr in notice.attributes:
                if attr.data_type == "date":
                    date = datetime.strptime(attr.get_value(), "%Y-%m-%d")
                    # Compare full calendar dates. The old strftime("%j")
                    # check compared only the day-of-year, so the same day
                    # in *any* year matched "today".
                    if date.date() == today.date():
                        notice_json["today"].append(data)
                    # ISO (year, week) pair; the old bare "%V" check
                    # ignored the year in the same way.
                    if date.isocalendar()[:2] == today.isocalendar()[:2]:
                        notice_json["week"].append(data)
                    if date > today:
                        notice_json["future"].append(data)
        return notice_json
|
from warnings import warn
import numpy as np
from sklearn.metrics import auc
from sklearn.utils import check_random_state
__all__ = ['negative_mv_auc_score', 'mv_curve']

# Uniform sampling of the bounding hypercube degrades quickly with
# dimension; warn above this many features (see mv_curve).
MAX_N_FEATURES = 8
def negative_mv_auc_score(
    detector, X=None, y=None, interval=(0.9, 0.999),
    n_uniform_samples=10000
):
    """Compute the opposite of the area under the Mass-Volume (MV) curve.

    Parameters
    ----------
    detector : object
        Detector.  Its ``random_state`` attribute seeds the uniform
        sampling performed by ``mv_curve``.

    X : array-like of shape (n_samples, n_features), default None
        Data.

    y : ignored

    interval : tuple, default (0.9, 0.999)
        Interval of probabilities.

    n_uniform_samples : int, default 10000
        Number of samples which are drawn from the uniform distribution over
        the hypercube enclosing the data.

    Returns
    -------
    score : float
        Opposite of the area under the MV curve.

    References
    ----------
    .. [#goix16] Goix, N.,
        "How to evaluate the quality of unsupervised anomaly detection
        algorithms?,"
        In ICML Anomaly Detection Workshop, 2016.
    """
    mass, volume, _ = mv_curve(
        detector, X, n_uniform_samples=n_uniform_samples
    )
    is_in_range = (interval[0] <= mass) & (mass <= interval[1])
    # Negate the area: a smaller MV-curve area means a better detector, so
    # "greater is better" holds for this score (the old code forgot the
    # minus sign despite the function's name).
    # NOTE(review): `reorder` was removed from sklearn.metrics.auc in
    # scikit-learn 0.22 -- confirm the pinned sklearn version.
    return -auc(mass[is_in_range], volume[is_in_range], reorder=True)
def mv_curve(detector, X=None, n_uniform_samples=10000):
    """Compute mass-volume pairs for different offsets.

    Parameters
    ----------
    detector : object
        Detector.  Must expose ``_n_features``, ``random_state``,
        ``data_min_``, ``data_max_``, ``data_volume_`` and an
        ``anomaly_score`` method (higher score = more anomalous).

    X : array-like of shape (n_samples, n_features), default None
        Data.

    n_uniform_samples : int, default 10000
        Number of samples which are drawn from the uniform distribution over
        the hypercube enclosing the data.

    Returns
    -------
    mass : array-like

    volume : array-like

    offsets : array-like

    References
    ----------
    .. [#goix16] Goix, N.,
        "How to evaluate the quality of unsupervised anomaly detection
        algorithms?,"
        In ICML Anomaly Detection Workshop, 2016.
    """
    # Monte-Carlo estimate of the Lebesgue measure of the level set
    # {x : score(x) >= offset}: fraction of uniform samples above the
    # offset, scaled by the volume of the enclosing hypercube.
    def lebesgue_measure(offset, score_uniform_samples, data_volume):
        return np.mean(score_uniform_samples >= offset) * data_volume

    # In high dimension the uniform sample no longer covers the hypercube
    # usefully, so the volume estimate becomes unreliable.
    if detector._n_features > MAX_N_FEATURES:
        warn(
            f'X is expected to have {MAX_N_FEATURES} or less features '
            f'but had {detector._n_features} features'
        )

    rnd = check_random_state(detector.random_state)

    U = rnd.uniform(
        low = detector.data_min_,
        high = detector.data_max_,
        size = (n_uniform_samples, detector._n_features)
    )

    # Negate so that larger values mean "more normal"; offsets then act as
    # thresholds on normality scores.
    score_samples = -detector.anomaly_score(X)
    score_uniform_samples = -detector.anomaly_score(U)
    # np.linspace default: 50 evenly spaced mass values in [0, 1].
    mass = np.linspace(0., 1.)
    # Offset achieving each mass: the (100*(1-m))-th percentile of the data
    # scores, so a fraction m of the data scores lies at or above it.
    offsets = np.percentile(score_samples, 100. * (1. - mass))
    volume = np.vectorize(
        lebesgue_measure, excluded=[1, 2]
    )(offsets, score_uniform_samples, detector.data_volume_)

    return mass, volume, offsets
fix negative_mv_auc_score function
from warnings import warn
import numpy as np
from sklearn.metrics import auc
from sklearn.utils import check_random_state
__all__ = ['negative_mv_auc_score', 'mv_curve']

# Uniform sampling of the bounding hypercube degrades quickly with
# dimension; warn above this many features (see mv_curve).
MAX_N_FEATURES = 8
def negative_mv_auc_score(
    detector, X=None, y=None, interval=(0.9, 0.999),
    n_uniform_samples=10000
):
    """Compute the opposite of the area under the Mass-Volume (MV) curve.

    Parameters
    ----------
    detector : object
        Detector.  Its ``random_state`` attribute seeds the uniform
        sampling performed by ``mv_curve``.

    X : array-like of shape (n_samples, n_features), default None
        Data.

    y : ignored

    interval : tuple, default (0.9, 0.999)
        Interval of probabilities.

    n_uniform_samples : int, default 10000
        Number of samples which are drawn from the uniform distribution over
        the hypercube enclosing the data.

    Returns
    -------
    score : float
        Opposite of the area under the MV curve.

    References
    ----------
    .. [#goix16] Goix, N.,
        "How to evaluate the quality of unsupervised anomaly detection
        algorithms?,"
        In ICML Anomaly Detection Workshop, 2016.
    """
    mass, volume, _ = mv_curve(
        detector, X, n_uniform_samples=n_uniform_samples
    )
    is_in_range = (interval[0] <= mass) & (mass <= interval[1])
    # Negated so "greater is better": a smaller MV-curve area means a
    # better detector.
    # NOTE(review): `reorder` was removed from sklearn.metrics.auc in
    # scikit-learn 0.22 -- confirm the pinned sklearn version.
    return -auc(mass[is_in_range], volume[is_in_range], reorder=True)
def mv_curve(detector, X=None, n_uniform_samples=10000):
    """Compute mass-volume pairs for different offsets.

    Parameters
    ----------
    detector : object
        Detector.  Must expose ``_n_features``, ``random_state``,
        ``data_min_``, ``data_max_``, ``data_volume_`` and an
        ``anomaly_score`` method.

    X : array-like of shape (n_samples, n_features), default None
        Data.

    n_uniform_samples : int, default 10000
        Number of samples which are drawn from the uniform distribution over
        the hypercube enclosing the data.

    Returns
    -------
    mass : array-like

    volume : array-like

    offsets : array-like

    References
    ----------
    .. [#goix16] Goix, N.,
        "How to evaluate the quality of unsupervised anomaly detection
        algorithms?,"
        In ICML Anomaly Detection Workshop, 2016.
    """
    # Volume estimation via uniform sampling is only meaningful in low
    # dimension.
    if detector._n_features > MAX_N_FEATURES:
        warn(
            f'X is expected to have {MAX_N_FEATURES} or less features '
            f'but had {detector._n_features} features'
        )

    rng = check_random_state(detector.random_state)
    uniform_samples = rng.uniform(
        low=detector.data_min_,
        high=detector.data_max_,
        size=(n_uniform_samples, detector._n_features)
    )

    # Flip the sign so that larger values mean "more normal".
    data_scores = -detector.anomaly_score(X)
    uniform_scores = -detector.anomaly_score(uniform_samples)

    # 50 mass levels in [0, 1]; the offset for mass m is the percentile of
    # the data scores above which a fraction m of the data lies.
    mass = np.linspace(0., 1.)
    offsets = np.percentile(data_scores, 100. * (1. - mass))

    # Monte-Carlo Lebesgue measure of each level set {score >= offset}.
    volume = np.array([
        np.mean(uniform_scores >= offset) * detector.data_volume_
        for offset in offsets
    ])

    return mass, volume, offsets
|
import visa
from re import match
from numpy import array
class StanfordSR785:
def __init__(self, gpib):
''' Initialize device '''
error = False
self.__type = "dynamic signal analyzer"
try:
# Needs terminator char, manual p422
self. device = visa.instrument("GPIB::%d" %(gpib), term_chars = visa.LF)
if not self.__TestConnection():
error = True
except visa.VisaIOError:
error = True
if error:
print "Exception: No %s on this gpib channel: %d" %(self.__type, gpib)
return None
else:
print "Success: %s found" %(self.__type)
def __TestConnection(self):
''' Test if we have the right device by matching id number '''
id = self.device.ask("*IDN?")
if (match(".*,SR785,.*", id)):
found = True
else:
found = False
return found
def reset(self):
''' Reset and clear device '''
self.device.write("*RST")
self.device.write("*CLS")
def write(self, command):
''' Connect to VISA write '''
self.device.write(command)
def read(self):
''' Connect to VISA read '''
return self.device.read()
def ask(self, command):
''' Connect to VISA ask '''
return self.device.ask(command)
def display_status_word(self, word):
codes = ["NEWA",
"AVGA",
"STLA",
"LIMA",
"SSA",
"WFA",
"WFD",
"unused",
"NEWB",
"AVGB",
"STLB",
"LIMB",
"SSB",
"WFB",
"WFB",
"unused",
]
x = 1
herecode = []
for i in range(16):
y = word & x
if y > 0:
herecode += [codes[i]]
x = x << 1
return bin(word), herecode
def getdata(self, channel=2):
if channel == 0 or channel == 2:
data1 = [float(num) for num in self.ask("DSPY ? 0").split(',')]
data1 = array(data1[0:-1])
if channel == 1 or channel == 2:
data2 = [float(num) for num in self.ask("DSPY ? 1").split(',')]
data2 = array(data2[0:-1])
if channel == 0:
out = data1
elif channel == 1:
out = data2
else:
out = array(zip(data1, data2))
return out
if __name__ == "__main__":
    # Demo: run a swept-sine measurement from `start` to `stop` Hz on the
    # analyzer at GPIB address 18, wait for both displays to finish their
    # sweep, then plot display 0.
    import numpy as np
    from time import sleep
    import matplotlib
    matplotlib.rcParams['backend'] = 'wx'
    import matplotlib.pylab as pl

    device = StanfordSR785(18)
#    device.write("CLS; FSPN 0,200; STRT");
#    for i in range(40):
#        print device.ask("DSPS?")
#        sleep(1)
    start, stop = 1, 200
    # SSTR/SSTP: sweep start/stop frequency; the leading 2 presumably
    # addresses both displays -- confirm against the SR785 manual.
    device.write("SSTR 2, %d" %(start))
    device.write("SSTP 2, %d" %(stop))
    print device.ask("SSTR ? 0")
    print device.ask("SSTP ? 0")
    # SRPT 2,0: sweep repeat off -- presumably single-shot; confirm.
    device.write("SRPT 2,0")
    device.ask("DSPS?")
    device.write("STRT")
    dataa, datab = False, False
    # Poll the display status word until both sweeps report done
    # (SSA for display A, SSB for display B).
    while not dataa or not datab:
        res = device.display_status_word(int(device.ask("DSPS ?")))
        print res
        codes = res[1]
        if 'SSA' in codes:
            dataa = True
        if 'SSB' in codes:
            datab = True
#        if dataa and datab:
#            break
        sleep(1)
#    print device.ask("ACTD ?")
#    print device.ask("DTRD ? 2,0")
#    device.write("NOTE 0,1,0,50,50,Hello")
#    print device.ask("DUMP")
    data = [float(num) for num in device.ask("DSPY ? 0").split(',')]
    # Drop the trailing element produced by the terminating comma.
    data = data[0:-1]
    pts = len(data)
    # this is incorrect: the frequency axis is reconstructed assuming a
    # log-spaced sweep rather than read back from the instrument.
    f = np.logspace(np.log10(start), np.log10(stop), pts)
    pl.semilogx(f, data)
    pl.show()
sr785: raise error instead of printing stuff
import visa
from re import match
from numpy import array


class StanfordSR785:
    """GPIB/VISA driver for a Stanford Research Systems SR785 dynamic
    signal analyzer."""

    def __init__(self, gpib):
        ''' Initialize device.

        Raises IOError when no SR785 answers on GPIB address *gpib*.
        '''
        error = False
        self.__type = "dynamic signal analyzer"
        try:
            # Needs terminator char, manual p422
            self. device = visa.instrument("GPIB::%d" %(gpib), term_chars = visa.LF)
            if not self.__TestConnection():
                error = True
        except visa.VisaIOError:
            error = True
        if error:
            raise IOError("Exception: No %s on this gpib channel: %d" %(self.__type, gpib))
        else:
            print "Success: %s found" %(self.__type)

    def __TestConnection(self):
        ''' Test if we have the right device by matching id number '''
        id = self.device.ask("*IDN?")
        if (match(".*,SR785,.*", id)):
            found = True
        else:
            found = False
        return found

    def reset(self):
        ''' Reset and clear device '''
        self.device.write("*RST")
        self.device.write("*CLS")

    def write(self, command):
        ''' Connect to VISA write '''
        self.device.write(command)

    def read(self):
        ''' Connect to VISA read '''
        return self.device.read()

    def ask(self, command):
        ''' Connect to VISA ask '''
        return self.device.ask(command)

    def display_status_word(self, word):
        ''' Decode the DSPS status word into (binary string, active flags). '''
        # Bit names of the display status word, LSB first.
        # NOTE(review): "WFB" appears twice (bits 13 and 14); one entry is
        # probably a typo -- confirm against the SR785 manual before
        # relying on bit 14.
        codes = ["NEWA",
                 "AVGA",
                 "STLA",
                 "LIMA",
                 "SSA",
                 "WFA",
                 "WFD",
                 "unused",
                 "NEWB",
                 "AVGB",
                 "STLB",
                 "LIMB",
                 "SSB",
                 "WFB",
                 "WFB",
                 "unused",
                 ]
        x = 1
        herecode = []
        # Test each of the 16 bits; collect the names of the set ones.
        for i in range(16):
            y = word & x
            if y > 0:
                herecode += [codes[i]]
            x = x << 1
        return bin(word), herecode

    def getdata(self, channel=2):
        ''' Fetch display data: channel 0 -> display A, 1 -> display B,
        2 (default) -> both, zipped into an (N, 2) array.

        NOTE(review): any other channel value raises NameError because
        neither data1 nor data2 is bound -- consider validating channel.
        '''
        if channel == 0 or channel == 2:
            data1 = [float(num) for num in self.ask("DSPY ? 0").split(',')]
            # Drop the trailing element produced by the terminating comma.
            data1 = array(data1[0:-1])
        if channel == 1 or channel == 2:
            data2 = [float(num) for num in self.ask("DSPY ? 1").split(',')]
            data2 = array(data2[0:-1])
        if channel == 0:
            out = data1
        elif channel == 1:
            out = data2
        else:
            out = array(zip(data1, data2))
        return out
if __name__ == "__main__":
    # Demo: run a swept-sine measurement from `start` to `stop` Hz on the
    # analyzer at GPIB address 18, wait for both displays to finish their
    # sweep, then plot display 0.
    import numpy as np
    from time import sleep
    import matplotlib
    matplotlib.rcParams['backend'] = 'wx'
    import matplotlib.pylab as pl

    device = StanfordSR785(18)
#    device.write("CLS; FSPN 0,200; STRT");
#    for i in range(40):
#        print device.ask("DSPS?")
#        sleep(1)
    start, stop = 1, 200
    # SSTR/SSTP: sweep start/stop frequency; the leading 2 presumably
    # addresses both displays -- confirm against the SR785 manual.
    device.write("SSTR 2, %d" %(start))
    device.write("SSTP 2, %d" %(stop))
    print device.ask("SSTR ? 0")
    print device.ask("SSTP ? 0")
    # SRPT 2,0: sweep repeat off -- presumably single-shot; confirm.
    device.write("SRPT 2,0")
    device.ask("DSPS?")
    device.write("STRT")
    dataa, datab = False, False
    # Poll the display status word until both sweeps report done
    # (SSA for display A, SSB for display B).
    while not dataa or not datab:
        res = device.display_status_word(int(device.ask("DSPS ?")))
        print res
        codes = res[1]
        if 'SSA' in codes:
            dataa = True
        if 'SSB' in codes:
            datab = True
#        if dataa and datab:
#            break
        sleep(1)
#    print device.ask("ACTD ?")
#    print device.ask("DTRD ? 2,0")
#    device.write("NOTE 0,1,0,50,50,Hello")
#    print device.ask("DUMP")
    data = [float(num) for num in device.ask("DSPY ? 0").split(',')]
    # Drop the trailing element produced by the terminating comma.
    data = data[0:-1]
    pts = len(data)
    # this is incorrect: the frequency axis is reconstructed assuming a
    # log-spaced sweep rather than read back from the instrument.
    f = np.logspace(np.log10(start), np.log10(stop), pts)
    pl.semilogx(f, data)
    pl.show()
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from osv import osv, fields
from osv.orm import except_orm
import urlparse
import os
import pooler
from content_index import content_index
import netsvc
import StringIO
import random
import string
from psycopg2 import Binary
from tools import config
import tools
from tools.translate import _
def random_name():
    """Return a random 10-character ASCII-letter name for filestore entries.

    Note: the `random` module is not cryptographically secure; this is
    only meant to avoid filename collisions, not to produce secrets.
    """
    # Do not reseed on every call: `random` seeds itself at interpreter
    # start-up, and the old per-call random.seed() was redundant (and,
    # with a time-based fallback seed, could make two calls in the same
    # clock tick produce the same name).
    return "".join(random.choice(string.ascii_letters) for _ in range(10))
# Unsupported WebDAV Commands:
#     label
#     search
#     checkin
#     checkout
#     propget
#     propset
#
# An object that represents a URI
#     path: the uri of the object
#     content: the Content it belongs to (_print.pdf)
#     type: content or collection
#         content: object = res.partner
#         collection: object = directory, object2 = res.partner
#         file: object = ir.attachment
#     root: if we are at the first directory of a resource
#
# Characters that are illegal in generated node names, mapped to a
# replacement token ('/' maps to '__' so it can be reversed later).
# NOTE(review): the tokens are built with hash(), which is stable in
# Python 2 but randomized per-process in Python 3 -- confirm this module
# only ever runs under Python 2.
INVALID_CHARS={'*':str(hash('*')), '|':str(hash('|')) , "\\":str(hash("\\")), '/':'__', ':':str(hash(':')), '"':str(hash('"')), '<':str(hash('<')) , '>':str(hash('>')) , '?':str(hash('?'))}
class node_class(object):
    """In-memory node of the virtual document tree exposed over FTP/WebDAV.

    A node wraps either the database root ('database'), a
    document.directory or mapped resource record ('collection'), a
    virtual file ('content'), or an ir.attachment ('file').
    """

    # NOTE(review): mutable default `context={}` is shared across calls;
    # callers in this file always pass context explicitly -- confirm no
    # external caller relies on the default.
    def __init__(self, cr, uid, path, object, object2=False, context={}, content=False, type='collection', root=False):
        self.cr = cr
        self.uid = uid
        self.path = path
        self.object = object
        self.object2 = object2
        self.context = context
        self.content = content
        self.type=type
        self.root=root

    def _file_get(self, nodename=False):
        """Return child nodes that are attachments or virtual contents of
        this node; when *nodename* is given, only matching ones."""
        if not self.object:
            return []
        pool = pooler.get_pool(self.cr.dbname)
        fobj = pool.get('ir.attachment')
        res2 = []
        where = []
        if self.object2:
            # Attachments linked to the mapped resource record.
            where.append( ('res_model','=',self.object2._name) )
            where.append( ('res_id','=',self.object2.id) )
        else:
            # Attachments stored directly under the static directory.
            where.append( ('parent_id','=',self.object.id) )
            where.append( ('res_id','=',False) )
        if nodename:
            where.append( (fobj._rec_name,'=',nodename) )
        # Virtual contents: one node per document.directory.content entry.
        for content in self.object.content_ids:
            if self.object2 or not content.include_name:
                if content.include_name:
                    test_nodename = self.object2.name + (content.suffix or '') + (content.extension or '')
                else:
                    test_nodename = (content.suffix or '') + (content.extension or '')
                # NOTE(review): str.find() returns -1 (truthy) when '/' is
                # absent, so the replace runs unless '/' is the *first*
                # character; probably meant `!= -1`. Harmless, since the
                # replace is then a no-op.
                if test_nodename.find('/'):
                    test_nodename=test_nodename.replace('/', '_')
                path = self.path+'/'+test_nodename
                if not nodename:
                    n = node_class(self.cr, self.uid,path, self.object2, False, context=self.context, content=content, type='content', root=False)
                    res2.append( n)
                else:
                    if nodename == test_nodename:
                        n = node_class(self.cr, self.uid, path, self.object2, False, context=self.context, content=content, type='content', root=False)
                        res2.append(n)
        ids = fobj.search(self.cr, self.uid, where+[ ('parent_id','=',self.object and self.object.id or False) ])
        if self.object and self.root and (self.object.type=='ressource'):
            # At a resource root, also pick up attachments with no parent
            # directory.
            ids += fobj.search(self.cr, self.uid, where+[ ('parent_id','=',False) ])
        res = fobj.browse(self.cr, self.uid, ids, context=self.context)
        return map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name, x, False, context=self.context, type='file', root=False), res) + res2

    def get_translation(self,value,lang):
        """Map *value* back through ir.translation records of type 'model'.

        Presumably a reverse translation: given a translated display value,
        look up the source record's field value -- confirm against callers.
        """
        result = value
        pool = pooler.get_pool(self.cr.dbname)
        translation_ids = pool.get('ir.translation').search(self.cr, self.uid, [('value','=',value),('lang','=',lang),('type','=','model')])
        if len(translation_ids):
            tran_id = translation_ids[0]
            translation = pool.get('ir.translation').read(self.cr, self.uid, tran_id, ['res_id','name'])
            res_model,field_name = tuple(translation['name'].split(','))
            res_id = translation['res_id']
            res = pool.get(res_model).read(self.cr, self.uid, res_id, [field_name])
            if res:
                result = res[field_name]
        return result

    def directory_list_for_child(self,nodename,parent=False):
        """Browse the document.directory records that are children of this
        node (optionally restricted to *nodename*)."""
        pool = pooler.get_pool(self.cr.dbname)
        where = []
        if nodename:
            nodename = self.get_translation(nodename, self.context['lang'])
            where.append(('name','=',nodename))
        if (self.object and self.object.type=='directory') or not self.object2:
            where.append(('parent_id','=',self.object and self.object.id or False))
        else:
            where.append(('parent_id','=',False))
        if self.object:
            where.append(('ressource_parent_type_id','=',self.object.ressource_type_id.id))
        else:
            where.append(('ressource_parent_type_id','=',False))
        # Template directories (ressource_id 0) plus ones bound to the
        # specific mapped record, if any.
        ids = pool.get('document.directory').search(self.cr, self.uid, where+[('ressource_id','=',0)])
        if self.object2:
            ids += pool.get('document.directory').search(self.cr, self.uid, where+[('ressource_id','=',self.object2.id)])
        res = pool.get('document.directory').browse(self.cr, self.uid, ids, self.context)
        return res

    def _child_get(self, nodename=False):
        """Return child collection/resource nodes (not files/contents)."""
        if self.type not in ('collection','database'):
            return []
        res = self.directory_list_for_child(nodename)
        result= map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name, x, x.type=='directory' and self.object2 or False, context=self.context, root=self.root), res)
        if self.type=='database':
            # At the root, loose attachments (no directory, no record) are
            # also listed.
            pool = pooler.get_pool(self.cr.dbname)
            fobj = pool.get('ir.attachment')
            vargs = [('parent_id','=',False),('res_id','=',False)]
            if nodename:
                vargs.append(('name','=',nodename))
            file_ids=fobj.search(self.cr,self.uid,vargs)
            res = fobj.browse(self.cr, self.uid, file_ids, context=self.context)
            result +=map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name, x, False, context=self.context, type='file', root=self.root), res)
        if self.type=='collection' and self.object.type=="ressource":
            # NOTE(review): eval() of a stored domain string; the domain is
            # admin-configured, but this is still code execution from data.
            where = self.object.domain and eval(self.object.domain, {'active_id':self.root}) or []
            pool = pooler.get_pool(self.cr.dbname)
            obj = pool.get(self.object.ressource_type_id.model)
            # Prefer a 'dirname' field as the node name, else 'name'.
            if len(obj.fields_get(self.cr, self.uid, ['dirname'])):
                _dirname_field = 'dirname'
            else:
                _dirname_field = 'name'
            name_for = obj._name.split('.')[-1]
            if nodename and nodename.find(name_for) == 0 :
                # Synthetic name of the form '<model-suffix><id>'.
                id = int(nodename.replace(name_for,''))
                where.append(('id','=',id))
            elif nodename:
                # NOTE(review): same -1-is-truthy find() pattern as in
                # _file_get; the replaces are effectively unconditional.
                if nodename.find('__') :
                    nodename=nodename.replace('__','/')
                for invalid in INVALID_CHARS:
                    if nodename.find(INVALID_CHARS[invalid]) :
                        nodename=nodename.replace(INVALID_CHARS[invalid],invalid)
                nodename = self.get_translation(nodename, self.context['lang'])
                where.append((_dirname_field,'=',nodename))
            if self.object.ressource_tree:
                # Tree-structured resources: children are records whose
                # parent field points at the current record.
                if obj._parent_name in obj.fields_get(self.cr,self.uid):
                    where.append((obj._parent_name,'=',self.object2 and self.object2.id or False))
                    ids = obj.search(self.cr, self.uid, where)
                    res = obj.browse(self.cr, self.uid, ids,self.context)
                    result+= map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name.replace('/','__'), self.object, x, context=self.context, root=x.id), res)
                    return result
                else :
                    if self.object2:
                        return result
            else:
                if self.object2:
                    return result
            # Flat resource listing: one node per matching record, with
            # invalid characters in names replaced by their tokens.
            ids = obj.search(self.cr, self.uid, where)
            res = obj.browse(self.cr, self.uid, ids,self.context)
            for r in res:
                if len(obj.fields_get(self.cr, self.uid, [_dirname_field])):
                    r.name = eval('r.'+_dirname_field)
                else:
                    r.name = False
                if not r.name:
                    r.name = name_for + '%d'%r.id
                for invalid in INVALID_CHARS:
                    if r.name.find(invalid) :
                        r.name = r.name.replace(invalid,INVALID_CHARS[invalid])
            result2 = map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name.replace('/','__'), self.object, x, context=self.context, root=x.id), res)
            if result2:
                if self.object.ressource_tree:
                    result += result2
                else:
                    result = result2
        return result

    def children(self):
        """All child nodes: sub-collections plus files/virtual contents."""
        return self._child_get() + self._file_get()

    def child(self, name):
        """Return the single child named *name*, or None."""
        res = self._child_get(name)
        if res:
            return res[0]
        res = self._file_get(name)
        if res:
            return res[0]
        return None

    def path_get(self):
        """Return self.path without its leading '/'."""
        path = self.path
        if self.path[0]=='/':
            path = self.path[1:]
        return path
class document_directory(osv.osv):
    """A directory of the virtual document tree: either a static folder or
    a template mapped onto database records ('ressource' type)."""
    _name = 'document.directory'
    _description = 'Document directory'
    _columns = {
        'name': fields.char('Name', size=64, required=True, select=1, translate=True),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
        'file_type': fields.char('Content Type', size=32),
        'domain': fields.char('Domain', size=128, help="Use a domain if you want to apply an automatic filter on visible resources."),
        'user_id': fields.many2one('res.users', 'Owner'),
        'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'),
        'parent_id': fields.many2one('document.directory', 'Parent Item'),
        'child_ids': fields.one2many('document.directory', 'parent_id', 'Children'),
        'file_ids': fields.one2many('ir.attachment', 'parent_id', 'Files'),
        'content_ids': fields.one2many('document.directory.content', 'directory_id', 'Virtual Files'),
        'type': fields.selection([('directory','Static Directory'),('ressource','Other Resources')], 'Type', required=True),
        'ressource_type_id': fields.many2one('ir.model', 'Directories Mapped to Objects',
            help="Select an object here and Open ERP will create a mapping for each of these " \
                "objects, using the given domain, when browsing through FTP."),
        'ressource_parent_type_id': fields.many2one('ir.model', 'Parent Model',
            help="If you put an object here, this directory template will appear bellow all of these objects. " \
                "Don't put a parent directory if you select a parent model."),
        'ressource_id': fields.integer('Resource ID'),
        'ressource_tree': fields.boolean('Tree Structure',
            help="Check this if you want to use the same tree structure as the object selected in the system."),
    }
    _defaults = {
        'user_id': lambda self,cr,uid,ctx: uid,
        'domain': lambda self,cr,uid,ctx: '[]',
        'type': lambda *args: 'directory',
        'ressource_id': lambda *a: 0
    }
    _sql_constraints = [
        ('dirname_uniq', 'unique (name,parent_id,ressource_id,ressource_parent_type_id)', 'The directory name must be unique !')
    ]

    def get_resource_path(self,cr,uid,dir_id,res_model,res_id):
        """Build the FTP URL of record *res_id* as seen under *dir_id*."""
        # this method will be used in process module
        # to be need test and Improvement if resource dir has parent resource (link resource)
        path=[]
        def _parent(dir_id,path):
            # Walk up static parents, collecting directory names root-first.
            parent=self.browse(cr,uid,dir_id)
            if parent.parent_id and not parent.ressource_parent_type_id:
                _parent(parent.parent_id.id,path)
                path.append(parent.name)
            else:
                path.append(parent.name)
                return path
        directory=self.browse(cr,uid,dir_id)
        model_ids=self.pool.get('ir.model').search(cr,uid,[('model','=',res_model)])
        if directory:
            _parent(dir_id,path)
            path.append(self.pool.get(directory.ressource_type_id.model).browse(cr,uid,res_id).name)
            user=self.pool.get('res.users').browse(cr,uid,uid)
            # NOTE(review): embeds the user's login and password in the
            # returned URL in clear text.
            return "ftp://%s:%s@localhost:%s/%s/%s"%(user.login,user.password,config.get('ftp_server_port',8021),cr.dbname,'/'.join(path))
        return False

    def _check_recursion(self, cr, uid, ids):
        """Constraint: directories must not form a parent_id cycle.

        Walks up at most 100 levels; returns False if a cycle (or a chain
        deeper than 100) is found.
        """
        level = 100
        while len(ids):
            cr.execute('select distinct parent_id from document_directory where id in ('+','.join(map(str,ids))+')')
            ids = filter(None, map(lambda x:x[0], cr.fetchall()))
            if not level:
                return False
            level -= 1
        return True

    _constraints = [
        (_check_recursion, 'Error! You can not create recursive Directories.', ['parent_id'])
    ]

    def __init__(self, *args, **kwargs):
        # _cache maps tuple(uri) -> cached node description for get_object().
        # NOTE(review): `res` is unused; __init__ implicitly returns None.
        res = super(document_directory, self).__init__(*args, **kwargs)
        self._cache = {}

    def onchange_content_id(self, cr, uid, ids, ressource_type_id):
        # No-op onchange handler kept for the form view.
        return {}

    def _get_childs(self, cr, uid, node, nodename=False, context={}):
        # NOTE(review): looks dead/broken -- it calls self.get_translation
        # (defined on node_class, not on this model), reads self.context,
        # and tests the *builtin* `object`; superseded by
        # node_class._child_get (see the commented-out call in get_childs).
        where = []
        if nodename:
            nodename = self.get_translation(nodename, self.context['lang'])
            where.append(('name','=',nodename))
        if object:
            where.append(('parent_id','=',object.id))
        ids = self.search(cr, uid, where, context)
        return self.browse(cr, uid, ids, context), False

    """
    PRE:
        uri: of the form "Sales Order/SO001"
    PORT:
        uri
        object: the object.directory or object.directory.content
        object2: the other object linked (if object.directory.content)
    """
    def get_object(self, cr, uid, uri, context={}):
        """Resolve *uri* (a sequence of path segments) to a node_class
        node, or False when a segment cannot be found."""
        lang = context.get('lang',False)
        if not lang:
            user = self.pool.get('res.users').browse(cr, uid, uid)
            lang = user.context_lang
        context['lang'] = lang
        if not uri:
            return node_class(cr, uid, '', False, context=context, type='database')
        turi = tuple(uri)
        # The cache *read* path is deliberately disabled ("if False");
        # the cache is still written at the bottom of this method.
        if False and (turi in self._cache):
            (path, oo, oo2, context, content,type,root) = self._cache[turi]
            if oo:
                object = self.pool.get(oo[0]).browse(cr, uid, oo[1], context)
            else:
                object = False
            if oo2:
                object2 = self.pool.get(oo2[0]).browse(cr, uid, oo2[1], context)
            else:
                object2 = False
            node = node_class(cr, uid, '/', False, context=context, type='database')
            return node
        # Walk the tree from the database root, one segment at a time.
        node = node_class(cr, uid, '/', False, context=context, type='database')
        for path in uri[:]:
            if path:
                node = node.child(path)
                if not node:
                    return False
        oo = node.object and (node.object._name, node.object.id) or False
        oo2 = node.object2 and (node.object2._name, node.object2.id) or False
        self._cache[turi] = (node.path, oo, oo2, node.context, node.content,node.type,node.root)
        return node

    def get_childs(self, cr, uid, uri, context={}):
        """Return the child paths of the node at *uri* (the root path
        itself when *uri* is empty)."""
        node = self.get_object(cr, uid, uri, context)
        if uri:
            children = node.children()
        else:
            children= [node]
        result = map(lambda node: node.path_get(), children)
        #childs,object2 = self._get_childs(cr, uid, object, False, context)
        #result = map(lambda x: urlparse.urljoin(path+'/',x.name), childs)
        return result

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a directory, suffixing " (copy)" so the unique-name
        SQL constraint is not violated."""
        if not default:
            default ={}
        name = self.read(cr, uid, [id])[0]['name']
        default.update({'name': name+ " (copy)"})
        return super(document_directory,self).copy(cr,uid,id,default,context)

    def _check_duplication(self, cr, uid,vals,ids=[],op='create'):
        """Return False when another directory already uses the same
        (name, parent, resource-parent-type, resource) combination."""
        name=vals.get('name',False)
        parent_id=vals.get('parent_id',False)
        ressource_parent_type_id=vals.get('ressource_parent_type_id',False)
        ressource_id=vals.get('ressource_id',0)
        if op=='write':
            for directory in self.browse(cr,uid,ids):
                # Fields not present in vals keep their current value.
                if not name:
                    name=directory.name
                if not parent_id:
                    parent_id=directory.parent_id and directory.parent_id.id or False
                if not ressource_parent_type_id:
                    ressource_parent_type_id=directory.ressource_parent_type_id and directory.ressource_parent_type_id.id or False
                if not ressource_id:
                    ressource_id=directory.ressource_id and directory.ressource_id or 0
                res=self.search(cr,uid,[('id','<>',directory.id),('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
                if len(res):
                    return False
        if op=='create':
            res=self.search(cr,uid,[('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
            if len(res):
                return False
        return True

    def write(self, cr, uid, ids, vals, context=None):
        if not self._check_duplication(cr,uid,vals,ids,op='write'):
            raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
        return super(document_directory,self).write(cr,uid,ids,vals,context=context)

    def create(self, cr, uid, vals, context=None):
        if not self._check_duplication(cr,uid,vals):
            raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
        # Reject '/', '@', '$' and '#' in names; find()+1 turns the
        # "not found" -1 into falsy 0.
        if vals.get('name',False) and (vals.get('name').find('/')+1 or vals.get('name').find('@')+1 or vals.get('name').find('$')+1 or vals.get('name').find('#')+1) :
            raise osv.except_osv(_('ValidateError'), _('Directory name contains special characters!'))
        return super(document_directory,self).create(cr, uid, vals, context)
document_directory()
class document_directory_node(osv.osv):
    """Extend workflow process nodes with a link to a document directory."""
    _inherit = 'process.node'
    _columns = {
        # The linked directory; cleared (not cascaded) when it is deleted.
        'directory_id': fields.many2one('document.directory', 'Document directory', ondelete="set null"),
    }
document_directory_node()
class document_directory_content_type(osv.osv):
    """Registry of file types (label + extension) available for virtual
    directory contents; feeds the selection in document.directory.content."""
    _name = 'document.directory.content.type'
    _description = 'Directory Content Type'
    _columns = {
        'name': fields.char('Content Type', size=64, required=True),
        'code': fields.char('Extension', size=4),
        # Inactive types are hidden from the extension selection.
        'active': fields.boolean('Active'),
    }
    _defaults = {
        'active': lambda *args: 1
    }
document_directory_content_type()
class document_directory_content(osv.osv):
    """Virtual file inside a directory, rendered on the fly (e.g. the PDF
    of a report for the record the directory is mapped to)."""
    _name = 'document.directory.content'
    _description = 'Directory Content'
    _order = "sequence"
    def _extension_get(self, cr, uid, context={}):
        # Selection values come from the active content types.
        cr.execute('select code,name from document_directory_content_type where active')
        res = cr.fetchall()
        return res
    _columns = {
        'name': fields.char('Content Name', size=64, required=True),
        'sequence': fields.integer('Sequence', size=16),
        'suffix': fields.char('Suffix', size=16),
        'report_id': fields.many2one('ir.actions.report.xml', 'Report'),
        'extension': fields.selection(_extension_get, 'Document Type', required=True, size=4),
        'include_name': fields.boolean('Include Record Name', help="Check this field if you want that the name of the file start by the record name."),
        'directory_id': fields.many2one('document.directory', 'Directory'),
    }
    _defaults = {
        'extension': lambda *args: '.pdf',
        'sequence': lambda *args: 1,
        'include_name': lambda *args: 1,
    }
    def process_write_pdf(self, cr, uid, node, context={}):
        # Virtual PDFs are generated on read; writes are accepted and
        # silently discarded.
        return True
    def process_read_pdf(self, cr, uid, node, context={}):
        """Render the content's report for node.object and return the PDF
        as a file-like StringIO object."""
        report = self.pool.get('ir.actions.report.xml').browse(cr, uid, node.content.report_id.id)
        srv = netsvc.LocalService('report.'+report.report_name)
        pdf,pdftype = srv.create(cr, uid, [node.object.id], {}, {})
        s = StringIO.StringIO(pdf)
        # NOTE(review): s.name is set to the node object itself, not a
        # string -- confirm downstream code only uses it opaquely.
        s.name = node
        return s
document_directory_content()
class ir_action_report_xml(osv.osv):
    """Extend report actions with a computed, searchable model_id field."""
    _name="ir.actions.report.xml"
    _inherit ="ir.actions.report.xml"
    def _model_get(self, cr, uid, ids, name, arg, context):
        # Map each report's 'model' string to the matching ir.model id.
        res = {}
        model_pool = self.pool.get('ir.model')
        for data in self.read(cr,uid,ids,['model']):
            model = data.get('model',False)
            if model:
                model_id =model_pool.search(cr,uid,[('model','=',model)])
                if model_id:
                    res[data.get('id')] = model_id[0]
                else:
                    res[data.get('id')] = False
        return res
    def _model_search(self, cr, uid, obj, name, args):
        # Translate a search on model_id into a search on the underlying
        # 'model' string field.
        if not len(args):
            return []
        model_id= args[0][2]
        if not model_id:
            return []
        model = self.pool.get('ir.model').read(cr,uid,[model_id])[0]['model']
        report_id = self.search(cr,uid,[('model','=',model)])
        if not report_id:
            # NOTE(review): id compared against the *string* '0' -- acts
            # as an always-empty domain but looks accidental.
            return [('id','=','0')]
        return [('id','in',report_id)]
    _columns={
        'model_id' : fields.function(_model_get,fnct_search=_model_search,method=True,string='Model Id'),
    }
ir_action_report_xml()
def create_directory(path):
    """Create a randomly-named sub-directory under *path* and return its
    (base) name."""
    name = random_name()
    os.makedirs(os.path.join(path, name))
    return name
class document_file(osv.osv):
    """Extend ir.attachment: place attachments in document directories,
    track ownership and size, and optionally store the binary payload
    on disk in the per-database 'filestore' instead of the database."""
    _inherit = 'ir.attachment'

    def _get_filestore(self, cr):
        # Filestore root for this database: <root_path>/filestore/<dbname>
        return os.path.join(tools.config['root_path'], 'filestore', cr.dbname)

    def _data_get(self, cr, uid, ids, name, arg, context):
        """Getter of the 'datas' function field: read each attachment's
        filestore file and return it base64-encoded ('' on any error).
        With context['bin_size'] set, a human-readable size is returned
        instead of the content."""
        result = {}
        cr.execute('select id,store_fname,link from ir_attachment where id in ('+','.join(map(str,ids))+')')
        for id,r,l in cr.fetchall():
            try:
                value = file(os.path.join(self._get_filestore(cr), r), 'rb').read()
                result[id] = base64.encodestring(value)
            except:
                # Missing or unreadable file: degrade to empty content.
                result[id]=''
            if context.get('bin_size', False):
                result[id] = tools.human_size(len(result[id]))
        return result

    #
    # This code can be improved
    #
    def _data_set(self, cr, obj, id, name, value, uid=None, context={}):
        """Setter of the 'datas' function field: decode the base64
        *value*, write it into the filestore and update the attachment
        row through raw SQL (store_fname, store_method, file_size)."""
        if not value:
            return True
        #if (not context) or context.get('store_method','fs')=='fs':
        try:
            path = self._get_filestore(cr)
            if not os.path.isdir(path):
                try:
                    os.makedirs(path)
                except:
                    raise except_orm(_('Permission Denied !'), _('You do not permissions to write on the server side.'))
            flag = None
            # This can be improved: reuse the first sub-directory still
            # holding fewer than 4000 files, else create a new one.
            for dirs in os.listdir(path):
                if os.path.isdir(os.path.join(path,dirs)) and len(os.listdir(os.path.join(path,dirs)))<4000:
                    flag = dirs
                    break
            flag = flag or create_directory(path)
            filename = random_name()
            fname = os.path.join(path, flag, filename)
            # NOTE(review): fp is never closed (descriptor leak) and
            # 'filesize' is unused — file_size is stored as len(v) below.
            fp = file(fname,'wb')
            v = base64.decodestring(value)
            fp.write(v)
            filesize = os.stat(fname).st_size
            cr.execute('update ir_attachment set store_fname=%s,store_method=%s,file_size=%s where id=%s', (os.path.join(flag,filename),'fs',len(v),id))
            return True
        except Exception,e :
            raise except_orm(_('Error!'), str(e))

    _columns = {
        'user_id': fields.many2one('res.users', 'Owner', select=1),
        'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'),
        'parent_id': fields.many2one('document.directory', 'Directory', select=1),
        'file_size': fields.integer('File Size', required=True),
        'file_type': fields.char('Content Type', size=32),
        # Extracted text used for full-text search (filled by content_index).
        'index_content': fields.text('Indexed Content'),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
        # Where the payload lives: database row, filestore file, or URL.
        'store_method': fields.selection([('db','Database'),('fs','Filesystem'),('link','Link')], "Storing Method"),
        'datas': fields.function(_data_get,method=True,fnct_inv=_data_set,string='File Content',type="binary"),
        'store_fname': fields.char('Stored Filename', size=200),
        'res_model': fields.char('Attached Model', size=64), #res_model
        'res_id': fields.integer('Attached ID'), #res_id
        'partner_id':fields.many2one('res.partner', 'Partner', select=1),
        'title': fields.char('Resource Title',size=64),
    }
    _defaults = {
        'user_id': lambda self,cr,uid,ctx:uid,
        'file_size': lambda self,cr,uid,ctx:0,
        'store_method': lambda *args: 'db'
    }
    # Enforced at the database level in addition to _check_duplication().
    _sql_constraints = [
        ('filename_uniq', 'unique (name,parent_id,res_id,res_model)', 'The file name must be unique !')
    ]

    def _check_duplication(self, cr, uid,vals,ids=[],op='create'):
        """Return False when another attachment with the same
        (name, parent_id, res_model, res_id) exists, True otherwise.

        NOTE(review): during 'write', values filled in from the first
        browsed record leak into the checks of the following records,
        and ids=[] is a mutable default — confirm both are intentional.
        """
        name=vals.get('name',False)
        parent_id=vals.get('parent_id',False)
        res_model=vals.get('res_model',False)
        res_id=vals.get('res_id',0)
        if op=='write':
            for file in self.browse(cr,uid,ids):
                if not name:
                    name=file.name
                if not parent_id:
                    parent_id=file.parent_id and file.parent_id.id or False
                if not res_model:
                    res_model=file.res_model and file.res_model or False
                if not res_id:
                    res_id=file.res_id and file.res_id or 0
                res=self.search(cr,uid,[('id','<>',file.id),('name','=',name),('parent_id','=',parent_id),('res_model','=',res_model),('res_id','=',res_id)])
                if len(res):
                    return False
        if op=='create':
            res=self.search(cr,uid,[('name','=',name),('parent_id','=',parent_id),('res_id','=',res_id),('res_model','=',res_model)])
            if len(res):
                return False
        return True

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate an attachment with ' (copy)' appended to the name
        so the filename-uniqueness constraint is not violated."""
        if not default:
            default ={}
        name = self.read(cr, uid, [id])[0]['name']
        default.update({'name': name+ " (copy)"})
        return super(document_file,self).copy(cr,uid,id,default,context)

    def write(self, cr, uid, ids, vals, context=None):
        """Standard write plus a duplicate-name check and best-effort
        re-indexing of the new content for full-text search."""
        res=self.search(cr,uid,[('id','in',ids)])
        if not len(res):
            return False
        if not self._check_duplication(cr,uid,vals,ids,'write'):
            raise except_orm(_('ValidateError'), _('File name must be unique!'))
        result = super(document_file,self).write(cr,uid,ids,vals,context=context)
        cr.commit()
        try:
            for f in self.browse(cr, uid, ids, context=context):
                #if 'datas' not in vals:
                #    vals['datas']=f.datas
                res = content_index(base64.decodestring(vals['datas']), f.datas_fname, f.file_type or None)
                super(document_file,self).write(cr, uid, ids, {
                    'index_content': res
                })
            cr.commit()
        except:
            # Indexing is best-effort; never block the write.
            pass
        return result

    def create(self, cr, uid, vals, context={}):
        """Create an attachment: derive title/parent/resource links from
        the context and the attached record, compute file_size, enforce
        name uniqueness, then best-effort index the content.

        NOTE(review): assumes 'name' is always present in vals — a
        missing name raises KeyError on the first line.
        """
        vals['title']=vals['name']
        vals['parent_id'] = context.get('parent_id',False) or vals.get('parent_id',False)
        if not vals.get('res_id', False) and context.get('default_res_id',False):
            vals['res_id']=context.get('default_res_id',False)
        if not vals.get('res_model', False) and context.get('default_res_model',False):
            vals['res_model']=context.get('default_res_model',False)
        if vals.get('res_id', False) and vals.get('res_model',False):
            obj_model=self.pool.get(vals['res_model'])
            result = obj_model.read(cr, uid, [vals['res_id']], context=context)
            if len(result):
                obj=result[0]
                vals['title'] = (obj.get('name',''))[:60]
                # Link the attachment to a partner: directly, via an
                # address, or via a partner_id field on the record.
                if obj_model._name=='res.partner':
                    vals['partner_id']=obj['id']
                elif obj.get('address_id',False):
                    if isinstance(obj['address_id'],tuple) or isinstance(obj['address_id'],list):
                        address_id=obj['address_id'][0]
                    else:
                        address_id=obj['address_id']
                    address=self.pool.get('res.partner.address').read(cr,uid,[address_id],context=context)
                    if len(address):
                        vals['partner_id']=address[0]['partner_id'][0] or False
                elif obj.get('partner_id',False):
                    if isinstance(obj['partner_id'],tuple) or isinstance(obj['partner_id'],list):
                        vals['partner_id']=obj['partner_id'][0]
                    else:
                        vals['partner_id']=obj['partner_id']
        datas=None
        if vals.get('link',False) :
            # 'link' attachments fetch their content from the URL.
            import urllib
            datas=base64.encodestring(urllib.urlopen(vals['link']).read())
        else:
            datas=vals.get('datas',False)
        vals['file_size']= len(datas)
        if not self._check_duplication(cr,uid,vals):
            raise except_orm(_('ValidateError'), _('File name must be unique!'))
        result = super(document_file,self).create(cr, uid, vals, context)
        cr.commit()
        try:
            res = content_index(base64.decodestring(datas), vals['datas_fname'], vals.get('content_type', None))
            super(document_file,self).write(cr, uid, [result], {
                'index_content': res,
            })
            cr.commit()
        except:
            # Indexing is best-effort; never block the create.
            pass
        return result

    def unlink(self,cr, uid, ids, context={}):
        """Delete attachments, removing their filestore files first
        (best-effort: a missing file does not block the unlink)."""
        for f in self.browse(cr, uid, ids, context):
            #if f.store_method=='fs':
            try:
                os.unlink(os.path.join(self._get_filestore(cr), f.store_fname))
            except:
                pass
        return super(document_file, self).unlink(cr, uid, ids, context)

document_file()
class document_configuration_wizard(osv.osv_memory):
    """One-shot configuration wizard: rewires the demo document
    directories to real models/reports depending on which modules are
    installed, and points the document browse action at this server's
    FTP interface."""
    _name='document.configuration.wizard'
    _rec_name = 'Auto Directory configuration'
    _columns = {
        'host': fields.char('Server Address', size=64, help="Put here the server address or IP. " \
                "Keep localhost if you don't know what to write.", required=True)
    }

    def detect_ip_addr(self, cr, uid, context=None):
        """Best-effort guess of this machine's IP address; falls back to
        'localhost' on any failure."""
        def _detect_ip_addr(self, cr, uid, context=None):
            from array import array
            import socket
            from struct import pack, unpack
            try:
                import fcntl
            except ImportError:
                fcntl = None
            if not fcntl: # not UNIX:
                host = socket.gethostname()
                ip_addr = socket.gethostbyname(host)
            else: # UNIX:
                # get all interfaces:
                nbytes = 128 * 32
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                names = array('B', '\0' * nbytes)
                # 0x8912 = SIOCGIFCONF: list the network interfaces.
                outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
                namestr = names.tostring()
                ifaces = [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)]
                for ifname in [iface for iface in ifaces if iface != 'lo']:
                    # 0x8915 = SIOCGIFADDR: address of the first
                    # non-loopback interface.
                    ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
                    break
            return ip_addr
        try:
            ip_addr = _detect_ip_addr(self, cr, uid, context)
        except:
            ip_addr = 'localhost'
        return ip_addr

    _defaults = {
        'host': detect_ip_addr,
    }

    def action_cancel(self,cr,uid,ids,conect=None):
        # Return to the generic configuration wizard.
        return {
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'ir.actions.configuration.wizard',
            'type': 'ir.actions.act_window',
            'target':'new',
        }

    def action_config(self, cr, uid, ids, context=None):
        """Apply the configuration: map the demo directories to sale
        orders, products and analytic accounts (when those modules are
        installed) and set the FTP browse URL from the chosen host."""
        conf = self.browse(cr, uid, ids[0], context)
        obj=self.pool.get('document.directory')
        objid=self.pool.get('ir.model.data')
        if self.pool.get('sale.order'):
            # All sale orders + a 'Print Order' virtual PDF.
            id = objid._get_id(cr, uid, 'document', 'dir_sale_order_all')
            id = objid.browse(cr, uid, id, context=context).res_id
            mid = self.pool.get('ir.model').search(cr, uid, [('model','=','sale.order')])
            obj.write(cr, uid, [id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
                'domain': '[]',
            })
            aid = objid._get_id(cr, uid, 'sale', 'report_sale_order')
            aid = objid.browse(cr, uid, aid, context=context).res_id
            self.pool.get('document.directory.content').create(cr, uid, {
                'name': "Print Order",
                'suffix': "_print",
                'report_id': aid,
                'extension': '.pdf',
                'include_name': 1,
                'directory_id': id,
            })
            # Quotations: same mapping restricted to draft orders.
            id = objid._get_id(cr, uid, 'document', 'dir_sale_order_quote')
            id = objid.browse(cr, uid, id, context=context).res_id
            obj.write(cr, uid, [id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
                'domain': "[('state','=','draft')]",
            })
        if self.pool.get('product.product'):
            id = objid._get_id(cr, uid, 'document', 'dir_product')
            id = objid.browse(cr, uid, id, context=context).res_id
            mid = self.pool.get('ir.model').search(cr, uid, [('model','=','product.product')])
            obj.write(cr, uid, [id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
            })
            if self.pool.get('stock.location'):
                # Stock forecast report attached to the product directory.
                aid = objid._get_id(cr, uid, 'stock', 'report_product_history')
                aid = objid.browse(cr, uid, aid, context=context).res_id
                self.pool.get('document.directory.content').create(cr, uid, {
                    'name': "Product Stock",
                    'suffix': "_stock_forecast",
                    'report_id': aid,
                    'extension': '.pdf',
                    'include_name': 1,
                    'directory_id': id,
                })
        if self.pool.get('account.analytic.account'):
            id = objid._get_id(cr, uid, 'document', 'dir_project')
            id = objid.browse(cr, uid, id, context=context).res_id
            mid = self.pool.get('ir.model').search(cr, uid, [('model','=','account.analytic.account')])
            obj.write(cr, uid, [id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
                'domain': '[]',
                'ressource_tree': 1
            })
        # Point the 'browse documents' URL action at this host's FTP server.
        aid = objid._get_id(cr, uid, 'document', 'action_document_browse')
        aid = objid.browse(cr, uid, aid, context=context).res_id
        self.pool.get('ir.actions.url').write(cr, uid, [aid], {'url': 'ftp://'+(conf.host or 'localhost')+':8021/'})
        return {
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'ir.actions.configuration.wizard',
            'type': 'ir.actions.act_window',
            'target': 'new',
        }

document_configuration_wizard()
[FIX] document : create doc of any resource model with blank name
lp bug: https://launchpad.net/bugs/404975 fixed
bzr revid: hmo@tinyerp.com-20090803104440-y65u4102ym2e4m68
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from osv import osv, fields
from osv.orm import except_orm
import urlparse
import os
import pooler
from content_index import content_index
import netsvc
import StringIO
import random
import string
from psycopg2 import Binary
from tools import config
import tools
from tools.translate import _
def random_name():
    """Return a random 10-character ASCII-letter string, used to name
    filestore files and directories.

    :return: str of exactly 10 letters (a-z, A-Z)
    """
    # FIX: the old per-call ``random.seed()`` re-seeded the PRNG from the
    # system clock on every call, so two calls within the same clock tick
    # could produce identical names (filestore collisions). Rely on the
    # module's single PRNG state instead.
    return ''.join(random.choice(string.ascii_letters) for _ in range(10))
# Unsupported WebDAV Commands:
# label
# search
# checkin
# checkout
# propget
# propset
#
# An object that represent an uri
# path: the uri of the object
# content: the Content it belongs to (_print.pdf)
# type: content or collection
# content: objct = res.partner
# collection: object = directory, object2 = res.partner
# file: objct = ir.attachement
# root: if we are at the first directory of a ressource
#
# Characters that are invalid in FTP/WebDAV node names, mapped to a safe
# replacement token ('/' maps to '__'; the others map to str(hash(char))).
# NOTE(review): str hashes are not stable across Python versions/builds,
# so these tokens are only consistent within one server process — confirm
# that encoded names are never persisted across restarts.
INVALID_CHARS={'*':str(hash('*')), '|':str(hash('|')) , "\\":str(hash("\\")), '/':'__', ':':str(hash(':')), '"':str(hash('"')), '<':str(hash('<')) , '>':str(hash('>')) , '?':str(hash('?'))}
class node_class(object):
    """A node of the virtual document tree served over FTP/WebDAV.

    A node wraps one of: the database root ('database'), a
    document.directory ('collection'), an ir.attachment ('file') or a
    virtual report file ('content'); ``object2`` carries the mapped
    resource record when browsing inside a resource directory.
    """
    def __init__(self, cr, uid, path, object, object2=False, context={}, content=False, type='collection', root=False):
        self.cr = cr
        self.uid = uid
        self.path = path          # uri of this node
        self.object = object      # directory/attachment browse record
        self.object2 = object2    # mapped resource record, if any
        self.context = context
        self.content = content    # document.directory.content for virtual files
        self.type=type            # 'database' | 'collection' | 'file' | 'content'
        self.root=root            # resource id when at a resource-dir root

    def _file_get(self, nodename=False):
        """Children of this node that are files: real attachments plus
        the directory's virtual 'content' files, optionally filtered to
        *nodename*."""
        if not self.object:
            return []
        pool = pooler.get_pool(self.cr.dbname)
        fobj = pool.get('ir.attachment')
        res2 = []
        where = []
        if self.object2:
            # Inside a mapped record: that record's attachments.
            where.append( ('res_model','=',self.object2._name) )
            where.append( ('res_id','=',self.object2.id) )
        else:
            # Plain directory: attachments directly below it.
            where.append( ('parent_id','=',self.object.id) )
            where.append( ('res_id','=',False) )
        if nodename:
            where.append( (fobj._rec_name,'=',nodename) )
        # Virtual files generated from the directory's contents.
        for content in self.object.content_ids:
            if self.object2 or not content.include_name:
                if content.include_name:
                    test_nodename = self.object2.name + (content.suffix or '') + (content.extension or '')
                else:
                    test_nodename = (content.suffix or '') + (content.extension or '')
                # NOTE(review): str.find() returns -1 (truthy) when '/'
                # is absent, so the replace runs in almost all cases —
                # harmless since it is then a no-op.
                if test_nodename.find('/'):
                    test_nodename=test_nodename.replace('/', '_')
                path = self.path+'/'+test_nodename
                if not nodename:
                    n = node_class(self.cr, self.uid,path, self.object2, False, context=self.context, content=content, type='content', root=False)
                    res2.append( n)
                else:
                    if nodename == test_nodename:
                        n = node_class(self.cr, self.uid, path, self.object2, False, context=self.context, content=content, type='content', root=False)
                        res2.append(n)
        ids = fobj.search(self.cr, self.uid, where+[ ('parent_id','=',self.object and self.object.id or False) ])
        if self.object and self.root and (self.object.type=='ressource'):
            # At the root of a resource directory, also expose files that
            # are not placed in any directory.
            ids += fobj.search(self.cr, self.uid, where+[ ('parent_id','=',False) ])
        res = fobj.browse(self.cr, self.uid, ids, context=self.context)
        return map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name, x, False, context=self.context, type='file', root=False), res) + res2

    def get_translation(self,value,lang):
        """Reverse-translate *value*: find the 'model'-type translation
        whose translated text equals *value* in *lang* and return the
        source field value; return *value* unchanged when none found."""
        result = value
        pool = pooler.get_pool(self.cr.dbname)
        translation_ids = pool.get('ir.translation').search(self.cr, self.uid, [('value','=',value),('lang','=',lang),('type','=','model')])
        if len(translation_ids):
            tran_id = translation_ids[0]
            translation = pool.get('ir.translation').read(self.cr, self.uid, tran_id, ['res_id','name'])
            # translation 'name' is "<model>,<field>".
            res_model,field_name = tuple(translation['name'].split(','))
            res_id = translation['res_id']
            res = pool.get(res_model).read(self.cr, self.uid, res_id, [field_name])
            if res:
                result = res[field_name]
        return result

    def directory_list_for_child(self,nodename,parent=False):
        """Browse the document.directory children of this node
        (optionally restricted to *nodename*): generic sub-directories
        plus the ones bound to this precise resource record."""
        pool = pooler.get_pool(self.cr.dbname)
        where = []
        if nodename:
            nodename = self.get_translation(nodename, self.context['lang'])
            where.append(('name','=',nodename))
        if (self.object and self.object.type=='directory') or not self.object2:
            where.append(('parent_id','=',self.object and self.object.id or False))
        else:
            where.append(('parent_id','=',False))
        if self.object:
            where.append(('ressource_parent_type_id','=',self.object.ressource_type_id.id))
        else:
            where.append(('ressource_parent_type_id','=',False))
        ids = pool.get('document.directory').search(self.cr, self.uid, where+[('ressource_id','=',0)])
        if self.object2:
            ids += pool.get('document.directory').search(self.cr, self.uid, where+[('ressource_id','=',self.object2.id)])
        res = pool.get('document.directory').browse(self.cr, self.uid, ids, self.context)
        return res

    def _child_get(self, nodename=False):
        """Children of this node that are collections: sub-directories,
        root-level files (database node), and — for resource-mapped
        directories — one pseudo-directory per matching record."""
        if self.type not in ('collection','database'):
            return []
        res = self.directory_list_for_child(nodename)
        result= map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name, x, x.type=='directory' and self.object2 or False, context=self.context, root=self.root), res)
        if self.type=='database':
            # Top level also lists attachments without directory/resource.
            pool = pooler.get_pool(self.cr.dbname)
            fobj = pool.get('ir.attachment')
            vargs = [('parent_id','=',False),('res_id','=',False)]
            if nodename:
                vargs.append(('name','=',nodename))
            file_ids=fobj.search(self.cr,self.uid,vargs)
            res = fobj.browse(self.cr, self.uid, file_ids, context=self.context)
            result +=map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name, x, False, context=self.context, type='file', root=self.root), res)
        if self.type=='collection' and self.object.type=="ressource":
            # Resource directory: each matching record of the mapped
            # model becomes a child pseudo-directory.
            where = self.object.domain and eval(self.object.domain, {'active_id':self.root}) or []
            pool = pooler.get_pool(self.cr.dbname)
            obj = pool.get(self.object.ressource_type_id.model)
            if len(obj.fields_get(self.cr, self.uid, ['dirname'])):
                _dirname_field = 'dirname'
            else:
                _dirname_field = 'name'
            name_for = obj._name.split('.')[-1]
            if nodename and nodename.find(name_for) == 0 :
                # Fallback name of the form '<model-suffix><id>'.
                id = int(nodename.replace(name_for,''))
                where.append(('id','=',id))
            elif nodename:
                # Decode the escaped node name back to the record name.
                # NOTE(review): find() is -1 (truthy) when the token is
                # absent, so these replaces run unconditionally in
                # practice — no-ops when nothing matches.
                if nodename.find('__') :
                    nodename=nodename.replace('__','/')
                for invalid in INVALID_CHARS:
                    if nodename.find(INVALID_CHARS[invalid]) :
                        nodename=nodename.replace(INVALID_CHARS[invalid],invalid)
                nodename = self.get_translation(nodename, self.context['lang'])
                where.append((_dirname_field,'=',nodename))
            if self.object.ressource_tree:
                # Tree-structured resources: only direct children of the
                # current record.
                if obj._parent_name in obj.fields_get(self.cr,self.uid):
                    where.append((obj._parent_name,'=',self.object2 and self.object2.id or False))
                    ids = obj.search(self.cr, self.uid, where)
                    res = obj.browse(self.cr, self.uid, ids,self.context)
                    result+= map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name.replace('/','__'), self.object, x, context=self.context, root=x.id), res)
                    return result
                else :
                    if self.object2:
                        return result
            else:
                if self.object2:
                    return result
            ids = obj.search(self.cr, self.uid, where)
            res = obj.browse(self.cr, self.uid, ids,self.context)
            for r in res:
                # Build a display name, escaping characters that are
                # invalid in FTP/WebDAV node names.
                if len(obj.fields_get(self.cr, self.uid, [_dirname_field])):
                    r.name = eval('r.'+_dirname_field)
                else:
                    r.name = False
                if not r.name:
                    r.name = name_for + '%d'%r.id
                for invalid in INVALID_CHARS:
                    if r.name.find(invalid) :
                        r.name = r.name.replace(invalid,INVALID_CHARS[invalid])
            result2 = map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name.replace('/','__'), self.object, x, context=self.context, root=x.id), res)
            if result2:
                if self.object.ressource_tree:
                    result += result2
                else:
                    result = result2
        return result

    def children(self):
        """All children: collections first, then files."""
        return self._child_get() + self._file_get()

    def child(self, name):
        """Return the single child named *name* (collection first, then
        file), or None when no child matches."""
        res = self._child_get(name)
        if res:
            return res[0]
        res = self._file_get(name)
        if res:
            return res[0]
        return None

    def path_get(self):
        # Node path without the leading '/'.
        path = self.path
        if self.path[0]=='/':
            path = self.path[1:]
        return path
class document_directory(osv.osv):
    """A directory of the document tree: either a static directory or a
    directory mapped onto the records of another model ('ressource')."""
    _name = 'document.directory'
    _description = 'Document directory'
    _columns = {
        'name': fields.char('Name', size=64, required=True, select=1, translate=True),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
        'file_type': fields.char('Content Type', size=32),
        'domain': fields.char('Domain', size=128, help="Use a domain if you want to apply an automatic filter on visible resources."),
        'user_id': fields.many2one('res.users', 'Owner'),
        'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'),
        'parent_id': fields.many2one('document.directory', 'Parent Item'),
        'child_ids': fields.one2many('document.directory', 'parent_id', 'Children'),
        'file_ids': fields.one2many('ir.attachment', 'parent_id', 'Files'),
        'content_ids': fields.one2many('document.directory.content', 'directory_id', 'Virtual Files'),
        'type': fields.selection([('directory','Static Directory'),('ressource','Other Resources')], 'Type', required=True),
        'ressource_type_id': fields.many2one('ir.model', 'Directories Mapped to Objects',
            help="Select an object here and Open ERP will create a mapping for each of these " \
                 "objects, using the given domain, when browsing through FTP."),
        'ressource_parent_type_id': fields.many2one('ir.model', 'Parent Model',
            help="If you put an object here, this directory template will appear bellow all of these objects. " \
                 "Don't put a parent directory if you select a parent model."),
        'ressource_id': fields.integer('Resource ID'),
        'ressource_tree': fields.boolean('Tree Structure',
            help="Check this if you want to use the same tree structure as the object selected in the system."),
    }
    _defaults = {
        'user_id': lambda self,cr,uid,ctx: uid,
        'domain': lambda self,cr,uid,ctx: '[]',
        'type': lambda *args: 'directory',
        'ressource_id': lambda *a: 0
    }
    _sql_constraints = [
        ('dirname_uniq', 'unique (name,parent_id,ressource_id,ressource_parent_type_id)', 'The directory name must be unique !')
    ]

    def get_resource_path(self,cr,uid,dir_id,res_model,res_id):
        """Return the ftp:// URL of the directory node for record
        (*res_model*, *res_id*) under directory *dir_id*, or False."""
        # this method will be used in process module
        # to be need test and Improvement if resource dir has parent resource (link resource)
        path=[]
        def _parent(dir_id,path):
            # Collect directory names from the root down to dir_id.
            parent=self.browse(cr,uid,dir_id)
            if parent.parent_id and not parent.ressource_parent_type_id:
                _parent(parent.parent_id.id,path)
                path.append(parent.name)
            else:
                path.append(parent.name)
                return path
        directory=self.browse(cr,uid,dir_id)
        model_ids=self.pool.get('ir.model').search(cr,uid,[('model','=',res_model)])
        if directory:
            _parent(dir_id,path)
            path.append(self.pool.get(directory.ressource_type_id.model).browse(cr,uid,res_id).name)
            user=self.pool.get('res.users').browse(cr,uid,uid)
            # NOTE(review): embeds the user's login/password in the URL.
            return "ftp://%s:%s@localhost:%s/%s/%s"%(user.login,user.password,config.get('ftp_server_port',8021),cr.dbname,'/'.join(path))
        return False

    def _check_recursion(self, cr, uid, ids):
        # Walk parents up to 100 levels; a cycle never empties ids, so
        # exhausting the level budget means recursion -> constraint fails.
        level = 100
        while len(ids):
            cr.execute('select distinct parent_id from document_directory where id in ('+','.join(map(str,ids))+')')
            ids = filter(None, map(lambda x:x[0], cr.fetchall()))
            if not level:
                return False
            level -= 1
        return True

    _constraints = [
        (_check_recursion, 'Error! You can not create recursive Directories.', ['parent_id'])
    ]

    def __init__(self, *args, **kwargs):
        # Per-model cache used by get_object() (currently disabled there
        # by the 'if False' guard).
        res = super(document_directory, self).__init__(*args, **kwargs)
        self._cache = {}

    def onchange_content_id(self, cr, uid, ids, ressource_type_id):
        # No client-side side effect on content change.
        return {}

    def _get_childs(self, cr, uid, node, nodename=False, context={}):
        # NOTE(review): looks dead/broken — references self.get_translation
        # and self.context which do not exist on this class, and the
        # builtin `object`; get_childs() below uses node_class instead.
        where = []
        if nodename:
            nodename = self.get_translation(nodename, self.context['lang'])
            where.append(('name','=',nodename))
        if object:
            where.append(('parent_id','=',object.id))
        ids = self.search(cr, uid, where, context)
        return self.browse(cr, uid, ids, context), False

    """
    PRE:
        uri: of the form "Sales Order/SO001"
    PORT:
        uri
        object: the object.directory or object.directory.content
        object2: the other object linked (if object.directory.content)
    """
    def get_object(self, cr, uid, uri, context={}):
        """Resolve *uri* (a list of path components) to a node_class
        instance, or False when a component does not exist."""
        lang = context.get('lang',False)
        if not lang:
            user = self.pool.get('res.users').browse(cr, uid, uid)
            lang = user.context_lang
        context['lang'] = lang
        if not uri:
            return node_class(cr, uid, '', False, context=context, type='database')
        turi = tuple(uri)
        # Cache lookup disabled ('if False'); kept for reference.
        if False and (turi in self._cache):
            (path, oo, oo2, context, content,type,root) = self._cache[turi]
            if oo:
                object = self.pool.get(oo[0]).browse(cr, uid, oo[1], context)
            else:
                object = False
            if oo2:
                object2 = self.pool.get(oo2[0]).browse(cr, uid, oo2[1], context)
            else:
                object2 = False
            node = node_class(cr, uid, '/', False, context=context, type='database')
            return node
        # Walk the tree from the database root, one component at a time.
        node = node_class(cr, uid, '/', False, context=context, type='database')
        for path in uri[:]:
            if path:
                node = node.child(path)
                if not node:
                    return False
        oo = node.object and (node.object._name, node.object.id) or False
        oo2 = node.object2 and (node.object2._name, node.object2.id) or False
        self._cache[turi] = (node.path, oo, oo2, node.context, node.content,node.type,node.root)
        return node

    def get_childs(self, cr, uid, uri, context={}):
        """Return the child paths of *uri* (or the root node's own path
        when *uri* is empty)."""
        node = self.get_object(cr, uid, uri, context)
        if uri:
            children = node.children()
        else:
            children= [node]
        result = map(lambda node: node.path_get(), children)
        #childs,object2 = self._get_childs(cr, uid, object, False, context)
        #result = map(lambda x: urlparse.urljoin(path+'/',x.name), childs)
        return result

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a directory with ' (copy)' appended to the name so
        the dirname-uniqueness constraint is not violated."""
        if not default:
            default ={}
        name = self.read(cr, uid, [id])[0]['name']
        default.update({'name': name+ " (copy)"})
        return super(document_directory,self).copy(cr,uid,id,default,context)

    def _check_duplication(self, cr, uid,vals,ids=[],op='create'):
        """Return False when another directory with the same (name,
        parent, resource mapping) exists, True otherwise.

        NOTE(review): during 'write', values filled in from the first
        browsed record leak into the checks of the following records,
        and ids=[] is a mutable default — confirm both are intentional.
        """
        name=vals.get('name',False)
        parent_id=vals.get('parent_id',False)
        ressource_parent_type_id=vals.get('ressource_parent_type_id',False)
        ressource_id=vals.get('ressource_id',0)
        if op=='write':
            for directory in self.browse(cr,uid,ids):
                if not name:
                    name=directory.name
                if not parent_id:
                    parent_id=directory.parent_id and directory.parent_id.id or False
                if not ressource_parent_type_id:
                    ressource_parent_type_id=directory.ressource_parent_type_id and directory.ressource_parent_type_id.id or False
                if not ressource_id:
                    ressource_id=directory.ressource_id and directory.ressource_id or 0
                res=self.search(cr,uid,[('id','<>',directory.id),('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
                if len(res):
                    return False
        if op=='create':
            res=self.search(cr,uid,[('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
            if len(res):
                return False
        return True

    def write(self, cr, uid, ids, vals, context=None):
        """Standard write guarded by the duplicate-name check."""
        if not self._check_duplication(cr,uid,vals,ids,op='write'):
            raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
        return super(document_directory,self).write(cr,uid,ids,vals,context=context)

    def create(self, cr, uid, vals, context=None):
        """Standard create guarded by the duplicate-name check and a
        ban on characters that break FTP/WebDAV paths (/ @ $ #)."""
        if not self._check_duplication(cr,uid,vals):
            raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
        # find()+1 is truthy exactly when the character occurs anywhere.
        if vals.get('name',False) and (vals.get('name').find('/')+1 or vals.get('name').find('@')+1 or vals.get('name').find('$')+1 or vals.get('name').find('#')+1) :
            raise osv.except_osv(_('ValidateError'), _('Directory name contains special characters!'))
        return super(document_directory,self).create(cr, uid, vals, context)

document_directory()
class document_directory_node(osv.osv):
    """Link a workflow process node to a document directory."""
    _inherit = 'process.node'
    _columns = {
        'directory_id': fields.many2one('document.directory', 'Document directory', ondelete="set null"),
    }

document_directory_node()
class document_directory_content_type(osv.osv):
    """File type (extension) a virtual directory content can be
    rendered to; active types feed the 'extension' selection of
    document.directory.content."""
    _name = 'document.directory.content.type'
    _description = 'Directory Content Type'
    _columns = {
        'name': fields.char('Content Type', size=64, required=True),
        'code': fields.char('Extension', size=4),
        'active': fields.boolean('Active'),
    }
    _defaults = {
        'active': lambda *args: 1
    }

document_directory_content_type()
class document_directory_content(osv.osv):
    """A 'virtual file' inside a directory: a report rendered on the fly
    (e.g. an order's PDF) and exposed through FTP/WebDAV."""
    _name = 'document.directory.content'
    _description = 'Directory Content'
    _order = "sequence"

    def _extension_get(self, cr, uid, context={}):
        # Selection values come from the active content types.
        cr.execute('select code,name from document_directory_content_type where active')
        res = cr.fetchall()
        return res

    _columns = {
        'name': fields.char('Content Name', size=64, required=True),
        'sequence': fields.integer('Sequence', size=16),
        # Appended to the record name to build the virtual file name.
        'suffix': fields.char('Suffix', size=16),
        'report_id': fields.many2one('ir.actions.report.xml', 'Report'),
        'extension': fields.selection(_extension_get, 'Document Type', required=True, size=4),
        'include_name': fields.boolean('Include Record Name', help="Check this field if you want that the name of the file start by the record name."),
        'directory_id': fields.many2one('document.directory', 'Directory'),
    }
    _defaults = {
        'extension': lambda *args: '.pdf',
        'sequence': lambda *args: 1,
        'include_name': lambda *args: 1,
    }

    def process_write_pdf(self, cr, uid, node, context={}):
        # Virtual PDF nodes are read-only; writes are accepted, dropped.
        return True

    def process_read_pdf(self, cr, uid, node, context={}):
        """Render the node's report for the attached record and return a
        file-like object (StringIO whose ``name`` is the node)."""
        report = self.pool.get('ir.actions.report.xml').browse(cr, uid, node.content.report_id.id)
        srv = netsvc.LocalService('report.'+report.report_name)
        pdf,pdftype = srv.create(cr, uid, [node.object.id], {}, {})
        s = StringIO.StringIO(pdf)
        s.name = node
        return s

document_directory_content()
class ir_action_report_xml(osv.osv):
    """Extend ir.actions.report.xml with a computed 'model_id' field
    resolving the report's model name to its ir.model record."""
    _name="ir.actions.report.xml"
    _inherit ="ir.actions.report.xml"

    def _model_get(self, cr, uid, ids, name, arg, context):
        # Map each report id to the matching ir.model id (False when no
        # match; reports without a model are left out of the result).
        res = {}
        model_pool = self.pool.get('ir.model')
        for data in self.read(cr,uid,ids,['model']):
            model = data.get('model',False)
            if model:
                model_id =model_pool.search(cr,uid,[('model','=',model)])
                if model_id:
                    res[data.get('id')] = model_id[0]
                else:
                    res[data.get('id')] = False
        return res

    def _model_search(self, cr, uid, obj, name, args):
        # fnct_search: turn a model_id criterion into a domain over
        # report ids; ('id','=','0') is the match-nothing domain.
        if not len(args):
            return []
        model_id= args[0][2]
        if not model_id:
            return []
        model = self.pool.get('ir.model').read(cr,uid,[model_id])[0]['model']
        report_id = self.search(cr,uid,[('model','=',model)])
        if not report_id:
            return [('id','=','0')]
        return [('id','in',report_id)]

    _columns={
        'model_id' : fields.function(_model_get,fnct_search=_model_search,method=True,string='Model Id'),
    }

ir_action_report_xml()
def create_directory(path):
    """Create a randomly named sub-directory under *path* and return
    its name (relative, not the full path)."""
    dir_name = random_name()
    path = os.path.join(path,dir_name)
    os.makedirs(path)
    return dir_name
class document_file(osv.osv):
_inherit = 'ir.attachment'
def _get_filestore(self, cr):
    """Return the filestore directory of the current database:
    <root_path>/filestore/<dbname>."""
    root = tools.config['root_path']
    return os.path.join(root, 'filestore', cr.dbname)
def _data_get(self, cr, uid, ids, name, arg, context):
    """Getter of the 'datas' function field: read each attachment's
    filestore file and return it base64-encoded ('' on any error).
    With context['bin_size'] set, a human-readable size is returned
    instead of the content."""
    result = {}
    cr.execute('select id,store_fname,link from ir_attachment where id in ('+','.join(map(str,ids))+')')
    for id,r,l in cr.fetchall():
        try:
            value = file(os.path.join(self._get_filestore(cr), r), 'rb').read()
            result[id] = base64.encodestring(value)
        except:
            # Missing or unreadable file: degrade to empty content.
            result[id]=''
        if context.get('bin_size', False):
            result[id] = tools.human_size(len(result[id]))
    return result
#
# This code can be improved
#
def _data_set(self, cr, obj, id, name, value, uid=None, context={}):
if not value:
return True
#if (not context) or context.get('store_method','fs')=='fs':
try:
path = self._get_filestore(cr)
if not os.path.isdir(path):
try:
os.makedirs(path)
except:
raise except_orm(_('Permission Denied !'), _('You do not permissions to write on the server side.'))
flag = None
# This can be improved
for dirs in os.listdir(path):
if os.path.isdir(os.path.join(path,dirs)) and len(os.listdir(os.path.join(path,dirs)))<4000:
flag = dirs
break
flag = flag or create_directory(path)
filename = random_name()
fname = os.path.join(path, flag, filename)
fp = file(fname,'wb')
v = base64.decodestring(value)
fp.write(v)
filesize = os.stat(fname).st_size
cr.execute('update ir_attachment set store_fname=%s,store_method=%s,file_size=%s where id=%s', (os.path.join(flag,filename),'fs',len(v),id))
return True
except Exception,e :
raise except_orm(_('Error!'), str(e))
# ir.attachment extensions: placement, ownership, storage and indexing.
_columns = {
    'user_id': fields.many2one('res.users', 'Owner', select=1),
    'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'),
    'parent_id': fields.many2one('document.directory', 'Directory', select=1),
    'file_size': fields.integer('File Size', required=True),
    'file_type': fields.char('Content Type', size=32),
    # Extracted text used for full-text search (filled by content_index).
    'index_content': fields.text('Indexed Content'),
    'write_date': fields.datetime('Date Modified', readonly=True),
    'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
    'create_date': fields.datetime('Date Created', readonly=True),
    'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
    # Where the payload lives: database row, filestore file, or URL.
    'store_method': fields.selection([('db','Database'),('fs','Filesystem'),('link','Link')], "Storing Method"),
    'datas': fields.function(_data_get,method=True,fnct_inv=_data_set,string='File Content',type="binary"),
    'store_fname': fields.char('Stored Filename', size=200),
    'res_model': fields.char('Attached Model', size=64), #res_model
    'res_id': fields.integer('Attached ID'), #res_id
    'partner_id':fields.many2one('res.partner', 'Partner', select=1),
    'title': fields.char('Resource Title',size=64),
}
# Defaults: creator owns the file, zero size until content is set,
# payload stored in the database unless switched to filestore/link.
_defaults = {
    'user_id': lambda self,cr,uid,ctx:uid,
    'file_size': lambda self,cr,uid,ctx:0,
    'store_method': lambda *args: 'db'
}
# Enforced at the database level in addition to _check_duplication().
_sql_constraints = [
    ('filename_uniq', 'unique (name,parent_id,res_id,res_model)', 'The file name must be unique !')
]
def _check_duplication(self, cr, uid,vals,ids=[],op='create'):
name=vals.get('name',False)
parent_id=vals.get('parent_id',False)
res_model=vals.get('res_model',False)
res_id=vals.get('res_id',0)
if op=='write':
for file in self.browse(cr,uid,ids):
if not name:
name=file.name
if not parent_id:
parent_id=file.parent_id and file.parent_id.id or False
if not res_model:
res_model=file.res_model and file.res_model or False
if not res_id:
res_id=file.res_id and file.res_id or 0
res=self.search(cr,uid,[('id','<>',file.id),('name','=',name),('parent_id','=',parent_id),('res_model','=',res_model),('res_id','=',res_id)])
if len(res):
return False
if op=='create':
res=self.search(cr,uid,[('name','=',name),('parent_id','=',parent_id),('res_id','=',res_id),('res_model','=',res_model)])
if len(res):
return False
return True
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default ={}
name = self.read(cr, uid, [id])[0]['name']
default.update({'name': name+ " (copy)"})
return super(document_file,self).copy(cr,uid,id,default,context)
    def write(self, cr, uid, ids, vals, context=None):
        """Write attachment values, enforcing filename uniqueness, then
        best-effort re-index the file content.

        Returns False when none of ``ids`` exist; otherwise the super()
        write result.
        """
        res=self.search(cr,uid,[('id','in',ids)])
        if not len(res):
            return False
        if not self._check_duplication(cr,uid,vals,ids,'write'):
            raise except_orm(_('ValidateError'), _('File name must be unique!'))
        result = super(document_file,self).write(cr,uid,ids,vals,context=context)
        cr.commit()
        try:
            for f in self.browse(cr, uid, ids, context=context):
                #if 'datas' not in vals:
                #    vals['datas']=f.datas
                # NOTE(review): when 'datas' is not in vals this raises
                # KeyError, which the bare except below silently swallows --
                # indexing is deliberately best-effort.
                res = content_index(base64.decodestring(vals['datas']), f.datas_fname, f.file_type or None)
                super(document_file,self).write(cr, uid, ids, {
                    'index_content': res
                })
            cr.commit()
        except:
            pass
        return result
    def create(self, cr, uid, vals, context={}):
        """Create an attachment, deriving title/partner from the attached
        record and indexing the content best-effort.

        NOTE(review): mutable default for ``context``; and ``vals['name']``
        is read unconditionally, so a missing 'name' raises KeyError.
        """
        vals['title']=vals['name']
        vals['parent_id'] = context.get('parent_id',False) or vals.get('parent_id',False)
        if not vals.get('res_id', False) and context.get('default_res_id',False):
            vals['res_id']=context.get('default_res_id',False)
        if not vals.get('res_model', False) and context.get('default_res_model',False):
            vals['res_model']=context.get('default_res_model',False)
        if vals.get('res_id', False) and vals.get('res_model',False):
            # Derive title and partner from the record the file is attached to.
            obj_model=self.pool.get(vals['res_model'])
            result = obj_model.read(cr, uid, [vals['res_id']], context=context)
            if len(result):
                obj=result[0]
                if obj.get('name',False):
                    vals['title'] = (obj.get('name',''))[:60]
                if obj_model._name=='res.partner':
                    vals['partner_id']=obj['id']
                elif obj.get('address_id',False):
                    # address_id may be a plain id or an (id, name) pair.
                    if isinstance(obj['address_id'],tuple) or isinstance(obj['address_id'],list):
                        address_id=obj['address_id'][0]
                    else:
                        address_id=obj['address_id']
                    address=self.pool.get('res.partner.address').read(cr,uid,[address_id],context=context)
                    if len(address):
                        vals['partner_id']=address[0]['partner_id'][0] or False
                elif obj.get('partner_id',False):
                    if isinstance(obj['partner_id'],tuple) or isinstance(obj['partner_id'],list):
                        vals['partner_id']=obj['partner_id'][0]
                    else:
                        vals['partner_id']=obj['partner_id']
        datas=None
        if vals.get('link',False) :
            # Link attachments: fetch the remote content and store it base64-encoded.
            import urllib
            datas=base64.encodestring(urllib.urlopen(vals['link']).read())
        else:
            datas=vals.get('datas',False)
        # NOTE(review): this is the length of the base64 payload, not the
        # decoded file size -- confirm whether that is intended.
        vals['file_size']= len(datas)
        if not self._check_duplication(cr,uid,vals):
            raise except_orm(_('ValidateError'), _('File name must be unique!'))
        result = super(document_file,self).create(cr, uid, vals, context)
        cr.commit()
        try:
            # Best-effort full-text indexing; any failure is ignored.
            res = content_index(base64.decodestring(datas), vals['datas_fname'], vals.get('content_type', None))
            super(document_file,self).write(cr, uid, [result], {
                'index_content': res,
            })
            cr.commit()
        except:
            pass
        return result
    def unlink(self,cr, uid, ids, context={}):
        """Delete attachments; best-effort removal of any filesystem-stored
        payload first (errors deleting the file are ignored)."""
        for f in self.browse(cr, uid, ids, context):
            #if f.store_method=='fs':
            try:
                os.unlink(os.path.join(self._get_filestore(cr), f.store_fname))
            except:
                pass
        return super(document_file, self).unlink(cr, uid, ids, context)
document_file()  # instantiate to register the model in the ORM pool
class document_configuration_wizard(osv.osv_memory):
    """One-shot wizard that wires the standard document directories to
    common models (sale orders, products, analytic accounts) and points
    the document browse action at this server's FTP interface."""
    _name='document.configuration.wizard'
    _rec_name = 'Auto Directory configuration'
    _columns = {
        'host': fields.char('Server Address', size=64, help="Put here the server address or IP. " \
                "Keep localhost if you don't know what to write.", required=True)
    }

    def detect_ip_addr(self, cr, uid, context=None):
        """Best-effort detection of this host's IP address.

        Falls back to 'localhost' on any error (no network, odd platform).
        """
        def _detect_ip_addr(self, cr, uid, context=None):
            from array import array
            import socket
            from struct import pack, unpack
            try:
                import fcntl
            except ImportError:
                fcntl = None
            if not fcntl: # not UNIX:
                # No ioctl available: resolve the hostname instead.
                host = socket.gethostname()
                ip_addr = socket.gethostbyname(host)
            else: # UNIX:
                # get all interfaces:
                nbytes = 128 * 32
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                names = array('B', '\0' * nbytes)
                # 0x8912 is SIOCGIFCONF on Linux: list kernel interfaces.
                outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
                namestr = names.tostring()
                ifaces = [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)]
                # Take the address of the first non-loopback interface.
                for ifname in [iface for iface in ifaces if iface != 'lo']:
                    # 0x8915 is SIOCGIFADDR: fetch the interface's address.
                    ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
                    break
            return ip_addr
        try:
            ip_addr = _detect_ip_addr(self, cr, uid, context)
        except:
            ip_addr = 'localhost'
        return ip_addr

    _defaults = {
        'host': detect_ip_addr,
    }

    def action_cancel(self,cr,uid,ids,conect=None):
        """Return to the generic configuration wizard without changes."""
        return {
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'ir.actions.configuration.wizard',
            'type': 'ir.actions.act_window',
            'target':'new',
        }

    def action_config(self, cr, uid, ids, context=None):
        """Configure directories for whichever optional modules are
        installed, set the FTP browse URL, then chain to the next wizard."""
        conf = self.browse(cr, uid, ids[0], context)
        obj=self.pool.get('document.directory')
        objid=self.pool.get('ir.model.data')
        if self.pool.get('sale.order'):
            # "All sale orders" directory becomes a resource directory.
            id = objid._get_id(cr, uid, 'document', 'dir_sale_order_all')
            id = objid.browse(cr, uid, id, context=context).res_id
            mid = self.pool.get('ir.model').search(cr, uid, [('model','=','sale.order')])
            obj.write(cr, uid, [id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
                'domain': '[]',
            })
            # Attach the "print order" report as virtual PDF content.
            aid = objid._get_id(cr, uid, 'sale', 'report_sale_order')
            aid = objid.browse(cr, uid, aid, context=context).res_id
            self.pool.get('document.directory.content').create(cr, uid, {
                'name': "Print Order",
                'suffix': "_print",
                'report_id': aid,
                'extension': '.pdf',
                'include_name': 1,
                'directory_id': id,
            })
            # Quotations directory: same model restricted to draft orders.
            id = objid._get_id(cr, uid, 'document', 'dir_sale_order_quote')
            id = objid.browse(cr, uid, id, context=context).res_id
            obj.write(cr, uid, [id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
                'domain': "[('state','=','draft')]",
            })
        if self.pool.get('product.product'):
            id = objid._get_id(cr, uid, 'document', 'dir_product')
            id = objid.browse(cr, uid, id, context=context).res_id
            mid = self.pool.get('ir.model').search(cr, uid, [('model','=','product.product')])
            obj.write(cr, uid, [id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
            })
            if self.pool.get('stock.location'):
                # Product stock forecast report inside the product directory.
                aid = objid._get_id(cr, uid, 'stock', 'report_product_history')
                aid = objid.browse(cr, uid, aid, context=context).res_id
                self.pool.get('document.directory.content').create(cr, uid, {
                    'name': "Product Stock",
                    'suffix': "_stock_forecast",
                    'report_id': aid,
                    'extension': '.pdf',
                    'include_name': 1,
                    'directory_id': id,
                })
        if self.pool.get('account.analytic.account'):
            id = objid._get_id(cr, uid, 'document', 'dir_project')
            id = objid.browse(cr, uid, id, context=context).res_id
            mid = self.pool.get('ir.model').search(cr, uid, [('model','=','account.analytic.account')])
            obj.write(cr, uid, [id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
                'domain': '[]',
                'ressource_tree': 1
            })
        # Point the "browse documents" action at the document FTP server.
        aid = objid._get_id(cr, uid, 'document', 'action_document_browse')
        aid = objid.browse(cr, uid, aid, context=context).res_id
        self.pool.get('ir.actions.url').write(cr, uid, [aid], {'url': 'ftp://'+(conf.host or 'localhost')+':8021/'})
        return {
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'ir.actions.configuration.wizard',
            'type': 'ir.actions.act_window',
            'target': 'new',
        }

document_configuration_wizard()  # register the wizard in the ORM pool
|
import sys
# If gevent was preloaded (e.g. by the gunicorn gevent worker), monkey-patch
# the stdlib so blocking I/O cooperates with the event loop.
if 'gevent' in sys.modules:
    from gevent import monkey
    monkey.patch_all()
from biicode.server.rest.rest_api_server import RestApiServer
from biicode.server.conf import BII_MONGO_URI, BII_MEMCACHE_SERVERS,\
    BII_MEMCACHE_USERNAME, BII_MEMCACHE_PASSWORD, BII_MAX_MONGO_POOL_SIZE
from biicode.server.store.mongo_server_store import MongoServerStore
from biicode.server.store.mongo_store import MongoStore

# Primary persistence layer: a MongoDB-backed server store.
store = MongoServerStore(MongoStore.makeConnection(BII_MONGO_URI,
                                                   max_pool_size=BII_MAX_MONGO_POOL_SIZE))
if BII_MEMCACHE_SERVERS:
    # Optional memcache layer wrapped around the Mongo store.
    from biicode.server.store.memcache_proxy_store import MemCacheProxyStore
    import pylibmc
    client = pylibmc.Client(servers=[BII_MEMCACHE_SERVERS],
                            username=BII_MEMCACHE_USERNAME,
                            password=BII_MEMCACHE_PASSWORD,
                            binary=True)
    proxy = MemCacheProxyStore(store, client)
else:
    proxy = store
# Run with: gunicorn -b 0.0.0.0:9000 -k gevent_pywsgi biicode.server.rest.production_server:app
ra = RestApiServer(proxy)
app = ra.root_app  # WSGI application object picked up by gunicorn
Disabled gevent
import sys
from biicode.server.rest.rest_api_server import RestApiServer
from biicode.server.conf import BII_MONGO_URI, BII_MEMCACHE_SERVERS,\
    BII_MEMCACHE_USERNAME, BII_MEMCACHE_PASSWORD, BII_MAX_MONGO_POOL_SIZE
from biicode.server.store.mongo_server_store import MongoServerStore
from biicode.server.store.mongo_store import MongoStore

# Primary persistence layer: a MongoDB-backed server store.
store = MongoServerStore(MongoStore.makeConnection(BII_MONGO_URI,
                                                   max_pool_size=BII_MAX_MONGO_POOL_SIZE))
if BII_MEMCACHE_SERVERS:
    # Optional memcache layer wrapped around the Mongo store.
    from biicode.server.store.memcache_proxy_store import MemCacheProxyStore
    import pylibmc
    client = pylibmc.Client(servers=[BII_MEMCACHE_SERVERS],
                            username=BII_MEMCACHE_USERNAME,
                            password=BII_MEMCACHE_PASSWORD,
                            binary=True)
    proxy = MemCacheProxyStore(store, client)
else:
    proxy = store
# Run with: gunicorn -b 0.0.0.0:9000 -k gevent_pywsgi biicode.server.rest.production_server:app
ra = RestApiServer(proxy)
app = ra.root_app  # WSGI application object picked up by gunicorn
|
__version__ = "3.18.0"


# Each exception carries an 'rc' class attribute that the CLI boundary
# uses as the process exit code (sys.exit(rc)).
class ConfigError(Exception):
    """Raised when the configuration of a tool contains some invalid values."""
    rc = 100  # sys.exit(rc)


class CustomDatabaseNotFound(Exception):
    """Raised when the InputLocator can't find a user-provided database (region=='custom')"""
    rc = 101  # sys.exit(rc)


class ScriptNotFoundException(Exception):
    """Raised when an invalid script name is used."""
    rc = 102  # sys.exit(rc)


class MissingInputDataException(Exception):
    """Raised when a script can't run because some information is missing"""
    rc = 103


class InvalidOccupancyNameException(Exception):
    """Raised when the occupancy.dbf has an invalid / unknown occupancy column"""
    rc = 104
def suppress_3rd_party_debug_loggers():
    """Set the logging level to ERROR for chatty third-party loggers.

    Silences sub-ERROR noise from fiona/shapely/matplotlib/urllib3/numba
    so application logs stay readable. (The previous docstring claimed
    WARN, but the code has always set ERROR.)
    """
    import logging
    loggers_to_silence = ["shapely", "Fiona", "fiona", "matplotlib", "urllib3.connectionpool",
                          "numba.core.ssa"]
    for log_name in loggers_to_silence:
        logging.getLogger(log_name).setLevel(logging.ERROR)
Update __init__.py version
__version__ = "3.22.0"


# Each exception carries an 'rc' class attribute that the CLI boundary
# uses as the process exit code (sys.exit(rc)).
class ConfigError(Exception):
    """Raised when the configuration of a tool contains some invalid values."""
    rc = 100  # sys.exit(rc)


class CustomDatabaseNotFound(Exception):
    """Raised when the InputLocator can't find a user-provided database (region=='custom')"""
    rc = 101  # sys.exit(rc)


class ScriptNotFoundException(Exception):
    """Raised when an invalid script name is used."""
    rc = 102  # sys.exit(rc)


class MissingInputDataException(Exception):
    """Raised when a script can't run because some information is missing"""
    rc = 103


class InvalidOccupancyNameException(Exception):
    """Raised when the occupancy.dbf has an invalid / unknown occupancy column"""
    rc = 104
def suppress_3rd_party_debug_loggers():
    """Set the logging level to ERROR for chatty third-party loggers.

    Silences sub-ERROR noise from fiona/shapely/matplotlib/urllib3/numba
    so application logs stay readable. (The previous docstring claimed
    WARN, but the code has always set ERROR.)
    """
    import logging
    loggers_to_silence = ["shapely", "Fiona", "fiona", "matplotlib", "urllib3.connectionpool",
                          "numba.core.ssa"]
    for log_name in loggers_to_silence:
        logging.getLogger(log_name).setLevel(logging.ERROR)
|
#!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon18.py reads data from an Arduino running the cmdMULTIsens sketch from
# https://github.com/Mausy5043/arduino.git.
import sys, time, math, commands
from libdaemon import Daemon
import serial, re
from urllib2 import Request, urlopen
from bs4 import BeautifulSoup
class MyDaemon(Daemon):
    """Daemon that polls an Arduino over serial every 12 s and, once per
    25-sample cycle, logs the averaged readings plus external wind data."""
    def run(self):
        sampleptr = 0
        samples = 5*5            # 25 samples per reporting cycle
        datapoints = 11
        data = [[None]*datapoints for _ in range(samples)]
        sampleTime = 12          # seconds between samples
        cycleTime = samples * sampleTime
        # sync to whole minute
        waitTime = (cycleTime + sampleTime) - (time.time() % cycleTime)
        time.sleep(waitTime)
        while True:
            startTime=time.time()
            result = do_work().split(',')
            # refresh external data once per cycle, mid-cycle
            if (sampleptr == 5):
                extern_result = do_extern_work().split(',')
                extern_data = map(float, extern_result)
            data[sampleptr] = map(float, result)
            # report sample average
            sampleptr = sampleptr + 1
            if (sampleptr == samples):
                somma = map(sum,zip(*data))
                averages = [format(s / samples, '.3f') for s in somma]
                # NOTE(review): 'do_calc_windchill' is not defined in this
                # module (the helper below is named 'calc_windchill'), and
                # 'avg_ext' is read here before its first assignment on the
                # initial cycle -- both look like latent NameErrors. Also,
                # do_extern_work returns two fields, so extern_data[2] is
                # out of range -- confirm intended behavior.
                extern_data[2] = do_calc_windchill(averages[1], avg_ext[0])
                avg_ext = [format(s, '.3f') for s in extern_data]
                do_report(averages, avg_ext)
                sampleptr = 0
            # sleep the remainder of the sample slot, never a non-positive time
            waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
            while waitTime <= 0:
                waitTime = waitTime + sampleTime
            time.sleep(waitTime)
def gettelegram(cmd):
    """Send ``cmd`` to the Arduino and read back one telegram.

    A valid reply echoes the command as its first token and ends with "!";
    the payload tokens in between are joined into the telegram string.

    Return codes (second element of the returned tuple):
      abort == 1 indicates a successful read
      abort == 2 means that a serial port read/write error occurred
      abort == 3 no valid data after several attempts
    """
    # flag used to exit the while-loop
    abort = 0
    # countdown counter used to prevent infinite loops
    loops2go = 10
    #
    telegram = "NaN";
    while abort == 0:
        try:
            port.write(cmd)
            line = port.readline()
        except:
            # read error, terminate prematurely
            # NOTE(review): if the very first write raises, 'line' is still
            # unbound and the check below raises NameError -- confirm.
            abort = 2
        if line != "":
            line = line.strip().split()
            if line[0] == cmd:
                if line[-1] == "!":
                    # join payload tokens (skip the echoed cmd and the "!")
                    telegram = ""
                    for item in range(1,len(line)-1):
                        telegram = telegram + ' {0}'.format(line[item])
                    abort = 1
        loops2go = loops2go - 1
        if loops2go < 0:
            abort = 3
    # Return codes:
    # abort == 1 indicates a successful read
    # abort == 2 means that a serial port read/write error occurred
    # abort == 3 no valid data after several attempts
    return (telegram, abort)
def do_work():
    """Request one 'A' telegram from the Arduino; return -1 on failure."""
    # 12 datapoints gathered here
    telegram, status = gettelegram("A")
    if status != 1:
        return -1
    return telegram
def do_extern_work():
    """Fetch current wind speed (m/s) and direction (degrees) for weather
    station 6350 from the buienradar.nl XML feed.

    Returns the two values as a single 'ms, gr' string.
    """
    req = Request("http://xml.buienradar.nl/")
    response = urlopen(req)
    output = response.read()
    soup = BeautifulSoup(output)
    # Station id 6350 -- presumably Gilze-Rijen, given the variable name
    # below; confirm against the feed.
    MSwind = str(soup.buienradarnl.weergegevens.actueel_weer.weerstations.find(id=6350).windsnelheidms)
    GRwind = str(soup.buienradarnl.weergegevens.actueel_weer.weerstations.find(id=6350).windrichtinggr)
    datum = str(soup.buienradarnl.weergegevens.actueel_weer.weerstations.find(id=6350).datum)
    # Strip the surrounding XML tags; the element text is the second token.
    ms = MSwind.replace("<"," ").replace(">"," ").split()[1]
    gr = GRwind.replace("<"," ").replace(">"," ").split()[1]
    dt = datum.replace("<"," ").replace(">"," ").split()
    #print '{0} {1}, {2}, {3}'.format(dt[1], dt[2], ms, gr)
    gilzerijen = '{0}, {1}'.format(ms, gr)
    return gilzerijen
def calc_windchill(T, W):
    """Return the JAG/TI wind-chill temperature for dry-bulb temperature
    T (deg C) and wind speed W (m/s).

    ref.: http://knmi.nl/bibliotheek/knmipubTR/TR309.pdf
    The result is capped at T: a wind chill can never exceed the actual
    (dry bulb) temperature, which the raw formula does at low wind speeds.
    """
    wind_term = (W * 3.6) ** 0.16  # wind speed in km/h raised per JAG/TI
    JagTi = 13.12 + 0.6215 * T - 11.37 * wind_term + 0.3965 * T * wind_term
    if JagTi > T:
        JagTi = T
    return JagTi
def do_report(result, ext_result):
    """Append one CSV line (timestamp, sensor averages, external data)
    to the log file."""
    # Get the time and date in human-readable form and UN*X-epoch...
    #outDate = commands.getoutput("date '+%F %H:%M:%S, %s'")
    outDate = commands.getoutput("date '+%F %H:%M:%S'")
    result = ', '.join(map(str, result))
    ext_result = ', '.join(map(str, ext_result))
    # Python 2 'file' builtin; open in append mode.
    f = file('/tmp/testser.txt', 'a')
    f.write('{0}, {1}, {2}\n'.format(outDate, result, ext_result) )
    f.close()
    return
if __name__ == "__main__":
    # Open the Arduino serial port before daemonizing.
    port = serial.Serial('/dev/ttyACM0', 9600, timeout=10)
    # NOTE(review): this sets an attribute on the serial *module*;
    # 'port.dsrdtr = False' was probably intended -- confirm.
    serial.dsrdtr = False
    time.sleep(0.5)
    daemon = MyDaemon('/tmp/raspdiagd-18.pid')
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            daemon.start()
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart()
        elif 'foreground' == sys.argv[1]:
            # assist with debugging.
            print "Debug-mode started. Use <Ctrl>+C to stop."
            daemon.run()
        else:
            print "Unknown command"
            sys.exit(2)
        sys.exit(0)
    else:
        print "usage: %s start|stop|restart|foreground" % sys.argv[0]
        sys.exit(2)
20150502.1032
Fix: windchill temperatures are always lower than or equal to dry-bulb
temperatures.
#!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon18.py reads data from an Arduino running the cmdMULTIsens sketch from
# https://github.com/Mausy5043/arduino.git.
import sys, time, math, commands
from libdaemon import Daemon
import serial, re
from urllib2 import Request, urlopen
from bs4 import BeautifulSoup
class MyDaemon(Daemon):
    """Daemon that polls an Arduino over serial every 12 s and, once per
    25-sample cycle, logs the averaged readings plus external wind data."""
    def run(self):
        sampleptr = 0
        samples = 5*5            # 25 samples per reporting cycle
        datapoints = 11
        data = [[None]*datapoints for _ in range(samples)]
        sampleTime = 12          # seconds between samples
        cycleTime = samples * sampleTime
        # sync to whole minute
        waitTime = (cycleTime + sampleTime) - (time.time() % cycleTime)
        time.sleep(waitTime)
        while True:
            startTime=time.time()
            result = do_work().split(',')
            # refresh external data once per cycle, mid-cycle
            if (sampleptr == 5):
                extern_result = do_extern_work().split(',')
                extern_data = map(float, extern_result)
            data[sampleptr] = map(float, result)
            # report sample average
            sampleptr = sampleptr + 1
            if (sampleptr == samples):
                somma = map(sum,zip(*data))
                averages = [format(s / samples, '.3f') for s in somma]
                # NOTE(review): 'do_calc_windchill' is undefined (helper is
                # 'calc_windchill'), 'avg_ext' is read before its first
                # assignment on the initial cycle, and extern_data only has
                # two fields so index 2 is out of range -- confirm.
                extern_data[2] = do_calc_windchill(averages[1], avg_ext[0])
                avg_ext = [format(s, '.3f') for s in extern_data]
                do_report(averages, avg_ext)
                sampleptr = 0
            # sleep the remainder of the sample slot, never a non-positive time
            waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
            while waitTime <= 0:
                waitTime = waitTime + sampleTime
            time.sleep(waitTime)
def gettelegram(cmd):
    """Send ``cmd`` to the Arduino and read back one telegram.

    A valid reply echoes the command as its first token and ends with "!";
    the payload tokens in between are joined into the telegram string.
    """
    # flag used to exit the while-loop
    abort = 0
    # countdown counter used to prevent infinite loops
    loops2go = 10
    #
    telegram = "NaN";
    while abort == 0:
        try:
            port.write(cmd)
            line = port.readline()
        except:
            # read error, terminate prematurely
            # NOTE(review): if the very first write raises, 'line' is still
            # unbound and the check below raises NameError -- confirm.
            abort = 2
        if line != "":
            line = line.strip().split()
            if line[0] == cmd:
                if line[-1] == "!":
                    # join payload tokens (skip the echoed cmd and the "!")
                    telegram = ""
                    for item in range(1,len(line)-1):
                        telegram = telegram + ' {0}'.format(line[item])
                    abort = 1
        loops2go = loops2go - 1
        if loops2go < 0:
            abort = 3
    # Return codes:
    # abort == 1 indicates a successful read
    # abort == 2 means that a serial port read/write error occurred
    # abort == 3 no valid data after several attempts
    return (telegram, abort)
def do_work():
    """Request one 'A' telegram from the Arduino; -1 signals a bad read."""
    # 12 datapoints gathered here
    telegram, status = gettelegram("A")
    return telegram if status == 1 else -1
def do_extern_work():
    """Fetch current wind speed (m/s) and direction (degrees) for weather
    station 6350 from the buienradar.nl XML feed.

    Returns the two values as a single 'ms, gr' string.
    """
    req = Request("http://xml.buienradar.nl/")
    response = urlopen(req)
    output = response.read()
    soup = BeautifulSoup(output)
    # Station id 6350 -- presumably Gilze-Rijen, given the variable name
    # below; confirm against the feed.
    MSwind = str(soup.buienradarnl.weergegevens.actueel_weer.weerstations.find(id=6350).windsnelheidms)
    GRwind = str(soup.buienradarnl.weergegevens.actueel_weer.weerstations.find(id=6350).windrichtinggr)
    datum = str(soup.buienradarnl.weergegevens.actueel_weer.weerstations.find(id=6350).datum)
    # Strip the surrounding XML tags; the element text is the second token.
    ms = MSwind.replace("<"," ").replace(">"," ").split()[1]
    gr = GRwind.replace("<"," ").replace(">"," ").split()[1]
    dt = datum.replace("<"," ").replace(">"," ").split()
    #print '{0} {1}, {2}, {3}'.format(dt[1], dt[2], ms, gr)
    gilzerijen = '{0}, {1}'.format(ms, gr)
    return gilzerijen
def calc_windchill(T, W):
    """Return the JAG/TI wind-chill temperature for dry-bulb temperature
    T (deg C) and wind speed W (m/s), capped at T.

    ref.: http://knmi.nl/bibliotheek/knmipubTR/TR309.pdf
    """
    wind_term = (W * 3.6) ** 0.16  # wind speed in km/h raised per JAG/TI
    jag_ti = 13.12 + 0.6215 * T - 11.37 * wind_term + 0.3965 * T * wind_term
    # Wind chill never exceeds the actual temperature.
    return min(jag_ti, T)
def do_report(result, ext_result):
    """Append one CSV line (timestamp, sensor averages, external data)
    to the log file."""
    # Get the time and date in human-readable form and UN*X-epoch...
    #outDate = commands.getoutput("date '+%F %H:%M:%S, %s'")
    outDate = commands.getoutput("date '+%F %H:%M:%S'")
    result = ', '.join(map(str, result))
    ext_result = ', '.join(map(str, ext_result))
    # Python 2 'file' builtin; open in append mode.
    f = file('/tmp/testser.txt', 'a')
    f.write('{0}, {1}, {2}\n'.format(outDate, result, ext_result) )
    f.close()
    return
if __name__ == "__main__":
    # Open the Arduino serial port before daemonizing.
    port = serial.Serial('/dev/ttyACM0', 9600, timeout=10)
    # NOTE(review): this sets an attribute on the serial *module*;
    # 'port.dsrdtr = False' was probably intended -- confirm.
    serial.dsrdtr = False
    time.sleep(0.5)
    daemon = MyDaemon('/tmp/raspdiagd-18.pid')
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            daemon.start()
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart()
        elif 'foreground' == sys.argv[1]:
            # assist with debugging.
            print "Debug-mode started. Use <Ctrl>+C to stop."
            daemon.run()
        else:
            print "Unknown command"
            sys.exit(2)
        sys.exit(0)
    else:
        print "usage: %s start|stop|restart|foreground" % sys.argv[0]
        sys.exit(2)
|
from .files import (reader, tokens, nopen, header, is_newer_b,
basestring, int_types)
import sys
from .pool import pool, pmap
try:
from optimize import shedskinner
except ImportError:
pass
from itertools import groupby as igroupby
from operator import itemgetter
__version__ = "0.3.9"
def groupby(iterable, key=0, filter=None):
    """
    wrapper to itertools.groupby that returns a list of each group, rather
    than a generator and accepts integers or strings as the key and
    automatically converts them to callables with itemgetter(key)

    Arguments:
        iterable: iterable
        key: string, int or callable that tells how to group
        filter: unused; NOTE(review) it also shadows the builtin of the
                same name -- kept only for interface compatibility.

    Returns:
        an iterable where each item is the key and a *list* of that
        group. (itertools.groupby returns a generator of that group).

    e.g. groupby(iterable, 0)
    """
    # int/str keys become itemgetter callables; tuple/list keys select
    # several fields at once.
    if isinstance(key, (basestring, int)):
        key = itemgetter(key)
    elif isinstance(key, (tuple, list)):
        key = itemgetter(*key)
    for label, grp in igroupby(iterable, key):
        yield label, list(grp)
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
def groups_of(n, iterable):
    """
    Yield successive chunks of ``n`` items (last chunk may be shorter;
    None values are dropped).

    >>> groups_of(2, range(5))
    """
    chunk_iters = [iter(iterable)] * n
    for chunk in izip_longest(*chunk_iters):
        yield [item for item in chunk if item is not None]
def main():
    """Placeholder CLI entry point."""
    #import argparse
    #p = argparse.ArgumentParser(__doc__)
    print("main")
if __name__ == "__main__":
    import doctest
    # Run main() only when all module doctests pass.
    if doctest.testmod(optionflags=doctest.ELLIPSIS |\
                       doctest.NORMALIZE_WHITESPACE).failed == 0:
        main()
add toolshed fmt2header
from .files import (reader, tokens, nopen, header, is_newer_b,
basestring, int_types)
import sys
from .pool import pool, pmap
from .fmt import fmt2header
try:
from optimize import shedskinner
except ImportError:
pass
from itertools import groupby as igroupby
from operator import itemgetter
__version__ = "0.4.0"
def groupby(iterable, key=0, filter=None):
    """
    wrapper to itertools.groupby that returns a list of each group, rather
    than a generator and accepts integers or strings as the key and
    automatically converts them to callables with itemgetter(key)

    Arguments:
        iterable: iterable
        key: string, int or callable that tells how to group
        filter: unused; NOTE(review) it also shadows the builtin of the
                same name -- kept only for interface compatibility.

    Returns:
        an iterable where each item is the key and a *list* of that
        group. (itertools.groupby returns a generator of that group).

    e.g. groupby(iterable, 0)
    """
    # int/str keys become itemgetter callables; tuple/list keys select
    # several fields at once.
    if isinstance(key, (basestring, int)):
        key = itemgetter(key)
    elif isinstance(key, (tuple, list)):
        key = itemgetter(*key)
    for label, grp in igroupby(iterable, key):
        yield label, list(grp)
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
def groups_of(n, iterable):
    """
    Yield lists of up to ``n`` consecutive items (None values dropped).

    >>> groups_of(2, range(5))
    """
    slots = [iter(iterable)] * n
    for padded in izip_longest(*slots):
        yield [value for value in padded if value is not None]
def main():
    """Placeholder CLI entry point (argparse wiring is stubbed out)."""
    #import argparse
    #p = argparse.ArgumentParser(__doc__)
    print("main")
    pass
if __name__ == "__main__":
    import doctest
    # Run main() only when all module doctests pass.
    if doctest.testmod(optionflags=doctest.ELLIPSIS |\
                       doctest.NORMALIZE_WHITESPACE).failed == 0:
        main()
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Pexego Sistemas Informáticos All Rights Reserved
# $Jesús Ventosinos Mayor <jesus@pexego.es>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from openerp import api
class sale_order(orm.Model):
    """sale.order extension adding reservation / risk-approval states."""
    _inherit = 'sale.order'

    _columns = {
        'state': fields.selection([
            ('draft', 'Draft Quotation'),
            ('sent', 'Quotation Sent'),
            ('cancel', 'Cancelled'),
            ('reserve', 'Reserved'),
            ('waiting_date', 'Waiting Schedule'),
            ('wait_risk', 'Waiting Risk Approval'),
            ('risk_approval', 'Risk & VIES approval'),
            ('progress', 'Sales Order'),
            ('manual', 'Sale to Invoice'),
            ('shipping_except', 'Shipping Exception'),
            ('invoice_except', 'Invoice Exception'),
            ('done', 'Done'),
        ], 'Status', readonly=True, copy=False, help="Gives the status of the quotation or sales order.\
            \nThe exception status is automatically set when a cancel operation occurs \
            in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\
            but waiting for the scheduler to run on the order date.", select=True),
    }

    def action_risk_approval(self, cr, uid, ids, context=None):
        """Confirm the order once risk is approved.

        If the order still has unreserved lines and confirmation was not
        forced via context['confirmed'], open sale.confirm.wizard instead
        of confirming directly.
        """
        order = self.browse(cr, uid, ids[0], context)
        view_form = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale_custom', 'sale_confirm_wizard_form_wizard')
        wzd = self.pool('sale.confirm.wizard').create(cr, uid, {})
        # FIX: removed leftover debug breakpoint (import ipdb;
        # ipdb.set_trace()) -- it froze the server worker on every call.
        if not order.is_all_reserved and 'confirmed' not in context:
            return {'name': "Sale confirm",
                    'view_mode': 'form',
                    'view_type': 'form',
                    'res_model': 'sale.confirm.wizard',
                    'type': 'ir.actions.act_window',
                    'target': 'new',
                    'res_id': wzd,
                    'views': [(view_form[1], 'form')]
                    }
        else:
            self.apply_promotions(cr, uid, ids, context)
            self.write(cr, uid, ids, {'state': 'risk_approval'}, context)
            self.action_button_confirm(cr, uid, ids, context)
        return True
[FIX] 'partner_risk__stock_reserve__rel': removed leftover debug code (ipdb breakpoint)
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Pexego Sistemas Informáticos All Rights Reserved
# $Jesús Ventosinos Mayor <jesus@pexego.es>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from openerp import api
class sale_order(orm.Model):
    """sale.order extension adding reservation / risk-approval states."""
    _inherit = 'sale.order'

    _columns = {
        'state': fields.selection([
            ('draft', 'Draft Quotation'),
            ('sent', 'Quotation Sent'),
            ('cancel', 'Cancelled'),
            ('reserve', 'Reserved'),
            ('waiting_date', 'Waiting Schedule'),
            ('wait_risk', 'Waiting Risk Approval'),
            ('risk_approval', 'Risk & VIES approval'),
            ('progress', 'Sales Order'),
            ('manual', 'Sale to Invoice'),
            ('shipping_except', 'Shipping Exception'),
            ('invoice_except', 'Invoice Exception'),
            ('done', 'Done'),
        ], 'Status', readonly=True, copy=False, help="Gives the status of the quotation or sales order.\
            \nThe exception status is automatically set when a cancel operation occurs \
            in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\
            but waiting for the scheduler to run on the order date.", select=True),
    }

    def action_risk_approval(self, cr, uid, ids, context=None):
        """Confirm the order once risk is approved.

        If the order still has unreserved lines and confirmation was not
        forced via context['confirmed'], open sale.confirm.wizard instead
        of confirming directly.
        """
        order = self.browse(cr, uid, ids[0], context)
        view_form = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale_custom', 'sale_confirm_wizard_form_wizard')
        wzd = self.pool('sale.confirm.wizard').create(cr, uid, {})
        if not order.is_all_reserved and 'confirmed' not in context:
            return {'name': "Sale confirm",
                    'view_mode': 'form',
                    'view_type': 'form',
                    'res_model': 'sale.confirm.wizard',
                    'type': 'ir.actions.act_window',
                    'target': 'new',
                    'res_id': wzd,
                    'views': [(view_form[1], 'form')]
                    }
        else:
            # All lines reserved (or confirmation forced): confirm directly.
            self.apply_promotions(cr, uid, ids, context)
            self.write(cr, uid, ids, {'state': 'risk_approval'}, context)
            self.action_button_confirm(cr, uid, ids, context)
        return True
|
Added a recursive merge sort implementation.
def merge(left, right):
    """Merge two sorted lists into a single sorted list (stable)."""
    merged = []
    i = j = 0
    len_left, len_right = len(left), len(right)
    while i < len_left and j < len_right:
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One of the two tails is empty; extend with whichever remains.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged


def merge_sort(array):
    """Merge sort algorithm implementation (recursive, O(n log n))."""
    if len(array) <= 1:  # base case
        return array
    mid = len(array) // 2
    return merge(merge_sort(array[:mid]), merge_sort(array[mid:]))
|
from __future__ import print_function
import farmfs
from farmfs import getvol
from docopt import docopt
from functools import partial
from farmfs import cwd
from farmfs.util import empty_default, fmap, ffilter, pipeline, concat, identify, uncurry, count, groupby, consume, concatMap, zipFrom, safetype, ingest, first, maybe, every, identity, repeater, uniq, compose, pfmap
from farmfs.volume import mkfs, tree_diff, tree_patcher, encode_snapshot
from farmfs.fs import Path, userPath2Path, ftype_selector, FILE, LINK, skip_ignored, ensure_symlink, walk
from json import JSONEncoder
from s3lib.ui import load_creds as load_s3_creds
import sys
from farmfs.blobstore import S3Blobstore
from tqdm import tqdm
try:
from itertools import ifilter
except ImportError:
# On python3, filter is lazy.
ifilter = filter
try:
from itertools import imap
except ImportError:
# On python3 map is lazy.
imap = map
# Encode any JSON-serializable value as a key-sorted JSON string.
json_encode = lambda data: JSONEncoder(ensure_ascii=False, sort_keys=True).encode(data)
# Print an iterable as a single JSON array.
json_printr = pipeline(list, json_encode, print)
# Print an iterable of strings, one per line.
strs_printr = pipeline(fmap(print), consume)

def dict_printr(keys, d):
    """Print the values of ``keys`` from dict ``d`` as one tab-separated
    row; missing keys render as empty strings."""
    print("\t".join([ingest(d.get(k, '')) for k in keys]))

def dicts_printr(keys):
    """Build a printer that renders each dict in a stream via dict_printr."""
    return pipeline(fmap(partial(dict_printr, keys)), consume)

# Tab-separated printer for snapshot entries (path, type, checksum).
snapshot_printr = dicts_printr(['path', 'type', 'csum'])
UI_USAGE = \
"""
FarmFS
Usage:
farmfs mkfs [--root <root>] [--data <data>]
farmfs (status|freeze|thaw) [<path>...]
farmfs snap list
farmfs snap (make|read|delete|restore|diff) <snap>
farmfs fsck [--broken --frozen-ignored --blob-permissions --checksums]
farmfs count
farmfs similarity
farmfs gc [--noop]
farmfs remote add <remote> <root>
farmfs remote remove <remote>
farmfs remote list [<remote>]
farmfs pull <remote> [<snap>]
farmfs diff <remote> [<snap>]
Options:
"""
def op_doer(op):
    """Execute one queued operation: the blob action, then the tree action."""
    blob_op, tree_op, _desc = op
    blob_op()
    tree_op()
# Apply op_doer to every operation in a stream (via farmfs.util.fmap).
stream_op_doer = fmap(op_doer)
def fsck_missing_blobs(vol, cwd):
    '''Look for blobs in tree or snaps which are not in blobstore.

    Prints each missing checksum followed by the snap/path pairs that
    reference it; returns the number of distinct missing blobs.'''
    trees = vol.trees()
    # (tree, item) pairs for every item of every tree/snapshot
    tree_items = concatMap(lambda t: zipFrom(t,iter(t)))
    # keep only link items (entries that reference a blob by checksum)
    tree_links = ffilter(uncurry(lambda snap, item: item.is_link()))
    # links whose checksum has no backing blob in the blobstore
    broken_tree_links = partial(
        ifilter,
        uncurry(lambda snap, item: not vol.bs.exists(item.csum())))
    # group the broken references by checksum for reporting
    checksum_grouper = partial(groupby,
                               uncurry(lambda snap, item: item.csum()))
    def broken_link_printr(csum, snap_items):
        # One line for the checksum, then one indented line per reference.
        print(csum)
        for (snap, item) in snap_items:
            print( '',
                   snap.name,
                   item.to_path(vol.root).relative_to(cwd),
                   sep='\t')
    broken_links_printr = fmap(identify(uncurry(broken_link_printr)))
    num_bad_blobs = pipeline(
        tree_items,
        tree_links,
        broken_tree_links,
        checksum_grouper,
        broken_links_printr,
        count)(trees)
    return num_bad_blobs
def fsck_frozen_ignored(vol, cwd):
    '''Look for frozen links which are in the ignored file.'''
    #TODO some of this logic could be moved to volume. Which files are members of the volume is a function of the volume.
    # Never descend into the volume's own metadata directory while walking.
    ignore_mdd = partial(skip_ignored, [safetype(vol.mdd)])
    ignored_frozen = pipeline(
        ftype_selector([LINK]),
        ffilter(uncurry(vol.is_ignored)),
        fmap(first),
        fmap(lambda p: p.relative_to(cwd)),
        fmap(partial(print, "Ignored file frozen")),
        count
    )(walk(vol.root, skip=ignore_mdd))
    # Number of offending links; non-zero means this check failed.
    return ignored_frozen
def fsck_blob_permissions(vol, cwd):
    '''Look for blobstore blobs which are not readonly.'''
    # cwd is unused here; the parameter is kept so all fsck checks share
    # the (vol, cwd) call signature used by the fsck dispatcher.
    blob_permissions = pipeline(
        ffilter(vol.bs.verify_blob_permissions),
        fmap(partial(print, "writable blob: ")),
        count
    )(vol.bs.blobs())
    # Count of writable blobs found (0 == all blobs readonly).
    return blob_permissions
def fsck_checksum_mismatches(vol, cwd):
    '''Look for checksum mismatches.'''
    #TODO CORRUPTION checksum mismatch in blob <CSUM>, would be nice to know back references.
    # Re-hash every blob via pfmap (presumably a parallel map — confirm in
    # farmfs.util) and report blobs whose content hash differs from their
    # name. Returns the number of corrupt blobs.
    mismatches = pipeline(
        pfmap(lambda blob: (blob, vol.bs.blob_checksum(blob))),
        ffilter(lambda blob_csum: blob_csum[0] != blob_csum[1]),
        fmap(first),
        fmap(lambda csum: print("CORRUPTION checksum mismatch in blob %s" % csum)),
        count
    )(vol.bs.blobs())
    return mismatches
def ui_main():
    """Console entry point for `farmfs`: run the UI and exit with its code."""
    result = farmfs_ui(sys.argv[1:], cwd)
    # sys.exit rather than the site-provided exit(): the exit builtin is
    # injected by the site module and is absent under `python -S` and in
    # some frozen/embedded interpreters.
    sys.exit(result)
def farmfs_ui(argv, cwd):
    """Run one `farmfs` command.

    Parses argv against UI_USAGE (docopt) and dispatches to the matching
    sub-command. User-facing paths are printed relative to cwd.

    argv -- argument vector, without the program name.
    cwd  -- Path of the directory the user invoked the tool from.
    Returns the process exit code: 0 on success; fsck ORs in a distinct
    bit per failed check.
    """
    exitcode = 0
    args = docopt(UI_USAGE, argv)
    if args['mkfs']:
        root = userPath2Path(args['<root>'] or ".", cwd)
        data = userPath2Path(args['<data>'], cwd) if args.get('<data>') else Path(".farmfs/userdata", root)
        mkfs(root, data)
        print("FileSystem Created %s using blobstore %s" % (root, data))
    else:
        # Every other command operates on an existing volume at/above cwd.
        vol = getvol(cwd)
        paths = empty_default(map(lambda x: userPath2Path(x, cwd), args['<path>']), [vol.root])
        def delta_printr(delta):
            deltaPath = delta.path(vol.root).relative_to(cwd)
            print("diff: %s %s %s" % (delta.mode, deltaPath, delta.csum))
        stream_delta_printr = fmap(identify(delta_printr))
        def op_printr(op):
            (blob_op, tree_op, (desc, path)) = op
            print(desc % path.relative_to(cwd))
        stream_op_printr = fmap(identify(op_printr))
        if args['status']:
            get_thawed = fmap(vol.thawed)
            pipeline(get_thawed,
                concat,
                fmap(lambda p: p.relative_to(cwd)),
                fmap(print),
                consume)(paths)
        elif args['freeze']:
            def printr(freeze_op):
                s = "Imported %s with checksum %s" % \
                    (freeze_op['path'].relative_to(cwd),
                    freeze_op['csum'])
                if freeze_op['was_dup']:
                    print(s, "was a duplicate")
                else:
                    print(s)
            importer = fmap(vol.freeze)
            get_thawed = fmap(vol.thawed)
            print_list = fmap(printr)
            pipeline(get_thawed, concat, importer, print_list, consume)(paths)
        elif args['thaw']:
            def printr(path):
                print("Exported %s" % path.relative_to(cwd))
            exporter = fmap(vol.thaw)
            get_frozen = fmap(vol.frozen)
            print_list = fmap(printr)
            pipeline(get_frozen, concat, exporter, print_list, consume)(paths)
        elif args['fsck']:
            # Each check owns one bit of the exit code so failures compose.
            fsck_actions = {
                '--broken': (fsck_missing_blobs, 1),
                '--frozen-ignored': (fsck_frozen_ignored, 4),
                '--blob-permissions': (fsck_blob_permissions, 8),
                '--checksums': (fsck_checksum_mismatches, 2),
            }
            fsck_tasks = [action for (verb, action) in fsck_actions.items() if args[verb]]
            if len(fsck_tasks) == 0:
                # No options were specified, run the whole suite.
                fsck_tasks = fsck_actions.values()
            for check, fail_code in fsck_tasks:
                exitcode = exitcode | (check(vol, cwd) and fail_code)
        elif args['count']:
            # Print each checksum, its reference count, and every referrer.
            trees = vol.trees()
            tree_items = concatMap(lambda t: zipFrom(t, iter(t)))
            tree_links = ffilter(uncurry(lambda snap, item: item.is_link()))
            checksum_grouper = partial(groupby,
                uncurry(lambda snap, item: item.csum()))
            def count_printr(csum, snap_items):
                print(csum, count(snap_items))
                for (snap, item) in snap_items:
                    print(snap.name, item.to_path(vol.root).relative_to(cwd))
            counts_printr = fmap(identify(uncurry(count_printr)))
            pipeline(
                tree_items,
                tree_links,
                checksum_grouper,
                counts_printr,
                consume
            )(trees)
        elif args['similarity']:
            for (dir_a, count_a, dir_b, count_b, intersect) in vol.similarity():
                assert isinstance(dir_a, Path)
                assert isinstance(dir_b, Path)
                path_a = dir_a.relative_to(cwd)
                path_b = dir_b.relative_to(cwd)
                print(path_a, "%d/%d %d%%" % (intersect, count_a, int(100 * float(intersect) / count_a)),
                    path_b, "%d/%d %d%%" % (intersect, count_b, int(100 * float(intersect) / count_b)))
        elif args['gc']:
            # --noop walks the same pipeline but substitutes identity for
            # the actual blob deletion.
            applyfn = fmap(identity) if args.get('--noop') else fmap(vol.bs.delete_blob)
            fns = [fmap(identify(partial(print, "Removing"))),
                applyfn,
                consume]
            pipeline(*fns)(sorted(vol.unused_blobs(vol.items())))
        elif args['snap']:
            snapdb = vol.snapdb
            if args['list']:
                #TODO have an optional argument for which remote.
                print("\n".join(snapdb.list()))
            else:
                name = args['<snap>']
                if args['delete']:
                    snapdb.delete(name)
                elif args['make']:
                    snapdb.write(name, vol.tree())
                else:
                    snap = snapdb.read(name)
                    if args['read']:
                        for i in snap:
                            print(i)
                    elif args['restore']:
                        # (dropped a dead `tree = vol.tree()` local here.)
                        diff = tree_diff(vol.tree(), snap)
                        pipeline(
                            stream_delta_printr,
                            tree_patcher(vol, vol),
                            stream_op_printr,
                            stream_op_doer,
                            consume)(diff)
                    elif args['diff']:
                        diff = tree_diff(vol.tree(), snap)
                        pipeline(stream_delta_printr, consume)(diff)
        elif args['remote']:
            if args["add"]:
                remote_vol = getvol(userPath2Path(args['<root>'], cwd))
                vol.remotedb.write(args['<remote>'], remote_vol)
            elif args["remove"]:
                vol.remotedb.delete(args['<remote>'])
            elif args["list"]:
                if args["<remote>"]:
                    remote_vol = vol.remotedb.read(args['<remote>'])
                    print("\n".join(remote_vol.snapdb.list()))
                else:
                    for remote_name in vol.remotedb.list():
                        remote_vol = vol.remotedb.read(remote_name)
                        print(remote_name, remote_vol.root)
        elif args['pull'] or args['diff']:
            # `pull` applies the remote snapshot locally; `diff` only prints.
            remote_vol = vol.remotedb.read(args['<remote>'])
            snap_name = args['<snap>']
            remote_snap = remote_vol.snapdb.read(snap_name) if snap_name else remote_vol.tree()
            diff = tree_diff(vol.tree(), remote_snap)
            if args['pull']:
                patcher = tree_patcher(vol, remote_vol)
                pipeline(
                    stream_delta_printr,
                    patcher,
                    stream_op_printr,
                    stream_op_doer,
                    consume)(diff)
            else:  # diff
                pipeline(stream_delta_printr, consume)(diff)
    return exitcode
def printNotNone(value):
    """Print *value* unless it is None (avoids emitting a literal 'None')."""
    if value is None:
        return
    print(value)
def reverse(vol, csum):
    """Yields a set of paths which reference a given checksum_path name."""
    # NOTE(review): this is an unimplemented stub — the body is only the
    # docstring, so it always returns None despite claiming to yield paths.
    # The `farmdbg reverse` command implements this inline in dbg_ui
    # instead. TODO: implement or remove.
DBG_USAGE = \
"""
FarmDBG
Usage:
farmdbg reverse [--snap=<snapshot>|--all] <csum>
farmdbg key read <key>
farmdbg key write <key> <value>
farmdbg key delete <key>
farmdbg key list [<key>]
farmdbg walk (keys|userdata|root|snap <snapshot>) [--json]
farmdbg checksum <path>...
farmdbg fix link [--remote=<remote>] <target> <file>
farmdbg rewrite-links <target>
farmdbg missing <snap>...
farmdbg blobtype <blob>...
farmdbg blob <blob>...
farmdbg s3 list <bucket> <prefix>
farmdbg s3 upload [--quiet] <bucket> <prefix>
farmdbg s3 check <bucket> <prefix>
"""
def dbg_main():
    # Console entry point for `farmdbg`. Returns the exit code to the
    # caller rather than exiting the interpreter itself.
    return dbg_ui(sys.argv[1:], cwd)
def dbg_ui(argv, cwd):
    """Run one `farmdbg` debugging/maintenance command.

    argv: argument vector (without the program name), parsed against
          DBG_USAGE.
    cwd: Path the tool was invoked from; printed paths are relative to it.
    Returns the process exit code (0 on success).
    """
    exitcode = 0
    args = docopt(DBG_USAGE, argv)
    vol = getvol(cwd)
    if args['reverse']:
        # Print every (snap, path) pair referencing the given checksum.
        csum = args['<csum>']
        if args['--all']:
            trees = vol.trees()
        elif args['--snap']:
            trees = [vol.snapdb.read(args['--snap'])]
        else:
            trees = [vol.tree()]
        tree_items = concatMap(lambda t: zipFrom(t,iter(t)))
        tree_links = ffilter(uncurry(lambda snap, item: item.is_link()))
        matching_links = ffilter(uncurry(lambda snap, item: item.csum() == csum))
        def link_printr(snap_item):
            (snap, item) = snap_item
            print(snap.name, item.to_path(vol.root).relative_to(cwd))
        links_printr = fmap(identify(link_printr))
        pipeline(
            tree_items,
            tree_links,
            matching_links,
            links_printr,
            consume)(trees)
    elif args['key']:
        # Raw access to the volume key/value store.
        db = vol.keydb
        key = args['<key>']
        if args['read']:
            printNotNone(db.readraw(key))
        elif args['delete']:
            db.delete(key)
        elif args['list']:
            for v in db.list(key):
                print(v)
        elif args['write']:
            value = args['<value>']
            db.write(key, value)
    elif args['walk']:
        # Dump a tree/snapshot/blob/key listing, optionally as JSON.
        if args['root']:
            printr = json_printr if args.get('--json') else snapshot_printr
            printr(encode_snapshot(vol.tree()))
        elif args['snap']:
            #TODO could add a test for output encoding.
            #TODO could add a test for snap format. Leading '/' on paths.
            printr = json_printr if args.get('--json') else snapshot_printr
            printr(encode_snapshot(vol.snapdb.read(args['<snapshot>'])))
        elif args['userdata']:
            blobs = vol.bs.blobs()
            printr = json_printr if args.get('--json') else strs_printr
            printr(blobs)
        elif args['keys']:
            printr = json_printr if args.get('--json') else strs_printr
            printr(vol.keydb.list())
    elif args['checksum']:
        #TODO <checksum> <full path>
        paths = empty_default(map(lambda x: Path(x, cwd), args['<path>']), [vol.root])
        for p in paths:
            print(p.checksum(), p.relative_to(cwd))
    elif args['link']:
        # `fix link`: point <file> at blob <target>, fetching the blob from
        # a remote first when it is missing locally.
        f = Path(args['<file>'], cwd)
        b = ingest(args['<target>'])
        if not vol.bs.exists(b):
            print("blob %s doesn't exist" % b)
            if args['--remote']:
                remote = vol.remotedb.read(args['--remote'])
            else:
                raise(ValueError("aborting due to missing blob"))
            vol.bs.fetch_blob(remote.bs, b)
        else:
            pass #b exists, can we check its checksum?
        vol.bs.link_to_blob(f, b)
    elif args['rewrite-links']:
        target = Path(args['<target>'], cwd)
        for item in vol.tree():
            if not item.is_link():
                continue
            path = item.to_path(vol.root)
            new = vol.repair_link(path)
            if new is not None:
                print("Relinked %s to %s" % (path.relative_to(cwd), new))
    elif args['missing']:
        # Blobs referenced by the named snaps but absent from the current
        # tree, whose files also no longer exist on disk.
        tree_csums = pipeline(
            ffilter(lambda item: item.is_link()),
            fmap(lambda item: item.csum()),
            set
        )(iter(vol.tree()))
        snapNames = args['<snap>']
        def missing_printr(csum, pathStrs):
            paths = sorted(imap(lambda pathStr: vol.root.join(pathStr), pathStrs))
            for path in paths:
                print("%s\t%s" % (csum, path.relative_to(cwd)))
        missing_csum2pathStr = pipeline(
            fmap(vol.snapdb.read),
            concatMap(iter),
            ffilter(lambda item: item.is_link()),
            ffilter(lambda item: not vol.is_ignored(item.to_path(vol.root), None)),
            ffilter(lambda item: item.csum() not in tree_csums),
            partial(groupby, lambda item: item.csum()),
            ffilter(uncurry(lambda csum, items: every(lambda item: not item.to_path(vol.root).exists(), items))),
            fmap(uncurry(lambda csum, items: (csum, list(imap(lambda item: item.pathStr(), items))))),
            fmap(uncurry(missing_printr)),
            count
        )(snapNames)
    elif args['blobtype']:
        for blob in args['<blob>']:
            blob = ingest(blob)
            #TODO here csum_to_path is really needed.
            print(
                blob,
                maybe("unknown", vol.bs.csum_to_path(blob).filetype()))
    elif args['blob']:
        for csum in args['<blob>']:
            csum = ingest(csum)
            #TODO here csum_to_path is needed
            print(csum,
                vol.bs.csum_to_path(csum).relative_to(cwd))
    elif args['s3']:
        bucket = args['<bucket>']
        prefix = args['<prefix>']
        access_id, secret_key = load_s3_creds(None)
        s3bs = S3Blobstore(bucket, prefix, access_id, secret_key)
        if args['list']:
            pipeline(fmap(print), consume)(s3bs.blobs()())
        elif args['upload']:
            # Upload every tree blob the S3 bucket does not already hold.
            quiet = args.get('--quiet')
            print("Fetching remote blobs")
            s3_blobs = set(tqdm(s3bs.blobs()(), disable=quiet, desc="Fetching remote blobs", smoothing=1.0, dynamic_ncols=True, maxinterval=1.0))
            print("Remote Blobs: %s" % len(s3_blobs))
            print("Fetching local blobs") #TODO we are looking at tree, so blobs in snaps won't be sent.
            tree_blobs = set(tqdm(pipeline(
                ffilter(lambda x: x.is_link()),
                fmap(lambda x: x.csum()),
                uniq,
            )(iter(vol.tree())), disable=quiet, desc="Calculating local blobs", smoothing=1.0, dynamic_ncols=True, maxinterval=1.0))
            print("Local Blobs: %s" % len(tree_blobs))
            upload_blobs = tree_blobs - s3_blobs
            print("Uploading %s blobs to s3" % len(upload_blobs))
            with tqdm(desc="Uploading to S3", disable=quiet, total=len(upload_blobs), smoothing=1.0, dynamic_ncols=True, maxinterval=1.0) as pbar:
                def update_pbar(blob):
                    pbar.update(1)
                    pbar.set_description("Uploaded %s" % blob)
                def upload(blob):
                    s3bs.upload(blob, vol.bs.csum_to_path(blob))()
                    return blob
                all_success = pipeline(
                    ffilter(lambda x: x not in s3_blobs),
                    pfmap(upload),
                    fmap(identify(update_pbar)),
                    partial(every, identity),
                )(upload_blobs)
                if all_success:
                    print("Successfully uploaded")
                else:
                    print("Failed to upload")
                    exitcode = 1
        elif args['check']:
            # Compare each S3 object's ETag to its blob name.
            # NOTE(review): this presumes single-part uploads, whose ETag is
            # the object MD5 — multipart ETags would not match; confirm the
            # upload path never multiparts.
            num_corrupt_blobs = pipeline(
                ffilter(lambda obj: obj['ETag'][1:-1] != obj['blob']),
                fmap(identify(lambda obj: print(obj['blob'], obj['ETag'][1:-1]))),
                count
            )(s3bs.blob_stats()())
            if num_corrupt_blobs == 0:
                print("All S3 blobs etags match")
            else:
                exitcode = 2
    return exitcode
Use 2 workers for S3 upload.
from __future__ import print_function
import farmfs
from farmfs import getvol
from docopt import docopt
from functools import partial
from farmfs import cwd
from farmfs.util import empty_default, fmap, ffilter, pipeline, concat, identify, uncurry, count, groupby, consume, concatMap, zipFrom, safetype, ingest, first, maybe, every, identity, repeater, uniq, compose, pfmap
from farmfs.volume import mkfs, tree_diff, tree_patcher, encode_snapshot
from farmfs.fs import Path, userPath2Path, ftype_selector, FILE, LINK, skip_ignored, ensure_symlink, walk
from json import JSONEncoder
from s3lib.ui import load_creds as load_s3_creds
import sys
from farmfs.blobstore import S3Blobstore
from tqdm import tqdm
try:
from itertools import ifilter
except ImportError:
# On python3, filter is lazy.
ifilter = filter
try:
from itertools import imap
except ImportError:
# On python3 map is lazy.
imap = map
def json_encode(data):
    """Encode *data* as JSON with sorted keys, leaving non-ASCII unescaped.

    Defined with `def` rather than a lambda assignment (PEP 8 E731).
    """
    return JSONEncoder(ensure_ascii=False, sort_keys=True).encode(data)
# Printer for a stream rendered as one JSON list.
json_printr = pipeline(list, json_encode, print)
# Printer for a stream of strings, one per line.
strs_printr = pipeline(fmap(print), consume)
def dict_printr(keys, d):
    """Print one tab-separated row of *d*'s values for the given *keys*.

    Keys missing from *d* are rendered as the empty string.
    """
    columns = [ingest(d.get(key, '')) for key in keys]
    print("\t".join(columns))
def dicts_printr(keys):
    # Build a stream printer: renders each dict in a stream as one
    # tab-separated row of the given keys (see dict_printr).
    return pipeline(fmap(partial(dict_printr, keys)), consume)
snapshot_printr = dicts_printr(['path', 'type', 'csum'])
UI_USAGE = \
"""
FarmFS
Usage:
farmfs mkfs [--root <root>] [--data <data>]
farmfs (status|freeze|thaw) [<path>...]
farmfs snap list
farmfs snap (make|read|delete|restore|diff) <snap>
farmfs fsck [--broken --frozen-ignored --blob-permissions --checksums]
farmfs count
farmfs similarity
farmfs gc [--noop]
farmfs remote add <remote> <root>
farmfs remote remove <remote>
farmfs remote list [<remote>]
farmfs pull <remote> [<snap>]
farmfs diff <remote> [<snap>]
Options:
"""
def op_doer(op):
    """Execute one patch operation.

    *op* is a (blob_op, tree_op, description) triple; the two callables are
    invoked in order — blob work first, then the tree mutation. The
    description element is metadata for printers and is ignored here.
    """
    blob_action, tree_action, _desc = op
    blob_action()
    tree_action()
stream_op_doer = fmap(op_doer)
def fsck_missing_blobs(vol, cwd):
    '''Look for blobs in tree or snaps which are not in blobstore.'''
    # Streams (snap, item) pairs out of every tree/snapshot, keeps links
    # whose checksum has no backing blob, groups them by checksum, prints
    # each group, and returns the number of distinct missing blobs.
    trees = vol.trees()
    # Pair every item with the tree/snapshot it came from.
    tree_items = concatMap(lambda t: zipFrom(t,iter(t)))
    tree_links = ffilter(uncurry(lambda snap, item: item.is_link()))
    broken_tree_links = partial(
        ifilter,
        uncurry(lambda snap, item: not vol.bs.exists(item.csum())))
    checksum_grouper = partial(groupby,
        uncurry(lambda snap, item: item.csum()))
    def broken_link_printr(csum, snap_items):
        # One line for the checksum, then a tab-indented line per reference.
        print(csum)
        for (snap, item) in snap_items:
            print( '',
                snap.name,
                item.to_path(vol.root).relative_to(cwd),
                sep='\t')
    broken_links_printr = fmap(identify(uncurry(broken_link_printr)))
    # count runs after grouping, so the result counts checksums, not links.
    num_bad_blobs = pipeline(
        tree_items,
        tree_links,
        broken_tree_links,
        checksum_grouper,
        broken_links_printr,
        count)(trees)
    return num_bad_blobs
def fsck_frozen_ignored(vol, cwd):
    '''Look for frozen links which are in the ignored file.'''
    #TODO some of this logic could be moved to volume. Which files are members of the volume is a function of the volume.
    # Never descend into the volume's own metadata directory while walking.
    ignore_mdd = partial(skip_ignored, [safetype(vol.mdd)])
    ignored_frozen = pipeline(
        ftype_selector([LINK]),
        ffilter(uncurry(vol.is_ignored)),
        fmap(first),
        fmap(lambda p: p.relative_to(cwd)),
        fmap(partial(print, "Ignored file frozen")),
        count
    )(walk(vol.root, skip=ignore_mdd))
    # Number of offending links; non-zero means this check failed.
    return ignored_frozen
def fsck_blob_permissions(vol, cwd):
    '''Look for blobstore blobs which are not readonly.'''
    # cwd is unused here; the parameter is kept so all fsck checks share
    # the (vol, cwd) call signature used by the fsck dispatcher.
    blob_permissions = pipeline(
        ffilter(vol.bs.verify_blob_permissions),
        fmap(partial(print, "writable blob: ")),
        count
    )(vol.bs.blobs())
    # Count of writable blobs found (0 == all blobs readonly).
    return blob_permissions
def fsck_checksum_mismatches(vol, cwd):
    '''Look for checksum mismatches.'''
    #TODO CORRUPTION checksum mismatch in blob <CSUM>, would be nice to know back references.
    # Re-hash every blob via pfmap (presumably a parallel map — confirm in
    # farmfs.util) and report blobs whose content hash differs from their
    # name. Returns the number of corrupt blobs.
    mismatches = pipeline(
        pfmap(lambda blob: (blob, vol.bs.blob_checksum(blob))),
        ffilter(lambda blob_csum: blob_csum[0] != blob_csum[1]),
        fmap(first),
        fmap(lambda csum: print("CORRUPTION checksum mismatch in blob %s" % csum)),
        count
    )(vol.bs.blobs())
    return mismatches
def ui_main():
    """Console entry point for `farmfs`: run the UI and exit with its code."""
    result = farmfs_ui(sys.argv[1:], cwd)
    # sys.exit rather than the site-provided exit(): the exit builtin is
    # injected by the site module and is absent under `python -S` and in
    # some frozen/embedded interpreters.
    sys.exit(result)
def farmfs_ui(argv, cwd):
    """Run one `farmfs` command.

    Parses argv against UI_USAGE (docopt) and dispatches to the matching
    sub-command. User-facing paths are printed relative to cwd.

    argv -- argument vector, without the program name.
    cwd  -- Path of the directory the user invoked the tool from.
    Returns the process exit code: 0 on success; fsck ORs in a distinct
    bit per failed check.
    """
    exitcode = 0
    args = docopt(UI_USAGE, argv)
    if args['mkfs']:
        root = userPath2Path(args['<root>'] or ".", cwd)
        data = userPath2Path(args['<data>'], cwd) if args.get('<data>') else Path(".farmfs/userdata", root)
        mkfs(root, data)
        print("FileSystem Created %s using blobstore %s" % (root, data))
    else:
        # Every other command operates on an existing volume at/above cwd.
        vol = getvol(cwd)
        paths = empty_default(map(lambda x: userPath2Path(x, cwd), args['<path>']), [vol.root])
        def delta_printr(delta):
            deltaPath = delta.path(vol.root).relative_to(cwd)
            print("diff: %s %s %s" % (delta.mode, deltaPath, delta.csum))
        stream_delta_printr = fmap(identify(delta_printr))
        def op_printr(op):
            (blob_op, tree_op, (desc, path)) = op
            print(desc % path.relative_to(cwd))
        stream_op_printr = fmap(identify(op_printr))
        if args['status']:
            get_thawed = fmap(vol.thawed)
            pipeline(get_thawed,
                concat,
                fmap(lambda p: p.relative_to(cwd)),
                fmap(print),
                consume)(paths)
        elif args['freeze']:
            def printr(freeze_op):
                s = "Imported %s with checksum %s" % \
                    (freeze_op['path'].relative_to(cwd),
                    freeze_op['csum'])
                if freeze_op['was_dup']:
                    print(s, "was a duplicate")
                else:
                    print(s)
            importer = fmap(vol.freeze)
            get_thawed = fmap(vol.thawed)
            print_list = fmap(printr)
            pipeline(get_thawed, concat, importer, print_list, consume)(paths)
        elif args['thaw']:
            def printr(path):
                print("Exported %s" % path.relative_to(cwd))
            exporter = fmap(vol.thaw)
            get_frozen = fmap(vol.frozen)
            print_list = fmap(printr)
            pipeline(get_frozen, concat, exporter, print_list, consume)(paths)
        elif args['fsck']:
            # Each check owns one bit of the exit code so failures compose.
            fsck_actions = {
                '--broken': (fsck_missing_blobs, 1),
                '--frozen-ignored': (fsck_frozen_ignored, 4),
                '--blob-permissions': (fsck_blob_permissions, 8),
                '--checksums': (fsck_checksum_mismatches, 2),
            }
            fsck_tasks = [action for (verb, action) in fsck_actions.items() if args[verb]]
            if len(fsck_tasks) == 0:
                # No options were specified, run the whole suite.
                fsck_tasks = fsck_actions.values()
            for check, fail_code in fsck_tasks:
                exitcode = exitcode | (check(vol, cwd) and fail_code)
        elif args['count']:
            # Print each checksum, its reference count, and every referrer.
            trees = vol.trees()
            tree_items = concatMap(lambda t: zipFrom(t, iter(t)))
            tree_links = ffilter(uncurry(lambda snap, item: item.is_link()))
            checksum_grouper = partial(groupby,
                uncurry(lambda snap, item: item.csum()))
            def count_printr(csum, snap_items):
                print(csum, count(snap_items))
                for (snap, item) in snap_items:
                    print(snap.name, item.to_path(vol.root).relative_to(cwd))
            counts_printr = fmap(identify(uncurry(count_printr)))
            pipeline(
                tree_items,
                tree_links,
                checksum_grouper,
                counts_printr,
                consume
            )(trees)
        elif args['similarity']:
            for (dir_a, count_a, dir_b, count_b, intersect) in vol.similarity():
                assert isinstance(dir_a, Path)
                assert isinstance(dir_b, Path)
                path_a = dir_a.relative_to(cwd)
                path_b = dir_b.relative_to(cwd)
                print(path_a, "%d/%d %d%%" % (intersect, count_a, int(100 * float(intersect) / count_a)),
                    path_b, "%d/%d %d%%" % (intersect, count_b, int(100 * float(intersect) / count_b)))
        elif args['gc']:
            # --noop walks the same pipeline but substitutes identity for
            # the actual blob deletion.
            applyfn = fmap(identity) if args.get('--noop') else fmap(vol.bs.delete_blob)
            fns = [fmap(identify(partial(print, "Removing"))),
                applyfn,
                consume]
            pipeline(*fns)(sorted(vol.unused_blobs(vol.items())))
        elif args['snap']:
            snapdb = vol.snapdb
            if args['list']:
                #TODO have an optional argument for which remote.
                print("\n".join(snapdb.list()))
            else:
                name = args['<snap>']
                if args['delete']:
                    snapdb.delete(name)
                elif args['make']:
                    snapdb.write(name, vol.tree())
                else:
                    snap = snapdb.read(name)
                    if args['read']:
                        for i in snap:
                            print(i)
                    elif args['restore']:
                        # (dropped a dead `tree = vol.tree()` local here.)
                        diff = tree_diff(vol.tree(), snap)
                        pipeline(
                            stream_delta_printr,
                            tree_patcher(vol, vol),
                            stream_op_printr,
                            stream_op_doer,
                            consume)(diff)
                    elif args['diff']:
                        diff = tree_diff(vol.tree(), snap)
                        pipeline(stream_delta_printr, consume)(diff)
        elif args['remote']:
            if args["add"]:
                remote_vol = getvol(userPath2Path(args['<root>'], cwd))
                vol.remotedb.write(args['<remote>'], remote_vol)
            elif args["remove"]:
                vol.remotedb.delete(args['<remote>'])
            elif args["list"]:
                if args["<remote>"]:
                    remote_vol = vol.remotedb.read(args['<remote>'])
                    print("\n".join(remote_vol.snapdb.list()))
                else:
                    for remote_name in vol.remotedb.list():
                        remote_vol = vol.remotedb.read(remote_name)
                        print(remote_name, remote_vol.root)
        elif args['pull'] or args['diff']:
            # `pull` applies the remote snapshot locally; `diff` only prints.
            remote_vol = vol.remotedb.read(args['<remote>'])
            snap_name = args['<snap>']
            remote_snap = remote_vol.snapdb.read(snap_name) if snap_name else remote_vol.tree()
            diff = tree_diff(vol.tree(), remote_snap)
            if args['pull']:
                patcher = tree_patcher(vol, remote_vol)
                pipeline(
                    stream_delta_printr,
                    patcher,
                    stream_op_printr,
                    stream_op_doer,
                    consume)(diff)
            else:  # diff
                pipeline(stream_delta_printr, consume)(diff)
    return exitcode
def printNotNone(value):
    """Print *value* unless it is None (avoids emitting a literal 'None')."""
    if value is None:
        return
    print(value)
def reverse(vol, csum):
    """Yields a set of paths which reference a given checksum_path name."""
    # NOTE(review): this is an unimplemented stub — the body is only the
    # docstring, so it always returns None despite claiming to yield paths.
    # The `farmdbg reverse` command implements this inline in dbg_ui
    # instead. TODO: implement or remove.
DBG_USAGE = \
"""
FarmDBG
Usage:
farmdbg reverse [--snap=<snapshot>|--all] <csum>
farmdbg key read <key>
farmdbg key write <key> <value>
farmdbg key delete <key>
farmdbg key list [<key>]
farmdbg walk (keys|userdata|root|snap <snapshot>) [--json]
farmdbg checksum <path>...
farmdbg fix link [--remote=<remote>] <target> <file>
farmdbg rewrite-links <target>
farmdbg missing <snap>...
farmdbg blobtype <blob>...
farmdbg blob <blob>...
farmdbg s3 list <bucket> <prefix>
farmdbg s3 upload [--quiet] <bucket> <prefix>
farmdbg s3 check <bucket> <prefix>
"""
def dbg_main():
    # Console entry point for `farmdbg`. Returns the exit code to the
    # caller rather than exiting the interpreter itself.
    return dbg_ui(sys.argv[1:], cwd)
def dbg_ui(argv, cwd):
    """Run one `farmdbg` debugging/maintenance command.

    argv: argument vector (without the program name), parsed against
          DBG_USAGE.
    cwd: Path the tool was invoked from; printed paths are relative to it.
    Returns the process exit code (0 on success).
    """
    exitcode = 0
    args = docopt(DBG_USAGE, argv)
    vol = getvol(cwd)
    if args['reverse']:
        # Print every (snap, path) pair referencing the given checksum.
        csum = args['<csum>']
        if args['--all']:
            trees = vol.trees()
        elif args['--snap']:
            trees = [vol.snapdb.read(args['--snap'])]
        else:
            trees = [vol.tree()]
        tree_items = concatMap(lambda t: zipFrom(t,iter(t)))
        tree_links = ffilter(uncurry(lambda snap, item: item.is_link()))
        matching_links = ffilter(uncurry(lambda snap, item: item.csum() == csum))
        def link_printr(snap_item):
            (snap, item) = snap_item
            print(snap.name, item.to_path(vol.root).relative_to(cwd))
        links_printr = fmap(identify(link_printr))
        pipeline(
            tree_items,
            tree_links,
            matching_links,
            links_printr,
            consume)(trees)
    elif args['key']:
        # Raw access to the volume key/value store.
        db = vol.keydb
        key = args['<key>']
        if args['read']:
            printNotNone(db.readraw(key))
        elif args['delete']:
            db.delete(key)
        elif args['list']:
            for v in db.list(key):
                print(v)
        elif args['write']:
            value = args['<value>']
            db.write(key, value)
    elif args['walk']:
        # Dump a tree/snapshot/blob/key listing, optionally as JSON.
        if args['root']:
            printr = json_printr if args.get('--json') else snapshot_printr
            printr(encode_snapshot(vol.tree()))
        elif args['snap']:
            #TODO could add a test for output encoding.
            #TODO could add a test for snap format. Leading '/' on paths.
            printr = json_printr if args.get('--json') else snapshot_printr
            printr(encode_snapshot(vol.snapdb.read(args['<snapshot>'])))
        elif args['userdata']:
            blobs = vol.bs.blobs()
            printr = json_printr if args.get('--json') else strs_printr
            printr(blobs)
        elif args['keys']:
            printr = json_printr if args.get('--json') else strs_printr
            printr(vol.keydb.list())
    elif args['checksum']:
        #TODO <checksum> <full path>
        paths = empty_default(map(lambda x: Path(x, cwd), args['<path>']), [vol.root])
        for p in paths:
            print(p.checksum(), p.relative_to(cwd))
    elif args['link']:
        # `fix link`: point <file> at blob <target>, fetching the blob from
        # a remote first when it is missing locally.
        f = Path(args['<file>'], cwd)
        b = ingest(args['<target>'])
        if not vol.bs.exists(b):
            print("blob %s doesn't exist" % b)
            if args['--remote']:
                remote = vol.remotedb.read(args['--remote'])
            else:
                raise(ValueError("aborting due to missing blob"))
            vol.bs.fetch_blob(remote.bs, b)
        else:
            pass #b exists, can we check its checksum?
        vol.bs.link_to_blob(f, b)
    elif args['rewrite-links']:
        target = Path(args['<target>'], cwd)
        for item in vol.tree():
            if not item.is_link():
                continue
            path = item.to_path(vol.root)
            new = vol.repair_link(path)
            if new is not None:
                print("Relinked %s to %s" % (path.relative_to(cwd), new))
    elif args['missing']:
        # Blobs referenced by the named snaps but absent from the current
        # tree, whose files also no longer exist on disk.
        tree_csums = pipeline(
            ffilter(lambda item: item.is_link()),
            fmap(lambda item: item.csum()),
            set
        )(iter(vol.tree()))
        snapNames = args['<snap>']
        def missing_printr(csum, pathStrs):
            paths = sorted(imap(lambda pathStr: vol.root.join(pathStr), pathStrs))
            for path in paths:
                print("%s\t%s" % (csum, path.relative_to(cwd)))
        missing_csum2pathStr = pipeline(
            fmap(vol.snapdb.read),
            concatMap(iter),
            ffilter(lambda item: item.is_link()),
            ffilter(lambda item: not vol.is_ignored(item.to_path(vol.root), None)),
            ffilter(lambda item: item.csum() not in tree_csums),
            partial(groupby, lambda item: item.csum()),
            ffilter(uncurry(lambda csum, items: every(lambda item: not item.to_path(vol.root).exists(), items))),
            fmap(uncurry(lambda csum, items: (csum, list(imap(lambda item: item.pathStr(), items))))),
            fmap(uncurry(missing_printr)),
            count
        )(snapNames)
    elif args['blobtype']:
        for blob in args['<blob>']:
            blob = ingest(blob)
            #TODO here csum_to_path is really needed.
            print(
                blob,
                maybe("unknown", vol.bs.csum_to_path(blob).filetype()))
    elif args['blob']:
        for csum in args['<blob>']:
            csum = ingest(csum)
            #TODO here csum_to_path is needed
            print(csum,
                vol.bs.csum_to_path(csum).relative_to(cwd))
    elif args['s3']:
        bucket = args['<bucket>']
        prefix = args['<prefix>']
        access_id, secret_key = load_s3_creds(None)
        s3bs = S3Blobstore(bucket, prefix, access_id, secret_key)
        if args['list']:
            pipeline(fmap(print), consume)(s3bs.blobs()())
        elif args['upload']:
            # Upload every tree blob the S3 bucket does not already hold.
            quiet = args.get('--quiet')
            print("Fetching remote blobs")
            s3_blobs = set(tqdm(s3bs.blobs()(), disable=quiet, desc="Fetching remote blobs", smoothing=1.0, dynamic_ncols=True, maxinterval=1.0))
            print("Remote Blobs: %s" % len(s3_blobs))
            print("Fetching local blobs") #TODO we are looking at tree, so blobs in snaps won't be sent.
            tree_blobs = set(tqdm(pipeline(
                ffilter(lambda x: x.is_link()),
                fmap(lambda x: x.csum()),
                uniq,
            )(iter(vol.tree())), disable=quiet, desc="Calculating local blobs", smoothing=1.0, dynamic_ncols=True, maxinterval=1.0))
            print("Local Blobs: %s" % len(tree_blobs))
            upload_blobs = tree_blobs - s3_blobs
            print("Uploading %s blobs to s3" % len(upload_blobs))
            with tqdm(desc="Uploading to S3", disable=quiet, total=len(upload_blobs), smoothing=1.0, dynamic_ncols=True, maxinterval=1.0) as pbar:
                def update_pbar(blob):
                    pbar.update(1)
                    pbar.set_description("Uploaded %s" % blob)
                def upload(blob):
                    s3bs.upload(blob, vol.bs.csum_to_path(blob))()
                    return blob
                all_success = pipeline(
                    ffilter(lambda x: x not in s3_blobs),
                    # NOTE(review): worker count hard-coded to 2 — confirm
                    # this is the intended parallelism for S3 uploads.
                    pfmap(upload, workers=2),
                    fmap(identify(update_pbar)),
                    partial(every, identity),
                )(upload_blobs)
                if all_success:
                    print("Successfully uploaded")
                else:
                    print("Failed to upload")
                    exitcode = 1
        elif args['check']:
            # Compare each S3 object's ETag to its blob name.
            # NOTE(review): this presumes single-part uploads, whose ETag is
            # the object MD5 — multipart ETags would not match; confirm the
            # upload path never multiparts.
            num_corrupt_blobs = pipeline(
                ffilter(lambda obj: obj['ETag'][1:-1] != obj['blob']),
                fmap(identify(lambda obj: print(obj['blob'], obj['ETag'][1:-1]))),
                count
            )(s3bs.blob_stats()())
            if num_corrupt_blobs == 0:
                print("All S3 blobs etags match")
            else:
                exitcode = 2
    return exitcode
|
'''
Copyright 2020 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import json
import argparse
import unittest
import re
import requests
from os import linesep as LINESEP
# backward compatibility with 2.x
try:
ModuleNotFoundError
except:
ModuleNotFoundError = ImportError
try:
import rados
except ModuleNotFoundError as noModErr:
print("Error: %s\nExiting the script..." % noModErr)
sys.exit(1)
try:
# for 2.7.x
from StringIO import StringIO
except ModuleNotFoundError:
# for 3.x
from io import StringIO
try:
# for 2.7.x
from urlparse import urlparse
except ModuleNotFoundError:
# for 3.x
from urllib.parse import urlparse
class ExecutionFailureException(Exception):
    """Script-specific failure; the exception message carries the detail."""
    pass
################################################
################## DummyRados ##################
################################################
# this is mainly for testing and could be used where 'rados' is not available
class DummyRados(object):
def __init__(self):
self.return_val = 0
self.err_message = ''
self.state = 'connected'
self.cmd_output_map = {}
self.cmd_names = {}
self._init_cmd_output_map()
self.dummy_host_ip_map = {}
def _init_cmd_output_map(self):
self.cmd_names['fs ls'] = '''{"format": "json", "prefix": "fs ls"}'''
self.cmd_names['quorum_status'] = '''{"format": "json", "prefix": "quorum_status"}'''
self.cmd_names['caps_change_default_pool_prefix'] = '''{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth caps"}'''
# all the commands and their output
self.cmd_output_map[self.cmd_names['fs ls']
] = '''[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-data0"]}]'''
self.cmd_output_map[self.cmd_names['quorum_status']] = '''{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}'''
self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]'''
self.cmd_output_map['''{"caps": ["mon", "profile rbd", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd","osd":"profile rbd"}}]'''
self.cmd_output_map['''{"caps": ["mon", "profile rbd", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd","osd":"profile rbd"}}]'''
self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs *=*"}}]'''
self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs metadata=*"}}]'''
self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]'''
self.cmd_output_map['''{"format": "json", "prefix": "mgr services"}'''] = '''{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}'''
self.cmd_output_map['''{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}'''] = '''{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}'''
self.cmd_output_map['''{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]'''
self.cmd_output_map[self.cmd_names['caps_change_default_pool_prefix']] = '''[{}]'''
def shutdown(self):
    """No-op: the dummy cluster holds no real connection to close."""
def get_fsid(self):
    """Return a fixed, well-known fsid for deterministic tests."""
    return 'af4e1673-0b72-402d-990a-22d2919d0f1c'
def conf_read_file(self):
    """No-op: the dummy cluster reads no configuration file."""
def connect(self):
    """No-op: there is no real cluster to connect to."""
def pool_exists(self, pool_name):
    """Pretend that every requested pool exists on the dummy cluster."""
    return True
def mon_command(self, cmd, out):
    """Simulate a Ceph mon command via canned responses.

    The incoming JSON command string is re-serialized with sorted keys
    so it matches the canonical keys stored in 'cmd_output_map'.
    Returns the usual (return_code, output, error_bytes) triple.
    """
    canonical_cmd = json.dumps(json.loads(cmd), sort_keys=True)
    canned_output = self.cmd_output_map[canonical_cmd]
    err_bytes = "{}".format(self.err_message).encode('utf-8')
    return self.return_val, canned_output, err_bytes
def _convert_hostname_to_ip(self, host_name):
ip_reg_x = re.compile(r'\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}')
# if provided host is directly an IP address, return the same
if ip_reg_x.match(host_name):
return host_name
import random
host_ip = self.dummy_host_ip_map.get(host_name, "")
if not host_ip:
host_ip = "172.9.{}.{}".format(
random.randint(0, 255), random.randint(0, 255))
self.dummy_host_ip_map[host_name] = host_ip
del random
return host_ip
@classmethod
def Rados(cls, conffile=None):
    """Factory mirroring rados.Rados(conffile=...); returns a dummy cluster.

    Bug fix: the original classmethod omitted the 'cls' parameter, so the
    class object was silently bound to 'conffile' and passing
    conffile=... explicitly raised a TypeError.
    """
    return DummyRados()
class RadosJSON:
    """Collects Ceph cluster details over librados mon commands and renders
    them as JSON or bash 'export' output for an external-cluster consumer."""
    # default user used for health checking when none is supplied via CLI
    EXTERNAL_USER_NAME = "client.healthchecker"
    # error text used when a mon command succeeds but returns an empty list
    EMPTY_OUTPUT_LIST = "Empty output list"
    # default RGW pool-name prefix
    DEFAULT_RGW_POOL_PREFIX = "default"
    # expected port of the ceph-mgr prometheus exporter
    DEFAULT_MONITORING_ENDPOINT_PORT = "9283"
@classmethod
def gen_arg_parser(cls, args_to_parse=None):
argP = argparse.ArgumentParser()
common_group = argP.add_argument_group('common')
common_group.add_argument("--verbose", "-v",
action='store_true', default=False)
common_group.add_argument("--ceph-conf", "-c",
help="Provide a ceph conf file.", type=str)
common_group.add_argument("--run-as-user", "-u", default="", type=str,
help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'")
common_group.add_argument("--cluster-name", default="openshift-storage",
help="Ceph cluster name")
common_group.add_argument("--namespace", default="",
help="Namespace where CephCluster is running")
common_group.add_argument("--rgw-pool-prefix", default="",
help="RGW Pool prefix")
output_group = argP.add_argument_group('output')
output_group.add_argument("--format", "-t", choices=["json", "bash"],
default='json', help="Provides the output format (json | bash)")
output_group.add_argument("--output", "-o", default="",
help="Output will be stored into the provided file")
output_group.add_argument("--cephfs-filesystem-name", default="",
help="Provides the name of the Ceph filesystem")
output_group.add_argument("--cephfs-data-pool-name", default="",
help="Provides the name of the cephfs data pool")
output_group.add_argument("--rbd-data-pool-name", default="", required=False,
help="Provides the name of the RBD datapool")
output_group.add_argument("--rgw-endpoint", default="", required=False,
help="Rados GateWay endpoint (in <IP>:<PORT> format)")
output_group.add_argument("--monitoring-endpoint", default="", required=False,
help="Ceph Manager prometheus exporter endpoints comma separated list of <IP> entries")
output_group.add_argument("--monitoring-endpoint-port", default="", required=False,
help="Ceph Manager prometheus exporter port")
upgrade_group = argP.add_argument_group('upgrade')
upgrade_group.add_argument("--upgrade", action='store_true', default=False,
help="Upgrades the 'user' with all the permissions needed for the new cluster version")
if args_to_parse:
assert type(args_to_parse) == list, \
"Argument to 'gen_arg_parser' should be a list"
else:
args_to_parse = sys.argv[1:]
return argP.parse_args(args_to_parse)
def _check_conflicting_options(self):
if not self._arg_parser.upgrade and not self._arg_parser.rbd_data_pool_name:
raise ExecutionFailureException(
"Either '--upgrade' or '--rbd-data-pool-name <pool_name>' should be specified")
if self._arg_parser.upgrade and self._arg_parser.rbd_data_pool_name:
raise ExecutionFailureException(
"Both '--upgrade' and '--rbd-data-pool-name <pool_name>' should not be specified, choose only one")
# a user name must be provided while using '--upgrade' option
if not self._arg_parser.run_as_user and self._arg_parser.upgrade:
raise ExecutionFailureException(
"Please provide an existing user-name through '--run-as-user' (or '-u') flag while upgrading")
def _invalid_endpoint(self, endpoint_str):
try:
ipv4, port = endpoint_str.split(':')
except ValueError:
raise ExecutionFailureException(
"Not a proper endpoint: {}, <IPv4>:<PORT>, format is expected".format(endpoint_str))
ipParts = ipv4.split('.')
if len(ipParts) != 4:
raise ExecutionFailureException(
"Not a valid IP address: {}".format(ipv4))
for eachPart in ipParts:
if not eachPart.isdigit():
raise ExecutionFailureException(
"IP address parts should be numbers: {}".format(ipv4))
intPart = int(eachPart)
if intPart < 0 or intPart > 254:
raise ExecutionFailureException(
"Out of range IP addresses: {}".format(ipv4))
if not port.isdigit():
raise ExecutionFailureException("Port not valid: {}".format(port))
intPort = int(port)
if intPort < 1 or intPort > 2**16-1:
raise ExecutionFailureException(
"Out of range port number: {}".format(port))
return False
def endpoint_dial(self, endpoint_str, timeout=3):
    """Probe *endpoint_str* over http then https; raise when neither answers 200.

    Bug fix: the bare 'except:' also swallowed SystemExit and
    KeyboardInterrupt; narrowed to 'except Exception' so connection
    errors are still retried while control-flow exceptions propagate.
    """
    # if the 'cluster' instance is a dummy one,
    # don't try to reach out to the endpoint
    if isinstance(self.cluster, DummyRados):
        return
    protocols = ["http", "https"]
    for prefix in protocols:
        try:
            ep = "{}://{}".format(prefix, endpoint_str)
            r = requests.head(ep, timeout=timeout)
            if r.status_code == 200:
                return
        except Exception:
            continue
    raise ExecutionFailureException(
        "unable to connect to endpoint: {}".format(endpoint_str))
def __init__(self, arg_list=None):
    """Parse arguments, validate them and connect to the Ceph cluster.

    :param arg_list: optional list of CLI arguments; when None,
        sys.argv[1:] is used by the argument parser.
    """
    # accumulated key/value output, filled lazily by _gen_output_map()
    self.out_map = {}
    # keys that must never be exported by gen_shell_out()
    self._excluded_keys = set()
    self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list)
    self._check_conflicting_options()
    self.run_as_user = self._arg_parser.run_as_user
    self.output_file = self._arg_parser.output
    self.ceph_conf = self._arg_parser.ceph_conf
    # minimal capabilities required by the health-checker user;
    # '{0}' placeholders are later filled with the rgw pool prefix
    self.MIN_USER_CAP_PERMISSIONS = {
        'mgr': 'allow command config',
        'mon': 'allow r, allow command quorum_status, allow command version',
        'osd': "allow rwx pool={0}.rgw.meta, " +
               "allow r pool=.rgw.root, " +
               "allow rw pool={0}.rgw.control, " +
               "allow rx pool={0}.rgw.log, " +
               "allow x pool={0}.rgw.buckets.index"
    }
    # if user not provided, give a default user
    if not self.run_as_user and not self._arg_parser.upgrade:
        self.run_as_user = self.EXTERNAL_USER_NAME
    if not self._arg_parser.rgw_pool_prefix and not self._arg_parser.upgrade:
        self._arg_parser.rgw_pool_prefix = self.DEFAULT_RGW_POOL_PREFIX
    # connect using the provided ceph.conf, or the default config search path
    if self.ceph_conf:
        self.cluster = rados.Rados(conffile=self.ceph_conf)
    else:
        self.cluster = rados.Rados()
        self.cluster.conf_read_file()
    self.cluster.connect()
def shutdown(self):
    """Disconnect from the Ceph cluster if the handle is still connected."""
    still_connected = (self.cluster.state == "connected")
    if still_connected:
        self.cluster.shutdown()
def get_fsid(self):
    """Return the cluster fsid, coerced to a string."""
    fsid = self.cluster.get_fsid()
    return str(fsid)
def _common_cmd_json_gen(self, cmd_json):
cmd = json.dumps(cmd_json, sort_keys=True)
ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b'')
if self._arg_parser.verbose:
print("Command Input: {}".format(cmd))
print("Return Val: {}\nCommand Output: {}\nError Message: {}\n----------\n".format(
ret_val, cmd_out, err_msg))
json_out = {}
# if there is no error (i.e; ret_val is ZERO) and 'cmd_out' is not empty
# then convert 'cmd_out' to a json output
if ret_val == 0 and cmd_out:
json_out = json.loads(cmd_out)
return ret_val, json_out, err_msg
def get_ceph_external_mon_data(self):
    """Return '<leader_name>=<ip:port>' for the current quorum leader."""
    ret_val, json_out, err_msg = self._common_cmd_json_gen(
        {"prefix": "quorum_status", "format": "json"})
    # if there is an unsuccessful attempt,
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'quorum_status' command failed.\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    leader_name = json_out['quorum_leader_name']
    leader_candidates = [mon for mon in json_out['monmap']['mons']
                         if mon['name'] == leader_name]
    if len(leader_candidates) == 0:
        raise ExecutionFailureException("No matching 'mon' details found")
    leader = leader_candidates[0]
    # 'public_addr' may carry a '/nonce' suffix; keep only 'ip:port'
    ip_port = str(leader['public_addr'].split('/')[0])
    return "{}={}".format(str(leader_name), ip_port)
def _join_host_port(self, endpoint, port):
port = "{}".format(port)
# regex to check the given endpoint is enclosed in square brackets
ipv6_regx = re.compile(r'^\[[^]]*\]$')
# endpoint has ':' in it and if not (already) enclosed in square brackets
if endpoint.count(':') and not ipv6_regx.match(endpoint):
endpoint = '[{}]'.format(endpoint)
if not port:
return endpoint
return ':'.join([endpoint, port])
def _convert_hostname_to_ip(self, host_name):
    """Resolve *host_name* to an IP address.

    Delegates to the dummy cluster's converter when running against
    DummyRados; otherwise resolves via DNS.
    Raises ExecutionFailureException for an empty hostname.
    """
    if not host_name:
        raise ExecutionFailureException("Empty hostname provided")
    # a dummy cluster fabricates its own addresses
    if isinstance(self.cluster, DummyRados):
        return self.cluster._convert_hostname_to_ip(host_name)
    import socket
    return socket.gethostbyname(host_name)
def get_active_ceph_mgr(self):
    """Determine and validate the (ip, port) of the mgr prometheus exporter.

    Uses '--monitoring-endpoint'/'--monitoring-endpoint-port' when given,
    otherwise discovers the endpoint through 'ceph mgr services'.
    Returns (monitoring_endpoint_ip, monitoring_endpoint_port).

    Bug fix: the bare 'except:' around hostname conversion also swallowed
    SystemExit/KeyboardInterrupt; narrowed to 'except Exception'.
    """
    monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
    monitoring_endpoint_ip = self._arg_parser.monitoring_endpoint
    if not monitoring_endpoint_ip:
        # discover the exporter endpoint from the active mgr
        cmd_json = {"prefix": "mgr services", "format": "json"}
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt,
        if ret_val != 0 or len(json_out) == 0:
            raise ExecutionFailureException(
                "'mgr services' command failed.\n" +
                "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
        monitoring_endpoint = json_out.get('prometheus')
        if not monitoring_endpoint:
            raise ExecutionFailureException(
                "'prometheus' service not found, is the exporter enabled?'.\n")
        try:
            parsed_endpoint = urlparse(monitoring_endpoint)
        except ValueError:
            raise ExecutionFailureException(
                "invalid endpoint: {}".format(monitoring_endpoint))
        monitoring_endpoint_ip = parsed_endpoint.hostname
        if not monitoring_endpoint_port:
            monitoring_endpoint_port = "{}".format(parsed_endpoint.port)
    # if monitoring endpoint port is not set, put a default mon port
    if not monitoring_endpoint_port:
        monitoring_endpoint_port = self.DEFAULT_MONITORING_ENDPOINT_PORT
    try:
        monitoring_endpoint_ip = self._convert_hostname_to_ip(
            monitoring_endpoint_ip)
    except Exception:
        raise ExecutionFailureException(
            "unable to convert a hostname to an IP address, monitoring host name: {}".format(monitoring_endpoint_ip))
    monitoring_endpoint = self._join_host_port(
        monitoring_endpoint_ip, monitoring_endpoint_port)
    self._invalid_endpoint(monitoring_endpoint)
    self.endpoint_dial(monitoring_endpoint)
    self.validate_monitoring_endpoint(monitoring_endpoint_port)
    return monitoring_endpoint_ip, monitoring_endpoint_port
def validate_monitoring_endpoint(self, port):
    """Ensure the prometheus exporter listens on the default port; raise otherwise."""
    if port == self.DEFAULT_MONITORING_ENDPOINT_PORT:
        return
    raise ExecutionFailureException(
        "'prometheus' service port must listen on {0}. You can change it with 'ceph config set mgr mgr/prometheus/server_port {0}'.\n".format(self.DEFAULT_MONITORING_ENDPOINT_PORT))
def create_cephCSIKeyring_cephFSProvisioner(self):
    """Create (or fetch) the CSI CephFS provisioner keyring; return its key.

    Equivalent CLI:
    ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*'
    """
    cmd_json = {
        "prefix": "auth get-or-create",
        "entity": "client.csi-cephfs-provisioner",
        "caps": ["mon", "allow r", "mgr", "allow rw",
                 "osd", "allow rw tag cephfs metadata=*"],
        "format": "json",
    }
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'auth get-or-create client.csi-cephfs-provisioner' command failed.\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    return str(json_out[0]['key'])
def create_cephCSIKeyring_cephFSNode(self):
    """Create (or fetch) the CSI CephFS node-plugin keyring; return its key."""
    cmd_json = {
        "prefix": "auth get-or-create",
        "entity": "client.csi-cephfs-node",
        "caps": ["mon", "allow r",
                 "mgr", "allow rw",
                 "osd", "allow rw tag cephfs *=*",
                 "mds", "allow rw"],
        "format": "json",
    }
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'auth get-or-create client.csi-cephfs-node' command failed.\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    return str(json_out[0]['key'])
def create_cephCSIKeyring_RBDProvisioner(self):
    """Create (or fetch) the CSI RBD provisioner keyring; return its key."""
    cmd_json = {
        "prefix": "auth get-or-create",
        "entity": "client.csi-rbd-provisioner",
        "caps": ["mon", "profile rbd",
                 "mgr", "allow rw",
                 "osd", "profile rbd"],
        "format": "json",
    }
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'auth get-or-create client.csi-rbd-provisioner' command failed.\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    return str(json_out[0]['key'])
def get_cephfs_data_pool_details(self):
    """Detect and validate the CephFS filesystem name and its data pool.

    Fills self._arg_parser.cephfs_filesystem_name and
    self._arg_parser.cephfs_data_pool_name when they can be determined;
    returns silently when nothing was requested and no sane default
    exists.  Raises ExecutionFailureException when user-provided names
    cannot be verified against the 'fs ls' output.
    """
    cmd_json = {"prefix": "fs ls", "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    # if there is an unsuccessful attempt, report an error
    if ret_val != 0:
        # if fs and data_pool arguments are not set, silently return
        if self._arg_parser.cephfs_filesystem_name == "" and self._arg_parser.cephfs_data_pool_name == "":
            return
        # if user has provided any of the
        # '--cephfs-filesystem-name' or '--cephfs-data-pool-name' arguments,
        # raise an exception as we are unable to verify the args
        raise ExecutionFailureException(
            "'fs ls' ceph call failed with error: {}".format(err_msg))
    matching_json_out = {}
    # if '--cephfs-filesystem-name' argument is provided,
    # check whether the provided filesystem-name exists or not
    if self._arg_parser.cephfs_filesystem_name:
        # get the matching list
        matching_json_out_list = [matched for matched in json_out
                                  if str(matched['name']) == self._arg_parser.cephfs_filesystem_name]
        # unable to find a matching fs-name, raise an error
        if len(matching_json_out_list) == 0:
            raise ExecutionFailureException(
                ("Filesystem provided, '{}', " +
                 "is not found in the fs-list: '{}'").format(
                    self._arg_parser.cephfs_filesystem_name,
                    [str(x['name']) for x in json_out]))
        matching_json_out = matching_json_out_list[0]
    # if cephfs filesystem name is not provided,
    # try to get a default fs name by doing the following
    else:
        # a. check if there is only one filesystem is present
        if len(json_out) == 1:
            matching_json_out = json_out[0]
        # b. or else, check if data_pool name is provided
        elif self._arg_parser.cephfs_data_pool_name:
            # and if present, check whether there exists a fs which has the data_pool
            for eachJ in json_out:
                if self._arg_parser.cephfs_data_pool_name in eachJ['data_pools']:
                    matching_json_out = eachJ
                    break
            # if there is no matching fs exists, that means provided data_pool name is invalid
            if not matching_json_out:
                raise ExecutionFailureException(
                    "Provided data_pool name, {}, does not exists".format(
                        self._arg_parser.cephfs_data_pool_name))
        # c. if nothing is set and couldn't find a default,
        else:
            # just return silently
            return
    if matching_json_out:
        self._arg_parser.cephfs_filesystem_name = str(
            matching_json_out['name'])
    if type(matching_json_out['data_pools']) == list:
        # if the user has already provided data-pool-name,
        # through --cephfs-data-pool-name
        if self._arg_parser.cephfs_data_pool_name:
            # if the provided name is not matching with the one in the list
            if self._arg_parser.cephfs_data_pool_name not in matching_json_out['data_pools']:
                raise ExecutionFailureException(
                    "{}: '{}', {}: {}".format(
                        "Provided data-pool-name",
                        self._arg_parser.cephfs_data_pool_name,
                        "doesn't match from the data-pools' list",
                        [str(x) for x in matching_json_out['data_pools']]))
        # if data_pool name is not provided,
        # then try to find a default data pool name
        else:
            # if no data_pools exist, silently return
            if len(matching_json_out['data_pools']) == 0:
                return
            self._arg_parser.cephfs_data_pool_name = str(
                matching_json_out['data_pools'][0])
            # if there are more than one 'data_pools' exist,
            # then warn the user that we are using the selected name
            if len(matching_json_out['data_pools']) > 1:
                print("{}: {}\n{}: '{}'\n".format(
                    "WARNING: Multiple data pools detected",
                    [str(x) for x in matching_json_out['data_pools']],
                    "Using the data-pool",
                    self._arg_parser.cephfs_data_pool_name))
def create_cephCSIKeyring_RBDNode(self):
    """Create (or fetch) the CSI RBD node-plugin keyring; return its key."""
    cmd_json = {
        "prefix": "auth get-or-create",
        "entity": "client.csi-rbd-node",
        "caps": ["mon", "profile rbd",
                 "osd", "profile rbd"],
        "format": "json",
    }
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'auth get-or-create client.csi-rbd-node' command failed\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    return str(json_out[0]['key'])
def create_checkerKey(self):
    """Create (or fetch) the health-checker user's keyring; return its key.

    The 'osd' cap template is instantiated with the configured RGW pool
    prefix before being sent.
    """
    osd_caps = self.MIN_USER_CAP_PERMISSIONS['osd'].format(
        self._arg_parser.rgw_pool_prefix)
    cmd_json = {
        "prefix": "auth get-or-create",
        "entity": self.run_as_user,
        "caps": ["mon", self.MIN_USER_CAP_PERMISSIONS['mon'],
                 "mgr", self.MIN_USER_CAP_PERMISSIONS['mgr'],
                 "osd", osd_caps],
        "format": "json",
    }
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'auth get-or-create {}' command failed\n".format(self.run_as_user) +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    return str(json_out[0]['key'])
def _gen_output_map(self):
    """Populate self.out_map with every value the external cluster needs.

    Idempotent: returns immediately when the map is already filled.
    Validates the RBD (and, when configured, RGW) pools before creating
    any keyrings.
    """
    if self.out_map:
        return
    pools_to_validate = [self._arg_parser.rbd_data_pool_name]
    # if rgw_endpoint is provided, validate it
    if self._arg_parser.rgw_endpoint:
        self._invalid_endpoint(self._arg_parser.rgw_endpoint)
        self.endpoint_dial(self._arg_parser.rgw_endpoint)
        rgw_pool_to_validate = ["{0}.rgw.meta".format(self._arg_parser.rgw_pool_prefix),
                                ".rgw.root",
                                "{0}.rgw.control".format(
                                    self._arg_parser.rgw_pool_prefix),
                                "{0}.rgw.log".format(
                                    self._arg_parser.rgw_pool_prefix)]
        pools_to_validate.extend(rgw_pool_to_validate)
    for pool in pools_to_validate:
        if not self.cluster.pool_exists(pool):
            raise ExecutionFailureException(
                "The provided pool, '{}', does not exist".format(pool))
    # CLUSTER_NAME is consumed internally and never exported by gen_shell_out()
    self._excluded_keys.add('CLUSTER_NAME')
    self.get_cephfs_data_pool_details()
    self.out_map['NAMESPACE'] = self._arg_parser.namespace
    self.out_map['CLUSTER_NAME'] = self._arg_parser.cluster_name
    self.out_map['ROOK_EXTERNAL_FSID'] = self.get_fsid()
    self.out_map['ROOK_EXTERNAL_USERNAME'] = self.run_as_user
    self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'] = self.get_ceph_external_mon_data()
    self.out_map['ROOK_EXTERNAL_USER_SECRET'] = self.create_checkerKey()
    self.out_map['CSI_RBD_NODE_SECRET_SECRET'] = self.create_cephCSIKeyring_RBDNode()
    self.out_map['CSI_RBD_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_RBDProvisioner()
    self.out_map['CEPHFS_POOL_NAME'] = self._arg_parser.cephfs_data_pool_name
    self.out_map['CEPHFS_FS_NAME'] = self._arg_parser.cephfs_filesystem_name
    self.out_map['CSI_CEPHFS_NODE_SECRET'] = ''
    self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = ''
    # create CephFS node and provisioner keyring only when MDS exists
    if self.out_map['CEPHFS_FS_NAME'] and self.out_map['CEPHFS_POOL_NAME']:
        self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode(
        )
        self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_cephFSProvisioner()
    self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint
    self.out_map['MONITORING_ENDPOINT'], \
        self.out_map['MONITORING_ENDPOINT_PORT'] = self.get_active_ceph_mgr()
    self.out_map['RBD_POOL_NAME'] = self._arg_parser.rbd_data_pool_name
    self.out_map['RGW_POOL_PREFIX'] = self._arg_parser.rgw_pool_prefix
def gen_shell_out(self):
    """Render the output map as a block of 'export KEY=VALUE' shell lines.

    Empty values and excluded keys are skipped.
    """
    self._gen_output_map()
    exports = ['export {}={}{}'.format(key, val, LINESEP)
               for key, val in self.out_map.items()
               if val and key not in self._excluded_keys]
    return ''.join(exports)
def gen_json_out(self):
    """Return the collected cluster data as a JSON array of resources.

    Each entry describes a kubernetes resource (ConfigMap / Secret /
    StorageClass / CephCluster) for the consumer side.  Optional
    entries are appended only when the corresponding data exists.
    """
    self._gen_output_map()
    # resources that are always emitted
    json_out = [
        {
            "name": "rook-ceph-mon-endpoints",
            "kind": "ConfigMap",
            "data": {
                "data": self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'],
                "maxMonId": "0",
                "mapping": "{}"
            }
        },
        {
            "name": "rook-ceph-mon",
            "kind": "Secret",
            "data": {
                "admin-secret": "admin-secret",
                "fsid": self.out_map['ROOK_EXTERNAL_FSID'],
                "mon-secret": "mon-secret"
            },
        },
        {
            "name": "rook-ceph-operator-creds",
            "kind": "Secret",
            "data": {
                "userID": self.out_map['ROOK_EXTERNAL_USERNAME'],
                "userKey": self.out_map['ROOK_EXTERNAL_USER_SECRET']
            }
        },
        {
            "name": "rook-csi-rbd-node",
            "kind": "Secret",
            "data": {
                "userID": 'csi-rbd-node',
                "userKey": self.out_map['CSI_RBD_NODE_SECRET_SECRET']
            }
        },
        {
            "name": "ceph-rbd",
            "kind": "StorageClass",
            "data": {
                "pool": self.out_map['RBD_POOL_NAME']
            }
        },
        {
            "name": "monitoring-endpoint",
            "kind": "CephCluster",
            "data": {
                "MonitoringEndpoint": self.out_map['MONITORING_ENDPOINT'],
                "MonitoringPort": self.out_map['MONITORING_ENDPOINT_PORT']
            }
        }
    ]
    # if 'CSI_RBD_PROVISIONER_SECRET' exists, then only add 'rook-csi-rbd-provisioner' Secret
    if self.out_map['CSI_RBD_PROVISIONER_SECRET']:
        json_out.append({
            "name": "rook-csi-rbd-provisioner",
            "kind": "Secret",
            "data": {
                "userID": 'csi-rbd-provisioner',
                "userKey": self.out_map['CSI_RBD_PROVISIONER_SECRET']
            },
        })
    # if 'CSI_CEPHFS_PROVISIONER_SECRET' exists, then only add 'rook-csi-cephfs-provisioner' Secret
    if self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']:
        json_out.append({
            "name": "rook-csi-cephfs-provisioner",
            "kind": "Secret",
            "data": {
                "adminID": 'csi-cephfs-provisioner',
                "adminKey": self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']
            },
        })
    # if 'CSI_CEPHFS_NODE_SECRET' exists, then only add 'rook-csi-cephfs-node' Secret
    if self.out_map['CSI_CEPHFS_NODE_SECRET']:
        json_out.append({
            "name": "rook-csi-cephfs-node",
            "kind": "Secret",
            "data": {
                "adminID": 'csi-cephfs-node',
                "adminKey": self.out_map['CSI_CEPHFS_NODE_SECRET']
            }
        })
    # if 'CEPHFS_FS_NAME' exists, then only add 'cephfs' StorageClass
    if self.out_map['CEPHFS_FS_NAME']:
        json_out.append({
            "name": "cephfs",
            "kind": "StorageClass",
            "data": {
                "fsName": self.out_map['CEPHFS_FS_NAME'],
                "pool": self.out_map['CEPHFS_POOL_NAME']
            }
        })
    # if 'RGW_ENDPOINT' exists, then only add 'ceph-rgw' StorageClass
    if self.out_map['RGW_ENDPOINT']:
        json_out.append({
            "name": "ceph-rgw",
            "kind": "StorageClass",
            "data": {
                "endpoint": self.out_map['RGW_ENDPOINT'],
                "poolPrefix": self.out_map['RGW_POOL_PREFIX']
            }
        })
    return json.dumps(json_out)+LINESEP
def upgrade_user_permissions(self):
    """Extend an existing user's caps up to MIN_USER_CAP_PERMISSIONS.

    Required caps are merged in front of the user's current caps with
    stable order and duplicates removed, then pushed with 'auth caps'.
    Raises ExecutionFailureException when the user does not exist or
    the caps update fails.

    Bug fix: the 'unable to determine prefix' error message used to
    read 'rgw-pool-prefx'.
    """
    # check whether the given user exists or not
    cmd_json = {"prefix": "auth get", "entity": "{}".format(
        self.run_as_user), "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException("'auth get {}' command failed.\n".format(self.run_as_user) +
                                        "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    existing_caps = json_out[0]['caps']
    for eachCap in ["mon", "mgr", "osd"]:
        min_cap_values = self.MIN_USER_CAP_PERMISSIONS.get(eachCap, '')
        cur_cap_values = existing_caps.get(eachCap, '')
        # the 'osd' cap template carries '{0}' placeholders for the rgw pool prefix
        if eachCap == "osd":
            # if directly provided through '--rgw-pool-prefix' argument, use it
            if self._arg_parser.rgw_pool_prefix:
                min_cap_values = min_cap_values.format(
                    self._arg_parser.rgw_pool_prefix)
            # or else try to detect one from the existing/current osd cap values
            else:
                rc = re.compile(r' pool=([^.]+)\.rgw\.[^ ]*')
                # 'findall()' gives a list of prefixes; 'set' removes duplicates
                cur_rgw_pool_prefix_list = list(
                    set(rc.findall(cur_cap_values)))
                if len(cur_rgw_pool_prefix_list) != 1:
                    raise ExecutionFailureException(
                        "Unable to determine 'rgw-pool-prefix'. Please provide one with '--rgw-pool-prefix' flag")
                min_cap_values = min_cap_values.format(
                    cur_rgw_pool_prefix_list[0])
        cur_cap_perm_list = [x.strip()
                             for x in cur_cap_values.split(',') if x.strip()]
        min_cap_perm_list = [x.strip()
                             for x in min_cap_values.split(',') if x.strip()]
        min_cap_perm_list.extend(cur_cap_perm_list)
        # eliminate duplicates while keeping the order stable
        # (a plain 'set' would re-order the items)
        new_cap_perm_list = []
        for perm in min_cap_perm_list:
            if perm not in new_cap_perm_list:
                new_cap_perm_list.append(perm)
        existing_caps[eachCap] = ", ".join(new_cap_perm_list)
    cmd_json = {"prefix": "auth caps",
                "entity": self.run_as_user,
                "caps": ["mon", existing_caps["mon"],
                         "mgr", existing_caps["mgr"],
                         "osd", existing_caps["osd"]],
                "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0:
        raise ExecutionFailureException("'auth caps {}' command failed.\n".format(self.run_as_user) +
                                        "Error: {}".format(err_msg))
    print("Updated user, {}, successfully.".format(self.run_as_user))
def main(self):
    """Entry point: run the upgrade, or emit JSON/bash output.

    Writes the generated output to self.output_file when one was given.
    Raises ExecutionFailureException for an unsupported --format value.
    """
    generated_output = ''
    if self._arg_parser.upgrade:
        self.upgrade_user_permissions()
    elif self._arg_parser.format == 'json':
        generated_output = self.gen_json_out()
    elif self._arg_parser.format == 'bash':
        generated_output = self.gen_shell_out()
    else:
        raise ExecutionFailureException("Unsupported format: {}".format(
            self._arg_parser.format))
    print('{}'.format(generated_output))
    if self.output_file and generated_output:
        # bug fix: use a context manager so the file handle is closed
        # even if the write raises
        with open(self.output_file, 'w') as fOut:
            fOut.write(generated_output)
################################################
##################### MAIN #####################
################################################
if __name__ == '__main__':
    rjObj = RadosJSON()
    try:
        rjObj.main()
    except ExecutionFailureException as err:
        print("Execution Failed: {}".format(err))
    except KeyError as kErr:
        # bug fix: was print("KeyError: %s", kErr) — a logging-style format
        # string passed as a second print argument, which printed the literal
        # '%s' followed by the error instead of formatting it
        print("KeyError: {}".format(kErr))
    except OSError as osErr:
        print("Error while trying to output the data: {}".format(osErr))
    finally:
        rjObj.shutdown()
################################################
##################### TEST #####################
################################################
# inorder to test the package,
# cd <script_directory>
# python -m unittest --verbose <script_name_without_dot_py>
class TestRadosJSON(unittest.TestCase):
    """Unit tests for RadosJSON, run against the DummyRados stub cluster."""
def setUp(self):
    """Create a RadosJSON instance wired to a DummyRados stub cluster."""
    print("\nI am in setup")
    self.rjObj = RadosJSON(['--rbd-data-pool-name=abc',
                            '--rgw-endpoint=10.10.212.122:9000', '--format=json'])
    # for testing, we are using 'DummyRados' object
    self.rjObj.cluster = DummyRados.Rados()
def tearDown(self):
    """Shut down the (dummy) cluster connection after each test."""
    print("\nI am tearing down the setup\n")
    self.rjObj.shutdown()
def test_method_main_output(self):
    """main() succeeds for 'json' and 'bash' formats and rejects others."""
    print("JSON Output")
    self.rjObj._arg_parser.format = "json"
    self.rjObj.main()
    print("\n\nShell Output")
    self.rjObj._arg_parser.format = "bash"
    self.rjObj.main()
    print("\n\nNon compatible output (--abcd)")
    try:
        self.rjObj._arg_parser.format = 'abcd'
        self.rjObj.main()
        self.fail("Function should have thrown an Exception")
    except ExecutionFailureException as err:
        print("Exception thrown successfully: {}".format(err))
def test_method_create_cephCSIKeyring_cephFSProvisioner(self):
    """The CephFS provisioner keyring can be created against the dummy cluster."""
    csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
    print("{}".format(csiKeyring))
def test_non_zero_return_and_error(self):
    """A non-zero mon return code must raise ExecutionFailureException."""
    self.rjObj.cluster.return_val = 1
    self.rjObj.cluster.err_message = "Dummy Error"
    try:
        self.rjObj.create_checkerKey()
        self.fail("Failed to raise an exception, 'ExecutionFailureException'")
    except ExecutionFailureException as err:
        print("Successfully thrown error.\nError: {}".format(err))
def test_multi_filesystem_scenario(self):
    """Filesystem detection with multiple (and zero) filesystems present."""
    cmd_key = self.rjObj.cluster.cmd_names['fs ls']
    cmd_out = self.rjObj.cluster.cmd_output_map[cmd_key]
    cmd_json_out = json.loads(cmd_out)
    # clone the first filesystem entry so two filesystems are reported
    second_fs_details = dict(cmd_json_out[0])
    second_fs_details['name'] += '-2'
    cmd_json_out.append(second_fs_details)
    self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
    # multiple filesystem present,
    # but no specific '--cephfs-filesystem-name' argument provided
    try:
        self.rjObj.get_cephfs_data_pool_details()
        print("As we are returning silently, no error thrown as expected")
    except ExecutionFailureException as err:
        self.fail(
            "Supposed to get returned silently, but instead error thrown: {}".format(err))
    # pass an existing filesystem name
    try:
        self.rjObj._arg_parser.cephfs_filesystem_name = second_fs_details['name']
        self.rjObj.get_cephfs_data_pool_details()
    except ExecutionFailureException as err:
        self.fail("Should not have thrown error: {}".format(err))
    # pass a non-existing filesystem name
    try:
        self.rjObj._arg_parser.cephfs_filesystem_name += "-non-existing-fs-name"
        self.rjObj.get_cephfs_data_pool_details()
        self.fail("An Exception was expected to be thrown")
    except ExecutionFailureException as err:
        print("Successfully thrown error: {}".format(err))
    # empty file-system array
    try:
        self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps([])
        self.rjObj.get_cephfs_data_pool_details()
        self.fail("An Exception was expected to be thrown")
    except ExecutionFailureException as err:
        print("Successfully thrown error: {}".format(err))
def test_multi_data_pool_scenario(self):
    """Data-pool detection with multiple, invalid and empty pool lists."""
    cmd_key = self.rjObj.cluster.cmd_names['fs ls']
    cmd_out = self.rjObj.cluster.cmd_output_map[cmd_key]
    cmd_json_out = json.loads(cmd_out)
    first_fs_details = cmd_json_out[0]
    # add a second data pool to the first filesystem
    new_data_pool_name = 'myfs-data1'
    first_fs_details['data_pools'].append(new_data_pool_name)
    print("Modified JSON Cmd Out: {}".format(cmd_json_out))
    self.rjObj._arg_parser.cephfs_data_pool_name = new_data_pool_name
    self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
    self.rjObj.get_cephfs_data_pool_details()
    # use a non-existing data-pool-name
    bad_data_pool_name = 'myfs-data3'
    self.rjObj._arg_parser.cephfs_data_pool_name = bad_data_pool_name
    try:
        self.rjObj.get_cephfs_data_pool_details()
        self.fail("An Exception was expected to be thrown")
    except ExecutionFailureException as err:
        print("Successfully thrown error: {}".format(err))
    # empty data-pool scenario
    first_fs_details['data_pools'] = []
    self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
    try:
        self.rjObj.get_cephfs_data_pool_details()
        self.fail("An Exception was expected to be thrown")
    except ExecutionFailureException as err:
        print("Successfully thrown error: {}".format(err))
def test_valid_rgw_endpoint(self):
    """_invalid_endpoint() accepts a good endpoint and rejects malformed ones."""
    self.rjObj._invalid_endpoint("10.10.212.133:8000")
    # each endpoint below is malformed in a different way and must raise
    bad_endpoints = [
        "10.10.212.133:238000",    # invalid port
        "10.1033.212.133:8000",    # out of range IP
        "10.103..212.133:8000",    # mal formatted IP
        "10.103.212.133::8000",    # double colon
        "10.10.103.212.133:8000",  # too many octets
    ]
    for bad_endpoint in bad_endpoints:
        try:
            self.rjObj._invalid_endpoint(bad_endpoint)
            self.fail("An Exception was expected to be thrown")
        except ExecutionFailureException as err:
            print("Successfully thrown error: {}".format(err))
def add_non_default_pool_prefix_cmd(self, non_default_pool_prefix):
    """Register a canned 'auth caps' command/response pair for a custom prefix.

    Derives a new command from the dummy cluster's default-prefix caps
    command by duplicating its 'osd' caps with *non_default_pool_prefix*.
    """
    json_cmd = json.loads(
        self.rjObj.cluster.cmd_names['caps_change_default_pool_prefix'])
    # 'caps' is a flat [key, value, ...] list; the osd value follows 'osd'
    cur_osd_caps = json_cmd['caps'][json_cmd['caps'].index('osd') + 1]
    new_osd_caps = cur_osd_caps.replace(
        'default.', '{}.'.format(non_default_pool_prefix))
    all_osd_caps = "{}, {}".format(new_osd_caps, cur_osd_caps)
    caps_list = [x.strip() for x in all_osd_caps.split(',') if x.strip()]
    # de-duplicate while keeping order
    new_caps_list = []
    [new_caps_list.append(x) for x in caps_list if x not in new_caps_list]
    all_osd_caps = ", ".join(new_caps_list)
    json_cmd['caps'][json_cmd['caps'].index('osd') + 1] = all_osd_caps
    self.rjObj.cluster.cmd_names['caps_change_non_default_pool_prefix'] = json.dumps(
        json_cmd)
    self.rjObj.cluster.cmd_output_map[
        self.rjObj.cluster.cmd_names['caps_change_non_default_pool_prefix']] = '[{}]'
def test_upgrade_user_permissions(self):
    """Upgrade flow works for default and non-default rgw pool prefixes."""
    self.rjObj = RadosJSON(
        ['--upgrade', '--run-as-user=client.healthchecker'])
    # for testing, we are using 'DummyRados' object
    self.rjObj.cluster = DummyRados.Rados()
    self.rjObj.main()
    # repeat with an explicit, non-default rgw pool prefix
    self.rjObj = RadosJSON(
        ['--upgrade', '--run-as-user=client.healthchecker', '--rgw-pool-prefix=nonDefault'])
    self.rjObj.cluster = DummyRados.Rados()
    self.add_non_default_pool_prefix_cmd('nonDefault')
    self.rjObj.main()
def test_monitoring_endpoint_validation(self):
self.rjObj = RadosJSON(['--rbd-data-pool-name=abc', '--format=json'])
self.rjObj.cluster = DummyRados.Rados()
valid_ip_ports = [("10.22.31.131", "9283"),
("10.177.3.81", ""), ("", ""), ("", "9283")]
for each_ip_port_pair in valid_ip_ports:
# reset monitoring ip and port
self.rjObj._arg_parser.monitoring_endpoint = ''
self.rjObj._arg_parser.monitoring_endpoint_port = ''
new_mon_ip, new_mon_port = each_ip_port_pair
check_ip_val = self.rjObj.cluster.dummy_host_ip_map.get(
new_mon_ip, new_mon_ip)
check_port_val = RadosJSON.DEFAULT_MONITORING_ENDPOINT_PORT
if new_mon_ip:
self.rjObj._arg_parser.monitoring_endpoint = new_mon_ip
if new_mon_port:
check_port_val = new_mon_port
self.rjObj._arg_parser.monitoring_endpoint_port = new_mon_port
# for testing, we are using 'DummyRados' object
mon_ip, mon_port = self.rjObj.get_active_ceph_mgr()
if check_ip_val and check_ip_val != mon_ip:
self.fail("Expected IP: {}, Returned IP: {}".format(
check_ip_val, mon_ip))
if check_port_val and check_port_val != mon_port:
self.fail("Expected Port: '{}', Returned Port: '{}'".format(
check_port_val, mon_port))
print("MonIP: {}, MonPort: {}".format(mon_ip, mon_port))
invalid_ip_ports = [("10.22.31.131.43", "5334"), ("", "9194"),
("10.177.3.81", "90320"), ("", "73422"), ("10.232.12.8", "9092")]
for each_ip_port_pair in invalid_ip_ports:
# reset the command-line monitoring args
self.rjObj._arg_parser.monitoring_endpoint = ''
self.rjObj._arg_parser.monitoring_endpoint_port = ''
new_mon_ip, new_mon_port = each_ip_port_pair
if new_mon_ip:
self.rjObj._arg_parser.monitoring_endpoint = new_mon_ip
if new_mon_port:
self.rjObj._arg_parser.monitoring_endpoint_port = new_mon_port
try:
mon_ip, mon_port = self.rjObj.get_active_ceph_mgr()
print("[Wrong] MonIP: {}, MonPort: {}".format(mon_ip, mon_port))
self.fail("An exception was expected")
except ExecutionFailureException as err:
print("Exception thrown successfully: {}".format(err))
ceph: revert fail if mgr prometheus is not default
This reverts commit 1ed307cb0991dfd716bce471c8f88e6a11b7def0. In
8aaff235bbdf2264949145fad6804dea3865d873 we introduced the ability to set
a specific monitoring port, so the block that fails when the port is not
the default 9283 is no longer needed.
Signed-off-by: Sébastien Han <62d754cc350e84d3b1c32ae79f976f5348e74a40@redhat.com>
'''
Copyright 2020 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import json
import argparse
import unittest
import re
import requests
from os import linesep as LINESEP
# backward compatibility with 2.x:
# Python 2 has no ModuleNotFoundError, so alias its closest ancestor.
# Only NameError is expected here (fixed: was a bare 'except:', which
# would also swallow KeyboardInterrupt/SystemExit).
try:
    ModuleNotFoundError
except NameError:
    ModuleNotFoundError = ImportError
try:
    import rados
except ModuleNotFoundError as noModErr:
    # the ceph python bindings are mandatory; abort early without them
    print("Error: %s\nExiting the script..." % noModErr)
    sys.exit(1)
try:
    # for 2.7.x
    from StringIO import StringIO
except ModuleNotFoundError:
    # for 3.x
    from io import StringIO
try:
    # for 2.7.x
    from urlparse import urlparse
except ModuleNotFoundError:
    # for 3.x
    from urllib.parse import urlparse
class ExecutionFailureException(Exception):
    """Raised for any expected, handled failure in this script
    (bad arguments, failed ceph commands, unreachable endpoints, ...)."""
    pass
################################################
################## DummyRados ##################
################################################
# this is mainly for testing and could be used where 'rados' is not available
class DummyRados(object):
    """In-memory stand-in for a 'rados.Rados' cluster handle.

    Mainly for testing (and usable where the real 'rados' bindings are not
    available): every JSON-serialized mon command is looked up in
    'cmd_output_map' and a canned JSON output is returned.
    """

    def __init__(self):
        self.return_val = 0       # return code handed back by mon_command()
        self.err_message = ''     # error message handed back by mon_command()
        self.state = 'connected'  # mimics rados.Rados connection state
        self.cmd_output_map = {}  # serialized command -> canned output
        self.cmd_names = {}       # short name -> serialized command
        self._init_cmd_output_map()
        self.dummy_host_ip_map = {}  # hostname -> fake IP, filled lazily

    def _init_cmd_output_map(self):
        # short-hands for commands the tests need to look up or tweak later
        self.cmd_names['fs ls'] = '''{"format": "json", "prefix": "fs ls"}'''
        self.cmd_names['quorum_status'] = '''{"format": "json", "prefix": "quorum_status"}'''
        self.cmd_names['caps_change_default_pool_prefix'] = '''{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth caps"}'''
        # all the commands and their output
        self.cmd_output_map[self.cmd_names['fs ls']
                            ] = '''[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-data0"]}]'''
        self.cmd_output_map[self.cmd_names['quorum_status']] = '''{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "profile rbd", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd","osd":"profile rbd"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "profile rbd", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd","osd":"profile rbd"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs *=*"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs metadata=*"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]'''
        self.cmd_output_map['''{"format": "json", "prefix": "mgr services"}'''] = '''{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}'''
        # NOTE: a previous assignment of the 'auth get' key to the mgr-services
        # output was dead code (immediately overwritten) and has been removed
        self.cmd_output_map['''{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]'''
        self.cmd_output_map[self.cmd_names['caps_change_default_pool_prefix']] = '''[{}]'''

    def shutdown(self):
        pass

    def get_fsid(self):
        # fixed fsid matching the canned 'quorum_status' output above
        return 'af4e1673-0b72-402d-990a-22d2919d0f1c'

    def conf_read_file(self):
        pass

    def connect(self):
        pass

    def pool_exists(self, pool_name):
        # every pool "exists" in the dummy cluster
        return True

    def mon_command(self, cmd, out):
        # normalize the incoming command so that key order does not matter
        json_cmd = json.loads(cmd)
        json_cmd_str = json.dumps(json_cmd, sort_keys=True)
        cmd_output = self.cmd_output_map[json_cmd_str]
        return self.return_val, \
            cmd_output, \
            "{}".format(self.err_message).encode('utf-8')

    def _convert_hostname_to_ip(self, host_name):
        # BUGFIX: the dots were unescaped ('.' matched any character), so
        # strings like '1a2b3c4' were wrongly treated as IP addresses
        ip_reg_x = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
        # if provided host is directly an IP address, return the same
        if ip_reg_x.match(host_name):
            return host_name
        import random
        host_ip = self.dummy_host_ip_map.get(host_name, "")
        if not host_ip:
            # make up a stable fake IP for this hostname
            host_ip = "172.9.{}.{}".format(
                random.randint(0, 255), random.randint(0, 255))
            self.dummy_host_ip_map[host_name] = host_ip
        return host_ip

    @classmethod
    def Rados(cls, conffile=None):
        # BUGFIX: a classmethod's first positional parameter receives the
        # class itself; previously 'conffile' silently absorbed it, so
        # passing conffile=... raised a TypeError
        return DummyRados()
class RadosJSON:
    # default user the script acts as when '--run-as-user' is not supplied
    EXTERNAL_USER_NAME = "client.healthchecker"
    # error text used when a mon command succeeds but returns nothing
    EMPTY_OUTPUT_LIST = "Empty output list"
    # fallback prefix for the rgw pools (e.g. 'default.rgw.meta')
    DEFAULT_RGW_POOL_PREFIX = "default"
    # default port of the ceph-mgr prometheus exporter
    DEFAULT_MONITORING_ENDPOINT_PORT = "9283"
@classmethod
def gen_arg_parser(cls, args_to_parse=None):
argP = argparse.ArgumentParser()
common_group = argP.add_argument_group('common')
common_group.add_argument("--verbose", "-v",
action='store_true', default=False)
common_group.add_argument("--ceph-conf", "-c",
help="Provide a ceph conf file.", type=str)
common_group.add_argument("--run-as-user", "-u", default="", type=str,
help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'")
common_group.add_argument("--cluster-name", default="openshift-storage",
help="Ceph cluster name")
common_group.add_argument("--namespace", default="",
help="Namespace where CephCluster is running")
common_group.add_argument("--rgw-pool-prefix", default="",
help="RGW Pool prefix")
output_group = argP.add_argument_group('output')
output_group.add_argument("--format", "-t", choices=["json", "bash"],
default='json', help="Provides the output format (json | bash)")
output_group.add_argument("--output", "-o", default="",
help="Output will be stored into the provided file")
output_group.add_argument("--cephfs-filesystem-name", default="",
help="Provides the name of the Ceph filesystem")
output_group.add_argument("--cephfs-data-pool-name", default="",
help="Provides the name of the cephfs data pool")
output_group.add_argument("--rbd-data-pool-name", default="", required=False,
help="Provides the name of the RBD datapool")
output_group.add_argument("--rgw-endpoint", default="", required=False,
help="Rados GateWay endpoint (in <IP>:<PORT> format)")
output_group.add_argument("--monitoring-endpoint", default="", required=False,
help="Ceph Manager prometheus exporter endpoints comma separated list of <IP> entries")
output_group.add_argument("--monitoring-endpoint-port", default="", required=False,
help="Ceph Manager prometheus exporter port")
upgrade_group = argP.add_argument_group('upgrade')
upgrade_group.add_argument("--upgrade", action='store_true', default=False,
help="Upgrades the 'user' with all the permissions needed for the new cluster version")
if args_to_parse:
assert type(args_to_parse) == list, \
"Argument to 'gen_arg_parser' should be a list"
else:
args_to_parse = sys.argv[1:]
return argP.parse_args(args_to_parse)
def _check_conflicting_options(self):
if not self._arg_parser.upgrade and not self._arg_parser.rbd_data_pool_name:
raise ExecutionFailureException(
"Either '--upgrade' or '--rbd-data-pool-name <pool_name>' should be specified")
if self._arg_parser.upgrade and self._arg_parser.rbd_data_pool_name:
raise ExecutionFailureException(
"Both '--upgrade' and '--rbd-data-pool-name <pool_name>' should not be specified, choose only one")
# a user name must be provided while using '--upgrade' option
if not self._arg_parser.run_as_user and self._arg_parser.upgrade:
raise ExecutionFailureException(
"Please provide an existing user-name through '--run-as-user' (or '-u') flag while upgrading")
def _invalid_endpoint(self, endpoint_str):
try:
ipv4, port = endpoint_str.split(':')
except ValueError:
raise ExecutionFailureException(
"Not a proper endpoint: {}, <IPv4>:<PORT>, format is expected".format(endpoint_str))
ipParts = ipv4.split('.')
if len(ipParts) != 4:
raise ExecutionFailureException(
"Not a valid IP address: {}".format(ipv4))
for eachPart in ipParts:
if not eachPart.isdigit():
raise ExecutionFailureException(
"IP address parts should be numbers: {}".format(ipv4))
intPart = int(eachPart)
if intPart < 0 or intPart > 254:
raise ExecutionFailureException(
"Out of range IP addresses: {}".format(ipv4))
if not port.isdigit():
raise ExecutionFailureException("Port not valid: {}".format(port))
intPort = int(port)
if intPort < 1 or intPort > 2**16-1:
raise ExecutionFailureException(
"Out of range port number: {}".format(port))
return False
def endpoint_dial(self, endpoint_str, timeout=3):
# if the 'cluster' instance is a dummy one,
# don't try to reach out to the endpoint
if isinstance(self.cluster, DummyRados):
return
protocols = ["http", "https"]
for prefix in protocols:
try:
ep = "{}://{}".format(prefix, endpoint_str)
r = requests.head(ep, timeout=timeout)
if r.status_code == 200:
return
except:
continue
raise ExecutionFailureException(
"unable to connect to endpoint: {}".format(endpoint_str))
def __init__(self, arg_list=None):
self.out_map = {}
self._excluded_keys = set()
self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list)
self._check_conflicting_options()
self.run_as_user = self._arg_parser.run_as_user
self.output_file = self._arg_parser.output
self.ceph_conf = self._arg_parser.ceph_conf
self.MIN_USER_CAP_PERMISSIONS = {
'mgr': 'allow command config',
'mon': 'allow r, allow command quorum_status, allow command version',
'osd': "allow rwx pool={0}.rgw.meta, " +
"allow r pool=.rgw.root, " +
"allow rw pool={0}.rgw.control, " +
"allow rx pool={0}.rgw.log, " +
"allow x pool={0}.rgw.buckets.index"
}
# if user not provided, give a default user
if not self.run_as_user and not self._arg_parser.upgrade:
self.run_as_user = self.EXTERNAL_USER_NAME
if not self._arg_parser.rgw_pool_prefix and not self._arg_parser.upgrade:
self._arg_parser.rgw_pool_prefix = self.DEFAULT_RGW_POOL_PREFIX
if self.ceph_conf:
self.cluster = rados.Rados(conffile=self.ceph_conf)
else:
self.cluster = rados.Rados()
self.cluster.conf_read_file()
self.cluster.connect()
def shutdown(self):
if self.cluster.state == "connected":
self.cluster.shutdown()
def get_fsid(self):
return str(self.cluster.get_fsid())
def _common_cmd_json_gen(self, cmd_json):
cmd = json.dumps(cmd_json, sort_keys=True)
ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b'')
if self._arg_parser.verbose:
print("Command Input: {}".format(cmd))
print("Return Val: {}\nCommand Output: {}\nError Message: {}\n----------\n".format(
ret_val, cmd_out, err_msg))
json_out = {}
# if there is no error (i.e; ret_val is ZERO) and 'cmd_out' is not empty
# then convert 'cmd_out' to a json output
if ret_val == 0 and cmd_out:
json_out = json.loads(cmd_out)
return ret_val, json_out, err_msg
def get_ceph_external_mon_data(self):
cmd_json = {"prefix": "quorum_status", "format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'quorum_status' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
q_leader_name = json_out['quorum_leader_name']
q_leader_details = {}
q_leader_matching_list = [l for l in json_out['monmap']['mons']
if l['name'] == q_leader_name]
if len(q_leader_matching_list) == 0:
raise ExecutionFailureException("No matching 'mon' details found")
q_leader_details = q_leader_matching_list[0]
ip_port = str(q_leader_details['public_addr'].split('/')[0])
return "{}={}".format(str(q_leader_name), ip_port)
def _join_host_port(self, endpoint, port):
port = "{}".format(port)
# regex to check the given endpoint is enclosed in square brackets
ipv6_regx = re.compile(r'^\[[^]]*\]$')
# endpoint has ':' in it and if not (already) enclosed in square brackets
if endpoint.count(':') and not ipv6_regx.match(endpoint):
endpoint = '[{}]'.format(endpoint)
if not port:
return endpoint
return ':'.join([endpoint, port])
def _convert_hostname_to_ip(self, host_name):
# if 'cluster' instance is a dummy type,
# call the dummy instance's "convert" method
if not host_name:
raise ExecutionFailureException("Empty hostname provided")
if isinstance(self.cluster, DummyRados):
return self.cluster._convert_hostname_to_ip(host_name)
import socket
ip = socket.gethostbyname(host_name)
del socket
return ip
def get_active_ceph_mgr(self):
monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
monitoring_endpoint_ip = self._arg_parser.monitoring_endpoint
if not monitoring_endpoint_ip:
cmd_json = {"prefix": "mgr services", "format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'mgr services' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
monitoring_endpoint = json_out.get('prometheus')
if not monitoring_endpoint:
raise ExecutionFailureException(
"'prometheus' service not found, is the exporter enabled?'.\n")
try:
parsed_endpoint = urlparse(monitoring_endpoint)
except ValueError:
raise ExecutionFailureException(
"invalid endpoint: {}".format(monitoring_endpoint))
monitoring_endpoint_ip = parsed_endpoint.hostname
if not monitoring_endpoint_port:
monitoring_endpoint_port = "{}".format(parsed_endpoint.port)
# if monitoring endpoint port is not set, put a default mon port
if not monitoring_endpoint_port:
monitoring_endpoint_port = self.DEFAULT_MONITORING_ENDPOINT_PORT
try:
monitoring_endpoint_ip = self._convert_hostname_to_ip(
monitoring_endpoint_ip)
except:
raise ExecutionFailureException(
"unable to convert a hostname to an IP address, monitoring host name: {}".format(monitoring_endpoint_ip))
monitoring_endpoint = self._join_host_port(
monitoring_endpoint_ip, monitoring_endpoint_port)
self._invalid_endpoint(monitoring_endpoint)
self.endpoint_dial(monitoring_endpoint)
return monitoring_endpoint_ip, monitoring_endpoint_port
def create_cephCSIKeyring_cephFSProvisioner(self):
'''
command: ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*'
'''
cmd_json = {"prefix": "auth get-or-create",
"entity": "client.csi-cephfs-provisioner",
"caps": ["mon", "allow r", "mgr", "allow rw",
"osd", "allow rw tag cephfs metadata=*"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-cephfs-provisioner' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def create_cephCSIKeyring_cephFSNode(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": "client.csi-cephfs-node",
"caps": ["mon", "allow r",
"mgr", "allow rw",
"osd", "allow rw tag cephfs *=*",
"mds", "allow rw"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-cephfs-node' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def create_cephCSIKeyring_RBDProvisioner(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": "client.csi-rbd-provisioner",
"caps": ["mon", "profile rbd",
"mgr", "allow rw",
"osd", "profile rbd"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-rbd-provisioner' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
    def get_cephfs_data_pool_details(self):
        """Resolve the cephfs filesystem and data-pool argument defaults.

        Runs 'fs ls' and, depending on which of '--cephfs-filesystem-name'
        and '--cephfs-data-pool-name' were supplied, either validates them
        against the cluster or fills them in from the discovered
        filesystems. Returns silently when nothing can (or needs to) be
        resolved; raises ExecutionFailureException when a user-supplied
        name does not match the cluster.
        """
        cmd_json = {"prefix": "fs ls", "format": "json"}
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt, report an error
        if ret_val != 0:
            # if fs and data_pool arguments are not set, silently return
            if self._arg_parser.cephfs_filesystem_name == "" and self._arg_parser.cephfs_data_pool_name == "":
                return
            # if user has provided any of the
            # '--cephfs-filesystem-name' or '--cephfs-data-pool-name' arguments,
            # raise an exception as we are unable to verify the args
            raise ExecutionFailureException(
                "'fs ls' ceph call failed with error: {}".format(err_msg))
        matching_json_out = {}
        # if '--cephfs-filesystem-name' argument is provided,
        # check whether the provided filesystem-name exists or not
        if self._arg_parser.cephfs_filesystem_name:
            # get the matching list
            matching_json_out_list = [matched for matched in json_out
                                      if str(matched['name']) == self._arg_parser.cephfs_filesystem_name]
            # unable to find a matching fs-name, raise an error
            if len(matching_json_out_list) == 0:
                raise ExecutionFailureException(
                    ("Filesystem provided, '{}', " +
                     "is not found in the fs-list: '{}'").format(
                        self._arg_parser.cephfs_filesystem_name,
                        [str(x['name']) for x in json_out]))
            matching_json_out = matching_json_out_list[0]
        # if cephfs filesystem name is not provided,
        # try to get a default fs name by doing the following
        else:
            # a. check if there is only one filesystem is present
            if len(json_out) == 1:
                matching_json_out = json_out[0]
            # b. or else, check if data_pool name is provided
            elif self._arg_parser.cephfs_data_pool_name:
                # and if present, check whether there exists a fs which has the data_pool
                for eachJ in json_out:
                    if self._arg_parser.cephfs_data_pool_name in eachJ['data_pools']:
                        matching_json_out = eachJ
                        break
                # if there is no matching fs exists, that means provided data_pool name is invalid
                if not matching_json_out:
                    raise ExecutionFailureException(
                        "Provided data_pool name, {}, does not exists".format(
                            self._arg_parser.cephfs_data_pool_name))
            # c. if nothing is set and couldn't find a default,
            else:
                # just return silently
                return
        if matching_json_out:
            self._arg_parser.cephfs_filesystem_name = str(
                matching_json_out['name'])
        # NOTE(review): when matching_json_out is still empty at this point
        # the lookup below raises KeyError rather than a script error —
        # presumably unreachable given the branches above; confirm
        if type(matching_json_out['data_pools']) == list:
            # if the user has already provided data-pool-name,
            # through --cephfs-data-pool-name
            if self._arg_parser.cephfs_data_pool_name:
                # if the provided name is not matching with the one in the list
                if self._arg_parser.cephfs_data_pool_name not in matching_json_out['data_pools']:
                    raise ExecutionFailureException(
                        "{}: '{}', {}: {}".format(
                            "Provided data-pool-name",
                            self._arg_parser.cephfs_data_pool_name,
                            "doesn't match from the data-pools' list",
                            [str(x) for x in matching_json_out['data_pools']]))
            # if data_pool name is not provided,
            # then try to find a default data pool name
            else:
                # if no data_pools exist, silently return
                if len(matching_json_out['data_pools']) == 0:
                    return
                self._arg_parser.cephfs_data_pool_name = str(
                    matching_json_out['data_pools'][0])
                # if there are more than one 'data_pools' exist,
                # then warn the user that we are using the selected name
                if len(matching_json_out['data_pools']) > 1:
                    print("{}: {}\n{}: '{}'\n".format(
                        "WARNING: Multiple data pools detected",
                        [str(x) for x in matching_json_out['data_pools']],
                        "Using the data-pool",
                        self._arg_parser.cephfs_data_pool_name))
def create_cephCSIKeyring_RBDNode(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": "client.csi-rbd-node",
"caps": ["mon", "profile rbd",
"osd", "profile rbd"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-rbd-node' command failed\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def create_checkerKey(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": self.run_as_user,
"caps": ["mon", self.MIN_USER_CAP_PERMISSIONS['mon'],
"mgr", self.MIN_USER_CAP_PERMISSIONS['mgr'],
"osd", self.MIN_USER_CAP_PERMISSIONS['osd'].format(self._arg_parser.rgw_pool_prefix)],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create {}' command failed\n".format(self.run_as_user) +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
    def _gen_output_map(self):
        """Populate self.out_map with everything the output renderers need.

        Idempotent: returns immediately when the map is already filled.
        Validates the rbd (and, when configured, rgw) pools, then collects
        mon data, user/CSI keyrings, cephfs details and the monitoring
        endpoint. The call order below matters: validation and
        get_cephfs_data_pool_details() mutate self._arg_parser before the
        values are read into the map.
        """
        if self.out_map:
            return
        pools_to_validate = [self._arg_parser.rbd_data_pool_name]
        # if rgw_endpoint is provided, validate it
        if self._arg_parser.rgw_endpoint:
            self._invalid_endpoint(self._arg_parser.rgw_endpoint)
            self.endpoint_dial(self._arg_parser.rgw_endpoint)
            rgw_pool_to_validate = ["{0}.rgw.meta".format(self._arg_parser.rgw_pool_prefix),
                                    ".rgw.root",
                                    "{0}.rgw.control".format(
                                        self._arg_parser.rgw_pool_prefix),
                                    "{0}.rgw.log".format(
                                        self._arg_parser.rgw_pool_prefix)]
            pools_to_validate.extend(rgw_pool_to_validate)
        for pool in pools_to_validate:
            if not self.cluster.pool_exists(pool):
                raise ExecutionFailureException(
                    "The provided pool, '{}', does not exist".format(pool))
        # CLUSTER_NAME is kept in the map but never exported by gen_shell_out()
        self._excluded_keys.add('CLUSTER_NAME')
        self.get_cephfs_data_pool_details()
        self.out_map['NAMESPACE'] = self._arg_parser.namespace
        self.out_map['CLUSTER_NAME'] = self._arg_parser.cluster_name
        self.out_map['ROOK_EXTERNAL_FSID'] = self.get_fsid()
        self.out_map['ROOK_EXTERNAL_USERNAME'] = self.run_as_user
        self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'] = self.get_ceph_external_mon_data()
        self.out_map['ROOK_EXTERNAL_USER_SECRET'] = self.create_checkerKey()
        self.out_map['CSI_RBD_NODE_SECRET_SECRET'] = self.create_cephCSIKeyring_RBDNode()
        self.out_map['CSI_RBD_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_RBDProvisioner()
        self.out_map['CEPHFS_POOL_NAME'] = self._arg_parser.cephfs_data_pool_name
        self.out_map['CEPHFS_FS_NAME'] = self._arg_parser.cephfs_filesystem_name
        self.out_map['CSI_CEPHFS_NODE_SECRET'] = ''
        self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = ''
        # create CephFS node and provisioner keyring only when MDS exists
        if self.out_map['CEPHFS_FS_NAME'] and self.out_map['CEPHFS_POOL_NAME']:
            self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode(
            )
            self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_cephFSProvisioner()
        self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint
        self.out_map['MONITORING_ENDPOINT'], \
            self.out_map['MONITORING_ENDPOINT_PORT'] = self.get_active_ceph_mgr()
        self.out_map['RBD_POOL_NAME'] = self._arg_parser.rbd_data_pool_name
        self.out_map['RGW_POOL_PREFIX'] = self._arg_parser.rgw_pool_prefix
def gen_shell_out(self):
self._gen_output_map()
shOutIO = StringIO()
for k, v in self.out_map.items():
if v and k not in self._excluded_keys:
shOutIO.write('export {}={}{}'.format(k, v, LINESEP))
shOut = shOutIO.getvalue()
shOutIO.close()
return shOut
    def gen_json_out(self):
        """Render the output map as the JSON document consumed by Rook.

        Always emits the mon-endpoints ConfigMap, the mon/operator/rbd-node
        Secrets, the rbd StorageClass and the monitoring CephCluster entry;
        the provisioner/cephfs/rgw entries are appended only when the
        corresponding values were resolved. Returns the serialized JSON
        string followed by a line separator.
        """
        self._gen_output_map()
        json_out = [
            {
                "name": "rook-ceph-mon-endpoints",
                "kind": "ConfigMap",
                "data": {
                    "data": self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'],
                    "maxMonId": "0",
                    "mapping": "{}"
                }
            },
            {
                "name": "rook-ceph-mon",
                "kind": "Secret",
                "data": {
                    # NOTE(review): placeholder values — presumably the
                    # operator replaces these; confirm against the consumer
                    "admin-secret": "admin-secret",
                    "fsid": self.out_map['ROOK_EXTERNAL_FSID'],
                    "mon-secret": "mon-secret"
                },
            },
            {
                "name": "rook-ceph-operator-creds",
                "kind": "Secret",
                "data": {
                    "userID": self.out_map['ROOK_EXTERNAL_USERNAME'],
                    "userKey": self.out_map['ROOK_EXTERNAL_USER_SECRET']
                }
            },
            {
                "name": "rook-csi-rbd-node",
                "kind": "Secret",
                "data": {
                    "userID": 'csi-rbd-node',
                    "userKey": self.out_map['CSI_RBD_NODE_SECRET_SECRET']
                }
            },
            {
                "name": "ceph-rbd",
                "kind": "StorageClass",
                "data": {
                    "pool": self.out_map['RBD_POOL_NAME']
                }
            },
            {
                "name": "monitoring-endpoint",
                "kind": "CephCluster",
                "data": {
                    "MonitoringEndpoint": self.out_map['MONITORING_ENDPOINT'],
                    "MonitoringPort": self.out_map['MONITORING_ENDPOINT_PORT']
                }
            }
        ]
        # if 'CSI_RBD_PROVISIONER_SECRET' exists, then only add 'rook-csi-rbd-provisioner' Secret
        if self.out_map['CSI_RBD_PROVISIONER_SECRET']:
            json_out.append({
                "name": "rook-csi-rbd-provisioner",
                "kind": "Secret",
                "data": {
                    "userID": 'csi-rbd-provisioner',
                    "userKey": self.out_map['CSI_RBD_PROVISIONER_SECRET']
                },
            })
        # if 'CSI_CEPHFS_PROVISIONER_SECRET' exists, then only add 'rook-csi-cephfs-provisioner' Secret
        if self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']:
            json_out.append({
                "name": "rook-csi-cephfs-provisioner",
                "kind": "Secret",
                "data": {
                    "adminID": 'csi-cephfs-provisioner',
                    "adminKey": self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']
                },
            })
        # if 'CSI_CEPHFS_NODE_SECRET' exists, then only add 'rook-csi-cephfs-node' Secret
        if self.out_map['CSI_CEPHFS_NODE_SECRET']:
            json_out.append({
                "name": "rook-csi-cephfs-node",
                "kind": "Secret",
                "data": {
                    "adminID": 'csi-cephfs-node',
                    "adminKey": self.out_map['CSI_CEPHFS_NODE_SECRET']
                }
            })
        # if 'CEPHFS_FS_NAME' exists, then only add 'cephfs' StorageClass
        if self.out_map['CEPHFS_FS_NAME']:
            json_out.append({
                "name": "cephfs",
                "kind": "StorageClass",
                "data": {
                    "fsName": self.out_map['CEPHFS_FS_NAME'],
                    "pool": self.out_map['CEPHFS_POOL_NAME']
                }
            })
        # if 'RGW_ENDPOINT' exists, then only add 'ceph-rgw' StorageClass
        if self.out_map['RGW_ENDPOINT']:
            json_out.append({
                "name": "ceph-rgw",
                "kind": "StorageClass",
                "data": {
                    "endpoint": self.out_map['RGW_ENDPOINT'],
                    "poolPrefix": self.out_map['RGW_POOL_PREFIX']
                }
            })
        return json.dumps(json_out)+LINESEP
def upgrade_user_permissions(self):
    """Upgrade the run-as-user's 'mon', 'mgr' and 'osd' caps.

    Merges the minimum required cap permissions into the user's current
    caps (preserving the existing order, no duplicates) and applies the
    result with an 'auth caps' command.

    :raises ExecutionFailureException: if the user does not exist, the
        rgw pool prefix cannot be determined, or 'auth caps' fails.
    """
    # check whether the given user exists or not
    cmd_json = {"prefix": "auth get", "entity": "{}".format(
        self.run_as_user), "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException("'auth get {}' command failed.\n".format(self.run_as_user) +
                                        "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    existing_caps = json_out[0]['caps']
    osd_cap = "osd"
    for each_cap in ["mon", "mgr", "osd"]:
        min_cap_values = self.MIN_USER_CAP_PERMISSIONS.get(each_cap, '')
        cur_cap_values = existing_caps.get(each_cap, '')
        # the 'osd' caps template needs an rgw pool prefix filled in
        if each_cap == osd_cap:
            # if directly provided through '--rgw-pool-prefix' argument, use it
            if self._arg_parser.rgw_pool_prefix:
                min_cap_values = min_cap_values.format(
                    self._arg_parser.rgw_pool_prefix)
            # or else try to detect one from the existing/current osd cap values
            else:
                rc = re.compile(r' pool=([^.]+)\.rgw\.[^ ]*')
                # 'findall()' method will give a list of prefixes
                # and 'set' will eliminate any duplicates
                cur_rgw_pool_prefix_list = list(
                    set(rc.findall(cur_cap_values)))
                if len(cur_rgw_pool_prefix_list) != 1:
                    # BUGFIX: message previously read "rgw-pool-prefx"
                    raise ExecutionFailureException(
                        "Unable to determine 'rgw-pool-prefix'. Please provide one with '--rgw-pool-prefix' flag")
                min_cap_values = min_cap_values.format(
                    cur_rgw_pool_prefix_list[0])
        cur_cap_perm_list = [x.strip()
                             for x in cur_cap_values.split(',') if x.strip()]
        min_cap_perm_list = [x.strip()
                             for x in min_cap_values.split(',') if x.strip()]
        min_cap_perm_list.extend(cur_cap_perm_list)
        # eliminate duplicates without using 'set':
        # set re-orders items and we have to keep the order
        new_cap_perm_list = []
        for perm in min_cap_perm_list:
            if perm not in new_cap_perm_list:
                new_cap_perm_list.append(perm)
        existing_caps[each_cap] = ", ".join(new_cap_perm_list)
    cmd_json = {"prefix": "auth caps",
                "entity": self.run_as_user,
                "caps": ["mon", existing_caps["mon"],
                         "mgr", existing_caps["mgr"],
                         "osd", existing_caps["osd"]],
                "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    if ret_val != 0:
        raise ExecutionFailureException("'auth caps {}' command failed.\n".format(self.run_as_user) +
                                        "Error: {}".format(err_msg))
    print("Updated user, {}, successfully.".format(self.run_as_user))
def main(self):
    """Entry point for the script's work.

    Either upgrades the run-as-user's permissions ('--upgrade') or
    generates the external-cluster details in 'json' or 'bash' format.
    The generated output is printed and, when '--output' was given,
    also written to that file.

    :raises ExecutionFailureException: for an unsupported '--format'.
    """
    generated_output = ''
    if self._arg_parser.upgrade:
        self.upgrade_user_permissions()
    elif self._arg_parser.format == 'json':
        generated_output = self.gen_json_out()
    elif self._arg_parser.format == 'bash':
        generated_output = self.gen_shell_out()
    else:
        raise ExecutionFailureException("Unsupported format: {}".format(
            self._arg_parser.format))
    print('{}'.format(generated_output))
    if self.output_file and generated_output:
        # BUGFIX: use a context manager so the file handle is closed
        # even if the write raises (previously open/write/close)
        with open(self.output_file, 'w') as f_out:
            f_out.write(generated_output)
################################################
##################### MAIN #####################
################################################
if __name__ == '__main__':
    rjObj = RadosJSON()
    try:
        rjObj.main()
    except ExecutionFailureException as err:
        print("Execution Failed: {}".format(err))
    except KeyError as kErr:
        # BUGFIX: was print("KeyError: %s", kErr) -- logging-style args
        # passed to print(), which printed a tuple instead of a message
        print("KeyError: {}".format(kErr))
    except OSError as osErr:
        print("Error while trying to output the data: {}".format(osErr))
    finally:
        # always release the cluster connection
        rjObj.shutdown()
################################################
##################### TEST #####################
################################################
# inorder to test the package,
# cd <script_directory>
# python -m unittest --verbose <script_name_without_dot_py>
class TestRadosJSON(unittest.TestCase):
    """Unit tests for RadosJSON, exercised against a 'DummyRados' stub
    cluster (no real Ceph cluster is contacted)."""

    def setUp(self):
        # build a RadosJSON with representative CLI args and swap in the stub
        print("\nI am in setup")
        self.rjObj = RadosJSON(['--rbd-data-pool-name=abc',
                                '--rgw-endpoint=10.10.212.122:9000', '--format=json'])
        # for testing, we are using 'DummyRados' object
        self.rjObj.cluster = DummyRados.Rados()

    def tearDown(self):
        print("\nI am tearing down the setup\n")
        self.rjObj.shutdown()

    def test_method_main_output(self):
        # main() must succeed for 'json' and 'bash' and raise otherwise
        print("JSON Output")
        self.rjObj._arg_parser.format = "json"
        self.rjObj.main()
        print("\n\nShell Output")
        self.rjObj._arg_parser.format = "bash"
        self.rjObj.main()
        print("\n\nNon compatible output (--abcd)")
        try:
            self.rjObj._arg_parser.format = 'abcd'
            self.rjObj.main()
            self.fail("Function should have thrown an Exception")
        except ExecutionFailureException as err:
            print("Exception thrown successfully: {}".format(err))

    def test_method_create_cephCSIKeyring_cephFSProvisioner(self):
        # smoke test: keyring creation against the stub should not raise
        csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
        print("{}".format(csiKeyring))

    def test_non_zero_return_and_error(self):
        # a non-zero cluster return code must surface as ExecutionFailureException
        self.rjObj.cluster.return_val = 1
        self.rjObj.cluster.err_message = "Dummy Error"
        try:
            self.rjObj.create_checkerKey()
            self.fail("Failed to raise an exception, 'ExecutionFailureException'")
        except ExecutionFailureException as err:
            print("Successfully thrown error.\nError: {}".format(err))

    def test_multi_filesystem_scenario(self):
        # behaviour with multiple / named / missing / zero filesystems
        cmd_key = self.rjObj.cluster.cmd_names['fs ls']
        cmd_out = self.rjObj.cluster.cmd_output_map[cmd_key]
        cmd_json_out = json.loads(cmd_out)
        second_fs_details = dict(cmd_json_out[0])
        second_fs_details['name'] += '-2'
        cmd_json_out.append(second_fs_details)
        self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
        # multiple filesystem present,
        # but no specific '--cephfs-filesystem-name' argument provided
        try:
            self.rjObj.get_cephfs_data_pool_details()
            print("As we are returning silently, no error thrown as expected")
        except ExecutionFailureException as err:
            self.fail(
                "Supposed to get returned silently, but instead error thrown: {}".format(err))
        # pass an existing filesystem name
        try:
            self.rjObj._arg_parser.cephfs_filesystem_name = second_fs_details['name']
            self.rjObj.get_cephfs_data_pool_details()
        except ExecutionFailureException as err:
            self.fail("Should not have thrown error: {}".format(err))
        # pass a non-existing filesystem name
        try:
            self.rjObj._arg_parser.cephfs_filesystem_name += "-non-existing-fs-name"
            self.rjObj.get_cephfs_data_pool_details()
            self.fail("An Exception was expected to be thrown")
        except ExecutionFailureException as err:
            print("Successfully thrown error: {}".format(err))
        # empty file-system array
        try:
            self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps([])
            self.rjObj.get_cephfs_data_pool_details()
            self.fail("An Exception was expected to be thrown")
        except ExecutionFailureException as err:
            print("Successfully thrown error: {}".format(err))

    def test_multi_data_pool_scenario(self):
        # behaviour with extra, non-existing and empty cephfs data pools
        cmd_key = self.rjObj.cluster.cmd_names['fs ls']
        cmd_out = self.rjObj.cluster.cmd_output_map[cmd_key]
        cmd_json_out = json.loads(cmd_out)
        first_fs_details = cmd_json_out[0]
        new_data_pool_name = 'myfs-data1'
        first_fs_details['data_pools'].append(new_data_pool_name)
        print("Modified JSON Cmd Out: {}".format(cmd_json_out))
        self.rjObj._arg_parser.cephfs_data_pool_name = new_data_pool_name
        self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
        self.rjObj.get_cephfs_data_pool_details()
        # use a non-existing data-pool-name
        bad_data_pool_name = 'myfs-data3'
        self.rjObj._arg_parser.cephfs_data_pool_name = bad_data_pool_name
        try:
            self.rjObj.get_cephfs_data_pool_details()
            self.fail("An Exception was expected to be thrown")
        except ExecutionFailureException as err:
            print("Successfully thrown error: {}".format(err))
        # empty data-pool scenario
        first_fs_details['data_pools'] = []
        self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
        try:
            self.rjObj.get_cephfs_data_pool_details()
            self.fail("An Exception was expected to be thrown")
        except ExecutionFailureException as err:
            print("Successfully thrown error: {}".format(err))

    def test_valid_rgw_endpoint(self):
        # _invalid_endpoint() must accept a good endpoint and raise for bad ones
        self.rjObj._invalid_endpoint("10.10.212.133:8000")
        # invalid port
        try:
            self.rjObj._invalid_endpoint("10.10.212.133:238000")
            self.fail("An Exception was expected to be thrown")
        except ExecutionFailureException as err:
            print("Successfully thrown error: {}".format(err))
        # out of range IP
        try:
            self.rjObj._invalid_endpoint("10.1033.212.133:8000")
            self.fail("An Exception was expected to be thrown")
        except ExecutionFailureException as err:
            print("Successfully thrown error: {}".format(err))
        # mal formatted IP
        try:
            self.rjObj._invalid_endpoint("10.103..212.133:8000")
            self.fail("An Exception was expected to be thrown")
        except ExecutionFailureException as err:
            print("Successfully thrown error: {}".format(err))
        try:
            self.rjObj._invalid_endpoint("10.103.212.133::8000")
            self.fail("An Exception was expected to be thrown")
        except ExecutionFailureException as err:
            print("Successfully thrown error: {}".format(err))
        try:
            self.rjObj._invalid_endpoint("10.10.103.212.133:8000")
            self.fail("An Exception was expected to be thrown")
        except ExecutionFailureException as err:
            print("Successfully thrown error: {}".format(err))

    def add_non_default_pool_prefix_cmd(self, non_default_pool_prefix):
        # helper (not a test): register a stub 'auth caps' command whose
        # osd caps use a non-default rgw pool prefix
        json_cmd = json.loads(
            self.rjObj.cluster.cmd_names['caps_change_default_pool_prefix'])
        cur_osd_caps = json_cmd['caps'][json_cmd['caps'].index('osd') + 1]
        new_osd_caps = cur_osd_caps.replace(
            'default.', '{}.'.format(non_default_pool_prefix))
        all_osd_caps = "{}, {}".format(new_osd_caps, cur_osd_caps)
        caps_list = [x.strip() for x in all_osd_caps.split(',') if x.strip()]
        # order-preserving de-duplication
        new_caps_list = []
        [new_caps_list.append(x) for x in caps_list if x not in new_caps_list]
        all_osd_caps = ", ".join(new_caps_list)
        json_cmd['caps'][json_cmd['caps'].index('osd') + 1] = all_osd_caps
        self.rjObj.cluster.cmd_names['caps_change_non_default_pool_prefix'] = json.dumps(
            json_cmd)
        self.rjObj.cluster.cmd_output_map[
            self.rjObj.cluster.cmd_names['caps_change_non_default_pool_prefix']] = '[{}]'

    def test_upgrade_user_permissions(self):
        # upgrade path with the default and an explicit rgw pool prefix
        self.rjObj = RadosJSON(
            ['--upgrade', '--run-as-user=client.healthchecker'])
        # for testing, we are using 'DummyRados' object
        self.rjObj.cluster = DummyRados.Rados()
        self.rjObj.main()
        self.rjObj = RadosJSON(
            ['--upgrade', '--run-as-user=client.healthchecker', '--rgw-pool-prefix=nonDefault'])
        self.rjObj.cluster = DummyRados.Rados()
        self.add_non_default_pool_prefix_cmd('nonDefault')
        self.rjObj.main()

    def test_monitoring_endpoint_validation(self):
        # get_active_ceph_mgr() with valid and invalid IP/port pairs
        self.rjObj = RadosJSON(['--rbd-data-pool-name=abc', '--format=json'])
        self.rjObj.cluster = DummyRados.Rados()
        valid_ip_ports = [("10.22.31.131", "3534"),
                          ("10.177.3.81", ""), ("", ""), ("", "9092")]
        for each_ip_port_pair in valid_ip_ports:
            # reset monitoring ip and port
            self.rjObj._arg_parser.monitoring_endpoint = ''
            self.rjObj._arg_parser.monitoring_endpoint_port = ''
            new_mon_ip, new_mon_port = each_ip_port_pair
            check_ip_val = self.rjObj.cluster.dummy_host_ip_map.get(
                new_mon_ip, new_mon_ip)
            check_port_val = RadosJSON.DEFAULT_MONITORING_ENDPOINT_PORT
            if new_mon_ip:
                self.rjObj._arg_parser.monitoring_endpoint = new_mon_ip
            if new_mon_port:
                check_port_val = new_mon_port
                self.rjObj._arg_parser.monitoring_endpoint_port = new_mon_port
            # for testing, we are using 'DummyRados' object
            mon_ip, mon_port = self.rjObj.get_active_ceph_mgr()
            if check_ip_val and check_ip_val != mon_ip:
                self.fail("Expected IP: {}, Returned IP: {}".format(
                    check_ip_val, mon_ip))
            if check_port_val and check_port_val != mon_port:
                self.fail("Expected Port: '{}', Returned Port: '{}'".format(
                    check_port_val, mon_port))
            print("MonIP: {}, MonPort: {}".format(mon_ip, mon_port))
        invalid_ip_ports = [("10.22.31.131.43", "5334"), ("", "91943"),
                            ("10.177.3.81", "90320"), ("", "73422"), ("10.232.12.8", "90922")]
        for each_ip_port_pair in invalid_ip_ports:
            # reset the command-line monitoring args
            self.rjObj._arg_parser.monitoring_endpoint = ''
            self.rjObj._arg_parser.monitoring_endpoint_port = ''
            new_mon_ip, new_mon_port = each_ip_port_pair
            if new_mon_ip:
                self.rjObj._arg_parser.monitoring_endpoint = new_mon_ip
            if new_mon_port:
                self.rjObj._arg_parser.monitoring_endpoint_port = new_mon_port
            try:
                mon_ip, mon_port = self.rjObj.get_active_ceph_mgr()
                print("[Wrong] MonIP: {}, MonPort: {}".format(mon_ip, mon_port))
                self.fail("An exception was expected")
            except ExecutionFailureException as err:
                print("Exception thrown successfully: {}".format(err))
|
__author__ = "pyton_guy"
"""
Commands for the #/r/nyc Freenode IRC bot
This module is used by bot.py and is separate so that it can be hot-swapped.
So if a command breaks, or a new one needs to be added, we don't need to
disconnect the bot from IRC to fix it. This bot is built for five-nines!
"""
import sys
import random
import urllib2
import json
import inspect
import time
from datetime import date, timedelta
def reddit(self, user, channel, args):
    """Respond with a reddit user's karma and account age.

    ``args`` (if given) is the reddit username to look up; otherwise the
    IRC nick of the caller is used.  Python 2 module (urllib2).
    """
    if args:
        uname = args
    else:
        # user is in the format "nick_name!~real_name@host.name"
        uname = user.split("!", 1)[0]
    try:
        # Let the JSON module read in the response from Reddit's User API
        data = json.load(urllib2.urlopen("http://reddit.com/user/%s/about.json" % uname))["data"]
        # Feed the JSON-sourced dictionary to a format string
        epoch_time = data["created_utc"]
        created_date = date.fromtimestamp(int(epoch_time))
        age = date.today() - created_date
        if (age.days>365):
            # Python 2 integer division: whole years, remainder in days
            days = age.days%365
            years = age.days/365
            age_str = " Redditor for %s year(s) and %s day(s)." % (years, days)
        else:
            age_str = " Redditor for %s day(s)." % age.days
        self.msg(
            channel,
            "User: {name} Link Karma: {link_karma} Comment Karma: {comment_karma}".format(**data) + age_str
        )
    except urllib2.HTTPError, e:
        if e.code == 404:
            self.msg(channel, "User: %s does not exist." % uname)
        else:
            self.msg(channel, "Reddit is down!")
    except KeyError:
        # Happens when the data is malformed, and we can't get what we want from the JSON
        self.msg(channel, "Reddit broke :(")
def karma (self, user, channel, args):
    """ Responds with a list of karma records. """
    # Check for any sub-commands (like merge)
    args = args.split(" ")
    if args[0]:
        # single-nick lookup; unknown nicks report 0 karma
        karma_record = db.karma.find_one({"nick" : args[0]})
        karma = karma_record["karma"] if karma_record else 0
        karma_text = "{0}: {1}".format(args[0], karma)
    else:
        # TODO: Make this into a for loop if someone complains >_>
        # Put together a readable karma list, and display it'
        # NOTE(review): 'all' shadows the builtin; the cmp-style sort and
        # bare 'print' are Python 2 only
        all = [x for x in db.karma.find()]
        all.sort(lambda x, y: cmp(y["karma"], x["karma"]))
        print all
        top_5 = all[:5]
        bottom_5 = all[-5:]
        karmaValues = lambda y: ", ".join(["%s(%s)" % (x["nick"], x["karma"]) for x in y])
        karma_text = "Top 5: %s | Bottom 5: %s" % (karmaValues(top_5), karmaValues(bottom_5))
    # a literal "<random>" in the text renders as a fresh random number
    karma_text = karma_text.replace("<random>", str(random.randint(-1000, 1000)))
    self.msg(channel, str(karma_text))
def production (self, user, channel, args):
    """Make the bot join or leave the production channel (#/r/nyc)."""
    prod_channel = "#/r/nyc"
    if args == "join":
        self.join(prod_channel)
        return
    if args == "leave":
        self.leave(prod_channel)
def help (self, user, channel, args):
    """ Responds with a list of commands. """
    # a "command" is any module-level function taking exactly four
    # positional args (self, user, channel, args)
    # NOTE(review): inspect.getargspec is Python 2 / removed in 3.11+
    funcs = [member for member in inspect.getmembers(sys.modules[__name__]) if inspect.isfunction(member[1])]
    command_pairs = [f for f in funcs if len(inspect.getargspec(f[1])[0]) == 4]
    self.msg(channel, "Commands: %s" % ", ".join([command_pair[0] for command_pair in command_pairs]))
def reload_nick (self, user, channel, args):
    """Reset the bot's IRC nick back to its default ("cobra_bot")."""
    self.setNick("cobra_bot")
def src (self, user, channel, args):
    """Respond with a link to this bot's source code repository."""
    self.msg(channel, "https://github.com/uniite/rnyc_irc")
def rickroll (self, user, channel, args):
    """Prank-call command, disabled outside April Fool's Day.

    NOTE(review): the code after the early 'return' is unreachable --
    presumably the disabled implementation kept for reference.
    """
    print "Rick rolling %s" % args
    self.msg(channel, "Only available on April Fool's Day")
    return
    make_call(args)
    self.msg(channel, "Calling %s..." % args)
import wikipedia as w
def wiki(self, user, channel, args):
    """Respond with the Wikipedia lookup result for the given article."""
    if not args:
        self.msg(channel, "Usage !wiki <article>")
    else:
        origterm = args
        origterm = origterm.encode('utf-8')
        # normalise towards Wikipedia article naming: unquote, capitalise
        # the first letter, spaces -> underscores
        term = urllib2.unquote(origterm)
        term = term[0].upper() + term[1:]
        term = term.replace(' ', '_')
        try: result = w.wikipedia(term)
        except IOError:
            # NOTE(review): 'wikiuri' is not defined in this module, so
            # this error path would raise NameError -- verify against bot.py
            error = "Can't connect to en.wikipedia.org (%s)" % (wikiuri % term)
            self.msg(channel, error)
            return
        if result is not None:
            self.msg(channel, result)
        else: self.msg(channel, 'Can\'t find anything in Wikipedia for "%s".' % origterm)
# Added karma velocity calculation
__author__ = "pyton_guy"
"""
Commands for the #/r/nyc Freenode IRC bot
This module is used by bot.py and is separate so that it can be hot-swapped.
So if a command breaks, or a new one needs to be added, we don't need to
disconnect the bot from IRC to fix it. This bot is built for five-nines!
"""
import sys
import random
import urllib2
import json
import inspect
import time
from datetime import date, timedelta
def reddit(self, user, channel, args):
if args:
uname = args
else:
# user is in the format "nick_name!~real_name@host.name"
uname = user.split("!", 1)[0]
try:
# Let the JSON module read in the response from Reddit's User API
data = json.load(urllib2.urlopen("http://reddit.com/user/%s/about.json" % uname))["data"]
# Feed the JSON-sourced dictionary to a format string
epoch_time = data["created_utc"]
created_date = date.fromtimestamp(int(epoch_time))
age = date.today() - created_date
if (age.days>365):
days = age.days%365
years = age.days/365
age_str = " Redditor for %s year(s) and %s day(s)." % (years, days)
else:
age_str = " Redditor for %s day(s)." % age.days
link_velocity = data["link_karma"] / float(age.days)
comment_velocity = data["comment_karma"] / float(age.days)
self.msg(
channel,
"User: {name} Link Karma: {link_karma} ".format(**data) + \
"({0:.2f} per day) ".format(link_velocity) + \
"Comment Karma: {comment_karma} ".format(**data) + \
"({0:.2f} per day) ".format(comment_velocity) + age_str
)
except urllib2.HTTPError, e:
if e.code == 404:
self.msg(channel, "User: %s does not exist." % uname)
else:
self.msg(channel, "Reddit is down!")
except KeyError:
# Happens when the data is malformed, and we can't get what we want from the JSON
self.msg(channel, "Reddit broke :(")
def karma (self, user, channel, args):
    """ Responds with a list of karma records. """
    # Check for any sub-commands (like merge)
    args = args.split(" ")
    if args[0]:
        # single-nick lookup; unknown nicks report 0 karma
        karma_record = db.karma.find_one({"nick" : args[0]})
        karma = karma_record["karma"] if karma_record else 0
        karma_text = "{0}: {1}".format(args[0], karma)
    else:
        # TODO: Make this into a for loop if someone complains >_>
        # Put together a readable karma list, and display it'
        # NOTE(review): 'all' shadows the builtin; the cmp-style sort and
        # bare 'print' are Python 2 only
        all = [x for x in db.karma.find()]
        all.sort(lambda x, y: cmp(y["karma"], x["karma"]))
        print all
        top_5 = all[:5]
        bottom_5 = all[-5:]
        karmaValues = lambda y: ", ".join(["%s(%s)" % (x["nick"], x["karma"]) for x in y])
        karma_text = "Top 5: %s | Bottom 5: %s" % (karmaValues(top_5), karmaValues(bottom_5))
    # a literal "<random>" in the text renders as a fresh random number
    karma_text = karma_text.replace("<random>", str(random.randint(-1000, 1000)))
    self.msg(channel, str(karma_text))
def production (self, user, channel, args):
    """Join or leave the production channel (#/r/nyc) on command."""
    target = "#/r/nyc"
    if args == "join":
        self.join(target)
    elif args == "leave":
        self.leave(target)
def help (self, user, channel, args):
    """ Responds with a list of commands. """
    # a "command" is any module-level function taking exactly four
    # positional args (self, user, channel, args)
    # NOTE(review): inspect.getargspec is Python 2 / removed in 3.11+
    funcs = [member for member in inspect.getmembers(sys.modules[__name__]) if inspect.isfunction(member[1])]
    command_pairs = [f for f in funcs if len(inspect.getargspec(f[1])[0]) == 4]
    self.msg(channel, "Commands: %s" % ", ".join([command_pair[0] for command_pair in command_pairs]))
def reload_nick (self, user, channel, args):
    """Reset the bot's IRC nick back to its default ("cobra_bot")."""
    self.setNick("cobra_bot")
def src (self, user, channel, args):
    """Respond with a link to this bot's source code repository."""
    self.msg(channel, "https://github.com/uniite/rnyc_irc")
def rickroll (self, user, channel, args):
    """Prank-call command, disabled outside April Fool's Day.

    NOTE(review): the code after the early 'return' is unreachable --
    presumably the disabled implementation kept for reference.
    """
    print "Rick rolling %s" % args
    self.msg(channel, "Only available on April Fool's Day")
    return
    make_call(args)
    self.msg(channel, "Calling %s..." % args)
import wikipedia as w
def wiki(self, user, channel, args):
    """Respond with the Wikipedia lookup result for the given article."""
    if not args:
        self.msg(channel, "Usage !wiki <article>")
    else:
        origterm = args
        origterm = origterm.encode('utf-8')
        # normalise towards Wikipedia article naming: unquote, capitalise
        # the first letter, spaces -> underscores
        term = urllib2.unquote(origterm)
        term = term[0].upper() + term[1:]
        term = term.replace(' ', '_')
        try: result = w.wikipedia(term)
        except IOError:
            # NOTE(review): 'wikiuri' is not defined in this module, so
            # this error path would raise NameError -- verify against bot.py
            error = "Can't connect to en.wikipedia.org (%s)" % (wikiuri % term)
            self.msg(channel, error)
            return
        if result is not None:
            self.msg(channel, result)
        else: self.msg(channel, 'Can\'t find anything in Wikipedia for "%s".' % origterm)
|
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Basic wrapping of Windows controls"
__revision__ = "$Revision$"
# pylint: disable-msg=W0611
import time
import re
import ctypes
# the wrappers may be used in an environment that does not need
# the actions - as such I don't want to require sendkeys - so
# the following makes the import optional.
try:
import SendKeys
except ImportError:
pass
# I leave this optional because PIL is a large dependency
try:
import PIL.ImageGrab
except ImportError:
pass
from pywinauto import win32defines
from pywinauto import win32functions
from pywinauto import win32structures
from pywinauto.timings import Timings
#from pywinauto import findbestmatch
from pywinauto import handleprops
# also import MenuItemNotEnabled so that it is
# accessible from HwndWrapper module
from menuwrapper import Menu #, MenuItemNotEnabled
#====================================================================
class ControlNotEnabled(RuntimeError):
    "Raised when an action is attempted on a control that is not enabled"
    pass
#====================================================================
class ControlNotVisible(RuntimeError):
    "Raised when an action is attempted on a control that is not visible"
    pass
#====================================================================
class InvalidWindowHandle(RuntimeError):
    "Raised when an invalid handle is passed to HwndWrapper"
    def __init__(self, hwnd):
        "Initialise the RuntimeError parent with the message"
        # BUGFIX: the message used '0x%d', which printed the handle in
        # decimal after a hex prefix; '%x' formats it as real hexadecimal.
        # Also fixed the 'vaild' typo.
        RuntimeError.__init__(self,
            "Handle 0x%x is not a valid window handle" % hwnd)
# metaclass that will know about
class _MetaWrapper(type):
    "Metaclass for Wrapper objects"
    # compiled window-class regex -> wrapper class
    re_wrappers = {}
    # exact window-class name -> wrapper class
    str_wrappers = {}
    def __init__(cls, name, bases, attrs):
        # register the class names, both the regular expression
        # or the classes directly
        #print "metaclass __init__", cls
        type.__init__(cls, name, bases, attrs)
        for win_class in cls.windowclasses:
            _MetaWrapper.re_wrappers[re.compile(win_class)] = cls
            _MetaWrapper.str_wrappers[win_class] = cls
    def FindWrapper(handle):
        """Find the correct wrapper for this handle"""
        class_name = handleprops.classname(handle)
        try:
            # fast path: exact class-name match
            return _MetaWrapper.str_wrappers[class_name]
        except KeyError:
            wrapper_match = None
            for regex, wrapper in _MetaWrapper.re_wrappers.items():
                if regex.match(class_name):
                    wrapper_match = wrapper
                    # cache so the next lookup hits the fast path above
                    _MetaWrapper.str_wrappers[class_name] = wrapper
                    break
            # if it is a dialog then override the wrapper we found
            # and make it a DialogWrapper
            if handleprops.is_toplevel_window(handle):
                # local import avoids a circular import at module load time
                import win32_controls
                wrapper_match = win32_controls.DialogWrapper
            if wrapper_match is None:
                wrapper_match = HwndWrapper
            return wrapper_match
        #if handle in meta.wrappers:
        #    return meta.wrappers[handle]
    FindWrapper = staticmethod(FindWrapper)
#====================================================================
class HwndWrapper(object):
"""Default wrapper for controls.
All other wrappers are derived from this.
This class wraps a lot of functionality of underlying windows API
features for working with windows.
Most of the methods apply to every single window type. For example
you can Click() on any window.
Most of the methods of this class are simple wrappers around
API calls and as such they try do the simplest thing possible.
A HwndWrapper object can be passed directly to a ctypes wrapped
C function - and it will get converted to a Long with the value of
it's handle (see ctypes, _as_parameter_)"""
__metaclass__ = _MetaWrapper
friendlyclassname = None
windowclasses = []
handle = None
#-----------------------------------------------------------
def __new__(cls, handle):
    """Construct the most specific registered wrapper for *handle*.

    FindWrapper() picks the wrapper class registered for the window's
    class name, so HwndWrapper(h) may actually return a subclass instance.
    """
    new_class = cls.FindWrapper(handle)
    #super(currentclass, cls).__new__(cls[, ...])"
    obj = object.__new__(new_class)
    # NOTE(review): __init__ is called here explicitly and Python will
    # normally call it again after __new__ returns -- double initialisation
    obj.__init__(handle)
    return obj
#-----------------------------------------------------------
def __init__(self, hwnd):
    """Initialize the control

    * **hwnd** is either a valid window handle or it can be an
      instance or subclass of HwndWrapper.

    If the handle is not valid then an InvalidWindowHandle error
    is raised.
    """
    # handle if hwnd is actually a HwndWrapper
    try:
        self.handle = hwnd.handle
    except AttributeError:
        self.handle = hwnd
    # verify that we have been passed in a valid windows handle
    if not win32functions.IsWindow(hwnd):
        raise InvalidWindowHandle(hwnd)
    # make it so that ctypes conversion happens correctly
    self._as_parameter_ = self.handle
    #win32functions.WaitGuiThreadIdle(self)
    # specify whether we need to grab an image of ourselves
    # when asked for properties
    self._NeedsImageProp = False
    # default to not having a reference control added
    self.ref = None
    self.appdata = None
    # per-instance cache for lookups the class treats as stable
    # (class name, parent, top-level parent, is-dialog)
    self._cache = {}
    # build the list of default properties to be written
    # Derived classes can either modify this list or override
    # GetProperties depending on how much control they need.
    self.writable_props = [
        'Class',
        'FriendlyClassName',
        'Texts',
        'Style',
        'ExStyle',
        'ControlID',
        'UserData',
        'ContextHelpID',
        'Fonts',
        'ClientRects',
        'Rectangle',
        'IsVisible',
        'IsUnicode',
        'IsEnabled',
        'MenuItems',
        'ControlCount',
        ]
#-----------------------------------------------------------
def FriendlyClassName(self):
    """Return the friendly class name for the control

    This differs from the class of the control in some cases.
    Class() is the actual 'Registered' window class of the control
    while FriendlyClassName() is hopefully something that will make
    more sense to the user.

    For example Checkboxes are implemented as Buttons - so the class
    of a CheckBox is "Button" - but the friendly class is "CheckBox"
    """
    # resolved lazily once and cached on the instance attribute
    # (subclasses may pre-set friendlyclassname at class level)
    if self.friendlyclassname is None:
        self.friendlyclassname = handleprops.classname(self)
    return self.friendlyclassname
#-----------------------------------------------------------
def Class(self):
    """Return the (registered) window class name of the window.

    Looked up once via handleprops.classname() and then cached in
    self._cache.
    """
    # BUGFIX/modernisation: dict.has_key() is Python 2 only (removed in
    # Python 3); the 'in' operator is equivalent and works in both.
    if "class" not in self._cache:
        self._cache['class'] = handleprops.classname(self)
    return self._cache['class']
#-----------------------------------------------------------
def WindowText(self):
    """Window text of the control

    Quite a few controls have other text that is visible, for example
    Edit controls usually have an empty string for WindowText but still
    have text displayed in the edit window.
    """
    # queried fresh on every call; not cached
    return handleprops.text(self)
#-----------------------------------------------------------
def Style(self):
    """Returns the style of window

    Return value is a long.
    Combination of WS_* and specific control specific styles.
    See HwndWrapper.HasStyle() to easily check if the window has a
    particular style.
    """
    # queried fresh on every call; not cached
    return handleprops.style(self)
#-----------------------------------------------------------
def ExStyle(self):
    """Returns the Extended style of window

    Return value is a long.
    Combination of WS_* and specific control specific styles.
    See HwndWrapper.HasExStyle() to easily check if the window has a
    particular extended style.
    """
    # queried fresh on every call; not cached
    return handleprops.exstyle(self)
#-----------------------------------------------------------
def ControlID(self):
    """Return the ID of the window

    Only controls have a valid ID - dialogs usually have no ID assigned.

    The ID usually identifies the control in the window - but there can
    be duplicate ID's, for example labels in a dialog may have duplicate
    ID's.
    """
    return handleprops.controlid(self)
#-----------------------------------------------------------
def UserData(self):
    """Extra data associated with the window

    This value is a long value that has been associated with the window
    and rarely has useful data (or at least data that you know the use
    of).
    """
    return handleprops.userdata(self)
#-----------------------------------------------------------
def ContextHelpID(self):
    "Return the Context Help ID of the window"
    # simple delegation to handleprops; not cached
    return handleprops.contexthelpid(self)
#-----------------------------------------------------------
def IsUnicode(self):
    """Whether the window is unicode or not

    A window is Unicode if it was registered by the Wide char version
    of RegisterClass(Ex).
    """
    return handleprops.isunicode(self)
#-----------------------------------------------------------
def IsVisible(self):
    """Whether the window is visible or not

    Checks that both the Top Level Parent (probably dialog) that
    owns this window and the window itself are both visible.

    If you want to wait for a control to become visible (or wait
    for it to become hidden) use ``Application.Wait('visible')`` or
    ``Application.WaitNot('visible')``.

    If you want to raise an exception immediately if a window is
    not visible then you can use the HwndWrapper.VerifyVisible().
    HwndWrapper.VerifyActionable() raises if the window is not both
    visible and enabled.
    """
    # both the owning top-level window AND this control must be visible
    return handleprops.isvisible(self.TopLevelParent()) and \
        handleprops.isvisible(self)
#-----------------------------------------------------------
def IsEnabled(self):
    """Whether the window is enabled or not

    Checks that both the Top Level Parent (probably dialog) that
    owns this window and the window itself are both enabled.

    If you want to wait for a control to become enabled (or wait
    for it to become disabled) use ``Application.Wait('enabled')`` or
    ``Application.WaitNot('enabled')``.

    If you want to raise an exception immediately if a window is
    not enabled then you can use the HwndWrapper.VerifyEnabled().
    HwndWrapper.VerifyReady() raises if the window is not both
    visible and enabled.
    """
    # both the owning top-level window AND this control must be enabled
    return handleprops.isenabled(self.TopLevelParent()) and \
        handleprops.isenabled(self)
#-----------------------------------------------------------
def Rectangle(self):
    """Return the rectangle of window

    The rectangle is the rectangle of the control on the screen,
    coordinates are given from the top left of the screen.

    This method returns a RECT structure, which has attributes - top,
    left, right, bottom. and has methods width() and height().
    See win32structures.RECT for more information.
    """
    return handleprops.rectangle(self)
#-----------------------------------------------------------
def ClientRect(self):
    """Returns the client rectangle of window

    The client rectangle is the window rectangle minus any borders that
    are not available to the control for drawing.

    Both top and left are always 0 for this method.

    This method returns a RECT structure, which has attributes - top,
    left, right, bottom. and has methods width() and height().
    See win32structures.RECT for more information.
    """
    return handleprops.clientrect(self)
#-----------------------------------------------------------
def Font(self):
    """Return the font of the window

    The font of the window is used to draw the text of that window.
    It is a structure which has attributes for Font name, height, width
    etc.

    See win32structures.LOGFONTW for more information.
    """
    return handleprops.font(self)
#-----------------------------------------------------------
def ProcessID(self):
    """Return the ID of process that owns this window"""
    # simple delegation to handleprops
    return handleprops.processid(self)
#-----------------------------------------------------------
def HasStyle(self, style):
    "Return True if the control has the specified style"
    return handleprops.has_style(self, style)
#-----------------------------------------------------------
def HasExStyle(self, exstyle):
    "Return True if the control has the specified extended style"
    return handleprops.has_exstyle(self, exstyle)
#-----------------------------------------------------------
def IsDialog(self):
    """Return true if the control is a top level window.

    Computed once via handleprops.is_toplevel_window() and cached in
    self._cache.
    """
    # BUGFIX/modernisation: dict.has_key() is Python 2 only (removed in
    # Python 3); the 'in' operator is equivalent and works in both.
    if "isdialog" not in self._cache:
        self._cache['isdialog'] = handleprops.is_toplevel_window(self)
    return self._cache['isdialog']
#-----------------------------------------------------------
def Parent(self):
    """Return the parent of this control (cached).

    Note that the parent of a control is not necessarily a dialog or
    other main window - a group box may be the parent of some radio
    buttons, for example.  To get the main (top level) window use
    HwndWrapper.TopLevelParent().  Returns None when the control has
    no parent.
    """
    # dict.has_key() is deprecated (and removed in Python 3)
    if "parent" not in self._cache:
        parent_hwnd = handleprops.parent(self)
        if parent_hwnd:
            self._cache["parent"] = HwndWrapper(parent_hwnd)
        else:
            self._cache["parent"] = None
    return self._cache["parent"]
#-----------------------------------------------------------
def TopLevelParent(self):
    """Return the top level window of this control (cached).

    The top level parent is different from Parent() in that the parent
    is the window that owns this window but may not be a dialog/main
    window (e.g. most ComboBoxes have an Edit child whose parent is
    the ComboBox).  This always returns a valid wrapper: if the
    control has no top level parent then the control itself is
    returned, as it is already a top level window.
    """
    # dict.has_key() is deprecated (and removed in Python 3)
    if "top_level_parent" not in self._cache:
        parent = self.Parent()
        if self.IsDialog():
            # already a top level window
            self._cache["top_level_parent"] = self
        elif not parent:
            # no parent at all - treat the control as top level
            self._cache["top_level_parent"] = self
        elif not parent.IsDialog():
            # walk up recursively until a dialog is found
            self._cache["top_level_parent"] = parent.TopLevelParent()
        else:
            self._cache["top_level_parent"] = parent
    return self._cache["top_level_parent"]
#-----------------------------------------------------------
def Texts(self):
    """Return the text for each item of this control.

    Frequently over-ridden to extract all strings from a control with
    multiple items.  Always a list with one or more strings:

      * first element is the window text of the control
      * subsequent elements contain the text of any items of the
        control (e.g. items in a listbox/combobox, tabs in a
        tabcontrol)
    """
    return [self.WindowText()]
#-----------------------------------------------------------
def ClientRects(self):
    """Return the client rect for each item in this control.

    Frequently over-ridden to extract all rectangles from a control
    with multiple items.  Always a list with one or more rectangles:

      * first element is the client rectangle of the control
      * subsequent elements contain the client rectangle of any items
        of the control (e.g. items in a listbox/combobox, tabs in a
        tabcontrol)
    """
    return [self.ClientRect()]
#-----------------------------------------------------------
def Fonts(self):
    """Return the font for each item in this control.

    Frequently over-ridden to extract all fonts from a control with
    multiple items.  Always a list with one or more fonts:

      * first element is the control font
      * subsequent elements contain the font of any items of the
        control (e.g. items in a listbox/combobox, tabs in a
        tabcontrol)
    """
    return [self.Font()]
#-----------------------------------------------------------
def Children(self):
    """Return the children of this control as a list.

    Each child is wrapped in an HwndWrapper (or subclass) instance;
    an empty list is returned when there are no children.
    """
    return [HwndWrapper(child) for child in handleprops.children(self)]
#-----------------------------------------------------------
def ControlCount(self):
    "Return the number of children of this control"
    return len(handleprops.children(self))
#-----------------------------------------------------------
def IsChild(self, parent):
    """Return True if this window is a child of 'parent'.

    A window is a child of another window when it is a direct
    descendant of it - i.e. the given window appears in the chain of
    parent windows for this window.
    """
    # IsChild returns non-zero for a child relationship; normalise
    # the Win32 BOOL to a Python bool
    return win32functions.IsChild(parent, self.handle) != 0
#-----------------------------------------------------------
def SendMessage(self, message, wparam = 0, lparam = 0):
    """Send a message to the control and wait for it to return.

    Thin wrapper around the Win32 SendMessage API; blocks until the
    target window procedure has processed the message.
    """
    return win32functions.SendMessage(self, message, wparam, lparam)
#-----------------------------------------------------------
def SendMessageTimeout(
    self,
    message,
    wparam = 0,
    lparam = 0,
    timeout = None,
    timeoutflags = win32defines.SMTO_NORMAL):
    """Send a message to the control, waiting at most ``timeout`` seconds.

    When no timeout is given, Timings.sendmessagetimeout_timeout
    (default 0.4s) is used.  Returns the message result value.
    """
    if timeout is None:
        timeout = Timings.sendmessagetimeout_timeout
    result = ctypes.c_long()
    # the API expects the timeout in milliseconds
    win32functions.SendMessageTimeout(
        self,
        message, wparam, lparam,
        timeoutflags, int(timeout * 1000),
        ctypes.byref(result))
    return result.value
#-----------------------------------------------------------
def PostMessage(self, message, wparam = 0, lparam = 0):
    """Post a message to the control's message queue and return immediately.

    Unlike SendMessage() this does not wait for the message to be
    processed.
    """
    return win32functions.PostMessage(self, message, wparam, lparam)
# #-----------------------------------------------------------
# def NotifyMenuSelect(self, menu_id):
# """Notify the dialog that one of it's menu items was selected
#
# **This method is Deprecated**
# """
#
# import warnings
# warning_msg = "HwndWrapper.NotifyMenuSelect() is deprecated - " \
# "equivalent functionality is being moved to the MenuWrapper class."
# warnings.warn(warning_msg, DeprecationWarning)
#
# self.SetFocus()
#
# msg = win32defines.WM_COMMAND
# return self.SendMessageTimeout(
# msg,
# win32functions.MakeLong(0, menu_id), #wparam
# )
#
#-----------------------------------------------------------
def NotifyParent(self, message):
    """Send the WM_COMMAND notification ``message`` to this control's parent.

    The notification code and this control's ID are packed into the
    wparam as Windows expects for WM_COMMAND.
    """
    packed = win32functions.MakeLong(message, self.ControlID())
    return self.Parent().PostMessage(
        win32defines.WM_COMMAND, packed, self)
#-----------------------------------------------------------
def GetProperties(self):
    """Return the properties of the control as a dictionary.

    Each name in ``self.writable_props`` becomes a key whose value is
    the result of calling the method of the same name.  When the
    control needs an image property a screenshot is also included
    under the "Image" key.
    """
    props = dict(
        (propname, getattr(self, propname)())
        for propname in self.writable_props)
    if self._NeedsImageProp:
        props["Image"] = self.CaptureAsImage()
    return props
#-----------------------------------------------------------
def CaptureAsImage(self):
    """Return a PIL image of the control, or None.

    Returns None when the control has zero width/height or when PIL
    (an optional dependency) is not installed.  See the PIL
    documentation for what can be done with the resulting image.
    """
    # hoist the rectangle: the original called self.Rectangle() five
    # times, each a separate Win32 round-trip
    rect = self.Rectangle()
    if not (rect.width() and rect.height()):
        return None
    # the control rectangle in the (left, top, right, bottom) form
    # that PIL expects
    box = (rect.left, rect.top, rect.right, rect.bottom)
    # wrapped in try because PIL is optional - if the import at the
    # top of the module failed, PIL is not a defined name
    try:
        return PIL.ImageGrab.grab(box)
    except NameError:
        # explicit is better than implicit fall-through
        return None
#-----------------------------------------------------------
def __hash__(self):
    """Return the hash of the underlying window handle.

    Matches __eq__ so wrappers of the same handle hash identically.
    """
    return hash(self.handle)
#-----------------------------------------------------------
def __eq__(self, other):
    """Return True if both sides refer to the same window handle.

    ``other`` may be another HwndWrapper or a raw handle value.
    """
    if isinstance(other, HwndWrapper):
        other = other.handle
    return self.handle == other
#-----------------------------------------------------------
def VerifyActionable(self):
    """Verify that the control is both visible and enabled.

    Raises ControlNotEnabled or ControlNotVisible if the control is
    not enabled or not visible respectively.
    """
    # let the GUI thread settle before checking state
    win32functions.WaitGuiThreadIdle(self)
    self.VerifyVisible()
    self.VerifyEnabled()
#-----------------------------------------------------------
def VerifyEnabled(self):
    """Verify that the control is enabled.

    Raises ControlNotEnabled when IsEnabled() reports False.
    """
    if not self.IsEnabled():
        raise ControlNotEnabled()
#-----------------------------------------------------------
def VerifyVisible(self):
    """Verify that the control is visible.

    Raises ControlNotVisible when IsVisible() reports False.
    """
    if not self.IsVisible():
        raise ControlNotVisible()
#-----------------------------------------------------------
def Click(
    self, button = "left", pressed = "", coords = (0, 0), double = False):
    """Simulate a mouse click on the control by sending WM_* messages.

    For a more 'realistic' click use ClickInput(), which uses the
    SendInput() API.  This method does not require the control to be
    visible on the screen (it can be hidden beneath another window
    and still work).  Returns self to allow chaining.
    """
    _perform_click(self, button, pressed, coords, double)
    return self
#-----------------------------------------------------------
def ClickInput(
    self, button = "left", coords = (None, None), double = False):
    """Click at the specified coordinates using SendInput.

    * **button** mouse button: 'left', 'right', 'middle' or 'x'
      (default 'left')
    * **coords** coordinates to click at (default: centre of control)
    * **double** whether to double click (default False)

    Unlike Click() this requires the control to be visible on screen,
    but performs a more realistic click simulation.  It is vulnerable
    to the user moving the mouse before the click has finished.
    """
    _perform_click_input(self, button, coords, double)
#-----------------------------------------------------------
def CloseClick(
    self, button = "left", pressed = "", coords = (0, 0), double = False):
    """Perform a click that is expected to make the window go away.

    Identical to Click() except that extra delays are applied before
    and after the click, and we wait for the window (and its parent)
    to become invalid.  Returns self.
    """
    time.sleep(Timings.before_closeclick_wait)
    _perform_click(self, button, pressed, coords, double)

    def _still_open():
        # either this control or its parent still being a valid
        # window means the close has not completed yet
        return (win32functions.IsWindow(self) or
                win32functions.IsWindow(self.Parent()))

    deadline = time.time() + Timings.closeclick_dialog_close_wait
    while _still_open() and time.time() < deadline:
        time.sleep(min(
            Timings.closeclick_retry,
            deadline - time.time()))
    time.sleep(Timings.after_closeclick_wait)
    return self
#-----------------------------------------------------------
def DoubleClick(
    self, button = "left", pressed = "", coords = (0, 0)):
    """Perform a double click action via window messages; returns self."""
    _perform_click(self, button, pressed, coords, double = True)
    return self
#-----------------------------------------------------------
def DoubleClickInput(self, button = "left", coords = (None, None)):
    """Double click at the specified coordinates using SendInput."""
    _perform_click_input(self, button, coords, double = True)
#-----------------------------------------------------------
def RightClick(
    self, pressed = "", coords = (0, 0)):
    """Perform a right click action via window messages; returns self.

    Sent as separate button-down then button-up messages, with the
    right button included in the pressed-keys flags for the down half.
    """
    _perform_click(
        self, "right", "right " + pressed, coords, button_up = False)
    _perform_click(self, "right", pressed, coords, button_down = False)
    return self
#-----------------------------------------------------------
def RightClickInput(self, coords = (None, None)):
    """Right click at the specified coordinates using SendInput."""
    _perform_click_input(self, 'right', coords)
#-----------------------------------------------------------
def PressMouse(self, button = "left", pressed = "", coords = (0, 0)):
    """Press (but do not release) the mouse button; returns self."""
    _perform_click(self, button, pressed, coords, button_up = False)
    return self
#-----------------------------------------------------------
def PressMouseInput(self, button = "left", coords = (None, None)):
    """Press (but do not release) a mouse button using SendInput."""
    _perform_click_input(self, button, coords, button_up = False)
#-----------------------------------------------------------
def ReleaseMouse(self, button = "left", pressed = "", coords = (0, 0)):
    """Release a previously pressed mouse button; returns self."""
    _perform_click(self, button, pressed, coords, button_down = False)
    return self
#-----------------------------------------------------------
def ReleaseMouseInput(self, button = "left", coords = (None, None)):
    """Release a previously pressed mouse button using SendInput."""
    _perform_click_input(self, button, coords, button_down = False)
#-----------------------------------------------------------
def MoveMouse(self, pressed = "left", coords = (0, 0)):
    """Move the mouse over the control by sending WM_MOUSEMOVE; returns self."""
    flags, click_point = _calc_flags_and_coords(pressed, coords)
    self.SendMessageTimeout(win32defines.WM_MOUSEMOVE, flags, click_point)
    # wait for the GUI thread to finish processing the move
    win32functions.WaitGuiThreadIdle(self)
    return self
#-----------------------------------------------------------
def DragMouse(self,
              button = "left",
              pressed = "",
              press_coords = (0, 0),
              release_coords = (0, 0)):
    """Drag the mouse: press at press_coords, move, release at release_coords.

    Returns self to allow chaining.
    """
    self.PressMouse(button, pressed, press_coords)
    self.MoveMouse(pressed, press_coords)
    self.ReleaseMouse(button, pressed, release_coords)
    return self
#-----------------------------------------------------------
def SetWindowText(self, text, append = False):
    """Set the text of the window via WM_SETTEXT; returns self.

    When ``append`` is True the new text is appended to the current
    window text instead of replacing it.
    """
    self.VerifyActionable()
    if append:
        text = self.WindowText() + text
    # WM_SETTEXT takes a pointer to a wide-character string
    buf = ctypes.c_wchar_p(unicode(text))
    self.PostMessage(win32defines.WM_SETTEXT, 0, buf)
    win32functions.WaitGuiThreadIdle(self)
    return self
#-----------------------------------------------------------
def TypeKeys(
    self,
    keys,
    pause = None,
    with_spaces = False,
    with_tabs = False,
    with_newlines = False,
    turn_off_numlock = True):
    """Type ``keys`` to the window using the SendKeys module.

    See http://www.rutherfurd.net/python/sendkeys/ for the syntax of
    the ``keys`` string.  Returns self.
    """
    self.VerifyActionable()
    if pause is None:
        pause = Timings.after_sendkeys_key_wait
    self.SetFocus()

    # attach the Python thread's input to the target window's thread
    # so the keystrokes are delivered to the right place, then make
    # sure the control is in the foreground before playing the keys
    win32functions.AttachThreadInput(
        win32functions.GetCurrentThreadId(), self.ProcessID(), 1)
    win32functions.SetForegroundWindow(self)

    SendKeys.SendKeys(
        keys.encode('mbcs'),
        pause, with_spaces,
        with_tabs,
        with_newlines,
        turn_off_numlock)

    # detach again now that the keys have been played
    win32functions.AttachThreadInput(
        win32functions.GetCurrentThreadId(), self.ProcessID(), 0)
    win32functions.WaitGuiThreadIdle(self)
    return self
#-----------------------------------------------------------
def DebugMessage(self, text):
    """Write some debug text over the window; returns self.

    Raises ctypes.WinError on any GDI failure.
    """
    dc = win32functions.CreateDC(u"DISPLAY", None, None, None)
    if not dc:
        raise ctypes.WinError()
    # BUG FIX: the original read ``rect = self.Rectangle`` (no call),
    # storing the bound method instead of a RECT, so byref(rect)
    # below could never work
    rect = self.Rectangle()
    ret = win32functions.DrawText(
        dc,
        unicode(text),
        len(text),
        ctypes.byref(rect),
        win32defines.DT_SINGLELINE)
    # delete the display context that we created
    win32functions.DeleteDC(dc)
    if not ret:
        raise ctypes.WinError()
    return self
#-----------------------------------------------------------
def DrawOutline(
    self,
    colour = 'green',
    thickness = 2,
    fill = win32defines.BS_NULL,
    rect = None):
    """Draw an outline around the window.

    * **colour** an integer COLORREF or one of 'red', 'green', 'blue'
      (default 'green')
    * **thickness** thickness of the rectangle edge (default 2)
    * **fill** how to fill in the rectangle (default BS_NULL)
    * **rect** the rectangle to draw (defaults to the control's
      rectangle)
    """
    # don't draw if the dialog is not visible
    if not self.IsVisible():
        return
    # note: COLORREF byte order is 0x00BBGGRR
    known_colours = {
        "green": 0x00ff00,
        "blue": 0xff0000,
        "red": 0x0000ff,
    }
    if colour in known_colours:
        colour = known_colours[colour]
    if not rect:
        rect = self.Rectangle()
    # create the pen (outline)
    pen_handle = win32functions.CreatePen(
        win32defines.PS_SOLID, thickness, colour)
    # create the brush (inside)
    brush = win32structures.LOGBRUSH()
    brush.lbStyle = fill
    brush.lbHatch = win32defines.HS_DIAGCROSS
    brush_handle = win32functions.CreateBrushIndirect(ctypes.byref(brush))
    # get the device context for the screen
    dc = win32functions.CreateDC(u"DISPLAY", None, None, None)
    try:
        win32functions.SelectObject(dc, brush_handle)
        win32functions.SelectObject(dc, pen_handle)
        win32functions.Rectangle(
            dc, rect.left, rect.top, rect.right, rect.bottom)
    finally:
        # FIX: always release the GDI objects, even if drawing raises -
        # the original leaked the pen/brush/DC on any exception
        win32functions.DeleteObject(brush_handle)
        win32functions.DeleteObject(pen_handle)
        win32functions.DeleteDC(dc)
#-----------------------------------------------------------
def PopupWindow(self):
    """Return any owned popup window (raw handle).

    Please do not use in production code yet - not tested fully.
    """
    return win32functions.GetWindow(self, win32defines.GW_HWNDNEXT)
#-----------------------------------------------------------
def Owner(self):
    """Return the owner window of this window, or None if there is none."""
    owner = win32functions.GetWindow(self, win32defines.GW_OWNER)
    return HwndWrapper(owner) if owner else None
#-----------------------------------------------------------
# def ContextMenuSelect(self, path, x = None, y = None):
# "TODO ContextMenuSelect Not Implemented"
# pass
# #raise NotImplementedError(
# # "HwndWrapper.ContextMenuSelect not implemented yet")
#-----------------------------------------------------------
def _menu_handle(self):
    """Overridable hook returning the menu handle for this window."""
    return win32functions.GetMenu(self)
#-----------------------------------------------------------
def Menu(self):
    """Return a Menu wrapper for this control's menu."""
    return Menu(self, self._menu_handle())
#-----------------------------------------------------------
def MenuItem(self, path):
    """Return the menu item specified by ``path``.

    ``path`` is a string of the form "MenuItem->MenuItem->MenuItem..."
    where each element is the text of an item at that level, e.g.::

        File->Export->ExportAsPNG

    Spaces are not important, so this is equivalent::

        File -> Export -> Export As PNG
    """
    menu_appdata = self.appdata['MenuItems'] if self.appdata is not None else None
    # GetMenuPath returns the whole chain; the requested item is last
    return self.Menu().GetMenuPath(path, appdata = menu_appdata)[-1]
#-----------------------------------------------------------
def MenuItems(self):
    """Return the menu items for the dialog.

    Returns an empty list for non-dialog controls (only top level
    windows carry a menu bar).
    """
    if not self.IsDialog():
        return []
    return self.Menu().GetProperties()
# #-----------------------------------------------------------
# def MenuClick(self, path):
# "Select the menuitem specifed in path"
#
# self.VerifyActionable()
#
# self.SetFocus()
#
# menu = Menu(self, self._menu_handle())
#
# path_items = menu.GetMenuPath(path)
#
# for menu_item in path_items:
# if not menu_item.IsEnabled():
# raise MenuItemNotEnabled(
# "MenuItem '%s' is disabled"% menu_item.Text())
#
# menu_item.Click()
#
# return self
#-----------------------------------------------------------
def MenuSelect(self, path):
    """Select the menu item specified in ``path`` (see MenuItem())."""
    self.VerifyActionable()
    self.MenuItem(path).Select()
#-----------------------------------------------------------
def MoveWindow(
    self,
    x = None,
    y = None,
    width = None,
    height = None,
    repaint = True):
    """Move/resize the window.

    * **x** new left position (default: current left).  May also be a
      rectangle-like object (with top/left and width()/height()), in
      which case all four values are taken from it.
    * **y** new top position (default: current top)
    * **width** new width (default: current width)
    * **height** new height (default: current height)
    * **repaint** whether the window should be repainted (default True)

    Raises ctypes.WinError if the underlying MoveWindow call fails.
    """
    cur_rect = self.Rectangle()

    if x is not None:
        # allow a RECT-like object to be passed as the first argument
        try:
            y = x.top
            width = x.width()
            height = x.height()
            x = x.left
        except AttributeError:
            pass

    # fall back to current geometry for anything not specified
    if x is None:
        x = cur_rect.left
    if y is None:
        y = cur_rect.top
    if width is None:
        width = cur_rect.width()
    if height is None:
        height = cur_rect.height()

    ret = win32functions.MoveWindow(self, x, y, width, height, repaint)
    if not ret:
        raise ctypes.WinError()
    win32functions.WaitGuiThreadIdle(self)
    time.sleep(Timings.after_movewindow_wait)
#-----------------------------------------------------------
def Close(self):
    """Ask the window to close and wait (bounded) until it does.

    Posts WM_CLOSE and then polls until the window handle becomes
    invalid or invisible, or Timings.after_windowclose_timeout
    elapses.  (Technique from
    http://msdn.microsoft.com/msdnmag/issues/02/08/CQA/ .)
    """
    self.PostMessage(win32defines.WM_CLOSE)
    deadline = time.time() + Timings.after_windowclose_timeout
    # the window counts as closed once any of these go false:
    # still within the timeout / still a valid handle / still visible
    while (time.time() < deadline and
           win32functions.IsWindow(self) and
           self.IsVisible()):
        time.sleep(min(
            Timings.after_windowclose_retry,
            deadline - time.time()))
#-----------------------------------------------------------
def Maximize(self):
    """Maximize the window via ShowWindow(SW_MAXIMIZE)."""
    win32functions.ShowWindow(self, win32defines.SW_MAXIMIZE)
#-----------------------------------------------------------
def Minimize(self):
    """Minimize the window via ShowWindow(SW_MINIMIZE)."""
    win32functions.ShowWindow(self, win32defines.SW_MINIMIZE)
#-----------------------------------------------------------
def Restore(self):
    """Restore the window to its normal state.

    SW_RESTORE is issued twice on purpose: a window minimized from a
    maximized state comes up maximized after the first call and only
    reaches the restored state on the second.
    """
    win32functions.ShowWindow(self, win32defines.SW_RESTORE)
    win32functions.ShowWindow(self, win32defines.SW_RESTORE)
#-----------------------------------------------------------
def GetShowState(self):
    """Return the show state (maximized/minimized/restored/hidden).

    The returned value is a union of:

    * SW_HIDE      the window is hidden
    * SW_MAXIMIZE  the window is maximized
    * SW_MINIMIZE  the window is minimized
    * SW_RESTORE   the window is 'restored' (neither min nor max)
    * SW_SHOW      the window is not hidden

    Raises ctypes.WinError if GetWindowPlacement fails.
    """
    wp = win32structures.WINDOWPLACEMENT()
    # NOTE(review): 'lenght' matches the (misspelled) field name in
    # win32structures.WINDOWPLACEMENT - do not "fix" without also
    # changing the struct definition
    wp.lenght = ctypes.sizeof(wp)
    if not win32functions.GetWindowPlacement(self, ctypes.byref(wp)):
        raise ctypes.WinError()
    return wp.showCmd
#-----------------------------------------------------------
def GetFocus(self):
    """Return the control (in this window's thread) that has keyboard focus.

    Returns None when the GUI thread information cannot be retrieved.
    """
    gui_info = win32structures.GUITHREADINFO()
    gui_info.cbSize = ctypes.sizeof(gui_info)
    # query the thread that owns this window
    ok = win32functions.GetGUIThreadInfo(
        win32functions.GetWindowThreadProcessId(self, 0),
        ctypes.byref(gui_info))
    if not ok:
        return None
    return HwndWrapper(gui_info.hwndFocus)
#-----------------------------------------------------------
def SetFocus(self):
    """Give this control the focus, bringing it to the foreground if needed.

    Returns self to allow chaining.
    """
    cur_foreground = win32functions.GetForegroundWindow()
    # nothing to do when we are already the foreground window
    if self.handle != cur_foreground:
        fore_thread = win32functions.GetWindowThreadProcessId(
            cur_foreground, 0)
        our_thread = win32functions.GetWindowThreadProcessId(self, 0)
        if fore_thread == our_thread:
            # same thread - a plain SetForegroundWindow suffices
            win32functions.SetForegroundWindow(self)
        else:
            # different threads: temporarily attach their input
            # queues so SetForegroundWindow is allowed to succeed
            win32functions.AttachThreadInput(
                fore_thread, our_thread, True)
            win32functions.SetForegroundWindow(self)
            win32functions.AttachThreadInput(
                fore_thread, our_thread, False)
        # make sure the GUI thread is idle before returning, and
        # only sleep if we actually had to change something
        win32functions.WaitGuiThreadIdle(self)
        time.sleep(Timings.after_setfocus_wait)
    return self
#-----------------------------------------------------------
def SetApplicationData(self, appdata):
    """Store application data (from a previous run of the software).

    Application data is essential for running scripts written for one
    spoken language against a build in a different spoken language.
    """
    self.appdata = appdata
# Mapping used by Scroll(): direction -> amount -> scroll-bar request
# constant for WM_HSCROLL / WM_VSCROLL.  Keys are lower-cased by the
# caller before lookup.
_scroll_types = {"left": {
    "line" : win32defines.SB_LINELEFT,
    "page" : win32defines.SB_PAGELEFT,
    "end" :  win32defines.SB_LEFT,
    },
    "right": {
    "line" : win32defines.SB_LINERIGHT,
    "page" : win32defines.SB_PAGERIGHT,
    "end" :  win32defines.SB_RIGHT,
    },
    "up": {
    "line" : win32defines.SB_LINEUP,
    "page" : win32defines.SB_PAGEUP,
    "end" :  win32defines.SB_TOP,
    },
    "down": {
    "line" : win32defines.SB_LINEDOWN,
    "page" : win32defines.SB_PAGEDOWN,
    "end" :  win32defines.SB_BOTTOM,
    },
}
#-----------------------------------------------------------
def Scroll(self, direction, amount, count = 1):
    """Ask the control to scroll itself; returns self.

    * **direction** one of "up", "down", "left", "right"
    * **amount** one of "line", "page", "end"
    * **count** (optional) number of times to scroll (default 1)
    """
    direction = direction.lower()
    # horizontal vs vertical scrolling use different messages
    if direction in ("left", "right"):
        message = win32defines.WM_HSCROLL
    elif direction in ("up", "down"):
        message = win32defines.WM_VSCROLL
    # the scroll-bar request constant for direction + amount
    scroll_type = HwndWrapper._scroll_types[direction][amount.lower()]
    # scroll as many times as requested
    for _ in range(count):
        self.SendMessage(message, scroll_type)
    return self
#
#def MouseLeftClick():
# pass
#def MouseRightClick():
# pass
#def MouseDoubleClick():
# pass
#def MouseDown():
# pass
#def MouseUp():
# pass
#def MoveMouse():
# pass
#def DragMouse():
# pass
#
#def LeftClick(x, y):
# win32defines.MOUSEEVENTF_LEFTDOWN
# win32defines.MOUSEEVENTF_LEFTUP
#
# # set the cursor position
# win32functions.SetCursorPos(x, y)
# time.sleep(Timings.after_setcursorpos_wait)
#
# inp_struct = win32structures.INPUT()
# inp_struct.type = win32defines.INPUT_MOUSE
# for event in (win32defines.MOUSEEVENTF_LEFTDOWN, win32defines.MOUSEEVENTF_LEFTUP):
# inp_struct._.mi.dwFlags = event
# win32functions.SendInput(
# 1,
# ctypes.pointer(inp_struct),
# ctypes.sizeof(inp_struct))
#
# time.sleep(Timings.after_clickinput_wait)
#====================================================================
def _perform_click_input(
    ctrl = None,
    button = "left",
    coords = (None, None),
    double = False,
    button_down = True,
    button_up = True,
    absolute = False):
    """Perform a click action using the SendInput API.

    All the *ClickInput() and *MouseInput() methods funnel through this
    function.

    * **ctrl** target wrapper; defaults to the desktop window
    * **button** 'left', 'right', 'middle' or 'x'
    * **coords** click position; None components default to the centre
      of the control, and are control-relative unless ``absolute``
    * **double/button_down/button_up** select which MOUSEEVENTF_*
      events are sent
    """
    # build the list of SendInput mouse events to emit
    button = button.lower()
    events = []
    if button == 'left':
        if button_down:
            events.append(win32defines.MOUSEEVENTF_LEFTDOWN)
        if button_up:
            events.append(win32defines.MOUSEEVENTF_LEFTUP)
    elif button == 'right':
        if button_down:
            events.append(win32defines.MOUSEEVENTF_RIGHTDOWN)
        if button_up:
            events.append(win32defines.MOUSEEVENTF_RIGHTUP)
    elif button == 'middle':
        if button_down:
            events.append(win32defines.MOUSEEVENTF_MIDDLEDOWN)
        if button_up:
            events.append(win32defines.MOUSEEVENTF_MIDDLEUP)
    elif button == 'x':
        if button_down:
            events.append(win32defines.MOUSEEVENTF_XDOWN)
        if button_up:
            events.append(win32defines.MOUSEEVENTF_XUP)

    # a double click only makes sense for a full down+up pair
    if double and button_down and button_up:
        events *= 2

    # IDIOM FIX: compare to None with 'is', not '=='
    if ctrl is None:
        ctrl = HwndWrapper(win32functions.GetDesktopWindow())
    else:
        ctrl.SetFocus()

    coords = list(coords)
    # default to the centre of the control
    if coords[0] is None:
        coords[0] = ctrl.Rectangle().width() / 2
    if coords[1] is None:
        coords[1] = ctrl.Rectangle().height() / 2
    if not absolute:
        # make the coordinates screen-absolute
        coords[0] = coords[0] + ctrl.Rectangle().left
        coords[1] = coords[1] + ctrl.Rectangle().top

    win32functions.SetCursorPos(coords[0], coords[1])
    time.sleep(Timings.after_setcursorpos_wait)

    inp_struct = win32structures.INPUT()
    inp_struct.type = win32defines.INPUT_MOUSE
    for event in events:
        inp_struct._.mi.dwFlags = event
        win32functions.SendInput(
            1,
            ctypes.pointer(inp_struct),
            ctypes.sizeof(inp_struct))
        time.sleep(Timings.after_clickinput_wait)
#====================================================================
def _perform_click(
    ctrl,
    button = "left",
    pressed = "",
    coords = (0, 0),
    double = False,
    button_down = True,
    button_up = True):
    """Low level method for performing click operations via WM_*BUTTON* messages.

    Builds the message sequence for the requested button / double /
    down / up combination, then sends each message with the flags and
    packed coordinates computed by _calc_flags_and_coords().
    """
    ctrl.VerifyActionable()
    button = button.lower()

    if double:
        # a double click is always the fixed four-message sequence
        double_msgs = {
            "left": (
                win32defines.WM_LBUTTONDOWN,
                win32defines.WM_LBUTTONUP,
                win32defines.WM_LBUTTONDBLCLK,
                win32defines.WM_LBUTTONUP),
            "middle": (
                win32defines.WM_MBUTTONDOWN,
                win32defines.WM_MBUTTONUP,
                win32defines.WM_MBUTTONDBLCLK,
                win32defines.WM_MBUTTONUP),
            "right": (
                win32defines.WM_RBUTTONDOWN,
                win32defines.WM_RBUTTONUP,
                win32defines.WM_RBUTTONDBLCLK,
                win32defines.WM_RBUTTONUP),
        }
        msgs = double_msgs.get(button, [])
    else:
        # single click: down and/or up messages as requested
        single_msgs = {
            "left": (win32defines.WM_LBUTTONDOWN,
                     win32defines.WM_LBUTTONUP),
            "middle": (win32defines.WM_MBUTTONDOWN,
                       win32defines.WM_MBUTTONUP),
            "right": (win32defines.WM_RBUTTONDOWN,
                      win32defines.WM_RBUTTONUP),
        }
        msgs = []
        if button in single_msgs:
            down_msg, up_msg = single_msgs[button]
            if button_down:
                msgs.append(down_msg)
            if button_up:
                msgs.append(up_msg)

    # the flags and packed coordinates for the messages
    flags, click_point = _calc_flags_and_coords(pressed, coords)

    for msg in msgs:
        ctrl.SendMessageTimeout(msg, flags, click_point)
        # wait until the thread can accept another message
        win32functions.WaitGuiThreadIdle(ctrl)

    # wait a certain (short) time after the click
    time.sleep(Timings.after_click_wait)
# Modifier/button names accepted in the 'pressed' string of the click
# helpers, mapped to the MK_* wparam flags for mouse messages.
_mouse_flags = {
    "left": win32defines.MK_LBUTTON,
    "right": win32defines.MK_RBUTTON,
    "middle": win32defines.MK_MBUTTON,
    "shift": win32defines.MK_SHIFT,
    "control": win32defines.MK_CONTROL,
}
#====================================================================
def _calc_flags_and_coords(pressed, coords):
    """Calculate the wparam flags and packed lparam point for mouse messages.

    ``pressed`` is a space-separated string of names from _mouse_flags
    (e.g. "shift control"); ``coords`` is an (x, y) pair.
    """
    flags = 0
    for key in pressed.split():
        flags |= _mouse_flags[key.lower()]
    # NOTE(review): argument order matches the original call -
    # MakeLong(coords[1], coords[0]) - verify against
    # win32functions.MakeLong's (high, low) convention before changing
    click_point = win32functions.MakeLong(coords[1], coords[0])
    return flags, click_point
#====================================================================
class _dummy_control(dict):
"A subclass of dict so that we can assign attributes"
pass
#====================================================================
def GetDialogPropsFromHandle(hwnd):
    """Get the properties of all the controls as a list of dictionaries

    * **hwnd** either a raw window handle or an (already wrapped)
      HwndWrapper instance for the dialog.

    Returns a list of _dummy_control dictionaries - the first entry is
    the dialog itself, followed by one entry per child control.  Each
    entry has a ``handle`` attribute and a 'Rectangle' value offset so
    it is relative to the dialog's rectangle.
    """
    # wrap the dialog handle and start a new list for the
    # controls on the dialog
    try:
        # hwnd is already a wrapper - use it and its children directly
        controls = [hwnd, ]
        controls.extend(hwnd.Children())
    except AttributeError:
        # hwnd was a raw handle - wrap it first, then add the children.
        # BUG FIX: this extend must only run in this branch; running it
        # unconditionally added every child twice when hwnd was already
        # a wrapper (the try branch above has already collected them).
        controls = [HwndWrapper(hwnd), ]
        controls.extend(controls[0].Children())
    props = []
    # Add each control to the properties for this dialog
    for ctrl in controls:
        # Get properties for each control and wrap them in
        # _dummy_control so that we can assign handle
        ctrl_props = _dummy_control(ctrl.GetProperties())
        # assign the handle
        ctrl_props.handle = ctrl.handle
        # offset the rectangle from the dialog rectangle
        ctrl_props['Rectangle'] -= controls[0].Rectangle()
        props.append(ctrl_props)
    return props
# Added basic handling for the mouse wheel.
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Basic wrapping of Windows controls"
__revision__ = "$Revision$"
# pylint: disable-msg=W0611
import time
import re
import ctypes
# the wrappers may be used in an environment that does not need
# the actions - as such I don't want to require sendkeys - so
# the following makes the import optional.
try:
import SendKeys
except ImportError:
pass
# I leave this optional because PIL is a large dependency
try:
import PIL.ImageGrab
except ImportError:
pass
from pywinauto import win32defines
from pywinauto import win32functions
from pywinauto import win32structures
from pywinauto.timings import Timings
#from pywinauto import findbestmatch
from pywinauto import handleprops
# also import MenuItemNotEnabled so that it is
# accessible from HwndWrapper module
from menuwrapper import Menu #, MenuItemNotEnabled
#====================================================================
class ControlNotEnabled(RuntimeError):
    """Raised when an action is attempted on a control that is disabled"""
    pass
#====================================================================
class ControlNotVisible(RuntimeError):
    """Raised when an action is attempted on a control that is not visible"""
    pass
#====================================================================
class InvalidWindowHandle(RuntimeError):
    """Raised when an invalid handle is passed to HwndWrapper"""
    def __init__(self, hwnd):
        """Initialise the RuntimeError parent with the message

        * **hwnd** the offending handle value (included in the message).
        """
        # BUG FIX: the original formatted a decimal value after a "0x"
        # prefix ("0x%d") - use %x so the prefix matches the number base.
        RuntimeError.__init__(self,
            "Handle 0x%x is not a valid window handle" % hwnd)
# metaclass that will know about
# metaclass that will know about
class _MetaWrapper(type):
    """Metaclass for Wrapper objects

    Keeps two registries mapping window class names to wrapper classes:
    * re_wrappers  - compiled-regex pattern -> wrapper class
    * str_wrappers - exact class-name string -> wrapper class (also used
      as a cache for names resolved through the regex registry)
    Every class created with this metaclass registers itself for each
    entry in its ``windowclasses`` attribute.
    """
    re_wrappers = {}
    str_wrappers = {}
    def __init__(cls, name, bases, attrs):
        # register the class names, both the regular expression
        # or the classes directly
        #print "metaclass __init__", cls
        type.__init__(cls, name, bases, attrs)
        # each windowclasses entry is treated as a regex pattern, but is
        # also registered verbatim for fast exact-string lookup
        for win_class in cls.windowclasses:
            _MetaWrapper.re_wrappers[re.compile(win_class)] = cls
            _MetaWrapper.str_wrappers[win_class] = cls
    def FindWrapper(handle):
        """Find the correct wrapper for this handle

        Resolution order: exact class-name match, then first regex match
        (cached for next time); a top level window always gets
        DialogWrapper; otherwise fall back to the generic HwndWrapper.
        """
        class_name = handleprops.classname(handle)
        try:
            return _MetaWrapper.str_wrappers[class_name]
        except KeyError:
            wrapper_match = None
            for regex, wrapper in _MetaWrapper.re_wrappers.items():
                if regex.match(class_name):
                    wrapper_match = wrapper
                    # cache the resolved name so the regex scan only
                    # happens once per window class
                    _MetaWrapper.str_wrappers[class_name] = wrapper
                    break
        # if it is a dialog then override the wrapper we found
        # and make it a DialogWrapper
        # (imported here to avoid a circular import at module load time)
        if handleprops.is_toplevel_window(handle):
            import win32_controls
            wrapper_match = win32_controls.DialogWrapper
        if wrapper_match is None:
            wrapper_match = HwndWrapper
        return wrapper_match
        #if handle in meta.wrappers:
        #    return meta.wrappers[handle]
    FindWrapper = staticmethod(FindWrapper)
#====================================================================
class HwndWrapper(object):
"""Default wrapper for controls.
All other wrappers are derived from this.
This class wraps a lot of functionality of underlying windows API
features for working with windows.
Most of the methods apply to every single window type. For example
you can Click() on any window.
Most of the methods of this class are simple wrappers around
    API calls and as such they try to do the simplest thing possible.
    A HwndWrapper object can be passed directly to a ctypes wrapped
    C function - and it will get converted to a Long with the value of
    its handle (see ctypes, _as_parameter_)"""
__metaclass__ = _MetaWrapper
friendlyclassname = None
windowclasses = []
handle = None
#-----------------------------------------------------------
    def __new__(cls, handle):
        """Create the most specific wrapper class registered for this handle

        The metaclass registry is consulted so that e.g. a Button handle
        yields a ButtonWrapper instance even when HwndWrapper(handle) is
        what was called.
        """
        new_class = cls.FindWrapper(handle)
        #super(currentclass, cls).__new__(cls[, ...])"
        obj = object.__new__(new_class)
        # NOTE(review): __init__ is invoked explicitly here; when the
        # returned object is an instance of cls, Python will call
        # __init__ a second time - presumably harmless as it only
        # re-assigns attributes, but worth confirming.
        obj.__init__(handle)
        return obj
#-----------------------------------------------------------
    def __init__(self, hwnd):
        """Initialize the control

        * **hwnd** is either a valid window handle or it can be an
          instance or subclass of HwndWrapper.

        If the handle is not valid then an InvalidWindowHandle error
        is raised.
        """
        # handle if hwnd is actually a HwndWrapper
        try:
            self.handle = hwnd.handle
        except AttributeError:
            self.handle = hwnd
        # verify that we have been passed in a valid windows handle
        if not win32functions.IsWindow(hwnd):
            raise InvalidWindowHandle(hwnd)
        # make it so that ctypes conversion happens correctly
        self._as_parameter_ = self.handle
        #win32functions.WaitGuiThreadIdle(self)
        # specify whether we need to grab an image of ourselves
        # when asked for properties
        self._NeedsImageProp = False
        # default to not having a reference control added
        self.ref = None
        # recorded application data for this control (set externally);
        # None means no recorded data is available
        self.appdata = None
        # cache for values that do not change over the window's lifetime
        # (class name, parent, top level parent, ...)
        self._cache = {}
        # build the list of default properties to be written
        # Derived classes can either modify this list or override
        # GetProperties depending on how much control they need.
        self.writable_props = [
            'Class',
            'FriendlyClassName',
            'Texts',
            'Style',
            'ExStyle',
            'ControlID',
            'UserData',
            'ContextHelpID',
            'Fonts',
            'ClientRects',
            'Rectangle',
            'IsVisible',
            'IsUnicode',
            'IsEnabled',
            'MenuItems',
            'ControlCount',
        ]
#-----------------------------------------------------------
    def FriendlyClassName(self):
        """Return the friendly class name for the control

        This differs from the class of the control in some cases.
        Class() is the actual 'Registered' window class of the control
        while FriendlyClassName() is hopefully something that will make
        more sense to the user.

        For example Checkboxes are implemented as Buttons - so the class
        of a CheckBox is "Button" - but the friendly class is "CheckBox"
        """
        # lazily resolve once; subclasses usually set friendlyclassname
        # as a class attribute so this fallback only runs for the
        # generic wrapper
        if self.friendlyclassname is None:
            self.friendlyclassname = handleprops.classname(self)
        return self.friendlyclassname
#-----------------------------------------------------------
def Class(self):
"""Return the class name of the window"""
if not self._cache.has_key("class"):
self._cache['class'] = handleprops.classname(self)
return self._cache['class']
#-----------------------------------------------------------
    def WindowText(self):
        """Window text of the control

        Quite a few controls have other text that is visible, for example
        Edit controls usually have an empty string for WindowText but still
        have text displayed in the edit window.
        """
        return handleprops.text(self)
#-----------------------------------------------------------
    def Style(self):
        """Returns the style of window

        Return value is a long.

        Combination of WS_* and specific control specific styles.
        See HwndWrapper.HasStyle() to easily check if the window has a
        particular style.
        """
        return handleprops.style(self)
#-----------------------------------------------------------
    def ExStyle(self):
        """Returns the Extended style of window

        Return value is a long.

        Combination of WS_EX_* and control specific extended styles.
        See HwndWrapper.HasExStyle() to easily check if the window has a
        particular extended style.
        """
        return handleprops.exstyle(self)
#-----------------------------------------------------------
    def ControlID(self):
        """Return the ID of the window

        Only controls have a valid ID - dialogs usually have no ID assigned.

        The ID usually identifies the control in the window - but there can
        be duplicate ID's for example labels in a dialog may have duplicate
        ID's.
        """
        return handleprops.controlid(self)
#-----------------------------------------------------------
    def UserData(self):
        """Extra data associated with the window

        This value is a long value that has been associated with the window
        and rarely has useful data (or at least data that you know the use
        of).
        """
        return handleprops.userdata(self)
#-----------------------------------------------------------
    def ContextHelpID(self):
        """Return the Context Help ID of the window"""
        return handleprops.contexthelpid(self)
#-----------------------------------------------------------
    def IsUnicode(self):
        """Whether the window is unicode or not

        A window is Unicode if it was registered by the Wide char version
        of RegisterClass(Ex).
        """
        return handleprops.isunicode(self)
#-----------------------------------------------------------
def IsVisible(self):
"""Whether the window is visible or not
Checks that both the Top Level Parent (probably dialog) that
owns this window and the window itself are both visible.
If you want to wait for a control to become visible (or wait
for it to become hidden) use ``Application.Wait('visible')`` or
``Application.WaitNot('visible')``.
If you want to raise an exception immediately if a window is
not visible then you can use the HwndWrapper.VerifyVisible().
HwndWrapper.VerifyActionable() raises if the window is not both
visible and enabled.
"""
return handleprops.isvisible(self.TopLevelParent()) and \
handleprops.isvisible(self)
#-----------------------------------------------------------
def IsEnabled(self):
"""Whether the window is enabled or not
Checks that both the Top Level Parent (probably dialog) that
owns this window and the window itself are both enabled.
If you want to wait for a control to become enabled (or wait
for it to become disabled) use ``Application.Wait('visible')`` or
``Application.WaitNot('visible')``.
If you want to raise an exception immediately if a window is
not enabled then you can use the HwndWrapper.VerifyEnabled().
HwndWrapper.VerifyReady() raises if the window is not both
visible and enabled.
"""
return handleprops.isenabled(self.TopLevelParent()) and \
handleprops.isenabled(self)
#-----------------------------------------------------------
    def Rectangle(self):
        """Return the rectangle of window

        The rectangle is the rectangle of the control on the screen,
        coordinates are given from the top left of the screen.

        This method returns a RECT structure, which has attributes - top,
        left, right, bottom. and has methods width() and height().
        See win32structures.RECT for more information.
        """
        return handleprops.rectangle(self)
#-----------------------------------------------------------
    def ClientRect(self):
        """Returns the client rectangle of window

        The client rectangle is the window rectangle minus any borders that
        are not available to the control for drawing.

        Both top and left are always 0 for this method.

        This method returns a RECT structure, which has attributes - top,
        left, right, bottom. and has methods width() and height().
        See win32structures.RECT for more information.
        """
        return handleprops.clientrect(self)
#-----------------------------------------------------------
    def Font(self):
        """Return the font of the window

        The font of the window is used to draw the text of that window.
        It is a structure which has attributes for Font name, height, width
        etc.

        See win32structures.LOGFONTW for more information.
        """
        return handleprops.font(self)
#-----------------------------------------------------------
    def ProcessID(self):
        """Return the ID of process that owns this window"""
        return handleprops.processid(self)
#-----------------------------------------------------------
    def HasStyle(self, style):
        """Return True if the control has the specified style"""
        return handleprops.has_style(self, style)
#-----------------------------------------------------------
    def HasExStyle(self, exstyle):
        """Return True if the control has the specified extended style"""
        return handleprops.has_exstyle(self, exstyle)
#-----------------------------------------------------------
def IsDialog(self):
"Return true if the control is a top level window"
if not self._cache.has_key("isdialog"):
self._cache['isdialog'] = handleprops.is_toplevel_window(self)
return self._cache['isdialog']
#-----------------------------------------------------------
def Parent(self):
"""Return the parent of this control
Note that the parent of a control is not necesarily a dialog or
other main window. A group box may be the parent of some radio
buttons for example.
To get the main (or top level) window then use
HwndWrapper.TopLevelParent().
"""
if not self._cache.has_key("parent"):
parent_hwnd = handleprops.parent(self)
if parent_hwnd:
#return WrapHandle(parent_hwnd)
self._cache["parent"] = HwndWrapper(parent_hwnd)
else:
self._cache["parent"] = None
return self._cache["parent"]
#-----------------------------------------------------------
def TopLevelParent(self):
"""Return the top level window of this control
The TopLevel parent is different from the parent in that the Parent
is the window that owns this window - but it may not be a dialog/main
window. For example most Comboboxes have an Edit. The ComboBox is the
parent of the Edit control.
This will always return a valid window handle (if the control has
no top level parent then the control itself is returned - as it is
a top level window already!)
"""
if not self._cache.has_key("top_level_parent"):
parent = self.Parent()
if self.IsDialog():
self._cache["top_level_parent"] = self
#return self
elif not parent:
self._cache["top_level_parent"] = self
#return self
elif not parent.IsDialog():
self._cache["top_level_parent"] = parent.TopLevelParent()
#return parent.TopLevelParent()
else:
self._cache["top_level_parent"] = parent
#return parent
return self._cache["top_level_parent"]
#-----------------------------------------------------------
def Texts(self):
"""Return the text for each item of this control"
It is a list of strings for the control. It is frequently over-ridden
to extract all strings from a control with multiple items.
It is always a list with one or more strings:
* First elemtent is the window text of the control
* Subsequent elements contain the text of any items of the
control (e.g. items in a listbox/combobox, tabs in a tabcontrol)
"""
texts = [self.WindowText(), ]
return texts
#-----------------------------------------------------------
    def ClientRects(self):
        """Return the client rect for each item in this control

        It is a list of rectangles for the control. It is frequently over-ridden
        to extract all rectangles from a control with multiple items.

        It is always a list with one or more rectangles:

        * First element is the client rectangle of the control
        * Subsequent elements contain the client rectangle of any items of
          the control (e.g. items in a listbox/combobox, tabs in a
          tabcontrol)
        """
        return [self.ClientRect(), ]
#-----------------------------------------------------------
    def Fonts(self):
        """Return the font for each item in this control

        It is a list of fonts for the control. It is frequently over-ridden
        to extract all fonts from a control with multiple items.

        It is always a list with one or more fonts:

        * First element is the control font
        * Subsequent elements contain the font of any items of
          the control (e.g. items in a listbox/combobox, tabs in a
          tabcontrol)
        """
        return [self.Font(), ]
#-----------------------------------------------------------
def Children(self):
"""Return the children of this control as a list
It returns a list of HwndWrapper (or subclass) instances, it
returns an empty list if there are no children.
"""
child_windows = handleprops.children(self)
return [HwndWrapper(hwnd) for hwnd in child_windows]
#-----------------------------------------------------------
    def ControlCount(self):
        """Return the number of children of this control"""
        return len(handleprops.children(self))
#-----------------------------------------------------------
    def IsChild(self, parent):
        """Return True if this window is a child of 'parent'.

        A window is a child of another window when it is a direct
        descendant of the other window, i.e. when 'parent' appears in
        the chain of parent windows for this control.
        """
        # Call the IsChild API function and convert the non-zero/zero
        # result to True/False
        return win32functions.IsChild(parent, self.handle) != 0
#-----------------------------------------------------------
    def SendMessage(self, message, wparam = 0 , lparam = 0):
        """Send a message to the control and wait for it to return

        Blocks until the window procedure has processed the message;
        see SendMessageTimeout() for a version that cannot hang.
        """
        return win32functions.SendMessage(self, message, wparam, lparam)
        #result = ctypes.c_long()
        #ret = win32functions.SendMessageTimeout(self, message, wparam, lparam,
        #    win32defines.SMTO_NORMAL, 400, ctypes.byref(result))
        #return result.value
#-----------------------------------------------------------
    def SendMessageTimeout(
        self,
        message,
        wparam = 0 ,
        lparam = 0,
        timeout = None,
        timeoutflags = win32defines.SMTO_NORMAL):
        """Send a message to the control and wait for it to return or to timeout

        If no timeout is given then a default timeout of .4 of a second will
        be used (Timings.sendmessagetimeout_timeout). ``timeout`` is in
        seconds; it is converted to milliseconds for the API call.
        """
        if timeout is None:
            timeout = Timings.sendmessagetimeout_timeout
        # the API writes the message result into this out-parameter
        result = ctypes.c_long()
        win32functions.SendMessageTimeout(self,
            message, wparam, lparam,
            timeoutflags, int(timeout * 1000),
            ctypes.byref(result))
        return result.value
#-----------------------------------------------------------
    def PostMessage(self, message, wparam = 0 , lparam = 0):
        """Post a message to the control message queue and return

        Unlike SendMessage this does not wait for the message to be
        processed by the window.
        """
        return win32functions.PostMessage(self, message, wparam, lparam)
        #result = ctypes.c_long()
        #ret = win32functions.SendMessageTimeout(self, message, wparam, lparam,
        #    win32defines.SMTO_NORMAL, 400, ctypes.byref(result))
        #return result.value
# #-----------------------------------------------------------
# def NotifyMenuSelect(self, menu_id):
# """Notify the dialog that one of it's menu items was selected
#
# **This method is Deprecated**
# """
#
# import warnings
# warning_msg = "HwndWrapper.NotifyMenuSelect() is deprecated - " \
# "equivalent functionality is being moved to the MenuWrapper class."
# warnings.warn(warning_msg, DeprecationWarning)
#
# self.SetFocus()
#
# msg = win32defines.WM_COMMAND
# return self.SendMessageTimeout(
# msg,
# win32functions.MakeLong(0, menu_id), #wparam
# )
#
#-----------------------------------------------------------
    def NotifyParent(self, message):
        """Send the notification message to parent of this control

        Posts a WM_COMMAND with the notification code in the high word
        and this control's ID in the low word; this control's handle is
        passed as the lparam.
        """
        return self.Parent().PostMessage(
            win32defines.WM_COMMAND,
            win32functions.MakeLong(message, self.ControlID()),
            self)
#-----------------------------------------------------------
def GetProperties(self):
"Return the properties of the control as a dictionary"
props = {}
# for each of the properties that can be written out
for propname in self.writable_props:
# set the item in the props dictionary keyed on the propname
props[propname] = getattr(self, propname)()
if self._NeedsImageProp:
props["Image"] = self.CaptureAsImage()
return props
#-----------------------------------------------------------
    def CaptureAsImage(self):
        """Return a PIL image of the control

        See PIL documentation to know what you can do with the resulting
        image.

        Returns None when the control has a zero-size rectangle or when
        PIL is not installed.
        """
        # nothing to capture for a zero-width or zero-height control
        if not (self.Rectangle().width() and self.Rectangle().height()):
            return None
        # get the control rectangle in a way that PIL likes it
        box = (
            self.Rectangle().left,
            self.Rectangle().top,
            self.Rectangle().right,
            self.Rectangle().bottom)
        # grab the image and get raw data as a string
        # wrapped in try because PIL is optional
        try:
            return PIL.ImageGrab.grab(box)
        # if that fails due to a NameError - it is most likely because
        # PIL was not found - and the package not loaded
        except NameError:
            pass
#-----------------------------------------------------------
    def __hash__(self):
        """Returns the hash value of the handle

        Keeps __hash__ consistent with __eq__, which also compares by
        handle value.
        """
        return hash(self.handle)
#-----------------------------------------------------------
def __eq__(self, other):
"Returns True if the handles of both controls are the same"
if isinstance(other, HwndWrapper):
return self.handle == other.handle
else:
return self.handle == other
#-----------------------------------------------------------
    def VerifyActionable(self):
        """Verify that the control is both visible and enabled

        Raise either ControlNotEnabled or ControlNotVisible if not
        enabled or visible respectively.
        """
        # let the GUI thread settle before checking the window state
        win32functions.WaitGuiThreadIdle(self)
        self.VerifyVisible()
        self.VerifyEnabled()
#-----------------------------------------------------------
    def VerifyEnabled(self):
        """Verify that the control is enabled

        Checks both the control and its top level parent (via
        IsEnabled()); raises ControlNotEnabled when either is disabled.
        """
        # Check if the control and it's parent are enabled
        if not self.IsEnabled():
            raise ControlNotEnabled()
#-----------------------------------------------------------
    def VerifyVisible(self):
        """Verify that the control is visible

        Checks both the control and its top level parent (via
        IsVisible()); raises ControlNotVisible when either is hidden.
        """
        # check if the control and it's parent are visible
        if not self.IsVisible():
            raise ControlNotVisible()
#-----------------------------------------------------------
    def Click(
        self, button = "left", pressed = "", coords = (0, 0), double = False):
        """Simulates a mouse click on the control

        This method sends WM_* messages to the control, to do a more
        'realistic' mouse click use ClickInput() which uses SendInput() API
        to perform the click.

        This method does not require that the control be visible on the screen
        (i.e. it can be hidden beneath another window and it will still work.)

        Returns self so calls can be chained.
        """
        _perform_click(self, button, pressed, coords, double)
        return self
#-----------------------------------------------------------
    def ClickInput(
        self, button = "left", coords = (None, None), double = False, wheel_dist = 0):
        """Click at the specified coordinates

        * **button** The mouse button to click. One of 'left', 'right',
          'middle' or 'x' (Default: 'left')
        * **coords** The coordinates to click at.(Default: center of control)
        * **double** Whether to perform a double click or not (Default: False)
        * **wheel_dist** Distance to move the mouse wheel (Default: 0)

        This is different from Click in that it requires the control to
        be visible on the screen but performs a more realistic 'click'
        simulation.

        This method is also vulnerable if the mouse is moved by the user
        as that could easily move the mouse off the control before the
        Click has finished.
        """
        _perform_click_input(self, button, coords, double, wheel_dist = wheel_dist)
#-----------------------------------------------------------
    def CloseClick(
        self, button = "left", pressed = "", coords = (0, 0), double = False):
        """Perform a click action that should make the window go away

        The only difference from Click is that there are extra delays
        before and after the click action, and that it waits (up to a
        timeout) for the window and its parent to actually disappear.

        Returns self so calls can be chained.
        """
        time.sleep(Timings.before_closeclick_wait)
        _perform_click(self, button, pressed, coords, double)
        start = time.time()
        timeout = Timings.closeclick_dialog_close_wait
        # Keep waiting until both this control and it's parent
        # are no longer valid controls
        while (win32functions.IsWindow(self) or \
            win32functions.IsWindow(self.Parent())) and \
            time.time() - start < timeout:
            # sleep in small slices so we never overshoot the timeout
            time.sleep(min(
                Timings.closeclick_retry,
                timeout - (time.time() - start) ))
        time.sleep(Timings.after_closeclick_wait)
        return self
#-----------------------------------------------------------
    def DoubleClick(
        self, button = "left", pressed = "", coords = (0, 0)):
        """Perform a double click action (via WM_* messages)

        Returns self so calls can be chained.
        """
        _perform_click(self, button, pressed, coords, double = True)
        return self
#-----------------------------------------------------------
    def DoubleClickInput(self, button = "left", coords = (None, None)):
        """Double click at the specified coordinates (via SendInput)"""
        _perform_click_input(self, button, coords, double = True)
#-----------------------------------------------------------
    def RightClick(
        self, pressed = "", coords = (0, 0)):
        """Perform a right click action (via WM_* messages)

        Sent as a separate button-down then button-up so the 'right'
        button is reported as pressed during the down message.
        Returns self so calls can be chained.
        """
        _perform_click(
            self, "right", "right " + pressed, coords, button_up = False)
        _perform_click(self, "right", pressed, coords, button_down = False)
        return self
#-----------------------------------------------------------
    def RightClickInput(self, coords = (None, None)):
        """Right click at the specified coords (via SendInput)"""
        _perform_click_input(self, 'right', coords)
#-----------------------------------------------------------
    def PressMouse(self, button = "left", pressed = "", coords = (0, 0)):
        """Press the mouse button (send only the button-down message)

        Returns self so calls can be chained.
        """
        #flags, click_point = _calc_flags_and_coords(pressed, coords)
        _perform_click(self, button, pressed, coords, button_up = False)
        return self
#-----------------------------------------------------------
    def PressMouseInput(self, button = "left", coords = (None, None)):
        """Press a mouse button using SendInput (no button-up is sent)"""
        _perform_click_input(self, button, coords, button_up = False)
#-----------------------------------------------------------
    def ReleaseMouse(self, button = "left", pressed = "", coords = (0, 0)):
        """Release the mouse button (send only the button-up message)

        Returns self so calls can be chained.
        """
        #flags, click_point = _calc_flags_and_coords(pressed, coords)
        _perform_click(self, button, pressed, coords, button_down = False)
        return self
#-----------------------------------------------------------
    def ReleaseMouseInput(self, button = "left", coords = (None, None)):
        """Release the mouse button using SendInput (no button-down is sent)"""
        _perform_click_input(self, button, coords, button_down = False)
#-----------------------------------------------------------
    def MoveMouse(self, pressed = "left", coords = (0, 0)):
        """Move the mouse (send a WM_MOUSEMOVE to the control)

        * **pressed** names of buttons/modifiers held during the move
        * **coords** the (x, y) position to report in the message

        Returns self so calls can be chained.
        """
        flags, click_point = _calc_flags_and_coords(pressed, coords)
        self.SendMessageTimeout(win32defines.WM_MOUSEMOVE, flags, click_point)
        # wait until the thread can accept another message
        win32functions.WaitGuiThreadIdle(self)
        return self
#-----------------------------------------------------------
    def DragMouse(self,
        button = "left",
        pressed = "",
        press_coords = (0, 0),
        release_coords = (0, 0)):
        """Drag the mouse: press at press_coords, release at release_coords

        Returns self so calls can be chained.
        """
        self.PressMouse(button, pressed, press_coords)
        # NOTE(review): the intermediate move goes to press_coords, not
        # release_coords - looks like it should move towards the release
        # point; confirm whether the target control relies on this.
        self.MoveMouse(pressed, press_coords)
        self.ReleaseMouse(button, pressed, release_coords)
        return self
#-----------------------------------------------------------
    def SetWindowText(self, text, append = False):
        """Set the text of the window

        * **text** the new window text
        * **append** when True, append ``text`` to the current text
          instead of replacing it

        Returns self so calls can be chained.
        """
        self.VerifyActionable()
        if append:
            text = self.WindowText() + text
        # NOTE(review): the c_wchar_p buffer is posted asynchronously via
        # WM_SETTEXT - presumably the receiver copies it before this local
        # goes away; confirm there is no lifetime issue with PostMessage.
        text = ctypes.c_wchar_p(unicode(text))
        self.PostMessage(win32defines.WM_SETTEXT, 0, text)
        win32functions.WaitGuiThreadIdle(self)
        return self
#-----------------------------------------------------------
    def TypeKeys(
        self,
        keys,
        pause = None,
        with_spaces = False,
        with_tabs = False,
        with_newlines = False,
        turn_off_numlock = True):
        """Type keys to the window using SendKeys

        This uses the SendKeys python module from
        http://www.rutherfurd.net/python/sendkeys/ .This is the best place
        to find documentation on what to use for the ``keys``.

        * **pause** delay between keystrokes; defaults to
          Timings.after_sendkeys_key_wait

        Returns self so calls can be chained.
        """
        self.VerifyActionable()
        if pause is None:
            pause = Timings.after_sendkeys_key_wait
        self.SetFocus()
        # attach the Python process with the process that self is in
        # so that SetForegroundWindow is allowed to succeed
        win32functions.AttachThreadInput(
            win32functions.GetCurrentThreadId(), self.ProcessID(), 1)
        # make sure that the control is in the foreground
        win32functions.SetForegroundWindow(self)
        #win32functions.SetActiveWindow(self)
        # Play the keys to the active window
        SendKeys.SendKeys(
            keys.encode('mbcs'),
            pause, with_spaces,
            with_tabs,
            with_newlines,
            turn_off_numlock)
        # detach the python process from the window's process
        win32functions.AttachThreadInput(
            win32functions.GetCurrentThreadId(), self.ProcessID(), 0)
        win32functions.WaitGuiThreadIdle(self)
        return self
#-----------------------------------------------------------
def DebugMessage(self, text):
"Write some debug text over the window"
# don't draw if dialog is not visible
dc = win32functions.CreateDC(u"DISPLAY", None, None, None )
if not dc:
raise ctypes.WinError()
rect = self.Rectangle
#ret = win32functions.TextOut(
# dc, rect.left, rect.top, unicode(text), len(text))
ret = win32functions.DrawText(
dc,
unicode(text),
len(text),
ctypes.byref(rect),
win32defines.DT_SINGLELINE)
# delete the Display context that we created
win32functions.DeleteDC(dc)
if not ret:
raise ctypes.WinError()
return self
#-----------------------------------------------------------
    def DrawOutline(
        self,
        colour = 'green',
        thickness = 2,
        fill = win32defines.BS_NULL,
        rect = None):
        """Draw an outline around the window

        * **colour** can be either an integer or one of 'red', 'green', 'blue'
          (default 'green')
        * **thickness** thickness of rectangle (default 2)
        * **fill** how to fill in the rectangle (default BS_NULL)
        * **rect** the coordinates of the rectangle to draw (defaults to
          the rectangle of the control.
        """
        # don't draw if dialog is not visible
        if not self.IsVisible():
            return
        # colour values are 0x00BBGGRR (COLORREF byte order)
        colours = {
            "green" : 0x00ff00,
            "blue" : 0xff0000,
            "red" : 0x0000ff,
        }
        # if it's a known colour name, translate to the COLORREF value
        if colour in colours:
            colour = colours[colour]
        if not rect:
            rect = self.Rectangle()
        # create the pen(outline)
        pen_handle = win32functions.CreatePen(
            win32defines.PS_SOLID, thickness, colour)
        # create the brush (inside)
        brush = win32structures.LOGBRUSH()
        brush.lbStyle = fill
        brush.lbHatch = win32defines.HS_DIAGCROSS
        brush_handle = win32functions.CreateBrushIndirect(ctypes.byref(brush))
        # get the Device Context
        dc = win32functions.CreateDC(u"DISPLAY", None, None, None )
        # push our objects into it
        win32functions.SelectObject(dc, brush_handle)
        win32functions.SelectObject(dc, pen_handle)
        # draw the rectangle to the DC
        win32functions.Rectangle(
            dc, rect.left, rect.top, rect.right, rect.bottom)
        # Delete the brush and pen we created
        win32functions.DeleteObject(brush_handle)
        win32functions.DeleteObject(pen_handle)
        # delete the Display context that we created
        win32functions.DeleteDC(dc)
#-----------------------------------------------------------
    def PopupWindow(self):
        """Return any owned Popups

        Please do not use in production code yet - not tested fully
        """
        # NOTE(review): GW_HWNDNEXT returns the next window in Z order,
        # not necessarily an owned popup - confirm this is intended.
        popup = win32functions.GetWindow(self, win32defines.GW_HWNDNEXT)
        return popup
#-----------------------------------------------------------
def Owner(self):
"""Return the owner window for the window if it exists
Returns None if there is no owner"""
owner = win32functions.GetWindow(self, win32defines.GW_OWNER)
if owner:
return HwndWrapper(owner)
else:
return None
#-----------------------------------------------------------
# def ContextMenuSelect(self, path, x = None, y = None):
# "TODO ContextMenuSelect Not Implemented"
# pass
# #raise NotImplementedError(
# # "HwndWrapper.ContextMenuSelect not implemented yet")
#-----------------------------------------------------------
    def _menu_handle(self):
        """Simple overridable method to get the menu handle

        Subclasses with non-standard menus can override this.
        """
        return win32functions.GetMenu(self)
#-----------------------------------------------------------
    def Menu(self):
        """Return the menu of the control (wrapped in a Menu object)"""
        return Menu(self, self._menu_handle())
#-----------------------------------------------------------
    def MenuItem(self, path):
        """Return the menu item specified by path

        Path can be a string in the form "MenuItem->MenuItem->MenuItem..."
        where each MenuItem is the text of an item at that level of the menu.
        E.g. ::

            File->Export->ExportAsPNG

        spaces are not important so you could also have written... ::

            File -> Export -> Export As PNG
        """
        # pass any recorded application data for the menu so items can
        # be matched even when the live menu text differs
        if self.appdata is not None:
            menu_appdata = self.appdata['MenuItems']
        else:
            menu_appdata = None
        # GetMenuPath returns the full chain of items - the last one is
        # the item the path ultimately refers to
        return self.Menu().GetMenuPath(path, appdata = menu_appdata)[-1]
#-----------------------------------------------------------
def MenuItems(self):
    """Return the menu items for the dialog

    An empty list is returned when the window is not a dialog.
    """
    # guard clause: non-dialog windows have no menu items to report
    if not self.IsDialog():
        return []
    return self.Menu().GetProperties()
# #-----------------------------------------------------------
# def MenuClick(self, path):
# "Select the menuitem specifed in path"
#
# self.VerifyActionable()
#
# self.SetFocus()
#
# menu = Menu(self, self._menu_handle())
#
# path_items = menu.GetMenuPath(path)
#
# for menu_item in path_items:
# if not menu_item.IsEnabled():
# raise MenuItemNotEnabled(
# "MenuItem '%s' is disabled"% menu_item.Text())
#
# menu_item.Click()
#
# return self
#-----------------------------------------------------------
def MenuSelect(self, path):
    """Select (click) the menu item specified in ``path``."""
    # make sure the window is able to receive input first
    self.VerifyActionable()
    self.MenuItem(path).Select()
#-----------------------------------------------------------
def MoveWindow(
    self,
    x = None,
    y = None,
    width = None,
    height = None,
    repaint = True):
    """Move the window to the new coordinates

    * **x** Specifies the new left position of the window.
      Defaults to the current left position of the window.
      A rectangle-like object (with top/left/width()/height())
      may also be passed here to set all four values at once.
    * **y** Specifies the new top position of the window.
      Defaults to the current top position of the window.
    * **width** Specifies the new width of the window. Defaults to the
      current width of the window.
    * **height** Specifies the new height of the window. Default to the
      current height of the window.
    * **repaint** Whether the window should be repainted or not.
      Defaults to True

    Raises WinError if the underlying MoveWindow call fails.
    """
    cur_rect = self.Rectangle()
    # if no X is specified - so use current coordinate
    if x is None:
        x = cur_rect.left
    else:
        # x may be a rectangle-like object: unpack all four values.
        # NOTE(review): if the object has 'top' but lacks width()/height(),
        # the AttributeError leaves it partially applied - confirm callers
        # only ever pass complete RECT-like objects.
        try:
            y = x.top
            width = x.width()
            height = x.height()
            x = x.left
        except AttributeError:
            pass
    # if no Y is specified - so use current coordinate
    if y is None:
        y = cur_rect.top
    # if no width is specified - so use current width
    if width is None:
        width = cur_rect.width()
    # if no height is specified - so use current height
    if height is None:
        height = cur_rect.height()
    # ask for the window to be moved
    ret = win32functions.MoveWindow(self, x, y, width, height, repaint)
    # check that it worked correctly
    if not ret:
        raise ctypes.WinError()
    # let the GUI thread settle before returning to the caller
    win32functions.WaitGuiThreadIdle(self)
    time.sleep(Timings.after_movewindow_wait)
#-----------------------------------------------------------
def Close(self):
    """Close the window

    Posts WM_CLOSE and then polls until the window is gone or the
    timeout expires (the window may ignore the close request).

    Code modified from http://msdn.microsoft.com/msdnmag/issues/02/08/CQA/
    """
    # tell the window it must close
    self.PostMessage(win32defines.WM_CLOSE)
    start = time.time()
    # Keeps trying while
    #    we have not timed out and
    #    window is still a valid handle and
    #    window is still visible
    # any one of these conditions evaluates to false means the window is
    # closed
    while (
        (time.time() - start) < Timings.after_windowclose_timeout and
        win32functions.IsWindow(self) and
        self.IsVisible()):

        # never sleep past the remaining timeout budget
        time.sleep(min(
            Timings.after_windowclose_retry,
            Timings.after_windowclose_timeout - (time.time() - start) ))

    # # get a handle we can wait on
    # process_wait_handle = win32functions.OpenProcess(
    #     win32defines.SYNCHRONIZE | win32defines.PROCESS_TERMINATE ,
    #     False ,
    #     self.ProcessID())
    #
    # # wait for the window to close
    # win32functions.WaitForSingleObject(
    #     process_wait_handle,
    #     )
#-----------------------------------------------------------
def Maximize(self):
    """Maximize the window (via ShowWindow with SW_MAXIMIZE)."""
    win32functions.ShowWindow(self, win32defines.SW_MAXIMIZE)
#-----------------------------------------------------------
def Minimize(self):
    """Minimize the window (via ShowWindow with SW_MINIMIZE)."""
    win32functions.ShowWindow(self, win32defines.SW_MINIMIZE)
#-----------------------------------------------------------
def Restore(self):
    """Restore the window"""
    # do it twice just in case the window was minimized from being
    # maximized - because then the window would come up maximized
    # after the first ShowWindow, and Restored after the 2nd
    win32functions.ShowWindow(self, win32defines.SW_RESTORE)
    win32functions.ShowWindow(self, win32defines.SW_RESTORE)
#-----------------------------------------------------------
def GetShowState(self):
    """Get the show state and Maximized/minimzed/restored state

    Returns a value that is a union of the following

    * SW_HIDE the window is hidden.
    * SW_MAXIMIZE the window is maximized
    * SW_MINIMIZE the window is minimized
    * SW_RESTORE the window is in the 'restored'
      state (neither minimized or maximized)
    * SW_SHOW The window is not hidden

    Raises WinError if GetWindowPlacement fails.
    """
    wp = win32structures.WINDOWPLACEMENT()
    # NOTE(review): 'lenght' looks misspelt but presumably matches the
    # (equally misspelt) field name declared on the project's
    # WINDOWPLACEMENT structure - confirm against win32structures
    # before "fixing" the spelling here.
    wp.lenght = ctypes.sizeof(wp)
    ret = win32functions.GetWindowPlacement(self, ctypes.byref(wp))
    if not ret:
        raise ctypes.WinError()
    return wp.showCmd
#-----------------------------------------------------------
def GetFocus(self):
    """Return the control in the process of this window that has the Focus

    Returns None if the GUI thread information could not be retrieved.
    """
    gui_info = win32structures.GUITHREADINFO()
    # cbSize must be initialised before calling GetGUIThreadInfo
    gui_info.cbSize = ctypes.sizeof(gui_info)
    # query the GUI state of the thread that owns this window
    ret = win32functions.GetGUIThreadInfo(
        win32functions.GetWindowThreadProcessId(self, 0),
        ctypes.byref(gui_info))
    if not ret:
        return None
    return HwndWrapper(gui_info.hwndFocus)
#-----------------------------------------------------------
def SetFocus(self):
    """Set the focus to this control

    Bring the window to the foreground first if necessary.
    Returns self so calls can be chained.
    """
    # find the current foreground window
    cur_foreground = win32functions.GetForegroundWindow()

    # if it is already foreground then just return
    if self.handle != cur_foreground:
        # get the thread of the window that is in the foreground
        cur_fore_thread = win32functions.GetWindowThreadProcessId(
            cur_foreground, 0)

        # get the thread of the window that we want to be in the foreground
        control_thread = win32functions.GetWindowThreadProcessId(self, 0)

        # if a different thread owns the active window
        if cur_fore_thread != control_thread:
            # Attach the two threads and set the foreground window.
            # AttachThreadInput is required because Windows only lets the
            # thread that owns the foreground window change the foreground.
            win32functions.AttachThreadInput(
                cur_fore_thread, control_thread, True)

            win32functions.SetForegroundWindow(self)

            # detach the thread again
            win32functions.AttachThreadInput(
                cur_fore_thread, control_thread, False)

        else:   # same threads - just set the foreground window
            win32functions.SetForegroundWindow(self)

        # make sure that we are idle before returning
        win32functions.WaitGuiThreadIdle(self)

        # only sleep if we had to change something!
        time.sleep(Timings.after_setfocus_wait)

    return self
#-----------------------------------------------------------
def SetApplicationData(self, appdata):
    """Application data is data from a previous run of the software

    It is essential for running scripts written for one spoken language
    on a different spoken language (e.g. matching localized menu text).
    """
    # stored for later use by e.g. MenuItem()
    self.appdata = appdata
# Lookup table mapping (direction, amount) to the SB_* scroll-bar
# constant to pass with WM_HSCROLL/WM_VSCROLL (used by Scroll()).
_scroll_types = {"left": {
        "line" : win32defines.SB_LINELEFT,
        "page" : win32defines.SB_PAGELEFT,
        "end" :  win32defines.SB_LEFT,
        },
    "right": {
        "line" : win32defines.SB_LINERIGHT,
        "page" : win32defines.SB_PAGERIGHT,
        "end" :  win32defines.SB_RIGHT,
        },
    "up": {
        "line" : win32defines.SB_LINEUP,
        "page" : win32defines.SB_PAGEUP,
        "end" :  win32defines.SB_TOP,
        },
    "down": {
        "line" : win32defines.SB_LINEDOWN,
        "page" : win32defines.SB_PAGEDOWN,
        "end" :  win32defines.SB_BOTTOM,
        },
    }
#-----------------------------------------------------------
def Scroll(self, direction, amount, count = 1):
    """Ask the control to scroll itself

    * **direction** one of "up", "down", "left", "right" (case
      insensitive)
    * **amount** one of "line", "page", "end" (case insensitive)
    * **count** (optional) the number of times to scroll, default 1

    Returns self so calls can be chained.
    Raises ValueError if direction or amount is not recognised.
    """
    direction = direction.lower()
    amount = amount.lower()

    # check which message we want to send
    if direction in ("left", "right"):
        message = win32defines.WM_HSCROLL
    elif direction in ("up", "down"):
        message = win32defines.WM_VSCROLL
    else:
        # previously an unknown direction fell through and raised a
        # confusing UnboundLocalError on 'message' - fail clearly instead
        raise ValueError(
            "direction must be one of 'up', 'down', 'left', 'right'")

    # the constant that matches direction, and how much
    try:
        scroll_type = HwndWrapper._scroll_types[direction][amount]
    except KeyError:
        raise ValueError(
            "amount must be one of 'line', 'page', 'end'")

    # Scroll as often as we have been asked to
    for _ in range(count):
        self.SendMessage(message, scroll_type)

    return self
#
#def MouseLeftClick():
# pass
#def MouseRightClick():
# pass
#def MouseDoubleClick():
# pass
#def MouseDown():
# pass
#def MouseUp():
# pass
#def MoveMouse():
# pass
#def DragMouse():
# pass
#
#def LeftClick(x, y):
# win32defines.MOUSEEVENTF_LEFTDOWN
# win32defines.MOUSEEVENTF_LEFTUP
#
# # set the cursor position
# win32functions.SetCursorPos(x, y)
# time.sleep(Timings.after_setcursorpos_wait)
#
# inp_struct = win32structures.INPUT()
# inp_struct.type = win32defines.INPUT_MOUSE
# for event in (win32defines.MOUSEEVENTF_LEFTDOWN, win32defines.MOUSEEVENTF_LEFTUP):
# inp_struct._.mi.dwFlags = event
# win32functions.SendInput(
# 1,
# ctypes.pointer(inp_struct),
# ctypes.sizeof(inp_struct))
#
# time.sleep(Timings.after_clickinput_wait)
#====================================================================
def _perform_click_input(
    ctrl = None,
    button = "left",
    coords = (None, None),
    double = False,
    button_down = True,
    button_up = True,
    absolute = False,
    wheel_dist = 0):
    """Perform a click action using SendInput

    All the *ClickInput() and *MouseInput() methods use this function.

    * **ctrl** the control to click on (the desktop window when None)
    * **button** "left", "right", "middle", "x" or "wheel"
    * **coords** (x, y) position; a missing value defaults to the
      centre of the control
    * **double** send the event sequence twice (full clicks only)
    * **button_down** / **button_up** which half (or both) of the click
      to send
    * **absolute** treat coords as screen coordinates instead of
      control-relative ones
    * **wheel_dist** scroll distance, used only with button "wheel"
    """
    # normalise once instead of calling .lower() in every comparison
    button = button.lower()

    # build the list of mouse events to send
    events = []
    if button == 'left':
        if button_down:
            events.append(win32defines.MOUSEEVENTF_LEFTDOWN)
        if button_up:
            events.append(win32defines.MOUSEEVENTF_LEFTUP)
    elif button == 'right':
        if button_down:
            events.append(win32defines.MOUSEEVENTF_RIGHTDOWN)
        if button_up:
            events.append(win32defines.MOUSEEVENTF_RIGHTUP)
    elif button == 'middle':
        if button_down:
            events.append(win32defines.MOUSEEVENTF_MIDDLEDOWN)
        if button_up:
            events.append(win32defines.MOUSEEVENTF_MIDDLEUP)
    elif button == 'x':
        if button_down:
            events.append(win32defines.MOUSEEVENTF_XDOWN)
        if button_up:
            events.append(win32defines.MOUSEEVENTF_XUP)
    # was a bare 'if' - made part of the chain for consistency
    # (behaviour unchanged: the branches are mutually exclusive)
    elif button == 'wheel':
        events.append(win32defines.MOUSEEVENTF_WHEEL)

    # if we were asked to double click (and we are doing a full click
    # not just up or down) then repeat the whole event sequence
    if double and button_down and button_up:
        events *= 2

    # default to the desktop window when no control was given
    # (was "ctrl == None" - identity comparison is the correct idiom)
    if ctrl is None:
        ctrl = HwndWrapper(win32functions.GetDesktopWindow())
    else:
        ctrl.SetFocus()

    coords = list(coords)

    # set the default coordinates to the centre of the control
    if coords[0] is None:
        coords[0] = ctrl.Rectangle().width() / 2
    if coords[1] is None:
        coords[1] = ctrl.Rectangle().height() / 2

    # convert control-relative coordinates to screen coordinates
    if not absolute:
        coords[0] = coords[0] + ctrl.Rectangle().left
        coords[1] = coords[1] + ctrl.Rectangle().top

    # set the cursor position
    win32functions.SetCursorPos(coords[0], coords[1])
    time.sleep(Timings.after_setcursorpos_wait)

    inp_struct = win32structures.INPUT()
    inp_struct.type = win32defines.INPUT_MOUSE

    # send each event individually through SendInput
    for event in events:
        inp_struct._.mi.dwFlags = event
        if button == 'wheel':
            inp_struct._.mi.mouseData = wheel_dist
        else:
            inp_struct._.mi.mouseData = 0
        win32functions.SendInput(
            1,
            ctypes.pointer(inp_struct),
            ctypes.sizeof(inp_struct))

    time.sleep(Timings.after_clickinput_wait)
#====================================================================
def _perform_click(
    ctrl,
    button = "left",
    pressed = "",
    coords = (0, 0),
    double = False,
    button_down = True,
    button_up = True):
    """Low level method for performing click operations

    Sends WM_*BUTTON* messages directly to the control (as opposed to
    _perform_click_input which uses SendInput).

    * **ctrl** the control to click; must be actionable
    * **button** "left", "middle" or "right"
    * **pressed** space-separated modifier names (see _mouse_flags)
    * **coords** (x, y) client coordinates packed into the lParam
    * **double** send the double-click message sequence
    * **button_down** / **button_up** which half of the click to send
    """
    ctrl.VerifyActionable()

    # figure out the messages for click/press
    msgs  = []
    if not double:
        if button.lower() == "left":
            if button_down:
                msgs.append(win32defines.WM_LBUTTONDOWN)
            if button_up:
                msgs.append(win32defines.WM_LBUTTONUP)

        elif button.lower() == "middle":
            if button_down:
                msgs.append(win32defines.WM_MBUTTONDOWN)
            if button_up:
                msgs.append(win32defines.WM_MBUTTONUP)

        elif button.lower() == "right":
            if button_down:
                msgs.append(win32defines.WM_RBUTTONDOWN)
            if button_up:
                msgs.append(win32defines.WM_RBUTTONUP)

    # figure out the messages for double clicking
    # (down/up/dblclk/up is the sequence Windows itself generates)
    else:
        if button.lower() == "left":
            msgs = (
                win32defines.WM_LBUTTONDOWN,
                win32defines.WM_LBUTTONUP,
                win32defines.WM_LBUTTONDBLCLK,
                win32defines.WM_LBUTTONUP)
        elif button.lower() == "middle":
            msgs = (
                win32defines.WM_MBUTTONDOWN,
                win32defines.WM_MBUTTONUP,
                win32defines.WM_MBUTTONDBLCLK,
                win32defines.WM_MBUTTONUP)
        elif button.lower() == "right":
            msgs = (
                win32defines.WM_RBUTTONDOWN,
                win32defines.WM_RBUTTONUP,
                win32defines.WM_RBUTTONDBLCLK,
                win32defines.WM_RBUTTONUP)

    # figure out the flags and pack coordinates
    flags, click_point = _calc_flags_and_coords(pressed, coords)

    # send each message
    for msg in msgs:
        ctrl.SendMessageTimeout(msg, flags, click_point)
        #ctrl.PostMessage(msg, flags, click_point)
        #flags = 0

    # wait until the thread can accept another message
    win32functions.WaitGuiThreadIdle(ctrl)

    # wait a certain(short) time after the click
    time.sleep(Timings.after_click_wait)
# Map of button/modifier names to the MK_* flag values combined into the
# wParam of mouse messages (consumed by _calc_flags_and_coords).
_mouse_flags = {
    "left": win32defines.MK_LBUTTON,
    "right": win32defines.MK_RBUTTON,
    "middle": win32defines.MK_MBUTTON,
    "shift": win32defines.MK_SHIFT,
    "control": win32defines.MK_CONTROL,
}
#====================================================================
def _calc_flags_and_coords(pressed, coords):
    "Calculate the flags to use and the coordinates for mouse actions"
    # OR together the MK_* flag for each name in the 'pressed' string
    flags = 0
    for key_name in pressed.split():
        flags |= _mouse_flags[key_name.lower()]

    # pack (x, y) into one LPARAM - y in the high word, x in the low word
    packed_point = win32functions.MakeLong(coords[1], coords[0])
    return flags, packed_point
#====================================================================
class _dummy_control(dict):
"A subclass of dict so that we can assign attributes"
pass
#====================================================================
def GetDialogPropsFromHandle(hwnd):
    """Get the properties of all the controls as a list of dictionaries

    ``hwnd`` may be either an already-wrapped window (anything with
    Children()) or a raw handle, which will be wrapped here.
    Each entry in the returned list is a _dummy_control dict of the
    control's properties with a ``handle`` attribute attached, and its
    'Rectangle' offset relative to the dialog's rectangle.
    """
    # wrap the dialog handle and start a new list for the
    # controls on the dialog
    try:
        controls = [hwnd, ]
        controls.extend(hwnd.Children())
    except AttributeError:
        # hwnd was a raw handle - wrap it first
        controls = [HwndWrapper(hwnd), ]

        # add all the children of the dialog
        # NOTE(review): this extend is placed inside the except branch so
        # children are not collected twice - confirm against the original
        # (whitespace-mangled) source's indentation.
        controls.extend(controls[0].Children())

    props = []

    # Add each control to the properties for this dialog
    for ctrl in controls:
        # Get properties for each control and wrap them in
        # _dummy_control so that we can assign handle
        ctrl_props = _dummy_control(ctrl.GetProperties())

        # assign the handle
        ctrl_props.handle = ctrl.handle

        # offset the rectangle from the dialog rectangle
        ctrl_props['Rectangle'] -= controls[0].Rectangle()

        props.append(ctrl_props)

    return props
|
Cambios
|
#!/usr/bin/env python2.7
# Copyright (c) 2012 Jonathan Warren
# Copyright (c) 2012 The Bitmessage developers
# Distributed under the MIT/X11 software license. See the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

# Right now, PyBitmessage only supports connecting to stream 1. It doesn't yet contain logic to expand into further streams.

# ---- protocol / tuning constants (times are in seconds) ----
softwareVersion = '0.2.7'
verbose = 2  # logging verbosity; >= 2 enables extra protocol diagnostics
maximumAgeOfAnObjectThatIAmWillingToAccept = 216000 #Equals two days and 12 hours.
lengthOfTimeToLeaveObjectsInInventory = 237600 #Equals two days and 18 hours. This should be longer than maximumAgeOfAnObjectThatIAmWillingToAccept so that we don't process messages twice.
lengthOfTimeToHoldOnToAllPubkeys = 2419200 #Equals 4 weeks. You could make this longer if you want but making it shorter would not be advisable because there is a very small possibility that it could keep you from obtaining a needed pubkey for a period of time.
maximumAgeOfObjectsThatIAdvertiseToOthers = 216000 #Equals two days and 12 hours
maximumAgeOfNodesThatIAdvertiseToOthers = 10800 #Equals three hours
storeConfigFilesInSameDirectoryAsProgramByDefault = False #The user may de-select Portable Mode in the settings if they want the config files to stay in the application data folder.
useVeryEasyProofOfWorkForTesting = False #If you set this to True while on the normal network, you won't be able to send or sometimes receive messages.
import sys
try:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
except Exception, err:
print 'PyBitmessage requires PyQt. You can download it from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \'PyQt Download\' (without quotes).'
print 'Error message:', err
sys.exit()
import ConfigParser
from bitmessageui import *
from newaddressdialog import *
from newsubscriptiondialog import *
from regenerateaddresses import *
from specialaddressbehavior import *
from settings import *
from about import *
from help import *
from iconglossary import *
from addresses import *
import Queue
from defaultKnownNodes import *
import time
import socket
import threading
#import rsa
#from rsa.bigfile import *
import hashlib
from struct import *
import pickle
import random
import sqlite3
import threading #used for the locks, not for the threads
from time import strftime, localtime
import os
import shutil #used for moving the messages.dat file
import string
import socks
import highlevelcrypto
from pyelliptic.openssl import OpenSSL
import ctypes
from pyelliptic import arithmetic
#The next 3 are used for the API
from SimpleXMLRPCServer import *
import json
from subprocess import call #used when the API must execute an outside program
#For each stream to which we connect, one outgoingSynSender thread will exist and will create 8 connections with peers.
class outgoingSynSender(QThread):
    """For each stream we connect to, one outgoingSynSender thread exists
    and maintains up to 8 outgoing connections with peers.

    Written for Python 2 / PyQt4 (QThread, old-style except syntax).
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        self.selfInitiatedConnectionList = [] #This is a list of current connections (the thread pointers at least)
        self.alreadyAttemptedConnectionsList = [] #This is a list of nodes to which we have already attempted a connection
    def setup(self,streamNumber):
        # which Bitmessage stream this sender serves
        self.streamNumber = streamNumber
    def run(self):
        time.sleep(1)
        resetTime = int(time.time()) #used below to clear out the alreadyAttemptedConnectionsList periodically so that we will retry connecting to hosts to which we have already tried to connect.
        while True:
            #time.sleep(999999)#I sometimes use this to prevent connections for testing.
            if len(self.selfInitiatedConnectionList) < 8: #maximum number of outgoing connections = 8
                random.seed()
                # pick a random known node for this stream that we are not
                # already connected to and have not recently attempted
                HOST, = random.sample(knownNodes[self.streamNumber],  1)
                while HOST in self.alreadyAttemptedConnectionsList or HOST in connectedHostsList:
                    #print 'choosing new sample'
                    random.seed()
                    HOST, = random.sample(knownNodes[self.streamNumber],  1)
                    time.sleep(1)
                    #Clear out the alreadyAttemptedConnectionsList every half hour so that this program will again attempt a connection to any nodes, even ones it has already tried.
                    if (int(time.time()) - resetTime) > 1800:
                        self.alreadyAttemptedConnectionsList = []
                        resetTime = int(time.time())
                self.alreadyAttemptedConnectionsList.append(HOST)
                PORT, timeNodeLastSeen = knownNodes[self.streamNumber][HOST]
                sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(20)
                # configure the socket for the user's SOCKS proxy settings
                if config.get('bitmessagesettings', 'socksproxytype') == 'none':
                    printLock.acquire()
                    print 'Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    #sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                elif config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS4a':
                    printLock.acquire()
                    print '(Using SOCKS4a) Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    proxytype = socks.PROXY_TYPE_SOCKS4
                    sockshostname = config.get('bitmessagesettings', 'sockshostname')
                    socksport = config.getint('bitmessagesettings', 'socksport')
                    rdns = True #Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
                    if config.getboolean('bitmessagesettings', 'socksauthentication'):
                        socksusername = config.get('bitmessagesettings', 'socksusername')
                        sockspassword = config.get('bitmessagesettings', 'sockspassword')
                        sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
                    else:
                        sock.setproxy(proxytype, sockshostname, socksport, rdns)
                elif config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS5':
                    printLock.acquire()
                    print '(Using SOCKS5) Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    proxytype = socks.PROXY_TYPE_SOCKS5
                    sockshostname = config.get('bitmessagesettings', 'sockshostname')
                    socksport = config.getint('bitmessagesettings', 'socksport')
                    rdns = True #Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
                    if config.getboolean('bitmessagesettings', 'socksauthentication'):
                        socksusername = config.get('bitmessagesettings', 'socksusername')
                        sockspassword = config.get('bitmessagesettings', 'sockspassword')
                        sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
                    else:
                        sock.setproxy(proxytype, sockshostname, socksport, rdns)
                try:
                    sock.connect((HOST, PORT))
                    # hand a receiveDataThread and sendDataThread this socket
                    rd = receiveDataThread()
                    self.emit(SIGNAL("passObjectThrough(PyQt_PyObject)"),rd)
                    objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
                    rd.setup(sock,HOST,PORT,self.streamNumber,self.selfInitiatedConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware)
                    rd.start()
                    printLock.acquire()
                    print self, 'connected to', HOST, 'during outgoing attempt.'
                    printLock.release()
                    sd = sendDataThread()
                    sd.setup(sock,HOST,PORT,self.streamNumber,objectsOfWhichThisRemoteNodeIsAlreadyAware)
                    sd.start()
                    sd.sendVersionMessage()
                except socks.GeneralProxyError, err:
                    printLock.acquire()
                    print 'Could NOT connect to', HOST, 'during outgoing attempt.', err
                    printLock.release()
                    PORT, timeLastSeen = knownNodes[self.streamNumber][HOST]
                    if (int(time.time())-timeLastSeen) > 172800 and len(knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the knownNodes data-structure.
                        del knownNodes[self.streamNumber][HOST]
                        print 'deleting ', HOST, 'from knownNodes because it is more than 48 hours old and we could not connect to it.'
                except socks.Socks5AuthError, err:
                    self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS5 Authentication problem: "+str(err))
                except socks.Socks5Error, err:
                    # NOTE(review): this 'pass' is a no-op; the print below
                    # still executes.
                    pass
                    print 'SOCKS5 error. (It is possible that the server wants authentication).)' ,str(err)
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS5 error. Server might require authentication. "+str(err))
                except socks.Socks4Error, err:
                    print 'Socks4Error:', err
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS4 error: "+str(err))
                except socket.error, err:
                    if config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                        print 'Bitmessage MIGHT be having trouble connecting to the SOCKS server. '+str(err)
                        #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"Problem: Bitmessage can not connect to the SOCKS server. "+str(err))
                    else:
                        printLock.acquire()
                        print 'Could NOT connect to', HOST, 'during outgoing attempt.', err
                        printLock.release()
                        PORT, timeLastSeen = knownNodes[self.streamNumber][HOST]
                        if (int(time.time())-timeLastSeen) > 172800 and len(knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the knownNodes data-structure.
                            del knownNodes[self.streamNumber][HOST]
                            print 'deleting ', HOST, 'from knownNodes because it is more than 48 hours old and we could not connect to it.'
                except Exception, err:
                    print 'An exception has occurred in the outgoingSynSender thread that was not caught by other exception types:', err
            time.sleep(0.1)
#Only one singleListener thread will ever exist. It creates the receiveDataThread and sendDataThread for each incoming connection. Note that it cannot set the stream number because it is not known yet- the other node will have to tell us its stream number in a version message. If we don't care about their stream, we will close the connection (within the recversion function of the recieveData thread)
class singleListener(QThread):
    """Accepts incoming connections and spawns a receiveDataThread and
    sendDataThread for each one.

    Only one singleListener thread ever exists. It cannot set the stream
    number because it is not known yet - the remote node tells us its
    stream number in a version message.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        #We don't want to accept incoming connections if the user is using a SOCKS proxy. If they eventually select proxy 'none' then this will start listening for connections.
        while config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
            time.sleep(300)

        print 'Listening for incoming connections.'
        HOST = ''                 # Symbolic name meaning all available interfaces
        PORT = config.getint('bitmessagesettings', 'port')
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #This option apparently avoids the TIME_WAIT state so that we can rebind faster
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((HOST, PORT))
        sock.listen(2)

        # This list isn't used for anything. The reason it exists is because
        # receiveData threads expect that a list be passed to them. They
        # expect this because the outgoingSynSender thread DOES use a similar
        # list to keep track of the number of outgoing connections it has created.
        self.incomingConnectionList = []
        while True:
            #We don't want to accept incoming connections if the user is using a SOCKS proxy. If the user eventually select proxy 'none' then this will start listening for connections.
            while config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                time.sleep(10)
            a,(HOST,PORT) = sock.accept()
            #Users are finding that if they run more than one node in the same network (thus with the same public IP), they can not connect with the second node. This is because this section of code won't accept the connection from the same IP. This problem will go away when the Bitmessage network grows beyond being tiny but in the mean time I'll comment out this code section.
            """while HOST in connectedHostsList:
                print 'incoming connection is from a host in connectedHostsList (we are already connected to it). Ignoring it.'
                a.close()
                a,(HOST,PORT) = sock.accept()"""
            # stream number -1 marks this as an incoming connection whose
            # stream is not yet known
            rd = receiveDataThread()
            self.emit(SIGNAL("passObjectThrough(PyQt_PyObject)"),rd)
            objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
            rd.setup(a,HOST,PORT,-1,self.incomingConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware)
            printLock.acquire()
            print self, 'connected to', HOST,'during INCOMING request.'
            printLock.release()
            rd.start()
            sd = sendDataThread()
            sd.setup(a,HOST,PORT,-1,objectsOfWhichThisRemoteNodeIsAlreadyAware)
            sd.start()
#This thread is created either by the synSenderThread(for outgoing connections) or the singleListenerThread(for incoming connectiosn).
class receiveDataThread(QThread):
def __init__(self, parent = None):
    """Initialise the thread; real per-connection state is set in setup()."""
    QThread.__init__(self, parent)
    # buffer of raw bytes received from the socket, consumed by processData
    self.data = ''
    # handshake state: whether we have sent / received a verack message
    self.verackSent = False
    self.verackReceived = False
def setup(self,sock,HOST,port,streamNumber,selfInitiatedConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware):
    """Attach this thread to an already-connected socket.

    streamNumber of -1 indicates an incoming connection whose stream is
    not yet known (the peer's version message will tell us).
    """
    self.sock = sock
    self.HOST = HOST
    self.PORT = port
    self.sock.settimeout(600) #We'll send out a pong every 5 minutes to make sure the connection stays alive if there has been no other traffic to send lately.
    self.streamNumber = streamNumber
    self.selfInitiatedConnectionList = selfInitiatedConnectionList
    # register ourselves in the shared connection list
    self.selfInitiatedConnectionList.append(self)
    self.payloadLength = 0 #This is the protocol payload length thus it doesn't include the 24 byte message header
    self.receivedgetbiginv = False #Gets set to true once we receive a getbiginv message from our peer. An abusive peer might request it too much so we use this variable to check whether they have already asked for a big inv message.
    self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave = {}
    connectedHostsList[self.HOST] = 0 #The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it to this list so that the outgoingSynSender thread doesn't try to connect to it.
    self.connectionIsOrWasFullyEstablished = False #set to true after the remote node and I accept each other's version messages. This is needed to allow the user interface to accurately reflect the current number of connections.
    if self.streamNumber == -1: #This was an incoming connection. Send out a version message if we accept the other node's version message.
        self.initiatedConnection = False
    else:
        self.initiatedConnection = True
    self.ackDataThatWeHaveYetToSend = [] #When we receive a message bound for us, we store the acknowledgement that we need to send (the ackdata) here until we are done processing all other data received from this peer.
    self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware
def run(self):
    """Receive loop: read from the socket until timeout, error or close,
    then tear down the connection and update the shared bookkeeping."""
    while True:
        try:
            # accumulate incoming bytes; processData consumes them
            self.data = self.data + self.sock.recv(65536)
        except socket.timeout:
            printLock.acquire()
            print 'Timeout occurred waiting for data. Closing receiveData thread.'
            printLock.release()
            break
        except Exception, err:
            printLock.acquire()
            print 'sock.recv error. Closing receiveData thread.', err
            printLock.release()
            break
        #print 'Received', repr(self.data)
        if self.data == "":
            # recv returning an empty string means the peer closed
            printLock.acquire()
            print 'Connection closed. Closing receiveData thread.'
            printLock.release()
            break
        else:
            self.processData()

    try:
        self.sock.close()
    except Exception, err:
        print 'Within receiveDataThread run(), self.sock.close() failed.', err
    try:
        # de-register from the shared connection list
        self.selfInitiatedConnectionList.remove(self)
        printLock.acquire()
        print 'removed self (a receiveDataThread) from ConnectionList'
        printLock.release()
    except:
        pass
    # tell the matching sendDataThread for this host to shut down too
    broadcastToSendDataQueues((0, 'shutdown', self.HOST))
    if self.connectionIsOrWasFullyEstablished: #We don't want to decrement the number of connections and show the result if we never incremented it in the first place (which we only do if the connection is fully established- meaning that both nodes accepted each other's version packets.)
        connectionsCountLock.acquire()
        connectionsCount[self.streamNumber] -= 1
        self.emit(SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"),self.streamNumber,connectionsCount[self.streamNumber])
        printLock.acquire()
        print 'Updating network status tab with current connections count:', connectionsCount[self.streamNumber]
        printLock.release()
        connectionsCountLock.release()
    try:
        del connectedHostsList[self.HOST]
    except Exception, err:
        print 'Could not delete', self.HOST, 'from connectedHostsList.', err
    def processData(self):
        """Parse and dispatch any complete protocol messages buffered in self.data.

        A Bitmessage wire message is: 4 magic bytes, a null-padded 12-byte
        command, a 4-byte big-endian payload length, the first 4 bytes of the
        SHA512 of the payload as a checksum, then the payload itself. Each
        complete, checksum-valid message is dispatched to the matching
        rec*/send* handler, stripped from the front of the buffer, and this
        method recurses to process whatever remains. Wrong magic bytes clear
        the whole buffer; a wrong checksum discards just that one message.
        """
        global verbose
        #if verbose >= 2:
            #printLock.acquire()
            #print 'self.data is currently ', repr(self.data)
            #printLock.release()
        if len(self.data) < 20: #if so little of the data has arrived that we can't even unpack the payload length
            pass
        elif self.data[0:4] != '\xe9\xbe\xb4\xd9':
            if verbose >= 2:
                printLock.acquire()
                sys.stderr.write('The magic bytes were not correct. First 40 bytes of data: %s\n' % repr(self.data[0:40]))
                print 'self.data:', self.data.encode('hex')
                printLock.release()
            self.data = ""
        else:
            self.payloadLength, = unpack('>L',self.data[16:20])
            if len(self.data) >= self.payloadLength+24: #check if the whole message has arrived yet. If it has,...
                if self.data[20:24] == hashlib.sha512(self.data[24:self.payloadLength+24]).digest()[0:4]:#test the checksum in the message. If it is correct...
                    #print 'message checksum is correct'
                    #The time we've last seen this node is obviously right now since we just received valid data from it. So update the knownNodes list so that other peers can be made aware of its existance.
                    if self.initiatedConnection: #The remote port is only something we should share with others if it is the remote node's incoming port (rather than some random operating-system-assigned outgoing port).
                        knownNodes[self.streamNumber][self.HOST] = (self.PORT,int(time.time()))
                    if self.payloadLength <= 180000000: #If the size of the message is greater than 180MB, ignore it. (I get memory errors when processing messages much larger than this though it is concievable that this value will have to be lowered if some systems are less tolarant of large messages.)
                        remoteCommand = self.data[4:16]
                        printLock.acquire()
                        print 'remoteCommand ', remoteCommand, 'from', self.HOST
                        printLock.release()
                        #Dispatch on the 12-byte command field. Everything except
                        #version/verack requires the handshake to have completed.
                        if remoteCommand == 'version\x00\x00\x00\x00\x00':
                            self.recversion()
                        elif remoteCommand == 'verack\x00\x00\x00\x00\x00\x00':
                            self.recverack()
                        elif remoteCommand == 'addr\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recaddr()
                        elif remoteCommand == 'getpubkey\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recgetpubkey()
                        elif remoteCommand == 'pubkey\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recpubkey()
                        elif remoteCommand == 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recinv()
                        elif remoteCommand == 'getdata\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recgetdata()
                        elif remoteCommand == 'getbiginv\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendBigInv()
                        elif remoteCommand == 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recmsg()
                        elif remoteCommand == 'broadcast\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recbroadcast()
                        elif remoteCommand == 'getaddr\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendaddr()
                        elif remoteCommand == 'ping\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendpong()
                        elif remoteCommand == 'pong\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            pass
                        elif remoteCommand == 'alert\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            pass
                    self.data = self.data[self.payloadLength+24:]#take this message out and then process the next message
                    if self.data == '':
                        #The buffer is drained. Request ONE randomly-chosen advertised
                        #object that we don't already have (checking both the in-memory
                        #inventory dict and the SQL inventory on disk).
                        while len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
                            random.seed()
                            objectHash, = random.sample(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave,  1)
                            if objectHash in inventory:
                                printLock.acquire()
                                print 'Inventory (in memory) already has object listed in inv message.'
                                printLock.release()
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]
                            elif isInSqlInventory(objectHash):
                                printLock.acquire()
                                print 'Inventory (SQL on disk) already has object listed in inv message.'
                                printLock.release()
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]
                            else:
                                #print 'processData function making request for object:', objectHash.encode('hex')
                                self.sendgetdata(objectHash)
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash] #It is possible that the remote node doesn't respond with the object. In that case, we'll very likely get it from someone else anyway.
                                break
                        if len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
                            printLock.acquire()
                            print 'within processData, number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now', len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)
                            printLock.release()
                        #Queued ackData is fed back through this same parser as if it
                        #had arrived from the peer.
                        if len(self.ackDataThatWeHaveYetToSend) > 0:
                            self.data = self.ackDataThatWeHaveYetToSend.pop()
                    self.processData()
                else:
                    print 'Checksum incorrect. Clearing this message.'
                    self.data = self.data[self.payloadLength+24:]
def isProofOfWorkSufficient(self):
POW, = unpack('>Q',hashlib.sha512(hashlib.sha512(self.data[24:32]+ hashlib.sha512(self.data[32:24+self.payloadLength]).digest()).digest()).digest()[0:8])
#print 'POW:', POW
#Notice that I have divided the averageProofOfWorkNonceTrialsPerByte by two. This makes the POW requirement easier. This gives us wiggle-room: if we decide that we want to make the POW easier, the change won't obsolete old clients because they already expect a lower POW. If we decide that the current work done by clients feels approperate then we can remove this division by 2 and make the requirement match what is actually done by a sending node. If we want to raise the POW requirement then old nodes will HAVE to upgrade no matter what.
return POW < 2**64 / ((self.payloadLength+payloadLengthExtraBytes) * (averageProofOfWorkNonceTrialsPerByte/2))
def sendpong(self):
print 'Sending pong'
self.sock.sendall('\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
def recverack(self):
print 'verack received'
self.verackReceived = True
if self.verackSent == True:
#We have thus both sent and received a verack.
self.connectionFullyEstablished()
    def connectionFullyEstablished(self):
        """Finish handshake bookkeeping once both veracks have been exchanged.

        Updates the GUI status icon (for inbound connections), increments the
        per-stream connection count, announces the new peer to all our other
        peers via an addr broadcast, sends the peer our own addr data and a
        big inv of our inventory, and disconnects if we already have more
        than 150 peers in this stream.
        """
        self.connectionIsOrWasFullyEstablished = True
        if not self.initiatedConnection:
            #An inbound connection proves we are reachable, so show green.
            self.emit(SIGNAL("setStatusIcon(PyQt_PyObject)"),'green')
        #Update the 'Network Status' tab
        connectionsCountLock.acquire()
        connectionsCount[self.streamNumber] += 1
        self.emit(SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"),self.streamNumber,connectionsCount[self.streamNumber])
        connectionsCountLock.release()
        remoteNodeIncomingPort, remoteNodeSeenTime = knownNodes[self.streamNumber][self.HOST]
        printLock.acquire()
        print 'Connection fully established with', self.HOST, remoteNodeIncomingPort
        print 'broadcasting addr from within connectionFullyEstablished function.'
        printLock.release()
        self.broadcastaddr([(int(time.time()), self.streamNumber, 1, self.HOST, remoteNodeIncomingPort)]) #This lets all of our peers know about this new node.
        self.sendaddr() #This is one large addr message to this one peer.
        if connectionsCount[self.streamNumber] > 150:
            printLock.acquire()
            print 'We are connected to too many people. Closing connection.'
            printLock.release()
            self.sock.close()
            return
        self.sendBigInv()
def sendBigInv(self): #I used capitals in for this function name because there is no such Bitmessage command as 'biginv'.
if self.receivedgetbiginv:
print 'We have already sent a big inv message to this peer. Ignoring request.'
return
else:
self.receivedgetbiginv = True
sqlLock.acquire()
#Select all hashes which are younger than two days old and in this stream.
t = (int(time.time())-maximumAgeOfObjectsThatIAdvertiseToOthers,self.streamNumber)
sqlSubmitQueue.put('''SELECT hash FROM inventory WHERE receivedtime>? and streamnumber=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
bigInvList = {}
for row in queryreturn:
hash, = row
if hash not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
bigInvList[hash] = 0
else:
printLock.acquire()
print 'Not including an object hash in a big inv message because the remote node is already aware of it.'#This line is here to check that this feature is working.
printLock.release()
#We also have messages in our inventory in memory (which is a python dictionary). Let's fetch those too.
for hash, storedValue in inventory.items():
if hash not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
objectType, streamNumber, payload, receivedTime = storedValue
if streamNumber == self.streamNumber and receivedTime > int(time.time())-maximumAgeOfObjectsThatIAdvertiseToOthers:
bigInvList[hash] = 0
else:
printLock.acquire()
print 'Not including an object hash in a big inv message because the remote node is already aware of it.'#This line is here to check that this feature is working.
printLock.release()
numberOfObjectsInInvMessage = 0
payload = ''
#Now let us start appending all of these hashes together. They will be sent out in a big inv message to our new peer.
for hash, storedValue in bigInvList.items():
payload += hash
numberOfObjectsInInvMessage += 1
if numberOfObjectsInInvMessage >= 50000: #We can only send a max of 50000 items per inv message but we may have more objects to advertise. They must be split up into multiple inv messages.
self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage,payload)
payload = ''
numberOfObjectsInInvMessage = 0
if numberOfObjectsInInvMessage > 0:
self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage,payload)
#Self explanatory. Notice that there is also a broadcastinv function for broadcasting invs to everyone in our stream.
def sendinvMessageToJustThisOnePeer(self,numberOfObjects,payload):
payload = encodeVarint(numberOfObjects) + payload
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload))
headerData += hashlib.sha512(payload).digest()[:4]
printLock.acquire()
print 'Sending huge inv message with', numberOfObjects, 'objects to just this one peer'
printLock.release()
self.sock.send(headerData + payload)
#We have received a broadcast message
    def recbroadcast(self):
        """Handle a received broadcast message.

        Validates proof of work, embedded timestamp, and minimum size;
        drops duplicates already held in the in-memory or SQL inventory;
        otherwise stores the object, relays the inv to our peers, and hands
        the payload to processbroadcast. Finally sleeps so that total
        processing time is constant for a given size class, as a timing
        attack mitigation (a fast return would reveal we had no interest in
        the message).
        """
        self.messageProcessingStartTime = time.time()
        #First we must check to make sure the proof of work is sufficient.
        if not self.isProofOfWorkSufficient():
            print 'Proof of work in broadcast message insufficient.'
            return
        embeddedTime, = unpack('>I',self.data[32:36])
        if embeddedTime > (int(time.time())+10800): #prevent funny business
            print 'The embedded time in this broadcast message is more than three hours in the future. That doesn\'t make sense. Ignoring message.'
            return
        if embeddedTime < (int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept):
            print 'The embedded time in this broadcast message is too old. Ignoring message.'
            return
        if self.payloadLength < 66: #todo: When version 1 addresses are completely abandoned, this should be changed to 180
            print 'The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.'
            return
        inventoryLock.acquire()
        self.inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
        if self.inventoryHash in inventory:
            print 'We have already received this broadcast object. Ignoring.'
            inventoryLock.release()
            return
        elif isInSqlInventory(self.inventoryHash):
            print 'We have already received this broadcast object (it is stored on disk in the SQL inventory). Ignoring it.'
            inventoryLock.release()
            return
        #It is valid so far. Let's let our peers know about it.
        objectType = 'broadcast'
        inventory[self.inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
        inventoryLock.release()
        self.broadcastinv(self.inventoryHash)
        self.emit(SIGNAL("incrementNumberOfBroadcastsProcessed()"))
        self.processbroadcast()#When this function returns, we will have either successfully processed this broadcast because we are interested in it, ignored it because we aren't interested in it, or found problem with the broadcast that warranted ignoring it.
        # Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we haven't used the specified amount of time, we shall sleep. These values are mostly the same values used for msg messages although broadcast messages are processed faster.
        if self.payloadLength > 100000000: #Size is greater than 100 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 100 #seconds.
        elif self.payloadLength > 10000000: #Between 100 and 10 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 20 #seconds.
        elif self.payloadLength > 1000000: #Between 10 and 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = 3 #seconds.
        else: #Less than 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = .1 #seconds.
        sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.messageProcessingStartTime)
        if sleepTime > 0:
            printLock.acquire()
            print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
            printLock.release()
            time.sleep(sleepTime)
        printLock.acquire()
        print 'Total message processing time:', time.time()- self.messageProcessingStartTime, 'seconds.'
        printLock.release()
#A broadcast message has a valid time and POW and requires processing. The recbroadcast function calls this one.
    def processbroadcast(self):
        """Parse a POW/time-validated broadcast payload (called by recbroadcast).

        Only broadcast version 1 from version-2 sender addresses is decoded.
        The method walks self.data sequentially: version varints, behavior
        bitfield, the sender's public signing and encryption keys, the
        20-byte sender hash, then the encoded message and its ECDSA
        signature. If the sender hash is not one we subscribe to
        (broadcastSendersForWhichImWatching) the message is dropped. On a
        verified signature the sender's pubkey is stored, the worker thread
        is notified, and the decoded message is inserted into the inbox and
        shown in the GUI.
        """
        readPosition = 36
        broadcastVersion, broadcastVersionLength = decodeVarint(self.data[readPosition:readPosition+9])
        if broadcastVersion <> 1:
            #Cannot decode incoming broadcast versions higher than 1. Assuming the sender isn't being silly, you should upgrade Bitmessage because this message shall be ignored.
            return
        readPosition += broadcastVersionLength
        beginningOfPubkeyPosition = readPosition #used when we add the pubkey to our pubkey table
        sendersAddressVersion, sendersAddressVersionLength = decodeVarint(self.data[readPosition:readPosition+9])
        if sendersAddressVersion <= 1 or sendersAddressVersion >=3:
            #Cannot decode senderAddressVersion higher than 2. Assuming the sender isn't being silly, you should upgrade Bitmessage because this message shall be ignored.
            return
        readPosition += sendersAddressVersionLength
        if sendersAddressVersion == 2:
            sendersStream, sendersStreamLength = decodeVarint(self.data[readPosition:readPosition+9])
            if sendersStream <= 0 or sendersStream <> self.streamNumber:
                #The sender's stream must be valid and match the stream this connection serves.
                return
            readPosition += sendersStreamLength
            behaviorBitfield = self.data[readPosition:readPosition+4]
            readPosition += 4
            #Keys are transmitted without the uncompressed-point prefix byte; restore it.
            sendersPubSigningKey = '\x04' + self.data[readPosition:readPosition+64]
            readPosition += 64
            sendersPubEncryptionKey = '\x04' + self.data[readPosition:readPosition+64]
            readPosition += 64
            endOfPubkeyPosition = readPosition
            sendersHash = self.data[readPosition:readPosition+20]
            if sendersHash not in broadcastSendersForWhichImWatching:
                #Display timing data
                printLock.acquire()
                print 'Time spent deciding that we are not interested in this broadcast:', time.time()- self.messageProcessingStartTime
                printLock.release()
                return
            #At this point, this message claims to be from sendersHash and we are interested in it. We still have to hash the public key to make sure it is truly the key that matches the hash, and also check the signiture.
            readPosition += 20
            sha = hashlib.new('sha512')
            sha.update(sendersPubSigningKey+sendersPubEncryptionKey)
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            if ripe.digest() != sendersHash:
                #The sender of this message lied.
                return
            messageEncodingType, messageEncodingTypeLength = decodeVarint(self.data[readPosition:readPosition+9])
            if messageEncodingType == 0:
                return
            readPosition += messageEncodingTypeLength
            messageLength, messageLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += messageLengthLength
            message = self.data[readPosition:readPosition+messageLength]
            readPosition += messageLength
            readPositionAtBottomOfMessage = readPosition
            signatureLength, signatureLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += signatureLengthLength
            signature = self.data[readPosition:readPosition+signatureLength]
            try:
                highlevelcrypto.verify(self.data[36:readPositionAtBottomOfMessage],signature,sendersPubSigningKey.encode('hex'))
                print 'ECDSA verify passed'
            except Exception, err:
                print 'ECDSA verify failed', err
                return
            #verify passed
            #Let's store the public key in case we want to reply to this person.
            #We don't have the correct nonce or time (which would let us send out a pubkey message) so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed to not do.)
            t = (ripe.digest(),False,'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'+'\xFF\xFF\xFF\xFF'+self.data[beginningOfPubkeyPosition:endOfPubkeyPosition],int(time.time()),'yes')
            sqlLock.acquire()
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            workerQueue.put(('newpubkey',(sendersAddressVersion,sendersStream,ripe.digest()))) #This will check to see whether we happen to be awaiting this pubkey in order to send a message. If we are, it will do the POW and send it.
            fromAddress = encodeAddress(sendersAddressVersion,sendersStream,ripe.digest())
            print 'fromAddress:', fromAddress
            #Encoding type 2 is 'Subject:...\nBody:...'; type 1 is a bare body.
            if messageEncodingType == 2:
                bodyPositionIndex = string.find(message,'\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex+6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                body = message
                subject = ''
            elif messageEncodingType == 0:
                #Unreachable in practice: encoding type 0 already returned above.
                print 'messageEncodingType == 0. Doing nothing with the message.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''
            toAddress = '[Broadcast subscribers]'
            if messageEncodingType <> 0:
                sqlLock.acquire()
                t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
                #If we are behaving as an API then we might need to run an outside command to let some program know that a new message has arrived.
                if safeConfigGetBoolean('bitmessagesettings','apienabled'):
                    try:
                        apiNotifyPath = config.get('bitmessagesettings','apinotifypath')
                    except:
                        apiNotifyPath = ''
                    if apiNotifyPath != '':
                        call([apiNotifyPath, "newBroadcast"])
            #Display timing data
            printLock.acquire()
            print 'Time spent processing this interesting broadcast:', time.time()- self.messageProcessingStartTime
            printLock.release()
        #Dead code: the former version-1 (RSA) broadcast handling below is kept
        #inert as a bare string literal and is never executed.
        """elif sendersAddressVersion == 1:
            sendersStream, sendersStreamLength = decodeVarint(self.data[readPosition:readPosition+9])
            if sendersStream <= 0:
                return
            readPosition += sendersStreamLength
            sendersHash = self.data[readPosition:readPosition+20]
            if sendersHash not in broadcastSendersForWhichImWatching:
                return
            #At this point, this message claims to be from sendersHash and we are interested in it. We still have to hash the public key to make sure it is truly the key that matches the hash, and also check the signiture.
            readPosition += 20
            nLength, nLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            if nLength < 1:
                return
            readPosition += nLengthLength
            nString = self.data[readPosition:readPosition+nLength]
            readPosition += nLength
            eLength, eLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            if eLength < 1:
                return
            readPosition += eLengthLength
            eString = self.data[readPosition:readPosition+eLength]
            #We are now ready to hash the public key and verify that its hash matches the hash claimed in the message
            readPosition += eLength
            sha = hashlib.new('sha512')
            sha.update(nString+eString)
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            if ripe.digest() != sendersHash:
                #The sender of this message lied.
                return
            readPositionAtBeginningOfMessageEncodingType = readPosition
            messageEncodingType, messageEncodingTypeLength = decodeVarint(self.data[readPosition:readPosition+9])
            if messageEncodingType == 0:
                return
            readPosition += messageEncodingTypeLength
            messageLength, messageLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += messageLengthLength
            message = self.data[readPosition:readPosition+messageLength]
            readPosition += messageLength
            signature = self.data[readPosition:readPosition+nLength]
            sendersPubkey = rsa.PublicKey(convertStringToInt(nString),convertStringToInt(eString))
            #print 'senders Pubkey', sendersPubkey
            try:
                rsa.verify(self.data[readPositionAtBeginningOfMessageEncodingType:readPositionAtBeginningOfMessageEncodingType+messageEncodingTypeLength+messageLengthLength+messageLength],signature,sendersPubkey)
                print 'verify passed'
            except Exception, err:
                print 'verify failed', err
                return
            #verify passed
            fromAddress = encodeAddress(sendersAddressVersion,sendersStream,ripe.digest())
            print 'fromAddress:', fromAddress
            if messageEncodingType == 2:
                bodyPositionIndex = string.find(message,'\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex+6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                body = message
                subject = ''
            elif messageEncodingType == 0:
                print 'messageEncodingType == 0. Doing nothing with the message.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''
            toAddress = '[Broadcast subscribers]'
            if messageEncodingType <> 0:
                sqlLock.acquire()
                t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)"""
#We have received a msg message.
    def recmsg(self):
        """Handle a received msg message (a person-to-person message object).

        Validates proof of work, embedded timestamp, and the stream number
        claimed in the payload; drops duplicates already held in the
        in-memory or SQL inventory; otherwise stores the object, relays the
        inv to our peers, and hands the payload to processmsg. Finally
        sleeps so that total processing time is constant for a given size
        class, as a timing attack mitigation (returning quickly would reveal
        whether the message decrypted as ours). A large inert string literal
        of former RSA (version 1 address) handling follows the live code and
        is never executed.
        """
        self.messageProcessingStartTime = time.time()
        #First we must check to make sure the proof of work is sufficient.
        if not self.isProofOfWorkSufficient():
            print 'Proof of work in msg message insufficient.'
            return
        readPosition = 32
        embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
        if embeddedTime > int(time.time())+10800:
            print 'The time in the msg message is too new. Ignoring it. Time:', embeddedTime
            return
        if embeddedTime < int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept:
            print 'The time in the msg message is too old. Ignoring it. Time:', embeddedTime
            return
        readPosition += 4
        streamNumberAsClaimedByMsg, streamNumberAsClaimedByMsgLength = decodeVarint(self.data[readPosition:readPosition+9])
        if streamNumberAsClaimedByMsg != self.streamNumber:
            print 'The stream number encoded in this msg (' + str(streamNumberAsClaimedByMsg) + ') message does not match the stream number on which it was received. Ignoring it.'
            return
        readPosition += streamNumberAsClaimedByMsgLength
        self.inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
        inventoryLock.acquire()
        if self.inventoryHash in inventory:
            print 'We have already received this msg message. Ignoring.'
            inventoryLock.release()
            return
        elif isInSqlInventory(self.inventoryHash):
            print 'We have already received this msg message (it is stored on disk in the SQL inventory). Ignoring it.'
            inventoryLock.release()
            return
        #This msg message is valid. Let's let our peers know about it.
        objectType = 'msg'
        inventory[self.inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
        inventoryLock.release()
        self.broadcastinv(self.inventoryHash)
        self.emit(SIGNAL("incrementNumberOfMessagesProcessed()"))
        self.processmsg(readPosition) #When this function returns, we will have either successfully processed the message bound for us, ignored it because it isn't bound for us, or found problem with the message that warranted ignoring it.
        # Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we haven't used the specified amount of time, we shall sleep. These values are based on test timings and you may change them at-will.
        if self.payloadLength > 100000000: #Size is greater than 100 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 100 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 MB message: 3.7 seconds.
        elif self.payloadLength > 10000000: #Between 100 and 10 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 20 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 10 MB message: 0.53 seconds. Actual length of time it takes in practice when processing a real message: 1.44 seconds.
        elif self.payloadLength > 1000000: #Between 10 and 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = 3 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 1 MB message: 0.18 seconds. Actual length of time it takes in practice when processing a real message: 0.30 seconds.
        else: #Less than 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = .6 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 KB message: 0.15 seconds. Actual length of time it takes in practice when processing a real message: 0.25 seconds.
        sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.messageProcessingStartTime)
        if sleepTime > 0:
            printLock.acquire()
            print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
            printLock.release()
            time.sleep(sleepTime)
        printLock.acquire()
        print 'Total message processing time:', time.time()- self.messageProcessingStartTime, 'seconds.'
        printLock.release()
        #This section is for my RSA keys (version 1 addresses). If we don't have any version 1 addresses it will never run. This code will soon be removed.
        #Dead code below: kept inert as a bare string literal, never executed.
        """initialDecryptionSuccessful = False
        infile = cStringIO.StringIO(self.data[readPosition:self.payloadLength+24])
        outfile = cStringIO.StringIO()
        #print 'len(myRSAAddressHashes.items()):', len(myRSAAddressHashes.items())
        for key, value in myRSAAddressHashes.items():
            try:
                decrypt_bigfile(infile, outfile, value)
                #The initial decryption passed though there is a small chance that the message isn't actually for me. We'll need to check that the 20 zeros are present.
                #print 'initial decryption successful using key', repr(key)
                initialDecryptionSuccessful = True
                printLock.acquire()
                print 'Initial decryption passed'
                printLock.release()
                break
            except Exception, err:
                infile.seek(0)
                #print 'Exception:', err
                #print 'outfile len is:', len(outfile.getvalue()),'data is:', repr(outfile.getvalue())
                #print 'Initial decryption failed using key', value
                #decryption failed for this key. The message is for someone else (or for a different key of mine).
        if initialDecryptionSuccessful and outfile.getvalue()[:20] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00': #this run of 0s allows the true message receiver to identify his message
            #This is clearly a message bound for me.
            outfile.seek(0)
            data = outfile.getvalue()
            readPosition = 20 #To start reading past the 20 zero bytes
            messageVersion, messageVersionLength = decodeVarint(data[readPosition:readPosition+10])
            readPosition += messageVersionLength
            if messageVersion == 1:
                bitfieldBehavior = data[readPosition:readPosition+4]
                readPosition += 4
                sendersAddressVersionNumber, sendersAddressVersionNumberLength = decodeVarint(data[readPosition:readPosition+10])
                if sendersAddressVersionNumber == 1:
                    readPosition += sendersAddressVersionNumberLength
                    sendersStreamNumber, sendersStreamNumberLength = decodeVarint(data[readPosition:readPosition+10])
                    if sendersStreamNumber == 0:
                        print 'sendersStreamNumber = 0. Ignoring message'
                    else:
                        readPosition += sendersStreamNumberLength
                        sendersNLength, sendersNLengthLength = decodeVarint(data[readPosition:readPosition+10])
                        readPosition += sendersNLengthLength
                        sendersN = data[readPosition:readPosition+sendersNLength]
                        readPosition += sendersNLength
                        sendersELength, sendersELengthLength = decodeVarint(data[readPosition:readPosition+10])
                        readPosition += sendersELengthLength
                        sendersE = data[readPosition:readPosition+sendersELength]
                        readPosition += sendersELength
                        endOfThePublicKeyPosition = readPosition
                        messageEncodingType, messageEncodingTypeLength = decodeVarint(data[readPosition:readPosition+10])
                        readPosition += messageEncodingTypeLength
                        print 'Message Encoding Type:', messageEncodingType
                        messageLength, messageLengthLength = decodeVarint(data[readPosition:readPosition+10])
                        print 'message length:', messageLength
                        readPosition += messageLengthLength
                        message = data[readPosition:readPosition+messageLength]
                        #print 'First 150 characters of message:', repr(message[:150])
                        readPosition += messageLength
                        ackLength, ackLengthLength = decodeVarint(data[readPosition:readPosition+10])
                        #print 'ackLength:', ackLength
                        readPosition += ackLengthLength
                        ackData = data[readPosition:readPosition+ackLength]
                        readPosition += ackLength
                        payloadSigniture = data[readPosition:readPosition+sendersNLength] #We're using the length of the sender's n because it should match the signiture size.
                        sendersPubkey = rsa.PublicKey(convertStringToInt(sendersN),convertStringToInt(sendersE))
                        print 'sender\'s Pubkey', sendersPubkey
                        #Check the cryptographic signiture
                        verifyPassed = False
                        try:
                            rsa.verify(data[:-len(payloadSigniture)],payloadSigniture, sendersPubkey)
                            print 'verify passed'
                            verifyPassed = True
                        except Exception, err:
                            print 'verify failed', err
                        if verifyPassed:
                            #calculate the fromRipe.
                            sha = hashlib.new('sha512')
                            sha.update(sendersN+sendersE)
                            ripe = hashlib.new('ripemd160')
                            ripe.update(sha.digest())
                            #Let's store the public key in case we want to reply to this person.
                            #We don't have the correct nonce in order to send out a pubkey message so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed to not do.)
                            t = (ripe.digest(),False,'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'+data[20+messageVersionLength:endOfThePublicKeyPosition],int(time.time()),'yes')
                            sqlLock.acquire()
                            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
                            sqlSubmitQueue.put(t)
                            sqlReturnQueue.get()
                            sqlLock.release()
                            blockMessage = False #Gets set to True if the user shouldn't see the message according to black or white lists.
                            fromAddress = encodeAddress(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest())
                            if config.get('bitmessagesettings', 'blackwhitelist') == 'black': #If we are using a blacklist
                                t = (fromAddress,)
                                sqlLock.acquire()
                                sqlSubmitQueue.put('''SELECT label, enabled FROM blacklist where address=?''')
                                sqlSubmitQueue.put(t)
                                queryreturn = sqlReturnQueue.get()
                                sqlLock.release()
                                for row in queryreturn:
                                    label, enabled = row
                                    if enabled:
                                        print 'Message ignored because address is in blacklist.'
                                        blockMessage = True
                            else: #We're using a whitelist
                                t = (fromAddress,)
                                sqlLock.acquire()
                                sqlSubmitQueue.put('''SELECT label, enabled FROM whitelist where address=?''')
                                sqlSubmitQueue.put(t)
                                queryreturn = sqlReturnQueue.get()
                                sqlLock.release()
                                if queryreturn == []:
                                    print 'Message ignored because address not in whitelist.'
                                    blockMessage = True
                                for row in queryreturn: #It could be in the whitelist but disabled. Let's check.
                                    label, enabled = row
                                    if not enabled:
                                        print 'Message ignored because address in whitelist but not enabled.'
                                        blockMessage = True
                            if not blockMessage:
                                print 'fromAddress:', fromAddress
                                print 'First 150 characters of message:', repr(message[:150])
                                #Look up the destination address (my address) based on the destination ripe hash.
                                #I realize that I could have a data structure devoted to this task, or maintain an indexed table
                                #in the sql database, but I would prefer to minimize the number of data structures this program
                                #uses. Searching linearly through the user's short list of addresses doesn't take very long anyway.
                                configSections = config.sections()
                                for addressInKeysFile in configSections:
                                    if addressInKeysFile <> 'bitmessagesettings':
                                        status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
                                        if hash == key:
                                            toAddress = addressInKeysFile
                                            toLabel = config.get(addressInKeysFile, 'label')
                                            if toLabel == '':
                                                toLabel = addressInKeysFile
                                            break
                                if messageEncodingType == 2:
                                    bodyPositionIndex = string.find(message,'\nBody:')
                                    if bodyPositionIndex > 1:
                                        subject = message[8:bodyPositionIndex]
                                        body = message[bodyPositionIndex+6:]
                                    else:
                                        subject = ''
                                        body = message
                                elif messageEncodingType == 1:
                                    body = message
                                    subject = ''
                                elif messageEncodingType == 0:
                                    print 'messageEncodingType == 0. Doing nothing with the message. They probably just sent it so that we would store their public key or send their ack data for them.'
                                else:
                                    body = 'Unknown encoding type.\n\n' + repr(message)
                                    subject = ''
                                print 'within recmsg, self.inventoryHash is', repr(self.inventoryHash)
                                if messageEncodingType <> 0:
                                    sqlLock.acquire()
                                    t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                                    sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                                    sqlSubmitQueue.put(t)
                                    sqlReturnQueue.get()
                                    sqlLock.release()
                                    self.emit(SIGNAL("displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
                                #Now let us worry about the acknowledgement data
                                #We'll need to make sure that our client will properly process the ackData; if the packet is malformed, it might cause us to clear out self.data and an attacker could use that behavior to determine that we decoded this message.
                                ackDataValidThusFar = True
                                if len(ackData) < 24:
                                    print 'The length of ackData is unreasonably short. Not sending ackData.'
                                    ackDataValidThusFar = False
                                if ackData[0:4] != '\xe9\xbe\xb4\xd9':
                                    print 'Ackdata magic bytes were wrong. Not sending ackData.'
                                    ackDataValidThusFar = False
                                if ackDataValidThusFar:
                                    ackDataPayloadLength, = unpack('>L',ackData[16:20])
                                    if len(ackData)-24 != ackDataPayloadLength: #This ackData includes the protocol header which is not counted in the payload length.
                                        print 'ackData payload length doesn\'t match the payload length specified in the header. Not sending ackdata.'
                                        ackDataValidThusFar = False
                                if ackDataValidThusFar:
                                    print 'ackData is valid. Will process it.'
                                    self.ackDataThatWeHaveYetToSend.append(ackData) #When we have processed all data, the processData function will pop the ackData out and process it as if it is a message received from our peer.
                else:
                    print 'This program cannot decode messages from addresses with versions higher than 1. Ignoring.'
                    statusbar = 'This program cannot decode messages from addresses with versions higher than 1. Ignoring it.'
                    self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
            else:
                statusbar = 'Error: Cannot decode incoming msg versions higher than 1. Assuming the sender isn\' being silly, you should upgrade Bitmessage. Ignoring message.'
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
        else:
            printLock.acquire()
            print 'Could not decrypt with any RSA keys if you have any.'
            printLock.release()
        infile.close()
        outfile.close()"""
#A msg message has a valid time and POW and requires processing. The recmsg function calls this one.
    def processmsg(self,readPosition):
        """Handle a msg object whose POW and embedded time were already validated.

        Called by recmsg.  Flow: (1) if the payload matches ackdata we are
        watching for, mark the corresponding sent message as acknowledged and
        stop; (2) otherwise try to decrypt the payload with each of our EC
        private keys; (3) on success, parse the plaintext, verify the ECDSA
        signature, store the sender's pubkey, apply the black/white list,
        file the message in the inbox (rebroadcasting it if the receiving
        address is configured as a mailing list), and queue any embedded
        ackData for sending.  readPosition indexes into self.data just past
        the 24-byte message header.
        """
        initialDecryptionSuccessful = False
        #Let's check whether this is a message acknowledgement bound for us.
        if self.data[readPosition:24+self.payloadLength] in ackdataForWhichImWatching:
            printLock.acquire()
            print 'This msg IS an acknowledgement bound for me.'
            printLock.release()
            del ackdataForWhichImWatching[self.data[readPosition:24+self.payloadLength]]
            # Mark the matching row in the 'sent' table as acknowledged.
            t = ('ackreceived',self.data[readPosition:24+self.payloadLength])
            sqlLock.acquire()
            sqlSubmitQueue.put('UPDATE sent SET status=? WHERE ackdata=?')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),self.data[readPosition:24+self.payloadLength],'Acknowledgement of the message received just now.')
            return
        else:
            printLock.acquire()
            print 'This was NOT an acknowledgement bound for me.' #Msg potential ack data:', repr(self.data[readPosition:24+self.payloadLength])
            #print 'ackdataForWhichImWatching', ackdataForWhichImWatching
            printLock.release()
        #This is not an acknowledgement bound for me. See if it is a message bound for me by trying to decrypt it with my private keys.
        # 'key' is the RIPE hash of one of our own addresses; 'cryptorObject'
        # is the EC decryption object associated with that address.
        for key, cryptorObject in myECAddressHashes.items():
            try:
                data = cryptorObject.decrypt(self.data[readPosition:self.payloadLength+24])
                toRipe = key #This is the RIPE hash of my pubkeys. We need this below to compare to the destination_ripe included in the encrypted data.
                initialDecryptionSuccessful = True
                print 'EC decryption successful using key associated with ripe hash:', key.encode('hex')
                break
            except Exception, err:
                pass
                #print 'cryptorObject.decrypt Exception:', err
        if not initialDecryptionSuccessful:
            #This is not a message bound for me.
            printLock.acquire()
            print 'Length of time program spent failing to decrypt this message:', time.time()- self.messageProcessingStartTime, 'seconds.'
            printLock.release()
        else:
            #This is a message bound for me.
            # From here on, readPosition indexes into the decrypted plaintext
            # 'data', not into self.data.
            readPosition = 0
            messageVersion, messageVersionLength = decodeVarint(data[readPosition:readPosition+10])
            readPosition += messageVersionLength
            if messageVersion != 1:
                print 'Cannot understand message versions other than one. Ignoring message.'
                return
            sendersAddressVersionNumber, sendersAddressVersionNumberLength = decodeVarint(data[readPosition:readPosition+10])
            readPosition += sendersAddressVersionNumberLength
            if sendersAddressVersionNumber == 0:
                print 'Cannot understand sendersAddressVersionNumber = 0. Ignoring message.'
                return
            if sendersAddressVersionNumber >= 3:
                print 'Sender\'s address version number', sendersAddressVersionNumber, ' not yet supported. Ignoring message.'
                return
            if len(data) < 170:
                print 'Length of the unencrypted data is unreasonably short. Sanity check failed. Ignoring message.'
                return
            sendersStreamNumber, sendersStreamNumberLength = decodeVarint(data[readPosition:readPosition+10])
            if sendersStreamNumber == 0:
                print 'sender\'s stream number is 0. Ignoring message.'
                return
            readPosition += sendersStreamNumberLength
            behaviorBitfield = data[readPosition:readPosition+4]
            readPosition += 4
            # The embedded public keys omit the 0x04 uncompressed-point
            # prefix byte; restore it here.
            pubSigningKey = '\x04' + data[readPosition:readPosition+64]
            readPosition += 64
            pubEncryptionKey = '\x04' + data[readPosition:readPosition+64]
            readPosition += 64
            endOfThePublicKeyPosition = readPosition #needed for when we store the pubkey in our database of pubkeys for later use.
            if toRipe != data[readPosition:readPosition+20]:
                printLock.acquire()
                print 'The original sender of this message did not send it to you. Someone is attempting a Surreptitious Forwarding Attack.'
                print 'See: http://tools.ietf.org/html/draft-ietf-smime-sender-auth-00'
                print 'your toRipe:', toRipe.encode('hex')
                print 'embedded destination toRipe:', data[readPosition:readPosition+20].encode('hex')
                printLock.release()
                return
            readPosition += 20
            messageEncodingType, messageEncodingTypeLength = decodeVarint(data[readPosition:readPosition+10])
            readPosition += messageEncodingTypeLength
            messageLength, messageLengthLength = decodeVarint(data[readPosition:readPosition+10])
            readPosition += messageLengthLength
            message = data[readPosition:readPosition+messageLength]
            #print 'First 150 characters of message:', repr(message[:150])
            readPosition += messageLength
            ackLength, ackLengthLength = decodeVarint(data[readPosition:readPosition+10])
            readPosition += ackLengthLength
            ackData = data[readPosition:readPosition+ackLength]
            readPosition += ackLength
            positionOfBottomOfAckData = readPosition #needed to mark the end of what is covered by the signature
            signatureLength, signatureLengthLength = decodeVarint(data[readPosition:readPosition+10])
            readPosition += signatureLengthLength
            signature = data[readPosition:readPosition+signatureLength]
            try:
                highlevelcrypto.verify(data[:positionOfBottomOfAckData],signature,pubSigningKey.encode('hex'))
                print 'ECDSA verify passed'
            except Exception, err:
                print 'ECDSA verify failed', err
                return
            printLock.acquire()
            print 'As a matter of intellectual curiosity, here is the Bitcoin address associated with the keys owned by the other person:', calculateBitcoinAddressFromPubkey(pubSigningKey), ' ..and here is the testnet address:',calculateTestnetAddressFromPubkey(pubSigningKey),'. The other person must take their private signing key from Bitmessage and import it into Bitcoin (or a service like Blockchain.info) for it to be of any use. Do not use this unless you know what you are doing.'
            printLock.release()
            #calculate the fromRipe.
            sha = hashlib.new('sha512')
            sha.update(pubSigningKey+pubEncryptionKey)
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            #Let's store the public key in case we want to reply to this person.
            #We don't have the correct nonce or time (which would let us send out a pubkey message) so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed to not do.)
            t = (ripe.digest(),False,'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'+'\xFF\xFF\xFF\xFF'+data[messageVersionLength:endOfThePublicKeyPosition],int(time.time()),'yes')
            sqlLock.acquire()
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            workerQueue.put(('newpubkey',(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest()))) #This will check to see whether we happen to be awaiting this pubkey in order to send a message. If we are, it will do the POW and send it.
            blockMessage = False #Gets set to True if the user shouldn't see the message according to black or white lists.
            fromAddress = encodeAddress(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest())
            if config.get('bitmessagesettings', 'blackwhitelist') == 'black': #If we are using a blacklist
                t = (fromAddress,)
                sqlLock.acquire()
                sqlSubmitQueue.put('''SELECT label, enabled FROM blacklist where address=?''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                for row in queryreturn:
                    label, enabled = row
                    if enabled:
                        printLock.acquire()
                        print 'Message ignored because address is in blacklist.'
                        printLock.release()
                        blockMessage = True
            else: #We're using a whitelist
                t = (fromAddress,)
                sqlLock.acquire()
                sqlSubmitQueue.put('''SELECT label, enabled FROM whitelist where address=?''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                if queryreturn == []:
                    print 'Message ignored because address not in whitelist.'
                    blockMessage = True
                for row in queryreturn: #It could be in the whitelist but disabled. Let's check.
                    label, enabled = row
                    if not enabled:
                        print 'Message ignored because address in whitelist but not enabled.'
                        blockMessage = True
            if not blockMessage:
                print 'fromAddress:', fromAddress
                print 'First 150 characters of message:', repr(message[:150])
                #Look up the destination address (my address) based on the destination ripe hash.
                #I realize that I could have a data structure devoted to this task, or maintain an indexed table
                #in the sql database, but I would prefer to minimize the number of data structures this program
                #uses. Searching linearly through the user's short list of addresses doesn't take very long anyway.
                configSections = config.sections()
                for addressInKeysFile in configSections:
                    if addressInKeysFile <> 'bitmessagesettings':
                        status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
                        # 'key' still holds the RIPE hash whose cryptor decrypted
                        # this message (set in the decryption loop above).
                        if hash == key:
                            toAddress = addressInKeysFile
                            toLabel = config.get(addressInKeysFile, 'label')
                            if toLabel == '':
                                toLabel = addressInKeysFile
                # NOTE(review): unlike an earlier revision, this loop has no
                # 'break' after a match; harmless only if at most one configured
                # address has this hash.  Also, if no address matches, toAddress
                # below is unbound — confirm that cannot happen here.
                if messageEncodingType == 2:
                    # Encoding 2: "Subject:<s>\nBody:<b>" — the 8 skips 'Subject:'.
                    bodyPositionIndex = string.find(message,'\nBody:')
                    if bodyPositionIndex > 1:
                        subject = message[8:bodyPositionIndex]
                        body = message[bodyPositionIndex+6:]
                    else:
                        subject = ''
                        body = message
                elif messageEncodingType == 1:
                    body = message
                    subject = ''
                elif messageEncodingType == 0:
                    print 'messageEncodingType == 0. Doing nothing with the message. They probably just sent it so that we would store their public key or send their ack data for them.'
                else:
                    body = 'Unknown encoding type.\n\n' + repr(message)
                    subject = ''
                if messageEncodingType <> 0:
                    sqlLock.acquire()
                    t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                    sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                    sqlSubmitQueue.put(t)
                    sqlReturnQueue.get()
                    sqlLock.release()
                    self.emit(SIGNAL("displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
                #If we are behaving as an API then we might need to run an outside command to let some program know that a new message has arrived.
                if safeConfigGetBoolean('bitmessagesettings','apienabled'):
                    try:
                        apiNotifyPath = config.get('bitmessagesettings','apinotifypath')
                    except:
                        apiNotifyPath = ''
                    if apiNotifyPath != '':
                        call([apiNotifyPath, "newMessage"])
                #Let us now check and see whether our receiving address is behaving as a mailing list
                if safeConfigGetBoolean(toAddress,'mailinglist'):
                    try:
                        mailingListName = config.get(toAddress, 'mailinglistname')
                    except:
                        mailingListName = ''
                    #Let us send out this message as a broadcast
                    subject = self.addMailingListNameToSubject(subject,mailingListName)
                    #Let us now send this message out as a broadcast
                    message = 'Message ostensibly from ' + fromAddress + ':\n\n' + body
                    fromAddress = toAddress #The fromAddress for the broadcast is the toAddress (my address) for the msg message we are currently processing.
                    ackdata = OpenSSL.rand(32) #We don't actually need the ackdata for acknowledgement since this is a broadcast message but we can use it to update the user interface when the POW is done generating.
                    toAddress = '[Broadcast subscribers]'
                    ripe = ''
                    sqlLock.acquire()
                    t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'broadcastpending',1,1,'sent')
                    sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
                    sqlSubmitQueue.put(t)
                    sqlReturnQueue.get()
                    sqlLock.release()
                    self.emit(SIGNAL("displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),toAddress,'[Broadcast subscribers]',fromAddress,subject,message,ackdata)
                    workerQueue.put(('sendbroadcast',(fromAddress,subject,message)))
            #Now let's consider sending the acknowledgement. We'll need to make sure that our client will properly process the ackData; if the packet is malformed, we could clear out self.data and an attacker could use that behavior to determine that we were capable of decoding this message.
            ackDataValidThusFar = True
            if len(ackData) < 24:
                print 'The length of ackData is unreasonably short. Not sending ackData.'
                ackDataValidThusFar = False
            elif ackData[0:4] != '\xe9\xbe\xb4\xd9':
                print 'Ackdata magic bytes were wrong. Not sending ackData.'
                ackDataValidThusFar = False
            if ackDataValidThusFar:
                ackDataPayloadLength, = unpack('>L',ackData[16:20])
                if len(ackData)-24 != ackDataPayloadLength:
                    print 'ackData payload length doesn\'t match the payload length specified in the header. Not sending ackdata.'
                    ackDataValidThusFar = False
            if ackDataValidThusFar:
                print 'ackData is valid. Will process it.'
                self.ackDataThatWeHaveYetToSend.append(ackData) #When we have processed all data, the processData function will pop the ackData out and process it as if it is a message received from our peer.
            #Display timing data
            timeRequiredToAttemptToDecryptMessage = time.time()- self.messageProcessingStartTime
            successfullyDecryptMessageTimings.append(timeRequiredToAttemptToDecryptMessage)
            # NOTE(review): 'sum' shadows the builtin; local-only, but a rename
            # would be cleaner.
            sum = 0
            for item in successfullyDecryptMessageTimings:
                sum += item
            printLock.acquire()
            print 'Time to decrypt this message successfully:', timeRequiredToAttemptToDecryptMessage
            print 'Average time for all message decryption successes since startup:', sum / len(successfullyDecryptMessageTimings)
            printLock.release()
def addMailingListNameToSubject(self,subject,mailingListName):
subject = subject.strip()
if subject[:3] == 'Re:' or subject[:3] == 'RE:':
subject = subject[3:].strip()
if '['+mailingListName+']' in subject:
return subject
else:
return '['+mailingListName+'] ' + subject
#We have received a pubkey
    def recpubkey(self):
        """Handle a received pubkey object.

        Validates minimum length, proof of work, embedded time and stream
        number; deduplicates against the in-memory and SQL inventories; then
        stores the object, advertises it to peers, and hands it to
        processpubkey.  Finally pads total processing time up to ~0.2 s as a
        timing-attack mitigation.
        """
        self.pubkeyProcessingStartTime = time.time()
        if self.payloadLength < 146: #sanity check
            return
        #We must check to make sure the proof of work is sufficient.
        if not self.isProofOfWorkSufficient():
            print 'Proof of work in pubkey message insufficient.'
            return
        readPosition = 24 #for the message header
        readPosition += 8 #for the nonce
        embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
        if embeddedTime < int(time.time())-lengthOfTimeToHoldOnToAllPubkeys-86400: #If the pubkey is more than a month old then reject it. (the 86400 is included to give an extra day of wiggle-room. If the wiggle-room is actually of any use, everyone on the network will delete this pubkey from their database the next time the cleanerThread cleans anyway- except for the node that actually wants the pubkey.)
            printLock.acquire()
            print 'The embedded time in this pubkey message is too old. Ignoring. Embedded time is:', embeddedTime
            printLock.release()
            return
        if embeddedTime > int(time.time()) + 10800:
            # 10800 s = 3 hours of allowed clock skew into the future.
            printLock.acquire()
            print 'The embedded time in this pubkey message more than several hours in the future. This is irrational. Ignoring message.'
            printLock.release()
            return
        readPosition += 4 #for the time
        addressVersion, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
        readPosition += varintLength
        streamNumber, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
        readPosition += varintLength
        if self.streamNumber != streamNumber:
            print 'stream number embedded in this pubkey doesn\'t match our stream number. Ignoring.'
            return
        inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
        inventoryLock.acquire()
        if inventoryHash in inventory:
            print 'We have already received this pubkey. Ignoring it.'
            inventoryLock.release()
            return
        elif isInSqlInventory(inventoryHash):
            print 'We have already received this pubkey (it is stored on disk in the SQL inventory). Ignoring it.'
            inventoryLock.release()
            return
        objectType = 'pubkey'
        inventory[inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], int(time.time()))
        inventoryLock.release()
        self.broadcastinv(inventoryHash)
        self.emit(SIGNAL("incrementNumberOfPubkeysProcessed()"))
        self.processpubkey()
        # Sleep so that every pubkey takes a constant amount of wall-clock
        # time to process (timing-attack mitigation).
        lengthOfTimeWeShouldUseToProcessThisMessage = .2
        sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.pubkeyProcessingStartTime)
        if sleepTime > 0:
            #printLock.acquire()
            #print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
            #printLock.release()
            time.sleep(sleepTime)
        #printLock.acquire()
        #print 'Total pubkey processing time:', time.time()- self.pubkeyProcessingStartTime, 'seconds.'
        #printLock.release()
    def processpubkey(self):
        """Parse a pubkey object already accepted by recpubkey and record it.

        Only address version 2 is handled: reads the behavior bitfield and
        the two 64-byte public keys, derives the address hash as
        RIPEMD160(SHA512(pubkeys)), then inserts the pubkey row (marking it
        'usedpersonally' if we already had it marked so) and notifies the
        worker thread in case we were waiting on this key to send a message.
        """
        readPosition = 24 #for the message header
        readPosition += 8 #for the nonce
        embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
        readPosition += 4 #for the time
        addressVersion, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
        readPosition += varintLength
        streamNumber, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
        readPosition += varintLength
        if addressVersion == 0:
            print '(Within processpubkey) addressVersion of 0 doesn\'t make sense.'
            return
        if addressVersion >= 3 or addressVersion == 1:
            printLock.acquire()
            print 'This version of Bitmessage cannot handle version', addressVersion,'addresses.'
            printLock.release()
            return
        if addressVersion == 2:
            if self.payloadLength < 146: #sanity check. This is the minimum possible length.
                print 'payloadLength less than 146. Sanity check failed.'
                return
            bitfieldBehaviors = self.data[readPosition:readPosition+4]
            readPosition += 4
            publicSigningKey = self.data[readPosition:readPosition+64]
            #Is it possible for a public key to be invalid such that trying to encrypt or sign with it will cause an error? If it is, we should probably test these keys here.
            readPosition += 64
            publicEncryptionKey = self.data[readPosition:readPosition+64]
            if len(publicEncryptionKey) < 64:
                print 'publicEncryptionKey length less than 64. Sanity check failed.'
                return
            # Address hash = RIPEMD160(SHA512(0x04||signKey || 0x04||encKey)).
            sha = hashlib.new('sha512')
            sha.update('\x04'+publicSigningKey+'\x04'+publicEncryptionKey)
            ripeHasher = hashlib.new('ripemd160')
            ripeHasher.update(sha.digest())
            ripe = ripeHasher.digest()
            printLock.acquire()
            print 'within recpubkey, addressVersion:', addressVersion, ', streamNumber:', streamNumber
            print 'ripe', ripe.encode('hex')
            print 'publicSigningKey in hex:', publicSigningKey.encode('hex')
            print 'publicEncryptionKey in hex:', publicEncryptionKey.encode('hex')
            printLock.release()
            t = (ripe,)
            sqlLock.acquire()
            sqlSubmitQueue.put('''SELECT usedpersonally FROM pubkeys WHERE hash=? AND usedpersonally='yes' ''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn != []: #if this pubkey is already in our database and if we have used it personally:
                print 'We HAVE used this pubkey personally. Updating time.'
                t = (ripe,True,self.data[24:24+self.payloadLength],embeddedTime,'yes')
                sqlLock.acquire()
                sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                printLock.acquire()
                # NOTE(review): acquire/release with nothing in between looks
                # like a leftover from a removed print statement.
                printLock.release()
                workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
            else:
                print 'We have NOT used this pubkey personally. Inserting in database.'
                t = (ripe,True,self.data[24:24+self.payloadLength],embeddedTime,'no') #This will also update the embeddedTime.
                sqlLock.acquire()
                sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                printLock.acquire()
                printLock.release()
                workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
        #This code which deals with old RSA addresses will soon be removed.
        """elif addressVersion == 1:
            nLength, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
            readPosition += varintLength
            nString = self.data[readPosition:readPosition+nLength]
            readPosition += nLength
            eLength, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
            readPosition += varintLength
            eString = self.data[readPosition:readPosition+eLength]
            readPosition += eLength
            sha = hashlib.new('sha512')
            sha.update(nString+eString)
            ripeHasher = hashlib.new('ripemd160')
            ripeHasher.update(sha.digest())
            ripe = ripeHasher.digest()
            print 'within recpubkey, addressVersion', addressVersion
            print 'streamNumber', streamNumber
            print 'ripe', repr(ripe)
            print 'n=', convertStringToInt(nString)
            print 'e=', convertStringToInt(eString)
            t = (ripe,)
            sqlLock.acquire()
            sqlSubmitQueue.put('''SELECT usedpersonally FROM pubkeys WHERE hash=? AND usedpersonally='yes' ''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn != []: #if this pubkey is already in our database and if we have used it personally:
                print 'We HAVE used this pubkey personally. Updating time.'
                t = (ripe,True,self.data[24:24+self.payloadLength],int(time.time()),'yes')
                sqlLock.acquire()
                sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                printLock.acquire()
                print 'added foreign pubkey into our database'
                printLock.release()
                workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
            else:
                print 'We have NOT used this pubkey personally. Inserting in database.'
                t = (ripe,True,self.data[24:24+self.payloadLength],int(time.time()),'no')
                sqlLock.acquire()
                sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                printLock.acquire()
                print 'added foreign pubkey into our database'
                printLock.release()
                workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))"""
#We have received a getpubkey message
    def recgetpubkey(self):
        """Handle a received getpubkey request.

        After POW, time and stream validation plus inventory deduplication,
        the request is forwarded to peers.  If the requested hash is in our
        pubkeys table we re-advertise the stored pubkey; if the hash is one
        of our own addresses we ask the worker thread to do the POW for our
        pubkey and broadcast it.
        """
        if not self.isProofOfWorkSufficient():
            print 'Proof of work in getpubkey message insufficient.'
            return
        embeddedTime, = unpack('>I',self.data[32:36])
        if embeddedTime > int(time.time())+10800:
            print 'The time in this getpubkey message is too new. Ignoring it. Time:', embeddedTime
            return
        if embeddedTime < int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept:
            print 'The time in this getpubkey message is too old. Ignoring it. Time:', embeddedTime
            return
        addressVersionNumber, addressVersionLength = decodeVarint(self.data[36:42])
        streamNumber, streamNumberLength = decodeVarint(self.data[36+addressVersionLength:42+addressVersionLength])
        if streamNumber <> self.streamNumber:
            print 'The streamNumber', streamNumber, 'doesn\'t match our stream number:', self.streamNumber
            return
        inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
        inventoryLock.acquire()
        if inventoryHash in inventory:
            print 'We have already received this getpubkey request. Ignoring it.'
            inventoryLock.release()
            return
        elif isInSqlInventory(inventoryHash):
            print 'We have already received this getpubkey request (it is stored on disk in the SQL inventory). Ignoring it.'
            inventoryLock.release()
            return
        # Record that this peer already has the object so we don't advertise
        # it back to them.
        self.objectsOfWhichThisRemoteNodeIsAlreadyAware[inventoryHash] = 0
        objectType = 'getpubkey'
        inventory[inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
        inventoryLock.release()
        #This getpubkey request is valid so far. Forward to peers.
        self.broadcastinv(inventoryHash)
        if addressVersionNumber == 0:
            print 'The addressVersionNumber of the pubkey request is zero. That doesn\'t make any sense. Ignoring it.'
            return
        elif addressVersionNumber == 1:
            print 'The addressVersionNumber of the pubkey request is 1 which isn\'t supported anymore. Ignoring it.'
            return
        elif addressVersionNumber > 2:
            print 'The addressVersionNumber of the pubkey request is too high. Can\'t understand. Ignoring it.'
            return
        print 'the hash requested in this getpubkey request is:', self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength].encode('hex')
        sqlLock.acquire()
        t = (self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength],int(time.time())-lengthOfTimeToHoldOnToAllPubkeys) #this prevents SQL injection
        sqlSubmitQueue.put('''SELECT hash, transmitdata, time FROM pubkeys WHERE hash=? AND havecorrectnonce=1 AND time>?''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        if queryreturn != []:
            for row in queryreturn:
                hash, payload, timeEncodedInPubkey = row
                printLock.acquire()
                print 'We have the requested pubkey stored in our database of pubkeys. Sending it.'
                printLock.release()
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'pubkey'
                inventory[inventoryHash] = (objectType, self.streamNumber, payload, timeEncodedInPubkey)#If the time embedded in this pubkey is more than 3 days old then this object isn't going to last very long in the inventory- the cleanerThread is going to come along and move it from the inventory in memory to the SQL inventory and then delete it from the SQL inventory. It should still find its way back to the original requestor if he is online however.
                self.broadcastinv(inventoryHash)
        else: #the pubkey is not in our database of pubkeys. Let's check if the requested key is ours (which would mean we should do the POW, put it in the pubkey table, and broadcast out the pubkey.)
            if self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength] in myECAddressHashes: #if this address hash is one of mine
                printLock.acquire()
                print 'Found getpubkey-requested-hash in my list of EC hashes. Telling Worker thread to do the POW for a pubkey message and send it out.'
                printLock.release()
                myAddress = encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength])
                workerQueue.put(('doPOWForMyV2Pubkey',myAddress))
                #This code which deals with old RSA addresses will soon be removed.
                """elif self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength] in myRSAAddressHashes:
                print 'Found getpubkey requested hash in my list of RSA hashes.'
                payload = '\x00\x00\x00\x01' #bitfield of features supported by me (see the wiki).
                payload += self.data[36:36+addressVersionLength+streamNumberLength]
                #print int(config.get(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'n'))
                nString = convertIntToString(int(config.get(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'n')))
                eString = convertIntToString(config.getint(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'e'))
                payload += encodeVarint(len(nString))
                payload += nString
                payload += encodeVarint(len(eString))
                payload += eString
                nonce = 0
                trialValue = 99999999999999999999
                target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
                print '(For pubkey message) Doing proof of work...'
                initialHash = hashlib.sha512(payload).digest()
                while trialValue > target:
                    nonce += 1
                    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
                print '(For pubkey message) Found proof of work', trialValue, 'Nonce:', nonce
                payload = pack('>Q',nonce) + payload
                t = (self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength],True,payload,int(time.time())+1209600) #after two weeks (1,209,600 seconds), we may remove our own pub key from our database. It will be regenerated and put back in the database if it is requested.
                sqlLock.acquire()
                #** pubkeys insert query not yet fixed! **
                sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?)''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'pubkey'
                inventory[inventoryHash] = (objectType, self.streamNumber, payload, int(time.time()))
                self.broadcastinv(inventoryHash) """
            else:
                printLock.acquire()
                print 'This getpubkey request is not for any of my keys.'
                printLock.release()
#We have received an inv message
    def recinv(self):
        """Handle a received inv message.

        For a single-item inv, request the object from this peer right away
        if we don't already hold it.  For multi-item invs, only record the
        hashes; per the inline comment, objects are later requested randomly
        from each peer so overlapping invs from several peers produce
        different getdata requests.
        """
        numberOfItemsInInv, lengthOfVarint = decodeVarint(self.data[24:34])
        if numberOfItemsInInv == 1: #we'll just request this data from the person who advertised the object.
            for i in range(numberOfItemsInInv):
                if len(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]) == 32: #The length of an inventory hash should be 32. If it isn't 32 then the remote node is either badly programmed or behaving nefariously.
                    self.objectsOfWhichThisRemoteNodeIsAlreadyAware[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
                    if self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)] in inventory:
                        printLock.acquire()
                        print 'Inventory (in memory) has inventory item already.'
                        printLock.release()
                    elif isInSqlInventory(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]):
                        print 'Inventory (SQL on disk) has inventory item already.'
                    else:
                        self.sendgetdata(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)])
        else:
            print 'inv message lists', numberOfItemsInInv, 'objects.'
            for i in range(numberOfItemsInInv): #upon finishing dealing with an incoming message, the receiveDataThread will request a random object from the peer. This way if we get multiple inv messages from multiple peers which list mostly the same objects, we will make getdata requests for different random objects from the various peers.
                if len(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]) == 32: #The length of an inventory hash should be 32. If it isn't 32 then the remote node is either badly programmed or behaving nefariously.
                    self.objectsOfWhichThisRemoteNodeIsAlreadyAware[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
                    self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
#Send a getdata message to our peer to request the object with the given hash
    def sendgetdata(self,hash):
        """Send a getdata message to this peer requesting one object by hash.

        The payload is a varint item count of 1 followed by the 32-byte
        inventory hash; the header carries the network magic, the padded
        command name, the payload length, and a 4-byte SHA-512 checksum.
        """
        print 'sending getdata to retrieve object with hash:', hash.encode('hex')
        payload = '\x01' + hash
        headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
        headerData += 'getdata\x00\x00\x00\x00\x00'
        headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
        headerData += hashlib.sha512(payload).digest()[:4]
        try:
            self.sock.send(headerData + payload)
        except Exception, err:
            # NOTE(review): 'in err' tests membership against the exception
            # object itself (its args, in Python 2), not against str(err) —
            # confirm this is intended.
            if not 'Bad file descriptor' in err:
                printLock.acquire()
                sys.stderr.write('sock.send error: %s\n' % err)
                printLock.release()
#We have received a getdata request from our peer
    def recgetdata(self):
        """Handle a getdata request from our peer.

        For each requested hash, serve the object from the in-memory
        inventory first, then from the SQL inventory; otherwise log that the
        object is unknown.  The blanket except deliberately ignores malformed
        requests (see inline comment).
        """
        value, lengthOfVarint = decodeVarint(self.data[24:34])
        #print 'Number of items in getdata request:', value
        try:
            for i in xrange(value):
                hash = self.data[24+lengthOfVarint+(i*32):56+lengthOfVarint+(i*32)]
                printLock.acquire()
                print 'received getdata request for item:', hash.encode('hex')
                printLock.release()
                #print 'inventory is', inventory
                if hash in inventory:
                    objectType, streamNumber, payload, receivedTime = inventory[hash]
                    self.sendData(objectType,payload)
                else:
                    t = (hash,)
                    sqlLock.acquire()
                    sqlSubmitQueue.put('''select objecttype, payload from inventory where hash=?''')
                    sqlSubmitQueue.put(t)
                    queryreturn = sqlReturnQueue.get()
                    sqlLock.release()
                    if queryreturn <> []:
                        for row in queryreturn:
                            objectType, payload = row
                            self.sendData(objectType,payload)
                    else:
                        print 'Someone asked for an object with a getdata which is not in either our memory inventory or our SQL inventory. That shouldn\'t have happened.'
        except:
            pass #someone is probably trying to cause a program error by, for example, making a request for 10 items but only including the hashes for 5.
#Our peer has requested (in a getdata message) that we send an object.
def sendData(self,objectType,payload):
if objectType == 'pubkey':
print 'sending pubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'pubkey\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'getpubkey':
print 'sending getpubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getpubkey\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'msg':
print 'sending msg'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'broadcast':
print 'sending broadcast'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'broadcast\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'getpubkey' or objectType == 'pubkeyrequest':
print 'sending getpubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getpubkey\x00\x00\x00' #version command
headerData += pack('>L',len(payload)) #payload length
headerData += hashlib.sha512(payload).digest()[0:4]
self.sock.send(headerData + payload)
else:
sys.stderr.write('Error: sendData has been asked to send a strange objectType: %s\n' % str(objectType))
    #Send an inv message with just one hash to all of our peers
    def broadcastinv(self,hash):
        """Queue a 'sendinv' command so that every sendDataThread in our stream advertises *hash* to its peer."""
        printLock.acquire()
        print 'broadcasting inv with hash:', hash.encode('hex')
        printLock.release()
        broadcastToSendDataQueues((self.streamNumber, 'sendinv', hash))
    #We have received an addr message.
    def recaddr(self):
        """Handle an incoming 'addr' message.

        Each advertised address record is 34 bytes: time (4), stream (4),
        services (8), IPv4-mapped-IPv6 address (16) and port (2). New,
        recent-enough nodes are added to knownNodes, the knownNodes file is
        rewritten, and the new nodes are re-broadcast to our own peers.
        """
        listOfAddressDetailsToBroadcastToPeers = []
        numberOfAddressesIncluded = 0
        numberOfAddressesIncluded, lengthOfNumberOfAddresses = decodeVarint(self.data[24:29])
        if verbose >= 1:
            printLock.acquire()
            print 'addr message contains', numberOfAddressesIncluded, 'IP addresses.'
            printLock.release()
        #print 'lengthOfNumberOfAddresses', lengthOfNumberOfAddresses
        #Sanity-check the advertised count before trusting any of the offsets below.
        if numberOfAddressesIncluded > 1000 or numberOfAddressesIncluded == 0:
            return
        if self.payloadLength < lengthOfNumberOfAddresses + (34 * numberOfAddressesIncluded):
            print 'addr message does not contain enough data. Ignoring.'
            return
        needToWriteKnownNodesToDisk = False
        for i in range(0,numberOfAddressesIncluded):
            try:
                #Only IPv4-mapped-IPv6 addresses (00..00 FF FF prefix) are supported.
                if self.data[40+lengthOfNumberOfAddresses+(34*i):52+lengthOfNumberOfAddresses+(34*i)] != '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
                    printLock.acquire()
                    print 'Skipping IPv6 address.', repr(self.data[40+lengthOfNumberOfAddresses+(34*i):56+lengthOfNumberOfAddresses+(34*i)])
                    printLock.release()
                    continue
                #print repr(self.data[6+lengthOfNumberOfAddresses+(34*i):18+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (to test for an IPv6 address). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            try:
                recaddrStream, = unpack('>I',self.data[28+lengthOfNumberOfAddresses+(34*i):32+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrStream). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            if recaddrStream == 0:
                continue
            if recaddrStream != self.streamNumber and recaddrStream != (self.streamNumber * 2) and recaddrStream != ((self.streamNumber * 2) + 1): #if the embedded stream number is not in my stream or either of my child streams then ignore it. Someone might be trying funny business.
                continue
            try:
                recaddrServices, = unpack('>Q',self.data[32+lengthOfNumberOfAddresses+(34*i):40+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrServices). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            try:
                recaddrPort, = unpack('>H',self.data[56+lengthOfNumberOfAddresses+(34*i):58+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrPort). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            #print 'Within recaddr(): IP', recaddrIP, ', Port', recaddrPort, ', i', i
            hostFromAddrMessage = socket.inet_ntoa(self.data[52+lengthOfNumberOfAddresses+(34*i):56+lengthOfNumberOfAddresses+(34*i)])
            #print 'hostFromAddrMessage', hostFromAddrMessage
            #'\x7F' is the first byte of 127.x.x.x addresses.
            if self.data[52+lengthOfNumberOfAddresses+(34*i)] == '\x7F':
                print 'Ignoring IP address in loopback range:', hostFromAddrMessage
                continue
            timeSomeoneElseReceivedMessageFromThisNode, = unpack('>I',self.data[24+lengthOfNumberOfAddresses+(34*i):28+lengthOfNumberOfAddresses+(34*i)]) #This is the 'time' value in the received addr message.
            if recaddrStream not in knownNodes: #knownNodes is a dictionary of dictionaries with one outer dictionary for each stream. If the outer stream dictionary doesn't exist yet then we must make it.
                knownNodes[recaddrStream] = {}
            if hostFromAddrMessage not in knownNodes[recaddrStream]:
                if len(knownNodes[recaddrStream]) < 20000 and timeSomeoneElseReceivedMessageFromThisNode > (int(time.time())-10800) and timeSomeoneElseReceivedMessageFromThisNode < (int(time.time()) + 10800): #If we have more than 20000 nodes in our list already then just forget about adding more. Also, make sure that the time that someone else received a message from this node is within three hours from now.
                    knownNodes[recaddrStream][hostFromAddrMessage] = (recaddrPort, timeSomeoneElseReceivedMessageFromThisNode)
                    print 'added new node', hostFromAddrMessage, 'to knownNodes in stream', recaddrStream
                    needToWriteKnownNodesToDisk = True
                    hostDetails = (timeSomeoneElseReceivedMessageFromThisNode, recaddrStream, recaddrServices, hostFromAddrMessage, recaddrPort)
                    listOfAddressDetailsToBroadcastToPeers.append(hostDetails)
            else:
                PORT, timeLastReceivedMessageFromThisNode = knownNodes[recaddrStream][hostFromAddrMessage]#PORT in this case is either the port we used to connect to the remote node, or the port that was specified by someone else in a past addr message.
                if (timeLastReceivedMessageFromThisNode < timeSomeoneElseReceivedMessageFromThisNode) and (timeSomeoneElseReceivedMessageFromThisNode < int(time.time())):
                    knownNodes[recaddrStream][hostFromAddrMessage] = (PORT, timeSomeoneElseReceivedMessageFromThisNode)
                    if PORT != recaddrPort:
                        print 'Strange occurance: The port specified in an addr message', str(recaddrPort),'does not match the port',str(PORT),'that this program (or some other peer) used to connect to it',str(hostFromAddrMessage),'. Perhaps they changed their port or are using a strange NAT configuration.'
        if needToWriteKnownNodesToDisk: #Runs if any nodes were new to us. Also, share those nodes with our peers.
            output = open(appdata + 'knownnodes.dat', 'wb')
            pickle.dump(knownNodes, output)
            output.close()
            self.broadcastaddr(listOfAddressDetailsToBroadcastToPeers)
        printLock.acquire()
        print 'knownNodes currently has', len(knownNodes[self.streamNumber]), 'nodes for this stream.'
        printLock.release()
    #Function runs when we want to broadcast an addr message to all of our peers. Runs when we learn of nodes that we didn't previously know about and want to share them with our peers.
    def broadcastaddr(self,listOfAddressDetailsToBroadcastToPeers):
        """Build an addr message from the given (time, stream, services, host, port) tuples and queue it for every sendDataThread in our stream."""
        numberOfAddressesInAddrMessage = len(listOfAddressDetailsToBroadcastToPeers)
        payload = ''
        for hostDetails in listOfAddressDetailsToBroadcastToPeers:
            timeLastReceivedMessageFromThisNode, streamNumber, services, host, port = hostDetails
            payload += pack('>I',timeLastReceivedMessageFromThisNode)
            payload += pack('>I',streamNumber)
            payload += pack('>q',services) #service bit flags offered by this node
            payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(host) #IPv4-mapped IPv6 address
            payload += pack('>H',port)#remote port
        payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
        datatosend = '\xE9\xBE\xB4\xD9addr\x00\x00\x00\x00\x00\x00\x00\x00' #magic bits + 'addr' command null-padded to 12 bytes
        datatosend = datatosend + pack('>L',len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4] #checksum
        datatosend = datatosend + payload
        if verbose >= 2:
            printLock.acquire()
            print 'Broadcasting addr with', numberOfAddressesInAddrMessage, 'entries.'
            printLock.release()
        broadcastToSendDataQueues((self.streamNumber, 'sendaddr', datatosend))
#Send a big addr message to our peer
def sendaddr(self):
addrsInMyStream = {}
addrsInChildStreamLeft = {}
addrsInChildStreamRight = {}
#print 'knownNodes', knownNodes
#We are going to share a maximum number of 1000 addrs with our peer. 500 from this stream, 250 from the left child stream, and 250 from the right child stream.
if len(knownNodes[self.streamNumber]) > 0:
for i in range(500):
random.seed()
HOST, = random.sample(knownNodes[self.streamNumber], 1)
addrsInMyStream[HOST] = knownNodes[self.streamNumber][HOST]
if len(knownNodes[self.streamNumber*2]) > 0:
for i in range(250):
random.seed()
HOST, = random.sample(knownNodes[self.streamNumber*2], 1)
addrsInChildStreamLeft[HOST] = knownNodes[self.streamNumber*2][HOST]
if len(knownNodes[(self.streamNumber*2)+1]) > 0:
for i in range(250):
random.seed()
HOST, = random.sample(knownNodes[(self.streamNumber*2)+1], 1)
addrsInChildStreamRight[HOST] = knownNodes[(self.streamNumber*2)+1][HOST]
numberOfAddressesInAddrMessage = 0
payload = ''
#print 'addrsInMyStream.items()', addrsInMyStream.items()
for HOST, value in addrsInMyStream.items():
PORT, timeLastReceivedMessageFromThisNode = value
if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
numberOfAddressesInAddrMessage += 1
payload += pack('>I',timeLastReceivedMessageFromThisNode)
payload += pack('>I',self.streamNumber)
payload += pack('>q',1) #service bit flags offered by this node
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
payload += pack('>H',PORT)#remote port
for HOST, value in addrsInChildStreamLeft.items():
PORT, timeLastReceivedMessageFromThisNode = value
if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
numberOfAddressesInAddrMessage += 1
payload += pack('>I',timeLastReceivedMessageFromThisNode)
payload += pack('>I',self.streamNumber*2)
payload += pack('>q',1) #service bit flags offered by this node
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
payload += pack('>H',PORT)#remote port
for HOST, value in addrsInChildStreamRight.items():
PORT, timeLastReceivedMessageFromThisNode = value
if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
numberOfAddressesInAddrMessage += 1
payload += pack('>I',timeLastReceivedMessageFromThisNode)
payload += pack('>I',(self.streamNumber*2)+1)
payload += pack('>q',1) #service bit flags offered by this node
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
payload += pack('>H',PORT)#remote port
payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
datatosend = '\xE9\xBE\xB4\xD9addr\x00\x00\x00\x00\x00\x00\x00\x00'
datatosend = datatosend + pack('>L',len(payload)) #payload length
datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
datatosend = datatosend + payload
if verbose >= 2:
printLock.acquire()
print 'Sending addr with', numberOfAddressesInAddrMessage, 'entries.'
printLock.release()
self.sock.send(datatosend)
    #We have received a version message
    def recversion(self):
        """Handle an incoming 'version' message.

        Parses the remote protocol version, our external IP as seen by the
        peer, the remote node's incoming port, useragent and stream number.
        Disconnects if the peer is in a stream other than 1 or if the
        connection turns out to be to ourselves; otherwise records the peer
        in knownNodes, saves knownNodes to disk and replies with verack
        (plus our own version message for incoming connections).
        """
        if self.payloadLength < 83:
            #This version message is unreasonably short. Forget it.
            return
        elif not self.verackSent: #There is a potential exploit if we don't check to make sure that we have not already received and accepted a version message: An attacker could connect directly to us, send a msg message with the ackdata set to an invalid version message which would cause us to close the connection to the attacker thus proving that we were able to decode the message. Checking the connectionIsOrWasFullyEstablished variable would also suffice.
            self.remoteProtocolVersion, = unpack('>L',self.data[24:28])
            #print 'remoteProtocolVersion', self.remoteProtocolVersion
            self.myExternalIP = socket.inet_ntoa(self.data[64:68])
            #print 'myExternalIP', self.myExternalIP
            self.remoteNodeIncomingPort, = unpack('>H',self.data[94:96])
            #print 'remoteNodeIncomingPort', self.remoteNodeIncomingPort
            #print 'self.data[96:104]', repr(self.data[96:104])
            #print 'eightBytesOfRandomDataUsedToDetectConnectionsToSelf', repr(eightBytesOfRandomDataUsedToDetectConnectionsToSelf)
            useragentLength, lengthOfUseragentVarint = decodeVarint(self.data[104:108])
            readPosition = 104 + lengthOfUseragentVarint
            useragent = self.data[readPosition:readPosition+useragentLength]
            readPosition += useragentLength
            numberOfStreamsInVersionMessage, lengthOfNumberOfStreamsInVersionMessage = decodeVarint(self.data[readPosition:])
            readPosition += lengthOfNumberOfStreamsInVersionMessage
            self.streamNumber, lengthOfRemoteStreamNumber = decodeVarint(self.data[readPosition:])
            printLock.acquire()
            print 'Remote node useragent:', useragent, ' stream number:', self.streamNumber
            printLock.release()
            #Only stream 1 is supported; drop peers interested in any other stream.
            if self.streamNumber != 1:
                self.sock.close()
                printLock.acquire()
                print 'Closed connection to', self.HOST, 'because they are interested in stream', self.streamNumber,'.'
                printLock.release()
                self.data = ''
                return
            #If this was an incoming connection, then the sendData thread doesn't know the stream. We have to set it.
            if not self.initiatedConnection:
                broadcastToSendDataQueues((0,'setStreamNumber',(self.HOST,self.streamNumber)))
            #If the peer echoes our own random nonce back, we connected to ourselves.
            if self.data[96:104] == eightBytesOfRandomDataUsedToDetectConnectionsToSelf:
                self.sock.close()
                printLock.acquire()
                print 'Closing connection to myself: ', self.HOST
                printLock.release()
                self.data = ''
                return
            knownNodes[self.streamNumber][self.HOST] = (self.remoteNodeIncomingPort, int(time.time()))
            output = open(appdata + 'knownnodes.dat', 'wb')
            pickle.dump(knownNodes, output)
            output.close()
            #I've commented out this code because it should be up to the newer node to decide whether their protocol version is incompatiable with the remote node's version.
            '''if self.remoteProtocolVersion > 1:
                print 'The remote node''s protocol version is too new for this program to understand. Disconnecting. It is:', self.remoteProtocolVersion
                self.sock.close()
                self.selfInitiatedConnectionList.remove(self)
            else:'''
            self.sendverack()
            if self.initiatedConnection == False:
                self.sendversion()
    #Sends a version message
    def sendversion(self):
        """Build and send our 'version' message to the peer.

        Note: sendDataThread.sendVersionMessage() contains a near-copy of
        this payload-building code; keep the two in sync.
        """
        global softwareVersion
        payload = ''
        payload += pack('>L',1) #protocol version.
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += pack('>q',int(time.time()))
        payload += pack('>q',1) #boolservices offered by the remote node. This data is ignored by the remote host because how could We know what Their services are without them telling us?
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(self.HOST)
        payload += pack('>H',self.PORT)#remote IPv6 and port
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L',2130706433) # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
        payload += pack('>H',config.getint('bitmessagesettings', 'port'))#my external IPv6 and port
        random.seed()
        payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf #nonce; lets a peer (or ourselves) detect a connection to self
        userAgent = '/PyBitmessage:' + softwareVersion + '/' #Length of userAgent must be less than 253.
        payload += pack('>B',len(userAgent)) #user agent string length. If the user agent is more than 252 bytes long, this code isn't going to work.
        payload += userAgent
        payload += encodeVarint(1) #The number of streams about which I care. PyBitmessage currently only supports 1.
        payload += encodeVarint(self.streamNumber)
        datatosend = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
        datatosend = datatosend + 'version\x00\x00\x00\x00\x00' #version command
        datatosend = datatosend + pack('>L',len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload
        printLock.acquire()
        print 'Sending version message'
        printLock.release()
        self.sock.send(datatosend)
        #self.versionSent = 1
    #Sends a verack message
    def sendverack(self):
        """Send a hard-coded verack message; if we have also received the peer's verack, finish establishing the connection."""
        printLock.acquire()
        print 'Sending verack'
        printLock.release()
        #Hard-coded bytes: magic, 'verack' command null-padded to 12 bytes,
        #zero payload length, and the 4-byte checksum of an empty payload.
        self.sock.sendall('\xE9\xBE\xB4\xD9\x76\x65\x72\x61\x63\x6B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
        #cf 83 e1 35
        self.verackSent = True
        if self.verackReceived == True:
            self.connectionFullyEstablished()
#Every connection to a peer has a sendDataThread (and also a receiveDataThread).
class sendDataThread(QThread):
    """Thread that owns the sending half of one peer connection.

    Commands arrive on self.mailbox as (stream, command, data) tuples,
    placed there by broadcastToSendDataQueues(); the mailbox is registered
    in the global sendDataQueues list so broadcasts reach every thread.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        self.mailbox = Queue.Queue() #command queue for this connection; registered globally below
        sendDataQueues.append(self.mailbox)
        self.data = ''
    def setup(self,sock,HOST,PORT,streamNumber,objectsOfWhichThisRemoteNodeIsAlreadyAware):
        """Attach this thread to an already-connected socket and record the peer's details."""
        self.sock = sock
        self.HOST = HOST
        self.PORT = PORT
        self.streamNumber = streamNumber
        self.lastTimeISentData = int(time.time()) #If this value increases beyond five minutes ago, we'll send a pong message to keep the connection alive.
        self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware
        printLock.acquire()
        print 'The streamNumber of this sendDataThread (ID:', id(self),') at setup() is', self.streamNumber
        printLock.release()
    def sendVersionMessage(self):
        """Build and send our 'version' message over this thread's socket."""
        #Note that there is another copy of this version-sending code in the receiveData class which would need to be changed if you make changes here.
        global softwareVersion
        payload = ''
        payload += pack('>L',1) #protocol version.
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += pack('>q',int(time.time()))
        payload += pack('>q',1) #boolservices of remote connection. How can I even know this for sure? This is probably ignored by the remote host.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(self.HOST)
        payload += pack('>H',self.PORT)#remote IPv6 and port
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L',2130706433) # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
        payload += pack('>H',config.getint('bitmessagesettings', 'port'))#my external IPv6 and port
        random.seed()
        payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf #nonce; lets a peer (or ourselves) detect a connection to self
        userAgent = '/PyBitmessage:' + softwareVersion + '/' #Length of userAgent must be less than 253.
        payload += pack('>B',len(userAgent)) #user agent string length. If the user agent is more than 252 bytes long, this code isn't going to work.
        payload += userAgent
        payload += encodeVarint(1) #The number of streams about which I care. PyBitmessage currently only supports 1 per connection.
        payload += encodeVarint(self.streamNumber)
        datatosend = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
        datatosend = datatosend + 'version\x00\x00\x00\x00\x00' #version command
        datatosend = datatosend + pack('>L',len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload
        printLock.acquire()
        print 'Sending version packet: ', repr(datatosend)
        printLock.release()
        self.sock.send(datatosend)
        self.versionSent = 1
    def run(self):
        """Main loop: pull (stream, command, data) tuples off the mailbox and act on them.

        Handles 'shutdown', 'setStreamNumber', 'sendaddr', 'sendinv' and
        'pong' commands. A destination stream of 0 is a wildcard addressed
        to every sendDataThread. The loop exits (and the thread ends) on
        shutdown or on any socket send failure.
        """
        while True:
            deststream,command,data = self.mailbox.get()
            #printLock.acquire()
            #print 'sendDataThread, destream:', deststream, ', Command:', command, ', ID:',id(self), ', HOST:', self.HOST
            #printLock.release()
            if deststream == self.streamNumber or deststream == 0:
                if command == 'shutdown':
                    if data == self.HOST or data == 'all':
                        printLock.acquire()
                        print 'sendDataThread thread (associated with', self.HOST,') ID:',id(self), 'shutting down now.'
                        self.sock.close()
                        sendDataQueues.remove(self.mailbox) #deregister so no further broadcasts reach this dead thread
                        print 'len of sendDataQueues', len(sendDataQueues)
                        printLock.release()
                        break
                #When you receive an incoming connection, a sendDataThread is created even though you don't yet know what stream number the remote peer is interested in. They will tell you in a version message and if you too are interested in that stream then you will continue on with the connection and will set the streamNumber of this send data thread here:
                elif command == 'setStreamNumber':
                    hostInMessage, specifiedStreamNumber = data
                    if hostInMessage == self.HOST:
                        printLock.acquire()
                        print 'setting the stream number in the sendData thread (ID:',id(self), ') to', specifiedStreamNumber
                        printLock.release()
                        self.streamNumber = specifiedStreamNumber
                elif command == 'sendaddr':
                    try:
                        #To prevent some network analysis, 'leak' the data out to our peer after waiting a random amount of time unless we have a long list of messages in our queue to send.
                        random.seed()
                        time.sleep(random.randrange(0, 10))
                        self.sock.sendall(data)
                        self.lastTimeISentData = int(time.time())
                    except:
                        print 'self.sock.sendall failed'
                        self.sock.close()
                        sendDataQueues.remove(self.mailbox)
                        print 'sendDataThread thread', self, 'ending now'
                        break
                elif command == 'sendinv':
                    #Only advertise objects the peer hasn't already told us about.
                    if data not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                        payload = '\x01' + data #'\x01' is the count of hashes in this inv message
                        headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
                        headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                        headerData += pack('>L',len(payload))
                        headerData += hashlib.sha512(payload).digest()[:4]
                        #To prevent some network analysis, 'leak' the data out to our peer after waiting a random amount of time
                        random.seed()
                        time.sleep(random.randrange(0, 10))
                        try:
                            self.sock.sendall(headerData + payload)
                            self.lastTimeISentData = int(time.time())
                        except:
                            print 'self.sock.sendall failed'
                            self.sock.close()
                            sendDataQueues.remove(self.mailbox)
                            print 'sendDataThread thread', self, 'ending now'
                            break
                elif command == 'pong':
                    if self.lastTimeISentData < (int(time.time()) - 298):
                        #Send out a pong message to keep the connection alive.
                        printLock.acquire()
                        print 'Sending pong to', self.HOST, 'to keep connection alive.'
                        printLock.release()
                        try:
                            self.sock.sendall('\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
                            self.lastTimeISentData = int(time.time())
                        except:
                            print 'self.sock.send pong failed'
                            self.sock.close()
                            sendDataQueues.remove(self.mailbox)
                            print 'sendDataThread thread', self, 'ending now'
                            break
            else:
                printLock.acquire()
                print 'sendDataThread ID:',id(self),'ignoring command', command,'because it is not in stream',deststream
                printLock.release()
#When you want to command a sendDataThread to do something, like shutdown or send some data, this function puts your data into the queues for each of the sendDataThreads. The sendDataThreads are responsible for putting their queue into (and out of) the sendDataQueues list.
def broadcastToSendDataQueues(data):
    """Deliver *data* — a (stream, command, payload) tuple — to every sendDataThread's mailbox queue."""
    #print 'running broadcastToSendDataQueues'
    for mailbox in sendDataQueues:
        mailbox.put(data)
def flushInventory():
    """Write every in-memory inventory object out to the SQL inventory table and empty the in-memory dictionary."""
    #Note that the singleCleanerThread clears out the inventory dictionary from time to time, although it only clears things that have been in the dictionary for a long time. This clears the inventory dictionary Now.
    sqlLock.acquire()
    for hash, storedValue in inventory.items(): #items() returns a list copy in Python 2, so deleting entries inside the loop is safe
        objectType, streamNumber, payload, receivedTime = storedValue
        t = (hash,objectType,streamNumber,payload,receivedTime)
        sqlSubmitQueue.put('''INSERT INTO inventory VALUES (?,?,?,?,?)''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get() #wait for the sqlThread to acknowledge the insert before deleting from memory
        del inventory[hash]
    sqlLock.release()
def isInSqlInventory(hash):
    """Return True if an object with the given hash is present in the SQL inventory table, False otherwise."""
    sqlLock.acquire()
    sqlSubmitQueue.put('''select hash from inventory where hash=?''')
    sqlSubmitQueue.put((hash,))
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    #An empty result list means the hash isn't stored on disk.
    return queryreturn != []
def convertIntToString(n):
    """Convert the non-negative integer *n* to its big-endian binary string form.

    Example: 0x4142 -> 'AB'; zero converts to a single null byte. This
    replaces the previous __builtins__.hex() + 'L'-suffix stripping +
    str.decode('hex') approach: '%x' formatting produces the bare hex
    digits directly (no '0x' prefix or long-integer 'L' suffix) and
    binascii.unhexlify gives the same bytes portably.
    """
    from binascii import unhexlify #local import: this file's import block is outside this change
    hexstring = '%x' % n
    if len(hexstring) % 2 != 0:
        hexstring = '0' + hexstring #pad to a whole number of bytes
    return unhexlify(hexstring)
def convertStringToInt(s):
    """Interpret the raw byte string *s* as a big-endian unsigned integer (inverse of convertIntToString).

    Uses binascii.hexlify instead of the Python-2-only str.encode('hex');
    the result is identical.
    """
    from binascii import hexlify #local import: this file's import block is outside this change
    return int(hexlify(s), 16)
def decodeWalletImportFormat(WIFstring):
    """Decode a base58 wallet-import-format private key string.

    Returns the raw private key bytes (without the version byte), or ""
    if the trailing 4-byte double-SHA256 checksum fails or the decoded
    key does not start with the expected 0x80 version byte.
    """
    fullString = arithmetic.changebase(WIFstring,58,256)
    privkey = fullString[:-4] #everything except the trailing 4-byte checksum
    if fullString[-4:] != hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]:
        sys.stderr.write('Major problem! When trying to decode one of your private keys, the checksum failed. Here is the PRIVATE key: %s\n' % str(WIFstring))
        return ""
    else:
        #checksum passed
        if privkey[0] == '\x80':
            return privkey[1:] #strip the 0x80 version byte
        else:
            sys.stderr.write('Major problem! When trying to decode one of your private keys, the checksum passed but the key doesn\'t begin with hex 80. Here is the PRIVATE key: %s\n' % str(WIFstring))
            return ""
def reloadMyAddressHashes():
    """Rebuild the global myRSAAddressHashes and myECAddressHashes dictionaries from the enabled addresses in the keys.dat config."""
    printLock.acquire()
    print 'reloading keys from keys.dat file'
    printLock.release()
    myRSAAddressHashes.clear()
    myECAddressHashes.clear()
    #myPrivateKeys.clear()
    configSections = config.sections()
    for addressInKeysFile in configSections:
        if addressInKeysFile <> 'bitmessagesettings': #every section other than the settings section is an address
            isEnabled = config.getboolean(addressInKeysFile, 'enabled')
            if isEnabled:
                status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
                if addressVersionNumber == 2:
                    #version 2 addresses use elliptic-curve crypto (highlevelcrypto)
                    privEncryptionKey = decodeWalletImportFormat(config.get(addressInKeysFile, 'privencryptionkey')).encode('hex') #returns a simple 32 bytes of information encoded in 64 Hex characters, or null if there was an error
                    if len(privEncryptionKey) == 64:#It is 32 bytes encoded as 64 hex characters
                        myECAddressHashes[hash] = highlevelcrypto.makeCryptor(privEncryptionKey)
                elif addressVersionNumber == 1:
                    #version 1 addresses use RSA; the key components are stored directly in the config
                    n = config.getint(addressInKeysFile, 'n')
                    e = config.getint(addressInKeysFile, 'e')
                    d = config.getint(addressInKeysFile, 'd')
                    p = config.getint(addressInKeysFile, 'p')
                    q = config.getint(addressInKeysFile, 'q')
                    myRSAAddressHashes[hash] = rsa.PrivateKey(n,e,d,p,q)
#This function expects that pubkey begin with \x04
def calculateBitcoinAddressFromPubkey(pubkey):
if len(pubkey)!= 65:
print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey),'bytes long rather than 65.'
return "error"
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha256')
sha.update(pubkey)
ripe.update(sha.digest())
ripeWithProdnetPrefix = '\x00' + ripe.digest()
checksum = hashlib.sha256(hashlib.sha256(ripeWithProdnetPrefix).digest()).digest()[:4]
binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
numberOfZeroBytesOnBinaryBitcoinAddress = 0
while binaryBitcoinAddress[0] == '\x00':
numberOfZeroBytesOnBinaryBitcoinAddress += 1
binaryBitcoinAddress = binaryBitcoinAddress[1:]
base58encoded = arithmetic.changebase(binaryBitcoinAddress,256,58)
return "1"*numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded
def calculateTestnetAddressFromPubkey(pubkey):
    """Derive the base58 testnet address for an uncompressed 65-byte public key.

    Identical to calculateBitcoinAddressFromPubkey except for the 0x6F
    testnet version byte.
    """
    if len(pubkey)!= 65:
        print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey),'bytes long rather than 65.' #NOTE(review): message says 'Bitcoin' although this is the testnet variant (copied from the mainnet function)
        return "error"
    ripe = hashlib.new('ripemd160')
    sha = hashlib.new('sha256')
    sha.update(pubkey)
    ripe.update(sha.digest())
    ripeWithProdnetPrefix = '\x6F' + ripe.digest() #0x6F is the testnet version byte (variable name still says 'Prodnet' — copied from the mainnet function)
    checksum = hashlib.sha256(hashlib.sha256(ripeWithProdnetPrefix).digest()).digest()[:4]
    binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
    numberOfZeroBytesOnBinaryBitcoinAddress = 0
    #NOTE(review): with the non-zero 0x6F prefix the first byte is never '\x00',
    #so this loop (and the '1' padding below) never does anything here; it is
    #vestigial from the mainnet version of this function.
    while binaryBitcoinAddress[0] == '\x00':
        numberOfZeroBytesOnBinaryBitcoinAddress += 1
        binaryBitcoinAddress = binaryBitcoinAddress[1:]
    base58encoded = arithmetic.changebase(binaryBitcoinAddress,256,58)
    return "1"*numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded
def safeConfigGetBoolean(section,field):
    """Like config.getboolean() but returns False instead of raising when the section or option is missing or malformed.

    The previous if/else around getboolean() was redundant: getboolean
    already returns a bool, so its value can be returned directly.
    """
    try:
        return config.getboolean(section,field)
    except:
        #deliberately broad: any lookup or parsing problem just means "not enabled"
        return False
def lookupAppdataFolder():
    """Return the platform-appropriate per-user application-data folder for PyBitmessage, including a trailing path separator.

    macOS: ~/Library/Application support/PyBitmessage/
    Windows: %APPDATA%\\PyBitmessage\\
    other (Unix): ~/.PyBitmessage/
    """
    APPNAME = "PyBitmessage"
    from os import path, environ
    if sys.platform == 'darwin':
        if "HOME" in environ:
            #fixed: previously referenced os.environ here despite importing environ locally
            appdata = path.join(environ["HOME"], "Library/Application support/", APPNAME) + '/'
        else:
            print('Could not find home folder, please report this message and your OS X version to the BitMessage Github.')
            sys.exit()
    elif 'win32' in sys.platform or 'win64' in sys.platform:
        appdata = path.join(environ['APPDATA'], APPNAME) + '\\'
    else:
        appdata = path.expanduser(path.join("~", "." + APPNAME + "/"))
    return appdata
#This thread exists because SQLITE3 is so un-threadsafe that we must submit queries to it and it puts results back in a different queue. They won't let us just use locks.
class sqlThread(QThread):
    """Single dedicated database thread.

    SQLite connections may only be used from the thread that created
    them, so every other thread submits work by putting two items on
    sqlSubmitQueue (first the query string, then its parameters) and
    then reading the result rows from sqlReturnQueue. Callers are
    expected to hold sqlLock around the put/put/get sequence so that
    requests and responses cannot interleave.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        """Open (or create) messages.dat, run one-time schema setup and
        migrations, then service queued queries forever."""
        self.conn = sqlite3.connect(appdata + 'messages.dat' )
        # text_factory = str: return plain byte strings rather than
        # unicode objects, since blobs and text are mixed in these tables.
        self.conn.text_factory = str
        self.cur = self.conn.cursor()
        try:
            self.cur.execute( '''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text, received text, message text, folder text, UNIQUE(msgid) ON CONFLICT REPLACE)''' )
            self.cur.execute( '''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, lastactiontime integer, status text, pubkeyretrynumber integer, msgretrynumber integer, folder text)''' )
            self.cur.execute( '''CREATE TABLE subscriptions (label text, address text, enabled bool)''' )
            self.cur.execute( '''CREATE TABLE addressbook (label text, address text)''' )
            self.cur.execute( '''CREATE TABLE blacklist (label text, address text, enabled bool)''' )
            self.cur.execute( '''CREATE TABLE whitelist (label text, address text, enabled bool)''' )
            #Explanation of what is in the pubkeys table:
            #  The hash is the RIPEMD160 hash that is encoded in the Bitmessage address.
            #  If you or someone else did the POW for this pubkey, then havecorrectnonce will be true. If you received the pubkey in a msg message then havecorrectnonce will be false. You won't have the correct nonce and won't be able to send the message to peers if they request the pubkey.
            #  transmitdata is literally the data that was included in the Bitmessage pubkey message when it arrived, except for the 24 byte protocol header- ie, it starts with the POW nonce.
            #  time is the time that the pubkey was broadcast on the network same as with every other type of Bitmessage object.
            #  usedpersonally is set to "yes" if we have used the key personally. This keeps us from deleting it because we may want to reply to a message in the future. This field is not a bool because we may need more flexability in the future and it doesn't take up much more space anyway.
            self.cur.execute( '''CREATE TABLE pubkeys (hash blob, havecorrectnonce bool, transmitdata blob, time blob, usedpersonally text, UNIQUE(hash, havecorrectnonce) ON CONFLICT REPLACE)''' )
            self.cur.execute( '''CREATE TABLE inventory (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE)''' )
            self.cur.execute( '''CREATE TABLE knownnodes (timelastseen int, stream int, services blob, host blob, port blob, UNIQUE(host, stream, port) ON CONFLICT REPLACE)''' ) #This table isn't used in the program yet but I have a feeling that we'll need it.
            self.cur.execute( '''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx',1)''')
            self.conn.commit()
            print 'Created messages database file'
        except Exception, err:
            # Creation fails with exactly this message when the schema
            # already exists; any other error is fatal.
            if str(err) == 'table inbox already exists':
                print 'Database file already exists.'
            else:
                sys.stderr.write('ERROR trying to create database file (message.dat). Error message: %s\n' % str(err))
                sys.exit()
        #People running earlier versions of PyBitmessage do not have the usedpersonally field in their pubkeys table. Let's add it.
        if config.getint('bitmessagesettings','settingsversion') == 2:
            item = '''ALTER TABLE pubkeys ADD usedpersonally text DEFAULT 'no' '''
            parameters = ''
            self.cur.execute(item, parameters)
            self.conn.commit()
            config.set('bitmessagesettings','settingsversion','3')
            with open(appdata + 'keys.dat', 'wb') as configfile:
                config.write(configfile)
        # Sanity check: very old SQLite builds could not round-trip NUL
        # bytes in text; insert a test row containing '\x00\x00' and make
        # sure it comes back non-empty, then delete it again.
        try:
            testpayload = '\x00\x00'
            t = ('1234','True',testpayload,'12345678','no')
            self.cur.execute( '''INSERT INTO pubkeys VALUES(?,?,?,?,?)''',t)
            self.conn.commit()
            self.cur.execute('''SELECT transmitdata FROM pubkeys WHERE hash='1234' ''')
            queryreturn = self.cur.fetchall()
            for row in queryreturn:
                transmitdata, = row
            self.cur.execute('''DELETE FROM pubkeys WHERE hash='1234' ''')
            self.conn.commit()
            if transmitdata == '':
                sys.stderr.write('Problem: The version of SQLite you have cannot store Null values. Please download and install the latest revision of your version of Python (for example, the latest Python 2.7 revision) and try again.\n')
                sys.stderr.write('PyBitmessage will now exist very abruptly. You may now see threading errors related to this abrupt exit but the problem you need to solve is related to SQLite.\n\n')
                sys.exit()
        except Exception, err:
            print err
        # Main service loop: each request is a (query, parameters) pair
        # taken as two consecutive queue items; results always go back on
        # sqlReturnQueue (empty list for non-SELECT statements).
        while True:
            item = sqlSubmitQueue.get()
            parameters = sqlSubmitQueue.get()
            #print 'item', item
            #print 'parameters', parameters
            self.cur.execute(item, parameters)
            sqlReturnQueue.put(self.cur.fetchall())
            sqlSubmitQueue.task_done()
            # Commit after every statement so other threads' reads see
            # a consistent on-disk state.
            self.conn.commit()
'''The singleCleaner class is a timer-driven thread that cleans data structures to free memory, resends messages when a remote node doesn't respond, and sends pong messages to keep connections alive if the network isn't busy.
It cleans these data structures in memory:
inventory (moves data to the on-disk sql database)
It cleans these tables on the disk:
inventory (clears data more than 2 days and 12 hours old)
pubkeys (clears pubkeys older than 4 weeks old which we have not used personally)
It resends messages when there has been no response:
resends getpubkey messages in 4 days (then 8 days, then 16 days, etc...)
resends msg messages in 4 days (then 8 days, then 16 days, etc...)
'''
class singleCleaner(QThread):
    """Timer-driven housekeeping thread.

    Every 5 minutes it: flushes old in-memory inventory entries to the
    SQL database, pings peers via a 'pong' broadcast, and (roughly every
    2 hours) expires old inventory/pubkey rows and resends messages that
    never received a response. See the module-level description above
    this class for the full schedule.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        # Epoch timestamp of the last expensive cleanup pass; 0 forces a
        # full pass on the first wake-up after 5 minutes.
        timeWeLastClearedInventoryAndPubkeysTables = 0
        while True:
            time.sleep(300)
            sqlLock.acquire()
            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"Doing housekeeping (Flushing inventory in memory to disk...)")
            # Move inventory objects older than an hour out of RAM and
            # into the inventory table.
            for hash, storedValue in inventory.items():
                objectType, streamNumber, payload, receivedTime = storedValue
                if int(time.time())- 3600 > receivedTime:
                    t = (hash,objectType,streamNumber,payload,receivedTime)
                    sqlSubmitQueue.put('''INSERT INTO inventory VALUES (?,?,?,?,?)''')
                    sqlSubmitQueue.put(t)
                    sqlReturnQueue.get()
                    del inventory[hash]
            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"")
            sqlLock.release()
            broadcastToSendDataQueues((0, 'pong', 'no data')) #commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
            # 7380 seconds = 2 hours 3 minutes between full cleanup passes.
            if timeWeLastClearedInventoryAndPubkeysTables < int(time.time()) - 7380:
                timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
                #inventory (moves data from the inventory data structure to the on-disk sql database)
                sqlLock.acquire()
                #inventory (clears data more than 2 days and 12 hours old)
                t = (int(time.time())-lengthOfTimeToLeaveObjectsInInventory,)
                sqlSubmitQueue.put('''DELETE FROM inventory WHERE receivedtime<?''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                #pubkeys
                t = (int(time.time())-lengthOfTimeToHoldOnToAllPubkeys,)
                sqlSubmitQueue.put('''DELETE FROM pubkeys WHERE time<? AND usedpersonally='no' ''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                # Resend logic: look at every non-trashed sent message that
                # is still waiting on a pubkey or an acknowledgement.
                t = ()
                sqlSubmitQueue.put('''select toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber FROM sent WHERE ((status='findingpubkey' OR status='sentmessage') AND folder='sent') ''') #If the message's folder='trash' then we'll ignore it.
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                for row in queryreturn:
                    toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber = row
                    if status == 'findingpubkey':
                        # Exponential backoff: wait 2**retrynumber times the
                        # base object age before re-requesting the pubkey.
                        if int(time.time()) - lastactiontime > (maximumAgeOfAnObjectThatIAmWillingToAccept * (2 ** (pubkeyretrynumber))):
                            print 'It has been a long time and we haven\'t heard a response to our getpubkey request. Sending again.'
                            try:
                                del neededPubkeys[toripe] #We need to take this entry out of the neededPubkeys structure because the workerQueue checks to see whether the entry is already present and will not do the POW and send the message because it assumes that it has already done it recently.
                            except:
                                pass
                            workerQueue.put(('sendmessage',toaddress))
                            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"Doing work necessary to again attempt to request a public key...")
                            t = (int(time.time()),pubkeyretrynumber+1,toripe)
                            sqlSubmitQueue.put('''UPDATE sent SET lastactiontime=?, pubkeyretrynumber=? WHERE toripe=?''')
                            sqlSubmitQueue.put(t)
                            sqlReturnQueue.get()
                            #self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),toripe,'Public key requested again. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                    else:# status == sentmessage
                        # Same exponential backoff, keyed on msgretrynumber.
                        if int(time.time()) - lastactiontime > (maximumAgeOfAnObjectThatIAmWillingToAccept * (2 ** (msgretrynumber))):
                            print 'It has been a long time and we haven\'t heard an acknowledgement to our msg. Sending again.'
                            t = (int(time.time()),msgretrynumber+1,'findingpubkey',ackdata)
                            sqlSubmitQueue.put('''UPDATE sent SET lastactiontime=?, msgretrynumber=?, status=? WHERE ackdata=?''')
                            sqlSubmitQueue.put(t)
                            sqlReturnQueue.get()
                            #self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Message sent again because the acknowledgement was never received. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                            workerQueue.put(('sendmessage',toaddress))
                            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"Doing work necessary to again attempt to deliver a message...")
                sqlLock.release()
#This thread, of which there is only one, does the heavy lifting: calculating POWs.
class singleWorker(QThread):
    """The single worker thread that performs all proof-of-work (POW)
    computations: getpubkey requests, pubkey broadcasts, msg messages,
    broadcasts, and the embedded acknowledgement messages.

    Work arrives on workerQueue as (command, data) tuples; database
    access goes through sqlSubmitQueue/sqlReturnQueue under sqlLock.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        """Resume any unfinished work left over from the previous run,
        then block on workerQueue processing commands forever."""
        # Rebuild neededPubkeys from messages still waiting on a pubkey.
        sqlLock.acquire()
        sqlSubmitQueue.put('''SELECT toripe FROM sent WHERE (status=? AND folder='sent')''')
        sqlSubmitQueue.put(('findingpubkey',))
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toripe, = row
            neededPubkeys[toripe] = 0
        self.sendBroadcast() #just in case there are any proof of work tasks for Broadcasts that have yet to be sent.
        #Now let us see if there are any proofs of work for msg messages that we have yet to complete..
        sqlLock.acquire()
        t = ('doingpow',)
        sqlSubmitQueue.put('SELECT toripe FROM sent WHERE status=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toripe, = row
            self.sendMsg(toripe)
        # Main command loop.
        while True:
            command, data = workerQueue.get()
            #statusbar = 'The singleWorker thread is working on work.'
            #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
            if command == 'sendmessage':
                toAddress = data
                toStatus,toAddressVersionNumber,toStreamNumber,toRipe = decodeAddress(toAddress)
                #print 'message type', type(message)
                #print repr(message.toUtf8())
                #print str(message.toUtf8())
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT * FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                #print 'queryreturn', queryreturn
                if queryreturn == []:
                    #We'll need to request the pub key because we don't have it.
                    if not toRipe in neededPubkeys:
                        neededPubkeys[toRipe] = 0
                        print 'requesting pubkey:', toRipe.encode('hex')
                        self.requestPubKey(toAddressVersionNumber,toStreamNumber,toRipe)
                    else:
                        print 'We have already requested this pubkey (the ripe hash is in neededPubkeys). We will re-request again soon.'
                        self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),toRipe,'Public key was requested earlier. Receiver must be offline. Will retry.')
                else:
                    print 'We already have the necessary public key.'
                    self.sendMsg(toRipe) #by calling this function, we are asserting that we already have the pubkey for toRipe
            elif command == 'sendbroadcast':
                print 'Within WorkerThread, processing sendbroadcast command.'
                # NOTE(review): the unpacked values are unused here;
                # sendBroadcast re-reads pending rows from the database.
                fromAddress,subject,message = data
                self.sendBroadcast()
            elif command == 'doPOWForMyV2Pubkey':
                self.doPOWForMyV2Pubkey(data)
            elif command == 'newpubkey':
                toAddressVersion,toStreamNumber,toRipe = data
                if toRipe in neededPubkeys:
                    print 'We have been awaiting the arrival of this pubkey.'
                    del neededPubkeys[toRipe]
                    self.sendMsg(toRipe)
                else:
                    print 'We don\'t need this pub key. We didn\'t ask for it. Pubkey hash:', toRipe.encode('hex')
            else:
                printLock.acquire()
                sys.stderr.write('Probable programming error: The command sent to the workerThread is weird. It is: %s\n' % command)
                printLock.release()
            workerQueue.task_done()
    def doPOWForMyV2Pubkey(self,myAddress): #This function also broadcasts out the pubkey message once it is done with the POW
        """Assemble our own v2 pubkey message for myAddress, do its POW,
        store it in the pubkeys table, and advertise it to peers."""
        status,addressVersionNumber,streamNumber,hash = decodeAddress(myAddress)
        embeddedTime = int(time.time())+random.randrange(-300, 300) #the current time plus or minus five minutes
        payload = pack('>I',(embeddedTime))
        payload += encodeVarint(2) #Address version number
        payload += encodeVarint(streamNumber)
        payload += '\x00\x00\x00\x01' #bitfield of features supported by me (see the wiki).
        try:
            privSigningKeyBase58 = config.get(myAddress, 'privsigningkey')
            privEncryptionKeyBase58 = config.get(myAddress, 'privencryptionkey')
        except Exception, err:
            printLock.acquire()
            sys.stderr.write('Error within doPOWForMyV2Pubkey. Could not read the keys from the keys.dat file for a requested address. %s\n' % err)
            printLock.release()
            return
        privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
        privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
        pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex')
        pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
        # [1:] strips the leading 0x04 uncompressed-point prefix byte.
        payload += pubSigningKey[1:]
        payload += pubEncryptionKey[1:]
        #Do the POW for this pubkey message
        nonce = 0
        trialValue = 99999999999999999999
        # Python 2 int/int division floors, so target is an integer.
        target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
        print '(For pubkey message) Doing proof of work...'
        initialHash = hashlib.sha512(payload).digest()
        # Hashcash-style search: increment nonce until the first 8 bytes
        # of double-SHA512(nonce || initialHash) fall below target.
        while trialValue > target:
            nonce += 1
            trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
        print '(For pubkey message) Found proof of work', trialValue, 'Nonce:', nonce
        payload = pack('>Q',nonce) + payload
        t = (hash,True,payload,embeddedTime,'no')
        sqlLock.acquire()
        sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        inventoryHash = calculateInventoryHash(payload)
        objectType = 'pubkey'
        inventory[inventoryHash] = (objectType, streamNumber, payload, embeddedTime)
        printLock.acquire()
        print 'broadcasting inv with hash:', inventoryHash.encode('hex')
        printLock.release()
        broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"")
    def sendBroadcast(self):
        """Do the POW for and transmit every sent-table row whose status
        is 'broadcastpending', then mark it 'broadcastsent'."""
        sqlLock.acquire()
        t = ('broadcastpending',)
        sqlSubmitQueue.put('SELECT fromaddress, subject, message, ackdata FROM sent WHERE status=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            fromaddress, subject, body, ackdata = row
            status,addressVersionNumber,streamNumber,ripe = decodeAddress(fromaddress)
            if addressVersionNumber == 2:
                #We need to convert our private keys to public keys in order to include them.
                try:
                    privSigningKeyBase58 = config.get(fromaddress, 'privsigningkey')
                    privEncryptionKeyBase58 = config.get(fromaddress, 'privencryptionkey')
                except:
                    self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Error! Could not find sender address (your address) in the keys.dat file.')
                    continue
                privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
                privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
                pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex') #At this time these pubkeys are 65 bytes long because they include the encoding byte which we won't be sending in the broadcast message.
                pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
                payload = pack('>I',(int(time.time())+random.randrange(-300, 300)))#the current time plus or minus five minutes
                payload += encodeVarint(1) #broadcast version
                payload += encodeVarint(addressVersionNumber)
                payload += encodeVarint(streamNumber)
                payload += '\x00\x00\x00\x01' #behavior bitfield
                payload += pubSigningKey[1:]
                payload += pubEncryptionKey[1:]
                payload += ripe
                payload += '\x02' #message encoding type
                payload += encodeVarint(len('Subject:' + subject + '\n' + 'Body:' + body)) #Type 2 is simple UTF-8 message encoding.
                payload += 'Subject:' + subject + '\n' + 'Body:' + body
                signature = highlevelcrypto.sign(payload,privSigningKeyHex)
                payload += encodeVarint(len(signature))
                payload += signature
                # Proof of work over the fully-assembled payload.
                nonce = 0
                trialValue = 99999999999999999999
                target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
                print '(For broadcast message) Doing proof of work...'
                self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Doing work necessary to send broadcast...')
                initialHash = hashlib.sha512(payload).digest()
                while trialValue > target:
                    nonce += 1
                    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
                print '(For broadcast message) Found proof of work', trialValue, 'Nonce:', nonce
                payload = pack('>Q',nonce) + payload
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'broadcast'
                inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
                print 'sending inv (within sendBroadcast function)'
                broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
                self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Broadcast sent at '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                #Update the status of the message in the 'sent' table to have a 'broadcastsent' status
                sqlLock.acquire()
                t = ('broadcastsent',int(time.time()),fromaddress, subject, body,'broadcastpending')
                sqlSubmitQueue.put('UPDATE sent SET status=?, lastactiontime=? WHERE fromaddress=? AND subject=? AND message=? AND status=?')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                """elif addressVersionNumber == 1: #This whole section can be taken out soon because we aren't supporting v1 addresses for much longer.
                messageToTransmit = '\x02' #message encoding type
                messageToTransmit += encodeVarint(len('Subject:' + subject + '\n' + 'Body:' + body)) #Type 2 is simple UTF-8 message encoding.
                messageToTransmit += 'Subject:' + subject + '\n' + 'Body:' + body
                #We need the all the integers for our private key in order to sign our message, and we need our public key to send with the message.
                n = config.getint(fromaddress, 'n')
                e = config.getint(fromaddress, 'e')
                d = config.getint(fromaddress, 'd')
                p = config.getint(fromaddress, 'p')
                q = config.getint(fromaddress, 'q')
                nString = convertIntToString(n)
                eString = convertIntToString(e)
                #myPubkey = rsa.PublicKey(n,e)
                myPrivatekey = rsa.PrivateKey(n,e,d,p,q)
                #The payload of the broadcast message starts with a POW, but that will be added later.
                payload = pack('>I',(int(time.time())))
                payload += encodeVarint(1) #broadcast version
                payload += encodeVarint(addressVersionNumber)
                payload += encodeVarint(streamNumber)
                payload += ripe
                payload += encodeVarint(len(nString))
                payload += nString
                payload += encodeVarint(len(eString))
                payload += eString
                payload += messageToTransmit
                signature = rsa.sign(messageToTransmit,myPrivatekey,'SHA-512')
                #print 'signature', signature.encode('hex')
                payload += signature
                #print 'nString', repr(nString)
                #print 'eString', repr(eString)
                nonce = 0
                trialValue = 99999999999999999999
                target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
                print '(For broadcast message) Doing proof of work...'
                initialHash = hashlib.sha512(payload).digest()
                while trialValue > target:
                    nonce += 1
                    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
                print '(For broadcast message) Found proof of work', trialValue, 'Nonce:', nonce
                payload = pack('>Q',nonce) + payload
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'broadcast'
                inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
                print 'sending inv (within sendBroadcast function)'
                broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
                self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Broadcast sent at '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                #Update the status of the message in the 'sent' table to have a 'broadcastsent' status
                sqlLock.acquire()
                t = ('broadcastsent',int(time.time()),fromaddress, subject, body,'broadcastpending')
                sqlSubmitQueue.put('UPDATE sent SET status=?, lastactiontime=? WHERE fromaddress=? AND subject=? AND message=? AND status=?')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()"""
            else:
                printLock.acquire()
                print 'In the singleWorker thread, the sendBroadcast function doesn\'t understand the address version'
                printLock.release()
    def sendMsg(self,toRipe):
        """Assemble, encrypt, POW, and transmit every queued message
        destined for the address whose RIPE hash is toRipe.

        Caller asserts the recipient's pubkey is already in the pubkeys
        table. Rows move status 'findingpubkey' -> 'doingpow' ->
        'sentmessage'.
        """
        sqlLock.acquire()
        t = ('doingpow','findingpubkey',toRipe)
        sqlSubmitQueue.put('UPDATE sent SET status=? WHERE status=? AND toripe=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        t = ('doingpow',toRipe)
        sqlSubmitQueue.put('SELECT toaddress, fromaddress, subject, message, ackdata FROM sent WHERE status=? AND toripe=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toaddress, fromaddress, subject, message, ackdata = row
            ackdataForWhichImWatching[ackdata] = 0
            toStatus,toAddressVersionNumber,toStreamNumber,toHash = decodeAddress(toaddress)
            fromStatus,fromAddressVersionNumber,fromStreamNumber,fromHash = decodeAddress(fromaddress)
            self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Doing work necessary to send the message.')
            printLock.acquire()
            print 'Found a message in our database that needs to be sent with this pubkey.'
            print 'First 150 characters of message:', message[:150]
            printLock.release()
            embeddedTime = pack('>I',(int(time.time())+random.randrange(-300, 300)))#the current time plus or minus five minutes. We will use this time both for our message and for the ackdata packed within our message.
            if fromAddressVersionNumber == 2:
                payload = '\x01' #Message version.
                payload += encodeVarint(fromAddressVersionNumber)
                payload += encodeVarint(fromStreamNumber)
                payload += '\x00\x00\x00\x01' #Bitfield of features and behaviors that can be expected from me. (See https://bitmessage.org/wiki/Protocol_specification#Pubkey_bitfield_features )
                #We need to convert our private keys to public keys in order to include them.
                try:
                    privSigningKeyBase58 = config.get(fromaddress, 'privsigningkey')
                    privEncryptionKeyBase58 = config.get(fromaddress, 'privencryptionkey')
                except:
                    self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Error! Could not find sender address (your address) in the keys.dat file.')
                    continue
                privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
                privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
                pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex')
                pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
                payload += pubSigningKey[1:] #The \x04 on the beginning of the public keys are not sent. This way there is only one acceptable way to encode and send a public key.
                payload += pubEncryptionKey[1:]
                payload += toHash #This hash will be checked by the receiver of the message to verify that toHash belongs to them. This prevents a Surreptitious Forwarding Attack.
                payload += '\x02' #Type 2 is simple UTF-8 message encoding as specified on the Protocol Specification on the Bitmessage Wiki.
                messageToTransmit = 'Subject:' + subject + '\n' + 'Body:' + message
                payload += encodeVarint(len(messageToTransmit))
                payload += messageToTransmit
                fullAckPayload = self.generateFullAckMessage(ackdata,toStreamNumber,embeddedTime)#The fullAckPayload is a normal msg protocol message with the proof of work already completed that the receiver of this message can easily send out.
                payload += encodeVarint(len(fullAckPayload))
                payload += fullAckPayload
                signature = highlevelcrypto.sign(payload,privSigningKeyHex)
                payload += encodeVarint(len(signature))
                payload += signature
                """elif fromAddressVersionNumber == 1: #This code is for old version 1 (RSA) addresses. It will soon be removed.
                payload = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' #this run of nulls allows the true message receiver to identify his message
                payload += '\x01' #Message version.
                payload += '\x00\x00\x00\x01'
                payload += encodeVarint(fromAddressVersionNumber)
                payload += encodeVarint(fromStreamNumber)
                try:
                    sendersN = convertIntToString(config.getint(fromaddress, 'n'))
                except:
                    printLock.acquire()
                    print 'Error: Could not find', fromaddress, 'in our keys.dat file. You must have deleted it. Aborting the send.'
                    printLock.release()
                    return
                payload += encodeVarint(len(sendersN))
                payload += sendersN
                sendersE = convertIntToString(config.getint(fromaddress, 'e'))
                payload += encodeVarint(len(sendersE))
                payload += sendersE
                payload += '\x02' #Type 2 is simple UTF-8 message encoding.
                messageToTransmit = 'Subject:' + subject + '\n' + 'Body:' + message
                payload += encodeVarint(len(messageToTransmit))
                payload += messageToTransmit
                #Later, if anyone impliments clients that don't send the ack_data, then we should probably check here to make sure that the receiver will make use of this ack_data and not attach it if not.
                fullAckPayload = self.generateFullAckMessage(ackdata,toStreamNumber,embeddedTime)
                payload += encodeVarint(len(fullAckPayload))
                payload += fullAckPayload
                sendersPrivKey = rsa.PrivateKey(config.getint(fromaddress, 'n'),config.getint(fromaddress, 'e'),config.getint(fromaddress, 'd'),config.getint(fromaddress, 'p'),config.getint(fromaddress, 'q'))
                payload += rsa.sign(payload,sendersPrivKey,'SHA-512')"""
            #We have assembled the data that will be encrypted. Now let us fetch the recipient's public key out of our database and do the encryption.
            if toAddressVersionNumber == 2:
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT transmitdata FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                for row in queryreturn:
                    pubkeyPayload, = row
                #The pubkey is stored the way we originally received it which means that we need to read beyond things like the nonce and time to get to the public keys.
                readPosition = 8 #to bypass the nonce
                readPosition += 4 #to bypass the embedded time
                readPosition += 1 #to bypass the address version whose length is definitely 1
                streamNumber, streamNumberLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += streamNumberLength
                behaviorBitfield = pubkeyPayload[readPosition:readPosition+4]
                readPosition += 4 #to bypass the bitfield of behaviors
                #pubSigningKeyBase256 = pubkeyPayload[readPosition:readPosition+64] #We don't use this key for anything here.
                readPosition += 64
                pubEncryptionKeyBase256 = pubkeyPayload[readPosition:readPosition+64]
                readPosition += 64
                # "04" re-adds the uncompressed-point prefix stripped before transmit.
                encrypted = highlevelcrypto.encrypt(payload,"04"+pubEncryptionKeyBase256.encode('hex'))
                """elif toAddressVersionNumber == 1:
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT transmitdata FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                for row in queryreturn:
                    pubkeyPayload, = row
                readPosition = 8 #to bypass the nonce
                behaviorBitfield = pubkeyPayload[8:12]
                readPosition += 4 #to bypass the bitfield of behaviors
                addressVersion, addressVersionLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += addressVersionLength
                streamNumber, streamNumberLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += streamNumberLength
                nLength, nLengthLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += nLengthLength
                n = convertStringToInt(pubkeyPayload[readPosition:readPosition+nLength])
                readPosition += nLength
                eLength, eLengthLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += eLengthLength
                e = convertStringToInt(pubkeyPayload[readPosition:readPosition+eLength])
                receiversPubkey = rsa.PublicKey(n,e)
                infile = cStringIO.StringIO(payload)
                outfile = cStringIO.StringIO()
                #print 'Encrypting using public key:', receiversPubkey
                encrypt_bigfile(infile,outfile,receiversPubkey)
                encrypted = outfile.getvalue()
                infile.close()
                outfile.close()"""
            nonce = 0
            trialValue = 99999999999999999999
            encodedStreamNumber = encodeVarint(toStreamNumber)
            #We are now dropping the unencrypted data in payload since it has already been encrypted and replacing it with the encrypted payload that we will send out.
            payload = embeddedTime + encodedStreamNumber + encrypted
            target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
            print '(For msg message) Doing proof of work. Target:', target
            powStartTime = time.time()
            initialHash = hashlib.sha512(payload).digest()
            while trialValue > target:
                nonce += 1
                trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
            print '(For msg message) Found proof of work', trialValue, 'Nonce:', nonce
            try:
                # Guarded because the elapsed time can be zero on a fast machine.
                print 'POW took', int(time.time()-powStartTime), 'seconds.', nonce/(time.time()-powStartTime), 'nonce trials per second.'
            except:
                pass
            payload = pack('>Q',nonce) + payload
            inventoryHash = calculateInventoryHash(payload)
            objectType = 'msg'
            inventory[inventoryHash] = (objectType, toStreamNumber, payload, int(time.time()))
            self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Message sent. Waiting on acknowledgement. Sent on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
            print 'sending inv (within sendmsg function)'
            # NOTE(review): this passes streamNumber (parsed out of the
            # recipient's stored pubkey), not toStreamNumber used just above
            # for the inventory entry — verify these are always equal.
            broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
            #Update the status of the message in the 'sent' table to have a 'sent' status
            sqlLock.acquire()
            t = ('sentmessage',toaddress, fromaddress, subject, message,'doingpow')
            sqlSubmitQueue.put('UPDATE sent SET status=? WHERE toaddress=? AND fromaddress=? AND subject=? AND message=? AND status=?')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            t = (toRipe,)
            sqlSubmitQueue.put('''UPDATE pubkeys SET usedpersonally='yes' WHERE hash=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
    def requestPubKey(self,addressVersionNumber,streamNumber,ripe):
        """Build a getpubkey object for the given RIPE hash, do its POW,
        and advertise it to peers via the sendData threads."""
        payload = pack('>I',int(time.time()))
        payload += encodeVarint(addressVersionNumber)
        payload += encodeVarint(streamNumber)
        payload += ripe
        printLock.acquire()
        print 'making request for pubkey with ripe:', ripe.encode('hex')
        printLock.release()
        nonce = 0
        trialValue = 99999999999999999999
        #print 'trial value', trialValue
        statusbar = 'Doing the computations necessary to request the recipient\'s public key.'
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
        self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),ripe,'Doing work necessary to request public key.')
        print 'Doing proof-of-work necessary to send getpubkey message.'
        target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
        initialHash = hashlib.sha512(payload).digest()
        while trialValue > target:
            nonce += 1
            trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
        printLock.acquire()
        print 'Found proof of work', trialValue, 'Nonce:', nonce
        printLock.release()
        payload = pack('>Q',nonce) + payload
        inventoryHash = calculateInventoryHash(payload)
        objectType = 'getpubkey'
        inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
        print 'sending inv (for the getpubkey message)'
        broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Broacasting the public key request. This program will auto-retry if they are offline.')
        self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),ripe,'Sending public key request. Waiting for reply. Requested at ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
    def generateFullAckMessage(self,ackdata,toStreamNumber,embeddedTime):
        """Return a complete msg protocol message (24-byte header plus
        POW'd payload) wrapping ackdata, ready for the recipient to
        relay as an acknowledgement."""
        nonce = 0
        trialValue = 99999999999999999999
        encodedStreamNumber = encodeVarint(toStreamNumber)
        payload = embeddedTime + encodedStreamNumber + ackdata
        target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
        printLock.acquire()
        print '(For ack message) Doing proof of work...'
        printLock.release()
        powStartTime = time.time()
        initialHash = hashlib.sha512(payload).digest()
        while trialValue > target:
            nonce += 1
            trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
        printLock.acquire()
        print '(For ack message) Found proof of work', trialValue, 'Nonce:', nonce
        try:
            # Guarded because the elapsed time can be zero on a fast machine.
            print 'POW took', int(time.time()-powStartTime), 'seconds.', nonce/(time.time()-powStartTime), 'nonce trials per second.'
        except:
            pass
        printLock.release()
        payload = pack('>Q',nonce) + payload
        headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
        headerData += 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        headerData += pack('>L',len(payload))
        headerData += hashlib.sha512(payload).digest()[:4]
        return headerData + payload
class addressGenerator(QThread):
    """Worker thread that generates Bitmessage version-2 addresses.

    Two modes, selected by deterministicPassphrase in setup():
      * random: one address from fresh OpenSSL random keys;
      * deterministic: numberOfAddressesToMake addresses derived from the
        passphrase (reproducible on any machine).
    New keys are written to keys.dat, announced to the GUI through Qt
    signals, and the resulting address(es) are also placed on
    apiAddressGeneratorReturnQueue for API callers.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def setup(self,addressVersionNumber,streamNumber,label="(no label)",numberOfAddressesToMake=1,deterministicPassphrase="",eighteenByteRipe=False):
        # Stash the generation parameters; the actual work happens in run().
        # eighteenByteRipe=True requires a two-byte \x00 prefix on the ripe
        # hash (shorter address, but a much longer search).
        self.addressVersionNumber = addressVersionNumber
        self.streamNumber = streamNumber
        self.label = label
        self.numberOfAddressesToMake = numberOfAddressesToMake
        self.deterministicPassphrase = deterministicPassphrase
        self.eighteenByteRipe = eighteenByteRipe
    def run(self):
        if self.addressVersionNumber == 2:
            if self.deterministicPassphrase == "":
                # --- random (non-deterministic) single address ---
                statusbar = 'Generating one new address'
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
                #This next section is a little bit strange. We're going to generate keys over and over until we
                #find one that starts with either \x00 or \x00\x00. Then when we pack them into a Bitmessage address,
                #we won't store the \x00 or \x00\x00 bytes thus making the address shorter.
                startTime = time.time()
                numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                # The signing key is fixed; only the encryption key is
                # re-rolled each attempt until the ripe prefix matches.
                potentialPrivSigningKey = OpenSSL.rand(32)
                potentialPubSigningKey = self.pointMult(potentialPrivSigningKey)
                while True:
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                    potentialPrivEncryptionKey = OpenSSL.rand(32)
                    potentialPubEncryptionKey = self.pointMult(potentialPrivEncryptionKey)
                    #print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
                    #print 'potentialPubEncryptionKey', potentialPubEncryptionKey.encode('hex')
                    # ripe = RIPEMD160(SHA512(pubSigningKey || pubEncryptionKey))
                    ripe = hashlib.new('ripemd160')
                    sha = hashlib.new('sha512')
                    sha.update(potentialPubSigningKey+potentialPubEncryptionKey)
                    ripe.update(sha.digest())
                    #print 'potential ripe.digest', ripe.digest().encode('hex')
                    if self.eighteenByteRipe:
                        if ripe.digest()[:2] == '\x00\x00':
                            break
                    else:
                        if ripe.digest()[:1] == '\x00':
                            break
                print 'Generated address with ripe digest:', ripe.digest().encode('hex')
                print 'Address generator calculated', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, 'addresses at', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix/(time.time()-startTime),'addresses per second before finding one with the correct ripe-prefix.'
                # Strip the leading zero byte(s) the loop above guaranteed.
                if ripe.digest()[:2] == '\x00\x00':
                    address = encodeAddress(2,self.streamNumber,ripe.digest()[2:])
                elif ripe.digest()[:1] == '\x00':
                    address = encodeAddress(2,self.streamNumber,ripe.digest()[1:])
                #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
                #An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
                #https://en.bitcoin.it/wiki/Wallet_import_format
                privSigningKey = '\x80'+potentialPrivSigningKey
                checksum = hashlib.sha256(hashlib.sha256(privSigningKey).digest()).digest()[0:4]
                privSigningKeyWIF = arithmetic.changebase(privSigningKey + checksum,256,58)
                #print 'privSigningKeyWIF',privSigningKeyWIF
                privEncryptionKey = '\x80'+potentialPrivEncryptionKey
                checksum = hashlib.sha256(hashlib.sha256(privEncryptionKey).digest()).digest()[0:4]
                privEncryptionKeyWIF = arithmetic.changebase(privEncryptionKey + checksum,256,58)
                #print 'privEncryptionKeyWIF',privEncryptionKeyWIF
                config.add_section(address)
                config.set(address,'label',self.label)
                config.set(address,'enabled','true')
                config.set(address,'decoy','false')
                config.set(address,'privSigningKey',privSigningKeyWIF)
                config.set(address,'privEncryptionKey',privEncryptionKeyWIF)
                with open(appdata + 'keys.dat', 'wb') as configfile:
                    config.write(configfile)
                #It may be the case that this address is being generated as a result of a call to the API. Let us put the result in the necessary queue.
                apiAddressGeneratorReturnQueue.put(address)
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address. Doing work necessary to broadcast it...')
                self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
                reloadMyAddressHashes()
                # Queue the proof of work needed to broadcast the new pubkey.
                workerQueue.put(('doPOWForMyV2Pubkey',address))
            else: #There is something in the deterministicPassphrase variable thus we are going to do this deterministically.
                statusbar = 'Generating '+str(self.numberOfAddressesToMake) + ' new addresses.'
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
                # Even/odd nonces keep the signing and encryption key
                # derivations from ever colliding.
                signingKeyNonce = 0
                encryptionKeyNonce = 1
                listOfNewAddressesToSendOutThroughTheAPI = [] #We fill out this list no matter what although we only need it if we end up passing the info to the API.
                for i in range(self.numberOfAddressesToMake):
                    #This next section is a little bit strange. We're going to generate keys over and over until we
                    #find one that has a RIPEMD hash that starts with either \x00 or \x00\x00. Then when we pack them
                    #into a Bitmessage address, we won't store the \x00 or \x00\x00 bytes thus making the address shorter.
                    startTime = time.time()
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                    while True:
                        numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                        # Private keys are derived from the passphrase plus an
                        # incrementing varint nonce.
                        potentialPrivSigningKey = hashlib.sha512(self.deterministicPassphrase + encodeVarint(signingKeyNonce)).digest()[:32]
                        potentialPrivEncryptionKey = hashlib.sha512(self.deterministicPassphrase + encodeVarint(encryptionKeyNonce)).digest()[:32]
                        potentialPubSigningKey = self.pointMult(potentialPrivSigningKey)
                        potentialPubEncryptionKey = self.pointMult(potentialPrivEncryptionKey)
                        #print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
                        #print 'potentialPubEncryptionKey', potentialPubEncryptionKey.encode('hex')
                        signingKeyNonce += 2
                        encryptionKeyNonce += 2
                        ripe = hashlib.new('ripemd160')
                        sha = hashlib.new('sha512')
                        sha.update(potentialPubSigningKey+potentialPubEncryptionKey)
                        ripe.update(sha.digest())
                        #print 'potential ripe.digest', ripe.digest().encode('hex')
                        if self.eighteenByteRipe:
                            if ripe.digest()[:2] == '\x00\x00':
                                break
                        else:
                            if ripe.digest()[:1] == '\x00':
                                break
                    print 'ripe.digest', ripe.digest().encode('hex')
                    print 'Address generator calculated', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, 'addresses at', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix/(time.time()-startTime),'keys per second.'
                    if ripe.digest()[:2] == '\x00\x00':
                        address = encodeAddress(2,self.streamNumber,ripe.digest()[2:])
                    elif ripe.digest()[:1] == '\x00':
                        address = encodeAddress(2,self.streamNumber,ripe.digest()[1:])
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
                    #An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
                    #https://en.bitcoin.it/wiki/Wallet_import_format
                    privSigningKey = '\x80'+potentialPrivSigningKey
                    checksum = hashlib.sha256(hashlib.sha256(privSigningKey).digest()).digest()[0:4]
                    privSigningKeyWIF = arithmetic.changebase(privSigningKey + checksum,256,58)
                    privEncryptionKey = '\x80'+potentialPrivEncryptionKey
                    checksum = hashlib.sha256(hashlib.sha256(privEncryptionKey).digest()).digest()[0:4]
                    privEncryptionKeyWIF = arithmetic.changebase(privEncryptionKey + checksum,256,58)
                    try:
                        # add_section raises if this (deterministic) address is
                        # already in keys.dat; in that case just skip it.
                        config.add_section(address)
                        print 'self.label', self.label
                        config.set(address,'label',self.label)
                        config.set(address,'enabled','true')
                        config.set(address,'decoy','false')
                        config.set(address,'privSigningKey',privSigningKeyWIF)
                        config.set(address,'privEncryptionKey',privEncryptionKeyWIF)
                        with open(appdata + 'keys.dat', 'wb') as configfile:
                            config.write(configfile)
                        self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
                        listOfNewAddressesToSendOutThroughTheAPI.append(address)
                    except:
                        print address,'already exists. Not adding it again.'
                #It may be the case that this address is being generated as a result of a call to the API. Let us put the result in the necessary queue.
                apiAddressGeneratorReturnQueue.put(listOfNewAddressesToSendOutThroughTheAPI)
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address')
                reloadMyAddressHashes()
        #This code which deals with old RSA addresses will soon be removed.
        # (The triple-quoted string below is intentionally dead code.)
        """elif self.addressVersionNumber == 1:
        statusbar = 'Generating new ' + str(config.getint('bitmessagesettings', 'bitstrength')) + ' bit RSA key. This takes a minute on average. If you want to generate multiple addresses now, you can; they will queue.'
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
        (pubkey, privkey) = rsa.newkeys(config.getint('bitmessagesettings', 'bitstrength'))
        print privkey['n']
        print privkey['e']
        print privkey['d']
        print privkey['p']
        print privkey['q']
        sha = hashlib.new('sha512')
        #sha.update(str(pubkey.n)+str(pubkey.e))
        sha.update(convertIntToString(pubkey.n)+convertIntToString(pubkey.e))
        ripe = hashlib.new('ripemd160')
        ripe.update(sha.digest())
        address = encodeAddress(1,self.streamNumber,ripe.digest())
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
        config.add_section(address)
        config.set(address,'label',self.label)
        config.set(address,'enabled','true')
        config.set(address,'decoy','false')
        config.set(address,'n',str(privkey['n']))
        config.set(address,'e',str(privkey['e']))
        config.set(address,'d',str(privkey['d']))
        config.set(address,'p',str(privkey['p']))
        config.set(address,'q',str(privkey['q']))
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address')
        self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
        reloadMyAddressHashes()"""
    #Does an EC point multiplication; turns a private key into a public key.
    def pointMult(self,secret):
        """Return the secp256k1 public key (raw uncompressed bytes) for the
        32-byte private key *secret*, via ctypes calls into OpenSSL.
        """
        #ctx = OpenSSL.BN_CTX_new() #This value proved to cause Seg Faults on Linux. It turns out that it really didn't speed up EC_POINT_mul anyway.
        k = OpenSSL.EC_KEY_new_by_curve_name(OpenSSL.get_curve('secp256k1'))
        priv_key = OpenSSL.BN_bin2bn(secret, 32, 0)
        group = OpenSSL.EC_KEY_get0_group(k)
        pub_key = OpenSSL.EC_POINT_new(group)
        # pub_key = priv_key * G
        OpenSSL.EC_POINT_mul(group, pub_key, priv_key, None, None, None)
        OpenSSL.EC_KEY_set_private_key(k, priv_key)
        OpenSSL.EC_KEY_set_public_key(k, pub_key)
        #print 'priv_key',priv_key
        #print 'pub_key',pub_key
        # First i2o call sizes the buffer; second serializes the key into it.
        size = OpenSSL.i2o_ECPublicKey(k, 0)
        mb = ctypes.create_string_buffer(size)
        OpenSSL.i2o_ECPublicKey(k, ctypes.byref(ctypes.pointer(mb)))
        #print 'mb.raw', mb.raw.encode('hex'), 'length:', len(mb.raw)
        #print 'mb.raw', mb.raw, 'length:', len(mb.raw)
        # Free the native OpenSSL objects to avoid leaking memory.
        OpenSSL.EC_POINT_free(pub_key)
        #OpenSSL.BN_CTX_free(ctx)
        OpenSSL.BN_free(priv_key)
        OpenSSL.EC_KEY_free(k)
        return mb.raw
#This is one of several classes that constitute the API
#This class was written by Vaibhav Bhatia. Modified by Jonathan Warren (Atheros).
#http://code.activestate.com/recipes/501148-xmlrpc-serverclient-which-does-cookie-handling-and/
class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
def do_POST(self):
#Handles the HTTP POST request.
#Attempts to interpret all HTTP POST requests as XML-RPC calls,
#which are forwarded to the server's _dispatch method for handling.
#Note: this method is the same as in SimpleXMLRPCRequestHandler,
#just hacked to handle cookies
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10*1024*1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
L.append(self.rfile.read(chunk_size))
size_remaining -= len(L[-1])
data = ''.join(L)
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(
data, getattr(self, '_dispatch', None)
)
except: # This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
self.end_headers()
else:
# got a valid XML RPC response
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.send_header("Content-length", str(len(response)))
# HACK :start -> sends cookies here
if self.cookies:
for cookie in self.cookies:
self.send_header('Set-Cookie',cookie.output(header=''))
# HACK :end
self.end_headers()
self.wfile.write(response)
# shut down the connection
self.wfile.flush()
self.connection.shutdown(1)
def APIAuthenticateClient(self):
if self.headers.has_key('Authorization'):
# handle Basic authentication
(enctype, encstr) = self.headers.get('Authorization').split()
(emailid, password) = encstr.decode('base64').split(':')
if emailid == config.get('bitmessagesettings', 'apiusername') and password == config.get('bitmessagesettings', 'apipassword'):
return True
else:
return False
else:
print 'Authentication failed because header lacks Authentication field'
time.sleep(2)
return False
return False
def _dispatch(self, method, params):
self.cookies = []
validuser = self.APIAuthenticateClient()
if not validuser:
time.sleep(2)
return "RPC Username or password incorrect or HTTP header lacks authentication at all."
# handle request
if method == 'helloWorld':
(a,b) = params
return a+'-'+b
elif method == 'add':
(a,b) = params
return a+b
elif method == 'statusBar':
message, = params
apiSignalQueue.put(('updateStatusBar',message))
elif method == 'listAddresses':
data = '{"addresses":['
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
data
if len(data) > 20:
data += ','
data += json.dumps({'label':config.get(addressInKeysFile,'label'),'address':addressInKeysFile,'stream':streamNumber,'enabled':config.getboolean(addressInKeysFile,'enabled')},indent=4, separators=(',', ': '))
data += ']}'
return data
elif method == 'createRandomAddress':
if len(params) == 0:
return 'API Error 0000: I need parameters!'
elif len(params) == 1:
label, = params
eighteenByteRipe = False
elif len(params) == 2:
label, eighteenByteRipe = params
label = label.decode('base64')
apiAddressGeneratorReturnQueue.queue.clear()
apiSignalQueue.put(('createRandomAddress',(label, eighteenByteRipe))) #params should be a twopul which equals (eighteenByteRipe, label)
return apiAddressGeneratorReturnQueue.get()
elif method == 'createDeterministicAddresses':
if len(params) == 0:
return 'API Error 0000: I need parameters!'
elif len(params) == 1:
passphrase, = params
numberOfAddresses = 1
addressVersionNumber = 0
streamNumber = 0
eighteenByteRipe = False
elif len(params) == 2:
passphrase, numberOfAddresses = params
addressVersionNumber = 0
streamNumber = 0
eighteenByteRipe = False
elif len(params) == 3:
passphrase, numberOfAddresses, addressVersionNumber = params
streamNumber = 0
eighteenByteRipe = False
elif len(params) == 4:
passphrase, numberOfAddresses, addressVersionNumber, streamNumber = params
eighteenByteRipe = False
elif len(params) == 5:
passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe = params
if len(passphrase) == 0:
return 'API Error 0001: the specified passphrase is blank.'
passphrase = passphrase.decode('base64')
if addressVersionNumber == 0: #0 means "just use the proper addressVersionNumber"
addressVersionNumber == 2
if addressVersionNumber != 2:
return 'API Error 0002: the address version number currently must be 2 (or 0 which means auto-select). Others aren\'t supported.'
if streamNumber == 0: #0 means "just use the most available stream"
streamNumber = 1
if streamNumber != 1:
return 'API Error 0003: the stream number must be 1 (or 0 which means auto-select). Others aren\'t supported.'
if numberOfAddresses == 0:
return 'API Error 0004: Why would you ask me to generate 0 addresses for you?'
if numberOfAddresses > 9999:
return 'API Error 0005: You have (accidentially?) specified too many addresses to make. Maximum 9999. This check only exists to prevent mischief; if you really want to create more addresses than this, contact the Bitmessage developers and we can modify the check or you can do it yourself by searching the source code for this message.'
apiAddressGeneratorReturnQueue.queue.clear()
print 'about to send numberOfAddresses', numberOfAddresses
apiSignalQueue.put(('createDeterministicAddresses',(passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe)))
data = '{"addresses":['
queueReturn = apiAddressGeneratorReturnQueue.get()
for item in queueReturn:
if len(data) > 20:
data += ','
data += "\""+item+ "\""
data += ']}'
return data
elif method == 'getAllInboxMessages':
sqlLock.acquire()
sqlSubmitQueue.put('''SELECT msgid, toaddress, fromaddress, subject, received, message FROM inbox where folder='inbox' ORDER BY received''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
sqlLock.release()
data = '{"inboxMessages":['
for row in queryreturn:
msgid, toAddress, fromAddress, subject, received, message, = row
if len(data) > 25:
data += ','
data += json.dumps({'msgid':msgid.encode('hex'),'toAddress':toAddress,'fromAddress':fromAddress,'subject':subject.encode('base64'),'message':message.encode('base64'),'encodingType':2,'receivedTime':received},indent=4, separators=(',', ': '))
data += ']}'
return data
elif method == 'trashMessage':
if len(params) == 0:
return 'API Error 0000: I need parameters!'
msgid = params[0].decode('hex')
t = (msgid,)
sqlLock.acquire()
sqlSubmitQueue.put('''UPDATE inbox SET folder='trash' WHERE msgid=?''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
apiSignalQueue.put(('updateStatusBar','Per API: Trashed message (assuming message existed). UI not updated.'))
return 'Trashed message (assuming message existed). UI not updated. To double check, run getAllInboxMessages to see that the message disappeared, or restart Bitmessage and look in the normal Bitmessage GUI.'
elif method == 'sendMessage':
if len(params) == 0:
return 'API Error 0000: I need parameters!'
elif len(params) == 4:
toAddress, fromAddress, subject, message = params
encodingType = 2
elif len(params) == 5:
toAddress, fromAddress, subject, message, encodingType = params
if encodingType != 2:
return 'API Error 0006: The encoding type must be 2 because that is the only one this program currently supports.'
subject = subject.decode('base64')
message = message.decode('base64')
status,addressVersionNumber,streamNumber,toRipe = decodeAddress(toAddress)
if status <> 'success':
printLock.acquire()
print 'API Error 0007: Could not decode address:', toAddress, ':', status
printLock.release()
if status == 'checksumfailed':
return 'API Error 0008: Checksum failed for address: ' + toAddress
if status == 'invalidcharacters':
return 'API Error 0009: Invalid characters in address: '+ toAddress
if status == 'versiontoohigh':
return 'API Error 0010: Address version number too high (or zero) in address: ' + toAddress
if addressVersionNumber != 2:
return 'API Error 0011: the address version number currently must be 2. Others aren\'t supported. Check the toAddress.'
if streamNumber != 1:
return 'API Error 0012: the stream number must be 1. Others aren\'t supported. Check the toAddress.'
status,addressVersionNumber,streamNumber,fromRipe = decodeAddress(fromAddress)
if status <> 'success':
printLock.acquire()
print 'API Error 0007: Could not decode address:', fromAddress, ':', status
printLock.release()
if status == 'checksumfailed':
return 'API Error 0008: Checksum failed for address: ' + fromAddress
if status == 'invalidcharacters':
return 'API Error 0009: Invalid characters in address: '+ fromAddress
if status == 'versiontoohigh':
return 'API Error 0010: Address version number too high (or zero) in address: ' + fromAddress
if addressVersionNumber != 2:
return 'API Error 0011: the address version number currently must be 2. Others aren\'t supported. Check the fromAddress.'
if streamNumber != 1:
return 'API Error 0012: the stream number must be 1. Others aren\'t supported. Check the fromAddress.'
toAddress = addBMIfNotPresent(toAddress)
fromAddress = addBMIfNotPresent(fromAddress)
try:
fromAddressEnabled = config.getboolean(fromAddress,'enabled')
except:
return 'API Error 0013: could not find your fromAddress in the keys.dat file.'
if not fromAddressEnabled:
return 'API Error 0014: your fromAddress is disabled. Cannot send.'
ackdata = OpenSSL.rand(32)
sqlLock.acquire()
t = ('',toAddress,toRipe,fromAddress,subject,message,ackdata,int(time.time()),'findingpubkey',1,1,'sent')
sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
toLabel = ''
t = (toAddress,)
sqlLock.acquire()
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
toLabel, = row
apiSignalQueue.put(('displayNewSentMessage',(toAddress,toLabel,fromAddress,subject,message,ackdata)))
workerQueue.put(('sendmessage',toAddress))
return ackdata.encode('hex')
elif method == 'sendBroadcast':
if len(params) == 0:
return 'API Error 0000: I need parameters!'
if len(params) == 3:
fromAddress, subject, message = params
encodingType = 2
elif len(params) == 4:
fromAddress, subject, message, encodingType = params
if encodingType != 2:
return 'API Error 0006: The encoding type must be 2 because that is the only one this program currently supports.'
subject = subject.decode('base64')
message = message.decode('base64')
status,addressVersionNumber,streamNumber,fromRipe = decodeAddress(fromAddress)
if status <> 'success':
printLock.acquire()
print 'API Error 0007: Could not decode address:', fromAddress, ':', status
printLock.release()
if status == 'checksumfailed':
return 'API Error 0008: Checksum failed for address: ' + fromAddress
if status == 'invalidcharacters':
return 'API Error 0009: Invalid characters in address: '+ fromAddress
if status == 'versiontoohigh':
return 'API Error 0010: Address version number too high (or zero) in address: ' + fromAddress
if addressVersionNumber != 2:
return 'API Error 0011: the address version number currently must be 2. Others aren\'t supported. Check the fromAddress.'
if streamNumber != 1:
return 'API Error 0012: the stream number must be 1. Others aren\'t supported. Check the fromAddress.'
fromAddress = addBMIfNotPresent(fromAddress)
try:
fromAddressEnabled = config.getboolean(fromAddress,'enabled')
except:
return 'API Error 0013: could not find your fromAddress in the keys.dat file.'
ackdata = OpenSSL.rand(32)
toAddress = '[Broadcast subscribers]'
ripe = ''
sqlLock.acquire()
t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'broadcastpending',1,1,'sent')
sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
toLabel = '[Broadcast subscribers]'
apiSignalQueue.put(('displayNewSentMessage',(toAddress,toLabel,fromAddress,subject,message,ackdata)))
workerQueue.put(('sendbroadcast',(fromAddress,subject,message)))
return ackdata.encode('hex')
else:
return 'Invalid Method: %s'%method
#This thread, of which there is only one, runs the API.
class singleAPI(QThread):
    """Dedicated thread hosting the XML-RPC API server.

    There is only one instance of this thread; run() blocks forever in
    serve_forever() handling API requests with
    MySimpleXMLRPCRequestHandler.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        # Bind to the interface and port configured in keys.dat.
        listeningInterface = config.get('bitmessagesettings', 'apiinterface')
        listeningPort = config.getint('bitmessagesettings', 'apiport')
        apiServer = SimpleXMLRPCServer(
            (listeningInterface, listeningPort),
            MySimpleXMLRPCRequestHandler, True, True)
        apiServer.register_introspection_functions()
        apiServer.serve_forever()
#The MySimpleXMLRPCRequestHandler class cannot emit signals (or at least I don't know how) because it is not a QT thread. It therefore puts data in a queue which this thread monitors and emits the signals on its behalf.
class singleAPISignalHandler(QThread):
    """Relays work from the API thread onto the Qt signal system.

    The XML-RPC handler is not a Qt thread and cannot emit signals, so it
    pushes (command, data) tuples onto apiSignalQueue; this thread pops
    them forever and emits the matching signal on its behalf.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        while True:
            command, data = apiSignalQueue.get()
            if command == 'updateStatusBar':
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"), data)
            elif command == 'displayNewSentMessage':
                toAddress, toLabel, fromAddress, subject, message, ackdata = data
                self.emit(SIGNAL("displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), toAddress, toLabel, fromAddress, subject, message, ackdata)
            elif command == 'createRandomAddress':
                label, eighteenByteRipe = data
                # Version-2 address on stream 1, single address, no passphrase.
                generator = addressGenerator()
                generator.setup(2, 1, label, 1, "", eighteenByteRipe)
                self.addressGenerator = generator
                # Hand the generator to the GUI so its signals get connected.
                self.emit(SIGNAL("passAddressGeneratorObjectThrough(PyQt_PyObject)"), generator)
                generator.start()
            elif command == 'createDeterministicAddresses':
                passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe = data
                generator = addressGenerator()
                generator.setup(addressVersionNumber, streamNumber, 'unused API address', numberOfAddresses, passphrase, eighteenByteRipe)
                self.addressGenerator = generator
                self.emit(SIGNAL("passAddressGeneratorObjectThrough(PyQt_PyObject)"), generator)
                generator.start()
class iconGlossaryDialog(QtGui.QDialog):
    """Dialog explaining the connection-status icons; also shows the
    currently configured TCP port."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_iconGlossaryDialog()
        self.ui.setupUi(self)
        self.parent = parent
        self.ui.labelPortNumber.setText('You are using TCP port ' + str(config.getint('bitmessagesettings', 'port')) + '. (This can be changed in the settings).')
        # Shrink the window to its preferred (sizeHint) size.
        QtGui.QWidget.resize(self,QtGui.QWidget.sizeHint(self))
class helpDialog(QtGui.QDialog):
    """Simple help dialog linking out to the project documentation."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_helpDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Make the help URL clickable (opens in the system browser).
        self.ui.labelHelpURI.setOpenExternalLinks(True)
        QtGui.QWidget.resize(self,QtGui.QWidget.sizeHint(self))
class aboutDialog(QtGui.QDialog):
    """About box; fills in the running software version."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_aboutDialog()
        self.ui.setupUi(self)
        self.parent = parent
        self.ui.labelVersion.setText('version ' + softwareVersion)
class regenerateAddressesDialog(QtGui.QDialog):
    """Dialog for re-deriving deterministic addresses from a passphrase."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_regenerateAddressesDialog()
        self.ui.setupUi(self)
        self.parent = parent
        QtGui.QWidget.resize(self,QtGui.QWidget.sizeHint(self))
class settingsDialog(QtGui.QDialog):
    """Settings window.

    Loads the current values from the global config into the UI widgets
    and enables/disables controls to match the platform and the selected
    proxy type.  (Persisting the values back to keys.dat happens in the
    caller when the dialog is accepted.)
    """
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_settingsDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # User-interface tab.
        self.ui.checkBoxStartOnLogon.setChecked(config.getboolean('bitmessagesettings', 'startonlogon'))
        self.ui.checkBoxMinimizeToTray.setChecked(config.getboolean('bitmessagesettings', 'minimizetotray'))
        self.ui.checkBoxShowTrayNotifications.setChecked(config.getboolean('bitmessagesettings', 'showtraynotifications'))
        self.ui.checkBoxStartInTray.setChecked(config.getboolean('bitmessagesettings', 'startintray'))
        # Empty appdata means the program stores its data next to itself
        # (portable mode).
        if appdata == '':
            self.ui.checkBoxPortableMode.setChecked(True)
        # Grey out options not yet implemented on this platform.
        if 'darwin' in sys.platform:
            self.ui.checkBoxStartOnLogon.setDisabled(True)
            self.ui.checkBoxMinimizeToTray.setDisabled(True)
            self.ui.checkBoxShowTrayNotifications.setDisabled(True)
            self.ui.checkBoxStartInTray.setDisabled(True)
            self.ui.labelSettingsNote.setText('Options have been disabled because they either arn\'t applicable or because they haven\'t yet been implimented for your operating system.')
        elif 'linux' in sys.platform:
            self.ui.checkBoxStartOnLogon.setDisabled(True)
            self.ui.checkBoxMinimizeToTray.setDisabled(True)
            self.ui.checkBoxStartInTray.setDisabled(True)
            self.ui.labelSettingsNote.setText('Options have been disabled because they either arn\'t applicable or because they haven\'t yet been implimented for your operating system.')
        #On the Network settings tab:
        self.ui.lineEditTCPPort.setText(str(config.get('bitmessagesettings', 'port')))
        self.ui.checkBoxAuthentication.setChecked(config.getboolean('bitmessagesettings', 'socksauthentication'))
        # Proxy type drives which fields are editable (see
        # comboBoxProxyTypeChanged for the same logic on user changes).
        if str(config.get('bitmessagesettings', 'socksproxytype')) == 'none':
            self.ui.comboBoxProxyType.setCurrentIndex(0)
            self.ui.lineEditSocksHostname.setEnabled(False)
            self.ui.lineEditSocksPort.setEnabled(False)
            self.ui.lineEditSocksUsername.setEnabled(False)
            self.ui.lineEditSocksPassword.setEnabled(False)
            self.ui.checkBoxAuthentication.setEnabled(False)
        elif str(config.get('bitmessagesettings', 'socksproxytype')) == 'SOCKS4a':
            self.ui.comboBoxProxyType.setCurrentIndex(1)
            self.ui.lineEditTCPPort.setEnabled(False)
        elif str(config.get('bitmessagesettings', 'socksproxytype')) == 'SOCKS5':
            self.ui.comboBoxProxyType.setCurrentIndex(2)
            self.ui.lineEditTCPPort.setEnabled(False)
        self.ui.lineEditSocksHostname.setText(str(config.get('bitmessagesettings', 'sockshostname')))
        self.ui.lineEditSocksPort.setText(str(config.get('bitmessagesettings', 'socksport')))
        self.ui.lineEditSocksUsername.setText(str(config.get('bitmessagesettings', 'socksusername')))
        self.ui.lineEditSocksPassword.setText(str(config.get('bitmessagesettings', 'sockspassword')))
        QtCore.QObject.connect(self.ui.comboBoxProxyType, QtCore.SIGNAL("currentIndexChanged(int)"), self.comboBoxProxyTypeChanged)
        QtGui.QWidget.resize(self,QtGui.QWidget.sizeHint(self))
    def comboBoxProxyTypeChanged(self,comboBoxIndex):
        """Slot: enable/disable the SOCKS fields to match the proxy choice
        (index 0 = none, 1 = SOCKS4a, 2 = SOCKS5)."""
        if comboBoxIndex == 0:
            # Direct TCP: the SOCKS fields are irrelevant.
            self.ui.lineEditSocksHostname.setEnabled(False)
            self.ui.lineEditSocksPort.setEnabled(False)
            self.ui.lineEditSocksUsername.setEnabled(False)
            self.ui.lineEditSocksPassword.setEnabled(False)
            self.ui.checkBoxAuthentication.setEnabled(False)
            self.ui.lineEditTCPPort.setEnabled(True)
        elif comboBoxIndex == 1 or comboBoxIndex == 2:
            self.ui.lineEditSocksHostname.setEnabled(True)
            self.ui.lineEditSocksPort.setEnabled(True)
            self.ui.checkBoxAuthentication.setEnabled(True)
            # Username/password only make sense when authentication is on.
            if self.ui.checkBoxAuthentication.isChecked():
                self.ui.lineEditSocksUsername.setEnabled(True)
                self.ui.lineEditSocksPassword.setEnabled(True)
            self.ui.lineEditTCPPort.setEnabled(False)
class SpecialAddressBehaviorDialog(QtGui.QDialog):
    """Dialog for toggling mailing-list behavior on the identity currently
    selected in the 'Your Identities' table."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_SpecialAddressBehaviorDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Column 1 of the identities table holds the address string itself.
        currentRow = parent.ui.tableWidgetYourIdentities.currentRow()
        addressAtCurrentRow = str(parent.ui.tableWidgetYourIdentities.item(currentRow,1).text())
        if safeConfigGetBoolean(addressAtCurrentRow,'mailinglist'):
            self.ui.radioButtonBehaviorMailingList.click()
        else:
            self.ui.radioButtonBehaveNormalAddress.click()
        try:
            mailingListName = config.get(addressAtCurrentRow, 'mailinglistname')
        except:
            # No mailinglistname stored for this address yet.
            mailingListName = ''
        self.ui.lineEditMailingListName.setText(unicode(mailingListName,'utf-8'))
        QtGui.QWidget.resize(self,QtGui.QWidget.sizeHint(self))
class NewSubscriptionDialog(QtGui.QDialog):
    """Dialog for adding a new broadcast subscription.

    Re-validates the address on every keystroke and shows the result in
    labelSubscriptionAddressCheck.
    """
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_NewSubscriptionDialog()
        self.ui.setupUi(self)
        self.parent = parent
        QtCore.QObject.connect(self.ui.lineEditSubscriptionAddress, QtCore.SIGNAL("textChanged(QString)"), self.subscriptionAddressChanged)
    def subscriptionAddressChanged(self,QString):
        """Slot: validate the entered address and update the status label."""
        status,a,b,c = decodeAddress(str(QString))
        if status == 'missingbm':
            # Bug fix: the original used doubled quotes (''BM-'') which Python
            # treats as adjacent-literal concatenation, displaying BM- with no
            # quotes at all.
            self.ui.labelSubscriptionAddressCheck.setText("The address should start with 'BM-'")
        elif status == 'checksumfailed':
            self.ui.labelSubscriptionAddressCheck.setText('The address is not typed or copied correctly (the checksum failed).')
        elif status == 'versiontoohigh':
            self.ui.labelSubscriptionAddressCheck.setText('The version number of this address is higher than this software can support. Please upgrade Bitmessage.')
        elif status == 'invalidcharacters':
            self.ui.labelSubscriptionAddressCheck.setText('The address contains invalid characters.')
        elif status == 'success':
            self.ui.labelSubscriptionAddressCheck.setText('Address is valid.')
class NewAddressDialog(QtGui.QDialog):
    """Dialog for creating a new address, either random or deterministic."""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_NewAddressDialog()
        self.ui.setupUi(self)
        self.parent = parent
        row = 1
        #Let's fill out the 'existing address' combo box with addresses from the 'Your Identities' tab.
        while self.parent.ui.tableWidgetYourIdentities.item(row-1,1):
            # NOTE(review): click() fires on every loop pass although once
            # would do; harmless — it just re-selects the same radio button.
            self.ui.radioButtonExisting.click()
            #print self.parent.ui.tableWidgetYourIdentities.item(row-1,1).text()
            self.ui.comboBoxExisting.addItem(self.parent.ui.tableWidgetYourIdentities.item(row-1,1).text())
            row += 1
        # Deterministic options stay hidden until that mode is chosen.
        self.ui.groupBoxDeterministic.setHidden(True)
        QtGui.QWidget.resize(self,QtGui.QWidget.sizeHint(self))
class MyForm(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
#Ask the user if we may delete their old version 1 addresses if they have any.
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
if addressVersionNumber == 1:
displayMsg = "One of your addresses, "+addressInKeysFile+", is an old version 1 address. Version 1 addresses are no longer supported. May we delete it now?"
reply = QtGui.QMessageBox.question(self, 'Message',displayMsg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
config.remove_section(addressInKeysFile)
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
#Configure Bitmessage to start on startup (or remove the configuration) based on the setting in the keys.dat file
if 'win32' in sys.platform or 'win64' in sys.platform:
#Auto-startup for Windows
RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
self.settings = QSettings(RUN_PATH, QSettings.NativeFormat)
self.settings.remove("PyBitmessage") #In case the user moves the program and the registry entry is no longer valid, this will delete the old registry entry.
if config.getboolean('bitmessagesettings', 'startonlogon'):
self.settings.setValue("PyBitmessage",sys.argv[0])
elif 'darwin' in sys.platform:
#startup for mac
pass
elif 'linux' in sys.platform:
#startup for linux
pass
self.trayIcon = QtGui.QSystemTrayIcon(self)
self.trayIcon.setIcon( QtGui.QIcon(':/newPrefix/images/can-icon-16px.png') )
traySignal = "activated(QSystemTrayIcon::ActivationReason)"
QtCore.QObject.connect(self.trayIcon, QtCore.SIGNAL(traySignal), self.__icon_activated)
menu = QtGui.QMenu()
self.exitAction = menu.addAction("Exit", self.close)
self.trayIcon.setContextMenu(menu)
#I'm currently under the impression that Mac users have different expectations for the tray icon. They don't necessairly expect it to open the main window when clicked and they still expect a program showing a tray icon to also be in the dock.
if 'darwin' in sys.platform:
self.trayIcon.show()
#FILE MENU and other buttons
QtCore.QObject.connect(self.ui.actionExit, QtCore.SIGNAL("triggered()"), self.close)
QtCore.QObject.connect(self.ui.actionManageKeys, QtCore.SIGNAL("triggered()"), self.click_actionManageKeys)
QtCore.QObject.connect(self.ui.actionRegenerateDeterministicAddresses, QtCore.SIGNAL("triggered()"), self.click_actionRegenerateDeterministicAddresses)
QtCore.QObject.connect(self.ui.pushButtonNewAddress, QtCore.SIGNAL("clicked()"), self.click_NewAddressDialog)
QtCore.QObject.connect(self.ui.comboBoxSendFrom, QtCore.SIGNAL("activated(int)"),self.redrawLabelFrom)
QtCore.QObject.connect(self.ui.pushButtonAddAddressBook, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddAddressBook)
QtCore.QObject.connect(self.ui.pushButtonAddSubscription, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddSubscription)
QtCore.QObject.connect(self.ui.pushButtonAddBlacklist, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddBlacklist)
QtCore.QObject.connect(self.ui.pushButtonSend, QtCore.SIGNAL("clicked()"), self.click_pushButtonSend)
QtCore.QObject.connect(self.ui.pushButtonLoadFromAddressBook, QtCore.SIGNAL("clicked()"), self.click_pushButtonLoadFromAddressBook)
QtCore.QObject.connect(self.ui.radioButtonBlacklist, QtCore.SIGNAL("clicked()"), self.click_radioButtonBlacklist)
QtCore.QObject.connect(self.ui.radioButtonWhitelist, QtCore.SIGNAL("clicked()"), self.click_radioButtonWhitelist)
QtCore.QObject.connect(self.ui.pushButtonStatusIcon, QtCore.SIGNAL("clicked()"), self.click_pushButtonStatusIcon)
QtCore.QObject.connect(self.ui.actionSettings, QtCore.SIGNAL("triggered()"), self.click_actionSettings)
QtCore.QObject.connect(self.ui.actionAbout, QtCore.SIGNAL("triggered()"), self.click_actionAbout)
QtCore.QObject.connect(self.ui.actionHelp, QtCore.SIGNAL("triggered()"), self.click_actionHelp)
#Popup menu for the Inbox tab
self.ui.inboxContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionReply = self.ui.inboxContextMenuToolbar.addAction("Reply", self.on_action_InboxReply)
self.actionAddSenderToAddressBook = self.ui.inboxContextMenuToolbar.addAction("Add sender to your Address Book", self.on_action_InboxAddSenderToAddressBook)
self.actionTrashInboxMessage = self.ui.inboxContextMenuToolbar.addAction("Move to Trash", self.on_action_InboxTrash)
self.ui.tableWidgetInbox.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetInbox, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuInbox)
self.popMenuInbox = QtGui.QMenu( self )
self.popMenuInbox.addAction( self.actionReply )
self.popMenuInbox.addAction( self.actionAddSenderToAddressBook )
self.popMenuInbox.addSeparator()
self.popMenuInbox.addAction( self.actionTrashInboxMessage )
#Popup menu for the Your Identities tab
self.ui.addressContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionNew = self.ui.addressContextMenuToolbar.addAction("New", self.on_action_YourIdentitiesNew)
self.actionEnable = self.ui.addressContextMenuToolbar.addAction("Enable", self.on_action_YourIdentitiesEnable)
self.actionDisable = self.ui.addressContextMenuToolbar.addAction("Disable", self.on_action_YourIdentitiesDisable)
self.actionClipboard = self.ui.addressContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_YourIdentitiesClipboard)
self.actionSpecialAddressBehavior = self.ui.addressContextMenuToolbar.addAction("Special address behavior...", self.on_action_SpecialAddressBehaviorDialog)
self.ui.tableWidgetYourIdentities.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetYourIdentities, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuYourIdentities)
self.popMenu = QtGui.QMenu( self )
self.popMenu.addAction( self.actionNew )
self.popMenu.addSeparator()
self.popMenu.addAction( self.actionClipboard )
self.popMenu.addSeparator()
self.popMenu.addAction( self.actionEnable )
self.popMenu.addAction( self.actionDisable )
self.popMenu.addAction( self.actionSpecialAddressBehavior )
#Popup menu for the Address Book page
self.ui.addressBookContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionAddressBookSend = self.ui.addressBookContextMenuToolbar.addAction("Send message to this address", self.on_action_AddressBookSend)
self.actionAddressBookClipboard = self.ui.addressBookContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_AddressBookClipboard)
self.actionAddressBookNew = self.ui.addressBookContextMenuToolbar.addAction("Add New Address", self.on_action_AddressBookNew)
self.actionAddressBookDelete = self.ui.addressBookContextMenuToolbar.addAction("Delete", self.on_action_AddressBookDelete)
self.ui.tableWidgetAddressBook.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetAddressBook, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuAddressBook)
self.popMenuAddressBook = QtGui.QMenu( self )
self.popMenuAddressBook.addAction( self.actionAddressBookSend )
self.popMenuAddressBook.addAction( self.actionAddressBookClipboard )
self.popMenuAddressBook.addSeparator()
self.popMenuAddressBook.addAction( self.actionAddressBookNew )
self.popMenuAddressBook.addAction( self.actionAddressBookDelete )
#Popup menu for the Subscriptions page
self.ui.subscriptionsContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionsubscriptionsNew = self.ui.subscriptionsContextMenuToolbar.addAction("New", self.on_action_SubscriptionsNew)
self.actionsubscriptionsDelete = self.ui.subscriptionsContextMenuToolbar.addAction("Delete", self.on_action_SubscriptionsDelete)
self.actionsubscriptionsClipboard = self.ui.subscriptionsContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_SubscriptionsClipboard)
self.ui.tableWidgetSubscriptions.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetSubscriptions, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuSubscriptions)
self.popMenuSubscriptions = QtGui.QMenu( self )
self.popMenuSubscriptions.addAction( self.actionsubscriptionsNew )
self.popMenuSubscriptions.addAction( self.actionsubscriptionsDelete )
self.popMenuSubscriptions.addSeparator()
self.popMenuSubscriptions.addAction( self.actionsubscriptionsClipboard )
#Popup menu for the Sent page
self.ui.sentContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionTrashSentMessage = self.ui.sentContextMenuToolbar.addAction("Move to Trash", self.on_action_SentTrash)
self.actionSentClipboard = self.ui.sentContextMenuToolbar.addAction("Copy destination address to clipboard", self.on_action_SentClipboard)
self.ui.tableWidgetSent.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetSent, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuSent)
self.popMenuSent = QtGui.QMenu( self )
self.popMenuSent.addAction( self.actionSentClipboard )
self.popMenuSent.addAction( self.actionTrashSentMessage )
#Popup menu for the Blacklist page
self.ui.blacklistContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionBlacklistNew = self.ui.blacklistContextMenuToolbar.addAction("Add new entry", self.on_action_BlacklistNew)
self.actionBlacklistDelete = self.ui.blacklistContextMenuToolbar.addAction("Delete", self.on_action_BlacklistDelete)
self.actionBlacklistClipboard = self.ui.blacklistContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_BlacklistClipboard)
self.actionBlacklistEnable = self.ui.blacklistContextMenuToolbar.addAction("Enable", self.on_action_BlacklistEnable)
self.actionBlacklistDisable = self.ui.blacklistContextMenuToolbar.addAction("Disable", self.on_action_BlacklistDisable)
self.ui.tableWidgetBlacklist.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetBlacklist, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuBlacklist)
self.popMenuBlacklist = QtGui.QMenu( self )
#self.popMenuBlacklist.addAction( self.actionBlacklistNew )
self.popMenuBlacklist.addAction( self.actionBlacklistDelete )
self.popMenuBlacklist.addSeparator()
self.popMenuBlacklist.addAction( self.actionBlacklistClipboard )
self.popMenuBlacklist.addSeparator()
self.popMenuBlacklist.addAction( self.actionBlacklistEnable )
self.popMenuBlacklist.addAction( self.actionBlacklistDisable )
#Initialize the user's list of addresses on the 'Your Identities' tab.
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
isEnabled = config.getboolean(addressInKeysFile, 'enabled')
newItem = QtGui.QTableWidgetItem(unicode(config.get(addressInKeysFile, 'label'),'utf-8)'))
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetYourIdentities.insertRow(0)
self.ui.tableWidgetYourIdentities.setItem(0, 0, newItem)
newItem = QtGui.QTableWidgetItem(addressInKeysFile)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
if safeConfigGetBoolean(addressInKeysFile,'mailinglist'):
newItem.setTextColor(QtGui.QColor(137,04,177))#magenta
self.ui.tableWidgetYourIdentities.setItem(0, 1, newItem)
newItem = QtGui.QTableWidgetItem(str(addressStream(addressInKeysFile)))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetYourIdentities.setItem(0, 2, newItem)
if isEnabled:
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
self.sqlLookup = sqlThread()
self.sqlLookup.start()
reloadMyAddressHashes()
self.reloadBroadcastSendersForWhichImWatching()
#Load inbox from messages database file
sqlSubmitQueue.put('''SELECT msgid, toaddress, fromaddress, subject, received, message FROM inbox where folder='inbox' ORDER BY received''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
msgid, toAddress, fromAddress, subject, received, message, = row
try:
if toAddress == '[Broadcast subscribers]':
toLabel = '[Broadcast subscribers]'
else:
toLabel = config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
fromLabel = ''
t = (fromAddress,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
if queryreturn <> []:
for row in queryreturn:
fromLabel, = row
self.ui.tableWidgetInbox.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
newItem.setData(Qt.UserRole,str(toAddress))
if safeConfigGetBoolean(toAddress,'mailinglist'):
newItem.setTextColor(QtGui.QColor(137,04,177))
self.ui.tableWidgetInbox.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
newItem.setData(Qt.UserRole,str(fromAddress))
self.ui.tableWidgetInbox.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetInbox.setItem(0,2,newItem)
newItem = myTableWidgetItem(strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(received))))
newItem.setData(Qt.UserRole,QByteArray(msgid))
newItem.setData(33,int(received))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetInbox.setItem(0,3,newItem)
#self.ui.textEditInboxMessage.setText(self.ui.tableWidgetInbox.item(0,2).data(Qt.UserRole).toPyObject())
#Load Sent items from database
sqlSubmitQueue.put('''SELECT toaddress, fromaddress, subject, message, status, ackdata, lastactiontime FROM sent where folder = 'sent' ORDER BY lastactiontime''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
toAddress, fromAddress, subject, message, status, ackdata, lastactiontime = row
try:
fromLabel = config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress
toLabel = ''
t = (toAddress,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
if queryreturn <> []:
for row in queryreturn:
toLabel, = row
self.ui.tableWidgetSent.insertRow(0)
if toLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(toAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(toAddress))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(fromAddress))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,2,newItem)
if status == 'findingpubkey':
newItem = myTableWidgetItem('Waiting on their public key. Will request it again soon.')
elif status == 'sentmessage':
newItem = myTableWidgetItem('Message sent. Waiting on acknowledgement. Sent at ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(lastactiontime)))
elif status == 'doingpow':
newItem = myTableWidgetItem('Need to do work to send message. Work is queued.')
elif status == 'ackreceived':
newItem = myTableWidgetItem('Acknowledgement of the message received ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
elif status == 'broadcastpending':
newItem = myTableWidgetItem('Doing the work necessary to send broadcast...')
elif status == 'broadcastsent':
newItem = myTableWidgetItem('Broadcast on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
else:
newItem = myTableWidgetItem('Unknown status. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
newItem.setData(Qt.UserRole,QByteArray(ackdata))
newItem.setData(33,int(lastactiontime))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,3,newItem)
#Initialize the address book
sqlSubmitQueue.put('SELECT * FROM addressbook')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
label, address = row
self.ui.tableWidgetAddressBook.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
#Initialize the Subscriptions
sqlSubmitQueue.put('SELECT label, address FROM subscriptions')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
label, address = row
self.ui.tableWidgetSubscriptions.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
self.ui.tableWidgetSubscriptions.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSubscriptions.setItem(0,1,newItem)
#Initialize the Blacklist or Whitelist
if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
self.loadBlackWhiteList()
else:
self.ui.tabWidget.setTabText(6,'Whitelist')
self.ui.radioButtonWhitelist.click()
self.loadBlackWhiteList()
#Initialize the ackdataForWhichImWatching data structure using data from the sql database.
sqlSubmitQueue.put('''SELECT ackdata FROM sent where (status='sentmessage' OR status='doingpow')''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
ackdata, = row
print 'Watching for ackdata', ackdata.encode('hex')
ackdataForWhichImWatching[ackdata] = 0
QtCore.QObject.connect(self.ui.tableWidgetYourIdentities, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetYourIdentitiesItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetAddressBook, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetAddressBookItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetSubscriptions, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetSubscriptionsItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetInbox, QtCore.SIGNAL("itemSelectionChanged ()"), self.tableWidgetInboxItemClicked)
QtCore.QObject.connect(self.ui.tableWidgetSent, QtCore.SIGNAL("itemSelectionChanged ()"), self.tableWidgetSentItemClicked)
#Put the colored icon on the status bar
#self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/yellowicon.png"))
self.statusbar = self.statusBar()
self.statusbar.insertPermanentWidget(0,self.ui.pushButtonStatusIcon)
self.ui.labelStartupTime.setText('Since startup on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
self.numberOfMessagesProcessed = 0
self.numberOfBroadcastsProcessed = 0
self.numberOfPubkeysProcessed = 0
#Below this point, it would be good if all of the necessary global data structures were initialized.
self.rerenderComboBoxSendFrom()
self.listOfOutgoingSynSenderThreads = [] #if we don't maintain this list, the threads will get garbage-collected.
self.connectToStream(1)
self.singleListenerThread = singleListener()
self.singleListenerThread.start()
QtCore.QObject.connect(self.singleListenerThread, QtCore.SIGNAL("passObjectThrough(PyQt_PyObject)"), self.connectObjectToSignals)
self.singleCleanerThread = singleCleaner()
self.singleCleanerThread.start()
QtCore.QObject.connect(self.singleCleanerThread, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
QtCore.QObject.connect(self.singleCleanerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
self.workerThread = singleWorker()
self.workerThread.start()
QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByAckdata)
QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
if safeConfigGetBoolean('bitmessagesettings','apienabled'):
try:
apiNotifyPath = config.get('bitmessagesettings','apinotifypath')
except:
apiNotifyPath = ''
if apiNotifyPath != '':
printLock.acquire()
print 'Trying to call', apiNotifyPath
printLock.release()
call([apiNotifyPath, "startingUp"])
self.singleAPIThread = singleAPI()
self.singleAPIThread.start()
self.singleAPISignalHandlerThread = singleAPISignalHandler()
self.singleAPISignalHandlerThread.start()
QtCore.QObject.connect(self.singleAPISignalHandlerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
QtCore.QObject.connect(self.singleAPISignalHandlerThread, QtCore.SIGNAL("passAddressGeneratorObjectThrough(PyQt_PyObject)"), self.connectObjectToAddressGeneratorSignals)
QtCore.QObject.connect(self.singleAPISignalHandlerThread, QtCore.SIGNAL("displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayNewSentMessage)
def click_actionManageKeys(self):
if 'darwin' in sys.platform or 'linux' in sys.platform:
if appdata == '':
reply = QtGui.QMessageBox.information(self, 'keys.dat?','You may manage your keys by editing the keys.dat file stored in the same directory as this program. It is important that you back up this file.', QMessageBox.Ok)
else:
QtGui.QMessageBox.information(self, 'keys.dat?','You may manage your keys by editing the keys.dat file stored in\n' + appdata + '\nIt is important that you back up this file.', QMessageBox.Ok)
elif sys.platform == 'win32' or sys.platform == 'win64':
if appdata == '':
reply = QtGui.QMessageBox.question(self, 'Open keys.dat?','You may manage your keys by editing the keys.dat file stored in the same directory as this program. It is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
else:
reply = QtGui.QMessageBox.question(self, 'Open keys.dat?','You may manage your keys by editing the keys.dat file stored in\n' + appdata + '\nIt is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.openKeysFile()
def click_actionRegenerateDeterministicAddresses(self):
self.regenerateAddressesDialogInstance = regenerateAddressesDialog(self)
if self.regenerateAddressesDialogInstance.exec_():
if self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text() == "":
QMessageBox.about(self, "bad passphrase", "You must type your passphrase. If you don\'t have one then this is not the form for you.")
else:
streamNumberForAddress = int(self.regenerateAddressesDialogInstance.ui.lineEditStreamNumber.text())
addressVersionNumber = int(self.regenerateAddressesDialogInstance.ui.lineEditAddressVersionNumber.text())
self.addressGenerator = addressGenerator()
self.addressGenerator.setup(addressVersionNumber,streamNumberForAddress,"unused address",self.regenerateAddressesDialogInstance.ui.spinBoxNumberOfAddressesToMake.value(),self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text().toUtf8(),self.regenerateAddressesDialogInstance.ui.checkBoxEighteenByteRipe.isChecked())
QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
self.addressGenerator.start()
self.ui.tabWidget.setCurrentIndex(3)
def openKeysFile(self):
if 'linux' in sys.platform:
subprocess.call(["xdg-open", appdata + 'keys.dat'])
else:
os.startfile(appdata + 'keys.dat')
def changeEvent(self, event):
if config.getboolean('bitmessagesettings', 'minimizetotray') and not 'darwin' in sys.platform:
if event.type() == QtCore.QEvent.WindowStateChange:
if self.windowState() & QtCore.Qt.WindowMinimized:
self.hide()
self.trayIcon.show()
#self.hidden = True
if 'win32' in sys.platform or 'win64' in sys.platform:
self.setWindowFlags(Qt.ToolTip)
elif event.oldState() & QtCore.Qt.WindowMinimized:
#The window state has just been changed to Normal/Maximised/FullScreen
pass
#QtGui.QWidget.changeEvent(self, event)
def __icon_activated(self, reason):
if reason == QtGui.QSystemTrayIcon.Trigger:
if 'linux' in sys.platform:
self.trayIcon.hide()
self.setWindowFlags(Qt.Window)
self.show()
elif 'win32' in sys.platform or 'win64' in sys.platform:
self.trayIcon.hide()
self.setWindowFlags(Qt.Window)
self.show()
self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
self.activateWindow()
elif 'darwin' in sys.platform:
#self.trayIcon.hide() #this line causes a segmentation fault
#self.setWindowFlags(Qt.Window)
#self.show()
self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
self.activateWindow()
def incrementNumberOfMessagesProcessed(self):
self.numberOfMessagesProcessed += 1
self.ui.labelMessageCount.setText('Processed ' + str(self.numberOfMessagesProcessed) + ' person-to-person messages.')
def incrementNumberOfBroadcastsProcessed(self):
self.numberOfBroadcastsProcessed += 1
self.ui.labelBroadcastCount.setText('Processed ' + str(self.numberOfBroadcastsProcessed) + ' broadcast messages.')
def incrementNumberOfPubkeysProcessed(self):
self.numberOfPubkeysProcessed += 1
self.ui.labelPubkeyCount.setText('Processed ' + str(self.numberOfPubkeysProcessed) + ' public keys.')
    def updateNetworkStatusTab(self,streamNumber,connectionCount):
        # Refresh the connection-count cell for the given stream on the
        # Network Status tab and recolor the status icon accordingly.
        global statusIconColor
        #print 'updating network status tab'
        totalNumberOfConnectionsFromAllStreams = 0 #One would think we could use len(sendDataQueues) for this, but sendData threads don't remove themselves from sendDataQueues fast enough for len(sendDataQueues) to be accurate here.
        for currentRow in range(self.ui.tableWidgetConnectionCount.rowCount()):
            rowStreamNumber = int(self.ui.tableWidgetConnectionCount.item(currentRow,0).text())
            if streamNumber == rowStreamNumber:
                self.ui.tableWidgetConnectionCount.item(currentRow,1).setText(str(connectionCount))
                # NOTE(review): only the row matching the updated stream is
                # added into the total, so counts shown for other streams are
                # ignored here — confirm whether that is intended once there
                # is more than one stream.
                totalNumberOfConnectionsFromAllStreams += connectionCount
        self.ui.labelTotalConnections.setText('Total Connections: ' + str(totalNumberOfConnectionsFromAllStreams))
        if totalNumberOfConnectionsFromAllStreams > 0 and statusIconColor == 'red': #FYI: The 'singlelistener' thread sets the icon color to green when it receives an incoming connection, meaning that the user's firewall is configured correctly.
            self.setStatusIcon('yellow')
        elif totalNumberOfConnectionsFromAllStreams == 0:
            self.setStatusIcon('red')
def setStatusIcon(self,color):
global statusIconColor
#print 'setting status icon color'
if color == 'red':
self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/redicon.png"))
statusIconColor = 'red'
if color == 'yellow':
if self.statusBar().currentMessage() == 'Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.':
self.statusBar().showMessage('')
self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/yellowicon.png"))
statusIconColor = 'yellow'
if color == 'green':
if self.statusBar().currentMessage() == 'Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.':
self.statusBar().showMessage('')
self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/greenicon.png"))
statusIconColor = 'green'
def updateSentItemStatusByHash(self,toRipe,textToDisplay):
for i in range(self.ui.tableWidgetSent.rowCount()):
toAddress = str(self.ui.tableWidgetSent.item(i,0).data(Qt.UserRole).toPyObject())
status,addressVersionNumber,streamNumber,ripe = decodeAddress(toAddress)
if ripe == toRipe:
self.ui.tableWidgetSent.item(i,3).setText(unicode(textToDisplay,'utf-8'))
def updateSentItemStatusByAckdata(self,ackdata,textToDisplay):
for i in range(self.ui.tableWidgetSent.rowCount()):
toAddress = str(self.ui.tableWidgetSent.item(i,0).data(Qt.UserRole).toPyObject())
tableAckdata = self.ui.tableWidgetSent.item(i,3).data(Qt.UserRole).toPyObject()
status,addressVersionNumber,streamNumber,ripe = decodeAddress(toAddress)
if ackdata == tableAckdata:
self.ui.tableWidgetSent.item(i,3).setText(unicode(textToDisplay,'utf-8'))
def rerenderInboxFromLabels(self):
for i in range(self.ui.tableWidgetInbox.rowCount()):
addressToLookup = str(self.ui.tableWidgetInbox.item(i,1).data(Qt.UserRole).toPyObject())
fromLabel = ''
t = (addressToLookup,)
sqlLock.acquire()
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
fromLabel, = row
self.ui.tableWidgetInbox.item(i,1).setText(unicode(fromLabel,'utf-8'))
else:
#It might be a broadcast message. We should check for that label.
sqlLock.acquire()
sqlSubmitQueue.put('''select label from subscriptions where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
fromLabel, = row
self.ui.tableWidgetInbox.item(i,1).setText(unicode(fromLabel,'utf-8'))
def rerenderInboxToLabels(self):
for i in range(self.ui.tableWidgetInbox.rowCount()):
toAddress = str(self.ui.tableWidgetInbox.item(i,0).data(Qt.UserRole).toPyObject())
try:
toLabel = config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
self.ui.tableWidgetInbox.item(i,0).setText(unicode(toLabel,'utf-8'))
#Set the color according to whether it is the address of a mailing list or not.
if safeConfigGetBoolean(toAddress,'mailinglist'):
self.ui.tableWidgetInbox.item(i,0).setTextColor(QtGui.QColor(137,04,177))
else:
self.ui.tableWidgetInbox.item(i,0).setTextColor(QtGui.QColor(0,0,0))
def rerenderSentFromLabels(self):
for i in range(self.ui.tableWidgetSent.rowCount()):
fromAddress = str(self.ui.tableWidgetSent.item(i,1).data(Qt.UserRole).toPyObject())
try:
fromLabel = config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress
self.ui.tableWidgetSent.item(i,1).setText(unicode(fromLabel,'utf-8'))
def rerenderSentToLabels(self):
for i in range(self.ui.tableWidgetSent.rowCount()):
addressToLookup = str(self.ui.tableWidgetSent.item(i,0).data(Qt.UserRole).toPyObject())
toLabel = ''
t = (addressToLookup,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
if queryreturn <> []:
for row in queryreturn:
toLabel, = row
self.ui.tableWidgetSent.item(i,0).setText(unicode(toLabel,'utf-8'))
def click_pushButtonSend(self):
self.statusBar().showMessage('')
toAddresses = str(self.ui.lineEditTo.text())
fromAddress = str(self.ui.labelFrom.text())
subject = str(self.ui.lineEditSubject.text().toUtf8())
message = str(self.ui.textEditMessage.document().toPlainText().toUtf8())
if self.ui.radioButtonSpecific.isChecked(): #To send a message to specific people (rather than broadcast)
toAddressesList = [s.strip() for s in toAddresses.replace(',', ';').split(';')]
toAddressesList = list(set(toAddressesList)) #remove duplicate addresses. If the user has one address with a BM- and the same address without the BM-, this will not catch it. They'll send the message to the person twice.
for toAddress in toAddressesList:
if toAddress <> '':
status,addressVersionNumber,streamNumber,ripe = decodeAddress(toAddress)
if status <> 'success':
printLock.acquire()
print 'Error: Could not decode', toAddress, ':', status
printLock.release()
if status == 'missingbm':
self.statusBar().showMessage('Error: Bitmessage addresses start with BM- Please check ' + toAddress)
if status == 'checksumfailed':
self.statusBar().showMessage('Error: The address ' + toAddress+' is not typed or copied correctly. Please check it.')
if status == 'invalidcharacters':
self.statusBar().showMessage('Error: The address '+ toAddress+ ' contains invalid characters. Please check it.')
if status == 'versiontoohigh':
self.statusBar().showMessage('Error: The address version in '+ toAddress+ ' is too high. Either you need to upgrade your Bitmessage software or your acquaintance is being clever.')
elif fromAddress == '':
self.statusBar().showMessage('Error: You must specify a From address. If you don\'t have one, go to the \'Your Identities\' tab.')
else:
toAddress = addBMIfNotPresent(toAddress)
try:
config.get(toAddress, 'enabled')
#The toAddress is one owned by me. We cannot send messages to ourselves without significant changes to the codebase.
QMessageBox.about(self, "Sending to your address", "Error: One of the addresses to which you are sending a message, "+toAddress+", is yours. Unfortunately the Bitmessage client cannot process its own messages. Please try running a second client on a different computer or within a VM.")
continue
except:
pass
if addressVersionNumber > 2 or addressVersionNumber == 0:
QMessageBox.about(self, "Address version number", "Concerning the address "+toAddress+", Bitmessage cannot understand address version numbers of "+str(addressVersionNumber)+". Perhaps upgrade Bitmessage to the latest version.")
continue
if streamNumber > 1 or streamNumber == 0:
QMessageBox.about(self, "Stream number", "Concerning the address "+toAddress+", Bitmessage cannot handle stream numbers of "+str(streamNumber)+". Perhaps upgrade Bitmessage to the latest version.")
continue
self.statusBar().showMessage('')
try:
if connectionsCount[streamNumber] == 0:
self.statusBar().showMessage('Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.')
except:
self.statusBar().showMessage('Warning: The address uses a stream number currently not supported by this Bitmessage version. Perhaps upgrade.')
ackdata = OpenSSL.rand(32)
sqlLock.acquire()
t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'findingpubkey',1,1,'sent')
sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
"""try:
fromLabel = config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress"""
toLabel = ''
t = (toAddress,)
sqlLock.acquire()
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
toLabel, = row
self.displayNewSentMessage(toAddress,toLabel,fromAddress, subject, message, ackdata)
workerQueue.put(('sendmessage',toAddress))
"""self.ui.tableWidgetSent.insertRow(0)
if toLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(toAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(toAddress))
self.ui.tableWidgetSent.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(fromAddress))
self.ui.tableWidgetSent.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
self.ui.tableWidgetSent.setItem(0,2,newItem)
newItem = myTableWidgetItem('Just pressed ''send'' '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
newItem.setData(Qt.UserRole,QByteArray(ackdata))
newItem.setData(33,int(time.time()))
self.ui.tableWidgetSent.setItem(0,3,newItem)
self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(0,2).data(Qt.UserRole).toPyObject())"""
self.ui.comboBoxSendFrom.setCurrentIndex(0)
self.ui.labelFrom.setText('')
self.ui.lineEditTo.setText('')
self.ui.lineEditSubject.setText('')
self.ui.textEditMessage.setText('')
self.ui.tabWidget.setCurrentIndex(2)
self.ui.tableWidgetSent.setCurrentCell(0,0)
else:
self.statusBar().showMessage('Your \'To\' field is empty.')
else: #User selected 'Broadcast'
if fromAddress == '':
self.statusBar().showMessage('Error: You must specify a From address. If you don\'t have one, go to the \'Your Identities\' tab.')
else:
self.statusBar().showMessage('')
#We don't actually need the ackdata for acknowledgement since this is a broadcast message, but we can use it to update the user interface when the POW is done generating.
ackdata = OpenSSL.rand(32)
toAddress = '[Broadcast subscribers]'
ripe = ''
sqlLock.acquire()
t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'broadcastpending',1,1,'sent')
sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
workerQueue.put(('sendbroadcast',(fromAddress,subject,message)))
try:
fromLabel = config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress
toLabel = '[Broadcast subscribers]'
self.ui.tableWidgetSent.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(toAddress))
self.ui.tableWidgetSent.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(fromAddress))
self.ui.tableWidgetSent.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
self.ui.tableWidgetSent.setItem(0,2,newItem)
#newItem = QtGui.QTableWidgetItem('Doing work necessary to send broadcast...'+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
newItem = myTableWidgetItem('Work is queued.')
newItem.setData(Qt.UserRole,QByteArray(ackdata))
newItem.setData(33,int(time.time()))
self.ui.tableWidgetSent.setItem(0,3,newItem)
self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(0,2).data(Qt.UserRole).toPyObject())
self.ui.comboBoxSendFrom.setCurrentIndex(0)
self.ui.labelFrom.setText('')
self.ui.lineEditTo.setText('')
self.ui.lineEditSubject.setText('')
self.ui.textEditMessage.setText('')
self.ui.tabWidget.setCurrentIndex(2)
self.ui.tableWidgetSent.setCurrentCell(0,0)
def click_pushButtonLoadFromAddressBook(self):
    """Switch to the Address Book tab and flash a hint in the status bar
    explaining how to start a message from there."""
    self.ui.tabWidget.setCurrentIndex(5)
    # Blink the instruction four times by alternating a blank message
    # with the hint text.
    flashesRemaining = 4
    while flashesRemaining > 0:
        time.sleep(0.1)
        self.statusBar().showMessage('')
        time.sleep(0.1)
        self.statusBar().showMessage('Right click an entry in your address book and select \'Send message to this address\'.')
        flashesRemaining -= 1
def redrawLabelFrom(self,index):
    """Display the address stored in the given Send-From combo-box entry
    in the From label."""
    storedAddress = self.ui.comboBoxSendFrom.itemData(index).toPyObject()
    self.ui.labelFrom.setText(storedAddress)
def rerenderComboBoxSendFrom(self):
    """Rebuild the Send-From combo box from the enabled addresses in the
    keys file, then auto-select the address when exactly one exists."""
    self.ui.comboBoxSendFrom.clear()
    self.ui.labelFrom.setText('')
    for sectionName in config.sections():
        if sectionName == 'bitmessagesettings':
            continue
        if not config.getboolean(sectionName, 'enabled'):
            continue
        self.ui.comboBoxSendFrom.insertItem(0,unicode(config.get(sectionName, 'label'),'utf-8'),sectionName)
    # Blank entry at the top so that nothing is selected by default.
    self.ui.comboBoxSendFrom.insertItem(0,'','')
    if self.ui.comboBoxSendFrom.count() == 2:
        # Exactly one real address: pre-select it for the user.
        self.ui.comboBoxSendFrom.setCurrentIndex(1)
        self.redrawLabelFrom(self.ui.comboBoxSendFrom.currentIndex())
    else:
        self.ui.comboBoxSendFrom.setCurrentIndex(0)
def connectToStream(self,streamNumber):
    """Begin participating in the given stream: initialize its connection
    counter, add a row for it on the Network Status tab, and start an
    outgoing-connection (SYN sender) thread for it."""
    connectionsCount[streamNumber] = 0
    #Add a line to the Connection Count table on the Network Status tab with a 'zero' connection count. This will be updated as necessary by another function.
    self.ui.tableWidgetConnectionCount.insertRow(0)
    newItem = QtGui.QTableWidgetItem(str(streamNumber))
    newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
    self.ui.tableWidgetConnectionCount.setItem(0,0,newItem)
    newItem = QtGui.QTableWidgetItem('0')
    newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
    self.ui.tableWidgetConnectionCount.setItem(0,1,newItem)
    # Keep a reference to the thread object so it isn't garbage collected.
    a = outgoingSynSender()
    self.listOfOutgoingSynSenderThreads.append(a)
    # The SYN sender hands us child objects through this signal so their
    # signals can be wired to GUI slots on this (QApplication) thread.
    QtCore.QObject.connect(a, QtCore.SIGNAL("passObjectThrough(PyQt_PyObject)"), self.connectObjectToSignals)
    QtCore.QObject.connect(a, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
    a.setup(streamNumber)
    a.start()
def connectObjectToSignals(self,object):
    """Wire a worker/network thread object's signals to this window's GUI
    slots so background threads can update the interface safely via the
    Qt event loop."""
    QtCore.QObject.connect(object, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
    QtCore.QObject.connect(object, QtCore.SIGNAL("displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayNewInboxMessage)
    QtCore.QObject.connect(object, QtCore.SIGNAL("displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayNewSentMessage)
    QtCore.QObject.connect(object, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
    QtCore.QObject.connect(object, QtCore.SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByAckdata)
    QtCore.QObject.connect(object, QtCore.SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"), self.updateNetworkStatusTab)
    QtCore.QObject.connect(object, QtCore.SIGNAL("incrementNumberOfMessagesProcessed()"), self.incrementNumberOfMessagesProcessed)
    QtCore.QObject.connect(object, QtCore.SIGNAL("incrementNumberOfPubkeysProcessed()"), self.incrementNumberOfPubkeysProcessed)
    QtCore.QObject.connect(object, QtCore.SIGNAL("incrementNumberOfBroadcastsProcessed()"), self.incrementNumberOfBroadcastsProcessed)
    QtCore.QObject.connect(object, QtCore.SIGNAL("setStatusIcon(PyQt_PyObject)"), self.setStatusIcon)
#This function exists because of the API. The API thread starts an address generator thread and must somehow connect the address generator's signals to the QApplication thread. This function is used to connect the slots and signals.
def connectObjectToAddressGeneratorSignals(self,object):
    """Connect an address-generator thread's signals to this (QApplication)
    thread's slots. Exists because the API thread may start an address
    generator and cannot touch the GUI itself."""
    # Consistency fix: use the QtCore-qualified SIGNAL like every other
    # connect call in this class (the first line used a bare SIGNAL).
    QtCore.QObject.connect(object, QtCore.SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
    QtCore.QObject.connect(object, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
#This function is called by the processmsg function when that function receives a message to an address that is acting as a pseudo-mailing-list. The message will be broadcast out. This function puts the message on the 'Sent' tab.
def displayNewSentMessage(self,toAddress,toLabel,fromAddress,subject,message,ackdata):
    """Prepend a freshly queued outgoing message to the Sent table.

    Called both from click_pushButtonSend and (via a signal) from the message
    processor when a pseudo-mailing-list rebroadcasts a message. The ackdata
    is stored on the status cell so later status updates can find the row.
    """
    try:
        fromLabel = config.get(fromAddress, 'label')
    except:
        fromLabel = ''
    if fromLabel == '':
        fromLabel = fromAddress
    self.ui.tableWidgetSent.insertRow(0)
    if toLabel == '':
        newItem = QtGui.QTableWidgetItem(unicode(toAddress,'utf-8'))
    else:
        newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
    newItem.setData(Qt.UserRole,str(toAddress))
    self.ui.tableWidgetSent.setItem(0,0,newItem)
    if fromLabel == '':
        newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
    else:
        newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
    newItem.setData(Qt.UserRole,str(fromAddress))
    self.ui.tableWidgetSent.setItem(0,1,newItem)
    # Fix: the encoding name was misspelled 'utf-8)' (stray parenthesis),
    # which raises LookupError at runtime.
    newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8'))
    newItem.setData(Qt.UserRole,unicode(message,'utf-8'))
    self.ui.tableWidgetSent.setItem(0,2,newItem)
    newItem = myTableWidgetItem('Work is queued. '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
    newItem.setData(Qt.UserRole,QByteArray(ackdata))
    newItem.setData(33,int(time.time()))
    self.ui.tableWidgetSent.setItem(0,3,newItem)
    self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(0,2).data(Qt.UserRole).toPyObject())
def displayNewInboxMessage(self,inventoryHash,toAddress,fromAddress,subject,message):
'''print 'test signals displayNewInboxMessage'
print 'toAddress', toAddress
print 'fromAddress', fromAddress
print 'message', message'''
fromLabel = ''
sqlLock.acquire()
t = (fromAddress,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
fromLabel, = row
else:
#There might be a label in the subscriptions table
sqlLock.acquire()
t = (fromAddress,)
sqlSubmitQueue.put('''select label from subscriptions where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
fromLabel, = row
try:
if toAddress == '[Broadcast subscribers]':
toLabel = '[Broadcast subscribers]'
else:
toLabel = config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
#msgid, toaddress, fromaddress, subject, received, message = row
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(toAddress))
if safeConfigGetBoolean(str(toAddress),'mailinglist'):
newItem.setTextColor(QtGui.QColor(137,04,177))
self.ui.tableWidgetInbox.insertRow(0)
self.ui.tableWidgetInbox.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
if config.getboolean('bitmessagesettings', 'showtraynotifications'):
self.trayIcon.showMessage('New Message', 'New message from '+ fromAddress, 1, 2000)
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
if config.getboolean('bitmessagesettings', 'showtraynotifications'):
self.trayIcon.showMessage('New Message', 'New message from '+fromLabel, 1, 2000)
newItem.setData(Qt.UserRole,str(fromAddress))
self.ui.tableWidgetInbox.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
self.ui.tableWidgetInbox.setItem(0,2,newItem)
newItem = myTableWidgetItem(strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
newItem.setData(Qt.UserRole,QByteArray(inventoryHash))
newItem.setData(33,int(time.time()))
self.ui.tableWidgetInbox.setItem(0,3,newItem)
self.ui.textEditInboxMessage.setText(self.ui.tableWidgetInbox.item(0,2).data(Qt.UserRole).toPyObject())
self.ui.tableWidgetInbox.setCurrentCell(0,0)
def click_pushButtonAddAddressBook(self):
    """Show the New Subscription dialog (reused for address-book entries)
    and, if the user enters a valid address not already present, add it to
    the address book (GUI table and database)."""
    self.NewSubscriptionDialogInstance = NewSubscriptionDialog(self)
    if self.NewSubscriptionDialogInstance.exec_():
        # The dialog validates the address and records its verdict in this label.
        if self.NewSubscriptionDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
            #First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
            sqlLock.acquire()
            t = (addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),)
            sqlSubmitQueue.put('''select * from addressbook where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn == []:
                self.ui.tableWidgetAddressBook.insertRow(0)
                newItem = QtGui.QTableWidgetItem(unicode(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
                self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
                newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text()))
                # The address cell is read-only (selectable but not editable).
                newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
                # (label, address) row for the addressbook table.
                t = (str(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())))
                sqlLock.acquire()
                sqlSubmitQueue.put('''INSERT INTO addressbook VALUES (?,?)''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                # Inbox rows from this sender may now have a label to display.
                self.rerenderInboxFromLabels()
            else:
                self.statusBar().showMessage('Error: You cannot add the same address to your address book twice. Try renaming the existing one if you want.')
        else:
            self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def click_pushButtonAddSubscription(self):
    """Show the New Subscription dialog and, if the user enters a valid
    address not already subscribed, add it to the subscriptions table
    (GUI and database) and start watching for its broadcasts."""
    self.NewSubscriptionDialogInstance = NewSubscriptionDialog(self)
    if self.NewSubscriptionDialogInstance.exec_():
        # The dialog validates the address and records its verdict in this label.
        if self.NewSubscriptionDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
            #First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
            sqlLock.acquire()
            t = (addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),)
            sqlSubmitQueue.put('''select * from subscriptions where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn == []:
                self.ui.tableWidgetSubscriptions.insertRow(0)
                newItem = QtGui.QTableWidgetItem(unicode(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
                self.ui.tableWidgetSubscriptions.setItem(0,0,newItem)
                newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text()))
                # The address cell is read-only (selectable but not editable).
                newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                self.ui.tableWidgetSubscriptions.setItem(0,1,newItem)
                # (label, address, enabled) row for the subscriptions table.
                t = (str(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),True)
                sqlLock.acquire()
                sqlSubmitQueue.put('''INSERT INTO subscriptions VALUES (?,?,?)''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                self.rerenderInboxFromLabels()
                self.reloadBroadcastSendersForWhichImWatching()
            else:
                self.statusBar().showMessage('Error: You cannot add the same address to your subsciptions twice. Perhaps rename the existing one if you want.')
        else:
            self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def loadBlackWhiteList(self):
    """Populate the Blacklist/Whitelist table from whichever list is
    currently active, graying out disabled entries."""
    #Initialize the Blacklist or Whitelist table
    listType = config.get('bitmessagesettings', 'blackwhitelist')
    # Fix: hold sqlLock while using the SQL submit/return queues, consistent
    # with every other database access in this class.
    sqlLock.acquire()
    if listType == 'black':
        sqlSubmitQueue.put('''SELECT label, address, enabled FROM blacklist''')
    else:
        sqlSubmitQueue.put('''SELECT label, address, enabled FROM whitelist''')
    sqlSubmitQueue.put('')
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    for row in queryreturn:
        label, address, enabled = row
        self.ui.tableWidgetBlacklist.insertRow(0)
        newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
        if not enabled:
            # Gray text marks entries that are present but switched off.
            newItem.setTextColor(QtGui.QColor(128,128,128))
        self.ui.tableWidgetBlacklist.setItem(0,0,newItem)
        newItem = QtGui.QTableWidgetItem(address)
        newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
        if not enabled:
            newItem.setTextColor(QtGui.QColor(128,128,128))
        self.ui.tableWidgetBlacklist.setItem(0,1,newItem)
def click_pushButtonStatusIcon(self):
print 'click_pushButtonStatusIcon'
self.iconGlossaryInstance = iconGlossaryDialog(self)
if self.iconGlossaryInstance.exec_():
pass
def click_actionHelp(self):
    """Show the modal Help dialog."""
    dialog = helpDialog(self)
    self.helpDialogInstance = dialog
    dialog.exec_()
def click_actionAbout(self):
    """Show the modal About dialog."""
    dialog = aboutDialog(self)
    self.aboutDialogInstance = dialog
    dialog.exec_()
def click_actionSettings(self):
    """Show the Settings dialog and, if accepted, persist every setting to
    keys.dat, update OS autostart, and migrate data files when the user
    toggles portable mode."""
    global statusIconColor
    global appdata
    self.settingsDialogInstance = settingsDialog(self)
    if self.settingsDialogInstance.exec_():
        config.set('bitmessagesettings', 'startonlogon', str(self.settingsDialogInstance.ui.checkBoxStartOnLogon.isChecked()))
        config.set('bitmessagesettings', 'minimizetotray', str(self.settingsDialogInstance.ui.checkBoxMinimizeToTray.isChecked()))
        config.set('bitmessagesettings', 'showtraynotifications', str(self.settingsDialogInstance.ui.checkBoxShowTrayNotifications.isChecked()))
        config.set('bitmessagesettings', 'startintray', str(self.settingsDialogInstance.ui.checkBoxStartInTray.isChecked()))
        # A port change only takes effect after a restart; warn before saving it.
        if int(config.get('bitmessagesettings','port')) != int(self.settingsDialogInstance.ui.lineEditTCPPort.text()):
            QMessageBox.about(self, "Restart", "You must restart Bitmessage for the port number change to take effect.")
        config.set('bitmessagesettings', 'port', str(self.settingsDialogInstance.ui.lineEditTCPPort.text()))
        # Warn when the user turns a SOCKS proxy on while already connected.
        if config.get('bitmessagesettings', 'socksproxytype') == 'none' and str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText())[0:5] == 'SOCKS':
            if statusIconColor != 'red':
                QMessageBox.about(self, "Restart", "Bitmessage will use your proxy from now on now but you may want to manually restart Bitmessage now to close existing connections.")
        if config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS' and str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText()) == 'none':
            self.statusBar().showMessage('')
        config.set('bitmessagesettings', 'socksproxytype', str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText()))
        config.set('bitmessagesettings', 'socksauthentication', str(self.settingsDialogInstance.ui.checkBoxAuthentication.isChecked()))
        config.set('bitmessagesettings', 'sockshostname', str(self.settingsDialogInstance.ui.lineEditSocksHostname.text()))
        config.set('bitmessagesettings', 'socksport', str(self.settingsDialogInstance.ui.lineEditSocksPort.text()))
        config.set('bitmessagesettings', 'socksusername', str(self.settingsDialogInstance.ui.lineEditSocksUsername.text()))
        config.set('bitmessagesettings', 'sockspassword', str(self.settingsDialogInstance.ui.lineEditSocksPassword.text()))
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
        if 'win32' in sys.platform or 'win64' in sys.platform:
            #Auto-startup for Windows
            RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
            self.settings = QSettings(RUN_PATH, QSettings.NativeFormat)
            if config.getboolean('bitmessagesettings', 'startonlogon'):
                self.settings.setValue("PyBitmessage",sys.argv[0])
            else:
                self.settings.remove("PyBitmessage")
        elif 'darwin' in sys.platform:
            #startup for mac
            pass
        elif 'linux' in sys.platform:
            #startup for linux
            pass
        if appdata != '' and self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): #If we are NOT using portable mode now but the user selected that we should...
            config.set('bitmessagesettings','movemessagstoprog','true') #Tells bitmessage to move the messages.dat file to the program directory the next time the program starts.
            #Write the keys.dat file to disk in the new location
            with open('keys.dat', 'wb') as configfile:
                config.write(configfile)
            #Write the knownnodes.dat file to disk in the new location
            output = open('knownnodes.dat', 'wb')
            pickle.dump(knownNodes, output)
            output.close()
            os.remove(appdata + 'keys.dat')
            os.remove(appdata + 'knownnodes.dat')
            # Empty appdata means "portable mode" from here on.
            appdata = ''
            QMessageBox.about(self, "Restart", "Bitmessage has moved most of your config files to the program directory but you must restart Bitmessage to move the last file (the file which holds messages).")
        if appdata == '' and not self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): #If we ARE using portable mode now but the user selected that we shouldn't...
            appdata = lookupAppdataFolder()
            if not os.path.exists(appdata):
                os.makedirs(appdata)
            config.set('bitmessagesettings','movemessagstoappdata','true') #Tells bitmessage to move the messages.dat file to the appdata directory the next time the program starts.
            #Write the keys.dat file to disk in the new location
            with open(appdata + 'keys.dat', 'wb') as configfile:
                config.write(configfile)
            #Write the knownnodes.dat file to disk in the new location
            output = open(appdata + 'knownnodes.dat', 'wb')
            pickle.dump(knownNodes, output)
            output.close()
            os.remove('keys.dat')
            os.remove('knownnodes.dat')
            QMessageBox.about(self, "Restart", "Bitmessage has moved most of your config files to the application data directory but you must restart Bitmessage to move the last file (the file which holds messages).")
def click_radioButtonBlacklist(self):
    """Switch the list mode from whitelist to blacklist, persist the
    setting, and reload the table under its new heading."""
    if config.get('bitmessagesettings', 'blackwhitelist') != 'white':
        # Already in blacklist mode; nothing to do.
        return
    config.set('bitmessagesettings','blackwhitelist','black')
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    # Drop every row, then repopulate from the blacklist table.
    self.ui.tableWidgetBlacklist.setRowCount(0)
    self.loadBlackWhiteList()
    self.ui.tabWidget.setTabText(6,'Blacklist')
def click_radioButtonWhitelist(self):
    """Switch the list mode from blacklist to whitelist, persist the
    setting, and reload the table under its new heading."""
    if config.get('bitmessagesettings', 'blackwhitelist') != 'black':
        # Already in whitelist mode; nothing to do.
        return
    config.set('bitmessagesettings','blackwhitelist','white')
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    # Drop every row, then repopulate from the whitelist table.
    self.ui.tableWidgetBlacklist.setRowCount(0)
    self.loadBlackWhiteList()
    self.ui.tabWidget.setTabText(6,'Whitelist')
def click_pushButtonAddBlacklist(self):
    """Show the New Subscription dialog (reused for list entries) and add
    the entered address to whichever list — blacklist or whitelist — is
    currently active, unless it is already present."""
    self.NewBlacklistDialogInstance = NewSubscriptionDialog(self)
    if self.NewBlacklistDialogInstance.exec_():
        # The dialog validates the address and records its verdict in this label.
        if self.NewBlacklistDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
            #First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
            sqlLock.acquire()
            t = (addBMIfNotPresent(str(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text())),)
            if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
                sqlSubmitQueue.put('''select * from blacklist where address=?''')
            else:
                sqlSubmitQueue.put('''select * from whitelist where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn == []:
                self.ui.tableWidgetBlacklist.insertRow(0)
                newItem = QtGui.QTableWidgetItem(unicode(self.NewBlacklistDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
                self.ui.tableWidgetBlacklist.setItem(0,0,newItem)
                newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text()))
                # The address cell is read-only (selectable but not editable).
                newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                self.ui.tableWidgetBlacklist.setItem(0,1,newItem)
                # (label, address, enabled) row for the black/white list table.
                t = (str(self.NewBlacklistDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text())),True)
                sqlLock.acquire()
                if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
                    sqlSubmitQueue.put('''INSERT INTO blacklist VALUES (?,?,?)''')
                else:
                    sqlSubmitQueue.put('''INSERT INTO whitelist VALUES (?,?,?)''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
            else:
                self.statusBar().showMessage('Error: You cannot add the same address to your list twice. Perhaps rename the existing one if you want.')
        else:
            self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def on_action_SpecialAddressBehaviorDialog(self):
    """Show the Special Address Behavior dialog for the currently selected
    identity and persist whether it behaves as a normal address or as a
    pseudo-mailing-list (purple in the table)."""
    self.dialog = SpecialAddressBehaviorDialog(self)
    # For Modal dialogs
    if self.dialog.exec_():
        currentRow = self.ui.tableWidgetYourIdentities.currentRow()
        addressAtCurrentRow = str(self.ui.tableWidgetYourIdentities.item(currentRow,1).text())
        if self.dialog.ui.radioButtonBehaveNormalAddress.isChecked():
            config.set(str(addressAtCurrentRow),'mailinglist','false')
            #Set the color to either black or grey
            if config.getboolean(addressAtCurrentRow,'enabled'):
                self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(0,0,0))
            else:
                self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(128,128,128))
        else:
            config.set(str(addressAtCurrentRow),'mailinglist','true')
            config.set(str(addressAtCurrentRow),'mailinglistname',str(self.dialog.ui.lineEditMailingListName.text().toUtf8()))
            # Fix: was QColor(137,04,177); '04' is a legacy octal literal whose
            # intended value is decimal 4.
            self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(137,4,177))
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
        self.rerenderInboxToLabels()
def click_NewAddressDialog(self):
    """Show the New Address dialog and start an addressGenerator thread to
    create either a random address or deterministic (passphrase-based)
    addresses, wiring its signals back to this window."""
    self.dialog = NewAddressDialog(self)
    # For Modal dialogs
    if self.dialog.exec_():
        #self.dialog.ui.buttonBox.enabled = False
        if self.dialog.ui.radioButtonRandomAddress.isChecked():
            if self.dialog.ui.radioButtonMostAvailable.isChecked():
                streamNumberForAddress = 1
            else:
                #User selected 'Use the same stream as an existing address.'
                streamNumberForAddress = addressStream(self.dialog.ui.comboBoxExisting.currentText())
            self.addressGenerator = addressGenerator()
            # setup(addressVersion, stream, label, numberOfAddresses, passphrase, eighteenByteRipe)
            self.addressGenerator.setup(2,streamNumberForAddress,str(self.dialog.ui.newaddresslabel.text().toUtf8()),1,"",self.dialog.ui.checkBoxEighteenByteRipe.isChecked())
            QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
            QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
            self.addressGenerator.start()
        else:
            # Deterministic (passphrase-derived) addresses.
            if self.dialog.ui.lineEditPassphrase.text() != self.dialog.ui.lineEditPassphraseAgain.text():
                QMessageBox.about(self, "Passphrase mismatch", "The passphrase you entered twice doesn\'t match. Try again.")
            elif self.dialog.ui.lineEditPassphrase.text() == "":
                QMessageBox.about(self, "Choose a passphrase", "You really do need a passphrase.")
            else:
                streamNumberForAddress = 1 #this will eventually have to be replaced by logic to determine the most available stream number.
                self.addressGenerator = addressGenerator()
                self.addressGenerator.setup(2,streamNumberForAddress,"unused address",self.dialog.ui.spinBoxNumberOfAddressesToMake.value(),self.dialog.ui.lineEditPassphrase.text().toUtf8(),self.dialog.ui.checkBoxEighteenByteRipe.isChecked())
                QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
                QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
                self.addressGenerator.start()
    else:
        print 'new address dialog box rejected'
def closeEvent(self, event):
    """Shut down cleanly when the main window closes: tell network threads
    to stop, flush the inventory and the knownNodes peer list to disk, then
    terminate the process."""
    '''quit_msg = "Are you sure you want to exit Bitmessage?"
    reply = QtGui.QMessageBox.question(self, 'Message',
    quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
    if reply == QtGui.QMessageBox.Yes:
    event.accept()
    else:
    event.ignore()'''
    broadcastToSendDataQueues((0, 'shutdown', 'all'))
    printLock.acquire()
    print 'Closing. Flushing inventory in memory out to disk...'
    printLock.release()
    self.statusBar().showMessage('Flushing inventory in memory out to disk.')
    flushInventory()
    #This one last useless query will guarantee that the previous query committed before we close the program.
    sqlLock.acquire()
    sqlSubmitQueue.put('SELECT address FROM subscriptions')
    sqlSubmitQueue.put('')
    sqlReturnQueue.get()
    sqlLock.release()
    self.statusBar().showMessage('Saving the knownNodes list of peers to disk...')
    output = open(appdata + 'knownnodes.dat', 'wb')
    pickle.dump(knownNodes, output)
    output.close()
    self.trayIcon.hide()
    printLock.acquire()
    print 'Done.'
    printLock.release()
    self.statusBar().showMessage('All done. Closing user interface...')
    event.accept()
    # Hard exit: raising SystemExit ends the process even though other
    # (daemon) threads may still be running.
    raise SystemExit
def on_action_InboxReply(self):
    """Pre-fill the Send tab as a reply to the selected inbox message."""
    row = self.ui.tableWidgetInbox.currentRow()
    toAddress = str(self.ui.tableWidgetInbox.item(row, 0).data(Qt.UserRole).toPyObject())
    fromAddress = str(self.ui.tableWidgetInbox.item(row, 1).data(Qt.UserRole).toPyObject())
    if toAddress == '[Broadcast subscribers]':
        self.ui.labelFrom.setText('')
    else:
        # Refuse to reply from a disabled identity.
        if not config.get(toAddress, 'enabled'):
            self.statusBar().showMessage('Error: The address from which you are trying to send is disabled. Enable it from the \'Your Identities\' tab first.')
            return
        self.ui.labelFrom.setText(toAddress)
    self.ui.lineEditTo.setText(str(fromAddress))
    self.ui.comboBoxSendFrom.setCurrentIndex(0)
    #self.ui.comboBoxSendFrom.setEditText(str(self.ui.tableWidgetInbox.item(currentInboxRow,0).text))
    messageItem = self.ui.tableWidgetInbox.item(row, 2)
    self.ui.textEditMessage.setText('\n\n------------------------------------------------------\n' + messageItem.data(Qt.UserRole).toPyObject())
    # Only prepend 'Re: ' if the subject does not already carry it.
    subject = messageItem.text()
    if subject[0:3] == 'Re:':
        self.ui.lineEditSubject.setText(subject)
    else:
        self.ui.lineEditSubject.setText('Re: ' + subject)
    self.ui.radioButtonSpecific.setChecked(True)
    self.ui.tabWidget.setCurrentIndex(1)
def on_action_InboxAddSenderToAddressBook(self):
    """Add the sender of the selected inbox message to the address book.

    Checks for a duplicate first; on success inserts a placeholder label
    both in the UI table and in the addressbook SQL table, then switches
    to the Address Book tab so the user can edit the label.
    """
    currentInboxRow = self.ui.tableWidgetInbox.currentRow()
    addressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow, 1).data(Qt.UserRole).toPyObject())
    # Let's make sure that it isn't already in the address book.
    # 'with sqlLock' releases the lock even if a queue operation raises,
    # which the bare acquire()/release() pair did not guarantee.
    t = (addressAtCurrentInboxRow,)
    with sqlLock:
        sqlSubmitQueue.put('''select * from addressbook where address=?''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
    if not queryreturn:
        self.ui.tableWidgetAddressBook.insertRow(0)
        newItem = QtGui.QTableWidgetItem('--New entry. Change label in Address Book.--')
        self.ui.tableWidgetAddressBook.setItem(0, 0, newItem)
        newItem = QtGui.QTableWidgetItem(addressAtCurrentInboxRow)
        newItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
        self.ui.tableWidgetAddressBook.setItem(0, 1, newItem)
        t = ('--New entry. Change label in Address Book.--', addressAtCurrentInboxRow)
        with sqlLock:
            sqlSubmitQueue.put('''INSERT INTO addressbook VALUES (?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
        self.ui.tabWidget.setCurrentIndex(5)
        self.ui.tableWidgetAddressBook.setCurrentCell(0, 0)
        self.statusBar().showMessage('Entry added to the Address Book. Edit the label to your liking.')
    else:
        self.statusBar().showMessage('Error: You cannot add the same address to your address book twice. Try renaming the existing one if you want.')
#Send item on the Inbox tab to trash
def on_action_InboxTrash(self):
    """Soft-delete the selected inbox message: mark it folder='trash' in SQL
    (the row stays on disk) and remove it from the UI table."""
    currentRow = self.ui.tableWidgetInbox.currentRow()
    inventoryHashToTrash = str(self.ui.tableWidgetInbox.item(currentRow, 3).data(Qt.UserRole).toPyObject())
    # 'with sqlLock' guarantees release even if a queue operation raises.
    with sqlLock:
        sqlSubmitQueue.put('''UPDATE inbox SET folder='trash' WHERE msgid=?''')
        sqlSubmitQueue.put((inventoryHashToTrash,))
        sqlReturnQueue.get()
    self.ui.textEditInboxMessage.setText("")
    self.ui.tableWidgetInbox.removeRow(currentRow)
    self.statusBar().showMessage('Moved item to trash. There is no user interface to view your trash, but it is still on disk if you are desperate to get it back.')
#Send item on the Sent tab to trash
def on_action_SentTrash(self):
    """Soft-delete the selected Sent message: mark it folder='trash' in SQL
    (identified by its ackdata) and remove it from the UI table."""
    currentRow = self.ui.tableWidgetSent.currentRow()
    ackdataToTrash = str(self.ui.tableWidgetSent.item(currentRow, 3).data(Qt.UserRole).toPyObject())
    # 'with sqlLock' guarantees release even if a queue operation raises.
    with sqlLock:
        sqlSubmitQueue.put('''UPDATE sent SET folder='trash' WHERE ackdata=?''')
        sqlSubmitQueue.put((ackdataToTrash,))
        sqlReturnQueue.get()
    self.ui.textEditSentMessage.setText("")
    self.ui.tableWidgetSent.removeRow(currentRow)
    self.statusBar().showMessage('Moved item to trash. There is no user interface to view your trash, but it is still on disk if you are desperate to get it back.')
def on_action_SentClipboard(self):
    """Copy the recipient address of the selected Sent row to the clipboard."""
    row = self.ui.tableWidgetSent.currentRow()
    address = str(self.ui.tableWidgetSent.item(row, 0).data(Qt.UserRole).toPyObject())
    QtGui.QApplication.clipboard().setText(str(address))
#Group of functions for the Address Book dialog box
def on_action_AddressBookNew(self):
    # Context-menu action: delegate to the same handler as the 'add' button.
    self.click_pushButtonAddAddressBook()
def on_action_AddressBookDelete(self):
    """Delete the selected address-book entry (matched by label AND address)
    from both SQL and the UI, then refresh every view that shows labels."""
    currentRow = self.ui.tableWidgetAddressBook.currentRow()
    labelAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow, 0).text().toUtf8()
    addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow, 1).text()
    t = (str(labelAtCurrentRow), str(addressAtCurrentRow))
    # 'with sqlLock' guarantees release even if a queue operation raises.
    with sqlLock:
        sqlSubmitQueue.put('''DELETE FROM addressbook WHERE label=? AND address=?''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()  # result unused; the get() just synchronizes with the SQL thread
    self.ui.tableWidgetAddressBook.removeRow(currentRow)
    self.rerenderInboxFromLabels()
    self.rerenderSentToLabels()
    self.reloadBroadcastSendersForWhichImWatching()
def on_action_AddressBookClipboard(self):
    """Copy the selected address-book address to the system clipboard."""
    row = self.ui.tableWidgetAddressBook.currentRow()
    address = self.ui.tableWidgetAddressBook.item(row, 1).text()
    QtGui.QApplication.clipboard().setText(str(address))
def on_action_AddressBookSend(self):
    """Append the selected address-book address to the 'To' field on the Send tab."""
    row = self.ui.tableWidgetAddressBook.currentRow()
    address = self.ui.tableWidgetAddressBook.item(row, 1).text()
    existing = str(self.ui.lineEditTo.text())
    if existing == '':
        self.ui.lineEditTo.setText(str(address))
    else:
        # Recipients are separated by '; '.
        self.ui.lineEditTo.setText(existing + '; ' + str(address))
    self.statusBar().showMessage('You have added the address to the \'To\' field on the \'Send\' tab. You may add more recipients if you want. When you are done, go to the \'Send\' tab.')
def on_context_menuAddressBook(self, point):
    """Show the address-book context menu at the clicked table position."""
    globalPos = self.ui.tableWidgetAddressBook.mapToGlobal(point)
    self.popMenuAddressBook.exec_(globalPos)
#Group of functions for the Subscriptions dialog box
def on_action_SubscriptionsNew(self):
    # Context-menu action: delegate to the same handler as the 'add' button.
    self.click_pushButtonAddSubscription()
def on_action_SubscriptionsDelete(self):
print 'clicked Delete'
currentRow = self.ui.tableWidgetSubscriptions.currentRow()
labelAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,0).text().toUtf8()
addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,1).text()
t = (str(labelAtCurrentRow),str(addressAtCurrentRow))
sqlLock.acquire()
sqlSubmitQueue.put('''DELETE FROM subscriptions WHERE label=? AND address=?''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
self.ui.tableWidgetSubscriptions.removeRow(currentRow)
self.rerenderInboxFromLabels()
self.reloadBroadcastSendersForWhichImWatching()
def on_action_SubscriptionsClipboard(self):
    """Copy the selected subscription's address to the system clipboard."""
    row = self.ui.tableWidgetSubscriptions.currentRow()
    address = self.ui.tableWidgetSubscriptions.item(row, 1).text()
    QtGui.QApplication.clipboard().setText(str(address))
def on_context_menuSubscriptions(self, point):
    """Show the subscriptions context menu at the clicked table position."""
    globalPos = self.ui.tableWidgetSubscriptions.mapToGlobal(point)
    self.popMenuSubscriptions.exec_(globalPos)
#Group of functions for the Blacklist dialog box
def on_action_BlacklistNew(self):
    # Context-menu action: delegate to the same handler as the 'add' button.
    self.click_pushButtonAddBlacklist()
def on_action_BlacklistDelete(self):
print 'clicked Delete'
currentRow = self.ui.tableWidgetBlacklist.currentRow()
labelAtCurrentRow = self.ui.tableWidgetBlacklist.item(currentRow,0).text().toUtf8()
addressAtCurrentRow = self.ui.tableWidgetBlacklist.item(currentRow,1).text()
t = (str(labelAtCurrentRow),str(addressAtCurrentRow))
sqlLock.acquire()
if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
sqlSubmitQueue.put('''DELETE FROM blacklist WHERE label=? AND address=?''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
else:
sqlSubmitQueue.put('''DELETE FROM whitelist WHERE label=? AND address=?''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
self.ui.tableWidgetBlacklist.removeRow(currentRow)
def on_action_BlacklistClipboard(self):
    """Copy the selected black/whitelist address to the system clipboard."""
    row = self.ui.tableWidgetBlacklist.currentRow()
    address = self.ui.tableWidgetBlacklist.item(row, 1).text()
    QtGui.QApplication.clipboard().setText(str(address))
def on_context_menuBlacklist(self, point):
    """Show the black/whitelist context menu at the clicked table position."""
    globalPos = self.ui.tableWidgetBlacklist.mapToGlobal(point)
    self.popMenuBlacklist.exec_(globalPos)
def on_action_BlacklistEnable(self):
    """Re-enable the selected black/whitelist entry: repaint its row black
    and set enabled=1 in whichever list table is active."""
    currentRow = self.ui.tableWidgetBlacklist.currentRow()
    addressAtCurrentRow = self.ui.tableWidgetBlacklist.item(currentRow, 1).text()
    self.ui.tableWidgetBlacklist.item(currentRow, 0).setTextColor(QtGui.QColor(0, 0, 0))
    self.ui.tableWidgetBlacklist.item(currentRow, 1).setTextColor(QtGui.QColor(0, 0, 0))
    t = (str(addressAtCurrentRow),)
    # Only the table name differs between the two modes; the put/get
    # sequence is identical, so it is hoisted out of the branches.
    # 'with sqlLock' guarantees release even if a queue operation raises.
    with sqlLock:
        if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
            sqlSubmitQueue.put('''UPDATE blacklist SET enabled=1 WHERE address=?''')
        else:
            sqlSubmitQueue.put('''UPDATE whitelist SET enabled=1 WHERE address=?''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()
def on_action_BlacklistDisable(self):
    """Disable the selected black/whitelist entry: gray out its row and set
    enabled=0 in whichever list table is active."""
    currentRow = self.ui.tableWidgetBlacklist.currentRow()
    addressAtCurrentRow = self.ui.tableWidgetBlacklist.item(currentRow, 1).text()
    self.ui.tableWidgetBlacklist.item(currentRow, 0).setTextColor(QtGui.QColor(128, 128, 128))
    self.ui.tableWidgetBlacklist.item(currentRow, 1).setTextColor(QtGui.QColor(128, 128, 128))
    t = (str(addressAtCurrentRow),)
    # Only the table name differs between the two modes; the put/get
    # sequence is identical, so it is hoisted out of the branches.
    # 'with sqlLock' guarantees release even if a queue operation raises.
    with sqlLock:
        if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
            sqlSubmitQueue.put('''UPDATE blacklist SET enabled=0 WHERE address=?''')
        else:
            sqlSubmitQueue.put('''UPDATE whitelist SET enabled=0 WHERE address=?''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()
#Group of functions for the Your Identities dialog box
def on_action_YourIdentitiesNew(self):
    # Context-menu action: open the same new-address dialog as the button.
    self.click_NewAddressDialog()
def on_action_YourIdentitiesEnable(self):
    """Enable the selected identity: flip its 'enabled' flag in keys.dat,
    repaint the row black (purple if it is a mailing list) and reload keys."""
    row = self.ui.tableWidgetYourIdentities.currentRow()
    address = str(self.ui.tableWidgetYourIdentities.item(row, 1).text())
    config.set(address, 'enabled', 'true')
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    normalColor = QtGui.QColor(0, 0, 0)
    for column in range(3):
        self.ui.tableWidgetYourIdentities.item(row, column).setTextColor(normalColor)
    if safeConfigGetBoolean(address, 'mailinglist'):
        # Mailing-list addresses are highlighted in purple.
        self.ui.tableWidgetYourIdentities.item(row, 1).setTextColor(QtGui.QColor(137, 4, 177))
    reloadMyAddressHashes()
def on_action_YourIdentitiesDisable(self):
    """Disable the selected identity: flip its 'enabled' flag to false, gray
    out the row (purple for mailing lists), persist keys.dat, reload keys."""
    row = self.ui.tableWidgetYourIdentities.currentRow()
    address = str(self.ui.tableWidgetYourIdentities.item(row, 1).text())
    config.set(str(address), 'enabled', 'false')
    grayedOut = QtGui.QColor(128, 128, 128)
    for column in range(3):
        self.ui.tableWidgetYourIdentities.item(row, column).setTextColor(grayedOut)
    if safeConfigGetBoolean(address, 'mailinglist'):
        # Mailing-list addresses are highlighted in purple.
        self.ui.tableWidgetYourIdentities.item(row, 1).setTextColor(QtGui.QColor(137, 4, 177))
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    reloadMyAddressHashes()
def on_action_YourIdentitiesClipboard(self):
    """Copy the selected identity's address to the system clipboard."""
    row = self.ui.tableWidgetYourIdentities.currentRow()
    address = self.ui.tableWidgetYourIdentities.item(row, 1).text()
    QtGui.QApplication.clipboard().setText(str(address))
def on_context_menuYourIdentities(self, point):
    """Show the identities context menu at the clicked table position."""
    globalPos = self.ui.tableWidgetYourIdentities.mapToGlobal(point)
    self.popMenu.exec_(globalPos)
def on_context_menuInbox(self, point):
    """Show the inbox context menu at the clicked table position."""
    globalPos = self.ui.tableWidgetInbox.mapToGlobal(point)
    self.popMenuInbox.exec_(globalPos)
def on_context_menuSent(self, point):
    """Show the Sent-tab context menu at the clicked table position."""
    globalPos = self.ui.tableWidgetSent.mapToGlobal(point)
    self.popMenuSent.exec_(globalPos)
def tableWidgetInboxItemClicked(self):
    """Display the clicked inbox message's full body in the preview pane."""
    row = self.ui.tableWidgetInbox.currentRow()
    if row < 0:
        return  # nothing selected
    self.ui.textEditInboxMessage.setText(self.ui.tableWidgetInbox.item(row, 2).data(Qt.UserRole).toPyObject())
def tableWidgetSentItemClicked(self):
    """Display the clicked Sent message's full body in the preview pane."""
    row = self.ui.tableWidgetSent.currentRow()
    if row < 0:
        return  # nothing selected
    self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(row, 2).data(Qt.UserRole).toPyObject())
def tableWidgetYourIdentitiesItemChanged(self):
    """Persist an edited identity label to keys.dat and refresh the views
    that display identity labels."""
    row = self.ui.tableWidgetYourIdentities.currentRow()
    if row >= 0:
        address = self.ui.tableWidgetYourIdentities.item(row, 1).text()
        newLabel = str(self.ui.tableWidgetYourIdentities.item(row, 0).text().toUtf8())
        config.set(str(address), 'label', newLabel)
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
    self.rerenderComboBoxSendFrom()
    #self.rerenderInboxFromLabels()
    self.rerenderInboxToLabels()
    self.rerenderSentFromLabels()
    #self.rerenderSentToLabels()
def tableWidgetAddressBookItemChanged(self):
    """Persist an edited address-book label to SQL and refresh label views.

    Fix: the original acquired sqlLock before checking whether a row is
    selected and used a bare acquire()/release() pair, so the lock was held
    across a no-op path and leaked if a queue operation raised. The lock is
    now scoped tightly around the SQL traffic with a context manager.
    """
    currentRow = self.ui.tableWidgetAddressBook.currentRow()
    if currentRow >= 0:
        addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow, 1).text()
        t = (str(self.ui.tableWidgetAddressBook.item(currentRow, 0).text().toUtf8()), str(addressAtCurrentRow))
        with sqlLock:
            sqlSubmitQueue.put('''UPDATE addressbook set label=? WHERE address=?''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
    self.rerenderInboxFromLabels()
    self.rerenderSentToLabels()
def tableWidgetSubscriptionsItemChanged(self):
    """Persist an edited subscription label to SQL and refresh label views.

    Fix: same lock-scoping defect as tableWidgetAddressBookItemChanged —
    sqlLock was acquired before the row check with a bare acquire()/release()
    pair; now scoped tightly around the SQL traffic with a context manager.
    """
    currentRow = self.ui.tableWidgetSubscriptions.currentRow()
    if currentRow >= 0:
        addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow, 1).text()
        t = (str(self.ui.tableWidgetSubscriptions.item(currentRow, 0).text().toUtf8()), str(addressAtCurrentRow))
        with sqlLock:
            sqlSubmitQueue.put('''UPDATE subscriptions set label=? WHERE address=?''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
    self.rerenderInboxFromLabels()
    self.rerenderSentToLabels()
def writeNewAddressToTable(self, label, address, streamNumber):
    """Prepend a freshly generated address to the 'Your Identities' table.

    The address and stream-number cells are made read-only; the label cell
    stays editable. Called via the addressGenerator's signal.
    """
    table = self.ui.tableWidgetYourIdentities
    table.insertRow(0)
    table.setItem(0, 0, QtGui.QTableWidgetItem(unicode(label, 'utf-8')))
    addressItem = QtGui.QTableWidgetItem(address)
    addressItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    table.setItem(0, 1, addressItem)
    streamItem = QtGui.QTableWidgetItem(streamNumber)
    streamItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    table.setItem(0, 2, streamItem)
    self.rerenderComboBoxSendFrom()
def updateStatusBar(self,data):
if data != "":
printLock.acquire()
print 'Status bar:', data
printLock.release()
self.statusBar().showMessage(data)
def reloadBroadcastSendersForWhichImWatching(self):
    """Rebuild the broadcastSendersForWhichImWatching dict from the
    subscriptions table, keyed by each subscribed address's decoded hash.

    Fixes: sqlLock now released even if a queue operation raises; the local
    previously named `hash` no longer shadows the builtin.
    """
    broadcastSendersForWhichImWatching.clear()
    with sqlLock:
        sqlSubmitQueue.put('SELECT address FROM subscriptions')
        sqlSubmitQueue.put('')
        queryreturn = sqlReturnQueue.get()
    for row in queryreturn:
        address, = row
        status, addressVersionNumber, streamNumber, addressHash = decodeAddress(address)
        broadcastSendersForWhichImWatching[addressHash] = 0
#In order for the time columns on the Inbox and Sent tabs to be sorted correctly (rather than alphabetically), we need to overload the < operator and use this class instead of QTableWidgetItem.
class myTableWidgetItem(QTableWidgetItem):
    """QTableWidgetItem that sorts numerically on the integer stored under
    data role 33, so time columns sort chronologically, not alphabetically."""
    def __lt__(self, other):
        own = int(self.data(33).toPyObject())
        theirs = int(other.data(33).toPyObject())
        return own < theirs
#---- Module-level state shared between the Qt GUI thread and the network/worker threads ----
sendDataQueues = [] #each sendData thread puts its queue in this list.
myRSAAddressHashes = {} #populated from keys.dat at startup -- exact value type not visible here; TODO confirm
myECAddressHashes = {} #same role as above for EC addresses -- TODO confirm
#myPrivateKeys = {}
inventory = {} #of objects (like msg payloads and pubkey payloads) Does not include protocol headers (the first 24 bytes of each packet).
workerQueue = Queue.Queue()
sqlSubmitQueue = Queue.Queue() #SQLITE3 is so thread-unsafe that they won't even let you call it from different threads using your own locks. SQL objects can only be called from one thread.
sqlReturnQueue = Queue.Queue()
sqlLock = threading.Lock() #serializes the submit/return queue pair above so request/response stay matched
printLock = threading.Lock() #serializes console output from the many threads
ackdataForWhichImWatching = {}
broadcastSendersForWhichImWatching = {} #rebuilt by reloadBroadcastSendersForWhichImWatching(); keyed by decoded address hash
statusIconColor = 'red'
connectionsCount = {} #Used for the 'network status' tab.
connectionsCountLock = threading.Lock()
inventoryLock = threading.Lock() #Guarantees that two receiveDataThreads don't receive and process the same message concurrently (probably sent by a malicious individual)
eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack('>Q',random.randrange(1, 18446744073709551615))
connectedHostsList = {} #List of hosts to which we are connected. Used to guarantee that the outgoingSynSender thread won't connect to the same remote node twice.
neededPubkeys = {}
successfullyDecryptMessageTimings = [] #A list of the amounts of time it took to successfully decrypt msg messages
apiSignalQueue = Queue.Queue() #The singleAPI thread uses this queue to pass messages to a QT thread which can emit signals to do things like display a message in the UI.
apiAddressGeneratorReturnQueue = Queue.Queue() #The address generator thread uses this queue to get information back to the API thread.
#These constants are not at the top because if changed they will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
averageProofOfWorkNonceTrialsPerByte = 320 #The amount of work that should be performed (and demanded) per byte of the payload. Double this number to double the work.
payloadLengthExtraBytes = 14000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.
if useVeryEasyProofOfWorkForTesting:
    averageProofOfWorkNonceTrialsPerByte = averageProofOfWorkNonceTrialsPerByte / 16
    payloadLengthExtraBytes = payloadLengthExtraBytes / 7000
if __name__ == "__main__":
    # ---- Startup sequence: sqlite check, config discovery/creation, config
    # migration, messages.dat relocation, knownNodes load, DNS bootstrap,
    # then launch the Qt UI. Order matters throughout. ----
    # Check the Major version, the first element in the array
    if sqlite3.sqlite_version_info[0] < 3:
        print 'This program requires sqlite version 3 or higher because 2 and lower cannot store NULL values. I see version:', sqlite3.sqlite_version_info
        sys.exit()
    #First try to load the config file (the keys.dat file) from the program directory
    config = ConfigParser.SafeConfigParser()
    config.read('keys.dat')
    try:
        # A successful get() proves a usable keys.dat exists here.
        config.get('bitmessagesettings', 'settingsversion')
        #settingsFileExistsInProgramDirectory = True
        print 'Loading config files from same directory as program'
        appdata = ''
    except:
        #Could not load the keys.dat file in the program directory. Perhaps it is in the appdata directory.
        appdata = lookupAppdataFolder()
        #if not os.path.exists(appdata):
        #    os.makedirs(appdata)
        config = ConfigParser.SafeConfigParser()
        config.read(appdata + 'keys.dat')
        try:
            config.get('bitmessagesettings', 'settingsversion')
            print 'Loading existing config files from', appdata
        except:
            #This appears to be the first time running the program; there is no config file (or it cannot be accessed). Create config file.
            config.add_section('bitmessagesettings')
            config.set('bitmessagesettings','settingsversion','1')
            config.set('bitmessagesettings','port','8444')
            config.set('bitmessagesettings','timeformat','%%a, %%d %%b %%Y %%I:%%M %%p')
            config.set('bitmessagesettings','blackwhitelist','black')
            config.set('bitmessagesettings','startonlogon','false')
            if 'linux' in sys.platform:
                config.set('bitmessagesettings','minimizetotray','false')#This isn't implimented yet and when True on Ubuntu causes Bitmessage to disappear while running when minimized.
            else:
                config.set('bitmessagesettings','minimizetotray','true')
            config.set('bitmessagesettings','showtraynotifications','true')
            config.set('bitmessagesettings','startintray','false')
            if storeConfigFilesInSameDirectoryAsProgramByDefault:
                #Just use the same directory as the program and forget about the appdata folder
                appdata = ''
                print 'Creating new config files in same directory as program.'
            else:
                print 'Creating new config files in', appdata
                if not os.path.exists(appdata):
                    os.makedirs(appdata)
            with open(appdata + 'keys.dat', 'wb') as configfile:
                config.write(configfile)
    # Migrate a version-1 settings file to version 3 by adding SOCKS and
    # encryption-related options.
    if config.getint('bitmessagesettings','settingsversion') == 1:
        config.set('bitmessagesettings','settingsversion','3') #If the settings version is equal to 2 then the sqlThread will modify the pubkeys table and change the settings version to 3.
        config.set('bitmessagesettings','socksproxytype','none')
        config.set('bitmessagesettings','sockshostname','localhost')
        config.set('bitmessagesettings','socksport','9050')
        config.set('bitmessagesettings','socksauthentication','false')
        config.set('bitmessagesettings','socksusername','')
        config.set('bitmessagesettings','sockspassword','')
        config.set('bitmessagesettings','keysencrypted','false')
        config.set('bitmessagesettings','messagesencrypted','false')
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
    #Let us now see if we should move the messages.dat file. There is an option in the settings to switch 'Portable Mode' on or off. Most of the files are moved instantly, but the messages.dat file cannot be moved while it is open. Now that it is not open we can move it now!
    try:
        # getboolean raising (option absent) is the normal case; reaching the
        # body means the move flag is present.
        config.getboolean('bitmessagesettings', 'movemessagstoprog')
        #If we have reached this point then we must move the messages.dat file from the appdata folder to the program folder
        print 'Moving messages.dat from its old location in the application data folder to its new home along side the program.'
        shutil.move(lookupAppdataFolder()+'messages.dat','messages.dat')
        config.remove_option('bitmessagesettings', 'movemessagstoprog')
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
    except:
        pass
    try:
        config.getboolean('bitmessagesettings', 'movemessagstoappdata')
        #If we have reached this point then we must move the messages.dat file from the appdata folder to the program folder
        print 'Moving messages.dat from its old location next to the program to its new home in the application data folder.'
        shutil.move('messages.dat',lookupAppdataFolder()+'messages.dat')
        config.remove_option('bitmessagesettings', 'movemessagstoappdata')
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
    except:
        pass
    # Load the pickled knownNodes peer list; on failure (first run or corrupt
    # file) write defaults first, then load those.
    try:
        pickleFile = open(appdata + 'knownnodes.dat', 'rb')
        knownNodes = pickle.load(pickleFile)
        pickleFile.close()
    except:
        createDefaultKnownNodes(appdata)
        pickleFile = open(appdata + 'knownnodes.dat', 'rb')
        knownNodes = pickle.load(pickleFile)
        pickleFile.close()
    if config.getint('bitmessagesettings', 'settingsversion') > 3:
        print 'Bitmessage cannot read future versions of the keys file (keys.dat). Run the newer version of Bitmessage.'
        raise SystemExit
    #DNS bootstrap. This could be programmed to use the SOCKS proxy to do the DNS lookup some day but for now we will just rely on the entries in defaultKnownNodes.py. Hopefully either they are up to date or the user has run Bitmessage recently without SOCKS turned on and received good bootstrap nodes using that method.
    if config.get('bitmessagesettings', 'socksproxytype') == 'none':
        try:
            for item in socket.getaddrinfo('bootstrap8080.bitmessage.org',80):
                print 'Adding', item[4][0],'to knownNodes based on DNS boostrap method'
                knownNodes[1][item[4][0]] = (8080,int(time.time()))
        except:
            print 'bootstrap8080.bitmessage.org DNS bootstraping failed.'
        try:
            for item in socket.getaddrinfo('bootstrap8444.bitmessage.org',80):
                print 'Adding', item[4][0],'to knownNodes based on DNS boostrap method'
                knownNodes[1][item[4][0]] = (8444,int(time.time()))
        except:
            print 'bootstrap8444.bitmessage.org DNS bootstrapping failed.'
    else:
        print 'DNS bootstrap skipped because SOCKS is used.'
    app = QtGui.QApplication(sys.argv)
    app.setStyleSheet("QStatusBar::item { border: 0px solid black }")
    myapp = MyForm()
    myapp.show()
    if config.getboolean('bitmessagesettings', 'startintray'):
        myapp.hide()
        myapp.trayIcon.show()
        #self.hidden = True
        #self.setWindowState(self.windowState() & QtCore.Qt.WindowMinimized)
        #self.hide()
        # NOTE(review): nesting of this Windows-only block under 'startintray'
        # is inferred; original indentation was lost -- confirm against upstream.
        if 'win32' in sys.platform or 'win64' in sys.platform:
            myapp.setWindowFlags(Qt.ToolTip)
    sys.exit(app.exec_())
# So far, the Bitmessage protocol, this client, the Wiki, and the forums
# are all a one-man operation. Bitcoin tips are quite appreciated!
# 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u
#Do not share or accept IPs which are in the private ranges
#!/usr/bin/env python2.7
# Copyright (c) 2012 Jonathan Warren
# Copyright (c) 2012 The Bitmessage developers
# Distributed under the MIT/X11 software license. See the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#Right now, PyBitmessage only support connecting to stream 1. It doesn't yet contain logic to expand into further streams.
#---- Protocol/behavior constants shared across the client ----
softwareVersion = '0.2.7'
verbose = 2 #console logging verbosity level
maximumAgeOfAnObjectThatIAmWillingToAccept = 216000 #Equals two days and 12 hours.
lengthOfTimeToLeaveObjectsInInventory = 237600 #Equals two days and 18 hours. This should be longer than maximumAgeOfAnObjectThatIAmWillingToAccept so that we don't process messages twice.
lengthOfTimeToHoldOnToAllPubkeys = 2419200 #Equals 4 weeks. You could make this longer if you want but making it shorter would not be advisable because there is a very small possibility that it could keep you from obtaining a needed pubkey for a period of time.
maximumAgeOfObjectsThatIAdvertiseToOthers = 216000 #Equals two days and 12 hours
maximumAgeOfNodesThatIAdvertiseToOthers = 10800 #Equals three hours
storeConfigFilesInSameDirectoryAsProgramByDefault = False #The user may de-select Portable Mode in the settings if they want the config files to stay in the application data folder.
useVeryEasyProofOfWorkForTesting = False #If you set this to True while on the normal network, you won't be able to send or sometimes receive messages.
import sys
try:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
except Exception, err:
print 'PyBitmessage requires PyQt. You can download it from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \'PyQt Download\' (without quotes).'
print 'Error message:', err
sys.exit()
import ConfigParser
from bitmessageui import *
from newaddressdialog import *
from newsubscriptiondialog import *
from regenerateaddresses import *
from specialaddressbehavior import *
from settings import *
from about import *
from help import *
from iconglossary import *
from addresses import *
import Queue
from defaultKnownNodes import *
import time
import socket
import threading
#import rsa
#from rsa.bigfile import *
import hashlib
from struct import *
import pickle
import random
import sqlite3
import threading #used for the locks, not for the threads
from time import strftime, localtime
import os
import shutil #used for moving the messages.dat file
import string
import socks
import highlevelcrypto
from pyelliptic.openssl import OpenSSL
import ctypes
from pyelliptic import arithmetic
#The next 3 are used for the API
from SimpleXMLRPCServer import *
import json
from subprocess import call #used when the API must execute an outside program
#For each stream to which we connect, one outgoingSynSender thread will exist and will create 8 connections with peers.
class outgoingSynSender(QThread):
    """Worker thread that maintains up to 8 outgoing peer connections for one
    stream: picks random hosts from knownNodes, connects (optionally through
    a SOCKS4a/SOCKS5 proxy) and spawns a receiveDataThread/sendDataThread
    pair per successful connection.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        self.selfInitiatedConnectionList = [] #This is a list of current connections (the thread pointers at least)
        self.alreadyAttemptedConnectionsList = [] #This is a list of nodes to which we have already attempted a connection

    def setup(self,streamNumber):
        # streamNumber: which knownNodes stream this thread serves.
        self.streamNumber = streamNumber

    def run(self):
        """Main connect loop; never returns. Retries forever, resetting the
        already-attempted list every 30 minutes."""
        time.sleep(1)
        resetTime = int(time.time()) #used below to clear out the alreadyAttemptedConnectionsList periodically so that we will retry connecting to hosts to which we have already tried to connect.
        while True:
            #time.sleep(999999)#I sometimes use this to prevent connections for testing.
            if len(self.selfInitiatedConnectionList) < 8: #maximum number of outgoing connections = 8
                random.seed()
                HOST, = random.sample(knownNodes[self.streamNumber], 1)
                # Keep re-sampling until we find a host we are neither already
                # connected to nor have recently attempted.
                while HOST in self.alreadyAttemptedConnectionsList or HOST in connectedHostsList:
                    #print 'choosing new sample'
                    random.seed()
                    HOST, = random.sample(knownNodes[self.streamNumber], 1)
                    time.sleep(1)
                    #Clear out the alreadyAttemptedConnectionsList every half hour so that this program will again attempt a connection to any nodes, even ones it has already tried.
                    if (int(time.time()) - resetTime) > 1800:
                        self.alreadyAttemptedConnectionsList = []
                        resetTime = int(time.time())
                self.alreadyAttemptedConnectionsList.append(HOST)
                PORT, timeNodeLastSeen = knownNodes[self.streamNumber][HOST]
                sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(20)
                # Configure the socket for direct, SOCKS4a or SOCKS5 operation
                # depending on user settings.
                if config.get('bitmessagesettings', 'socksproxytype') == 'none':
                    printLock.acquire()
                    print 'Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    #sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                elif config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS4a':
                    printLock.acquire()
                    print '(Using SOCKS4a) Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    proxytype = socks.PROXY_TYPE_SOCKS4
                    sockshostname = config.get('bitmessagesettings', 'sockshostname')
                    socksport = config.getint('bitmessagesettings', 'socksport')
                    rdns = True #Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
                    if config.getboolean('bitmessagesettings', 'socksauthentication'):
                        socksusername = config.get('bitmessagesettings', 'socksusername')
                        sockspassword = config.get('bitmessagesettings', 'sockspassword')
                        sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
                    else:
                        sock.setproxy(proxytype, sockshostname, socksport, rdns)
                elif config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS5':
                    printLock.acquire()
                    print '(Using SOCKS5) Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    proxytype = socks.PROXY_TYPE_SOCKS5
                    sockshostname = config.get('bitmessagesettings', 'sockshostname')
                    socksport = config.getint('bitmessagesettings', 'socksport')
                    rdns = True #Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
                    if config.getboolean('bitmessagesettings', 'socksauthentication'):
                        socksusername = config.get('bitmessagesettings', 'socksusername')
                        sockspassword = config.get('bitmessagesettings', 'sockspassword')
                        sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
                    else:
                        sock.setproxy(proxytype, sockshostname, socksport, rdns)
                try:
                    sock.connect((HOST, PORT))
                    # Success: start one receive thread and one send thread
                    # that share this socket and the per-peer inventory map.
                    rd = receiveDataThread()
                    self.emit(SIGNAL("passObjectThrough(PyQt_PyObject)"),rd)
                    objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
                    rd.setup(sock,HOST,PORT,self.streamNumber,self.selfInitiatedConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware)
                    rd.start()
                    printLock.acquire()
                    print self, 'connected to', HOST, 'during outgoing attempt.'
                    printLock.release()
                    sd = sendDataThread()
                    sd.setup(sock,HOST,PORT,self.streamNumber,objectsOfWhichThisRemoteNodeIsAlreadyAware)
                    sd.start()
                    sd.sendVersionMessage()
                except socks.GeneralProxyError, err:
                    printLock.acquire()
                    print 'Could NOT connect to', HOST, 'during outgoing attempt.', err
                    printLock.release()
                    # Prune long-dead nodes, but only while the list is large.
                    PORT, timeLastSeen = knownNodes[self.streamNumber][HOST]
                    if (int(time.time())-timeLastSeen) > 172800 and len(knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the knownNodes data-structure.
                        del knownNodes[self.streamNumber][HOST]
                        print 'deleting ', HOST, 'from knownNodes because it is more than 48 hours old and we could not connect to it.'
                except socks.Socks5AuthError, err:
                    self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS5 Authentication problem: "+str(err))
                except socks.Socks5Error, err:
                    pass
                    print 'SOCKS5 error. (It is possible that the server wants authentication).)' ,str(err)
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS5 error. Server might require authentication. "+str(err))
                except socks.Socks4Error, err:
                    print 'Socks4Error:', err
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS4 error: "+str(err))
                except socket.error, err:
                    if config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                        print 'Bitmessage MIGHT be having trouble connecting to the SOCKS server. '+str(err)
                        #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"Problem: Bitmessage can not connect to the SOCKS server. "+str(err))
                    else:
                        printLock.acquire()
                        print 'Could NOT connect to', HOST, 'during outgoing attempt.', err
                        printLock.release()
                        PORT, timeLastSeen = knownNodes[self.streamNumber][HOST]
                        if (int(time.time())-timeLastSeen) > 172800 and len(knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the knownNodes data-structure.
                            del knownNodes[self.streamNumber][HOST]
                            print 'deleting ', HOST, 'from knownNodes because it is more than 48 hours old and we could not connect to it.'
                except Exception, err:
                    print 'An exception has occurred in the outgoingSynSender thread that was not caught by other exception types:', err
            time.sleep(0.1)
#Only one singleListener thread will ever exist. It creates the receiveDataThread and sendDataThread for each incoming connection. Note that it cannot set the stream number because it is not known yet- the other node will have to tell us its stream number in a version message. If we don't care about their stream, we will close the connection (within the recversion function of the receiveData thread)
class singleListener(QThread):
    """Accepts incoming connections and spawns a receiveDataThread and a
    sendDataThread for each one. Only one instance of this thread will ever
    exist. It cannot set the stream number because that is not known until
    the remote node sends its version message; -1 is passed as a placeholder.
    If we don't care about the peer's stream, the connection is closed within
    the recversion function of the receiveData thread."""
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        #We don't want to accept incoming connections if the user is using a
        #SOCKS proxy. If they eventually select proxy 'none' then this will
        #start listening for connections.
        while config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
            time.sleep(300)
        print 'Listening for incoming connections.'
        HOST = '' # Symbolic name meaning all available interfaces
        PORT = config.getint('bitmessagesettings', 'port')
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #This option apparently avoids the TIME_WAIT state so that we can rebind faster
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((HOST, PORT))
        sock.listen(2)
        #This list isn't used for anything. It exists only because receiveData
        #threads expect a list to be passed to them; the outgoingSynSender
        #thread DOES use a similar list to track its outgoing connection count.
        self.incomingConnectionList = []
        while True:
            #Pause accepting while a SOCKS proxy is selected; resume if the
            #user eventually switches the proxy back to 'none'.
            while config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                time.sleep(10)
            a,(HOST,PORT) = sock.accept()
            #Users are finding that if they run more than one node in the same network (thus with the same public IP), they can not connect with the second node. This is because this section of code won't accept the connection from the same IP. This problem will go away when the Bitmessage network grows beyond being tiny but in the mean time I'll comment out this code section.
            """while HOST in connectedHostsList:
                print 'incoming connection is from a host in connectedHostsList (we are already connected to it). Ignoring it.'
                a.close()
                a,(HOST,PORT) = sock.accept()"""
            #-1 stream number: the peer will tell us its stream in its version
            #message.
            rd = receiveDataThread()
            self.emit(SIGNAL("passObjectThrough(PyQt_PyObject)"),rd)
            objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
            rd.setup(a,HOST,PORT,-1,self.incomingConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware)
            printLock.acquire()
            print self, 'connected to', HOST,'during INCOMING request.'
            printLock.release()
            rd.start()
            sd = sendDataThread()
            sd.setup(a,HOST,PORT,-1,objectsOfWhichThisRemoteNodeIsAlreadyAware)
            sd.start()
#This thread is created either by the synSenderThread (for outgoing connections) or the singleListenerThread (for incoming connections).
class receiveDataThread(QThread):
def __init__(self, parent = None):
QThread.__init__(self, parent)
self.data = ''
self.verackSent = False
self.verackReceived = False
def setup(self,sock,HOST,port,streamNumber,selfInitiatedConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware):
self.sock = sock
self.HOST = HOST
self.PORT = port
self.sock.settimeout(600) #We'll send out a pong every 5 minutes to make sure the connection stays alive if there has been no other traffic to send lately.
self.streamNumber = streamNumber
self.selfInitiatedConnectionList = selfInitiatedConnectionList
self.selfInitiatedConnectionList.append(self)
self.payloadLength = 0 #This is the protocol payload length thus it doesn't include the 24 byte message header
self.receivedgetbiginv = False #Gets set to true once we receive a getbiginv message from our peer. An abusive peer might request it too much so we use this variable to check whether they have already asked for a big inv message.
self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave = {}
connectedHostsList[self.HOST] = 0 #The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it to this list so that the outgoingSynSender thread doesn't try to connect to it.
self.connectionIsOrWasFullyEstablished = False #set to true after the remote node and I accept each other's version messages. This is needed to allow the user interface to accurately reflect the current number of connections.
if self.streamNumber == -1: #This was an incoming connection. Send out a version message if we accept the other node's version message.
self.initiatedConnection = False
else:
self.initiatedConnection = True
self.ackDataThatWeHaveYetToSend = [] #When we receive a message bound for us, we store the acknowledgement that we need to send (the ackdata) here until we are done processing all other data received from this peer.
self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware
    def run(self):
        """Main receive loop: read from the socket until timeout, error, or a
        clean close, feeding each chunk to processData; then tear the
        connection down and update the shared bookkeeping structures."""
        while True:
            try:
                self.data = self.data + self.sock.recv(65536)
            except socket.timeout:
                #No traffic for 600 seconds (timeout set in setup); the peer
                #is presumed gone since idle peers send a pong every 5 min.
                printLock.acquire()
                print 'Timeout occurred waiting for data. Closing receiveData thread.'
                printLock.release()
                break
            except Exception, err:
                printLock.acquire()
                print 'sock.recv error. Closing receiveData thread.', err
                printLock.release()
                break
            #print 'Received', repr(self.data)
            if self.data == "":
                #recv returning an empty string means the remote side closed
                #the connection.
                printLock.acquire()
                print 'Connection closed. Closing receiveData thread.'
                printLock.release()
                break
            else:
                self.processData()
        try:
            self.sock.close()
        except Exception, err:
            print 'Within receiveDataThread run(), self.sock.close() failed.', err
        try:
            self.selfInitiatedConnectionList.remove(self)
            printLock.acquire()
            print 'removed self (a receiveDataThread) from ConnectionList'
            printLock.release()
        except:
            pass
        #Tell the matching sendDataThread for this host to shut down as well.
        broadcastToSendDataQueues((0, 'shutdown', self.HOST))
        #We don't want to decrement the number of connections and show the
        #result if we never incremented it in the first place (which we only
        #do once the connection is fully established - meaning both nodes
        #accepted each other's version packets).
        if self.connectionIsOrWasFullyEstablished:
            connectionsCountLock.acquire()
            connectionsCount[self.streamNumber] -= 1
            self.emit(SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"),self.streamNumber,connectionsCount[self.streamNumber])
            printLock.acquire()
            print 'Updating network status tab with current connections count:', connectionsCount[self.streamNumber]
            printLock.release()
            connectionsCountLock.release()
        try:
            del connectedHostsList[self.HOST]
        except Exception, err:
            print 'Could not delete', self.HOST, 'from connectedHostsList.', err
    def processData(self):
        """Parse as many complete protocol messages as are buffered in
        self.data and dispatch each to its rec*/send* handler, recursing
        until no complete message remains. Message format: 4 magic bytes,
        12 byte command, 4 byte payload length, 4 byte checksum, payload."""
        global verbose
        #if verbose >= 2:
            #printLock.acquire()
            #print 'self.data is currently ', repr(self.data)
            #printLock.release()
        if len(self.data) < 20: #if so little of the data has arrived that we can't even unpack the payload length
            pass
        elif self.data[0:4] != '\xe9\xbe\xb4\xd9':
            #Wrong magic bytes: we are out of sync with the peer; discard the
            #whole buffer rather than trying to resynchronize.
            if verbose >= 2:
                printLock.acquire()
                sys.stderr.write('The magic bytes were not correct. First 40 bytes of data: %s\n' % repr(self.data[0:40]))
                print 'self.data:', self.data.encode('hex')
                printLock.release()
            self.data = ""
        else:
            self.payloadLength, = unpack('>L',self.data[16:20])
            if len(self.data) >= self.payloadLength+24: #check if the whole message has arrived yet. If it has,...
                if self.data[20:24] == hashlib.sha512(self.data[24:self.payloadLength+24]).digest()[0:4]:#test the checksum in the message. If it is correct...
                    #print 'message checksum is correct'
                    #The time we've last seen this node is obviously right now since we just received valid data from it. So update the knownNodes list so that other peers can be made aware of its existance.
                    if self.initiatedConnection: #The remote port is only something we should share with others if it is the remote node's incoming port (rather than some random operating-system-assigned outgoing port).
                        knownNodes[self.streamNumber][self.HOST] = (self.PORT,int(time.time()))
                    if self.payloadLength <= 180000000: #If the size of the message is greater than 180MB, ignore it. (I get memory errors when processing messages much larger than this though it is concievable that this value will have to be lowered if some systems are less tolarant of large messages.)
                        remoteCommand = self.data[4:16]
                        printLock.acquire()
                        print 'remoteCommand', repr(remoteCommand.replace('\x00','')), ' from', self.HOST
                        printLock.release()
                        #Dispatch on the NUL-padded 12-byte command field.
                        #Everything except version/verack requires a fully
                        #established connection.
                        if remoteCommand == 'version\x00\x00\x00\x00\x00':
                            self.recversion()
                        elif remoteCommand == 'verack\x00\x00\x00\x00\x00\x00':
                            self.recverack()
                        elif remoteCommand == 'addr\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recaddr()
                        elif remoteCommand == 'getpubkey\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recgetpubkey()
                        elif remoteCommand == 'pubkey\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recpubkey()
                        elif remoteCommand == 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recinv()
                        elif remoteCommand == 'getdata\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recgetdata()
                        elif remoteCommand == 'getbiginv\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendBigInv()
                        elif remoteCommand == 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recmsg()
                        elif remoteCommand == 'broadcast\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recbroadcast()
                        elif remoteCommand == 'getaddr\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendaddr()
                        elif remoteCommand == 'ping\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendpong()
                        elif remoteCommand == 'pong\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            pass
                        elif remoteCommand == 'alert\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            pass
                    self.data = self.data[self.payloadLength+24:]#take this message out and then process the next message
                    if self.data == '':
                        #No more complete messages are buffered; use the lull
                        #to request one advertised object we don't yet have.
                        while len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
                            random.seed()
                            objectHash, = random.sample(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave, 1)
                            if objectHash in inventory:
                                printLock.acquire()
                                print 'Inventory (in memory) already has object listed in inv message.'
                                printLock.release()
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]
                            elif isInSqlInventory(objectHash):
                                printLock.acquire()
                                print 'Inventory (SQL on disk) already has object listed in inv message.'
                                printLock.release()
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]
                            else:
                                #print 'processData function making request for object:', objectHash.encode('hex')
                                self.sendgetdata(objectHash)
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash] #It is possible that the remote node doesn't respond with the object. In that case, we'll very likely get it from someone else anyway.
                                break
                        if len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
                            printLock.acquire()
                            print 'within processData, number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now', len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)
                            printLock.release()
                        if len(self.ackDataThatWeHaveYetToSend) > 0:
                            #Treat a queued ackdata payload as freshly
                            #received data so the recursive call below
                            #processes it.
                            self.data = self.ackDataThatWeHaveYetToSend.pop()
                    self.processData()
                else:
                    print 'Checksum incorrect. Clearing this message.'
                    self.data = self.data[self.payloadLength+24:]
def isProofOfWorkSufficient(self):
POW, = unpack('>Q',hashlib.sha512(hashlib.sha512(self.data[24:32]+ hashlib.sha512(self.data[32:24+self.payloadLength]).digest()).digest()).digest()[0:8])
#print 'POW:', POW
#Notice that I have divided the averageProofOfWorkNonceTrialsPerByte by two. This makes the POW requirement easier. This gives us wiggle-room: if we decide that we want to make the POW easier, the change won't obsolete old clients because they already expect a lower POW. If we decide that the current work done by clients feels approperate then we can remove this division by 2 and make the requirement match what is actually done by a sending node. If we want to raise the POW requirement then old nodes will HAVE to upgrade no matter what.
return POW < 2**64 / ((self.payloadLength+payloadLengthExtraBytes) * (averageProofOfWorkNonceTrialsPerByte/2))
def sendpong(self):
print 'Sending pong'
self.sock.sendall('\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
def recverack(self):
print 'verack received'
self.verackReceived = True
if self.verackSent == True:
#We have thus both sent and received a verack.
self.connectionFullyEstablished()
    def connectionFullyEstablished(self):
        """Both version messages have been accepted: update the UI and the
        connection count, advertise the new node to our peers, exchange addr
        data, and send our big inv - unless we already have too many
        connections, in which case close this one."""
        self.connectionIsOrWasFullyEstablished = True
        if not self.initiatedConnection:
            #An incoming connection proves that we are reachable from the
            #outside, so show the green status icon.
            self.emit(SIGNAL("setStatusIcon(PyQt_PyObject)"),'green')
        #Update the 'Network Status' tab
        connectionsCountLock.acquire()
        connectionsCount[self.streamNumber] += 1
        self.emit(SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"),self.streamNumber,connectionsCount[self.streamNumber])
        connectionsCountLock.release()
        remoteNodeIncomingPort, remoteNodeSeenTime = knownNodes[self.streamNumber][self.HOST]
        printLock.acquire()
        print 'Connection fully established with', self.HOST, remoteNodeIncomingPort
        print 'broadcasting addr from within connectionFullyEstablished function.'
        printLock.release()
        self.broadcastaddr([(int(time.time()), self.streamNumber, 1, self.HOST, remoteNodeIncomingPort)]) #This lets all of our peers know about this new node.
        self.sendaddr() #This is one large addr message to this one peer.
        if connectionsCount[self.streamNumber] > 150:
            printLock.acquire()
            print 'We are connected to too many people. Closing connection.'
            printLock.release()
            self.sock.close()
            return
        self.sendBigInv()
    def sendBigInv(self): #I used capitals in for this function name because there is no such Bitmessage command as 'biginv'.
        """Advertise to this peer, via one or more inv messages, every object
        we hold for this stream that is recent enough and that the peer is
        not already known to have. Honored at most once per connection."""
        if self.receivedgetbiginv:
            print 'We have already sent a big inv message to this peer. Ignoring request.'
            return
        else:
            self.receivedgetbiginv = True
        sqlLock.acquire()
        #Select all hashes which are younger than two days old and in this stream.
        t = (int(time.time())-maximumAgeOfObjectsThatIAdvertiseToOthers,self.streamNumber)
        sqlSubmitQueue.put('''SELECT hash FROM inventory WHERE receivedtime>? and streamnumber=?''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        #bigInvList is used as a set: keys are object hashes, values unused.
        bigInvList = {}
        for row in queryreturn:
            hash, = row
            if hash not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                bigInvList[hash] = 0
            else:
                printLock.acquire()
                print 'Not including an object hash in a big inv message because the remote node is already aware of it.'#This line is here to check that this feature is working.
                printLock.release()
        #We also have messages in our inventory in memory (which is a python dictionary). Let's fetch those too.
        for hash, storedValue in inventory.items():
            if hash not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                objectType, streamNumber, payload, receivedTime = storedValue
                if streamNumber == self.streamNumber and receivedTime > int(time.time())-maximumAgeOfObjectsThatIAdvertiseToOthers:
                    bigInvList[hash] = 0
            else:
                printLock.acquire()
                print 'Not including an object hash in a big inv message because the remote node is already aware of it.'#This line is here to check that this feature is working.
                printLock.release()
        numberOfObjectsInInvMessage = 0
        payload = ''
        #Now let us start appending all of these hashes together. They will be sent out in a big inv message to our new peer.
        for hash, storedValue in bigInvList.items():
            payload += hash
            numberOfObjectsInInvMessage += 1
            if numberOfObjectsInInvMessage >= 50000: #We can only send a max of 50000 items per inv message but we may have more objects to advertise. They must be split up into multiple inv messages.
                self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage,payload)
                payload = ''
                numberOfObjectsInInvMessage = 0
        if numberOfObjectsInInvMessage > 0:
            self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage,payload)
#Self explanatory. Notice that there is also a broadcastinv function for broadcasting invs to everyone in our stream.
def sendinvMessageToJustThisOnePeer(self,numberOfObjects,payload):
payload = encodeVarint(numberOfObjects) + payload
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload))
headerData += hashlib.sha512(payload).digest()[:4]
printLock.acquire()
print 'Sending huge inv message with', numberOfObjects, 'objects to just this one peer'
printLock.release()
self.sock.send(headerData + payload)
#We have received a broadcast message
    def recbroadcast(self):
        """Handle a received broadcast message: validate POW, timestamp and
        size, record it in the inventory, relay it to our peers, then process
        it. Afterwards, sleep so that total handling time is roughly constant
        per size class (a timing-attack mitigation: observers should not be
        able to tell which broadcasts interest us)."""
        self.messageProcessingStartTime = time.time()
        #First we must check to make sure the proof of work is sufficient.
        if not self.isProofOfWorkSufficient():
            print 'Proof of work in broadcast message insufficient.'
            return
        embeddedTime, = unpack('>I',self.data[32:36])
        if embeddedTime > (int(time.time())+10800): #prevent funny business
            print 'The embedded time in this broadcast message is more than three hours in the future. That doesn\'t make sense. Ignoring message.'
            return
        if embeddedTime < (int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept):
            print 'The embedded time in this broadcast message is too old. Ignoring message.'
            return
        if self.payloadLength < 66: #todo: When version 1 addresses are completely abandoned, this should be changed to 180
            print 'The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.'
            return
        #Hold inventoryLock across the duplicate check and insert so another
        #thread cannot add the same object in between.
        inventoryLock.acquire()
        self.inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
        if self.inventoryHash in inventory:
            print 'We have already received this broadcast object. Ignoring.'
            inventoryLock.release()
            return
        elif isInSqlInventory(self.inventoryHash):
            print 'We have already received this broadcast object (it is stored on disk in the SQL inventory). Ignoring it.'
            inventoryLock.release()
            return
        #It is valid so far. Let's let our peers know about it.
        objectType = 'broadcast'
        inventory[self.inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
        inventoryLock.release()
        self.broadcastinv(self.inventoryHash)
        self.emit(SIGNAL("incrementNumberOfBroadcastsProcessed()"))
        self.processbroadcast()#When this function returns, we will have either successfully processed this broadcast because we are interested in it, ignored it because we aren't interested in it, or found problem with the broadcast that warranted ignoring it.
        #Pick a per-size time budget; if processing finished early, sleep off
        #the remainder. These values are mostly the same ones used for msg
        #messages although broadcast messages are processed faster.
        if self.payloadLength > 100000000: #Size is greater than 100 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 100 #seconds.
        elif self.payloadLength > 10000000: #Between 100 and 10 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 20 #seconds.
        elif self.payloadLength > 1000000: #Between 10 and 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = 3 #seconds.
        else: #Less than 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = .1 #seconds.
        sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.messageProcessingStartTime)
        if sleepTime > 0:
            printLock.acquire()
            print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
            printLock.release()
            time.sleep(sleepTime)
        printLock.acquire()
        print 'Total message processing time:', time.time()- self.messageProcessingStartTime, 'seconds.'
        printLock.release()
#A broadcast message has a valid time and POW and requires processing. The recbroadcast function calls this one.
    def processbroadcast(self):
        """Parse a broadcast whose POW and timestamp recbroadcast has already
        validated. If it comes from a sender we watch, verify the embedded
        pubkey against the claimed hash and the ECDSA signature, store the
        pubkey, and file the decoded message into the inbox. Only broadcast
        version 1 with sender address version 2 is supported; the old
        version-1-address handling survives below as a commented-out string."""
        readPosition = 36
        broadcastVersion, broadcastVersionLength = decodeVarint(self.data[readPosition:readPosition+9])
        if broadcastVersion <> 1:
            #Cannot decode incoming broadcast versions higher than 1. Assuming
            #the sender isn't being silly, you should upgrade Bitmessage
            #because this message shall be ignored.
            return
        readPosition += broadcastVersionLength
        beginningOfPubkeyPosition = readPosition #used when we add the pubkey to our pubkey table
        sendersAddressVersion, sendersAddressVersionLength = decodeVarint(self.data[readPosition:readPosition+9])
        if sendersAddressVersion <= 1 or sendersAddressVersion >=3:
            #Cannot decode senderAddressVersion other than 2. Assuming the
            #sender isn't being silly, you should upgrade Bitmessage because
            #this message shall be ignored.
            return
        readPosition += sendersAddressVersionLength
        if sendersAddressVersion == 2:
            sendersStream, sendersStreamLength = decodeVarint(self.data[readPosition:readPosition+9])
            if sendersStream <= 0 or sendersStream <> self.streamNumber:
                return
            readPosition += sendersStreamLength
            behaviorBitfield = self.data[readPosition:readPosition+4]
            readPosition += 4
            #Public keys are transmitted without the 0x04 uncompressed-point
            #prefix; restore it here.
            sendersPubSigningKey = '\x04' + self.data[readPosition:readPosition+64]
            readPosition += 64
            sendersPubEncryptionKey = '\x04' + self.data[readPosition:readPosition+64]
            readPosition += 64
            endOfPubkeyPosition = readPosition
            sendersHash = self.data[readPosition:readPosition+20]
            if sendersHash not in broadcastSendersForWhichImWatching:
                #Display timing data
                printLock.acquire()
                print 'Time spent deciding that we are not interested in this broadcast:', time.time()- self.messageProcessingStartTime
                printLock.release()
                return
            #At this point, this message claims to be from sendersHash and we are interested in it. We still have to hash the public key to make sure it is truly the key that matches the hash, and also check the signiture.
            readPosition += 20
            sha = hashlib.new('sha512')
            sha.update(sendersPubSigningKey+sendersPubEncryptionKey)
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            if ripe.digest() != sendersHash:
                #The sender of this message lied.
                return
            messageEncodingType, messageEncodingTypeLength = decodeVarint(self.data[readPosition:readPosition+9])
            if messageEncodingType == 0:
                return
            readPosition += messageEncodingTypeLength
            messageLength, messageLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += messageLengthLength
            message = self.data[readPosition:readPosition+messageLength]
            readPosition += messageLength
            readPositionAtBottomOfMessage = readPosition
            signatureLength, signatureLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += signatureLengthLength
            signature = self.data[readPosition:readPosition+signatureLength]
            try:
                highlevelcrypto.verify(self.data[36:readPositionAtBottomOfMessage],signature,sendersPubSigningKey.encode('hex'))
                print 'ECDSA verify passed'
            except Exception, err:
                print 'ECDSA verify failed', err
                return
            #verify passed
            #Let's store the public key in case we want to reply to this person.
            #We don't have the correct nonce or time (which would let us send out a pubkey message) so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed to not do.)
            t = (ripe.digest(),False,'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'+'\xFF\xFF\xFF\xFF'+self.data[beginningOfPubkeyPosition:endOfPubkeyPosition],int(time.time()),'yes')
            sqlLock.acquire()
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            workerQueue.put(('newpubkey',(sendersAddressVersion,sendersStream,ripe.digest()))) #This will check to see whether we happen to be awaiting this pubkey in order to send a message. If we are, it will do the POW and send it.
            fromAddress = encodeAddress(sendersAddressVersion,sendersStream,ripe.digest())
            print 'fromAddress:', fromAddress
            #Encoding type 2 carries 'Subject:...\nBody:...'; type 1 is a
            #plain body with no subject.
            if messageEncodingType == 2:
                bodyPositionIndex = string.find(message,'\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex+6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                body = message
                subject = ''
            elif messageEncodingType == 0:
                #Unreachable in practice: encoding type 0 already returned
                #above.
                print 'messageEncodingType == 0. Doing nothing with the message.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''
            toAddress = '[Broadcast subscribers]'
            if messageEncodingType <> 0:
                sqlLock.acquire()
                t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
                #If we are behaving as an API then we might need to run an outside command to let some program know that a new message has arrived.
                if safeConfigGetBoolean('bitmessagesettings','apienabled'):
                    try:
                        apiNotifyPath = config.get('bitmessagesettings','apinotifypath')
                    except:
                        apiNotifyPath = ''
                    if apiNotifyPath != '':
                        call([apiNotifyPath, "newBroadcast"])
            #Display timing data
            printLock.acquire()
            print 'Time spent processing this interesting broadcast:', time.time()- self.messageProcessingStartTime
            printLock.release()
        """elif sendersAddressVersion == 1:
            sendersStream, sendersStreamLength = decodeVarint(self.data[readPosition:readPosition+9])
            if sendersStream <= 0:
                return
            readPosition += sendersStreamLength
            sendersHash = self.data[readPosition:readPosition+20]
            if sendersHash not in broadcastSendersForWhichImWatching:
                return
            #At this point, this message claims to be from sendersHash and we are interested in it. We still have to hash the public key to make sure it is truly the key that matches the hash, and also check the signiture.
            readPosition += 20
            nLength, nLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            if nLength < 1:
                return
            readPosition += nLengthLength
            nString = self.data[readPosition:readPosition+nLength]
            readPosition += nLength
            eLength, eLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            if eLength < 1:
                return
            readPosition += eLengthLength
            eString = self.data[readPosition:readPosition+eLength]
            #We are now ready to hash the public key and verify that its hash matches the hash claimed in the message
            readPosition += eLength
            sha = hashlib.new('sha512')
            sha.update(nString+eString)
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            if ripe.digest() != sendersHash:
                #The sender of this message lied.
                return
            readPositionAtBeginningOfMessageEncodingType = readPosition
            messageEncodingType, messageEncodingTypeLength = decodeVarint(self.data[readPosition:readPosition+9])
            if messageEncodingType == 0:
                return
            readPosition += messageEncodingTypeLength
            messageLength, messageLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += messageLengthLength
            message = self.data[readPosition:readPosition+messageLength]
            readPosition += messageLength
            signature = self.data[readPosition:readPosition+nLength]
            sendersPubkey = rsa.PublicKey(convertStringToInt(nString),convertStringToInt(eString))
            #print 'senders Pubkey', sendersPubkey
            try:
                rsa.verify(self.data[readPositionAtBeginningOfMessageEncodingType:readPositionAtBeginningOfMessageEncodingType+messageEncodingTypeLength+messageLengthLength+messageLength],signature,sendersPubkey)
                print 'verify passed'
            except Exception, err:
                print 'verify failed', err
                return
            #verify passed
            fromAddress = encodeAddress(sendersAddressVersion,sendersStream,ripe.digest())
            print 'fromAddress:', fromAddress
            if messageEncodingType == 2:
                bodyPositionIndex = string.find(message,'\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex+6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                body = message
                subject = ''
            elif messageEncodingType == 0:
                print 'messageEncodingType == 0. Doing nothing with the message.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''
            toAddress = '[Broadcast subscribers]'
            if messageEncodingType <> 0:
                sqlLock.acquire()
                t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)"""
#We have received a msg message.
def recmsg(self):
    """Handle a received 'msg' object from a peer.

    Validates the proof of work, embedded timestamp and stream number,
    records the object in the shared inventory, relays it to peers, and
    then calls processmsg() to attempt decryption. Afterwards it sleeps
    so that total processing time is roughly constant for a given payload
    size (timing-attack mitigation: a peer should not be able to tell
    from our response time whether the message was bound for us).
    """
    self.messageProcessingStartTime = time.time()
    #First we must check to make sure the proof of work is sufficient.
    if not self.isProofOfWorkSufficient():
        print 'Proof of work in msg message insufficient.'
        return
    readPosition = 32  # skip 24-byte protocol header + 8-byte POW nonce
    embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
    if embeddedTime > int(time.time())+10800:  # 10800 s = 3 hours of clock-skew tolerance
        print 'The time in the msg message is too new. Ignoring it. Time:', embeddedTime
        return
    if embeddedTime < int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept:
        print 'The time in the msg message is too old. Ignoring it. Time:', embeddedTime
        return
    readPosition += 4
    streamNumberAsClaimedByMsg, streamNumberAsClaimedByMsgLength = decodeVarint(self.data[readPosition:readPosition+9])
    if streamNumberAsClaimedByMsg != self.streamNumber:
        print 'The stream number encoded in this msg (' + str(streamNumberAsClaimedByMsg) + ') message does not match the stream number on which it was received. Ignoring it.'
        return
    readPosition += streamNumberAsClaimedByMsgLength
    self.inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
    # Deduplicate against both the in-memory inventory and the on-disk SQL inventory.
    inventoryLock.acquire()
    if self.inventoryHash in inventory:
        print 'We have already received this msg message. Ignoring.'
        inventoryLock.release()
        return
    elif isInSqlInventory(self.inventoryHash):
        print 'We have already received this msg message (it is stored on disk in the SQL inventory). Ignoring it.'
        inventoryLock.release()
        return
    #This msg message is valid. Let's let our peers know about it.
    objectType = 'msg'
    inventory[self.inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
    inventoryLock.release()
    self.broadcastinv(self.inventoryHash)
    self.emit(SIGNAL("incrementNumberOfMessagesProcessed()"))
    self.processmsg(readPosition) #When this function returns, we will have either successfully processed the message bound for us, ignored it because it isn't bound for us, or found problem with the message that warranted ignoring it.
    # Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we haven't used the specified amount of time, we shall sleep. These values are based on test timings and you may change them at-will.
    if self.payloadLength > 100000000: #Size is greater than 100 megabytes
        lengthOfTimeWeShouldUseToProcessThisMessage = 100 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 MB message: 3.7 seconds.
    elif self.payloadLength > 10000000: #Between 100 and 10 megabytes
        lengthOfTimeWeShouldUseToProcessThisMessage = 20 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 10 MB message: 0.53 seconds. Actual length of time it takes in practice when processing a real message: 1.44 seconds.
    elif self.payloadLength > 1000000: #Between 10 and 1 megabyte
        lengthOfTimeWeShouldUseToProcessThisMessage = 3 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 1 MB message: 0.18 seconds. Actual length of time it takes in practice when processing a real message: 0.30 seconds.
    else: #Less than 1 megabyte
        lengthOfTimeWeShouldUseToProcessThisMessage = .6 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 KB message: 0.15 seconds. Actual length of time it takes in practice when processing a real message: 0.25 seconds.
    sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.messageProcessingStartTime)
    if sleepTime > 0:
        printLock.acquire()
        print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
        printLock.release()
        time.sleep(sleepTime)
    printLock.acquire()
    print 'Total message processing time:', time.time()- self.messageProcessingStartTime, 'seconds.'
    printLock.release()
    #This section is for my RSA keys (version 1 addresses). If we don't have any version 1 addresses it will never run. This code will soon be removed.
    # NOTE(review): the following triple-quoted string is disabled legacy RSA code,
    # kept here only as reference text. It is never executed.
    """initialDecryptionSuccessful = False
infile = cStringIO.StringIO(self.data[readPosition:self.payloadLength+24])
outfile = cStringIO.StringIO()
#print 'len(myRSAAddressHashes.items()):', len(myRSAAddressHashes.items())
for key, value in myRSAAddressHashes.items():
try:
decrypt_bigfile(infile, outfile, value)
#The initial decryption passed though there is a small chance that the message isn't actually for me. We'll need to check that the 20 zeros are present.
#print 'initial decryption successful using key', repr(key)
initialDecryptionSuccessful = True
printLock.acquire()
print 'Initial decryption passed'
printLock.release()
break
except Exception, err:
infile.seek(0)
#print 'Exception:', err
#print 'outfile len is:', len(outfile.getvalue()),'data is:', repr(outfile.getvalue())
#print 'Initial decryption failed using key', value
#decryption failed for this key. The message is for someone else (or for a different key of mine).
if initialDecryptionSuccessful and outfile.getvalue()[:20] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00': #this run of 0s allows the true message receiver to identify his message
#This is clearly a message bound for me.
outfile.seek(0)
data = outfile.getvalue()
readPosition = 20 #To start reading past the 20 zero bytes
messageVersion, messageVersionLength = decodeVarint(data[readPosition:readPosition+10])
readPosition += messageVersionLength
if messageVersion == 1:
bitfieldBehavior = data[readPosition:readPosition+4]
readPosition += 4
sendersAddressVersionNumber, sendersAddressVersionNumberLength = decodeVarint(data[readPosition:readPosition+10])
if sendersAddressVersionNumber == 1:
readPosition += sendersAddressVersionNumberLength
sendersStreamNumber, sendersStreamNumberLength = decodeVarint(data[readPosition:readPosition+10])
if sendersStreamNumber == 0:
print 'sendersStreamNumber = 0. Ignoring message'
else:
readPosition += sendersStreamNumberLength
sendersNLength, sendersNLengthLength = decodeVarint(data[readPosition:readPosition+10])
readPosition += sendersNLengthLength
sendersN = data[readPosition:readPosition+sendersNLength]
readPosition += sendersNLength
sendersELength, sendersELengthLength = decodeVarint(data[readPosition:readPosition+10])
readPosition += sendersELengthLength
sendersE = data[readPosition:readPosition+sendersELength]
readPosition += sendersELength
endOfThePublicKeyPosition = readPosition
messageEncodingType, messageEncodingTypeLength = decodeVarint(data[readPosition:readPosition+10])
readPosition += messageEncodingTypeLength
print 'Message Encoding Type:', messageEncodingType
messageLength, messageLengthLength = decodeVarint(data[readPosition:readPosition+10])
print 'message length:', messageLength
readPosition += messageLengthLength
message = data[readPosition:readPosition+messageLength]
#print 'First 150 characters of message:', repr(message[:150])
readPosition += messageLength
ackLength, ackLengthLength = decodeVarint(data[readPosition:readPosition+10])
#print 'ackLength:', ackLength
readPosition += ackLengthLength
ackData = data[readPosition:readPosition+ackLength]
readPosition += ackLength
payloadSigniture = data[readPosition:readPosition+sendersNLength] #We're using the length of the sender's n because it should match the signiture size.
sendersPubkey = rsa.PublicKey(convertStringToInt(sendersN),convertStringToInt(sendersE))
print 'sender\'s Pubkey', sendersPubkey
#Check the cryptographic signiture
verifyPassed = False
try:
rsa.verify(data[:-len(payloadSigniture)],payloadSigniture, sendersPubkey)
print 'verify passed'
verifyPassed = True
except Exception, err:
print 'verify failed', err
if verifyPassed:
#calculate the fromRipe.
sha = hashlib.new('sha512')
sha.update(sendersN+sendersE)
ripe = hashlib.new('ripemd160')
ripe.update(sha.digest())
#Let's store the public key in case we want to reply to this person.
#We don't have the correct nonce in order to send out a pubkey message so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed to not do.)
t = (ripe.digest(),False,'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'+data[20+messageVersionLength:endOfThePublicKeyPosition],int(time.time()),'yes')
sqlLock.acquire()
sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
blockMessage = False #Gets set to True if the user shouldn't see the message according to black or white lists.
fromAddress = encodeAddress(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest())
if config.get('bitmessagesettings', 'blackwhitelist') == 'black': #If we are using a blacklist
t = (fromAddress,)
sqlLock.acquire()
sqlSubmitQueue.put('''SELECT label, enabled FROM blacklist where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
for row in queryreturn:
label, enabled = row
if enabled:
print 'Message ignored because address is in blacklist.'
blockMessage = True
else: #We're using a whitelist
t = (fromAddress,)
sqlLock.acquire()
sqlSubmitQueue.put('''SELECT label, enabled FROM whitelist where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn == []:
print 'Message ignored because address not in whitelist.'
blockMessage = True
for row in queryreturn: #It could be in the whitelist but disabled. Let's check.
label, enabled = row
if not enabled:
print 'Message ignored because address in whitelist but not enabled.'
blockMessage = True
if not blockMessage:
print 'fromAddress:', fromAddress
print 'First 150 characters of message:', repr(message[:150])
#Look up the destination address (my address) based on the destination ripe hash.
#I realize that I could have a data structure devoted to this task, or maintain an indexed table
#in the sql database, but I would prefer to minimize the number of data structures this program
#uses. Searching linearly through the user's short list of addresses doesn't take very long anyway.
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
if hash == key:
toAddress = addressInKeysFile
toLabel = config.get(addressInKeysFile, 'label')
if toLabel == '':
toLabel = addressInKeysFile
break
if messageEncodingType == 2:
bodyPositionIndex = string.find(message,'\nBody:')
if bodyPositionIndex > 1:
subject = message[8:bodyPositionIndex]
body = message[bodyPositionIndex+6:]
else:
subject = ''
body = message
elif messageEncodingType == 1:
body = message
subject = ''
elif messageEncodingType == 0:
print 'messageEncodingType == 0. Doing nothing with the message. They probably just sent it so that we would store their public key or send their ack data for them.'
else:
body = 'Unknown encoding type.\n\n' + repr(message)
subject = ''
print 'within recmsg, self.inventoryHash is', repr(self.inventoryHash)
if messageEncodingType <> 0:
sqlLock.acquire()
t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
self.emit(SIGNAL("displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
#Now let us worry about the acknowledgement data
#We'll need to make sure that our client will properly process the ackData; if the packet is malformed, it might cause us to clear out self.data and an attacker could use that behavior to determine that we decoded this message.
ackDataValidThusFar = True
if len(ackData) < 24:
print 'The length of ackData is unreasonably short. Not sending ackData.'
ackDataValidThusFar = False
if ackData[0:4] != '\xe9\xbe\xb4\xd9':
print 'Ackdata magic bytes were wrong. Not sending ackData.'
ackDataValidThusFar = False
if ackDataValidThusFar:
ackDataPayloadLength, = unpack('>L',ackData[16:20])
if len(ackData)-24 != ackDataPayloadLength: #This ackData includes the protocol header which is not counted in the payload length.
print 'ackData payload length doesn\'t match the payload length specified in the header. Not sending ackdata.'
ackDataValidThusFar = False
if ackDataValidThusFar:
print 'ackData is valid. Will process it.'
self.ackDataThatWeHaveYetToSend.append(ackData) #When we have processed all data, the processData function will pop the ackData out and process it as if it is a message received from our peer.
else:
print 'This program cannot decode messages from addresses with versions higher than 1. Ignoring.'
statusbar = 'This program cannot decode messages from addresses with versions higher than 1. Ignoring it.'
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
else:
statusbar = 'Error: Cannot decode incoming msg versions higher than 1. Assuming the sender isn\' being silly, you should upgrade Bitmessage. Ignoring message.'
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
else:
printLock.acquire()
print 'Could not decrypt with any RSA keys if you have any.'
printLock.release()
infile.close()
outfile.close()"""
#A msg message has a valid time and POW and requires processing. The recmsg function calls this one.
def processmsg(self,readPosition):
    """Attempt to interpret a validated 'msg' payload.

    First checks whether the payload is an acknowledgement we are watching
    for; otherwise tries to decrypt it with each of our EC private keys.
    On successful decryption it verifies the ECDSA signature, stores the
    sender's pubkey, applies the user's black/whitelist, files the message
    into the inbox (and rebroadcasts it if the receiving address is a
    mailing list), and queues any embedded ackdata for sending.
    readPosition points just past the stream-number varint in self.data.
    """
    initialDecryptionSuccessful = False
    #Let's check whether this is a message acknowledgement bound for us.
    if self.data[readPosition:24+self.payloadLength] in ackdataForWhichImWatching:
        printLock.acquire()
        print 'This msg IS an acknowledgement bound for me.'
        printLock.release()
        del ackdataForWhichImWatching[self.data[readPosition:24+self.payloadLength]]
        # Mark the corresponding sent message as acknowledged in the database.
        t = ('ackreceived',self.data[readPosition:24+self.payloadLength])
        sqlLock.acquire()
        sqlSubmitQueue.put('UPDATE sent SET status=? WHERE ackdata=?')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()
        sqlLock.release()
        self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),self.data[readPosition:24+self.payloadLength],'Acknowledgement of the message received just now.')
        return
    else:
        printLock.acquire()
        print 'This was NOT an acknowledgement bound for me.' #Msg potential ack data:', repr(self.data[readPosition:24+self.payloadLength])
        #print 'ackdataForWhichImWatching', ackdataForWhichImWatching
        printLock.release()
    #This is not an acknowledgement bound for me. See if it is a message bound for me by trying to decrypt it with my private keys.
    for key, cryptorObject in myECAddressHashes.items():
        try:
            data = cryptorObject.decrypt(self.data[readPosition:self.payloadLength+24])
            toRipe = key #This is the RIPE hash of my pubkeys. We need this below to compare to the destination_ripe included in the encrypted data.
            initialDecryptionSuccessful = True
            print 'EC decryption successful using key associated with ripe hash:', key.encode('hex')
            break
        except Exception, err:
            # Decryption failure simply means the message is not for this key.
            pass
            #print 'cryptorObject.decrypt Exception:', err
    if not initialDecryptionSuccessful:
        #This is not a message bound for me.
        printLock.acquire()
        print 'Length of time program spent failing to decrypt this message:', time.time()- self.messageProcessingStartTime, 'seconds.'
        printLock.release()
    else:
        #This is a message bound for me.
        # Parse the decrypted plaintext; readPosition now indexes into 'data'.
        readPosition = 0
        messageVersion, messageVersionLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += messageVersionLength
        if messageVersion != 1:
            print 'Cannot understand message versions other than one. Ignoring message.'
            return
        sendersAddressVersionNumber, sendersAddressVersionNumberLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += sendersAddressVersionNumberLength
        if sendersAddressVersionNumber == 0:
            print 'Cannot understand sendersAddressVersionNumber = 0. Ignoring message.'
            return
        if sendersAddressVersionNumber >= 3:
            print 'Sender\'s address version number', sendersAddressVersionNumber, ' not yet supported. Ignoring message.'
            return
        if len(data) < 170:
            print 'Length of the unencrypted data is unreasonably short. Sanity check failed. Ignoring message.'
            return
        sendersStreamNumber, sendersStreamNumberLength = decodeVarint(data[readPosition:readPosition+10])
        if sendersStreamNumber == 0:
            print 'sender\'s stream number is 0. Ignoring message.'
            return
        readPosition += sendersStreamNumberLength
        behaviorBitfield = data[readPosition:readPosition+4]
        readPosition += 4
        # '\x04' prefix restores the uncompressed-point marker stripped from the wire format.
        pubSigningKey = '\x04' + data[readPosition:readPosition+64]
        readPosition += 64
        pubEncryptionKey = '\x04' + data[readPosition:readPosition+64]
        readPosition += 64
        endOfThePublicKeyPosition = readPosition #needed for when we store the pubkey in our database of pubkeys for later use.
        if toRipe != data[readPosition:readPosition+20]:
            printLock.acquire()
            print 'The original sender of this message did not send it to you. Someone is attempting a Surreptitious Forwarding Attack.'
            print 'See: http://tools.ietf.org/html/draft-ietf-smime-sender-auth-00'
            print 'your toRipe:', toRipe.encode('hex')
            print 'embedded destination toRipe:', data[readPosition:readPosition+20].encode('hex')
            printLock.release()
            return
        readPosition += 20
        messageEncodingType, messageEncodingTypeLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += messageEncodingTypeLength
        messageLength, messageLengthLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += messageLengthLength
        message = data[readPosition:readPosition+messageLength]
        #print 'First 150 characters of message:', repr(message[:150])
        readPosition += messageLength
        ackLength, ackLengthLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += ackLengthLength
        ackData = data[readPosition:readPosition+ackLength]
        readPosition += ackLength
        positionOfBottomOfAckData = readPosition #needed to mark the end of what is covered by the signature
        signatureLength, signatureLengthLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += signatureLengthLength
        signature = data[readPosition:readPosition+signatureLength]
        try:
            highlevelcrypto.verify(data[:positionOfBottomOfAckData],signature,pubSigningKey.encode('hex'))
            print 'ECDSA verify passed'
        except Exception, err:
            print 'ECDSA verify failed', err
            return
        printLock.acquire()
        print 'As a matter of intellectual curiosity, here is the Bitcoin address associated with the keys owned by the other person:', calculateBitcoinAddressFromPubkey(pubSigningKey), ' ..and here is the testnet address:',calculateTestnetAddressFromPubkey(pubSigningKey),'. The other person must take their private signing key from Bitmessage and import it into Bitcoin (or a service like Blockchain.info) for it to be of any use. Do not use this unless you know what you are doing.'
        printLock.release()
        #calculate the fromRipe.
        sha = hashlib.new('sha512')
        sha.update(pubSigningKey+pubEncryptionKey)
        ripe = hashlib.new('ripemd160')
        ripe.update(sha.digest())
        #Let's store the public key in case we want to reply to this person.
        #We don't have the correct nonce or time (which would let us send out a pubkey message) so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed to not do.)
        t = (ripe.digest(),False,'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'+'\xFF\xFF\xFF\xFF'+data[messageVersionLength:endOfThePublicKeyPosition],int(time.time()),'yes')
        sqlLock.acquire()
        sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()
        sqlLock.release()
        workerQueue.put(('newpubkey',(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest()))) #This will check to see whether we happen to be awaiting this pubkey in order to send a message. If we are, it will do the POW and send it.
        blockMessage = False #Gets set to True if the user shouldn't see the message according to black or white lists.
        fromAddress = encodeAddress(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest())
        if config.get('bitmessagesettings', 'blackwhitelist') == 'black': #If we are using a blacklist
            t = (fromAddress,)
            sqlLock.acquire()
            sqlSubmitQueue.put('''SELECT label, enabled FROM blacklist where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            for row in queryreturn:
                label, enabled = row
                if enabled:
                    printLock.acquire()
                    print 'Message ignored because address is in blacklist.'
                    printLock.release()
                    blockMessage = True
        else: #We're using a whitelist
            t = (fromAddress,)
            sqlLock.acquire()
            sqlSubmitQueue.put('''SELECT label, enabled FROM whitelist where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn == []:
                print 'Message ignored because address not in whitelist.'
                blockMessage = True
            for row in queryreturn: #It could be in the whitelist but disabled. Let's check.
                label, enabled = row
                if not enabled:
                    print 'Message ignored because address in whitelist but not enabled.'
                    blockMessage = True
        if not blockMessage:
            print 'fromAddress:', fromAddress
            print 'First 150 characters of message:', repr(message[:150])
            #Look up the destination address (my address) based on the destination ripe hash.
            #I realize that I could have a data structure devoted to this task, or maintain an indexed table
            #in the sql database, but I would prefer to minimize the number of data structures this program
            #uses. Searching linearly through the user's short list of addresses doesn't take very long anyway.
            configSections = config.sections()
            for addressInKeysFile in configSections:
                if addressInKeysFile <> 'bitmessagesettings':
                    status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
                    if hash == key:
                        toAddress = addressInKeysFile
                        toLabel = config.get(addressInKeysFile, 'label')
                        if toLabel == '':
                            toLabel = addressInKeysFile
            # Encoding type 2 embeds 'Subject:...\nBody:...'; type 1 is a bare body.
            if messageEncodingType == 2:
                bodyPositionIndex = string.find(message,'\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex+6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                body = message
                subject = ''
            elif messageEncodingType == 0:
                print 'messageEncodingType == 0. Doing nothing with the message. They probably just sent it so that we would store their public key or send their ack data for them.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''
            if messageEncodingType <> 0:
                sqlLock.acquire()
                t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
            #If we are behaving as an API then we might need to run an outside command to let some program know that a new message has arrived.
            if safeConfigGetBoolean('bitmessagesettings','apienabled'):
                try:
                    apiNotifyPath = config.get('bitmessagesettings','apinotifypath')
                except:
                    apiNotifyPath = ''
                if apiNotifyPath != '':
                    call([apiNotifyPath, "newMessage"])
            #Let us now check and see whether our receiving address is behaving as a mailing list
            if safeConfigGetBoolean(toAddress,'mailinglist'):
                try:
                    mailingListName = config.get(toAddress, 'mailinglistname')
                except:
                    mailingListName = ''
                #Let us send out this message as a broadcast
                subject = self.addMailingListNameToSubject(subject,mailingListName)
                #Let us now send this message out as a broadcast
                message = 'Message ostensibly from ' + fromAddress + ':\n\n' + body
                fromAddress = toAddress #The fromAddress for the broadcast is the toAddress (my address) for the msg message we are currently processing.
                ackdata = OpenSSL.rand(32) #We don't actually need the ackdata for acknowledgement since this is a broadcast message but we can use it to update the user interface when the POW is done generating.
                toAddress = '[Broadcast subscribers]'
                ripe = ''
                sqlLock.acquire()
                t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'broadcastpending',1,1,'sent')
                sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),toAddress,'[Broadcast subscribers]',fromAddress,subject,message,ackdata)
                workerQueue.put(('sendbroadcast',(fromAddress,subject,message)))
        #Now let's consider sending the acknowledgement. We'll need to make sure that our client will properly process the ackData; if the packet is malformed, we could clear out self.data and an attacker could use that behavior to determine that we were capable of decoding this message.
        # NOTE(review): ack handling sits outside the blockMessage guard here —
        # reconstructed from flattened source; confirm against upstream history.
        ackDataValidThusFar = True
        if len(ackData) < 24:
            print 'The length of ackData is unreasonably short. Not sending ackData.'
            ackDataValidThusFar = False
        elif ackData[0:4] != '\xe9\xbe\xb4\xd9':
            print 'Ackdata magic bytes were wrong. Not sending ackData.'
            ackDataValidThusFar = False
        if ackDataValidThusFar:
            ackDataPayloadLength, = unpack('>L',ackData[16:20])
            if len(ackData)-24 != ackDataPayloadLength:
                print 'ackData payload length doesn\'t match the payload length specified in the header. Not sending ackdata.'
                ackDataValidThusFar = False
        if ackDataValidThusFar:
            print 'ackData is valid. Will process it.'
            self.ackDataThatWeHaveYetToSend.append(ackData) #When we have processed all data, the processData function will pop the ackData out and process it as if it is a message received from our peer.
        #Display timing data
        timeRequiredToAttemptToDecryptMessage = time.time()- self.messageProcessingStartTime
        successfullyDecryptMessageTimings.append(timeRequiredToAttemptToDecryptMessage)
        sum = 0
        for item in successfullyDecryptMessageTimings:
            sum += item
        printLock.acquire()
        print 'Time to decrypt this message successfully:', timeRequiredToAttemptToDecryptMessage
        print 'Average time for all message decryption successes since startup:', sum / len(successfullyDecryptMessageTimings)
        printLock.release()
def addMailingListNameToSubject(self,subject,mailingListName):
    """Return *subject* prefixed with '[mailingListName] '.

    A leading 'Re:'/'RE:' is stripped first, and the prefix is not added
    again if the bracketed list name already appears in the subject.
    """
    cleaned = subject.strip()
    # Drop a reply prefix so the list tag lands at the front.
    if cleaned[:3] in ('Re:', 'RE:'):
        cleaned = cleaned[3:].strip()
    tag = '[' + mailingListName + ']'
    if tag in cleaned:
        return cleaned
    return tag + ' ' + cleaned
#We have received a pubkey
def recpubkey(self):
    """Handle a received 'pubkey' object: validate POW, embedded time and
    stream number, record it in the inventory, relay it to peers, then
    parse/store it via processpubkey(). Sleeps afterwards so processing
    takes a roughly constant 0.2 seconds (timing-attack mitigation)."""
    self.pubkeyProcessingStartTime = time.time()
    if self.payloadLength < 146: #sanity check
        return
    #We must check to make sure the proof of work is sufficient.
    if not self.isProofOfWorkSufficient():
        print 'Proof of work in pubkey message insufficient.'
        return
    readPosition = 24 #for the message header
    readPosition += 8 #for the nonce
    embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
    if embeddedTime < int(time.time())-lengthOfTimeToHoldOnToAllPubkeys-86400: #If the pubkey is more than a month old then reject it. (the 86400 is included to give an extra day of wiggle-room. If the wiggle-room is actually of any use, everyone on the network will delete this pubkey from their database the next time the cleanerThread cleans anyway- except for the node that actually wants the pubkey.)
        printLock.acquire()
        print 'The embedded time in this pubkey message is too old. Ignoring. Embedded time is:', embeddedTime
        printLock.release()
        return
    if embeddedTime > int(time.time()) + 10800:  # 3 hours of clock-skew tolerance
        printLock.acquire()
        print 'The embedded time in this pubkey message more than several hours in the future. This is irrational. Ignoring message.'
        printLock.release()
        return
    readPosition += 4 #for the time
    addressVersion, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
    readPosition += varintLength
    streamNumber, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
    readPosition += varintLength
    if self.streamNumber != streamNumber:
        print 'stream number embedded in this pubkey doesn\'t match our stream number. Ignoring.'
        return
    inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
    # Deduplicate against both the in-memory inventory and the SQL inventory.
    inventoryLock.acquire()
    if inventoryHash in inventory:
        print 'We have already received this pubkey. Ignoring it.'
        inventoryLock.release()
        return
    elif isInSqlInventory(inventoryHash):
        print 'We have already received this pubkey (it is stored on disk in the SQL inventory). Ignoring it.'
        inventoryLock.release()
        return
    objectType = 'pubkey'
    inventory[inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], int(time.time()))
    inventoryLock.release()
    self.broadcastinv(inventoryHash)
    self.emit(SIGNAL("incrementNumberOfPubkeysProcessed()"))
    self.processpubkey()
    lengthOfTimeWeShouldUseToProcessThisMessage = .2
    sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.pubkeyProcessingStartTime)
    if sleepTime > 0:
        #printLock.acquire()
        #print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
        #printLock.release()
        time.sleep(sleepTime)
    #printLock.acquire()
    #print 'Total pubkey processing time:', time.time()- self.pubkeyProcessingStartTime, 'seconds.'
    #printLock.release()
def processpubkey(self):
    """Parse a validated 'pubkey' object and store it in the pubkeys table.

    Only version 2 addresses are handled by live code (versions 1 and >=3
    are rejected; the disabled string at the bottom is the old RSA/v1
    handler kept as reference text). After storing, the worker thread is
    notified in case we were awaiting this pubkey to send a queued message.
    """
    readPosition = 24 #for the message header
    readPosition += 8 #for the nonce
    embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
    readPosition += 4 #for the time
    addressVersion, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
    readPosition += varintLength
    streamNumber, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
    readPosition += varintLength
    if addressVersion == 0:
        print '(Within processpubkey) addressVersion of 0 doesn\'t make sense.'
        return
    if addressVersion >= 3 or addressVersion == 1:
        printLock.acquire()
        print 'This version of Bitmessage cannot handle version', addressVersion,'addresses.'
        printLock.release()
        return
    if addressVersion == 2:
        if self.payloadLength < 146: #sanity check. This is the minimum possible length.
            print 'payloadLength less than 146. Sanity check failed.'
            return
        bitfieldBehaviors = self.data[readPosition:readPosition+4]
        readPosition += 4
        publicSigningKey = self.data[readPosition:readPosition+64]
        #Is it possible for a public key to be invalid such that trying to encrypt or sign with it will cause an error? If it is, we should probably test these keys here.
        readPosition += 64
        publicEncryptionKey = self.data[readPosition:readPosition+64]
        if len(publicEncryptionKey) < 64:
            print 'publicEncryptionKey length less than 64. Sanity check failed.'
            return
        # The address RIPE hash is RIPEMD160(SHA512('\x04'+signkey+'\x04'+enckey)).
        sha = hashlib.new('sha512')
        sha.update('\x04'+publicSigningKey+'\x04'+publicEncryptionKey)
        ripeHasher = hashlib.new('ripemd160')
        ripeHasher.update(sha.digest())
        ripe = ripeHasher.digest()
        printLock.acquire()
        print 'within recpubkey, addressVersion:', addressVersion, ', streamNumber:', streamNumber
        print 'ripe', ripe.encode('hex')
        print 'publicSigningKey in hex:', publicSigningKey.encode('hex')
        print 'publicEncryptionKey in hex:', publicEncryptionKey.encode('hex')
        printLock.release()
        t = (ripe,)
        sqlLock.acquire()
        sqlSubmitQueue.put('''SELECT usedpersonally FROM pubkeys WHERE hash=? AND usedpersonally='yes' ''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        if queryreturn != []: #if this pubkey is already in our database and if we have used it personally:
            print 'We HAVE used this pubkey personally. Updating time.'
            t = (ripe,True,self.data[24:24+self.payloadLength],embeddedTime,'yes')
            sqlLock.acquire()
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            # NOTE(review): empty lock acquire/release pair — a print statement
            # was apparently removed; harmless but dead.
            printLock.acquire()
            printLock.release()
            workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
        else:
            print 'We have NOT used this pubkey personally. Inserting in database.'
            t = (ripe,True,self.data[24:24+self.payloadLength],embeddedTime,'no') #This will also update the embeddedTime.
            sqlLock.acquire()
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            # NOTE(review): empty lock acquire/release pair as above.
            printLock.acquire()
            printLock.release()
            workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
    #This code which deals with old RSA addresses will soon be removed.
    # NOTE(review): the following triple-quoted string is disabled legacy RSA
    # code, kept here only as reference text. It is never executed.
    """elif addressVersion == 1:
nLength, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
readPosition += varintLength
nString = self.data[readPosition:readPosition+nLength]
readPosition += nLength
eLength, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
readPosition += varintLength
eString = self.data[readPosition:readPosition+eLength]
readPosition += eLength
sha = hashlib.new('sha512')
sha.update(nString+eString)
ripeHasher = hashlib.new('ripemd160')
ripeHasher.update(sha.digest())
ripe = ripeHasher.digest()
print 'within recpubkey, addressVersion', addressVersion
print 'streamNumber', streamNumber
print 'ripe', repr(ripe)
print 'n=', convertStringToInt(nString)
print 'e=', convertStringToInt(eString)
t = (ripe,)
sqlLock.acquire()
sqlSubmitQueue.put('''SELECT usedpersonally FROM pubkeys WHERE hash=? AND usedpersonally='yes' ''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn != []: #if this pubkey is already in our database and if we have used it personally:
print 'We HAVE used this pubkey personally. Updating time.'
t = (ripe,True,self.data[24:24+self.payloadLength],int(time.time()),'yes')
sqlLock.acquire()
sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
printLock.acquire()
print 'added foreign pubkey into our database'
printLock.release()
workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
else:
print 'We have NOT used this pubkey personally. Inserting in database.'
t = (ripe,True,self.data[24:24+self.payloadLength],int(time.time()),'no')
sqlLock.acquire()
sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
printLock.acquire()
print 'added foreign pubkey into our database'
printLock.release()
workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))"""
    #We have received a getpubkey message
    def recgetpubkey(self):
        """Process an incoming getpubkey request.

        Validates the proof of work, embedded time and stream number, records
        the object in the inventory and forwards it to peers.  If we have the
        requested pubkey cached we re-advertise it; if the requested ripe hash
        is one of our own EC addresses we ask the worker thread to compute the
        POW for our pubkey and broadcast it.
        """
        if not self.isProofOfWorkSufficient():
            print 'Proof of work in getpubkey message insufficient.'
            return
        #Bytes 24-31 hold the POW nonce; the 4-byte embedded time follows it.
        embeddedTime, = unpack('>I',self.data[32:36])
        #Reject messages dated more than 3 hours (10800 s) into the future...
        if embeddedTime > int(time.time())+10800:
            print 'The time in this getpubkey message is too new. Ignoring it. Time:', embeddedTime
            return
        #...or older than the maximum object age we are willing to accept.
        if embeddedTime < int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept:
            print 'The time in this getpubkey message is too old. Ignoring it. Time:', embeddedTime
            return
        addressVersionNumber, addressVersionLength = decodeVarint(self.data[36:42])
        streamNumber, streamNumberLength = decodeVarint(self.data[36+addressVersionLength:42+addressVersionLength])
        if streamNumber <> self.streamNumber:
            print 'The streamNumber', streamNumber, 'doesn\'t match our stream number:', self.streamNumber
            return
        #The inventory hash covers the whole payload (everything after the
        #24-byte message header).
        inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
        inventoryLock.acquire()
        if inventoryHash in inventory:
            print 'We have already received this getpubkey request. Ignoring it.'
            inventoryLock.release()
            return
        elif isInSqlInventory(inventoryHash):
            print 'We have already received this getpubkey request (it is stored on disk in the SQL inventory). Ignoring it.'
            inventoryLock.release()
            return
        #Remember that this peer already has the object so we don't advertise
        #it back to them later.
        self.objectsOfWhichThisRemoteNodeIsAlreadyAware[inventoryHash] = 0
        objectType = 'getpubkey'
        inventory[inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
        inventoryLock.release()
        #This getpubkey request is valid so far. Forward to peers.
        self.broadcastinv(inventoryHash)
        #Only address version 2 is answerable here: 0 is nonsensical, 1 (the
        #old RSA format) is no longer supported, and anything above 2 is too
        #new for this version of the software.
        if addressVersionNumber == 0:
            print 'The addressVersionNumber of the pubkey request is zero. That doesn\'t make any sense. Ignoring it.'
            return
        elif addressVersionNumber == 1:
            print 'The addressVersionNumber of the pubkey request is 1 which isn\'t supported anymore. Ignoring it.'
            return
        elif addressVersionNumber > 2:
            print 'The addressVersionNumber of the pubkey request is too high. Can\'t understand. Ignoring it.'
            return
        #The 20-byte ripe hash of the requested address follows the version
        #and stream varints.
        print 'the hash requested in this getpubkey request is:', self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength].encode('hex')
        sqlLock.acquire()
        t = (self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength],int(time.time())-lengthOfTimeToHoldOnToAllPubkeys) #this prevents SQL injection
        sqlSubmitQueue.put('''SELECT hash, transmitdata, time FROM pubkeys WHERE hash=? AND havecorrectnonce=1 AND time>?''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        if queryreturn != []:
            for row in queryreturn:
                hash, payload, timeEncodedInPubkey = row
                printLock.acquire()
                print 'We have the requested pubkey stored in our database of pubkeys. Sending it.'
                printLock.release()
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'pubkey'
                inventory[inventoryHash] = (objectType, self.streamNumber, payload, timeEncodedInPubkey)#If the time embedded in this pubkey is more than 3 days old then this object isn't going to last very long in the inventory- the cleanerThread is going to come along and move it from the inventory in memory to the SQL inventory and then delete it from the SQL inventory. It should still find its way back to the original requestor if he is online however.
                self.broadcastinv(inventoryHash)
        else: #the pubkey is not in our database of pubkeys. Let's check if the requested key is ours (which would mean we should do the POW, put it in the pubkey table, and broadcast out the pubkey.)
            if self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength] in myECAddressHashes: #if this address hash is one of mine
                printLock.acquire()
                print 'Found getpubkey-requested-hash in my list of EC hashes. Telling Worker thread to do the POW for a pubkey message and send it out.'
                printLock.release()
                myAddress = encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength])
                workerQueue.put(('doPOWForMyV2Pubkey',myAddress))
                #This code which deals with old RSA addresses will soon be removed.
                #NOTE(review): the triple-quoted string below is disabled
                #legacy RSA-handling code kept as a bare string literal so the
                #following 'else:' still parses; it is never executed.
                """elif self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength] in myRSAAddressHashes:
            print 'Found getpubkey requested hash in my list of RSA hashes.'
            payload = '\x00\x00\x00\x01' #bitfield of features supported by me (see the wiki).
            payload += self.data[36:36+addressVersionLength+streamNumberLength]
            #print int(config.get(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'n'))
            nString = convertIntToString(int(config.get(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'n')))
            eString = convertIntToString(config.getint(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'e'))
            payload += encodeVarint(len(nString))
            payload += nString
            payload += encodeVarint(len(eString))
            payload += eString
            nonce = 0
            trialValue = 99999999999999999999
            target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
            print '(For pubkey message) Doing proof of work...'
            initialHash = hashlib.sha512(payload).digest()
            while trialValue > target:
                nonce += 1
                trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
            print '(For pubkey message) Found proof of work', trialValue, 'Nonce:', nonce
            payload = pack('>Q',nonce) + payload
            t = (self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength],True,payload,int(time.time())+1209600) #after two weeks (1,209,600 seconds), we may remove our own pub key from our database. It will be regenerated and put back in the database if it is requested.
            sqlLock.acquire()
            #** pubkeys insert query not yet fixed! **
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?)''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            inventoryHash = calculateInventoryHash(payload)
            objectType = 'pubkey'
            inventory[inventoryHash] = (objectType, self.streamNumber, payload, int(time.time()))
            self.broadcastinv(inventoryHash) """
            else:
                printLock.acquire()
                print 'This getpubkey request is not for any of my keys.'
                printLock.release()
    #We have received an inv message
    def recinv(self):
        """Handle an incoming inv message advertising object hashes.

        For a single-item inv we request the object from this peer directly;
        for a multi-item inv we only record the hashes, and the receive loop
        later requests random objects so that overlapping invs from several
        peers lead to getdata requests being spread across them.
        """
        numberOfItemsInInv, lengthOfVarint = decodeVarint(self.data[24:34])
        if numberOfItemsInInv == 1: #we'll just request this data from the person who advertised the object.
            for i in range(numberOfItemsInInv):
                #Each inventory entry is a 32-byte hash starting right after
                #the varint item count.
                if len(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]) == 32: #The length of an inventory hash should be 32. If it isn't 32 then the remote node is either badly programmed or behaving nefariously.
                    self.objectsOfWhichThisRemoteNodeIsAlreadyAware[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
                    if self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)] in inventory:
                        printLock.acquire()
                        print 'Inventory (in memory) has inventory item already.'
                        printLock.release()
                    elif isInSqlInventory(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]):
                        print 'Inventory (SQL on disk) has inventory item already.'
                    else:
                        self.sendgetdata(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)])
        else:
            print 'inv message lists', numberOfItemsInInv, 'objects.'
            for i in range(numberOfItemsInInv): #upon finishing dealing with an incoming message, the receiveDataThread will request a random object from the peer. This way if we get multiple inv messages from multiple peers which list mostly the same objects, we will make getdata requests for different random objects from the various peers.
                if len(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]) == 32: #The length of an inventory hash should be 32. If it isn't 32 then the remote node is either badly programmed or behaving nefariously.
                    self.objectsOfWhichThisRemoteNodeIsAlreadyAware[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
                    self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
#Send a getdata message to our peer to request the object with the given hash
def sendgetdata(self,hash):
print 'sending getdata to retrieve object with hash:', hash.encode('hex')
payload = '\x01' + hash
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getdata\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
try:
self.sock.send(headerData + payload)
except Exception, err:
if not 'Bad file descriptor' in err:
printLock.acquire()
sys.stderr.write('sock.send error: %s\n' % err)
printLock.release()
    #We have received a getdata request from our peer
    def recgetdata(self):
        """Answer a getdata request by sending each requested object.

        Objects are looked up first in the in-memory inventory dict, then in
        the SQL inventory on disk.  Unknown hashes are logged and skipped.
        """
        value, lengthOfVarint = decodeVarint(self.data[24:34])
        #print 'Number of items in getdata request:', value
        try:
            for i in xrange(value):
                #Each requested hash is 32 bytes, packed back-to-back after
                #the varint item count.
                hash = self.data[24+lengthOfVarint+(i*32):56+lengthOfVarint+(i*32)]
                printLock.acquire()
                print 'received getdata request for item:', hash.encode('hex')
                printLock.release()
                #print 'inventory is', inventory
                if hash in inventory:
                    objectType, streamNumber, payload, receivedTime = inventory[hash]
                    self.sendData(objectType,payload)
                else:
                    t = (hash,)
                    sqlLock.acquire()
                    sqlSubmitQueue.put('''select objecttype, payload from inventory where hash=?''')
                    sqlSubmitQueue.put(t)
                    queryreturn = sqlReturnQueue.get()
                    sqlLock.release()
                    if queryreturn <> []:
                        for row in queryreturn:
                            objectType, payload = row
                            self.sendData(objectType,payload)
                    else:
                        print 'Someone asked for an object with a getdata which is not in either our memory inventory or our SQL inventory. That shouldn\'t have happened.'
        except:
            #Deliberate best-effort: a malformed request (e.g. claiming 10
            #items but including only 5 hashes) must not kill this thread.
            pass #someone is probably trying to cause a program error by, for example, making a request for 10 items but only including the hashes for 5.
#Our peer has requested (in a getdata message) that we send an object.
def sendData(self,objectType,payload):
if objectType == 'pubkey':
print 'sending pubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'pubkey\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'getpubkey':
print 'sending getpubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getpubkey\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'msg':
print 'sending msg'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'broadcast':
print 'sending broadcast'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'broadcast\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'getpubkey' or objectType == 'pubkeyrequest':
print 'sending getpubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getpubkey\x00\x00\x00' #version command
headerData += pack('>L',len(payload)) #payload length
headerData += hashlib.sha512(payload).digest()[0:4]
self.sock.send(headerData + payload)
else:
sys.stderr.write('Error: sendData has been asked to send a strange objectType: %s\n' % str(objectType))
#Send an inv message with just one hash to all of our peers
def broadcastinv(self,hash):
printLock.acquire()
print 'broadcasting inv with hash:', hash.encode('hex')
printLock.release()
broadcastToSendDataQueues((self.streamNumber, 'sendinv', hash))
#We have received an addr message.
def recaddr(self):
listOfAddressDetailsToBroadcastToPeers = []
numberOfAddressesIncluded = 0
numberOfAddressesIncluded, lengthOfNumberOfAddresses = decodeVarint(self.data[24:29])
if verbose >= 1:
printLock.acquire()
print 'addr message contains', numberOfAddressesIncluded, 'IP addresses.'
printLock.release()
#print 'lengthOfNumberOfAddresses', lengthOfNumberOfAddresses
if numberOfAddressesIncluded > 1000 or numberOfAddressesIncluded == 0:
return
if self.payloadLength < lengthOfNumberOfAddresses + (34 * numberOfAddressesIncluded):
print 'addr message does not contain enough data. Ignoring.'
return
needToWriteKnownNodesToDisk = False
for i in range(0,numberOfAddressesIncluded):
try:
if self.data[40+lengthOfNumberOfAddresses+(34*i):52+lengthOfNumberOfAddresses+(34*i)] != '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
printLock.acquire()
print 'Skipping IPv6 address.', repr(self.data[40+lengthOfNumberOfAddresses+(34*i):56+lengthOfNumberOfAddresses+(34*i)])
printLock.release()
continue
#print repr(self.data[6+lengthOfNumberOfAddresses+(34*i):18+lengthOfNumberOfAddresses+(34*i)])
except Exception, err:
if verbose >= 2:
printLock.acquire()
sys.stderr.write('ERROR TRYING TO UNPACK recaddr (to test for an IPv6 address). Message: %s\n' % str(err))
printLock.release()
break #giving up on unpacking any more. We should still be connected however.
try:
recaddrStream, = unpack('>I',self.data[28+lengthOfNumberOfAddresses+(34*i):32+lengthOfNumberOfAddresses+(34*i)])
except Exception, err:
if verbose >= 2:
printLock.acquire()
sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrStream). Message: %s\n' % str(err))
printLock.release()
break #giving up on unpacking any more. We should still be connected however.
if recaddrStream == 0:
continue
if recaddrStream != self.streamNumber and recaddrStream != (self.streamNumber * 2) and recaddrStream != ((self.streamNumber * 2) + 1): #if the embedded stream number is not in my stream or either of my child streams then ignore it. Someone might be trying funny business.
continue
try:
recaddrServices, = unpack('>Q',self.data[32+lengthOfNumberOfAddresses+(34*i):40+lengthOfNumberOfAddresses+(34*i)])
except Exception, err:
if verbose >= 2:
printLock.acquire()
sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrServices). Message: %s\n' % str(err))
printLock.release()
break #giving up on unpacking any more. We should still be connected however.
try:
recaddrPort, = unpack('>H',self.data[56+lengthOfNumberOfAddresses+(34*i):58+lengthOfNumberOfAddresses+(34*i)])
except Exception, err:
if verbose >= 2:
printLock.acquire()
sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrPort). Message: %s\n' % str(err))
printLock.release()
break #giving up on unpacking any more. We should still be connected however.
#print 'Within recaddr(): IP', recaddrIP, ', Port', recaddrPort, ', i', i
hostFromAddrMessage = socket.inet_ntoa(self.data[52+lengthOfNumberOfAddresses+(34*i):56+lengthOfNumberOfAddresses+(34*i)])
#print 'hostFromAddrMessage', hostFromAddrMessage
if self.data[52+lengthOfNumberOfAddresses+(34*i)] == '\x7F':
print 'Ignoring IP address in loopback range:', hostFromAddrMessage
continue
if self.data[52+lengthOfNumberOfAddresses+(34*i)] == '\x0A':
print 'Ignoring IP address in private range:', hostFromAddrMessage
continue
if self.data[52+lengthOfNumberOfAddresses+(34*i):52+lengthOfNumberOfAddresses+(34*i)+2] == '\xC0A8':
print 'Ignoring IP address in private range:', hostFromAddrMessage
continue
timeSomeoneElseReceivedMessageFromThisNode, = unpack('>I',self.data[24+lengthOfNumberOfAddresses+(34*i):28+lengthOfNumberOfAddresses+(34*i)]) #This is the 'time' value in the received addr message.
if recaddrStream not in knownNodes: #knownNodes is a dictionary of dictionaries with one outer dictionary for each stream. If the outer stream dictionary doesn't exist yet then we must make it.
knownNodes[recaddrStream] = {}
if hostFromAddrMessage not in knownNodes[recaddrStream]:
if len(knownNodes[recaddrStream]) < 20000 and timeSomeoneElseReceivedMessageFromThisNode > (int(time.time())-10800) and timeSomeoneElseReceivedMessageFromThisNode < (int(time.time()) + 10800): #If we have more than 20000 nodes in our list already then just forget about adding more. Also, make sure that the time that someone else received a message from this node is within three hours from now.
knownNodes[recaddrStream][hostFromAddrMessage] = (recaddrPort, timeSomeoneElseReceivedMessageFromThisNode)
print 'added new node', hostFromAddrMessage, 'to knownNodes in stream', recaddrStream
needToWriteKnownNodesToDisk = True
hostDetails = (timeSomeoneElseReceivedMessageFromThisNode, recaddrStream, recaddrServices, hostFromAddrMessage, recaddrPort)
listOfAddressDetailsToBroadcastToPeers.append(hostDetails)
else:
PORT, timeLastReceivedMessageFromThisNode = knownNodes[recaddrStream][hostFromAddrMessage]#PORT in this case is either the port we used to connect to the remote node, or the port that was specified by someone else in a past addr message.
if (timeLastReceivedMessageFromThisNode < timeSomeoneElseReceivedMessageFromThisNode) and (timeSomeoneElseReceivedMessageFromThisNode < int(time.time())):
knownNodes[recaddrStream][hostFromAddrMessage] = (PORT, timeSomeoneElseReceivedMessageFromThisNode)
if PORT != recaddrPort:
print 'Strange occurance: The port specified in an addr message', str(recaddrPort),'does not match the port',str(PORT),'that this program (or some other peer) used to connect to it',str(hostFromAddrMessage),'. Perhaps they changed their port or are using a strange NAT configuration.'
if needToWriteKnownNodesToDisk: #Runs if any nodes were new to us. Also, share those nodes with our peers.
output = open(appdata + 'knownnodes.dat', 'wb')
pickle.dump(knownNodes, output)
output.close()
self.broadcastaddr(listOfAddressDetailsToBroadcastToPeers)
printLock.acquire()
print 'knownNodes currently has', len(knownNodes[self.streamNumber]), 'nodes for this stream.'
printLock.release()
#Function runs when we want to broadcast an addr message to all of our peers. Runs when we learn of nodes that we didn't previously know about and want to share them with our peers.
def broadcastaddr(self,listOfAddressDetailsToBroadcastToPeers):
numberOfAddressesInAddrMessage = len(listOfAddressDetailsToBroadcastToPeers)
payload = ''
for hostDetails in listOfAddressDetailsToBroadcastToPeers:
timeLastReceivedMessageFromThisNode, streamNumber, services, host, port = hostDetails
payload += pack('>I',timeLastReceivedMessageFromThisNode)
payload += pack('>I',streamNumber)
payload += pack('>q',services) #service bit flags offered by this node
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(host)
payload += pack('>H',port)#remote port
payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
datatosend = '\xE9\xBE\xB4\xD9addr\x00\x00\x00\x00\x00\x00\x00\x00'
datatosend = datatosend + pack('>L',len(payload)) #payload length
datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
datatosend = datatosend + payload
if verbose >= 2:
printLock.acquire()
print 'Broadcasting addr with', numberOfAddressesInAddrMessage, 'entries.'
printLock.release()
broadcastToSendDataQueues((self.streamNumber, 'sendaddr', datatosend))
#Send a big addr message to our peer
def sendaddr(self):
addrsInMyStream = {}
addrsInChildStreamLeft = {}
addrsInChildStreamRight = {}
#print 'knownNodes', knownNodes
#We are going to share a maximum number of 1000 addrs with our peer. 500 from this stream, 250 from the left child stream, and 250 from the right child stream.
if len(knownNodes[self.streamNumber]) > 0:
for i in range(500):
random.seed()
HOST, = random.sample(knownNodes[self.streamNumber], 1)
if self.isHostInPrivateIPRange(HOST):
continue
addrsInMyStream[HOST] = knownNodes[self.streamNumber][HOST]
if len(knownNodes[self.streamNumber*2]) > 0:
for i in range(250):
random.seed()
HOST, = random.sample(knownNodes[self.streamNumber*2], 1)
if self.isHostInPrivateIPRange(HOST):
continue
addrsInChildStreamLeft[HOST] = knownNodes[self.streamNumber*2][HOST]
if len(knownNodes[(self.streamNumber*2)+1]) > 0:
for i in range(250):
random.seed()
HOST, = random.sample(knownNodes[(self.streamNumber*2)+1], 1)
if self.isHostInPrivateIPRange(HOST):
continue
addrsInChildStreamRight[HOST] = knownNodes[(self.streamNumber*2)+1][HOST]
numberOfAddressesInAddrMessage = 0
payload = ''
#print 'addrsInMyStream.items()', addrsInMyStream.items()
for HOST, value in addrsInMyStream.items():
PORT, timeLastReceivedMessageFromThisNode = value
if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
numberOfAddressesInAddrMessage += 1
payload += pack('>I',timeLastReceivedMessageFromThisNode)
payload += pack('>I',self.streamNumber)
payload += pack('>q',1) #service bit flags offered by this node
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
payload += pack('>H',PORT)#remote port
for HOST, value in addrsInChildStreamLeft.items():
PORT, timeLastReceivedMessageFromThisNode = value
if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
numberOfAddressesInAddrMessage += 1
payload += pack('>I',timeLastReceivedMessageFromThisNode)
payload += pack('>I',self.streamNumber*2)
payload += pack('>q',1) #service bit flags offered by this node
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
payload += pack('>H',PORT)#remote port
for HOST, value in addrsInChildStreamRight.items():
PORT, timeLastReceivedMessageFromThisNode = value
if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
numberOfAddressesInAddrMessage += 1
payload += pack('>I',timeLastReceivedMessageFromThisNode)
payload += pack('>I',(self.streamNumber*2)+1)
payload += pack('>q',1) #service bit flags offered by this node
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
payload += pack('>H',PORT)#remote port
payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
datatosend = '\xE9\xBE\xB4\xD9addr\x00\x00\x00\x00\x00\x00\x00\x00'
datatosend = datatosend + pack('>L',len(payload)) #payload length
datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
datatosend = datatosend + payload
if verbose >= 2:
printLock.acquire()
print 'Sending addr with', numberOfAddressesInAddrMessage, 'entries.'
printLock.release()
self.sock.send(datatosend)
    #We have received a version message
    def recversion(self):
        """Handle the peer's version message and continue the handshake.

        Parses the remote protocol version, our external IP as seen by the
        peer, the peer's incoming port, useragent and stream number.
        Disconnects if the peer is in a stream other than 1 or if the nonce
        shows we've connected to ourselves; otherwise records the peer in
        knownNodes, sends verack, and (for inbound connections) our own
        version message.
        """
        if self.payloadLength < 83:
            #This version message is unreasonably short. Forget it.
            return
        elif not self.verackSent: #There is a potential exploit if we don't check to make sure that we have not already received and accepted a version message: An attacker could connect directly to us, send a msg message with the ackdata set to an invalid version message which would cause us to close the connection to the attacker thus proving that we were able to decode the message. Checking the connectionIsOrWasFullyEstablished variable would also suffice.
            self.remoteProtocolVersion, = unpack('>L',self.data[24:28])
            #print 'remoteProtocolVersion', self.remoteProtocolVersion
            #Bytes 64-67 hold the IPv4 part of the address the peer sees us as.
            self.myExternalIP = socket.inet_ntoa(self.data[64:68])
            #print 'myExternalIP', self.myExternalIP
            self.remoteNodeIncomingPort, = unpack('>H',self.data[94:96])
            #print 'remoteNodeIncomingPort', self.remoteNodeIncomingPort
            #print 'self.data[96:104]', repr(self.data[96:104])
            #print 'eightBytesOfRandomDataUsedToDetectConnectionsToSelf', repr(eightBytesOfRandomDataUsedToDetectConnectionsToSelf)
            useragentLength, lengthOfUseragentVarint = decodeVarint(self.data[104:108])
            readPosition = 104 + lengthOfUseragentVarint
            useragent = self.data[readPosition:readPosition+useragentLength]
            readPosition += useragentLength
            numberOfStreamsInVersionMessage, lengthOfNumberOfStreamsInVersionMessage = decodeVarint(self.data[readPosition:])
            readPosition += lengthOfNumberOfStreamsInVersionMessage
            #Only the first advertised stream is used; this software supports
            #one stream per connection.
            self.streamNumber, lengthOfRemoteStreamNumber = decodeVarint(self.data[readPosition:])
            printLock.acquire()
            print 'Remote node useragent:', useragent, ' stream number:', self.streamNumber
            printLock.release()
            if self.streamNumber != 1:
                self.sock.close()
                printLock.acquire()
                print 'Closed connection to', self.HOST, 'because they are interested in stream', self.streamNumber,'.'
                printLock.release()
                self.data = ''
                return
            #If this was an incoming connection, then the sendData thread doesn't know the stream. We have to set it.
            if not self.initiatedConnection:
                broadcastToSendDataQueues((0,'setStreamNumber',(self.HOST,self.streamNumber)))
            #The peer echoing our own random nonce means we connected to ourselves.
            if self.data[96:104] == eightBytesOfRandomDataUsedToDetectConnectionsToSelf:
                self.sock.close()
                printLock.acquire()
                print 'Closing connection to myself: ', self.HOST
                printLock.release()
                self.data = ''
                return
            knownNodes[self.streamNumber][self.HOST] = (self.remoteNodeIncomingPort, int(time.time()))
            output = open(appdata + 'knownnodes.dat', 'wb')
            pickle.dump(knownNodes, output)
            output.close()
            #I've commented out this code because it should be up to the newer node to decide whether their protocol version is incompatiable with the remote node's version.
            '''if self.remoteProtocolVersion > 1:
            print 'The remote node''s protocol version is too new for this program to understand. Disconnecting. It is:', self.remoteProtocolVersion
            self.sock.close()
            self.selfInitiatedConnectionList.remove(self)
        else:'''
            self.sendverack()
            if self.initiatedConnection == False:
                self.sendversion()
    #Sends a version message
    def sendversion(self):
        """Build and send our version message to the connected peer.

        NOTE(review): this is nearly identical to
        sendDataThread.sendVersionMessage(); changes here likely need to be
        mirrored there.
        """
        global softwareVersion
        payload = ''
        payload += pack('>L',1) #protocol version.
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += pack('>q',int(time.time()))
        payload += pack('>q',1) #boolservices offered by the remote node. This data is ignored by the remote host because how could We know what Their services are without them telling us?
        #IPv4-mapped IPv6 form of the remote address, then its port.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(self.HOST)
        payload += pack('>H',self.PORT)#remote IPv6 and port
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L',2130706433) # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
        payload += pack('>H',config.getint('bitmessagesettings', 'port'))#my external IPv6 and port
        random.seed()
        #Nonce lets both sides detect a connection to themselves.
        payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
        userAgent = '/PyBitmessage:' + softwareVersion + '/' #Length of userAgent must be less than 253.
        payload += pack('>B',len(userAgent)) #user agent string length. If the user agent is more than 252 bytes long, this code isn't going to work.
        payload += userAgent
        payload += encodeVarint(1) #The number of streams about which I care. PyBitmessage currently only supports 1.
        payload += encodeVarint(self.streamNumber)
        datatosend = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
        datatosend = datatosend + 'version\x00\x00\x00\x00\x00' #version command
        datatosend = datatosend + pack('>L',len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload
        printLock.acquire()
        print 'Sending version message'
        printLock.release()
        self.sock.send(datatosend)
        #self.versionSent = 1
#Sends a verack message
def sendverack(self):
printLock.acquire()
print 'Sending verack'
printLock.release()
self.sock.sendall('\xE9\xBE\xB4\xD9\x76\x65\x72\x61\x63\x6B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
#cf 83 e1 35
self.verackSent = True
if self.verackReceived == True:
self.connectionFullyEstablished()
def isHostInPrivateIPRange(self,host):
if host[:3] == '10.':
return True
if host[:4] == '172.':
if host[6] == '.':
if int(host[4:6]) >= 16 and int(host[4:6]) <= 31:
return True
if host[:8] == '192.168.':
return True
return False
#Every connection to a peer has a sendDataThread (and also a receiveDataThread).
class sendDataThread(QThread):
    def __init__(self, parent = None):
        """Create this connection's send thread and register its mailbox
        queue in the global sendDataQueues list so that broadcast commands
        (sendinv, sendaddr, shutdown, ...) reach it."""
        QThread.__init__(self, parent)
        #Per-thread command queue; other threads put (stream, command, data)
        #tuples here via broadcastToSendDataQueues.
        self.mailbox = Queue.Queue()
        sendDataQueues.append(self.mailbox)
        #Buffer for outgoing raw bytes (unused until run() populates it).
        self.data = ''
    def setup(self,sock,HOST,PORT,streamNumber,objectsOfWhichThisRemoteNodeIsAlreadyAware):
        """Record the connection details for this send thread.

        sock -- the connected socket (shared with the receive thread)
        HOST, PORT -- the remote peer's address
        streamNumber -- the stream this connection serves; for inbound
            connections it may be corrected later via a 'setStreamNumber'
            mailbox command once the peer's version message arrives
        objectsOfWhichThisRemoteNodeIsAlreadyAware -- dict shared with the
            receive thread, used to avoid advertising objects back to a
            peer that already has them
        """
        self.sock = sock
        self.HOST = HOST
        self.PORT = PORT
        self.streamNumber = streamNumber
        self.lastTimeISentData = int(time.time()) #If this value increases beyond five minutes ago, we'll send a pong message to keep the connection alive.
        self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware
        printLock.acquire()
        print 'The streamNumber of this sendDataThread (ID:', id(self),') at setup() is', self.streamNumber
        printLock.release()
    def sendVersionMessage(self):
        """Build and send our version message over this thread's socket.

        Mirrors receive-side sendversion(); see the note below about keeping
        the two copies in sync.
        """
        #Note that there is another copy of this version-sending code in the receiveData class which would need to be changed if you make changes here.
        global softwareVersion
        payload = ''
        payload += pack('>L',1) #protocol version.
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += pack('>q',int(time.time()))
        payload += pack('>q',1) #boolservices of remote connection. How can I even know this for sure? This is probably ignored by the remote host.
        #IPv4-mapped IPv6 form of the remote address, then its port.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(self.HOST)
        payload += pack('>H',self.PORT)#remote IPv6 and port
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L',2130706433) # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
        payload += pack('>H',config.getint('bitmessagesettings', 'port'))#my external IPv6 and port
        random.seed()
        #Nonce lets both sides detect a connection to themselves.
        payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
        userAgent = '/PyBitmessage:' + softwareVersion + '/' #Length of userAgent must be less than 253.
        payload += pack('>B',len(userAgent)) #user agent string length. If the user agent is more than 252 bytes long, this code isn't going to work.
        payload += userAgent
        payload += encodeVarint(1) #The number of streams about which I care. PyBitmessage currently only supports 1 per connection.
        payload += encodeVarint(self.streamNumber)
        datatosend = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
        datatosend = datatosend + 'version\x00\x00\x00\x00\x00' #version command
        datatosend = datatosend + pack('>L',len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload
        printLock.acquire()
        print 'Sending version packet: ', repr(datatosend)
        printLock.release()
        self.sock.send(datatosend)
        self.versionSent = 1
    def run(self):
        """Main loop of a sendDataThread: block on this thread's mailbox
        queue and act on (deststream, command, data) tuples placed there by
        broadcastToSendDataQueues. Commands handled: 'shutdown',
        'setStreamNumber', 'sendaddr', 'sendinv', 'pong'. The thread exits
        (break) on shutdown or on any socket send failure, removing its
        mailbox from sendDataQueues first."""
        while True:
            deststream,command,data = self.mailbox.get()
            #printLock.acquire()
            #print 'sendDataThread, destream:', deststream, ', Command:', command, ', ID:',id(self), ', HOST:', self.HOST
            #printLock.release()
            # Commands addressed to stream 0 are meant for every sendDataThread.
            if deststream == self.streamNumber or deststream == 0:
                if command == 'shutdown':
                    # 'all' shuts down every thread; otherwise only the thread
                    # connected to the named host reacts.
                    if data == self.HOST or data == 'all':
                        printLock.acquire()
                        print 'sendDataThread thread (associated with', self.HOST,') ID:',id(self), 'shutting down now.'
                        self.sock.close()
                        sendDataQueues.remove(self.mailbox)
                        print 'len of sendDataQueues', len(sendDataQueues)
                        printLock.release()
                        break
                #When you receive an incoming connection, a sendDataThread is created even though you don't yet know what stream number the remote peer is interested in. They will tell you in a version message and if you too are interested in that stream then you will continue on with the connection and will set the streamNumber of this send data thread here:
                elif command == 'setStreamNumber':
                    hostInMessage, specifiedStreamNumber = data
                    if hostInMessage == self.HOST:
                        printLock.acquire()
                        print 'setting the stream number in the sendData thread (ID:',id(self), ') to', specifiedStreamNumber
                        printLock.release()
                        self.streamNumber = specifiedStreamNumber
                elif command == 'sendaddr':
                    try:
                        #To prevent some network analysis, 'leak' the data out to our peer after waiting a random amount of time unless we have a long list of messages in our queue to send.
                        random.seed()
                        time.sleep(random.randrange(0, 10))
                        self.sock.sendall(data)
                        self.lastTimeISentData = int(time.time())
                    except:
                        # Send failed: assume the connection is dead and end this thread.
                        print 'self.sock.sendall failed'
                        self.sock.close()
                        sendDataQueues.remove(self.mailbox)
                        print 'sendDataThread thread', self, 'ending now'
                        break
                elif command == 'sendinv':
                    # Only advertise objects the remote node hasn't told us about.
                    if data not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                        payload = '\x01' + data
                        headerData = '\xe9\xbe\xb4\xd9' #magic bits, slightly different from Bitcoin's magic bits.
                        headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                        headerData += pack('>L',len(payload))
                        headerData += hashlib.sha512(payload).digest()[:4]
                        #To prevent some network analysis, 'leak' the data out to our peer after waiting a random amount of time
                        random.seed()
                        time.sleep(random.randrange(0, 10))
                        try:
                            self.sock.sendall(headerData + payload)
                            self.lastTimeISentData = int(time.time())
                        except:
                            print 'self.sock.sendall failed'
                            self.sock.close()
                            sendDataQueues.remove(self.mailbox)
                            print 'sendDataThread thread', self, 'ending now'
                            break
                elif command == 'pong':
                    # Keepalive: only send if we've been quiet for ~5 minutes
                    # (socket timeout elsewhere is 10 minutes).
                    if self.lastTimeISentData < (int(time.time()) - 298):
                        #Send out a pong message to keep the connection alive.
                        printLock.acquire()
                        print 'Sending pong to', self.HOST, 'to keep connection alive.'
                        printLock.release()
                        try:
                            # Pre-built pong packet: magic + 'pong' command + zero-length payload + checksum.
                            self.sock.sendall('\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
                            self.lastTimeISentData = int(time.time())
                        except:
                            print 'self.sock.send pong failed'
                            self.sock.close()
                            sendDataQueues.remove(self.mailbox)
                            print 'sendDataThread thread', self, 'ending now'
                            break
            else:
                printLock.acquire()
                print 'sendDataThread ID:',id(self),'ignoring command', command,'because it is not in stream',deststream
                printLock.release()
#When you want to command a sendDataThread to do something, like shutdown or send some data, this function puts your data into the queues for each of the sendDataThreads. The sendDataThreads are responsible for putting their queue into (and out of) the sendDataQueues list.
def broadcastToSendDataQueues(data):
    """Hand *data* (a (deststream, command, payload) tuple) to every
    sendDataThread by placing it on each thread's mailbox queue.

    Iterate over a snapshot of sendDataQueues: sendDataThreads remove
    their own mailbox from the list when they shut down (see their run
    loops), and mutating the list while we iterate it could cause some
    queues to be skipped. Putting into a queue that was just removed is
    harmless — nothing reads it any more.
    """
    for q in sendDataQueues[:]:
        q.put(data)
def flushInventory():
    """Immediately move every entry from the in-memory inventory
    dictionary into the on-disk sql inventory table.

    The singleCleaner thread clears out old inventory entries from time
    to time; this flushes everything right now (e.g. before shutdown).
    """
    sqlLock.acquire()
    try:
        # In Python 2, .items() returns a snapshot list, so deleting keys
        # from the dictionary inside the loop is safe.
        for objectHash, storedValue in inventory.items():
            objectType, streamNumber, payload, receivedTime = storedValue
            t = (objectHash,objectType,streamNumber,payload,receivedTime)
            sqlSubmitQueue.put('''INSERT INTO inventory VALUES (?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            del inventory[objectHash]
    finally:
        # Release the lock even if a queue operation raises, so other
        # threads do not deadlock on sqlLock.
        sqlLock.release()
def isInSqlInventory(hash):
    """Return True if an object with the given inventory hash is already
    stored in the on-disk sql inventory table, False otherwise."""
    t = (hash,)
    sqlLock.acquire()
    try:
        sqlSubmitQueue.put('''select hash from inventory where hash=?''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
    finally:
        # Always release sqlLock so a failure here can't deadlock other threads.
        sqlLock.release()
    # An empty result set means the hash is not present.
    return queryreturn != []
def convertIntToString(n):
    """Convert a non-negative integer into its big-endian raw byte
    string representation (Python 2 str)."""
    # NOTE(review): __builtins__.hex only works when this module is run as
    # the main script (__builtins__ is a dict in imported modules) —
    # presumably deliberate here; confirm before refactoring.
    hexString = __builtins__.hex(n)
    if hexString.endswith('L'):
        # Strip the Python 2 long-integer suffix.
        hexString = hexString[:-1]
    hexDigits = hexString[2:]  # drop the leading '0x'
    if len(hexDigits) % 2:
        # Pad to an even number of hex digits so the codec accepts it.
        hexDigits = '0' + hexDigits
    return hexDigits.decode('hex')
def convertStringToInt(s):
    """Interpret the raw byte string s as a big-endian unsigned integer."""
    hexRepresentation = s.encode('hex')
    return int(hexRepresentation, 16)
def decodeWalletImportFormat(WIFstring):
    """Decode a base58 wallet-import-format string into the raw private
    key bytes (without the leading 0x80 byte).

    Returns "" and writes a message to stderr when the embedded 4-byte
    checksum does not match or the decoded key does not start with 0x80.
    """
    fullString = arithmetic.changebase(WIFstring,58,256)
    privkey = fullString[:-4]
    # The last four bytes must equal the first four bytes of the
    # double-SHA256 of everything before them.
    expectedChecksum = hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]
    if fullString[-4:] != expectedChecksum:
        sys.stderr.write('Major problem! When trying to decode one of your private keys, the checksum failed. Here is the PRIVATE key: %s\n' % str(WIFstring))
        return ""
    #checksum passed
    if privkey[0] != '\x80':
        sys.stderr.write('Major problem! When trying to decode one of your private keys, the checksum passed but the key doesn\'t begin with hex 80. Here is the PRIVATE key: %s\n' % str(WIFstring))
        return ""
    return privkey[1:]
def reloadMyAddressHashes():
printLock.acquire()
print 'reloading keys from keys.dat file'
printLock.release()
myRSAAddressHashes.clear()
myECAddressHashes.clear()
#myPrivateKeys.clear()
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
isEnabled = config.getboolean(addressInKeysFile, 'enabled')
if isEnabled:
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
if addressVersionNumber == 2:
privEncryptionKey = decodeWalletImportFormat(config.get(addressInKeysFile, 'privencryptionkey')).encode('hex') #returns a simple 32 bytes of information encoded in 64 Hex characters, or null if there was an error
if len(privEncryptionKey) == 64:#It is 32 bytes encoded as 64 hex characters
myECAddressHashes[hash] = highlevelcrypto.makeCryptor(privEncryptionKey)
elif addressVersionNumber == 1:
n = config.getint(addressInKeysFile, 'n')
e = config.getint(addressInKeysFile, 'e')
d = config.getint(addressInKeysFile, 'd')
p = config.getint(addressInKeysFile, 'p')
q = config.getint(addressInKeysFile, 'q')
myRSAAddressHashes[hash] = rsa.PrivateKey(n,e,d,p,q)
#This function expects that pubkey begins with \x04
def calculateBitcoinAddressFromPubkey(pubkey):
if len(pubkey)!= 65:
print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey),'bytes long rather than 65.'
return "error"
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha256')
sha.update(pubkey)
ripe.update(sha.digest())
ripeWithProdnetPrefix = '\x00' + ripe.digest()
checksum = hashlib.sha256(hashlib.sha256(ripeWithProdnetPrefix).digest()).digest()[:4]
binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
numberOfZeroBytesOnBinaryBitcoinAddress = 0
while binaryBitcoinAddress[0] == '\x00':
numberOfZeroBytesOnBinaryBitcoinAddress += 1
binaryBitcoinAddress = binaryBitcoinAddress[1:]
base58encoded = arithmetic.changebase(binaryBitcoinAddress,256,58)
return "1"*numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded
def calculateTestnetAddressFromPubkey(pubkey):
if len(pubkey)!= 65:
print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey),'bytes long rather than 65.'
return "error"
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha256')
sha.update(pubkey)
ripe.update(sha.digest())
ripeWithProdnetPrefix = '\x6F' + ripe.digest()
checksum = hashlib.sha256(hashlib.sha256(ripeWithProdnetPrefix).digest()).digest()[:4]
binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
numberOfZeroBytesOnBinaryBitcoinAddress = 0
while binaryBitcoinAddress[0] == '\x00':
numberOfZeroBytesOnBinaryBitcoinAddress += 1
binaryBitcoinAddress = binaryBitcoinAddress[1:]
base58encoded = arithmetic.changebase(binaryBitcoinAddress,256,58)
return "1"*numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded
def safeConfigGetBoolean(section,field):
    """Like config.getboolean(section, field) but returns False instead
    of raising when the section/option is missing or the stored value is
    not a valid boolean."""
    try:
        # getboolean already returns a bool; no need for if/else re-wrapping.
        return config.getboolean(section,field)
    except:
        # Deliberate best-effort: any lookup or parse failure means False.
        return False
def lookupAppdataFolder():
    """Return the per-user application data directory for PyBitmessage,
    always ending in a path separator.

    macOS: $HOME/Library/Application support/PyBitmessage/ (exits if
    HOME is unset); Windows: %APPDATA%\\PyBitmessage\\; other platforms:
    ~/.PyBitmessage/.
    """
    APPNAME = "PyBitmessage"
    from os import path, environ
    if sys.platform == 'darwin':
        if "HOME" in environ:
            # Fixed: use the locally-imported environ, not os.environ —
            # only `environ` is brought into this function's namespace.
            appdata = path.join(environ["HOME"], "Library/Application support/", APPNAME) + '/'
        else:
            print('Could not find home folder, please report this message and your OS X version to the BitMessage Github.')
            sys.exit()
    elif 'win32' in sys.platform or 'win64' in sys.platform:
        appdata = path.join(environ['APPDATA'], APPNAME) + '\\'
    else:
        appdata = path.expanduser(path.join("~", "." + APPNAME + "/"))
    return appdata
#This thread exists because SQLITE3 is so un-threadsafe that we must submit queries to it and it puts results back in a different queue. They won't let us just use locks.
class sqlThread(QThread):
def __init__(self, parent = None):
QThread.__init__(self, parent)
def run(self):
self.conn = sqlite3.connect(appdata + 'messages.dat' )
self.conn.text_factory = str
self.cur = self.conn.cursor()
try:
self.cur.execute( '''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text, received text, message text, folder text, UNIQUE(msgid) ON CONFLICT REPLACE)''' )
self.cur.execute( '''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, lastactiontime integer, status text, pubkeyretrynumber integer, msgretrynumber integer, folder text)''' )
self.cur.execute( '''CREATE TABLE subscriptions (label text, address text, enabled bool)''' )
self.cur.execute( '''CREATE TABLE addressbook (label text, address text)''' )
self.cur.execute( '''CREATE TABLE blacklist (label text, address text, enabled bool)''' )
self.cur.execute( '''CREATE TABLE whitelist (label text, address text, enabled bool)''' )
#Explanation of what is in the pubkeys table:
# The hash is the RIPEMD160 hash that is encoded in the Bitmessage address.
# If you or someone else did the POW for this pubkey, then havecorrectnonce will be true. If you received the pubkey in a msg message then havecorrectnonce will be false. You won't have the correct nonce and won't be able to send the message to peers if they request the pubkey.
# transmitdata is literally the data that was included in the Bitmessage pubkey message when it arrived, except for the 24 byte protocol header- ie, it starts with the POW nonce.
# time is the time that the pubkey was broadcast on the network same as with every other type of Bitmessage object.
# usedpersonally is set to "yes" if we have used the key personally. This keeps us from deleting it because we may want to reply to a message in the future. This field is not a bool because we may need more flexability in the future and it doesn't take up much more space anyway.
self.cur.execute( '''CREATE TABLE pubkeys (hash blob, havecorrectnonce bool, transmitdata blob, time blob, usedpersonally text, UNIQUE(hash, havecorrectnonce) ON CONFLICT REPLACE)''' )
self.cur.execute( '''CREATE TABLE inventory (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE)''' )
self.cur.execute( '''CREATE TABLE knownnodes (timelastseen int, stream int, services blob, host blob, port blob, UNIQUE(host, stream, port) ON CONFLICT REPLACE)''' ) #This table isn't used in the program yet but I have a feeling that we'll need it.
self.cur.execute( '''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx',1)''')
self.conn.commit()
print 'Created messages database file'
except Exception, err:
if str(err) == 'table inbox already exists':
print 'Database file already exists.'
else:
sys.stderr.write('ERROR trying to create database file (message.dat). Error message: %s\n' % str(err))
sys.exit()
#People running earlier versions of PyBitmessage do not have the usedpersonally field in their pubkeys table. Let's add it.
if config.getint('bitmessagesettings','settingsversion') == 2:
item = '''ALTER TABLE pubkeys ADD usedpersonally text DEFAULT 'no' '''
parameters = ''
self.cur.execute(item, parameters)
self.conn.commit()
config.set('bitmessagesettings','settingsversion','3')
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
try:
testpayload = '\x00\x00'
t = ('1234','True',testpayload,'12345678','no')
self.cur.execute( '''INSERT INTO pubkeys VALUES(?,?,?,?,?)''',t)
self.conn.commit()
self.cur.execute('''SELECT transmitdata FROM pubkeys WHERE hash='1234' ''')
queryreturn = self.cur.fetchall()
for row in queryreturn:
transmitdata, = row
self.cur.execute('''DELETE FROM pubkeys WHERE hash='1234' ''')
self.conn.commit()
if transmitdata == '':
sys.stderr.write('Problem: The version of SQLite you have cannot store Null values. Please download and install the latest revision of your version of Python (for example, the latest Python 2.7 revision) and try again.\n')
sys.stderr.write('PyBitmessage will now exist very abruptly. You may now see threading errors related to this abrupt exit but the problem you need to solve is related to SQLite.\n\n')
sys.exit()
except Exception, err:
print err
while True:
item = sqlSubmitQueue.get()
parameters = sqlSubmitQueue.get()
#print 'item', item
#print 'parameters', parameters
self.cur.execute(item, parameters)
sqlReturnQueue.put(self.cur.fetchall())
sqlSubmitQueue.task_done()
self.conn.commit()
'''The singleCleaner class is a timer-driven thread that cleans data structures to free memory, resends messages when a remote node doesn't respond, and sends pong messages to keep connections alive if the network isn't busy.
It cleans these data structures in memory:
inventory (moves data to the on-disk sql database)
It cleans these tables on the disk:
inventory (clears data more than 2 days and 12 hours old)
pubkeys (clears pubkeys older than 4 weeks old which we have not used personally)
It resends messages when there has been no response:
resends getpubkey messages in 4 days (then 8 days, then 16 days, etc...)
resends msg messages in 4 days (then 8 days, then 16 days, etc...)
'''
class singleCleaner(QThread):
    """Timer-driven housekeeping thread: every 5 minutes it flushes
    hour-old in-memory inventory to disk, triggers keepalive pongs, and
    (roughly every 2 hours) prunes old inventory/pubkey rows and resends
    messages that never got a response. See the module-level string above
    this class for the full description."""
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        timeWeLastClearedInventoryAndPubkeysTables = 0
        while True:
            time.sleep(300)
            sqlLock.acquire()
            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"Doing housekeeping (Flushing inventory in memory to disk...)")
            # Move inventory entries older than one hour to the sql table.
            # (.items() is a snapshot in Python 2, so the del is safe.)
            for hash, storedValue in inventory.items():
                objectType, streamNumber, payload, receivedTime = storedValue
                if int(time.time())- 3600 > receivedTime:
                    t = (hash,objectType,streamNumber,payload,receivedTime)
                    sqlSubmitQueue.put('''INSERT INTO inventory VALUES (?,?,?,?,?)''')
                    sqlSubmitQueue.put(t)
                    sqlReturnQueue.get()
                    del inventory[hash]
            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"")
            sqlLock.release()
            broadcastToSendDataQueues((0, 'pong', 'no data')) #commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
            # Heavier cleanup runs only every 7380 seconds (~2 hours).
            if timeWeLastClearedInventoryAndPubkeysTables < int(time.time()) - 7380:
                timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
                #inventory (moves data from the inventory data structure to the on-disk sql database)
                sqlLock.acquire()
                #inventory (clears data more than 2 days and 12 hours old)
                t = (int(time.time())-lengthOfTimeToLeaveObjectsInInventory,)
                sqlSubmitQueue.put('''DELETE FROM inventory WHERE receivedtime<?''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                #pubkeys
                t = (int(time.time())-lengthOfTimeToHoldOnToAllPubkeys,)
                sqlSubmitQueue.put('''DELETE FROM pubkeys WHERE time<? AND usedpersonally='no' ''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                t = ()
                sqlSubmitQueue.put('''select toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber FROM sent WHERE ((status='findingpubkey' OR status='sentmessage') AND folder='sent') ''') #If the message's folder='trash' then we'll ignore it.
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                # Retry stalled messages with exponential backoff
                # (4 days, then 8, 16, ... via the retrynumber exponent).
                for row in queryreturn:
                    toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber = row
                    if status == 'findingpubkey':
                        if int(time.time()) - lastactiontime > (maximumAgeOfAnObjectThatIAmWillingToAccept * (2 ** (pubkeyretrynumber))):
                            print 'It has been a long time and we haven\'t heard a response to our getpubkey request. Sending again.'
                            try:
                                del neededPubkeys[toripe] #We need to take this entry out of the neededPubkeys structure because the workerQueue checks to see whether the entry is already present and will not do the POW and send the message because it assumes that it has already done it recently.
                            except:
                                pass
                            workerQueue.put(('sendmessage',toaddress))
                            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"Doing work necessary to again attempt to request a public key...")
                            t = (int(time.time()),pubkeyretrynumber+1,toripe)
                            sqlSubmitQueue.put('''UPDATE sent SET lastactiontime=?, pubkeyretrynumber=? WHERE toripe=?''')
                            sqlSubmitQueue.put(t)
                            sqlReturnQueue.get()
                            #self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),toripe,'Public key requested again. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                    else:# status == sentmessage
                        if int(time.time()) - lastactiontime > (maximumAgeOfAnObjectThatIAmWillingToAccept * (2 ** (msgretrynumber))):
                            print 'It has been a long time and we haven\'t heard an acknowledgement to our msg. Sending again.'
                            t = (int(time.time()),msgretrynumber+1,'findingpubkey',ackdata)
                            sqlSubmitQueue.put('''UPDATE sent SET lastactiontime=?, msgretrynumber=?, status=? WHERE ackdata=?''')
                            sqlSubmitQueue.put(t)
                            sqlReturnQueue.get()
                            #self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Message sent again because the acknowledgement was never received. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                            workerQueue.put(('sendmessage',toaddress))
                            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"Doing work necessary to again attempt to deliver a message...")
                sqlLock.release()
#This thread, of which there is only one, does the heavy lifting: calculating POWs.
class singleWorker(QThread):
    def __init__(self, parent = None):
        # Plain QThread initialization; all the work happens in run().
        QThread.__init__(self, parent)
    def run(self):
        """Worker main loop. On startup, recover state from the sent
        table: re-register pubkeys we are still waiting for, finish any
        pending broadcast POW, and resume 'doingpow' messages. Then serve
        (command, data) requests from workerQueue forever."""
        sqlLock.acquire()
        sqlSubmitQueue.put('''SELECT toripe FROM sent WHERE (status=? AND folder='sent')''')
        sqlSubmitQueue.put(('findingpubkey',))
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        # Rebuild the set of pubkeys we are still waiting to receive.
        for row in queryreturn:
            toripe, = row
            neededPubkeys[toripe] = 0
        self.sendBroadcast() #just in case there are any proof of work tasks for Broadcasts that have yet to be sent.
        #Now let us see if there are any proofs of work for msg messages that we have yet to complete..
        sqlLock.acquire()
        t = ('doingpow',)
        sqlSubmitQueue.put('SELECT toripe FROM sent WHERE status=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toripe, = row
            self.sendMsg(toripe)
        # Main dispatch loop over workerQueue commands.
        while True:
            command, data = workerQueue.get()
            #statusbar = 'The singleWorker thread is working on work.'
            #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
            if command == 'sendmessage':
                toAddress = data
                toStatus,toAddressVersionNumber,toStreamNumber,toRipe = decodeAddress(toAddress)
                #print 'message type', type(message)
                #print repr(message.toUtf8())
                #print str(message.toUtf8())
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT * FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                #print 'queryreturn', queryreturn
                if queryreturn == []:
                    #We'll need to request the pub key because we don't have it.
                    if not toRipe in neededPubkeys:
                        neededPubkeys[toRipe] = 0
                        print 'requesting pubkey:', toRipe.encode('hex')
                        self.requestPubKey(toAddressVersionNumber,toStreamNumber,toRipe)
                    else:
                        print 'We have already requested this pubkey (the ripe hash is in neededPubkeys). We will re-request again soon.'
                        self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),toRipe,'Public key was requested earlier. Receiver must be offline. Will retry.')
                else:
                    print 'We already have the necessary public key.'
                    self.sendMsg(toRipe) #by calling this function, we are asserting that we already have the pubkey for toRipe
            elif command == 'sendbroadcast':
                print 'Within WorkerThread, processing sendbroadcast command.'
                fromAddress,subject,message = data
                self.sendBroadcast()
            elif command == 'doPOWForMyV2Pubkey':
                self.doPOWForMyV2Pubkey(data)
            elif command == 'newpubkey':
                # A pubkey arrived from the network; if we were waiting for
                # it, we can now send the queued message(s).
                toAddressVersion,toStreamNumber,toRipe = data
                if toRipe in neededPubkeys:
                    print 'We have been awaiting the arrival of this pubkey.'
                    del neededPubkeys[toRipe]
                    self.sendMsg(toRipe)
                else:
                    print 'We don\'t need this pub key. We didn\'t ask for it. Pubkey hash:', toRipe.encode('hex')
            else:
                printLock.acquire()
                sys.stderr.write('Probable programming error: The command sent to the workerThread is weird. It is: %s\n' % command)
                printLock.release()
            workerQueue.task_done()
    def doPOWForMyV2Pubkey(self,myAddress): #This function also broadcasts out the pubkey message once it is done with the POW
        """Build the v2 pubkey message for one of our own addresses, do
        the proof of work on it, store it in the pubkeys table, and
        advertise the resulting inventory object to our peers."""
        status,addressVersionNumber,streamNumber,hash = decodeAddress(myAddress)
        embeddedTime = int(time.time())+random.randrange(-300, 300) #the current time plus or minus five minutes
        payload = pack('>I',(embeddedTime))
        payload += encodeVarint(2) #Address version number
        payload += encodeVarint(streamNumber)
        payload += '\x00\x00\x00\x01' #bitfield of features supported by me (see the wiki).
        try:
            privSigningKeyBase58 = config.get(myAddress, 'privsigningkey')
            privEncryptionKeyBase58 = config.get(myAddress, 'privencryptionkey')
        except Exception, err:
            printLock.acquire()
            sys.stderr.write('Error within doPOWForMyV2Pubkey. Could not read the keys from the keys.dat file for a requested address. %s\n' % err)
            printLock.release()
            return
        # Derive the public keys from our stored private keys; the leading
        # \x04 encoding byte is stripped before transmission.
        privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
        privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
        pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex')
        pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
        payload += pubSigningKey[1:]
        payload += pubEncryptionKey[1:]
        #Do the POW for this pubkey message
        nonce = 0
        trialValue = 99999999999999999999
        target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
        print '(For pubkey message) Doing proof of work...'
        initialHash = hashlib.sha512(payload).digest()
        # Brute-force a nonce until double-SHA512(nonce + hash) is under target.
        while trialValue > target:
            nonce += 1
            trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
        print '(For pubkey message) Found proof of work', trialValue, 'Nonce:', nonce
        payload = pack('>Q',nonce) + payload
        # Record it in the pubkeys table (havecorrectnonce=True: we did the POW).
        t = (hash,True,payload,embeddedTime,'no')
        sqlLock.acquire()
        sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        # Add to the in-memory inventory and tell peers about it.
        inventoryHash = calculateInventoryHash(payload)
        objectType = 'pubkey'
        inventory[inventoryHash] = (objectType, streamNumber, payload, embeddedTime)
        printLock.acquire()
        print 'broadcasting inv with hash:', inventoryHash.encode('hex')
        printLock.release()
        broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"")
    def sendBroadcast(self):
        """For every 'broadcastpending' row in the sent table, assemble
        the signed broadcast payload, do the proof of work, add the
        object to the inventory, advertise it to peers, and mark the row
        'broadcastsent'. Only v2 (EC) addresses are handled; the v1 (RSA)
        path below is disabled dead code kept as a string literal."""
        sqlLock.acquire()
        t = ('broadcastpending',)
        sqlSubmitQueue.put('SELECT fromaddress, subject, message, ackdata FROM sent WHERE status=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            fromaddress, subject, body, ackdata = row
            status,addressVersionNumber,streamNumber,ripe = decodeAddress(fromaddress)
            if addressVersionNumber == 2:
                #We need to convert our private keys to public keys in order to include them.
                try:
                    privSigningKeyBase58 = config.get(fromaddress, 'privsigningkey')
                    privEncryptionKeyBase58 = config.get(fromaddress, 'privencryptionkey')
                except:
                    self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Error! Could not find sender address (your address) in the keys.dat file.')
                    continue
                privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
                privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
                pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex') #At this time these pubkeys are 65 bytes long because they include the encoding byte which we won't be sending in the broadcast message.
                pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
                # Assemble the broadcast payload (see protocol spec).
                payload = pack('>I',(int(time.time())+random.randrange(-300, 300)))#the current time plus or minus five minutes
                payload += encodeVarint(1) #broadcast version
                payload += encodeVarint(addressVersionNumber)
                payload += encodeVarint(streamNumber)
                payload += '\x00\x00\x00\x01' #behavior bitfield
                payload += pubSigningKey[1:]
                payload += pubEncryptionKey[1:]
                payload += ripe
                payload += '\x02' #message encoding type
                payload += encodeVarint(len('Subject:' + subject + '\n' + 'Body:' + body)) #Type 2 is simple UTF-8 message encoding.
                payload += 'Subject:' + subject + '\n' + 'Body:' + body
                # Sign everything so far, then append the signature.
                signature = highlevelcrypto.sign(payload,privSigningKeyHex)
                payload += encodeVarint(len(signature))
                payload += signature
                # Proof of work over the completed payload.
                nonce = 0
                trialValue = 99999999999999999999
                target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
                print '(For broadcast message) Doing proof of work...'
                self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Doing work necessary to send broadcast...')
                initialHash = hashlib.sha512(payload).digest()
                while trialValue > target:
                    nonce += 1
                    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
                print '(For broadcast message) Found proof of work', trialValue, 'Nonce:', nonce
                payload = pack('>Q',nonce) + payload
                # Add to inventory and advertise to peers in this stream.
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'broadcast'
                inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
                print 'sending inv (within sendBroadcast function)'
                broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
                self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Broadcast sent at '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                #Update the status of the message in the 'sent' table to have a 'broadcastsent' status
                sqlLock.acquire()
                t = ('broadcastsent',int(time.time()),fromaddress, subject, body,'broadcastpending')
                sqlSubmitQueue.put('UPDATE sent SET status=?, lastactiontime=? WHERE fromaddress=? AND subject=? AND message=? AND status=?')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
            """elif addressVersionNumber == 1: #This whole section can be taken out soon because we aren't supporting v1 addresses for much longer.
                messageToTransmit = '\x02' #message encoding type
                messageToTransmit += encodeVarint(len('Subject:' + subject + '\n' + 'Body:' + body)) #Type 2 is simple UTF-8 message encoding.
                messageToTransmit += 'Subject:' + subject + '\n' + 'Body:' + body
                #We need the all the integers for our private key in order to sign our message, and we need our public key to send with the message.
                n = config.getint(fromaddress, 'n')
                e = config.getint(fromaddress, 'e')
                d = config.getint(fromaddress, 'd')
                p = config.getint(fromaddress, 'p')
                q = config.getint(fromaddress, 'q')
                nString = convertIntToString(n)
                eString = convertIntToString(e)
                #myPubkey = rsa.PublicKey(n,e)
                myPrivatekey = rsa.PrivateKey(n,e,d,p,q)
                #The payload of the broadcast message starts with a POW, but that will be added later.
                payload = pack('>I',(int(time.time())))
                payload += encodeVarint(1) #broadcast version
                payload += encodeVarint(addressVersionNumber)
                payload += encodeVarint(streamNumber)
                payload += ripe
                payload += encodeVarint(len(nString))
                payload += nString
                payload += encodeVarint(len(eString))
                payload += eString
                payload += messageToTransmit
                signature = rsa.sign(messageToTransmit,myPrivatekey,'SHA-512')
                #print 'signature', signature.encode('hex')
                payload += signature
                #print 'nString', repr(nString)
                #print 'eString', repr(eString)
                nonce = 0
                trialValue = 99999999999999999999
                target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
                print '(For broadcast message) Doing proof of work...'
                initialHash = hashlib.sha512(payload).digest()
                while trialValue > target:
                    nonce += 1
                    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
                print '(For broadcast message) Found proof of work', trialValue, 'Nonce:', nonce
                payload = pack('>Q',nonce) + payload
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'broadcast'
                inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
                print 'sending inv (within sendBroadcast function)'
                broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
                self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Broadcast sent at '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                #Update the status of the message in the 'sent' table to have a 'broadcastsent' status
                sqlLock.acquire()
                t = ('broadcastsent',int(time.time()),fromaddress, subject, body,'broadcastpending')
                sqlSubmitQueue.put('UPDATE sent SET status=?, lastactiontime=? WHERE fromaddress=? AND subject=? AND message=? AND status=?')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()"""
            else:
                printLock.acquire()
                print 'In the singleWorker thread, the sendBroadcast function doesn\'t understand the address version'
                printLock.release()
    def sendMsg(self,toRipe):
        """Assemble, encrypt, proof-of-work, and broadcast every queued
        message destined for the recipient whose ripe hash is toRipe.

        The caller runs this once the recipient's pubkey is available.
        Rows in the 'sent' table move 'findingpubkey' -> 'doingpow' ->
        'sentmessage' as the work progresses. SQL access goes through the
        sqlSubmitQueue/sqlReturnQueue pair under sqlLock.
        """
        sqlLock.acquire()
        #Claim all messages that were waiting on this recipient's pubkey by
        #flipping their status to 'doingpow', then read them back.
        t = ('doingpow','findingpubkey',toRipe)
        sqlSubmitQueue.put('UPDATE sent SET status=? WHERE status=? AND toripe=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        t = ('doingpow',toRipe)
        sqlSubmitQueue.put('SELECT toaddress, fromaddress, subject, message, ackdata FROM sent WHERE status=? AND toripe=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toaddress, fromaddress, subject, message, ackdata = row
            #Register the ackdata so the receive path recognizes the ack
            #when it comes back from the recipient.
            ackdataForWhichImWatching[ackdata] = 0
            toStatus,toAddressVersionNumber,toStreamNumber,toHash = decodeAddress(toaddress)
            fromStatus,fromAddressVersionNumber,fromStreamNumber,fromHash = decodeAddress(fromaddress)
            self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Doing work necessary to send the message.')
            printLock.acquire()
            print 'Found a message in our database that needs to be sent with this pubkey.'
            print 'First 150 characters of message:', message[:150]
            printLock.release()
            embeddedTime = pack('>I',(int(time.time())+random.randrange(-300, 300)))#the current time plus or minus five minutes. We will use this time both for our message and for the ackdata packed within our message.
            if fromAddressVersionNumber == 2:
                #Build the unencrypted message payload: version, sender
                #address info, sender public keys, destination hash, the
                #encoded message, a pre-POWed ack message, and a signature.
                payload = '\x01' #Message version.
                payload += encodeVarint(fromAddressVersionNumber)
                payload += encodeVarint(fromStreamNumber)
                payload += '\x00\x00\x00\x01' #Bitfield of features and behaviors that can be expected from me. (See https://bitmessage.org/wiki/Protocol_specification#Pubkey_bitfield_features )
                #We need to convert our private keys to public keys in order to include them.
                try:
                    privSigningKeyBase58 = config.get(fromaddress, 'privsigningkey')
                    privEncryptionKeyBase58 = config.get(fromaddress, 'privencryptionkey')
                except:
                    self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Error! Could not find sender address (your address) in the keys.dat file.')
                    continue
                privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
                privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
                pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex')
                pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
                payload += pubSigningKey[1:] #The \x04 on the beginning of the public keys are not sent. This way there is only one acceptable way to encode and send a public key.
                payload += pubEncryptionKey[1:]
                payload += toHash #This hash will be checked by the receiver of the message to verify that toHash belongs to them. This prevents a Surreptitious Forwarding Attack.
                payload += '\x02' #Type 2 is simple UTF-8 message encoding as specified on the Protocol Specification on the Bitmessage Wiki.
                messageToTransmit = 'Subject:' + subject + '\n' + 'Body:' + message
                payload += encodeVarint(len(messageToTransmit))
                payload += messageToTransmit
                fullAckPayload = self.generateFullAckMessage(ackdata,toStreamNumber,embeddedTime)#The fullAckPayload is a normal msg protocol message with the proof of work already completed that the receiver of this message can easily send out.
                payload += encodeVarint(len(fullAckPayload))
                payload += fullAckPayload
                signature = highlevelcrypto.sign(payload,privSigningKeyHex)
                payload += encodeVarint(len(signature))
                payload += signature
            """elif fromAddressVersionNumber == 1: #This code is for old version 1 (RSA) addresses. It will soon be removed.
                payload = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' #this run of nulls allows the true message receiver to identify his message
                payload += '\x01' #Message version.
                payload += '\x00\x00\x00\x01'
                payload += encodeVarint(fromAddressVersionNumber)
                payload += encodeVarint(fromStreamNumber)
                try:
                    sendersN = convertIntToString(config.getint(fromaddress, 'n'))
                except:
                    printLock.acquire()
                    print 'Error: Could not find', fromaddress, 'in our keys.dat file. You must have deleted it. Aborting the send.'
                    printLock.release()
                    return
                payload += encodeVarint(len(sendersN))
                payload += sendersN
                sendersE = convertIntToString(config.getint(fromaddress, 'e'))
                payload += encodeVarint(len(sendersE))
                payload += sendersE
                payload += '\x02' #Type 2 is simple UTF-8 message encoding.
                messageToTransmit = 'Subject:' + subject + '\n' + 'Body:' + message
                payload += encodeVarint(len(messageToTransmit))
                payload += messageToTransmit
                #Later, if anyone impliments clients that don't send the ack_data, then we should probably check here to make sure that the receiver will make use of this ack_data and not attach it if not.
                fullAckPayload = self.generateFullAckMessage(ackdata,toStreamNumber,embeddedTime)
                payload += encodeVarint(len(fullAckPayload))
                payload += fullAckPayload
                sendersPrivKey = rsa.PrivateKey(config.getint(fromaddress, 'n'),config.getint(fromaddress, 'e'),config.getint(fromaddress, 'd'),config.getint(fromaddress, 'p'),config.getint(fromaddress, 'q'))
                payload += rsa.sign(payload,sendersPrivKey,'SHA-512')"""
            #We have assembled the data that will be encrypted. Now let us fetch the recipient's public key out of our database and do the encryption.
            if toAddressVersionNumber == 2:
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT transmitdata FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                for row in queryreturn:
                    pubkeyPayload, = row
                #The pubkey is stored the way we originally received it which means that we need to read beyond things like the nonce and time to get to the public keys.
                readPosition = 8 #to bypass the nonce
                readPosition += 4 #to bypass the embedded time
                readPosition += 1 #to bypass the address version whose length is definitely 1
                streamNumber, streamNumberLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += streamNumberLength
                behaviorBitfield = pubkeyPayload[readPosition:readPosition+4]
                readPosition += 4 #to bypass the bitfield of behaviors
                #pubSigningKeyBase256 = pubkeyPayload[readPosition:readPosition+64] #We don't use this key for anything here.
                readPosition += 64
                pubEncryptionKeyBase256 = pubkeyPayload[readPosition:readPosition+64]
                readPosition += 64
                encrypted = highlevelcrypto.encrypt(payload,"04"+pubEncryptionKeyBase256.encode('hex'))
            """elif toAddressVersionNumber == 1:
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT transmitdata FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                for row in queryreturn:
                    pubkeyPayload, = row
                readPosition = 8 #to bypass the nonce
                behaviorBitfield = pubkeyPayload[8:12]
                readPosition += 4 #to bypass the bitfield of behaviors
                addressVersion, addressVersionLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += addressVersionLength
                streamNumber, streamNumberLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += streamNumberLength
                nLength, nLengthLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += nLengthLength
                n = convertStringToInt(pubkeyPayload[readPosition:readPosition+nLength])
                readPosition += nLength
                eLength, eLengthLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += eLengthLength
                e = convertStringToInt(pubkeyPayload[readPosition:readPosition+eLength])
                receiversPubkey = rsa.PublicKey(n,e)
                infile = cStringIO.StringIO(payload)
                outfile = cStringIO.StringIO()
                #print 'Encrypting using public key:', receiversPubkey
                encrypt_bigfile(infile,outfile,receiversPubkey)
                encrypted = outfile.getvalue()
                infile.close()
                outfile.close()"""
            #NOTE(review): if the SELECT above returned no pubkey row, then
            #'encrypted' and 'streamNumber' are unbound here (NameError) or
            #stale from a previous loop iteration - this code assumes the
            #pubkey for toRipe is already present in the pubkeys table.
            nonce = 0
            trialValue = 99999999999999999999
            encodedStreamNumber = encodeVarint(toStreamNumber)
            #We are now dropping the unencrypted data in payload since it has already been encrypted and replacing it with the encrypted payload that we will send out.
            payload = embeddedTime + encodedStreamNumber + encrypted
            target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
            print '(For msg message) Doing proof of work. Target:', target
            powStartTime = time.time()
            initialHash = hashlib.sha512(payload).digest()
            #Proof of work: find a nonce whose double-SHA512 leading 8 bytes
            #fall below the size-scaled target.
            while trialValue > target:
                nonce += 1
                trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
            print '(For msg message) Found proof of work', trialValue, 'Nonce:', nonce
            try:
                print 'POW took', int(time.time()-powStartTime), 'seconds.', nonce/(time.time()-powStartTime), 'nonce trials per second.'
            except:
                pass
            payload = pack('>Q',nonce) + payload
            inventoryHash = calculateInventoryHash(payload)
            objectType = 'msg'
            inventory[inventoryHash] = (objectType, toStreamNumber, payload, int(time.time()))
            self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Message sent. Waiting on acknowledgement. Sent on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
            print 'sending inv (within sendmsg function)'
            #NOTE(review): this advertises on 'streamNumber' (parsed from the
            #stored pubkey) while the inventory entry above was filed under
            #toStreamNumber - confirm these are always equal.
            broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
            #Update the status of the message in the 'sent' table to have a 'sent' status
            sqlLock.acquire()
            t = ('sentmessage',toaddress, fromaddress, subject, message,'doingpow')
            sqlSubmitQueue.put('UPDATE sent SET status=? WHERE toaddress=? AND fromaddress=? AND subject=? AND message=? AND status=?')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            t = (toRipe,)
            sqlSubmitQueue.put('''UPDATE pubkeys SET usedpersonally='yes' WHERE hash=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
def requestPubKey(self,addressVersionNumber,streamNumber,ripe):
payload = pack('>I',int(time.time()))
payload += encodeVarint(addressVersionNumber)
payload += encodeVarint(streamNumber)
payload += ripe
printLock.acquire()
print 'making request for pubkey with ripe:', ripe.encode('hex')
printLock.release()
nonce = 0
trialValue = 99999999999999999999
#print 'trial value', trialValue
statusbar = 'Doing the computations necessary to request the recipient\'s public key.'
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),ripe,'Doing work necessary to request public key.')
print 'Doing proof-of-work necessary to send getpubkey message.'
target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
initialHash = hashlib.sha512(payload).digest()
while trialValue > target:
nonce += 1
trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
printLock.acquire()
print 'Found proof of work', trialValue, 'Nonce:', nonce
printLock.release()
payload = pack('>Q',nonce) + payload
inventoryHash = calculateInventoryHash(payload)
objectType = 'getpubkey'
inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
print 'sending inv (for the getpubkey message)'
broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Broacasting the public key request. This program will auto-retry if they are offline.')
self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),ripe,'Sending public key request. Waiting for reply. Requested at ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
def generateFullAckMessage(self,ackdata,toStreamNumber,embeddedTime):
nonce = 0
trialValue = 99999999999999999999
encodedStreamNumber = encodeVarint(toStreamNumber)
payload = embeddedTime + encodedStreamNumber + ackdata
target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
printLock.acquire()
print '(For ack message) Doing proof of work...'
printLock.release()
powStartTime = time.time()
initialHash = hashlib.sha512(payload).digest()
while trialValue > target:
nonce += 1
trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
printLock.acquire()
print '(For ack message) Found proof of work', trialValue, 'Nonce:', nonce
try:
print 'POW took', int(time.time()-powStartTime), 'seconds.', nonce/(time.time()-powStartTime), 'nonce trials per second.'
except:
pass
printLock.release()
payload = pack('>Q',nonce) + payload
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload))
headerData += hashlib.sha512(payload).digest()[:4]
return headerData + payload
class addressGenerator(QThread):
    """Background thread that generates Bitmessage addresses.

    Call setup() with the desired parameters, then start(). Generates
    either one random address or a batch of deterministic addresses
    derived from a passphrase, writes the keys to keys.dat, and notifies
    the GUI (and, for API callers, apiAddressGeneratorReturnQueue).
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def setup(self,addressVersionNumber,streamNumber,label="(no label)",numberOfAddressesToMake=1,deterministicPassphrase="",eighteenByteRipe=False):
        #Stash the generation parameters; the actual work happens in run().
        #eighteenByteRipe=True demands a two-byte \x00 ripe prefix (shorter
        #address) at the cost of more key-generation attempts.
        self.addressVersionNumber = addressVersionNumber
        self.streamNumber = streamNumber
        self.label = label
        self.numberOfAddressesToMake = numberOfAddressesToMake
        self.deterministicPassphrase = deterministicPassphrase
        self.eighteenByteRipe = eighteenByteRipe
    def run(self):
        if self.addressVersionNumber == 2:
            if self.deterministicPassphrase == "":
                statusbar = 'Generating one new address'
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
                #This next section is a little bit strange. We're going to generate keys over and over until we
                #find one that starts with either \x00 or \x00\x00. Then when we pack them into a Bitmessage address,
                #we won't store the \x00 or \x00\x00 bytes thus making the address shorter.
                startTime = time.time()
                numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                #The signing key is generated once; only the encryption key is
                #regenerated each attempt, which is enough to vary the ripe.
                potentialPrivSigningKey = OpenSSL.rand(32)
                potentialPubSigningKey = self.pointMult(potentialPrivSigningKey)
                while True:
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                    potentialPrivEncryptionKey = OpenSSL.rand(32)
                    potentialPubEncryptionKey = self.pointMult(potentialPrivEncryptionKey)
                    #print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
                    #print 'potentialPubEncryptionKey', potentialPubEncryptionKey.encode('hex')
                    #ripe = RIPEMD160(SHA512(pubSigningKey || pubEncryptionKey))
                    ripe = hashlib.new('ripemd160')
                    sha = hashlib.new('sha512')
                    sha.update(potentialPubSigningKey+potentialPubEncryptionKey)
                    ripe.update(sha.digest())
                    #print 'potential ripe.digest', ripe.digest().encode('hex')
                    if self.eighteenByteRipe:
                        if ripe.digest()[:2] == '\x00\x00':
                            break
                    else:
                        if ripe.digest()[:1] == '\x00':
                            break
                print 'Generated address with ripe digest:', ripe.digest().encode('hex')
                print 'Address generator calculated', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, 'addresses at', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix/(time.time()-startTime),'addresses per second before finding one with the correct ripe-prefix.'
                #Drop the leading zero byte(s) when encoding the address.
                if ripe.digest()[:2] == '\x00\x00':
                    address = encodeAddress(2,self.streamNumber,ripe.digest()[2:])
                elif ripe.digest()[:1] == '\x00':
                    address = encodeAddress(2,self.streamNumber,ripe.digest()[1:])
                #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
                #An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
                #https://en.bitcoin.it/wiki/Wallet_import_format
                privSigningKey = '\x80'+potentialPrivSigningKey
                checksum = hashlib.sha256(hashlib.sha256(privSigningKey).digest()).digest()[0:4]
                privSigningKeyWIF = arithmetic.changebase(privSigningKey + checksum,256,58)
                #print 'privSigningKeyWIF',privSigningKeyWIF
                privEncryptionKey = '\x80'+potentialPrivEncryptionKey
                checksum = hashlib.sha256(hashlib.sha256(privEncryptionKey).digest()).digest()[0:4]
                privEncryptionKeyWIF = arithmetic.changebase(privEncryptionKey + checksum,256,58)
                #print 'privEncryptionKeyWIF',privEncryptionKeyWIF
                #Persist the new address and its keys to keys.dat.
                config.add_section(address)
                config.set(address,'label',self.label)
                config.set(address,'enabled','true')
                config.set(address,'decoy','false')
                config.set(address,'privSigningKey',privSigningKeyWIF)
                config.set(address,'privEncryptionKey',privEncryptionKeyWIF)
                with open(appdata + 'keys.dat', 'wb') as configfile:
                    config.write(configfile)
                #It may be the case that this address is being generated as a result of a call to the API. Let us put the result in the necessary queue.
                apiAddressGeneratorReturnQueue.put(address)
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address. Doing work necessary to broadcast it...')
                self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
                reloadMyAddressHashes()
                #Queue the proof of work needed to announce the new pubkey.
                workerQueue.put(('doPOWForMyV2Pubkey',address))
            else: #There is something in the deterministicPassphrase variable thus we are going to do this deterministically.
                statusbar = 'Generating '+str(self.numberOfAddressesToMake) + ' new addresses.'
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
                #Keys are derived as SHA512(passphrase || varint(nonce));
                #signing keys use even nonces and encryption keys odd ones.
                signingKeyNonce = 0
                encryptionKeyNonce = 1
                listOfNewAddressesToSendOutThroughTheAPI = [] #We fill out this list no matter what although we only need it if we end up passing the info to the API.
                for i in range(self.numberOfAddressesToMake):
                    #This next section is a little bit strange. We're going to generate keys over and over until we
                    #find one that has a RIPEMD hash that starts with either \x00 or \x00\x00. Then when we pack them
                    #into a Bitmessage address, we won't store the \x00 or \x00\x00 bytes thus making the address shorter.
                    startTime = time.time()
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                    while True:
                        numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                        potentialPrivSigningKey = hashlib.sha512(self.deterministicPassphrase + encodeVarint(signingKeyNonce)).digest()[:32]
                        potentialPrivEncryptionKey = hashlib.sha512(self.deterministicPassphrase + encodeVarint(encryptionKeyNonce)).digest()[:32]
                        potentialPubSigningKey = self.pointMult(potentialPrivSigningKey)
                        potentialPubEncryptionKey = self.pointMult(potentialPrivEncryptionKey)
                        #print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
                        #print 'potentialPubEncryptionKey', potentialPubEncryptionKey.encode('hex')
                        signingKeyNonce += 2
                        encryptionKeyNonce += 2
                        ripe = hashlib.new('ripemd160')
                        sha = hashlib.new('sha512')
                        sha.update(potentialPubSigningKey+potentialPubEncryptionKey)
                        ripe.update(sha.digest())
                        #print 'potential ripe.digest', ripe.digest().encode('hex')
                        if self.eighteenByteRipe:
                            if ripe.digest()[:2] == '\x00\x00':
                                break
                        else:
                            if ripe.digest()[:1] == '\x00':
                                break
                    print 'ripe.digest', ripe.digest().encode('hex')
                    print 'Address generator calculated', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, 'addresses at', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix/(time.time()-startTime),'keys per second.'
                    if ripe.digest()[:2] == '\x00\x00':
                        address = encodeAddress(2,self.streamNumber,ripe.digest()[2:])
                    elif ripe.digest()[:1] == '\x00':
                        address = encodeAddress(2,self.streamNumber,ripe.digest()[1:])
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
                    #An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
                    #https://en.bitcoin.it/wiki/Wallet_import_format
                    privSigningKey = '\x80'+potentialPrivSigningKey
                    checksum = hashlib.sha256(hashlib.sha256(privSigningKey).digest()).digest()[0:4]
                    privSigningKeyWIF = arithmetic.changebase(privSigningKey + checksum,256,58)
                    privEncryptionKey = '\x80'+potentialPrivEncryptionKey
                    checksum = hashlib.sha256(hashlib.sha256(privEncryptionKey).digest()).digest()[0:4]
                    privEncryptionKeyWIF = arithmetic.changebase(privEncryptionKey + checksum,256,58)
                    try:
                        #add_section raises if the address already exists in
                        #keys.dat; in that case we skip it without re-adding.
                        config.add_section(address)
                        print 'self.label', self.label
                        config.set(address,'label',self.label)
                        config.set(address,'enabled','true')
                        config.set(address,'decoy','false')
                        config.set(address,'privSigningKey',privSigningKeyWIF)
                        config.set(address,'privEncryptionKey',privEncryptionKeyWIF)
                        with open(appdata + 'keys.dat', 'wb') as configfile:
                            config.write(configfile)
                        self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
                        listOfNewAddressesToSendOutThroughTheAPI.append(address)
                    except:
                        print address,'already exists. Not adding it again.'
                #It may be the case that this address is being generated as a result of a call to the API. Let us put the result in the necessary queue.
                apiAddressGeneratorReturnQueue.put(listOfNewAddressesToSendOutThroughTheAPI)
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address')
                reloadMyAddressHashes()
        #This code which deals with old RSA addresses will soon be removed.
        """elif self.addressVersionNumber == 1:
            statusbar = 'Generating new ' + str(config.getint('bitmessagesettings', 'bitstrength')) + ' bit RSA key. This takes a minute on average. If you want to generate multiple addresses now, you can; they will queue.'
            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
            (pubkey, privkey) = rsa.newkeys(config.getint('bitmessagesettings', 'bitstrength'))
            print privkey['n']
            print privkey['e']
            print privkey['d']
            print privkey['p']
            print privkey['q']
            sha = hashlib.new('sha512')
            #sha.update(str(pubkey.n)+str(pubkey.e))
            sha.update(convertIntToString(pubkey.n)+convertIntToString(pubkey.e))
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            address = encodeAddress(1,self.streamNumber,ripe.digest())
            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
            config.add_section(address)
            config.set(address,'label',self.label)
            config.set(address,'enabled','true')
            config.set(address,'decoy','false')
            config.set(address,'n',str(privkey['n']))
            config.set(address,'e',str(privkey['e']))
            config.set(address,'d',str(privkey['d']))
            config.set(address,'p',str(privkey['p']))
            config.set(address,'q',str(privkey['q']))
            with open(appdata + 'keys.dat', 'wb') as configfile:
                config.write(configfile)
            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address')
            self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
            reloadMyAddressHashes()"""
    #Does an EC point multiplication; turns a private key into a public key.
    def pointMult(self,secret):
        #Uses the raw OpenSSL EC API via ctypes on curve secp256k1. Returns
        #the public key in uncompressed form (0x04 || X || Y) as raw bytes.
        #ctx = OpenSSL.BN_CTX_new() #This value proved to cause Seg Faults on Linux. It turns out that it really didn't speed up EC_POINT_mul anyway.
        k = OpenSSL.EC_KEY_new_by_curve_name(OpenSSL.get_curve('secp256k1'))
        priv_key = OpenSSL.BN_bin2bn(secret, 32, 0)
        group = OpenSSL.EC_KEY_get0_group(k)
        pub_key = OpenSSL.EC_POINT_new(group)
        #pub_key = priv_key * G (the curve's base point)
        OpenSSL.EC_POINT_mul(group, pub_key, priv_key, None, None, None)
        OpenSSL.EC_KEY_set_private_key(k, priv_key)
        OpenSSL.EC_KEY_set_public_key(k, pub_key)
        #print 'priv_key',priv_key
        #print 'pub_key',pub_key
        #First call sizes the buffer; second call serializes into it.
        size = OpenSSL.i2o_ECPublicKey(k, 0)
        mb = ctypes.create_string_buffer(size)
        OpenSSL.i2o_ECPublicKey(k, ctypes.byref(ctypes.pointer(mb)))
        #print 'mb.raw', mb.raw.encode('hex'), 'length:', len(mb.raw)
        #print 'mb.raw', mb.raw, 'length:', len(mb.raw)
        #Free the OpenSSL structures to avoid leaking native memory.
        OpenSSL.EC_POINT_free(pub_key)
        #OpenSSL.BN_CTX_free(ctx)
        OpenSSL.BN_free(priv_key)
        OpenSSL.EC_KEY_free(k)
        return mb.raw
#This is one of several classes that constitute the API
#This class was written by Vaibhav Bhatia. Modified by Jonathan Warren (Atheros).
#http://code.activestate.com/recipes/501148-xmlrpc-serverclient-which-does-cookie-handling-and/
class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
def do_POST(self):
#Handles the HTTP POST request.
#Attempts to interpret all HTTP POST requests as XML-RPC calls,
#which are forwarded to the server's _dispatch method for handling.
#Note: this method is the same as in SimpleXMLRPCRequestHandler,
#just hacked to handle cookies
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10*1024*1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
L.append(self.rfile.read(chunk_size))
size_remaining -= len(L[-1])
data = ''.join(L)
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(
data, getattr(self, '_dispatch', None)
)
except: # This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
self.end_headers()
else:
# got a valid XML RPC response
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.send_header("Content-length", str(len(response)))
# HACK :start -> sends cookies here
if self.cookies:
for cookie in self.cookies:
self.send_header('Set-Cookie',cookie.output(header=''))
# HACK :end
self.end_headers()
self.wfile.write(response)
# shut down the connection
self.wfile.flush()
self.connection.shutdown(1)
def APIAuthenticateClient(self):
if self.headers.has_key('Authorization'):
# handle Basic authentication
(enctype, encstr) = self.headers.get('Authorization').split()
(emailid, password) = encstr.decode('base64').split(':')
if emailid == config.get('bitmessagesettings', 'apiusername') and password == config.get('bitmessagesettings', 'apipassword'):
return True
else:
return False
else:
print 'Authentication failed because header lacks Authentication field'
time.sleep(2)
return False
return False
def _dispatch(self, method, params):
self.cookies = []
validuser = self.APIAuthenticateClient()
if not validuser:
time.sleep(2)
return "RPC Username or password incorrect or HTTP header lacks authentication at all."
# handle request
if method == 'helloWorld':
(a,b) = params
return a+'-'+b
elif method == 'add':
(a,b) = params
return a+b
elif method == 'statusBar':
message, = params
apiSignalQueue.put(('updateStatusBar',message))
elif method == 'listAddresses':
data = '{"addresses":['
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
data
if len(data) > 20:
data += ','
data += json.dumps({'label':config.get(addressInKeysFile,'label'),'address':addressInKeysFile,'stream':streamNumber,'enabled':config.getboolean(addressInKeysFile,'enabled')},indent=4, separators=(',', ': '))
data += ']}'
return data
elif method == 'createRandomAddress':
if len(params) == 0:
return 'API Error 0000: I need parameters!'
elif len(params) == 1:
label, = params
eighteenByteRipe = False
elif len(params) == 2:
label, eighteenByteRipe = params
label = label.decode('base64')
apiAddressGeneratorReturnQueue.queue.clear()
apiSignalQueue.put(('createRandomAddress',(label, eighteenByteRipe))) #params should be a twopul which equals (eighteenByteRipe, label)
return apiAddressGeneratorReturnQueue.get()
elif method == 'createDeterministicAddresses':
if len(params) == 0:
return 'API Error 0000: I need parameters!'
elif len(params) == 1:
passphrase, = params
numberOfAddresses = 1
addressVersionNumber = 0
streamNumber = 0
eighteenByteRipe = False
elif len(params) == 2:
passphrase, numberOfAddresses = params
addressVersionNumber = 0
streamNumber = 0
eighteenByteRipe = False
elif len(params) == 3:
passphrase, numberOfAddresses, addressVersionNumber = params
streamNumber = 0
eighteenByteRipe = False
elif len(params) == 4:
passphrase, numberOfAddresses, addressVersionNumber, streamNumber = params
eighteenByteRipe = False
elif len(params) == 5:
passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe = params
if len(passphrase) == 0:
return 'API Error 0001: the specified passphrase is blank.'
passphrase = passphrase.decode('base64')
if addressVersionNumber == 0: #0 means "just use the proper addressVersionNumber"
addressVersionNumber == 2
if addressVersionNumber != 2:
return 'API Error 0002: the address version number currently must be 2 (or 0 which means auto-select). Others aren\'t supported.'
if streamNumber == 0: #0 means "just use the most available stream"
streamNumber = 1
if streamNumber != 1:
return 'API Error 0003: the stream number must be 1 (or 0 which means auto-select). Others aren\'t supported.'
if numberOfAddresses == 0:
return 'API Error 0004: Why would you ask me to generate 0 addresses for you?'
if numberOfAddresses > 9999:
return 'API Error 0005: You have (accidentially?) specified too many addresses to make. Maximum 9999. This check only exists to prevent mischief; if you really want to create more addresses than this, contact the Bitmessage developers and we can modify the check or you can do it yourself by searching the source code for this message.'
apiAddressGeneratorReturnQueue.queue.clear()
print 'about to send numberOfAddresses', numberOfAddresses
apiSignalQueue.put(('createDeterministicAddresses',(passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe)))
data = '{"addresses":['
queueReturn = apiAddressGeneratorReturnQueue.get()
for item in queueReturn:
if len(data) > 20:
data += ','
data += "\""+item+ "\""
data += ']}'
return data
elif method == 'getAllInboxMessages':
sqlLock.acquire()
sqlSubmitQueue.put('''SELECT msgid, toaddress, fromaddress, subject, received, message FROM inbox where folder='inbox' ORDER BY received''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
sqlLock.release()
data = '{"inboxMessages":['
for row in queryreturn:
msgid, toAddress, fromAddress, subject, received, message, = row
if len(data) > 25:
data += ','
data += json.dumps({'msgid':msgid.encode('hex'),'toAddress':toAddress,'fromAddress':fromAddress,'subject':subject.encode('base64'),'message':message.encode('base64'),'encodingType':2,'receivedTime':received},indent=4, separators=(',', ': '))
data += ']}'
return data
elif method == 'trashMessage':
if len(params) == 0:
return 'API Error 0000: I need parameters!'
msgid = params[0].decode('hex')
t = (msgid,)
sqlLock.acquire()
sqlSubmitQueue.put('''UPDATE inbox SET folder='trash' WHERE msgid=?''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
apiSignalQueue.put(('updateStatusBar','Per API: Trashed message (assuming message existed). UI not updated.'))
return 'Trashed message (assuming message existed). UI not updated. To double check, run getAllInboxMessages to see that the message disappeared, or restart Bitmessage and look in the normal Bitmessage GUI.'
elif method == 'sendMessage':
if len(params) == 0:
return 'API Error 0000: I need parameters!'
elif len(params) == 4:
toAddress, fromAddress, subject, message = params
encodingType = 2
elif len(params) == 5:
toAddress, fromAddress, subject, message, encodingType = params
if encodingType != 2:
return 'API Error 0006: The encoding type must be 2 because that is the only one this program currently supports.'
subject = subject.decode('base64')
message = message.decode('base64')
status,addressVersionNumber,streamNumber,toRipe = decodeAddress(toAddress)
if status <> 'success':
printLock.acquire()
print 'API Error 0007: Could not decode address:', toAddress, ':', status
printLock.release()
if status == 'checksumfailed':
return 'API Error 0008: Checksum failed for address: ' + toAddress
if status == 'invalidcharacters':
return 'API Error 0009: Invalid characters in address: '+ toAddress
if status == 'versiontoohigh':
return 'API Error 0010: Address version number too high (or zero) in address: ' + toAddress
if addressVersionNumber != 2:
return 'API Error 0011: the address version number currently must be 2. Others aren\'t supported. Check the toAddress.'
if streamNumber != 1:
return 'API Error 0012: the stream number must be 1. Others aren\'t supported. Check the toAddress.'
status,addressVersionNumber,streamNumber,fromRipe = decodeAddress(fromAddress)
if status <> 'success':
printLock.acquire()
print 'API Error 0007: Could not decode address:', fromAddress, ':', status
printLock.release()
if status == 'checksumfailed':
return 'API Error 0008: Checksum failed for address: ' + fromAddress
if status == 'invalidcharacters':
return 'API Error 0009: Invalid characters in address: '+ fromAddress
if status == 'versiontoohigh':
return 'API Error 0010: Address version number too high (or zero) in address: ' + fromAddress
if addressVersionNumber != 2:
return 'API Error 0011: the address version number currently must be 2. Others aren\'t supported. Check the fromAddress.'
if streamNumber != 1:
return 'API Error 0012: the stream number must be 1. Others aren\'t supported. Check the fromAddress.'
toAddress = addBMIfNotPresent(toAddress)
fromAddress = addBMIfNotPresent(fromAddress)
try:
fromAddressEnabled = config.getboolean(fromAddress,'enabled')
except:
return 'API Error 0013: could not find your fromAddress in the keys.dat file.'
if not fromAddressEnabled:
return 'API Error 0014: your fromAddress is disabled. Cannot send.'
ackdata = OpenSSL.rand(32)
sqlLock.acquire()
t = ('',toAddress,toRipe,fromAddress,subject,message,ackdata,int(time.time()),'findingpubkey',1,1,'sent')
sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
toLabel = ''
t = (toAddress,)
sqlLock.acquire()
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
toLabel, = row
apiSignalQueue.put(('displayNewSentMessage',(toAddress,toLabel,fromAddress,subject,message,ackdata)))
workerQueue.put(('sendmessage',toAddress))
return ackdata.encode('hex')
elif method == 'sendBroadcast':
if len(params) == 0:
return 'API Error 0000: I need parameters!'
if len(params) == 3:
fromAddress, subject, message = params
encodingType = 2
elif len(params) == 4:
fromAddress, subject, message, encodingType = params
if encodingType != 2:
return 'API Error 0006: The encoding type must be 2 because that is the only one this program currently supports.'
subject = subject.decode('base64')
message = message.decode('base64')
status,addressVersionNumber,streamNumber,fromRipe = decodeAddress(fromAddress)
if status <> 'success':
printLock.acquire()
print 'API Error 0007: Could not decode address:', fromAddress, ':', status
printLock.release()
if status == 'checksumfailed':
return 'API Error 0008: Checksum failed for address: ' + fromAddress
if status == 'invalidcharacters':
return 'API Error 0009: Invalid characters in address: '+ fromAddress
if status == 'versiontoohigh':
return 'API Error 0010: Address version number too high (or zero) in address: ' + fromAddress
if addressVersionNumber != 2:
return 'API Error 0011: the address version number currently must be 2. Others aren\'t supported. Check the fromAddress.'
if streamNumber != 1:
return 'API Error 0012: the stream number must be 1. Others aren\'t supported. Check the fromAddress.'
fromAddress = addBMIfNotPresent(fromAddress)
try:
fromAddressEnabled = config.getboolean(fromAddress,'enabled')
except:
return 'API Error 0013: could not find your fromAddress in the keys.dat file.'
ackdata = OpenSSL.rand(32)
toAddress = '[Broadcast subscribers]'
ripe = ''
sqlLock.acquire()
t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'broadcastpending',1,1,'sent')
sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
toLabel = '[Broadcast subscribers]'
apiSignalQueue.put(('displayNewSentMessage',(toAddress,toLabel,fromAddress,subject,message,ackdata)))
workerQueue.put(('sendbroadcast',(fromAddress,subject,message)))
return ackdata.encode('hex')
else:
return 'Invalid Method: %s'%method
#This thread, of which there is only one, runs the API.
class singleAPI(QThread):
    """Background thread (one instance only) that hosts the XML-RPC API server."""
    def __init__(self, parent=None):
        QThread.__init__(self, parent)
    def run(self):
        # Bind to the interface and port configured in keys.dat, then block forever
        # serving requests through MySimpleXMLRPCRequestHandler.
        listenHost = config.get('bitmessagesettings', 'apiinterface')
        listenPort = config.getint('bitmessagesettings', 'apiport')
        server = SimpleXMLRPCServer((listenHost, listenPort), MySimpleXMLRPCRequestHandler, True, True)
        server.register_introspection_functions()
        server.serve_forever()
#The MySimpleXMLRPCRequestHandler class cannot emit signals (or at least I don't know how) because it is not a QT thread. It therefore puts data in a queue which this thread monitors and emits the signals on its behalf.
class singleAPISignalHandler(QThread):
    """Relays GUI requests coming from the XML-RPC handler threads.

    The request handler is not a Qt thread and cannot emit signals itself,
    so it pushes (command, data) tuples onto apiSignalQueue; this thread
    drains that queue forever and emits the matching Qt signals.
    """
    def __init__(self, parent=None):
        QThread.__init__(self, parent)
    def run(self):
        while True:
            command, data = apiSignalQueue.get()
            if command == 'displayNewSentMessage':
                toAddress, toLabel, fromAddress, subject, message, ackdata = data
                self.emit(SIGNAL("displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), toAddress, toLabel, fromAddress, subject, message, ackdata)
            elif command == 'updateStatusBar':
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"), data)
            elif command == 'createRandomAddress':
                label, eighteenByteRipe = data
                # Random addresses are always version 2 on stream 1.
                generator = addressGenerator()
                generator.setup(2, 1, label, 1, "", eighteenByteRipe)
                self.addressGenerator = generator
                # Hand the generator to the main window so it can hook up its signals
                # before the work starts.
                self.emit(SIGNAL("passAddressGeneratorObjectThrough(PyQt_PyObject)"), generator)
                generator.start()
            elif command == 'createDeterministicAddresses':
                passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe = data
                generator = addressGenerator()
                generator.setup(addressVersionNumber, streamNumber, 'unused API address', numberOfAddresses, passphrase, eighteenByteRipe)
                self.addressGenerator = generator
                self.emit(SIGNAL("passAddressGeneratorObjectThrough(PyQt_PyObject)"), generator)
                generator.start()
class iconGlossaryDialog(QtGui.QDialog):
    """Dialog explaining the connection-status icons; also shows the TCP port in use."""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_iconGlossaryDialog()
        self.ui.setupUi(self)
        self.parent = parent
        portMessage = 'You are using TCP port ' + str(config.getint('bitmessagesettings', 'port')) + '. (This can be changed in the settings).'
        self.ui.labelPortNumber.setText(portMessage)
        QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class helpDialog(QtGui.QDialog):
    """Dialog that points the user at the online help resources."""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_helpDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Make the help URI clickable so it opens in the default browser.
        self.ui.labelHelpURI.setOpenExternalLinks(True)
        QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class aboutDialog(QtGui.QDialog):
    """The Help -> About dialog; displays the running software version."""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_aboutDialog()
        self.ui.setupUi(self)
        self.parent = parent
        self.ui.labelVersion.setText('version ' + softwareVersion)
class regenerateAddressesDialog(QtGui.QDialog):
    """Dialog collecting the passphrase/parameters used to regenerate deterministic addresses."""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_regenerateAddressesDialog()
        self.ui.setupUi(self)
        self.parent = parent
        QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class settingsDialog(QtGui.QDialog):
    """The Settings dialog.

    Populates every widget from the current values in keys.dat (via the
    global `config` parser) and disables options that are not applicable
    on the current platform or while a SOCKS proxy is selected.
    """
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_settingsDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # User-interface tab: mirror the boolean settings from keys.dat.
        self.ui.checkBoxStartOnLogon.setChecked(config.getboolean('bitmessagesettings', 'startonlogon'))
        self.ui.checkBoxMinimizeToTray.setChecked(config.getboolean('bitmessagesettings', 'minimizetotray'))
        self.ui.checkBoxShowTrayNotifications.setChecked(config.getboolean('bitmessagesettings', 'showtraynotifications'))
        self.ui.checkBoxStartInTray.setChecked(config.getboolean('bitmessagesettings', 'startintray'))
        # An empty appdata path means the program keeps its data next to the
        # executable, i.e. it is running in portable mode.
        if appdata == '':
            self.ui.checkBoxPortableMode.setChecked(True)
        # Gray out options that are not implemented (or not applicable) on
        # this operating system.
        if 'darwin' in sys.platform:
            self.ui.checkBoxStartOnLogon.setDisabled(True)
            self.ui.checkBoxMinimizeToTray.setDisabled(True)
            self.ui.checkBoxShowTrayNotifications.setDisabled(True)
            self.ui.checkBoxStartInTray.setDisabled(True)
            self.ui.labelSettingsNote.setText('Options have been disabled because they either arn\'t applicable or because they haven\'t yet been implimented for your operating system.')
        elif 'linux' in sys.platform:
            self.ui.checkBoxStartOnLogon.setDisabled(True)
            self.ui.checkBoxMinimizeToTray.setDisabled(True)
            self.ui.checkBoxStartInTray.setDisabled(True)
            self.ui.labelSettingsNote.setText('Options have been disabled because they either arn\'t applicable or because they haven\'t yet been implimented for your operating system.')
        #On the Network settings tab:
        self.ui.lineEditTCPPort.setText(str(config.get('bitmessagesettings', 'port')))
        self.ui.checkBoxAuthentication.setChecked(config.getboolean('bitmessagesettings', 'socksauthentication'))
        # Select the proxy-type combo entry matching the saved setting and
        # enable/disable the dependent fields to match; with a proxy active
        # the local TCP listening port is not used.
        if str(config.get('bitmessagesettings', 'socksproxytype')) == 'none':
            self.ui.comboBoxProxyType.setCurrentIndex(0)
            self.ui.lineEditSocksHostname.setEnabled(False)
            self.ui.lineEditSocksPort.setEnabled(False)
            self.ui.lineEditSocksUsername.setEnabled(False)
            self.ui.lineEditSocksPassword.setEnabled(False)
            self.ui.checkBoxAuthentication.setEnabled(False)
        elif str(config.get('bitmessagesettings', 'socksproxytype')) == 'SOCKS4a':
            self.ui.comboBoxProxyType.setCurrentIndex(1)
            self.ui.lineEditTCPPort.setEnabled(False)
        elif str(config.get('bitmessagesettings', 'socksproxytype')) == 'SOCKS5':
            self.ui.comboBoxProxyType.setCurrentIndex(2)
            self.ui.lineEditTCPPort.setEnabled(False)
        self.ui.lineEditSocksHostname.setText(str(config.get('bitmessagesettings', 'sockshostname')))
        self.ui.lineEditSocksPort.setText(str(config.get('bitmessagesettings', 'socksport')))
        self.ui.lineEditSocksUsername.setText(str(config.get('bitmessagesettings', 'socksusername')))
        self.ui.lineEditSocksPassword.setText(str(config.get('bitmessagesettings', 'sockspassword')))
        QtCore.QObject.connect(self.ui.comboBoxProxyType, QtCore.SIGNAL("currentIndexChanged(int)"), self.comboBoxProxyTypeChanged)
        QtGui.QWidget.resize(self,QtGui.QWidget.sizeHint(self))
    def comboBoxProxyTypeChanged(self,comboBoxIndex):
        """Enable/disable the SOCKS fields when the proxy-type selection changes.

        Index 0 is 'none'; indexes 1 and 2 are SOCKS4a and SOCKS5.
        """
        if comboBoxIndex == 0:
            self.ui.lineEditSocksHostname.setEnabled(False)
            self.ui.lineEditSocksPort.setEnabled(False)
            self.ui.lineEditSocksUsername.setEnabled(False)
            self.ui.lineEditSocksPassword.setEnabled(False)
            self.ui.checkBoxAuthentication.setEnabled(False)
            self.ui.lineEditTCPPort.setEnabled(True)
        elif comboBoxIndex == 1 or comboBoxIndex == 2:
            self.ui.lineEditSocksHostname.setEnabled(True)
            self.ui.lineEditSocksPort.setEnabled(True)
            self.ui.checkBoxAuthentication.setEnabled(True)
            # Username/password only matter when authentication is on.
            if self.ui.checkBoxAuthentication.isChecked():
                self.ui.lineEditSocksUsername.setEnabled(True)
                self.ui.lineEditSocksPassword.setEnabled(True)
            self.ui.lineEditTCPPort.setEnabled(False)
class SpecialAddressBehaviorDialog(QtGui.QDialog):
    """Dialog for toggling mailing-list behavior on the currently selected identity."""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_SpecialAddressBehaviorDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Pre-select the radio button matching the 'mailinglist' flag of the
        # address currently selected on the Your Identities tab.
        selectedRow = parent.ui.tableWidgetYourIdentities.currentRow()
        selectedAddress = str(parent.ui.tableWidgetYourIdentities.item(selectedRow, 1).text())
        if safeConfigGetBoolean(selectedAddress, 'mailinglist'):
            self.ui.radioButtonBehaviorMailingList.click()
        else:
            self.ui.radioButtonBehaveNormalAddress.click()
        # The mailing-list name is optional in keys.dat; default to empty.
        try:
            mailingListName = config.get(selectedAddress, 'mailinglistname')
        except:
            mailingListName = ''
        self.ui.lineEditMailingListName.setText(unicode(mailingListName, 'utf-8'))
        QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class NewSubscriptionDialog(QtGui.QDialog):
    """Dialog for subscribing to a broadcast address.

    Re-validates the address on every keystroke and reports the result
    in labelSubscriptionAddressCheck.
    """
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_NewSubscriptionDialog()
        self.ui.setupUi(self)
        self.parent = parent
        QtCore.QObject.connect(self.ui.lineEditSubscriptionAddress, QtCore.SIGNAL("textChanged(QString)"), self.subscriptionAddressChanged)
    def subscriptionAddressChanged(self, QString):
        """Validate the typed address and update the feedback label."""
        status, a, b, c = decodeAddress(str(QString))
        if status == 'missingbm':
            # Bug fix: the original literal used doubled single-quotes
            # (''BM-''), which Python parses as adjacent-string concatenation,
            # so the intended quotes around BM- never appeared in the message.
            self.ui.labelSubscriptionAddressCheck.setText("The address should start with 'BM-'")
        elif status == 'checksumfailed':
            self.ui.labelSubscriptionAddressCheck.setText('The address is not typed or copied correctly (the checksum failed).')
        elif status == 'versiontoohigh':
            self.ui.labelSubscriptionAddressCheck.setText('The version number of this address is higher than this software can support. Please upgrade Bitmessage.')
        elif status == 'invalidcharacters':
            self.ui.labelSubscriptionAddressCheck.setText('The address contains invalid characters.')
        elif status == 'success':
            self.ui.labelSubscriptionAddressCheck.setText('Address is valid.')
class NewAddressDialog(QtGui.QDialog):
    """Dialog for creating a new address, either random or deterministic."""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_NewAddressDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Fill the 'existing address' combo box with the addresses shown on
        # the Your Identities tab; if there is at least one, pre-select the
        # 'existing' radio button.
        row = 1
        while self.parent.ui.tableWidgetYourIdentities.item(row - 1, 1):
            self.ui.radioButtonExisting.click()
            self.ui.comboBoxExisting.addItem(self.parent.ui.tableWidgetYourIdentities.item(row - 1, 1).text())
            row += 1
        self.ui.groupBoxDeterministic.setHidden(True)
        QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class MyForm(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
#Ask the user if we may delete their old version 1 addresses if they have any.
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
if addressVersionNumber == 1:
displayMsg = "One of your addresses, "+addressInKeysFile+", is an old version 1 address. Version 1 addresses are no longer supported. May we delete it now?"
reply = QtGui.QMessageBox.question(self, 'Message',displayMsg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
config.remove_section(addressInKeysFile)
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
#Configure Bitmessage to start on startup (or remove the configuration) based on the setting in the keys.dat file
if 'win32' in sys.platform or 'win64' in sys.platform:
#Auto-startup for Windows
RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
self.settings = QSettings(RUN_PATH, QSettings.NativeFormat)
self.settings.remove("PyBitmessage") #In case the user moves the program and the registry entry is no longer valid, this will delete the old registry entry.
if config.getboolean('bitmessagesettings', 'startonlogon'):
self.settings.setValue("PyBitmessage",sys.argv[0])
elif 'darwin' in sys.platform:
#startup for mac
pass
elif 'linux' in sys.platform:
#startup for linux
pass
self.trayIcon = QtGui.QSystemTrayIcon(self)
self.trayIcon.setIcon( QtGui.QIcon(':/newPrefix/images/can-icon-16px.png') )
traySignal = "activated(QSystemTrayIcon::ActivationReason)"
QtCore.QObject.connect(self.trayIcon, QtCore.SIGNAL(traySignal), self.__icon_activated)
menu = QtGui.QMenu()
self.exitAction = menu.addAction("Exit", self.close)
self.trayIcon.setContextMenu(menu)
#I'm currently under the impression that Mac users have different expectations for the tray icon. They don't necessairly expect it to open the main window when clicked and they still expect a program showing a tray icon to also be in the dock.
if 'darwin' in sys.platform:
self.trayIcon.show()
#FILE MENU and other buttons
QtCore.QObject.connect(self.ui.actionExit, QtCore.SIGNAL("triggered()"), self.close)
QtCore.QObject.connect(self.ui.actionManageKeys, QtCore.SIGNAL("triggered()"), self.click_actionManageKeys)
QtCore.QObject.connect(self.ui.actionRegenerateDeterministicAddresses, QtCore.SIGNAL("triggered()"), self.click_actionRegenerateDeterministicAddresses)
QtCore.QObject.connect(self.ui.pushButtonNewAddress, QtCore.SIGNAL("clicked()"), self.click_NewAddressDialog)
QtCore.QObject.connect(self.ui.comboBoxSendFrom, QtCore.SIGNAL("activated(int)"),self.redrawLabelFrom)
QtCore.QObject.connect(self.ui.pushButtonAddAddressBook, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddAddressBook)
QtCore.QObject.connect(self.ui.pushButtonAddSubscription, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddSubscription)
QtCore.QObject.connect(self.ui.pushButtonAddBlacklist, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddBlacklist)
QtCore.QObject.connect(self.ui.pushButtonSend, QtCore.SIGNAL("clicked()"), self.click_pushButtonSend)
QtCore.QObject.connect(self.ui.pushButtonLoadFromAddressBook, QtCore.SIGNAL("clicked()"), self.click_pushButtonLoadFromAddressBook)
QtCore.QObject.connect(self.ui.radioButtonBlacklist, QtCore.SIGNAL("clicked()"), self.click_radioButtonBlacklist)
QtCore.QObject.connect(self.ui.radioButtonWhitelist, QtCore.SIGNAL("clicked()"), self.click_radioButtonWhitelist)
QtCore.QObject.connect(self.ui.pushButtonStatusIcon, QtCore.SIGNAL("clicked()"), self.click_pushButtonStatusIcon)
QtCore.QObject.connect(self.ui.actionSettings, QtCore.SIGNAL("triggered()"), self.click_actionSettings)
QtCore.QObject.connect(self.ui.actionAbout, QtCore.SIGNAL("triggered()"), self.click_actionAbout)
QtCore.QObject.connect(self.ui.actionHelp, QtCore.SIGNAL("triggered()"), self.click_actionHelp)
#Popup menu for the Inbox tab
self.ui.inboxContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionReply = self.ui.inboxContextMenuToolbar.addAction("Reply", self.on_action_InboxReply)
self.actionAddSenderToAddressBook = self.ui.inboxContextMenuToolbar.addAction("Add sender to your Address Book", self.on_action_InboxAddSenderToAddressBook)
self.actionTrashInboxMessage = self.ui.inboxContextMenuToolbar.addAction("Move to Trash", self.on_action_InboxTrash)
self.ui.tableWidgetInbox.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetInbox, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuInbox)
self.popMenuInbox = QtGui.QMenu( self )
self.popMenuInbox.addAction( self.actionReply )
self.popMenuInbox.addAction( self.actionAddSenderToAddressBook )
self.popMenuInbox.addSeparator()
self.popMenuInbox.addAction( self.actionTrashInboxMessage )
#Popup menu for the Your Identities tab
self.ui.addressContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionNew = self.ui.addressContextMenuToolbar.addAction("New", self.on_action_YourIdentitiesNew)
self.actionEnable = self.ui.addressContextMenuToolbar.addAction("Enable", self.on_action_YourIdentitiesEnable)
self.actionDisable = self.ui.addressContextMenuToolbar.addAction("Disable", self.on_action_YourIdentitiesDisable)
self.actionClipboard = self.ui.addressContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_YourIdentitiesClipboard)
self.actionSpecialAddressBehavior = self.ui.addressContextMenuToolbar.addAction("Special address behavior...", self.on_action_SpecialAddressBehaviorDialog)
self.ui.tableWidgetYourIdentities.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetYourIdentities, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuYourIdentities)
self.popMenu = QtGui.QMenu( self )
self.popMenu.addAction( self.actionNew )
self.popMenu.addSeparator()
self.popMenu.addAction( self.actionClipboard )
self.popMenu.addSeparator()
self.popMenu.addAction( self.actionEnable )
self.popMenu.addAction( self.actionDisable )
self.popMenu.addAction( self.actionSpecialAddressBehavior )
#Popup menu for the Address Book page
self.ui.addressBookContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionAddressBookSend = self.ui.addressBookContextMenuToolbar.addAction("Send message to this address", self.on_action_AddressBookSend)
self.actionAddressBookClipboard = self.ui.addressBookContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_AddressBookClipboard)
self.actionAddressBookNew = self.ui.addressBookContextMenuToolbar.addAction("Add New Address", self.on_action_AddressBookNew)
self.actionAddressBookDelete = self.ui.addressBookContextMenuToolbar.addAction("Delete", self.on_action_AddressBookDelete)
self.ui.tableWidgetAddressBook.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetAddressBook, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuAddressBook)
self.popMenuAddressBook = QtGui.QMenu( self )
self.popMenuAddressBook.addAction( self.actionAddressBookSend )
self.popMenuAddressBook.addAction( self.actionAddressBookClipboard )
self.popMenuAddressBook.addSeparator()
self.popMenuAddressBook.addAction( self.actionAddressBookNew )
self.popMenuAddressBook.addAction( self.actionAddressBookDelete )
#Popup menu for the Subscriptions page
self.ui.subscriptionsContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionsubscriptionsNew = self.ui.subscriptionsContextMenuToolbar.addAction("New", self.on_action_SubscriptionsNew)
self.actionsubscriptionsDelete = self.ui.subscriptionsContextMenuToolbar.addAction("Delete", self.on_action_SubscriptionsDelete)
self.actionsubscriptionsClipboard = self.ui.subscriptionsContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_SubscriptionsClipboard)
self.ui.tableWidgetSubscriptions.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetSubscriptions, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuSubscriptions)
self.popMenuSubscriptions = QtGui.QMenu( self )
self.popMenuSubscriptions.addAction( self.actionsubscriptionsNew )
self.popMenuSubscriptions.addAction( self.actionsubscriptionsDelete )
self.popMenuSubscriptions.addSeparator()
self.popMenuSubscriptions.addAction( self.actionsubscriptionsClipboard )
#Popup menu for the Sent page
self.ui.sentContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionTrashSentMessage = self.ui.sentContextMenuToolbar.addAction("Move to Trash", self.on_action_SentTrash)
self.actionSentClipboard = self.ui.sentContextMenuToolbar.addAction("Copy destination address to clipboard", self.on_action_SentClipboard)
self.ui.tableWidgetSent.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetSent, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuSent)
self.popMenuSent = QtGui.QMenu( self )
self.popMenuSent.addAction( self.actionSentClipboard )
self.popMenuSent.addAction( self.actionTrashSentMessage )
#Popup menu for the Blacklist page
self.ui.blacklistContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionBlacklistNew = self.ui.blacklistContextMenuToolbar.addAction("Add new entry", self.on_action_BlacklistNew)
self.actionBlacklistDelete = self.ui.blacklistContextMenuToolbar.addAction("Delete", self.on_action_BlacklistDelete)
self.actionBlacklistClipboard = self.ui.blacklistContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_BlacklistClipboard)
self.actionBlacklistEnable = self.ui.blacklistContextMenuToolbar.addAction("Enable", self.on_action_BlacklistEnable)
self.actionBlacklistDisable = self.ui.blacklistContextMenuToolbar.addAction("Disable", self.on_action_BlacklistDisable)
self.ui.tableWidgetBlacklist.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetBlacklist, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuBlacklist)
self.popMenuBlacklist = QtGui.QMenu( self )
#self.popMenuBlacklist.addAction( self.actionBlacklistNew )
self.popMenuBlacklist.addAction( self.actionBlacklistDelete )
self.popMenuBlacklist.addSeparator()
self.popMenuBlacklist.addAction( self.actionBlacklistClipboard )
self.popMenuBlacklist.addSeparator()
self.popMenuBlacklist.addAction( self.actionBlacklistEnable )
self.popMenuBlacklist.addAction( self.actionBlacklistDisable )
#Initialize the user's list of addresses on the 'Your Identities' tab.
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
isEnabled = config.getboolean(addressInKeysFile, 'enabled')
newItem = QtGui.QTableWidgetItem(unicode(config.get(addressInKeysFile, 'label'),'utf-8)'))
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetYourIdentities.insertRow(0)
self.ui.tableWidgetYourIdentities.setItem(0, 0, newItem)
newItem = QtGui.QTableWidgetItem(addressInKeysFile)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
if safeConfigGetBoolean(addressInKeysFile,'mailinglist'):
newItem.setTextColor(QtGui.QColor(137,04,177))#magenta
self.ui.tableWidgetYourIdentities.setItem(0, 1, newItem)
newItem = QtGui.QTableWidgetItem(str(addressStream(addressInKeysFile)))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetYourIdentities.setItem(0, 2, newItem)
if isEnabled:
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
self.sqlLookup = sqlThread()
self.sqlLookup.start()
reloadMyAddressHashes()
self.reloadBroadcastSendersForWhichImWatching()
#Load inbox from messages database file
sqlSubmitQueue.put('''SELECT msgid, toaddress, fromaddress, subject, received, message FROM inbox where folder='inbox' ORDER BY received''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
msgid, toAddress, fromAddress, subject, received, message, = row
try:
if toAddress == '[Broadcast subscribers]':
toLabel = '[Broadcast subscribers]'
else:
toLabel = config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
fromLabel = ''
t = (fromAddress,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
if queryreturn <> []:
for row in queryreturn:
fromLabel, = row
self.ui.tableWidgetInbox.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
newItem.setData(Qt.UserRole,str(toAddress))
if safeConfigGetBoolean(toAddress,'mailinglist'):
newItem.setTextColor(QtGui.QColor(137,04,177))
self.ui.tableWidgetInbox.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
newItem.setData(Qt.UserRole,str(fromAddress))
self.ui.tableWidgetInbox.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetInbox.setItem(0,2,newItem)
newItem = myTableWidgetItem(strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(received))))
newItem.setData(Qt.UserRole,QByteArray(msgid))
newItem.setData(33,int(received))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetInbox.setItem(0,3,newItem)
#self.ui.textEditInboxMessage.setText(self.ui.tableWidgetInbox.item(0,2).data(Qt.UserRole).toPyObject())
#Load Sent items from database
sqlSubmitQueue.put('''SELECT toaddress, fromaddress, subject, message, status, ackdata, lastactiontime FROM sent where folder = 'sent' ORDER BY lastactiontime''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
toAddress, fromAddress, subject, message, status, ackdata, lastactiontime = row
try:
fromLabel = config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress
toLabel = ''
t = (toAddress,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
if queryreturn <> []:
for row in queryreturn:
toLabel, = row
self.ui.tableWidgetSent.insertRow(0)
if toLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(toAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(toAddress))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(fromAddress))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,2,newItem)
if status == 'findingpubkey':
newItem = myTableWidgetItem('Waiting on their public key. Will request it again soon.')
elif status == 'sentmessage':
newItem = myTableWidgetItem('Message sent. Waiting on acknowledgement. Sent at ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(lastactiontime)))
elif status == 'doingpow':
newItem = myTableWidgetItem('Need to do work to send message. Work is queued.')
elif status == 'ackreceived':
newItem = myTableWidgetItem('Acknowledgement of the message received ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
elif status == 'broadcastpending':
newItem = myTableWidgetItem('Doing the work necessary to send broadcast...')
elif status == 'broadcastsent':
newItem = myTableWidgetItem('Broadcast on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
else:
newItem = myTableWidgetItem('Unknown status. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
newItem.setData(Qt.UserRole,QByteArray(ackdata))
newItem.setData(33,int(lastactiontime))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,3,newItem)
#Initialize the address book
sqlSubmitQueue.put('SELECT * FROM addressbook')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
label, address = row
self.ui.tableWidgetAddressBook.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
#Initialize the Subscriptions
sqlSubmitQueue.put('SELECT label, address FROM subscriptions')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
label, address = row
self.ui.tableWidgetSubscriptions.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
self.ui.tableWidgetSubscriptions.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSubscriptions.setItem(0,1,newItem)
#Initialize the Blacklist or Whitelist
if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
self.loadBlackWhiteList()
else:
self.ui.tabWidget.setTabText(6,'Whitelist')
self.ui.radioButtonWhitelist.click()
self.loadBlackWhiteList()
#Initialize the ackdataForWhichImWatching data structure using data from the sql database.
sqlSubmitQueue.put('''SELECT ackdata FROM sent where (status='sentmessage' OR status='doingpow')''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
ackdata, = row
print 'Watching for ackdata', ackdata.encode('hex')
ackdataForWhichImWatching[ackdata] = 0
QtCore.QObject.connect(self.ui.tableWidgetYourIdentities, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetYourIdentitiesItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetAddressBook, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetAddressBookItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetSubscriptions, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetSubscriptionsItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetInbox, QtCore.SIGNAL("itemSelectionChanged ()"), self.tableWidgetInboxItemClicked)
QtCore.QObject.connect(self.ui.tableWidgetSent, QtCore.SIGNAL("itemSelectionChanged ()"), self.tableWidgetSentItemClicked)
#Put the colored icon on the status bar
#self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/yellowicon.png"))
self.statusbar = self.statusBar()
self.statusbar.insertPermanentWidget(0,self.ui.pushButtonStatusIcon)
self.ui.labelStartupTime.setText('Since startup on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
self.numberOfMessagesProcessed = 0
self.numberOfBroadcastsProcessed = 0
self.numberOfPubkeysProcessed = 0
#Below this point, it would be good if all of the necessary global data structures were initialized.
self.rerenderComboBoxSendFrom()
self.listOfOutgoingSynSenderThreads = [] #if we don't maintain this list, the threads will get garbage-collected.
self.connectToStream(1)
self.singleListenerThread = singleListener()
self.singleListenerThread.start()
QtCore.QObject.connect(self.singleListenerThread, QtCore.SIGNAL("passObjectThrough(PyQt_PyObject)"), self.connectObjectToSignals)
self.singleCleanerThread = singleCleaner()
self.singleCleanerThread.start()
QtCore.QObject.connect(self.singleCleanerThread, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
QtCore.QObject.connect(self.singleCleanerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
self.workerThread = singleWorker()
self.workerThread.start()
QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByAckdata)
QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
if safeConfigGetBoolean('bitmessagesettings','apienabled'):
try:
apiNotifyPath = config.get('bitmessagesettings','apinotifypath')
except:
apiNotifyPath = ''
if apiNotifyPath != '':
printLock.acquire()
print 'Trying to call', apiNotifyPath
printLock.release()
call([apiNotifyPath, "startingUp"])
self.singleAPIThread = singleAPI()
self.singleAPIThread.start()
self.singleAPISignalHandlerThread = singleAPISignalHandler()
self.singleAPISignalHandlerThread.start()
QtCore.QObject.connect(self.singleAPISignalHandlerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
QtCore.QObject.connect(self.singleAPISignalHandlerThread, QtCore.SIGNAL("passAddressGeneratorObjectThrough(PyQt_PyObject)"), self.connectObjectToAddressGeneratorSignals)
QtCore.QObject.connect(self.singleAPISignalHandlerThread, QtCore.SIGNAL("displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayNewSentMessage)
    def click_actionManageKeys(self):
        """Menu handler: tell the user where keys.dat lives.

        On Windows, additionally offer to open the file; on Mac/Linux the
        dialog is informational only.
        """
        # NOTE(review): the darwin/linux branch uses the bare name
        # QMessageBox.Ok rather than QtGui.QMessageBox.Ok -- this only works
        # if the file star-imports PyQt4.QtGui somewhere above; confirm.
        if 'darwin' in sys.platform or 'linux' in sys.platform:
            if appdata == '':
                # Portable mode: keys.dat is next to the program itself.
                reply = QtGui.QMessageBox.information(self, 'keys.dat?','You may manage your keys by editing the keys.dat file stored in the same directory as this program. It is important that you back up this file.', QMessageBox.Ok)
            else:
                QtGui.QMessageBox.information(self, 'keys.dat?','You may manage your keys by editing the keys.dat file stored in\n' + appdata + '\nIt is important that you back up this file.', QMessageBox.Ok)
        elif sys.platform == 'win32' or sys.platform == 'win64':
            if appdata == '':
                reply = QtGui.QMessageBox.question(self, 'Open keys.dat?','You may manage your keys by editing the keys.dat file stored in the same directory as this program. It is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
            else:
                reply = QtGui.QMessageBox.question(self, 'Open keys.dat?','You may manage your keys by editing the keys.dat file stored in\n' + appdata + '\nIt is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
            if reply == QtGui.QMessageBox.Yes:
                self.openKeysFile()
    def click_actionRegenerateDeterministicAddresses(self):
        """Show the 'regenerate deterministic addresses' dialog and, if it is
        accepted with a non-empty passphrase, start an addressGenerator
        thread that re-derives the addresses from that passphrase.
        """
        self.regenerateAddressesDialogInstance = regenerateAddressesDialog(self)
        if self.regenerateAddressesDialogInstance.exec_():
            if self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text() == "":
                QMessageBox.about(self, "bad passphrase", "You must type your passphrase. If you don\'t have one then this is not the form for you.")
            else:
                # Stream and version numbers are taken verbatim from the dialog's
                # line edits; int() will raise on non-numeric input.
                streamNumberForAddress = int(self.regenerateAddressesDialogInstance.ui.lineEditStreamNumber.text())
                addressVersionNumber = int(self.regenerateAddressesDialogInstance.ui.lineEditAddressVersionNumber.text())
                self.addressGenerator = addressGenerator()
                # The label "unused address" is a placeholder; deterministic
                # addresses are derived from the passphrase, not the label.
                self.addressGenerator.setup(addressVersionNumber,streamNumberForAddress,"unused address",self.regenerateAddressesDialogInstance.ui.spinBoxNumberOfAddressesToMake.value(),self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text().toUtf8(),self.regenerateAddressesDialogInstance.ui.checkBoxEighteenByteRipe.isChecked())
                # Connect the generator's signals before starting the thread so
                # no early emission is missed.
                QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
                QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
                self.addressGenerator.start()
                # Switch to the 'Your Identities' tab so the user sees the
                # regenerated addresses appear.
                self.ui.tabWidget.setCurrentIndex(3)
def openKeysFile(self):
if 'linux' in sys.platform:
subprocess.call(["xdg-open", appdata + 'keys.dat'])
else:
os.startfile(appdata + 'keys.dat')
    def changeEvent(self, event):
        """Qt change-event hook implementing minimize-to-tray.

        Only active when the 'minimizetotray' setting is on and we are not
        on OS X (the `not 'darwin'` check excludes the Mac, where tray
        handling differs -- see __icon_activated).
        """
        if config.getboolean('bitmessagesettings', 'minimizetotray') and not 'darwin' in sys.platform:
            if event.type() == QtCore.QEvent.WindowStateChange:
                if self.windowState() & QtCore.Qt.WindowMinimized:
                    # Minimized: hide the main window and show the tray icon.
                    self.hide()
                    self.trayIcon.show()
                    #self.hidden = True
                    if 'win32' in sys.platform or 'win64' in sys.platform:
                        # NOTE(review): setting the ToolTip flag here appears
                        # intended to keep the hidden window out of the Windows
                        # taskbar; __icon_activated restores Qt.Window -- confirm.
                        self.setWindowFlags(Qt.ToolTip)
                elif event.oldState() & QtCore.Qt.WindowMinimized:
                    #The window state has just been changed to Normal/Maximised/FullScreen
                    pass
        #QtGui.QWidget.changeEvent(self, event)
    def __icon_activated(self, reason):
        """Tray-icon activation handler: on a plain click (Trigger), restore
        the main window using the platform-appropriate sequence.
        """
        if reason == QtGui.QSystemTrayIcon.Trigger:
            if 'linux' in sys.platform:
                self.trayIcon.hide()
                # Restore normal window flags (changeEvent may have altered them).
                self.setWindowFlags(Qt.Window)
                self.show()
            elif 'win32' in sys.platform or 'win64' in sys.platform:
                self.trayIcon.hide()
                self.setWindowFlags(Qt.Window)
                self.show()
                # Clear the minimized bit and mark the window active so it
                # actually comes to the foreground.
                self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
                self.activateWindow()
            elif 'darwin' in sys.platform:
                # On OS X only the window-state dance is performed; hiding the
                # tray icon crashes (see comment below).
                #self.trayIcon.hide() #this line causes a segmentation fault
                #self.setWindowFlags(Qt.Window)
                #self.show()
                self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
                self.activateWindow()
def incrementNumberOfMessagesProcessed(self):
self.numberOfMessagesProcessed += 1
self.ui.labelMessageCount.setText('Processed ' + str(self.numberOfMessagesProcessed) + ' person-to-person messages.')
def incrementNumberOfBroadcastsProcessed(self):
self.numberOfBroadcastsProcessed += 1
self.ui.labelBroadcastCount.setText('Processed ' + str(self.numberOfBroadcastsProcessed) + ' broadcast messages.')
def incrementNumberOfPubkeysProcessed(self):
self.numberOfPubkeysProcessed += 1
self.ui.labelPubkeyCount.setText('Processed ' + str(self.numberOfPubkeysProcessed) + ' public keys.')
    def updateNetworkStatusTab(self,streamNumber,connectionCount):
        """Update the per-stream connection count on the Network Status tab
        and adjust the status icon (red <-> yellow) based on the total.

        The icon is only promoted from red to yellow here; promotion to green
        happens elsewhere (see the comment below about 'singlelistener').
        """
        global statusIconColor
        #print 'updating network status tab'
        totalNumberOfConnectionsFromAllStreams = 0 #One would think we could use len(sendDataQueues) for this, but sendData threads don't remove themselves from sendDataQueues fast enough for len(sendDataQueues) to be accurate here.
        for currentRow in range(self.ui.tableWidgetConnectionCount.rowCount()):
            rowStreamNumber = int(self.ui.tableWidgetConnectionCount.item(currentRow,0).text())
            if streamNumber == rowStreamNumber:
                # This is the row for the stream being reported; update it.
                self.ui.tableWidgetConnectionCount.item(currentRow,1).setText(str(connectionCount))
            # NOTE(review): only the matching row's *new* count is included;
            # other rows contribute nothing to the total below even if their
            # displayed counts are non-zero -- confirm whether intended.
            totalNumberOfConnectionsFromAllStreams += connectionCount
        self.ui.labelTotalConnections.setText('Total Connections: ' + str(totalNumberOfConnectionsFromAllStreams))
        if totalNumberOfConnectionsFromAllStreams > 0 and statusIconColor == 'red': #FYI: The 'singlelistener' thread sets the icon color to green when it receives an incoming connection, meaning that the user's firewall is configured correctly.
            self.setStatusIcon('yellow')
        elif totalNumberOfConnectionsFromAllStreams == 0:
            self.setStatusIcon('red')
def setStatusIcon(self,color):
global statusIconColor
#print 'setting status icon color'
if color == 'red':
self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/redicon.png"))
statusIconColor = 'red'
if color == 'yellow':
if self.statusBar().currentMessage() == 'Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.':
self.statusBar().showMessage('')
self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/yellowicon.png"))
statusIconColor = 'yellow'
if color == 'green':
if self.statusBar().currentMessage() == 'Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.':
self.statusBar().showMessage('')
self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/greenicon.png"))
statusIconColor = 'green'
def updateSentItemStatusByHash(self,toRipe,textToDisplay):
for i in range(self.ui.tableWidgetSent.rowCount()):
toAddress = str(self.ui.tableWidgetSent.item(i,0).data(Qt.UserRole).toPyObject())
status,addressVersionNumber,streamNumber,ripe = decodeAddress(toAddress)
if ripe == toRipe:
self.ui.tableWidgetSent.item(i,3).setText(unicode(textToDisplay,'utf-8'))
def updateSentItemStatusByAckdata(self,ackdata,textToDisplay):
for i in range(self.ui.tableWidgetSent.rowCount()):
toAddress = str(self.ui.tableWidgetSent.item(i,0).data(Qt.UserRole).toPyObject())
tableAckdata = self.ui.tableWidgetSent.item(i,3).data(Qt.UserRole).toPyObject()
status,addressVersionNumber,streamNumber,ripe = decodeAddress(toAddress)
if ackdata == tableAckdata:
self.ui.tableWidgetSent.item(i,3).setText(unicode(textToDisplay,'utf-8'))
    def rerenderInboxFromLabels(self):
        """Refresh the 'From' column of the inbox table.

        For each row, the sender's address (stored in the item's UserRole
        data) is looked up first in the addressbook table, then -- if no
        label was found -- in the subscriptions table (broadcast senders).
        SQL access goes through the submit/return queue pair and must be
        serialized with sqlLock.
        """
        for i in range(self.ui.tableWidgetInbox.rowCount()):
            addressToLookup = str(self.ui.tableWidgetInbox.item(i,1).data(Qt.UserRole).toPyObject())
            fromLabel = ''
            t = (addressToLookup,)
            # Protocol: put the query, put its parameters, then block on the
            # return queue -- all under sqlLock so results aren't interleaved.
            sqlLock.acquire()
            sqlSubmitQueue.put('''select label from addressbook where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn <> []:
                for row in queryreturn:
                    fromLabel, = row
                self.ui.tableWidgetInbox.item(i,1).setText(unicode(fromLabel,'utf-8'))
            else:
                #It might be a broadcast message. We should check for that label.
                sqlLock.acquire()
                sqlSubmitQueue.put('''select label from subscriptions where address=?''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                if queryreturn <> []:
                    for row in queryreturn:
                        fromLabel, = row
                    self.ui.tableWidgetInbox.item(i,1).setText(unicode(fromLabel,'utf-8'))
def rerenderInboxToLabels(self):
for i in range(self.ui.tableWidgetInbox.rowCount()):
toAddress = str(self.ui.tableWidgetInbox.item(i,0).data(Qt.UserRole).toPyObject())
try:
toLabel = config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
self.ui.tableWidgetInbox.item(i,0).setText(unicode(toLabel,'utf-8'))
#Set the color according to whether it is the address of a mailing list or not.
if safeConfigGetBoolean(toAddress,'mailinglist'):
self.ui.tableWidgetInbox.item(i,0).setTextColor(QtGui.QColor(137,04,177))
else:
self.ui.tableWidgetInbox.item(i,0).setTextColor(QtGui.QColor(0,0,0))
def rerenderSentFromLabels(self):
for i in range(self.ui.tableWidgetSent.rowCount()):
fromAddress = str(self.ui.tableWidgetSent.item(i,1).data(Qt.UserRole).toPyObject())
try:
fromLabel = config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress
self.ui.tableWidgetSent.item(i,1).setText(unicode(fromLabel,'utf-8'))
def rerenderSentToLabels(self):
for i in range(self.ui.tableWidgetSent.rowCount()):
addressToLookup = str(self.ui.tableWidgetSent.item(i,0).data(Qt.UserRole).toPyObject())
toLabel = ''
t = (addressToLookup,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
if queryreturn <> []:
for row in queryreturn:
toLabel, = row
self.ui.tableWidgetSent.item(i,0).setText(unicode(toLabel,'utf-8'))
    def click_pushButtonSend(self):
        """Handler for the Send button.

        Two modes, selected by the radio buttons:
          * specific recipients -- validate each address in the To field,
            insert a 'findingpubkey' row into the sent table, and queue a
            'sendmessage' job for the worker thread;
          * broadcast -- insert a 'broadcastpending' row and queue a
            'sendbroadcast' job.
        In both modes the compose form is cleared and the Sent tab shown.
        """
        self.statusBar().showMessage('')
        toAddresses = str(self.ui.lineEditTo.text())
        fromAddress = str(self.ui.labelFrom.text())
        subject = str(self.ui.lineEditSubject.text().toUtf8())
        message = str(self.ui.textEditMessage.document().toPlainText().toUtf8())
        if self.ui.radioButtonSpecific.isChecked(): #To send a message to specific people (rather than broadcast)
            # Accept both ',' and ';' as separators by normalizing to ';'.
            toAddressesList = [s.strip() for s in toAddresses.replace(',', ';').split(';')]
            toAddressesList = list(set(toAddressesList)) #remove duplicate addresses. If the user has one address with a BM- and the same address without the BM-, this will not catch it. They'll send the message to the person twice.
            for toAddress in toAddressesList:
                if toAddress <> '':
                    status,addressVersionNumber,streamNumber,ripe = decodeAddress(toAddress)
                    if status <> 'success':
                        # Address failed to decode: report why in the status bar.
                        printLock.acquire()
                        print 'Error: Could not decode', toAddress, ':', status
                        printLock.release()
                        if status == 'missingbm':
                            self.statusBar().showMessage('Error: Bitmessage addresses start with BM- Please check ' + toAddress)
                        if status == 'checksumfailed':
                            self.statusBar().showMessage('Error: The address ' + toAddress+' is not typed or copied correctly. Please check it.')
                        if status == 'invalidcharacters':
                            self.statusBar().showMessage('Error: The address '+ toAddress+ ' contains invalid characters. Please check it.')
                        if status == 'versiontoohigh':
                            self.statusBar().showMessage('Error: The address version in '+ toAddress+ ' is too high. Either you need to upgrade your Bitmessage software or your acquaintance is being clever.')
                    elif fromAddress == '':
                        self.statusBar().showMessage('Error: You must specify a From address. If you don\'t have one, go to the \'Your Identities\' tab.')
                    else:
                        toAddress = addBMIfNotPresent(toAddress)
                        try:
                            # If config.get succeeds, the address is one of ours.
                            config.get(toAddress, 'enabled')
                            #The toAddress is one owned by me. We cannot send messages to ourselves without significant changes to the codebase.
                            QMessageBox.about(self, "Sending to your address", "Error: One of the addresses to which you are sending a message, "+toAddress+", is yours. Unfortunately the Bitmessage client cannot process its own messages. Please try running a second client on a different computer or within a VM.")
                            continue
                        except:
                            # Not our address: proceed normally.
                            pass
                        if addressVersionNumber > 2 or addressVersionNumber == 0:
                            QMessageBox.about(self, "Address version number", "Concerning the address "+toAddress+", Bitmessage cannot understand address version numbers of "+str(addressVersionNumber)+". Perhaps upgrade Bitmessage to the latest version.")
                            continue
                        if streamNumber > 1 or streamNumber == 0:
                            QMessageBox.about(self, "Stream number", "Concerning the address "+toAddress+", Bitmessage cannot handle stream numbers of "+str(streamNumber)+". Perhaps upgrade Bitmessage to the latest version.")
                            continue
                        self.statusBar().showMessage('')
                        try:
                            # Warn (but still queue the send) when not connected.
                            if connectionsCount[streamNumber] == 0:
                                self.statusBar().showMessage('Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.')
                        except:
                            self.statusBar().showMessage('Warning: The address uses a stream number currently not supported by this Bitmessage version. Perhaps upgrade.')
                        # Random ackdata lets us recognize the acknowledgement later.
                        ackdata = OpenSSL.rand(32)
                        sqlLock.acquire()
                        t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'findingpubkey',1,1,'sent')
                        sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
                        sqlSubmitQueue.put(t)
                        sqlReturnQueue.get()
                        sqlLock.release()
                        """try:
                            fromLabel = config.get(fromAddress, 'label')
                        except:
                            fromLabel = ''
                        if fromLabel == '':
                            fromLabel = fromAddress"""
                        # Look up a friendly label for the recipient, if any.
                        toLabel = ''
                        t = (toAddress,)
                        sqlLock.acquire()
                        sqlSubmitQueue.put('''select label from addressbook where address=?''')
                        sqlSubmitQueue.put(t)
                        queryreturn = sqlReturnQueue.get()
                        sqlLock.release()
                        if queryreturn <> []:
                            for row in queryreturn:
                                toLabel, = row
                        self.displayNewSentMessage(toAddress,toLabel,fromAddress, subject, message, ackdata)
                        # Hand the actual sending work to the worker thread.
                        workerQueue.put(('sendmessage',toAddress))
                        """self.ui.tableWidgetSent.insertRow(0)
                        if toLabel == '':
                            newItem = QtGui.QTableWidgetItem(unicode(toAddress,'utf-8'))
                        else:
                            newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
                        newItem.setData(Qt.UserRole,str(toAddress))
                        self.ui.tableWidgetSent.setItem(0,0,newItem)
                        if fromLabel == '':
                            newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
                        else:
                            newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
                        newItem.setData(Qt.UserRole,str(fromAddress))
                        self.ui.tableWidgetSent.setItem(0,1,newItem)
                        newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
                        newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
                        self.ui.tableWidgetSent.setItem(0,2,newItem)
                        newItem = myTableWidgetItem('Just pressed ''send'' '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                        newItem.setData(Qt.UserRole,QByteArray(ackdata))
                        newItem.setData(33,int(time.time()))
                        self.ui.tableWidgetSent.setItem(0,3,newItem)
                        self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(0,2).data(Qt.UserRole).toPyObject())"""
                        # Clear the compose form and jump to the Sent tab.
                        self.ui.comboBoxSendFrom.setCurrentIndex(0)
                        self.ui.labelFrom.setText('')
                        self.ui.lineEditTo.setText('')
                        self.ui.lineEditSubject.setText('')
                        self.ui.textEditMessage.setText('')
                        self.ui.tabWidget.setCurrentIndex(2)
                        self.ui.tableWidgetSent.setCurrentCell(0,0)
                else:
                    self.statusBar().showMessage('Your \'To\' field is empty.')
        else: #User selected 'Broadcast'
            if fromAddress == '':
                self.statusBar().showMessage('Error: You must specify a From address. If you don\'t have one, go to the \'Your Identities\' tab.')
            else:
                self.statusBar().showMessage('')
                #We don't actually need the ackdata for acknowledgement since this is a broadcast message, but we can use it to update the user interface when the POW is done generating.
                ackdata = OpenSSL.rand(32)
                toAddress = '[Broadcast subscribers]'
                ripe = ''
                sqlLock.acquire()
                t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'broadcastpending',1,1,'sent')
                sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                workerQueue.put(('sendbroadcast',(fromAddress,subject,message)))
                try:
                    fromLabel = config.get(fromAddress, 'label')
                except:
                    fromLabel = ''
                if fromLabel == '':
                    fromLabel = fromAddress
                toLabel = '[Broadcast subscribers]'
                # Populate the new top row of the Sent table for this broadcast.
                self.ui.tableWidgetSent.insertRow(0)
                newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
                newItem.setData(Qt.UserRole,str(toAddress))
                self.ui.tableWidgetSent.setItem(0,0,newItem)
                if fromLabel == '':
                    newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
                else:
                    newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
                newItem.setData(Qt.UserRole,str(fromAddress))
                self.ui.tableWidgetSent.setItem(0,1,newItem)
                # NOTE(review): the encoding name 'utf-8)' below contains a stray
                # ')'; it appears to be tolerated because codec-name
                # normalization drops trailing punctuation -- TODO fix.
                newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
                newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
                self.ui.tableWidgetSent.setItem(0,2,newItem)
                #newItem = QtGui.QTableWidgetItem('Doing work necessary to send broadcast...'+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                newItem = myTableWidgetItem('Work is queued.')
                newItem.setData(Qt.UserRole,QByteArray(ackdata))
                newItem.setData(33,int(time.time()))
                self.ui.tableWidgetSent.setItem(0,3,newItem)
                self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(0,2).data(Qt.UserRole).toPyObject())
                # Clear the compose form and jump to the Sent tab.
                self.ui.comboBoxSendFrom.setCurrentIndex(0)
                self.ui.labelFrom.setText('')
                self.ui.lineEditTo.setText('')
                self.ui.lineEditSubject.setText('')
                self.ui.textEditMessage.setText('')
                self.ui.tabWidget.setCurrentIndex(2)
                self.ui.tableWidgetSent.setCurrentCell(0,0)
def click_pushButtonLoadFromAddressBook(self):
self.ui.tabWidget.setCurrentIndex(5)
for i in range(4):
time.sleep(0.1)
self.statusBar().showMessage('')
time.sleep(0.1)
self.statusBar().showMessage('Right click an entry in your address book and select \'Send message to this address\'.')
def redrawLabelFrom(self,index):
self.ui.labelFrom.setText(self.ui.comboBoxSendFrom.itemData(index).toPyObject())
def rerenderComboBoxSendFrom(self):
self.ui.comboBoxSendFrom.clear()
self.ui.labelFrom.setText('')
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
isEnabled = config.getboolean(addressInKeysFile, 'enabled') #I realize that this is poor programming practice but I don't care. It's easier for others to read.
if isEnabled:
self.ui.comboBoxSendFrom.insertItem(0,unicode(config.get(addressInKeysFile, 'label'),'utf-8'),addressInKeysFile)
self.ui.comboBoxSendFrom.insertItem(0,'','')
if(self.ui.comboBoxSendFrom.count() == 2):
self.ui.comboBoxSendFrom.setCurrentIndex(1)
self.redrawLabelFrom(self.ui.comboBoxSendFrom.currentIndex())
else:
self.ui.comboBoxSendFrom.setCurrentIndex(0)
def connectToStream(self,streamNumber):
connectionsCount[streamNumber] = 0
#Add a line to the Connection Count table on the Network Status tab with a 'zero' connection count. This will be updated as necessary by another function.
self.ui.tableWidgetConnectionCount.insertRow(0)
newItem = QtGui.QTableWidgetItem(str(streamNumber))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetConnectionCount.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem('0')
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetConnectionCount.setItem(0,1,newItem)
a = outgoingSynSender()
self.listOfOutgoingSynSenderThreads.append(a)
QtCore.QObject.connect(a, QtCore.SIGNAL("passObjectThrough(PyQt_PyObject)"), self.connectObjectToSignals)
QtCore.QObject.connect(a, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
a.setup(streamNumber)
a.start()
def connectObjectToSignals(self,object):
QtCore.QObject.connect(object, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
QtCore.QObject.connect(object, QtCore.SIGNAL("displayNewInboxMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayNewInboxMessage)
QtCore.QObject.connect(object, QtCore.SIGNAL("displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayNewSentMessage)
QtCore.QObject.connect(object, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
QtCore.QObject.connect(object, QtCore.SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByAckdata)
QtCore.QObject.connect(object, QtCore.SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"), self.updateNetworkStatusTab)
QtCore.QObject.connect(object, QtCore.SIGNAL("incrementNumberOfMessagesProcessed()"), self.incrementNumberOfMessagesProcessed)
QtCore.QObject.connect(object, QtCore.SIGNAL("incrementNumberOfPubkeysProcessed()"), self.incrementNumberOfPubkeysProcessed)
QtCore.QObject.connect(object, QtCore.SIGNAL("incrementNumberOfBroadcastsProcessed()"), self.incrementNumberOfBroadcastsProcessed)
QtCore.QObject.connect(object, QtCore.SIGNAL("setStatusIcon(PyQt_PyObject)"), self.setStatusIcon)
#This function exists because of the API. The API thread starts an address generator thread and must somehow connect the address generator's signals to the QApplication thread. This function is used to connect the slots and signals.
def connectObjectToAddressGeneratorSignals(self,object):
QtCore.QObject.connect(object, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
QtCore.QObject.connect(object, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
    #This function is called by the processmsg function when that function receives a message to an address that is acting as a pseudo-mailing-list. The message will be broadcast out. This function puts the message on the 'Sent' tab.
    def displayNewSentMessage(self,toAddress,toLabel,fromAddress,subject,message,ackdata):
        """Insert a new row at the top of the Sent table for an outgoing
        message and show its body in the sent-message preview pane.

        Also invoked directly from click_pushButtonSend and via the
        displayNewSentMessage signal (see connectObjectToSignals).
        """
        try:
            fromLabel = config.get(fromAddress, 'label')
        except:
            fromLabel = ''
        if fromLabel == '':
            fromLabel = fromAddress
        self.ui.tableWidgetSent.insertRow(0)
        # Column 0: recipient (label if known, raw address otherwise); the raw
        # address is kept in the item's UserRole data.
        if toLabel == '':
            newItem = QtGui.QTableWidgetItem(unicode(toAddress,'utf-8'))
        else:
            newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
        newItem.setData(Qt.UserRole,str(toAddress))
        self.ui.tableWidgetSent.setItem(0,0,newItem)
        # Column 1: sender, same label-or-address convention.
        if fromLabel == '':
            newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
        else:
            newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
        newItem.setData(Qt.UserRole,str(fromAddress))
        self.ui.tableWidgetSent.setItem(0,1,newItem)
        # Column 2: subject, with the full message body stashed in UserRole.
        # NOTE(review): 'utf-8)' has a stray ')'; it appears to be tolerated by
        # codec-name normalization (trailing punctuation dropped) -- TODO fix.
        newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
        newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
        self.ui.tableWidgetSent.setItem(0,2,newItem)
        #newItem = QtGui.QTableWidgetItem('Doing work necessary to send broadcast...'+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
        # Column 3: status text; ackdata in UserRole identifies the row later,
        # and role 33 stores the timestamp used for sorting.
        newItem = myTableWidgetItem('Work is queued. '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
        newItem.setData(Qt.UserRole,QByteArray(ackdata))
        newItem.setData(33,int(time.time()))
        self.ui.tableWidgetSent.setItem(0,3,newItem)
        self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(0,2).data(Qt.UserRole).toPyObject())
    def displayNewInboxMessage(self,inventoryHash,toAddress,fromAddress,subject,message):
        """Insert a newly received message at the top of the Inbox table,
        optionally showing a tray notification, and select it.

        The sender's label is looked up in the addressbook table first, then
        in subscriptions (broadcast senders). SQL access is serialized with
        sqlLock around the submit/return queue pair.
        """
        '''print 'test signals displayNewInboxMessage'
        print 'toAddress', toAddress
        print 'fromAddress', fromAddress
        print 'message', message'''
        fromLabel = ''
        sqlLock.acquire()
        t = (fromAddress,)
        sqlSubmitQueue.put('''select label from addressbook where address=?''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        if queryreturn <> []:
            for row in queryreturn:
                fromLabel, = row
        else:
            #There might be a label in the subscriptions table
            sqlLock.acquire()
            t = (fromAddress,)
            sqlSubmitQueue.put('''select label from subscriptions where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn <> []:
                for row in queryreturn:
                    fromLabel, = row
        try:
            if toAddress == '[Broadcast subscribers]':
                toLabel = '[Broadcast subscribers]'
            else:
                toLabel = config.get(toAddress, 'label')
        except:
            toLabel = ''
        if toLabel == '':
            toLabel = toAddress
        #msgid, toaddress, fromaddress, subject, received, message = row
        # Column 0: recipient; raw address kept in UserRole; purple text for
        # pseudo-mailing-list addresses.
        newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
        newItem.setData(Qt.UserRole,str(toAddress))
        if safeConfigGetBoolean(str(toAddress),'mailinglist'):
            newItem.setTextColor(QtGui.QColor(137,04,177))
        self.ui.tableWidgetInbox.insertRow(0)
        self.ui.tableWidgetInbox.setItem(0,0,newItem)
        # Column 1: sender (label if known), with an optional tray popup.
        if fromLabel == '':
            newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
            if config.getboolean('bitmessagesettings', 'showtraynotifications'):
                self.trayIcon.showMessage('New Message', 'New message from '+ fromAddress, 1, 2000)
        else:
            newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
            if config.getboolean('bitmessagesettings', 'showtraynotifications'):
                self.trayIcon.showMessage('New Message', 'New message from '+fromLabel, 1, 2000)
        newItem.setData(Qt.UserRole,str(fromAddress))
        self.ui.tableWidgetInbox.setItem(0,1,newItem)
        # Column 2: subject, full message body stashed in UserRole.
        # NOTE(review): 'utf-8)' has a stray ')'; apparently tolerated by
        # codec-name normalization -- TODO fix.
        newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
        newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
        self.ui.tableWidgetInbox.setItem(0,2,newItem)
        # Column 3: receive time; inventoryHash in UserRole identifies the
        # message, role 33 stores the timestamp used for sorting.
        newItem = myTableWidgetItem(strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
        newItem.setData(Qt.UserRole,QByteArray(inventoryHash))
        newItem.setData(33,int(time.time()))
        self.ui.tableWidgetInbox.setItem(0,3,newItem)
        self.ui.textEditInboxMessage.setText(self.ui.tableWidgetInbox.item(0,2).data(Qt.UserRole).toPyObject())
        self.ui.tableWidgetInbox.setCurrentCell(0,0)
    def click_pushButtonAddAddressBook(self):
        """Show the new-entry dialog and add the entered address to the
        address book (table widget and addressbook SQL table), rejecting
        duplicates and invalid addresses.

        NOTE(review): this reuses NewSubscriptionDialog and its
        'newsubscriptionlabel' field for address-book entries as well.
        """
        self.NewSubscriptionDialogInstance = NewSubscriptionDialog(self)
        if self.NewSubscriptionDialogInstance.exec_():
            # The dialog validates the address as the user types; this label
            # text is its verdict.
            if self.NewSubscriptionDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
                #First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
                sqlLock.acquire()
                t = (addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),)
                sqlSubmitQueue.put('''select * from addressbook where address=?''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                if queryreturn == []:
                    # Not a duplicate: add to the top of the table widget...
                    self.ui.tableWidgetAddressBook.insertRow(0)
                    newItem = QtGui.QTableWidgetItem(unicode(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
                    self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
                    newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text()))
                    newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                    self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
                    # ...and persist it in the addressbook table.
                    t = (str(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())))
                    sqlLock.acquire()
                    sqlSubmitQueue.put('''INSERT INTO addressbook VALUES (?,?)''')
                    sqlSubmitQueue.put(t)
                    queryreturn = sqlReturnQueue.get()
                    sqlLock.release()
                    # Re-render inbox 'From' labels so the new label shows up.
                    self.rerenderInboxFromLabels()
                else:
                    self.statusBar().showMessage('Error: You cannot add the same address to your address book twice. Try renaming the existing one if you want.')
            else:
                self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def click_pushButtonAddSubscription(self):
self.NewSubscriptionDialogInstance = NewSubscriptionDialog(self)
if self.NewSubscriptionDialogInstance.exec_():
if self.NewSubscriptionDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
#First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
sqlLock.acquire()
t = (addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),)
sqlSubmitQueue.put('''select * from subscriptions where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn == []:
self.ui.tableWidgetSubscriptions.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
self.ui.tableWidgetSubscriptions.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text()))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSubscriptions.setItem(0,1,newItem)
t = (str(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),True)
sqlLock.acquire()
sqlSubmitQueue.put('''INSERT INTO subscriptions VALUES (?,?,?)''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
self.rerenderInboxFromLabels()
self.reloadBroadcastSendersForWhichImWatching()
else:
self.statusBar().showMessage('Error: You cannot add the same address to your subsciptions twice. Perhaps rename the existing one if you want.')
else:
self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def loadBlackWhiteList(self):
#Initialize the Blacklist or Whitelist table
listType = config.get('bitmessagesettings', 'blackwhitelist')
if listType == 'black':
sqlSubmitQueue.put('''SELECT label, address, enabled FROM blacklist''')
else:
sqlSubmitQueue.put('''SELECT label, address, enabled FROM whitelist''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
label, address, enabled = row
self.ui.tableWidgetBlacklist.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
if not enabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetBlacklist.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
if not enabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetBlacklist.setItem(0,1,newItem)
def click_pushButtonStatusIcon(self):
print 'click_pushButtonStatusIcon'
self.iconGlossaryInstance = iconGlossaryDialog(self)
if self.iconGlossaryInstance.exec_():
pass
def click_actionHelp(self):
self.helpDialogInstance = helpDialog(self)
self.helpDialogInstance.exec_()
def click_actionAbout(self):
self.aboutDialogInstance = aboutDialog(self)
self.aboutDialogInstance.exec_()
    def click_actionSettings(self):
        """Show the Settings dialog and, if accepted, persist every option to
        keys.dat, apply Windows auto-start, and migrate config files when
        portable mode is toggled (appdata == '' means portable mode).
        """
        global statusIconColor
        global appdata
        self.settingsDialogInstance = settingsDialog(self)
        if self.settingsDialogInstance.exec_():
            # Copy each checkbox/field straight into the config object.
            config.set('bitmessagesettings', 'startonlogon', str(self.settingsDialogInstance.ui.checkBoxStartOnLogon.isChecked()))
            config.set('bitmessagesettings', 'minimizetotray', str(self.settingsDialogInstance.ui.checkBoxMinimizeToTray.isChecked()))
            config.set('bitmessagesettings', 'showtraynotifications', str(self.settingsDialogInstance.ui.checkBoxShowTrayNotifications.isChecked()))
            config.set('bitmessagesettings', 'startintray', str(self.settingsDialogInstance.ui.checkBoxStartInTray.isChecked()))
            # Port and proxy changes deserve an explanation to the user.
            if int(config.get('bitmessagesettings','port')) != int(self.settingsDialogInstance.ui.lineEditTCPPort.text()):
                QMessageBox.about(self, "Restart", "You must restart Bitmessage for the port number change to take effect.")
            config.set('bitmessagesettings', 'port', str(self.settingsDialogInstance.ui.lineEditTCPPort.text()))
            if config.get('bitmessagesettings', 'socksproxytype') == 'none' and str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText())[0:5] == 'SOCKS':
                if statusIconColor != 'red':
                    QMessageBox.about(self, "Restart", "Bitmessage will use your proxy from now on now but you may want to manually restart Bitmessage now to close existing connections.")
            if config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS' and str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText()) == 'none':
                self.statusBar().showMessage('')
            config.set('bitmessagesettings', 'socksproxytype', str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText()))
            config.set('bitmessagesettings', 'socksauthentication', str(self.settingsDialogInstance.ui.checkBoxAuthentication.isChecked()))
            config.set('bitmessagesettings', 'sockshostname', str(self.settingsDialogInstance.ui.lineEditSocksHostname.text()))
            config.set('bitmessagesettings', 'socksport', str(self.settingsDialogInstance.ui.lineEditSocksPort.text()))
            config.set('bitmessagesettings', 'socksusername', str(self.settingsDialogInstance.ui.lineEditSocksUsername.text()))
            config.set('bitmessagesettings', 'sockspassword', str(self.settingsDialogInstance.ui.lineEditSocksPassword.text()))
            # Persist everything to keys.dat in its current location.
            with open(appdata + 'keys.dat', 'wb') as configfile:
                config.write(configfile)
            if 'win32' in sys.platform or 'win64' in sys.platform:
                #Auto-startup for Windows
                RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
                self.settings = QSettings(RUN_PATH, QSettings.NativeFormat)
                if config.getboolean('bitmessagesettings', 'startonlogon'):
                    self.settings.setValue("PyBitmessage",sys.argv[0])
                else:
                    self.settings.remove("PyBitmessage")
            elif 'darwin' in sys.platform:
                #startup for mac
                pass
            elif 'linux' in sys.platform:
                #startup for linux
                pass
            if appdata != '' and self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): #If we are NOT using portable mode now but the user selected that we should...
                # NOTE(review): 'movemessagstoprog' is misspelled but must stay
                # as-is; the startup code that consumes it uses this spelling.
                config.set('bitmessagesettings','movemessagstoprog','true') #Tells bitmessage to move the messages.dat file to the program directory the next time the program starts.
                #Write the keys.dat file to disk in the new location
                with open('keys.dat', 'wb') as configfile:
                    config.write(configfile)
                #Write the knownnodes.dat file to disk in the new location
                output = open('knownnodes.dat', 'wb')
                pickle.dump(knownNodes, output)
                output.close()
                os.remove(appdata + 'keys.dat')
                os.remove(appdata + 'knownnodes.dat')
                appdata = ''
                QMessageBox.about(self, "Restart", "Bitmessage has moved most of your config files to the program directory but you must restart Bitmessage to move the last file (the file which holds messages).")
            if appdata == '' and not self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): #If we ARE using portable mode now but the user selected that we shouldn't...
                appdata = lookupAppdataFolder()
                if not os.path.exists(appdata):
                    os.makedirs(appdata)
                # NOTE(review): same misspelling convention as above.
                config.set('bitmessagesettings','movemessagstoappdata','true') #Tells bitmessage to move the messages.dat file to the appdata directory the next time the program starts.
                #Write the keys.dat file to disk in the new location
                with open(appdata + 'keys.dat', 'wb') as configfile:
                    config.write(configfile)
                #Write the knownnodes.dat file to disk in the new location
                output = open(appdata + 'knownnodes.dat', 'wb')
                pickle.dump(knownNodes, output)
                output.close()
                os.remove('keys.dat')
                os.remove('knownnodes.dat')
                QMessageBox.about(self, "Restart", "Bitmessage has moved most of your config files to the application data directory but you must restart Bitmessage to move the last file (the file which holds messages).")
def click_radioButtonBlacklist(self):
    """Switch the black/white-list mode to 'black' and refresh the list tab.

    Does nothing if the mode is already 'black'.
    """
    currentMode = config.get('bitmessagesettings', 'blackwhitelist')
    if currentMode != 'white':
        return
    config.set('bitmessagesettings', 'blackwhitelist', 'black')
    # Persist the changed setting to keys.dat immediately.
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    # Clear all rows, then repopulate from the blacklist table.
    self.ui.tableWidgetBlacklist.setRowCount(0)
    self.loadBlackWhiteList()
    self.ui.tabWidget.setTabText(6, 'Blacklist')
def click_radioButtonWhitelist(self):
    """Switch the black/white-list mode to 'white' and refresh the list tab.

    Does nothing if the mode is already 'white'.
    """
    currentMode = config.get('bitmessagesettings', 'blackwhitelist')
    if currentMode != 'black':
        return
    config.set('bitmessagesettings', 'blackwhitelist', 'white')
    # Persist the changed setting to keys.dat immediately.
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    # Clear all rows, then repopulate from the whitelist table.
    self.ui.tableWidgetBlacklist.setRowCount(0)
    self.loadBlackWhiteList()
    self.ui.tabWidget.setTabText(6, 'Whitelist')
def click_pushButtonAddBlacklist(self):
    """Show the add-entry dialog and, if the user accepts with a valid
    address, insert the entry into the table widget and into whichever SQL
    table (blacklist or whitelist) is currently active."""
    self.NewBlacklistDialogInstance = NewSubscriptionDialog(self)
    if self.NewBlacklistDialogInstance.exec_():
        # The dialog validates the address itself and reports via this label.
        if self.NewBlacklistDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
            #First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
            sqlLock.acquire()
            t = (addBMIfNotPresent(str(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text())),)
            if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
                sqlSubmitQueue.put('''select * from blacklist where address=?''')
            else:
                sqlSubmitQueue.put('''select * from whitelist where address=?''')
            # Protocol of the SQL thread: put the query, put the params, then
            # block on the return queue for the result.
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn == []:
                # Not present yet: add to the UI table first...
                self.ui.tableWidgetBlacklist.insertRow(0)
                newItem = QtGui.QTableWidgetItem(unicode(self.NewBlacklistDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
                self.ui.tableWidgetBlacklist.setItem(0,0,newItem)
                newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text()))
                # Address cells are selectable but not editable.
                newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                self.ui.tableWidgetBlacklist.setItem(0,1,newItem)
                # ...then insert (label, address, enabled=True) into SQL.
                t = (str(self.NewBlacklistDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text())),True)
                sqlLock.acquire()
                if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
                    sqlSubmitQueue.put('''INSERT INTO blacklist VALUES (?,?,?)''')
                else:
                    sqlSubmitQueue.put('''INSERT INTO whitelist VALUES (?,?,?)''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
            else:
                self.statusBar().showMessage('Error: You cannot add the same address to your list twice. Perhaps rename the existing one if you want.')
        else:
            self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def on_action_SpecialAddressBehaviorDialog(self):
    """Open the special-address-behavior dialog for the selected identity and
    apply the mailing-list choice to keys.dat and the row's text color."""
    self.dialog = SpecialAddressBehaviorDialog(self)
    # For Modal dialogs
    if self.dialog.exec_():
        currentRow = self.ui.tableWidgetYourIdentities.currentRow()
        addressAtCurrentRow = str(self.ui.tableWidgetYourIdentities.item(currentRow,1).text())
        if self.dialog.ui.radioButtonBehaveNormalAddress.isChecked():
            config.set(str(addressAtCurrentRow),'mailinglist','false')
            #Set the color to either black or grey
            if config.getboolean(addressAtCurrentRow,'enabled'):
                self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(0,0,0))
            else:
                self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(128,128,128))
        else:
            config.set(str(addressAtCurrentRow),'mailinglist','true')
            config.set(str(addressAtCurrentRow),'mailinglistname',str(self.dialog.ui.lineEditMailingListName.text().toUtf8()))
            # Purple marks mailing-list addresses. NOTE: 04 is a Python 2
            # octal literal (== 4); this is how it appears throughout the file.
            self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(137,04,177))
        # Persist either branch's config changes to keys.dat.
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
        self.rerenderInboxToLabels()
def click_NewAddressDialog(self):
    """Open the new-address dialog and start an addressGenerator thread to
    create either a random address or deterministic (passphrase) addresses."""
    self.dialog = NewAddressDialog(self)
    # For Modal dialogs
    if self.dialog.exec_():
        #self.dialog.ui.buttonBox.enabled = False
        if self.dialog.ui.radioButtonRandomAddress.isChecked():
            if self.dialog.ui.radioButtonMostAvailable.isChecked():
                streamNumberForAddress = 1
            else:
                #User selected 'Use the same stream as an existing address.'
                streamNumberForAddress = addressStream(self.dialog.ui.comboBoxExisting.currentText())
            # Generator runs in its own thread; results come back via signals.
            self.addressGenerator = addressGenerator()
            self.addressGenerator.setup(2,streamNumberForAddress,str(self.dialog.ui.newaddresslabel.text().toUtf8()),1,"",self.dialog.ui.checkBoxEighteenByteRipe.isChecked())
            QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
            QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
            self.addressGenerator.start()
        else:
            # Deterministic addresses: validate the passphrase pair first.
            if self.dialog.ui.lineEditPassphrase.text() != self.dialog.ui.lineEditPassphraseAgain.text():
                QMessageBox.about(self, "Passphrase mismatch", "The passphrase you entered twice doesn\'t match. Try again.")
            elif self.dialog.ui.lineEditPassphrase.text() == "":
                QMessageBox.about(self, "Choose a passphrase", "You really do need a passphrase.")
            else:
                streamNumberForAddress = 1 #this will eventually have to be replaced by logic to determine the most available stream number.
                self.addressGenerator = addressGenerator()
                self.addressGenerator.setup(2,streamNumberForAddress,"unused address",self.dialog.ui.spinBoxNumberOfAddressesToMake.value(),self.dialog.ui.lineEditPassphrase.text().toUtf8(),self.dialog.ui.checkBoxEighteenByteRipe.isChecked())
                QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
                QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
                self.addressGenerator.start()
    else:
        print 'new address dialog box rejected'
def closeEvent(self, event):
    """Shut down cleanly: tell network threads to stop, flush the inventory
    and knownNodes to disk, then exit the process.

    The order matters: the SQL no-op query below guarantees the flush
    committed before we terminate.
    """
    '''quit_msg = "Are you sure you want to exit Bitmessage?"
    reply = QtGui.QMessageBox.question(self, 'Message',
        quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
    if reply == QtGui.QMessageBox.Yes:
        event.accept()
    else:
        event.ignore()'''
    # Signal every sendData thread to shut down.
    broadcastToSendDataQueues((0, 'shutdown', 'all'))
    printLock.acquire()
    print 'Closing. Flushing inventory in memory out to disk...'
    printLock.release()
    self.statusBar().showMessage('Flushing inventory in memory out to disk.')
    flushInventory()
    #This one last useless query will guarantee that the previous query committed before we close the program.
    sqlLock.acquire()
    sqlSubmitQueue.put('SELECT address FROM subscriptions')
    sqlSubmitQueue.put('')
    sqlReturnQueue.get()
    sqlLock.release()
    self.statusBar().showMessage('Saving the knownNodes list of peers to disk...')
    output = open(appdata + 'knownnodes.dat', 'wb')
    pickle.dump(knownNodes, output)
    output.close()
    self.trayIcon.hide()
    printLock.acquire()
    print 'Done.'
    printLock.release()
    self.statusBar().showMessage('All done. Closing user interface...')
    event.accept()
    # Hard-exit; daemon threads are abandoned deliberately.
    raise SystemExit
def on_action_InboxReply(self):
    """Pre-fill the Send tab with a reply to the selected inbox message.

    Copies the sender into 'To', quotes the original body, and prefixes the
    subject with 'Re: ' unless it already carries one. Refuses to reply from
    a disabled identity.
    """
    currentInboxRow = self.ui.tableWidgetInbox.currentRow()
    toAddressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow,0).data(Qt.UserRole).toPyObject())
    fromAddressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow,1).data(Qt.UserRole).toPyObject())
    if toAddressAtCurrentInboxRow == '[Broadcast subscribers]':
        # Broadcasts have no single 'from' identity to reply from.
        self.ui.labelFrom.setText('')
    else:
        # BUGFIX: config.get() returns the string 'true'/'false', which is
        # always truthy, so the old `not config.get(...)` guard never fired
        # and disabled addresses slipped through. Use getboolean(), matching
        # how 'enabled' is read elsewhere in this file.
        if not config.getboolean(toAddressAtCurrentInboxRow,'enabled'):
            self.statusBar().showMessage('Error: The address from which you are trying to send is disabled. Enable it from the \'Your Identities\' tab first.')
            return
        self.ui.labelFrom.setText(toAddressAtCurrentInboxRow)
    self.ui.lineEditTo.setText(str(fromAddressAtCurrentInboxRow))
    self.ui.comboBoxSendFrom.setCurrentIndex(0)
    # Quote the original message below a separator line.
    self.ui.textEditMessage.setText('\n\n------------------------------------------------------\n'+self.ui.tableWidgetInbox.item(currentInboxRow,2).data(Qt.UserRole).toPyObject())
    if self.ui.tableWidgetInbox.item(currentInboxRow,2).text()[0:3] == 'Re:':
        self.ui.lineEditSubject.setText(self.ui.tableWidgetInbox.item(currentInboxRow,2).text())
    else:
        self.ui.lineEditSubject.setText('Re: '+self.ui.tableWidgetInbox.item(currentInboxRow,2).text())
    self.ui.radioButtonSpecific.setChecked(True)
    # Jump to the Send tab.
    self.ui.tabWidget.setCurrentIndex(1)
def on_action_InboxAddSenderToAddressBook(self):
    """Add the sender of the selected inbox message to the Address Book,
    unless the address is already present there."""
    currentInboxRow = self.ui.tableWidgetInbox.currentRow()
    #self.ui.tableWidgetInbox.item(currentRow,1).data(Qt.UserRole).toPyObject()
    addressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow,1).data(Qt.UserRole).toPyObject())
    #Let's make sure that it isn't already in the address book
    sqlLock.acquire()
    t = (addressAtCurrentInboxRow,)
    sqlSubmitQueue.put('''select * from addressbook where address=?''')
    sqlSubmitQueue.put(t)
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    if queryreturn == []:
        # New address: prepend a row with a placeholder label...
        self.ui.tableWidgetAddressBook.insertRow(0)
        newItem = QtGui.QTableWidgetItem('--New entry. Change label in Address Book.--')
        self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
        newItem = QtGui.QTableWidgetItem(addressAtCurrentInboxRow)
        # Address cells are selectable but not editable.
        newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
        self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
        # ...and mirror it into the addressbook SQL table.
        t = ('--New entry. Change label in Address Book.--',addressAtCurrentInboxRow)
        sqlLock.acquire()
        sqlSubmitQueue.put('''INSERT INTO addressbook VALUES (?,?)''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        # Focus the new entry so the user can rename it right away.
        self.ui.tabWidget.setCurrentIndex(5)
        self.ui.tableWidgetAddressBook.setCurrentCell(0,0)
        self.statusBar().showMessage('Entry added to the Address Book. Edit the label to your liking.')
    else:
        self.statusBar().showMessage('Error: You cannot add the same address to your address book twice. Try renaming the existing one if you want.')
#Send item on the Inbox tab to trash
def on_action_InboxTrash(self):
    """Move the selected inbox message to the 'trash' folder (soft delete)
    and remove its row from the UI."""
    currentRow = self.ui.tableWidgetInbox.currentRow()
    # The message's inventory hash is stashed in column 3's UserRole data.
    inventoryHashToTrash = str(self.ui.tableWidgetInbox.item(currentRow,3).data(Qt.UserRole).toPyObject())
    t = (inventoryHashToTrash,)
    sqlLock.acquire()
    #sqlSubmitQueue.put('''delete from inbox where msgid=?''')
    sqlSubmitQueue.put('''UPDATE inbox SET folder='trash' WHERE msgid=?''')
    sqlSubmitQueue.put(t)
    sqlReturnQueue.get()
    sqlLock.release()
    self.ui.textEditInboxMessage.setText("")
    self.ui.tableWidgetInbox.removeRow(currentRow)
    self.statusBar().showMessage('Moved item to trash. There is no user interface to view your trash, but it is still on disk if you are desperate to get it back.')
#Send item on the Sent tab to trash
def on_action_SentTrash(self):
    """Move the selected Sent item to the 'trash' folder (soft delete) and
    remove its row from the UI."""
    row = self.ui.tableWidgetSent.currentRow()
    # The message's ackdata is stashed in column 3's UserRole data.
    ackdata = str(self.ui.tableWidgetSent.item(row,3).data(Qt.UserRole).toPyObject())
    sqlLock.acquire()
    sqlSubmitQueue.put('''UPDATE sent SET folder='trash' WHERE ackdata=?''')
    sqlSubmitQueue.put((ackdata,))
    sqlReturnQueue.get()
    sqlLock.release()
    self.ui.textEditSentMessage.setText("")
    self.ui.tableWidgetSent.removeRow(row)
    self.statusBar().showMessage('Moved item to trash. There is no user interface to view your trash, but it is still on disk if you are desperate to get it back.')
def on_action_SentClipboard(self):
    """Copy the 'to' address of the selected Sent row to the clipboard."""
    row = self.ui.tableWidgetSent.currentRow()
    address = str(self.ui.tableWidgetSent.item(row,0).data(Qt.UserRole).toPyObject())
    QtGui.QApplication.clipboard().setText(str(address))
#Group of functions for the Address Book dialog box
def on_action_AddressBookNew(self):
    """Context-menu 'New' action: delegate to the add-entry button handler."""
    self.click_pushButtonAddAddressBook()
def on_action_AddressBookDelete(self):
    """Delete the selected Address Book entry from SQL and the UI, then
    refresh the labels that may have referenced it."""
    currentRow = self.ui.tableWidgetAddressBook.currentRow()
    labelAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,0).text().toUtf8()
    addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,1).text()
    t = (str(labelAtCurrentRow),str(addressAtCurrentRow))
    sqlLock.acquire()
    sqlSubmitQueue.put('''DELETE FROM addressbook WHERE label=? AND address=?''')
    sqlSubmitQueue.put(t)
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    self.ui.tableWidgetAddressBook.removeRow(currentRow)
    # Inbox/Sent rows show address-book labels; re-render them.
    self.rerenderInboxFromLabels()
    self.rerenderSentToLabels()
    self.reloadBroadcastSendersForWhichImWatching()
def on_action_AddressBookClipboard(self):
    """Copy the selected Address Book address to the clipboard."""
    row = self.ui.tableWidgetAddressBook.currentRow()
    address = self.ui.tableWidgetAddressBook.item(row,1).text()
    QtGui.QApplication.clipboard().setText(str(address))
def on_action_AddressBookSend(self):
    """Append the selected Address Book address to the 'To' field on the
    Send tab, separating multiple recipients with '; '."""
    row = self.ui.tableWidgetAddressBook.currentRow()
    address = self.ui.tableWidgetAddressBook.item(row,1).text()
    existing = str(self.ui.lineEditTo.text())
    if existing == '':
        self.ui.lineEditTo.setText(str(address))
    else:
        self.ui.lineEditTo.setText(existing + '; ' + str(address))
    self.statusBar().showMessage('You have added the address to the \'To\' field on the \'Send\' tab. You may add more recipients if you want. When you are done, go to the \'Send\' tab.')
def on_context_menuAddressBook(self, point):
    """Show the Address Book context menu at the clicked widget position."""
    self.popMenuAddressBook.exec_( self.ui.tableWidgetAddressBook.mapToGlobal(point) )
#Group of functions for the Subscriptions dialog box
def on_action_SubscriptionsNew(self):
    """Context-menu 'New' action: delegate to the add-subscription handler."""
    self.click_pushButtonAddSubscription()
def on_action_SubscriptionsDelete(self):
    """Delete the selected subscription from SQL and the UI, then refresh
    dependent labels and the watched-senders map.

    Cleanup: removed a leftover debug `print 'clicked Delete'`, for
    consistency with on_action_AddressBookDelete.
    """
    currentRow = self.ui.tableWidgetSubscriptions.currentRow()
    labelAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,0).text().toUtf8()
    addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,1).text()
    t = (str(labelAtCurrentRow),str(addressAtCurrentRow))
    sqlLock.acquire()
    sqlSubmitQueue.put('''DELETE FROM subscriptions WHERE label=? AND address=?''')
    sqlSubmitQueue.put(t)
    sqlReturnQueue.get()
    sqlLock.release()
    self.ui.tableWidgetSubscriptions.removeRow(currentRow)
    # Inbox rows show subscription labels; re-render them.
    self.rerenderInboxFromLabels()
    self.reloadBroadcastSendersForWhichImWatching()
def on_action_SubscriptionsClipboard(self):
    """Copy the selected subscription's address to the clipboard."""
    row = self.ui.tableWidgetSubscriptions.currentRow()
    address = self.ui.tableWidgetSubscriptions.item(row,1).text()
    QtGui.QApplication.clipboard().setText(str(address))
def on_context_menuSubscriptions(self, point):
    """Show the Subscriptions context menu at the clicked widget position."""
    self.popMenuSubscriptions.exec_( self.ui.tableWidgetSubscriptions.mapToGlobal(point) )
#Group of functions for the Blacklist dialog box
def on_action_BlacklistNew(self):
    """Context-menu 'New' action: delegate to the add-blacklist handler."""
    self.click_pushButtonAddBlacklist()
def on_action_BlacklistDelete(self):
    """Delete the selected row from the active black/white list (which table
    is hit depends on the blackwhitelist setting) and from the UI.

    Cleanup: removed a leftover debug `print 'clicked Delete'` and
    deduplicated the identical put/get sequence across the two branches
    (queue traffic is byte-for-byte the same as before).
    """
    currentRow = self.ui.tableWidgetBlacklist.currentRow()
    labelAtCurrentRow = self.ui.tableWidgetBlacklist.item(currentRow,0).text().toUtf8()
    addressAtCurrentRow = self.ui.tableWidgetBlacklist.item(currentRow,1).text()
    t = (str(labelAtCurrentRow),str(addressAtCurrentRow))
    sqlLock.acquire()
    if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
        sqlSubmitQueue.put('''DELETE FROM blacklist WHERE label=? AND address=?''')
    else:
        sqlSubmitQueue.put('''DELETE FROM whitelist WHERE label=? AND address=?''')
    sqlSubmitQueue.put(t)
    sqlReturnQueue.get()
    sqlLock.release()
    self.ui.tableWidgetBlacklist.removeRow(currentRow)
def on_action_BlacklistClipboard(self):
    """Copy the selected black/white list address to the clipboard."""
    row = self.ui.tableWidgetBlacklist.currentRow()
    address = self.ui.tableWidgetBlacklist.item(row,1).text()
    QtGui.QApplication.clipboard().setText(str(address))
def on_context_menuBlacklist(self, point):
    """Show the Blacklist context menu at the clicked widget position."""
    self.popMenuBlacklist.exec_( self.ui.tableWidgetBlacklist.mapToGlobal(point) )
def on_action_BlacklistEnable(self):
    """Re-enable the selected list entry: paint both cells black and set
    enabled=1 in whichever list table is active."""
    row = self.ui.tableWidgetBlacklist.currentRow()
    address = self.ui.tableWidgetBlacklist.item(row,1).text()
    # Black text marks an active entry.
    for column in (0, 1):
        self.ui.tableWidgetBlacklist.item(row,column).setTextColor(QtGui.QColor(0,0,0))
    sqlLock.acquire()
    if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
        sqlSubmitQueue.put('''UPDATE blacklist SET enabled=1 WHERE address=?''')
    else:
        sqlSubmitQueue.put('''UPDATE whitelist SET enabled=1 WHERE address=?''')
    sqlSubmitQueue.put((str(address),))
    sqlReturnQueue.get()
    sqlLock.release()
def on_action_BlacklistDisable(self):
    """Disable the selected list entry: paint both cells grey and set
    enabled=0 in whichever list table is active."""
    row = self.ui.tableWidgetBlacklist.currentRow()
    address = self.ui.tableWidgetBlacklist.item(row,1).text()
    # Grey text marks a disabled entry.
    for column in (0, 1):
        self.ui.tableWidgetBlacklist.item(row,column).setTextColor(QtGui.QColor(128,128,128))
    sqlLock.acquire()
    if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
        sqlSubmitQueue.put('''UPDATE blacklist SET enabled=0 WHERE address=?''')
    else:
        sqlSubmitQueue.put('''UPDATE whitelist SET enabled=0 WHERE address=?''')
    sqlSubmitQueue.put((str(address),))
    sqlReturnQueue.get()
    sqlLock.release()
#Group of functions for the Your Identities dialog box
def on_action_YourIdentitiesNew(self):
    """Context-menu 'New' action: delegate to the new-address dialog."""
    self.click_NewAddressDialog()
def on_action_YourIdentitiesEnable(self):
    """Enable the selected identity: persist enabled=true to keys.dat,
    repaint its row, and reload the key hashes used for decryption."""
    currentRow = self.ui.tableWidgetYourIdentities.currentRow()
    addressAtCurrentRow = str(self.ui.tableWidgetYourIdentities.item(currentRow,1).text())
    config.set(addressAtCurrentRow,'enabled','true')
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    # Black text marks an enabled identity.
    self.ui.tableWidgetYourIdentities.item(currentRow,0).setTextColor(QtGui.QColor(0,0,0))
    self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(0,0,0))
    self.ui.tableWidgetYourIdentities.item(currentRow,2).setTextColor(QtGui.QColor(0,0,0))
    # Mailing-list addresses keep their purple address cell.
    # NOTE: 04 is a Python 2 octal literal (== 4).
    if safeConfigGetBoolean(addressAtCurrentRow,'mailinglist'):
        self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(137,04,177))
    # Make the newly enabled key usable for receiving immediately.
    reloadMyAddressHashes()
def on_action_YourIdentitiesDisable(self):
    """Disable the selected identity: persist enabled=false to keys.dat,
    repaint its row grey, and reload the key hashes used for decryption."""
    currentRow = self.ui.tableWidgetYourIdentities.currentRow()
    addressAtCurrentRow = str(self.ui.tableWidgetYourIdentities.item(currentRow,1).text())
    config.set(str(addressAtCurrentRow),'enabled','false')
    # Grey text marks a disabled identity.
    self.ui.tableWidgetYourIdentities.item(currentRow,0).setTextColor(QtGui.QColor(128,128,128))
    self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(128,128,128))
    self.ui.tableWidgetYourIdentities.item(currentRow,2).setTextColor(QtGui.QColor(128,128,128))
    # Mailing-list addresses keep their purple address cell.
    # NOTE: 04 is a Python 2 octal literal (== 4).
    if safeConfigGetBoolean(addressAtCurrentRow,'mailinglist'):
        self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(137,04,177))
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    reloadMyAddressHashes()
def on_action_YourIdentitiesClipboard(self):
    """Copy the selected identity's address to the clipboard."""
    row = self.ui.tableWidgetYourIdentities.currentRow()
    address = self.ui.tableWidgetYourIdentities.item(row,1).text()
    QtGui.QApplication.clipboard().setText(str(address))
def on_context_menuYourIdentities(self, point):
    """Show the Your Identities context menu at the clicked widget position."""
    self.popMenu.exec_( self.ui.tableWidgetYourIdentities.mapToGlobal(point) )
def on_context_menuInbox(self, point):
    """Show the Inbox context menu at the clicked widget position."""
    self.popMenuInbox.exec_( self.ui.tableWidgetInbox.mapToGlobal(point) )
def on_context_menuSent(self, point):
    """Show the Sent context menu at the clicked widget position."""
    self.popMenuSent.exec_( self.ui.tableWidgetSent.mapToGlobal(point) )
def tableWidgetInboxItemClicked(self):
    """Display the body of the newly selected inbox message."""
    row = self.ui.tableWidgetInbox.currentRow()
    if row < 0:
        return
    body = self.ui.tableWidgetInbox.item(row,2).data(Qt.UserRole).toPyObject()
    self.ui.textEditInboxMessage.setText(body)
def tableWidgetSentItemClicked(self):
    """Display the body of the newly selected sent message."""
    row = self.ui.tableWidgetSent.currentRow()
    if row < 0:
        return
    body = self.ui.tableWidgetSent.item(row,2).data(Qt.UserRole).toPyObject()
    self.ui.textEditSentMessage.setText(body)
def tableWidgetYourIdentitiesItemChanged(self):
    """React to an edited identity label: persist the new label to keys.dat
    and refresh every view that renders address labels."""
    currentRow = self.ui.tableWidgetYourIdentities.currentRow()
    if currentRow >= 0:
        addressAtCurrentRow = self.ui.tableWidgetYourIdentities.item(currentRow,1).text()
        config.set(str(addressAtCurrentRow),'label',str(self.ui.tableWidgetYourIdentities.item(currentRow,0).text().toUtf8()))
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
    self.rerenderComboBoxSendFrom()
    #self.rerenderInboxFromLabels()
    self.rerenderInboxToLabels()
    self.rerenderSentFromLabels()
    #self.rerenderSentToLabels()
def tableWidgetAddressBookItemChanged(self):
    """React to an edited address-book label: write it through to SQL and
    refresh the views that display those labels."""
    currentRow = self.ui.tableWidgetAddressBook.currentRow()
    # NOTE(review): sqlLock is acquired before the row check, so it is
    # taken and released even when no row is selected; harmless but odd.
    sqlLock.acquire()
    if currentRow >= 0:
        addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,1).text()
        t = (str(self.ui.tableWidgetAddressBook.item(currentRow,0).text().toUtf8()),str(addressAtCurrentRow))
        sqlSubmitQueue.put('''UPDATE addressbook set label=? WHERE address=?''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()
    sqlLock.release()
    self.rerenderInboxFromLabels()
    self.rerenderSentToLabels()
def tableWidgetSubscriptionsItemChanged(self):
    """React to an edited subscription label: write it through to SQL and
    refresh the views that display those labels."""
    currentRow = self.ui.tableWidgetSubscriptions.currentRow()
    # NOTE(review): as in tableWidgetAddressBookItemChanged, the lock is
    # acquired before the row check; harmless but odd.
    sqlLock.acquire()
    if currentRow >= 0:
        addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,1).text()
        t = (str(self.ui.tableWidgetSubscriptions.item(currentRow,0).text().toUtf8()),str(addressAtCurrentRow))
        sqlSubmitQueue.put('''UPDATE subscriptions set label=? WHERE address=?''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()
    sqlLock.release()
    self.rerenderInboxFromLabels()
    self.rerenderSentToLabels()
def writeNewAddressToTable(self,label,address,streamNumber):
    """Insert a freshly generated identity at the top of the Your
    Identities table (called via signal from the addressGenerator thread)."""
    table = self.ui.tableWidgetYourIdentities
    table.insertRow(0)
    table.setItem(0, 0, QtGui.QTableWidgetItem(unicode(label,'utf-8')))
    # Address and stream cells are selectable but not editable.
    addressItem = QtGui.QTableWidgetItem(address)
    addressItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    table.setItem(0, 1, addressItem)
    streamItem = QtGui.QTableWidgetItem(streamNumber)
    streamItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    table.setItem(0, 2, streamItem)
    self.rerenderComboBoxSendFrom()
def updateStatusBar(self,data):
if data != "":
printLock.acquire()
print 'Status bar:', data
printLock.release()
self.statusBar().showMessage(data)
def reloadBroadcastSendersForWhichImWatching(self):
    """Rebuild the hash->0 map of subscribed broadcast senders from the
    subscriptions SQL table (used by the receive path to spot broadcasts)."""
    broadcastSendersForWhichImWatching.clear()
    sqlLock.acquire()
    sqlSubmitQueue.put('SELECT address FROM subscriptions')
    sqlSubmitQueue.put('')
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    for row in queryreturn:
        address, = row
        # Key the map by the address's ripe hash, not the address string.
        status,addressVersionNumber,streamNumber,hash = decodeAddress(address)
        broadcastSendersForWhichImWatching[hash] = 0
#In order for the time columns on the Inbox and Sent tabs to be sorted correctly (rather than alphabetically), we need to overload the < operator and use this class instead of QTableWidgetItem.
class myTableWidgetItem(QTableWidgetItem):
    """QTableWidgetItem that sorts by the integer stored under data role 33
    (a timestamp) rather than alphabetically by display text."""
    def __lt__(self,other):
        mine = int(self.data(33).toPyObject())
        theirs = int(other.data(33).toPyObject())
        return mine < theirs
# --- Module-level state shared between the GUI and the network/worker threads ---
sendDataQueues = [] #each sendData thread puts its queue in this list.
myRSAAddressHashes = {}  # ripe hash -> my RSA key material (filled by reloadMyAddressHashes)
myECAddressHashes = {}   # ripe hash -> my EC key material
#myPrivateKeys = {}
inventory = {} #of objects (like msg payloads and pubkey payloads) Does not include protocol headers (the first 24 bytes of each packet).
workerQueue = Queue.Queue()  # work items for the singleWorker thread
sqlSubmitQueue = Queue.Queue() #SQLITE3 is so thread-unsafe that they won't even let you call it from different threads using your own locks. SQL objects can only be called from one thread.
sqlReturnQueue = Queue.Queue()  # results from the sqlThread, paired with sqlSubmitQueue
sqlLock = threading.Lock()   # serializes the submit/return round-trip above
printLock = threading.Lock() # serializes console output across threads
ackdataForWhichImWatching = {}  # ackdata payloads we expect back for our sent messages
broadcastSendersForWhichImWatching = {}  # ripe hash -> 0, rebuilt from subscriptions
statusIconColor = 'red'  # red = not connected; updated as connections come up
connectionsCount = {} #Used for the 'network status' tab.
connectionsCountLock = threading.Lock()
inventoryLock = threading.Lock() #Guarantees that two receiveDataThreads don't receive and process the same message concurrently (probably sent by a malicious individual)
eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack('>Q',random.randrange(1, 18446744073709551615))
connectedHostsList = {} #List of hosts to which we are connected. Used to guarantee that the outgoingSynSender thread won't connect to the same remote node twice.
neededPubkeys = {}  # pubkeys we have requested and are waiting for
successfullyDecryptMessageTimings = [] #A list of the amounts of time it took to successfully decrypt msg messages
apiSignalQueue = Queue.Queue() #The singleAPI thread uses this queue to pass messages to a QT thread which can emit signals to do things like display a message in the UI.
apiAddressGeneratorReturnQueue = Queue.Queue() #The address generator thread uses this queue to get information back to the API thread.
#These constants are not at the top because if changed they will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
averageProofOfWorkNonceTrialsPerByte = 320 #The amount of work that should be performed (and demanded) per byte of the payload. Double this number to double the work.
payloadLengthExtraBytes = 14000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.
if useVeryEasyProofOfWorkForTesting:
    # Testing-only: drastically cheapen the proof of work so messages send fast.
    averageProofOfWorkNonceTrialsPerByte = averageProofOfWorkNonceTrialsPerByte / 16
    payloadLengthExtraBytes = payloadLengthExtraBytes / 7000
if __name__ == "__main__":
    # --- Startup: locate/create config, migrate files, bootstrap peers, launch UI ---
    # Check the Major version, the first element in the array
    if sqlite3.sqlite_version_info[0] < 3:
        print 'This program requires sqlite version 3 or higher because 2 and lower cannot store NULL values. I see version:', sqlite3.sqlite_version_info
        sys.exit()
    #First try to load the config file (the keys.dat file) from the program directory
    config = ConfigParser.SafeConfigParser()
    config.read('keys.dat')
    try:
        config.get('bitmessagesettings', 'settingsversion')
        #settingsFileExistsInProgramDirectory = True
        print 'Loading config files from same directory as program'
        appdata = ''
    except:
        # NOTE(review): bare excepts here and below are deliberate control
        # flow — "option absent" means "try the next location / first run".
        #Could not load the keys.dat file in the program directory. Perhaps it is in the appdata directory.
        appdata = lookupAppdataFolder()
        #if not os.path.exists(appdata):
        #    os.makedirs(appdata)
        config = ConfigParser.SafeConfigParser()
        config.read(appdata + 'keys.dat')
        try:
            config.get('bitmessagesettings', 'settingsversion')
            print 'Loading existing config files from', appdata
        except:
            #This appears to be the first time running the program; there is no config file (or it cannot be accessed). Create config file.
            config.add_section('bitmessagesettings')
            config.set('bitmessagesettings','settingsversion','1')
            config.set('bitmessagesettings','port','8444')
            config.set('bitmessagesettings','timeformat','%%a, %%d %%b %%Y %%I:%%M %%p')
            config.set('bitmessagesettings','blackwhitelist','black')
            config.set('bitmessagesettings','startonlogon','false')
            if 'linux' in sys.platform:
                config.set('bitmessagesettings','minimizetotray','false')#This isn't implimented yet and when True on Ubuntu causes Bitmessage to disappear while running when minimized.
            else:
                config.set('bitmessagesettings','minimizetotray','true')
            config.set('bitmessagesettings','showtraynotifications','true')
            config.set('bitmessagesettings','startintray','false')
            if storeConfigFilesInSameDirectoryAsProgramByDefault:
                #Just use the same directory as the program and forget about the appdata folder
                appdata = ''
                print 'Creating new config files in same directory as program.'
            else:
                print 'Creating new config files in', appdata
                if not os.path.exists(appdata):
                    os.makedirs(appdata)
            with open(appdata + 'keys.dat', 'wb') as configfile:
                config.write(configfile)
    # Upgrade a version-1 settings file in place (SOCKS options were added later).
    if config.getint('bitmessagesettings','settingsversion') == 1:
        config.set('bitmessagesettings','settingsversion','3') #If the settings version is equal to 2 then the sqlThread will modify the pubkeys table and change the settings version to 3.
        config.set('bitmessagesettings','socksproxytype','none')
        config.set('bitmessagesettings','sockshostname','localhost')
        config.set('bitmessagesettings','socksport','9050')
        config.set('bitmessagesettings','socksauthentication','false')
        config.set('bitmessagesettings','socksusername','')
        config.set('bitmessagesettings','sockspassword','')
        config.set('bitmessagesettings','keysencrypted','false')
        config.set('bitmessagesettings','messagesencrypted','false')
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
    #Let us now see if we should move the messages.dat file. There is an option in the settings to switch 'Portable Mode' on or off. Most of the files are moved instantly, but the messages.dat file cannot be moved while it is open. Now that it is not open we can move it now!
    try:
        config.getboolean('bitmessagesettings', 'movemessagstoprog')
        #If we have reached this point then we must move the messages.dat file from the appdata folder to the program folder
        print 'Moving messages.dat from its old location in the application data folder to its new home along side the program.'
        shutil.move(lookupAppdataFolder()+'messages.dat','messages.dat')
        config.remove_option('bitmessagesettings', 'movemessagstoprog')
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
    except:
        pass
    try:
        config.getboolean('bitmessagesettings', 'movemessagstoappdata')
        #If we have reached this point then we must move the messages.dat file from the appdata folder to the program folder
        print 'Moving messages.dat from its old location next to the program to its new home in the application data folder.'
        shutil.move('messages.dat',lookupAppdataFolder()+'messages.dat')
        config.remove_option('bitmessagesettings', 'movemessagstoappdata')
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
    except:
        pass
    # Load the pickled knownNodes peer list, creating a default one on first run.
    try:
        pickleFile = open(appdata + 'knownnodes.dat', 'rb')
        knownNodes = pickle.load(pickleFile)
        pickleFile.close()
    except:
        createDefaultKnownNodes(appdata)
        pickleFile = open(appdata + 'knownnodes.dat', 'rb')
        knownNodes = pickle.load(pickleFile)
        pickleFile.close()
    if config.getint('bitmessagesettings', 'settingsversion') > 3:
        print 'Bitmessage cannot read future versions of the keys file (keys.dat). Run the newer version of Bitmessage.'
        raise SystemExit
    #DNS bootstrap. This could be programmed to use the SOCKS proxy to do the DNS lookup some day but for now we will just rely on the entries in defaultKnownNodes.py. Hopefully either they are up to date or the user has run Bitmessage recently without SOCKS turned on and received good bootstrap nodes using that method.
    if config.get('bitmessagesettings', 'socksproxytype') == 'none':
        try:
            for item in socket.getaddrinfo('bootstrap8080.bitmessage.org',80):
                print 'Adding', item[4][0],'to knownNodes based on DNS boostrap method'
                knownNodes[1][item[4][0]] = (8080,int(time.time()))
        except:
            print 'bootstrap8080.bitmessage.org DNS bootstraping failed.'
        try:
            for item in socket.getaddrinfo('bootstrap8444.bitmessage.org',80):
                print 'Adding', item[4][0],'to knownNodes based on DNS boostrap method'
                knownNodes[1][item[4][0]] = (8444,int(time.time()))
        except:
            print 'bootstrap8444.bitmessage.org DNS bootstrapping failed.'
    else:
        print 'DNS bootstrap skipped because SOCKS is used.'
    # Launch the Qt user interface.
    app = QtGui.QApplication(sys.argv)
    app.setStyleSheet("QStatusBar::item { border: 0px solid black }")
    myapp = MyForm()
    myapp.show()
    if config.getboolean('bitmessagesettings', 'startintray'):
        myapp.hide()
        myapp.trayIcon.show()
        #self.hidden = True
        #self.setWindowState(self.windowState() & QtCore.Qt.WindowMinimized)
        #self.hide()
    if 'win32' in sys.platform or 'win64' in sys.platform:
        myapp.setWindowFlags(Qt.ToolTip)
    sys.exit(app.exec_())
# So far, the Bitmessage protocol, this client, the Wiki, and the forums
# are all a one-man operation. Bitcoin tips are quite appreciated!
# 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u
|
#!/usr/bin/env python
from ..common import *
from ..extractor import VideoExtractor
from xml.dom.minidom import parseString
class YouTube(VideoExtractor):
    """you-get extractor for YouTube videos, playlists, and live streams."""
    name = "YouTube"

    # YouTube media encoding options, in descending quality order.
    # http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs. Retrieved July 17, 2014.
    # Each entry maps an itag to container/codec/bitrate metadata; the
    # commented-out entries are 3D formats that are intentionally disabled.
    stream_types = [
        {'itag': '38', 'container': 'MP4', 'video_resolution': '3072p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3.5-5', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
        #{'itag': '85', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '3-4', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
        {'itag': '46', 'container': 'WebM', 'video_resolution': '1080p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
        {'itag': '37', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3-4.3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
        #{'itag': '102', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
        {'itag': '45', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '2', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
        #{'itag': '84', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
        {'itag': '22', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
        {'itag': '120', 'container': 'FLV', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'Main@L3.1', 'video_bitrate': '2', 'audio_encoding': 'AAC', 'audio_bitrate': '128'}, # Live streaming only
        {'itag': '44', 'container': 'WebM', 'video_resolution': '480p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '1', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
        {'itag': '35', 'container': 'FLV', 'video_resolution': '480p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.8-1', 'audio_encoding': 'AAC', 'audio_bitrate': '128'},
        #{'itag': '101', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
        #{'itag': '100', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
        {'itag': '43', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
        {'itag': '34', 'container': 'FLV', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '128'},
        #{'itag': '82', 'container': 'MP4', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
        {'itag': '18', 'container': 'MP4', 'video_resolution': '270p/360p', 'video_encoding': 'H.264', 'video_profile': 'Baseline', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
        {'itag': '6', 'container': 'FLV', 'video_resolution': '270p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.8', 'audio_encoding': 'MP3', 'audio_bitrate': '64'},
        #{'itag': '83', 'container': 'MP4', 'video_resolution': '240p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
        {'itag': '13', 'container': '3GP', 'video_resolution': '', 'video_encoding': 'MPEG-4 Visual', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': ''},
        {'itag': '5', 'container': 'FLV', 'video_resolution': '240p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.25', 'audio_encoding': 'MP3', 'audio_bitrate': '64'},
        {'itag': '36', 'container': '3GP', 'video_resolution': '240p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.175', 'audio_encoding': 'AAC', 'audio_bitrate': '36'},
        {'itag': '17', 'container': '3GP', 'video_resolution': '144p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.05', 'audio_encoding': 'AAC', 'audio_bitrate': '24'},
    ]
    def decipher(js, s):
        """Compute the deciphered signature for scrambled parameter `s`.

        Locates the signature-decryption function inside the downloaded
        player JavaScript `js`, transliterates it (and its helpers) into
        Python source via regex rewrites, then exec()s the result.

        NOTE(review): exec() here runs Python derived from remotely fetched
        JS. The regex translation constrains its shape, but this is still
        executing transformed untrusted input — handle with care.
        """
        # Examples:
        # - https://www.youtube.com/yts/jsbin/player-da_DK-vflWlK-zq/base.js
        # - https://www.youtube.com/yts/jsbin/player-vflvABTsY/da_DK/base.js
        # - https://www.youtube.com/yts/jsbin/player-vfls4aurX/da_DK/base.js
        def tr_js(code):
            # Line-by-line regex transliteration of a JS function to Python.
            code = re.sub(r'function', r'def', code)
            # Rename JS identifiers that collide with Python keywords.
            code = re.sub(r'(\W)(as|if|in|is|or)\(', r'\1_\2(', code)
            code = re.sub(r'\$', '_dollar', code)
            code = re.sub(r'\{', r':\n\t', code)
            code = re.sub(r'\}', r'\n', code)
            code = re.sub(r'var\s+', r'', code)
            code = re.sub(r'(\w+).join\(""\)', r'"".join(\1)', code)
            code = re.sub(r'(\w+).length', r'len(\1)', code)
            code = re.sub(r'(\w+).slice\((\w+)\)', r'\1[\2:]', code)
            code = re.sub(r'(\w+).splice\((\w+),(\w+)\)', r'del \1[\2:\2+\3]', code)
            code = re.sub(r'(\w+).split\(""\)', r'list(\1)', code)
            return code

        js = js.replace('\n', ' ')
        # f1: name of the top-level signature function in the player JS;
        # three patterns cover different player revisions.
        f1 = match1(js, r'\.set\(\w+\.sp,\(0,window\.encodeURIComponent\)\(([$\w]+)') or \
            match1(js, r'\.set\(\w+\.sp,([$\w]+)\(\w+\.s\)\)') or \
            match1(js, r'"signature",([$\w]+)\(\w+\.\w+\)')
        f1def = match1(js, r'function %s(\(\w+\)\{[^\{]+\})' % re.escape(f1)) or \
            match1(js, r'\W%s=function(\(\w+\)\{[^\{]+\})' % re.escape(f1))
        # Strip the object prefix from helper calls like xx.yy(a,1) -> yy(a,1).
        f1def = re.sub(r'([$\w]+\.)([$\w]+\(\w+,\d+\))', r'\2', f1def)
        f1def = 'function main_%s%s' % (f1, f1def) # prefix to avoid potential namespace conflict
        code = tr_js(f1def)
        # f2s: helper functions invoked by f1 as name(arg, int).
        f2s = set(re.findall(r'([$\w]+)\(\w+,\d+\)', f1def))
        for f2 in f2s:
            f2e = re.escape(f2)
            f2def = re.search(r'[^$\w]%s:function\((\w+,\w+)\)(\{[^\{\}]+\})' % f2e, js)
            if f2def:
                f2def = 'function {}({}){}'.format(f2e, f2def.group(1), f2def.group(2))
            else:
                # Single-argument variant; pad a dummy second parameter.
                f2def = re.search(r'[^$\w]%s:function\((\w+)\)(\{[^\{\}]+\})' % f2e, js)
                f2def = 'function {}({},b){}'.format(f2e, f2def.group(1), f2def.group(2))
            f2 = re.sub(r'(\W)(as|if|in|is|or)\(', r'\1_\2(', f2)
            f2 = re.sub(r'\$', '_dollar', f2)
            code = code + 'global %s\n' % f2 + tr_js(f2def)
        f1 = re.sub(r'(as|if|in|is|or)', r'_\1', f1)
        f1 = re.sub(r'\$', '_dollar', f1)
        code = code + 'sig=main_%s(s)' % f1 # prefix to avoid potential namespace conflict
        exec(code, globals(), locals())
        return locals()['sig']
def chunk_by_range(url, size):
urls = []
chunk_size = 10485760
start, end = 0, chunk_size - 1
urls.append('%s&range=%s-%s' % (url, start, end))
while end + 1 < size: # processed size < expected size
start, end = end + 1, end + chunk_size
urls.append('%s&range=%s-%s' % (url, start, end))
return urls
def get_url_from_vid(vid):
return 'https://youtu.be/{}'.format(vid)
def get_vid_from_url(url):
"""Extracts video ID from URL.
"""
return match1(url, r'youtu\.be/([^?/]+)') or \
match1(url, r'youtube\.com/embed/([^/?]+)') or \
match1(url, r'youtube\.com/v/([^/?]+)') or \
match1(url, r'youtube\.com/watch/([^/?]+)') or \
parse_query_param(url, 'v') or \
parse_query_param(parse_query_param(url, 'u'), 'v')
def get_playlist_id_from_url(url):
"""Extracts playlist ID from URL.
"""
return parse_query_param(url, 'list') or \
parse_query_param(url, 'p')
def download_playlist_by_url(self, url, **kwargs):
self.url = url
playlist_id = self.__class__.get_playlist_id_from_url(self.url)
if playlist_id is None:
log.wtf('[Failed] Unsupported URL pattern.')
video_page = get_content('https://www.youtube.com/playlist?list=%s' % playlist_id)
from html.parser import HTMLParser
videos = sorted([HTMLParser().unescape(video)
for video in re.findall(r'<a href="(/watch\?[^"]+)"', video_page)
if parse_query_param(video, 'index')],
key=lambda video: parse_query_param(video, 'index'))
# Parse browse_ajax page for more videos to load
load_more_href = match1(video_page, r'data-uix-load-more-href="([^"]+)"')
while load_more_href:
browse_ajax = get_content('https://www.youtube.com/%s' % load_more_href)
browse_data = json.loads(browse_ajax)
load_more_widget_html = browse_data['load_more_widget_html']
content_html = browse_data['content_html']
vs = set(re.findall(r'href="(/watch\?[^"]+)"', content_html))
videos += sorted([HTMLParser().unescape(video)
for video in list(vs)
if parse_query_param(video, 'index')])
load_more_href = match1(load_more_widget_html, r'data-uix-load-more-href="([^"]+)"')
self.title = re.search(r'<meta name="title" content="([^"]+)"', video_page).group(1)
self.p_playlist()
for video in videos:
vid = parse_query_param(video, 'v')
index = parse_query_param(video, 'index')
try:
self.__class__().download_by_url(self.__class__.get_url_from_vid(vid), index=index, **kwargs)
except:
pass
    def prepare(self, **kwargs):
        """Populate self.streams, self.caption_tracks, and self.dash_streams.

        Resolves the video ID from self.url (delegating playlist URLs to
        download_playlist_by_url), queries get_video_info, parses the watch
        page for the ytplayer config, then registers regular streams,
        caption tracks, and DASH streams. Live streams are handed off to
        ffmpeg and the process exits.
        """
        assert self.url or self.vid

        if not self.vid and self.url:
            self.vid = self.__class__.get_vid_from_url(self.url)

            if self.vid is None:
                # Not a single-video URL: treat it as a playlist and stop.
                self.download_playlist_by_url(self.url, **kwargs)
                exit(0)

        video_info = parse.parse_qs(get_content('https://www.youtube.com/get_video_info?video_id={}'.format(self.vid)))

        ytplayer_config = None
        if 'status' not in video_info:
            log.wtf('[Failed] Unknown status.', exit_code=None)
            raise
        elif video_info['status'] == ['ok']:
            if 'use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']:
                self.title = parse.unquote_plus(video_info['title'][0])
                # Parse video page (for DASH)
                video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
                try:
                    ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))
                    self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
                    # Workaround: get_video_info returns bad s. Why?
                    stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
                except:
                    # Fall back to the get_video_info stream map; no player
                    # JS is available in this path.
                    stream_list = video_info['url_encoded_fmt_stream_map'][0].split(',')
                    self.html5player = None

            else:
                # Parse video page instead
                video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
                ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))

                self.title = ytplayer_config['args']['title']
                self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
                stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')

        elif video_info['status'] == ['fail']:
            if video_info['errorcode'] == ['150']:
                video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
                try:
                    ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+});ytplayer', video_page).group(1))
                except:
                    msg = re.search('class="message">([^<]+)<', video_page).group(1)
                    log.wtf('[Failed] "%s"' % msg.strip(), exit_code=None)
                    raise

                if 'title' in ytplayer_config['args']:
                    # 150 Restricted from playback on certain sites
                    # Parse video page instead
                    self.title = ytplayer_config['args']['title']
                    self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
                    stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
                else:
                    log.wtf('[Error] The uploader has not made this video available in your country.', exit_code=None)
                    raise
                #self.title = re.search('<meta name="title" content="([^"]+)"', video_page).group(1)
                #stream_list = []

            elif video_info['errorcode'] == ['100']:
                log.wtf('[Failed] This video does not exist.', exit_code=None) #int(video_info['errorcode'][0])
                raise
            else:
                log.wtf('[Failed] %s' % video_info['reason'][0], exit_code=None) #int(video_info['errorcode'][0])
                raise
        else:
            log.wtf('[Failed] Invalid status.', exit_code=None)
            raise

        # YouTube Live
        if ytplayer_config and (ytplayer_config['args'].get('livestream') == '1' or ytplayer_config['args'].get('live_playback') == '1'):
            hlsvp = ytplayer_config['args']['hlsvp']

            if 'info_only' in kwargs and kwargs['info_only']:
                return
            else:
                # Live streams go straight to ffmpeg; nothing more to prepare.
                download_url_ffmpeg(hlsvp, self.title, 'mp4')
                exit(0)

        for stream in stream_list:
            metadata = parse.parse_qs(stream)
            stream_itag = metadata['itag'][0]
            self.streams[stream_itag] = {
                'itag': metadata['itag'][0],
                'url': metadata['url'][0],
                'sig': metadata['sig'][0] if 'sig' in metadata else None,
                's': metadata['s'][0] if 's' in metadata else None,
                'quality': metadata['quality'][0],
                'type': metadata['type'][0],
                'mime': metadata['type'][0].split(';')[0],
                'container': mime_to_container(metadata['type'][0].split(';')[0]),
            }

        # Prepare caption tracks
        # Best-effort: converts each caption track's timed-text XML to SRT.
        try:
            caption_tracks = json.loads(ytplayer_config['args']['player_response'])['captions']['playerCaptionsTracklistRenderer']['captionTracks']
            for ct in caption_tracks:
                ttsurl, lang = ct['baseUrl'], ct['languageCode']

                tts_xml = parseString(get_content(ttsurl))
                transcript = tts_xml.getElementsByTagName('transcript')[0]
                texts = transcript.getElementsByTagName('text')
                srt = ""; seq = 0
                for text in texts:
                    if text.firstChild is None: continue # empty element
                    seq += 1
                    start = float(text.getAttribute('start'))
                    if text.getAttribute('dur'):
                        dur = float(text.getAttribute('dur'))
                    else: dur = 1.0 # could be ill-formed XML
                    finish = start + dur
                    # Format seconds as SRT timestamps (HH:MM:SS,mmm).
                    m, s = divmod(start, 60); h, m = divmod(m, 60)
                    start = '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')
                    m, s = divmod(finish, 60); h, m = divmod(m, 60)
                    finish = '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')
                    content = unescape_html(text.firstChild.nodeValue)

                    srt += '%s\n' % str(seq)
                    srt += '%s --> %s\n' % (start, finish)
                    srt += '%s\n\n' % content

                self.caption_tracks[lang] = srt
        except: pass

        # Prepare DASH streams
        # Parses the DASH MPD manifest, pairing each video Representation
        # with the best (last-listed) audio Representation of the matching
        # container.
        try:
            dashmpd = ytplayer_config['args']['dashmpd']
            dash_xml = parseString(get_content(dashmpd))
            for aset in dash_xml.getElementsByTagName('AdaptationSet'):
                mimeType = aset.getAttribute('mimeType')
                if mimeType == 'audio/mp4':
                    rep = aset.getElementsByTagName('Representation')[-1]
                    burls = rep.getElementsByTagName('BaseURL')
                    dash_mp4_a_url = burls[0].firstChild.nodeValue
                    dash_mp4_a_size = burls[0].getAttribute('yt:contentLength')
                    if not dash_mp4_a_size:
                        try: dash_mp4_a_size = url_size(dash_mp4_a_url)
                        except: continue
                elif mimeType == 'audio/webm':
                    rep = aset.getElementsByTagName('Representation')[-1]
                    burls = rep.getElementsByTagName('BaseURL')
                    dash_webm_a_url = burls[0].firstChild.nodeValue
                    dash_webm_a_size = burls[0].getAttribute('yt:contentLength')
                    if not dash_webm_a_size:
                        try: dash_webm_a_size = url_size(dash_webm_a_url)
                        except: continue
                elif mimeType == 'video/mp4':
                    for rep in aset.getElementsByTagName('Representation'):
                        w = int(rep.getAttribute('width'))
                        h = int(rep.getAttribute('height'))
                        itag = rep.getAttribute('id')
                        burls = rep.getElementsByTagName('BaseURL')
                        dash_url = burls[0].firstChild.nodeValue
                        dash_size = burls[0].getAttribute('yt:contentLength')
                        if not dash_size:
                            try: dash_size = url_size(dash_url)
                            except: continue
                        dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                        dash_mp4_a_urls = self.__class__.chunk_by_range(dash_mp4_a_url, int(dash_mp4_a_size))
                        self.dash_streams[itag] = {
                            'quality': '%sx%s' % (w, h),
                            'itag': itag,
                            'type': mimeType,
                            'mime': mimeType,
                            'container': 'mp4',
                            'src': [dash_urls, dash_mp4_a_urls],
                            'size': int(dash_size) + int(dash_mp4_a_size)
                        }
                elif mimeType == 'video/webm':
                    for rep in aset.getElementsByTagName('Representation'):
                        w = int(rep.getAttribute('width'))
                        h = int(rep.getAttribute('height'))
                        itag = rep.getAttribute('id')
                        burls = rep.getElementsByTagName('BaseURL')
                        dash_url = burls[0].firstChild.nodeValue
                        dash_size = burls[0].getAttribute('yt:contentLength')
                        if not dash_size:
                            try: dash_size = url_size(dash_url)
                            except: continue
                        dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                        dash_webm_a_urls = self.__class__.chunk_by_range(dash_webm_a_url, int(dash_webm_a_size))
                        self.dash_streams[itag] = {
                            'quality': '%sx%s' % (w, h),
                            'itag': itag,
                            'type': mimeType,
                            'mime': mimeType,
                            'container': 'webm',
                            'src': [dash_urls, dash_webm_a_urls],
                            'size': int(dash_size) + int(dash_webm_a_size)
                        }
        except:
            # VEVO
            # Fallback: no MPD manifest, so build DASH streams from the
            # 'adaptive_fmts' stream map instead (requires the player JS
            # for signature deciphering).
            if not self.html5player: return
            self.js = get_content(self.html5player)
            if 'adaptive_fmts' in ytplayer_config['args']:
                streams = [dict([(i.split('=')[0],
                                  parse.unquote(i.split('=')[1]))
                                 for i in afmt.split('&')])
                           for afmt in ytplayer_config['args']['adaptive_fmts'].split(',')]

                for stream in streams: # get over speed limiting
                    stream['url'] += '&ratebypass=yes'
                for stream in streams: # audio
                    if stream['type'].startswith('audio/mp4'):
                        dash_mp4_a_url = stream['url']
                        if 's' in stream:
                            sig = self.__class__.decipher(self.js, stream['s'])
                            dash_mp4_a_url += '&signature={}'.format(sig)
                        dash_mp4_a_size = stream['clen']
                    elif stream['type'].startswith('audio/webm'):
                        dash_webm_a_url = stream['url']
                        if 's' in stream:
                            sig = self.__class__.decipher(self.js, stream['s'])
                            dash_webm_a_url += '&signature={}'.format(sig)
                        dash_webm_a_size = stream['clen']
                for stream in streams: # video
                    if 'size' in stream:
                        if stream['type'].startswith('video/mp4'):
                            mimeType = 'video/mp4'
                            dash_url = stream['url']
                            if 's' in stream:
                                sig = self.__class__.decipher(self.js, stream['s'])
                                dash_url += '&signature={}'.format(sig)
                            dash_size = stream['clen']
                            itag = stream['itag']
                            dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                            dash_mp4_a_urls = self.__class__.chunk_by_range(dash_mp4_a_url, int(dash_mp4_a_size))
                            self.dash_streams[itag] = {
                                'quality': stream['size'],
                                'itag': itag,
                                'type': mimeType,
                                'mime': mimeType,
                                'container': 'mp4',
                                'src': [dash_urls, dash_mp4_a_urls],
                                'size': int(dash_size) + int(dash_mp4_a_size)
                            }
                        elif stream['type'].startswith('video/webm'):
                            mimeType = 'video/webm'
                            dash_url = stream['url']
                            if 's' in stream:
                                sig = self.__class__.decipher(self.js, stream['s'])
                                dash_url += '&signature={}'.format(sig)
                            dash_size = stream['clen']
                            itag = stream['itag']
                            audio_url = None
                            audio_size = None
                            try:
                                # Prefer the webm audio track; fall back to
                                # mp4 audio if none was seen above.
                                audio_url = dash_webm_a_url
                                audio_size = int(dash_webm_a_size)
                            except UnboundLocalError as e:
                                audio_url = dash_mp4_a_url
                                audio_size = int(dash_mp4_a_size)
                            dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
                            audio_urls = self.__class__.chunk_by_range(audio_url, int(audio_size))
                            self.dash_streams[itag] = {
                                'quality': stream['size'],
                                'itag': itag,
                                'type': mimeType,
                                'mime': mimeType,
                                'container': 'webm',
                                'src': [dash_urls, audio_urls],
                                'size': int(dash_size) + int(audio_size)
                            }
def extract(self, **kwargs):
if not self.streams_sorted:
# No stream is available
return
if 'stream_id' in kwargs and kwargs['stream_id']:
# Extract the stream
stream_id = kwargs['stream_id']
if stream_id not in self.streams and stream_id not in self.dash_streams:
log.e('[Error] Invalid video format.')
log.e('Run \'-i\' command with no specific video format to view all available formats.')
exit(2)
else:
# Extract stream with the best quality
stream_id = self.streams_sorted[0]['itag']
if stream_id in self.streams:
src = self.streams[stream_id]['url']
if self.streams[stream_id]['sig'] is not None:
sig = self.streams[stream_id]['sig']
src += '&signature={}'.format(sig)
elif self.streams[stream_id]['s'] is not None:
if not hasattr(self, 'js'):
self.js = get_content(self.html5player)
s = self.streams[stream_id]['s']
sig = self.__class__.decipher(self.js, s)
src += '&signature={}'.format(sig)
self.streams[stream_id]['src'] = [src]
self.streams[stream_id]['size'] = urls_size(self.streams[stream_id]['src'])
# Module-level entry points: a shared extractor instance whose bound methods
# are exported as this site's download hooks.
site = YouTube()
download = site.download_by_url
download_playlist = site.download_playlist_by_url
[youtube] show warning for premieres
#!/usr/bin/env python
from ..common import *
from ..extractor import VideoExtractor
from xml.dom.minidom import parseString
class YouTube(VideoExtractor):
    """you-get extractor for YouTube videos, playlists, and live streams."""
    name = "YouTube"

    # YouTube media encoding options, in descending quality order.
    # http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs. Retrieved July 17, 2014.
    # Each entry maps an itag to container/codec/bitrate metadata; the
    # commented-out entries are 3D formats that are intentionally disabled.
    stream_types = [
        {'itag': '38', 'container': 'MP4', 'video_resolution': '3072p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3.5-5', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
        #{'itag': '85', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '3-4', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
        {'itag': '46', 'container': 'WebM', 'video_resolution': '1080p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
        {'itag': '37', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3-4.3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
        #{'itag': '102', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
        {'itag': '45', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '2', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
        #{'itag': '84', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
        {'itag': '22', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
        {'itag': '120', 'container': 'FLV', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'Main@L3.1', 'video_bitrate': '2', 'audio_encoding': 'AAC', 'audio_bitrate': '128'}, # Live streaming only
        {'itag': '44', 'container': 'WebM', 'video_resolution': '480p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '1', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
        {'itag': '35', 'container': 'FLV', 'video_resolution': '480p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.8-1', 'audio_encoding': 'AAC', 'audio_bitrate': '128'},
        #{'itag': '101', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
        #{'itag': '100', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
        {'itag': '43', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
        {'itag': '34', 'container': 'FLV', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '128'},
        #{'itag': '82', 'container': 'MP4', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
        {'itag': '18', 'container': 'MP4', 'video_resolution': '270p/360p', 'video_encoding': 'H.264', 'video_profile': 'Baseline', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
        {'itag': '6', 'container': 'FLV', 'video_resolution': '270p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.8', 'audio_encoding': 'MP3', 'audio_bitrate': '64'},
        #{'itag': '83', 'container': 'MP4', 'video_resolution': '240p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
        {'itag': '13', 'container': '3GP', 'video_resolution': '', 'video_encoding': 'MPEG-4 Visual', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': ''},
        {'itag': '5', 'container': 'FLV', 'video_resolution': '240p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.25', 'audio_encoding': 'MP3', 'audio_bitrate': '64'},
        {'itag': '36', 'container': '3GP', 'video_resolution': '240p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.175', 'audio_encoding': 'AAC', 'audio_bitrate': '36'},
        {'itag': '17', 'container': '3GP', 'video_resolution': '144p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.05', 'audio_encoding': 'AAC', 'audio_bitrate': '24'},
    ]
    def decipher(js, s):
        """Compute the deciphered signature for scrambled parameter `s`.

        Locates the signature-decryption function inside the downloaded
        player JavaScript `js`, transliterates it (and its helpers) into
        Python source via regex rewrites, then exec()s the result.

        NOTE(review): exec() here runs Python derived from remotely fetched
        JS. The regex translation constrains its shape, but this is still
        executing transformed untrusted input — handle with care.
        """
        # Examples:
        # - https://www.youtube.com/yts/jsbin/player-da_DK-vflWlK-zq/base.js
        # - https://www.youtube.com/yts/jsbin/player-vflvABTsY/da_DK/base.js
        # - https://www.youtube.com/yts/jsbin/player-vfls4aurX/da_DK/base.js
        def tr_js(code):
            # Line-by-line regex transliteration of a JS function to Python.
            code = re.sub(r'function', r'def', code)
            # Rename JS identifiers that collide with Python keywords.
            code = re.sub(r'(\W)(as|if|in|is|or)\(', r'\1_\2(', code)
            code = re.sub(r'\$', '_dollar', code)
            code = re.sub(r'\{', r':\n\t', code)
            code = re.sub(r'\}', r'\n', code)
            code = re.sub(r'var\s+', r'', code)
            code = re.sub(r'(\w+).join\(""\)', r'"".join(\1)', code)
            code = re.sub(r'(\w+).length', r'len(\1)', code)
            code = re.sub(r'(\w+).slice\((\w+)\)', r'\1[\2:]', code)
            code = re.sub(r'(\w+).splice\((\w+),(\w+)\)', r'del \1[\2:\2+\3]', code)
            code = re.sub(r'(\w+).split\(""\)', r'list(\1)', code)
            return code

        js = js.replace('\n', ' ')
        # f1: name of the top-level signature function in the player JS;
        # three patterns cover different player revisions.
        f1 = match1(js, r'\.set\(\w+\.sp,\(0,window\.encodeURIComponent\)\(([$\w]+)') or \
            match1(js, r'\.set\(\w+\.sp,([$\w]+)\(\w+\.s\)\)') or \
            match1(js, r'"signature",([$\w]+)\(\w+\.\w+\)')
        f1def = match1(js, r'function %s(\(\w+\)\{[^\{]+\})' % re.escape(f1)) or \
            match1(js, r'\W%s=function(\(\w+\)\{[^\{]+\})' % re.escape(f1))
        # Strip the object prefix from helper calls like xx.yy(a,1) -> yy(a,1).
        f1def = re.sub(r'([$\w]+\.)([$\w]+\(\w+,\d+\))', r'\2', f1def)
        f1def = 'function main_%s%s' % (f1, f1def) # prefix to avoid potential namespace conflict
        code = tr_js(f1def)
        # f2s: helper functions invoked by f1 as name(arg, int).
        f2s = set(re.findall(r'([$\w]+)\(\w+,\d+\)', f1def))
        for f2 in f2s:
            f2e = re.escape(f2)
            f2def = re.search(r'[^$\w]%s:function\((\w+,\w+)\)(\{[^\{\}]+\})' % f2e, js)
            if f2def:
                f2def = 'function {}({}){}'.format(f2e, f2def.group(1), f2def.group(2))
            else:
                # Single-argument variant; pad a dummy second parameter.
                f2def = re.search(r'[^$\w]%s:function\((\w+)\)(\{[^\{\}]+\})' % f2e, js)
                f2def = 'function {}({},b){}'.format(f2e, f2def.group(1), f2def.group(2))
            f2 = re.sub(r'(\W)(as|if|in|is|or)\(', r'\1_\2(', f2)
            f2 = re.sub(r'\$', '_dollar', f2)
            code = code + 'global %s\n' % f2 + tr_js(f2def)
        f1 = re.sub(r'(as|if|in|is|or)', r'_\1', f1)
        f1 = re.sub(r'\$', '_dollar', f1)
        code = code + 'sig=main_%s(s)' % f1 # prefix to avoid potential namespace conflict
        exec(code, globals(), locals())
        return locals()['sig']
def chunk_by_range(url, size):
urls = []
chunk_size = 10485760
start, end = 0, chunk_size - 1
urls.append('%s&range=%s-%s' % (url, start, end))
while end + 1 < size: # processed size < expected size
start, end = end + 1, end + chunk_size
urls.append('%s&range=%s-%s' % (url, start, end))
return urls
def get_url_from_vid(vid):
return 'https://youtu.be/{}'.format(vid)
def get_vid_from_url(url):
"""Extracts video ID from URL.
"""
return match1(url, r'youtu\.be/([^?/]+)') or \
match1(url, r'youtube\.com/embed/([^/?]+)') or \
match1(url, r'youtube\.com/v/([^/?]+)') or \
match1(url, r'youtube\.com/watch/([^/?]+)') or \
parse_query_param(url, 'v') or \
parse_query_param(parse_query_param(url, 'u'), 'v')
def get_playlist_id_from_url(url):
"""Extracts playlist ID from URL.
"""
return parse_query_param(url, 'list') or \
parse_query_param(url, 'p')
def download_playlist_by_url(self, url, **kwargs):
self.url = url
playlist_id = self.__class__.get_playlist_id_from_url(self.url)
if playlist_id is None:
log.wtf('[Failed] Unsupported URL pattern.')
video_page = get_content('https://www.youtube.com/playlist?list=%s' % playlist_id)
from html.parser import HTMLParser
videos = sorted([HTMLParser().unescape(video)
for video in re.findall(r'<a href="(/watch\?[^"]+)"', video_page)
if parse_query_param(video, 'index')],
key=lambda video: parse_query_param(video, 'index'))
# Parse browse_ajax page for more videos to load
load_more_href = match1(video_page, r'data-uix-load-more-href="([^"]+)"')
while load_more_href:
browse_ajax = get_content('https://www.youtube.com/%s' % load_more_href)
browse_data = json.loads(browse_ajax)
load_more_widget_html = browse_data['load_more_widget_html']
content_html = browse_data['content_html']
vs = set(re.findall(r'href="(/watch\?[^"]+)"', content_html))
videos += sorted([HTMLParser().unescape(video)
for video in list(vs)
if parse_query_param(video, 'index')])
load_more_href = match1(load_more_widget_html, r'data-uix-load-more-href="([^"]+)"')
self.title = re.search(r'<meta name="title" content="([^"]+)"', video_page).group(1)
self.p_playlist()
for video in videos:
vid = parse_query_param(video, 'v')
index = parse_query_param(video, 'index')
try:
self.__class__().download_by_url(self.__class__.get_url_from_vid(vid), index=index, **kwargs)
except:
pass
def prepare(self, **kwargs):
assert self.url or self.vid
if not self.vid and self.url:
self.vid = self.__class__.get_vid_from_url(self.url)
if self.vid is None:
self.download_playlist_by_url(self.url, **kwargs)
exit(0)
video_info = parse.parse_qs(get_content('https://www.youtube.com/get_video_info?video_id={}'.format(self.vid)))
ytplayer_config = None
if 'status' not in video_info:
log.wtf('[Failed] Unknown status.', exit_code=None)
raise
elif video_info['status'] == ['ok']:
if 'use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']:
self.title = parse.unquote_plus(video_info['title'][0])
# Parse video page (for DASH)
video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
try:
ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))
self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
# Workaround: get_video_info returns bad s. Why?
stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
except:
stream_list = video_info['url_encoded_fmt_stream_map'][0].split(',')
self.html5player = None
else:
# Parse video page instead
video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))
self.title = ytplayer_config['args']['title']
self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
elif video_info['status'] == ['fail']:
if video_info['errorcode'] == ['150']:
video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
try:
ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+});ytplayer', video_page).group(1))
except:
msg = re.search('class="message">([^<]+)<', video_page).group(1)
log.wtf('[Failed] "%s"' % msg.strip(), exit_code=None)
raise
if 'title' in ytplayer_config['args']:
# 150 Restricted from playback on certain sites
# Parse video page instead
self.title = ytplayer_config['args']['title']
self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
else:
log.wtf('[Error] The uploader has not made this video available in your country.', exit_code=None)
raise
#self.title = re.search('<meta name="title" content="([^"]+)"', video_page).group(1)
#stream_list = []
elif video_info['errorcode'] == ['100']:
log.wtf('[Failed] This video does not exist.', exit_code=None) #int(video_info['errorcode'][0])
raise
else:
log.wtf('[Failed] %s' % video_info['reason'][0], exit_code=None) #int(video_info['errorcode'][0])
raise
else:
log.wtf('[Failed] Invalid status.', exit_code=None)
raise
# YouTube Live
if ytplayer_config and (ytplayer_config['args'].get('livestream') == '1' or ytplayer_config['args'].get('live_playback') == '1'):
if 'hlsvp' in ytplayer_config['args']:
hlsvp = ytplayer_config['args']['hlsvp']
else:
player_response= json.loads(ytplayer_config['args']['player_response'])
log.e('[Failed] %s' % player_response['playabilityStatus']['reason'], exit_code=1)
if 'info_only' in kwargs and kwargs['info_only']:
return
else:
download_url_ffmpeg(hlsvp, self.title, 'mp4')
exit(0)
for stream in stream_list:
metadata = parse.parse_qs(stream)
stream_itag = metadata['itag'][0]
self.streams[stream_itag] = {
'itag': metadata['itag'][0],
'url': metadata['url'][0],
'sig': metadata['sig'][0] if 'sig' in metadata else None,
's': metadata['s'][0] if 's' in metadata else None,
'quality': metadata['quality'][0],
'type': metadata['type'][0],
'mime': metadata['type'][0].split(';')[0],
'container': mime_to_container(metadata['type'][0].split(';')[0]),
}
# Prepare caption tracks
try:
caption_tracks = json.loads(ytplayer_config['args']['player_response'])['captions']['playerCaptionsTracklistRenderer']['captionTracks']
for ct in caption_tracks:
ttsurl, lang = ct['baseUrl'], ct['languageCode']
tts_xml = parseString(get_content(ttsurl))
transcript = tts_xml.getElementsByTagName('transcript')[0]
texts = transcript.getElementsByTagName('text')
srt = ""; seq = 0
for text in texts:
if text.firstChild is None: continue # empty element
seq += 1
start = float(text.getAttribute('start'))
if text.getAttribute('dur'):
dur = float(text.getAttribute('dur'))
else: dur = 1.0 # could be ill-formed XML
finish = start + dur
m, s = divmod(start, 60); h, m = divmod(m, 60)
start = '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')
m, s = divmod(finish, 60); h, m = divmod(m, 60)
finish = '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')
content = unescape_html(text.firstChild.nodeValue)
srt += '%s\n' % str(seq)
srt += '%s --> %s\n' % (start, finish)
srt += '%s\n\n' % content
self.caption_tracks[lang] = srt
except: pass
# Prepare DASH streams
try:
dashmpd = ytplayer_config['args']['dashmpd']
dash_xml = parseString(get_content(dashmpd))
for aset in dash_xml.getElementsByTagName('AdaptationSet'):
mimeType = aset.getAttribute('mimeType')
if mimeType == 'audio/mp4':
rep = aset.getElementsByTagName('Representation')[-1]
burls = rep.getElementsByTagName('BaseURL')
dash_mp4_a_url = burls[0].firstChild.nodeValue
dash_mp4_a_size = burls[0].getAttribute('yt:contentLength')
if not dash_mp4_a_size:
try: dash_mp4_a_size = url_size(dash_mp4_a_url)
except: continue
elif mimeType == 'audio/webm':
rep = aset.getElementsByTagName('Representation')[-1]
burls = rep.getElementsByTagName('BaseURL')
dash_webm_a_url = burls[0].firstChild.nodeValue
dash_webm_a_size = burls[0].getAttribute('yt:contentLength')
if not dash_webm_a_size:
try: dash_webm_a_size = url_size(dash_webm_a_url)
except: continue
elif mimeType == 'video/mp4':
for rep in aset.getElementsByTagName('Representation'):
w = int(rep.getAttribute('width'))
h = int(rep.getAttribute('height'))
itag = rep.getAttribute('id')
burls = rep.getElementsByTagName('BaseURL')
dash_url = burls[0].firstChild.nodeValue
dash_size = burls[0].getAttribute('yt:contentLength')
if not dash_size:
try: dash_size = url_size(dash_url)
except: continue
dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
dash_mp4_a_urls = self.__class__.chunk_by_range(dash_mp4_a_url, int(dash_mp4_a_size))
self.dash_streams[itag] = {
'quality': '%sx%s' % (w, h),
'itag': itag,
'type': mimeType,
'mime': mimeType,
'container': 'mp4',
'src': [dash_urls, dash_mp4_a_urls],
'size': int(dash_size) + int(dash_mp4_a_size)
}
elif mimeType == 'video/webm':
for rep in aset.getElementsByTagName('Representation'):
w = int(rep.getAttribute('width'))
h = int(rep.getAttribute('height'))
itag = rep.getAttribute('id')
burls = rep.getElementsByTagName('BaseURL')
dash_url = burls[0].firstChild.nodeValue
dash_size = burls[0].getAttribute('yt:contentLength')
if not dash_size:
try: dash_size = url_size(dash_url)
except: continue
dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
dash_webm_a_urls = self.__class__.chunk_by_range(dash_webm_a_url, int(dash_webm_a_size))
self.dash_streams[itag] = {
'quality': '%sx%s' % (w, h),
'itag': itag,
'type': mimeType,
'mime': mimeType,
'container': 'webm',
'src': [dash_urls, dash_webm_a_urls],
'size': int(dash_size) + int(dash_webm_a_size)
}
except:
# VEVO
if not self.html5player: return
self.js = get_content(self.html5player)
if 'adaptive_fmts' in ytplayer_config['args']:
streams = [dict([(i.split('=')[0],
parse.unquote(i.split('=')[1]))
for i in afmt.split('&')])
for afmt in ytplayer_config['args']['adaptive_fmts'].split(',')]
for stream in streams: # get over speed limiting
stream['url'] += '&ratebypass=yes'
for stream in streams: # audio
if stream['type'].startswith('audio/mp4'):
dash_mp4_a_url = stream['url']
if 's' in stream:
sig = self.__class__.decipher(self.js, stream['s'])
dash_mp4_a_url += '&signature={}'.format(sig)
dash_mp4_a_size = stream['clen']
elif stream['type'].startswith('audio/webm'):
dash_webm_a_url = stream['url']
if 's' in stream:
sig = self.__class__.decipher(self.js, stream['s'])
dash_webm_a_url += '&signature={}'.format(sig)
dash_webm_a_size = stream['clen']
for stream in streams: # video
if 'size' in stream:
if stream['type'].startswith('video/mp4'):
mimeType = 'video/mp4'
dash_url = stream['url']
if 's' in stream:
sig = self.__class__.decipher(self.js, stream['s'])
dash_url += '&signature={}'.format(sig)
dash_size = stream['clen']
itag = stream['itag']
dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
dash_mp4_a_urls = self.__class__.chunk_by_range(dash_mp4_a_url, int(dash_mp4_a_size))
self.dash_streams[itag] = {
'quality': stream['size'],
'itag': itag,
'type': mimeType,
'mime': mimeType,
'container': 'mp4',
'src': [dash_urls, dash_mp4_a_urls],
'size': int(dash_size) + int(dash_mp4_a_size)
}
elif stream['type'].startswith('video/webm'):
mimeType = 'video/webm'
dash_url = stream['url']
if 's' in stream:
sig = self.__class__.decipher(self.js, stream['s'])
dash_url += '&signature={}'.format(sig)
dash_size = stream['clen']
itag = stream['itag']
audio_url = None
audio_size = None
try:
audio_url = dash_webm_a_url
audio_size = int(dash_webm_a_size)
except UnboundLocalError as e:
audio_url = dash_mp4_a_url
audio_size = int(dash_mp4_a_size)
dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
audio_urls = self.__class__.chunk_by_range(audio_url, int(audio_size))
self.dash_streams[itag] = {
'quality': stream['size'],
'itag': itag,
'type': mimeType,
'mime': mimeType,
'container': 'webm',
'src': [dash_urls, audio_urls],
'size': int(dash_size) + int(audio_size)
}
def extract(self, **kwargs):
    """Resolve the final download URL (and total size) of one stream.

    Uses kwargs['stream_id'] when given, otherwise falls back to the
    best-quality stream, then stores 'src' and 'size' on the matching
    entry of self.streams.
    """
    if not self.streams_sorted:
        # prepare() found nothing to download; nothing to do here.
        return

    requested = kwargs.get('stream_id')
    if requested:
        # Caller asked for a specific format.
        stream_id = requested
        if stream_id not in self.streams and stream_id not in self.dash_streams:
            log.e('[Error] Invalid video format.')
            log.e('Run \'-i\' command with no specific video format to view all available formats.')
            exit(2)
    else:
        # Default to the stream with the best quality.
        stream_id = self.streams_sorted[0]['itag']

    if stream_id not in self.streams:
        # DASH streams are fully prepared already.
        return

    stream = self.streams[stream_id]
    src = stream['url']
    signature = None
    if stream['sig'] is not None:
        # Plain signature was provided directly.
        signature = stream['sig']
    elif stream['s'] is not None:
        # Ciphered signature: needs the HTML5 player JS to decipher.
        if not hasattr(self, 'js'):
            self.js = get_content(self.html5player)
        signature = self.__class__.decipher(self.js, stream['s'])
    if signature is not None:
        src += '&signature={}'.format(signature)

    stream['src'] = [src]
    stream['size'] = urls_size(stream['src'])
# Module-level entry points expected by the extractor framework.
site = YouTube()
download = site.download_by_url
download_playlist = site.download_playlist_by_url
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import eventlet
from flask import request, Flask
from st2reactor.sensor.base import Sensor
from st2common.util import isotime
__all__ = [
    'ServiceNotificationsSensor'
]

# Patch the standard library for cooperative (green thread) I/O. Thread
# patching is skipped when a debugger is attached so breakpoints keep
# working.
eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=False if '--use-debugger' in sys.argv else True,
    time=True)

# Top-level keys in an incoming event record which this sensor knows how
# to turn into a trigger payload.
SUPPORTED_SERVICES_EVENT_KEYS = [
    's3'
]
class ServiceNotificationsSensor(Sensor):
    """Sensor which runs a small Flask app listening for AWS (SNS-style)
    service notification webhooks and dispatches an
    ``aws.service_notification`` trigger for every supported record.
    """

    def __init__(self, sensor_service, config=None):
        """
        :param sensor_service: StackStorm sensor service used for logging and
                               trigger dispatch.
        :param config: Pack config; the 'service_notifications_sensor' section
                       is used and must define 'path'.
        :raises ValueError: If the 'path' setting is not configured.
        """
        super(ServiceNotificationsSensor, self).__init__(sensor_service=sensor_service,
                                                         config=config)
        self._config = self._config.get('service_notifications_sensor', {})
        self._host = self._config.get('host', 'localhost')
        self._port = self._config.get('port', 12345)
        self._path = self._config.get('path', None)

        if not self._path:
            raise ValueError('path setting not configured')

        self._log = self._sensor_service.get_logger(__name__)
        self._app = Flask(__name__)

    def setup(self):
        pass

    def run(self):
        # Register the webhook endpoint lazily so self._path from config is used.
        @self._app.route(self._path, methods=['POST'])
        def handle_notification_webhook():
            return self._handle_notification_webhook()

        self._log.info('Listening for webhooks on http://%s:%s%s' %
                       (self._host, self._port, self._path))
        self._app.run(host=self._host, port=self._port)

    def cleanup(self):
        pass

    def add_trigger(self, trigger):
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _handle_notification_webhook(self):
        """Parse an incoming webhook body and process the embedded message.

        Always returns an empty (200) response so the notification service
        does not retry on malformed payloads.
        """
        self._log.debug('Received webhook, data=%s' % (request.data))

        try:
            data = json.loads(request.data)
        except ValueError:
            self._log.debug('Failed to parse body as JSON')
            return ''

        message = data.get('Message', None)

        if not message:
            # Fix: quote only the attribute name in the log message.
            self._log.debug('Payload contains no "Message" attribute, skipping')
            return ''

        try:
            # The SNS "Message" attribute is itself a JSON-encoded string.
            message = json.loads(message)
        except ValueError as e:
            self._log.info('Failed to parse message as JSON: %s (message=%s)' %
                           (str(e), message))
            return ''

        self._process_message(message=message)
        return ''

    def _process_message(self, message):
        """Dispatch one trigger per record found in the message."""
        records = message.get('Records', [])

        for record in records:
            self._dispatch_trigger_for_record(record=record)

    def _dispatch_trigger_for_record(self, record):
        """Build and dispatch an aws.service_notification trigger payload
        from a single event record; silently skips unsupported services.
        """
        trigger = 'aws.service_notification'

        timestamp_datetime = isotime.parse(record['eventTime'])
        # NOTE(review): '%s' is a platform-specific strftime extension (epoch
        # seconds); confirm this sensor never runs on platforms lacking it.
        timestamp = int(timestamp_datetime.strftime('%s'))  # pylint: disable=no-member
        source = record.get('eventSource', 'unknown')
        region = record.get('awsRegion', 'unknown')
        name = record.get('eventName', 'unknown')
        request_parameters = record['requestParameters']
        response_elements = record['responseElements']

        # Build event specified payload object
        event_payload = None
        for event_key in SUPPORTED_SERVICES_EVENT_KEYS:
            value = record.get(event_key, None)
            if value:
                event_payload = value
                break

        if not event_payload:
            # Unsupported service
            return

        payload = {
            # Common attributes for all the AWS services events
            'source': source,
            'region': region,
            'name': name,
            'timestamp': timestamp,
            'request_parameters': request_parameters,
            'response_elements': response_elements,

            # Service and event specific payload
            'payload': event_payload
        }
        self._sensor_service.dispatch(trigger=trigger, payload=payload)
Fix lint issue.
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import eventlet
from flask import request, Flask
from st2reactor.sensor.base import Sensor
from st2common.util import isotime
__all__ = [
    'ServiceNotificationsSensor'
]

# Patch the standard library for cooperative (green thread) I/O. Thread
# patching is skipped when a debugger is attached so breakpoints keep
# working.
eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=False if '--use-debugger' in sys.argv else True,
    time=True)

# Top-level keys in an incoming event record which this sensor knows how
# to turn into a trigger payload.
SUPPORTED_SERVICES_EVENT_KEYS = [
    's3'
]
class ServiceNotificationsSensor(Sensor):
    """Sensor which runs a small Flask app listening for AWS (SNS-style)
    service notification webhooks and dispatches an
    ``aws.service_notification`` trigger for every supported record.
    """

    def __init__(self, sensor_service, config=None):
        """
        :param sensor_service: StackStorm sensor service used for logging and
                               trigger dispatch.
        :param config: Pack config; the 'service_notifications_sensor' section
                       is used and must define 'path'.
        :raises ValueError: If the 'path' setting is not configured.
        """
        super(ServiceNotificationsSensor, self).__init__(sensor_service=sensor_service,
                                                         config=config)
        self._config = self._config.get('service_notifications_sensor', {})
        self._host = self._config.get('host', 'localhost')
        self._port = self._config.get('port', 12345)
        self._path = self._config.get('path', None)

        if not self._path:
            raise ValueError('path setting not configured')

        self._log = self._sensor_service.get_logger(__name__)
        self._app = Flask(__name__)

    def setup(self):
        pass

    def run(self):
        # Register the webhook endpoint lazily so self._path from config is used.
        @self._app.route(self._path, methods=['POST'])
        def handle_notification_webhook():
            return self._handle_notification_webhook()

        self._log.info('Listening for webhooks on http://%s:%s%s' %
                       (self._host, self._port, self._path))
        self._app.run(host=self._host, port=self._port)

    def cleanup(self):
        pass

    def add_trigger(self, trigger):
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _handle_notification_webhook(self):
        """Parse an incoming webhook body and process the embedded message.

        Always returns an empty (200) response so the notification service
        does not retry on malformed payloads.
        """
        self._log.debug('Received webhook, data=%s' % (request.data))

        try:
            data = json.loads(request.data)
        except ValueError:
            self._log.debug('Failed to parse body as JSON')
            return ''

        message = data.get('Message', None)

        if not message:
            # Fix: quote only the attribute name in the log message.
            self._log.debug('Payload contains no "Message" attribute, skipping')
            return ''

        try:
            # The SNS "Message" attribute is itself a JSON-encoded string.
            message = json.loads(message)
        except ValueError as e:
            self._log.info('Failed to parse message as JSON: %s (message=%s)' %
                           (str(e), message))
            return ''

        self._process_message(message=message)
        return ''

    def _process_message(self, message):
        """Dispatch one trigger per record found in the message."""
        records = message.get('Records', [])

        for record in records:
            self._dispatch_trigger_for_record(record=record)

    def _dispatch_trigger_for_record(self, record):
        """Build and dispatch an aws.service_notification trigger payload
        from a single event record; silently skips unsupported services.
        """
        trigger = 'aws.service_notification'

        timestamp_datetime = isotime.parse(record['eventTime'])
        # NOTE(review): '%s' is a platform-specific strftime extension (epoch
        # seconds); confirm this sensor never runs on platforms lacking it.
        timestamp = int(timestamp_datetime.strftime('%s'))  # pylint: disable=no-member
        source = record.get('eventSource', 'unknown')
        region = record.get('awsRegion', 'unknown')
        name = record.get('eventName', 'unknown')
        request_parameters = record['requestParameters']
        response_elements = record['responseElements']

        # Build event specified payload object
        event_payload = None
        for event_key in SUPPORTED_SERVICES_EVENT_KEYS:
            value = record.get(event_key, None)
            if value:
                event_payload = value
                break

        if not event_payload:
            # Unsupported service
            return

        payload = {
            # Common attributes for all the AWS services events
            'source': source,
            'region': region,
            'name': name,
            'timestamp': timestamp,
            'request_parameters': request_parameters,
            'response_elements': response_elements,

            # Service and event specific payload
            'payload': event_payload
        }
        self._sensor_service.dispatch(trigger=trigger, payload=payload)
|
__author__ = 'Brandon C. Kelly'
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve
from scipy.optimize import basinhopping
import samplers
import multiprocessing
import _carmcmc as carmcmcLib
class CarmaModel(object):
"""
Class for running the MCMC sampler assuming a CARMA(p,q) model.
"""
def __init__(self, time, y, ysig, nsamples, p=1, q=0, nwalkers=None, nburnin=None, nthin=1):
"""
Constructor for the CarmaMCMC class.
:param time: The observation times.
:param y: The measured time series.
:param ysig: The standard deviation in the measurements errors on the time series.
:param p: The order of the autoregressive (AR) polynomial.
:param nsamples: The number of MCMC samples to generate.
:param q: The order of the moving average polynomial. Default is q = 0. Note that p > q.
:param doZcarma: If true, then use the z-transformed CAR parameterization.
:param nwalkers: Number of parallel MCMC chains to run in the parallel tempering algorithm. Default is 1 (no
tempering) for p = 1 and max(10, p+q) for p > 1.
:param nburnin: Number of burnin iterations to run. The default is nsamples / 2.
:param nthin: Thinning interval for the MCMC sampler. Default is 1.
"""
try:
p > q
except ValueError:
" Order of AR polynomial, p, must be larger than order of MA polynimial, q."
# convert input to std::vector<double> extension class
self._time = carmcmcLib.vecD()
self._time.extend(time)
self._y = carmcmcLib.vecD()
self._y.extend(y)
self._ysig = carmcmcLib.vecD()
self._ysig.extend(ysig)
if nwalkers is None:
nwalkers = max(10, p + q)
if nburnin is None:
nburnin = nsamples / 2
# save parameters
self.time = time
self.y = y
self.ysig = ysig
self.p = p
self.nsamples = nsamples
self.nburnin = nburnin
self.q = q
self.nwalkers = nwalkers
self.nthin = nthin
def run_mcmc(self):
"""
Run the MCMC sampler. This is actually a wrapper that calls the C++ code that runs the MCMC sampler.
:return: Either a CarmaSample, ZCarmaSample, or CarSample1 object, depending on the values of self.p and
self.doZcarma.
"""
if self.p == 1:
# Treat the CAR(1) case separately
cppSample = carmcmcLib.run_mcmc_car1(self.nsamples, self.nburnin, self._time, self._y, self._ysig,
self.nthin)
# run_mcmc_car1 returns a wrapper around the C++ CAR1 class, convert to python object
sample = CarSample1(self.time, self.y, self.ysig, cppSample)
else:
cppSample = carmcmcLib.run_mcmc_carma(self.nsamples, self.nburnin, self._time, self._y, self._ysig,
self.p, self.q, self.nwalkers, False, self.nthin)
# run_mcmc_car1 returns a wrapper around the C++ CARMA/ZCAR class, convert to a python object
sample = CarmaSample(self.time, self.y, self.ysig, cppSample, q=self.q)
return sample
def _floglik(self, theta, args):
CppCarma, = args
theta_vec = carmcmcLib.vecD()
theta_vec.extend(theta)
logdens = CppCarma.getLogDensity(theta)
return -logdens
class _BHStep(object):
def __init__(self, stepsize=1.0):
self.stepsize = stepsize
def __call__(self, theta):
s = self.stepsize
theta = np.random.uniform(-s, s, theta.shape)
# Don't adapt step size for measurement error scale parameter
theta[1] = np.random.uniform(0.9, 1.1)
def get_map(self, pq):
# get a CARMA process object by running the MCMC sampler for a very short period. This will provide the initial
# guess and the function to compute the log-posterior
nsamples = 1
nburnin = 100
p = pq[0]
q = pq[1]
if p == 1:
# Treat the CAR(1) case separately
CarmaProcess = carmcmcLib.run_mcmc_car1(nsamples, nburnin, self._time, self._y, self._ysig, 1)
else:
CarmaProcess = carmcmcLib.run_mcmc_carma(nsamples, nburnin, self._time, self._y, self._ysig,
p, q, self.nwalkers, False, 1)
initial_theta = CarmaProcess.getSamples()
initial_theta = np.array(initial_theta[0])
initial_theta[1] = 1.0 # initial guess for measurement error scale parameter
# get maximum a posteriori (MAP) estimate
minimizer_kwargs = {'method': 'BFGS', 'jac': False, 'args': (CarmaProcess,)}
custom_step = self._BHStep()
MAP = basinhopping(self._floglik, initial_theta, minimizer_kwargs=minimizer_kwargs, niter=1000,
disp=True, stepsize=1.0, T=10.0, take_step=custom_step)
return MAP
def choose_order(self, pmax, qmax=None, pqlist=None, njobs=1):
if qmax is None:
qmax = pmax - 1
try:
pmax > qmax
except ValueError:
" Order of AR polynomial, p, must be larger than order of MA polynimial, q."
if pqlist is None:
pqlist = []
for p in xrange(pmax):
for q in xrange(qmax):
pqlist.append((p, q))
if njobs == -1:
njobs = multiprocessing.cpu_count()
pool = multiprocessing.Pool(njobs)
MAPs = pool.map(self.get_map, pqlist)
best_AICc = 1e300
best_MAP = MAPs[0]
for MAP, pq in zip(MAPs, pqlist):
nparams = 2 + pq[0] + pq[1]
deviance = 2.0 * MAP.fun
this_AICc = 2.0 * nparams + deviance + 2.0 * nparams * (nparams + 1.0) / (self.time.size - nparams - 1.0)
if this_AICc < best_AICc:
# new optimum found, save values
best_MAP = MAP
best_AICc = this_AICc
self.p = pq[0]
self.q = pq[1]
print 'Model with best AICc has p =', self.p, ' and q = ', self.q
return best_MAP
class CarmaSample(samplers.MCMCSample):
"""
Class for storing and analyzing the MCMC samples of a CARMA(p,q) model.
"""
def __init__(self, time, y, ysig, sampler, q=0, filename=None):
    """
    Constructor for the CarmaSample class.

    :param time: The time values of the time series.
    :param y: The measured values of the time series.
    :param ysig: The standard deviations of the measurement errors of the time series.
    :param sampler: The C++ sampler wrapper; must provide GetLogLikes(),
        getSamples() and getLogPrior().
    :param q: The order of the moving average polynomial.
    :param filename: A string of the name of the file containing the MCMC samples generated by carpack.
    """
    self.time = time  # The time values of the time series
    self.y = y  # The measured values of the time series
    self.ysig = ysig  # The standard deviation of the measurement errors of the time series
    self.q = q  # order of moving average polynomial

    logpost = np.array(sampler.GetLogLikes())
    trace = np.array(sampler.getSamples())

    super(CarmaSample, self).__init__(filename=filename, logpost=logpost, trace=trace)

    # now calculate the AR(p) characteristic polynomial roots, coefficients, MA coefficients, and amplitude of
    # driving noise and add them to the MCMC samples
    print "Calculating PSD Lorentzian parameters..."
    self._ar_roots()
    print "Calculating coefficients of AR polynomial..."
    self._ar_coefs()
    if self.q > 0:
        print "Calculating coefficients of MA polynomial..."
        self._ma_coefs(trace)
    print "Calculating sigma..."
    self._sigma_noise()

    # add the log-likelihoods
    print "Calculating log-likelihoods..."
    loglik = np.empty(logpost.size)
    for i in xrange(logpost.size):
        std_theta = carmcmcLib.vecD()
        std_theta.extend(trace[i, :])
        # log-likelihood = log-posterior minus log-prior for this sample
        loglik[i] = logpost[i] - sampler.getLogPrior(std_theta)

    self._samples['loglik'] = loglik
    # make the parameter names (i.e., the keys) public so the user knows how to get them
    self.parameters = self._samples.keys()
    self.newaxis()
def arrayToVec(self, array, arrType=carmcmcLib.vecD):
    """Return a new *arrType* container populated with the values of *array*."""
    converted = arrType()
    converted.extend(array)
    return converted
def set_logpost(self, logpost):
    """Store the vector of log-posterior values in the samples dictionary."""
    self._samples['logpost'] = logpost  # log-posterior of the CAR(p) model
def generate_from_trace(self, trace):
    """
    Populate the samples dictionary with the base parameters from a raw MCMC trace.

    :param trace: Array of raw MCMC samples with shape (nsamples, 3 + p + q);
        columns are [sqrt(var), measerr_scale, mu, log quad_coefs...].
    """
    # Figure out how many AR terms we have
    self.p = trace.shape[1] - 3 - self.q
    names = ['var', 'measerr_scale', 'mu', 'quad_coefs']
    # NOTE(review): under Python 2, dict.keys() returns a list in insertion-
    # independent order, so this comparison is also False when only the key
    # order differs -- confirm that is acceptable.
    if names != self._samples.keys():
        # Parameters are not already in the dictionary, add them.
        # (Fix: removed an unused `idx = 0` counter.)
        self._samples['var'] = (trace[:, 0] ** 2)  # Variance of the CAR(p) process
        self._samples['measerr_scale'] = trace[:, 1]  # Measurement errors are scaled by this much.
        self._samples['mu'] = trace[:, 2]  # model mean of time series
        # AR(p) polynomial is factored as a product of quadratic terms:
        #   alpha(s) = (quad_coefs[0] + quad_coefs[1] * s + s ** 2) * ...
        # (the trace stores the quadratic coefficients in log space)
        self._samples['quad_coefs'] = np.exp(trace[:, 3:self.p + 3])
def generate_from_file(self, filename):
    """
    Build the dictionary of parameter samples from an ascii file of MCMC samples from carpack.

    :param filename: The name of the file containing the MCMC samples generated by carpack.
        NOTE(review): it is indexed with [0], so a list/tuple of paths appears to
        be expected here -- confirm against callers.
    """
    # TODO: put in exceptions to make sure files are ready correctly
    # Grab the MCMC output; the last column holds the log-posterior values.
    trace = np.genfromtxt(filename[0], skip_header=1)
    self.generate_from_trace(trace[:, 0:-1])
    self.set_logpost(trace[:, -1])
def _ar_roots(self):
    """
    Calculate the roots of the CARMA(p,q) characteristic polynomial and add them to the MCMC samples.

    Also derives the per-root PSD Lorentzian parameters ('psd_width' and
    'psd_centroid') from the real and imaginary parts of each root.
    """
    var = self._samples['var']
    quad_coefs = self._samples['quad_coefs']
    self._samples['ar_roots'] = np.empty((var.size, self.p), dtype=complex)
    self._samples['psd_centroid'] = np.empty((var.size, self.p))
    self._samples['psd_width'] = np.empty((var.size, self.p))

    # The AR polynomial is parameterized as a product of quadratics
    # (see generate_from_trace), so each quadratic contributes two roots
    # via the quadratic formula.
    for i in xrange(self.p / 2):  # integer division under Python 2
        quad1 = quad_coefs[:, 2 * i]
        quad2 = quad_coefs[:, 2 * i + 1]

        discriminant = quad2 ** 2 - 4.0 * quad1
        # real roots where the discriminant is positive, complex-conjugate pairs otherwise
        sqrt_disc = np.where(discriminant > 0, np.sqrt(discriminant), 1j * np.sqrt(np.abs(discriminant)))

        self._samples['ar_roots'][:, 2 * i] = -0.5 * (quad2 + sqrt_disc)
        self._samples['ar_roots'][:, 2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
        # width = -Re(root) / (2 pi), centroid = |Im(root)| / (2 pi)
        self._samples['psd_width'][:, 2 * i] = -np.real(self._samples['ar_roots'][:, 2 * i]) / (2.0 * np.pi)
        self._samples['psd_centroid'][:, 2 * i] = np.abs(np.imag(self._samples['ar_roots'][:, 2 * i])) / \
            (2.0 * np.pi)
        self._samples['psd_width'][:, 2 * i + 1] = -np.real(self._samples['ar_roots'][:, 2 * i + 1]) / (2.0 * np.pi)
        self._samples['psd_centroid'][:, 2 * i + 1] = np.abs(np.imag(self._samples['ar_roots'][:, 2 * i + 1])) / \
            (2.0 * np.pi)

    if self.p % 2 == 1:
        # p is odd, so add in root from linear term
        self._samples['ar_roots'][:, -1] = -quad_coefs[:, -1]
        self._samples['psd_centroid'][:, -1] = 0.0
        self._samples['psd_width'][:, -1] = quad_coefs[:, -1] / (2.0 * np.pi)
def _ma_coefs(self, trace):
    """
    Calculate the CARMA(p,q) moving average coefficients and add them to the MCMC samples.

    :param trace: The raw MCMC samples; the MA quadratic coefficients are
        read (in log space) from columns 3 + p onward.
    """
    nsamples = trace.shape[0]
    if self.q == 0:
        # pure AR model: the single MA coefficient is unity
        self._samples['ma_coefs'] = np.ones((nsamples, 1))
    else:
        # like the AR part, the MA polynomial is parameterized by quadratics
        quad_coefs = np.exp(trace[:, 3 + self.p:])
        roots = np.empty(quad_coefs.shape, dtype=complex)
        for i in xrange(self.q / 2):  # integer division under Python 2
            quad1 = quad_coefs[:, 2 * i]
            quad2 = quad_coefs[:, 2 * i + 1]

            discriminant = quad2 ** 2 - 4.0 * quad1
            # real roots where the discriminant is positive, complex pairs otherwise
            sqrt_disc = np.where(discriminant > 0, np.sqrt(discriminant), 1j * np.sqrt(np.abs(discriminant)))

            roots[:, 2 * i] = -0.5 * (quad2 + sqrt_disc)
            roots[:, 2 * i + 1] = -0.5 * (quad2 - sqrt_disc)

        if self.q % 2 == 1:
            # q is odd, so add in root from linear term
            roots[:, -1] = -quad_coefs[:, -1]

        coefs = np.empty((nsamples, self.q + 1), dtype=complex)
        for i in xrange(nsamples):
            coefs_i = np.poly(roots[i, :])
            # normalize so constant in polynomial is unity, and reverse order to be consistent with MA
            # representation
            coefs[i, :] = (coefs_i / coefs_i[self.q])[::-1]

        self._samples['ma_coefs'] = coefs.real
def _ar_coefs(self):
    """
    Compute the CARMA(p,q) autoregressive coefficients from the sampled AR
    roots and store their real parts under the 'ar_coefs' key.
    """
    ar_roots = self._samples['ar_roots']
    # np.poly maps the p roots of each sample to the p + 1 monic-polynomial
    # coefficients; build one row per MCMC sample.
    coef_rows = [np.poly(sample_roots) for sample_roots in ar_roots]
    self._samples['ar_coefs'] = np.array(coef_rows).real
def _sigma_noise(self):
    """
    Calculate the MCMC samples of the standard deviation of the white noise driving process and add them to the
    MCMC samples.
    """
    # get the CARMA(p,q) model variance of the time series
    var = self._samples['var']

    # get the roots of the AR(p) characteristic polynomial
    ar_roots = self._samples['ar_roots']

    # get the moving average coefficients
    ma_coefs = self._samples['ma_coefs']

    # calculate the variance of a CAR(p) process, assuming sigma = 1.0
    sigma1_variance = np.zeros_like(var) + 0j
    for k in xrange(self.p):
        # denominator: product over the remaining roots (partial-fraction term)
        denom = -2.0 * ar_roots[:, k].real + 0j
        for l in xrange(self.p):
            if l != k:
                denom *= (ar_roots[:, l] - ar_roots[:, k]) * (np.conjugate(ar_roots[:, l]) + ar_roots[:, k])

        # numerator: MA polynomial evaluated at +root and -root
        ma_sum1 = np.zeros_like(ar_roots[:, 0])
        ma_sum2 = ma_sum1.copy()
        for l in xrange(ma_coefs.shape[1]):
            ma_sum1 += ma_coefs[:, l] * ar_roots[:, k] ** l
            ma_sum2 += ma_coefs[:, l] * (-1.0 * ar_roots[:, k]) ** l
        numer = ma_sum1 * ma_sum2
        sigma1_variance += numer / denom

    # rescale so the process variance matches the sampled 'var'
    sigsqr = var / sigma1_variance.real

    # add the white noise sigmas to the MCMC samples
    self._samples['sigma'] = np.sqrt(sigsqr)
def plot_power_spectrum(self, percentile=68.0, nsamples=None, plot_log=True, color="b", alpha=0.5, sp=None,
                        doShow=True):
    """
    Plot the posterior median and the credibility interval corresponding to percentile of the CAR(p) PSD. This
    function returns a tuple containing the lower and upper PSD credibility intervals as a function of
    frequency, the median PSD as a function of frequency, and the frequencies.

    :rtype : A tuple of numpy arrays, (lower PSD, upper PSD, median PSD, frequencies).
    :param percentile: The percentile of the PSD credibility interval to plot.
    :param nsamples: The number of MCMC samples to use to estimate the credibility interval. The default is all
        of them.
    :param plot_log: A boolean. If true, then a logarithmic plot is made.
    :param color: Matplotlib color for the median curve and credibility band.
    :param alpha: Transparency of the credibility band.
    :param sp: Optional existing axes to draw into; a new figure is created when None.
    :param doShow: Call plt.show() when True.
    :raises ValueError: If nsamples exceeds the number of available MCMC samples.
    """
    sigmas = self._samples['sigma']
    ar_coefs = self._samples['ar_coefs']
    ma_coefs = self._samples['ma_coefs']
    if nsamples is None:
        # Use all of the MCMC samples
        nsamples = sigmas.shape[0]
    else:
        # Fix: the original evaluated `nsamples <= sigmas.shape[0]` inside a
        # try block, which never raises; validate explicitly instead.
        if nsamples > sigmas.shape[0]:
            raise ValueError("nsamples must be less than the total number of MCMC samples.")

        nsamples0 = sigmas.shape[0]
        # thin the chain down to nsamples (integer division under Python 2)
        index = np.arange(nsamples) * (nsamples0 / nsamples)
        sigmas = sigmas[index]
        ar_coefs = ar_coefs[index]
        ma_coefs = ma_coefs[index]

    nfreq = 1000
    dt_min = self.time[1:] - self.time[0:self.time.size - 1]
    dt_min = dt_min.min()
    dt_max = self.time.max() - self.time.min()

    # Only plot frequencies corresponding to time scales a factor of 2 shorter and longer than the minimum and
    # maximum time scales probed by the time series.
    freq_max = 1.0 / (dt_min / 2.0)
    freq_min = (1.0 / (2.0 * dt_max))

    # logarithmically spaced frequency grid
    frequencies = np.linspace(np.log(freq_min), np.log(freq_max), num=nfreq)
    frequencies = np.exp(frequencies)
    psd_credint = np.empty((nfreq, 3))

    lower = (100.0 - percentile) / 2.0  # lower and upper intervals for credible region
    upper = 100.0 - lower

    # Compute the PSDs from the MCMC samples
    omega = 2.0 * np.pi * 1j * frequencies
    ar_poly = np.zeros((nfreq, nsamples), dtype=complex)
    ma_poly = np.zeros_like(ar_poly)
    for k in xrange(self.p):
        # Here we compute:
        #   alpha(omega) = ar_coefs[0] * omega^p + ar_coefs[1] * omega^(p-1) + ... + ar_coefs[p]
        # Note that ar_coefs[0] = 1.0.
        argrid, omgrid = np.meshgrid(ar_coefs[:, k], omega)
        ar_poly += argrid * (omgrid ** (self.p - k))
    ar_poly += ar_coefs[:, self.p]
    for k in xrange(ma_coefs.shape[1]):
        # Here we compute:
        #   delta(omega) = ma_coefs[0] + ma_coefs[1] * omega + ... + ma_coefs[q] * omega^q
        magrid, omgrid = np.meshgrid(ma_coefs[:, k], omega)
        ma_poly += magrid * (omgrid ** k)

    psd_samples = np.squeeze(sigmas) ** 2 * np.abs(ma_poly) ** 2 / np.abs(ar_poly) ** 2

    # Now compute credibility interval for power spectrum
    psd_credint[:, 0] = np.percentile(psd_samples, lower, axis=1)
    psd_credint[:, 2] = np.percentile(psd_samples, upper, axis=1)
    psd_credint[:, 1] = np.median(psd_samples, axis=1)

    # Plot the power spectra
    if sp is None:
        fig = plt.figure()
        sp = fig.add_subplot(111)

    if plot_log:
        # plot the posterior median first
        sp.loglog(frequencies, psd_credint[:, 1], color=color)
    else:
        sp.plot(frequencies, psd_credint[:, 1], color=color)

    sp.fill_between(frequencies, psd_credint[:, 2], psd_credint[:, 0], facecolor=color, alpha=alpha)
    sp.set_xlim(frequencies.min(), frequencies.max())
    sp.set_xlabel('Frequency')
    sp.set_ylabel('Power Spectrum')

    if doShow:
        plt.show()

    # NOTE(review): `sp` is reassigned above whenever the caller passed None,
    # so this condition is never true and the 5-tuple including the figure is
    # never returned -- the intent was probably to test whether the caller
    # supplied an axes. Preserved as-is to avoid changing the return shape
    # for existing callers.
    if sp is None:
        return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies, fig)
    else:
        return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies)
def makeKalmanFilter(self, bestfit):
    """
    Construct a C++ Kalman filter object from a point estimate of the model parameters.

    :param bestfit: How the point estimate is formed: 'map' (maximum a
        posteriori), 'median' (posterior median), anything else uses the
        posterior mean.
    :return: A tuple (kfilter, mu) of the KalmanFilterp object and the model mean.
    """
    if bestfit == 'map':
        # use maximum a posteriori estimate
        max_index = self._samples['logpost'].argmax()
        sigsqr = (self._samples['sigma'][max_index] ** 2)[0]
        mu = self._samples['mu'][max_index][0]
        ar_roots = self._samples['ar_roots'][max_index]
        ma_coefs = self._samples['ma_coefs'][max_index]
    elif bestfit == 'median':
        # use posterior median estimate
        sigsqr = np.median(self._samples['sigma']) ** 2
        mu = np.median(self._samples['mu'])
        ar_roots = np.median(self._samples['ar_roots'], axis=0)
        ma_coefs = np.median(self._samples['ma_coefs'], axis=0)
    else:
        # use posterior mean as the best-fit
        sigsqr = np.mean(self._samples['sigma'] ** 2)
        mu = np.mean(self._samples['mu'])
        ar_roots = np.mean(self._samples['ar_roots'], axis=0)
        ma_coefs = np.mean(self._samples['ma_coefs'], axis=0)

    # the filter operates on the mean-subtracted time series
    kfilter = carmcmcLib.KalmanFilterp(self.arrayToVec(self.time),
                                       self.arrayToVec(self.y - mu),
                                       self.arrayToVec(self.ysig),
                                       sigsqr,
                                       self.arrayToVec(ar_roots, carmcmcLib.vecC),
                                       self.arrayToVec(ma_coefs))
    return kfilter, mu
def plot_models(self, bestfit="median", nplot=256, doShow=True, dtPredict=0):
    """
    Plot the measured time series along with the best-fit model interpolation
    and its 1-sigma uncertainty band.

    :param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior ("map"), the
        posterior mean ("mean") or the posterior median ("median").
    :param nplot: The number of time values at which to evaluate the predicted lightcurve.
    :param doShow: If true, call plt.show() after building the figure.
    :param dtPredict: Extra time beyond the last data point over which to extrapolate the model.
    :raise ValueError: If bestfit is not one of 'map', 'median', or 'mean'.
    """
    bestfit = bestfit.lower()
    # BUG FIX: the original `try: bestfit in [...]` was a no-op (a bare comparison
    # never raises), so invalid values silently fell through to the 'mean' branch.
    if bestfit not in ('map', 'median', 'mean'):
        raise ValueError("bestfit must be one of 'map', 'median', or 'mean'")

    fig = plt.figure()
    sp = fig.add_subplot(111)
    sp.errorbar(self.time, self.y, yerr=self.ysig, fmt='ko', label='Data', ms=4, capsize=1)

    # compute the marginal mean and variance of the predicted values
    time_predict = np.linspace(self.time.min(), self.time.max() + dtPredict, nplot)
    predicted_mean, predicted_var = self.predict_lightcurve(time_predict, bestfit=bestfit)
    sp.plot(time_predict, predicted_mean, '-r', label='Kalman Filter')

    # NOTE we can get negative variance here in the first/last indices, so keep
    # only the points with a positive predicted variance for the error band
    idx = np.where(predicted_var > 0)
    time_predict = time_predict[idx]
    predicted_mean = predicted_mean[idx]
    predicted_var = predicted_var[idx]

    predicted_low = predicted_mean - np.sqrt(predicted_var)
    predicted_high = predicted_mean + np.sqrt(predicted_var)

    sp.fill_between(time_predict, predicted_low, predicted_high,
                    edgecolor=None, facecolor='blue', alpha=0.25, label="1-sigma range")
    sp.set_xlabel('Time')
    sp.set_xlim(self.time.min(), self.time.max())
    sp.legend(loc=1)

    if doShow:
        plt.show()
def assess_fit(self, bestfit="map", nplot=256, doShow=True):
    """
    Display plots and provide useful information for assessing the quality of the CARMA(p,q) model fit.

    Builds a 2x2 figure: (1) data with the interpolated model and 1-sigma band,
    (2) standardized residuals with their histogram and the expected standard
    normal, (3) the ACF of the residuals against 95% white-noise bounds, and
    (4) the ACF of the squared residuals against the same bounds.

    :param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior ("map"), the posterior
        mean ("mean") or the posterior median ("median").
    :param nplot: The number of time values at which to evaluate the interpolated model.
    :param doShow: If true, call plt.show(); otherwise return the figure object.
    :raise ValueError: If bestfit is not one of 'map', 'median', or 'mean'.
    """
    bestfit = bestfit.lower()
    # BUG FIX: the original `try: bestfit in [...]` was a no-op validation
    if bestfit not in ('map', 'median', 'mean'):
        raise ValueError("bestfit must be one of 'map', 'median', or 'mean'")

    fig = plt.figure()
    # compute the marginal mean and variance of the predicted values
    time_predict = np.linspace(self.time[1:].min(), self.time.max(), nplot)
    predicted_mean, predicted_var = self.predict_lightcurve(time_predict, bestfit=bestfit)
    predicted_low = predicted_mean - np.sqrt(predicted_var)
    predicted_high = predicted_mean + np.sqrt(predicted_var)

    # plot the time series and the marginal 1-sigma error bands
    plt.subplot(221)
    plt.fill_between(time_predict, predicted_low, predicted_high, color='cyan')
    plt.plot(time_predict, predicted_mean, '-b', label='Interpolation')
    plt.plot(self.time, self.y, 'k.', label='Data')
    plt.xlabel('Time')
    plt.xlim(self.time.min(), self.time.max())
    #plt.legend()

    # plot the standardized residuals and compare with the standard normal
    kfilter, mu = self.makeKalmanFilter(bestfit)
    kfilter.Filter()
    kmean = np.asarray(kfilter.GetMean())
    kvar = np.asarray(kfilter.GetVar())
    standardized_residuals = (self.y - mu - kmean) / np.sqrt(kvar)
    plt.subplot(222)
    plt.xlabel('Time')
    plt.xlim(self.time.min(), self.time.max())

    # Now add the histogram of values to the standardized residuals plot
    pdf, bin_edges = np.histogram(standardized_residuals, bins=10)
    bin_edges = bin_edges[0:pdf.size]
    # Stretch the PDF so that it is readable on the residual plot when plotted horizontally
    pdf = pdf / float(pdf.max()) * 0.4 * self.time.max()
    # Add the histogram to the plot
    plt.barh(bin_edges, pdf, height=bin_edges[1] - bin_edges[0])
    # now overplot the expected standard normal distribution
    expected_pdf = np.exp(-0.5 * bin_edges ** 2)
    expected_pdf = expected_pdf / expected_pdf.max() * 0.4 * self.time.max()
    plt.plot(expected_pdf, bin_edges, 'DarkOrange', lw=2)
    plt.plot(self.time, standardized_residuals, '.k')

    # plot the autocorrelation function of the residuals and compare with the 95% confidence intervals for white
    # noise
    plt.subplot(223)
    maxlag = 50
    wnoise_upper = 1.96 / np.sqrt(self.time.size)
    wnoise_lower = -1.96 / np.sqrt(self.time.size)
    plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
    lags, acf, not_needed1, not_needed2 = plt.acorr(standardized_residuals, maxlags=maxlag, lw=2)
    plt.xlim(0, maxlag)
    plt.xlabel('Time Lag')
    plt.ylabel('ACF of Residuals')

    # plot the autocorrelation function of the squared residuals and compare with the 95% confidence intervals for
    # white noise
    plt.subplot(224)
    squared_residuals = standardized_residuals ** 2
    wnoise_upper = 1.96 / np.sqrt(self.time.size)
    wnoise_lower = -1.96 / np.sqrt(self.time.size)
    plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
    lags, acf, not_needed1, not_needed2 = plt.acorr(squared_residuals - squared_residuals.mean(), maxlags=maxlag,
                                                    lw=2)
    plt.xlim(0, maxlag)
    plt.xlabel('Time Lag')
    plt.ylabel('ACF of Sqrd. Resid.')
    plt.tight_layout()

    if doShow:
        plt.show()
    else:
        return fig
def predict_lightcurve(self, time, bestfit='median'):
    """
    Return the predicted value of the lightcurve and its standard deviation at the input time(s) given the best-fit
    value of the CARMA(p,q) model and the measured lightcurve.

    :param time: A scalar or numpy array containing the time values to predict the time series at.
    :param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior ("map"), the posterior
        mean ("mean") or the posterior median ("median").
    :return: A tuple (yhat, yhat_var) of the predicted value(s) and variance(s).
    :raise ValueError: If bestfit is not one of 'map', 'median', or 'mean'.
    """
    bestfit = bestfit.lower()
    # BUG FIX: the original `try: bestfit in [...]` was a no-op validation
    if bestfit not in ('map', 'median', 'mean'):
        raise ValueError("bestfit must be one of 'map', 'median', or 'mean'")

    # note that KalmanFilter class assumes the time series has zero mean
    kfilter, mu = self.makeKalmanFilter(bestfit)
    kfilter.Filter()
    if np.isscalar(time):
        pred = kfilter.Predict(time)
        yhat = pred.first
        yhat_var = pred.second
    else:
        yhat = np.empty(time.size)
        yhat_var = np.empty(time.size)
        for i in range(time.size):
            pred = kfilter.Predict(time[i])
            yhat[i] = pred.first
            yhat_var[i] = pred.second

    yhat += mu  # add mean back into time series

    return yhat, yhat_var
def simulate_lightcurve(self, time, bestfit='median'):
    """
    Simulate a lightcurve at the input time(s) given the best-fit value of the CARMA(p,q) model and the measured
    lightcurve.

    :param time: A scalar or numpy array containing the time values to simulate the time series at.
    :param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior ("map"), the posterior
        mean ("mean") or the posterior median ("median").
    :return: A numpy array of simulated values at the requested time(s).
    :raise ValueError: If bestfit is not one of 'map', 'median', or 'mean'.
    """
    bestfit = bestfit.lower()
    # BUG FIX: the original `try: bestfit in [...]` was a no-op validation
    if bestfit not in ('map', 'median', 'mean'):
        raise ValueError("bestfit must be one of 'map', 'median', or 'mean'")

    # note that KalmanFilter class assumes the time series has zero mean
    kfilter, mu = self.makeKalmanFilter(bestfit)
    kfilter.Filter()

    # package the requested times as the std::vector<double> extension type
    vtime = carmcmcLib.vecD()
    if np.isscalar(time):
        vtime.append(time)
    else:
        vtime.extend(time)

    ysim = np.asarray(kfilter.Simulate(vtime))
    ysim += mu  # add mean back into time series

    return ysim
def DIC(self):
    """
    Calculate the Deviance Information Criterion for the model.

    The deviance is -2 * log-likelihood, and the DIC is:

        DIC = mean(deviance) + 0.5 * variance(deviance)

    where 0.5 * variance(deviance) estimates the effective number of parameters.
    """
    deviance = -2.0 * self._samples['loglik']
    # effective number of parameters is half the posterior variance of the deviance
    return np.mean(deviance, axis=0) + 0.5 * np.var(deviance, axis=0)
class CarSample1(CarmaSample):
    """
    Class for storing and analyzing the MCMC samples of a CAR(1) model fit.

    This is the p = 1, q = 0 specialization of CarmaSample. The model is
    parameterized by (var, measerr_scale, mu, log_omega) instead of AR roots
    and MA coefficients, where 10**log_omega is the break frequency.
    """

    def __init__(self, time, y, ysig, sampler, filename=None):
        """
        :param time: The time values of the time series.
        :param y: The measured values of the time series.
        :param ysig: The standard deviation of the measurement errors of the time series.
        :param sampler: The wrapper around the C++ CAR1 sampler (e.g., from run_mcmc_car1).
        :param filename: Optional file used by the base class to load/store the samples.
        """
        self.time = time  # The time values of the time series
        self.y = y  # The measured values of the time series
        self.ysig = ysig  # The standard deviation of the measurement errors of the time series
        self.p = 1  # How many AR terms
        self.q = 0  # How many MA terms

        logpost = np.array(sampler.GetLogLikes())
        trace = np.array(sampler.getSamples())

        # deliberately skip CarmaSample.__init__ (its CARMA-specific setup does not
        # apply to CAR(1)) and initialize from CarmaSample's base class instead
        super(CarmaSample, self).__init__(filename=filename, logpost=logpost, trace=trace)

        print("Calculating sigma...")
        self._sigma_noise()

        # add the log-likelihoods
        print("Calculating log-likelihoods...")
        loglik = np.empty(logpost.size)
        for i in range(logpost.size):
            std_theta = carmcmcLib.vecD()
            std_theta.extend(trace[i, :])
            loglik[i] = logpost[i] - sampler.getLogPrior(std_theta)
        self._samples['loglik'] = loglik

        # make the parameter names (i.e., the keys) public so the user knows how to get them
        self.parameters = self._samples.keys()
        self.newaxis()

    def generate_from_trace(self, trace):
        """Unpack the raw MCMC trace columns into the samples dictionary."""
        names = ['sigma', 'measerr_scale', 'mu', 'log_omega']
        # only populate the samples if the dictionary is not already keyed this way
        if names != self._samples.keys():
            self._samples['var'] = trace[:, 0]
            self._samples['measerr_scale'] = trace[:, 1]
            self._samples['mu'] = trace[:, 2]
            self._samples['log_omega'] = trace[:, 3]

    def _ar_roots(self):
        # AR roots are not a CAR(1) parameterization; nothing to compute
        print("_ar_roots not supported for CAR1")
        return

    def _ar_coefs(self):
        # AR coefficients are not a CAR(1) parameterization; nothing to compute
        print("_ar_coefs not supported for CAR1")
        return

    def _sigma_noise(self):
        # convert (var, log_omega) draws to the driving-noise amplitude sigma
        self._samples['sigma'] = np.sqrt(2.0 * self._samples['var'] * np.exp(self._samples['log_omega']))

    def makeKalmanFilter(self, bestfit):
        """
        Construct a C++ KalmanFilter1 object from a point estimate of the CAR(1)
        parameters, along with the corresponding mean of the time series.

        :param bestfit: One of 'map', 'median', or anything else (posterior mean).
        :return: A tuple (kfilter, mu).
        """
        if bestfit == 'map':
            # use maximum a posteriori estimate
            max_index = self._samples['logpost'].argmax()
            sigsqr = (self._samples['sigma'][max_index] ** 2)[0]
            mu = self._samples['mu'][max_index][0]
            log_omega = self._samples['log_omega'][max_index][0]
        elif bestfit == 'median':
            # use posterior median estimate
            sigsqr = np.median(self._samples['sigma']) ** 2
            mu = np.median(self._samples['mu'])
            log_omega = np.median(self._samples['log_omega'])
        else:
            # use posterior mean as the best-fit
            sigsqr = np.mean(self._samples['sigma'] ** 2)
            mu = np.mean(self._samples['mu'])
            log_omega = np.mean(self._samples['log_omega'])

        kfilter = carmcmcLib.KalmanFilter1(self.arrayToVec(self.time),
                                           self.arrayToVec(self.y - mu),
                                           self.arrayToVec(self.ysig),
                                           sigsqr,
                                           10 ** log_omega)
        return kfilter, mu

    def plot_power_spectrum(self, percentile=68.0, plot_log=True, color="b", sp=None, doShow=True):
        """
        Plot the posterior median of the CAR(1) power spectrum along with the
        requested credibility interval.

        :param percentile: Width (in percent) of the credibility band.
        :param plot_log: If true, plot on log-log axes.
        :param color: Line/fill color.
        :param sp: Optional existing axes to plot into; a new figure is created if None.
        :param doShow: If true, call plt.show() before returning.
        :return: (lower, upper, median, frequencies) if doShow, otherwise
            ((lower, upper, median, frequencies), fig); fig is None if sp was supplied.
        """
        sigmas = self._samples['sigma']
        log_omegas = self._samples['log_omega']

        nfreq = 1000
        dt_min = self.time[1:] - self.time[0:self.time.size - 1]
        dt_min = dt_min.min()
        dt_max = self.time.max() - self.time.min()

        # Only plot frequencies corresponding to time scales a factor of 2 shorter and longer than the minimum and
        # maximum time scales probed by the time series.
        freq_max = 1.0 / (dt_min / 2.0)
        freq_min = 1.0 / (2.0 * dt_max)

        # logarithmically spaced frequency grid
        frequencies = np.linspace(np.log(freq_min), np.log(freq_max), num=nfreq)
        frequencies = np.exp(frequencies)
        psd_credint = np.empty((nfreq, 3))

        lower = (100.0 - percentile) / 2.0  # lower and upper intervals for credible region
        upper = 100.0 - lower

        numer = 0.5 / np.pi * sigmas ** 2
        # BUG FIX: the break frequency is 10**log_omega (cf. makeKalmanFilter), so the
        # Lorentzian denominator needs (10**log_omega)**2; the original
        # `10 ** log_omegas ** 2` parsed as 10**(log_omegas**2) by exponent
        # right-associativity.
        omega0_sqr = (10.0 ** log_omegas) ** 2
        for i in range(nfreq):
            denom = omega0_sqr + frequencies[i] ** 2
            psd_samples = numer / denom

            # Now compute credibility interval for power spectrum
            psd_credint[i, 0] = np.percentile(psd_samples, lower, axis=0)
            psd_credint[i, 2] = np.percentile(psd_samples, upper, axis=0)
            psd_credint[i, 1] = np.median(psd_samples, axis=0)

        # Plot the power spectra
        if sp is None:
            fig = plt.figure()
            sp = fig.add_subplot(111)
        else:
            # BUG FIX: the original left `fig` undefined when plotting into an
            # existing axes, raising NameError in the doShow=False return below
            fig = None

        if plot_log:
            # plot the posterior median first
            sp.loglog(frequencies, psd_credint[:, 1], color=color)
        else:
            sp.plot(frequencies, psd_credint[:, 1], color=color)

        sp.fill_between(frequencies, psd_credint[:, 2], psd_credint[:, 0], facecolor=color, alpha=0.5)
        sp.set_xlim(frequencies.min(), frequencies.max())
        sp.set_xlabel('Frequency')
        sp.set_ylabel('Power Spectrum')

        if doShow:
            plt.show()
            return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies)
        else:
            return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies), fig

    def plot_2dpdf(self, name1, name2, doShow=False):
        """
        Plot the joint posterior of two parameters along with their marginal histograms.

        :param name1: Key of the parameter on the x axis.
        :param name2: Key of the parameter on the y axis.
        :param doShow: If true, call plt.show().
        """
        print("Plotting 2d PDF")
        trace1 = self._samples[name1]
        trace2 = self._samples[name2]

        fig = plt.figure()
        # joint distribution
        axJ = fig.add_axes([0.1, 0.1, 0.7, 0.7])  # [left, bottom, width, height]
        # y histogram
        axY = fig.add_axes([0.8, 0.1, 0.125, 0.7], sharey=axJ)
        # x histogram
        axX = fig.add_axes([0.1, 0.8, 0.7, 0.125], sharex=axJ)

        axJ.plot(trace1, trace2, 'ro', ms=1, alpha=0.5)
        axX.hist(trace1, bins=100)
        axY.hist(trace2, orientation='horizontal', bins=100)
        axJ.set_xlabel("%s" % (name1))
        axJ.set_ylabel("%s" % (name2))
        plt.setp(axX.get_xticklabels() + axX.get_yticklabels(), visible=False)
        plt.setp(axY.get_xticklabels() + axY.get_yticklabels(), visible=False)

        if doShow:
            plt.show()
##################
def get_ar_roots(qpo_width, qpo_centroid):
    """
    Return the roots of the characteristic polynomial of the CAR(p) process, given the lorentzian widths and centroids.

    :rtype : a numpy array
    :param qpo_width: The widths of the lorentzian functions defining the PSD.
    :param qpo_centroid: The centroids of the lorentzian functions defining the PSD.
    """
    p = qpo_centroid.size + qpo_width.size
    ar_roots = np.empty(p, dtype=complex)
    # each centroid contributes a complex-conjugate pair of roots
    # (use // and range so the code also runs under Python 3; identical behavior on Python 2)
    for i in range(p // 2):
        ar_roots[2 * i] = qpo_width[i] + 1j * qpo_centroid[i]
        ar_roots[2 * i + 1] = np.conjugate(ar_roots[2 * i])
    if p % 2 == 1:
        # p is odd, so add in low-frequency component
        ar_roots[-1] = qpo_width[-1]

    return -2.0 * np.pi * ar_roots
def power_spectrum(freq, sigma, ar_coef, ma_coefs=[1.0]):
    """
    Return the power spectrum for a CAR(p) process calculated at the input frequencies.

    :param freq: The frequencies at which to calculate the PSD.
    :param sigma: The standard deviation driving white noise.
    :param ar_coef: The CAR(p) model autoregressive coefficients.
    :param ma_coefs: Coefficients of the moving average polynomial
    :rtype : A numpy array.
    :raise ValueError: If more MA than AR coefficients are supplied.
    """
    # BUG FIX: the original `try: len(ma_coefs) <= len(ar_coef)` was a no-op
    # (a bare comparison never raises), so the constraint was never enforced
    if len(ma_coefs) > len(ar_coef):
        raise ValueError("Size of ma_coefs must be less or equal to size of ar_roots.")

    ma_poly = np.polyval(ma_coefs[::-1], 2.0 * np.pi * 1j * freq)  # Evaluate the polynomial in the PSD numerator
    ar_poly = np.polyval(ar_coef, 2.0 * np.pi * 1j * freq)  # Evaluate the polynomial in the PSD denominator
    pspec = sigma ** 2 * np.abs(ma_poly) ** 2 / np.abs(ar_poly) ** 2
    return pspec
def carma_variance(sigsqr, ar_roots, ma_coefs=[1.0], lag=0.0):
    """
    Return the autocovariance function of a CARMA(p,q) process.

    :param sigsqr: The variance in the driving white noise.
    :param ar_roots: The roots of the AR characteristic polynomial.
    :param ma_coefs: The moving average coefficients.
    :param lag: The lag at which to calculate the autocovariance function.
    :raise ValueError: If more MA coefficients than AR roots are supplied.
    """
    # BUG FIX: the original `try: len(ma_coefs) <= len(ar_roots)` was a no-op validation
    if len(ma_coefs) > len(ar_roots):
        raise ValueError("Size of ma_coefs must be less or equal to size of ar_roots.")

    q = len(ma_coefs)
    if q < len(ar_roots):
        # add extra zeros to end of ma_coefs
        # BUG FIX: the original zeroed ma_coefs[1:], destroying any user-supplied
        # coefficients beyond the first; only the padded entries should be zero
        ma_coefs = np.resize(np.array(ma_coefs), len(ar_roots))
        ma_coefs[q:] = 0.0

    sigma1_variance = 0.0 + 0j
    p = ar_roots.size
    # closed-form CARMA autocovariance: sum over the AR roots
    for k in range(p):
        denom_product = 1.0 + 0j
        for l in range(p):
            if l != k:
                denom_product *= (ar_roots[l] - ar_roots[k]) * (np.conjugate(ar_roots[l]) + ar_roots[k])

        denom = -2.0 * denom_product * ar_roots[k].real

        ma_sum1 = 0.0 + 0j
        ma_sum2 = 0.0 + 0j
        for l in range(p):
            ma_sum1 += ma_coefs[l] * ar_roots[k] ** l
            ma_sum2 += ma_coefs[l] * (-1.0 * ar_roots[k]) ** l

        numer = ma_sum1 * ma_sum2 * np.exp(ar_roots[k] * abs(lag))

        sigma1_variance += numer / denom

    return sigsqr * sigma1_variance.real
def carma_process(time, sigsqr, ar_roots, ma_coefs=[1.0]):
"""
Generate a CARMA(p,q) process.
:param time: The time values at which to generate the CARMA(p,q) process at.
:param sigsqr: The variance in the driving white noise term.
:param ar_roots: The roots of the CAR(p) characteristic polynomial.
:param ma_coefs: The moving average coefficients.
:rtype : A numpy array containing the simulated CARMA(p,q) process values at time.
"""
try:
len(ma_coefs) <= len(ar_roots)
except ValueError:
"Size of ma_coefs must be less or equal to size of ar_roots."
p = len(ar_roots)
if len(ma_coefs) < p:
# add extra zeros to end of ma_coefs
ma_coefs = np.resize(np.array(ma_coefs), len(ar_roots))
ma_coefs[1:] = 0.0
time.sort()
# make sure process is stationary
try:
np.any(ar_roots.real < 0)
except ValueError:
"Process is not stationary, real part of roots must be negative."
# make sure the roots are unique
tol = 1e-8
roots_grid = np.meshgrid(ar_roots, ar_roots)
roots_grid1 = roots_grid[0].ravel()
roots_grid2 = roots_grid[1].ravel()
diff_roots = np.abs(roots_grid1 - roots_grid2) / np.abs(roots_grid1 + roots_grid2)
try:
np.any(diff_roots > tol)
except ValueError:
"Roots are not unique."
# Setup the matrix of Eigenvectors for the Kalman Filter transition matrix. This allows us to transform
# quantities into the rotated state basis, which makes the computations for the Kalman filter easier and faster.
EigenMat = np.ones((p, p), dtype=complex)
EigenMat[1, :] = ar_roots
for k in xrange(2, p):
EigenMat[k, :] = ar_roots ** k
# Input vector under the original state space representation
Rvector = np.zeros(p, dtype=complex)
Rvector[-1] = 1.0
# Input vector under rotated state space representation
Jvector = solve(EigenMat, Rvector) # J = inv(E) * R
# Compute the vector of moving average coefficients in the rotated state.
rotated_MA_coefs = ma_coefs.dot(EigenMat)
# Calculate the stationary covariance matrix of the state vector
StateVar = np.empty((p, p), dtype=complex)
for j in xrange(p):
StateVar[:, j] = -sigsqr * Jvector * np.conjugate(Jvector[j]) / (ar_roots + np.conjugate(ar_roots[j]))
# Initialize variance in one-step prediction error and the state vector
PredictionVar = StateVar.copy()
StateVector = np.zeros(p, dtype=complex)
# Convert the current state to matrices for convenience, since we'll be doing some Linear algebra.
StateVector = np.matrix(StateVector).T
StateVar = np.matrix(StateVar)
PredictionVar = np.matrix(PredictionVar)
rotated_MA_coefs = np.matrix(rotated_MA_coefs) # this is a row vector, so no transpose
StateTransition = np.zeros_like(StateVector)
KalmanGain = np.zeros_like(StateVector)
# Initialize the Kalman mean and variance. These are the forecasted values and their variances.
kalman_mean = 0.0
kalman_var = np.real(np.asscalar(rotated_MA_coefs * PredictionVar * rotated_MA_coefs.H))
# simulate the first time series value
y = np.empty_like(time)
y[0] = np.random.normal(kalman_mean, np.sqrt(kalman_var))
# Initialize the innovations, i.e., the KF residuals
innovation = y[0]
for i in xrange(1, time.size):
# First compute the Kalman gain
KalmanGain = PredictionVar * rotated_MA_coefs.H / kalman_var
# update the state vector
StateVector += innovation * KalmanGain
# update the state one-step prediction error variance
PredictionVar -= kalman_var * (KalmanGain * KalmanGain.H)
# predict the next state, do element-wise multiplication
dt = time[i] - time[i - 1]
StateTransition = np.matrix(np.exp(ar_roots * dt)).T
StateVector = np.multiply(StateVector, StateTransition)
# update the predicted state covariance matrix
PredictionVar = np.multiply(StateTransition * StateTransition.H, PredictionVar - StateVar) + StateVar
# now predict the observation and its variance
kalman_mean = np.real(np.asscalar(rotated_MA_coefs * StateVector))
kalman_var = np.real(np.asscalar(rotated_MA_coefs * PredictionVar * rotated_MA_coefs.H))
# simulate the next time series value
y[i] = np.random.normal(kalman_mean, np.sqrt(kalman_var))
# finally, update the innovation
innovation = y[i] - kalman_mean
return y
##################
# Deprecated
class KalmanFilterDeprecated(object):
    """
    Pure-Python Kalman filter for a CARMA(p,q) process, superseded by the C++
    implementation exposed through carmcmcLib and kept here for reference.

    The filter works in the rotated state basis defined by the eigenvectors of
    the state transition matrix; in that basis the transition is diagonal, so
    the per-step updates reduce to element-wise operations.
    """

    def __init__(self, time, y, yvar, sigsqr, ar_roots, ma_coefs=[1.0]):
        """
        Constructor for Kalman Filter class.
        :param time: The time values of the time series.
        :param y: The mean-subtracted time series.
        :param yvar: The variance in the measurement errors on the time series.
        :param sigsqr: The variance of the driving white noise term in the CAR(p) process.
        :param ar_roots: The roots of the autoregressive characteristic polynomial.
        :param ma_coefs: The moving average coefficients; padded with zeros up to order p.
        """
        # NOTE(review): this try/except is a no-op -- the bare comparison never
        # raises, so the MA-order constraint is not actually enforced here.
        try:
            len(ma_coefs) <= ar_roots.size
        except ValueError:
            "Order of MA polynomial cannot be larger than order of AR polynomial."

        self.time = time
        self.y = y
        self.yvar = yvar
        self.sigsqr = sigsqr
        self.ar_roots = ar_roots
        self.p = ar_roots.size  # order of the CARMA(p,q) process
        self.q = len(ma_coefs)
        # pad the MA coefficients with zeros so the state-space model is p-dimensional
        self.ma_coefs = np.append(ma_coefs, np.zeros(self.p - self.q))

    def reset(self):
        """
        Reset the Kalman Filter to its initial state.
        """
        # Setup the matrix of Eigenvectors for the Kalman Filter transition matrix. This allows us to transform
        # quantities into the rotated state basis, which makes the computations for the Kalman filter easier and faster.
        # NOTE(review): assumes p > 1; EigenMat[1, :] would raise IndexError for p == 1.
        EigenMat = np.ones((self.p, self.p), dtype=complex)
        EigenMat[1, :] = self.ar_roots
        for k in xrange(2, self.p):
            EigenMat[k, :] = self.ar_roots ** k

        # Input vector under the original state space representation
        Rvector = np.zeros(self.p, dtype=complex)
        Rvector[-1] = 1.0

        # Input vector under rotated state space representation
        Jvector = solve(EigenMat, Rvector)  # J = inv(E) * R

        # Compute the vector of moving average coefficients in the rotated state.
        rotated_MA_coefs = self.ma_coefs.dot(EigenMat)

        # Calculate the stationary covariance matrix of the state vector
        StateVar = np.empty((self.p, self.p), dtype=complex)
        for j in xrange(self.p):
            StateVar[:, j] = -self.sigsqr * Jvector * np.conjugate(Jvector[j]) / \
                             (self.ar_roots + np.conjugate(self.ar_roots[j]))

        # Initialize variance in one-step prediction error and the state vector
        PredictionVar = StateVar.copy()
        StateVector = np.zeros(self.p, dtype=complex)

        # Convert the current state to matrices for convenience, since we'll be doing some Linear algebra.
        self._StateVector = np.matrix(StateVector).T
        self._StateVar = np.matrix(StateVar)
        self._PredictionVar = np.matrix(PredictionVar)
        self._rotated_MA_coefs = np.matrix(rotated_MA_coefs)  # this is a row vector, so no transpose
        self._StateTransition = np.zeros_like(self._StateVector)
        self._KalmanGain = np.zeros_like(self._StateVector)

        # Initialize the Kalman mean and variance. These are the forecasted values and their variances.
        self.kalman_mean = np.empty_like(self.time)
        self.kalman_var = np.empty_like(self.time)
        self.kalman_mean[0] = 0.0
        # forecast variance at the first point is the stationary variance plus the measurement variance
        self.kalman_var[0] = np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) \
            + self.yvar[0]

        # Initialize the innovations, i.e., the KF residuals
        self._innovation = self.y[0]

        self._current_index = 1

    def update(self):
        """
        Perform one iteration (update) of the Kalman Filter.
        """
        # First compute the Kalman gain
        self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[self._current_index - 1]
        # update the state vector
        self._StateVector += self._innovation * self._KalmanGain
        # update the state one-step prediction error variance
        self._PredictionVar -= self.kalman_var[self._current_index - 1] * (self._KalmanGain * self._KalmanGain.H)
        # predict the next state, do element-wise multiplication
        dt = self.time[self._current_index] - self.time[self._current_index - 1]
        self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
        self._StateVector = np.multiply(self._StateVector, self._StateTransition)
        # update the predicted state covariance matrix
        self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
                                          self._PredictionVar - self._StateVar) + self._StateVar
        # now predict the observation and its variance
        self.kalman_mean[self._current_index] = np.real(np.asscalar(self._rotated_MA_coefs * self._StateVector))
        self.kalman_var[self._current_index] = \
            np.real(np.asscalar(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H))
        # add the measurement variance to the forecast variance
        self.kalman_var[self._current_index] += self.yvar[self._current_index]
        # finally, update the innovation
        self._innovation = self.y[self._current_index] - self.kalman_mean[self._current_index]
        self._current_index += 1

    def filter(self):
        """
        Perform the Kalman Filter on all points of the time series. The kalman mean and variance are returned upon
        completion, and are stored in the instantiated KalmanFilter object.
        """
        self.reset()
        for i in xrange(self.time.size - 1):
            self.update()

        return self.kalman_mean, self.kalman_var

    def predict(self, time_predict):
        """
        Return the predicted value of a lightcurve and its standard deviation at the input time given the input
        values of the CARMA(p,q) model parameters and a measured lightcurve.

        :rtype : A tuple containing the predicted value and its variance.
        :param time_predict: The time at which to predict the lightcurve.
        """
        # NOTE(review): this try/except is a no-op -- backcasting input
        # (time_predict before the first data point) is not actually rejected.
        try:
            self.time.min() > time_predict
        except ValueError:
            "backcasting currently not supported: time_predict must be greater than self.time.min()"

        self.reset()
        # find the index where time[ipredict-1] < time_predict < time[ipredict]
        ipredict = np.max(np.where(self.time < time_predict)) + 1
        for i in xrange(ipredict - 1):
            # run the kalman filter for time < time_predict
            self.update()

        # predict the value of y[time_predict]
        self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[ipredict - 1]
        self._StateVector += self._innovation * self._KalmanGain
        self._PredictionVar -= self.kalman_var[ipredict - 1] * (self._KalmanGain * self._KalmanGain.H)
        dt = time_predict - self.time[ipredict - 1]
        self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
        self._StateVector = np.multiply(self._StateVector, self._StateTransition)
        self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
                                          self._PredictionVar - self._StateVar) + self._StateVar

        ypredict_mean = np.asscalar(np.real(self._rotated_MA_coefs * self._StateVector))
        ypredict_var = np.asscalar(np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H))

        # start the running statistics for the conditional mean and precision of the predicted time series value, given
        # the measured time series
        cprecision = 1.0 / ypredict_var
        cmean = cprecision * ypredict_mean

        if ipredict >= self.time.size:
            # we are forecasting (extrapolating) the value, so no need to run interpolation steps below
            return ypredict_mean, ypredict_var

        # for time > time_predict we need to compute the coefficients for the linear filter, i.e., at time[j]:
        # E(y[j]|{y[i]; j<i}) = alpha[j] + beta[j] * ypredict. we do this using recursions similar to the kalman
        # filter.

        # first set the initial values.
        self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / ypredict_var
        # initialize the coefficients for predicting the state vector at coefs(time_predict|time_predict)
        const_state = self._StateVector - self._KalmanGain * ypredict_mean
        slope_state = self._KalmanGain
        # update the state one-step prediction error variance
        self._PredictionVar -= ypredict_var * (self._KalmanGain * self._KalmanGain.H)
        # do coefs(time_predict|time_predict) --> coefs(time[i+1]|time_predict)
        dt = self.time[ipredict] - time_predict
        self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
        const_state = np.multiply(const_state, self._StateTransition)
        slope_state = np.multiply(slope_state, self._StateTransition)
        # update the predicted state covariance matrix
        self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
                                          self._PredictionVar - self._StateVar) + self._StateVar
        # compute the coefficients for the linear filter at time[ipredict], and compute the variance in the predicted
        # y[ipredict]
        const = np.asscalar(np.real(self._rotated_MA_coefs * const_state))
        slope = np.asscalar(np.real(self._rotated_MA_coefs * slope_state))
        self.kalman_var[ipredict] = \
            np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) + \
            self.yvar[ipredict]

        # update the running conditional mean and variance of the predicted time series value
        cprecision += slope ** 2 / self.kalman_var[ipredict]
        cmean += slope * (self.y[ipredict] - const) / self.kalman_var[ipredict]

        # keep the per-point filter coefficients for inspection/debugging
        self.const = np.zeros(self.time.size)
        self.slope = np.zeros(self.time.size)
        self.const[ipredict] = const
        self.slope[ipredict] = slope

        # now repeat for time > time_predict
        for i in xrange(ipredict + 1, self.time.size):
            self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[i - 1]
            # update the state prediction coefficients: coefs(i|i-1) --> coefs(i|i)
            const_state += self._KalmanGain * (self.y[i - 1] - const)
            slope_state -= self._KalmanGain * slope
            # update the state one-step prediction error variance
            self._PredictionVar -= self.kalman_var[i - 1] * (self._KalmanGain * self._KalmanGain.H)
            # compute the one-step state prediction coefficients: coefs(i|i) --> coefs(i+1|i)
            dt = self.time[i] - self.time[i - 1]
            self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
            const_state = np.multiply(const_state, self._StateTransition)
            slope_state = np.multiply(slope_state, self._StateTransition)
            # compute the state one-step prediction error variance
            self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
                                              self._PredictionVar - self._StateVar) + self._StateVar
            # compute the coefficients for predicting y[i]|y[j],j<i as a function of ypredict
            const = np.asscalar(np.real(self._rotated_MA_coefs * const_state))
            slope = np.asscalar(np.real(self._rotated_MA_coefs * slope_state))
            # compute the variance in predicting y[i]|y[j],j<i
            self.kalman_var[i] = \
                np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) + \
                self.yvar[i]
            # finally, update the running conditional mean and variance of the predicted time series value
            cprecision += slope ** 2 / self.kalman_var[i]
            cmean += slope * (self.y[i] - const) / self.kalman_var[i]

            self.const[i] = const
            self.slope[i] = slope

        cvar = 1.0 / cprecision
        cmean *= cvar

        return cmean, cvar

    def simulate(self, time_simulate):
        """
        Simulate a lightcurve at the input time values of time_simulate, given the measured lightcurve and input
        CARMA(p,q) parameters.

        :rtype : A scalar or numpy array, depending on type of time_simulate.
        :param time_simulate: The time(s) at which to simulate a random draw of the lightcurve conditional on the
            measured time series and the input parameters.
        """
        if np.isscalar(time_simulate):
            cmean, cvar = self.predict(time_simulate)
            ysimulated = np.random.normal(cmean, np.sqrt(cvar))
            return ysimulated
        else:
            # input is array-like, need to simulate values sequentially, adding each value to the measured time series
            # as they are simulated
            time0 = self.time  # save original values
            y0 = self.y
            yvar0 = self.yvar
            ysimulated = np.empty(time_simulate.size)
            # NOTE(review): sorts the caller's array in place
            time_simulate.sort()
            for i in xrange(time_simulate.size):
                cmean, cvar = self.predict(time_simulate[i])
                ysimulated[i] = np.random.normal(cmean, np.sqrt(cvar))  # simulate the time series value
                # find the index where time[isimulate-1] < time_simulate < time[isimulate]
                isimulate = np.max(np.where(self.time < time_simulate[i])) + 1
                # insert the simulated value into the time series array
                # (simulated points get zero measurement variance)
                self.time = np.insert(self.time, isimulate, time_simulate[i])
                self.y = np.insert(self.y, isimulate, ysimulated[i])
                self.yvar = np.insert(self.yvar, isimulate, 0.0)

            # reset measured time series to original values
            self.y = y0
            self.time = time0
            self.yvar = yvar0

        return ysimulated
# Added method to CarmaSample that stores the maximum a posteriori estimate
__author__ = 'Brandon C. Kelly'
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve
from scipy.optimize import basinhopping
import samplers
import multiprocessing
import _carmcmc as carmcmcLib
class CarmaModel(object):
"""
Class for running the MCMC sampler assuming a CARMA(p,q) model.
"""
def __init__(self, time, y, ysig, nsamples, p=1, q=0, nwalkers=None, nburnin=None, nthin=1):
    """
    Constructor for the CarmaMCMC class.

    :param time: The observation times.
    :param y: The measured time series.
    :param ysig: The standard deviation in the measurements errors on the time series.
    :param p: The order of the autoregressive (AR) polynomial.
    :param nsamples: The number of MCMC samples to generate.
    :param q: The order of the moving average polynomial. Default is q = 0. Note that p > q.
    :param nwalkers: Number of parallel MCMC chains to run in the parallel tempering algorithm.
    :param nburnin: Number of burnin iterations to run. The default is nsamples / 2.
    :param nthin: Thinning interval for the MCMC sampler. Default is 1.
    :raise ValueError: If p <= q.
    """
    # BUG FIX: the original `try: p > q` was a no-op validation (a bare
    # comparison never raises), so p <= q was silently accepted
    if p <= q:
        raise ValueError("Order of AR polynomial, p, must be larger than order of MA polynomial, q.")

    # convert input to std::vector<double> extension class
    self._time = carmcmcLib.vecD()
    self._time.extend(time)
    self._y = carmcmcLib.vecD()
    self._y.extend(y)
    self._ysig = carmcmcLib.vecD()
    self._ysig.extend(ysig)

    if nwalkers is None:
        # NOTE(review): earlier docs describe a default of 1 (no tempering) for
        # p == 1, but this default is applied for all p -- TODO confirm intent
        nwalkers = max(10, p + q)
    if nburnin is None:
        # floor division: same result as Python 2's integer '/'
        nburnin = nsamples // 2

    # save parameters
    self.time = time
    self.y = y
    self.ysig = ysig
    self.p = p
    self.nsamples = nsamples
    self.nburnin = nburnin
    self.q = q
    self.nwalkers = nwalkers
    self.nthin = nthin
def run_mcmc(self):
"""
Run the MCMC sampler. This is actually a wrapper that calls the C++ code that runs the MCMC sampler.
:return: Either a CarmaSample, ZCarmaSample, or CarSample1 object, depending on the values of self.p and
self.doZcarma.
"""
if self.p == 1:
# Treat the CAR(1) case separately
cppSample = carmcmcLib.run_mcmc_car1(self.nsamples, self.nburnin, self._time, self._y, self._ysig,
self.nthin)
# run_mcmc_car1 returns a wrapper around the C++ CAR1 class, convert to python object
sample = CarSample1(self.time, self.y, self.ysig, cppSample)
else:
cppSample = carmcmcLib.run_mcmc_carma(self.nsamples, self.nburnin, self._time, self._y, self._ysig,
self.p, self.q, self.nwalkers, False, self.nthin)
# run_mcmc_car1 returns a wrapper around the C++ CARMA/ZCAR class, convert to a python object
sample = CarmaSample(self.time, self.y, self.ysig, cppSample, q=self.q)
return sample
def _floglik(self, theta, args):
CppCarma, = args
theta_vec = carmcmcLib.vecD()
theta_vec.extend(theta)
logdens = CppCarma.getLogDensity(theta)
return -logdens
class _BHStep(object):
def __init__(self, stepsize=1.0):
self.stepsize = stepsize
def __call__(self, theta):
s = self.stepsize
theta = np.random.uniform(-s, s, theta.shape)
# Don't adapt step size for measurement error scale parameter
theta[1] = np.random.uniform(0.9, 1.1)
    def get_map(self, pq):
        """
        Return the maximum a posteriori (MAP) estimate of the CARMA model parameters for the
        AR/MA orders in ``pq``, found with scipy's basinhopping global optimizer.

        :param pq: A two-element sequence (p, q) giving the AR and MA polynomial orders.
        :return: The OptimizeResult returned by scipy.optimize.basinhopping.
        """
        # get a CARMA process object by running the MCMC sampler for a very short period. This will provide the initial
        # guess and the function to compute the log-posterior
        nsamples = 1
        nburnin = 100
        p = pq[0]
        q = pq[1]
        if p == 1:
            # Treat the CAR(1) case separately
            CarmaProcess = carmcmcLib.run_mcmc_car1(nsamples, nburnin, self._time, self._y, self._ysig, 1)
        else:
            CarmaProcess = carmcmcLib.run_mcmc_carma(nsamples, nburnin, self._time, self._y, self._ysig,
                                                     p, q, self.nwalkers, False, 1)
        # seed the optimizer from the short sampler run's first draw
        initial_theta = CarmaProcess.getSamples()
        initial_theta = np.array(initial_theta[0])
        initial_theta[1] = 1.0  # initial guess for measurement error scale parameter
        # get maximum a posteriori (MAP) estimate; _floglik is the negative log-posterior
        minimizer_kwargs = {'method': 'BFGS', 'jac': False, 'args': (CarmaProcess,)}
        custom_step = self._BHStep()
        MAP = basinhopping(self._floglik, initial_theta, minimizer_kwargs=minimizer_kwargs, niter=1000,
                           disp=True, stepsize=1.0, T=10.0, take_step=custom_step)
        return MAP
def choose_order(self, pmax, qmax=None, pqlist=None, njobs=1):
if qmax is None:
qmax = pmax - 1
try:
pmax > qmax
except ValueError:
" Order of AR polynomial, p, must be larger than order of MA polynimial, q."
if pqlist is None:
pqlist = []
for p in xrange(pmax):
for q in xrange(qmax):
pqlist.append((p, q))
if njobs == -1:
njobs = multiprocessing.cpu_count()
pool = multiprocessing.Pool(njobs)
MAPs = pool.map(self.get_map, pqlist)
best_AICc = 1e300
best_MAP = MAPs[0]
for MAP, pq in zip(MAPs, pqlist):
nparams = 2 + pq[0] + pq[1]
deviance = 2.0 * MAP.fun
this_AICc = 2.0 * nparams + deviance + 2.0 * nparams * (nparams + 1.0) / (self.time.size - nparams - 1.0)
if this_AICc < best_AICc:
# new optimum found, save values
best_MAP = MAP
best_AICc = this_AICc
self.p = pq[0]
self.q = pq[1]
print 'Model with best AICc has p =', self.p, ' and q = ', self.q
return best_MAP
class CarmaSample(samplers.MCMCSample):
    """
    Class for storing and analyzing the MCMC samples of a CARMA(p,q) model.
    """

    def __init__(self, time, y, ysig, sampler, q=0, filename=None, MAP=None):
        """
        Constructor for the CarmaSample class.

        :param time: The time values of the time series.
        :param y: The measured values of the time series.
        :param ysig: The standard deviations of the measurement errors of the time series.
        :param sampler: The C++ sampler wrapper providing GetLogLikes/getSamples/getLogPrior.
        :param q: The order of the moving average polynomial. Default is q = 0.
        :param filename: A string of the name of the file containing the MCMC samples generated by carpack.
        :param MAP: Optional scipy OptimizeResult with the maximum a posteriori estimate; stored
            via self.add_map when given.
        """
        self.time = time  # The time values of the time series
        self.y = y  # The measured values of the time series
        self.ysig = ysig  # The standard deviation of the measurement errors of the time series
        self.q = q  # order of moving average polynomial
        logpost = np.array(sampler.GetLogLikes())
        trace = np.array(sampler.getSamples())
        super(CarmaSample, self).__init__(filename=filename, logpost=logpost, trace=trace)
        # now calculate the AR(p) characteristic polynomial roots, coefficients, MA coefficients, and amplitude of
        # driving noise and add them to the MCMC samples
        print "Calculating PSD Lorentzian parameters..."
        self._ar_roots()
        print "Calculating coefficients of AR polynomial..."
        self._ar_coefs()
        if self.q > 0:
            print "Calculating coefficients of MA polynomial..."
            self._ma_coefs(trace)
        print "Calculating sigma..."
        self._sigma_noise()
        # add the log-likelihoods: log-likelihood = log-posterior - log-prior
        print "Calculating log-likelihoods..."
        loglik = np.empty(logpost.size)
        for i in xrange(logpost.size):
            std_theta = carmcmcLib.vecD()
            std_theta.extend(trace[i, :])
            loglik[i] = logpost[i] - sampler.getLogPrior(std_theta)
        self._samples['loglik'] = loglik
        # make the parameter names (i.e., the keys) public so the user knows how to get them
        self.parameters = self._samples.keys()
        self.newaxis()
        self.map = {}
        if MAP is not None:
            # add maximum a posteriori estimate
            self.add_map(MAP)
def add_map(self, MAP):
self.map = {'logpost': MAP.fun, 'var': MAP.x[0] ** 2, 'measerr_scale': MAP.x[1], 'mu': MAP.x[2]}
# add AR polynomial roots and PSD lorentzian parameters
quad_coefs = MAP.x[3:self.p + 3]
ar_roots = np.zeros(self.p, dtype=complex)
psd_width = np.zeros(self.p)
psd_cent = np.zeros(self.p)
for i in xrange(self.p / 2):
quad1 = quad_coefs[2 * i]
quad2 = quad_coefs[2 * i + 1]
discriminant = quad2 ** 2 - 4.0 * quad1
if discriminant > 0:
sqrt_disc = np.sqrt(discriminant)
else:
sqrt_disc = 1j * np.sqrt(np.abs(discriminant))
ar_roots[2 * i] = -0.5 * (quad2 + sqrt_disc)
ar_roots[2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
psd_width[2 * i] = -np.real(ar_roots[2 * i]) / (2.0 * np.pi)
psd_cent[2 * i] = np.abs(np.imag(ar_roots[2 * i])) / (2.0 * np.pi)
psd_width[2 * i + 1] = -np.real(ar_roots[2 * i + 1]) / (2.0 * np.pi)
psd_cent[2 * i + 1] = np.abs(np.imag(ar_roots[2 * i + 1])) / (2.0 * np.pi)
if self.p % 2 == 1:
# p is odd, so add in root from linear term
ar_roots[-1] = -quad_coefs[-1]
psd_cent[-1] = 0.0
psd_width[-1] = quad_coefs[-1] / (2.0 * np.pi)
self.map['ar_roots'] = ar_roots
self.map['psd_width'] = psd_width
self.map['psd_cent'] = psd_cent
self.map['ar_coefs'] = np.poly(ar_roots).real
# now calculate the moving average coefficients
if self.q == 0:
self.map['ma_coefs'] = 1.0
else:
quad_coefs = np.exp(MAP.x[3 + self.p:])
ma_roots = np.empty(quad_coefs.size, dtype=complex)
for i in xrange(self.q / 2):
quad1 = quad_coefs[:, 2 * i]
quad2 = quad_coefs[:, 2 * i + 1]
discriminant = quad2 ** 2 - 4.0 * quad1
if discriminant > 0:
sqrt_disc = np.sqrt(discriminant)
else:
sqrt_disc = 1j * np.sqrt(np.abs(discriminant))
ma_roots[2 * i] = -0.5 * (quad2 + sqrt_disc)
ma_roots[2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
if self.q % 2 == 1:
# q is odd, so add in root from linear term
ma_roots[-1] = -quad_coefs[-1]
ma_coefs = np.poly(ma_roots)
# normalize so constant in polynomial is unity, and reverse order to be consistent with MA
# representation
self.map['ma_coefs'] = np.real(ma_coefs / ma_coefs[self.q])[::-1]
# finally, calculate sigma, the standard deviation in the driving white noise
unit_var = carma_variance(1.0, self.map['ar_roots'], self.map['ma_coefs'])
self.map['sigma'] = np.sqrt(self.map['var'] / unit_var.real)
def arrayToVec(self, array, arrType=carmcmcLib.vecD):
vec = arrType()
vec.extend(array)
return vec
    def set_logpost(self, logpost):
        """Store the log-posterior values of the MCMC samples under the 'logpost' key."""
        self._samples['logpost'] = logpost  # log-posterior of the CAR(p) model
    def generate_from_trace(self, trace):
        """
        Unpack the raw MCMC trace array into the self._samples dictionary.

        :param trace: A 2-d numpy array of MCMC samples, one row per sample. Columns are assumed
            to be (sqrt(var), measerr_scale, mu, log quad_coefs...) -- TODO confirm against the
            C++ sampler output.
        """
        # Figure out how many AR terms we have
        self.p = trace.shape[1] - 3 - self.q
        names = ['var', 'measerr_scale', 'mu', 'quad_coefs']
        # NOTE(review): this relies on Python 2 dict.keys() returning a list, and on key order;
        # it effectively just tests "dictionary not yet populated".
        if names != self._samples.keys():
            idx = 0  # NOTE(review): unused local
            # Parameters are not already in the dictionary, add them.
            self._samples['var'] = (trace[:, 0] ** 2)  # Variance of the CAR(p) process
            self._samples['measerr_scale'] = trace[:, 1]  # Measurement errors are scaled by this much.
            self._samples['mu'] = trace[:, 2]  # model mean of time series
            # AR(p) polynomial is factored as a product of quadratic terms:
            #   alpha(s) = (quad_coefs[0] + quad_coefs[1] * s + s ** 2) * ...
            self._samples['quad_coefs'] = np.exp(trace[:, 3:self.p + 3])
    def generate_from_file(self, filename):
        """
        Build the dictionary of parameter samples from an ascii file of MCMC samples from carpack.

        :param filename: The name of the file containing the MCMC samples generated by carpack.
            NOTE(review): only filename[0] is read, so this appears to expect a sequence (e.g. a
            one-element list) rather than a plain string -- confirm against callers.
        """
        # TODO: put in exceptions to make sure files are read correctly
        # Grab the MCMC output; each row is one sample, the last column is the log-posterior
        trace = np.genfromtxt(filename[0], skip_header=1)
        self.generate_from_trace(trace[:, 0:-1])
        self.set_logpost(trace[:, -1])
    def _ar_roots(self):
        """
        Calculate the roots of the CARMA(p,q) characteristic polynomial and add them to the MCMC
        samples, along with the width and centroid of the PSD Lorentzian defined by each root.
        """
        var = self._samples['var']
        quad_coefs = self._samples['quad_coefs']
        self._samples['ar_roots'] = np.empty((var.size, self.p), dtype=complex)
        self._samples['psd_centroid'] = np.empty((var.size, self.p))
        self._samples['psd_width'] = np.empty((var.size, self.p))
        # the AR polynomial is parameterized as a product of quadratics; each quadratic yields a
        # pair of roots via the quadratic formula (self.p / 2 is integer division under Python 2)
        for i in xrange(self.p / 2):
            quad1 = quad_coefs[:, 2 * i]
            quad2 = quad_coefs[:, 2 * i + 1]
            discriminant = quad2 ** 2 - 4.0 * quad1
            # real roots for positive discriminant, complex-conjugate pair otherwise
            # NOTE(review): np.where evaluates both branches, so np.sqrt of negative values emits
            # NaN warnings for the unselected branch; the selected values are still correct.
            sqrt_disc = np.where(discriminant > 0, np.sqrt(discriminant), 1j * np.sqrt(np.abs(discriminant)))
            self._samples['ar_roots'][:, 2 * i] = -0.5 * (quad2 + sqrt_disc)
            self._samples['ar_roots'][:, 2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
            # Lorentzian width = -Re(root) / 2pi, centroid = |Im(root)| / 2pi
            self._samples['psd_width'][:, 2 * i] = -np.real(self._samples['ar_roots'][:, 2 * i]) / (2.0 * np.pi)
            self._samples['psd_centroid'][:, 2 * i] = np.abs(np.imag(self._samples['ar_roots'][:, 2 * i])) / \
                (2.0 * np.pi)
            self._samples['psd_width'][:, 2 * i + 1] = -np.real(self._samples['ar_roots'][:, 2 * i + 1]) / (2.0 * np.pi)
            self._samples['psd_centroid'][:, 2 * i + 1] = np.abs(np.imag(self._samples['ar_roots'][:, 2 * i + 1])) / \
                (2.0 * np.pi)
        if self.p % 2 == 1:
            # p is odd, so add in root from linear term
            self._samples['ar_roots'][:, -1] = -quad_coefs[:, -1]
            self._samples['psd_centroid'][:, -1] = 0.0
            self._samples['psd_width'][:, -1] = quad_coefs[:, -1] / (2.0 * np.pi)
    def _ma_coefs(self, trace):
        """
        Calculate the CARMA(p,q) moving average coefficients and add them to the MCMC samples.

        :param trace: The raw 2-d MCMC trace array; columns 3 + p onward hold the logs of the MA
            quadratic coefficients.
        """
        nsamples = trace.shape[0]
        if self.q == 0:
            # pure AR model: the single MA coefficient is unity for every sample
            self._samples['ma_coefs'] = np.ones((nsamples, 1))
        else:
            quad_coefs = np.exp(trace[:, 3 + self.p:])
            roots = np.empty(quad_coefs.shape, dtype=complex)
            # the MA polynomial is parameterized as a product of quadratics; convert each
            # quadratic to its pair of roots (self.q / 2 is integer division under Python 2)
            for i in xrange(self.q / 2):
                quad1 = quad_coefs[:, 2 * i]
                quad2 = quad_coefs[:, 2 * i + 1]
                discriminant = quad2 ** 2 - 4.0 * quad1
                sqrt_disc = np.where(discriminant > 0, np.sqrt(discriminant), 1j * np.sqrt(np.abs(discriminant)))
                roots[:, 2 * i] = -0.5 * (quad2 + sqrt_disc)
                roots[:, 2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
            if self.q % 2 == 1:
                # q is odd, so add in root from linear term
                roots[:, -1] = -quad_coefs[:, -1]
            coefs = np.empty((nsamples, self.q + 1), dtype=complex)
            for i in xrange(nsamples):
                coefs_i = np.poly(roots[i, :])
                # normalize so constant in polynomial is unity, and reverse order to be consistent with MA
                # representation
                coefs[i, :] = (coefs_i / coefs_i[self.q])[::-1]
            self._samples['ma_coefs'] = coefs.real
def _ar_coefs(self):
"""
Calculate the CARMA(p,q) autoregressive coefficients and add them to the MCMC samples.
"""
roots = self._samples['ar_roots']
coefs = np.empty((roots.shape[0], self.p + 1), dtype=complex)
for i in xrange(roots.shape[0]):
coefs[i, :] = np.poly(roots[i, :])
self._samples['ar_coefs'] = coefs.real
    def _sigma_noise(self):
        """
        Calculate the MCMC samples of the standard deviation of the white noise driving process and
        add them to the MCMC samples under the 'sigma' key.
        """
        # get the CARMA(p,q) model variance of the time series
        var = self._samples['var']
        # get the roots of the AR(p) characteristic polynomial
        ar_roots = self._samples['ar_roots']
        # get the moving average coefficients
        ma_coefs = self._samples['ma_coefs']
        # calculate the variance of a CAR(p) process, assuming sigma = 1.0
        # (vectorized over samples; same per-root expression as the module-level carma_variance)
        sigma1_variance = np.zeros_like(var) + 0j
        for k in xrange(self.p):
            denom = -2.0 * ar_roots[:, k].real + 0j
            for l in xrange(self.p):
                if l != k:
                    denom *= (ar_roots[:, l] - ar_roots[:, k]) * (np.conjugate(ar_roots[:, l]) + ar_roots[:, k])
            ma_sum1 = np.zeros_like(ar_roots[:, 0])
            ma_sum2 = ma_sum1.copy()
            for l in xrange(ma_coefs.shape[1]):
                ma_sum1 += ma_coefs[:, l] * ar_roots[:, k] ** l
                ma_sum2 += ma_coefs[:, l] * (-1.0 * ar_roots[:, k]) ** l
            numer = ma_sum1 * ma_sum2
            sigma1_variance += numer / denom
        # scale so that the model variance matches the sampled 'var' values
        sigsqr = var / sigma1_variance.real
        # add the white noise sigmas to the MCMC samples
        self._samples['sigma'] = np.sqrt(sigsqr)
def plot_power_spectrum(self, percentile=68.0, nsamples=None, plot_log=True, color="b", alpha=0.5, sp=None,
doShow=True):
"""
Plot the posterior median and the credibility interval corresponding to percentile of the CAR(p) PSD. This
function returns a tuple containing the lower and upper PSD credibility intervals as a function of
frequency, the median PSD as a function of frequency, and the frequencies.
:rtype : A tuple of numpy arrays, (lower PSD, upper PSD, median PSD, frequencies).
:param percentile: The percentile of the PSD credibility interval to plot.
:param nsamples: The number of MCMC samples to use to estimate the credibility interval. The default is all
of them.
:param plot_log: A boolean. If true, then a logarithmic plot is made.
"""
sigmas = self._samples['sigma']
ar_coefs = self._samples['ar_coefs']
ma_coefs = self._samples['ma_coefs']
if nsamples is None:
# Use all of the MCMC samples
nsamples = sigmas.shape[0]
else:
try:
nsamples <= sigmas.shape[0]
except ValueError:
"nsamples must be less than the total number of MCMC samples."
nsamples0 = sigmas.shape[0]
index = np.arange(nsamples) * (nsamples0 / nsamples)
sigmas = sigmas[index]
ar_coefs = ar_coefs[index]
ma_coefs = ma_coefs[index]
nfreq = 1000
dt_min = self.time[1:] - self.time[0:self.time.size - 1]
dt_min = dt_min.min()
dt_max = self.time.max() - self.time.min()
# Only plot frequencies corresponding to time scales a factor of 2 shorter and longer than the minimum and
# maximum time scales probed by the time series.
freq_max = 1.0 / (dt_min / 2.0)
freq_min = (1.0 / (2.0 * dt_max))
frequencies = np.linspace(np.log(freq_min), np.log(freq_max), num=nfreq)
frequencies = np.exp(frequencies)
psd_credint = np.empty((nfreq, 3))
lower = (100.0 - percentile) / 2.0 # lower and upper intervals for credible region
upper = 100.0 - lower
# Compute the PSDs from the MCMC samples
omega = 2.0 * np.pi * 1j * frequencies
ar_poly = np.zeros((nfreq, nsamples), dtype=complex)
ma_poly = np.zeros_like(ar_poly)
for k in xrange(self.p):
# Here we compute:
# alpha(omega) = ar_coefs[0] * omega^p + ar_coefs[1] * omega^(p-1) + ... + ar_coefs[p]
# Note that ar_coefs[0] = 1.0.
argrid, omgrid = np.meshgrid(ar_coefs[:, k], omega)
ar_poly += argrid * (omgrid ** (self.p - k))
ar_poly += ar_coefs[:, self.p]
for k in xrange(ma_coefs.shape[1]):
# Here we compute:
# delta(omega) = ma_coefs[0] + ma_coefs[1] * omega + ... + ma_coefs[q] * omega^q
magrid, omgrid = np.meshgrid(ma_coefs[:, k], omega)
ma_poly += magrid * (omgrid ** k)
psd_samples = np.squeeze(sigmas) ** 2 * np.abs(ma_poly) ** 2 / np.abs(ar_poly) ** 2
# Now compute credibility interval for power spectrum
psd_credint[:, 0] = np.percentile(psd_samples, lower, axis=1)
psd_credint[:, 2] = np.percentile(psd_samples, upper, axis=1)
psd_credint[:, 1] = np.median(psd_samples, axis=1)
# Plot the power spectra
if sp == None:
fig = plt.figure()
sp = fig.add_subplot(111)
if plot_log:
# plot the posterior median first
sp.loglog(frequencies, psd_credint[:, 1], color=color)
else:
sp.plot(frequencies, psd_credint[:, 1], color=color)
sp.fill_between(frequencies, psd_credint[:, 2], psd_credint[:, 0], facecolor=color, alpha=alpha)
sp.set_xlim(frequencies.min(), frequencies.max())
sp.set_xlabel('Frequency')
sp.set_ylabel('Power Spectrum')
if doShow:
plt.show()
if sp == None:
return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies, fig)
else:
return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies)
    def makeKalmanFilter(self, bestfit):
        """
        Construct a C++ Kalman filter object for the CARMA(p,q) model using the requested
        'best-fit' parameter summary, and return it along with the model mean.

        :param bestfit: One of 'map', 'median', or 'mean'; anything else falls through to 'mean'.
        :return: A (kfilter, mu) tuple; the filter operates on the mean-subtracted series y - mu.
        """
        if bestfit == 'map':
            # use maximum a posteriori estimate
            max_index = self._samples['logpost'].argmax()
            sigsqr = (self._samples['sigma'][max_index] ** 2)[0]
            mu = self._samples['mu'][max_index][0]
            ar_roots = self._samples['ar_roots'][max_index]
            ma_coefs = self._samples['ma_coefs'][max_index]
        elif bestfit == 'median':
            # use posterior median estimate
            sigsqr = np.median(self._samples['sigma']) ** 2
            mu = np.median(self._samples['mu'])
            ar_roots = np.median(self._samples['ar_roots'], axis=0)
            ma_coefs = np.median(self._samples['ma_coefs'], axis=0)
        else:
            # use posterior mean as the best-fit
            sigsqr = np.mean(self._samples['sigma'] ** 2)
            mu = np.mean(self._samples['mu'])
            ar_roots = np.mean(self._samples['ar_roots'], axis=0)
            ma_coefs = np.mean(self._samples['ma_coefs'], axis=0)
        # hand the mean-subtracted series and best-fit parameters to the C++ Kalman filter
        kfilter = carmcmcLib.KalmanFilterp(self.arrayToVec(self.time),
                                           self.arrayToVec(self.y - mu),
                                           self.arrayToVec(self.ysig),
                                           sigsqr,
                                           self.arrayToVec(ar_roots, carmcmcLib.vecC),
                                           self.arrayToVec(ma_coefs))
        return kfilter, mu
def plot_models(self, bestfit="median", nplot=256, doShow=True, dtPredict=0):
bestfit = bestfit.lower()
try:
bestfit in ['map', 'median', 'mean']
except ValueError:
"bestfit must be one of 'map, 'median', or 'mean'"
fig = plt.figure()
sp = fig.add_subplot(111)
sp.errorbar(self.time, self.y, yerr=self.ysig, fmt='ko', label='Data', ms=4, capsize=1)
# The kalman filter seems to exactly recover the data, no point in this...
if False:
kfilter, mu = self.makeKalmanFilter(bestfit)
kfilter.Filter()
kmean = np.empty(self.time.size)
kvar = np.empty(self.time.size)
for i in xrange(self.time.size):
kpred = kfilter.Predict(self.time[i])
kmean[i] = kpred.first
kvar[i] = kpred.second
sp.plot(self.time, kmean + mu, '-r', label='Kalman Filter')
# compute the marginal mean and variance of the predicted values
time_predict = np.linspace(self.time.min(), self.time.max() + dtPredict, nplot)
predicted_mean, predicted_var = self.predict_lightcurve(time_predict, bestfit=bestfit)
sp.plot(time_predict, predicted_mean, '-r', label='Kalman Filter')
# NOTE we can get negative variance here in the first/last indices
idx = np.where(predicted_var > 0)
time_predict = time_predict[idx]
predicted_mean = predicted_mean[idx]
predicted_var = predicted_var[idx]
predicted_low = predicted_mean - np.sqrt(predicted_var)
predicted_high = predicted_mean + np.sqrt(predicted_var)
sp.fill_between(time_predict, predicted_low, predicted_high,
edgecolor=None, facecolor='blue', alpha=0.25, label="1-sigma range")
sp.set_xlabel('Time')
sp.set_xlim(self.time.min(), self.time.max())
sp.legend(loc=1)
if doShow:
plt.show()
def assess_fit(self, bestfit="map", nplot=256, doShow=True):
"""
Display plots and provide useful information for assessing the quality of the CARMA(p.q) model fit.
:param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior (MAP), the posterior
mean ("mean") or the posterior median ("median").
"""
bestfit = bestfit.lower()
try:
bestfit in ['map', 'median', 'mean']
except ValueError:
"bestfit must be one of 'map, 'median', or 'mean'"
fig = plt.figure()
# compute the marginal mean and variance of the predicted values
time_predict = np.linspace(self.time[1:].min(), self.time.max(), nplot)
predicted_mean, predicted_var = self.predict_lightcurve(time_predict, bestfit=bestfit)
predicted_low = predicted_mean - np.sqrt(predicted_var)
predicted_high = predicted_mean + np.sqrt(predicted_var)
# plot the time series and the marginal 1-sigma error bands
plt.subplot(221)
plt.fill_between(time_predict, predicted_low, predicted_high, color='cyan')
plt.plot(time_predict, predicted_mean, '-b', label='Interpolation')
plt.plot(self.time, self.y, 'k.', label='Data')
plt.xlabel('Time')
plt.xlim(self.time.min(), self.time.max())
#plt.legend()
# plot the standardized residuals and compare with the standard normal
kfilter, mu = self.makeKalmanFilter(bestfit)
kfilter.Filter()
kmean = np.asarray(kfilter.GetMean())
kvar = np.asarray(kfilter.GetVar())
standardized_residuals = (self.y - mu - kmean) / np.sqrt(kvar)
plt.subplot(222)
plt.xlabel('Time')
plt.xlim(self.time.min(), self.time.max())
# Now add the histogram of values to the standardized residuals plot
pdf, bin_edges = np.histogram(standardized_residuals, bins=10)
bin_edges = bin_edges[0:pdf.size]
# Stretch the PDF so that it is readable on the residual plot when plotted horizontally
pdf = pdf / float(pdf.max()) * 0.4 * self.time.max()
# Add the histogram to the plot
plt.barh(bin_edges, pdf, height=bin_edges[1] - bin_edges[0])
# now overplot the expected standard normal distribution
expected_pdf = np.exp(-0.5 * bin_edges ** 2)
expected_pdf = expected_pdf / expected_pdf.max() * 0.4 * self.time.max()
plt.plot(expected_pdf, bin_edges, 'DarkOrange', lw=2)
plt.plot(self.time, standardized_residuals, '.k')
# plot the autocorrelation function of the residuals and compare with the 95% confidence intervals for white
# noise
plt.subplot(223)
maxlag = 50
wnoise_upper = 1.96 / np.sqrt(self.time.size)
wnoise_lower = -1.96 / np.sqrt(self.time.size)
plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
lags, acf, not_needed1, not_needed2 = plt.acorr(standardized_residuals, maxlags=maxlag, lw=2)
plt.xlim(0, maxlag)
plt.xlabel('Time Lag')
plt.ylabel('ACF of Residuals')
# plot the autocorrelation function of the squared residuals and compare with the 95% confidence intervals for
# white noise
plt.subplot(224)
squared_residuals = standardized_residuals ** 2
wnoise_upper = 1.96 / np.sqrt(self.time.size)
wnoise_lower = -1.96 / np.sqrt(self.time.size)
plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
lags, acf, not_needed1, not_needed2 = plt.acorr(squared_residuals - squared_residuals.mean(), maxlags=maxlag,
lw=2)
plt.xlim(0, maxlag)
plt.xlabel('Time Lag')
plt.ylabel('ACF of Sqrd. Resid.')
plt.tight_layout()
if doShow:
plt.show()
else:
return fig
def predict_lightcurve(self, time, bestfit='median'):
"""
Return the predicted value of the lightcurve and its standard deviation at the input time(s) given the best-fit
value of the CARMA(p,q) model and the measured lightcurve.
:param time: A scalar or numpy array containing the time values to predict the time series at.
:param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior (MAP), the posterior
mean ("mean") or the posterior median ("median").
"""
bestfit = bestfit.lower()
try:
bestfit in ['map', 'median', 'mean']
except ValueError:
"bestfit must be one of 'map, 'median', or 'mean'"
# note that KalmanFilter class assumes the time series has zero mean
kfilter, mu = self.makeKalmanFilter(bestfit)
kfilter.Filter()
if np.isscalar(time):
pred = kfilter.Predict(time)
yhat = pred.first
yhat_var = pred.second
else:
yhat = np.empty(time.size)
yhat_var = np.empty(time.size)
for i in xrange(time.size):
pred = kfilter.Predict(time[i])
yhat[i] = pred.first
yhat_var[i] = pred.second
yhat += mu # add mean back into time series
return yhat, yhat_var
def simulate_lightcurve(self, time, bestfit='median'):
"""
Simulate a lightcurve at the input time(s) given the best-fit value of the CARMA(p,q) model and the measured
lightcurve.
:param time: A scalar or numpy array containing the time values to simulate the time series at.
:param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior (MAP), the posterior
mean ("mean") or the posterior median ("median").
"""
bestfit = bestfit.lower()
try:
bestfit in ['map', 'median', 'mean']
except ValueError:
"bestfit must be one of 'map, 'median', or 'mean'"
# note that KalmanFilter class assumes the time series has zero mean
kfilter, mu = self.makeKalmanFilter(bestfit)
kfilter.Filter()
vtime = carmcmcLib.vecD()
if np.isscalar(time):
vtime.append(time)
else:
vtime.extend(time)
ysim = np.asarray(kfilter.Simulate(vtime))
ysim += mu # add mean back into time series
return ysim
def DIC(self):
"""
Calculate the Deviance Information Criterion for the model.
The deviance is -2 * log-likelihood, and the DIC is:
DIC = mean(deviance) + 0.5 * variance(deviance)
"""
deviance = -2.0 * self._samples['loglik']
mean_deviance = np.mean(deviance, axis=0)
effect_npar = 0.5 * np.var(deviance, axis=0)
dic = mean_deviance + effect_npar
return dic
class CarSample1(CarmaSample):
    """
    Class for storing and analyzing MCMC samples of a CAR(1) model (the p = 1, q = 0 case).
    """

    def __init__(self, time, y, ysig, sampler, filename=None):
        """
        Constructor for the CarSample1 class.

        :param time: The time values of the time series.
        :param y: The measured values of the time series.
        :param ysig: The standard deviations of the measurement errors of the time series.
        :param sampler: The C++ sampler wrapper providing GetLogLikes/getSamples/getLogPrior.
        :param filename: Optional name of a file of MCMC samples generated by carpack.
        """
        self.time = time  # The time values of the time series
        self.y = y  # The measured values of the time series
        self.ysig = ysig  # The standard deviation of the measurement errors of the time series
        self.p = 1  # How many AR terms
        self.q = 0  # How many MA terms
        logpost = np.array(sampler.GetLogLikes())
        trace = np.array(sampler.getSamples())
        # NOTE(review): super(CarmaSample, self) deliberately skips CarmaSample.__init__ and
        # dispatches to the MCMCSample base class, avoiding the CARMA-specific AR/MA setup.
        super(CarmaSample, self).__init__(filename=filename, logpost=logpost, trace=trace)
        print "Calculating sigma..."
        self._sigma_noise()
        # add the log-likelihoods: log-likelihood = log-posterior - log-prior
        print "Calculating log-likelihoods..."
        loglik = np.empty(logpost.size)
        for i in xrange(logpost.size):
            std_theta = carmcmcLib.vecD()
            std_theta.extend(trace[i, :])
            loglik[i] = logpost[i] - sampler.getLogPrior(std_theta)
        self._samples['loglik'] = loglik
        # make the parameter names (i.e., the keys) public so the user knows how to get them
        self.parameters = self._samples.keys()
        self.newaxis()
    def generate_from_trace(self, trace):
        """
        Unpack the raw CAR(1) MCMC trace into the self._samples dictionary.

        :param trace: A 2-d numpy array of samples with columns (var, measerr_scale, mu,
            log_omega) -- presumably; confirm against the C++ sampler output.
        """
        # NOTE(review): 'names' lists 'sigma' but the first column is stored under 'var', so this
        # guard can never match an already-populated dictionary; it effectively always populates.
        names = ['sigma', 'measerr_scale', 'mu', 'log_omega']
        if names != self._samples.keys():
            self._samples['var'] = trace[:, 0]
            self._samples['measerr_scale'] = trace[:, 1]
            self._samples['mu'] = trace[:, 2]
            self._samples['log_omega'] = trace[:, 3]
    def _ar_roots(self):
        """AR roots are not computed for the CAR(1) parameterization; this override is a no-op."""
        print "_ar_roots not supported for CAR1"
        return
    def _ar_coefs(self):
        """AR coefficients are not computed for the CAR(1) parameterization; this is a no-op."""
        print "_ar_coefs not supported for CAR1"
        return
    def _sigma_noise(self):
        """Derive the driving-noise sigma samples: sigma^2 = 2 * var * omega with omega = exp(log_omega)."""
        # NOTE(review): exp() is used here while makeKalmanFilter uses 10 ** log_omega; one of the
        # two bases looks inconsistent -- confirm the convention used by the C++ sampler.
        self._samples['sigma'] = np.sqrt(2.0 * self._samples['var'] * np.exp(self._samples['log_omega']))
    def makeKalmanFilter(self, bestfit):
        """
        Construct a C++ CAR(1) Kalman filter object using the requested 'best-fit' parameter
        summary, and return it along with the model mean.

        :param bestfit: One of 'map', 'median', or 'mean'; anything else falls through to 'mean'.
        :return: A (kfilter, mu) tuple; the filter operates on the mean-subtracted series y - mu.
        """
        if bestfit == 'map':
            # use maximum a posteriori estimate
            max_index = self._samples['logpost'].argmax()
            sigsqr = (self._samples['sigma'][max_index] ** 2)[0]
            mu = self._samples['mu'][max_index][0]
            log_omega = self._samples['log_omega'][max_index][0]
        elif bestfit == 'median':
            # use posterior median estimate
            sigsqr = np.median(self._samples['sigma']) ** 2
            mu = np.median(self._samples['mu'])
            log_omega = np.median(self._samples['log_omega'])
        else:
            # use posterior mean as the best-fit
            sigsqr = np.mean(self._samples['sigma'] ** 2)
            mu = np.mean(self._samples['mu'])
            log_omega = np.mean(self._samples['log_omega'])
        # NOTE(review): 10 ** log_omega is used here while _sigma_noise uses np.exp(log_omega);
        # one of the two bases looks inconsistent -- confirm against the C++ sampler convention.
        kfilter = carmcmcLib.KalmanFilter1(self.arrayToVec(self.time),
                                           self.arrayToVec(self.y - mu),
                                           self.arrayToVec(self.ysig),
                                           sigsqr,
                                           10**(log_omega))
        return kfilter, mu
def plot_power_spectrum(self, percentile=68.0, plot_log=True, color="b", sp=None, doShow=True):
sigmas = self._samples['sigma']
log_omegas = self._samples['log_omega']
nfreq = 1000
dt_min = self.time[1:] - self.time[0:self.time.size - 1]
dt_min = dt_min.min()
dt_max = self.time.max() - self.time.min()
# Only plot frequencies corresponding to time scales a factor of 2 shorter and longer than the minimum and
# maximum time scales probed by the time series.
freq_max = 1.0 / (dt_min / 2.0)
freq_min = (1.0 / (2.0 * dt_max))
frequencies = np.linspace(np.log(freq_min), np.log(freq_max), num=nfreq)
frequencies = np.exp(frequencies)
psd_credint = np.empty((nfreq, 3))
lower = (100.0 - percentile) / 2.0 # lower and upper intervals for credible region
upper = 100.0 - lower
numer = 0.5 / np.pi * sigmas ** 2
for i in xrange(nfreq):
denom = 10 ** log_omegas ** 2 + frequencies[i] ** 2
psd_samples = numer / denom
# Now compute credibility interval for power spectrum
psd_credint[i, 0] = np.percentile(psd_samples, lower, axis=0)
psd_credint[i, 2] = np.percentile(psd_samples, upper, axis=0)
psd_credint[i, 1] = np.median(psd_samples, axis=0)
# Plot the power spectra
if sp == None:
fig = plt.figure()
sp = fig.add_subplot(111)
if plot_log:
# plot the posterior median first
sp.loglog(frequencies, psd_credint[:, 1], color=color)
else:
sp.plot(frequencies, psd_credint[:, 1], color=color)
sp.fill_between(frequencies, psd_credint[:, 2], psd_credint[:, 0], facecolor=color, alpha=0.5)
sp.set_xlim(frequencies.min(), frequencies.max())
sp.set_xlabel('Frequency')
sp.set_ylabel('Power Spectrum')
if doShow:
plt.show()
return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies)
else:
return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies), fig
    def plot_2dpdf(self, name1, name2, doShow=False):
        """
        Plot the joint posterior distribution of two parameters as a scatter plot with marginal
        histograms along each axis.

        :param name1: Key into self._samples for the x-axis parameter.
        :param name2: Key into self._samples for the y-axis parameter.
        :param doShow: If True, call plt.show() after drawing.
        """
        print "Plotting 2d PDF"
        trace1 = self._samples[name1]
        trace2 = self._samples[name2]
        fig = plt.figure()
        # joint distribution
        axJ = fig.add_axes([0.1, 0.1, 0.7, 0.7])  # [left, bottom, width, height]
        # y histogram
        axY = fig.add_axes([0.8, 0.1, 0.125, 0.7], sharey=axJ)
        # x histogram
        axX = fig.add_axes([0.1, 0.8, 0.7, 0.125], sharex=axJ)
        axJ.plot(trace1, trace2, 'ro', ms=1, alpha=0.5)
        axX.hist(trace1, bins=100)
        axY.hist(trace2, orientation='horizontal', bins=100)
        axJ.set_xlabel("%s" % (name1))
        axJ.set_ylabel("%s" % (name2))
        # hide the tick labels on the marginal histogram axes
        plt.setp(axX.get_xticklabels() + axX.get_yticklabels(), visible=False)
        plt.setp(axY.get_xticklabels() + axY.get_yticklabels(), visible=False)
        if doShow:
            plt.show()
##################
def get_ar_roots(qpo_width, qpo_centroid):
    """
    Return the roots of the characteristic polynomial of the CAR(p) process, given the lorentzian
    widths and centroids.

    :rtype : a numpy array
    :param qpo_width: The widths of the lorentzian functions defining the PSD.
    :param qpo_centroid: The centroids of the lorentzian functions defining the PSD.
    """
    p = qpo_centroid.size + qpo_width.size
    ar_roots = np.empty(p, dtype=complex)
    # each (width, centroid) pair contributes a complex-conjugate pair of roots
    for j in range(p // 2):
        root = qpo_width[j] + 1j * qpo_centroid[j]
        ar_roots[2 * j] = root
        ar_roots[2 * j + 1] = root.conjugate()
    if p % 2 == 1:
        # p is odd, so add in low-frequency component
        ar_roots[-1] = qpo_width[-1]
    return -2.0 * np.pi * ar_roots
def power_spectrum(freq, sigma, ar_coef, ma_coefs=[1.0]):
    """
    Return the power spectrum for a CAR(p) process calculated at the input frequencies.

    :param freq: The frequencies at which to calculate the PSD.
    :param sigma: The standard deviation of the driving white noise.
    :param ar_coef: The CAR(p) model autoregressive coefficients, highest order first
        (np.polyval convention).
    :param ma_coefs: Coefficients of the moving average polynomial, lowest order first.
    :rtype : A numpy array.
    :raises ValueError: If more MA coefficients than AR coefficients are supplied.
    """
    # BUG FIX: the original "try: len(ma_coefs) <= len(ar_coef) / except ValueError" never raised,
    # because a bare comparison does not throw. Validate explicitly.
    if len(ma_coefs) > len(ar_coef):
        raise ValueError("Size of ma_coefs must be less or equal to size of ar_roots.")
    ma_poly = np.polyval(ma_coefs[::-1], 2.0 * np.pi * 1j * freq)  # Evaluate the polynomial in the PSD numerator
    ar_poly = np.polyval(ar_coef, 2.0 * np.pi * 1j * freq)  # Evaluate the polynomial in the PSD denominator
    pspec = sigma ** 2 * np.abs(ma_poly) ** 2 / np.abs(ar_poly) ** 2
    return pspec
def carma_variance(sigsqr, ar_roots, ma_coefs=[1.0], lag=0.0):
    """
    Return the autocovariance function of a CARMA(p,q) process.

    :param sigsqr: The variance in the driving white noise.
    :param ar_roots: The roots of the AR characteristic polynomial (complex numpy array).
    :param ma_coefs: The moving average coefficients, lowest order first.
    :param lag: The lag at which to calculate the autocovariance function.
    :raises ValueError: If more MA coefficients than AR roots are supplied.
    """
    # BUG FIX: the original "try: len(ma_coefs) <= len(ar_roots) / except ValueError" never raised.
    if len(ma_coefs) > len(ar_roots):
        raise ValueError("Size of ma_coefs must be less or equal to size of ar_roots.")
    p = len(ar_roots)
    if len(ma_coefs) < p:
        # pad ma_coefs with zeros up to length p.
        # BUG FIX: the original zeroed every entry past the first (ma_coefs[1:] = 0.0), silently
        # discarding user-supplied higher-order MA coefficients; only the padding must be zeroed.
        q1 = len(ma_coefs)
        ma_coefs = np.resize(np.array(ma_coefs), p)
        ma_coefs[q1:] = 0.0
    sigma1_variance = 0.0 + 0j
    # sum the contribution of each AR root to the unit-sigma autocovariance
    for k in range(p):
        denom_product = 1.0 + 0j
        for l in range(p):
            if l != k:
                denom_product *= (ar_roots[l] - ar_roots[k]) * (np.conjugate(ar_roots[l]) + ar_roots[k])
        denom = -2.0 * denom_product * ar_roots[k].real
        ma_sum1 = 0.0 + 0j
        ma_sum2 = 0.0 + 0j
        for l in range(p):
            ma_sum1 += ma_coefs[l] * ar_roots[k] ** l
            ma_sum2 += ma_coefs[l] * (-1.0 * ar_roots[k]) ** l
        numer = ma_sum1 * ma_sum2 * np.exp(ar_roots[k] * abs(lag))
        sigma1_variance += numer / denom
    return sigsqr * sigma1_variance.real
def carma_process(time, sigsqr, ar_roots, ma_coefs=None):
    """
    Generate a CARMA(p,q) process.

    The input time array is sorted in place (preserving the original
    contract), and the returned values correspond to the sorted times.

    :param time: The time values at which to generate the CARMA(p,q) process at (numpy array).
    :param sigsqr: The variance in the driving white noise term.
    :param ar_roots: The roots of the CAR(p) characteristic polynomial. All roots must
        have negative real parts (stationarity) and must be unique.
    :param ma_coefs: The moving average coefficients (defaults to [1.0]).
    :rtype : A numpy array containing the simulated CARMA(p,q) process values at time.
    :raises ValueError: If more MA coefficients than AR roots are supplied, if the
        process is non-stationary, or if the AR roots are not unique.
    """
    # Use None as the default to avoid the shared-mutable-default pitfall.
    if ma_coefs is None:
        ma_coefs = [1.0]
    ar_roots = np.atleast_1d(np.asarray(ar_roots))
    p = ar_roots.size
    q = len(ma_coefs)
    # The original code evaluated its three validation conditions inside try
    # blocks, where the results were silently discarded; enforce them for real.
    if q > p:
        raise ValueError("Size of ma_coefs must be less or equal to size of ar_roots.")
    # Pad ma_coefs with zeros at the end up to the AR order. Always convert to
    # an ndarray so .dot() below also works when q == p and a list was passed.
    ma_coefs = np.append(np.asarray(ma_coefs, dtype=float), np.zeros(p - q))
    time.sort()
    # make sure process is stationary: every root must have a negative real part
    if np.any(ar_roots.real >= 0):
        raise ValueError("Process is not stationary, real part of roots must be negative.")
    # make sure the roots are unique (relative difference above tolerance)
    tol = 1e-8
    for i in range(p):
        for j in range(i + 1, p):
            if np.abs(ar_roots[i] - ar_roots[j]) / np.abs(ar_roots[i] + ar_roots[j]) < tol:
                raise ValueError("Roots are not unique.")
    # Setup the matrix of Eigenvectors for the Kalman Filter transition matrix. This allows us to transform
    # quantities into the rotated state basis, which makes the computations for the Kalman filter easier and faster.
    EigenMat = np.ones((p, p), dtype=complex)
    # Start the loop at row 1 so a CAR(1) process (p == 1, a 1x1 matrix of
    # ones) no longer raises IndexError as the old EigenMat[1, :] line did.
    for k in range(1, p):
        EigenMat[k, :] = ar_roots ** k
    # Input vector under the original state space representation
    Rvector = np.zeros(p, dtype=complex)
    Rvector[-1] = 1.0
    # Input vector under rotated state space representation
    Jvector = solve(EigenMat, Rvector)  # J = inv(E) * R
    # Compute the vector of moving average coefficients in the rotated state.
    rotated_MA_coefs = ma_coefs.dot(EigenMat)
    # Calculate the stationary covariance matrix of the state vector
    StateVar = np.empty((p, p), dtype=complex)
    for j in range(p):
        StateVar[:, j] = -sigsqr * Jvector * np.conjugate(Jvector[j]) / (ar_roots + np.conjugate(ar_roots[j]))
    # Initialize variance in one-step prediction error and the state vector
    PredictionVar = StateVar.copy()
    StateVector = np.zeros(p, dtype=complex)
    # Convert the current state to matrices for convenience, since we'll be doing some linear algebra.
    StateVector = np.matrix(StateVector).T
    StateVar = np.matrix(StateVar)
    PredictionVar = np.matrix(PredictionVar)
    rotated_MA_coefs = np.matrix(rotated_MA_coefs)  # this is a row vector, so no transpose
    # Initialize the Kalman mean and variance. These are the forecasted values and their variances.
    # (.item() replaces np.asscalar, which was removed in NumPy 1.23.)
    kalman_mean = 0.0
    kalman_var = np.real(rotated_MA_coefs * PredictionVar * rotated_MA_coefs.H).item()
    # simulate the first time series value
    y = np.empty_like(time)
    y[0] = np.random.normal(kalman_mean, np.sqrt(kalman_var))
    # Initialize the innovations, i.e., the KF residuals
    innovation = y[0]
    for i in range(1, time.size):
        # First compute the Kalman gain
        KalmanGain = PredictionVar * rotated_MA_coefs.H / kalman_var
        # update the state vector
        StateVector += innovation * KalmanGain
        # update the state one-step prediction error variance
        PredictionVar -= kalman_var * (KalmanGain * KalmanGain.H)
        # predict the next state, do element-wise multiplication (the rotated
        # transition matrix is diagonal, so a column-vector multiply suffices)
        dt = time[i] - time[i - 1]
        StateTransition = np.matrix(np.exp(ar_roots * dt)).T
        StateVector = np.multiply(StateVector, StateTransition)
        # update the predicted state covariance matrix
        PredictionVar = np.multiply(StateTransition * StateTransition.H, PredictionVar - StateVar) + StateVar
        # now predict the observation and its variance
        kalman_mean = np.real(rotated_MA_coefs * StateVector).item()
        kalman_var = np.real(rotated_MA_coefs * PredictionVar * rotated_MA_coefs.H).item()
        # simulate the next time series value
        y[i] = np.random.normal(kalman_mean, np.sqrt(kalman_var))
        # finally, update the innovation
        innovation = y[i] - kalman_mean
    return y
##################
# Deprecated
class KalmanFilterDeprecated(object):
    """
    Kalman filter for a CARMA(p,q) process observed with measurement noise.

    Deprecated implementation kept for reference. The filter works in a
    rotated state basis (the eigenbasis of the state transition matrix), in
    which the one-step state transition is a diagonal operator, so the
    per-observation updates reduce to element-wise operations.
    """
    def __init__(self, time, y, yvar, sigsqr, ar_roots, ma_coefs=[1.0]):
        """
        Constructor for Kalman Filter class.
        :param time: The time values of the time series.
        :param y: The mean-subtracted time series.
        :param yvar: The variance in the measurement errors on the time series.
        :param sigsqr: The variance of the driving white noise term in the CAR(p) process.
        :param ar_roots: The roots of the autoregressive characteristic polynomial.
        :param ma_coefs: The moving average coefficients; padded with zeros up to the AR order.
        """
        # NOTE(review): this try/except is a no-op -- the comparison result is
        # discarded, len() never raises ValueError here, and the bare string in
        # the except branch does nothing, so the intended q <= p validation is
        # never actually enforced.
        try:
            len(ma_coefs) <= ar_roots.size
        except ValueError:
            "Order of MA polynomial cannot be larger than order of AR polynomial."
        self.time = time
        self.y = y
        self.yvar = yvar
        self.sigsqr = sigsqr
        self.ar_roots = ar_roots
        self.p = ar_roots.size # order of the CARMA(p,q) process
        self.q = len(ma_coefs)
        # pad the MA coefficients with zeros so exactly p coefficients are stored
        self.ma_coefs = np.append(ma_coefs, np.zeros(self.p - self.q))
    def reset(self):
        """
        Reset the Kalman Filter to its initial state.
        """
        # Setup the matrix of Eigenvectors for the Kalman Filter transition matrix. This allows us to transform
        # quantities into the rotated state basis, which makes the computations for the Kalman filter easier and faster.
        EigenMat = np.ones((self.p, self.p), dtype=complex)
        # NOTE(review): assumes p >= 2; for a CAR(1) process this row
        # assignment would raise IndexError on a 1x1 matrix.
        EigenMat[1, :] = self.ar_roots
        for k in xrange(2, self.p):
            EigenMat[k, :] = self.ar_roots ** k
        # Input vector under the original state space representation
        Rvector = np.zeros(self.p, dtype=complex)
        Rvector[-1] = 1.0
        # Input vector under rotated state space representation
        Jvector = solve(EigenMat, Rvector) # J = inv(E) * R
        # Compute the vector of moving average coefficients in the rotated state.
        rotated_MA_coefs = self.ma_coefs.dot(EigenMat)
        # Calculate the stationary covariance matrix of the state vector
        StateVar = np.empty((self.p, self.p), dtype=complex)
        for j in xrange(self.p):
            StateVar[:, j] = -self.sigsqr * Jvector * np.conjugate(Jvector[j]) / \
                             (self.ar_roots + np.conjugate(self.ar_roots[j]))
        # Initialize variance in one-step prediction error and the state vector
        PredictionVar = StateVar.copy()
        StateVector = np.zeros(self.p, dtype=complex)
        # Convert the current state to matrices for convenience, since we'll be doing some Linear algebra.
        self._StateVector = np.matrix(StateVector).T
        self._StateVar = np.matrix(StateVar)
        self._PredictionVar = np.matrix(PredictionVar)
        self._rotated_MA_coefs = np.matrix(rotated_MA_coefs) # this is a row vector, so no transpose
        self._StateTransition = np.zeros_like(self._StateVector)
        self._KalmanGain = np.zeros_like(self._StateVector)
        # Initialize the Kalman mean and variance. These are the forecasted values and their variances.
        self.kalman_mean = np.empty_like(self.time)
        self.kalman_var = np.empty_like(self.time)
        self.kalman_mean[0] = 0.0
        # the measurement variance of the first point adds to the forecast variance
        self.kalman_var[0] = np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) \
                             + self.yvar[0]
        # Initialize the innovations, i.e., the KF residuals
        self._innovation = self.y[0]
        self._current_index = 1
    def update(self):
        """
        Perform one iteration (update) of the Kalman Filter.
        """
        # First compute the Kalman gain
        self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[self._current_index - 1]
        # update the state vector
        self._StateVector += self._innovation * self._KalmanGain
        # update the state one-step prediction error variance
        self._PredictionVar -= self.kalman_var[self._current_index - 1] * (self._KalmanGain * self._KalmanGain.H)
        # predict the next state, do element-wise multiplication
        dt = self.time[self._current_index] - self.time[self._current_index - 1]
        self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
        self._StateVector = np.multiply(self._StateVector, self._StateTransition)
        # update the predicted state covariance matrix
        self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
                                          self._PredictionVar - self._StateVar) + self._StateVar
        # now predict the observation and its variance
        # NOTE(review): np.asscalar was removed in NumPy 1.23; .item() is the modern equivalent.
        self.kalman_mean[self._current_index] = np.real(np.asscalar(self._rotated_MA_coefs * self._StateVector))
        self.kalman_var[self._current_index] = \
            np.real(np.asscalar(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H))
        # the measurement variance adds to the forecast variance
        self.kalman_var[self._current_index] += self.yvar[self._current_index]
        # finally, update the innovation
        self._innovation = self.y[self._current_index] - self.kalman_mean[self._current_index]
        self._current_index += 1
    def filter(self):
        """
        Perform the Kalman Filter on all points of the time series. The kalman mean and variance are returned upon
        completion, and are stored in the instantiated KalmanFilter object.
        """
        self.reset()
        for i in xrange(self.time.size - 1):
            self.update()
        return self.kalman_mean, self.kalman_var
    def predict(self, time_predict):
        """
        Return the predicted value of a lightcurve and its standard deviation at the input time given the input
        values of the CARMA(p,q) model parameters and a measured lightcurve.
        :rtype : A tuple containing the predicted value and its variance.
        :param time_predict: The time at which to predict the lightcurve.
        """
        # NOTE(review): this try/except is a no-op (comparison result is
        # discarded, bare string does nothing), so the intended "no
        # backcasting" guard is never actually enforced.
        try:
            self.time.min() > time_predict
        except ValueError:
            "backcasting currently not supported: time_predict must be greater than self.time.min()"
        self.reset()
        # find the index where time[ipredict-1] < time_predict < time[ipredict]
        ipredict = np.max(np.where(self.time < time_predict)) + 1
        for i in xrange(ipredict - 1):
            # run the kalman filter for time < time_predict
            self.update()
        # predict the value of y[time_predict]
        self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[ipredict - 1]
        self._StateVector += self._innovation * self._KalmanGain
        self._PredictionVar -= self.kalman_var[ipredict - 1] * (self._KalmanGain * self._KalmanGain.H)
        dt = time_predict - self.time[ipredict - 1]
        self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
        self._StateVector = np.multiply(self._StateVector, self._StateTransition)
        self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
                                          self._PredictionVar - self._StateVar) + self._StateVar
        ypredict_mean = np.asscalar(np.real(self._rotated_MA_coefs * self._StateVector))
        ypredict_var = np.asscalar(np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H))
        # start the running statistics for the conditional mean and precision of the predicted time series value, given
        # the measured time series
        cprecision = 1.0 / ypredict_var
        cmean = cprecision * ypredict_mean
        if ipredict >= self.time.size:
            # we are forecasting (extrapolating) the value, so no need to run interpolation steps below
            return ypredict_mean, ypredict_var
        # for time > time_predict we need to compute the coefficients for the linear filter, i.e., at time[j]:
        # E(y[j]|{y[i]; j<i}) = alpha[j] + beta[j] * ypredict. we do this using recursions similar to the kalman
        # filter.
        # first set the initial values.
        self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / ypredict_var
        # initialize the coefficients for predicting the state vector at coefs(time_predict|time_predict)
        const_state = self._StateVector - self._KalmanGain * ypredict_mean
        slope_state = self._KalmanGain
        # update the state one-step prediction error variance
        self._PredictionVar -= ypredict_var * (self._KalmanGain * self._KalmanGain.H)
        # do coefs(time_predict|time_predict) --> coefs(time[i+1]|time_predict)
        dt = self.time[ipredict] - time_predict
        self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
        const_state = np.multiply(const_state, self._StateTransition)
        slope_state = np.multiply(slope_state, self._StateTransition)
        # update the predicted state covariance matrix
        self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
                                          self._PredictionVar - self._StateVar) + self._StateVar
        # compute the coefficients for the linear filter at time[ipredict], and compute the variance in the predicted
        # y[ipredict]
        const = np.asscalar(np.real(self._rotated_MA_coefs * const_state))
        slope = np.asscalar(np.real(self._rotated_MA_coefs * slope_state))
        self.kalman_var[ipredict] = \
            np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) + \
            self.yvar[ipredict]
        # update the running conditional mean and variance of the predicted time series value
        cprecision += slope ** 2 / self.kalman_var[ipredict]
        cmean += slope * (self.y[ipredict] - const) / self.kalman_var[ipredict]
        # store the filter coefficients for each measured point after time_predict
        self.const = np.zeros(self.time.size)
        self.slope = np.zeros(self.time.size)
        self.const[ipredict] = const
        self.slope[ipredict] = slope
        # now repeat for time > time_predict
        for i in xrange(ipredict + 1, self.time.size):
            self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[i - 1]
            # update the state prediction coefficients: coefs(i|i-1) --> coefs(i|i)
            const_state += self._KalmanGain * (self.y[i - 1] - const)
            slope_state -= self._KalmanGain * slope
            # update the state one-step prediction error variance
            self._PredictionVar -= self.kalman_var[i - 1] * (self._KalmanGain * self._KalmanGain.H)
            # compute the one-step state prediction coefficients: coefs(i|i) --> coefs(i+1|i)
            dt = self.time[i] - self.time[i - 1]
            self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
            const_state = np.multiply(const_state, self._StateTransition)
            slope_state = np.multiply(slope_state, self._StateTransition)
            # compute the state one-step prediction error variance
            self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
                                              self._PredictionVar - self._StateVar) + self._StateVar
            # compute the coefficients for predicting y[i]|y[j],j<i as a function of ypredict
            const = np.asscalar(np.real(self._rotated_MA_coefs * const_state))
            slope = np.asscalar(np.real(self._rotated_MA_coefs * slope_state))
            # compute the variance in predicting y[i]|y[j],j<i
            self.kalman_var[i] = \
                np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) + \
                self.yvar[i]
            # finally, update the running conditional mean and variance of the predicted time series value
            cprecision += slope ** 2 / self.kalman_var[i]
            cmean += slope * (self.y[i] - const) / self.kalman_var[i]
            self.const[i] = const
            self.slope[i] = slope
        # convert the accumulated precision back to a variance and normalize the mean
        cvar = 1.0 / cprecision
        cmean *= cvar
        return cmean, cvar
    def simulate(self, time_simulate):
        """
        Simulate a lightcurve at the input time values of time_simulate, given the measured lightcurve and input
        CARMA(p,q) parameters.
        :rtype : A scalar or numpy array, depending on type of time_simulate.
        :param time_simulate: The time(s) at which to simulate a random draw of the lightcurve conditional on the
            measured time series and the input parameters.
        """
        if np.isscalar(time_simulate):
            cmean, cvar = self.predict(time_simulate)
            ysimulated = np.random.normal(cmean, np.sqrt(cvar))
            return ysimulated
        else:
            # input is array-like, need to simulate values sequentially, adding each value to the measured time series
            # as they are simulated
            time0 = self.time # save original values
            y0 = self.y
            yvar0 = self.yvar
            ysimulated = np.empty(time_simulate.size)
            # NOTE(review): sorts the caller's array in place.
            time_simulate.sort()
            for i in xrange(time_simulate.size):
                cmean, cvar = self.predict(time_simulate[i])
                ysimulated[i] = np.random.normal(cmean, np.sqrt(cvar)) # simulate the time series value
                # find the index where time[isimulate-1] < time_simulate < time[isimulate]
                isimulate = np.max(np.where(self.time < time_simulate[i])) + 1
                # insert the simulated value into the time series array so later
                # draws are conditioned on the earlier ones (zero measurement error)
                self.time = np.insert(self.time, isimulate, time_simulate[i])
                self.y = np.insert(self.y, isimulate, ysimulated[i])
                self.yvar = np.insert(self.yvar, isimulate, 0.0)
            # reset measured time series to original values
            self.y = y0
            self.time = time0
            self.yvar = yvar0
        return ysimulated
|
# Copyright (c) 2012 Jonathan Warren
# Copyright (c) 2012 The Bitmessage developers
# Distributed under the MIT/X11 software license. See the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#Right now, PyBitmessage only supports connecting to stream 1. It doesn't yet contain logic to expand into further streams.
#Module-level protocol and configuration constants. All ages/durations below are in seconds.
softwareVersion = '0.2.4'
verbose = 2
maximumAgeOfAnObjectThatIAmWillingToAccept = 216000 #Equals two days and 12 hours.
lengthOfTimeToLeaveObjectsInInventory = 237600 #Equals two days and 18 hours. This should be longer than maximumAgeOfAnObjectThatIAmWillingToAccept so that we don't process messages twice.
lengthOfTimeToHoldOnToAllPubkeys = 2419200 #Equals 4 weeks. You could make this longer if you want but making it shorter would not be advisable because there is a very small possibility that it could keep you from obtaining a needed pubkey for a period of time.
maximumAgeOfObjectsThatIAdvertiseToOthers = 216000 #Equals two days and 12 hours
maximumAgeOfNodesThatIAdvertiseToOthers = 10800 #Equals three hours
storeConfigFilesInSameDirectoryAsProgram = False
useVeryEasyProofOfWorkForTesting = False #If you set this to True while on the normal network, you won't be able to send or sometimes receive messages.
import sys
#PyQt is a hard requirement for the GUI; exit with download instructions if it is missing.
try:
    from PyQt4.QtCore import *
    from PyQt4.QtGui import *
except Exception, err:
    print 'PyBitmessage requires PyQt. You can download it from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \'PyQt Download\' (without quotes).'
    print 'Error message:', err
    sys.exit()
import ConfigParser
from bitmessageui import *
from newaddressdialog import *
from newsubscriptiondialog import *
from regenerateaddresses import *
from settings import *
from about import *
from help import *
from iconglossary import *
from addresses import *
import Queue
from defaultKnownNodes import *
import time
import socket
import threading
import rsa
from rsa.bigfile import *
import hashlib
from struct import *
import pickle
import random
import sqlite3
import threading #used for the locks, not for the threads
import cStringIO
from time import strftime, localtime
import os
import string
import socks
#import pyelliptic
import highlevelcrypto
from pyelliptic.openssl import OpenSSL
import ctypes
from pyelliptic import arithmetic
#For each stream to which we connect, one outgoingSynSender thread will exist and will create 8 connections with peers.
class outgoingSynSender(QThread):
    """
    Thread that establishes outgoing connections for a single stream.

    One instance exists per stream; it maintains up to 8 outgoing
    connections, choosing peers at random from the module-level knownNodes
    structure and spawning a receiveDataThread/sendDataThread pair for each
    successful connection.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        self.selfInitiatedConnectionList = [] #This is a list of current connections (the thread pointers at least)
        self.alreadyAttemptedConnectionsList = [] #This is a list of nodes to which we have already attempted a connection
    def setup(self,streamNumber):
        #Record which stream this sender services; must be called before start().
        self.streamNumber = streamNumber
    def run(self):
        #Main loop: repeatedly pick an untried known node and attempt an outgoing connection,
        #optionally through a SOCKS4a/SOCKS5 proxy depending on the user's settings.
        time.sleep(1)
        resetTime = int(time.time()) #used below to clear out the alreadyAttemptedConnectionsList periodically so that we will retry connecting to hosts to which we have already tried to connect.
        while True:
            #time.sleep(999999)#I'm using this to prevent connections for testing.
            if len(self.selfInitiatedConnectionList) < 8: #maximum number of outgoing connections = 8
                random.seed()
                HOST, = random.sample(knownNodes[self.streamNumber], 1)
                #Re-sample until we find a host we are neither already connected to nor have already tried.
                while HOST in self.alreadyAttemptedConnectionsList or HOST in connectedHostsList:
                    #print 'choosing new sample'
                    random.seed()
                    HOST, = random.sample(knownNodes[self.streamNumber], 1)
                    time.sleep(1)
                    #Clear out the alreadyAttemptedConnectionsList every half hour so that this program will again attempt a connection to any nodes, even ones it has already tried.
                    if (int(time.time()) - resetTime) > 1800:
                        self.alreadyAttemptedConnectionsList = []
                        resetTime = int(time.time())
                self.alreadyAttemptedConnectionsList.append(HOST)
                PORT, timeNodeLastSeen = knownNodes[self.streamNumber][HOST]
                sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(20)
                #Configure the socket according to the user's proxy settings ('none', 'SOCKS4a', or 'SOCKS5').
                if config.get('bitmessagesettings', 'socksproxytype') == 'none':
                    printLock.acquire()
                    print 'Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    #sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                elif config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS4a':
                    printLock.acquire()
                    print '(Using SOCKS4a) Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    proxytype = socks.PROXY_TYPE_SOCKS4
                    sockshostname = config.get('bitmessagesettings', 'sockshostname')
                    socksport = config.getint('bitmessagesettings', 'socksport')
                    rdns = True #Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
                    if config.getboolean('bitmessagesettings', 'socksauthentication'):
                        socksusername = config.get('bitmessagesettings', 'socksusername')
                        sockspassword = config.get('bitmessagesettings', 'sockspassword')
                        sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
                    else:
                        sock.setproxy(proxytype, sockshostname, socksport, rdns)
                elif config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS5':
                    printLock.acquire()
                    print '(Using SOCKS5) Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    proxytype = socks.PROXY_TYPE_SOCKS5
                    sockshostname = config.get('bitmessagesettings', 'sockshostname')
                    socksport = config.getint('bitmessagesettings', 'socksport')
                    rdns = True #Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
                    if config.getboolean('bitmessagesettings', 'socksauthentication'):
                        socksusername = config.get('bitmessagesettings', 'socksusername')
                        sockspassword = config.get('bitmessagesettings', 'sockspassword')
                        sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
                    else:
                        sock.setproxy(proxytype, sockshostname, socksport, rdns)
                try:
                    sock.connect((HOST, PORT))
                    #On success, spawn the receive/send thread pair for this peer.
                    rd = receiveDataThread()
                    self.emit(SIGNAL("passObjectThrough(PyQt_PyObject)"),rd)
                    objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
                    rd.setup(sock,HOST,PORT,self.streamNumber,self.selfInitiatedConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware)
                    rd.start()
                    printLock.acquire()
                    print self, 'connected to', HOST, 'during outgoing attempt.'
                    printLock.release()
                    sd = sendDataThread()
                    sd.setup(sock,HOST,PORT,self.streamNumber,objectsOfWhichThisRemoteNodeIsAlreadyAware)
                    sd.start()
                    sd.sendVersionMessage()
                except socks.GeneralProxyError, err:
                    printLock.acquire()
                    print 'Could NOT connect to', HOST, 'during outgoing attempt.', err
                    printLock.release()
                    PORT, timeLastSeen = knownNodes[self.streamNumber][HOST]
                    if (int(time.time())-timeLastSeen) > 172800 and len(knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the knownNodes data-structure.
                        del knownNodes[self.streamNumber][HOST]
                        print 'deleting ', HOST, 'from knownNodes because it is more than 48 hours old and we could not connect to it.'
                except socks.Socks5AuthError, err:
                    self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS5 Authentication problem: "+str(err))
                except socks.Socks5Error, err:
                    #NOTE(review): this 'pass' is a leftover no-op; the print below still runs.
                    pass
                    print 'SOCKS5 error. (It is possible that the server wants authentication).)' ,str(err)
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS5 error. Server might require authentication. "+str(err))
                except socks.Socks4Error, err:
                    print 'Socks4Error:', err
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS4 error: "+str(err))
                except socket.error, err:
                    if config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                        print 'Bitmessage MIGHT be having trouble connecting to the SOCKS server. '+str(err)
                        #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"Problem: Bitmessage can not connect to the SOCKS server. "+str(err))
                    else:
                        printLock.acquire()
                        print 'Could NOT connect to', HOST, 'during outgoing attempt.', err
                        printLock.release()
                        PORT, timeLastSeen = knownNodes[self.streamNumber][HOST]
                        if (int(time.time())-timeLastSeen) > 172800 and len(knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the knownNodes data-structure.
                            del knownNodes[self.streamNumber][HOST]
                            print 'deleting ', HOST, 'from knownNodes because it is more than 48 hours old and we could not connect to it.'
                except Exception, err:
                    print 'An exception has occurred in the outgoingSynSender thread that was not caught by other exception types:', err
            time.sleep(0.1)
#Only one singleListener thread will ever exist. It creates the receiveDataThread and sendDataThread for each incoming connection. Note that it cannot set the stream number because it is not known yet- the other node will have to tell us its stream number in a version message. If we don't care about their stream, we will close the connection (within the recversion function of the receiveData thread)
class singleListener(QThread):
    """
    Thread that accepts incoming TCP connections on the configured port.

    For each accepted connection it spawns a receiveDataThread/sendDataThread
    pair. The stream number is passed as -1 because it is not known until the
    remote node sends its version message.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        #We don't want to accept incoming connections if the user is using a SOCKS proxy. If they eventually select proxy 'none' then this will start listening for connections.
        while config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
            time.sleep(300)
        print 'Listening for incoming connections.'
        HOST = '' # Symbolic name meaning all available interfaces
        PORT = config.getint('bitmessagesettings', 'port')
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #This option apparently avoids the TIME_WAIT state so that we can rebind faster
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((HOST, PORT))
        sock.listen(2)
        self.incomingConnectionList = [] #This list isn't used for anything. The reason it exists is because receiveData threads expect that a list be passed to them. They expect this because the outgoingSynSender thread DOES use a similar list to keep track of the number of outgoing connections it has created.
        while True:
            #We don't want to accept incoming connections if the user is using a SOCKS proxy. If the user eventually select proxy 'none' then this will start listening for connections.
            while config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                time.sleep(10)
            a,(HOST,PORT) = sock.accept()
            #Users are finding that if they run more than one node in the same network (thus with the same public IP), they can not connect with the second node. This is because this section of code won't accept the connection from the same IP. This problem will go away when the Bitmessage network grows beyond being tiny but in the mean time I'll comment out this code section.
            """while HOST in connectedHostsList:
                print 'incoming connection is from a host in connectedHostsList (we are already connected to it). Ignoring it.'
                a.close()
                a,(HOST,PORT) = sock.accept()"""
            #Spawn the receive/send thread pair for the accepted connection; stream number -1 marks it as incoming.
            rd = receiveDataThread()
            self.emit(SIGNAL("passObjectThrough(PyQt_PyObject)"),rd)
            objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
            rd.setup(a,HOST,PORT,-1,self.incomingConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware)
            printLock.acquire()
            print self, 'connected to', HOST,'during INCOMING request.'
            printLock.release()
            rd.start()
            sd = sendDataThread()
            sd.setup(a,HOST,PORT,-1,objectsOfWhichThisRemoteNodeIsAlreadyAware)
            sd.start()
#This thread is created either by the synSenderThread (for outgoing connections) or the singleListenerThread (for incoming connections).
class receiveDataThread(QThread):
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        self.data = '' #receive buffer; incoming bytes are appended here until a whole message has arrived
        self.verackSent = False
        self.verackReceived = False
    def setup(self,sock,HOST,port,streamNumber,selfInitiatedConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware):
        #Initialize per-connection state. streamNumber == -1 marks an incoming connection
        #(the real stream number will arrive in the peer's version message).
        self.sock = sock
        self.HOST = HOST
        self.PORT = port
        self.sock.settimeout(600) #We'll send out a pong every 5 minutes to make sure the connection stays alive if there has been no other traffic to send lately.
        self.streamNumber = streamNumber
        self.selfInitiatedConnectionList = selfInitiatedConnectionList
        self.selfInitiatedConnectionList.append(self)
        self.payloadLength = 0 #This is the protocol payload length thus it doesn't include the 24 byte message header
        self.receivedgetbiginv = False #Gets set to true once we receive a getbiginv message from our peer. An abusive peer might request it too much so we use this variable to check whether they have already asked for a big inv message.
        self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave = {}
        connectedHostsList[self.HOST] = 0 #The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it to this list so that the outgoingSynSender thread doesn't try to connect to it.
        self.connectionIsOrWasFullyEstablished = False #set to true after the remote node and I accept each other's version messages. This is needed to allow the user interface to accurately reflect the current number of connections.
        if self.streamNumber == -1: #This was an incoming connection. Send out a version message if we accept the other node's version message.
            self.initiatedConnection = False
        else:
            self.initiatedConnection = True
        self.ackDataThatWeHaveYetToSend = [] #When we receive a message bound for us, we store the acknowledgement that we need to send (the ackdata) here until we are done processing all other data received from this peer.
        self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware
    def run(self):
        #Receive loop: append incoming bytes to self.data and hand them to processData
        #until the socket times out, errors, or the peer closes the connection.
        while True:
            try:
                self.data = self.data + self.sock.recv(65536)
            except socket.timeout:
                printLock.acquire()
                print 'Timeout occurred waiting for data. Closing receiveData thread.'
                printLock.release()
                break
            except Exception, err:
                printLock.acquire()
                print 'sock.recv error. Closing receiveData thread.', err
                printLock.release()
                break
            #print 'Received', repr(self.data)
            if self.data == "":
                #recv returning an empty string means the remote side closed the connection.
                printLock.acquire()
                print 'Connection closed. Closing receiveData thread.'
                printLock.release()
                break
            else:
                self.processData()
        #Tear-down: close the socket, deregister this thread, and notify the rest of the program.
        try:
            self.sock.close()
        except Exception, err:
            print 'Within receiveDataThread run(), self.sock.close() failed.', err
        try:
            self.selfInitiatedConnectionList.remove(self)
            printLock.acquire()
            print 'removed self (a receiveDataThread) from ConnectionList'
            printLock.release()
        except:
            pass
        broadcastToSendDataQueues((0, 'shutdown', self.HOST))
        if self.connectionIsOrWasFullyEstablished: #We don't want to decrement the number of connections and show the result if we never incremented it in the first place (which we only do if the connection is fully established- meaning that both nodes accepted each other's version packets.)
            connectionsCountLock.acquire()
            connectionsCount[self.streamNumber] -= 1
            self.emit(SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"),self.streamNumber,connectionsCount[self.streamNumber])
            printLock.acquire()
            print 'Updating network status tab with current connections count:', connectionsCount[self.streamNumber]
            printLock.release()
            connectionsCountLock.release()
        try:
            del connectedHostsList[self.HOST]
        except Exception, err:
            print 'Could not delete', self.HOST, 'from connectedHostsList.', err
    def processData(self):
        """Parse self.data, this connection's receive buffer.

        If a complete, checksum-valid Bitmessage protocol message is
        buffered, dispatch it to the matching rec*/send* handler, strip it
        from the buffer, and recurse to handle any further queued messages
        (including any pending ackData queued by recmsg).  A partially
        received message is left in the buffer for the next call.
        """
        global verbose
        #if verbose >= 2:
            #printLock.acquire()
            #print 'self.data is currently ', repr(self.data)
            #printLock.release()
        if len(self.data) < 20: #if so little of the data has arrived that we can't even unpack the payload length
            pass
        elif self.data[0:4] != '\xe9\xbe\xb4\xd9':
            if verbose >= 2:
                printLock.acquire()
                sys.stderr.write('The magic bytes were not correct. First 40 bytes of data: %s\n' % repr(self.data[0:40]))
                printLock.release()
            # Out-of-sync stream: discard the whole buffer rather than
            # trying to resynchronize on the magic bytes.
            self.data = ""
        else:
            self.payloadLength, = unpack('>L',self.data[16:20])
            if len(self.data) >= self.payloadLength+24: #check if the whole message has arrived yet. If it has,...
                if self.data[20:24] == hashlib.sha512(self.data[24:self.payloadLength+24]).digest()[0:4]:#test the checksum in the message. If it is correct...
                    #print 'message checksum is correct'
                    #The time we've last seen this node is obviously right now since we just received valid data from it. So update the knownNodes list so that other peers can be made aware of its existance.
                    if self.initiatedConnection: #The remote port is only something we should share with others if it is the remote node's incoming port (rather than some random operating-system-assigned outgoing port).
                        knownNodes[self.streamNumber][self.HOST] = (self.PORT,int(time.time()))
                    if self.payloadLength <= 180000000: #If the size of the message is greater than 180MB, ignore it. (I get memory errors when processing messages much larger than this though it is concievable that this value will have to be lowered if some systems are less tolarant of large messages.)
                        remoteCommand = self.data[4:16]
                        printLock.acquire()
                        print 'remoteCommand ', remoteCommand, 'from', self.HOST
                        printLock.release()
                        # Dispatch on the 12-byte null-padded command field.
                        # Everything except version/verack requires a fully
                        # established (verack-exchanged) connection.
                        if remoteCommand == 'version\x00\x00\x00\x00\x00':
                            self.recversion()
                        elif remoteCommand == 'verack\x00\x00\x00\x00\x00\x00':
                            self.recverack()
                        elif remoteCommand == 'addr\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recaddr()
                        elif remoteCommand == 'getpubkey\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recgetpubkey()
                        elif remoteCommand == 'pubkey\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recpubkey()
                        elif remoteCommand == 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recinv()
                        elif remoteCommand == 'getdata\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recgetdata()
                        elif remoteCommand == 'getbiginv\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendBigInv()
                        elif remoteCommand == 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recmsg()
                        elif remoteCommand == 'broadcast\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recbroadcast()
                        elif remoteCommand == 'getaddr\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendaddr()
                        elif remoteCommand == 'ping\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendpong()
                        elif remoteCommand == 'pong\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            pass
                        elif remoteCommand == 'alert\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            pass
                    self.data = self.data[self.payloadLength+24:]#take this message out and then process the next message
                    if self.data == '':
                        # Buffer is drained: pick ONE random advertised
                        # object we still need and request it.  Randomness
                        # avoids every peer fetching in the same order.
                        while len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
                            random.seed()
                            objectHash, = random.sample(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave,  1)
                            if objectHash in inventory:
                                printLock.acquire()
                                print 'Inventory (in memory) already has object listed in inv message.'
                                printLock.release()
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]
                            elif isInSqlInventory(objectHash):
                                printLock.acquire()
                                print 'Inventory (SQL on disk) already has object listed in inv message.'
                                printLock.release()
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]
                            else:
                                #print 'processData function making request for object:', objectHash.encode('hex')
                                self.sendgetdata(objectHash)
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash] #It is possible that the remote node doesn't respond with the object. In that case, we'll very likely get it from someone else anyway.
                                break
                        if len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
                            printLock.acquire()
                            print 'within processData, number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now', len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)
                            printLock.release()
                        if len(self.ackDataThatWeHaveYetToSend) > 0:
                            # Queued ackData is fed back through this parser
                            # as if the peer had just sent it to us.
                            self.data = self.ackDataThatWeHaveYetToSend.pop()
                    # Recurse: there may be another complete message (or the
                    # ackData we just loaded) waiting in the buffer.
                    self.processData()
                else:
                    print 'Checksum incorrect. Clearing this message.'
                    self.data = self.data[self.payloadLength+24:]
def isProofOfWorkSufficient(self):
POW, = unpack('>Q',hashlib.sha512(hashlib.sha512(self.data[24:32]+ hashlib.sha512(self.data[32:24+self.payloadLength]).digest()).digest()).digest()[0:8])
#print 'POW:', POW
#Notice that I have divided the averageProofOfWorkNonceTrialsPerByte by two. This makes the POW requirement easier. This gives us wiggle-room: if we decide that we want to make the POW easier, the change won't obsolete old clients because they already expect a lower POW. If we decide that the current work done by clients feels approperate then we can remove this division by 2 and make the requirement match what is actually done by a sending node. If we want to raise the POW requirement then old nodes will HAVE to upgrade no matter what.
return POW < 2**64 / ((self.payloadLength+payloadLengthExtraBytes) * (averageProofOfWorkNonceTrialsPerByte/2))
def sendpong(self):
print 'Sending pong'
self.sock.sendall('\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
def recverack(self):
print 'verack received'
self.verackReceived = True
if self.verackSent == True:
#We have thus both sent and received a verack.
self.connectionFullyEstablished()
    def connectionFullyEstablished(self):
        """Run once both verack messages have been exchanged.

        Updates the GUI, bumps the per-stream connection count, advertises
        this peer to the rest of the network, then sends our addr list and
        a big inv to the new peer.  If we already hold more than 150
        connections in this stream, the socket is closed instead.
        """
        self.connectionIsOrWasFullyEstablished = True
        if not self.initiatedConnection:
            # An inbound connection proves we are reachable from outside,
            # so the status icon turns green.
            self.emit(SIGNAL("setStatusIcon(PyQt_PyObject)"),'green')
        #Update the 'Network Status' tab
        connectionsCountLock.acquire()
        connectionsCount[self.streamNumber] += 1
        self.emit(SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"),self.streamNumber,connectionsCount[self.streamNumber])
        connectionsCountLock.release()
        remoteNodeIncomingPort, remoteNodeSeenTime = knownNodes[self.streamNumber][self.HOST]
        printLock.acquire()
        print 'Connection fully established with', self.HOST, remoteNodeIncomingPort
        print 'broadcasting addr from within connectionFullyEstablished function.'
        printLock.release()
        self.broadcastaddr([(int(time.time()), self.streamNumber, 1, self.HOST, remoteNodeIncomingPort)]) #This lets all of our peers know about this new node.
        self.sendaddr() #This is one large addr message to this one peer.
        if connectionsCount[self.streamNumber] > 150:
            printLock.acquire()
            print 'We are connected to too many people. Closing connection.'
            printLock.release()
            self.sock.close()
            return
        self.sendBigInv()
    def sendBigInv(self): #I used capitals in for this function name because there is no such Bitmessage command as 'biginv'.
        """Advertise our whole recent inventory to this one peer.

        Collects every object hash from the SQL inventory and the
        in-memory inventory that is in our stream, young enough to
        advertise, and not already known to the peer, then sends them in
        inv messages of at most 50000 hashes each.  Runs at most once per
        connection.
        """
        # NOTE(review): despite the name, receivedgetbiginv is used here as
        # a "big inv already sent" latch for this connection.
        if self.receivedgetbiginv:
            print 'We have already sent a big inv message to this peer. Ignoring request.'
            return
        else:
            self.receivedgetbiginv = True
        sqlLock.acquire()
        #Select all hashes which are younger than two days old and in this stream.
        t = (int(time.time())-maximumAgeOfObjectsThatIAdvertiseToOthers,self.streamNumber)
        sqlSubmitQueue.put('''SELECT hash FROM inventory WHERE receivedtime>? and streamnumber=?''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        bigInvList = {}
        for row in queryreturn:
            hash, = row
            if hash not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                bigInvList[hash] = 0
            else:
                printLock.acquire()
                print 'Not including an object hash in a big inv message because the remote node is already aware of it.'#This line is here to check that this feature is working.
                printLock.release()
        #We also have messages in our inventory in memory (which is a python dictionary). Let's fetch those too.
        for hash, storedValue in inventory.items():
            if hash not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                objectType, streamNumber, payload, receivedTime = storedValue
                if streamNumber == self.streamNumber and receivedTime > int(time.time())-maximumAgeOfObjectsThatIAdvertiseToOthers:
                    bigInvList[hash] = 0
            else:
                printLock.acquire()
                print 'Not including an object hash in a big inv message because the remote node is already aware of it.'#This line is here to check that this feature is working.
                printLock.release()
        numberOfObjectsInInvMessage = 0
        payload = ''
        #Now let us start appending all of these hashes together. They will be sent out in a big inv message to our new peer.
        for hash, storedValue in bigInvList.items():
            payload += hash
            numberOfObjectsInInvMessage += 1
            if numberOfObjectsInInvMessage >= 50000: #We can only send a max of 50000 items per inv message but we may have more objects to advertise. They must be split up into multiple inv messages.
                self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage,payload)
                payload = ''
                numberOfObjectsInInvMessage = 0
        if numberOfObjectsInInvMessage > 0:
            self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage,payload)
#Self explanatory. Notice that there is also a broadcastinv function for broadcasting invs to everyone in our stream.
def sendinvMessageToJustThisOnePeer(self,numberOfObjects,payload):
payload = encodeVarint(numberOfObjects) + payload
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload))
headerData += hashlib.sha512(payload).digest()[:4]
print 'Sending huge inv message with', numberOfObjects, 'objects to just this one peer'
self.sock.send(headerData + payload)
#We have received a broadcast message
    def recbroadcast(self):
        """Handle an incoming broadcast message.

        Validates POW, embedded time, and minimum size; dedupes against
        the in-memory and SQL inventories; stores and re-advertises the
        object; then delegates to processbroadcast.  Finally sleeps so
        total processing time is constant per size class (timing-attack
        mitigation: observers can't tell whether we decrypted it).
        """
        self.messageProcessingStartTime = time.time()
        #First we must check to make sure the proof of work is sufficient.
        if not self.isProofOfWorkSufficient():
            print 'Proof of work in broadcast message insufficient.'
            return
        embeddedTime, = unpack('>I',self.data[32:36])
        if embeddedTime > (int(time.time())+10800): #prevent funny business
            print 'The embedded time in this broadcast message is more than three hours in the future. That doesn\'t make sense. Ignoring message.'
            return
        if embeddedTime < (int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept):
            print 'The embedded time in this broadcast message is too old. Ignoring message.'
            return
        if self.payloadLength < 66: #todo: When version 1 addresses are completely abandoned, this should be changed to 180
            print 'The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.'
            return
        # Hold inventoryLock across the dedupe check AND the insert so two
        # threads can't both decide the object is new.
        inventoryLock.acquire()
        self.inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
        if self.inventoryHash in inventory:
            print 'We have already received this broadcast object. Ignoring.'
            inventoryLock.release()
            return
        elif isInSqlInventory(self.inventoryHash):
            print 'We have already received this broadcast object (it is stored on disk in the SQL inventory). Ignoring it.'
            inventoryLock.release()
            return
        #It is valid so far. Let's let our peers know about it.
        objectType = 'broadcast'
        inventory[self.inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
        inventoryLock.release()
        self.broadcastinv(self.inventoryHash)
        self.emit(SIGNAL("incrementNumberOfBroadcastsProcessed()"))
        self.processbroadcast()#When this function returns, we will have either successfully processed this broadcast because we are interested in it, ignored it because we aren't interested in it, or found problem with the broadcast that warranted ignoring it.
        # Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we haven't used the specified amount of time, we shall sleep. These values are mostly the same values used for msg messages although broadcast messages are processed faster.
        if self.payloadLength > 100000000: #Size is greater than 100 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 100 #seconds.
        elif self.payloadLength > 10000000: #Between 100 and 10 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 20 #seconds.
        elif self.payloadLength > 1000000: #Between 10 and 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = 3 #seconds.
        else: #Less than 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = .1 #seconds.
        sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.messageProcessingStartTime)
        if sleepTime > 0:
            printLock.acquire()
            print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
            printLock.release()
            time.sleep(sleepTime)
        printLock.acquire()
        print 'Total message processing time:', time.time()- self.messageProcessingStartTime, 'seconds.'
        printLock.release()
#A broadcast message has a valid time and POW and requires processing. The recbroadcast function calls this one.
    def processbroadcast(self):
        """Parse a broadcast whose POW and time were already validated by
        recbroadcast.

        Only broadcastVersion 1 is understood.  Depending on the sender's
        address version (2 = ECDSA key pair, 1 = legacy RSA key), walks
        the wire format with readPosition, checks the sender's key hash
        against broadcastSendersForWhichImWatching, verifies the
        signature, and, if all passes, stores the decoded message in the
        inbox and notifies the GUI.  Any parse/verify failure simply
        returns, ignoring the broadcast.
        """
        readPosition = 36
        broadcastVersion, broadcastVersionLength = decodeVarint(self.data[readPosition:readPosition+9])
        if broadcastVersion <> 1:
            #Cannot decode incoming broadcast versions higher than 1. Assuming the sender isn\' being silly, you should upgrade Bitmessage because this message shall be ignored.
            return
        readPosition += broadcastVersionLength
        sendersAddressVersion, sendersAddressVersionLength = decodeVarint(self.data[readPosition:readPosition+9])
        if sendersAddressVersion == 0 or sendersAddressVersion >=3:
            #Cannot decode senderAddressVersion higher than 2. Assuming the sender isn\' being silly, you should upgrade Bitmessage because this message shall be ignored.
            return
        readPosition += sendersAddressVersionLength
        if sendersAddressVersion == 2:
            sendersStream, sendersStreamLength = decodeVarint(self.data[readPosition:readPosition+9])
            if sendersStream <= 0 or sendersStream <> self.streamNumber:
                return
            readPosition += sendersStreamLength
            behaviorBitfield = self.data[readPosition:readPosition+4]
            readPosition += 4
            # Wire format carries 64-byte X9.62 keys without the 0x04
            # uncompressed-point prefix; restore it here.
            sendersPubSigningKey = '\x04' + self.data[readPosition:readPosition+64]
            readPosition += 64
            sendersPubEncryptionKey = '\x04' + self.data[readPosition:readPosition+64]
            readPosition += 64
            sendersHash = self.data[readPosition:readPosition+20]
            if sendersHash not in broadcastSendersForWhichImWatching:
                #Display timing data
                printLock.acquire()
                print 'Time spent deciding that we are not interested in this broadcast:', time.time()- self.messageProcessingStartTime
                printLock.release()
                return
            #At this point, this message claims to be from sendersHash and we are interested in it. We still have to hash the public key to make sure it is truly the key that matches the hash, and also check the signiture.
            readPosition += 20
            # Address hash is ripemd160(sha512(signing key || encryption key)).
            sha = hashlib.new('sha512')
            sha.update(sendersPubSigningKey+sendersPubEncryptionKey)
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            if ripe.digest() != sendersHash:
                #The sender of this message lied.
                return
            messageEncodingType, messageEncodingTypeLength = decodeVarint(self.data[readPosition:readPosition+9])
            if messageEncodingType == 0:
                return
            readPosition += messageEncodingTypeLength
            messageLength, messageLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += messageLengthLength
            message = self.data[readPosition:readPosition+messageLength]
            readPosition += messageLength
            readPositionAtBottomOfMessage = readPosition
            signatureLength, signatureLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += signatureLengthLength
            signature = self.data[readPosition:readPosition+signatureLength]
            try:
                highlevelcrypto.verify(self.data[36:readPositionAtBottomOfMessage],signature,sendersPubSigningKey.encode('hex'))
                print 'ECDSA verify passed'
            except Exception, err:
                print 'ECDSA verify failed', err
                return
            #verify passed
            fromAddress = encodeAddress(sendersAddressVersion,sendersStream,ripe.digest())
            print 'fromAddress:', fromAddress
            # Encoding type 2 packs 'Subject:...\nBody:...' into one blob.
            if messageEncodingType == 2:
                bodyPositionIndex = string.find(message,'\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex+6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                body = message
                subject = ''
            elif messageEncodingType == 0:
                print 'messageEncodingType == 0. Doing nothing with the message.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''
            toAddress = '[Broadcast subscribers]'
            if messageEncodingType <> 0:
                sqlLock.acquire()
                t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
            #Display timing data
            printLock.acquire()
            print 'Time spent processing this interesting broadcast:', time.time()- self.messageProcessingStartTime
            printLock.release()
        elif sendersAddressVersion == 1:
            # Legacy RSA (version 1) sender addresses; this path is slated
            # for removal along with version 1 address support.
            sendersStream, sendersStreamLength = decodeVarint(self.data[readPosition:readPosition+9])
            if sendersStream <= 0:
                return
            readPosition += sendersStreamLength
            sendersHash = self.data[readPosition:readPosition+20]
            if sendersHash not in broadcastSendersForWhichImWatching:
                return
            #At this point, this message claims to be from sendersHash and we are interested in it. We still have to hash the public key to make sure it is truly the key that matches the hash, and also check the signiture.
            readPosition += 20
            nLength, nLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            if nLength < 1:
                return
            readPosition += nLengthLength
            nString = self.data[readPosition:readPosition+nLength]
            readPosition += nLength
            eLength, eLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            if eLength < 1:
                return
            readPosition += eLengthLength
            eString = self.data[readPosition:readPosition+eLength]
            #We are now ready to hash the public key and verify that its hash matches the hash claimed in the message
            readPosition += eLength
            sha = hashlib.new('sha512')
            sha.update(nString+eString)
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            if ripe.digest() != sendersHash:
                #The sender of this message lied.
                return
            readPositionAtBeginningOfMessageEncodingType = readPosition
            messageEncodingType, messageEncodingTypeLength = decodeVarint(self.data[readPosition:readPosition+9])
            if messageEncodingType == 0:
                return
            readPosition += messageEncodingTypeLength
            messageLength, messageLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += messageLengthLength
            message = self.data[readPosition:readPosition+messageLength]
            readPosition += messageLength
            # assumes the RSA signature is exactly nLength bytes — TODO confirm
            signature = self.data[readPosition:readPosition+nLength]
            sendersPubkey = rsa.PublicKey(convertStringToInt(nString),convertStringToInt(eString))
            #print 'senders Pubkey', sendersPubkey
            try:
                rsa.verify(self.data[readPositionAtBeginningOfMessageEncodingType:readPositionAtBeginningOfMessageEncodingType+messageEncodingTypeLength+messageLengthLength+messageLength],signature,sendersPubkey)
                print 'verify passed'
            except Exception, err:
                print 'verify failed', err
                return
            #verify passed
            fromAddress = encodeAddress(sendersAddressVersion,sendersStream,ripe.digest())
            print 'fromAddress:', fromAddress
            if messageEncodingType == 2:
                bodyPositionIndex = string.find(message,'\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex+6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                body = message
                subject = ''
            elif messageEncodingType == 0:
                print 'messageEncodingType == 0. Doing nothing with the message.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''
            toAddress = '[Broadcast subscribers]'
            if messageEncodingType <> 0:
                sqlLock.acquire()
                t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
#We have received a msg message.
    def recmsg(self):
        """Handle an incoming msg message.

        Validates POW, embedded time, and stream number; dedupes against
        the inventories; stores and re-advertises the object; then calls
        processmsg (the normal, EC-key path).  Sleeps afterwards so total
        processing time is constant per size class (timing-attack
        mitigation).  The remainder of this method is the legacy RSA
        (version 1 address) decryption path, slated for removal.
        """
        self.messageProcessingStartTime = time.time()
        #First we must check to make sure the proof of work is sufficient.
        if not self.isProofOfWorkSufficient():
            print 'Proof of work in msg message insufficient.'
            return
        readPosition = 32
        embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
        if embeddedTime > int(time.time())+10800:
            print 'The time in the msg message is too new. Ignoring it. Time:', embeddedTime
            return
        if embeddedTime < int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept:
            print 'The time in the msg message is too old. Ignoring it. Time:', embeddedTime
            return
        readPosition += 4
        streamNumberAsClaimedByMsg, streamNumberAsClaimedByMsgLength = decodeVarint(self.data[readPosition:readPosition+9])
        if streamNumberAsClaimedByMsg != self.streamNumber:
            print 'The stream number encoded in this msg (' + str(streamNumberAsClaimedByMsg) + ') message does not match the stream number on which it was received. Ignoring it.'
            return
        readPosition += streamNumberAsClaimedByMsgLength
        self.inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
        # Hold inventoryLock across the dedupe check and the insert.
        inventoryLock.acquire()
        if self.inventoryHash in inventory:
            print 'We have already received this msg message. Ignoring.'
            inventoryLock.release()
            return
        elif isInSqlInventory(self.inventoryHash):
            print 'We have already received this msg message (it is stored on disk in the SQL inventory). Ignoring it.'
            inventoryLock.release()
            return
        #This msg message is valid. Let's let our peers know about it.
        objectType = 'msg'
        inventory[self.inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
        inventoryLock.release()
        self.broadcastinv(self.inventoryHash)
        self.emit(SIGNAL("incrementNumberOfMessagesProcessed()"))
        self.processmsg(readPosition) #When this function returns, we will have either successfully processed the message bound for us, ignored it because it isn't bound for us, or found problem with the message that warranted ignoring it.
        # Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we haven't used the specified amount of time, we shall sleep. These values are based on test timings and you may change them at-will.
        if self.payloadLength > 100000000: #Size is greater than 100 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 100 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 MB message: 3.7 seconds.
        elif self.payloadLength > 10000000: #Between 100 and 10 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 20 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 10 MB message: 0.53 seconds. Actual length of time it takes in practice when processing a real message: 1.44 seconds.
        elif self.payloadLength > 1000000: #Between 10 and 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = 3 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 1 MB message: 0.18 seconds. Actual length of time it takes in practice when processing a real message: 0.30 seconds.
        else: #Less than 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = .6 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 KB message: 0.15 seconds. Actual length of time it takes in practice when processing a real message: 0.25 seconds.
        sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.messageProcessingStartTime)
        if sleepTime > 0:
            printLock.acquire()
            print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
            printLock.release()
            time.sleep(sleepTime)
        printLock.acquire()
        print 'Total message processing time:', time.time()- self.messageProcessingStartTime, 'seconds.'
        printLock.release()
        #This section is for my RSA keys (version 1 addresses). If we don't have any version 1 addresses it will never run. This code will soon be removed.
        initialDecryptionSuccessful = False
        infile = cStringIO.StringIO(self.data[readPosition:self.payloadLength+24])
        outfile = cStringIO.StringIO()
        #print 'len(myRSAAddressHashes.items()):', len(myRSAAddressHashes.items())
        # Try each of our RSA keys in turn until one decrypts the payload.
        for key, value in myRSAAddressHashes.items():
            try:
                decrypt_bigfile(infile, outfile, value)
                #The initial decryption passed though there is a small chance that the message isn't actually for me. We'll need to check that the 20 zeros are present.
                #print 'initial decryption successful using key', repr(key)
                initialDecryptionSuccessful = True
                printLock.acquire()
                print 'Initial decryption passed'
                printLock.release()
                break
            except Exception, err:
                # Rewind the ciphertext so the next key gets a fresh read.
                infile.seek(0)
                #print 'Exception:', err
                #print 'outfile len is:', len(outfile.getvalue()),'data is:', repr(outfile.getvalue())
                #print 'Initial decryption failed using key', value
                #decryption failed for this key. The message is for someone else (or for a different key of mine).
        if initialDecryptionSuccessful and outfile.getvalue()[:20] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00': #this run of 0s allows the true message receiver to identify his message
            #This is clearly a message bound for me.
            outfile.seek(0)
            data = outfile.getvalue()
            readPosition = 20 #To start reading past the 20 zero bytes
            messageVersion, messageVersionLength = decodeVarint(data[readPosition:readPosition+10])
            readPosition += messageVersionLength
            if messageVersion == 1:
                bitfieldBehavior = data[readPosition:readPosition+4]
                readPosition += 4
                sendersAddressVersionNumber, sendersAddressVersionNumberLength = decodeVarint(data[readPosition:readPosition+10])
                if sendersAddressVersionNumber == 1:
                    readPosition += sendersAddressVersionNumberLength
                    sendersStreamNumber, sendersStreamNumberLength = decodeVarint(data[readPosition:readPosition+10])
                    if sendersStreamNumber == 0:
                        print 'sendersStreamNumber = 0. Ignoring message'
                    else:
                        readPosition += sendersStreamNumberLength
                        # Parse the sender's RSA public key: varint-length n
                        # (modulus) then varint-length e (exponent).
                        sendersNLength, sendersNLengthLength = decodeVarint(data[readPosition:readPosition+10])
                        readPosition += sendersNLengthLength
                        sendersN = data[readPosition:readPosition+sendersNLength]
                        readPosition += sendersNLength
                        sendersELength, sendersELengthLength = decodeVarint(data[readPosition:readPosition+10])
                        readPosition += sendersELengthLength
                        sendersE = data[readPosition:readPosition+sendersELength]
                        readPosition += sendersELength
                        endOfThePublicKeyPosition = readPosition
                        messageEncodingType, messageEncodingTypeLength = decodeVarint(data[readPosition:readPosition+10])
                        readPosition += messageEncodingTypeLength
                        print 'Message Encoding Type:', messageEncodingType
                        messageLength, messageLengthLength = decodeVarint(data[readPosition:readPosition+10])
                        print 'message length:', messageLength
                        readPosition += messageLengthLength
                        message = data[readPosition:readPosition+messageLength]
                        #print 'First 150 characters of message:', repr(message[:150])
                        readPosition += messageLength
                        ackLength, ackLengthLength = decodeVarint(data[readPosition:readPosition+10])
                        #print 'ackLength:', ackLength
                        readPosition += ackLengthLength
                        ackData = data[readPosition:readPosition+ackLength]
                        readPosition += ackLength
                        payloadSigniture = data[readPosition:readPosition+sendersNLength] #We're using the length of the sender's n because it should match the signiture size.
                        sendersPubkey = rsa.PublicKey(convertStringToInt(sendersN),convertStringToInt(sendersE))
                        print 'sender\'s Pubkey', sendersPubkey
                        #Check the cryptographic signiture
                        verifyPassed = False
                        try:
                            rsa.verify(data[:-len(payloadSigniture)],payloadSigniture, sendersPubkey)
                            print 'verify passed'
                            verifyPassed = True
                        except Exception, err:
                            print 'verify failed', err
                        if verifyPassed:
                            #calculate the fromRipe.
                            sha = hashlib.new('sha512')
                            sha.update(sendersN+sendersE)
                            ripe = hashlib.new('ripemd160')
                            ripe.update(sha.digest())
                            #Let's store the public key in case we want to reply to this person.
                            #We don't have the correct nonce in order to send out a pubkey message so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed to not do.)
                            t = (ripe.digest(),False,'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'+data[20+messageVersionLength:endOfThePublicKeyPosition],int(time.time()),'yes')
                            sqlLock.acquire()
                            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
                            sqlSubmitQueue.put(t)
                            sqlReturnQueue.get()
                            sqlLock.release()
                            blockMessage = False #Gets set to True if the user shouldn't see the message according to black or white lists.
                            fromAddress = encodeAddress(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest())
                            if config.get('bitmessagesettings', 'blackwhitelist') == 'black': #If we are using a blacklist
                                t = (fromAddress,)
                                sqlLock.acquire()
                                sqlSubmitQueue.put('''SELECT label, enabled FROM blacklist where address=?''')
                                sqlSubmitQueue.put(t)
                                queryreturn = sqlReturnQueue.get()
                                sqlLock.release()
                                for row in queryreturn:
                                    label, enabled = row
                                    if enabled:
                                        print 'Message ignored because address is in blacklist.'
                                        blockMessage = True
                            else: #We're using a whitelist
                                t = (fromAddress,)
                                sqlLock.acquire()
                                sqlSubmitQueue.put('''SELECT label, enabled FROM whitelist where address=?''')
                                sqlSubmitQueue.put(t)
                                queryreturn = sqlReturnQueue.get()
                                sqlLock.release()
                                if queryreturn == []:
                                    print 'Message ignored because address not in whitelist.'
                                    blockMessage = True
                                for row in queryreturn: #It could be in the whitelist but disabled. Let's check.
                                    label, enabled = row
                                    if not enabled:
                                        print 'Message ignored because address in whitelist but not enabled.'
                                        blockMessage = True
                            if not blockMessage:
                                print 'fromAddress:', fromAddress
                                print 'First 150 characters of message:', repr(message[:150])
                                #Look up the destination address (my address) based on the destination ripe hash.
                                #I realize that I could have a data structure devoted to this task, or maintain an indexed table
                                #in the sql database, but I would prefer to minimize the number of data structures this program
                                #uses. Searching linearly through the user's short list of addresses doesn't take very long anyway.
                                configSections = config.sections()
                                for addressInKeysFile in configSections:
                                    if addressInKeysFile <> 'bitmessagesettings':
                                        status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
                                        if hash == key:
                                            toAddress = addressInKeysFile
                                            toLabel = config.get(addressInKeysFile, 'label')
                                            if toLabel == '':
                                                toLabel = addressInKeysFile
                                            break
                                # Encoding type 2 packs 'Subject:...\nBody:...'
                                # into one blob.
                                if messageEncodingType == 2:
                                    bodyPositionIndex = string.find(message,'\nBody:')
                                    if bodyPositionIndex > 1:
                                        subject = message[8:bodyPositionIndex]
                                        body = message[bodyPositionIndex+6:]
                                    else:
                                        subject = ''
                                        body = message
                                elif messageEncodingType == 1:
                                    body = message
                                    subject = ''
                                elif messageEncodingType == 0:
                                    print 'messageEncodingType == 0. Doing nothing with the message. They probably just sent it so that we would store their public key or send their ack data for them.'
                                else:
                                    body = 'Unknown encoding type.\n\n' + repr(message)
                                    subject = ''
                                print 'within recmsg, self.inventoryHash is', repr(self.inventoryHash)
                                if messageEncodingType <> 0:
                                    sqlLock.acquire()
                                    t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                                    sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                                    sqlSubmitQueue.put(t)
                                    sqlReturnQueue.get()
                                    sqlLock.release()
                                    self.emit(SIGNAL("displayNewMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
                                #Now let us worry about the acknowledgement data
                                #We'll need to make sure that our client will properly process the ackData; if the packet is malformed, it might cause us to clear out self.data and an attacker could use that behavior to determine that we decoded this message.
                                ackDataValidThusFar = True
                                if len(ackData) < 24:
                                    print 'The length of ackData is unreasonably short. Not sending ackData.'
                                    ackDataValidThusFar = False
                                if ackData[0:4] != '\xe9\xbe\xb4\xd9':
                                    print 'Ackdata magic bytes were wrong. Not sending ackData.'
                                    ackDataValidThusFar = False
                                if ackDataValidThusFar:
                                    ackDataPayloadLength, = unpack('>L',ackData[16:20])
                                    if len(ackData)-24 != ackDataPayloadLength: #This ackData includes the protocol header which is not counted in the payload length.
                                        print 'ackData payload length doesn\'t match the payload length specified in the header. Not sending ackdata.'
                                        ackDataValidThusFar = False
                                if ackDataValidThusFar:
                                    print 'ackData is valid. Will process it.'
                                    self.ackDataThatWeHaveYetToSend.append(ackData) #When we have processed all data, the processData function will pop the ackData out and process it as if it is a message received from our peer.
                else:
                    print 'This program cannot decode messages from addresses with versions higher than 1. Ignoring.'
                    statusbar = 'This program cannot decode messages from addresses with versions higher than 1. Ignoring it.'
                    self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
            else:
                statusbar = 'Error: Cannot decode incoming msg versions higher than 1. Assuming the sender isn\' being silly, you should upgrade Bitmessage. Ignoring message.'
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
        else:
            printLock.acquire()
            print 'Could not decrypt with any RSA keys if you have any.'
            printLock.release()
        infile.close()
        outfile.close()
#A msg message has a valid time and POW and requires processing. The recmsg function calls this one.
def processmsg(self,readPosition):
    """Process a 'msg' object whose embedded time and proof of work have
    already been validated (the recmsg function calls this one).

    readPosition indexes into self.data at the start of the msg payload
    proper.  Three outcomes: the msg is an acknowledgement for one of our
    sent messages; it decrypts with one of our EC keys and is displayed;
    or it is not for us and is only timed (for timing-attack statistics).
    """
    initialDecryptionSuccessful = False
    #Let's check whether this is a message acknowledgement bound for us.
    if self.data[readPosition:24+self.payloadLength] in ackdataForWhichImWatching:
        printLock.acquire()
        print 'This msg IS an acknowledgement bound for me.'
        printLock.release()
        #Stop watching for this ackdata and mark the sent message as acknowledged.
        del ackdataForWhichImWatching[self.data[readPosition:24+self.payloadLength]]
        t = ('ackreceived',self.data[readPosition:24+self.payloadLength])
        sqlLock.acquire()
        sqlSubmitQueue.put('UPDATE sent SET status=? WHERE ackdata=?')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()
        sqlLock.release()
        self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),self.data[readPosition:24+self.payloadLength],'Acknowledgement of the message received just now.')
        return
    else:
        printLock.acquire()
        print 'This was NOT an acknowledgement bound for me.' #Msg potential ack data:', repr(self.data[readPosition:24+self.payloadLength])
        #print 'ackdataForWhichImWatching', ackdataForWhichImWatching
        printLock.release()
    #This is not an acknowledgement bound for me. See if it is a message bound for me by trying to decrypt it with my private keys.
    for key, cryptorObject in myECAddressHashes.items():
        try:
            data = cryptorObject.decrypt(self.data[readPosition:self.payloadLength+24])
            toRipe = key #This is the RIPE hash of my pubkeys. We need this below to compare to the destination_ripe included in the encrypted data.
            initialDecryptionSuccessful = True
            print 'EC decryption successful using key associated with ripe hash:', key.encode('hex')
            break
        except Exception, err:
            #Expected for every key that isn't the right one; just try the next key.
            pass
            #print 'cryptorObject.decrypt Exception:', err
    if not initialDecryptionSuccessful:
        #This is not a message bound for me.
        printLock.acquire()
        print 'Length of time program spent failing to decrypt this message:', time.time()- self.messageProcessingStartTime, 'seconds.'
        printLock.release()
    else:
        #This is a message bound for me.  From here on, readPosition indexes
        #into the decrypted plaintext ('data'), not into self.data.
        readPosition = 0
        messageVersion, messageVersionLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += messageVersionLength
        if messageVersion != 1:
            print 'Cannot understand message versions other than one. Ignoring message.'
            return
        sendersAddressVersionNumber, sendersAddressVersionNumberLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += sendersAddressVersionNumberLength
        if sendersAddressVersionNumber == 0:
            print 'Cannot understand sendersAddressVersionNumber = 0. Ignoring message.'
            return
        if sendersAddressVersionNumber >= 3:
            print 'Sender\'s address version number', sendersAddressVersionNumber, ' not yet supported. Ignoring message.'
            return
        if len(data) < 170:
            print 'Length of the unencrypted data is unreasonably short. Sanity check failed. Ignoring message.'
            return
        sendersStreamNumber, sendersStreamNumberLength = decodeVarint(data[readPosition:readPosition+10])
        if sendersStreamNumber == 0:
            print 'sender\'s stream number is 0. Ignoring message.'
            return
        readPosition += sendersStreamNumberLength
        behaviorBitfield = data[readPosition:readPosition+4]
        readPosition += 4
        #The embedded keys omit the 0x04 uncompressed-point prefix; restore it.
        pubSigningKey = '\x04' + data[readPosition:readPosition+64]
        readPosition += 64
        pubEncryptionKey = '\x04' + data[readPosition:readPosition+64]
        readPosition += 64
        endOfThePublicKeyPosition = readPosition #needed for when we store the pubkey in our database of pubkeys for later use.
        if toRipe != data[readPosition:readPosition+20]:
            printLock.acquire()
            print 'The original sender of this message did not send it to you. Someone is attempting a Surreptitious Forwarding Attack.'
            print 'See: http://tools.ietf.org/html/draft-ietf-smime-sender-auth-00'
            print 'your toRipe:', toRipe.encode('hex')
            print 'embedded destination toRipe:', data[readPosition:readPosition+20].encode('hex')
            printLock.release()
            return
        readPosition += 20
        messageEncodingType, messageEncodingTypeLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += messageEncodingTypeLength
        messageLength, messageLengthLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += messageLengthLength
        message = data[readPosition:readPosition+messageLength]
        #print 'First 150 characters of message:', repr(message[:150])
        readPosition += messageLength
        ackLength, ackLengthLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += ackLengthLength
        ackData = data[readPosition:readPosition+ackLength]
        readPosition += ackLength
        positionOfBottomOfAckData = readPosition #needed to mark the end of what is covered by the signature
        signatureLength, signatureLengthLength = decodeVarint(data[readPosition:readPosition+10])
        readPosition += signatureLengthLength
        signature = data[readPosition:readPosition+signatureLength]
        #The signature covers everything from the message version through the ackData.
        try:
            highlevelcrypto.verify(data[:positionOfBottomOfAckData],signature,pubSigningKey.encode('hex'))
            print 'ECDSA verify passed'
        except Exception, err:
            print 'ECDSA verify failed', err
            return
        printLock.acquire()
        print 'As a matter of intellectual curiosity, here is the Bitcoin address associated with the keys owned by the other person:', calculateBitcoinAddressFromPubkey(pubSigningKey), ' ..and here is the testnet address:',calculateTestnetAddressFromPubkey(pubSigningKey),'. The other person must take their private signing key from Bitmessage and import it into Bitcoin (or a service like Blockchain.info) for it to be of any use. Do not use this unless you know what you are doing.'
        printLock.release()
        #calculate the fromRipe.
        sha = hashlib.new('sha512')
        sha.update(pubSigningKey+pubEncryptionKey)
        ripe = hashlib.new('ripemd160')
        ripe.update(sha.digest())
        #Let's store the public key in case we want to reply to this person.
        #We don't have the correct nonce or time (which would let us send out a pubkey message) so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed to not do.)
        t = (ripe.digest(),False,'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'+'\xFF\xFF\xFF\xFF'+data[messageVersionLength:endOfThePublicKeyPosition],int(time.time()),'yes')
        sqlLock.acquire()
        sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()
        sqlLock.release()
        blockMessage = False #Gets set to True if the user shouldn't see the message according to black or white lists.
        fromAddress = encodeAddress(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest())
        if config.get('bitmessagesettings', 'blackwhitelist') == 'black': #If we are using a blacklist
            t = (fromAddress,)
            sqlLock.acquire()
            sqlSubmitQueue.put('''SELECT label, enabled FROM blacklist where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            for row in queryreturn:
                label, enabled = row
                if enabled:
                    print 'Message ignored because address is in blacklist.'
                    blockMessage = True
        else: #We're using a whitelist
            t = (fromAddress,)
            sqlLock.acquire()
            sqlSubmitQueue.put('''SELECT label, enabled FROM whitelist where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn == []:
                print 'Message ignored because address not in whitelist.'
                blockMessage = True
            for row in queryreturn: #It could be in the whitelist but disabled. Let's check.
                label, enabled = row
                if not enabled:
                    print 'Message ignored because address in whitelist but not enabled.'
                    blockMessage = True
        if not blockMessage:
            print 'fromAddress:', fromAddress
            print 'First 150 characters of message:', repr(message[:150])
            #Look up the destination address (my address) based on the destination ripe hash.
            #I realize that I could have a data structure devoted to this task, or maintain an indexed table
            #in the sql database, but I would prefer to minimize the number of data structures this program
            #uses. Searching linearly through the user's short list of addresses doesn't take very long anyway.
            configSections = config.sections()
            for addressInKeysFile in configSections:
                if addressInKeysFile <> 'bitmessagesettings':
                    status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
                    #'key' is the ripe hash whose cryptor decrypted the message (set in the loop above).
                    #NOTE(review): if no config section matches, toAddress stays unbound and the
                    #inbox INSERT below would raise NameError — presumably a match always exists
                    #because decryption succeeded with one of our keys; confirm.
                    if hash == key:
                        toAddress = addressInKeysFile
                        toLabel = config.get(addressInKeysFile, 'label')
                        if toLabel == '':
                            toLabel = addressInKeysFile
            if messageEncodingType == 2:
                #Encoding 2: 'Subject:<subj>\nBody:<body>' — split it apart.
                bodyPositionIndex = string.find(message,'\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex+6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                #Encoding 1: trivial — whole message is the body.
                body = message
                subject = ''
            elif messageEncodingType == 0:
                print 'messageEncodingType == 0. Doing nothing with the message. They probably just sent it so that we would store their public key or send their ack data for them.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''
            print 'within processmsg, self.inventoryHash is', self.inventoryHash.encode('hex')
            if messageEncodingType <> 0:
                sqlLock.acquire()
                t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
            #Now let's consider sending the acknowledgement. We'll need to make sure that our client will properly process the ackData; if the packet is malformed, we could clear out self.data and an attacker could use that behavior to determine that we were capable of decoding this message.
            ackDataValidThusFar = True
            if len(ackData) < 24:
                print 'The length of ackData is unreasonably short. Not sending ackData.'
                ackDataValidThusFar = False
            elif ackData[0:4] != '\xe9\xbe\xb4\xd9':
                print 'Ackdata magic bytes were wrong. Not sending ackData.'
                ackDataValidThusFar = False
            if ackDataValidThusFar:
                #The 4-byte big-endian length at offset 16 excludes the 24-byte protocol header.
                ackDataPayloadLength, = unpack('>L',ackData[16:20])
                if len(ackData)-24 != ackDataPayloadLength:
                    print 'ackData payload length doesn\'t match the payload length specified in the header. Not sending ackdata.'
                    ackDataValidThusFar = False
            if ackDataValidThusFar:
                print 'ackData is valid. Will process it.'
                self.ackDataThatWeHaveYetToSend.append(ackData) #When we have processed all data, the processData function will pop the ackData out and process it as if it is a message received from our peer.
        #Display timing data
        timeRequiredToAttemptToDecryptMessage = time.time()- self.messageProcessingStartTime
        successfullyDecryptMessageTimings.append(timeRequiredToAttemptToDecryptMessage)
        sum = 0 #NOTE: shadows the builtin sum(); left unchanged to keep bytes identical.
        for item in successfullyDecryptMessageTimings:
            sum += item
        printLock.acquire()
        print 'Time to decrypt this message successfully:', timeRequiredToAttemptToDecryptMessage
        print 'Average time for all message decryption successes since startup:', sum / len(successfullyDecryptMessageTimings)
        printLock.release()
#We have received a pubkey
def recpubkey(self):
    """Handle an incoming 'pubkey' message: validate POW, embedded time,
    and stream; store it in the inventory; forward it to peers; then hand
    it to processpubkey().  Finishes with a small sleep so that total
    processing time is constant-ish (timing-attack mitigation).
    """
    self.pubkeyProcessingStartTime = time.time()
    if self.payloadLength < 146: #sanity check
        return
    #We must check to make sure the proof of work is sufficient.
    if not self.isProofOfWorkSufficient():
        print 'Proof of work in pubkey message insufficient.'
        return
    readPosition = 24 #for the message header
    readPosition += 8 #for the nonce
    embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
    if embeddedTime < int(time.time())-lengthOfTimeToHoldOnToAllPubkeys-86400: #If the pubkey is more than a month old then reject it. (the 86400 is included to give an extra day of wiggle-room. If the wiggle-room is actually of any use, everyone on the network will delete this pubkey from their database the next time the cleanerThread cleans anyway- except for the node that actually wants the pubkey.)
        printLock.acquire()
        print 'The embedded time in this pubkey message is too old. Ignoring.'
        printLock.release()
        return
    if embeddedTime > int(time.time()) + 10800:
        #10800 seconds = 3 hours of allowed clock skew into the future.
        printLock.acquire()
        print 'The embedded time in this pubkey message more than several hours in the future. This is irrational. Ignoring message.'
        printLock.release()
        return
    readPosition += 4 #for the time
    addressVersion, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
    readPosition += varintLength
    streamNumber, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
    readPosition += varintLength
    if self.streamNumber != streamNumber:
        print 'stream number embedded in this pubkey doesn\'t match our stream number. Ignoring.'
        return
    inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
    inventoryLock.acquire()
    if inventoryHash in inventory:
        print 'We have already received this pubkey. Ignoring it.'
        inventoryLock.release()
        return
    elif isInSqlInventory(inventoryHash):
        print 'We have already received this pubkey (it is stored on disk in the SQL inventory). Ignoring it.'
        inventoryLock.release()
        return
    objectType = 'pubkey'
    inventory[inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], int(time.time()))
    inventoryLock.release()
    #Valid and new: advertise to our peers and process the key material.
    self.broadcastinv(inventoryHash)
    self.emit(SIGNAL("incrementNumberOfPubkeysProcessed()"))
    self.processpubkey()
    #Pad total processing time to a fixed floor so an observer can't tell
    #how much work we actually did with this pubkey.
    lengthOfTimeWeShouldUseToProcessThisMessage = .2
    sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.pubkeyProcessingStartTime)
    if sleepTime > 0:
        #printLock.acquire()
        #print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
        #printLock.release()
        time.sleep(sleepTime)
    #printLock.acquire()
    #print 'Total pubkey processing time:', time.time()- self.pubkeyProcessingStartTime, 'seconds.'
    #printLock.release()
def processpubkey(self):
    """Parse a validated 'pubkey' message and store the key in the
    pubkeys table.  Supports address version 2 (EC keys) and the
    legacy version 1 (RSA; slated for removal).  If we had personally
    used this pubkey before, the worker thread is notified so queued
    outgoing messages can proceed.
    """
    readPosition = 24 #for the message header
    readPosition += 8 #for the nonce
    embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
    readPosition += 4 #for the time
    addressVersion, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
    readPosition += varintLength
    streamNumber, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
    readPosition += varintLength
    if addressVersion == 0:
        print '(Within processpubkey) addressVersion of 0 doesn\'t make sense.'
        return
    if addressVersion >= 3:
        printLock.acquire()
        print 'This version of Bitmessage cannot handle version', addressVersion,'addresses.'
        printLock.release()
        return
    if addressVersion == 2:
        if self.payloadLength < 146: #sanity check. This is the minimum possible length.
            print 'payloadLength less than 146. Sanity check failed.'
            return
        bitfieldBehaviors = self.data[readPosition:readPosition+4]
        readPosition += 4
        publicSigningKey = self.data[readPosition:readPosition+64]
        #Is it possible for a public key to be invalid such that trying to encrypt or sign with it will cause an error? If it is, we should probably test these keys here.
        readPosition += 64
        publicEncryptionKey = self.data[readPosition:readPosition+64]
        if len(publicEncryptionKey) < 64:
            print 'publicEncryptionKey length less than 64. Sanity check failed.'
            return
        #The address's RIPE hash is RIPEMD160(SHA512(0x04||signKey || 0x04||encKey)).
        sha = hashlib.new('sha512')
        sha.update('\x04'+publicSigningKey+'\x04'+publicEncryptionKey)
        ripeHasher = hashlib.new('ripemd160')
        ripeHasher.update(sha.digest())
        ripe = ripeHasher.digest()
        printLock.acquire()
        print 'within recpubkey, addressVersion:', addressVersion, ', streamNumber:', streamNumber
        print 'ripe', ripe.encode('hex')
        print 'publicSigningKey in hex:', publicSigningKey.encode('hex')
        print 'publicEncryptionKey in hex:', publicEncryptionKey.encode('hex')
        printLock.release()
        t = (ripe,)
        sqlLock.acquire()
        sqlSubmitQueue.put('''SELECT usedpersonally FROM pubkeys WHERE hash=? AND usedpersonally='yes' ''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        if queryreturn != []: #if this pubkey is already in our database and if we have used it personally:
            print 'We HAVE used this pubkey personally. Updating time.'
            t = (ripe,True,self.data[24:24+self.payloadLength],embeddedTime,'yes')
            sqlLock.acquire()
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            printLock.acquire()
            print 'added foreign pubkey into our database'
            printLock.release()
            workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
        else:
            print 'We have NOT used this pubkey personally. Inserting in database.'
            t = (ripe,True,self.data[24:24+self.payloadLength],embeddedTime,'no')
            sqlLock.acquire()
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            printLock.acquire()
            print 'added foreign pubkey into our database'
            printLock.release()
            workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
    #This code which deals with old RSA addresses will soon be removed.
    elif addressVersion == 1:
        nLength, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
        readPosition += varintLength
        nString = self.data[readPosition:readPosition+nLength]
        readPosition += nLength
        eLength, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
        readPosition += varintLength
        eString = self.data[readPosition:readPosition+eLength]
        readPosition += eLength
        sha = hashlib.new('sha512')
        sha.update(nString+eString)
        ripeHasher = hashlib.new('ripemd160')
        ripeHasher.update(sha.digest())
        ripe = ripeHasher.digest()
        print 'within recpubkey, addressVersion', addressVersion
        print 'streamNumber', streamNumber
        print 'ripe', repr(ripe)
        print 'n=', convertStringToInt(nString)
        print 'e=', convertStringToInt(eString)
        t = (ripe,)
        sqlLock.acquire()
        sqlSubmitQueue.put('''SELECT usedpersonally FROM pubkeys WHERE hash=? AND usedpersonally='yes' ''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        if queryreturn != []: #if this pubkey is already in our database and if we have used it personally:
            print 'We HAVE used this pubkey personally. Updating time.'
            #NOTE(review): this v1 branch stores int(time.time()) while the v2
            #branch above stores the embeddedTime from the message — confirm
            #whether this difference is intentional.
            t = (ripe,True,self.data[24:24+self.payloadLength],int(time.time()),'yes')
            sqlLock.acquire()
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            printLock.acquire()
            print 'added foreign pubkey into our database'
            printLock.release()
            workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
        else:
            print 'We have NOT used this pubkey personally. Inserting in database.'
            t = (ripe,True,self.data[24:24+self.payloadLength],int(time.time()),'no')
            sqlLock.acquire()
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            printLock.acquire()
            print 'added foreign pubkey into our database'
            printLock.release()
            workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
#We have received a getpubkey message
def recgetpubkey(self):
    """Handle an incoming 'getpubkey' request: validate POW, time, and
    stream; record it in the inventory and forward it; then answer it
    either from our pubkeys database or, if the requested hash is one of
    our own addresses, by asking the worker thread to publish our pubkey.
    """
    if not self.isProofOfWorkSufficient():
        print 'Proof of work in getpubkey message insufficient.'
        return
    embeddedTime, = unpack('>I',self.data[32:36])
    if embeddedTime > int(time.time())+10800:
        print 'The time in this getpubkey message is too new. Ignoring it. Time:', embeddedTime
        return
    if embeddedTime < int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept:
        print 'The time in this getpubkey message is too old. Ignoring it. Time:', embeddedTime
        return
    addressVersionNumber, addressVersionLength = decodeVarint(self.data[36:42])
    streamNumber, streamNumberLength = decodeVarint(self.data[36+addressVersionLength:42+addressVersionLength])
    if streamNumber <> self.streamNumber:
        print 'The streamNumber', streamNumber, 'doesn\'t match our stream number:', self.streamNumber
        return
    inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
    inventoryLock.acquire()
    if inventoryHash in inventory:
        print 'We have already received this getpubkey request. Ignoring it.'
        inventoryLock.release()
        return
    elif isInSqlInventory(inventoryHash):
        print 'We have already received this getpubkey request (it is stored on disk in the SQL inventory). Ignoring it.'
        inventoryLock.release()
        return
    #The sender obviously already has this object; don't advertise it back.
    self.objectsOfWhichThisRemoteNodeIsAlreadyAware[inventoryHash] = 0
    objectType = 'getpubkey'
    inventory[inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
    inventoryLock.release()
    #This getpubkey request is valid so far. Forward to peers.
    self.broadcastinv(inventoryHash)
    if addressVersionNumber == 0:
        print 'The addressVersionNumber of the pubkey request is zero. That doesn\'t make any sense. Ignoring it.'
        return
    elif addressVersionNumber > 2:
        print 'The addressVersionNumber of the pubkey request is too high. Can\'t understand. Ignoring it.'
        return
    print 'the hash requested in this getpubkey request is:', self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength].encode('hex')
    sqlLock.acquire()
    t = (self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength],int(time.time())-lengthOfTimeToHoldOnToAllPubkeys) #this prevents SQL injection
    sqlSubmitQueue.put('''SELECT hash, transmitdata, time FROM pubkeys WHERE hash=? AND havecorrectnonce=1 AND time>?''')
    sqlSubmitQueue.put(t)
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    if queryreturn != []:
        for row in queryreturn:
            hash, payload, timeEncodedInPubkey = row
            printLock.acquire()
            print 'We have the requested pubkey stored in our database of pubkeys. Sending it.'
            printLock.release()
            inventoryHash = calculateInventoryHash(payload)
            objectType = 'pubkey'
            inventory[inventoryHash] = (objectType, self.streamNumber, payload, timeEncodedInPubkey)#If the time embedded in this pubkey is more than 3 days old then this object isn't going to last very long in the inventory- the cleanerThread is going to come along and move it from the inventory in memory to the SQL inventory and then delete it from the SQL inventory. It should still find its way back to the original requestor if he is online however.
            self.broadcastinv(inventoryHash)
    else: #the pubkey is not in our database of pubkeys. Let's check if the requested key is ours (which would mean we should do the POW, put it in the pubkey table, and broadcast out the pubkey.)
        if self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength] in myECAddressHashes: #if this address hash is one of mine
            printLock.acquire()
            print 'Found getpubkey-requested-hash in my list of EC hashes. Telling Worker thread to do the POW for a pubkey message and send it out.'
            printLock.release()
            myAddress = encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength])
            workerQueue.put(('doPOWForMyV2Pubkey',myAddress))
            #This code which deals with old RSA addresses will soon be removed.
            """elif self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength] in myRSAAddressHashes:
            print 'Found getpubkey requested hash in my list of RSA hashes.'
            payload = '\x00\x00\x00\x01' #bitfield of features supported by me (see the wiki).
            payload += self.data[36:36+addressVersionLength+streamNumberLength]
            #print int(config.get(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'n'))
            nString = convertIntToString(int(config.get(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'n')))
            eString = convertIntToString(config.getint(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'e'))
            payload += encodeVarint(len(nString))
            payload += nString
            payload += encodeVarint(len(eString))
            payload += eString
            nonce = 0
            trialValue = 99999999999999999999
            target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
            print '(For pubkey message) Doing proof of work...'
            initialHash = hashlib.sha512(payload).digest()
            while trialValue > target:
                nonce += 1
                trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
            print '(For pubkey message) Found proof of work', trialValue, 'Nonce:', nonce
            payload = pack('>Q',nonce) + payload
            t = (self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength],True,payload,int(time.time())+1209600) #after two weeks (1,209,600 seconds), we may remove our own pub key from our database. It will be regenerated and put back in the database if it is requested.
            sqlLock.acquire()
            #** pubkeys insert query not yet fixed! **
            sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?)''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            inventoryHash = calculateInventoryHash(payload)
            objectType = 'pubkey'
            inventory[inventoryHash] = (objectType, self.streamNumber, payload, int(time.time()))
            self.broadcastinv(inventoryHash) """
        else:
            printLock.acquire()
            print 'This getpubkey request is not for any of my keys.'
            printLock.release()
#We have received an inv message
def recinv(self):
numberOfItemsInInv, lengthOfVarint = decodeVarint(self.data[24:34])
if numberOfItemsInInv == 1: #we'll just request this data from the person who advertised the object.
for i in range(numberOfItemsInInv):
if len(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]) == 32: #The length of an inventory hash should be 32. If it isn't 32 then the remote node is either badly programmed or behaving nefariously.
self.objectsOfWhichThisRemoteNodeIsAlreadyAware[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
if self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)] in inventory:
printLock.acquire()
print 'Inventory (in memory) has inventory item already.'
printLock.release()
elif isInSqlInventory(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]):
print 'Inventory (SQL on disk) has inventory item already.'
else:
self.sendgetdata(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)])
else:
print 'inv message lists', numberOfItemsInInv, 'objects.'
for i in range(numberOfItemsInInv): #upon finishing dealing with an incoming message, the receiveDataThread will request a random object from the peer. This way if we get multiple inv messages from multiple peers which list mostly the same objects, we will make getdata requests for different random objects from the various peers.
if len(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]) == 32: #The length of an inventory hash should be 32. If it isn't 32 then the remote node is either badly programmed or behaving nefariously.
self.objectsOfWhichThisRemoteNodeIsAlreadyAware[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
#Send a getdata message to our peer to request the object with the given hash
def sendgetdata(self,hash):
print 'sending getdata to retrieve object with hash:', hash.encode('hex')
payload = '\x01' + hash
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getdata\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
#We have received a getdata request from our peer
def recgetdata(self):
value, lengthOfVarint = decodeVarint(self.data[24:34])
#print 'Number of items in getdata request:', value
try:
for i in xrange(value):
hash = self.data[24+lengthOfVarint+(i*32):56+lengthOfVarint+(i*32)]
printLock.acquire()
print 'received getdata request for item:', hash.encode('hex')
printLock.release()
#print 'inventory is', inventory
if hash in inventory:
objectType, streamNumber, payload, receivedTime = inventory[hash]
self.sendData(objectType,payload)
else:
t = (hash,)
sqlLock.acquire()
sqlSubmitQueue.put('''select objecttype, payload from inventory where hash=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
objectType, payload = row
self.sendData(objectType,payload)
else:
print 'Someone asked for an object with a getdata which is not in either our memory inventory or our SQL inventory. That shouldn\'t have happened.'
except:
pass #someone is probably trying to cause a program error by, for example, making a request for 10 items but only including the hashes for 5.
#Our peer has requested (in a getdata message) that we send an object.
def sendData(self,objectType,payload):
if objectType == 'pubkey':
print 'sending pubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'pubkey\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'getpubkey':
print 'sending getpubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getpubkey\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'msg':
print 'sending msg'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'broadcast':
print 'sending broadcast'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'broadcast\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'getpubkey' or objectType == 'pubkeyrequest':
print 'sending getpubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getpubkey\x00\x00\x00' #version command
headerData += pack('>L',len(payload)) #payload length
headerData += hashlib.sha512(payload).digest()[0:4]
self.sock.send(headerData + payload)
else:
sys.stderr.write('Error: sendData has been asked to send a strange objectType: %s\n' % str(objectType))
#Send an inv message with just one hash to all of our peers
def broadcastinv(self,hash):
printLock.acquire()
print 'broadcasting inv with hash:', hash.encode('hex')
printLock.release()
broadcastToSendDataQueues((self.streamNumber, 'sendinv', hash))
    #We have received an addr message.
    def recaddr(self):
        """Parse an addr message from self.data, merge new IPv4 nodes into
        knownNodes, persist the list to disk if anything was added, and
        relay the newly learned addresses to our peers.

        Offset arithmetic below: the 24-byte message header is followed by
        a varint address count; each address record is 34 bytes long.
        """
        listOfAddressDetailsToBroadcastToPeers = []
        numberOfAddressesIncluded = 0
        numberOfAddressesIncluded, lengthOfNumberOfAddresses = decodeVarint(self.data[24:29])
        if verbose >= 1:
            print 'addr message contains', numberOfAddressesIncluded, 'IP addresses.'
        #print 'lengthOfNumberOfAddresses', lengthOfNumberOfAddresses
        #Cap the work a peer can cause us: oversized addr messages are dropped.
        if numberOfAddressesIncluded > 1000:
            return
        needToWriteKnownNodesToDisk = False
        for i in range(0,numberOfAddressesIncluded):
            try:
                #Bytes 40-51 of each record are the IPv6 prefix; anything other
                #than the IPv4-mapped prefix (::ffff:a.b.c.d) is skipped.
                if self.data[40+lengthOfNumberOfAddresses+(34*i):52+lengthOfNumberOfAddresses+(34*i)] != '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
                    printLock.acquire()
                    print 'Skipping IPv6 address.', repr(self.data[40+lengthOfNumberOfAddresses+(34*i):56+lengthOfNumberOfAddresses+(34*i)])
                    printLock.release()
                    continue
                #print repr(self.data[6+lengthOfNumberOfAddresses+(34*i):18+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (to test for an IPv6 address). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            try:
                recaddrStream, = unpack('>I',self.data[28+lengthOfNumberOfAddresses+(34*i):32+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrStream). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            try:
                recaddrServices, = unpack('>Q',self.data[32+lengthOfNumberOfAddresses+(34*i):40+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrServices). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            try:
                recaddrPort, = unpack('>H',self.data[56+lengthOfNumberOfAddresses+(34*i):58+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrPort). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            #print 'Within recaddr(): IP', recaddrIP, ', Port', recaddrPort, ', i', i
            #The last 4 bytes of the 16-byte IP field hold the IPv4 address.
            hostFromAddrMessage = socket.inet_ntoa(self.data[52+lengthOfNumberOfAddresses+(34*i):56+lengthOfNumberOfAddresses+(34*i)])
            #print 'hostFromAddrMessage', hostFromAddrMessage
            if hostFromAddrMessage == '127.0.0.1':
                continue
            timeSomeoneElseReceivedMessageFromThisNode, = unpack('>I',self.data[24+lengthOfNumberOfAddresses+(34*i):28+lengthOfNumberOfAddresses+(34*i)]) #This is the 'time' value in the received addr message.
            if hostFromAddrMessage not in knownNodes[recaddrStream]:
                #Only accept nodes last seen within +/- 3 hours of now, and cap the table at 20000 entries.
                if len(knownNodes[recaddrStream]) < 20000 and timeSomeoneElseReceivedMessageFromThisNode > (int(time.time())-10800) and timeSomeoneElseReceivedMessageFromThisNode < (int(time.time()) + 10800): #If we have more than 20000 nodes in our list already then just forget about adding more. Also, make sure that the time that someone else received a message from this node is within three hours from now.
                    knownNodes[recaddrStream][hostFromAddrMessage] = (recaddrPort, timeSomeoneElseReceivedMessageFromThisNode)
                    print 'added new node', hostFromAddrMessage, 'to knownNodes.'
                    needToWriteKnownNodesToDisk = True
                    hostDetails = (timeSomeoneElseReceivedMessageFromThisNode, recaddrStream, recaddrServices, hostFromAddrMessage, recaddrPort)
                    listOfAddressDetailsToBroadcastToPeers.append(hostDetails)
            else:
                #Known node: refresh its last-seen time if the advertised time is newer but not in the future.
                PORT, timeLastReceivedMessageFromThisNode = knownNodes[recaddrStream][hostFromAddrMessage]#PORT in this case is either the port we used to connect to the remote node, or the port that was specified by someone else in a past addr message.
                if (timeLastReceivedMessageFromThisNode < timeSomeoneElseReceivedMessageFromThisNode) and (timeSomeoneElseReceivedMessageFromThisNode < int(time.time())):
                    knownNodes[recaddrStream][hostFromAddrMessage] = (PORT, timeSomeoneElseReceivedMessageFromThisNode)
                    if PORT != recaddrPort:
                        print 'Strange occurance: The port specified in an addr message', str(recaddrPort),'does not match the port',str(PORT),'that this program (or some other peer) used to connect to it',str(hostFromAddrMessage),'. Perhaps they changed their port or are using a strange NAT configuration.'
        if needToWriteKnownNodesToDisk: #Runs if any nodes were new to us. Also, share those nodes with our peers.
            output = open(appdata + 'knownnodes.dat', 'wb')
            pickle.dump(knownNodes, output)
            output.close()
            self.broadcastaddr(listOfAddressDetailsToBroadcastToPeers)
        print 'knownNodes currently has', len(knownNodes[recaddrStream]), 'nodes for this stream.'
#Function runs when we want to broadcast an addr message to all of our peers. Runs when we learn of nodes that we didn't previously know about and want to share them with our peers.
def broadcastaddr(self,listOfAddressDetailsToBroadcastToPeers):
numberOfAddressesInAddrMessage = len(listOfAddressDetailsToBroadcastToPeers)
payload = ''
for hostDetails in listOfAddressDetailsToBroadcastToPeers:
timeLastReceivedMessageFromThisNode, streamNumber, services, host, port = hostDetails
payload += pack('>I',timeLastReceivedMessageFromThisNode)
payload += pack('>I',streamNumber)
payload += pack('>q',services) #service bit flags offered by this node
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(host)
payload += pack('>H',port)#remote port
payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
datatosend = '\xE9\xBE\xB4\xD9addr\x00\x00\x00\x00\x00\x00\x00\x00'
datatosend = datatosend + pack('>L',len(payload)) #payload length
datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
datatosend = datatosend + payload
if verbose >= 2:
printLock.acquire()
print 'Broadcasting addr with', numberOfAddressesInAddrMessage, 'entries.'
printLock.release()
broadcastToSendDataQueues((self.streamNumber, 'sendaddr', datatosend))
    #Send a big addr message to our peer
    def sendaddr(self):
        """Share up to 1000 known node addresses with our peer: up to 500
        sampled from our own stream and up to 250 from each child stream,
        skipping entries older than maximumAgeOfNodesThatIAdvertiseToOthers.

        NOTE(review): random.sample is called 500/250 times with k=1, so
        the same host can be drawn repeatedly; the dict assignment simply
        overwrites duplicates, so fewer unique addrs may be sent.
        """
        addrsInMyStream = {}
        addrsInChildStreamLeft = {}
        addrsInChildStreamRight = {}
        #print 'knownNodes', knownNodes
        #We are going to share a maximum number of 1000 addrs with our peer. 500 from this stream, 250 from the left child stream, and 250 from the right child stream.
        if len(knownNodes[self.streamNumber]) > 0:
            for i in range(500):
                random.seed()
                HOST, = random.sample(knownNodes[self.streamNumber], 1)
                addrsInMyStream[HOST] = knownNodes[self.streamNumber][HOST]
        #Child streams are streamNumber*2 and streamNumber*2+1.
        #NOTE(review): assumes knownNodes already holds entries for both
        #child-stream keys — confirm against knownNodes initialization.
        if len(knownNodes[self.streamNumber*2]) > 0:
            for i in range(250):
                random.seed()
                HOST, = random.sample(knownNodes[self.streamNumber*2], 1)
                addrsInChildStreamLeft[HOST] = knownNodes[self.streamNumber*2][HOST]
        if len(knownNodes[(self.streamNumber*2)+1]) > 0:
            for i in range(250):
                random.seed()
                HOST, = random.sample(knownNodes[(self.streamNumber*2)+1], 1)
                addrsInChildStreamRight[HOST] = knownNodes[(self.streamNumber*2)+1][HOST]
        numberOfAddressesInAddrMessage = 0
        payload = ''
        print 'addrsInMyStream.items()', addrsInMyStream.items()
        for HOST, value in addrsInMyStream.items():
            PORT, timeLastReceivedMessageFromThisNode = value
            if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
                numberOfAddressesInAddrMessage += 1
                payload += pack('>I',timeLastReceivedMessageFromThisNode)
                payload += pack('>I',self.streamNumber)
                payload += pack('>q',1) #service bit flags offered by this node
                payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
                payload += pack('>H',PORT)#remote port
        for HOST, value in addrsInChildStreamLeft.items():
            PORT, timeLastReceivedMessageFromThisNode = value
            if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
                numberOfAddressesInAddrMessage += 1
                payload += pack('>I',timeLastReceivedMessageFromThisNode)
                payload += pack('>I',self.streamNumber*2)
                payload += pack('>q',1) #service bit flags offered by this node
                payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
                payload += pack('>H',PORT)#remote port
        for HOST, value in addrsInChildStreamRight.items():
            PORT, timeLastReceivedMessageFromThisNode = value
            if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
                numberOfAddressesInAddrMessage += 1
                payload += pack('>I',timeLastReceivedMessageFromThisNode)
                payload += pack('>I',(self.streamNumber*2)+1)
                payload += pack('>q',1) #service bit flags offered by this node
                payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
                payload += pack('>H',PORT)#remote port
        payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
        #24-byte header: magic, 'addr' command zero-padded, length, checksum.
        datatosend = '\xE9\xBE\xB4\xD9addr\x00\x00\x00\x00\x00\x00\x00\x00'
        datatosend = datatosend + pack('>L',len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload
        if verbose >= 2:
            printLock.acquire()
            print 'Sending addr with', numberOfAddressesInAddrMessage, 'entries.'
            printLock.release()
        self.sock.send(datatosend)
#We have received a version message
def recversion(self):
if self.payloadLength < 83:
#This version message is unreasonably short. Forget it.
return
elif not self.verackSent: #There is a potential exploit if we don't check to make sure that we have not already received and accepted a version message: An attacker could connect directly to us, send a msg message with the ackdata set to an invalid version message which would cause us to close the connection to the attacker thus proving that we were able to decode the message. Checking the connectionIsOrWasFullyEstablished variable would also suffice.
self.remoteProtocolVersion, = unpack('>L',self.data[24:28])
#print 'remoteProtocolVersion', self.remoteProtocolVersion
self.myExternalIP = socket.inet_ntoa(self.data[64:68])
#print 'myExternalIP', self.myExternalIP
self.remoteNodeIncomingPort, = unpack('>H',self.data[94:96])
#print 'remoteNodeIncomingPort', self.remoteNodeIncomingPort
#print 'self.data[96:104]', repr(self.data[96:104])
#print 'eightBytesOfRandomDataUsedToDetectConnectionsToSelf', repr(eightBytesOfRandomDataUsedToDetectConnectionsToSelf)
useragentLength, lengthOfUseragentVarint = decodeVarint(self.data[104:108])
readPosition = 104 + lengthOfUseragentVarint
useragent = self.data[readPosition:readPosition+useragentLength]
readPosition += useragentLength
numberOfStreamsInVersionMessage, lengthOfNumberOfStreamsInVersionMessage = decodeVarint(self.data[readPosition:])
readPosition += lengthOfNumberOfStreamsInVersionMessage
self.streamNumber, lengthOfRemoteStreamNumber = decodeVarint(self.data[readPosition:])
printLock.acquire()
print 'Remote node useragent:', useragent, ' stream number:', self.streamNumber
printLock.release()
#If this was an incoming connection, then the sendData thread doesn't know the stream. We have to set it.
if not self.initiatedConnection:
broadcastToSendDataQueues((0,'setStreamNumber',(self.HOST,self.streamNumber)))
if self.streamNumber != 1:
self.sock.close()
printLock.acquire()
print 'Closed connection to', self.HOST, 'because they are interested in stream', self.steamNumber,'.'
printLock.release()
self.data = ''
return
if self.data[96:104] == eightBytesOfRandomDataUsedToDetectConnectionsToSelf:
self.sock.close()
printLock.acquire()
print 'Closing connection to myself: ', self.HOST
printLock.release()
self.data = ''
return
knownNodes[self.streamNumber][self.HOST] = (self.remoteNodeIncomingPort, int(time.time()))
output = open(appdata + 'knownnodes.dat', 'wb')
pickle.dump(knownNodes, output)
output.close()
#I've commented out this code because it should be up to the newer node to decide whether their protocol version is incompatiable with the remote node's version.
'''if self.remoteProtocolVersion > 1:
print 'The remote node''s protocol version is too new for this program to understand. Disconnecting. It is:', self.remoteProtocolVersion
self.sock.close()
self.selfInitiatedConnectionList.remove(self)
else:'''
self.sendverack()
if self.initiatedConnection == False:
self.sendversion()
    #Sends a version message
    def sendversion(self):
        """Build and send our version message to the connected peer.

        NOTE(review): this is nearly an exact copy of
        sendDataThread.sendVersionMessage; changes made here likely need
        to be mirrored there.
        """
        global softwareVersion
        payload = ''
        payload += pack('>L',1) #protocol version.
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += pack('>q',int(time.time()))
        payload += pack('>q',1) #boolservices offered by the remote node. This data is ignored by the remote host because how could We know what Their services are without them telling us?
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(self.HOST)
        payload += pack('>H',self.PORT)#remote IPv6 and port
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L',2130706433) # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
        payload += pack('>H',config.getint('bitmessagesettings', 'port'))#my external IPv6 and port
        random.seed()
        #Nonce used to detect accidental connections to ourselves.
        payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
        userAgent = '/PyBitmessage:' + softwareVersion + '/' #Length of userAgent must be less than 253.
        payload += pack('>B',len(userAgent)) #user agent string length. If the user agent is more than 252 bytes long, this code isn't going to work.
        payload += userAgent
        payload += encodeVarint(1) #The number of streams about which I care. PyBitmessage currently only supports 1.
        payload += encodeVarint(self.streamNumber)
        datatosend = '\xe9\xbe\xb4\xd9' #magic bits, slightly different from Bitcoin's magic bits.
        datatosend = datatosend + 'version\x00\x00\x00\x00\x00' #version command
        datatosend = datatosend + pack('>L',len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload
        printLock.acquire()
        print 'Sending version message'
        printLock.release()
        self.sock.send(datatosend)
        #self.versionSent = 1
#Sends a verack message
def sendverack(self):
printLock.acquire()
print 'Sending verack'
printLock.release()
self.sock.sendall('\xE9\xBE\xB4\xD9\x76\x65\x72\x61\x63\x6B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
#cf 83 e1 35
self.verackSent = True
if self.verackReceived == True:
self.connectionFullyEstablished()
#Every connection to a peer has a sendDataThread (and also a receiveDataThread).
class sendDataThread(QThread):
    """Outbound half of a peer connection.

    Consumes (stream, command, data) tuples from its mailbox queue — fed
    globally via broadcastToSendDataQueues() — and writes the resulting
    bytes to this peer's socket. Commands handled in run(): 'shutdown',
    'setStreamNumber', 'sendaddr', 'sendinv', and 'pong'.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        #Register our mailbox globally so broadcasts reach this thread.
        self.mailbox = Queue.Queue()
        sendDataQueues.append(self.mailbox)
        self.data = ''
    def setup(self,sock,HOST,PORT,streamNumber,objectsOfWhichThisRemoteNodeIsAlreadyAware):
        """Record the socket and peer details this thread will service."""
        self.sock = sock
        self.HOST = HOST
        self.PORT = PORT
        self.streamNumber = streamNumber
        self.lastTimeISentData = int(time.time()) #If this value increases beyond five minutes ago, we'll send a pong message to keep the connection alive.
        self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware
        printLock.acquire()
        print 'The streamNumber of this sendDataThread (ID:', id(self),') at setup() is', self.streamNumber
        printLock.release()
    def sendVersionMessage(self):
        """Build and send our version message to the peer (outbound side)."""
        #Note that there is another copy of this version-sending code in the receiveData class which would need to be changed if you make changes here.
        global softwareVersion
        payload = ''
        payload += pack('>L',1) #protocol version.
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += pack('>q',int(time.time()))
        payload += pack('>q',1) #boolservices of remote connection. How can I even know this for sure? This is probably ignored by the remote host.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(self.HOST)
        payload += pack('>H',self.PORT)#remote IPv6 and port
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L',2130706433) # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
        payload += pack('>H',config.getint('bitmessagesettings', 'port'))#my external IPv6 and port
        random.seed()
        payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
        userAgent = '/PyBitmessage:' + softwareVersion + '/' #Length of userAgent must be less than 253.
        payload += pack('>B',len(userAgent)) #user agent string length. If the user agent is more than 252 bytes long, this code isn't going to work.
        payload += userAgent
        payload += encodeVarint(1) #The number of streams about which I care. PyBitmessage currently only supports 1 per connection.
        payload += encodeVarint(self.streamNumber)
        datatosend = '\xe9\xbe\xb4\xd9' #magic bits, slightly different from Bitcoin's magic bits.
        datatosend = datatosend + 'version\x00\x00\x00\x00\x00' #version command
        datatosend = datatosend + pack('>L',len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload
        printLock.acquire()
        print 'Sending version packet: ', repr(datatosend)
        printLock.release()
        self.sock.send(datatosend)
        self.versionSent = 1
    def run(self):
        """Main loop: dispatch mailbox commands until told to shut down.

        Any socket failure removes our mailbox from sendDataQueues and
        ends the thread.
        """
        while True:
            deststream,command,data = self.mailbox.get()
            #printLock.acquire()
            #print 'sendDataThread, destream:', deststream, ', Command:', command, ', ID:',id(self), ', HOST:', self.HOST
            #printLock.release()
            #Stream 0 means "all streams".
            if deststream == self.streamNumber or deststream == 0:
                if command == 'shutdown':
                    if data == self.HOST or data == 'all':
                        printLock.acquire()
                        print 'sendDataThread thread (associated with', self.HOST,') ID:',id(self), 'shutting down now.'
                        self.sock.close()
                        sendDataQueues.remove(self.mailbox)
                        print 'len of sendDataQueues', len(sendDataQueues)
                        printLock.release()
                        break
                #When you receive an incoming connection, a sendDataThread is created even though you don't yet know what stream number the remote peer is interested in. They will tell you in a version message and if you too are interested in that stream then you will continue on with the connection and will set the streamNumber of this send data thread here:
                elif command == 'setStreamNumber':
                    hostInMessage, specifiedStreamNumber = data
                    if hostInMessage == self.HOST:
                        printLock.acquire()
                        print 'setting the stream number in the sendData thread (ID:',id(self), ') to', specifiedStreamNumber
                        printLock.release()
                        self.streamNumber = specifiedStreamNumber
                elif command == 'sendaddr':
                    try:
                        #To prevent some network analysis, 'leak' the data out to our peer after waiting a random amount of time unless we have a long list of messages in our queue to send.
                        random.seed()
                        time.sleep(random.randrange(0, 10))
                        self.sock.sendall(data)
                        self.lastTimeISentData = int(time.time())
                    except:
                        print 'self.sock.sendall failed'
                        self.sock.close()
                        sendDataQueues.remove(self.mailbox)
                        print 'sendDataThread thread', self, 'ending now'
                        break
                elif command == 'sendinv':
                    #Only announce objects the remote node hasn't already told us about.
                    if data not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                        payload = '\x01' + data
                        headerData = '\xe9\xbe\xb4\xd9' #magic bits, slightly different from Bitcoin's magic bits.
                        headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                        headerData += pack('>L',len(payload))
                        headerData += hashlib.sha512(payload).digest()[:4]
                        #To prevent some network analysis, 'leak' the data out to our peer after waiting a random amount of time
                        random.seed()
                        time.sleep(random.randrange(0, 10))
                        try:
                            self.sock.sendall(headerData + payload)
                            self.lastTimeISentData = int(time.time())
                        except:
                            print 'self.sock.sendall failed'
                            self.sock.close()
                            sendDataQueues.remove(self.mailbox)
                            print 'sendDataThread thread', self, 'ending now'
                            break
                elif command == 'pong':
                    if self.lastTimeISentData < (int(time.time()) - 298):
                        #Send out a pong message to keep the connection alive.
                        printLock.acquire()
                        print 'Sending pong to', self.HOST, 'to keep connection alive.'
                        printLock.release()
                        try:
                            #Complete pre-built pong packet (header + checksum of empty payload).
                            self.sock.sendall('\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
                            self.lastTimeISentData = int(time.time())
                        except:
                            print 'self.sock.send pong failed'
                            self.sock.close()
                            sendDataQueues.remove(self.mailbox)
                            print 'sendDataThread thread', self, 'ending now'
                            break
            else:
                printLock.acquire()
                print 'sendDataThread ID:',id(self),'ignoring command', command,'because it is not in stream',deststream
                printLock.release()
#When you want to command a sendDataThread to do something, like shutdown or send some data, this function puts your data into the queues for each of the sendDataThreads. The sendDataThreads are responsible for putting their queue into (and out of) the sendDataQueues list.
def broadcastToSendDataQueues(data):
    """Deliver one (stream, command, payload) tuple to every registered
    sendDataThread mailbox."""
    for mailbox in sendDataQueues:
        mailbox.put(data)
def flushInventory():
    """Immediately move every in-memory inventory entry into the on-disk
    sql inventory table, deleting each from the dictionary as it goes."""
    #Note that the singleCleanerThread clears out the inventory dictionary from time to time, although it only clears things that have been in the dictionary for a long time. This clears the inventory dictionary Now.
    sqlLock.acquire()
    #In Python 2, items() returns a snapshot list, so deleting from the
    #dictionary while looping over it here is safe.
    for hash, storedValue in inventory.items():
        objectType, streamNumber, payload, receivedTime = storedValue
        t = (hash,objectType,streamNumber,payload,receivedTime)
        sqlSubmitQueue.put('''INSERT INTO inventory VALUES (?,?,?,?,?)''')
        sqlSubmitQueue.put(t)
        #The sql thread always puts a (possibly empty) result back; wait for it.
        sqlReturnQueue.get()
        del inventory[hash]
    sqlLock.release()
def isInSqlInventory(hash):
    """Return True if the given inventory hash already exists in the
    on-disk inventory table, False otherwise."""
    sqlLock.acquire()
    sqlSubmitQueue.put('''select hash from inventory where hash=?''')
    sqlSubmitQueue.put((hash,))
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    #An empty result list means the hash is not stored.
    return queryreturn != []
def convertIntToString(n):
    """Convert a (possibly long) non-negative integer into its big-endian
    binary string representation."""
    hexString = __builtins__.hex(n)
    #Python 2 longs carry a trailing 'L' in their hex repr; strip it.
    if hexString[-1:] == 'L':
        hexString = hexString[:-1]
    digits = hexString[2:] #drop the leading '0x'
    #hex decoding requires an even digit count; left-pad with a zero.
    if (len(digits) % 2) != 0:
        digits = '0' + digits
    return digits.decode('hex')
def convertStringToInt(s):
    """Interpret a binary string as a big-endian unsigned integer."""
    hexRepresentation = s.encode('hex')
    return int(hexRepresentation, 16)
def decodeWalletImportFormat(WIFstring):
    """Decode a base58 Wallet Import Format string into raw private key
    bytes.

    Returns "" (after writing to stderr) if the trailing 4-byte
    double-SHA256 checksum fails or the leading version byte is not 0x80.
    """
    decoded = arithmetic.changebase(WIFstring,58,256)
    privkey = decoded[:-4]
    expectedChecksum = hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]
    if decoded[-4:] != expectedChecksum:
        sys.stderr.write('Major problem! When trying to decode one of your private keys, the checksum failed. Here is the PRIVATE key: %s\n' % str(WIFstring))
        return ""
    #checksum passed; the first byte should be the 0x80 prefix
    if privkey[0] == '\x80':
        return privkey[1:]
    sys.stderr.write('Major problem! When trying to decode one of your private keys, the checksum passed but the key doesn\'t begin with hex 80. Here is the PRIVATE key: %s\n' % str(WIFstring))
    return ""
def reloadMyAddressHashes():
    """Rebuild the in-memory ripe-hash-to-key lookup tables
    (myRSAAddressHashes, myECAddressHashes) from the enabled addresses
    stored in the keys.dat config."""
    printLock.acquire()
    print 'reloading keys from keys.dat file'
    printLock.release()
    myRSAAddressHashes.clear()
    myECAddressHashes.clear()
    #myPrivateKeys.clear()
    configSections = config.sections()
    for addressInKeysFile in configSections:
        if addressInKeysFile <> 'bitmessagesettings':
            isEnabled = config.getboolean(addressInKeysFile, 'enabled')
            if isEnabled:
                status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
                if addressVersionNumber == 2:
                    #Version 2 addresses use elliptic-curve encryption keys.
                    privEncryptionKey = decodeWalletImportFormat(config.get(addressInKeysFile, 'privencryptionkey')).encode('hex') #returns a simple 32 bytes of information encoded in 64 Hex characters, or null if there was an error
                    if len(privEncryptionKey) == 64:#It is 32 bytes encoded as 64 hex characters
                        myECAddressHashes[hash] = highlevelcrypto.makeCryptor(privEncryptionKey)
                elif addressVersionNumber == 1:
                    #Version 1 addresses use RSA; rebuild the private key from its stored components.
                    n = config.getint(addressInKeysFile, 'n')
                    e = config.getint(addressInKeysFile, 'e')
                    d = config.getint(addressInKeysFile, 'd')
                    p = config.getint(addressInKeysFile, 'p')
                    q = config.getint(addressInKeysFile, 'q')
                    myRSAAddressHashes[hash] = rsa.PrivateKey(n,e,d,p,q)
#This function expects that pubkey begin with \x04
def calculateBitcoinAddressFromPubkey(pubkey):
if len(pubkey)!= 65:
print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey),'bytes long rather than 65.'
return "error"
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha256')
sha.update(pubkey)
ripe.update(sha.digest())
ripeWithProdnetPrefix = '\x00' + ripe.digest()
checksum = hashlib.sha256(hashlib.sha256(ripeWithProdnetPrefix).digest()).digest()[:4]
binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
numberOfZeroBytesOnBinaryBitcoinAddress = 0
while binaryBitcoinAddress[0] == '\x00':
numberOfZeroBytesOnBinaryBitcoinAddress += 1
binaryBitcoinAddress = binaryBitcoinAddress[1:]
base58encoded = arithmetic.changebase(binaryBitcoinAddress,256,58)
return "1"*numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded
def calculateTestnetAddressFromPubkey(pubkey):
if len(pubkey)!= 65:
print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey),'bytes long rather than 65.'
return "error"
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha256')
sha.update(pubkey)
ripe.update(sha.digest())
ripeWithProdnetPrefix = '\x6F' + ripe.digest()
checksum = hashlib.sha256(hashlib.sha256(ripeWithProdnetPrefix).digest()).digest()[:4]
binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
numberOfZeroBytesOnBinaryBitcoinAddress = 0
while binaryBitcoinAddress[0] == '\x00':
numberOfZeroBytesOnBinaryBitcoinAddress += 1
binaryBitcoinAddress = binaryBitcoinAddress[1:]
base58encoded = arithmetic.changebase(binaryBitcoinAddress,256,58)
return "1"*numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded
#This thread exists because SQLITE3 is so un-threadsafe that we must submit queries to it and it puts results back in a different queue. They won't let us just use locks.
class sqlThread(QThread):
    """Sole owner of the sqlite connection to messages.dat.

    Other threads submit (query, parameters) pairs via sqlSubmitQueue and
    read results from sqlReturnQueue, serialized by sqlLock on their side.
    On startup this thread creates the schema if absent, applies a
    settings-version migration, and sanity-checks blob storage.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        self.conn = sqlite3.connect(appdata + 'messages.dat' )
        #Return raw byte strings rather than unicode for TEXT columns.
        self.conn.text_factory = str
        self.cur = self.conn.cursor()
        try:
            self.cur.execute( '''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text, received text, message text, folder text, UNIQUE(msgid) ON CONFLICT REPLACE)''' )
            self.cur.execute( '''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, lastactiontime integer, status text, pubkeyretrynumber integer, msgretrynumber integer, folder text)''' )
            self.cur.execute( '''CREATE TABLE subscriptions (label text, address text, enabled bool)''' )
            self.cur.execute( '''CREATE TABLE addressbook (label text, address text)''' )
            self.cur.execute( '''CREATE TABLE blacklist (label text, address text, enabled bool)''' )
            self.cur.execute( '''CREATE TABLE whitelist (label text, address text, enabled bool)''' )
            #Explanation of what is in the pubkeys table:
            #   The hash is the RIPEMD160 hash that is encoded in the Bitmessage address.
            #   If you or someone else did the POW for this pubkey, then havecorrectnonce will be true. If you received the pubkey in a msg message then havecorrectnonce will be false. You won't have the correct nonce and won't be able to send the message to peers if they request the pubkey.
            #   transmitdata is literally the data that was included in the Bitmessage pubkey message when it arrived, except for the 24 byte protocol header- ie, it starts with the POW nonce.
            #   time is the time that the pubkey was broadcast on the network same as with every other type of Bitmessage object.
            #   usedpersonally is set to "yes" if we have used the key personally. This keeps us from deleting it because we may want to reply to a message in the future. This field is not a bool because we may need more flexability in the future and it doesn't take up much more space anyway.
            self.cur.execute( '''CREATE TABLE pubkeys (hash blob, havecorrectnonce bool, transmitdata blob, time blob, usedpersonally text, UNIQUE(hash, havecorrectnonce) ON CONFLICT REPLACE)''' )
            self.cur.execute( '''CREATE TABLE inventory (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE)''' )
            self.cur.execute( '''CREATE TABLE knownnodes (timelastseen int, stream int, services blob, host blob, port blob, UNIQUE(host, stream, port) ON CONFLICT REPLACE)''' ) #This table isn't used in the program yet but I have a feeling that we'll need it.
            self.cur.execute( '''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx',1)''')
            self.conn.commit()
            print 'Created messages database file'
        except Exception, err:
            if str(err) == 'table inbox already exists':
                print 'Database file already exists.'
            else:
                sys.stderr.write('ERROR trying to create database file (message.dat). Error message: %s\n' % str(err))
                sys.exit()
        #People running earlier versions of PyBitmessage do not have the usedpersonally field in their pubkeys table. Let's add it.
        if config.getint('bitmessagesettings','settingsversion') == 2:
            item = '''ALTER TABLE pubkeys ADD usedpersonally text DEFAULT 'no' '''
            parameters = ''
            self.cur.execute(item, parameters)
            self.conn.commit()
            config.set('bitmessagesettings','settingsversion','3')
            with open(appdata + 'keys.dat', 'wb') as configfile:
                config.write(configfile)
        #Sanity check: insert and read back a blob containing NUL bytes to
        #detect SQLite builds that cannot store them.
        #NOTE(review): the error text below says "exist" where "exit" is
        #presumably meant; left unchanged because it is a runtime string.
        try:
            testpayload = '\x00\x00'
            t = ('1234','True',testpayload,'12345678','no')
            self.cur.execute( '''INSERT INTO pubkeys VALUES(?,?,?,?,?)''',t)
            self.conn.commit()
            self.cur.execute('''SELECT transmitdata FROM pubkeys WHERE hash='1234' ''')
            queryreturn = self.cur.fetchall()
            for row in queryreturn:
                transmitdata, = row
            self.cur.execute('''DELETE FROM pubkeys WHERE hash='1234' ''')
            self.conn.commit()
            if transmitdata == '':
                sys.stderr.write('Problem: The version of SQLite you have cannot store Null values. Please download and install the latest revision of your version of Python (for example, the latest Python 2.7 revision) and try again.\n')
                sys.stderr.write('PyBitmessage will now exist very abruptly. You may now see threading errors related to this abrupt exit but the problem you need to solve is related to SQLite.\n\n')
                sys.exit()
        except Exception, err:
            print err
        #Main service loop: each request is two queue items (query, params);
        #the result of fetchall() is always pushed back, even for writes.
        while True:
            item = sqlSubmitQueue.get()
            parameters = sqlSubmitQueue.get()
            #print 'item', item
            #print 'parameters', parameters
            self.cur.execute(item, parameters)
            sqlReturnQueue.put(self.cur.fetchall())
            sqlSubmitQueue.task_done()
            self.conn.commit()
'''The singleCleaner class is a timer-driven thread that cleans data structures to free memory, resends messages when a remote node doesn't respond, and sends pong messages to keep connections alive if the network isn't busy.
It cleans these data structures in memory:
inventory (moves data to the on-disk sql database)
It cleans these tables on the disk:
inventory (clears data more than 2 days and 12 hours old)
pubkeys (clears pubkeys older than 4 weeks old which we have not used personally)
It resends messages when there has been no response:
resends getpubkey messages in two days (then 4 days, then 8 days, etc...)
resends msg messages in two days (then 4 days, then 8 days, etc...)
'''
class singleCleaner(QThread):
    """Timer-driven maintenance thread (see the module-level description
    above): flushes old in-memory inventory to disk every 5 minutes,
    keeps connections alive with pongs, and roughly every 2 hours prunes
    old inventory/pubkeys rows and resends unanswered getpubkey/msg
    requests with exponential backoff.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        timeWeLastClearedInventoryAndPubkeysTables = 0
        while True:
            time.sleep(300)
            sqlLock.acquire()
            #Move inventory entries older than an hour to the sql database.
            #Python 2 items() snapshots the dict, so the del below is safe.
            for hash, storedValue in inventory.items():
                objectType, streamNumber, payload, receivedTime = storedValue
                if int(time.time())- 3600 > receivedTime:
                    t = (hash,objectType,streamNumber,payload,receivedTime)
                    sqlSubmitQueue.put('''INSERT INTO inventory VALUES (?,?,?,?,?)''')
                    sqlSubmitQueue.put(t)
                    sqlReturnQueue.get()
                    del inventory[hash]
            sqlLock.release()
            broadcastToSendDataQueues((0, 'pong', 'no data')) #commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
            #Heavier cleanup, run at most once every 7380 seconds (~2 hours).
            if timeWeLastClearedInventoryAndPubkeysTables < int(time.time()) - 7380:
                timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
                #inventory (moves data from the inventory data structure to the on-disk sql database)
                sqlLock.acquire()
                #inventory (clears data more than 2 days and 12 hours old)
                t = (int(time.time())-lengthOfTimeToLeaveObjectsInInventory,)
                sqlSubmitQueue.put('''DELETE FROM inventory WHERE receivedtime<?''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                #pubkeys
                t = (int(time.time())-lengthOfTimeToHoldOnToAllPubkeys,)
                sqlSubmitQueue.put('''DELETE FROM pubkeys WHERE time<? AND usedpersonally='no' ''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                t = ()
                sqlSubmitQueue.put('''select toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber FROM sent WHERE ((status='findingpubkey' OR status='sentmessage') AND folder='sent') ''') #If the message's folder='trash' then we'll ignore it.
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                for row in queryreturn:
                    toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber = row
                    if status == 'findingpubkey':
                        #Exponential backoff: wait twice as long after each retry.
                        if int(time.time()) - lastactiontime > (maximumAgeOfAnObjectThatIAmWillingToAccept * (2 ** (pubkeyretrynumber))):
                            print 'It has been a long time and we haven\'t heard a response to our getpubkey request. Sending again.'
                            try:
                                del neededPubkeys[toripe] #We need to take this entry out of the neededPubkeys structure because the workerQueue checks to see whether the entry is already present and will not do the POW and send the message because it assumes that it has already done it recently.
                            except:
                                pass
                            workerQueue.put(('sendmessage',toaddress))
                            t = (int(time.time()),pubkeyretrynumber+1,toripe)
                            sqlSubmitQueue.put('''UPDATE sent SET lastactiontime=?, pubkeyretrynumber=? WHERE toripe=?''')
                            sqlSubmitQueue.put(t)
                            sqlReturnQueue.get()
                            #self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),toripe,'Public key requested again. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                    else:# status == sentmessage
                        if int(time.time()) - lastactiontime > (maximumAgeOfAnObjectThatIAmWillingToAccept * (2 ** (msgretrynumber))):
                            print 'It has been a long time and we haven\'t heard an acknowledgement to our msg. Sending again.'
                            t = (int(time.time()),msgretrynumber+1,'findingpubkey',ackdata)
                            sqlSubmitQueue.put('''UPDATE sent SET lastactiontime=?, msgretrynumber=?, status=? WHERE ackdata=?''')
                            sqlSubmitQueue.put(t)
                            sqlReturnQueue.get()
                            #self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Message sent again because the acknowledgement was never received. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                            workerQueue.put(('sendmessage',toaddress))
                sqlLock.release()
                #Clear the status bar in case a message has been sitting there for a while.
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"")
#This thread, of which there is only one, does the heavy lifting: calculating POWs.
class singleWorker(QThread):
    """The single heavy-lifting thread of the client: it performs every
    proof of work (POW) -- for getpubkey requests, our own pubkey
    objects, person-to-person msgs and their acknowledgements, and
    broadcasts -- then files the finished objects in the shared
    inventory and asks the networking threads to advertise them.
    Work items arrive on the global workerQueue as (command, data)
    tuples."""
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        """Resume any work left unfinished by a previous session, then
        loop forever servicing commands from workerQueue."""
        # Rebuild the neededPubkeys table from sent messages that are
        # still waiting on a recipient pubkey.
        sqlLock.acquire()
        sqlSubmitQueue.put('''SELECT toripe FROM sent WHERE (status=? AND folder='sent')''')
        sqlSubmitQueue.put(('findingpubkey',))
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toripe, = row
            neededPubkeys[toripe] = 0
        self.sendBroadcast() #just in case there are any proof of work tasks for Broadcasts that have yet to be sent.
        #Now let us see if there are any proofs of work for msg messages that we have yet to complete..
        sqlLock.acquire()
        t = ('doingpow',)
        sqlSubmitQueue.put('SELECT toripe FROM sent WHERE status=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toripe, = row
            self.sendMsg(toripe)
        # Main dispatch loop: block on workerQueue and handle one
        # command at a time.
        while True:
            command, data = workerQueue.get()
            #statusbar = 'The singleWorker thread is working on work.'
            #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
            if command == 'sendmessage':
                toAddress = data
                toStatus,toAddressVersionNumber,toStreamNumber,toRipe = decodeAddress(toAddress)
                #print 'message type', type(message)
                #print repr(message.toUtf8())
                #print str(message.toUtf8())
                # Do we already hold the recipient's pubkey?
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT * FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                #print 'queryreturn', queryreturn
                if queryreturn == []:
                    #We'll need to request the pub key because we don't have it.
                    if not toRipe in neededPubkeys:
                        neededPubkeys[toRipe] = 0
                        print 'requesting pubkey:', toRipe.encode('hex')
                        self.requestPubKey(toAddressVersionNumber,toStreamNumber,toRipe)
                    else:
                        print 'We have already requested this pubkey (the ripe hash is in neededPubkeys). We will re-request again soon.'
                        self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),toRipe,'Public key was requested earlier. Receiver must be offline. Will retry.')
                else:
                    print 'We already have the necessary public key.'
                    self.sendMsg(toRipe) #by calling this function, we are asserting that we already have the pubkey for toRipe
            elif command == 'sendbroadcast':
                print 'Within WorkerThread, processing sendbroadcast command.'
                # NOTE(review): the unpacked values are unused here;
                # sendBroadcast() re-reads pending broadcasts from the
                # sent table itself.
                fromAddress,subject,message = data
                self.sendBroadcast()
            elif command == 'doPOWForMyV2Pubkey':
                self.doPOWForMyV2Pubkey(data)
            elif command == 'newpubkey':
                # A pubkey we may have been waiting for has arrived.
                toAddressVersion,toStreamNumber,toRipe = data
                if toRipe in neededPubkeys:
                    print 'We have been awaiting the arrival of this pubkey.'
                    del neededPubkeys[toRipe]
                    self.sendMsg(toRipe)
                else:
                    print 'We don\'t need this pub key. We didn\'t ask for it. Pubkey hash:', toRipe.encode('hex')
            workerQueue.task_done()
    def doPOWForMyV2Pubkey(self,myAddress): #This function also broadcasts out the pubkey message once it is done with the POW
        """Assemble the v2 pubkey object for one of our own addresses,
        perform its proof of work, record it in the pubkeys table and
        the inventory, and advertise it to peers."""
        # NOTE(review): 'hash' shadows the builtin of the same name for
        # the remainder of this method.
        status,addressVersionNumber,streamNumber,hash = decodeAddress(myAddress)
        embeddedTime = int(time.time())+random.randrange(-300, 300) #the current time plus or minus five minutes
        payload = pack('>I',(embeddedTime))
        payload += encodeVarint(2) #Address version number
        payload += encodeVarint(streamNumber)
        payload += '\x00\x00\x00\x01' #bitfield of features supported by me (see the wiki).
        try:
            privSigningKeyBase58 = config.get(myAddress, 'privsigningkey')
            privEncryptionKeyBase58 = config.get(myAddress, 'privencryptionkey')
        except Exception, err:
            printLock.acquire()
            sys.stderr.write('Error within doPOWForMyV2Pubkey. Could not read the keys from the keys.dat file for a requested address. %s\n' % err)
            printLock.release()
            return
        # Derive the public keys from the stored WIF private keys.
        privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
        privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
        pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex')
        pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
        # The leading encoding byte (\x04) of each pubkey is not sent.
        payload += pubSigningKey[1:]
        payload += pubEncryptionKey[1:]
        #Do the POW for this pubkey message
        nonce = 0
        trialValue = 99999999999999999999
        target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
        print '(For pubkey message) Doing proof of work...'
        initialHash = hashlib.sha512(payload).digest()
        while trialValue > target:
            nonce += 1
            trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
        print '(For pubkey message) Found proof of work', trialValue, 'Nonce:', nonce
        payload = pack('>Q',nonce) + payload
        # Store our own pubkey locally, then advertise it to the network.
        t = (hash,True,payload,embeddedTime,'no')
        sqlLock.acquire()
        sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        inventoryHash = calculateInventoryHash(payload)
        objectType = 'pubkey'
        inventory[inventoryHash] = (objectType, streamNumber, payload, embeddedTime)
        printLock.acquire()
        print 'broadcasting inv with hash:', inventoryHash.encode('hex')
        printLock.release()
        broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"")
    def sendBroadcast(self):
        """Process every row in the sent table whose status is
        'broadcastpending': build the broadcast object (v2 ECC or the
        soon-to-be-removed v1 RSA format, depending on the sending
        address version), do the proof of work, advertise the object,
        and mark the row 'broadcastsent'."""
        sqlLock.acquire()
        t = ('broadcastpending',)
        sqlSubmitQueue.put('SELECT fromaddress, subject, message, ackdata FROM sent WHERE status=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            fromaddress, subject, body, ackdata = row
            status,addressVersionNumber,streamNumber,ripe = decodeAddress(fromaddress)
            if addressVersionNumber == 2:
                #We need to convert our private keys to public keys in order to include them.
                privSigningKeyBase58 = config.get(fromaddress, 'privsigningkey')
                privEncryptionKeyBase58 = config.get(fromaddress, 'privencryptionkey')
                privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
                privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
                pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex') #At this time these pubkeys are 65 bytes long because they include the encoding byte which we won't be sending in the broadcast message.
                pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
                payload = pack('>I',(int(time.time())+random.randrange(-300, 300)))#the current time plus or minus five minutes
                payload += encodeVarint(1) #broadcast version
                payload += encodeVarint(addressVersionNumber)
                payload += encodeVarint(streamNumber)
                payload += '\x00\x00\x00\x01' #behavior bitfield
                payload += pubSigningKey[1:]
                payload += pubEncryptionKey[1:]
                payload += ripe
                payload += '\x02' #message encoding type
                payload += encodeVarint(len('Subject:' + subject + '\n' + 'Body:' + body)) #Type 2 is simple UTF-8 message encoding.
                payload += 'Subject:' + subject + '\n' + 'Body:' + body
                # Sign everything assembled so far, then append the
                # signature itself.
                signature = highlevelcrypto.sign(payload,privSigningKeyHex)
                payload += encodeVarint(len(signature))
                payload += signature
                # Proof of work over the completed payload.
                nonce = 0
                trialValue = 99999999999999999999
                target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
                print '(For broadcast message) Doing proof of work...'
                initialHash = hashlib.sha512(payload).digest()
                while trialValue > target:
                    nonce += 1
                    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
                print '(For broadcast message) Found proof of work', trialValue, 'Nonce:', nonce
                payload = pack('>Q',nonce) + payload
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'broadcast'
                inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
                print 'sending inv (within sendBroadcast function)'
                broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
                self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Broadcast sent at '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                #Update the status of the message in the 'sent' table to have a 'broadcastsent' status
                sqlLock.acquire()
                t = ('broadcastsent',int(time.time()),fromaddress, subject, body,'broadcastpending')
                sqlSubmitQueue.put('UPDATE sent SET status=?, lastactiontime=? WHERE fromaddress=? AND subject=? AND message=? AND status=?')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
            elif addressVersionNumber == 1: #This whole section can be taken out soon because we aren't supporting v1 addresses for much longer.
                messageToTransmit = '\x02' #message encoding type
                messageToTransmit += encodeVarint(len('Subject:' + subject + '\n' + 'Body:' + body)) #Type 2 is simple UTF-8 message encoding.
                messageToTransmit += 'Subject:' + subject + '\n' + 'Body:' + body
                #We need the all the integers for our private key in order to sign our message, and we need our public key to send with the message.
                n = config.getint(fromaddress, 'n')
                e = config.getint(fromaddress, 'e')
                d = config.getint(fromaddress, 'd')
                p = config.getint(fromaddress, 'p')
                q = config.getint(fromaddress, 'q')
                nString = convertIntToString(n)
                eString = convertIntToString(e)
                #myPubkey = rsa.PublicKey(n,e)
                myPrivatekey = rsa.PrivateKey(n,e,d,p,q)
                #The payload of the broadcast message starts with a POW, but that will be added later.
                payload = pack('>I',(int(time.time())))
                payload += encodeVarint(1) #broadcast version
                payload += encodeVarint(addressVersionNumber)
                payload += encodeVarint(streamNumber)
                payload += ripe
                payload += encodeVarint(len(nString))
                payload += nString
                payload += encodeVarint(len(eString))
                payload += eString
                payload += messageToTransmit
                # NOTE(review): for v1 only the message body is signed,
                # not the whole payload as in the v2 branch above.
                signature = rsa.sign(messageToTransmit,myPrivatekey,'SHA-512')
                #print 'signature', signature.encode('hex')
                payload += signature
                #print 'nString', repr(nString)
                #print 'eString', repr(eString)
                nonce = 0
                trialValue = 99999999999999999999
                target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
                print '(For broadcast message) Doing proof of work...'
                initialHash = hashlib.sha512(payload).digest()
                while trialValue > target:
                    nonce += 1
                    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
                print '(For broadcast message) Found proof of work', trialValue, 'Nonce:', nonce
                payload = pack('>Q',nonce) + payload
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'broadcast'
                inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
                print 'sending inv (within sendBroadcast function)'
                broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
                self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Broadcast sent at '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                #Update the status of the message in the 'sent' table to have a 'broadcastsent' status
                sqlLock.acquire()
                t = ('broadcastsent',int(time.time()),fromaddress, subject, body,'broadcastpending')
                sqlSubmitQueue.put('UPDATE sent SET status=?, lastactiontime=? WHERE fromaddress=? AND subject=? AND message=? AND status=?')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
            else:
                printLock.acquire()
                print 'In the singleWorker thread, the sendBroadcast function doesn\'t understand the address version'
                printLock.release()
    def sendMsg(self,toRipe):
        """Build, encrypt, POW and advertise every queued msg destined
        for the address with ripe hash *toRipe*. The caller asserts
        that the recipient's pubkey is already in our pubkeys table.
        Matching rows move from 'findingpubkey' to 'doingpow' and
        finally to 'sentmessage'."""
        sqlLock.acquire()
        t = ('doingpow','findingpubkey',toRipe)
        sqlSubmitQueue.put('UPDATE sent SET status=? WHERE status=? AND toripe=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        t = ('doingpow',toRipe)
        sqlSubmitQueue.put('SELECT toaddress, fromaddress, subject, message, ackdata FROM sent WHERE status=? AND toripe=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toaddress, fromaddress, subject, message, ackdata = row
            ackdataForWhichImWatching[ackdata] = 0
            toStatus,toAddressVersionNumber,toStreamNumber,toHash = decodeAddress(toaddress)
            fromStatus,fromAddressVersionNumber,fromStreamNumber,fromHash = decodeAddress(fromaddress)
            self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Doing work necessary to send the message.')
            printLock.acquire()
            print 'Found a message in our database that needs to be sent with this pubkey.'
            print 'First 150 characters of message:', message[:150]
            printLock.release()
            embeddedTime = pack('>I',(int(time.time())+random.randrange(-300, 300)))#the current time plus or minus five minutes. We will use this time both for our message and for the ackdata packed within our message.
            if fromAddressVersionNumber == 2:
                payload = '\x01' #Message version.
                payload += encodeVarint(fromAddressVersionNumber)
                payload += encodeVarint(fromStreamNumber)
                payload += '\x00\x00\x00\x01' #Bitfield of features and behaviors that can be expected from me. (See https://bitmessage.org/wiki/Protocol_specification#Pubkey_bitfield_features )
                #We need to convert our private keys to public keys in order to include them.
                privSigningKeyBase58 = config.get(fromaddress, 'privsigningkey')
                privEncryptionKeyBase58 = config.get(fromaddress, 'privencryptionkey')
                privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
                privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
                pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex')
                pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
                payload += pubSigningKey[1:] #The \x04 on the beginning of the public keys are not sent. This way there is only one acceptable way to encode and send a public key.
                payload += pubEncryptionKey[1:]
                payload += toHash #This hash will be checked by the receiver of the message to verify that toHash belongs to them. This prevents a Surreptitious Forwarding Attack.
                payload += '\x02' #Type 2 is simple UTF-8 message encoding as specified on the Protocol Specification on the Bitmessage Wiki.
                messageToTransmit = 'Subject:' + subject + '\n' + 'Body:' + message
                payload += encodeVarint(len(messageToTransmit))
                payload += messageToTransmit
                fullAckPayload = self.generateFullAckMessage(ackdata,toStreamNumber,embeddedTime)#The fullAckPayload is a normal msg protocol message with the proof of work already completed that the receiver of this message can easily send out.
                payload += encodeVarint(len(fullAckPayload))
                payload += fullAckPayload
                signature = highlevelcrypto.sign(payload,privSigningKeyHex)
                payload += encodeVarint(len(signature))
                payload += signature
            elif fromAddressVersionNumber == 1: #This code is for old version 1 (RSA) addresses. It will soon be removed.
                payload = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' #this run of nulls allows the true message receiver to identify his message
                payload += '\x01' #Message version.
                payload += '\x00\x00\x00\x01'
                payload += encodeVarint(fromAddressVersionNumber)
                payload += encodeVarint(fromStreamNumber)
                try:
                    sendersN = convertIntToString(config.getint(fromaddress, 'n'))
                except:
                    printLock.acquire()
                    print 'Error: Could not find', fromaddress, 'in our keys.dat file. You must have deleted it. Aborting the send.'
                    printLock.release()
                    return
                payload += encodeVarint(len(sendersN))
                payload += sendersN
                sendersE = convertIntToString(config.getint(fromaddress, 'e'))
                payload += encodeVarint(len(sendersE))
                payload += sendersE
                payload += '\x02' #Type 2 is simple UTF-8 message encoding.
                messageToTransmit = 'Subject:' + subject + '\n' + 'Body:' + message
                payload += encodeVarint(len(messageToTransmit))
                payload += messageToTransmit
                #Later, if anyone impliments clients that don't send the ack_data, then we should probably check here to make sure that the receiver will make use of this ack_data and not attach it if not.
                fullAckPayload = self.generateFullAckMessage(ackdata,toStreamNumber,embeddedTime)
                payload += encodeVarint(len(fullAckPayload))
                payload += fullAckPayload
                sendersPrivKey = rsa.PrivateKey(config.getint(fromaddress, 'n'),config.getint(fromaddress, 'e'),config.getint(fromaddress, 'd'),config.getint(fromaddress, 'p'),config.getint(fromaddress, 'q'))
                payload += rsa.sign(payload,sendersPrivKey,'SHA-512')
            #We have assembled the data that will be encrypted. Now let us fetch the recipient's public key out of our database and do the encryption.
            if toAddressVersionNumber == 2:
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT transmitdata FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                for row in queryreturn:
                    pubkeyPayload, = row
                #The pubkey is stored the way we originally received it which means that we need to read beyond things like the nonce and time to get to the public keys.
                readPosition = 8 #to bypass the nonce
                readPosition += 4 #to bypass the embedded time
                readPosition += 1 #to bypass the address version whose length is definitely 1
                streamNumber, streamNumberLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += streamNumberLength
                behaviorBitfield = pubkeyPayload[readPosition:readPosition+4]
                readPosition += 4 #to bypass the bitfield of behaviors
                #pubSigningKeyBase256 = pubkeyPayload[readPosition:readPosition+64] #We don't use this key for anything here.
                readPosition += 64
                pubEncryptionKeyBase256 = pubkeyPayload[readPosition:readPosition+64]
                readPosition += 64
                encrypted = highlevelcrypto.encrypt(payload,"04"+pubEncryptionKeyBase256.encode('hex'))
            elif toAddressVersionNumber == 1:
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT transmitdata FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                for row in queryreturn:
                    pubkeyPayload, = row
                # Walk past the stored pubkey header to reach the RSA
                # modulus and exponent.
                readPosition = 8 #to bypass the nonce
                behaviorBitfield = pubkeyPayload[8:12]
                readPosition += 4 #to bypass the bitfield of behaviors
                addressVersion, addressVersionLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += addressVersionLength
                streamNumber, streamNumberLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += streamNumberLength
                nLength, nLengthLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += nLengthLength
                n = convertStringToInt(pubkeyPayload[readPosition:readPosition+nLength])
                readPosition += nLength
                eLength, eLengthLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += eLengthLength
                e = convertStringToInt(pubkeyPayload[readPosition:readPosition+eLength])
                receiversPubkey = rsa.PublicKey(n,e)
                infile = cStringIO.StringIO(payload)
                outfile = cStringIO.StringIO()
                #print 'Encrypting using public key:', receiversPubkey
                encrypt_bigfile(infile,outfile,receiversPubkey)
                encrypted = outfile.getvalue()
                infile.close()
                outfile.close()
            nonce = 0
            trialValue = 99999999999999999999
            encodedStreamNumber = encodeVarint(toStreamNumber)
            #We are now dropping the unencrypted data in payload since it has already been encrypted and replacing it with the encrypted payload that we will send out.
            payload = embeddedTime + encodedStreamNumber + encrypted
            target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
            print '(For msg message) Doing proof of work. Target:', target
            powStartTime = time.time()
            initialHash = hashlib.sha512(payload).digest()
            while trialValue > target:
                nonce += 1
                trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
            print '(For msg message) Found proof of work', trialValue, 'Nonce:', nonce
            print 'POW took', int(time.time()-powStartTime), 'seconds.', nonce/(time.time()-powStartTime), 'nonce trials per second.'
            payload = pack('>Q',nonce) + payload
            inventoryHash = calculateInventoryHash(payload)
            objectType = 'msg'
            inventory[inventoryHash] = (objectType, toStreamNumber, payload, int(time.time()))
            self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Message sent. Waiting on acknowledgement. Sent on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
            print 'sending inv (within sendmsg function)'
            # NOTE(review): this uses streamNumber (parsed above from the
            # recipient's stored pubkey payload) while the inventory entry
            # above was filed under toStreamNumber -- confirm that the two
            # are always equal here.
            broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
            #Update the status of the message in the 'sent' table to have a 'sent' status
            sqlLock.acquire()
            t = ('sentmessage',toaddress, fromaddress, subject, message,'doingpow')
            sqlSubmitQueue.put('UPDATE sent SET status=? WHERE toaddress=? AND fromaddress=? AND subject=? AND message=? AND status=?')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            # Mark the pubkey as used so the periodic cleanup thread
            # will not delete it.
            t = (toRipe,)
            sqlSubmitQueue.put('''UPDATE pubkeys SET usedpersonally='yes' WHERE hash=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
    def requestPubKey(self,addressVersionNumber,streamNumber,ripe):
        """Build a getpubkey object for the given recipient ripe hash,
        do its proof of work, and advertise it so peers relay the
        request to the key's owner."""
        payload = pack('>I',int(time.time()))
        payload += encodeVarint(addressVersionNumber)
        payload += encodeVarint(streamNumber)
        payload += ripe
        printLock.acquire()
        print 'making request for pubkey with ripe:', ripe.encode('hex')
        printLock.release()
        nonce = 0
        trialValue = 99999999999999999999
        #print 'trial value', trialValue
        statusbar = 'Doing the computations necessary to request the recipient\'s public key.'
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
        self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),ripe,'Doing work necessary to request public key.')
        print 'Doing proof-of-work necessary to send getpubkey message.'
        target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
        initialHash = hashlib.sha512(payload).digest()
        while trialValue > target:
            nonce += 1
            trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
        printLock.acquire()
        print 'Found proof of work', trialValue, 'Nonce:', nonce
        printLock.release()
        payload = pack('>Q',nonce) + payload
        inventoryHash = calculateInventoryHash(payload)
        objectType = 'getpubkey'
        inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
        print 'sending inv (for the getpubkey message)'
        #payload = '\x01' + pack('>H',objectType) + hash
        broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Broacasting the public key request. This program will auto-retry if they are offline.')
        self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),ripe,'Sending public key request. Waiting for reply. Requested at ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
    def generateFullAckMessage(self,ackdata,toStreamNumber,embeddedTime):
        """Return a complete, ready-to-transmit msg protocol packet
        (message header included) wrapping *ackdata*, with the proof of
        work already done, so the recipient can acknowledge our message
        simply by relaying it."""
        nonce = 0
        trialValue = 99999999999999999999
        encodedStreamNumber = encodeVarint(toStreamNumber)
        payload = embeddedTime + encodedStreamNumber + ackdata
        target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
        printLock.acquire()
        print '(For ack message) Doing proof of work...'
        printLock.release()
        powStartTime = time.time()
        initialHash = hashlib.sha512(payload).digest()
        while trialValue > target:
            nonce += 1
            trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
        printLock.acquire()
        print '(For ack message) Found proof of work', trialValue, 'Nonce:', nonce
        print 'POW took', int(time.time()-powStartTime), 'seconds.', nonce/(time.time()-powStartTime), 'nonce trials per second.'
        printLock.release()
        payload = pack('>Q',nonce) + payload
        # Prepend the Bitmessage wire-protocol message header.
        headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
        headerData += 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        headerData += pack('>L',len(payload))
        headerData += hashlib.sha512(payload).digest()[:4]
        return headerData + payload
class addressGenerator(QThread):
def __init__(self, parent = None):
QThread.__init__(self, parent)
def setup(self,addressVersionNumber,streamNumber,label="(no label)",numberOfAddressesToMake=1,deterministicPassphrase="",eighteenByteRipe=False):
self.addressVersionNumber = addressVersionNumber
self.streamNumber = streamNumber
self.label = label
self.numberOfAddressesToMake = numberOfAddressesToMake
self.deterministicPassphrase = deterministicPassphrase
self.eighteenByteRipe = eighteenByteRipe
    def run(self):
        """Generate one or more Bitmessage addresses according to the
        parameters stored by setup(), write the new keys to keys.dat,
        and notify the UI. Version 2 addresses use ECC keys -- either
        random or derived deterministically from a passphrase; the
        version 1 RSA path is legacy and slated for removal."""
        if self.addressVersionNumber == 2:
            if self.deterministicPassphrase == "":
                statusbar = 'Generating one new address'
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
                #This next section is a little bit strange. We're going to generate keys over and over until we
                #find one that starts with either \x00 or \x00\x00. Then when we pack them into a Bitmessage address,
                #we won't store the \x00 or \x00\x00 bytes thus making the address shorter.
                startTime = time.time()
                numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                # NOTE(review): the signing key pair is generated once,
                # outside the search loop; only the encryption key is
                # varied to find a short ripe -- confirm this is the
                # intended behavior.
                potentialPrivSigningKey = OpenSSL.rand(32)
                potentialPubSigningKey = self.pointMult(potentialPrivSigningKey)
                while True:
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                    potentialPrivEncryptionKey = OpenSSL.rand(32)
                    potentialPubEncryptionKey = self.pointMult(potentialPrivEncryptionKey)
                    #print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
                    #print 'potentialPubEncryptionKey', potentialPubEncryptionKey.encode('hex')
                    # ripe = RIPEMD160(SHA512(pubSigningKey || pubEncryptionKey))
                    ripe = hashlib.new('ripemd160')
                    sha = hashlib.new('sha512')
                    sha.update(potentialPubSigningKey+potentialPubEncryptionKey)
                    ripe.update(sha.digest())
                    #print 'potential ripe.digest', ripe.digest().encode('hex')
                    if self.eighteenByteRipe:
                        if ripe.digest()[:2] == '\x00\x00':
                            break
                    else:
                        if ripe.digest()[:1] == '\x00':
                            break
                print 'Generated address with ripe digest:', ripe.digest().encode('hex')
                print 'Address generator calculated', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, 'addresses at', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix/(time.time()-startTime),'addresses per second before finding one with the correct ripe-prefix.'
                # Drop the leading null byte(s) of the ripe before
                # encoding so that the address is shorter.
                if ripe.digest()[:2] == '\x00\x00':
                    address = encodeAddress(2,self.streamNumber,ripe.digest()[2:])
                elif ripe.digest()[:1] == '\x00':
                    address = encodeAddress(2,self.streamNumber,ripe.digest()[1:])
                #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
                #An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
                #https://en.bitcoin.it/wiki/Wallet_import_format
                privSigningKey = '\x80'+potentialPrivSigningKey
                checksum = hashlib.sha256(hashlib.sha256(privSigningKey).digest()).digest()[0:4]
                privSigningKeyWIF = arithmetic.changebase(privSigningKey + checksum,256,58)
                #print 'privSigningKeyWIF',privSigningKeyWIF
                privEncryptionKey = '\x80'+potentialPrivEncryptionKey
                checksum = hashlib.sha256(hashlib.sha256(privEncryptionKey).digest()).digest()[0:4]
                privEncryptionKeyWIF = arithmetic.changebase(privEncryptionKey + checksum,256,58)
                #print 'privEncryptionKeyWIF',privEncryptionKeyWIF
                # Persist the new address and its keys to keys.dat.
                config.add_section(address)
                config.set(address,'label',self.label)
                config.set(address,'enabled','true')
                config.set(address,'decoy','false')
                config.set(address,'privSigningKey',privSigningKeyWIF)
                config.set(address,'privEncryptionKey',privEncryptionKeyWIF)
                with open(appdata + 'keys.dat', 'wb') as configfile:
                    config.write(configfile)
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address. Doing work necessary to broadcast it...')
                self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
                reloadMyAddressHashes()
                # Hand the pubkey-broadcast POW off to the singleWorker
                # thread.
                workerQueue.put(('doPOWForMyV2Pubkey',address))
            else: #There is something in the deterministicPassphrase variable thus we are going to do this deterministically.
                statusbar = 'Generating '+str(self.numberOfAddressesToMake) + ' new addresses.'
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
                # Signing-key nonces are even, encryption-key nonces are
                # odd; both advance by 2 per attempt so they never
                # collide.
                signingKeyNonce = 0
                encryptionKeyNonce = 1
                for i in range(self.numberOfAddressesToMake):
                    #This next section is a little bit strange. We're going to generate keys over and over until we
                    #find one that has a RIPEMD hash that starts with either \x00 or \x00\x00. Then when we pack them
                    #into a Bitmessage address, we won't store the \x00 or \x00\x00 bytes thus making the address shorter.
                    startTime = time.time()
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                    while True:
                        numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                        potentialPrivSigningKey = hashlib.sha512(self.deterministicPassphrase + encodeVarint(signingKeyNonce)).digest()[:32]
                        potentialPrivEncryptionKey = hashlib.sha512(self.deterministicPassphrase + encodeVarint(encryptionKeyNonce)).digest()[:32]
                        potentialPubSigningKey = self.pointMult(potentialPrivSigningKey)
                        potentialPubEncryptionKey = self.pointMult(potentialPrivEncryptionKey)
                        #print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
                        #print 'potentialPubEncryptionKey', potentialPubEncryptionKey.encode('hex')
                        signingKeyNonce += 2
                        encryptionKeyNonce += 2
                        ripe = hashlib.new('ripemd160')
                        sha = hashlib.new('sha512')
                        sha.update(potentialPubSigningKey+potentialPubEncryptionKey)
                        ripe.update(sha.digest())
                        #print 'potential ripe.digest', ripe.digest().encode('hex')
                        if self.eighteenByteRipe:
                            if ripe.digest()[:2] == '\x00\x00':
                                break
                        else:
                            if ripe.digest()[:1] == '\x00':
                                break
                    print 'ripe.digest', ripe.digest().encode('hex')
                    print 'Address generator calculated', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, 'addresses at', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix/(time.time()-startTime),'keys per second.'
                    if ripe.digest()[:2] == '\x00\x00':
                        address = encodeAddress(2,self.streamNumber,ripe.digest()[2:])
                    elif ripe.digest()[:1] == '\x00':
                        address = encodeAddress(2,self.streamNumber,ripe.digest()[1:])
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
                    #An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
                    #https://en.bitcoin.it/wiki/Wallet_import_format
                    privSigningKey = '\x80'+potentialPrivSigningKey
                    checksum = hashlib.sha256(hashlib.sha256(privSigningKey).digest()).digest()[0:4]
                    privSigningKeyWIF = arithmetic.changebase(privSigningKey + checksum,256,58)
                    privEncryptionKey = '\x80'+potentialPrivEncryptionKey
                    checksum = hashlib.sha256(hashlib.sha256(privEncryptionKey).digest()).digest()[0:4]
                    privEncryptionKeyWIF = arithmetic.changebase(privEncryptionKey + checksum,256,58)
                    # add_section raises if the address already exists
                    # (e.g. re-running the same passphrase); in that
                    # case we skip the write entirely.
                    try:
                        config.add_section(address)
                        print 'self.label', self.label
                        config.set(address,'label',self.label)
                        config.set(address,'enabled','true')
                        config.set(address,'decoy','false')
                        config.set(address,'privSigningKey',privSigningKeyWIF)
                        config.set(address,'privEncryptionKey',privEncryptionKeyWIF)
                        with open(appdata + 'keys.dat', 'wb') as configfile:
                            config.write(configfile)
                        self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
                    except:
                        print address,'already exists. Not adding it again.'
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address')
                reloadMyAddressHashes()
        #This code which deals with old RSA addresses will soon be removed.
        elif self.addressVersionNumber == 1:
            statusbar = 'Generating new ' + str(config.getint('bitmessagesettings', 'bitstrength')) + ' bit RSA key. This takes a minute on average. If you want to generate multiple addresses now, you can; they will queue.'
            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
            (pubkey, privkey) = rsa.newkeys(config.getint('bitmessagesettings', 'bitstrength'))
            print privkey['n']
            print privkey['e']
            print privkey['d']
            print privkey['p']
            print privkey['q']
            # ripe = RIPEMD160(SHA512(n || e)) over the string-encoded
            # RSA public components.
            sha = hashlib.new('sha512')
            #sha.update(str(pubkey.n)+str(pubkey.e))
            sha.update(convertIntToString(pubkey.n)+convertIntToString(pubkey.e))
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            address = encodeAddress(1,self.streamNumber,ripe.digest())
            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
            config.add_section(address)
            config.set(address,'label',self.label)
            config.set(address,'enabled','true')
            config.set(address,'decoy','false')
            config.set(address,'n',str(privkey['n']))
            config.set(address,'e',str(privkey['e']))
            config.set(address,'d',str(privkey['d']))
            config.set(address,'p',str(privkey['p']))
            config.set(address,'q',str(privkey['q']))
            with open(appdata + 'keys.dat', 'wb') as configfile:
                config.write(configfile)
            self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address')
            self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
            reloadMyAddressHashes()
#Does an EC point multiplication; turns a private key into a public key.
    def pointMult(self,secret):
        """Perform an EC point multiplication on the secp256k1 curve:
        turn a 32-byte private key (`secret`) into a serialized public
        key, returned as a raw byte string.

        Talks to the ctypes OpenSSL wrapper directly; every native
        object allocated here is freed before returning so no OpenSSL
        memory is leaked.
        """
        #ctx = OpenSSL.BN_CTX_new() #This value proved to cause Seg Faults on Linux. It turns out that it really didn't speed up EC_POINT_mul anyway.
        k = OpenSSL.EC_KEY_new_by_curve_name(OpenSSL.get_curve('secp256k1'))
        priv_key = OpenSSL.BN_bin2bn(secret, 32, 0)
        group = OpenSSL.EC_KEY_get0_group(k)
        pub_key = OpenSSL.EC_POINT_new(group)

        # pub_key = priv_key * G (the curve generator). The three trailing
        # None arguments are the optional extra point/scalar pair and the
        # BN_CTX, all unused here.
        OpenSSL.EC_POINT_mul(group, pub_key, priv_key, None, None, None)
        OpenSSL.EC_KEY_set_private_key(k, priv_key)
        OpenSSL.EC_KEY_set_public_key(k, pub_key)
        #print 'priv_key',priv_key
        #print 'pub_key',pub_key

        # i2o_ECPublicKey with a NULL buffer returns the required size;
        # the second call serializes the public key into mb.
        size = OpenSSL.i2o_ECPublicKey(k, 0)
        mb = ctypes.create_string_buffer(size)
        OpenSSL.i2o_ECPublicKey(k, ctypes.byref(ctypes.pointer(mb)))
        #print 'mb.raw', mb.raw.encode('hex'), 'length:', len(mb.raw)
        #print 'mb.raw', mb.raw, 'length:', len(mb.raw)

        # Free native allocations (pub_key, priv_key, then the key object).
        OpenSSL.EC_POINT_free(pub_key)
        #OpenSSL.BN_CTX_free(ctx)
        OpenSSL.BN_free(priv_key)
        OpenSSL.EC_KEY_free(k)
        return mb.raw
class iconGlossaryDialog(QtGui.QDialog):
    """Dialog explaining the meaning of the status-bar icons; also shows
    the TCP port currently in use (read from keys.dat)."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_iconGlossaryDialog()
        self.ui.setupUi(self)
        self.parent = parent
        self.ui.labelPortNumber.setText('You are using TCP port ' + str(config.getint('bitmessagesettings', 'port')) + '. (This can be changed in the settings).')
        # Shrink the dialog to the minimum size that fits its contents.
        QtGui.QWidget.resize(self,QtGui.QWidget.sizeHint(self))
class helpDialog(QtGui.QDialog):
    """Dialog pointing the user at the online help/wiki resources."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_helpDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Make the help URL clickable so it opens in the system browser.
        self.ui.labelHelpURI.setOpenExternalLinks(True)
        QtGui.QWidget.resize(self,QtGui.QWidget.sizeHint(self))
class aboutDialog(QtGui.QDialog):
    """'About' dialog; displays the running software version string."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_aboutDialog()
        self.ui.setupUi(self)
        self.parent = parent
        self.ui.labelVersion.setText('version ' + softwareVersion)
class regenerateAddressesDialog(QtGui.QDialog):
    """Dialog collecting the parameters (passphrase, stream, version,
    count, 18-byte-ripe flag) needed to regenerate deterministic
    addresses. The values are read back by the caller after exec_()."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_regenerateAddressesDialog()
        self.ui.setupUi(self)
        self.parent = parent
        QtGui.QWidget.resize(self,QtGui.QWidget.sizeHint(self))
class settingsDialog(QtGui.QDialog):
    """The Settings window: loads current values from keys.dat into the
    widgets and enables/disables controls to match the platform and the
    currently selected SOCKS proxy type."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_settingsDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # User-interface (startup/tray) options from keys.dat:
        self.ui.checkBoxStartOnLogon.setChecked(config.getboolean('bitmessagesettings', 'startonlogon'))
        self.ui.checkBoxMinimizeToTray.setChecked(config.getboolean('bitmessagesettings', 'minimizetotray'))
        self.ui.checkBoxShowTrayNotifications.setChecked(config.getboolean('bitmessagesettings', 'showtraynotifications'))
        self.ui.checkBoxStartInTray.setChecked(config.getboolean('bitmessagesettings', 'startintray'))
        # These options are not implemented on OS X / Linux, so grey them out.
        if 'darwin' in sys.platform:
            self.ui.checkBoxStartOnLogon.setDisabled(True)
            self.ui.checkBoxMinimizeToTray.setDisabled(True)
            self.ui.checkBoxShowTrayNotifications.setDisabled(True)
            self.ui.checkBoxStartInTray.setDisabled(True)
            self.ui.labelSettingsNote.setText('Options have been disabled because they either arn\'t applicable or because they haven\'t yet been implimented for your operating system.')
        elif 'linux' in sys.platform:
            self.ui.checkBoxStartOnLogon.setDisabled(True)
            self.ui.checkBoxMinimizeToTray.setDisabled(True)
            self.ui.checkBoxStartInTray.setDisabled(True)
            self.ui.labelSettingsNote.setText('Options have been disabled because they either arn\'t applicable or because they haven\'t yet been implimented for your operating system.')
        #On the Network settings tab:
        self.ui.lineEditTCPPort.setText(str(config.get('bitmessagesettings', 'port')))
        self.ui.checkBoxAuthentication.setChecked(config.getboolean('bitmessagesettings', 'socksauthentication'))
        # Combo box indices 0/1/2 correspond to none/SOCKS4a/SOCKS5. With no
        # proxy the SOCKS fields are disabled; with a proxy, the direct TCP
        # port field is disabled instead.
        if str(config.get('bitmessagesettings', 'socksproxytype')) == 'none':
            self.ui.comboBoxProxyType.setCurrentIndex(0)
            self.ui.lineEditSocksHostname.setEnabled(False)
            self.ui.lineEditSocksPort.setEnabled(False)
            self.ui.lineEditSocksUsername.setEnabled(False)
            self.ui.lineEditSocksPassword.setEnabled(False)
            self.ui.checkBoxAuthentication.setEnabled(False)
        elif str(config.get('bitmessagesettings', 'socksproxytype')) == 'SOCKS4a':
            self.ui.comboBoxProxyType.setCurrentIndex(1)
            self.ui.lineEditTCPPort.setEnabled(False)
        elif str(config.get('bitmessagesettings', 'socksproxytype')) == 'SOCKS5':
            self.ui.comboBoxProxyType.setCurrentIndex(2)
            self.ui.lineEditTCPPort.setEnabled(False)
        # SOCKS connection details are filled in regardless of proxy type so
        # they are visible (though possibly disabled) when the user switches.
        self.ui.lineEditSocksHostname.setText(str(config.get('bitmessagesettings', 'sockshostname')))
        self.ui.lineEditSocksPort.setText(str(config.get('bitmessagesettings', 'socksport')))
        self.ui.lineEditSocksUsername.setText(str(config.get('bitmessagesettings', 'socksusername')))
        self.ui.lineEditSocksPassword.setText(str(config.get('bitmessagesettings', 'sockspassword')))
        QtCore.QObject.connect(self.ui.comboBoxProxyType, QtCore.SIGNAL("currentIndexChanged(int)"), self.comboBoxProxyTypeChanged)
    def comboBoxProxyTypeChanged(self,comboBoxIndex):
        """Enable/disable the SOCKS fields to match the newly selected
        proxy type (index 0 = none, 1 = SOCKS4a, 2 = SOCKS5)."""
        if comboBoxIndex == 0:
            self.ui.lineEditSocksHostname.setEnabled(False)
            self.ui.lineEditSocksPort.setEnabled(False)
            self.ui.lineEditSocksUsername.setEnabled(False)
            self.ui.lineEditSocksPassword.setEnabled(False)
            self.ui.checkBoxAuthentication.setEnabled(False)
            self.ui.lineEditTCPPort.setEnabled(True)
        elif comboBoxIndex == 1 or comboBoxIndex == 2:
            self.ui.lineEditSocksHostname.setEnabled(True)
            self.ui.lineEditSocksPort.setEnabled(True)
            self.ui.checkBoxAuthentication.setEnabled(True)
            # Username/password only make sense when authentication is on.
            if self.ui.checkBoxAuthentication.isChecked():
                self.ui.lineEditSocksUsername.setEnabled(True)
                self.ui.lineEditSocksPassword.setEnabled(True)
            self.ui.lineEditTCPPort.setEnabled(False)
class NewSubscriptionDialog(QtGui.QDialog):
    """Dialog for subscribing to another address's broadcasts.

    Re-validates the typed address on every keystroke and reports a
    human-readable status beneath the input field.
    """
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_NewSubscriptionDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Validate the candidate address each time the text changes.
        QtCore.QObject.connect(self.ui.lineEditSubscriptionAddress, QtCore.SIGNAL("textChanged(QString)"), self.subscriptionAddressChanged)
    def subscriptionAddressChanged(self,QString):
        """Decode the candidate address and display why it is (in)valid.

        Only the decode status is used here; the version/stream/ripe
        values returned by decodeAddress are ignored.
        """
        status,a,b,c = decodeAddress(str(QString))
        if status == 'missingbm':
            # Bug fix: the original wrote ''BM-'' (doubled quotes), which
            # Python parses as adjacent-string concatenation with empty
            # strings, so the quotes around BM- never appeared on screen.
            self.ui.labelSubscriptionAddressCheck.setText('The address should start with \'BM-\'')
        elif status == 'checksumfailed':
            self.ui.labelSubscriptionAddressCheck.setText('The address is not typed or copied correctly (the checksum failed).')
        elif status == 'versiontoohigh':
            self.ui.labelSubscriptionAddressCheck.setText('The version number of this address is higher than this software can support. Please upgrade Bitmessage.')
        elif status == 'invalidcharacters':
            self.ui.labelSubscriptionAddressCheck.setText('The address contains invalid characters.')
        elif status == 'success':
            self.ui.labelSubscriptionAddressCheck.setText('Address is valid.')
class NewAddressDialog(QtGui.QDialog):
    """Dialog for creating a new address, either random or deterministic."""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_NewAddressDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Fill the 'existing address' combo box from the rows of the
        # 'Your Identities' table on the main window. The 'existing'
        # radio button is clicked whenever at least one row is present
        # (once per row, matching the original behaviour).
        identitiesTable = self.parent.ui.tableWidgetYourIdentities
        rowIndex = 0
        while identitiesTable.item(rowIndex, 1):
            self.ui.radioButtonExisting.click()
            #print identitiesTable.item(rowIndex, 1).text()
            self.ui.comboBoxExisting.addItem(identitiesTable.item(rowIndex, 1).text())
            rowIndex += 1
        # The deterministic-address options start hidden until selected.
        self.ui.groupBoxDeterministic.setHidden(True)
        QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class MyForm(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
#Ask the user if we may delete their old version 1 addresses if they have any.
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
if addressVersionNumber == 1:
displayMsg = "One of your addresses, "+addressInKeysFile+", is an old version 1 address. Version 1 addresses are no longer supported. May we delete it now?"
reply = QtGui.QMessageBox.question(self, 'Message',displayMsg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
config.remove_section(addressInKeysFile)
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
#Configure Bitmessage to start on startup (or remove the configuration) based on the setting in the keys.dat file
if 'win32' in sys.platform or 'win64' in sys.platform:
#Auto-startup for Windows
RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
self.settings = QSettings(RUN_PATH, QSettings.NativeFormat)
self.settings.remove("PyBitmessage") #In case the user moves the program and the registry entry is no longer valid, this will delete the old registry entry.
if config.getboolean('bitmessagesettings', 'startonlogon'):
self.settings.setValue("PyBitmessage",sys.argv[0])
elif 'darwin' in sys.platform:
#startup for mac
pass
elif 'linux' in sys.platform:
#startup for linux
pass
self.trayIcon = QtGui.QSystemTrayIcon(self)
self.trayIcon.setIcon( QtGui.QIcon(':/newPrefix/images/can-icon-16px.png') )
traySignal = "activated(QSystemTrayIcon::ActivationReason)"
QtCore.QObject.connect(self.trayIcon, QtCore.SIGNAL(traySignal), self.__icon_activated)
menu = QtGui.QMenu()
self.exitAction = menu.addAction("Exit", self.close)
self.trayIcon.setContextMenu(menu)
#I'm currently under the impression that Mac users have different expectations for the tray icon. They don't necessairly expect it to open the main window when clicked and they still expect a program showing a tray icon to also be in the dock.
if 'darwin' in sys.platform:
self.trayIcon.show()
#FILE MENU and other buttons
QtCore.QObject.connect(self.ui.actionExit, QtCore.SIGNAL("triggered()"), self.close)
QtCore.QObject.connect(self.ui.actionManageKeys, QtCore.SIGNAL("triggered()"), self.click_actionManageKeys)
QtCore.QObject.connect(self.ui.actionRegenerateDeterministicAddresses, QtCore.SIGNAL("triggered()"), self.click_actionRegenerateDeterministicAddresses)
QtCore.QObject.connect(self.ui.actionManageKeys, QtCore.SIGNAL("triggered()"), self.click_actionManageKeys)
QtCore.QObject.connect(self.ui.pushButtonNewAddress, QtCore.SIGNAL("clicked()"), self.click_NewAddressDialog)
QtCore.QObject.connect(self.ui.comboBoxSendFrom, QtCore.SIGNAL("activated(int)"),self.redrawLabelFrom)
QtCore.QObject.connect(self.ui.pushButtonAddAddressBook, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddAddressBook)
QtCore.QObject.connect(self.ui.pushButtonAddSubscription, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddSubscription)
QtCore.QObject.connect(self.ui.pushButtonAddBlacklist, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddBlacklist)
QtCore.QObject.connect(self.ui.pushButtonSend, QtCore.SIGNAL("clicked()"), self.click_pushButtonSend)
QtCore.QObject.connect(self.ui.pushButtonLoadFromAddressBook, QtCore.SIGNAL("clicked()"), self.click_pushButtonLoadFromAddressBook)
QtCore.QObject.connect(self.ui.radioButtonBlacklist, QtCore.SIGNAL("clicked()"), self.click_radioButtonBlacklist)
QtCore.QObject.connect(self.ui.radioButtonWhitelist, QtCore.SIGNAL("clicked()"), self.click_radioButtonWhitelist)
QtCore.QObject.connect(self.ui.pushButtonStatusIcon, QtCore.SIGNAL("clicked()"), self.click_pushButtonStatusIcon)
QtCore.QObject.connect(self.ui.actionSettings, QtCore.SIGNAL("triggered()"), self.click_actionSettings)
QtCore.QObject.connect(self.ui.actionAbout, QtCore.SIGNAL("triggered()"), self.click_actionAbout)
QtCore.QObject.connect(self.ui.actionHelp, QtCore.SIGNAL("triggered()"), self.click_actionHelp)
#Popup menu for the Inbox tab
self.ui.inboxContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionReply = self.ui.inboxContextMenuToolbar.addAction("Reply", self.on_action_InboxReply)
self.actionAddSenderToAddressBook = self.ui.inboxContextMenuToolbar.addAction("Add sender to your Address Book", self.on_action_InboxAddSenderToAddressBook)
self.actionTrashInboxMessage = self.ui.inboxContextMenuToolbar.addAction("Move to Trash", self.on_action_InboxTrash)
self.ui.tableWidgetInbox.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetInbox, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuInbox)
self.popMenuInbox = QtGui.QMenu( self )
self.popMenuInbox.addAction( self.actionReply )
self.popMenuInbox.addAction( self.actionAddSenderToAddressBook )
self.popMenuInbox.addSeparator()
self.popMenuInbox.addAction( self.actionTrashInboxMessage )
#Popup menu for the Your Identities tab
self.ui.addressContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionNew = self.ui.addressContextMenuToolbar.addAction("New", self.on_action_YourIdentitiesNew)
self.actionEnable = self.ui.addressContextMenuToolbar.addAction("Enable", self.on_action_YourIdentitiesEnable)
self.actionDisable = self.ui.addressContextMenuToolbar.addAction("Disable", self.on_action_YourIdentitiesDisable)
self.actionClipboard = self.ui.addressContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_YourIdentitiesClipboard)
self.ui.tableWidgetYourIdentities.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetYourIdentities, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuYourIdentities)
self.popMenu = QtGui.QMenu( self )
self.popMenu.addAction( self.actionNew )
self.popMenu.addSeparator()
self.popMenu.addAction( self.actionClipboard )
self.popMenu.addSeparator()
self.popMenu.addAction( self.actionEnable )
self.popMenu.addAction( self.actionDisable )
#Popup menu for the Address Book page
self.ui.addressBookContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionAddressBookNew = self.ui.addressBookContextMenuToolbar.addAction("New", self.on_action_AddressBookNew)
self.actionAddressBookDelete = self.ui.addressBookContextMenuToolbar.addAction("Delete", self.on_action_AddressBookDelete)
self.actionAddressBookClipboard = self.ui.addressBookContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_AddressBookClipboard)
self.actionAddressBookSend = self.ui.addressBookContextMenuToolbar.addAction("Send message to this address", self.on_action_AddressBookSend)
self.ui.tableWidgetAddressBook.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetAddressBook, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuAddressBook)
self.popMenuAddressBook = QtGui.QMenu( self )
self.popMenuAddressBook.addAction( self.actionAddressBookNew )
self.popMenuAddressBook.addAction( self.actionAddressBookDelete )
self.popMenuAddressBook.addSeparator()
self.popMenuAddressBook.addAction( self.actionAddressBookSend )
self.popMenuAddressBook.addAction( self.actionAddressBookClipboard )
#Popup menu for the Subscriptions page
self.ui.subscriptionsContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionsubscriptionsNew = self.ui.subscriptionsContextMenuToolbar.addAction("New", self.on_action_SubscriptionsNew)
self.actionsubscriptionsDelete = self.ui.subscriptionsContextMenuToolbar.addAction("Delete", self.on_action_SubscriptionsDelete)
self.actionsubscriptionsClipboard = self.ui.subscriptionsContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_SubscriptionsClipboard)
self.ui.tableWidgetSubscriptions.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetSubscriptions, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuSubscriptions)
self.popMenuSubscriptions = QtGui.QMenu( self )
self.popMenuSubscriptions.addAction( self.actionsubscriptionsNew )
self.popMenuSubscriptions.addAction( self.actionsubscriptionsDelete )
self.popMenuSubscriptions.addSeparator()
self.popMenuSubscriptions.addAction( self.actionsubscriptionsClipboard )
#Popup menu for the Sent page
self.ui.sentContextMenuToolbar = QtGui.QToolBar()
# Actions
self.actionTrashSentMessage = self.ui.sentContextMenuToolbar.addAction("Move to Trash", self.on_action_SentTrash)
self.ui.tableWidgetSent.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.connect(self.ui.tableWidgetSent, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuSent)
self.popMenuSent = QtGui.QMenu( self )
self.popMenuSent.addAction( self.actionTrashSentMessage )
#Initialize the user's list of addresses on the 'Your Identities' tab.
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
isEnabled = config.getboolean(addressInKeysFile, 'enabled')
newItem = QtGui.QTableWidgetItem(unicode(config.get(addressInKeysFile, 'label'),'utf-8)'))
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetYourIdentities.insertRow(0)
self.ui.tableWidgetYourIdentities.setItem(0, 0, newItem)
newItem = QtGui.QTableWidgetItem(addressInKeysFile)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetYourIdentities.setItem(0, 1, newItem)
newItem = QtGui.QTableWidgetItem(str(addressStream(addressInKeysFile)))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
if not isEnabled:
newItem.setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetYourIdentities.setItem(0, 2, newItem)
if isEnabled:
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
self.sqlLookup = sqlThread()
self.sqlLookup.start()
reloadMyAddressHashes()
self.reloadBroadcastSendersForWhichImWatching()
#Load inbox from messages database file
sqlSubmitQueue.put('''SELECT msgid, toaddress, fromaddress, subject, received, message FROM inbox where folder='inbox' ORDER BY received''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
msgid, toAddress, fromAddress, subject, received, message, = row
try:
if toAddress == '[Broadcast subscribers]':
toLabel = '[Broadcast subscribers]'
else:
toLabel = config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
fromLabel = ''
t = (fromAddress,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
if queryreturn <> []:
for row in queryreturn:
fromLabel, = row
self.ui.tableWidgetInbox.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
newItem.setData(Qt.UserRole,str(toAddress))
self.ui.tableWidgetInbox.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
newItem.setData(Qt.UserRole,str(fromAddress))
self.ui.tableWidgetInbox.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetInbox.setItem(0,2,newItem)
newItem = myTableWidgetItem(strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(received))))
newItem.setData(Qt.UserRole,QByteArray(msgid))
newItem.setData(33,int(received))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetInbox.setItem(0,3,newItem)
#self.ui.textEditInboxMessage.setText(self.ui.tableWidgetInbox.item(0,2).data(Qt.UserRole).toPyObject())
#Load Sent items from database
sqlSubmitQueue.put('''SELECT toaddress, fromaddress, subject, message, status, ackdata, lastactiontime FROM sent where folder = 'sent' ORDER BY lastactiontime''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
toAddress, fromAddress, subject, message, status, ackdata, lastactiontime = row
try:
fromLabel = config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress
toLabel = ''
t = (toAddress,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
if queryreturn <> []:
for row in queryreturn:
toLabel, = row
self.ui.tableWidgetSent.insertRow(0)
if toLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(toAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(toAddress))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(fromAddress))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,2,newItem)
if status == 'findingpubkey':
newItem = myTableWidgetItem('Waiting on their public key. Will request it again soon.')
elif status == 'sentmessage':
newItem = myTableWidgetItem('Message sent. Waiting on acknowledgement. Sent at ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(lastactiontime)))
elif status == 'doingpow':
newItem = myTableWidgetItem('Need to do work to send message. Work is queued.')
elif status == 'ackreceived':
newItem = myTableWidgetItem('Acknowledgement of the message received ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
elif status == 'broadcastpending':
newItem = myTableWidgetItem('Doing the work necessary to send broadcast...')
elif status == 'broadcastsent':
newItem = myTableWidgetItem('Broadcast on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
else:
newItem = myTableWidgetItem('Unknown status. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
newItem.setData(Qt.UserRole,QByteArray(ackdata))
newItem.setData(33,int(lastactiontime))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSent.setItem(0,3,newItem)
#Initialize the address book
sqlSubmitQueue.put('SELECT * FROM addressbook')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
label, address = row
self.ui.tableWidgetAddressBook.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
#Initialize the Subscriptions
sqlSubmitQueue.put('SELECT label, address FROM subscriptions')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
label, address = row
self.ui.tableWidgetSubscriptions.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
self.ui.tableWidgetSubscriptions.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSubscriptions.setItem(0,1,newItem)
#Initialize the Blacklist or Whitelist
if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
self.loadBlackWhiteList()
else:
self.ui.tabWidget.setTabText(6,'Whitelist')
self.ui.radioButtonWhitelist.click()
self.loadBlackWhiteList()
#Initialize the ackdataForWhichImWatching data structure using data from the sql database.
sqlSubmitQueue.put('''SELECT ackdata FROM sent where (status='sentmessage' OR status='doingpow')''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
ackdata, = row
print 'Watching for ackdata', ackdata.encode('hex')
ackdataForWhichImWatching[ackdata] = 0
QtCore.QObject.connect(self.ui.tableWidgetYourIdentities, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetYourIdentitiesItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetAddressBook, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetAddressBookItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetSubscriptions, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetSubscriptionsItemChanged)
QtCore.QObject.connect(self.ui.tableWidgetInbox, QtCore.SIGNAL("itemSelectionChanged ()"), self.tableWidgetInboxItemClicked)
QtCore.QObject.connect(self.ui.tableWidgetSent, QtCore.SIGNAL("itemSelectionChanged ()"), self.tableWidgetSentItemClicked)
#Put the colored icon on the status bar
#self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/yellowicon.png"))
self.statusbar = self.statusBar()
self.statusbar.insertPermanentWidget(0,self.ui.pushButtonStatusIcon)
self.ui.labelStartupTime.setText('Since startup on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
self.numberOfMessagesProcessed = 0
self.numberOfBroadcastsProcessed = 0
self.numberOfPubkeysProcessed = 0
#Below this point, it would be good if all of the necessary global data structures were initialized.
self.rerenderComboBoxSendFrom()
self.listOfOutgoingSynSenderThreads = [] #if we don't maintain this list, the threads will get garbage-collected.
self.connectToStream(1)
self.singleListenerThread = singleListener()
self.singleListenerThread.start()
QtCore.QObject.connect(self.singleListenerThread, QtCore.SIGNAL("passObjectThrough(PyQt_PyObject)"), self.connectObjectToSignals)
self.singleCleanerThread = singleCleaner()
self.singleCleanerThread.start()
QtCore.QObject.connect(self.singleCleanerThread, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
QtCore.QObject.connect(self.singleCleanerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
self.workerThread = singleWorker()
self.workerThread.start()
QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByAckdata)
QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
def click_actionManageKeys(self):
if 'darwin' in sys.platform or 'linux' in sys.platform:
reply = QtGui.QMessageBox.information(self, 'keys.dat?','You may manage your keys by editing the keys.dat file stored in\n' + appdata + '\nIt is important that you back up this file.', QMessageBox.Ok)
elif sys.platform == 'win32' or sys.platform == 'win64':
reply = QtGui.QMessageBox.question(self, 'Open keys.dat?','You may manage your keys by editing the keys.dat file stored in\n' + appdata + '\nIt is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.openKeysFile()
else:
pass
    def click_actionRegenerateDeterministicAddresses(self):
        """Show the 'Regenerate deterministic addresses' dialog and, if it
        is accepted with a non-empty passphrase, start an addressGenerator
        thread with the parameters entered in the dialog."""
        self.regenerateAddressesDialogInstance = regenerateAddressesDialog(self)
        if self.regenerateAddressesDialogInstance.exec_():
            if self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text() == "":
                # NOTE(review): QMessageBox is used unqualified here, unlike
                # the QtGui.QMessageBox used elsewhere — presumably a star
                # import provides it; confirm.
                QMessageBox.about(self, "bad passphrase", "You must type your passphrase. If you don\'t have one then this is not the form for you.")
            else:
                streamNumberForAddress = int(self.regenerateAddressesDialogInstance.ui.lineEditStreamNumber.text())
                addressVersionNumber = int(self.regenerateAddressesDialogInstance.ui.lineEditAddressVersionNumber.text())
                self.addressGenerator = addressGenerator()
                self.addressGenerator.setup(addressVersionNumber,streamNumberForAddress,"unused address",self.regenerateAddressesDialogInstance.ui.spinBoxNumberOfAddressesToMake.value(),self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text().toUtf8(),self.regenerateAddressesDialogInstance.ui.checkBoxEighteenByteRipe.isChecked())
                QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
                QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
                self.addressGenerator.start()
                # Switch to the tab where the regenerated addresses appear.
                self.ui.tabWidget.setCurrentIndex(3)
def openKeysFile(self):
if 'linux' in sys.platform:
subprocess.call(["xdg-open", file])
else:
os.startfile(appdata + '\\keys.dat')
    def changeEvent(self, event):
        """On minimize — when the 'minimize to tray' setting is enabled and
        we are not on a Mac — hide the main window and show the tray icon
        instead of leaving a minimized window around."""
        if config.getboolean('bitmessagesettings', 'minimizetotray') and not 'darwin' in sys.platform:
            if event.type() == QtCore.QEvent.WindowStateChange:
                if self.windowState() & QtCore.Qt.WindowMinimized:
                    self.hide()
                    self.trayIcon.show()
                    #self.hidden = True
                    if 'win32' in sys.platform or 'win64' in sys.platform:
                        # Qt.ToolTip keeps the hidden window out of the
                        # Windows taskbar while it lives in the tray.
                        self.setWindowFlags(Qt.ToolTip)
                elif event.oldState() & QtCore.Qt.WindowMinimized:
                    #The window state has just been changed to Normal/Maximised/FullScreen
                    pass
        #QtGui.QWidget.changeEvent(self, event)
    def __icon_activated(self, reason):
        """Restore the main window when the tray icon is clicked.

        Platform-specific behavior: on Linux and Windows the tray icon is
        hidden and the window re-shown; on OS X only the window state is
        adjusted, because hiding the tray icon there caused a
        segmentation fault (see the commented-out lines below).
        """
        if reason == QtGui.QSystemTrayIcon.Trigger:
            if 'linux' in sys.platform:
                self.trayIcon.hide()
                self.setWindowFlags(Qt.Window)
                self.show()
            elif 'win32' in sys.platform or 'win64' in sys.platform:
                self.trayIcon.hide()
                self.setWindowFlags(Qt.Window)
                self.show()
                # Clear the minimized flag and bring the window to the front.
                self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
                self.activateWindow()
            elif 'darwin' in sys.platform:
                #self.trayIcon.hide() #this line causes a segmentation fault
                #self.setWindowFlags(Qt.Window)
                #self.show()
                self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
                self.activateWindow()
def incrementNumberOfMessagesProcessed(self):
self.numberOfMessagesProcessed += 1
self.ui.labelMessageCount.setText('Processed ' + str(self.numberOfMessagesProcessed) + ' person-to-person messages.')
def incrementNumberOfBroadcastsProcessed(self):
self.numberOfBroadcastsProcessed += 1
self.ui.labelBroadcastCount.setText('Processed ' + str(self.numberOfBroadcastsProcessed) + ' broadcast messages.')
def incrementNumberOfPubkeysProcessed(self):
self.numberOfPubkeysProcessed += 1
self.ui.labelPubkeyCount.setText('Processed ' + str(self.numberOfPubkeysProcessed) + ' public keys.')
    def updateNetworkStatusTab(self,streamNumber,connectionCount):
        # Refresh the per-stream connection-count table row for streamNumber
        # and adjust the status icon based on whether we have any connections.
        global statusIconColor
        #print 'updating network status tab'
        totalNumberOfConnectionsFromAllStreams = 0 #One would think we could use len(sendDataQueues) for this, but sendData threads don't remove themselves from sendDataQueues fast enough for len(sendDataQueues) to be accurate here.
        for currentRow in range(self.ui.tableWidgetConnectionCount.rowCount()):
            rowStreamNumber = int(self.ui.tableWidgetConnectionCount.item(currentRow,0).text())
            if streamNumber == rowStreamNumber:
                self.ui.tableWidgetConnectionCount.item(currentRow,1).setText(str(connectionCount))
                # NOTE(review): only the matched row's count is added here, so
                # with multiple streams the 'total' undercounts — confirm
                # whether multi-stream operation is expected.
                totalNumberOfConnectionsFromAllStreams += connectionCount
        self.ui.labelTotalConnections.setText('Total Connections: ' + str(totalNumberOfConnectionsFromAllStreams))
        # Only promote red->yellow here; green is set elsewhere when an
        # incoming connection proves the user's port is reachable.
        if totalNumberOfConnectionsFromAllStreams > 0 and statusIconColor == 'red': #FYI: The 'singlelistener' thread sets the icon color to green when it receives an incoming connection, meaning that the user's firewall is configured correctly.
            self.setStatusIcon('yellow')
        elif totalNumberOfConnectionsFromAllStreams == 0:
            self.setStatusIcon('red')
def setStatusIcon(self,color):
global statusIconColor
#print 'setting status icon color'
if color == 'red':
self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/redicon.png"))
statusIconColor = 'red'
if color == 'yellow':
if self.statusBar().currentMessage() == 'Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.':
self.statusBar().showMessage('')
self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/yellowicon.png"))
statusIconColor = 'yellow'
if color == 'green':
if self.statusBar().currentMessage() == 'Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.':
self.statusBar().showMessage('')
self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/greenicon.png"))
statusIconColor = 'green'
def updateSentItemStatusByHash(self,toRipe,textToDisplay):
for i in range(self.ui.tableWidgetSent.rowCount()):
toAddress = str(self.ui.tableWidgetSent.item(i,0).data(Qt.UserRole).toPyObject())
status,addressVersionNumber,streamNumber,ripe = decodeAddress(toAddress)
if ripe == toRipe:
self.ui.tableWidgetSent.item(i,3).setText(unicode(textToDisplay,'utf-8'))
def updateSentItemStatusByAckdata(self,ackdata,textToDisplay):
for i in range(self.ui.tableWidgetSent.rowCount()):
toAddress = str(self.ui.tableWidgetSent.item(i,0).data(Qt.UserRole).toPyObject())
tableAckdata = self.ui.tableWidgetSent.item(i,3).data(Qt.UserRole).toPyObject()
status,addressVersionNumber,streamNumber,ripe = decodeAddress(toAddress)
if ackdata == tableAckdata:
self.ui.tableWidgetSent.item(i,3).setText(unicode(textToDisplay,'utf-8'))
    def rerenderInboxFromLabels(self):
        # Refresh the 'From' column of the Inbox using labels from the
        # addressbook table, falling back to the subscriptions table (the
        # sender may be known only as a broadcast subscription).
        for i in range(self.ui.tableWidgetInbox.rowCount()):
            addressToLookup = str(self.ui.tableWidgetInbox.item(i,1).data(Qt.UserRole).toPyObject())
            fromLabel = ''
            t = (addressToLookup,)
            # Serialize access to the single SQL worker thread: hold sqlLock
            # around the submit/return round-trip so results are not crossed.
            sqlLock.acquire()
            sqlSubmitQueue.put('''select label from addressbook where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn <> []:
                for row in queryreturn:
                    fromLabel, = row
                self.ui.tableWidgetInbox.item(i,1).setText(unicode(fromLabel,'utf-8'))
            else:
                #It might be a broadcast message. We should check for that label.
                sqlLock.acquire()
                sqlSubmitQueue.put('''select label from subscriptions where address=?''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                if queryreturn <> []:
                    for row in queryreturn:
                        fromLabel, = row
                    self.ui.tableWidgetInbox.item(i,1).setText(unicode(fromLabel,'utf-8'))
def rerenderInboxToLabels(self):
for i in range(self.ui.tableWidgetInbox.rowCount()):
toAddress = str(self.ui.tableWidgetInbox.item(i,0).data(Qt.UserRole).toPyObject())
try:
toLabel = config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
self.ui.tableWidgetInbox.item(i,0).setText(unicode(toLabel,'utf-8'))
def rerenderSentFromLabels(self):
for i in range(self.ui.tableWidgetSent.rowCount()):
fromAddress = str(self.ui.tableWidgetSent.item(i,1).data(Qt.UserRole).toPyObject())
try:
fromLabel = config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress
self.ui.tableWidgetSent.item(i,1).setText(unicode(fromLabel,'utf-8'))
def rerenderSentToLabels(self):
for i in range(self.ui.tableWidgetSent.rowCount()):
addressToLookup = str(self.ui.tableWidgetSent.item(i,0).data(Qt.UserRole).toPyObject())
toLabel = ''
t = (addressToLookup,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
if queryreturn <> []:
for row in queryreturn:
toLabel, = row
self.ui.tableWidgetSent.item(i,0).setText(unicode(toLabel,'utf-8'))
def click_pushButtonSend(self):
self.statusBar().showMessage('')
toAddresses = str(self.ui.lineEditTo.text())
fromAddress = str(self.ui.labelFrom.text())
subject = str(self.ui.lineEditSubject.text().toUtf8())
message = str(self.ui.textEditMessage.document().toPlainText().toUtf8())
if self.ui.radioButtonSpecific.isChecked(): #To send a message to specific people (rather than broadcast)
toAddressesList = [s.strip() for s in toAddresses.replace(',', ';').split(';')]
toAddressesList = list(set(toAddressesList)) #remove duplicate addresses. If the user has one address with a BM- and the same address without the BM-, this will not catch it. They'll send the message to the person twice.
for toAddress in toAddressesList:
if toAddress <> '':
status,addressVersionNumber,streamNumber,ripe = decodeAddress(toAddress)
if status <> 'success':
printLock.acquire()
print 'Status bar:', 'Error: Could not decode', toAddress, ':', status
printLock.release()
if status == 'missingbm':
self.statusBar().showMessage('Error: Bitmessage addresses start with BM- Please check ' + toAddress)
if status == 'checksumfailed':
self.statusBar().showMessage('Error: The address ' + toAddress+' is not typed or copied correctly. Please check it.')
if status == 'invalidcharacters':
self.statusBar().showMessage('Error: The address '+ toAddress+ ' contains invalid characters. Please check it.')
if status == 'versiontoohigh':
self.statusBar().showMessage('Error: The address version in '+ toAddress+ ' is too high. Either you need to upgrade your Bitmessage software or your acquaintance is being clever.')
elif fromAddress == '':
self.statusBar().showMessage('Error: You must specify a From address. If you don\'t have one, go to the \'Your Identities\' tab.')
else:
toAddress = addBMIfNotPresent(toAddress)
try:
config.get(toAddress, 'enabled')
#The toAddress is one owned by me. We cannot send messages to ourselves without significant changes to the codebase.
QMessageBox.about(self, "Sending to your address", "Error: One of the addresses to which you are sending a message, "+toAddress+", is yours. Unfortunately the Bitmessage client cannot process its own messages. Please try running a second client on a different computer or within a VM.")
continue
except:
pass
if addressVersionNumber > 2 or addressVersionNumber == 0:
QMessageBox.about(self, "Address version number", "Concerning the address "+toAddress+", Bitmessage cannot understand address version numbers of "+str(addressVersionNumber)+". Perhaps upgrade Bitmessage to the latest version.")
continue
if streamNumber > 1 or streamNumber == 0:
QMessageBox.about(self, "Stream number", "Concerning the address "+toAddress+", Bitmessage cannot handle stream numbers of "+str(streamNumber)+". Perhaps upgrade Bitmessage to the latest version.")
continue
self.statusBar().showMessage('')
try:
if connectionsCount[streamNumber] == 0:
self.statusBar().showMessage('Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.')
except:
self.statusBar().showMessage('Warning: The address uses a stream number currently not supported by this Bitmessage version. Perhaps upgrade.')
ackdata = OpenSSL.rand(32)
sqlLock.acquire()
t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'findingpubkey',1,1,'sent')
sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
workerQueue.put(('sendmessage',toAddress))
try:
fromLabel = config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress
toLabel = ''
t = (toAddress,)
sqlLock.acquire()
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
toLabel, = row
self.ui.tableWidgetSent.insertRow(0)
if toLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(toAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(toAddress))
self.ui.tableWidgetSent.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(fromAddress))
self.ui.tableWidgetSent.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
self.ui.tableWidgetSent.setItem(0,2,newItem)
newItem = myTableWidgetItem('Just pressed ''send'' '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
newItem.setData(Qt.UserRole,QByteArray(ackdata))
newItem.setData(33,int(time.time()))
self.ui.tableWidgetSent.setItem(0,3,newItem)
self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(0,2).data(Qt.UserRole).toPyObject())
self.ui.labelFrom.setText('')
self.ui.tabWidget.setCurrentIndex(2)
self.ui.tableWidgetSent.setCurrentCell(0,0)
else:
self.statusBar().showMessage('Your \'To\' field is empty.')
else: #User selected 'Broadcast'
if fromAddress == '':
self.statusBar().showMessage('Error: You must specify a From address. If you don\'t have one, go to the \'Your Identities\' tab.')
else:
self.statusBar().showMessage('')
#We don't actually need the ackdata for acknowledgement since this is a broadcast message, but we can use it to update the user interface when the POW is done generating.
ackdata = OpenSSL.rand(32)
toAddress = '[Broadcast subscribers]'
ripe = ''
sqlLock.acquire()
t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'broadcastpending',1,1,'sent')
sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
workerQueue.put(('sendbroadcast',(fromAddress,subject,message)))
try:
fromLabel = config.get(fromAddress, 'label')
except:
fromLabel = ''
if fromLabel == '':
fromLabel = fromAddress
toLabel = '[Broadcast subscribers]'
self.ui.tableWidgetSent.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(toAddress))
self.ui.tableWidgetSent.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(fromAddress))
self.ui.tableWidgetSent.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
self.ui.tableWidgetSent.setItem(0,2,newItem)
#newItem = QtGui.QTableWidgetItem('Doing work necessary to send broadcast...'+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
newItem = myTableWidgetItem('Doing work necessary to send broadcast...')
newItem.setData(Qt.UserRole,QByteArray(ackdata))
newItem.setData(33,int(time.time()))
self.ui.tableWidgetSent.setItem(0,3,newItem)
self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(0,2).data(Qt.UserRole).toPyObject())
self.ui.labelFrom.setText('')
self.ui.tabWidget.setCurrentIndex(2)
self.ui.tableWidgetSent.setCurrentCell(0,0)
    def click_pushButtonLoadFromAddressBook(self):
        # Jump to the Address Book tab and flash a hint in the status bar by
        # alternating blank/text four times.
        # NOTE(review): time.sleep here blocks the GUI thread for ~0.8s total;
        # a QTimer would keep the UI responsive — confirm before changing.
        self.ui.tabWidget.setCurrentIndex(5)
        for i in range(4):
            time.sleep(0.1)
            self.statusBar().showMessage('')
            time.sleep(0.1)
            self.statusBar().showMessage('Right click an entry in your address book and select \'Send message to this address\'.')
def redrawLabelFrom(self,index):
self.ui.labelFrom.setText(self.ui.comboBoxSendFrom.itemData(index).toPyObject())
def rerenderComboBoxSendFrom(self):
self.ui.comboBoxSendFrom.clear()
self.ui.labelFrom.setText('')
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
isEnabled = config.getboolean(addressInKeysFile, 'enabled') #I realize that this is poor programming practice but I don't care. It's easier for others to read.
if isEnabled:
self.ui.comboBoxSendFrom.insertItem(0,unicode(config.get(addressInKeysFile, 'label'),'utf-8'),addressInKeysFile)
self.ui.comboBoxSendFrom.insertItem(0,'','')
if(self.ui.comboBoxSendFrom.count() == 2):
self.ui.comboBoxSendFrom.setCurrentIndex(1)
self.redrawLabelFrom(self.ui.comboBoxSendFrom.currentIndex())
else:
self.ui.comboBoxSendFrom.setCurrentIndex(0)
    def connectToStream(self,streamNumber):
        # Begin participating in a stream: register a zero count in the shared
        # connection dict, add a UI row for it, and start an outgoing
        # connection thread whose signals are routed back to this window.
        connectionsCount[streamNumber] = 0
        #Add a line to the Connection Count table on the Network Status tab with a 'zero' connection count. This will be updated as necessary by another function.
        self.ui.tableWidgetConnectionCount.insertRow(0)
        newItem = QtGui.QTableWidgetItem(str(streamNumber))
        newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
        self.ui.tableWidgetConnectionCount.setItem(0,0,newItem)
        newItem = QtGui.QTableWidgetItem('0')
        newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
        self.ui.tableWidgetConnectionCount.setItem(0,1,newItem)
        # Keep a reference so the thread is not garbage-collected.
        a = outgoingSynSender()
        self.listOfOutgoingSynSenderThreads.append(a)
        QtCore.QObject.connect(a, QtCore.SIGNAL("passObjectThrough(PyQt_PyObject)"), self.connectObjectToSignals)
        QtCore.QObject.connect(a, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
        a.setup(streamNumber)
        a.start()
    def connectObjectToSignals(self,object):
        # Wire a worker/network thread object's signals to the GUI slots so
        # background threads never touch Qt widgets directly.
        QtCore.QObject.connect(object, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
        QtCore.QObject.connect(object, QtCore.SIGNAL("displayNewMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayNewMessage)
        QtCore.QObject.connect(object, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
        QtCore.QObject.connect(object, QtCore.SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByAckdata)
        QtCore.QObject.connect(object, QtCore.SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"), self.updateNetworkStatusTab)
        QtCore.QObject.connect(object, QtCore.SIGNAL("incrementNumberOfMessagesProcessed()"), self.incrementNumberOfMessagesProcessed)
        QtCore.QObject.connect(object, QtCore.SIGNAL("incrementNumberOfPubkeysProcessed()"), self.incrementNumberOfPubkeysProcessed)
        QtCore.QObject.connect(object, QtCore.SIGNAL("incrementNumberOfBroadcastsProcessed()"), self.incrementNumberOfBroadcastsProcessed)
        QtCore.QObject.connect(object, QtCore.SIGNAL("setStatusIcon(PyQt_PyObject)"), self.setStatusIcon)
def displayNewMessage(self,inventoryHash,toAddress,fromAddress,subject,message):
'''print 'test signals displayNewMessage'
print 'toAddress', toAddress
print 'fromAddress', fromAddress
print 'message', message'''
fromLabel = ''
sqlLock.acquire()
t = (fromAddress,)
sqlSubmitQueue.put('''select label from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
fromLabel, = row
else:
#There might be a label in the subscriptions table
sqlLock.acquire()
t = (fromAddress,)
sqlSubmitQueue.put('''select label from subscriptions where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn <> []:
for row in queryreturn:
fromLabel, = row
try:
if toAddress == '[Broadcast subscribers]':
toLabel = '[Broadcast subscribers]'
else:
toLabel = config.get(toAddress, 'label')
except:
toLabel = ''
if toLabel == '':
toLabel = toAddress
#msgid, toaddress, fromaddress, subject, received, message = row
newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
newItem.setData(Qt.UserRole,str(toAddress))
self.ui.tableWidgetInbox.insertRow(0)
self.ui.tableWidgetInbox.setItem(0,0,newItem)
if fromLabel == '':
newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
if config.getboolean('bitmessagesettings', 'showtraynotifications'):
self.trayIcon.showMessage('New Message', 'New message from '+ fromAddress, 1, 2000)
else:
newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
if config.getboolean('bitmessagesettings', 'showtraynotifications'):
self.trayIcon.showMessage('New Message', 'New message from '+fromLabel, 1, 2000)
newItem.setData(Qt.UserRole,str(fromAddress))
self.ui.tableWidgetInbox.setItem(0,1,newItem)
newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
self.ui.tableWidgetInbox.setItem(0,2,newItem)
newItem = myTableWidgetItem(strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
newItem.setData(Qt.UserRole,QByteArray(inventoryHash))
newItem.setData(33,int(time.time()))
self.ui.tableWidgetInbox.setItem(0,3,newItem)
self.ui.textEditInboxMessage.setText(self.ui.tableWidgetInbox.item(0,2).data(Qt.UserRole).toPyObject())
self.ui.tableWidgetInbox.setCurrentCell(0,0)
    def click_pushButtonAddAddressBook(self):
        # Show the add-address dialog and, if the entered address is valid and
        # not already present, insert it into both the UI table and the
        # addressbook DB table.
        self.NewSubscriptionDialogInstance = NewSubscriptionDialog(self)
        if self.NewSubscriptionDialogInstance.exec_():
            if self.NewSubscriptionDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
                #First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
                sqlLock.acquire()
                t = (addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),)
                sqlSubmitQueue.put('''select * from addressbook where address=?''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                if queryreturn == []:
                    self.ui.tableWidgetAddressBook.insertRow(0)
                    newItem = QtGui.QTableWidgetItem(unicode(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
                    self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
                    newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text()))
                    newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                    self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
                    t = (str(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())))
                    sqlLock.acquire()
                    sqlSubmitQueue.put('''INSERT INTO addressbook VALUES (?,?)''')
                    sqlSubmitQueue.put(t)
                    queryreturn = sqlReturnQueue.get()
                    sqlLock.release()
                    # Inbox 'From' labels may now resolve differently.
                    self.rerenderInboxFromLabels()
                else:
                    self.statusBar().showMessage('Error: You cannot add the same address to your address book twice. Try renaming the existing one if you want.')
            else:
                self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def click_pushButtonAddSubscription(self):
self.NewSubscriptionDialogInstance = NewSubscriptionDialog(self)
if self.NewSubscriptionDialogInstance.exec_():
if self.NewSubscriptionDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
#First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
sqlLock.acquire()
t = (addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),)
sqlSubmitQueue.put('''select * from subscriptions where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn == []:
self.ui.tableWidgetSubscriptions.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
self.ui.tableWidgetSubscriptions.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text()))
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetSubscriptions.setItem(0,1,newItem)
t = (str(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),True)
sqlLock.acquire()
sqlSubmitQueue.put('''INSERT INTO subscriptions VALUES (?,?,?)''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
self.rerenderInboxFromLabels()
self.reloadBroadcastSendersForWhichImWatching()
else:
self.statusBar().showMessage('Error: You cannot add the same address to your subsciptions twice. Perhaps rename the existing one if you want.')
else:
self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def loadBlackWhiteList(self):
#Initialize the Blacklist or Whitelist table
listType = config.get('bitmessagesettings', 'blackwhitelist')
if listType == 'black':
sqlSubmitQueue.put('''SELECT label, address FROM blacklist''')
else:
sqlSubmitQueue.put('''SELECT label, address FROM whitelist''')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
for row in queryreturn:
label, address = row
self.ui.tableWidgetBlacklist.insertRow(0)
newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
self.ui.tableWidgetBlacklist.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetBlacklist.setItem(0,1,newItem)
    def click_pushButtonStatusIcon(self):
        # Show the modal icon-glossary dialog explaining the status colors.
        print 'click_pushButtonStatusIcon'
        self.iconGlossaryInstance = iconGlossaryDialog(self)
        if self.iconGlossaryInstance.exec_():
            pass
def click_actionHelp(self):
self.helpDialogInstance = helpDialog(self)
self.helpDialogInstance.exec_()
def click_actionAbout(self):
self.aboutDialogInstance = aboutDialog(self)
self.aboutDialogInstance.exec_()
    def click_actionSettings(self):
        # Show the Settings dialog and, on OK, copy every widget value into
        # the config object, warn about changes that need a restart, persist
        # keys.dat, and update the OS auto-start registration.
        global statusIconColor
        self.settingsDialogInstance = settingsDialog(self)
        if self.settingsDialogInstance.exec_():
            config.set('bitmessagesettings', 'startonlogon', str(self.settingsDialogInstance.ui.checkBoxStartOnLogon.isChecked()))
            config.set('bitmessagesettings', 'minimizetotray', str(self.settingsDialogInstance.ui.checkBoxMinimizeToTray.isChecked()))
            config.set('bitmessagesettings', 'showtraynotifications', str(self.settingsDialogInstance.ui.checkBoxShowTrayNotifications.isChecked()))
            config.set('bitmessagesettings', 'startintray', str(self.settingsDialogInstance.ui.checkBoxStartInTray.isChecked()))
            # A changed listening port only takes effect after a restart.
            if int(config.get('bitmessagesettings','port')) != int(self.settingsDialogInstance.ui.lineEditTCPPort.text()):
                QMessageBox.about(self, "Restart", "You must restart Bitmessage for the port number change to take effect.")
            config.set('bitmessagesettings', 'port', str(self.settingsDialogInstance.ui.lineEditTCPPort.text()))
            # Turning a SOCKS proxy on/off affects only new connections;
            # existing ones stay open until a manual restart.
            if config.get('bitmessagesettings', 'socksproxytype') == 'none' and str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText())[0:5] == 'SOCKS':
                if statusIconColor != 'red':
                    QMessageBox.about(self, "Restart", "Bitmessage will use your proxy from now on now but you may want to manually restart Bitmessage now to close existing connections.")
            if config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS' and str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText()) == 'none':
                self.statusBar().showMessage('')
            config.set('bitmessagesettings', 'socksproxytype', str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText()))
            config.set('bitmessagesettings', 'socksauthentication', str(self.settingsDialogInstance.ui.checkBoxAuthentication.isChecked()))
            config.set('bitmessagesettings', 'sockshostname', str(self.settingsDialogInstance.ui.lineEditSocksHostname.text()))
            config.set('bitmessagesettings', 'socksport', str(self.settingsDialogInstance.ui.lineEditSocksPort.text()))
            config.set('bitmessagesettings', 'socksusername', str(self.settingsDialogInstance.ui.lineEditSocksUsername.text()))
            config.set('bitmessagesettings', 'sockspassword', str(self.settingsDialogInstance.ui.lineEditSocksPassword.text()))
            # Persist all settings to keys.dat.
            with open(appdata + 'keys.dat', 'wb') as configfile:
                config.write(configfile)
            if 'win32' in sys.platform or 'win64' in sys.platform:
                #Auto-startup for Windows
                RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
                self.settings = QSettings(RUN_PATH, QSettings.NativeFormat)
                if config.getboolean('bitmessagesettings', 'startonlogon'):
                    self.settings.setValue("PyBitmessage",sys.argv[0])
                else:
                    self.settings.remove("PyBitmessage")
            elif 'darwin' in sys.platform:
                #startup for mac
                pass
            elif 'linux' in sys.platform:
                #startup for linux
                pass
def click_radioButtonBlacklist(self):
if config.get('bitmessagesettings', 'blackwhitelist') == 'white':
config.set('bitmessagesettings','blackwhitelist','black')
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
#self.ui.tableWidgetBlacklist.clearContents()
self.ui.tableWidgetBlacklist.setRowCount(0)
self.loadBlackWhiteList()
self.ui.tabWidget.setTabText(6,'Blacklist')
def click_radioButtonWhitelist(self):
if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
config.set('bitmessagesettings','blackwhitelist','white')
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
#self.ui.tableWidgetBlacklist.clearContents()
self.ui.tableWidgetBlacklist.setRowCount(0)
self.loadBlackWhiteList()
self.ui.tabWidget.setTabText(6,'Whitelist')
    def click_pushButtonAddBlacklist(self):
        # Show the add-address dialog (reused from subscriptions) and insert
        # the validated address into whichever list (black or white) is
        # currently active, both in the UI table and the DB.
        self.NewBlacklistDialogInstance = NewSubscriptionDialog(self)
        if self.NewBlacklistDialogInstance.exec_():
            if self.NewBlacklistDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
                #First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
                sqlLock.acquire()
                t = (addBMIfNotPresent(str(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text())),)
                if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
                    sqlSubmitQueue.put('''select * from blacklist where address=?''')
                else:
                    sqlSubmitQueue.put('''select * from whitelist where address=?''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                if queryreturn == []:
                    self.ui.tableWidgetBlacklist.insertRow(0)
                    newItem = QtGui.QTableWidgetItem(unicode(self.NewBlacklistDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
                    self.ui.tableWidgetBlacklist.setItem(0,0,newItem)
                    newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text()))
                    newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                    self.ui.tableWidgetBlacklist.setItem(0,1,newItem)
                    # Third column is the 'enabled' flag.
                    t = (str(self.NewBlacklistDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text())),True)
                    sqlLock.acquire()
                    if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
                        sqlSubmitQueue.put('''INSERT INTO blacklist VALUES (?,?,?)''')
                    else:
                        sqlSubmitQueue.put('''INSERT INTO whitelist VALUES (?,?,?)''')
                    sqlSubmitQueue.put(t)
                    queryreturn = sqlReturnQueue.get()
                    sqlLock.release()
                else:
                    self.statusBar().showMessage('Error: You cannot add the same address to your list twice. Perhaps rename the existing one if you want.')
            else:
                self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
    def click_NewAddressDialog(self):
        # Show the new-address dialog and start an addressGenerator thread
        # for either a random address or deterministic (passphrase-based)
        # addresses, wiring its signals back to this window.
        self.dialog = NewAddressDialog(self)
        # For Modal dialogs
        if self.dialog.exec_():
            #self.dialog.ui.buttonBox.enabled = False
            if self.dialog.ui.radioButtonRandomAddress.isChecked():
                if self.dialog.ui.radioButtonMostAvailable.isChecked():
                    streamNumberForAddress = 1
                else:
                    #User selected 'Use the same stream as an existing address.'
                    streamNumberForAddress = addressStream(self.dialog.ui.comboBoxExisting.currentText())
                self.addressGenerator = addressGenerator()
                # setup args: addressVersion, stream, label, count, passphrase,
                # eighteenByteRipe flag.
                self.addressGenerator.setup(2,streamNumberForAddress,str(self.dialog.ui.newaddresslabel.text().toUtf8()),1,"",self.dialog.ui.checkBoxEighteenByteRipe.isChecked())
                QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
                QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
                self.addressGenerator.start()
            else:
                # Deterministic addresses require a confirmed, non-empty passphrase.
                if self.dialog.ui.lineEditPassphrase.text() != self.dialog.ui.lineEditPassphraseAgain.text():
                    QMessageBox.about(self, "Passphrase mismatch", "The passphrase you entered twice doesn\'t match. Try again.")
                elif self.dialog.ui.lineEditPassphrase.text() == "":
                    QMessageBox.about(self, "Choose a passphrase", "You really do need a passphrase.")
                else:
                    streamNumberForAddress = 1 #this will eventually have to be replaced by logic to determine the most available stream number.
                    self.addressGenerator = addressGenerator()
                    self.addressGenerator.setup(2,streamNumberForAddress,"unused address",self.dialog.ui.spinBoxNumberOfAddressesToMake.value(),self.dialog.ui.lineEditPassphrase.text().toUtf8(),self.dialog.ui.checkBoxEighteenByteRipe.isChecked())
                    QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
                    QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
                    self.addressGenerator.start()
        else:
            print 'new address dialog box rejected'
    def closeEvent(self, event):
        '''quit_msg = "Are you sure you want to exit Bitmessage?"
        reply = QtGui.QMessageBox.question(self, 'Message',
            quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()'''
        # Orderly shutdown: tell every sendData thread to stop, flush the
        # in-memory inventory to disk, wait for the SQL worker to commit,
        # save known peers, then let Qt close the window and exit.
        broadcastToSendDataQueues((0, 'shutdown', 'all'))
        printLock.acquire()
        print 'Closing. Flushing inventory in memory out to disk...'
        printLock.release()
        self.statusBar().showMessage('Flushing inventory in memory out to disk.')
        flushInventory()
        #This one last useless query will guarantee that the previous query committed before we close the program.
        sqlLock.acquire()
        sqlSubmitQueue.put('SELECT address FROM subscriptions')
        sqlSubmitQueue.put('')
        sqlReturnQueue.get()
        sqlLock.release()
        self.statusBar().showMessage('Saving the knownNodes list of peers to disk...')
        output = open(appdata + 'knownnodes.dat', 'wb')
        pickle.dump(knownNodes, output)
        output.close()
        self.trayIcon.hide()
        printLock.acquire()
        print 'Done.'
        printLock.release()
        self.statusBar().showMessage('All done. Closing user interface...')
        event.accept()
        # Force process exit; daemon/network threads would otherwise linger.
        raise SystemExit
def on_action_InboxReply(self):
    """Pre-fill the Send tab as a reply to the selected inbox message.

    Recovers the real to/from addresses from the hidden Qt.UserRole data
    on the selected row, quotes the original message body, and prefixes
    the subject with 'Re: ' unless it already starts with one.
    """
    currentInboxRow = self.ui.tableWidgetInbox.currentRow()
    toAddressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow,0).data(Qt.UserRole).toPyObject())
    fromAddressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow,1).data(Qt.UserRole).toPyObject())
    if toAddressAtCurrentInboxRow == '[Broadcast subscribers]':
        #Broadcasts carry no specific destination identity of ours.
        self.ui.labelFrom.setText('')
    else:
        #Bug fix: config.get() returns the strings 'true'/'false', which
        #are both truthy, so 'not config.get(...)' could never trigger.
        #getboolean() parses the flag the way the rest of the file does.
        if not config.getboolean(toAddressAtCurrentInboxRow,'enabled'):
            self.statusBar().showMessage('Error: The address from which you are trying to send is disabled. Enable it from the \'Your Identities\' tab first.')
            return
        self.ui.labelFrom.setText(toAddressAtCurrentInboxRow)
    self.ui.lineEditTo.setText(str(fromAddressAtCurrentInboxRow))
    self.ui.comboBoxSendFrom.setCurrentIndex(0)
    #Quote the original message body below a separator line.
    self.ui.textEditMessage.setText('\n\n------------------------------------------------------\n'+self.ui.tableWidgetInbox.item(currentInboxRow,2).data(Qt.UserRole).toPyObject())
    if self.ui.tableWidgetInbox.item(currentInboxRow,2).text()[0:3] == 'Re:':
        self.ui.lineEditSubject.setText(str(self.ui.tableWidgetInbox.item(currentInboxRow,2).text()))
    else:
        self.ui.lineEditSubject.setText('Re: '+self.ui.tableWidgetInbox.item(currentInboxRow,2).text())
    self.ui.radioButtonSpecific.setChecked(True)
    self.ui.tabWidget.setCurrentIndex(1)
def on_action_InboxAddSenderToAddressBook(self):
    #Add the sender of the selected inbox message to the Address Book
    #(both the on-screen table and the addressbook DB table), unless the
    #address is already present.
    currentInboxRow = self.ui.tableWidgetInbox.currentRow()
    #self.ui.tableWidgetInbox.item(currentRow,1).data(Qt.UserRole).toPyObject()
    addressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow,1).data(Qt.UserRole).toPyObject())
    #Let's make sure that it isn't already in the address book
    sqlLock.acquire()
    t = (addressAtCurrentInboxRow,)
    sqlSubmitQueue.put('''select * from addressbook where address=?''')
    sqlSubmitQueue.put(t)
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    if queryreturn == []:
        #Not present yet: add a placeholder-labelled row at the top of the table...
        self.ui.tableWidgetAddressBook.insertRow(0)
        newItem = QtGui.QTableWidgetItem('--New entry. Change label in Address Book.--')
        self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
        newItem = QtGui.QTableWidgetItem(addressAtCurrentInboxRow)
        newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
        self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
        #...then persist the same placeholder entry to the database.
        t = ('--New entry. Change label in Address Book.--',addressAtCurrentInboxRow)
        sqlLock.acquire()
        sqlSubmitQueue.put('''INSERT INTO addressbook VALUES (?,?)''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        #Jump to the Address Book tab so the user can edit the label.
        self.ui.tabWidget.setCurrentIndex(5)
        self.ui.tableWidgetAddressBook.setCurrentCell(0,0)
        self.statusBar().showMessage('Entry added to the Address Book. Edit the label to your liking.')
    else:
        self.statusBar().showMessage('Error: You cannot add the same address to your address book twice. Try renaming the existing one if you want.')
#Send item on the Inbox tab to trash
def on_action_InboxTrash(self):
    #Move the selected inbox message into the 'trash' folder on disk and
    #drop its row from the Inbox table. Nothing is actually deleted.
    rowIndex = self.ui.tableWidgetInbox.currentRow()
    msgidToTrash = str(self.ui.tableWidgetInbox.item(rowIndex,3).data(Qt.UserRole).toPyObject())
    sqlLock.acquire()
    sqlSubmitQueue.put('''UPDATE inbox SET folder='trash' WHERE msgid=?''')
    sqlSubmitQueue.put((msgidToTrash,))
    sqlReturnQueue.get()
    sqlLock.release()
    self.ui.tableWidgetInbox.removeRow(rowIndex)
    self.statusBar().showMessage('Moved item to trash. There is no user interface to view your trash, but it is still on disk if you are desperate to get it back.')
#Send item on the Sent tab to trash
def on_action_SentTrash(self):
    #Move the selected sent message into the 'trash' folder on disk and
    #drop its row from the Sent table. Nothing is actually deleted.
    rowIndex = self.ui.tableWidgetSent.currentRow()
    ackdataToTrash = str(self.ui.tableWidgetSent.item(rowIndex,3).data(Qt.UserRole).toPyObject())
    sqlLock.acquire()
    sqlSubmitQueue.put('''UPDATE sent SET folder='trash' WHERE ackdata=?''')
    sqlSubmitQueue.put((ackdataToTrash,))
    sqlReturnQueue.get()
    sqlLock.release()
    self.ui.tableWidgetSent.removeRow(rowIndex)
    self.statusBar().showMessage('Moved item to trash. There is no user interface to view your trash, but it is still on disk if you are desperate to get it back.')
#Group of functions for the Address Book dialog box
def on_action_AddressBookNew(self):
    #Context-menu handler: delegate to the same routine as the
    #'add new entry' button on the Address Book tab.
    self.click_pushButtonAddAddressBook()
def on_action_AddressBookDelete(self):
    #Delete the selected entry from the Address Book table and from the
    #addressbook DB table, then refresh every view that displays these
    #labels and the set of watched broadcast senders.
    currentRow = self.ui.tableWidgetAddressBook.currentRow()
    labelAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,0).text().toUtf8()
    addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,1).text()
    t = (str(labelAtCurrentRow),str(addressAtCurrentRow))
    sqlLock.acquire()
    sqlSubmitQueue.put('''DELETE FROM addressbook WHERE label=? AND address=?''')
    sqlSubmitQueue.put(t)
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    self.ui.tableWidgetAddressBook.removeRow(currentRow)
    self.rerenderInboxFromLabels()
    self.rerenderSentToLabels()
    self.reloadBroadcastSendersForWhichImWatching()
def on_action_AddressBookClipboard(self):
    #Copy the selected address-book address to the system clipboard.
    selectedRow = self.ui.tableWidgetAddressBook.currentRow()
    selectedAddress = self.ui.tableWidgetAddressBook.item(selectedRow,1).text()
    QtGui.QApplication.clipboard().setText(str(selectedAddress))
def on_action_AddressBookSend(self):
    #Append the selected address-book address to the 'To' field on the
    #Send tab, semicolon-separated when something is there already.
    selectedRow = self.ui.tableWidgetAddressBook.currentRow()
    selectedAddress = self.ui.tableWidgetAddressBook.item(selectedRow,1).text()
    existingToText = self.ui.lineEditTo.text()
    if existingToText == '':
        self.ui.lineEditTo.setText(str(selectedAddress))
    else:
        self.ui.lineEditTo.setText(str(existingToText) + '; '+ str(selectedAddress))
    self.statusBar().showMessage('You have added the address to the \'To\' field on the \'Send\' tab. You may add more recipients if you want. When you are done, go to the \'Send\' tab.')
def on_context_menuAddressBook(self, point):
    #Pop up the Address Book context menu; point arrives in widget
    #coordinates and exec_ expects global screen coordinates.
    self.popMenuAddressBook.exec_( self.ui.tableWidgetAddressBook.mapToGlobal(point) )
#Group of functions for the Subscriptions dialog box
def on_action_SubscriptionsNew(self):
    #Context-menu handler: delegate to the same routine as the
    #'add new subscription' button.
    self.click_pushButtonAddSubscription()
def on_action_SubscriptionsDelete(self):
    #Delete the selected subscription from the table and from the
    #subscriptions DB table, then refresh the inbox 'From' labels.
    #NOTE(review): debug print left in place; it bypasses printLock.
    print 'clicked Delete'
    currentRow = self.ui.tableWidgetSubscriptions.currentRow()
    labelAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,0).text().toUtf8()
    addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,1).text()
    t = (str(labelAtCurrentRow),str(addressAtCurrentRow))
    sqlLock.acquire()
    sqlSubmitQueue.put('''DELETE FROM subscriptions WHERE label=? AND address=?''')
    sqlSubmitQueue.put(t)
    sqlReturnQueue.get()
    sqlLock.release()
    self.ui.tableWidgetSubscriptions.removeRow(currentRow)
    self.rerenderInboxFromLabels()
def on_action_SubscriptionsClipboard(self):
    #Copy the selected subscription's address to the system clipboard.
    selectedRow = self.ui.tableWidgetSubscriptions.currentRow()
    selectedAddress = self.ui.tableWidgetSubscriptions.item(selectedRow,1).text()
    QtGui.QApplication.clipboard().setText(str(selectedAddress))
def on_context_menuSubscriptions(self, point):
    #Pop up the Subscriptions context menu at the clicked position
    #(mapped from widget to global screen coordinates).
    self.popMenuSubscriptions.exec_( self.ui.tableWidgetSubscriptions.mapToGlobal(point) )
#Group of functions for the Your Identities dialog box
def on_action_YourIdentitiesNew(self):
    #Context-menu handler: open the same new-address dialog as the
    #'New' button on the Your Identities tab.
    self.click_NewAddressDialog()
def on_action_YourIdentitiesEnable(self):
    #Mark the selected identity as enabled in keys.dat, repaint its row
    #black, and reload the address hashes used elsewhere.
    currentRow = self.ui.tableWidgetYourIdentities.currentRow()
    addressAtCurrentRow = self.ui.tableWidgetYourIdentities.item(currentRow,1).text()
    config.set(str(addressAtCurrentRow),'enabled','true')
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    #Black text signals an enabled identity (gray = disabled).
    self.ui.tableWidgetYourIdentities.item(currentRow,0).setTextColor(QtGui.QColor(0,0,0))
    self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(0,0,0))
    self.ui.tableWidgetYourIdentities.item(currentRow,2).setTextColor(QtGui.QColor(0,0,0))
    reloadMyAddressHashes()
def on_action_YourIdentitiesDisable(self):
    #Mark the selected identity as disabled in keys.dat, repaint its row
    #gray, and reload the address hashes used elsewhere.
    currentRow = self.ui.tableWidgetYourIdentities.currentRow()
    addressAtCurrentRow = self.ui.tableWidgetYourIdentities.item(currentRow,1).text()
    config.set(str(addressAtCurrentRow),'enabled','false')
    #Gray text signals a disabled identity (black = enabled).
    self.ui.tableWidgetYourIdentities.item(currentRow,0).setTextColor(QtGui.QColor(128,128,128))
    self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(128,128,128))
    self.ui.tableWidgetYourIdentities.item(currentRow,2).setTextColor(QtGui.QColor(128,128,128))
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    reloadMyAddressHashes()
def on_action_YourIdentitiesClipboard(self):
    #Copy the selected identity's address to the system clipboard.
    selectedRow = self.ui.tableWidgetYourIdentities.currentRow()
    selectedAddress = self.ui.tableWidgetYourIdentities.item(selectedRow,1).text()
    QtGui.QApplication.clipboard().setText(str(selectedAddress))
def on_context_menuYourIdentities(self, point):
    #Pop up the Your Identities context menu at the clicked position
    #(mapped from widget to global screen coordinates).
    self.popMenu.exec_( self.ui.tableWidgetYourIdentities.mapToGlobal(point) )
def on_context_menuInbox(self, point):
    #Pop up the Inbox context menu at the clicked position
    #(mapped from widget to global screen coordinates).
    self.popMenuInbox.exec_( self.ui.tableWidgetInbox.mapToGlobal(point) )
def on_context_menuSent(self, point):
    #Pop up the Sent context menu at the clicked position
    #(mapped from widget to global screen coordinates).
    self.popMenuSent.exec_( self.ui.tableWidgetSent.mapToGlobal(point) )
def tableWidgetInboxItemClicked(self):
    #Display the full body of the inbox message the user selected; the
    #body is stored on the row under the Qt.UserRole data role.
    selectedRow = self.ui.tableWidgetInbox.currentRow()
    messageBody = self.ui.tableWidgetInbox.item(selectedRow,2).data(Qt.UserRole).toPyObject()
    self.ui.textEditInboxMessage.setText(messageBody)
def tableWidgetSentItemClicked(self):
    #Display the full body of the sent message the user selected; the
    #body is stored on the row under the Qt.UserRole data role.
    selectedRow = self.ui.tableWidgetSent.currentRow()
    messageBody = self.ui.tableWidgetSent.item(selectedRow,2).data(Qt.UserRole).toPyObject()
    self.ui.textEditSentMessage.setText(messageBody)
def tableWidgetYourIdentitiesItemChanged(self):
    #The user edited a label on the Your Identities tab: persist the new
    #label to keys.dat and refresh the views that display it.
    currentRow = self.ui.tableWidgetYourIdentities.currentRow()
    if currentRow >= 0:
        addressAtCurrentRow = self.ui.tableWidgetYourIdentities.item(currentRow,1).text()
        config.set(str(addressAtCurrentRow),'label',str(self.ui.tableWidgetYourIdentities.item(currentRow,0).text().toUtf8()))
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
    self.rerenderComboBoxSendFrom()
    #self.rerenderInboxFromLabels()
    self.rerenderInboxToLabels()
    self.rerenderSentFromLabels()
    #self.rerenderSentToLabels()
def tableWidgetAddressBookItemChanged(self):
    #The user edited a label in the Address Book table: persist the new
    #label to the addressbook DB table and refresh dependent views.
    currentRow = self.ui.tableWidgetAddressBook.currentRow()
    sqlLock.acquire()
    if currentRow >= 0:
        addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,1).text()
        t = (str(self.ui.tableWidgetAddressBook.item(currentRow,0).text().toUtf8()),str(addressAtCurrentRow))
        sqlSubmitQueue.put('''UPDATE addressbook set label=? WHERE address=?''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()
    #except Exception, err:
    #    print 'Program Exception in tableWidgetAddressBookItemChanged:', err
    sqlLock.release()
    self.rerenderInboxFromLabels()
    self.rerenderSentToLabels()
def tableWidgetSubscriptionsItemChanged(self):
    #The user edited a label in the Subscriptions table: persist the new
    #label to the subscriptions DB table and refresh dependent views.
    currentRow = self.ui.tableWidgetSubscriptions.currentRow()
    sqlLock.acquire()
    if currentRow >= 0:
        addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,1).text()
        t = (str(self.ui.tableWidgetSubscriptions.item(currentRow,0).text().toUtf8()),str(addressAtCurrentRow))
        sqlSubmitQueue.put('''UPDATE subscriptions set label=? WHERE address=?''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get()
    #except Exception, err:
    #    print 'Program Exception in tableWidgetSubscriptionsItemChanged:', err
    sqlLock.release()
    self.rerenderInboxFromLabels()
    self.rerenderSentToLabels()
def writeNewAddressToTable(self,label,address,streamNumber):
    #Insert a freshly generated identity at the top of the
    #Your Identities table and refresh the send-from combo box.
    identitiesTable = self.ui.tableWidgetYourIdentities
    identitiesTable.insertRow(0)
    identitiesTable.setItem(0, 0, QtGui.QTableWidgetItem(unicode(label,'utf-8')))
    addressItem = QtGui.QTableWidgetItem(address)
    addressItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    identitiesTable.setItem(0, 1, addressItem)
    streamItem = QtGui.QTableWidgetItem(streamNumber)
    streamItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    identitiesTable.setItem(0, 2, streamItem)
    self.rerenderComboBoxSendFrom()
def updateStatusBar(self,data):
printLock.acquire()
print 'Status bar:', data
printLock.release()
self.statusBar().showMessage(data)
def reloadBroadcastSendersForWhichImWatching(self):
    #Rebuild broadcastSendersForWhichImWatching from the subscriptions
    #stored in the database, keyed by the decoded address hash.
    broadcastSendersForWhichImWatching.clear()
    sqlLock.acquire()
    sqlSubmitQueue.put('SELECT address FROM subscriptions')
    sqlSubmitQueue.put('')
    subscriptionRows = sqlReturnQueue.get()
    sqlLock.release()
    for subscriptionRow in subscriptionRows:
        subscribedAddress, = subscriptionRow
        #Renamed the last unpack target so it no longer shadows the
        #builtin hash().
        status, addressVersionNumber, streamNumber, addressHash = decodeAddress(subscribedAddress)
        broadcastSendersForWhichImWatching[addressHash] = 0
#In order for the time columns on the Inbox and Sent tabs to be sorted correctly (rather than alphabetically), we need to overload the < operator and use this class instead of QTableWidgetItem.
class myTableWidgetItem(QTableWidgetItem):
    #QTableWidgetItem subclass whose sort order compares the integers
    #stored under data role 33 instead of the display text, so the time
    #columns on the Inbox and Sent tabs sort chronologically.
    def __lt__(self,other):
        return int(self.data(33).toPyObject()) < int(other.data(33).toPyObject())
#Module-level shared state used by the GUI and the networking threads.
sendDataQueues = [] #each sendData thread puts its queue in this list.
myRSAAddressHashes = {}
myECAddressHashes = {}
#myPrivateKeys = {}
inventory = {} #of objects (like msg payloads and pubkey payloads) Does not include protocol headers (the first 24 bytes of each packet).
workerQueue = Queue.Queue()
sqlSubmitQueue = Queue.Queue() #SQLITE3 is so thread-unsafe that they won't even let you call it from different threads using your own locks. SQL objects can only be called from one thread.
sqlReturnQueue = Queue.Queue()
sqlLock = threading.Lock()
printLock = threading.Lock() #serializes console output from the many threads
ackdataForWhichImWatching = {}
broadcastSendersForWhichImWatching = {}
statusIconColor = 'red'
connectionsCount = {} #Used for the 'network status' tab.
connectionsCountLock = threading.Lock()
inventoryLock = threading.Lock() #Guarantees that two receiveDataThreads don't receive and process the same message concurrently (probably sent by a malicious individual)
eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack('>Q',random.randrange(1, 18446744073709551615))
connectedHostsList = {} #List of hosts to which we are connected. Used to guarantee that the outgoingSynSender thread won't connect to the same remote node twice.
neededPubkeys = {}
successfullyDecryptMessageTimings = [] #A list of the amounts of time it took to successfully decrypt msg messages
#These constants are not at the top because if changed they will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
averageProofOfWorkNonceTrialsPerByte = 320 #The amount of work that should be performed (and demanded) per byte of the payload. Double this number to double the work.
payloadLengthExtraBytes = 14000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.
if useVeryEasyProofOfWorkForTesting:
    averageProofOfWorkNonceTrialsPerByte = averageProofOfWorkNonceTrialsPerByte / 16
    payloadLengthExtraBytes = payloadLengthExtraBytes / 7000
if __name__ == "__main__":
    #Program entry point: sanity-check sqlite, locate or create the config
    #directory, load or create keys.dat and knownnodes.dat, bootstrap the
    #knownNodes list via DNS, then start the Qt user interface.
    # Check the Major version, the first element in the array
    if sqlite3.sqlite_version_info[0] < 3:
        print 'This program requires sqlite version 3 or higher because 2 and lower cannot store NULL values. I see version:', sqlite3.sqlite_version_info
        sys.exit()
    if not storeConfigFilesInSameDirectoryAsProgram:
        #Pick a per-platform application-data directory for keys.dat etc.
        APPNAME = "PyBitmessage"
        from os import path, environ
        if sys.platform == 'darwin':
            if "HOME" in environ:
                #NOTE(review): this line reads os.environ while the rest of
                #this branch uses the imported environ; same mapping.
                appdata = path.join(os.environ["HOME"], "Library/Application support/", APPNAME) + '/'
            else:
                print 'Could not find home folder, please report this message and your OS X version to the BitMessage Github.'
                sys.exit()
        elif 'win32' in sys.platform or 'win64' in sys.platform:
            appdata = path.join(environ['APPDATA'], APPNAME) + '\\'
        else:
            appdata = path.expanduser(path.join("~", "." + APPNAME + "/"))
        if not os.path.exists(appdata):
            os.makedirs(appdata)
    else:
        appdata = ""
    config = ConfigParser.SafeConfigParser()
    config.read(appdata + 'keys.dat')
    try:
        config.get('bitmessagesettings', 'settingsversion')
        print 'Loading config files from', appdata
    #NOTE(review): bare except — any failure is treated as first run.
    except:
        #This appears to be the first time running the program; there is no config file (or it cannot be accessed). Create config file.
        config.add_section('bitmessagesettings')
        config.set('bitmessagesettings','settingsversion','1')
        #config.set('bitmessagesettings','bitstrength','2048')
        config.set('bitmessagesettings','port','8444')
        config.set('bitmessagesettings','timeformat','%%a, %%d %%b %%Y %%I:%%M %%p')
        config.set('bitmessagesettings','blackwhitelist','black')
        config.set('bitmessagesettings','startonlogon','false')
        if 'linux' in sys.platform:
            config.set('bitmessagesettings','minimizetotray','false')#This isn't implimented yet and when True on Ubuntu causes Bitmessage to disappear while running when minimized.
        else:
            config.set('bitmessagesettings','minimizetotray','true')
        config.set('bitmessagesettings','showtraynotifications','true')
        config.set('bitmessagesettings','startintray','false')
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
        print 'Storing config files in', appdata
    if config.getint('bitmessagesettings','settingsversion') == 1:
        #Upgrade settings from version 1, adding SOCKS and encryption options.
        config.set('bitmessagesettings','settingsversion','3') #If the settings version is equal to 2 then the sqlThread will modify the pubkeys table and change the settings version to 3.
        config.set('bitmessagesettings','socksproxytype','none')
        config.set('bitmessagesettings','sockshostname','localhost')
        config.set('bitmessagesettings','socksport','9050')
        config.set('bitmessagesettings','socksauthentication','false')
        config.set('bitmessagesettings','socksusername','')
        config.set('bitmessagesettings','sockspassword','')
        config.set('bitmessagesettings','keysencrypted','false')
        config.set('bitmessagesettings','messagesencrypted','false')
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
    try:
        pickleFile = open(appdata + 'knownnodes.dat', 'rb')
        knownNodes = pickle.load(pickleFile)
        pickleFile.close()
    #NOTE(review): bare except — any failure falls back to the defaults.
    except:
        createDefaultKnownNodes(appdata)
        pickleFile = open(appdata + 'knownnodes.dat', 'rb')
        knownNodes = pickle.load(pickleFile)
        pickleFile.close()
    if config.getint('bitmessagesettings', 'settingsversion') > 3:
        print 'Bitmessage cannot read future versions of the keys file (keys.dat). Run the newer version of Bitmessage.'
        raise SystemExit
    #DNS bootstrap. This could be programmed to use the SOCKS proxy to do the DNS lookup some day but for now we will just rely on the entries in defaultKnownNodes.py. Hopefully either they are up to date or the user has run Bitmessage recently without SOCKS turned on and received good bootstrap nodes using that method.
    if config.get('bitmessagesettings', 'socksproxytype') == 'none':
        try:
            for item in socket.getaddrinfo('bootstrap8080.bitmessage.org',80):
                print 'Adding', item[4][0],'to knownNodes based on DNS boostrap method'
                knownNodes[1][item[4][0]] = (8080,int(time.time()))
        except:
            print 'bootstrap8080.bitmessage.org DNS bootstraping failed.'
        try:
            for item in socket.getaddrinfo('bootstrap8444.bitmessage.org',80):
                print 'Adding', item[4][0],'to knownNodes based on DNS boostrap method'
                knownNodes[1][item[4][0]] = (8444,int(time.time()))
        except:
            print 'bootstrap8444.bitmessage.org DNS bootstrapping failed.'
    else:
        print 'DNS bootstrap skipped because SOCKS is used.'
    #Start the Qt user interface.
    app = QtGui.QApplication(sys.argv)
    app.setStyleSheet("QStatusBar::item { border: 0px solid black }")
    myapp = MyForm()
    myapp.show()
    if config.getboolean('bitmessagesettings', 'startintray'):
        myapp.hide()
        myapp.trayIcon.show()
        #self.hidden = True
        #self.setWindowState(self.windowState() & QtCore.Qt.WindowMinimized)
        #self.hide()
        if 'win32' in sys.platform or 'win64' in sys.platform:
            myapp.setWindowFlags(Qt.ToolTip)
    sys.exit(app.exec_())
# (Changelog remnant from file concatenation): Added shebang to launcher script to enable standalone launching.
#!/usr/bin/env python2.7
# Copyright (c) 2012 Jonathan Warren
# Copyright (c) 2012 The Bitmessage developers
# Distributed under the MIT/X11 software license. See the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#Right now, PyBitmessage only support connecting to stream 1. It doesn't yet contain logic to expand into further streams.
#Client version and protocol/behaviour tuning constants.
softwareVersion = '0.2.4'
verbose = 2 #controls how chatty the console logging is (see processData)
maximumAgeOfAnObjectThatIAmWillingToAccept = 216000 #Equals two days and 12 hours.
lengthOfTimeToLeaveObjectsInInventory = 237600 #Equals two days and 18 hours. This should be longer than maximumAgeOfAnObjectThatIAmWillingToAccept so that we don't process messages twice.
lengthOfTimeToHoldOnToAllPubkeys = 2419200 #Equals 4 weeks. You could make this longer if you want but making it shorter would not be advisable because there is a very small possibility that it could keep you from obtaining a needed pubkey for a period of time.
maximumAgeOfObjectsThatIAdvertiseToOthers = 216000 #Equals two days and 12 hours
maximumAgeOfNodesThatIAdvertiseToOthers = 10800 #Equals three hours
storeConfigFilesInSameDirectoryAsProgram = False
useVeryEasyProofOfWorkForTesting = False #If you set this to True while on the normal network, you won't be able to send or sometimes receive messages.
import sys
try:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
except Exception, err:
print 'PyBitmessage requires PyQt. You can download it from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \'PyQt Download\' (without quotes).'
print 'Error message:', err
sys.exit()
import ConfigParser
from bitmessageui import *
from newaddressdialog import *
from newsubscriptiondialog import *
from regenerateaddresses import *
from settings import *
from about import *
from help import *
from iconglossary import *
from addresses import *
import Queue
from defaultKnownNodes import *
import time
import socket
import threading
import rsa
from rsa.bigfile import *
import hashlib
from struct import *
import pickle
import random
import sqlite3
import threading #used for the locks, not for the threads
import cStringIO
from time import strftime, localtime
import os
import string
import socks
#import pyelliptic
import highlevelcrypto
from pyelliptic.openssl import OpenSSL
import ctypes
from pyelliptic import arithmetic
#For each stream to which we connect, one outgoingSynSender thread will exist and will create 8 connections with peers.
class outgoingSynSender(QThread):
    """One thread per stream: maintains up to 8 outgoing connections,
    picking random peers from knownNodes, connecting directly or through a
    SOCKS4a/SOCKS5 proxy, and spawning a receiveDataThread plus a
    sendDataThread for each successful connection."""
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        self.selfInitiatedConnectionList = [] #This is a list of current connections (the thread pointers at least)
        self.alreadyAttemptedConnectionsList = [] #This is a list of nodes to which we have already attempted a connection
    def setup(self,streamNumber):
        #Record which stream this thread is responsible for.
        self.streamNumber = streamNumber
    def run(self):
        time.sleep(1)
        resetTime = int(time.time()) #used below to clear out the alreadyAttemptedConnectionsList periodically so that we will retry connecting to hosts to which we have already tried to connect.
        while True:
            #time.sleep(999999)#I'm using this to prevent connections for testing.
            if len(self.selfInitiatedConnectionList) < 8: #maximum number of outgoing connections = 8
                #Pick a random known peer we are not already connected to
                #and have not recently tried.
                random.seed()
                HOST, = random.sample(knownNodes[self.streamNumber], 1)
                while HOST in self.alreadyAttemptedConnectionsList or HOST in connectedHostsList:
                    #print 'choosing new sample'
                    random.seed()
                    HOST, = random.sample(knownNodes[self.streamNumber], 1)
                    time.sleep(1)
                #Clear out the alreadyAttemptedConnectionsList every half hour so that this program will again attempt a connection to any nodes, even ones it has already tried.
                if (int(time.time()) - resetTime) > 1800:
                    self.alreadyAttemptedConnectionsList = []
                    resetTime = int(time.time())
                self.alreadyAttemptedConnectionsList.append(HOST)
                PORT, timeNodeLastSeen = knownNodes[self.streamNumber][HOST]
                sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(20)
                #Configure the socket for direct, SOCKS4a or SOCKS5 use
                #depending on the user's proxy settings.
                if config.get('bitmessagesettings', 'socksproxytype') == 'none':
                    printLock.acquire()
                    print 'Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    #sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                elif config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS4a':
                    printLock.acquire()
                    print '(Using SOCKS4a) Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    proxytype = socks.PROXY_TYPE_SOCKS4
                    sockshostname = config.get('bitmessagesettings', 'sockshostname')
                    socksport = config.getint('bitmessagesettings', 'socksport')
                    rdns = True #Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
                    if config.getboolean('bitmessagesettings', 'socksauthentication'):
                        socksusername = config.get('bitmessagesettings', 'socksusername')
                        sockspassword = config.get('bitmessagesettings', 'sockspassword')
                        sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
                    else:
                        sock.setproxy(proxytype, sockshostname, socksport, rdns)
                elif config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS5':
                    printLock.acquire()
                    print '(Using SOCKS5) Trying an outgoing connection to', HOST, ':', PORT
                    printLock.release()
                    proxytype = socks.PROXY_TYPE_SOCKS5
                    sockshostname = config.get('bitmessagesettings', 'sockshostname')
                    socksport = config.getint('bitmessagesettings', 'socksport')
                    rdns = True #Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
                    if config.getboolean('bitmessagesettings', 'socksauthentication'):
                        socksusername = config.get('bitmessagesettings', 'socksusername')
                        sockspassword = config.get('bitmessagesettings', 'sockspassword')
                        sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
                    else:
                        sock.setproxy(proxytype, sockshostname, socksport, rdns)
                try:
                    sock.connect((HOST, PORT))
                    #Connected: start the paired receive/send threads and
                    #kick off the protocol with a version message.
                    rd = receiveDataThread()
                    self.emit(SIGNAL("passObjectThrough(PyQt_PyObject)"),rd)
                    objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
                    rd.setup(sock,HOST,PORT,self.streamNumber,self.selfInitiatedConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware)
                    rd.start()
                    printLock.acquire()
                    print self, 'connected to', HOST, 'during outgoing attempt.'
                    printLock.release()
                    sd = sendDataThread()
                    sd.setup(sock,HOST,PORT,self.streamNumber,objectsOfWhichThisRemoteNodeIsAlreadyAware)
                    sd.start()
                    sd.sendVersionMessage()
                except socks.GeneralProxyError, err:
                    printLock.acquire()
                    print 'Could NOT connect to', HOST, 'during outgoing attempt.', err
                    printLock.release()
                    PORT, timeLastSeen = knownNodes[self.streamNumber][HOST]
                    if (int(time.time())-timeLastSeen) > 172800 and len(knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the knownNodes data-structure.
                        del knownNodes[self.streamNumber][HOST]
                        print 'deleting ', HOST, 'from knownNodes because it is more than 48 hours old and we could not connect to it.'
                except socks.Socks5AuthError, err:
                    self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS5 Authentication problem: "+str(err))
                except socks.Socks5Error, err:
                    #NOTE(review): the 'pass' below is a no-op; the print
                    #still runs.
                    pass
                    print 'SOCKS5 error. (It is possible that the server wants authentication).)' ,str(err)
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS5 error. Server might require authentication. "+str(err))
                except socks.Socks4Error, err:
                    print 'Socks4Error:', err
                    #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"SOCKS4 error: "+str(err))
                except socket.error, err:
                    if config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                        print 'Bitmessage MIGHT be having trouble connecting to the SOCKS server. '+str(err)
                        #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"Problem: Bitmessage can not connect to the SOCKS server. "+str(err))
                    else:
                        printLock.acquire()
                        print 'Could NOT connect to', HOST, 'during outgoing attempt.', err
                        printLock.release()
                        PORT, timeLastSeen = knownNodes[self.streamNumber][HOST]
                        if (int(time.time())-timeLastSeen) > 172800 and len(knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the knownNodes data-structure.
                            del knownNodes[self.streamNumber][HOST]
                            print 'deleting ', HOST, 'from knownNodes because it is more than 48 hours old and we could not connect to it.'
                except Exception, err:
                    print 'An exception has occurred in the outgoingSynSender thread that was not caught by other exception types:', err
            time.sleep(0.1)
#Only one singleListener thread will ever exist. It creates the receiveDataThread and sendDataThread for each incoming connection. Note that it cannot set the stream number because it is not known yet- the other node will have to tell us its stream number in a version message. If we don't care about their stream, we will close the connection (within the recversion function of the recieveData thread)
class singleListener(QThread):
    """The single listening thread: accepts incoming TCP connections and
    spawns a receiveDataThread/sendDataThread pair for each one. The
    stream number is passed as -1 because it is not known until the peer
    sends its version message. Listening is suspended while a SOCKS proxy
    is configured."""
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        #We don't want to accept incoming connections if the user is using a SOCKS proxy. If they eventually select proxy 'none' then this will start listening for connections.
        while config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
            time.sleep(300)
        print 'Listening for incoming connections.'
        HOST = '' # Symbolic name meaning all available interfaces
        PORT = config.getint('bitmessagesettings', 'port')
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #This option apparently avoids the TIME_WAIT state so that we can rebind faster
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((HOST, PORT))
        sock.listen(2)
        self.incomingConnectionList = [] #This list isn't used for anything. The reason it exists is because receiveData threads expect that a list be passed to them. They expect this because the outgoingSynSender thread DOES use a similar list to keep track of the number of outgoing connections it has created.
        while True:
            #We don't want to accept incoming connections if the user is using a SOCKS proxy. If the user eventually select proxy 'none' then this will start listening for connections.
            while config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                time.sleep(10)
            a,(HOST,PORT) = sock.accept()
            #Users are finding that if they run more than one node in the same network (thus with the same public IP), they can not connect with the second node. This is because this section of code won't accept the connection from the same IP. This problem will go away when the Bitmessage network grows beyond being tiny but in the mean time I'll comment out this code section.
            """while HOST in connectedHostsList:
                print 'incoming connection is from a host in connectedHostsList (we are already connected to it). Ignoring it.'
                a.close()
                a,(HOST,PORT) = sock.accept()"""
            #Spawn the receive/send thread pair for this accepted socket.
            rd = receiveDataThread()
            self.emit(SIGNAL("passObjectThrough(PyQt_PyObject)"),rd)
            objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
            rd.setup(a,HOST,PORT,-1,self.incomingConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware)
            printLock.acquire()
            print self, 'connected to', HOST,'during INCOMING request.'
            printLock.release()
            rd.start()
            sd = sendDataThread()
            sd.setup(a,HOST,PORT,-1,objectsOfWhichThisRemoteNodeIsAlreadyAware)
            sd.start()
#This thread is created either by the synSenderThread(for outgoing connections) or the singleListenerThread(for incoming connectiosn).
class receiveDataThread(QThread):
def __init__(self, parent = None):
    #Initialize per-connection state; the actual socket and peer details
    #are supplied later via setup().
    QThread.__init__(self, parent)
    self.data = '' #receive buffer of raw bytes not yet processed
    self.verackSent = False
    self.verackReceived = False
def setup(self,sock,HOST,port,streamNumber,selfInitiatedConnectionList,objectsOfWhichThisRemoteNodeIsAlreadyAware):
    #Attach this thread to an already-connected socket and record the
    #peer/stream bookkeeping shared with the matching sendDataThread.
    #streamNumber is -1 for incoming connections (unknown until the peer's
    #version message arrives).
    self.sock = sock
    self.HOST = HOST
    self.PORT = port
    self.sock.settimeout(600) #We'll send out a pong every 5 minutes to make sure the connection stays alive if there has been no other traffic to send lately.
    self.streamNumber = streamNumber
    self.selfInitiatedConnectionList = selfInitiatedConnectionList
    self.selfInitiatedConnectionList.append(self)
    self.payloadLength = 0 #This is the protocol payload length thus it doesn't include the 24 byte message header
    self.receivedgetbiginv = False #Gets set to true once we receive a getbiginv message from our peer. An abusive peer might request it too much so we use this variable to check whether they have already asked for a big inv message.
    self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave = {}
    connectedHostsList[self.HOST] = 0 #The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it to this list so that the outgoingSynSender thread doesn't try to connect to it.
    self.connectionIsOrWasFullyEstablished = False #set to true after the remote node and I accept each other's version messages. This is needed to allow the user interface to accurately reflect the current number of connections.
    if self.streamNumber == -1: #This was an incoming connection. Send out a version message if we accept the other node's version message.
        self.initiatedConnection = False
    else:
        self.initiatedConnection = True
    self.ackDataThatWeHaveYetToSend = [] #When we receive a message bound for us, we store the acknowledgement that we need to send (the ackdata) here until we are done processing all other data received from this peer.
    self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware
    def run(self):
        """Main receive loop: accumulate socket data into self.data and hand
        it to processData(). On timeout, error, or remote close, tear the
        connection down and update the shared connection bookkeeping.
        """
        while True:
            try:
                self.data = self.data + self.sock.recv(65536)
            except socket.timeout:
                printLock.acquire()
                print 'Timeout occurred waiting for data. Closing receiveData thread.'
                printLock.release()
                break
            except Exception, err:
                printLock.acquire()
                print 'sock.recv error. Closing receiveData thread.', err
                printLock.release()
                break
            #print 'Received', repr(self.data)
            if self.data == "":
                #recv() returning an empty string means the peer closed the connection cleanly.
                printLock.acquire()
                print 'Connection closed. Closing receiveData thread.'
                printLock.release()
                break
            else:
                self.processData()
        #Shutdown path: close the socket, deregister this thread, and notify the matching sendData thread.
        try:
            self.sock.close()
        except Exception, err:
            print 'Within receiveDataThread run(), self.sock.close() failed.', err
        try:
            self.selfInitiatedConnectionList.remove(self)
            printLock.acquire()
            print 'removed self (a receiveDataThread) from ConnectionList'
            printLock.release()
        except:
            pass
        broadcastToSendDataQueues((0, 'shutdown', self.HOST))
        if self.connectionIsOrWasFullyEstablished: #We don't want to decrement the number of connections and show the result if we never incremented it in the first place (which we only do if the connection is fully established- meaning that both nodes accepted each other's version packets.)
            connectionsCountLock.acquire()
            connectionsCount[self.streamNumber] -= 1
            self.emit(SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"),self.streamNumber,connectionsCount[self.streamNumber])
            printLock.acquire()
            print 'Updating network status tab with current connections count:', connectionsCount[self.streamNumber]
            printLock.release()
            connectionsCountLock.release()
        try:
            del connectedHostsList[self.HOST]
        except Exception, err:
            print 'Could not delete', self.HOST, 'from connectedHostsList.', err
    def processData(self):
        """Parse one complete protocol message out of self.data (if one has
        fully arrived) and dispatch it by command name, then recurse to
        process any further buffered messages.

        Wire layout: 4 magic bytes, 12-byte command, 4-byte payload length,
        4-byte sha512-checksum prefix, then the payload itself.
        """
        global verbose
        #if verbose >= 2:
            #printLock.acquire()
            #print 'self.data is currently ', repr(self.data)
            #printLock.release()
        if len(self.data) < 20: #if so little of the data has arrived that we can't even unpack the payload length
            pass
        elif self.data[0:4] != '\xe9\xbe\xb4\xd9':
            if verbose >= 2:
                printLock.acquire()
                sys.stderr.write('The magic bytes were not correct. First 40 bytes of data: %s\n' % repr(self.data[0:40]))
                printLock.release()
            self.data = ""
        else:
            self.payloadLength, = unpack('>L',self.data[16:20])
            if len(self.data) >= self.payloadLength+24: #check if the whole message has arrived yet. If it has,...
                if self.data[20:24] == hashlib.sha512(self.data[24:self.payloadLength+24]).digest()[0:4]:#test the checksum in the message. If it is correct...
                    #print 'message checksum is correct'
                    #The time we've last seen this node is obviously right now since we just received valid data from it. So update the knownNodes list so that other peers can be made aware of its existance.
                    if self.initiatedConnection: #The remote port is only something we should share with others if it is the remote node's incoming port (rather than some random operating-system-assigned outgoing port).
                        knownNodes[self.streamNumber][self.HOST] = (self.PORT,int(time.time()))
                    if self.payloadLength <= 180000000: #If the size of the message is greater than 180MB, ignore it. (I get memory errors when processing messages much larger than this though it is concievable that this value will have to be lowered if some systems are less tolarant of large messages.)
                        remoteCommand = self.data[4:16]
                        printLock.acquire()
                        print 'remoteCommand ', remoteCommand, 'from', self.HOST
                        printLock.release()
                        #Only version/verack are handled before the handshake completes; every other command requires a fully established connection.
                        if remoteCommand == 'version\x00\x00\x00\x00\x00':
                            self.recversion()
                        elif remoteCommand == 'verack\x00\x00\x00\x00\x00\x00':
                            self.recverack()
                        elif remoteCommand == 'addr\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recaddr()
                        elif remoteCommand == 'getpubkey\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recgetpubkey()
                        elif remoteCommand == 'pubkey\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recpubkey()
                        elif remoteCommand == 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recinv()
                        elif remoteCommand == 'getdata\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recgetdata()
                        elif remoteCommand == 'getbiginv\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendBigInv()
                        elif remoteCommand == 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recmsg()
                        elif remoteCommand == 'broadcast\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.recbroadcast()
                        elif remoteCommand == 'getaddr\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendaddr()
                        elif remoteCommand == 'ping\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            self.sendpong()
                        elif remoteCommand == 'pong\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            pass
                        elif remoteCommand == 'alert\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                            pass
                    self.data = self.data[self.payloadLength+24:]#take this message out and then process the next message
                    if self.data == '':
                        #Buffer drained: use the idle time to check the peer's advertised objects against our inventory and request one we don't have.
                        while len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
                            random.seed()
                            objectHash, = random.sample(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave, 1)
                            if objectHash in inventory:
                                printLock.acquire()
                                print 'Inventory (in memory) already has object listed in inv message.'
                                printLock.release()
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]
                            elif isInSqlInventory(objectHash):
                                printLock.acquire()
                                print 'Inventory (SQL on disk) already has object listed in inv message.'
                                printLock.release()
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]
                            else:
                                #print 'processData function making request for object:', objectHash.encode('hex')
                                self.sendgetdata(objectHash)
                                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash] #It is possible that the remote node doesn't respond with the object. In that case, we'll very likely get it from someone else anyway.
                                break
                        if len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
                            printLock.acquire()
                            print 'within processData, number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now', len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)
                            printLock.release()
                        if len(self.ackDataThatWeHaveYetToSend) > 0:
                            self.data = self.ackDataThatWeHaveYetToSend.pop()
                    self.processData()
                else:
                    print 'Checksum incorrect. Clearing this message.'
                    self.data = self.data[self.payloadLength+24:]
def isProofOfWorkSufficient(self):
POW, = unpack('>Q',hashlib.sha512(hashlib.sha512(self.data[24:32]+ hashlib.sha512(self.data[32:24+self.payloadLength]).digest()).digest()).digest()[0:8])
#print 'POW:', POW
#Notice that I have divided the averageProofOfWorkNonceTrialsPerByte by two. This makes the POW requirement easier. This gives us wiggle-room: if we decide that we want to make the POW easier, the change won't obsolete old clients because they already expect a lower POW. If we decide that the current work done by clients feels approperate then we can remove this division by 2 and make the requirement match what is actually done by a sending node. If we want to raise the POW requirement then old nodes will HAVE to upgrade no matter what.
return POW < 2**64 / ((self.payloadLength+payloadLengthExtraBytes) * (averageProofOfWorkNonceTrialsPerByte/2))
def sendpong(self):
print 'Sending pong'
self.sock.sendall('\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
def recverack(self):
print 'verack received'
self.verackReceived = True
if self.verackSent == True:
#We have thus both sent and received a verack.
self.connectionFullyEstablished()
    def connectionFullyEstablished(self):
        """Run once both verack messages have been exchanged: update the UI
        counters, advertise this node to our peers, and exchange addr/inv
        data with the new peer (unless we already have too many connections).
        """
        self.connectionIsOrWasFullyEstablished = True
        if not self.initiatedConnection:
            self.emit(SIGNAL("setStatusIcon(PyQt_PyObject)"),'green')
        #Update the 'Network Status' tab
        connectionsCountLock.acquire()
        connectionsCount[self.streamNumber] += 1
        self.emit(SIGNAL("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)"),self.streamNumber,connectionsCount[self.streamNumber])
        connectionsCountLock.release()
        remoteNodeIncomingPort, remoteNodeSeenTime = knownNodes[self.streamNumber][self.HOST]
        printLock.acquire()
        print 'Connection fully established with', self.HOST, remoteNodeIncomingPort
        print 'broadcasting addr from within connectionFullyEstablished function.'
        printLock.release()
        self.broadcastaddr([(int(time.time()), self.streamNumber, 1, self.HOST, remoteNodeIncomingPort)]) #This lets all of our peers know about this new node.
        self.sendaddr() #This is one large addr message to this one peer.
        if connectionsCount[self.streamNumber] > 150:
            printLock.acquire()
            print 'We are connected to too many people. Closing connection.'
            printLock.release()
            self.sock.close()
            return
        self.sendBigInv()
    def sendBigInv(self): #I used capitals in for this function name because there is no such Bitmessage command as 'biginv'.
        """Advertise our whole recent inventory (disk + memory) to this peer
        via one or more inv messages, skipping hashes the peer already knows.
        Only honored once per connection to limit abuse.
        """
        if self.receivedgetbiginv:
            print 'We have already sent a big inv message to this peer. Ignoring request.'
            return
        else:
            self.receivedgetbiginv = True
        sqlLock.acquire()
        #Select all hashes which are younger than two days old and in this stream.
        t = (int(time.time())-maximumAgeOfObjectsThatIAdvertiseToOthers,self.streamNumber)
        sqlSubmitQueue.put('''SELECT hash FROM inventory WHERE receivedtime>? and streamnumber=?''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        bigInvList = {}
        for row in queryreturn:
            hash, = row
            if hash not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                bigInvList[hash] = 0
            else:
                printLock.acquire()
                print 'Not including an object hash in a big inv message because the remote node is already aware of it.'#This line is here to check that this feature is working.
                printLock.release()
        #We also have messages in our inventory in memory (which is a python dictionary). Let's fetch those too.
        for hash, storedValue in inventory.items():
            if hash not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                objectType, streamNumber, payload, receivedTime = storedValue
                if streamNumber == self.streamNumber and receivedTime > int(time.time())-maximumAgeOfObjectsThatIAdvertiseToOthers:
                    bigInvList[hash] = 0
            else:
                printLock.acquire()
                print 'Not including an object hash in a big inv message because the remote node is already aware of it.'#This line is here to check that this feature is working.
                printLock.release()
        numberOfObjectsInInvMessage = 0
        payload = ''
        #Now let us start appending all of these hashes together. They will be sent out in a big inv message to our new peer.
        for hash, storedValue in bigInvList.items():
            payload += hash
            numberOfObjectsInInvMessage += 1
            if numberOfObjectsInInvMessage >= 50000: #We can only send a max of 50000 items per inv message but we may have more objects to advertise. They must be split up into multiple inv messages.
                self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage,payload)
                payload = ''
                numberOfObjectsInInvMessage = 0
        if numberOfObjectsInInvMessage > 0:
            self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage,payload)
#Self explanatory. Notice that there is also a broadcastinv function for broadcasting invs to everyone in our stream.
def sendinvMessageToJustThisOnePeer(self,numberOfObjects,payload):
payload = encodeVarint(numberOfObjects) + payload
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload))
headerData += hashlib.sha512(payload).digest()[:4]
print 'Sending huge inv message with', numberOfObjects, 'objects to just this one peer'
self.sock.send(headerData + payload)
    #We have received a broadcast message
    def recbroadcast(self):
        """Validate a received broadcast object (POW, embedded time, size,
        duplicate check), add it to the inventory, relay it to peers, then
        process it — padding processing time to resist timing attacks.
        """
        self.messageProcessingStartTime = time.time()
        #First we must check to make sure the proof of work is sufficient.
        if not self.isProofOfWorkSufficient():
            print 'Proof of work in broadcast message insufficient.'
            return
        embeddedTime, = unpack('>I',self.data[32:36])
        if embeddedTime > (int(time.time())+10800): #prevent funny business
            print 'The embedded time in this broadcast message is more than three hours in the future. That doesn\'t make sense. Ignoring message.'
            return
        if embeddedTime < (int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept):
            print 'The embedded time in this broadcast message is too old. Ignoring message.'
            return
        if self.payloadLength < 66: #todo: When version 1 addresses are completely abandoned, this should be changed to 180
            print 'The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.'
            return
        inventoryLock.acquire()
        self.inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
        if self.inventoryHash in inventory:
            print 'We have already received this broadcast object. Ignoring.'
            inventoryLock.release()
            return
        elif isInSqlInventory(self.inventoryHash):
            print 'We have already received this broadcast object (it is stored on disk in the SQL inventory). Ignoring it.'
            inventoryLock.release()
            return
        #It is valid so far. Let's let our peers know about it.
        objectType = 'broadcast'
        inventory[self.inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
        inventoryLock.release()
        self.broadcastinv(self.inventoryHash)
        self.emit(SIGNAL("incrementNumberOfBroadcastsProcessed()"))
        self.processbroadcast()#When this function returns, we will have either successfully processed this broadcast because we are interested in it, ignored it because we aren't interested in it, or found problem with the broadcast that warranted ignoring it.
        # Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we haven't used the specified amount of time, we shall sleep. These values are mostly the same values used for msg messages although broadcast messages are processed faster.
        if self.payloadLength > 100000000: #Size is greater than 100 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 100 #seconds.
        elif self.payloadLength > 10000000: #Between 100 and 10 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 20 #seconds.
        elif self.payloadLength > 1000000: #Between 10 and 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = 3 #seconds.
        else: #Less than 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = .1 #seconds.
        sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.messageProcessingStartTime)
        if sleepTime > 0:
            printLock.acquire()
            print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
            printLock.release()
            time.sleep(sleepTime)
        printLock.acquire()
        print 'Total message processing time:', time.time()- self.messageProcessingStartTime, 'seconds.'
        printLock.release()
    #A broadcast message has a valid time and POW and requires processing. The recbroadcast function calls this one.
    def processbroadcast(self):
        """Parse and verify a broadcast payload, and if it comes from a
        sender we watch, verify its signature and store it in the inbox.

        Two sender-address formats are handled: version 2 (ECDSA keys) and
        the deprecated version 1 (RSA keys). readPosition tracks the current
        byte offset into self.data while walking the wire format.
        """
        readPosition = 36
        broadcastVersion, broadcastVersionLength = decodeVarint(self.data[readPosition:readPosition+9])
        if broadcastVersion <> 1:
            #Cannot decode incoming broadcast versions higher than 1. Assuming the sender isn\' being silly, you should upgrade Bitmessage because this message shall be ignored.
            return
        readPosition += broadcastVersionLength
        sendersAddressVersion, sendersAddressVersionLength = decodeVarint(self.data[readPosition:readPosition+9])
        if sendersAddressVersion == 0 or sendersAddressVersion >=3:
            #Cannot decode senderAddressVersion higher than 2. Assuming the sender isn\' being silly, you should upgrade Bitmessage because this message shall be ignored.
            return
        readPosition += sendersAddressVersionLength
        if sendersAddressVersion == 2:
            sendersStream, sendersStreamLength = decodeVarint(self.data[readPosition:readPosition+9])
            if sendersStream <= 0 or sendersStream <> self.streamNumber:
                return
            readPosition += sendersStreamLength
            behaviorBitfield = self.data[readPosition:readPosition+4]
            readPosition += 4
            #Public keys on the wire omit the 0x04 uncompressed-point prefix; restore it here.
            sendersPubSigningKey = '\x04' + self.data[readPosition:readPosition+64]
            readPosition += 64
            sendersPubEncryptionKey = '\x04' + self.data[readPosition:readPosition+64]
            readPosition += 64
            sendersHash = self.data[readPosition:readPosition+20]
            if sendersHash not in broadcastSendersForWhichImWatching:
                #Display timing data
                printLock.acquire()
                print 'Time spent deciding that we are not interested in this broadcast:', time.time()- self.messageProcessingStartTime
                printLock.release()
                return
            #At this point, this message claims to be from sendersHash and we are interested in it. We still have to hash the public key to make sure it is truly the key that matches the hash, and also check the signiture.
            readPosition += 20
            sha = hashlib.new('sha512')
            sha.update(sendersPubSigningKey+sendersPubEncryptionKey)
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            if ripe.digest() != sendersHash:
                #The sender of this message lied.
                return
            messageEncodingType, messageEncodingTypeLength = decodeVarint(self.data[readPosition:readPosition+9])
            if messageEncodingType == 0:
                return
            readPosition += messageEncodingTypeLength
            messageLength, messageLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += messageLengthLength
            message = self.data[readPosition:readPosition+messageLength]
            readPosition += messageLength
            readPositionAtBottomOfMessage = readPosition
            signatureLength, signatureLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += signatureLengthLength
            signature = self.data[readPosition:readPosition+signatureLength]
            try:
                #The signature covers everything from the broadcast version through the end of the message text.
                highlevelcrypto.verify(self.data[36:readPositionAtBottomOfMessage],signature,sendersPubSigningKey.encode('hex'))
                print 'ECDSA verify passed'
            except Exception, err:
                print 'ECDSA verify failed', err
                return
            #verify passed
            fromAddress = encodeAddress(sendersAddressVersion,sendersStream,ripe.digest())
            print 'fromAddress:', fromAddress
            #Encoding type 2 embeds 'Subject:...\nBody:...' markers; type 1 is a bare body.
            if messageEncodingType == 2:
                bodyPositionIndex = string.find(message,'\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex+6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                body = message
                subject = ''
            elif messageEncodingType == 0:
                print 'messageEncodingType == 0. Doing nothing with the message.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''
            toAddress = '[Broadcast subscribers]'
            if messageEncodingType <> 0:
                sqlLock.acquire()
                t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
            #Display timing data
            printLock.acquire()
            print 'Time spent processing this interesting broadcast:', time.time()- self.messageProcessingStartTime
            printLock.release()
        elif sendersAddressVersion == 1:
            #Deprecated RSA-based address format; this branch will be removed when version 1 addresses are abandoned.
            sendersStream, sendersStreamLength = decodeVarint(self.data[readPosition:readPosition+9])
            if sendersStream <= 0:
                return
            readPosition += sendersStreamLength
            sendersHash = self.data[readPosition:readPosition+20]
            if sendersHash not in broadcastSendersForWhichImWatching:
                return
            #At this point, this message claims to be from sendersHash and we are interested in it. We still have to hash the public key to make sure it is truly the key that matches the hash, and also check the signiture.
            readPosition += 20
            nLength, nLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            if nLength < 1:
                return
            readPosition += nLengthLength
            nString = self.data[readPosition:readPosition+nLength]
            readPosition += nLength
            eLength, eLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            if eLength < 1:
                return
            readPosition += eLengthLength
            eString = self.data[readPosition:readPosition+eLength]
            #We are now ready to hash the public key and verify that its hash matches the hash claimed in the message
            readPosition += eLength
            sha = hashlib.new('sha512')
            sha.update(nString+eString)
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            if ripe.digest() != sendersHash:
                #The sender of this message lied.
                return
            readPositionAtBeginningOfMessageEncodingType = readPosition
            messageEncodingType, messageEncodingTypeLength = decodeVarint(self.data[readPosition:readPosition+9])
            if messageEncodingType == 0:
                return
            readPosition += messageEncodingTypeLength
            messageLength, messageLengthLength = decodeVarint(self.data[readPosition:readPosition+9])
            readPosition += messageLengthLength
            message = self.data[readPosition:readPosition+messageLength]
            readPosition += messageLength
            #The RSA signature is the same size as the modulus n, so nLength doubles as the signature length.
            signature = self.data[readPosition:readPosition+nLength]
            sendersPubkey = rsa.PublicKey(convertStringToInt(nString),convertStringToInt(eString))
            #print 'senders Pubkey', sendersPubkey
            try:
                rsa.verify(self.data[readPositionAtBeginningOfMessageEncodingType:readPositionAtBeginningOfMessageEncodingType+messageEncodingTypeLength+messageLengthLength+messageLength],signature,sendersPubkey)
                print 'verify passed'
            except Exception, err:
                print 'verify failed', err
                return
            #verify passed
            fromAddress = encodeAddress(sendersAddressVersion,sendersStream,ripe.digest())
            print 'fromAddress:', fromAddress
            if messageEncodingType == 2:
                bodyPositionIndex = string.find(message,'\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex+6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                body = message
                subject = ''
            elif messageEncodingType == 0:
                print 'messageEncodingType == 0. Doing nothing with the message.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''
            toAddress = '[Broadcast subscribers]'
            if messageEncodingType <> 0:
                sqlLock.acquire()
                t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
                sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                sqlLock.release()
                self.emit(SIGNAL("displayNewMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
#We have received a msg message.
def recmsg(self):
self.messageProcessingStartTime = time.time()
#First we must check to make sure the proof of work is sufficient.
if not self.isProofOfWorkSufficient():
print 'Proof of work in msg message insufficient.'
return
readPosition = 32
embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
if embeddedTime > int(time.time())+10800:
print 'The time in the msg message is too new. Ignoring it. Time:', embeddedTime
return
if embeddedTime < int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept:
print 'The time in the msg message is too old. Ignoring it. Time:', embeddedTime
return
readPosition += 4
streamNumberAsClaimedByMsg, streamNumberAsClaimedByMsgLength = decodeVarint(self.data[readPosition:readPosition+9])
if streamNumberAsClaimedByMsg != self.streamNumber:
print 'The stream number encoded in this msg (' + str(streamNumberAsClaimedByMsg) + ') message does not match the stream number on which it was received. Ignoring it.'
return
readPosition += streamNumberAsClaimedByMsgLength
self.inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
inventoryLock.acquire()
if self.inventoryHash in inventory:
print 'We have already received this msg message. Ignoring.'
inventoryLock.release()
return
elif isInSqlInventory(self.inventoryHash):
print 'We have already received this msg message (it is stored on disk in the SQL inventory). Ignoring it.'
inventoryLock.release()
return
#This msg message is valid. Let's let our peers know about it.
objectType = 'msg'
inventory[self.inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
inventoryLock.release()
self.broadcastinv(self.inventoryHash)
self.emit(SIGNAL("incrementNumberOfMessagesProcessed()"))
self.processmsg(readPosition) #When this function returns, we will have either successfully processed the message bound for us, ignored it because it isn't bound for us, or found problem with the message that warranted ignoring it.
# Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we haven't used the specified amount of time, we shall sleep. These values are based on test timings and you may change them at-will.
if self.payloadLength > 100000000: #Size is greater than 100 megabytes
lengthOfTimeWeShouldUseToProcessThisMessage = 100 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 MB message: 3.7 seconds.
elif self.payloadLength > 10000000: #Between 100 and 10 megabytes
lengthOfTimeWeShouldUseToProcessThisMessage = 20 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 10 MB message: 0.53 seconds. Actual length of time it takes in practice when processing a real message: 1.44 seconds.
elif self.payloadLength > 1000000: #Between 10 and 1 megabyte
lengthOfTimeWeShouldUseToProcessThisMessage = 3 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 1 MB message: 0.18 seconds. Actual length of time it takes in practice when processing a real message: 0.30 seconds.
else: #Less than 1 megabyte
lengthOfTimeWeShouldUseToProcessThisMessage = .6 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 KB message: 0.15 seconds. Actual length of time it takes in practice when processing a real message: 0.25 seconds.
sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.messageProcessingStartTime)
if sleepTime > 0:
printLock.acquire()
print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
printLock.release()
time.sleep(sleepTime)
printLock.acquire()
print 'Total message processing time:', time.time()- self.messageProcessingStartTime, 'seconds.'
printLock.release()
#This section is for my RSA keys (version 1 addresses). If we don't have any version 1 addresses it will never run. This code will soon be removed.
initialDecryptionSuccessful = False
infile = cStringIO.StringIO(self.data[readPosition:self.payloadLength+24])
outfile = cStringIO.StringIO()
#print 'len(myRSAAddressHashes.items()):', len(myRSAAddressHashes.items())
for key, value in myRSAAddressHashes.items():
try:
decrypt_bigfile(infile, outfile, value)
#The initial decryption passed though there is a small chance that the message isn't actually for me. We'll need to check that the 20 zeros are present.
#print 'initial decryption successful using key', repr(key)
initialDecryptionSuccessful = True
printLock.acquire()
print 'Initial decryption passed'
printLock.release()
break
except Exception, err:
infile.seek(0)
#print 'Exception:', err
#print 'outfile len is:', len(outfile.getvalue()),'data is:', repr(outfile.getvalue())
#print 'Initial decryption failed using key', value
#decryption failed for this key. The message is for someone else (or for a different key of mine).
if initialDecryptionSuccessful and outfile.getvalue()[:20] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00': #this run of 0s allows the true message receiver to identify his message
#This is clearly a message bound for me.
outfile.seek(0)
data = outfile.getvalue()
readPosition = 20 #To start reading past the 20 zero bytes
messageVersion, messageVersionLength = decodeVarint(data[readPosition:readPosition+10])
readPosition += messageVersionLength
if messageVersion == 1:
bitfieldBehavior = data[readPosition:readPosition+4]
readPosition += 4
sendersAddressVersionNumber, sendersAddressVersionNumberLength = decodeVarint(data[readPosition:readPosition+10])
if sendersAddressVersionNumber == 1:
readPosition += sendersAddressVersionNumberLength
sendersStreamNumber, sendersStreamNumberLength = decodeVarint(data[readPosition:readPosition+10])
if sendersStreamNumber == 0:
print 'sendersStreamNumber = 0. Ignoring message'
else:
readPosition += sendersStreamNumberLength
sendersNLength, sendersNLengthLength = decodeVarint(data[readPosition:readPosition+10])
readPosition += sendersNLengthLength
sendersN = data[readPosition:readPosition+sendersNLength]
readPosition += sendersNLength
sendersELength, sendersELengthLength = decodeVarint(data[readPosition:readPosition+10])
readPosition += sendersELengthLength
sendersE = data[readPosition:readPosition+sendersELength]
readPosition += sendersELength
endOfThePublicKeyPosition = readPosition
messageEncodingType, messageEncodingTypeLength = decodeVarint(data[readPosition:readPosition+10])
readPosition += messageEncodingTypeLength
print 'Message Encoding Type:', messageEncodingType
messageLength, messageLengthLength = decodeVarint(data[readPosition:readPosition+10])
print 'message length:', messageLength
readPosition += messageLengthLength
message = data[readPosition:readPosition+messageLength]
#print 'First 150 characters of message:', repr(message[:150])
readPosition += messageLength
ackLength, ackLengthLength = decodeVarint(data[readPosition:readPosition+10])
#print 'ackLength:', ackLength
readPosition += ackLengthLength
ackData = data[readPosition:readPosition+ackLength]
readPosition += ackLength
payloadSigniture = data[readPosition:readPosition+sendersNLength] #We're using the length of the sender's n because it should match the signiture size.
sendersPubkey = rsa.PublicKey(convertStringToInt(sendersN),convertStringToInt(sendersE))
print 'sender\'s Pubkey', sendersPubkey
#Check the cryptographic signiture
verifyPassed = False
try:
rsa.verify(data[:-len(payloadSigniture)],payloadSigniture, sendersPubkey)
print 'verify passed'
verifyPassed = True
except Exception, err:
print 'verify failed', err
if verifyPassed:
#calculate the fromRipe.
sha = hashlib.new('sha512')
sha.update(sendersN+sendersE)
ripe = hashlib.new('ripemd160')
ripe.update(sha.digest())
#Let's store the public key in case we want to reply to this person.
#We don't have the correct nonce in order to send out a pubkey message so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed to not do.)
t = (ripe.digest(),False,'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'+data[20+messageVersionLength:endOfThePublicKeyPosition],int(time.time()),'yes')
sqlLock.acquire()
sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
blockMessage = False #Gets set to True if the user shouldn't see the message according to black or white lists.
fromAddress = encodeAddress(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest())
if config.get('bitmessagesettings', 'blackwhitelist') == 'black': #If we are using a blacklist
t = (fromAddress,)
sqlLock.acquire()
sqlSubmitQueue.put('''SELECT label, enabled FROM blacklist where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
for row in queryreturn:
label, enabled = row
if enabled:
print 'Message ignored because address is in blacklist.'
blockMessage = True
else: #We're using a whitelist
t = (fromAddress,)
sqlLock.acquire()
sqlSubmitQueue.put('''SELECT label, enabled FROM whitelist where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn == []:
print 'Message ignored because address not in whitelist.'
blockMessage = True
for row in queryreturn: #It could be in the whitelist but disabled. Let's check.
label, enabled = row
if not enabled:
print 'Message ignored because address in whitelist but not enabled.'
blockMessage = True
if not blockMessage:
print 'fromAddress:', fromAddress
print 'First 150 characters of message:', repr(message[:150])
#Look up the destination address (my address) based on the destination ripe hash.
#I realize that I could have a data structure devoted to this task, or maintain an indexed table
#in the sql database, but I would prefer to minimize the number of data structures this program
#uses. Searching linearly through the user's short list of addresses doesn't take very long anyway.
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
if hash == key:
toAddress = addressInKeysFile
toLabel = config.get(addressInKeysFile, 'label')
if toLabel == '':
toLabel = addressInKeysFile
break
if messageEncodingType == 2:
bodyPositionIndex = string.find(message,'\nBody:')
if bodyPositionIndex > 1:
subject = message[8:bodyPositionIndex]
body = message[bodyPositionIndex+6:]
else:
subject = ''
body = message
elif messageEncodingType == 1:
body = message
subject = ''
elif messageEncodingType == 0:
print 'messageEncodingType == 0. Doing nothing with the message. They probably just sent it so that we would store their public key or send their ack data for them.'
else:
body = 'Unknown encoding type.\n\n' + repr(message)
subject = ''
print 'within recmsg, self.inventoryHash is', repr(self.inventoryHash)
if messageEncodingType <> 0:
sqlLock.acquire()
t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
self.emit(SIGNAL("displayNewMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
#Now let us worry about the acknowledgement data
#We'll need to make sure that our client will properly process the ackData; if the packet is malformed, it might cause us to clear out self.data and an attacker could use that behavior to determine that we decoded this message.
ackDataValidThusFar = True
if len(ackData) < 24:
print 'The length of ackData is unreasonably short. Not sending ackData.'
ackDataValidThusFar = False
if ackData[0:4] != '\xe9\xbe\xb4\xd9':
print 'Ackdata magic bytes were wrong. Not sending ackData.'
ackDataValidThusFar = False
if ackDataValidThusFar:
ackDataPayloadLength, = unpack('>L',ackData[16:20])
if len(ackData)-24 != ackDataPayloadLength: #This ackData includes the protocol header which is not counted in the payload length.
print 'ackData payload length doesn\'t match the payload length specified in the header. Not sending ackdata.'
ackDataValidThusFar = False
if ackDataValidThusFar:
print 'ackData is valid. Will process it.'
self.ackDataThatWeHaveYetToSend.append(ackData) #When we have processed all data, the processData function will pop the ackData out and process it as if it is a message received from our peer.
else:
print 'This program cannot decode messages from addresses with versions higher than 1. Ignoring.'
statusbar = 'This program cannot decode messages from addresses with versions higher than 1. Ignoring it.'
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
else:
statusbar = 'Error: Cannot decode incoming msg versions higher than 1. Assuming the sender isn\' being silly, you should upgrade Bitmessage. Ignoring message.'
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
else:
printLock.acquire()
print 'Could not decrypt with any RSA keys if you have any.'
printLock.release()
infile.close()
outfile.close()
#A msg message has a valid time and POW and requires processing. The recmsg function calls this one.
	def processmsg(self,readPosition):
		"""Process a msg object whose time and proof of work have already been
		validated (the recmsg function calls this one). First checks whether the
		msg is an acknowledgement we are watching for; otherwise attempts EC
		decryption with each of our private keys. On success it parses the
		unencrypted payload, verifies the ECDSA signature, stores the sender's
		pubkey, applies the user's black/whitelist, files the message into the
		inbox, and queues the embedded ackData (if well-formed) for sending."""
		initialDecryptionSuccessful = False
		#Let's check whether this is a message acknowledgement bound for us.
		if self.data[readPosition:24+self.payloadLength] in ackdataForWhichImWatching:
			printLock.acquire()
			print 'This msg IS an acknowledgement bound for me.'
			printLock.release()
			del ackdataForWhichImWatching[self.data[readPosition:24+self.payloadLength]]
			#Mark the corresponding sent message as acknowledged in the database and in the UI.
			t = ('ackreceived',self.data[readPosition:24+self.payloadLength])
			sqlLock.acquire()
			sqlSubmitQueue.put('UPDATE sent SET status=? WHERE ackdata=?')
			sqlSubmitQueue.put(t)
			sqlReturnQueue.get()
			sqlLock.release()
			self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),self.data[readPosition:24+self.payloadLength],'Acknowledgement of the message received just now.')
			return
		else:
			printLock.acquire()
			print 'This was NOT an acknowledgement bound for me.' #Msg potential ack data:', repr(self.data[readPosition:24+self.payloadLength])
			#print 'ackdataForWhichImWatching', ackdataForWhichImWatching
			printLock.release()
		#This is not an acknowledgement bound for me. See if it is a message bound for me by trying to decrypt it with my private keys.
		for key, cryptorObject in myECAddressHashes.items():
			try:
				data = cryptorObject.decrypt(self.data[readPosition:self.payloadLength+24])
				toRipe = key #This is the RIPE hash of my pubkeys. We need this below to compare to the destination_ripe included in the encrypted data.
				initialDecryptionSuccessful = True
				print 'EC decryption successful using key associated with ripe hash:', key.encode('hex')
				break
			except Exception, err:
				#Decryption failures are expected for messages not bound for us; just try the next key.
				pass
				#print 'cryptorObject.decrypt Exception:', err
		if not initialDecryptionSuccessful:
			#This is not a message bound for me.
			printLock.acquire()
			print 'Length of time program spent failing to decrypt this message:', time.time()- self.messageProcessingStartTime, 'seconds.'
			printLock.release()
		else:
			#This is a message bound for me.
			#Reset the cursor: we now parse the decrypted plaintext, not self.data.
			readPosition = 0
			messageVersion, messageVersionLength = decodeVarint(data[readPosition:readPosition+10])
			readPosition += messageVersionLength
			if messageVersion != 1:
				print 'Cannot understand message versions other than one. Ignoring message.'
				return
			sendersAddressVersionNumber, sendersAddressVersionNumberLength = decodeVarint(data[readPosition:readPosition+10])
			readPosition += sendersAddressVersionNumberLength
			if sendersAddressVersionNumber == 0:
				print 'Cannot understand sendersAddressVersionNumber = 0. Ignoring message.'
				return
			if sendersAddressVersionNumber >= 3:
				print 'Sender\'s address version number', sendersAddressVersionNumber, ' not yet supported. Ignoring message.'
				return
			if len(data) < 170:
				print 'Length of the unencrypted data is unreasonably short. Sanity check failed. Ignoring message.'
				return
			sendersStreamNumber, sendersStreamNumberLength = decodeVarint(data[readPosition:readPosition+10])
			if sendersStreamNumber == 0:
				print 'sender\'s stream number is 0. Ignoring message.'
				return
			readPosition += sendersStreamNumberLength
			behaviorBitfield = data[readPosition:readPosition+4]
			readPosition += 4
			#The embedded public keys omit the 0x04 uncompressed-point prefix; restore it here.
			pubSigningKey = '\x04' + data[readPosition:readPosition+64]
			readPosition += 64
			pubEncryptionKey = '\x04' + data[readPosition:readPosition+64]
			readPosition += 64
			endOfThePublicKeyPosition = readPosition #needed for when we store the pubkey in our database of pubkeys for later use.
			if toRipe != data[readPosition:readPosition+20]:
				printLock.acquire()
				print 'The original sender of this message did not send it to you. Someone is attempting a Surreptitious Forwarding Attack.'
				print 'See: http://tools.ietf.org/html/draft-ietf-smime-sender-auth-00'
				print 'your toRipe:', toRipe.encode('hex')
				print 'embedded destination toRipe:', data[readPosition:readPosition+20].encode('hex')
				printLock.release()
				return
			readPosition += 20
			messageEncodingType, messageEncodingTypeLength = decodeVarint(data[readPosition:readPosition+10])
			readPosition += messageEncodingTypeLength
			messageLength, messageLengthLength = decodeVarint(data[readPosition:readPosition+10])
			readPosition += messageLengthLength
			message = data[readPosition:readPosition+messageLength]
			#print 'First 150 characters of message:', repr(message[:150])
			readPosition += messageLength
			ackLength, ackLengthLength = decodeVarint(data[readPosition:readPosition+10])
			readPosition += ackLengthLength
			ackData = data[readPosition:readPosition+ackLength]
			readPosition += ackLength
			positionOfBottomOfAckData = readPosition #needed to mark the end of what is covered by the signature
			signatureLength, signatureLengthLength = decodeVarint(data[readPosition:readPosition+10])
			readPosition += signatureLengthLength
			signature = data[readPosition:readPosition+signatureLength]
			try:
				highlevelcrypto.verify(data[:positionOfBottomOfAckData],signature,pubSigningKey.encode('hex'))
				print 'ECDSA verify passed'
			except Exception, err:
				print 'ECDSA verify failed', err
				return
			printLock.acquire()
			print 'As a matter of intellectual curiosity, here is the Bitcoin address associated with the keys owned by the other person:', calculateBitcoinAddressFromPubkey(pubSigningKey), ' ..and here is the testnet address:',calculateTestnetAddressFromPubkey(pubSigningKey),'. The other person must take their private signing key from Bitmessage and import it into Bitcoin (or a service like Blockchain.info) for it to be of any use. Do not use this unless you know what you are doing.'
			printLock.release()
			#calculate the fromRipe: ripemd160 of the sha512 of the concatenated public keys.
			sha = hashlib.new('sha512')
			sha.update(pubSigningKey+pubEncryptionKey)
			ripe = hashlib.new('ripemd160')
			ripe.update(sha.digest())
			#Let's store the public key in case we want to reply to this person.
			#We don't have the correct nonce or time (which would let us send out a pubkey message) so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed to not do.)
			t = (ripe.digest(),False,'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'+'\xFF\xFF\xFF\xFF'+data[messageVersionLength:endOfThePublicKeyPosition],int(time.time()),'yes')
			sqlLock.acquire()
			sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
			sqlSubmitQueue.put(t)
			sqlReturnQueue.get()
			sqlLock.release()
			blockMessage = False #Gets set to True if the user shouldn't see the message according to black or white lists.
			fromAddress = encodeAddress(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest())
			if config.get('bitmessagesettings', 'blackwhitelist') == 'black': #If we are using a blacklist
				t = (fromAddress,)
				sqlLock.acquire()
				sqlSubmitQueue.put('''SELECT label, enabled FROM blacklist where address=?''')
				sqlSubmitQueue.put(t)
				queryreturn = sqlReturnQueue.get()
				sqlLock.release()
				for row in queryreturn:
					label, enabled = row
					if enabled:
						print 'Message ignored because address is in blacklist.'
						blockMessage = True
			else: #We're using a whitelist
				t = (fromAddress,)
				sqlLock.acquire()
				sqlSubmitQueue.put('''SELECT label, enabled FROM whitelist where address=?''')
				sqlSubmitQueue.put(t)
				queryreturn = sqlReturnQueue.get()
				sqlLock.release()
				if queryreturn == []:
					print 'Message ignored because address not in whitelist.'
					blockMessage = True
				for row in queryreturn: #It could be in the whitelist but disabled. Let's check.
					label, enabled = row
					if not enabled:
						print 'Message ignored because address in whitelist but not enabled.'
						blockMessage = True
			if not blockMessage:
				print 'fromAddress:', fromAddress
				print 'First 150 characters of message:', repr(message[:150])
				#Look up the destination address (my address) based on the destination ripe hash.
				#I realize that I could have a data structure devoted to this task, or maintain an indexed table
				#in the sql database, but I would prefer to minimize the number of data structures this program
				#uses. Searching linearly through the user's short list of addresses doesn't take very long anyway.
				configSections = config.sections()
				for addressInKeysFile in configSections:
					if addressInKeysFile <> 'bitmessagesettings':
						status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
						if hash == key:
							toAddress = addressInKeysFile
							toLabel = config.get(addressInKeysFile, 'label')
							if toLabel == '':
								toLabel = addressInKeysFile
				if messageEncodingType == 2:
					#Encoding type 2: 'Subject:<subject>\nBody:<body>' layout.
					bodyPositionIndex = string.find(message,'\nBody:')
					if bodyPositionIndex > 1:
						subject = message[8:bodyPositionIndex]
						body = message[bodyPositionIndex+6:]
					else:
						subject = ''
						body = message
				elif messageEncodingType == 1:
					#Encoding type 1: trivial; the whole message is the body.
					body = message
					subject = ''
				elif messageEncodingType == 0:
					print 'messageEncodingType == 0. Doing nothing with the message. They probably just sent it so that we would store their public key or send their ack data for them.'
				else:
					body = 'Unknown encoding type.\n\n' + repr(message)
					subject = ''
				print 'within processmsg, self.inventoryHash is', self.inventoryHash.encode('hex')
				if messageEncodingType <> 0:
					sqlLock.acquire()
					t = (self.inventoryHash,toAddress,fromAddress,subject,int(time.time()),body,'inbox')
					sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?)''')
					sqlSubmitQueue.put(t)
					sqlReturnQueue.get()
					sqlLock.release()
					self.emit(SIGNAL("displayNewMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.inventoryHash,toAddress,fromAddress,subject,body)
				#Now let's consider sending the acknowledgement. We'll need to make sure that our client will properly process the ackData; if the packet is malformed, we could clear out self.data and an attacker could use that behavior to determine that we were capable of decoding this message.
				ackDataValidThusFar = True
				if len(ackData) < 24:
					print 'The length of ackData is unreasonably short. Not sending ackData.'
					ackDataValidThusFar = False
				elif ackData[0:4] != '\xe9\xbe\xb4\xd9':
					print 'Ackdata magic bytes were wrong. Not sending ackData.'
					ackDataValidThusFar = False
				if ackDataValidThusFar:
					ackDataPayloadLength, = unpack('>L',ackData[16:20])
					if len(ackData)-24 != ackDataPayloadLength: #The ackData includes the 24-byte protocol header which is not counted in the payload length.
						print 'ackData payload length doesn\'t match the payload length specified in the header. Not sending ackdata.'
						ackDataValidThusFar = False
				if ackDataValidThusFar:
					print 'ackData is valid. Will process it.'
					self.ackDataThatWeHaveYetToSend.append(ackData) #When we have processed all data, the processData function will pop the ackData out and process it as if it is a message received from our peer.
			#Display timing data
			timeRequiredToAttemptToDecryptMessage = time.time()- self.messageProcessingStartTime
			successfullyDecryptMessageTimings.append(timeRequiredToAttemptToDecryptMessage)
			sum = 0 #NOTE(review): shadows the 'sum' builtin; harmless locally but worth renaming.
			for item in successfullyDecryptMessageTimings:
				sum += item
			printLock.acquire()
			print 'Time to decrypt this message successfully:', timeRequiredToAttemptToDecryptMessage
			print 'Average time for all message decryption successes since startup:', sum / len(successfullyDecryptMessageTimings)
			printLock.release()
#We have received a pubkey
	def recpubkey(self):
		"""Handle a received pubkey message. Performs sanity, proof-of-work,
		embedded-time, and stream-number checks; records the object in the
		inventory and relays it to peers; then hands it to processpubkey.
		Afterwards, sleeps if processing finished quickly so that total
		processing time leaks less information (timing attack mitigation)."""
		self.pubkeyProcessingStartTime = time.time()
		if self.payloadLength < 146: #sanity check
			return
		#We must check to make sure the proof of work is sufficient.
		if not self.isProofOfWorkSufficient():
			print 'Proof of work in pubkey message insufficient.'
			return
		readPosition = 24 #for the message header
		readPosition += 8 #for the nonce
		embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
		if embeddedTime < int(time.time())-lengthOfTimeToHoldOnToAllPubkeys-86400: #If the pubkey is more than a month old then reject it. (the 86400 is included to give an extra day of wiggle-room. If the wiggle-room is actually of any use, everyone on the network will delete this pubkey from their database the next time the cleanerThread cleans anyway- except for the node that actually wants the pubkey.)
			printLock.acquire()
			print 'The embedded time in this pubkey message is too old. Ignoring.'
			printLock.release()
			return
		if embeddedTime > int(time.time()) + 10800:
			#10800 seconds = 3 hours of tolerated clock skew into the future.
			printLock.acquire()
			print 'The embedded time in this pubkey message more than several hours in the future. This is irrational. Ignoring message.'
			printLock.release()
			return
		readPosition += 4 #for the time
		addressVersion, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
		readPosition += varintLength
		streamNumber, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
		readPosition += varintLength
		if self.streamNumber != streamNumber:
			print 'stream number embedded in this pubkey doesn\'t match our stream number. Ignoring.'
			return
		#Drop duplicates: check both the in-memory and the on-disk (SQL) inventories.
		inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
		inventoryLock.acquire()
		if inventoryHash in inventory:
			print 'We have already received this pubkey. Ignoring it.'
			inventoryLock.release()
			return
		elif isInSqlInventory(inventoryHash):
			print 'We have already received this pubkey (it is stored on disk in the SQL inventory). Ignoring it.'
			inventoryLock.release()
			return
		objectType = 'pubkey'
		inventory[inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], int(time.time()))
		inventoryLock.release()
		self.broadcastinv(inventoryHash)
		self.emit(SIGNAL("incrementNumberOfPubkeysProcessed()"))
		self.processpubkey()
		#Pad the total processing time to a fixed minimum so its duration reveals less.
		lengthOfTimeWeShouldUseToProcessThisMessage = .2
		sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time()- self.pubkeyProcessingStartTime)
		if sleepTime > 0:
			#printLock.acquire()
			#print 'Timing attack mitigation: Sleeping for', sleepTime ,'seconds.'
			#printLock.release()
			time.sleep(sleepTime)
		#printLock.acquire()
		#print 'Total pubkey processing time:', time.time()- self.pubkeyProcessingStartTime, 'seconds.'
		#printLock.release()
	def processpubkey(self):
		"""Parse a pubkey message (already validated by recpubkey) and store the
		key in our pubkeys table. Version 2 addresses carry raw EC keys; version 1
		(legacy RSA) carries n and e and will soon be removed. In both cases, if
		we have personally used the key before it is re-inserted with
		usedpersonally='yes', otherwise with 'no'; either way the worker thread is
		notified via a 'newpubkey' work item."""
		readPosition = 24 #for the message header
		readPosition += 8 #for the nonce
		embeddedTime, = unpack('>I',self.data[readPosition:readPosition+4])
		readPosition += 4 #for the time
		addressVersion, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
		readPosition += varintLength
		streamNumber, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
		readPosition += varintLength
		if addressVersion == 0:
			print '(Within processpubkey) addressVersion of 0 doesn\'t make sense.'
			return
		if addressVersion >= 3:
			printLock.acquire()
			print 'This version of Bitmessage cannot handle version', addressVersion,'addresses.'
			printLock.release()
			return
		if addressVersion == 2:
			if self.payloadLength < 146: #sanity check. This is the minimum possible length.
				print 'payloadLength less than 146. Sanity check failed.'
				return
			bitfieldBehaviors = self.data[readPosition:readPosition+4]
			readPosition += 4
			#Keys are transmitted without the 0x04 uncompressed-point prefix.
			publicSigningKey = self.data[readPosition:readPosition+64]
			#Is it possible for a public key to be invalid such that trying to encrypt or sign with it will cause an error? If it is, we should probably test these keys here.
			readPosition += 64
			publicEncryptionKey = self.data[readPosition:readPosition+64]
			if len(publicEncryptionKey) < 64:
				print 'publicEncryptionKey length less than 64. Sanity check failed.'
				return
			#The address's ripe hash is ripemd160(sha512(prefixed signing key + prefixed encryption key)).
			sha = hashlib.new('sha512')
			sha.update('\x04'+publicSigningKey+'\x04'+publicEncryptionKey)
			ripeHasher = hashlib.new('ripemd160')
			ripeHasher.update(sha.digest())
			ripe = ripeHasher.digest()
			printLock.acquire()
			print 'within recpubkey, addressVersion:', addressVersion, ', streamNumber:', streamNumber
			print 'ripe', ripe.encode('hex')
			print 'publicSigningKey in hex:', publicSigningKey.encode('hex')
			print 'publicEncryptionKey in hex:', publicEncryptionKey.encode('hex')
			printLock.release()
			t = (ripe,)
			sqlLock.acquire()
			sqlSubmitQueue.put('''SELECT usedpersonally FROM pubkeys WHERE hash=? AND usedpersonally='yes' ''')
			sqlSubmitQueue.put(t)
			queryreturn = sqlReturnQueue.get()
			sqlLock.release()
			if queryreturn != []: #if this pubkey is already in our database and if we have used it personally:
				print 'We HAVE used this pubkey personally. Updating time.'
				t = (ripe,True,self.data[24:24+self.payloadLength],embeddedTime,'yes')
				sqlLock.acquire()
				sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
				sqlSubmitQueue.put(t)
				sqlReturnQueue.get()
				sqlLock.release()
				printLock.acquire()
				print 'added foreign pubkey into our database'
				printLock.release()
				workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
			else:
				print 'We have NOT used this pubkey personally. Inserting in database.'
				t = (ripe,True,self.data[24:24+self.payloadLength],embeddedTime,'no')
				sqlLock.acquire()
				sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
				sqlSubmitQueue.put(t)
				sqlReturnQueue.get()
				sqlLock.release()
				printLock.acquire()
				print 'added foreign pubkey into our database'
				printLock.release()
				workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
		#This code which deals with old RSA addresses will soon be removed.
		elif addressVersion == 1:
			nLength, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
			readPosition += varintLength
			nString = self.data[readPosition:readPosition+nLength]
			readPosition += nLength
			eLength, varintLength = decodeVarint(self.data[readPosition:readPosition+10])
			readPosition += varintLength
			eString = self.data[readPosition:readPosition+eLength]
			readPosition += eLength
			#For RSA addresses the ripe hash is computed over the raw n and e strings.
			sha = hashlib.new('sha512')
			sha.update(nString+eString)
			ripeHasher = hashlib.new('ripemd160')
			ripeHasher.update(sha.digest())
			ripe = ripeHasher.digest()
			print 'within recpubkey, addressVersion', addressVersion
			print 'streamNumber', streamNumber
			print 'ripe', repr(ripe)
			print 'n=', convertStringToInt(nString)
			print 'e=', convertStringToInt(eString)
			t = (ripe,)
			sqlLock.acquire()
			sqlSubmitQueue.put('''SELECT usedpersonally FROM pubkeys WHERE hash=? AND usedpersonally='yes' ''')
			sqlSubmitQueue.put(t)
			queryreturn = sqlReturnQueue.get()
			sqlLock.release()
			if queryreturn != []: #if this pubkey is already in our database and if we have used it personally:
				print 'We HAVE used this pubkey personally. Updating time.'
				t = (ripe,True,self.data[24:24+self.payloadLength],int(time.time()),'yes')
				sqlLock.acquire()
				sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
				sqlSubmitQueue.put(t)
				sqlReturnQueue.get()
				sqlLock.release()
				printLock.acquire()
				print 'added foreign pubkey into our database'
				printLock.release()
				workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
			else:
				print 'We have NOT used this pubkey personally. Inserting in database.'
				t = (ripe,True,self.data[24:24+self.payloadLength],int(time.time()),'no')
				sqlLock.acquire()
				sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
				sqlSubmitQueue.put(t)
				sqlReturnQueue.get()
				sqlLock.release()
				printLock.acquire()
				print 'added foreign pubkey into our database'
				printLock.release()
				workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
#We have received a getpubkey message
	def recgetpubkey(self):
		"""Handle a received getpubkey request. After proof-of-work, time, and
		stream checks it records the request in the inventory and relays it. If
		the requested ripe hash is in our pubkeys table we re-advertise the stored
		pubkey; if the hash is one of our own EC addresses we ask the worker
		thread to do the POW and broadcast our pubkey; otherwise the request is
		ignored."""
		if not self.isProofOfWorkSufficient():
			print 'Proof of work in getpubkey message insufficient.'
			return
		embeddedTime, = unpack('>I',self.data[32:36])
		if embeddedTime > int(time.time())+10800:
			print 'The time in this getpubkey message is too new. Ignoring it. Time:', embeddedTime
			return
		if embeddedTime < int(time.time())-maximumAgeOfAnObjectThatIAmWillingToAccept:
			print 'The time in this getpubkey message is too old. Ignoring it. Time:', embeddedTime
			return
		addressVersionNumber, addressVersionLength = decodeVarint(self.data[36:42])
		streamNumber, streamNumberLength = decodeVarint(self.data[36+addressVersionLength:42+addressVersionLength])
		if streamNumber <> self.streamNumber:
			print 'The streamNumber', streamNumber, 'doesn\'t match our stream number:', self.streamNumber
			return
		#Drop duplicates: check both the in-memory and the on-disk (SQL) inventories.
		inventoryHash = calculateInventoryHash(self.data[24:self.payloadLength+24])
		inventoryLock.acquire()
		if inventoryHash in inventory:
			print 'We have already received this getpubkey request. Ignoring it.'
			inventoryLock.release()
			return
		elif isInSqlInventory(inventoryHash):
			print 'We have already received this getpubkey request (it is stored on disk in the SQL inventory). Ignoring it.'
			inventoryLock.release()
			return
		self.objectsOfWhichThisRemoteNodeIsAlreadyAware[inventoryHash] = 0
		objectType = 'getpubkey'
		inventory[inventoryHash] = (objectType, self.streamNumber, self.data[24:self.payloadLength+24], embeddedTime)
		inventoryLock.release()
		#This getpubkey request is valid so far. Forward to peers.
		self.broadcastinv(inventoryHash)
		if addressVersionNumber == 0:
			print 'The addressVersionNumber of the pubkey request is zero. That doesn\'t make any sense. Ignoring it.'
			return
		elif addressVersionNumber > 2:
			print 'The addressVersionNumber of the pubkey request is too high. Can\'t understand. Ignoring it.'
			return
		#The requested 20-byte ripe hash follows the address version and stream varints.
		print 'the hash requested in this getpubkey request is:', self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength].encode('hex')
		sqlLock.acquire()
		t = (self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength],int(time.time())-lengthOfTimeToHoldOnToAllPubkeys) #this prevents SQL injection
		sqlSubmitQueue.put('''SELECT hash, transmitdata, time FROM pubkeys WHERE hash=? AND havecorrectnonce=1 AND time>?''')
		sqlSubmitQueue.put(t)
		queryreturn = sqlReturnQueue.get()
		sqlLock.release()
		if queryreturn != []:
			for row in queryreturn:
				hash, payload, timeEncodedInPubkey = row
				printLock.acquire()
				print 'We have the requested pubkey stored in our database of pubkeys. Sending it.'
				printLock.release()
				inventoryHash = calculateInventoryHash(payload)
				objectType = 'pubkey'
				inventory[inventoryHash] = (objectType, self.streamNumber, payload, timeEncodedInPubkey)#If the time embedded in this pubkey is more than 3 days old then this object isn't going to last very long in the inventory- the cleanerThread is going to come along and move it from the inventory in memory to the SQL inventory and then delete it from the SQL inventory. It should still find its way back to the original requestor if he is online however.
				self.broadcastinv(inventoryHash)
		else: #the pubkey is not in our database of pubkeys. Let's check if the requested key is ours (which would mean we should do the POW, put it in the pubkey table, and broadcast out the pubkey.)
			if self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength] in myECAddressHashes: #if this address hash is one of mine
				printLock.acquire()
				print 'Found getpubkey-requested-hash in my list of EC hashes. Telling Worker thread to do the POW for a pubkey message and send it out.'
				printLock.release()
				myAddress = encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength])
				workerQueue.put(('doPOWForMyV2Pubkey',myAddress))
			#This code which deals with old RSA addresses will soon be removed.
			"""elif self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength] in myRSAAddressHashes:
				print 'Found getpubkey requested hash in my list of RSA hashes.'
				payload = '\x00\x00\x00\x01' #bitfield of features supported by me (see the wiki).
				payload += self.data[36:36+addressVersionLength+streamNumberLength]
				#print int(config.get(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'n'))
				nString = convertIntToString(int(config.get(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'n')))
				eString = convertIntToString(config.getint(encodeAddress(addressVersionNumber,streamNumber,self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength]), 'e'))
				payload += encodeVarint(len(nString))
				payload += nString
				payload += encodeVarint(len(eString))
				payload += eString
				nonce = 0
				trialValue = 99999999999999999999
				target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
				print '(For pubkey message) Doing proof of work...'
				initialHash = hashlib.sha512(payload).digest()
				while trialValue > target:
					nonce += 1
					trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
				print '(For pubkey message) Found proof of work', trialValue, 'Nonce:', nonce
				payload = pack('>Q',nonce) + payload
				t = (self.data[36+addressVersionLength+streamNumberLength:56+addressVersionLength+streamNumberLength],True,payload,int(time.time())+1209600) #after two weeks (1,209,600 seconds), we may remove our own pub key from our database. It will be regenerated and put back in the database if it is requested.
				sqlLock.acquire()
				#** pubkeys insert query not yet fixed! **
				sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?)''')
				sqlSubmitQueue.put(t)
				queryreturn = sqlReturnQueue.get()
				sqlLock.release()
				inventoryHash = calculateInventoryHash(payload)
				objectType = 'pubkey'
				inventory[inventoryHash] = (objectType, self.streamNumber, payload, int(time.time()))
				self.broadcastinv(inventoryHash) """
			else:
				printLock.acquire()
				print 'This getpubkey request is not for any of my keys.'
				printLock.release()
#We have received an inv message
	def recinv(self):
		"""Handle a received inv message. A single-item inv is checked against
		our inventories immediately and, if unknown, requested with getdata. A
		multi-item inv only records the advertised hashes; objects are requested
		later, in random order, so that overlapping invs from multiple peers lead
		to different getdata requests going to different peers."""
		numberOfItemsInInv, lengthOfVarint = decodeVarint(self.data[24:34])
		if numberOfItemsInInv == 1: #we'll just request this data from the person who advertised the object.
			for i in range(numberOfItemsInInv):
				if len(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]) == 32: #The length of an inventory hash should be 32. If it isn't 32 then the remote node is either badly programmed or behaving nefariously.
					self.objectsOfWhichThisRemoteNodeIsAlreadyAware[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
					if self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)] in inventory:
						printLock.acquire()
						print 'Inventory (in memory) has inventory item already.'
						printLock.release()
					elif isInSqlInventory(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]):
						print 'Inventory (SQL on disk) has inventory item already.'
					else:
						self.sendgetdata(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)])
		else:
			print 'inv message lists', numberOfItemsInInv, 'objects.'
			for i in range(numberOfItemsInInv): #upon finishing dealing with an incoming message, the receiveDataThread will request a random object from the peer. This way if we get multiple inv messages from multiple peers which list mostly the same objects, we will make getdata requests for different random objects from the various peers.
				if len(self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]) == 32: #The length of an inventory hash should be 32. If it isn't 32 then the remote node is either badly programmed or behaving nefariously.
					self.objectsOfWhichThisRemoteNodeIsAlreadyAware[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
					self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[self.data[24+lengthOfVarint+(32*i):56+lengthOfVarint+(32*i)]] = 0
#Send a getdata message to our peer to request the object with the given hash
def sendgetdata(self,hash):
print 'sending getdata to retrieve object with hash:', hash.encode('hex')
payload = '\x01' + hash
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getdata\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
#We have received a getdata request from our peer
	def recgetdata(self):
		"""Handle a getdata request from our peer: for each requested hash, look
		the object up first in the in-memory inventory, then in the SQL
		inventory, and send it with sendData if found."""
		value, lengthOfVarint = decodeVarint(self.data[24:34])
		#print 'Number of items in getdata request:', value
		try:
			for i in xrange(value):
				hash = self.data[24+lengthOfVarint+(i*32):56+lengthOfVarint+(i*32)]
				printLock.acquire()
				print 'received getdata request for item:', hash.encode('hex')
				printLock.release()
				#print 'inventory is', inventory
				if hash in inventory:
					objectType, streamNumber, payload, receivedTime = inventory[hash]
					self.sendData(objectType,payload)
				else:
					t = (hash,)
					sqlLock.acquire()
					sqlSubmitQueue.put('''select objecttype, payload from inventory where hash=?''')
					sqlSubmitQueue.put(t)
					queryreturn = sqlReturnQueue.get()
					sqlLock.release()
					if queryreturn <> []:
						for row in queryreturn:
							objectType, payload = row
							self.sendData(objectType,payload)
					else:
						print 'Someone asked for an object with a getdata which is not in either our memory inventory or our SQL inventory. That shouldn\'t have happened.'
		except:
			pass #someone is probably trying to cause a program error by, for example, making a request for 10 items but only including the hashes for 5. The deliberately broad except keeps a malformed request from killing this thread.
#Our peer has requested (in a getdata message) that we send an object.
def sendData(self,objectType,payload):
if objectType == 'pubkey':
print 'sending pubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'pubkey\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'getpubkey':
print 'sending getpubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getpubkey\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'msg':
print 'sending msg'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'broadcast':
print 'sending broadcast'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'broadcast\x00\x00\x00'
headerData += pack('>L',len(payload)) #payload length. Note that we add an extra 8 for the nonce.
headerData += hashlib.sha512(payload).digest()[:4]
self.sock.send(headerData + payload)
elif objectType == 'getpubkey' or objectType == 'pubkeyrequest':
print 'sending getpubkey'
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
headerData += 'getpubkey\x00\x00\x00' #version command
headerData += pack('>L',len(payload)) #payload length
headerData += hashlib.sha512(payload).digest()[0:4]
self.sock.send(headerData + payload)
else:
sys.stderr.write('Error: sendData has been asked to send a strange objectType: %s\n' % str(objectType))
    #Send an inv message with just one hash to all of our peers
    def broadcastinv(self,hash):
        """Advertise a single inventory hash to every peer on our stream by
        queueing a 'sendinv' command for each sendDataThread."""
        printLock.acquire()
        print 'broadcasting inv with hash:', hash.encode('hex')
        printLock.release()
        broadcastToSendDataQueues((self.streamNumber, 'sendinv', hash))
    #We have received an addr message.
    def recaddr(self):
        """Parse a received addr message: add previously-unknown IPv4 peers to
        knownNodes (persisting to knownnodes.dat), refresh last-seen times for
        known peers, and re-broadcast any genuinely new addresses to our peers.

        Address entries are 34 bytes each: 4-byte time, 4-byte stream,
        8-byte services, 16-byte IPv6/IPv4-mapped address, 2-byte port.
        """
        listOfAddressDetailsToBroadcastToPeers = []
        numberOfAddressesIncluded = 0
        numberOfAddressesIncluded, lengthOfNumberOfAddresses = decodeVarint(self.data[24:29])
        if verbose >= 1:
            print 'addr message contains', numberOfAddressesIncluded, 'IP addresses.'
        #print 'lengthOfNumberOfAddresses', lengthOfNumberOfAddresses
        if numberOfAddressesIncluded > 1000:
            #Reject oversized addr messages outright.
            return
        needToWriteKnownNodesToDisk = False
        for i in range(0,numberOfAddressesIncluded):
            try:
                #Only IPv4-mapped IPv6 addresses (prefix 00..00 FF FF) are supported; skip real IPv6.
                if self.data[40+lengthOfNumberOfAddresses+(34*i):52+lengthOfNumberOfAddresses+(34*i)] != '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
                    printLock.acquire()
                    print 'Skipping IPv6 address.', repr(self.data[40+lengthOfNumberOfAddresses+(34*i):56+lengthOfNumberOfAddresses+(34*i)])
                    printLock.release()
                    continue
                #print repr(self.data[6+lengthOfNumberOfAddresses+(34*i):18+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (to test for an IPv6 address). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            try:
                recaddrStream, = unpack('>I',self.data[28+lengthOfNumberOfAddresses+(34*i):32+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrStream). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            try:
                recaddrServices, = unpack('>Q',self.data[32+lengthOfNumberOfAddresses+(34*i):40+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrServices). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            try:
                recaddrPort, = unpack('>H',self.data[56+lengthOfNumberOfAddresses+(34*i):58+lengthOfNumberOfAddresses+(34*i)])
            except Exception, err:
                if verbose >= 2:
                    printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrPort). Message: %s\n' % str(err))
                    printLock.release()
                break #giving up on unpacking any more. We should still be connected however.
            #print 'Within recaddr(): IP', recaddrIP, ', Port', recaddrPort, ', i', i
            #The IPv4 address occupies the last 4 bytes of the 16-byte address field.
            hostFromAddrMessage = socket.inet_ntoa(self.data[52+lengthOfNumberOfAddresses+(34*i):56+lengthOfNumberOfAddresses+(34*i)])
            #print 'hostFromAddrMessage', hostFromAddrMessage
            if hostFromAddrMessage == '127.0.0.1':
                continue
            timeSomeoneElseReceivedMessageFromThisNode, = unpack('>I',self.data[24+lengthOfNumberOfAddresses+(34*i):28+lengthOfNumberOfAddresses+(34*i)]) #This is the 'time' value in the received addr message.
            if hostFromAddrMessage not in knownNodes[recaddrStream]:
                if len(knownNodes[recaddrStream]) < 20000 and timeSomeoneElseReceivedMessageFromThisNode > (int(time.time())-10800) and timeSomeoneElseReceivedMessageFromThisNode < (int(time.time()) + 10800): #If we have more than 20000 nodes in our list already then just forget about adding more. Also, make sure that the time that someone else received a message from this node is within three hours from now.
                    knownNodes[recaddrStream][hostFromAddrMessage] = (recaddrPort, timeSomeoneElseReceivedMessageFromThisNode)
                    print 'added new node', hostFromAddrMessage, 'to knownNodes.'
                    needToWriteKnownNodesToDisk = True
                    hostDetails = (timeSomeoneElseReceivedMessageFromThisNode, recaddrStream, recaddrServices, hostFromAddrMessage, recaddrPort)
                    listOfAddressDetailsToBroadcastToPeers.append(hostDetails)
            else:
                #Known node: only move its last-seen time forward, never into the future.
                PORT, timeLastReceivedMessageFromThisNode = knownNodes[recaddrStream][hostFromAddrMessage]#PORT in this case is either the port we used to connect to the remote node, or the port that was specified by someone else in a past addr message.
                if (timeLastReceivedMessageFromThisNode < timeSomeoneElseReceivedMessageFromThisNode) and (timeSomeoneElseReceivedMessageFromThisNode < int(time.time())):
                    knownNodes[recaddrStream][hostFromAddrMessage] = (PORT, timeSomeoneElseReceivedMessageFromThisNode)
                    if PORT != recaddrPort:
                        print 'Strange occurance: The port specified in an addr message', str(recaddrPort),'does not match the port',str(PORT),'that this program (or some other peer) used to connect to it',str(hostFromAddrMessage),'. Perhaps they changed their port or are using a strange NAT configuration.'
        if needToWriteKnownNodesToDisk: #Runs if any nodes were new to us. Also, share those nodes with our peers.
            output = open(appdata + 'knownnodes.dat', 'wb')
            pickle.dump(knownNodes, output)
            output.close()
            self.broadcastaddr(listOfAddressDetailsToBroadcastToPeers)
        print 'knownNodes currently has', len(knownNodes[recaddrStream]), 'nodes for this stream.'
#Function runs when we want to broadcast an addr message to all of our peers. Runs when we learn of nodes that we didn't previously know about and want to share them with our peers.
def broadcastaddr(self,listOfAddressDetailsToBroadcastToPeers):
numberOfAddressesInAddrMessage = len(listOfAddressDetailsToBroadcastToPeers)
payload = ''
for hostDetails in listOfAddressDetailsToBroadcastToPeers:
timeLastReceivedMessageFromThisNode, streamNumber, services, host, port = hostDetails
payload += pack('>I',timeLastReceivedMessageFromThisNode)
payload += pack('>I',streamNumber)
payload += pack('>q',services) #service bit flags offered by this node
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(host)
payload += pack('>H',port)#remote port
payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
datatosend = '\xE9\xBE\xB4\xD9addr\x00\x00\x00\x00\x00\x00\x00\x00'
datatosend = datatosend + pack('>L',len(payload)) #payload length
datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
datatosend = datatosend + payload
if verbose >= 2:
printLock.acquire()
print 'Broadcasting addr with', numberOfAddressesInAddrMessage, 'entries.'
printLock.release()
broadcastToSendDataQueues((self.streamNumber, 'sendaddr', datatosend))
    #Send a big addr message to our peer
    def sendaddr(self):
        """Send our peer a large addr message: up to 500 randomly chosen nodes
        from our own stream plus up to 250 each from the left and right child
        streams, limited to nodes seen recently enough to advertise."""
        addrsInMyStream = {}
        addrsInChildStreamLeft = {}
        addrsInChildStreamRight = {}
        #print 'knownNodes', knownNodes
        #We are going to share a maximum number of 1000 addrs with our peer. 500 from this stream, 250 from the left child stream, and 250 from the right child stream.
        #NOTE(review): these loops sample with replacement, so fewer than 500/250
        #distinct hosts may be collected; random.sample is called on a dict here,
        #which the random module documents for sequences/sets — verify on the
        #targeted Python 2 version.
        if len(knownNodes[self.streamNumber]) > 0:
            for i in range(500):
                random.seed()
                HOST, = random.sample(knownNodes[self.streamNumber],  1)
                addrsInMyStream[HOST] = knownNodes[self.streamNumber][HOST]
        if len(knownNodes[self.streamNumber*2]) > 0:
            for i in range(250):
                random.seed()
                HOST, = random.sample(knownNodes[self.streamNumber*2],  1)
                addrsInChildStreamLeft[HOST] = knownNodes[self.streamNumber*2][HOST]
        if len(knownNodes[(self.streamNumber*2)+1]) > 0:
            for i in range(250):
                random.seed()
                HOST, = random.sample(knownNodes[(self.streamNumber*2)+1],  1)
                addrsInChildStreamRight[HOST] = knownNodes[(self.streamNumber*2)+1][HOST]
        numberOfAddressesInAddrMessage = 0
        payload = ''
        print 'addrsInMyStream.items()', addrsInMyStream.items()
        for HOST, value in addrsInMyStream.items():
            PORT, timeLastReceivedMessageFromThisNode = value
            if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
                numberOfAddressesInAddrMessage += 1
                payload += pack('>I',timeLastReceivedMessageFromThisNode)
                payload += pack('>I',self.streamNumber)
                payload += pack('>q',1) #service bit flags offered by this node
                payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
                payload += pack('>H',PORT)#remote port
        for HOST, value in addrsInChildStreamLeft.items():
            PORT, timeLastReceivedMessageFromThisNode = value
            if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
                numberOfAddressesInAddrMessage += 1
                payload += pack('>I',timeLastReceivedMessageFromThisNode)
                payload += pack('>I',self.streamNumber*2)
                payload += pack('>q',1) #service bit flags offered by this node
                payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
                payload += pack('>H',PORT)#remote port
        for HOST, value in addrsInChildStreamRight.items():
            PORT, timeLastReceivedMessageFromThisNode = value
            if timeLastReceivedMessageFromThisNode > (int(time.time())- maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
                numberOfAddressesInAddrMessage += 1
                payload += pack('>I',timeLastReceivedMessageFromThisNode)
                payload += pack('>I',(self.streamNumber*2)+1)
                payload += pack('>q',1) #service bit flags offered by this node
                payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
                payload += pack('>H',PORT)#remote port
        #Prefix the entry count, then frame with magic, command, length, checksum.
        payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
        datatosend = '\xE9\xBE\xB4\xD9addr\x00\x00\x00\x00\x00\x00\x00\x00'
        datatosend = datatosend + pack('>L',len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload
        if verbose >= 2:
            printLock.acquire()
            print 'Sending addr with', numberOfAddressesInAddrMessage, 'entries.'
            printLock.release()
        self.sock.send(datatosend)
#We have received a version message
def recversion(self):
if self.payloadLength < 83:
#This version message is unreasonably short. Forget it.
return
elif not self.verackSent: #There is a potential exploit if we don't check to make sure that we have not already received and accepted a version message: An attacker could connect directly to us, send a msg message with the ackdata set to an invalid version message which would cause us to close the connection to the attacker thus proving that we were able to decode the message. Checking the connectionIsOrWasFullyEstablished variable would also suffice.
self.remoteProtocolVersion, = unpack('>L',self.data[24:28])
#print 'remoteProtocolVersion', self.remoteProtocolVersion
self.myExternalIP = socket.inet_ntoa(self.data[64:68])
#print 'myExternalIP', self.myExternalIP
self.remoteNodeIncomingPort, = unpack('>H',self.data[94:96])
#print 'remoteNodeIncomingPort', self.remoteNodeIncomingPort
#print 'self.data[96:104]', repr(self.data[96:104])
#print 'eightBytesOfRandomDataUsedToDetectConnectionsToSelf', repr(eightBytesOfRandomDataUsedToDetectConnectionsToSelf)
useragentLength, lengthOfUseragentVarint = decodeVarint(self.data[104:108])
readPosition = 104 + lengthOfUseragentVarint
useragent = self.data[readPosition:readPosition+useragentLength]
readPosition += useragentLength
numberOfStreamsInVersionMessage, lengthOfNumberOfStreamsInVersionMessage = decodeVarint(self.data[readPosition:])
readPosition += lengthOfNumberOfStreamsInVersionMessage
self.streamNumber, lengthOfRemoteStreamNumber = decodeVarint(self.data[readPosition:])
printLock.acquire()
print 'Remote node useragent:', useragent, ' stream number:', self.streamNumber
printLock.release()
#If this was an incoming connection, then the sendData thread doesn't know the stream. We have to set it.
if not self.initiatedConnection:
broadcastToSendDataQueues((0,'setStreamNumber',(self.HOST,self.streamNumber)))
if self.streamNumber != 1:
self.sock.close()
printLock.acquire()
print 'Closed connection to', self.HOST, 'because they are interested in stream', self.steamNumber,'.'
printLock.release()
self.data = ''
return
if self.data[96:104] == eightBytesOfRandomDataUsedToDetectConnectionsToSelf:
self.sock.close()
printLock.acquire()
print 'Closing connection to myself: ', self.HOST
printLock.release()
self.data = ''
return
knownNodes[self.streamNumber][self.HOST] = (self.remoteNodeIncomingPort, int(time.time()))
output = open(appdata + 'knownnodes.dat', 'wb')
pickle.dump(knownNodes, output)
output.close()
#I've commented out this code because it should be up to the newer node to decide whether their protocol version is incompatiable with the remote node's version.
'''if self.remoteProtocolVersion > 1:
print 'The remote node''s protocol version is too new for this program to understand. Disconnecting. It is:', self.remoteProtocolVersion
self.sock.close()
self.selfInitiatedConnectionList.remove(self)
else:'''
self.sendverack()
if self.initiatedConnection == False:
self.sendversion()
#Sends a version message
def sendversion(self):
global softwareVersion
payload = ''
payload += pack('>L',1) #protocol version.
payload += pack('>q',1) #bitflags of the services I offer.
payload += pack('>q',int(time.time()))
payload += pack('>q',1) #boolservices offered by the remote node. This data is ignored by the remote host because how could We know what Their services are without them telling us?
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(self.HOST)
payload += pack('>H',self.PORT)#remote IPv6 and port
payload += pack('>q',1) #bitflags of the services I offer.
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L',2130706433) # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
payload += pack('>H',config.getint('bitmessagesettings', 'port'))#my external IPv6 and port
random.seed()
payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
userAgent = '/PyBitmessage:' + softwareVersion + '/' #Length of userAgent must be less than 253.
payload += pack('>B',len(userAgent)) #user agent string length. If the user agent is more than 252 bytes long, this code isn't going to work.
payload += userAgent
payload += encodeVarint(1) #The number of streams about which I care. PyBitmessage currently only supports 1.
payload += encodeVarint(self.streamNumber)
datatosend = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
datatosend = datatosend + 'version\x00\x00\x00\x00\x00' #version command
datatosend = datatosend + pack('>L',len(payload)) #payload length
datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
datatosend = datatosend + payload
printLock.acquire()
print 'Sending version message'
printLock.release()
self.sock.send(datatosend)
#self.versionSent = 1
#Sends a verack message
def sendverack(self):
printLock.acquire()
print 'Sending verack'
printLock.release()
self.sock.sendall('\xE9\xBE\xB4\xD9\x76\x65\x72\x61\x63\x6B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
#cf 83 e1 35
self.verackSent = True
if self.verackReceived == True:
self.connectionFullyEstablished()
#Every connection to a peer has a sendDataThread (and also a receiveDataThread).
class sendDataThread(QThread):
    """Outbound half of a peer connection: owns all writes to the socket.

    Each instance registers a private mailbox queue in the global
    sendDataQueues list; other threads command it by putting
    (streamNumber, command, data) tuples there, typically via
    broadcastToSendDataQueues. Recognized commands: 'shutdown',
    'setStreamNumber', 'sendaddr', 'sendinv', and 'pong'.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        #Commands for this thread arrive through this queue.
        self.mailbox = Queue.Queue()
        #Register the mailbox so broadcastToSendDataQueues reaches this thread.
        sendDataQueues.append(self.mailbox)
        self.data = ''
    def setup(self,sock,HOST,PORT,streamNumber,objectsOfWhichThisRemoteNodeIsAlreadyAware):
        """Attach the connected socket and peer details to this thread."""
        self.sock = sock
        self.HOST = HOST
        self.PORT = PORT
        self.streamNumber = streamNumber
        self.lastTimeISentData = int(time.time()) #If this value increases beyond five minutes ago, we'll send a pong message to keep the connection alive.
        self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware
        printLock.acquire()
        print 'The streamNumber of this sendDataThread (ID:', id(self),') at setup() is', self.streamNumber
        printLock.release()
    def sendVersionMessage(self):
        """Build and send our version message over this thread's socket."""
        #Note that there is another copy of this version-sending code in the receiveData class which would need to be changed if you make changes here.
        global softwareVersion
        payload = ''
        payload += pack('>L',1) #protocol version.
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += pack('>q',int(time.time()))
        payload += pack('>q',1) #boolservices of remote connection. How can I even know this for sure? This is probably ignored by the remote host.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(self.HOST)
        payload += pack('>H',self.PORT)#remote IPv6 and port
        payload += pack('>q',1) #bitflags of the services I offer.
        payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L',2130706433) # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
        payload += pack('>H',config.getint('bitmessagesettings', 'port'))#my external IPv6 and port
        random.seed()
        payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
        userAgent = '/PyBitmessage:' + softwareVersion + '/' #Length of userAgent must be less than 253.
        payload += pack('>B',len(userAgent)) #user agent string length. If the user agent is more than 252 bytes long, this code isn't going to work.
        payload += userAgent
        payload += encodeVarint(1) #The number of streams about which I care. PyBitmessage currently only supports 1 per connection.
        payload += encodeVarint(self.streamNumber)
        datatosend = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
        datatosend = datatosend + 'version\x00\x00\x00\x00\x00' #version command
        datatosend = datatosend + pack('>L',len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload
        printLock.acquire()
        print 'Sending version packet: ', repr(datatosend)
        printLock.release()
        self.sock.send(datatosend)
        self.versionSent = 1
    def run(self):
        """Main loop: consume command tuples from the mailbox and act on them.

        A command is handled only if its destination stream matches this
        thread's stream (or is 0, meaning all streams). On any socket send
        failure, and on 'shutdown', the thread closes its socket, removes its
        mailbox from sendDataQueues, and exits the loop.
        """
        while True:
            deststream,command,data = self.mailbox.get()
            #printLock.acquire()
            #print 'sendDataThread, destream:', deststream, ', Command:', command, ', ID:',id(self), ', HOST:', self.HOST
            #printLock.release()
            if deststream == self.streamNumber or deststream == 0:
                if command == 'shutdown':
                    #data names a single host, or 'all' to shut every sendDataThread down.
                    if data == self.HOST or data == 'all':
                        printLock.acquire()
                        print 'sendDataThread thread (associated with', self.HOST,') ID:',id(self), 'shutting down now.'
                        self.sock.close()
                        sendDataQueues.remove(self.mailbox)
                        print 'len of sendDataQueues', len(sendDataQueues)
                        printLock.release()
                        break
                #When you receive an incoming connection, a sendDataThread is created even though you don't yet know what stream number the remote peer is interested in. They will tell you in a version message and if you too are interested in that stream then you will continue on with the connection and will set the streamNumber of this send data thread here:
                elif command == 'setStreamNumber':
                    hostInMessage, specifiedStreamNumber = data
                    if hostInMessage == self.HOST:
                        printLock.acquire()
                        print 'setting the stream number in the sendData thread (ID:',id(self), ') to', specifiedStreamNumber
                        printLock.release()
                        self.streamNumber = specifiedStreamNumber
                elif command == 'sendaddr':
                    try:
                        #To prevent some network analysis, 'leak' the data out to our peer after waiting a random amount of time unless we have a long list of messages in our queue to send.
                        random.seed()
                        time.sleep(random.randrange(0, 10))
                        self.sock.sendall(data)
                        self.lastTimeISentData = int(time.time())
                    except:
                        #Socket is dead: clean up and end this thread.
                        print 'self.sock.sendall failed'
                        self.sock.close()
                        sendDataQueues.remove(self.mailbox)
                        print 'sendDataThread thread', self, 'ending now'
                        break
                elif command == 'sendinv':
                    #Skip peers that already told us (via their own inv) that they have this object.
                    if data not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                        payload = '\x01' + data
                        headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
                        headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                        headerData += pack('>L',len(payload))
                        headerData += hashlib.sha512(payload).digest()[:4]
                        #To prevent some network analysis, 'leak' the data out to our peer after waiting a random amount of time
                        random.seed()
                        time.sleep(random.randrange(0, 10))
                        try:
                            self.sock.sendall(headerData + payload)
                            self.lastTimeISentData = int(time.time())
                        except:
                            #Socket is dead: clean up and end this thread.
                            print 'self.sock.sendall failed'
                            self.sock.close()
                            sendDataQueues.remove(self.mailbox)
                            print 'sendDataThread thread', self, 'ending now'
                            break
                elif command == 'pong':
                    #Only ping the peer if we've been quiet for almost five minutes.
                    if self.lastTimeISentData < (int(time.time()) - 298):
                        #Send out a pong message to keep the connection alive.
                        printLock.acquire()
                        print 'Sending pong to', self.HOST, 'to keep connection alive.'
                        printLock.release()
                        try:
                            self.sock.sendall('\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
                            self.lastTimeISentData = int(time.time())
                        except:
                            print 'self.sock.send pong failed'
                            self.sock.close()
                            sendDataQueues.remove(self.mailbox)
                            print 'sendDataThread thread', self, 'ending now'
                            break
            else:
                printLock.acquire()
                print 'sendDataThread ID:',id(self),'ignoring command', command,'because it is not in stream',deststream
                printLock.release()
#When you want to command a sendDataThread to do something, like shutdown or send some data, this function puts your data into the queues for each of the sendDataThreads. The sendDataThreads are responsible for putting their queue into (and out of) the sendDataQueues list.
def broadcastToSendDataQueues(data):
    """Deliver the (streamNumber, command, data) tuple to every registered
    sendDataThread by putting it into each thread's mailbox queue."""
    for mailbox in sendDataQueues:
        mailbox.put(data)
def flushInventory():
    """Move every object from the in-memory inventory dictionary into the
    on-disk SQL inventory table, emptying the dictionary."""
    #Note that the singleCleanerThread clears out the inventory dictionary from time to time, although it only clears things that have been in the dictionary for a long time. This clears the inventory dictionary Now.
    sqlLock.acquire()
    for hash, storedValue in inventory.items(): #Python 2 .items() returns a list copy, so deleting entries inside the loop is safe
        objectType, streamNumber, payload, receivedTime = storedValue
        t = (hash,objectType,streamNumber,payload,receivedTime)
        sqlSubmitQueue.put('''INSERT INTO inventory VALUES (?,?,?,?,?)''')
        sqlSubmitQueue.put(t)
        sqlReturnQueue.get() #wait for the sqlThread to finish the insert before removing the entry
        del inventory[hash]
    sqlLock.release()
def isInSqlInventory(hash):
    """Return True if an object with this hash exists in the on-disk
    inventory table, False otherwise."""
    parameters = (hash,)
    sqlLock.acquire()
    sqlSubmitQueue.put('''select hash from inventory where hash=?''')
    sqlSubmitQueue.put(parameters)
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    #A non-empty result set means the hash is present.
    return queryreturn != []
def convertIntToString(n):
    """Render the integer n as a big-endian binary string (Python 2 str)."""
    #__builtins__.hex is used rather than bare hex — presumably a module-level
    #name shadows the builtin; verify before simplifying.
    hexString = __builtins__.hex(n)
    if hexString[-1:] == 'L': #Python 2 longs carry a trailing 'L'
        hexString = hexString[:-1]
    digits = hexString[2:] #strip the leading '0x'
    if len(digits) % 2 != 0:
        digits = '0' + digits #left-pad to a whole number of bytes
    return digits.decode('hex')
def convertStringToInt(s):
    """Interpret the binary string s as a big-endian unsigned integer."""
    hexRepresentation = s.encode('hex')
    return int(hexRepresentation, 16)
def decodeWalletImportFormat(WIFstring):
    """Decode a base58 Wallet-Import-Format private key.

    Returns the raw private key bytes (with the leading 0x80 byte removed),
    or "" if the checksum fails or the key does not begin with 0x80.
    """
    decoded = arithmetic.changebase(WIFstring,58,256)
    privkey, checksum = decoded[:-4], decoded[-4:]
    expectedChecksum = hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]
    if checksum != expectedChecksum:
        sys.stderr.write('Major problem! When trying to decode one of your private keys, the checksum failed. Here is the PRIVATE key: %s\n' % str(WIFstring))
        return ""
    #checksum passed
    if privkey[0] != '\x80': #WIF private keys begin with byte 0x80
        sys.stderr.write('Major problem! When trying to decode one of your private keys, the checksum passed but the key doesn\'t begin with hex 80. Here is the PRIVATE key: %s\n' % str(WIFstring))
        return ""
    return privkey[1:]
def reloadMyAddressHashes():
printLock.acquire()
print 'reloading keys from keys.dat file'
printLock.release()
myRSAAddressHashes.clear()
myECAddressHashes.clear()
#myPrivateKeys.clear()
configSections = config.sections()
for addressInKeysFile in configSections:
if addressInKeysFile <> 'bitmessagesettings':
isEnabled = config.getboolean(addressInKeysFile, 'enabled')
if isEnabled:
status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
if addressVersionNumber == 2:
privEncryptionKey = decodeWalletImportFormat(config.get(addressInKeysFile, 'privencryptionkey')).encode('hex') #returns a simple 32 bytes of information encoded in 64 Hex characters, or null if there was an error
if len(privEncryptionKey) == 64:#It is 32 bytes encoded as 64 hex characters
myECAddressHashes[hash] = highlevelcrypto.makeCryptor(privEncryptionKey)
elif addressVersionNumber == 1:
n = config.getint(addressInKeysFile, 'n')
e = config.getint(addressInKeysFile, 'e')
d = config.getint(addressInKeysFile, 'd')
p = config.getint(addressInKeysFile, 'p')
q = config.getint(addressInKeysFile, 'q')
myRSAAddressHashes[hash] = rsa.PrivateKey(n,e,d,p,q)
#This function expects that pubkey begin with \x04
def calculateBitcoinAddressFromPubkey(pubkey):
if len(pubkey)!= 65:
print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey),'bytes long rather than 65.'
return "error"
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha256')
sha.update(pubkey)
ripe.update(sha.digest())
ripeWithProdnetPrefix = '\x00' + ripe.digest()
checksum = hashlib.sha256(hashlib.sha256(ripeWithProdnetPrefix).digest()).digest()[:4]
binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
numberOfZeroBytesOnBinaryBitcoinAddress = 0
while binaryBitcoinAddress[0] == '\x00':
numberOfZeroBytesOnBinaryBitcoinAddress += 1
binaryBitcoinAddress = binaryBitcoinAddress[1:]
base58encoded = arithmetic.changebase(binaryBitcoinAddress,256,58)
return "1"*numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded
def calculateTestnetAddressFromPubkey(pubkey):
if len(pubkey)!= 65:
print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey),'bytes long rather than 65.'
return "error"
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha256')
sha.update(pubkey)
ripe.update(sha.digest())
ripeWithProdnetPrefix = '\x6F' + ripe.digest()
checksum = hashlib.sha256(hashlib.sha256(ripeWithProdnetPrefix).digest()).digest()[:4]
binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
numberOfZeroBytesOnBinaryBitcoinAddress = 0
while binaryBitcoinAddress[0] == '\x00':
numberOfZeroBytesOnBinaryBitcoinAddress += 1
binaryBitcoinAddress = binaryBitcoinAddress[1:]
base58encoded = arithmetic.changebase(binaryBitcoinAddress,256,58)
return "1"*numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded
#This thread exists because SQLITE3 is so un-threadsafe that we must submit queries to it and it puts results back in a different queue. They won't let us just use locks.
class sqlThread(QThread):
def __init__(self, parent = None):
QThread.__init__(self, parent)
def run(self):
self.conn = sqlite3.connect(appdata + 'messages.dat' )
self.conn.text_factory = str
self.cur = self.conn.cursor()
try:
self.cur.execute( '''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text, received text, message text, folder text, UNIQUE(msgid) ON CONFLICT REPLACE)''' )
self.cur.execute( '''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, lastactiontime integer, status text, pubkeyretrynumber integer, msgretrynumber integer, folder text)''' )
self.cur.execute( '''CREATE TABLE subscriptions (label text, address text, enabled bool)''' )
self.cur.execute( '''CREATE TABLE addressbook (label text, address text)''' )
self.cur.execute( '''CREATE TABLE blacklist (label text, address text, enabled bool)''' )
self.cur.execute( '''CREATE TABLE whitelist (label text, address text, enabled bool)''' )
#Explanation of what is in the pubkeys table:
# The hash is the RIPEMD160 hash that is encoded in the Bitmessage address.
# If you or someone else did the POW for this pubkey, then havecorrectnonce will be true. If you received the pubkey in a msg message then havecorrectnonce will be false. You won't have the correct nonce and won't be able to send the message to peers if they request the pubkey.
# transmitdata is literally the data that was included in the Bitmessage pubkey message when it arrived, except for the 24 byte protocol header- ie, it starts with the POW nonce.
# time is the time that the pubkey was broadcast on the network same as with every other type of Bitmessage object.
# usedpersonally is set to "yes" if we have used the key personally. This keeps us from deleting it because we may want to reply to a message in the future. This field is not a bool because we may need more flexability in the future and it doesn't take up much more space anyway.
self.cur.execute( '''CREATE TABLE pubkeys (hash blob, havecorrectnonce bool, transmitdata blob, time blob, usedpersonally text, UNIQUE(hash, havecorrectnonce) ON CONFLICT REPLACE)''' )
self.cur.execute( '''CREATE TABLE inventory (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE)''' )
self.cur.execute( '''CREATE TABLE knownnodes (timelastseen int, stream int, services blob, host blob, port blob, UNIQUE(host, stream, port) ON CONFLICT REPLACE)''' ) #This table isn't used in the program yet but I have a feeling that we'll need it.
self.cur.execute( '''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx',1)''')
self.conn.commit()
print 'Created messages database file'
except Exception, err:
if str(err) == 'table inbox already exists':
print 'Database file already exists.'
else:
sys.stderr.write('ERROR trying to create database file (message.dat). Error message: %s\n' % str(err))
sys.exit()
#People running earlier versions of PyBitmessage do not have the usedpersonally field in their pubkeys table. Let's add it.
if config.getint('bitmessagesettings','settingsversion') == 2:
item = '''ALTER TABLE pubkeys ADD usedpersonally text DEFAULT 'no' '''
parameters = ''
self.cur.execute(item, parameters)
self.conn.commit()
config.set('bitmessagesettings','settingsversion','3')
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
try:
testpayload = '\x00\x00'
t = ('1234','True',testpayload,'12345678','no')
self.cur.execute( '''INSERT INTO pubkeys VALUES(?,?,?,?,?)''',t)
self.conn.commit()
self.cur.execute('''SELECT transmitdata FROM pubkeys WHERE hash='1234' ''')
queryreturn = self.cur.fetchall()
for row in queryreturn:
transmitdata, = row
self.cur.execute('''DELETE FROM pubkeys WHERE hash='1234' ''')
self.conn.commit()
if transmitdata == '':
sys.stderr.write('Problem: The version of SQLite you have cannot store Null values. Please download and install the latest revision of your version of Python (for example, the latest Python 2.7 revision) and try again.\n')
sys.stderr.write('PyBitmessage will now exist very abruptly. You may now see threading errors related to this abrupt exit but the problem you need to solve is related to SQLite.\n\n')
sys.exit()
except Exception, err:
print err
while True:
item = sqlSubmitQueue.get()
parameters = sqlSubmitQueue.get()
#print 'item', item
#print 'parameters', parameters
self.cur.execute(item, parameters)
sqlReturnQueue.put(self.cur.fetchall())
sqlSubmitQueue.task_done()
self.conn.commit()
'''The singleCleaner class is a timer-driven thread that cleans data structures to free memory, resends messages when a remote node doesn't respond, and sends pong messages to keep connections alive if the network isn't busy.
It cleans these data structures in memory:
inventory (moves data to the on-disk sql database)
It cleans these tables on the disk:
inventory (clears data more than 2 days and 12 hours old)
pubkeys (clears pubkeys older than 4 weeks old which we have not used personally)
It resends messages when there has been no response:
resends getpubkey messages in two days (then 4 days, then 8 days, etc...)
resends msg messages in two days (then 4 days, then 8 days, etc...)
'''
class singleCleaner(QThread):
    """Timer-driven maintenance thread (one instance).

    Every 5 minutes it: flushes sufficiently old in-memory inventory entries
    to the on-disk sql database, tells the sendData threads to emit keepalive
    pongs, and — at most every ~2 hours — purges stale inventory/pubkey rows
    and re-submits 'sent' messages that never received a response.

    All database access goes through the global sqlSubmitQueue/sqlReturnQueue
    pair while holding sqlLock; each query is submitted as two puts (the SQL
    text, then the parameter tuple) followed by exactly one get for the
    result. That pairing must not be disturbed.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        # Forced to 0 so that the first pass through the loop also runs the
        # table-clearing / message-resend section below.
        timeWeLastClearedInventoryAndPubkeysTables = 0
        while True:
            time.sleep(300)  # wake every five minutes
            sqlLock.acquire()
            # Move inventory objects older than one hour out of RAM and into
            # the on-disk inventory table.
            for hash, storedValue in inventory.items():
                objectType, streamNumber, payload, receivedTime = storedValue
                if int(time.time())- 3600 > receivedTime:
                    t = (hash,objectType,streamNumber,payload,receivedTime)
                    sqlSubmitQueue.put('''INSERT INTO inventory VALUES (?,?,?,?,?)''')
                    sqlSubmitQueue.put(t)
                    sqlReturnQueue.get()
                    del inventory[hash]
            sqlLock.release()
            broadcastToSendDataQueues((0, 'pong', 'no data')) #commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
            # The heavier cleanup below runs at most once per 7380 seconds
            # (2 hours 3 minutes).
            if timeWeLastClearedInventoryAndPubkeysTables < int(time.time()) - 7380:
                timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
                #inventory (moves data from the inventory data structure to the on-disk sql database)
                sqlLock.acquire()
                #inventory (clears data more than 2 days and 12 hours old)
                t = (int(time.time())-lengthOfTimeToLeaveObjectsInInventory,)
                sqlSubmitQueue.put('''DELETE FROM inventory WHERE receivedtime<?''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                #pubkeys
                # Drop pubkeys past their retention window unless we have
                # used them personally to send a message.
                t = (int(time.time())-lengthOfTimeToHoldOnToAllPubkeys,)
                sqlSubmitQueue.put('''DELETE FROM pubkeys WHERE time<? AND usedpersonally='no' ''')
                sqlSubmitQueue.put(t)
                sqlReturnQueue.get()
                # Find sent messages that are still awaiting either a pubkey
                # or an acknowledgement so they can be retried.
                t = ()
                sqlSubmitQueue.put('''select toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber FROM sent WHERE ((status='findingpubkey' OR status='sentmessage') AND folder='sent') ''') #If the message's folder='trash' then we'll ignore it.
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                for row in queryreturn:
                    toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber = row
                    if status == 'findingpubkey':
                        # Exponential backoff: wait twice as long after each
                        # unanswered getpubkey request.
                        if int(time.time()) - lastactiontime > (maximumAgeOfAnObjectThatIAmWillingToAccept * (2 ** (pubkeyretrynumber))):
                            print 'It has been a long time and we haven\'t heard a response to our getpubkey request. Sending again.'
                            try:
                                del neededPubkeys[toripe] #We need to take this entry out of the neededPubkeys structure because the workerQueue checks to see whether the entry is already present and will not do the POW and send the message because it assumes that it has already done it recently.
                            except:
                                pass
                            workerQueue.put(('sendmessage',toaddress))
                            t = (int(time.time()),pubkeyretrynumber+1,toripe)
                            sqlSubmitQueue.put('''UPDATE sent SET lastactiontime=?, pubkeyretrynumber=? WHERE toripe=?''')
                            sqlSubmitQueue.put(t)
                            sqlReturnQueue.get()
                            #self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),toripe,'Public key requested again. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                    else:# status == sentmessage
                        # Same exponential backoff, keyed on msgretrynumber,
                        # for messages that never got an acknowledgement.
                        if int(time.time()) - lastactiontime > (maximumAgeOfAnObjectThatIAmWillingToAccept * (2 ** (msgretrynumber))):
                            print 'It has been a long time and we haven\'t heard an acknowledgement to our msg. Sending again.'
                            t = (int(time.time()),msgretrynumber+1,'findingpubkey',ackdata)
                            sqlSubmitQueue.put('''UPDATE sent SET lastactiontime=?, msgretrynumber=?, status=? WHERE ackdata=?''')
                            sqlSubmitQueue.put(t)
                            sqlReturnQueue.get()
                            #self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Message sent again because the acknowledgement was never received. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                            workerQueue.put(('sendmessage',toaddress))
                sqlLock.release()
                #Clear the status bar in case a message has been sitting there for a while.
                self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"")
#This thread, of which there is only one, does the heavy lifting: calculating POWs.
class singleWorker(QThread):
    """The single worker thread that performs every proof-of-work (POW)
    computation: for our own pubkey broadcasts, for getpubkey requests, for
    broadcast messages, and for person-to-person msg messages.

    Work arrives on the global workerQueue as (command, data) tuples; results
    are announced to the network by putting 'sendinv' commands on the
    sendData queues and to the GUI via PyQt SIGNAL emits. Database access
    uses the same sqlLock + sqlSubmitQueue/sqlReturnQueue protocol as the
    other threads: SQL text put, parameter tuple put, one result get.
    """
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
    def run(self):
        """Rebuild in-memory state from the 'sent' table, finish any
        half-completed work from a previous session, then loop forever
        servicing workerQueue commands."""
        # Re-populate neededPubkeys with the ripe hash of every message that
        # is still waiting on a recipient pubkey.
        sqlLock.acquire()
        sqlSubmitQueue.put('''SELECT toripe FROM sent WHERE (status=? AND folder='sent')''')
        sqlSubmitQueue.put(('findingpubkey',))
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toripe, = row
            neededPubkeys[toripe] = 0
        self.sendBroadcast() #just in case there are any proof of work tasks for Broadcasts that have yet to be sent.
        #Now let us see if there are any proofs of work for msg messages that we have yet to complete..
        sqlLock.acquire()
        t = ('doingpow',)
        sqlSubmitQueue.put('SELECT toripe FROM sent WHERE status=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toripe, = row
            self.sendMsg(toripe)
        # Main dispatch loop: block on workerQueue and handle one command
        # tuple at a time.
        while True:
            command, data = workerQueue.get()
            #statusbar = 'The singleWorker thread is working on work.'
            #self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
            if command == 'sendmessage':
                toAddress = data
                toStatus,toAddressVersionNumber,toStreamNumber,toRipe = decodeAddress(toAddress)
                #print 'message type', type(message)
                #print repr(message.toUtf8())
                #print str(message.toUtf8())
                # Do we already hold the recipient's pubkey?
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT * FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                #print 'queryreturn', queryreturn
                if queryreturn == []:
                    #We'll need to request the pub key because we don't have it.
                    if not toRipe in neededPubkeys:
                        neededPubkeys[toRipe] = 0
                        print 'requesting pubkey:', toRipe.encode('hex')
                        self.requestPubKey(toAddressVersionNumber,toStreamNumber,toRipe)
                    else:
                        print 'We have already requested this pubkey (the ripe hash is in neededPubkeys). We will re-request again soon.'
                        self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),toRipe,'Public key was requested earlier. Receiver must be offline. Will retry.')
                else:
                    print 'We already have the necessary public key.'
                    self.sendMsg(toRipe) #by calling this function, we are asserting that we already have the pubkey for toRipe
            elif command == 'sendbroadcast':
                print 'Within WorkerThread, processing sendbroadcast command.'
                fromAddress,subject,message = data
                self.sendBroadcast()
            elif command == 'doPOWForMyV2Pubkey':
                self.doPOWForMyV2Pubkey(data)
            elif command == 'newpubkey':
                # A pubkey we may have been waiting for has arrived from the
                # network; if it is one we asked for, send the queued message.
                toAddressVersion,toStreamNumber,toRipe = data
                if toRipe in neededPubkeys:
                    print 'We have been awaiting the arrival of this pubkey.'
                    del neededPubkeys[toRipe]
                    self.sendMsg(toRipe)
                else:
                    print 'We don\'t need this pub key. We didn\'t ask for it. Pubkey hash:', toRipe.encode('hex')
            workerQueue.task_done()
    def doPOWForMyV2Pubkey(self,myAddress): #This function also broadcasts out the pubkey message once it is done with the POW
        """Assemble the v2 pubkey payload for one of our own addresses, do
        the POW, store it in the pubkeys table, and advertise it via inv."""
        status,addressVersionNumber,streamNumber,hash = decodeAddress(myAddress)
        embeddedTime = int(time.time())+random.randrange(-300, 300) #the current time plus or minus five minutes
        payload = pack('>I',(embeddedTime))
        payload += encodeVarint(2) #Address version number
        payload += encodeVarint(streamNumber)
        payload += '\x00\x00\x00\x01' #bitfield of features supported by me (see the wiki).
        try:
            privSigningKeyBase58 = config.get(myAddress, 'privsigningkey')
            privEncryptionKeyBase58 = config.get(myAddress, 'privencryptionkey')
        except Exception, err:
            printLock.acquire()
            sys.stderr.write('Error within doPOWForMyV2Pubkey. Could not read the keys from the keys.dat file for a requested address. %s\n' % err)
            printLock.release()
            return
        # Convert the stored WIF private keys to the raw public keys we
        # transmit.
        privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
        privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
        pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex')
        pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
        # [1:] strips the leading 0x04 uncompressed-point prefix byte.
        payload += pubSigningKey[1:]
        payload += pubEncryptionKey[1:]
        #Do the POW for this pubkey message
        nonce = 0
        trialValue = 99999999999999999999  # sentinel larger than any possible 64-bit trial value
        target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
        print '(For pubkey message) Doing proof of work...'
        initialHash = hashlib.sha512(payload).digest()
        # Brute-force nonces until double-SHA512(nonce||hash) is under target.
        while trialValue > target:
            nonce += 1
            trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
        print '(For pubkey message) Found proof of work', trialValue, 'Nonce:', nonce
        payload = pack('>Q',nonce) + payload
        # Record our own pubkey in the local pubkeys table as well.
        t = (hash,True,payload,embeddedTime,'no')
        sqlLock.acquire()
        sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        inventoryHash = calculateInventoryHash(payload)
        objectType = 'pubkey'
        inventory[inventoryHash] = (objectType, streamNumber, payload, embeddedTime)
        printLock.acquire()
        print 'broadcasting inv with hash:', inventoryHash.encode('hex')
        printLock.release()
        broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),"")
    def sendBroadcast(self):
        """Assemble, sign, POW, and transmit every 'sent' row whose status is
        'broadcastpending', then mark each row 'broadcastsent'. Handles both
        v2 (ECC) and the soon-to-be-removed v1 (RSA) sender addresses."""
        sqlLock.acquire()
        t = ('broadcastpending',)
        sqlSubmitQueue.put('SELECT fromaddress, subject, message, ackdata FROM sent WHERE status=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            fromaddress, subject, body, ackdata = row
            status,addressVersionNumber,streamNumber,ripe = decodeAddress(fromaddress)
            if addressVersionNumber == 2:
                #We need to convert our private keys to public keys in order to include them.
                privSigningKeyBase58 = config.get(fromaddress, 'privsigningkey')
                privEncryptionKeyBase58 = config.get(fromaddress, 'privencryptionkey')
                privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
                privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
                pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex') #At this time these pubkeys are 65 bytes long because they include the encoding byte which we won't be sending in the broadcast message.
                pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
                # Build the broadcast payload per the wire format: time,
                # versions, stream, bitfield, keys, ripe, then the encoded
                # message and a signature over everything so far.
                payload = pack('>I',(int(time.time())+random.randrange(-300, 300)))#the current time plus or minus five minutes
                payload += encodeVarint(1) #broadcast version
                payload += encodeVarint(addressVersionNumber)
                payload += encodeVarint(streamNumber)
                payload += '\x00\x00\x00\x01' #behavior bitfield
                payload += pubSigningKey[1:]
                payload += pubEncryptionKey[1:]
                payload += ripe
                payload += '\x02' #message encoding type
                payload += encodeVarint(len('Subject:' + subject + '\n' + 'Body:' + body)) #Type 2 is simple UTF-8 message encoding.
                payload += 'Subject:' + subject + '\n' + 'Body:' + body
                signature = highlevelcrypto.sign(payload,privSigningKeyHex)
                payload += encodeVarint(len(signature))
                payload += signature
                # POW over the assembled payload (same scheme as elsewhere).
                nonce = 0
                trialValue = 99999999999999999999
                target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
                print '(For broadcast message) Doing proof of work...'
                initialHash = hashlib.sha512(payload).digest()
                while trialValue > target:
                    nonce += 1
                    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
                print '(For broadcast message) Found proof of work', trialValue, 'Nonce:', nonce
                payload = pack('>Q',nonce) + payload
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'broadcast'
                inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
                print 'sending inv (within sendBroadcast function)'
                broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
                self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Broadcast sent at '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                #Update the status of the message in the 'sent' table to have a 'broadcastsent' status
                sqlLock.acquire()
                t = ('broadcastsent',int(time.time()),fromaddress, subject, body,'broadcastpending')
                sqlSubmitQueue.put('UPDATE sent SET status=?, lastactiontime=? WHERE fromaddress=? AND subject=? AND message=? AND status=?')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
            elif addressVersionNumber == 1: #This whole section can be taken out soon because we aren't supporting v1 addresses for much longer.
                messageToTransmit = '\x02' #message encoding type
                messageToTransmit += encodeVarint(len('Subject:' + subject + '\n' + 'Body:' + body)) #Type 2 is simple UTF-8 message encoding.
                messageToTransmit += 'Subject:' + subject + '\n' + 'Body:' + body
                #We need the all the integers for our private key in order to sign our message, and we need our public key to send with the message.
                n = config.getint(fromaddress, 'n')
                e = config.getint(fromaddress, 'e')
                d = config.getint(fromaddress, 'd')
                p = config.getint(fromaddress, 'p')
                q = config.getint(fromaddress, 'q')
                nString = convertIntToString(n)
                eString = convertIntToString(e)
                #myPubkey = rsa.PublicKey(n,e)
                myPrivatekey = rsa.PrivateKey(n,e,d,p,q)
                #The payload of the broadcast message starts with a POW, but that will be added later.
                payload = pack('>I',(int(time.time())))
                payload += encodeVarint(1) #broadcast version
                payload += encodeVarint(addressVersionNumber)
                payload += encodeVarint(streamNumber)
                payload += ripe
                payload += encodeVarint(len(nString))
                payload += nString
                payload += encodeVarint(len(eString))
                payload += eString
                payload += messageToTransmit
                # NOTE(review): the v1 signature covers only messageToTransmit,
                # unlike v2 which signs the whole payload — confirm this
                # matches what v1 receivers verify.
                signature = rsa.sign(messageToTransmit,myPrivatekey,'SHA-512')
                #print 'signature', signature.encode('hex')
                payload += signature
                #print 'nString', repr(nString)
                #print 'eString', repr(eString)
                nonce = 0
                trialValue = 99999999999999999999
                target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
                print '(For broadcast message) Doing proof of work...'
                initialHash = hashlib.sha512(payload).digest()
                while trialValue > target:
                    nonce += 1
                    trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
                print '(For broadcast message) Found proof of work', trialValue, 'Nonce:', nonce
                payload = pack('>Q',nonce) + payload
                inventoryHash = calculateInventoryHash(payload)
                objectType = 'broadcast'
                inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
                print 'sending inv (within sendBroadcast function)'
                broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
                self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Broadcast sent at '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                #Update the status of the message in the 'sent' table to have a 'broadcastsent' status
                sqlLock.acquire()
                t = ('broadcastsent',int(time.time()),fromaddress, subject, body,'broadcastpending')
                sqlSubmitQueue.put('UPDATE sent SET status=?, lastactiontime=? WHERE fromaddress=? AND subject=? AND message=? AND status=?')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
            else:
                printLock.acquire()
                print 'In the singleWorker thread, the sendBroadcast function doesn\'t understand the address version'
                printLock.release()
    def sendMsg(self,toRipe):
        """Assemble, encrypt, POW, and transmit every queued message destined
        for the address whose ripe hash is toRipe, then mark each row
        'sentmessage'. The caller asserts that the recipient's pubkey is
        already present in our pubkeys table."""
        # Promote all waiting rows for this recipient to 'doingpow', then
        # fetch them.
        sqlLock.acquire()
        t = ('doingpow','findingpubkey',toRipe)
        sqlSubmitQueue.put('UPDATE sent SET status=? WHERE status=? AND toripe=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        t = ('doingpow',toRipe)
        sqlSubmitQueue.put('SELECT toaddress, fromaddress, subject, message, ackdata FROM sent WHERE status=? AND toripe=?')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        for row in queryreturn:
            toaddress, fromaddress, subject, message, ackdata = row
            # Register the ackdata so the receive path can recognize the
            # acknowledgement when it comes back.
            ackdataForWhichImWatching[ackdata] = 0
            toStatus,toAddressVersionNumber,toStreamNumber,toHash = decodeAddress(toaddress)
            fromStatus,fromAddressVersionNumber,fromStreamNumber,fromHash = decodeAddress(fromaddress)
            self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Doing work necessary to send the message.')
            printLock.acquire()
            print 'Found a message in our database that needs to be sent with this pubkey.'
            print 'First 150 characters of message:', message[:150]
            printLock.release()
            embeddedTime = pack('>I',(int(time.time())+random.randrange(-300, 300)))#the current time plus or minus five minutes. We will use this time both for our message and for the ackdata packed within our message.
            if fromAddressVersionNumber == 2:
                payload = '\x01' #Message version.
                payload += encodeVarint(fromAddressVersionNumber)
                payload += encodeVarint(fromStreamNumber)
                payload += '\x00\x00\x00\x01' #Bitfield of features and behaviors that can be expected from me. (See https://bitmessage.org/wiki/Protocol_specification#Pubkey_bitfield_features )
                #We need to convert our private keys to public keys in order to include them.
                privSigningKeyBase58 = config.get(fromaddress, 'privsigningkey')
                privEncryptionKeyBase58 = config.get(fromaddress, 'privencryptionkey')
                privSigningKeyHex = decodeWalletImportFormat(privSigningKeyBase58).encode('hex')
                privEncryptionKeyHex = decodeWalletImportFormat(privEncryptionKeyBase58).encode('hex')
                pubSigningKey = highlevelcrypto.privToPub(privSigningKeyHex).decode('hex')
                pubEncryptionKey = highlevelcrypto.privToPub(privEncryptionKeyHex).decode('hex')
                payload += pubSigningKey[1:] #The \x04 on the beginning of the public keys are not sent. This way there is only one acceptable way to encode and send a public key.
                payload += pubEncryptionKey[1:]
                payload += toHash #This hash will be checked by the receiver of the message to verify that toHash belongs to them. This prevents a Surreptitious Forwarding Attack.
                payload += '\x02' #Type 2 is simple UTF-8 message encoding as specified on the Protocol Specification on the Bitmessage Wiki.
                messageToTransmit = 'Subject:' + subject + '\n' + 'Body:' + message
                payload += encodeVarint(len(messageToTransmit))
                payload += messageToTransmit
                fullAckPayload = self.generateFullAckMessage(ackdata,toStreamNumber,embeddedTime)#The fullAckPayload is a normal msg protocol message with the proof of work already completed that the receiver of this message can easily send out.
                payload += encodeVarint(len(fullAckPayload))
                payload += fullAckPayload
                signature = highlevelcrypto.sign(payload,privSigningKeyHex)
                payload += encodeVarint(len(signature))
                payload += signature
            elif fromAddressVersionNumber == 1: #This code is for old version 1 (RSA) addresses. It will soon be removed.
                payload = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' #this run of nulls allows the true message receiver to identify his message
                payload += '\x01' #Message version.
                payload += '\x00\x00\x00\x01'
                payload += encodeVarint(fromAddressVersionNumber)
                payload += encodeVarint(fromStreamNumber)
                try:
                    sendersN = convertIntToString(config.getint(fromaddress, 'n'))
                except:
                    printLock.acquire()
                    print 'Error: Could not find', fromaddress, 'in our keys.dat file. You must have deleted it. Aborting the send.'
                    printLock.release()
                    return
                payload += encodeVarint(len(sendersN))
                payload += sendersN
                sendersE = convertIntToString(config.getint(fromaddress, 'e'))
                payload += encodeVarint(len(sendersE))
                payload += sendersE
                payload += '\x02' #Type 2 is simple UTF-8 message encoding.
                messageToTransmit = 'Subject:' + subject + '\n' + 'Body:' + message
                payload += encodeVarint(len(messageToTransmit))
                payload += messageToTransmit
                #Later, if anyone impliments clients that don't send the ack_data, then we should probably check here to make sure that the receiver will make use of this ack_data and not attach it if not.
                fullAckPayload = self.generateFullAckMessage(ackdata,toStreamNumber,embeddedTime)
                payload += encodeVarint(len(fullAckPayload))
                payload += fullAckPayload
                sendersPrivKey = rsa.PrivateKey(config.getint(fromaddress, 'n'),config.getint(fromaddress, 'e'),config.getint(fromaddress, 'd'),config.getint(fromaddress, 'p'),config.getint(fromaddress, 'q'))
                payload += rsa.sign(payload,sendersPrivKey,'SHA-512')
            #We have assembled the data that will be encrypted. Now let us fetch the recipient's public key out of our database and do the encryption.
            if toAddressVersionNumber == 2:
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT transmitdata FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                for row in queryreturn:
                    pubkeyPayload, = row
                #The pubkey is stored the way we originally received it which means that we need to read beyond things like the nonce and time to get to the public keys.
                readPosition = 8 #to bypass the nonce
                readPosition += 4 #to bypass the embedded time
                readPosition += 1 #to bypass the address version whose length is definitely 1
                streamNumber, streamNumberLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += streamNumberLength
                behaviorBitfield = pubkeyPayload[readPosition:readPosition+4]
                readPosition += 4 #to bypass the bitfield of behaviors
                #pubSigningKeyBase256 = pubkeyPayload[readPosition:readPosition+64] #We don't use this key for anything here.
                readPosition += 64
                pubEncryptionKeyBase256 = pubkeyPayload[readPosition:readPosition+64]
                readPosition += 64
                # Re-attach the 0x04 uncompressed-point prefix before ECC
                # encryption.
                encrypted = highlevelcrypto.encrypt(payload,"04"+pubEncryptionKeyBase256.encode('hex'))
            elif toAddressVersionNumber == 1:
                sqlLock.acquire()
                sqlSubmitQueue.put('SELECT transmitdata FROM pubkeys WHERE hash=?')
                sqlSubmitQueue.put((toRipe,))
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                for row in queryreturn:
                    pubkeyPayload, = row
                # Parse the stored v1 pubkey message to recover the RSA
                # modulus n and exponent e.
                readPosition = 8 #to bypass the nonce
                behaviorBitfield = pubkeyPayload[8:12]
                readPosition += 4 #to bypass the bitfield of behaviors
                addressVersion, addressVersionLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += addressVersionLength
                streamNumber, streamNumberLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += streamNumberLength
                nLength, nLengthLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += nLengthLength
                n = convertStringToInt(pubkeyPayload[readPosition:readPosition+nLength])
                readPosition += nLength
                eLength, eLengthLength = decodeVarint(pubkeyPayload[readPosition:readPosition+10])
                readPosition += eLengthLength
                e = convertStringToInt(pubkeyPayload[readPosition:readPosition+eLength])
                receiversPubkey = rsa.PublicKey(n,e)
                infile = cStringIO.StringIO(payload)
                outfile = cStringIO.StringIO()
                #print 'Encrypting using public key:', receiversPubkey
                encrypt_bigfile(infile,outfile,receiversPubkey)
                encrypted = outfile.getvalue()
                infile.close()
                outfile.close()
            nonce = 0
            trialValue = 99999999999999999999
            encodedStreamNumber = encodeVarint(toStreamNumber)
            #We are now dropping the unencrypted data in payload since it has already been encrypted and replacing it with the encrypted payload that we will send out.
            payload = embeddedTime + encodedStreamNumber + encrypted
            target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
            print '(For msg message) Doing proof of work. Target:', target
            powStartTime = time.time()
            initialHash = hashlib.sha512(payload).digest()
            while trialValue > target:
                nonce += 1
                trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
            print '(For msg message) Found proof of work', trialValue, 'Nonce:', nonce
            print 'POW took', int(time.time()-powStartTime), 'seconds.', nonce/(time.time()-powStartTime), 'nonce trials per second.'
            payload = pack('>Q',nonce) + payload
            inventoryHash = calculateInventoryHash(payload)
            objectType = 'msg'
            inventory[inventoryHash] = (objectType, toStreamNumber, payload, int(time.time()))
            self.emit(SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"),ackdata,'Message sent. Waiting on acknowledgement. Sent on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
            print 'sending inv (within sendmsg function)'
            # NOTE(review): this uses `streamNumber` (parsed from the
            # recipient's stored pubkey above) while the inventory entry two
            # lines up uses `toStreamNumber` (decoded from the address) —
            # confirm these always agree; if they can differ this advertises
            # the inv on the wrong stream.
            broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
            #Update the status of the message in the 'sent' table to have a 'sent' status
            sqlLock.acquire()
            t = ('sentmessage',toaddress, fromaddress, subject, message,'doingpow')
            sqlSubmitQueue.put('UPDATE sent SET status=? WHERE toaddress=? AND fromaddress=? AND subject=? AND message=? AND status=?')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            # Mark the pubkey as personally used so the singleCleaner thread
            # never purges it.
            t = (toRipe,)
            sqlSubmitQueue.put('''UPDATE pubkeys SET usedpersonally='yes' WHERE hash=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
    def requestPubKey(self,addressVersionNumber,streamNumber,ripe):
        """Build a getpubkey payload for the given ripe hash, do the POW, and
        advertise it on the given stream."""
        payload = pack('>I',int(time.time()))
        payload += encodeVarint(addressVersionNumber)
        payload += encodeVarint(streamNumber)
        payload += ripe
        printLock.acquire()
        print 'making request for pubkey with ripe:', ripe.encode('hex')
        printLock.release()
        nonce = 0
        trialValue = 99999999999999999999
        #print 'trial value', trialValue
        statusbar = 'Doing the computations necessary to request the recipient\'s public key.'
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
        self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),ripe,'Doing work necessary to request public key.')
        print 'Doing proof-of-work necessary to send getpubkey message.'
        target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
        initialHash = hashlib.sha512(payload).digest()
        while trialValue > target:
            nonce += 1
            trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
        printLock.acquire()
        print 'Found proof of work', trialValue, 'Nonce:', nonce
        printLock.release()
        payload = pack('>Q',nonce) + payload
        inventoryHash = calculateInventoryHash(payload)
        objectType = 'getpubkey'
        inventory[inventoryHash] = (objectType, streamNumber, payload, int(time.time()))
        print 'sending inv (for the getpubkey message)'
        #payload = '\x01' + pack('>H',objectType) + hash
        broadcastToSendDataQueues((streamNumber, 'sendinv', inventoryHash))
        self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Broacasting the public key request. This program will auto-retry if they are offline.')
        self.emit(SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"),ripe,'Sending public key request. Waiting for reply. Requested at ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
    def generateFullAckMessage(self,ackdata,toStreamNumber,embeddedTime):
        """Return a complete msg protocol message (header + POW + payload)
        wrapping ackdata, so the recipient can relay it back to us as an
        acknowledgement with no work of their own."""
        nonce = 0
        trialValue = 99999999999999999999
        encodedStreamNumber = encodeVarint(toStreamNumber)
        payload = embeddedTime + encodedStreamNumber + ackdata
        target = 2**64 / ((len(payload)+payloadLengthExtraBytes+8) * averageProofOfWorkNonceTrialsPerByte)
        printLock.acquire()
        print '(For ack message) Doing proof of work...'
        printLock.release()
        powStartTime = time.time()
        initialHash = hashlib.sha512(payload).digest()
        while trialValue > target:
            nonce += 1
            trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
        printLock.acquire()
        print '(For ack message) Found proof of work', trialValue, 'Nonce:', nonce
        print 'POW took', int(time.time()-powStartTime), 'seconds.', nonce/(time.time()-powStartTime), 'nonce trials per second.'
        printLock.release()
        payload = pack('>Q',nonce) + payload
        # Prepend the full wire-protocol message header: magic, command,
        # length, and a 4-byte sha512 checksum of the payload.
        headerData = '\xe9\xbe\xb4\xd9' #magic bits, slighly different from Bitcoin's magic bits.
        headerData += 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        headerData += pack('>L',len(payload))
        headerData += hashlib.sha512(payload).digest()[:4]
        return headerData + payload
class addressGenerator(QThread):
def __init__(self, parent = None):
QThread.__init__(self, parent)
def setup(self,addressVersionNumber,streamNumber,label="(no label)",numberOfAddressesToMake=1,deterministicPassphrase="",eighteenByteRipe=False):
self.addressVersionNumber = addressVersionNumber
self.streamNumber = streamNumber
self.label = label
self.numberOfAddressesToMake = numberOfAddressesToMake
self.deterministicPassphrase = deterministicPassphrase
self.eighteenByteRipe = eighteenByteRipe
def run(self):
if self.addressVersionNumber == 2:
if self.deterministicPassphrase == "":
statusbar = 'Generating one new address'
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
#This next section is a little bit strange. We're going to generate keys over and over until we
#find one that starts with either \x00 or \x00\x00. Then when we pack them into a Bitmessage address,
#we won't store the \x00 or \x00\x00 bytes thus making the address shorter.
startTime = time.time()
numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
potentialPrivSigningKey = OpenSSL.rand(32)
potentialPubSigningKey = self.pointMult(potentialPrivSigningKey)
while True:
numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
potentialPrivEncryptionKey = OpenSSL.rand(32)
potentialPubEncryptionKey = self.pointMult(potentialPrivEncryptionKey)
#print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
#print 'potentialPubEncryptionKey', potentialPubEncryptionKey.encode('hex')
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha512')
sha.update(potentialPubSigningKey+potentialPubEncryptionKey)
ripe.update(sha.digest())
#print 'potential ripe.digest', ripe.digest().encode('hex')
if self.eighteenByteRipe:
if ripe.digest()[:2] == '\x00\x00':
break
else:
if ripe.digest()[:1] == '\x00':
break
print 'Generated address with ripe digest:', ripe.digest().encode('hex')
print 'Address generator calculated', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, 'addresses at', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix/(time.time()-startTime),'addresses per second before finding one with the correct ripe-prefix.'
if ripe.digest()[:2] == '\x00\x00':
address = encodeAddress(2,self.streamNumber,ripe.digest()[2:])
elif ripe.digest()[:1] == '\x00':
address = encodeAddress(2,self.streamNumber,ripe.digest()[1:])
#self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
#An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
#https://en.bitcoin.it/wiki/Wallet_import_format
privSigningKey = '\x80'+potentialPrivSigningKey
checksum = hashlib.sha256(hashlib.sha256(privSigningKey).digest()).digest()[0:4]
privSigningKeyWIF = arithmetic.changebase(privSigningKey + checksum,256,58)
#print 'privSigningKeyWIF',privSigningKeyWIF
privEncryptionKey = '\x80'+potentialPrivEncryptionKey
checksum = hashlib.sha256(hashlib.sha256(privEncryptionKey).digest()).digest()[0:4]
privEncryptionKeyWIF = arithmetic.changebase(privEncryptionKey + checksum,256,58)
#print 'privEncryptionKeyWIF',privEncryptionKeyWIF
config.add_section(address)
config.set(address,'label',self.label)
config.set(address,'enabled','true')
config.set(address,'decoy','false')
config.set(address,'privSigningKey',privSigningKeyWIF)
config.set(address,'privEncryptionKey',privEncryptionKeyWIF)
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address. Doing work necessary to broadcast it...')
self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
reloadMyAddressHashes()
workerQueue.put(('doPOWForMyV2Pubkey',address))
else: #There is something in the deterministicPassphrase variable thus we are going to do this deterministically.
statusbar = 'Generating '+str(self.numberOfAddressesToMake) + ' new addresses.'
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
signingKeyNonce = 0
encryptionKeyNonce = 1
for i in range(self.numberOfAddressesToMake):
#This next section is a little bit strange. We're going to generate keys over and over until we
#find one that has a RIPEMD hash that starts with either \x00 or \x00\x00. Then when we pack them
#into a Bitmessage address, we won't store the \x00 or \x00\x00 bytes thus making the address shorter.
startTime = time.time()
numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
while True:
numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
potentialPrivSigningKey = hashlib.sha512(self.deterministicPassphrase + encodeVarint(signingKeyNonce)).digest()[:32]
potentialPrivEncryptionKey = hashlib.sha512(self.deterministicPassphrase + encodeVarint(encryptionKeyNonce)).digest()[:32]
potentialPubSigningKey = self.pointMult(potentialPrivSigningKey)
potentialPubEncryptionKey = self.pointMult(potentialPrivEncryptionKey)
#print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
#print 'potentialPubEncryptionKey', potentialPubEncryptionKey.encode('hex')
signingKeyNonce += 2
encryptionKeyNonce += 2
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha512')
sha.update(potentialPubSigningKey+potentialPubEncryptionKey)
ripe.update(sha.digest())
#print 'potential ripe.digest', ripe.digest().encode('hex')
if self.eighteenByteRipe:
if ripe.digest()[:2] == '\x00\x00':
break
else:
if ripe.digest()[:1] == '\x00':
break
print 'ripe.digest', ripe.digest().encode('hex')
print 'Address generator calculated', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, 'addresses at', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix/(time.time()-startTime),'keys per second.'
if ripe.digest()[:2] == '\x00\x00':
address = encodeAddress(2,self.streamNumber,ripe.digest()[2:])
elif ripe.digest()[:1] == '\x00':
address = encodeAddress(2,self.streamNumber,ripe.digest()[1:])
#self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
#An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
#https://en.bitcoin.it/wiki/Wallet_import_format
privSigningKey = '\x80'+potentialPrivSigningKey
checksum = hashlib.sha256(hashlib.sha256(privSigningKey).digest()).digest()[0:4]
privSigningKeyWIF = arithmetic.changebase(privSigningKey + checksum,256,58)
privEncryptionKey = '\x80'+potentialPrivEncryptionKey
checksum = hashlib.sha256(hashlib.sha256(privEncryptionKey).digest()).digest()[0:4]
privEncryptionKeyWIF = arithmetic.changebase(privEncryptionKey + checksum,256,58)
try:
config.add_section(address)
print 'self.label', self.label
config.set(address,'label',self.label)
config.set(address,'enabled','true')
config.set(address,'decoy','false')
config.set(address,'privSigningKey',privSigningKeyWIF)
config.set(address,'privEncryptionKey',privEncryptionKeyWIF)
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
except:
print address,'already exists. Not adding it again.'
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address')
reloadMyAddressHashes()
#This code which deals with old RSA addresses will soon be removed.
elif self.addressVersionNumber == 1:
statusbar = 'Generating new ' + str(config.getint('bitmessagesettings', 'bitstrength')) + ' bit RSA key. This takes a minute on average. If you want to generate multiple addresses now, you can; they will queue.'
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),statusbar)
(pubkey, privkey) = rsa.newkeys(config.getint('bitmessagesettings', 'bitstrength'))
print privkey['n']
print privkey['e']
print privkey['d']
print privkey['p']
print privkey['q']
sha = hashlib.new('sha512')
#sha.update(str(pubkey.n)+str(pubkey.e))
sha.update(convertIntToString(pubkey.n)+convertIntToString(pubkey.e))
ripe = hashlib.new('ripemd160')
ripe.update(sha.digest())
address = encodeAddress(1,self.streamNumber,ripe.digest())
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Finished generating address. Writing to keys.dat')
config.add_section(address)
config.set(address,'label',self.label)
config.set(address,'enabled','true')
config.set(address,'decoy','false')
config.set(address,'n',str(privkey['n']))
config.set(address,'e',str(privkey['e']))
config.set(address,'d',str(privkey['d']))
config.set(address,'p',str(privkey['p']))
config.set(address,'q',str(privkey['q']))
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
self.emit(SIGNAL("updateStatusBar(PyQt_PyObject)"),'Done generating address')
self.emit(SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"),self.label,address,str(self.streamNumber))
reloadMyAddressHashes()
#Does an EC point multiplication; turns a private key into a public key.
    def pointMult(self,secret):
        """Derive the public key for the 32-byte private key *secret* via an
        EC point multiplication on curve secp256k1.

        Returns the serialized public key (as produced by OpenSSL's
        i2o_ECPublicKey) as a raw byte string. All OpenSSL objects created
        here are freed before returning to avoid leaking native memory.
        """
        #ctx = OpenSSL.BN_CTX_new() #This value proved to cause Seg Faults on Linux. It turns out that it really didn't speed up EC_POINT_mul anyway.
        k = OpenSSL.EC_KEY_new_by_curve_name(OpenSSL.get_curve('secp256k1'))
        # Interpret the 32 secret bytes as a big-endian bignum.
        priv_key = OpenSSL.BN_bin2bn(secret, 32, 0)
        group = OpenSSL.EC_KEY_get0_group(k)
        pub_key = OpenSSL.EC_POINT_new(group)
        # pub_key = priv_key * G (multiplication by the group generator).
        OpenSSL.EC_POINT_mul(group, pub_key, priv_key, None, None, None)
        OpenSSL.EC_KEY_set_private_key(k, priv_key)
        OpenSSL.EC_KEY_set_public_key(k, pub_key)
        #print 'priv_key',priv_key
        #print 'pub_key',pub_key
        # First call with a null buffer yields the required size; the second
        # call serializes the public key into mb.
        size = OpenSSL.i2o_ECPublicKey(k, 0)
        mb = ctypes.create_string_buffer(size)
        OpenSSL.i2o_ECPublicKey(k, ctypes.byref(ctypes.pointer(mb)))
        #print 'mb.raw', mb.raw.encode('hex'), 'length:', len(mb.raw)
        #print 'mb.raw', mb.raw, 'length:', len(mb.raw)
        # Release the native objects (key object, bignum, point) in reverse
        # order of any remaining dependencies.
        OpenSSL.EC_POINT_free(pub_key)
        #OpenSSL.BN_CTX_free(ctx)
        OpenSSL.BN_free(priv_key)
        OpenSSL.EC_KEY_free(k)
        return mb.raw
class iconGlossaryDialog(QtGui.QDialog):
    """Dialog explaining what the status-bar icons mean."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.parent = parent
        # Build the Designer-generated UI for this dialog.
        self.ui = Ui_iconGlossaryDialog()
        self.ui.setupUi(self)
        # Show the TCP port currently configured in keys.dat.
        portText = str(config.getint('bitmessagesettings', 'port'))
        self.ui.labelPortNumber.setText('You are using TCP port ' + portText + '. (This can be changed in the settings).')
        QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class helpDialog(QtGui.QDialog):
    """Dialog pointing the user at the online help resources."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.parent = parent
        # Build the Designer-generated UI for this dialog.
        self.ui = Ui_helpDialog()
        self.ui.setupUi(self)
        # Allow the help URI label to open the link in the user's browser.
        self.ui.labelHelpURI.setOpenExternalLinks(True)
        QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class aboutDialog(QtGui.QDialog):
    """The Help -> About dialog; displays the running software version."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.parent = parent
        # Build the Designer-generated UI for this dialog.
        self.ui = Ui_aboutDialog()
        self.ui.setupUi(self)
        self.ui.labelVersion.setText('version ' + softwareVersion)
class regenerateAddressesDialog(QtGui.QDialog):
    """Dialog collecting the parameters (passphrase, stream, version, count)
    needed to regenerate deterministic addresses."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.parent = parent
        # Build the Designer-generated UI for this dialog.
        self.ui = Ui_regenerateAddressesDialog()
        self.ui.setupUi(self)
        QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class settingsDialog(QtGui.QDialog):
    """The program settings dialog: startup/tray behavior and network proxy
    configuration. Current values are loaded from the global config
    ('bitmessagesettings' section) when the dialog is constructed.
    """
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_settingsDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Load the current user-interface settings into the checkboxes.
        self.ui.checkBoxStartOnLogon.setChecked(config.getboolean('bitmessagesettings', 'startonlogon'))
        self.ui.checkBoxMinimizeToTray.setChecked(config.getboolean('bitmessagesettings', 'minimizetotray'))
        self.ui.checkBoxShowTrayNotifications.setChecked(config.getboolean('bitmessagesettings', 'showtraynotifications'))
        self.ui.checkBoxStartInTray.setChecked(config.getboolean('bitmessagesettings', 'startintray'))
        # Some desktop-integration options are not implemented on every
        # platform; disable their controls so the user cannot toggle dead
        # settings. (Fixed typos in this user-visible message: was
        # "arn't"/"implimented"; it also appeared twice, duplicated.)
        unsupportedNote = 'Options have been disabled because they either aren\'t applicable or because they haven\'t yet been implemented for your operating system.'
        if 'darwin' in sys.platform:
            self.ui.checkBoxStartOnLogon.setDisabled(True)
            self.ui.checkBoxMinimizeToTray.setDisabled(True)
            self.ui.checkBoxShowTrayNotifications.setDisabled(True)
            self.ui.checkBoxStartInTray.setDisabled(True)
            self.ui.labelSettingsNote.setText(unsupportedNote)
        elif 'linux' in sys.platform:
            self.ui.checkBoxStartOnLogon.setDisabled(True)
            self.ui.checkBoxMinimizeToTray.setDisabled(True)
            self.ui.checkBoxStartInTray.setDisabled(True)
            self.ui.labelSettingsNote.setText(unsupportedNote)
        #On the Network settings tab:
        self.ui.lineEditTCPPort.setText(str(config.get('bitmessagesettings', 'port')))
        self.ui.checkBoxAuthentication.setChecked(config.getboolean('bitmessagesettings', 'socksauthentication'))
        # Select the configured proxy type and enable/disable related fields
        # to match: with no proxy the SOCKS fields are off; with a proxy the
        # local listening-port field is off.
        if str(config.get('bitmessagesettings', 'socksproxytype')) == 'none':
            self.ui.comboBoxProxyType.setCurrentIndex(0)
            self.ui.lineEditSocksHostname.setEnabled(False)
            self.ui.lineEditSocksPort.setEnabled(False)
            self.ui.lineEditSocksUsername.setEnabled(False)
            self.ui.lineEditSocksPassword.setEnabled(False)
            self.ui.checkBoxAuthentication.setEnabled(False)
        elif str(config.get('bitmessagesettings', 'socksproxytype')) == 'SOCKS4a':
            self.ui.comboBoxProxyType.setCurrentIndex(1)
            self.ui.lineEditTCPPort.setEnabled(False)
        elif str(config.get('bitmessagesettings', 'socksproxytype')) == 'SOCKS5':
            self.ui.comboBoxProxyType.setCurrentIndex(2)
            self.ui.lineEditTCPPort.setEnabled(False)
        self.ui.lineEditSocksHostname.setText(str(config.get('bitmessagesettings', 'sockshostname')))
        self.ui.lineEditSocksPort.setText(str(config.get('bitmessagesettings', 'socksport')))
        self.ui.lineEditSocksUsername.setText(str(config.get('bitmessagesettings', 'socksusername')))
        self.ui.lineEditSocksPassword.setText(str(config.get('bitmessagesettings', 'sockspassword')))
        QtCore.QObject.connect(self.ui.comboBoxProxyType, QtCore.SIGNAL("currentIndexChanged(int)"), self.comboBoxProxyTypeChanged)
    def comboBoxProxyTypeChanged(self,comboBoxIndex):
        """Keep the proxy-related fields in sync with the selected proxy type.

        Index 0 is 'none'; indexes 1 and 2 are SOCKS4a and SOCKS5.
        """
        if comboBoxIndex == 0:
            # No proxy: SOCKS fields are irrelevant, the TCP port applies.
            self.ui.lineEditSocksHostname.setEnabled(False)
            self.ui.lineEditSocksPort.setEnabled(False)
            self.ui.lineEditSocksUsername.setEnabled(False)
            self.ui.lineEditSocksPassword.setEnabled(False)
            self.ui.checkBoxAuthentication.setEnabled(False)
            self.ui.lineEditTCPPort.setEnabled(True)
        elif comboBoxIndex == 1 or comboBoxIndex == 2:
            # SOCKS proxy: hostname/port needed; username/password only when
            # authentication is ticked; local TCP port not usable.
            self.ui.lineEditSocksHostname.setEnabled(True)
            self.ui.lineEditSocksPort.setEnabled(True)
            self.ui.checkBoxAuthentication.setEnabled(True)
            if self.ui.checkBoxAuthentication.isChecked():
                self.ui.lineEditSocksUsername.setEnabled(True)
                self.ui.lineEditSocksPassword.setEnabled(True)
            self.ui.lineEditTCPPort.setEnabled(False)
class NewSubscriptionDialog(QtGui.QDialog):
    """Dialog for subscribing to a broadcast address. The address is
    re-validated on every keystroke and the verdict is shown below the
    input field."""
    def __init__(self,parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_NewSubscriptionDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Re-validate the typed address on every change.
        QtCore.QObject.connect(self.ui.lineEditSubscriptionAddress, QtCore.SIGNAL("textChanged(QString)"), self.subscriptionAddressChanged)
    def subscriptionAddressChanged(self,QString):
        """Decode the typed address and display a human-readable verdict
        for the status code returned by decodeAddress."""
        status,a,b,c = decodeAddress(str(QString))
        if status == 'missingbm':
            # Fixed: the original wrote ''BM-'' which, by Python adjacent-
            # string concatenation, displayed BM- with no quotes at all.
            self.ui.labelSubscriptionAddressCheck.setText('The address should start with "BM-"')
        elif status == 'checksumfailed':
            self.ui.labelSubscriptionAddressCheck.setText('The address is not typed or copied correctly (the checksum failed).')
        elif status == 'versiontoohigh':
            self.ui.labelSubscriptionAddressCheck.setText('The version number of this address is higher than this software can support. Please upgrade Bitmessage.')
        elif status == 'invalidcharacters':
            self.ui.labelSubscriptionAddressCheck.setText('The address contains invalid characters.')
        elif status == 'success':
            self.ui.labelSubscriptionAddressCheck.setText('Address is valid.')
class NewAddressDialog(QtGui.QDialog):
    """Dialog for creating a new address, either randomly or
    deterministically (from a passphrase / based on an existing one)."""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_NewAddressDialog()
        self.ui.setupUi(self)
        self.parent = parent
        # Fill the 'existing address' combo box with the addresses shown on
        # the 'Your Identities' tab (column 1 holds the address text).
        identitiesTable = self.parent.ui.tableWidgetYourIdentities
        rowIndex = 0
        while identitiesTable.item(rowIndex, 1):
            # Select the 'existing' radio button whenever at least one
            # identity is present (clicked once per row, as before).
            self.ui.radioButtonExisting.click()
            self.ui.comboBoxExisting.addItem(identitiesTable.item(rowIndex, 1).text())
            rowIndex += 1
        self.ui.groupBoxDeterministic.setHidden(True)
        QtGui.QWidget.resize(self, QtGui.QWidget.sizeHint(self))
class MyForm(QtGui.QMainWindow):
    def __init__(self, parent=None):
        """Construct the main Bitmessage window.

        The order of operations matters:
          1. Build the Qt UI; offer to delete unsupported version 1 addresses.
          2. Apply (or remove) the start-on-logon setting (Windows registry).
          3. Create the tray icon, the menu actions, and the per-tab context
             menus.
          4. Populate the identities / inbox / sent / address book /
             subscription tables from keys.dat and the SQL message store.
          5. Start the listener, cleaner, and worker threads and wire their
             signals to UI slots.
        """
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        #Ask the user if we may delete their old version 1 addresses if they have any.
        configSections = config.sections()
        for addressInKeysFile in configSections:
            if addressInKeysFile <> 'bitmessagesettings':
                status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
                if addressVersionNumber == 1:
                    displayMsg = "One of your addresses, "+addressInKeysFile+", is an old version 1 address. Version 1 addresses are no longer supported. May we delete it now?"
                    reply = QtGui.QMessageBox.question(self, 'Message',displayMsg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
                    if reply == QtGui.QMessageBox.Yes:
                        config.remove_section(addressInKeysFile)
                        with open(appdata + 'keys.dat', 'wb') as configfile:
                            config.write(configfile)
        #Configure Bitmessage to start on startup (or remove the configuration) based on the setting in the keys.dat file
        if 'win32' in sys.platform or 'win64' in sys.platform:
            #Auto-startup for Windows
            RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
            self.settings = QSettings(RUN_PATH, QSettings.NativeFormat)
            self.settings.remove("PyBitmessage") #In case the user moves the program and the registry entry is no longer valid, this will delete the old registry entry.
            if config.getboolean('bitmessagesettings', 'startonlogon'):
                self.settings.setValue("PyBitmessage",sys.argv[0])
        elif 'darwin' in sys.platform:
            #startup for mac
            pass
        elif 'linux' in sys.platform:
            #startup for linux
            pass
        # System tray icon with a minimal context menu (Exit only).
        self.trayIcon = QtGui.QSystemTrayIcon(self)
        self.trayIcon.setIcon( QtGui.QIcon(':/newPrefix/images/can-icon-16px.png') )
        traySignal = "activated(QSystemTrayIcon::ActivationReason)"
        QtCore.QObject.connect(self.trayIcon, QtCore.SIGNAL(traySignal), self.__icon_activated)
        menu = QtGui.QMenu()
        self.exitAction = menu.addAction("Exit", self.close)
        self.trayIcon.setContextMenu(menu)
        #I'm currently under the impression that Mac users have different expectations for the tray icon. They don't necessairly expect it to open the main window when clicked and they still expect a program showing a tray icon to also be in the dock.
        if 'darwin' in sys.platform:
            self.trayIcon.show()
        #FILE MENU and other buttons
        QtCore.QObject.connect(self.ui.actionExit, QtCore.SIGNAL("triggered()"), self.close)
        QtCore.QObject.connect(self.ui.actionManageKeys, QtCore.SIGNAL("triggered()"), self.click_actionManageKeys)
        QtCore.QObject.connect(self.ui.actionRegenerateDeterministicAddresses, QtCore.SIGNAL("triggered()"), self.click_actionRegenerateDeterministicAddresses)
        # NOTE(review): actionManageKeys is connected a second time here
        # (duplicate of two lines up), so the handler fires twice per click.
        QtCore.QObject.connect(self.ui.actionManageKeys, QtCore.SIGNAL("triggered()"), self.click_actionManageKeys)
        QtCore.QObject.connect(self.ui.pushButtonNewAddress, QtCore.SIGNAL("clicked()"), self.click_NewAddressDialog)
        QtCore.QObject.connect(self.ui.comboBoxSendFrom, QtCore.SIGNAL("activated(int)"),self.redrawLabelFrom)
        QtCore.QObject.connect(self.ui.pushButtonAddAddressBook, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddAddressBook)
        QtCore.QObject.connect(self.ui.pushButtonAddSubscription, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddSubscription)
        QtCore.QObject.connect(self.ui.pushButtonAddBlacklist, QtCore.SIGNAL("clicked()"), self.click_pushButtonAddBlacklist)
        QtCore.QObject.connect(self.ui.pushButtonSend, QtCore.SIGNAL("clicked()"), self.click_pushButtonSend)
        QtCore.QObject.connect(self.ui.pushButtonLoadFromAddressBook, QtCore.SIGNAL("clicked()"), self.click_pushButtonLoadFromAddressBook)
        QtCore.QObject.connect(self.ui.radioButtonBlacklist, QtCore.SIGNAL("clicked()"), self.click_radioButtonBlacklist)
        QtCore.QObject.connect(self.ui.radioButtonWhitelist, QtCore.SIGNAL("clicked()"), self.click_radioButtonWhitelist)
        QtCore.QObject.connect(self.ui.pushButtonStatusIcon, QtCore.SIGNAL("clicked()"), self.click_pushButtonStatusIcon)
        QtCore.QObject.connect(self.ui.actionSettings, QtCore.SIGNAL("triggered()"), self.click_actionSettings)
        QtCore.QObject.connect(self.ui.actionAbout, QtCore.SIGNAL("triggered()"), self.click_actionAbout)
        QtCore.QObject.connect(self.ui.actionHelp, QtCore.SIGNAL("triggered()"), self.click_actionHelp)
        #Popup menu for the Inbox tab
        self.ui.inboxContextMenuToolbar = QtGui.QToolBar()
        # Actions
        self.actionReply = self.ui.inboxContextMenuToolbar.addAction("Reply", self.on_action_InboxReply)
        self.actionAddSenderToAddressBook = self.ui.inboxContextMenuToolbar.addAction("Add sender to your Address Book", self.on_action_InboxAddSenderToAddressBook)
        self.actionTrashInboxMessage = self.ui.inboxContextMenuToolbar.addAction("Move to Trash", self.on_action_InboxTrash)
        self.ui.tableWidgetInbox.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
        self.connect(self.ui.tableWidgetInbox, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuInbox)
        self.popMenuInbox = QtGui.QMenu( self )
        self.popMenuInbox.addAction( self.actionReply )
        self.popMenuInbox.addAction( self.actionAddSenderToAddressBook )
        self.popMenuInbox.addSeparator()
        self.popMenuInbox.addAction( self.actionTrashInboxMessage )
        #Popup menu for the Your Identities tab
        self.ui.addressContextMenuToolbar = QtGui.QToolBar()
        # Actions
        self.actionNew = self.ui.addressContextMenuToolbar.addAction("New", self.on_action_YourIdentitiesNew)
        self.actionEnable = self.ui.addressContextMenuToolbar.addAction("Enable", self.on_action_YourIdentitiesEnable)
        self.actionDisable = self.ui.addressContextMenuToolbar.addAction("Disable", self.on_action_YourIdentitiesDisable)
        self.actionClipboard = self.ui.addressContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_YourIdentitiesClipboard)
        self.ui.tableWidgetYourIdentities.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
        self.connect(self.ui.tableWidgetYourIdentities, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuYourIdentities)
        self.popMenu = QtGui.QMenu( self )
        self.popMenu.addAction( self.actionNew )
        self.popMenu.addSeparator()
        self.popMenu.addAction( self.actionClipboard )
        self.popMenu.addSeparator()
        self.popMenu.addAction( self.actionEnable )
        self.popMenu.addAction( self.actionDisable )
        #Popup menu for the Address Book page
        self.ui.addressBookContextMenuToolbar = QtGui.QToolBar()
        # Actions
        self.actionAddressBookNew = self.ui.addressBookContextMenuToolbar.addAction("New", self.on_action_AddressBookNew)
        self.actionAddressBookDelete = self.ui.addressBookContextMenuToolbar.addAction("Delete", self.on_action_AddressBookDelete)
        self.actionAddressBookClipboard = self.ui.addressBookContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_AddressBookClipboard)
        self.actionAddressBookSend = self.ui.addressBookContextMenuToolbar.addAction("Send message to this address", self.on_action_AddressBookSend)
        self.ui.tableWidgetAddressBook.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
        self.connect(self.ui.tableWidgetAddressBook, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuAddressBook)
        self.popMenuAddressBook = QtGui.QMenu( self )
        self.popMenuAddressBook.addAction( self.actionAddressBookNew )
        self.popMenuAddressBook.addAction( self.actionAddressBookDelete )
        self.popMenuAddressBook.addSeparator()
        self.popMenuAddressBook.addAction( self.actionAddressBookSend )
        self.popMenuAddressBook.addAction( self.actionAddressBookClipboard )
        #Popup menu for the Subscriptions page
        self.ui.subscriptionsContextMenuToolbar = QtGui.QToolBar()
        # Actions
        self.actionsubscriptionsNew = self.ui.subscriptionsContextMenuToolbar.addAction("New", self.on_action_SubscriptionsNew)
        self.actionsubscriptionsDelete = self.ui.subscriptionsContextMenuToolbar.addAction("Delete", self.on_action_SubscriptionsDelete)
        self.actionsubscriptionsClipboard = self.ui.subscriptionsContextMenuToolbar.addAction("Copy address to clipboard", self.on_action_SubscriptionsClipboard)
        self.ui.tableWidgetSubscriptions.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
        self.connect(self.ui.tableWidgetSubscriptions, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuSubscriptions)
        self.popMenuSubscriptions = QtGui.QMenu( self )
        self.popMenuSubscriptions.addAction( self.actionsubscriptionsNew )
        self.popMenuSubscriptions.addAction( self.actionsubscriptionsDelete )
        self.popMenuSubscriptions.addSeparator()
        self.popMenuSubscriptions.addAction( self.actionsubscriptionsClipboard )
        #Popup menu for the Sent page
        self.ui.sentContextMenuToolbar = QtGui.QToolBar()
        # Actions
        self.actionTrashSentMessage = self.ui.sentContextMenuToolbar.addAction("Move to Trash", self.on_action_SentTrash)
        self.ui.tableWidgetSent.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
        self.connect(self.ui.tableWidgetSent, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menuSent)
        self.popMenuSent = QtGui.QMenu( self )
        self.popMenuSent.addAction( self.actionTrashSentMessage )
        #Initialize the user's list of addresses on the 'Your Identities' tab.
        configSections = config.sections()
        for addressInKeysFile in configSections:
            if addressInKeysFile <> 'bitmessagesettings':
                isEnabled = config.getboolean(addressInKeysFile, 'enabled')
                # NOTE(review): the codec name 'utf-8)' has a stray paren;
                # Python's codec-name normalization appears to forgive it,
                # but it should read 'utf-8' — confirm and clean up.
                newItem = QtGui.QTableWidgetItem(unicode(config.get(addressInKeysFile, 'label'),'utf-8)'))
                if not isEnabled:
                    # Grey out disabled addresses.
                    newItem.setTextColor(QtGui.QColor(128,128,128))
                self.ui.tableWidgetYourIdentities.insertRow(0)
                self.ui.tableWidgetYourIdentities.setItem(0, 0, newItem)
                newItem = QtGui.QTableWidgetItem(addressInKeysFile)
                newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                if not isEnabled:
                    newItem.setTextColor(QtGui.QColor(128,128,128))
                self.ui.tableWidgetYourIdentities.setItem(0, 1, newItem)
                newItem = QtGui.QTableWidgetItem(str(addressStream(addressInKeysFile)))
                newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                if not isEnabled:
                    newItem.setTextColor(QtGui.QColor(128,128,128))
                self.ui.tableWidgetYourIdentities.setItem(0, 2, newItem)
                if isEnabled:
                    # NOTE(review): the decoded values are never used here —
                    # dead code, or a leftover from a removed feature?
                    status,addressVersionNumber,streamNumber,hash = decodeAddress(addressInKeysFile)
        # Start the SQL thread that serves sqlSubmitQueue/sqlReturnQueue.
        self.sqlLookup = sqlThread()
        self.sqlLookup.start()
        reloadMyAddressHashes()
        self.reloadBroadcastSendersForWhichImWatching()
        #Load inbox from messages database file
        # Protocol of the SQL queue: put the query, then the parameters (an
        # empty string when there are none), then read sqlReturnQueue.
        sqlSubmitQueue.put('''SELECT msgid, toaddress, fromaddress, subject, received, message FROM inbox where folder='inbox' ORDER BY received''')
        sqlSubmitQueue.put('')
        queryreturn = sqlReturnQueue.get()
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, received, message, = row
            try:
                if toAddress == '[Broadcast subscribers]':
                    toLabel = '[Broadcast subscribers]'
                else:
                    toLabel = config.get(toAddress, 'label')
            except:
                # Address not in keys.dat (or no label); fall back below.
                toLabel = ''
            if toLabel == '':
                toLabel = toAddress
            # Look up a friendly name for the sender in the address book.
            fromLabel = ''
            t = (fromAddress,)
            sqlSubmitQueue.put('''select label from addressbook where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            if queryreturn <> []:
                for row in queryreturn:
                    fromLabel, = row
            # Columns: 0=to, 1=from, 2=subject (message in UserRole), 3=time.
            self.ui.tableWidgetInbox.insertRow(0)
            newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
            newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
            newItem.setData(Qt.UserRole,str(toAddress))
            self.ui.tableWidgetInbox.setItem(0,0,newItem)
            if fromLabel == '':
                newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
            else:
                newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
            newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
            newItem.setData(Qt.UserRole,str(fromAddress))
            self.ui.tableWidgetInbox.setItem(0,1,newItem)
            newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8'))
            # NOTE(review): 'utf-8)' again — see note above.
            newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
            newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
            self.ui.tableWidgetInbox.setItem(0,2,newItem)
            newItem = myTableWidgetItem(strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(received))))
            newItem.setData(Qt.UserRole,QByteArray(msgid))
            # Role 33 holds the raw timestamp for sorting.
            newItem.setData(33,int(received))
            newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
            self.ui.tableWidgetInbox.setItem(0,3,newItem)
            #self.ui.textEditInboxMessage.setText(self.ui.tableWidgetInbox.item(0,2).data(Qt.UserRole).toPyObject())
        #Load Sent items from database
        sqlSubmitQueue.put('''SELECT toaddress, fromaddress, subject, message, status, ackdata, lastactiontime FROM sent where folder = 'sent' ORDER BY lastactiontime''')
        sqlSubmitQueue.put('')
        queryreturn = sqlReturnQueue.get()
        for row in queryreturn:
            toAddress, fromAddress, subject, message, status, ackdata, lastactiontime = row
            try:
                fromLabel = config.get(fromAddress, 'label')
            except:
                fromLabel = ''
            if fromLabel == '':
                fromLabel = fromAddress
            toLabel = ''
            t = (toAddress,)
            sqlSubmitQueue.put('''select label from addressbook where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            if queryreturn <> []:
                for row in queryreturn:
                    toLabel, = row
            self.ui.tableWidgetSent.insertRow(0)
            if toLabel == '':
                newItem = QtGui.QTableWidgetItem(unicode(toAddress,'utf-8'))
            else:
                newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
            newItem.setData(Qt.UserRole,str(toAddress))
            newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
            self.ui.tableWidgetSent.setItem(0,0,newItem)
            if fromLabel == '':
                newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
            else:
                newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
            newItem.setData(Qt.UserRole,str(fromAddress))
            newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
            self.ui.tableWidgetSent.setItem(0,1,newItem)
            newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8'))
            newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
            newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
            self.ui.tableWidgetSent.setItem(0,2,newItem)
            # Map the stored status code to a human-readable column entry.
            if status == 'findingpubkey':
                newItem = myTableWidgetItem('Waiting on their public key. Will request it again soon.')
            elif status == 'sentmessage':
                # NOTE(review): this branch passes lastactiontime without
                # int() unlike the others — confirm the column type.
                newItem = myTableWidgetItem('Message sent. Waiting on acknowledgement. Sent at ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(lastactiontime)))
            elif status == 'doingpow':
                newItem = myTableWidgetItem('Need to do work to send message. Work is queued.')
            elif status == 'ackreceived':
                newItem = myTableWidgetItem('Acknowledgement of the message received ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
            elif status == 'broadcastpending':
                newItem = myTableWidgetItem('Doing the work necessary to send broadcast...')
            elif status == 'broadcastsent':
                newItem = myTableWidgetItem('Broadcast on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
            else:
                newItem = myTableWidgetItem('Unknown status. ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(lastactiontime))))
            newItem.setData(Qt.UserRole,QByteArray(ackdata))
            newItem.setData(33,int(lastactiontime))
            newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
            self.ui.tableWidgetSent.setItem(0,3,newItem)
        #Initialize the address book
        sqlSubmitQueue.put('SELECT * FROM addressbook')
        sqlSubmitQueue.put('')
        queryreturn = sqlReturnQueue.get()
        for row in queryreturn:
            label, address = row
            self.ui.tableWidgetAddressBook.insertRow(0)
            newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
            self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
            newItem = QtGui.QTableWidgetItem(address)
            newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
            self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
        #Initialize the Subscriptions
        sqlSubmitQueue.put('SELECT label, address FROM subscriptions')
        sqlSubmitQueue.put('')
        queryreturn = sqlReturnQueue.get()
        for row in queryreturn:
            label, address = row
            self.ui.tableWidgetSubscriptions.insertRow(0)
            newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
            self.ui.tableWidgetSubscriptions.setItem(0,0,newItem)
            newItem = QtGui.QTableWidgetItem(address)
            newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
            self.ui.tableWidgetSubscriptions.setItem(0,1,newItem)
        #Initialize the Blacklist or Whitelist
        if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
            self.loadBlackWhiteList()
        else:
            # Whitelist mode: retitle tab 6 and select the whitelist radio
            # button before loading.
            self.ui.tabWidget.setTabText(6,'Whitelist')
            self.ui.radioButtonWhitelist.click()
            self.loadBlackWhiteList()
        #Initialize the ackdataForWhichImWatching data structure using data from the sql database.
        sqlSubmitQueue.put('''SELECT ackdata FROM sent where (status='sentmessage' OR status='doingpow')''')
        sqlSubmitQueue.put('')
        queryreturn = sqlReturnQueue.get()
        for row in queryreturn:
            ackdata, = row
            print 'Watching for ackdata', ackdata.encode('hex')
            ackdataForWhichImWatching[ackdata] = 0
        # Wire table edits/selection changes to their handlers. (Note the
        # signal signature "itemSelectionChanged ()" contains a space in the
        # original — left untouched here.)
        QtCore.QObject.connect(self.ui.tableWidgetYourIdentities, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetYourIdentitiesItemChanged)
        QtCore.QObject.connect(self.ui.tableWidgetAddressBook, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetAddressBookItemChanged)
        QtCore.QObject.connect(self.ui.tableWidgetSubscriptions, QtCore.SIGNAL("itemChanged(QTableWidgetItem *)"), self.tableWidgetSubscriptionsItemChanged)
        QtCore.QObject.connect(self.ui.tableWidgetInbox, QtCore.SIGNAL("itemSelectionChanged ()"), self.tableWidgetInboxItemClicked)
        QtCore.QObject.connect(self.ui.tableWidgetSent, QtCore.SIGNAL("itemSelectionChanged ()"), self.tableWidgetSentItemClicked)
        #Put the colored icon on the status bar
        #self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/yellowicon.png"))
        self.statusbar = self.statusBar()
        self.statusbar.insertPermanentWidget(0,self.ui.pushButtonStatusIcon)
        self.ui.labelStartupTime.setText('Since startup on ' + strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
        # Counters shown/updated elsewhere in the UI.
        self.numberOfMessagesProcessed = 0
        self.numberOfBroadcastsProcessed = 0
        self.numberOfPubkeysProcessed = 0
        #Below this point, it would be good if all of the necessary global data structures were initialized.
        self.rerenderComboBoxSendFrom()
        self.listOfOutgoingSynSenderThreads = [] #if we don't maintain this list, the threads will get garbage-collected.
        self.connectToStream(1)
        # Start the background threads and connect their signals to UI slots.
        self.singleListenerThread = singleListener()
        self.singleListenerThread.start()
        QtCore.QObject.connect(self.singleListenerThread, QtCore.SIGNAL("passObjectThrough(PyQt_PyObject)"), self.connectObjectToSignals)
        self.singleCleanerThread = singleCleaner()
        self.singleCleanerThread.start()
        QtCore.QObject.connect(self.singleCleanerThread, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
        QtCore.QObject.connect(self.singleCleanerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
        self.workerThread = singleWorker()
        self.workerThread.start()
        QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByHash)
        QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)"), self.updateSentItemStatusByAckdata)
        QtCore.QObject.connect(self.workerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
def click_actionManageKeys(self):
if 'darwin' in sys.platform or 'linux' in sys.platform:
reply = QtGui.QMessageBox.information(self, 'keys.dat?','You may manage your keys by editing the keys.dat file stored in\n' + appdata + '\nIt is important that you back up this file.', QMessageBox.Ok)
elif sys.platform == 'win32' or sys.platform == 'win64':
reply = QtGui.QMessageBox.question(self, 'Open keys.dat?','You may manage your keys by editing the keys.dat file stored in\n' + appdata + '\nIt is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.openKeysFile()
else:
pass
def click_actionRegenerateDeterministicAddresses(self):
    """Prompt for a passphrase and regenerate deterministic addresses from it.

    Spawns an addressGenerator thread; the generated addresses come back to
    the GUI through the writeNewAddressToTable signal, and the 'Your
    Identities' tab is brought to the front.

    Fix: the writeNewAddressToTable connection previously used the bare
    `SIGNAL` name; it now uses `QtCore.SIGNAL` for consistency with every
    other connection in this file (same object, identical behavior).
    """
    self.regenerateAddressesDialogInstance = regenerateAddressesDialog(self)
    if self.regenerateAddressesDialogInstance.exec_():
        if self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text() == "":
            QMessageBox.about(self, "bad passphrase", "You must type your passphrase. If you don\'t have one then this is not the form for you.")
        else:
            streamNumberForAddress = int(self.regenerateAddressesDialogInstance.ui.lineEditStreamNumber.text())
            addressVersionNumber = int(self.regenerateAddressesDialogInstance.ui.lineEditAddressVersionNumber.text())
            self.addressGenerator = addressGenerator()
            self.addressGenerator.setup(addressVersionNumber,streamNumberForAddress,"unused address",self.regenerateAddressesDialogInstance.ui.spinBoxNumberOfAddressesToMake.value(),self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text().toUtf8(),self.regenerateAddressesDialogInstance.ui.checkBoxEighteenByteRipe.isChecked())
            QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
            QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
            self.addressGenerator.start()
            self.ui.tabWidget.setCurrentIndex(3)
def openKeysFile(self):
    """Open keys.dat with the platform's default handler.

    Bug fix: the Linux branch previously ran
    `subprocess.call(["xdg-open", file])` -- no local named `file` exists,
    so that passed the Python builtin `file` type object instead of a path.
    It now opens the actual keys.dat inside the appdata directory.
    """
    if 'linux' in sys.platform:
        subprocess.call(["xdg-open", appdata + 'keys.dat'])
    else:
        # Windows: let the shell pick the associated application.
        os.startfile(appdata + '\\keys.dat')
def changeEvent(self, event):
    """Implement minimize-to-tray: hide the window when it gets minimized."""
    minimizeToTray = config.getboolean('bitmessagesettings', 'minimizetotray')
    if minimizeToTray and 'darwin' not in sys.platform:
        if event.type() == QtCore.QEvent.WindowStateChange:
            if self.windowState() & QtCore.Qt.WindowMinimized:
                self.hide()
                self.trayIcon.show()
                # On Windows the window must also leave the taskbar.
                if 'win32' in sys.platform or 'win64' in sys.platform:
                    self.setWindowFlags(Qt.ToolTip)
            elif event.oldState() & QtCore.Qt.WindowMinimized:
                # Just restored to Normal/Maximised/FullScreen; nothing to do.
                pass
def __icon_activated(self, reason):
    """Restore the main window when the tray icon is clicked (Trigger)."""
    if reason != QtGui.QSystemTrayIcon.Trigger:
        return
    if 'linux' in sys.platform:
        self.trayIcon.hide()
        self.setWindowFlags(Qt.Window)
        self.show()
    elif 'win32' in sys.platform or 'win64' in sys.platform:
        self.trayIcon.hide()
        self.setWindowFlags(Qt.Window)
        self.show()
        self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
        self.activateWindow()
    elif 'darwin' in sys.platform:
        # Hiding the tray icon here causes a segmentation fault on OS X,
        # so only de-minimize and activate the existing window.
        self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
        self.activateWindow()
def incrementNumberOfMessagesProcessed(self):
    """Bump and display the person-to-person message counter."""
    self.numberOfMessagesProcessed += 1
    caption = 'Processed %s person-to-person messages.' % str(self.numberOfMessagesProcessed)
    self.ui.labelMessageCount.setText(caption)
def incrementNumberOfBroadcastsProcessed(self):
    """Bump and display the broadcast message counter."""
    self.numberOfBroadcastsProcessed += 1
    caption = 'Processed %s broadcast messages.' % str(self.numberOfBroadcastsProcessed)
    self.ui.labelBroadcastCount.setText(caption)
def incrementNumberOfPubkeysProcessed(self):
    """Bump and display the public key counter."""
    self.numberOfPubkeysProcessed += 1
    caption = 'Processed %s public keys.' % str(self.numberOfPubkeysProcessed)
    self.ui.labelPubkeyCount.setText(caption)
def updateNetworkStatusTab(self,streamNumber,connectionCount):
    """Slot: record a stream's new connection count and recolor the status icon.

    streamNumber/connectionCount arrive from network threads via the
    updateNetworkStatusTab signal (see connectObjectToSignals).
    """
    global statusIconColor
    #print 'updating network status tab'
    totalNumberOfConnectionsFromAllStreams = 0 #One would think we could use len(sendDataQueues) for this, but sendData threads don't remove themselves from sendDataQueues fast enough for len(sendDataQueues) to be accurate here.
    for currentRow in range(self.ui.tableWidgetConnectionCount.rowCount()):
        rowStreamNumber = int(self.ui.tableWidgetConnectionCount.item(currentRow,0).text())
        if streamNumber == rowStreamNumber:
            self.ui.tableWidgetConnectionCount.item(currentRow,1).setText(str(connectionCount))
            # NOTE(review): only the row(s) matching streamNumber contribute to
            # the total, so counts from other streams are not included --
            # harmless while only stream 1 exists, but confirm the intent.
            totalNumberOfConnectionsFromAllStreams += connectionCount
    self.ui.labelTotalConnections.setText('Total Connections: ' + str(totalNumberOfConnectionsFromAllStreams))
    # Yellow means we have outgoing connections only; the 'singlelistener'
    # thread upgrades to green on the first incoming connection.
    if totalNumberOfConnectionsFromAllStreams > 0 and statusIconColor == 'red': #FYI: The 'singlelistener' thread sets the icon color to green when it receives an incoming connection, meaning that the user's firewall is configured correctly.
        self.setStatusIcon('yellow')
    elif totalNumberOfConnectionsFromAllStreams == 0:
        self.setStatusIcon('red')
def setStatusIcon(self, color):
    """Set the connection status icon ('red'/'yellow'/'green') and remember it."""
    global statusIconColor
    notConnectedWarning = 'Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.'
    if color == 'red':
        self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/redicon.png"))
        statusIconColor = 'red'
    if color == 'yellow':
        # We are connected now, so clear the stale 'not connected' warning.
        if self.statusBar().currentMessage() == notConnectedWarning:
            self.statusBar().showMessage('')
        self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/yellowicon.png"))
        statusIconColor = 'yellow'
    if color == 'green':
        if self.statusBar().currentMessage() == notConnectedWarning:
            self.statusBar().showMessage('')
        self.ui.pushButtonStatusIcon.setIcon(QIcon(":/newPrefix/images/greenicon.png"))
        statusIconColor = 'green'
def updateSentItemStatusByHash(self, toRipe, textToDisplay):
    """Update the status column of Sent rows whose recipient RIPE matches toRipe."""
    sentTable = self.ui.tableWidgetSent
    for rowIndex in range(sentTable.rowCount()):
        rowToAddress = str(sentTable.item(rowIndex, 0).data(Qt.UserRole).toPyObject())
        status, addressVersionNumber, streamNumber, ripe = decodeAddress(rowToAddress)
        if ripe == toRipe:
            sentTable.item(rowIndex, 3).setText(unicode(textToDisplay, 'utf-8'))
def updateSentItemStatusByAckdata(self, ackdata, textToDisplay):
    """Update the status column of Sent rows whose stored ackdata matches."""
    sentTable = self.ui.tableWidgetSent
    for rowIndex in range(sentTable.rowCount()):
        rowToAddress = str(sentTable.item(rowIndex, 0).data(Qt.UserRole).toPyObject())
        rowAckdata = sentTable.item(rowIndex, 3).data(Qt.UserRole).toPyObject()
        status, addressVersionNumber, streamNumber, ripe = decodeAddress(rowToAddress)
        if ackdata == rowAckdata:
            sentTable.item(rowIndex, 3).setText(unicode(textToDisplay, 'utf-8'))
def rerenderInboxFromLabels(self):
    """Re-resolve the Inbox 'From' column against the address book,
    falling back to the subscriptions table (broadcast senders)."""
    inboxTable = self.ui.tableWidgetInbox
    for rowIndex in range(inboxTable.rowCount()):
        addressToLookup = str(inboxTable.item(rowIndex, 1).data(Qt.UserRole).toPyObject())
        fromLabel = ''
        params = (addressToLookup,)
        sqlLock.acquire()
        sqlSubmitQueue.put('''select label from addressbook where address=?''')
        sqlSubmitQueue.put(params)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        if queryreturn != []:
            for row in queryreturn:
                fromLabel, = row
            inboxTable.item(rowIndex, 1).setText(unicode(fromLabel, 'utf-8'))
        else:
            # Not in the address book; it may be a broadcast sender whose
            # label lives in the subscriptions table.
            sqlLock.acquire()
            sqlSubmitQueue.put('''select label from subscriptions where address=?''')
            sqlSubmitQueue.put(params)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn != []:
                for row in queryreturn:
                    fromLabel, = row
                inboxTable.item(rowIndex, 1).setText(unicode(fromLabel, 'utf-8'))
def rerenderInboxToLabels(self):
    """Refresh the Inbox 'To' column with labels from keys.dat."""
    inboxTable = self.ui.tableWidgetInbox
    for rowIndex in range(inboxTable.rowCount()):
        toAddress = str(inboxTable.item(rowIndex, 0).data(Qt.UserRole).toPyObject())
        try:
            toLabel = config.get(toAddress, 'label')
        except:
            toLabel = ''
        if toLabel == '':
            # No label configured: fall back to showing the raw address.
            toLabel = toAddress
        inboxTable.item(rowIndex, 0).setText(unicode(toLabel, 'utf-8'))
def rerenderSentFromLabels(self):
    """Refresh the Sent 'From' column with labels from keys.dat."""
    sentTable = self.ui.tableWidgetSent
    for rowIndex in range(sentTable.rowCount()):
        fromAddress = str(sentTable.item(rowIndex, 1).data(Qt.UserRole).toPyObject())
        try:
            fromLabel = config.get(fromAddress, 'label')
        except:
            fromLabel = ''
        if fromLabel == '':
            # No label configured: fall back to showing the raw address.
            fromLabel = fromAddress
        sentTable.item(rowIndex, 1).setText(unicode(fromLabel, 'utf-8'))
def rerenderSentToLabels(self):
    """Re-resolve the Sent 'To' column against the address book table.

    Bug fix: the SQL submit/return queues are shared with other threads, so
    access must be serialized with sqlLock -- every other query in this file
    acquires it, but this method previously used the queues without the lock,
    which could interleave with a concurrent query and return wrong rows.
    """
    for i in range(self.ui.tableWidgetSent.rowCount()):
        addressToLookup = str(self.ui.tableWidgetSent.item(i,0).data(Qt.UserRole).toPyObject())
        toLabel = ''
        t = (addressToLookup,)
        sqlLock.acquire()
        sqlSubmitQueue.put('''select label from addressbook where address=?''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        if queryreturn != []:
            for row in queryreturn:
                toLabel, = row
            self.ui.tableWidgetSent.item(i,0).setText(unicode(toLabel,'utf-8'))
def click_pushButtonSend(self):
    """Handle the Send button.

    Depending on the selected radio button this either queues a
    person-to-person message for each address listed in the 'To' field, or
    queues a single broadcast; each queued message is inserted into the
    'sent' database table, handed to the worker thread via workerQueue, and
    mirrored at the top of the Sent table widget.
    """
    self.statusBar().showMessage('')
    # Snapshot every compose-form field as a byte string up front.
    toAddresses = str(self.ui.lineEditTo.text())
    fromAddress = str(self.ui.labelFrom.text())
    subject = str(self.ui.lineEditSubject.text().toUtf8())
    message = str(self.ui.textEditMessage.document().toPlainText().toUtf8())
    if self.ui.radioButtonSpecific.isChecked(): #To send a message to specific people (rather than broadcast)
        # Both ',' and ';' act as address separators.
        toAddressesList = [s.strip() for s in toAddresses.replace(',', ';').split(';')]
        toAddressesList = list(set(toAddressesList)) #remove duplicate addresses. If the user has one address with a BM- and the same address without the BM-, this will not catch it. They'll send the message to the person twice.
        for toAddress in toAddressesList:
            if toAddress <> '':
                status,addressVersionNumber,streamNumber,ripe = decodeAddress(toAddress)
                if status <> 'success':
                    printLock.acquire()
                    print 'Status bar:', 'Error: Could not decode', toAddress, ':', status
                    printLock.release()
                    # Translate the decode failure into a user-facing hint.
                    if status == 'missingbm':
                        self.statusBar().showMessage('Error: Bitmessage addresses start with BM- Please check ' + toAddress)
                    if status == 'checksumfailed':
                        self.statusBar().showMessage('Error: The address ' + toAddress+' is not typed or copied correctly. Please check it.')
                    if status == 'invalidcharacters':
                        self.statusBar().showMessage('Error: The address '+ toAddress+ ' contains invalid characters. Please check it.')
                    if status == 'versiontoohigh':
                        self.statusBar().showMessage('Error: The address version in '+ toAddress+ ' is too high. Either you need to upgrade your Bitmessage software or your acquaintance is being clever.')
                elif fromAddress == '':
                    self.statusBar().showMessage('Error: You must specify a From address. If you don\'t have one, go to the \'Your Identities\' tab.')
                else:
                    toAddress = addBMIfNotPresent(toAddress)
                    try:
                        # A keys.dat section for this address means it is our own.
                        config.get(toAddress, 'enabled')
                        #The toAddress is one owned by me. We cannot send messages to ourselves without significant changes to the codebase.
                        QMessageBox.about(self, "Sending to your address", "Error: One of the addresses to which you are sending a message, "+toAddress+", is yours. Unfortunately the Bitmessage client cannot process its own messages. Please try running a second client on a different computer or within a VM.")
                        continue
                    except:
                        # Not one of our own addresses; proceed.
                        pass
                    if addressVersionNumber > 2 or addressVersionNumber == 0:
                        QMessageBox.about(self, "Address version number", "Concerning the address "+toAddress+", Bitmessage cannot understand address version numbers of "+str(addressVersionNumber)+". Perhaps upgrade Bitmessage to the latest version.")
                        continue
                    if streamNumber > 1 or streamNumber == 0:
                        QMessageBox.about(self, "Stream number", "Concerning the address "+toAddress+", Bitmessage cannot handle stream numbers of "+str(streamNumber)+". Perhaps upgrade Bitmessage to the latest version.")
                        continue
                    self.statusBar().showMessage('')
                    try:
                        # Warn when we have no peers on this stream yet.
                        if connectionsCount[streamNumber] == 0:
                            self.statusBar().showMessage('Warning: You are currently not connected. Bitmessage will do the work necessary to send the message but it won\'t send until you connect.')
                    except:
                        self.statusBar().showMessage('Warning: The address uses a stream number currently not supported by this Bitmessage version. Perhaps upgrade.')
                    # Random ack payload lets us recognize the acknowledgement later.
                    ackdata = OpenSSL.rand(32)
                    sqlLock.acquire()
                    t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'findingpubkey',1,1,'sent')
                    sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
                    sqlSubmitQueue.put(t)
                    sqlReturnQueue.get()
                    sqlLock.release()
                    # Hand the actual sending off to the worker thread.
                    workerQueue.put(('sendmessage',toAddress))
                    try:
                        fromLabel = config.get(fromAddress, 'label')
                    except:
                        fromLabel = ''
                    if fromLabel == '':
                        fromLabel = fromAddress
                    toLabel = ''
                    t = (toAddress,)
                    sqlLock.acquire()
                    sqlSubmitQueue.put('''select label from addressbook where address=?''')
                    sqlSubmitQueue.put(t)
                    queryreturn = sqlReturnQueue.get()
                    sqlLock.release()
                    if queryreturn <> []:
                        for row in queryreturn:
                            toLabel, = row
                    # Mirror the queued message at the top of the Sent table.
                    self.ui.tableWidgetSent.insertRow(0)
                    if toLabel == '':
                        newItem = QtGui.QTableWidgetItem(unicode(toAddress,'utf-8'))
                    else:
                        newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
                    newItem.setData(Qt.UserRole,str(toAddress))
                    self.ui.tableWidgetSent.setItem(0,0,newItem)
                    if fromLabel == '':
                        newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
                    else:
                        newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
                    newItem.setData(Qt.UserRole,str(fromAddress))
                    self.ui.tableWidgetSent.setItem(0,1,newItem)
                    # NOTE(review): the encoding name 'utf-8)' below contains a
                    # stray ')' -- presumably a typo for 'utf-8'; it would raise
                    # LookupError when evaluated. Confirm and fix separately.
                    newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
                    newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
                    self.ui.tableWidgetSent.setItem(0,2,newItem)
                    # Adjacent string literals concatenate: "Just pressed send <time>".
                    newItem = myTableWidgetItem('Just pressed ''send'' '+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
                    newItem.setData(Qt.UserRole,QByteArray(ackdata))
                    newItem.setData(33,int(time.time()))
                    self.ui.tableWidgetSent.setItem(0,3,newItem)
                    self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(0,2).data(Qt.UserRole).toPyObject())
                    self.ui.labelFrom.setText('')
                    self.ui.tabWidget.setCurrentIndex(2)
                    self.ui.tableWidgetSent.setCurrentCell(0,0)
            else:
                self.statusBar().showMessage('Your \'To\' field is empty.')
    else: #User selected 'Broadcast'
        if fromAddress == '':
            self.statusBar().showMessage('Error: You must specify a From address. If you don\'t have one, go to the \'Your Identities\' tab.')
        else:
            self.statusBar().showMessage('')
            #We don't actually need the ackdata for acknowledgement since this is a broadcast message, but we can use it to update the user interface when the POW is done generating.
            ackdata = OpenSSL.rand(32)
            toAddress = '[Broadcast subscribers]'
            ripe = ''
            sqlLock.acquire()
            t = ('',toAddress,ripe,fromAddress,subject,message,ackdata,int(time.time()),'broadcastpending',1,1,'sent')
            sqlSubmitQueue.put('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''')
            sqlSubmitQueue.put(t)
            sqlReturnQueue.get()
            sqlLock.release()
            workerQueue.put(('sendbroadcast',(fromAddress,subject,message)))
            try:
                fromLabel = config.get(fromAddress, 'label')
            except:
                fromLabel = ''
            if fromLabel == '':
                fromLabel = fromAddress
            toLabel = '[Broadcast subscribers]'
            # Mirror the queued broadcast at the top of the Sent table.
            self.ui.tableWidgetSent.insertRow(0)
            newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
            newItem.setData(Qt.UserRole,str(toAddress))
            self.ui.tableWidgetSent.setItem(0,0,newItem)
            if fromLabel == '':
                newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
            else:
                newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
            newItem.setData(Qt.UserRole,str(fromAddress))
            self.ui.tableWidgetSent.setItem(0,1,newItem)
            # NOTE(review): 'utf-8)' again looks like a typo for 'utf-8'.
            newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
            newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
            self.ui.tableWidgetSent.setItem(0,2,newItem)
            #newItem = QtGui.QTableWidgetItem('Doing work necessary to send broadcast...'+strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
            newItem = myTableWidgetItem('Doing work necessary to send broadcast...')
            newItem.setData(Qt.UserRole,QByteArray(ackdata))
            newItem.setData(33,int(time.time()))
            self.ui.tableWidgetSent.setItem(0,3,newItem)
            self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(0,2).data(Qt.UserRole).toPyObject())
            self.ui.labelFrom.setText('')
            self.ui.tabWidget.setCurrentIndex(2)
            self.ui.tableWidgetSent.setCurrentCell(0,0)
def click_pushButtonLoadFromAddressBook(self):
    """Jump to the Address Book tab and blink a usage hint in the status bar."""
    self.ui.tabWidget.setCurrentIndex(5)
    blinksRemaining = 4
    while blinksRemaining > 0:
        blinksRemaining -= 1
        time.sleep(0.1)
        self.statusBar().showMessage('')
        time.sleep(0.1)
        self.statusBar().showMessage('Right click an entry in your address book and select \'Send message to this address\'.')
def redrawLabelFrom(self, index):
    """Show the address stored on the given combo-box entry in the From label."""
    selectedAddress = self.ui.comboBoxSendFrom.itemData(index).toPyObject()
    self.ui.labelFrom.setText(selectedAddress)
def rerenderComboBoxSendFrom(self):
    """Rebuild the 'Send from' combo box from the enabled addresses in keys.dat."""
    self.ui.comboBoxSendFrom.clear()
    self.ui.labelFrom.setText('')
    for addressInKeysFile in config.sections():
        if addressInKeysFile == 'bitmessagesettings':
            continue
        # Named flag rather than inlining getboolean into the condition.
        isEnabled = config.getboolean(addressInKeysFile, 'enabled')
        if isEnabled:
            self.ui.comboBoxSendFrom.insertItem(0, unicode(config.get(addressInKeysFile, 'label'), 'utf-8'), addressInKeysFile)
    # Blank first entry so no address is pre-selected by default.
    self.ui.comboBoxSendFrom.insertItem(0, '', '')
    if self.ui.comboBoxSendFrom.count() == 2:
        # Exactly one real address: select it automatically.
        self.ui.comboBoxSendFrom.setCurrentIndex(1)
        self.redrawLabelFrom(self.ui.comboBoxSendFrom.currentIndex())
    else:
        self.ui.comboBoxSendFrom.setCurrentIndex(0)
def connectToStream(self, streamNumber):
    """Start participating in a stream: add its Connection Count row and
    launch an outgoingSynSender thread for it."""
    connectionsCount[streamNumber] = 0
    # New row starts at zero; updateNetworkStatusTab keeps it current.
    self.ui.tableWidgetConnectionCount.insertRow(0)
    streamItem = QtGui.QTableWidgetItem(str(streamNumber))
    streamItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    self.ui.tableWidgetConnectionCount.setItem(0, 0, streamItem)
    countItem = QtGui.QTableWidgetItem('0')
    countItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    self.ui.tableWidgetConnectionCount.setItem(0, 1, countItem)
    synSender = outgoingSynSender()
    # Keep a reference so the thread is not garbage-collected.
    self.listOfOutgoingSynSenderThreads.append(synSender)
    QtCore.QObject.connect(synSender, QtCore.SIGNAL("passObjectThrough(PyQt_PyObject)"), self.connectObjectToSignals)
    QtCore.QObject.connect(synSender, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
    synSender.setup(streamNumber)
    synSender.start()
def connectObjectToSignals(self, object):
    """Wire a newly created network-thread object to the GUI slots."""
    signalTable = (
        ("updateStatusBar(PyQt_PyObject)", self.updateStatusBar),
        ("displayNewMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)", self.displayNewMessage),
        ("updateSentItemStatusByHash(PyQt_PyObject,PyQt_PyObject)", self.updateSentItemStatusByHash),
        ("updateSentItemStatusByAckdata(PyQt_PyObject,PyQt_PyObject)", self.updateSentItemStatusByAckdata),
        ("updateNetworkStatusTab(PyQt_PyObject,PyQt_PyObject)", self.updateNetworkStatusTab),
        ("incrementNumberOfMessagesProcessed()", self.incrementNumberOfMessagesProcessed),
        ("incrementNumberOfPubkeysProcessed()", self.incrementNumberOfPubkeysProcessed),
        ("incrementNumberOfBroadcastsProcessed()", self.incrementNumberOfBroadcastsProcessed),
        ("setStatusIcon(PyQt_PyObject)", self.setStatusIcon),
    )
    for signature, slot in signalTable:
        QtCore.QObject.connect(object, QtCore.SIGNAL(signature), slot)
def displayNewMessage(self,inventoryHash,toAddress,fromAddress,subject,message):
    """Slot: insert a newly received message at the top of the Inbox table.

    Resolves fromAddress to a label via the addressbook table (falling back
    to the subscriptions table for broadcast senders) and toAddress via
    keys.dat, shows a tray notification if enabled, then selects the row.
    """
    fromLabel = ''
    # Look the sender up in the address book first.
    sqlLock.acquire()
    t = (fromAddress,)
    sqlSubmitQueue.put('''select label from addressbook where address=?''')
    sqlSubmitQueue.put(t)
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    if queryreturn <> []:
        for row in queryreturn:
            fromLabel, = row
    else:
        #There might be a label in the subscriptions table
        sqlLock.acquire()
        t = (fromAddress,)
        sqlSubmitQueue.put('''select label from subscriptions where address=?''')
        sqlSubmitQueue.put(t)
        queryreturn = sqlReturnQueue.get()
        sqlLock.release()
        if queryreturn <> []:
            for row in queryreturn:
                fromLabel, = row
    try:
        if toAddress == '[Broadcast subscribers]':
            toLabel = '[Broadcast subscribers]'
        else:
            toLabel = config.get(toAddress, 'label')
    except:
        # No keys.dat section (or no label) for this address.
        toLabel = ''
    if toLabel == '':
        toLabel = toAddress
    #msgid, toaddress, fromaddress, subject, received, message = row
    newItem = QtGui.QTableWidgetItem(unicode(toLabel,'utf-8'))
    newItem.setData(Qt.UserRole,str(toAddress))
    self.ui.tableWidgetInbox.insertRow(0)
    self.ui.tableWidgetInbox.setItem(0,0,newItem)
    if fromLabel == '':
        # Unknown sender: show the raw address in the table and notification.
        newItem = QtGui.QTableWidgetItem(unicode(fromAddress,'utf-8'))
        if config.getboolean('bitmessagesettings', 'showtraynotifications'):
            self.trayIcon.showMessage('New Message', 'New message from '+ fromAddress, 1, 2000)
    else:
        newItem = QtGui.QTableWidgetItem(unicode(fromLabel,'utf-8'))
        if config.getboolean('bitmessagesettings', 'showtraynotifications'):
            self.trayIcon.showMessage('New Message', 'New message from '+fromLabel, 1, 2000)
    newItem.setData(Qt.UserRole,str(fromAddress))
    self.ui.tableWidgetInbox.setItem(0,1,newItem)
    # NOTE(review): the encoding name 'utf-8)' has a stray ')' -- presumably
    # a typo for 'utf-8'; it would raise LookupError when evaluated. Confirm
    # and fix separately.
    newItem = QtGui.QTableWidgetItem(unicode(subject,'utf-8)'))
    newItem.setData(Qt.UserRole,unicode(message,'utf-8)'))
    self.ui.tableWidgetInbox.setItem(0,2,newItem)
    newItem = myTableWidgetItem(strftime(config.get('bitmessagesettings', 'timeformat'),localtime(int(time.time()))))
    newItem.setData(Qt.UserRole,QByteArray(inventoryHash))
    newItem.setData(33,int(time.time()))
    self.ui.tableWidgetInbox.setItem(0,3,newItem)
    self.ui.textEditInboxMessage.setText(self.ui.tableWidgetInbox.item(0,2).data(Qt.UserRole).toPyObject())
    self.ui.tableWidgetInbox.setCurrentCell(0,0)
def click_pushButtonAddAddressBook(self):
    """Prompt for a label/address pair and add it to the address book.

    Reuses NewSubscriptionDialog for input; rejects duplicates and invalid
    addresses via the status bar, and refreshes Inbox 'From' labels on success.
    """
    self.NewSubscriptionDialogInstance = NewSubscriptionDialog(self)
    if self.NewSubscriptionDialogInstance.exec_():
        if self.NewSubscriptionDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
            #First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
            sqlLock.acquire()
            t = (addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),)
            sqlSubmitQueue.put('''select * from addressbook where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn == []:
                # Not present yet: add to the table widget and the database.
                self.ui.tableWidgetAddressBook.insertRow(0)
                newItem = QtGui.QTableWidgetItem(unicode(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
                self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
                newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text()))
                newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
                t = (str(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())))
                sqlLock.acquire()
                sqlSubmitQueue.put('''INSERT INTO addressbook VALUES (?,?)''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                # Inbox 'From' labels may now resolve differently.
                self.rerenderInboxFromLabels()
            else:
                self.statusBar().showMessage('Error: You cannot add the same address to your address book twice. Try renaming the existing one if you want.')
        else:
            self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def click_pushButtonAddSubscription(self):
    """Prompt for a label/address pair and add it to the subscriptions table.

    Rejects duplicates and invalid addresses via the status bar; on success,
    refreshes Inbox labels and tells the network side to watch for
    broadcasts from the new sender.
    """
    self.NewSubscriptionDialogInstance = NewSubscriptionDialog(self)
    if self.NewSubscriptionDialogInstance.exec_():
        if self.NewSubscriptionDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
            #First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
            sqlLock.acquire()
            t = (addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),)
            sqlSubmitQueue.put('''select * from subscriptions where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn == []:
                # Not present yet: add to the table widget and the database.
                self.ui.tableWidgetSubscriptions.insertRow(0)
                newItem = QtGui.QTableWidgetItem(unicode(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
                self.ui.tableWidgetSubscriptions.setItem(0,0,newItem)
                newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text()))
                newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                self.ui.tableWidgetSubscriptions.setItem(0,1,newItem)
                # Third column is the 'enabled' flag (True for new entries).
                t = (str(self.NewSubscriptionDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewSubscriptionDialogInstance.ui.lineEditSubscriptionAddress.text())),True)
                sqlLock.acquire()
                sqlSubmitQueue.put('''INSERT INTO subscriptions VALUES (?,?,?)''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
                self.rerenderInboxFromLabels()
                self.reloadBroadcastSendersForWhichImWatching()
            else:
                self.statusBar().showMessage('Error: You cannot add the same address to your subsciptions twice. Perhaps rename the existing one if you want.')
        else:
            self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def loadBlackWhiteList(self):
    """Populate the Blacklist/Whitelist table from whichever list is active.

    Bug fix: acquires sqlLock around the shared SQL submit/return queues,
    consistent with every other database access in this file; previously
    this method used the queues without the lock.
    """
    # 'blackwhitelist' selects which table the GUI edits: black or white.
    listType = config.get('bitmessagesettings', 'blackwhitelist')
    sqlLock.acquire()
    if listType == 'black':
        sqlSubmitQueue.put('''SELECT label, address FROM blacklist''')
    else:
        sqlSubmitQueue.put('''SELECT label, address FROM whitelist''')
    sqlSubmitQueue.put('')
    queryreturn = sqlReturnQueue.get()
    sqlLock.release()
    for row in queryreturn:
        label, address = row
        self.ui.tableWidgetBlacklist.insertRow(0)
        newItem = QtGui.QTableWidgetItem(unicode(label,'utf-8'))
        self.ui.tableWidgetBlacklist.setItem(0,0,newItem)
        newItem = QtGui.QTableWidgetItem(address)
        newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
        self.ui.tableWidgetBlacklist.setItem(0,1,newItem)
def click_pushButtonStatusIcon(self):
    """Open the modal icon-glossary dialog explaining the status icon colors."""
    print('click_pushButtonStatusIcon')
    self.iconGlossaryInstance = iconGlossaryDialog(self)
    self.iconGlossaryInstance.exec_()
def click_actionHelp(self):
    """Show the modal Help dialog."""
    dialog = helpDialog(self)
    self.helpDialogInstance = dialog
    dialog.exec_()
def click_actionAbout(self):
    """Show the modal About dialog."""
    dialog = aboutDialog(self)
    self.aboutDialogInstance = dialog
    dialog.exec_()
def click_actionSettings(self):
    """Show the Settings dialog and persist accepted changes to keys.dat.

    Also maintains the Windows run-at-login registry entry; Mac/Linux
    auto-start are placeholders.
    """
    global statusIconColor
    self.settingsDialogInstance = settingsDialog(self)
    if self.settingsDialogInstance.exec_():
        # Copy the checkbox states into the in-memory config.
        config.set('bitmessagesettings', 'startonlogon', str(self.settingsDialogInstance.ui.checkBoxStartOnLogon.isChecked()))
        config.set('bitmessagesettings', 'minimizetotray', str(self.settingsDialogInstance.ui.checkBoxMinimizeToTray.isChecked()))
        config.set('bitmessagesettings', 'showtraynotifications', str(self.settingsDialogInstance.ui.checkBoxShowTrayNotifications.isChecked()))
        config.set('bitmessagesettings', 'startintray', str(self.settingsDialogInstance.ui.checkBoxStartInTray.isChecked()))
        # The listening port only takes effect after a restart.
        if int(config.get('bitmessagesettings','port')) != int(self.settingsDialogInstance.ui.lineEditTCPPort.text()):
            QMessageBox.about(self, "Restart", "You must restart Bitmessage for the port number change to take effect.")
        config.set('bitmessagesettings', 'port', str(self.settingsDialogInstance.ui.lineEditTCPPort.text()))
        # Warn when switching from no proxy to SOCKS while already connected.
        if config.get('bitmessagesettings', 'socksproxytype') == 'none' and str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText())[0:5] == 'SOCKS':
            if statusIconColor != 'red':
                QMessageBox.about(self, "Restart", "Bitmessage will use your proxy from now on now but you may want to manually restart Bitmessage now to close existing connections.")
        if config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS' and str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText()) == 'none':
            self.statusBar().showMessage('')
        config.set('bitmessagesettings', 'socksproxytype', str(self.settingsDialogInstance.ui.comboBoxProxyType.currentText()))
        config.set('bitmessagesettings', 'socksauthentication', str(self.settingsDialogInstance.ui.checkBoxAuthentication.isChecked()))
        config.set('bitmessagesettings', 'sockshostname', str(self.settingsDialogInstance.ui.lineEditSocksHostname.text()))
        config.set('bitmessagesettings', 'socksport', str(self.settingsDialogInstance.ui.lineEditSocksPort.text()))
        config.set('bitmessagesettings', 'socksusername', str(self.settingsDialogInstance.ui.lineEditSocksUsername.text()))
        config.set('bitmessagesettings', 'sockspassword', str(self.settingsDialogInstance.ui.lineEditSocksPassword.text()))
        # Persist everything to disk.
        with open(appdata + 'keys.dat', 'wb') as configfile:
            config.write(configfile)
        if 'win32' in sys.platform or 'win64' in sys.platform:
            #Auto-startup for Windows
            RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
            self.settings = QSettings(RUN_PATH, QSettings.NativeFormat)
            if config.getboolean('bitmessagesettings', 'startonlogon'):
                self.settings.setValue("PyBitmessage",sys.argv[0])
            else:
                # Remove the run-at-login entry when the option is disabled.
                self.settings.remove("PyBitmessage")
        elif 'darwin' in sys.platform:
            #startup for mac
            pass
        elif 'linux' in sys.platform:
            #startup for linux
            pass
def click_radioButtonBlacklist(self):
    """Switch black/white list mode to 'black', persist it, reload the table."""
    if config.get('bitmessagesettings', 'blackwhitelist') != 'white':
        return
    config.set('bitmessagesettings', 'blackwhitelist', 'black')
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    # Dropping the row count clears the table contents without the headers.
    self.ui.tableWidgetBlacklist.setRowCount(0)
    self.loadBlackWhiteList()
    self.ui.tabWidget.setTabText(6, 'Blacklist')
def click_radioButtonWhitelist(self):
    """Switch black/white list mode to 'white', persist it, reload the table."""
    if config.get('bitmessagesettings', 'blackwhitelist') != 'black':
        return
    config.set('bitmessagesettings', 'blackwhitelist', 'white')
    with open(appdata + 'keys.dat', 'wb') as configfile:
        config.write(configfile)
    # Dropping the row count clears the table contents without the headers.
    self.ui.tableWidgetBlacklist.setRowCount(0)
    self.loadBlackWhiteList()
    self.ui.tabWidget.setTabText(6, 'Whitelist')
def click_pushButtonAddBlacklist(self):
    """Prompt for a label/address pair and add it to whichever list (black
    or white) is currently active, rejecting duplicates."""
    self.NewBlacklistDialogInstance = NewSubscriptionDialog(self)
    if self.NewBlacklistDialogInstance.exec_():
        if self.NewBlacklistDialogInstance.ui.labelSubscriptionAddressCheck.text() == 'Address is valid.':
            #First we must check to see if the address is already in the address book. The user cannot add it again or else it will cause problems when updating and deleting the entry.
            sqlLock.acquire()
            t = (addBMIfNotPresent(str(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text())),)
            # The active list mode decides which table to query and insert into.
            if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
                sqlSubmitQueue.put('''select * from blacklist where address=?''')
            else:
                sqlSubmitQueue.put('''select * from whitelist where address=?''')
            sqlSubmitQueue.put(t)
            queryreturn = sqlReturnQueue.get()
            sqlLock.release()
            if queryreturn == []:
                # Not present yet: add to the table widget and the database.
                self.ui.tableWidgetBlacklist.insertRow(0)
                newItem = QtGui.QTableWidgetItem(unicode(self.NewBlacklistDialogInstance.ui.newsubscriptionlabel.text().toUtf8(),'utf-8'))
                self.ui.tableWidgetBlacklist.setItem(0,0,newItem)
                newItem = QtGui.QTableWidgetItem(addBMIfNotPresent(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text()))
                newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                self.ui.tableWidgetBlacklist.setItem(0,1,newItem)
                # Third column is the 'enabled' flag (True for new entries).
                t = (str(self.NewBlacklistDialogInstance.ui.newsubscriptionlabel.text().toUtf8()),addBMIfNotPresent(str(self.NewBlacklistDialogInstance.ui.lineEditSubscriptionAddress.text())),True)
                sqlLock.acquire()
                if config.get('bitmessagesettings', 'blackwhitelist') == 'black':
                    sqlSubmitQueue.put('''INSERT INTO blacklist VALUES (?,?,?)''')
                else:
                    sqlSubmitQueue.put('''INSERT INTO whitelist VALUES (?,?,?)''')
                sqlSubmitQueue.put(t)
                queryreturn = sqlReturnQueue.get()
                sqlLock.release()
            else:
                self.statusBar().showMessage('Error: You cannot add the same address to your list twice. Perhaps rename the existing one if you want.')
        else:
            self.statusBar().showMessage('The address you entered was invalid. Ignoring it.')
def click_NewAddressDialog(self):
    """Show the New Address dialog and spawn an addressGenerator thread.

    Random addresses use version 2 with either the 'most available' stream
    (hard-coded 1) or the stream of an existing address; deterministic
    addresses require a matching, non-empty passphrase typed twice.
    """
    self.dialog = NewAddressDialog(self)
    # For Modal dialogs
    if self.dialog.exec_():
        #self.dialog.ui.buttonBox.enabled = False
        if self.dialog.ui.radioButtonRandomAddress.isChecked():
            if self.dialog.ui.radioButtonMostAvailable.isChecked():
                streamNumberForAddress = 1
            else:
                #User selected 'Use the same stream as an existing address.'
                streamNumberForAddress = addressStream(self.dialog.ui.comboBoxExisting.currentText())
            self.addressGenerator = addressGenerator()
            self.addressGenerator.setup(2,streamNumberForAddress,str(self.dialog.ui.newaddresslabel.text().toUtf8()),1,"",self.dialog.ui.checkBoxEighteenByteRipe.isChecked())
            # NOTE(review): bare SIGNAL here, QtCore.SIGNAL elsewhere in the
            # file -- presumably the same imported name; confirm and unify.
            QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
            QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
            self.addressGenerator.start()
        else:
            # Deterministic (passphrase-derived) addresses.
            if self.dialog.ui.lineEditPassphrase.text() != self.dialog.ui.lineEditPassphraseAgain.text():
                QMessageBox.about(self, "Passphrase mismatch", "The passphrase you entered twice doesn\'t match. Try again.")
            elif self.dialog.ui.lineEditPassphrase.text() == "":
                QMessageBox.about(self, "Choose a passphrase", "You really do need a passphrase.")
            else:
                streamNumberForAddress = 1 #this will eventually have to be replaced by logic to determine the most available stream number.
                self.addressGenerator = addressGenerator()
                self.addressGenerator.setup(2,streamNumberForAddress,"unused address",self.dialog.ui.spinBoxNumberOfAddressesToMake.value(),self.dialog.ui.lineEditPassphrase.text().toUtf8(),self.dialog.ui.checkBoxEighteenByteRipe.isChecked())
                QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
                QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
                self.addressGenerator.start()
    else:
        print 'new address dialog box rejected'
# Clean shutdown on window close: stop sendData threads, flush the
# in-memory inventory, persist knownNodes, then terminate the process.
def closeEvent(self, event):
'''quit_msg = "Are you sure you want to exit Bitmessage?"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()'''
# Ask every sendData thread to shut down.
broadcastToSendDataQueues((0, 'shutdown', 'all'))
printLock.acquire()
print 'Closing. Flushing inventory in memory out to disk...'
printLock.release()
self.statusBar().showMessage('Flushing inventory in memory out to disk.')
flushInventory()
#This one last useless query will guarantee that the previous query committed before we close the program.
sqlLock.acquire()
sqlSubmitQueue.put('SELECT address FROM subscriptions')
sqlSubmitQueue.put('')
sqlReturnQueue.get()
sqlLock.release()
self.statusBar().showMessage('Saving the knownNodes list of peers to disk...')
output = open(appdata + 'knownnodes.dat', 'wb')
pickle.dump(knownNodes, output)
output.close()
self.trayIcon.hide()
printLock.acquire()
print 'Done.'
printLock.release()
self.statusBar().showMessage('All done. Closing user interface...')
event.accept()
# NOTE(review): raising here forces the whole process down; presumably
# required because worker threads are still running -- confirm.
raise SystemExit
# Pre-fill the Send tab to reply to the currently selected inbox message.
def on_action_InboxReply(self):
currentInboxRow = self.ui.tableWidgetInbox.currentRow()
# Qt.UserRole holds the raw address for the row; column 0 = To, 1 = From.
toAddressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow,0).data(Qt.UserRole).toPyObject())
fromAddressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow,1).data(Qt.UserRole).toPyObject())
if toAddressAtCurrentInboxRow == '[Broadcast subscribers]':
self.ui.labelFrom.setText('')
else:
# NOTE(review): config.get returns a *string*; 'false' is truthy, so
# this disabled-address check presumably never fires -- should it be
# config.getboolean? Confirm.
if not config.get(toAddressAtCurrentInboxRow,'enabled'):
self.statusBar().showMessage('Error: The address from which you are trying to send is disabled. Enable it from the \'Your Identities\' tab first.')
return
self.ui.labelFrom.setText(toAddressAtCurrentInboxRow)
self.ui.lineEditTo.setText(str(fromAddressAtCurrentInboxRow))
self.ui.comboBoxSendFrom.setCurrentIndex(0)
#self.ui.comboBoxSendFrom.setEditText(str(self.ui.tableWidgetInbox.item(currentInboxRow,0).text))
# Quote the original message body (stored under Qt.UserRole in column 2).
self.ui.textEditMessage.setText('\n\n------------------------------------------------------\n'+self.ui.tableWidgetInbox.item(currentInboxRow,2).data(Qt.UserRole).toPyObject())
# Prefix 'Re: ' only if the subject does not already carry it.
if self.ui.tableWidgetInbox.item(currentInboxRow,2).text()[0:3] == 'Re:':
self.ui.lineEditSubject.setText(str(self.ui.tableWidgetInbox.item(currentInboxRow,2).text()))
else:
self.ui.lineEditSubject.setText('Re: '+self.ui.tableWidgetInbox.item(currentInboxRow,2).text())
self.ui.radioButtonSpecific.setChecked(True)
self.ui.tabWidget.setCurrentIndex(1)
# Add the sender of the selected inbox message to the address book,
# refusing duplicates.
def on_action_InboxAddSenderToAddressBook(self):
currentInboxRow = self.ui.tableWidgetInbox.currentRow()
#self.ui.tableWidgetInbox.item(currentRow,1).data(Qt.UserRole).toPyObject()
addressAtCurrentInboxRow = str(self.ui.tableWidgetInbox.item(currentInboxRow,1).data(Qt.UserRole).toPyObject())
#Let's make sure that it isn't already in the address book
sqlLock.acquire()
t = (addressAtCurrentInboxRow,)
sqlSubmitQueue.put('''select * from addressbook where address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
if queryreturn == []:
self.ui.tableWidgetAddressBook.insertRow(0)
newItem = QtGui.QTableWidgetItem('--New entry. Change label in Address Book.--')
self.ui.tableWidgetAddressBook.setItem(0,0,newItem)
newItem = QtGui.QTableWidgetItem(addressAtCurrentInboxRow)
# Address cells are selectable but not editable.
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetAddressBook.setItem(0,1,newItem)
t = ('--New entry. Change label in Address Book.--',addressAtCurrentInboxRow)
sqlLock.acquire()
sqlSubmitQueue.put('''INSERT INTO addressbook VALUES (?,?)''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
self.ui.tabWidget.setCurrentIndex(5)
self.ui.tableWidgetAddressBook.setCurrentCell(0,0)
self.statusBar().showMessage('Entry added to the Address Book. Edit the label to your liking.')
else:
self.statusBar().showMessage('Error: You cannot add the same address to your address book twice. Try renaming the existing one if you want.')
#Send item on the Inbox tab to trash
# Messages are never deleted here; only their folder column flips to
# 'trash' so they remain recoverable on disk.
def on_action_InboxTrash(self):
currentRow = self.ui.tableWidgetInbox.currentRow()
# Column 3 carries the inventory hash under Qt.UserRole.
inventoryHashToTrash = str(self.ui.tableWidgetInbox.item(currentRow,3).data(Qt.UserRole).toPyObject())
t = (inventoryHashToTrash,)
sqlLock.acquire()
#sqlSubmitQueue.put('''delete from inbox where msgid=?''')
sqlSubmitQueue.put('''UPDATE inbox SET folder='trash' WHERE msgid=?''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
self.ui.tableWidgetInbox.removeRow(currentRow)
self.statusBar().showMessage('Moved item to trash. There is no user interface to view your trash, but it is still on disk if you are desperate to get it back.')
#Send item on the Sent tab to trash
# Same folder-flip pattern as the inbox, keyed on ackdata instead of msgid.
def on_action_SentTrash(self):
currentRow = self.ui.tableWidgetSent.currentRow()
ackdataToTrash = str(self.ui.tableWidgetSent.item(currentRow,3).data(Qt.UserRole).toPyObject())
t = (ackdataToTrash,)
sqlLock.acquire()
sqlSubmitQueue.put('''UPDATE sent SET folder='trash' WHERE ackdata=?''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
self.ui.tableWidgetSent.removeRow(currentRow)
self.statusBar().showMessage('Moved item to trash. There is no user interface to view your trash, but it is still on disk if you are desperate to get it back.')
#Group of functions for the Address Book dialog box
def on_action_AddressBookNew(self):
self.click_pushButtonAddAddressBook()
# Delete the selected address-book row from both the UI and the database,
# then refresh every view that renders labels from the address book.
def on_action_AddressBookDelete(self):
currentRow = self.ui.tableWidgetAddressBook.currentRow()
labelAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,0).text().toUtf8()
addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,1).text()
t = (str(labelAtCurrentRow),str(addressAtCurrentRow))
sqlLock.acquire()
sqlSubmitQueue.put('''DELETE FROM addressbook WHERE label=? AND address=?''')
sqlSubmitQueue.put(t)
queryreturn = sqlReturnQueue.get()
sqlLock.release()
self.ui.tableWidgetAddressBook.removeRow(currentRow)
self.rerenderInboxFromLabels()
self.rerenderSentToLabels()
self.reloadBroadcastSendersForWhichImWatching()
# Copy the selected address to the system clipboard.
def on_action_AddressBookClipboard(self):
currentRow = self.ui.tableWidgetAddressBook.currentRow()
addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,1).text()
clipboard = QtGui.QApplication.clipboard()
clipboard.setText(str(addressAtCurrentRow))
# Append the selected address to the 'To' field on the Send tab
# (semicolon-separated when recipients already exist).
def on_action_AddressBookSend(self):
currentRow = self.ui.tableWidgetAddressBook.currentRow()
addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,1).text()
if self.ui.lineEditTo.text() == '':
self.ui.lineEditTo.setText(str(addressAtCurrentRow))
else:
self.ui.lineEditTo.setText(str(self.ui.lineEditTo.text()) + '; '+ str(addressAtCurrentRow))
self.statusBar().showMessage('You have added the address to the \'To\' field on the \'Send\' tab. You may add more recipients if you want. When you are done, go to the \'Send\' tab.')
def on_context_menuAddressBook(self, point):
self.popMenuAddressBook.exec_( self.ui.tableWidgetAddressBook.mapToGlobal(point) )
#Group of functions for the Subscriptions dialog box
def on_action_SubscriptionsNew(self):
self.click_pushButtonAddSubscription()
# Delete the selected subscription from the UI and the database, then
# refresh inbox labels (which may have been rendered from it).
def on_action_SubscriptionsDelete(self):
print 'clicked Delete'
currentRow = self.ui.tableWidgetSubscriptions.currentRow()
labelAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,0).text().toUtf8()
addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,1).text()
t = (str(labelAtCurrentRow),str(addressAtCurrentRow))
sqlLock.acquire()
sqlSubmitQueue.put('''DELETE FROM subscriptions WHERE label=? AND address=?''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
sqlLock.release()
self.ui.tableWidgetSubscriptions.removeRow(currentRow)
self.rerenderInboxFromLabels()
# Copy the selected subscription address to the system clipboard.
def on_action_SubscriptionsClipboard(self):
currentRow = self.ui.tableWidgetSubscriptions.currentRow()
addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,1).text()
clipboard = QtGui.QApplication.clipboard()
clipboard.setText(str(addressAtCurrentRow))
def on_context_menuSubscriptions(self, point):
self.popMenuSubscriptions.exec_( self.ui.tableWidgetSubscriptions.mapToGlobal(point) )
#Group of functions for the Your Identities dialog box
def on_action_YourIdentitiesNew(self):
self.click_NewAddressDialog()
# Re-enable the selected identity: flip its 'enabled' flag in keys.dat,
# restore black row text, and reload the in-memory address hashes.
def on_action_YourIdentitiesEnable(self):
currentRow = self.ui.tableWidgetYourIdentities.currentRow()
addressAtCurrentRow = self.ui.tableWidgetYourIdentities.item(currentRow,1).text()
config.set(str(addressAtCurrentRow),'enabled','true')
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
self.ui.tableWidgetYourIdentities.item(currentRow,0).setTextColor(QtGui.QColor(0,0,0))
self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(0,0,0))
self.ui.tableWidgetYourIdentities.item(currentRow,2).setTextColor(QtGui.QColor(0,0,0))
reloadMyAddressHashes()
# Disable the selected identity; gray out the row (128,128,128) and
# persist the change before reloading address hashes.
def on_action_YourIdentitiesDisable(self):
currentRow = self.ui.tableWidgetYourIdentities.currentRow()
addressAtCurrentRow = self.ui.tableWidgetYourIdentities.item(currentRow,1).text()
config.set(str(addressAtCurrentRow),'enabled','false')
self.ui.tableWidgetYourIdentities.item(currentRow,0).setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetYourIdentities.item(currentRow,1).setTextColor(QtGui.QColor(128,128,128))
self.ui.tableWidgetYourIdentities.item(currentRow,2).setTextColor(QtGui.QColor(128,128,128))
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
reloadMyAddressHashes()
def on_action_YourIdentitiesClipboard(self):
currentRow = self.ui.tableWidgetYourIdentities.currentRow()
addressAtCurrentRow = self.ui.tableWidgetYourIdentities.item(currentRow,1).text()
clipboard = QtGui.QApplication.clipboard()
clipboard.setText(str(addressAtCurrentRow))
# Context-menu plumbing: pop the per-table menus at the cursor position.
def on_context_menuYourIdentities(self, point):
self.popMenu.exec_( self.ui.tableWidgetYourIdentities.mapToGlobal(point) )
def on_context_menuInbox(self, point):
self.popMenuInbox.exec_( self.ui.tableWidgetInbox.mapToGlobal(point) )
def on_context_menuSent(self, point):
self.popMenuSent.exec_( self.ui.tableWidgetSent.mapToGlobal(point) )
# Show the full body (stored under Qt.UserRole in column 2) of the
# clicked inbox message in the reading pane.
def tableWidgetInboxItemClicked(self):
currentRow = self.ui.tableWidgetInbox.currentRow()
self.ui.textEditInboxMessage.setText(self.ui.tableWidgetInbox.item(currentRow,2).data(Qt.UserRole).toPyObject())
def tableWidgetSentItemClicked(self):
currentRow = self.ui.tableWidgetSent.currentRow()
self.ui.textEditSentMessage.setText(self.ui.tableWidgetSent.item(currentRow,2).data(Qt.UserRole).toPyObject())
# Persist an edited identity label to keys.dat and refresh every view
# that renders it.
def tableWidgetYourIdentitiesItemChanged(self):
currentRow = self.ui.tableWidgetYourIdentities.currentRow()
if currentRow >= 0:
addressAtCurrentRow = self.ui.tableWidgetYourIdentities.item(currentRow,1).text()
config.set(str(addressAtCurrentRow),'label',str(self.ui.tableWidgetYourIdentities.item(currentRow,0).text().toUtf8()))
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
self.rerenderComboBoxSendFrom()
#self.rerenderInboxFromLabels()
self.rerenderInboxToLabels()
self.rerenderSentFromLabels()
#self.rerenderSentToLabels()
# Persist an edited address-book label to the database.
# NOTE(review): sqlLock is acquired before the currentRow check and
# released after it; harmless but the lock could be scoped tighter.
def tableWidgetAddressBookItemChanged(self):
currentRow = self.ui.tableWidgetAddressBook.currentRow()
sqlLock.acquire()
if currentRow >= 0:
addressAtCurrentRow = self.ui.tableWidgetAddressBook.item(currentRow,1).text()
t = (str(self.ui.tableWidgetAddressBook.item(currentRow,0).text().toUtf8()),str(addressAtCurrentRow))
sqlSubmitQueue.put('''UPDATE addressbook set label=? WHERE address=?''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
#except Exception, err:
# print 'Program Exception in tableWidgetAddressBookItemChanged:', err
sqlLock.release()
self.rerenderInboxFromLabels()
self.rerenderSentToLabels()
# Persist an edited subscription label to the database (same pattern as
# the address-book handler above).
def tableWidgetSubscriptionsItemChanged(self):
currentRow = self.ui.tableWidgetSubscriptions.currentRow()
sqlLock.acquire()
if currentRow >= 0:
addressAtCurrentRow = self.ui.tableWidgetSubscriptions.item(currentRow,1).text()
t = (str(self.ui.tableWidgetSubscriptions.item(currentRow,0).text().toUtf8()),str(addressAtCurrentRow))
sqlSubmitQueue.put('''UPDATE subscriptions set label=? WHERE address=?''')
sqlSubmitQueue.put(t)
sqlReturnQueue.get()
#except Exception, err:
# print 'Program Exception in tableWidgetSubscriptionsItemChanged:', err
sqlLock.release()
self.rerenderInboxFromLabels()
self.rerenderSentToLabels()
# Slot invoked by the addressGenerator thread: prepend the freshly
# generated address to the Your Identities table.
def writeNewAddressToTable(self,label,address,streamNumber):
self.ui.tableWidgetYourIdentities.insertRow(0)
self.ui.tableWidgetYourIdentities.setItem(0, 0, QtGui.QTableWidgetItem(unicode(label,'utf-8')))
newItem = QtGui.QTableWidgetItem(address)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetYourIdentities.setItem(0, 1, newItem)
newItem = QtGui.QTableWidgetItem(streamNumber)
newItem.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.tableWidgetYourIdentities.setItem(0, 2, newItem)
self.rerenderComboBoxSendFrom()
# Slot for worker threads: echo status text to stdout (under printLock)
# and to the status bar.
def updateStatusBar(self,data):
printLock.acquire()
print 'Status bar:', data
printLock.release()
self.statusBar().showMessage(data)
# Rebuild the hash->0 map of broadcast senders we subscribe to, straight
# from the subscriptions table.
def reloadBroadcastSendersForWhichImWatching(self):
broadcastSendersForWhichImWatching.clear()
sqlLock.acquire()
sqlSubmitQueue.put('SELECT address FROM subscriptions')
sqlSubmitQueue.put('')
queryreturn = sqlReturnQueue.get()
sqlLock.release()
for row in queryreturn:
address, = row
status,addressVersionNumber,streamNumber,hash = decodeAddress(address)
broadcastSendersForWhichImWatching[hash] = 0
#In order for the time columns on the Inbox and Sent tabs to be sorted correctly (rather than alphabetically), we need to overload the < operator and use this class instead of QTableWidgetItem.
class myTableWidgetItem(QTableWidgetItem):
# Compare numerically on the value stored under custom Qt data role 33
# (presumably a UN*X timestamp set where the items are created -- confirm).
def __lt__(self,other):
return int(self.data(33).toPyObject()) < int(other.data(33).toPyObject())
# Module-level shared state used by the GUI and the network/worker threads.
sendDataQueues = [] #each sendData thread puts its queue in this list.
myRSAAddressHashes = {}
myECAddressHashes = {}
#myPrivateKeys = {}
inventory = {} #of objects (like msg payloads and pubkey payloads) Does not include protocol headers (the first 24 bytes of each packet).
workerQueue = Queue.Queue()
sqlSubmitQueue = Queue.Queue() #SQLITE3 is so thread-unsafe that they won't even let you call it from different threads using your own locks. SQL objects can only be called from one thread.
sqlReturnQueue = Queue.Queue()
sqlLock = threading.Lock()
printLock = threading.Lock()
ackdataForWhichImWatching = {}
broadcastSendersForWhichImWatching = {}
statusIconColor = 'red'
connectionsCount = {} #Used for the 'network status' tab.
connectionsCountLock = threading.Lock()
inventoryLock = threading.Lock() #Guarantees that two receiveDataThreads don't receive and process the same message concurrently (probably sent by a malicious individual)
eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack('>Q',random.randrange(1, 18446744073709551615))
connectedHostsList = {} #List of hosts to which we are connected. Used to guarantee that the outgoingSynSender thread won't connect to the same remote node twice.
neededPubkeys = {}
successfullyDecryptMessageTimings = [] #A list of the amounts of time it took to successfully decrypt msg messages
#These constants are not at the top because if changed they will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
averageProofOfWorkNonceTrialsPerByte = 320 #The amount of work that should be performed (and demanded) per byte of the payload. Double this number to double the work.
payloadLengthExtraBytes = 14000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.
# Testing hook: drastically cheapen the proof of work when enabled.
if useVeryEasyProofOfWorkForTesting:
averageProofOfWorkNonceTrialsPerByte = averageProofOfWorkNonceTrialsPerByte / 16
payloadLengthExtraBytes = payloadLengthExtraBytes / 7000
# Program entry point: locate/create the per-user data directory, load or
# initialise keys.dat and knownnodes.dat, DNS-bootstrap peers, then start
# the Qt event loop.
if __name__ == "__main__":
# Check the Major version, the first element in the array
if sqlite3.sqlite_version_info[0] < 3:
print 'This program requires sqlite version 3 or higher because 2 and lower cannot store NULL values. I see version:', sqlite3.sqlite_version_info
sys.exit()
# Pick a platform-specific application data directory.
if not storeConfigFilesInSameDirectoryAsProgram:
APPNAME = "PyBitmessage"
from os import path, environ
if sys.platform == 'darwin':
if "HOME" in environ:
appdata = path.join(os.environ["HOME"], "Library/Application support/", APPNAME) + '/'
else:
print 'Could not find home folder, please report this message and your OS X version to the BitMessage Github.'
sys.exit()
elif 'win32' in sys.platform or 'win64' in sys.platform:
appdata = path.join(environ['APPDATA'], APPNAME) + '\\'
else:
appdata = path.expanduser(path.join("~", "." + APPNAME + "/"))
if not os.path.exists(appdata):
os.makedirs(appdata)
else:
appdata = ""
config = ConfigParser.SafeConfigParser()
config.read(appdata + 'keys.dat')
try:
config.get('bitmessagesettings', 'settingsversion')
print 'Loading config files from', appdata
except:
#This appears to be the first time running the program; there is no config file (or it cannot be accessed). Create config file.
config.add_section('bitmessagesettings')
config.set('bitmessagesettings','settingsversion','1')
#config.set('bitmessagesettings','bitstrength','2048')
config.set('bitmessagesettings','port','8444')
config.set('bitmessagesettings','timeformat','%%a, %%d %%b %%Y %%I:%%M %%p')
config.set('bitmessagesettings','blackwhitelist','black')
config.set('bitmessagesettings','startonlogon','false')
if 'linux' in sys.platform:
config.set('bitmessagesettings','minimizetotray','false')#This isn't implimented yet and when True on Ubuntu causes Bitmessage to disappear while running when minimized.
else:
config.set('bitmessagesettings','minimizetotray','true')
config.set('bitmessagesettings','showtraynotifications','true')
config.set('bitmessagesettings','startintray','false')
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
print 'Storing config files in', appdata
# In-place settings migration from version 1 to 3.
if config.getint('bitmessagesettings','settingsversion') == 1:
config.set('bitmessagesettings','settingsversion','3') #If the settings version is equal to 2 then the sqlThread will modify the pubkeys table and change the settings version to 3.
config.set('bitmessagesettings','socksproxytype','none')
config.set('bitmessagesettings','sockshostname','localhost')
config.set('bitmessagesettings','socksport','9050')
config.set('bitmessagesettings','socksauthentication','false')
config.set('bitmessagesettings','socksusername','')
config.set('bitmessagesettings','sockspassword','')
config.set('bitmessagesettings','keysencrypted','false')
config.set('bitmessagesettings','messagesencrypted','false')
with open(appdata + 'keys.dat', 'wb') as configfile:
config.write(configfile)
# Load the pickled peer list, creating a default one on first run (or
# when the existing file is unreadable).
try:
pickleFile = open(appdata + 'knownnodes.dat', 'rb')
knownNodes = pickle.load(pickleFile)
pickleFile.close()
except:
createDefaultKnownNodes(appdata)
pickleFile = open(appdata + 'knownnodes.dat', 'rb')
knownNodes = pickle.load(pickleFile)
pickleFile.close()
if config.getint('bitmessagesettings', 'settingsversion') > 3:
print 'Bitmessage cannot read future versions of the keys file (keys.dat). Run the newer version of Bitmessage.'
raise SystemExit
#DNS bootstrap. This could be programmed to use the SOCKS proxy to do the DNS lookup some day but for now we will just rely on the entries in defaultKnownNodes.py. Hopefully either they are up to date or the user has run Bitmessage recently without SOCKS turned on and received good bootstrap nodes using that method.
if config.get('bitmessagesettings', 'socksproxytype') == 'none':
try:
for item in socket.getaddrinfo('bootstrap8080.bitmessage.org',80):
print 'Adding', item[4][0],'to knownNodes based on DNS boostrap method'
knownNodes[1][item[4][0]] = (8080,int(time.time()))
except:
print 'bootstrap8080.bitmessage.org DNS bootstraping failed.'
try:
for item in socket.getaddrinfo('bootstrap8444.bitmessage.org',80):
print 'Adding', item[4][0],'to knownNodes based on DNS boostrap method'
knownNodes[1][item[4][0]] = (8444,int(time.time()))
except:
print 'bootstrap8444.bitmessage.org DNS bootstrapping failed.'
else:
print 'DNS bootstrap skipped because SOCKS is used.'
# Start the Qt application and main window.
app = QtGui.QApplication(sys.argv)
app.setStyleSheet("QStatusBar::item { border: 0px solid black }")
myapp = MyForm()
myapp.show()
if config.getboolean('bitmessagesettings', 'startintray'):
myapp.hide()
myapp.trayIcon.show()
#self.hidden = True
#self.setWindowState(self.windowState() & QtCore.Qt.WindowMinimized)
#self.hide()
if 'win32' in sys.platform or 'win64' in sys.platform:
myapp.setWindowFlags(Qt.ToolTip)
sys.exit(app.exec_())
# So far, the Bitmessage protocol, this client, the Wiki, and the forums
# are all a one-man operation. Bitcoin tips are quite appreciated!
# 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u
|
from __future__ import division
import requests
import json
import re
from bs4 import BeautifulSoup
# Accumulators mapping Steam tag name -> summed numeric score / count.
tags_to_rating_sum = {}
tags_to_count = {}
# Store-page URL prefix; the numeric app id is appended to form each link.
baseString = "http://store.steampowered.com/app/"
# Map Steam's overall-review blurb to a numeric score, 0 (worst) .. 8 (best).
review_to_score = {
"Overwhelmingly Positive": 8,
"Very Positive": 7,
"Positive": 6,
"Mostly Positive": 5,
"Mixed": 4,
"Mostly Negative": 3,
"Negative": 2,
"Very Negative": 1,
"Overwhelmingly Negative": 0
}
def create_dicts_from_file(f):
    """Reset the module-level tag accumulators from a JSON tag list.

    *f* is a readable file object containing a JSON array of objects,
    each with a 'tag' key.  Both tags_to_rating_sum and tags_to_count
    are rebuilt with every tag mapped to zero.
    """
    global tags_to_rating_sum, tags_to_count
    names = [entry['tag'] for entry in json.load(f)]
    tags_to_rating_sum = {name: 0 for name in names}
    tags_to_count = {name: 0 for name in names}
def process_games(f):
    """Fetch the Steam store page of the first ten games listed in *f*.

    *f* is a JSON file containing objects with an 'app_id' key.  For each
    game the overall-review summary line is located and printed.  Pages
    without a summary section are skipped.
    """
    global tags_to_count
    global tags_to_rating_sum
    games = json.load(f)
    for game in games[:10]:
        link = baseString + game['app_id']
        page = requests.get(link)
        soup = BeautifulSoup(page.text, "lxml")
        overall_section = soup.find_all("div", class_="summary_section")
        #print(overall_section)
        if len(overall_section) == 0:
            continue  # no review summary on the page (e.g. unreleased game)
        for rating in overall_section:
            # BUG FIX: str.find() returns -1 (truthy!) when the substring is
            # absent and 0 (falsy) when it starts the string, so the old
            # `if rating.text.find("Overall"):` test was effectively inverted.
            if "Overall" in rating.text:
                rating2 = rating.text.replace("\n", "").strip()
                print(rating2)
# Script entry point: load the tag list, then scrape the game pages.
if __name__ == "__main__":
with open("../../data/tags.json", 'r') as f:
create_dicts_from_file(f)
with open("../../data/app_ids.json", 'r') as g:
process_games(g)
Create an average rating per tag based upon customer reviews.
from __future__ import division
import requests
import json
import re
from bs4 import BeautifulSoup
# Accumulators mapping Steam tag name -> summed numeric score / count.
tags_to_rating_sum = {}
tags_to_count = {}
# Store-page URL prefix; the numeric app id is appended to form each link.
baseString = "http://store.steampowered.com/app/"
# Map Steam's overall-review blurb to a numeric score, 0 (worst) .. 8 (best).
review_to_score = {
"Overwhelmingly Positive": 8,
"Very Positive": 7,
"Positive": 6,
"Mostly Positive": 5,
"Mixed": 4,
"Mostly Negative": 3,
"Negative": 2,
"Very Negative": 1,
"Overwhelmingly Negative": 0
}
# Final tag -> mean score mapping produced by average_reviews().
average = {}
def create_dicts_from_file(f):
    """Initialise the global per-tag accumulators from the JSON file *f*.

    Every tag named in the JSON array (objects with a 'tag' key) starts
    with a rating sum of 0 and a count of 0.
    """
    global tags_to_rating_sum, tags_to_count
    keys = [entry['tag'] for entry in json.load(f)]
    tags_to_rating_sum = dict.fromkeys(keys, 0)
    tags_to_count = dict.fromkeys(keys, 0)
def process_games(f):
    """Accumulate review scores per tag for every game listed in *f*.

    For each app id the Steam store page is fetched; the overall-review
    blurb is mapped to a numeric score via review_to_score and added to
    every recognised popular tag on the page, updating the module-level
    tags_to_count / tags_to_rating_sum dictionaries.
    """
    global tags_to_count
    global tags_to_rating_sum
    games = json.load(f)
    overall_re = r'Overall:([A-Za-z ]+)'
    for game in games:
        link = baseString + game['app_id']
        page = requests.get(link)
        soup = BeautifulSoup(page.text, "lxml")
        overall_section = soup.find_all("div", class_="summary_section")
        if len(overall_section) == 0:
            continue  # page has no review summary (e.g. unreleased game)
        score = None
        for rating in overall_section:
            # BUG FIX: use a substring test instead of find() > 0, which
            # silently missed a match at index 0.
            if "Overall" in rating.text:
                rating2 = rating.text.replace("\n", "").strip()
                # BUG FIX: re.match can return None; the old code called
                # .groups() unconditionally and crashed with AttributeError.
                match = re.search(overall_re, rating2)
                if match is None:
                    continue
                review = match.groups()[0]
                score = review_to_score[review]
        # BUG FIX: 'score' was previously used even when no overall rating
        # matched, raising NameError; skip such games instead.
        if score is None:
            continue
        tags_section = soup.find("div", class_="glance_tags popular_tags")
        # BUG FIX: soup.find() returns None when the tag block is missing;
        # len(None) raised TypeError in the old code.
        if tags_section is None:
            continue
        for tag in tags_section.find_all("a"):
            trimmed_tag = tag.text.replace("\r", "").replace("\n", "").replace("\t", "").strip()
            if trimmed_tag in tags_to_count:
                tags_to_count[trimmed_tag] += 1
                tags_to_rating_sum[trimmed_tag] += score
def average_reviews():
    """Write the mean review score per tag to ../../reviews.json.

    Uses the module-level tags_to_rating_sum / tags_to_count accumulators.
    Tags that never appeared on any processed game page are omitted --
    previously any zero-count tag raised ZeroDivisionError.
    """
    global tags_to_count
    global tags_to_rating_sum
    global average
    for key in tags_to_count:
        count = tags_to_count[key]
        if count == 0:
            continue  # unseen tag: skip rather than divide by zero
        average[key] = tags_to_rating_sum[key] / count
    with open("../../reviews.json", "w") as f:
        json.dump(average, f, indent=2)
# Script entry point: load the tag list, then scrape the game pages.
# NOTE(review): average_reviews() is defined above but never invoked here,
# so reviews.json is never written -- confirm whether that is intentional.
if __name__ == "__main__":
with open("../../data/tags.json", 'r') as f:
create_dicts_from_file(f)
with open("../../data/app_ids.json", 'r') as g:
process_games(g)
|
import unittest
import numpy as np
from chainer import testing
from chainer.testing import attr
from chainercv.datasets import cub_label_names
from chainercv.datasets import CUBLabelDataset
from chainercv.utils import assert_is_bbox
from chainercv.utils import assert_is_classification_dataset
# Run the test case twice: once with bounding boxes returned, once without.
@testing.parameterize(
{'return_bb': True},
{'return_bb': False}
)
class TestCUBLabelDataset(unittest.TestCase):
def setUp(self):
self.dataset = CUBLabelDataset(return_bb=self.return_bb)
# Marked slow: exercises real dataset samples (n_example=10).
@attr.slow
def test_cub_label_dataset(self):
assert_is_classification_dataset(
self.dataset, len(cub_label_names), n_example=10)
if self.return_bb:
# Spot-check one random sample's bounding box; bb[None] adds the
# leading axis that assert_is_bbox expects.
idx = np.random.choice(np.arange(10))
_, _, bb = self.dataset[idx]
assert_is_bbox(bb[None])
testing.run_module(__name__, __file__)
Updated based on #405.
import unittest
import numpy as np
from chainer import testing
from chainer.testing import attr
from chainercv.datasets import cub_label_names
from chainercv.datasets import CUBLabelDataset
from chainercv.utils import assert_is_bbox
from chainercv.utils import assert_is_label_dataset
# Run the test case twice: once with bounding boxes returned, once without.
@testing.parameterize(
{'return_bb': True},
{'return_bb': False}
)
class TestCUBLabelDataset(unittest.TestCase):
def setUp(self):
self.dataset = CUBLabelDataset(return_bb=self.return_bb)
# Marked slow: exercises real dataset samples (n_example=10).
@attr.slow
def test_cub_label_dataset(self):
assert_is_label_dataset(
self.dataset, len(cub_label_names), n_example=10)
if self.return_bb:
# Spot-check one random sample's bounding box; bb[None] adds the
# leading axis that assert_is_bbox expects.
idx = np.random.choice(np.arange(10))
_, _, bb = self.dataset[idx]
assert_is_bbox(bb[None])
testing.run_module(__name__, __file__)
|
#!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon19.py measures the temperature of the diskarray.
import syslog, traceback
import os, sys, time, math, commands
from libdaemon import Daemon
from libsmart2 import SmartDisk
# BEWARE
# The disks identified here as `sda`, `sdb` etc. may not necessarily
# be called `/dev/sda`, `/dev/sdb` etc. on the system!!
# One SmartDisk handle per physical drive, addressed by WWN (see the
# BEWARE note above: kernel device names may differ).
sda = SmartDisk("wwn-0x50026b723c0d6dd5") # SSD 50026B723C0D6DD5"
sdb = SmartDisk("wwn-0x50014ee261020fce") # WD-WCC4N5PF96KD"
sdc = SmartDisk("wwn-0x50014ee605a043e2") # WD-WMC4N0K01249"
sdd = SmartDisk("wwn-0x50014ee6055a237b") # WD-WMC4N0J6Y6LW"
sde = SmartDisk("wwn-0x50014ee60507b79c") # WD-WMC4N0E24DVU"
#sdf
#sdg
# Set to True by the 'foreground' command in __main__ for console debugging.
DEBUG = False
# Daemon main loop: sample disk temperatures once a minute and report a
# rolling average every SamplesPerCycle samples.  Python 2 code.
class MyDaemon(Daemon):
def run(self):
sampleptr = 0
cycles = 6
SamplesPerCycle = 5
samples = SamplesPerCycle * cycles
# NOTE(review): 'datapoints' is never used below -- confirm it is dead.
datapoints = 4
data = []
sampleTime = 60
cycleTime = samples * sampleTime
# sync to whole minute
waitTime = (cycleTime + sampleTime) - (time.time() % (cycleTime/cycles))
if DEBUG:
print "NOT waiting {0} s.".format(waitTime)
else:
time.sleep(waitTime)
while True:
try:
startTime = time.time()
result = do_work().split(',')
if DEBUG: print result
data.append(map(float, result))
# Keep at most 'samples' entries (sliding window).
if (len(data) > samples):data.pop(0)
sampleptr = sampleptr + 1
# report sample average
if (sampleptr % SamplesPerCycle == 0):
# Column-wise sums over the window, then per-column means.
somma = map(sum,zip(*data))
averages = [format(s / len(data), '.3f') for s in somma]
if DEBUG:print averages
do_report(averages)
if (sampleptr == samples):
sampleptr = 0
# Sleep only the remainder of the current sample interval.
waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
if (waitTime > 0):
if DEBUG:print "Waiting {0} s".format(int(waitTime))
time.sleep(waitTime)
except Exception as e:
if DEBUG:
print("Unexpected error:")
print e.message
# Log the failure to syslog, then let the daemon die.
syslog.syslog(syslog.LOG_ALERT,e.__doc__)
syslog_trace(traceback.format_exc())
raise
def syslog_trace(trace):
    """Log a python stack trace to syslog, one non-empty line per entry."""
    for line in trace.split('\n'):
        if line:
            syslog.syslog(syslog.LOG_ALERT, line)
# Poll SMART data from every configured disk and return the temperatures
# (attribute '194', per the 'disktemperature' note below) as one CSV string.
def do_work():
# 5 datapoints gathered here
#
sda.smart()
sdb.smart()
sdc.smart()
sdd.smart()
sde.smart()
#sdf
#sdg
# disktemperature
Tsda=sda.getdata('194')
Tsdb=sdb.getdata('194')
Tsdc=sdc.getdata('194')
Tsdd=sdd.getdata('194')
Tsde=sde.getdata('194')
# Placeholders for the not-yet-installed sdf/sdg drives.
Tsdf=0
Tsdg=0
if DEBUG: print Tsda, Tsdb, Tsdc, Tsdd, Tsde
return '{0}, {1}, {2}, {3}, {4}'.format(Tsda, Tsdb, Tsdc, Tsdd, Tsde)
# Append one timestamped CSV line with the averaged temperatures, guarded
# by a simple lock file.  Uses the Python 2-only 'commands' module and
# file() builtin.
def do_report(result):
# Get the time and date in human-readable form and UN*X-epoch...
outDate = commands.getoutput("date '+%F %H:%M:%S, %s'")
result = ', '.join(map(str, result))
flock = '/tmp/ubundiagd/19.lock'
lock(flock)
f = file('/tmp/ubundiagd/19-tempdisk.csv', 'a')
# write out a NaN for disks sdf and sdg
f.write('{0}, {1}, NaN, NaN\n'.format(outDate, result) )
f.close()
unlock(flock)
return
def lock(fname):
    """Create (touch) the given lock file; its contents are irrelevant."""
    with open(fname, 'a'):
        pass
def unlock(fname):
    """Remove the lock file; a missing file is silently tolerated."""
    if not os.path.isfile(fname):
        return
    os.remove(fname)
# Command-line front end: start/stop/restart the daemon or run it in the
# foreground with DEBUG logging for troubleshooting.
if __name__ == "__main__":
daemon = MyDaemon('/tmp/ubundiagd/19.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print "Debug-mode started. Use <Ctrl>+C to stop."
DEBUG = True
if DEBUG:
logtext = "Daemon logging is ON"
syslog.syslog(syslog.LOG_DEBUG, logtext)
# Run in this process instead of daemonizing.
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart|foreground" % sys.argv[0]
sys.exit(2)
Revert "different calculation method"
This reverts commit fb815c564c7910db3b042aee015e6222d84eb9c3.
#!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon19.py measures the temperature of the diskarray.
import syslog, traceback
import os, sys, time, math, commands
from libdaemon import Daemon
from libsmart2 import SmartDisk
# BEWARE
# The disks identified here as `sda`, `sdb` etc. may not necessarily
# be called `/dev/sda`, `/dev/sdb` etc. on the system!!
# One SmartDisk handle per physical drive, addressed by WWN (see the
# BEWARE note above: kernel device names may differ).
sda = SmartDisk("wwn-0x50026b723c0d6dd5") # SSD 50026B723C0D6DD5"
sdb = SmartDisk("wwn-0x50014ee261020fce") # WD-WCC4N5PF96KD"
sdc = SmartDisk("wwn-0x50014ee605a043e2") # WD-WMC4N0K01249"
sdd = SmartDisk("wwn-0x50014ee6055a237b") # WD-WMC4N0J6Y6LW"
sde = SmartDisk("wwn-0x50014ee60507b79c") # WD-WMC4N0E24DVU"
#sdf
#sdg
# Set to True by the 'foreground' command in __main__ for console debugging.
DEBUG = False
class MyDaemon(Daemon):
    """Daemon that samples disk temperatures once a minute (do_work) and
    reports a sliding-window average every cycle (do_report)."""

    def run(self):
        sample_ptr = 0
        cycles = 6
        samples_per_cycle = 5
        samples = samples_per_cycle * cycles
        datapoints = 4
        data = []
        sample_time = 60
        cycle_time = samples * sample_time

        # sync to whole minute
        wait_time = (cycle_time + sample_time) - (time.time() % cycle_time)
        if DEBUG:
            print("NOT waiting {0} s.".format(wait_time))
        else:
            time.sleep(wait_time)

        while True:
            try:
                start_time = time.time()
                result = do_work().split(',')
                if DEBUG:
                    print(result)
                # Python 2: map() returns a list of floats here.
                data.append(map(float, result))
                # Keep the window at most `samples` entries long.
                if len(data) > samples:
                    data.pop(0)
                sample_ptr = sample_ptr + 1

                # report sample average once per cycle
                if sample_ptr % samples_per_cycle == 0:
                    # Column-wise sums over the sliding window.
                    somma = map(sum, zip(*data))
                    averages = [format(s / len(data), '.3f') for s in somma]
                    if DEBUG:
                        print(averages)
                    do_report(averages)

                if sample_ptr == samples:
                    sample_ptr = 0

                # Sleep whatever remains of the current sampling slot.
                wait_time = sample_time - (time.time() - start_time) - (start_time % sample_time)
                if wait_time > 0:
                    if DEBUG:
                        print("Waiting {0} s".format(int(wait_time)))
                    time.sleep(wait_time)
            except Exception as e:
                if DEBUG:
                    print("Unexpected error:")
                    print(e.message)
                syslog.syslog(syslog.LOG_ALERT, e.__doc__)
                syslog_trace(traceback.format_exc())
                raise
def syslog_trace(trace):
    '''Log a python stack trace to syslog, one ALERT entry per
    non-empty line.'''
    for trace_line in trace.split('\n'):
        if trace_line:
            syslog.syslog(syslog.LOG_ALERT, trace_line)
def do_work():
# 5 datapoints gathered here
#
sda.smart()
sdb.smart()
sdc.smart()
sdd.smart()
sde.smart()
#sdf
#sdg
# disktemperature
Tsda=sda.getdata('194')
Tsdb=sdb.getdata('194')
Tsdc=sdc.getdata('194')
Tsdd=sdd.getdata('194')
Tsde=sde.getdata('194')
Tsdf=0
Tsdg=0
if DEBUG: print Tsda, Tsdb, Tsdc, Tsdd, Tsde
return '{0}, {1}, {2}, {3}, {4}'.format(Tsda, Tsdb, Tsdc, Tsdd, Tsde)
def do_report(result):
    """Append averaged temperatures to the CSV log.

    The written line is: "<date>, <epoch>, <t_sda..t_sde>, NaN, NaN";
    the NaN columns are placeholders for the absent disks sdf and sdg.

    :param result: iterable of temperature values (stringified here)
    """
    # Get the time and date in human-readable form and UN*X-epoch...
    outDate = commands.getoutput("date '+%F %H:%M:%S, %s'")
    result = ', '.join(map(str, result))
    flock = '/tmp/ubundiagd/19.lock'
    lock(flock)
    try:
        # `with` guarantees the file is closed even if the write raises
        # (the original left the handle open and the lock in place on error).
        with open('/tmp/ubundiagd/19-tempdisk.csv', 'a') as f:
            # write out a NaN for disks sdf and sdg
            f.write('{0}, {1}, NaN, NaN\n'.format(outDate, result))
    finally:
        unlock(flock)
    return
def lock(fname):
    """Take the advisory lock: touch *fname* into existence."""
    with open(fname, 'a'):
        pass
def unlock(fname):
    """Release the advisory lock: delete *fname* when it exists."""
    if not os.path.isfile(fname):
        return
    os.remove(fname)
if __name__ == "__main__":
    # Dispatch the daemon lifecycle verb given on the command line.
    daemon = MyDaemon('/tmp/ubundiagd/19.pid')
    if len(sys.argv) == 2:
        verb = sys.argv[1]
        if verb == 'start':
            daemon.start()
        elif verb == 'stop':
            daemon.stop()
        elif verb == 'restart':
            daemon.restart()
        elif verb == 'foreground':
            # assist with debugging: stay attached to the terminal
            print("Debug-mode started. Use <Ctrl>+C to stop.")
            DEBUG = True
            if DEBUG:
                logtext = "Daemon logging is ON"
                syslog.syslog(syslog.LOG_DEBUG, logtext)
            daemon.run()
        else:
            print("Unknown command")
            sys.exit(2)
        sys.exit(0)
    else:
        print("usage: %s start|stop|restart|foreground" % sys.argv[0])
        sys.exit(2)
|
#!/usr/bin/env python3
# This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
import argparse
import codecs
import gettext
import os
import sys
import logging
from urllib.parse import urlparse
import pkg_resources
from pkg_resources import resource_filename
from setproctitle import setproctitle
from gosa.client import __version__ as VERSION
from requests import HTTPError
from gosa.client.command import ClientCommandRegistry
from gosa.client.mqtt_service import MQTTClientService
from gosa.client.plugins.dbus.proxy import DBUSProxy
from gosa.common import Environment
from gosa.common.components import JSONServiceProxy
from gosa.common.components.dbus_runner import DBusRunner
from gosa.common.components.registry import PluginRegistry
from gosa.common.gjson import dumps
# Module-level i18n setup: load the gosa.client "messages" catalog; fall
# back to a null translation when no locale data is installed.
t = gettext.translation('messages', resource_filename("gosa.client", "locale"), fallback=True)
_ = t.gettext
def notify_backend(env, mode, user):
    """Notify the GOsa backend that a user session starts or ends and
    forward the resulting session configuration (menu, printers, screen
    resolution) to the local DBus service.

    :param env: gosa Environment instance (configuration access)
    :param mode: "start" or "end"
    :param user: name of the user owning the session
    :return: False when the backend login fails, otherwise None;
        exits the process on unexpected errors
    """
    log = logging.getLogger(__name__)

    PluginRegistry.modules["ClientCommandRegistry"] = ClientCommandRegistry()
    PluginRegistry.modules["MQTTClientService"] = MQTTClientService()

    dbus_proxy = DBUSProxy()
    dbus_proxy.serve(register_methods=False)

    url = env.config.get("jsonrpc.url", default=None)
    sys_id = env.config.get("core.id", default=None)
    key = env.config.get("jsonrpc.key", default=None)

    # Prepare URL for login
    url = urlparse(url)

    # Try to log in with provided credentials
    connection = '%s://%s%s' % (url.scheme, url.netloc, url.path)
    proxy = JSONServiceProxy(connection)

    # Try to log in
    try:
        if not proxy.login(sys_id, key):
            log.error("connection to GOsa backend failed")
            print(_("Cannot join client: check user name or password!"))
            return False
        else:
            if mode == "start":
                config = proxy.preUserSession(sys_id, user)
                # send config to dbus
                if "menu" in config:
                    # send to client
                    print("sending generated menu for user %s" % user)
                    dbus_proxy.callDBusMethod("dbus_configureUserMenu", user, dumps(config["menu"]))

                if "printer-setup" in config and "printers" in config["printer-setup"]:
                    dbus_proxy.callDBusMethod("dbus_deleteAllPrinters")
                    for p_conf in config["printer-setup"]["printers"]:
                        # D-Bus cannot marshal None values: replace them with ""
                        # before sending (keeps parity with the later revision
                        # of this helper).
                        printer = {k: v if v is not None else "" for (k, v) in p_conf.items()}
                        dbus_proxy.callDBusMethod("dbus_addPrinter", printer)

                    if "defaultPrinter" in config["printer-setup"] and config["printer-setup"]["defaultPrinter"] is not None:
                        dbus_proxy.callDBusMethod("dbus_defaultPrinter", config["printer-setup"]["defaultPrinter"])

                if "resolution" in config and config["resolution"] is not None and len(config["resolution"]):
                    print("sending screen resolution: %sx%s for user %s" % (config["resolution"][0], config["resolution"][1], user))
                    dbus_proxy.callDBusMethod("dbus_configureUserScreen", user, config["resolution"][0], config["resolution"][1])

            elif mode == "end":
                proxy.postUserSession(sys_id, user)

    except HTTPError as e:
        # requests' HTTPError carries no `.code` attribute; the HTTP status
        # lives on e.response (the original `e.code` raised AttributeError
        # inside this handler).
        if e.response is not None and e.response.status_code == 401:
            log.error("connection to GOsa backend failed")
            print(_("Cannot join client: check user name or password!"))
            return False
        else:
            print("Error: %s " % str(e))
            sys.exit(1)

    except Exception as e:
        print("Error: %s " % str(e))
        sys.exit(1)
def main():
    """
    Entry point of the gosa-session helper: parse the command line and
    forward the user-session notification to the backend.
    """
    # Enable DBus runner
    runner = DBusRunner()
    runner.start()

    # Set process list title
    os.putenv('SPT_NOENV', 'non_empty_value')
    setproctitle("gosa-session")

    description = 'Helper commands to notify the GOsa backend about active user sessions on the client.'
    cli = argparse.ArgumentParser(description=description)
    cli.add_argument('-m', '--mode', dest="mode", type=str, help='"start" or "end" to specify the user session state')
    cli.add_argument('-u', '--user', dest="user", type=str, help='user name')
    options, _unused = cli.parse_known_args()

    # Initialize core environment
    env = Environment.getInstance()
    notify_backend(env, options.mode, options.user)
if __name__ == '__main__':
    # Wrap the std streams in UTF-8 writers when they expose no encoding
    # (e.g. when output is redirected to a pipe).
    for stream_name in ('stdout', 'stderr'):
        stream = getattr(sys, stream_name)
        if not stream.encoding:
            setattr(sys, stream_name, codecs.getwriter('utf8')(stream))
    # Fail early if the installed gosa.common does not match this client.
    pkg_resources.require('gosa.common==%s' % VERSION)
    netstate = False  # NOTE(review): apparently unused in this script — confirm
    main()
Remove None values before sending data over D-Bus
#!/usr/bin/env python3
# This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
import argparse
import codecs
import gettext
import os
import sys
import logging
from urllib.parse import urlparse
import pkg_resources
from pkg_resources import resource_filename
from setproctitle import setproctitle
from gosa.client import __version__ as VERSION
from requests import HTTPError
from gosa.client.command import ClientCommandRegistry
from gosa.client.mqtt_service import MQTTClientService
from gosa.client.plugins.dbus.proxy import DBUSProxy
from gosa.common import Environment
from gosa.common.components import JSONServiceProxy
from gosa.common.components.dbus_runner import DBusRunner
from gosa.common.components.registry import PluginRegistry
from gosa.common.gjson import dumps
# Module-level i18n setup: load the gosa.client "messages" catalog with a
# null-translation fallback when locale data is missing.
t = gettext.translation('messages', resource_filename("gosa.client", "locale"), fallback=True)
_ = t.gettext
def notify_backend(env, mode, user):
    """Notify the GOsa backend that a user session starts or ends and
    forward the resulting session configuration (menu, printers, screen
    resolution) to the local DBus service.

    :param env: gosa Environment instance (configuration access)
    :param mode: "start" or "end"
    :param user: name of the user owning the session
    :return: False when the backend login fails, otherwise None;
        exits the process on unexpected errors
    """
    log = logging.getLogger(__name__)

    PluginRegistry.modules["ClientCommandRegistry"] = ClientCommandRegistry()
    PluginRegistry.modules["MQTTClientService"] = MQTTClientService()

    dbus_proxy = DBUSProxy()
    dbus_proxy.serve(register_methods=False)

    url = env.config.get("jsonrpc.url", default=None)
    sys_id = env.config.get("core.id", default=None)
    key = env.config.get("jsonrpc.key", default=None)

    # Prepare URL for login
    url = urlparse(url)

    # Try to log in with provided credentials
    connection = '%s://%s%s' % (url.scheme, url.netloc, url.path)
    proxy = JSONServiceProxy(connection)

    # Try to log in
    try:
        if not proxy.login(sys_id, key):
            log.error("connection to GOsa backend failed")
            print(_("Cannot join client: check user name or password!"))
            return False
        else:
            if mode == "start":
                config = proxy.preUserSession(sys_id, user)
                # send config to dbus
                if "menu" in config:
                    # send to client
                    print("sending generated menu for user '%s'" % user)
                    dbus_proxy.callDBusMethod("dbus_configureUserMenu", user, dumps(config["menu"]))

                if "printer-setup" in config and "printers" in config["printer-setup"]:
                    dbus_proxy.callDBusMethod("dbus_deleteAllPrinters")
                    for p_conf in config["printer-setup"]["printers"]:
                        print("adding printer '%s'" % p_conf["cn"])
                        # D-Bus cannot marshal None values: replace them with ""
                        p = {k: v if v is not None else "" for (k, v) in p_conf.items()}
                        dbus_proxy.callDBusMethod("dbus_addPrinter", p)

                    if "defaultPrinter" in config["printer-setup"] and config["printer-setup"]["defaultPrinter"] is not None:
                        print("setting '%s' as default printer" % config["printer-setup"]["defaultPrinter"])
                        dbus_proxy.callDBusMethod("dbus_defaultPrinter", config["printer-setup"]["defaultPrinter"])

                if "resolution" in config and config["resolution"] is not None and len(config["resolution"]):
                    print("sending screen resolution: %sx%s for user %s" % (config["resolution"][0], config["resolution"][1], user))
                    dbus_proxy.callDBusMethod("dbus_configureUserScreen", user, config["resolution"][0], config["resolution"][1])

            elif mode == "end":
                proxy.postUserSession(sys_id, user)

    except HTTPError as e:
        # requests' HTTPError carries no `.code` attribute; the HTTP status
        # lives on e.response (the original `e.code` raised AttributeError
        # inside this handler).
        if e.response is not None and e.response.status_code == 401:
            log.error("connection to GOsa backend failed")
            print(_("Cannot join client: check user name or password!"))
            return False
        else:
            print("Error: %s " % str(e))
            sys.exit(1)

    except Exception as e:
        print("Error: %s " % str(e))
        sys.exit(1)
def main():
    """
    Entry point of the gosa-session helper: parse the command line and
    forward the user-session notification to the backend.
    """
    # Enable DBus runner
    runner = DBusRunner()
    runner.start()

    # Set process list title
    os.putenv('SPT_NOENV', 'non_empty_value')
    setproctitle("gosa-session")

    description = 'Helper commands to notify the GOsa backend about active user sessions on the client.'
    cli = argparse.ArgumentParser(description=description)
    cli.add_argument('-m', '--mode', dest="mode", type=str, help='"start" or "end" to specify the user session state')
    cli.add_argument('-u', '--user', dest="user", type=str, help='user name')
    options, _unused = cli.parse_known_args()

    # Initialize core environment
    env = Environment.getInstance()
    notify_backend(env, options.mode, options.user)
if __name__ == '__main__':
    # Force UTF-8 writers onto streams that declare no encoding
    # (typical when output is piped).
    for stream_name in ('stdout', 'stderr'):
        stream = getattr(sys, stream_name)
        if not stream.encoding:
            setattr(sys, stream_name, codecs.getwriter('utf8')(stream))
    # Verify the installed gosa.common matches this client's version.
    pkg_resources.require('gosa.common==%s' % VERSION)
    netstate = False  # NOTE(review): apparently unused in this script — confirm
    main()
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
from openerp import tools
from email.header import decode_header
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.tools import html_email_clean
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
# Optional dependency: mako is used for template rendering; degrade
# gracefully when it is not installed.
try:
    from mako.template import Template as MakoTemplate
except ImportError:
    # NOTE(review): the warning text mentions "payment_acquirer" but this is
    # the mail.message module — looks like a copy-paste; confirm the intended
    # wording before changing the (grep-able) log message.
    _logger.warning("payment_acquirer: mako templates not available, payment acquirer will not work!")
""" Some tools for parsing / creating email fields """
def decode(text):
    """Decode an RFC2047-encoded SMTP header into a unicode string.

    Returns None when *text* is empty/falsy (historical behaviour of
    this helper).
    """
    if not text:
        return None
    parts = decode_header(text.replace('\r', ''))
    return ''.join(tools.ustr(content, charset) for content, charset in parts)
class mail_message(osv.Model):
    """ Messages model: system notification (replacing res.log notifications),
        comments (OpenChatter discussion) and incoming emails. """
    _name = 'mail.message'
    _description = 'Message'
    _inherit = ['ir.needaction_mixin']
    _order = 'id desc'  # newest messages first
    _rec_name = 'record_name'

    # Page size used by message_read()
    _message_read_limit = 30
    # Fields fetched when reading messages for the web client
    _message_read_fields = ['id', 'parent_id', 'model', 'res_id', 'body', 'subject', 'date', 'to_read', 'email_from',
        'type', 'vote_user_ids', 'attachment_ids', 'author_id', 'partner_ids', 'record_name']
    # Max length of the record name shown next to a message (see _shorten_name)
    _message_record_name_length = 18
    # presumably the body-length threshold for the client's "read more"
    # link — TODO confirm against the web client code
    _message_read_more_limit = 1024
def default_get(self, cr, uid, fields, context=None):
# protection for `default_type` values leaking from menu action context (e.g. for invoices)
if context and context.get('default_type') and context.get('default_type') not in self._columns['type'].selection:
context = dict(context, default_type=None)
return super(mail_message, self).default_get(cr, uid, fields, context=context)
def _shorten_name(self, name):
if len(name) <= (self._message_record_name_length + 3):
return name
return name[:self._message_record_name_length] + '...'
def _get_record_name(self, cr, uid, ids, name, arg, context=None):
""" Return the related document name, using name_get. It is done using
SUPERUSER_ID, to be sure to have the record name correctly stored. """
# TDE note: regroup by model/ids, to have less queries to perform
result = dict.fromkeys(ids, False)
for message in self.read(cr, uid, ids, ['model', 'res_id'], context=context):
if not message.get('model') or not message.get('res_id') or not self.pool.get(message['model']):
continue
result[message['id']] = self.pool.get(message['model']).name_get(cr, SUPERUSER_ID, [message['res_id']], context=context)[0][1]
return result
def _get_to_read(self, cr, uid, ids, name, arg, context=None):
""" Compute if the message is unread by the current user. """
res = dict((id, False) for id in ids)
partner_id = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
notif_obj = self.pool.get('mail.notification')
notif_ids = notif_obj.search(cr, uid, [
('partner_id', 'in', [partner_id]),
('message_id', 'in', ids),
('read', '=', False),
], context=context)
for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
res[notif.message_id.id] = True
return res
def _search_to_read(self, cr, uid, obj, name, domain, context=None):
""" Search for messages to read by the current user. Condition is
inversed because we search unread message on a read column. """
return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.read', '=', not domain[0][2])]
def _get_starred(self, cr, uid, ids, name, arg, context=None):
""" Compute if the message is unread by the current user. """
res = dict((id, False) for id in ids)
partner_id = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
notif_obj = self.pool.get('mail.notification')
notif_ids = notif_obj.search(cr, uid, [
('partner_id', 'in', [partner_id]),
('message_id', 'in', ids),
('starred', '=', True),
], context=context)
for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
res[notif.message_id.id] = True
return res
def _search_starred(self, cr, uid, obj, name, domain, context=None):
""" Search for messages to read by the current user. Condition is
inversed because we search unread message on a read column. """
return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.starred', '=', domain[0][2])]
def name_get(self, cr, uid, ids, context=None):
# name_get may receive int id instead of an id list
if isinstance(ids, (int, long)):
ids = [ids]
res = []
for message in self.browse(cr, uid, ids, context=context):
name = '%s: %s' % (message.subject or '', message.body or '')
res.append((message.id, self._shorten_name(name.lstrip(' :'))))
return res
_columns = {
'type': fields.selection([
('email', 'Email'),
('comment', 'Comment'),
('notification', 'System notification'),
], 'Type',
help="Message type: email for email message, notification for system "\
"message, comment for other messages such as user replies"),
'email_from': fields.char('From',
help="Email address of the sender. This field is set when no matching partner is found for incoming emails."),
'author_id': fields.many2one('res.partner', 'Author', select=1,
ondelete='set null',
help="Author of the message. If not set, email_from may hold an email address that did not match any partner."),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
'notified_partner_ids': fields.many2many('res.partner', 'mail_notification',
'message_id', 'partner_id', 'Notified partners',
help='Partners that have a notification pushing this message in their mailboxes'),
'attachment_ids': fields.many2many('ir.attachment', 'message_attachment_rel',
'message_id', 'attachment_id', 'Attachments'),
'parent_id': fields.many2one('mail.message', 'Parent Message', select=True,
ondelete='set null', help="Initial thread message."),
'child_ids': fields.one2many('mail.message', 'parent_id', 'Child Messages'),
'model': fields.char('Related Document Model', size=128, select=1),
'res_id': fields.integer('Related Document ID', select=1),
'record_name': fields.function(_get_record_name, type='char',
store=True, string='Message Record Name',
help="Name get of the related document."),
'notification_ids': fields.one2many('mail.notification', 'message_id',
string='Notifications', auto_join=True,
help='Technical field holding the message notifications. Use notified_partner_ids to access notified partners.'),
'subject': fields.char('Subject'),
'date': fields.datetime('Date'),
'message_id': fields.char('Message-Id', help='Message unique identifier', select=1, readonly=1),
'body': fields.html('Contents', help='Automatically sanitized HTML contents'),
'to_read': fields.function(_get_to_read, fnct_search=_search_to_read,
type='boolean', string='To read',
help='Current user has an unread notification linked to this message'),
'starred': fields.function(_get_starred, fnct_search=_search_starred,
type='boolean', string='Starred',
help='Current user has a starred notification linked to this message'),
'subtype_id': fields.many2one('mail.message.subtype', 'Subtype',
ondelete='set null', select=1,),
'vote_user_ids': fields.many2many('res.users', 'mail_vote',
'message_id', 'user_id', string='Votes',
help='Users that voted for this message'),
}
def _needaction_domain_get(self, cr, uid, context=None):
return [('to_read', '=', True)]
def _get_default_author(self, cr, uid, context=None):
return self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
_defaults = {
'type': 'email',
'date': lambda *a: fields.datetime.now(),
'author_id': lambda self, cr, uid, ctx={}: self._get_default_author(cr, uid, ctx),
'body': '',
}
#------------------------------------------------------
# Vote/Like
#------------------------------------------------------
def vote_toggle(self, cr, uid, ids, context=None):
''' Toggles vote. Performed using read to avoid access rights issues.
Done as SUPERUSER_ID because uid may vote for a message he cannot modify. '''
for message in self.read(cr, uid, ids, ['vote_user_ids'], context=context):
new_has_voted = not (uid in message.get('vote_user_ids'))
if new_has_voted:
self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(4, uid)]}, context=context)
else:
self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(3, uid)]}, context=context)
return new_has_voted or False
#------------------------------------------------------
# download an attachment
#------------------------------------------------------
def download_attachment(self, cr, uid, id_message, attachment_id, context=None):
""" Return the content of linked attachments. """
message = self.browse(cr, uid, id_message, context=context)
if attachment_id in [attachment.id for attachment in message.attachment_ids]:
attachment = self.pool.get('ir.attachment').browse(cr, SUPERUSER_ID, attachment_id, context=context)
if attachment.datas and attachment.datas_fname:
return {
'base64': attachment.datas,
'filename': attachment.datas_fname,
}
return False
#------------------------------------------------------
# Notification API
#------------------------------------------------------
def set_message_read(self, cr, uid, msg_ids, read, create_missing=True, context=None):
""" Set messages as (un)read. Technically, the notifications related
to uid are set to (un)read. If for some msg_ids there are missing
notifications (i.e. due to load more or thread parent fetching),
they are created.
:param bool read: set notification as (un)read
:param bool create_missing: create notifications for missing entries
(i.e. when acting on displayed messages not notified)
"""
notification_obj = self.pool.get('mail.notification')
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
if not create_missing:
domain += [('read', '=', not read)]
notif_ids = notification_obj.search(cr, uid, domain, context=context)
# all message have notifications: already set them as (un)read
if len(notif_ids) == len(msg_ids) or not create_missing:
return notification_obj.write(cr, uid, notif_ids, {'read': read}, context=context)
# some messages do not have notifications: find which one, create notification, update read status
notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
for msg_id in to_create_msg_ids:
notification_obj.create(cr, uid, {'partner_id': user_pid, 'read': read, 'message_id': msg_id}, context=context)
return notification_obj.write(cr, uid, notif_ids, {'read': read}, context=context)
def set_message_starred(self, cr, uid, msg_ids, starred, create_missing=True, context=None):
""" Set messages as (un)starred. Technically, the notifications related
to uid are set to (un)starred.
:param bool starred: set notification as (un)starred
:param bool create_missing: create notifications for missing entries
(i.e. when acting on displayed messages not notified)
"""
notification_obj = self.pool.get('mail.notification')
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
if not create_missing:
domain += [('starred', '=', not starred)]
notif_ids = notification_obj.search(cr, uid, domain, context=context)
# all message have notifications: already set them as (un)starred
if len(notif_ids) == len(msg_ids) or not create_missing:
notification_obj.write(cr, uid, notif_ids, {'starred': starred}, context=context)
return starred
# some messages do not have notifications: find which one, create notification, update starred status
notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
for msg_id in to_create_msg_ids:
notification_obj.create(cr, uid, {'partner_id': user_pid, 'starred': starred, 'message_id': msg_id}, context=context)
notification_obj.write(cr, uid, notif_ids, {'starred': starred}, context=context)
return starred
#------------------------------------------------------
# Message loading for web interface
#------------------------------------------------------
    def _message_read_dict_postprocess(self, cr, uid, messages, message_tree, context=None):
        """ Post-processing on values given by message_read. This method will
            handle partners in batch to avoid doing numerous queries.

            :param list messages: list of message, as get_dict result
            :param dict message_tree: {[msg.id]: msg browse record}
        """
        res_partner_obj = self.pool.get('res.partner')
        ir_attachment_obj = self.pool.get('ir.attachment')
        pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=None)['partner_id'][0]

        # 1. Aggregate partners (author_id and partner_ids) and attachments
        partner_ids = set()
        attachment_ids = set()
        # NOTE: iteritems() — this module is Python 2 code
        for key, message in message_tree.iteritems():
            if message.author_id:
                partner_ids |= set([message.author_id.id])
            if message.notified_partner_ids:
                partner_ids |= set([partner.id for partner in message.notified_partner_ids])
            if message.attachment_ids:
                attachment_ids |= set([attachment.id for attachment in message.attachment_ids])

        # Read partners as SUPERUSER -> display the names like classic m2o even if no access
        partners = res_partner_obj.name_get(cr, SUPERUSER_ID, list(partner_ids), context=context)
        partner_tree = dict((partner[0], partner) for partner in partners)

        # 2. Attachments as SUPERUSER, because could receive msg and attachments for doc uid cannot see
        attachments = ir_attachment_obj.read(cr, SUPERUSER_ID, list(attachment_ids), ['id', 'datas_fname', 'name'], context=context)
        attachments_tree = dict((attachment['id'], {'id': attachment['id'], 'filename': attachment['datas_fname'], 'name': attachment['name']}) for attachment in attachments)

        # 3. Update message dictionaries
        for message_dict in messages:
            message_id = message_dict.get('id')
            message = message_tree[message_id]
            if message.author_id:
                author = partner_tree[message.author_id.id]
            else:
                # no matching partner: expose the raw email_from with a 0 id
                author = (0, message.email_from)
            partner_ids = []
            for partner in message.notified_partner_ids:
                if partner.id in partner_tree:
                    partner_ids.append(partner_tree[partner.id])
            attachment_ids = []
            for attachment in message.attachment_ids:
                if attachment.id in attachments_tree:
                    attachment_ids.append(attachments_tree[attachment.id])
            message_dict.update({
                'is_author': pid == author[0],
                'author_id': author,
                'partner_ids': partner_ids,
                'attachment_ids': attachment_ids,
            })
        return True
def _message_read_dict(self, cr, uid, message, parent_id=False, context=None):
""" Return a dict representation of the message. This representation is
used in the JS client code, to display the messages. Partners and
attachments related stuff will be done in post-processing in batch.
:param dict message: mail.message browse record
"""
# private message: no model, no res_id
is_private = False
if not message.model or not message.res_id:
is_private = True
# votes and favorites: res.users ids, no prefetching should be done
vote_nb = len(message.vote_user_ids)
has_voted = uid in [user.id for user in message.vote_user_ids]
try:
body_html = html_email_clean(message.body)
except Exception:
body_html = '<p><b>Encoding Error : </b><br/>Unable to convert this message (id: %s).</p>' % message.id
_logger.exception(Exception)
return {'id': message.id,
'type': message.type,
'body': body_html,
'model': message.model,
'res_id': message.res_id,
'record_name': message.record_name,
'subject': message.subject,
'date': message.date,
'to_read': message.to_read,
'parent_id': parent_id,
'is_private': is_private,
'author_id': False,
'is_author': False,
'partner_ids': [],
'vote_nb': vote_nb,
'has_voted': has_voted,
'is_favorite': message.starred,
'attachment_ids': [],
}
    def _message_read_add_expandables(self, cr, uid, messages, message_tree, parent_tree,
            message_unload_ids=[], thread_level=0, domain=[], parent_id=False, context=None):
        """ Create expandables for message_read, to load new messages.
            1. get the expandable for new threads
                if display is flat (thread_level == 0):
                    fetch message_ids < min(already displayed ids), because we
                    want a flat display, ordered by id
                else:
                    fetch message_ids that are not childs of already displayed
                    messages
            2. get the expandables for new messages inside threads if display
               is not flat
                for each thread header, search for its childs
                for each hole in the child list based on message displayed,
                create an expandable

            :param list messages: list of message structure for the Chatter
                widget to which expandables are added
            :param dict message_tree: dict [id]: browse record of this message
            :param dict parent_tree: dict [parent_id]: [child_ids]
            :param list message_unload_ids: list of message_ids we do not want
                to load
            :return bool: True
        """
        # NOTE(review): mutable default args (message_unload_ids=[], domain=[])
        # appear to be read-only here, so the shared-default pitfall should not
        # bite — confirm before refactoring.
        def _get_expandable(domain, message_nb, parent_id, max_limit):
            # a pseudo-message dict the Chatter widget renders as "load more"
            return {
                'domain': domain,
                'nb_messages': message_nb,
                'type': 'expandable',
                'parent_id': parent_id,
                'max_limit': max_limit,
            }

        if not messages:
            return True
        message_ids = sorted(message_tree.keys())

        # 1. get the expandable for new threads
        if thread_level == 0:
            exp_domain = domain + [('id', '<', min(message_unload_ids + message_ids))]
        else:
            exp_domain = domain + ['!', ('id', 'child_of', message_unload_ids + parent_tree.keys())]
        # probe with limit=1: we only need to know whether more messages exist
        ids = self.search(cr, uid, exp_domain, context=context, limit=1)
        if ids:
            # inside a thread: prepend
            if parent_id:
                messages.insert(0, _get_expandable(exp_domain, -1, parent_id, True))
            # new threads: append
            else:
                messages.append(_get_expandable(exp_domain, -1, parent_id, True))

        # 2. get the expandables for new messages inside threads if display is not flat
        if thread_level == 0:
            return True
        for message_id in message_ids:
            message = message_tree[message_id]
            # generate only for thread header messages (TDE note: parent_id may be False is uid cannot see parent_id, seems ok)
            if message.parent_id:
                continue
            # check there are message for expandable
            child_ids = set([child.id for child in message.child_ids]) - set(message_unload_ids)
            child_ids = sorted(list(child_ids), reverse=True)
            if not child_ids:
                continue
            # make groups of unread messages
            id_min, id_max, nb = max(child_ids), 0, 0
            for child_id in child_ids:
                if not child_id in message_ids:
                    # child not displayed: grow the current hole
                    nb += 1
                    if id_min > child_id:
                        id_min = child_id
                    if id_max < child_id:
                        id_max = child_id
                elif nb > 0:
                    # a displayed child closes the hole: insert an expandable after it
                    exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
                    idx = [msg.get('id') for msg in messages].index(child_id) + 1
                    # messages.append(_get_expandable(exp_domain, nb, message_id, False))
                    messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))
                    id_min, id_max, nb = max(child_ids), 0, 0
                else:
                    id_min, id_max, nb = max(child_ids), 0, 0
            # trailing hole after the last displayed child: attach to the header
            if nb > 0:
                exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
                idx = [msg.get('id') for msg in messages].index(message_id) + 1
                # messages.append(_get_expandable(exp_domain, nb, message_id, id_min))
                messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))
        return True
def message_read(self, cr, uid, ids=None, domain=None, message_unload_ids=None,
                 thread_level=0, context=None, parent_id=False, limit=None):
    """ Read messages from mail.message, and get back a list of structured
        messages to be displayed as discussion threads. If IDs is set,
        fetch these records. Otherwise use the domain to fetch messages.
        After having fetch messages, their ancestors will be added to obtain
        well formed threads, if uid has access to them.

        After reading the messages, expandable messages are added in the
        message list (see ``_message_read_add_expandables``). It consists
        in messages holding the 'read more' data: number of messages to
        read, domain to apply.

        :param list ids: optional IDs to fetch
        :param list domain: optional domain for searching ids if ids not set
        :param list message_unload_ids: optional ids we do not want to fetch,
            because i.e. they are already displayed somewhere
        :param int thread_level: 0 for a flat list, 1 for one level of threads
        :param int parent_id: context of parent_id
            - if parent_id reached when adding ancestors, stop going further
              in the ancestor search
            - if set in flat mode, ancestor_id is set to parent_id
        :param int limit: number of messages to fetch, before adding the
            ancestors and expandables
        :return list: list of message structure for the Chatter widget
    """
    assert thread_level in [0, 1], 'message_read() thread_level should be 0 (flat) or 1 (1 level of thread); given %s.' % thread_level
    domain = domain if domain is not None else []
    message_unload_ids = message_unload_ids if message_unload_ids is not None else []
    if message_unload_ids:
        # messages already displayed on the client must not be fetched again
        domain += [('id', 'not in', message_unload_ids)]
    notification_obj = self.pool.get('mail.notification')  # NOTE(review): unused in this method
    limit = limit or self._message_read_limit
    message_tree = {}   # {msg_id: browse_record}, fetched messages plus their ancestors
    message_list = []
    parent_tree = {}    # {thread_root_id: [message dicts of the thread]}

    # no specific IDS given: fetch messages according to the domain, add their parents if uid has access to
    if ids is None:
        ids = self.search(cr, uid, domain, context=context, limit=limit)

    # fetch parent if threaded, sort messages
    for message in self.browse(cr, uid, ids, context=context):
        message_id = message.id
        if message_id in message_tree:
            continue
        message_tree[message_id] = message
        # find parent_id
        if thread_level == 0:
            tree_parent_id = parent_id
        else:
            tree_parent_id = message_id
            parent = message
            # climb to the thread root, stopping at parent_id if reached
            while parent.parent_id and parent.parent_id.id != parent_id:
                parent = parent.parent_id
                tree_parent_id = parent.id
            if not parent.id in message_tree:
                message_tree[parent.id] = parent
        # newest messages first
        parent_tree.setdefault(tree_parent_id, [])
        if tree_parent_id != message_id:
            parent_tree[tree_parent_id].append(self._message_read_dict(cr, uid, message_tree[message_id], parent_id=tree_parent_id, context=context))

    if thread_level:
        for key, message_id_list in parent_tree.iteritems():
            # inside a thread: oldest first, headed by the thread root message
            message_id_list.sort(key=lambda item: item['id'])
            message_id_list.insert(0, self._message_read_dict(cr, uid, message_tree[key], context=context))

    # create final ordered message_list based on parent_tree
    parent_list = parent_tree.items()
    # threads ordered by their most recent message id, newest thread first
    parent_list = sorted(parent_list, key=lambda item: max([msg.get('id') for msg in item[1]]) if item[1] else item[0], reverse=True)
    message_list = [message for (key, msg_list) in parent_list for message in msg_list]

    # get the child expandable messages for the tree
    self._message_read_dict_postprocess(cr, uid, message_list, message_tree, context=context)
    self._message_read_add_expandables(cr, uid, message_list, message_tree, parent_tree,
        thread_level=thread_level, message_unload_ids=message_unload_ids, domain=domain, parent_id=parent_id, context=context)
    return message_list
#------------------------------------------------------
# mail_message internals
#------------------------------------------------------
def init(self, cr):
    """Ensure the composite (model, res_id) index exists on mail_message."""
    # Probe the PostgreSQL catalog first: plain CREATE INDEX would fail
    # if the index already exists.
    cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'mail_message_model_res_id_idx'""")
    index_exists = cr.fetchone()
    if not index_exists:
        cr.execute("""CREATE INDEX mail_message_model_res_id_idx ON mail_message (model, res_id)""")
def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
doc_ids = doc_dict.keys()
allowed_doc_ids = self.pool.get(doc_model).search(cr, uid, [('id', 'in', doc_ids)], context=context)
return set([message_id for allowed_doc_id in allowed_doc_ids for message_id in doc_dict[allowed_doc_id]])
def _find_allowed_doc_ids(self, cr, uid, model_ids, context=None):
    """ Return the subset of message ids whose related document is readable
        by uid.

        :param dict model_ids: {model_name: {res_id: set(message_ids)}}
        :return set: allowed message ids
    """
    model_access_obj = self.pool.get('ir.model.access')
    allowed_ids = set()
    for doc_model, doc_dict in model_ids.iteritems():
        # check(..., raise_exception=False): silently skip models uid
        # cannot read at all
        if not model_access_obj.check(cr, uid, doc_model, 'read', False):
            continue
        allowed_ids |= self._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
    return allowed_ids
def _search(self, cr, uid, args, offset=0, limit=None, order=None,
            context=None, count=False, access_rights_uid=None):
    """ Override that adds specific access rights of mail.message, to remove
        ids uid could not see according to our custom rules. Please refer
        to check_access_rule for more details about those rules.

        After having received ids of a classic search, keep only:
        - if author_id == pid, uid is the author, OR
        - a notification (id, pid) exists, uid has been notified, OR
        - uid have read access to the related document is model, res_id
        - otherwise: remove the id
    """
    # Rules do not apply to administrator
    if uid == SUPERUSER_ID:
        return super(mail_message, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
            context=context, count=count, access_rights_uid=access_rights_uid)
    # Perform a super with count as False, to have the ids, not a counter
    ids = super(mail_message, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
        context=context, count=False, access_rights_uid=access_rights_uid)
    if not ids and count:
        return 0
    elif not ids:
        return ids
    # pid: partner id of the current user
    pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'])['partner_id'][0]
    author_ids, partner_ids, allowed_ids = set([]), set([]), set([])
    model_ids = {}
    # classify every found message into exactly one bucket:
    # authored / notified / document-related (checked afterwards)
    messages = super(mail_message, self).read(cr, uid, ids, ['author_id', 'model', 'res_id', 'notified_partner_ids'], context=context)
    for message in messages:
        if message.get('author_id') and message.get('author_id')[0] == pid:
            author_ids.add(message.get('id'))
        elif pid in message.get('notified_partner_ids'):
            partner_ids.add(message.get('id'))
        elif message.get('model') and message.get('res_id'):
            model_ids.setdefault(message.get('model'), {}).setdefault(message.get('res_id'), set()).add(message.get('id'))
    allowed_ids = self._find_allowed_doc_ids(cr, uid, model_ids, context=context)
    final_ids = author_ids | partner_ids | allowed_ids
    if count:
        return len(final_ids)
    else:
        # re-construct a list based on ids, because set did not keep the original order
        id_list = [id for id in ids if id in final_ids]
        return id_list
def check_access_rule(self, cr, uid, ids, operation, context=None):
    """ Access rules of mail.message:
        - read: if
            - author_id == pid, uid is the author, OR
            - mail_notification (id, pid) exists, uid has been notified, OR
            - uid have read access to the related document if model, res_id
            - otherwise: raise
        - create: if
            - no model, no res_id, I create a private message OR
            - pid in message_follower_ids if model, res_id OR
            - mail_notification (parent_id.id, pid) exists, uid has been notified of the parent, OR
            - uid have write access on the related document if model, res_id, OR
            - otherwise: raise
        - write: if
            - author_id == pid, uid is the author, OR
            - uid has write access on the related document if model, res_id
            - otherwise: raise
        - unlink: if
            - uid has write access on the related document if model, res_id
            - otherwise: raise
    """
    def _generate_model_record_ids(msg_val, msg_ids=None):
        """ Regroup messages by related document.

            :param dict msg_val: {msg_id: {'model': .., 'res_id': .., 'author_id': ..}}
            :return dict: {model: {res_id: set(msg_ids)}}
        """
        # Fixed two defects here: the default argument was a shared mutable
        # list (msg_ids=[]), and the inner set was filled with res_id
        # instead of the message id, contradicting the documented contract.
        # Only the dict keys are read by the callers, so behavior is
        # unchanged by the second fix.
        if msg_ids is None:
            msg_ids = []
        model_record_ids = {}
        for id in msg_ids:
            if msg_val[id]['model'] and msg_val[id]['res_id']:
                model_record_ids.setdefault(msg_val[id]['model'], dict()).setdefault(msg_val[id]['res_id'], set()).add(id)
        return model_record_ids

    # Rules do not apply to administrator
    if uid == SUPERUSER_ID:
        return
    if isinstance(ids, (int, long)):
        ids = [ids]
    not_obj = self.pool.get('mail.notification')
    fol_obj = self.pool.get('mail.followers')
    partner_id = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=None)['partner_id'][0]

    # Read mail_message.ids to have their values
    message_values = dict.fromkeys(ids)
    cr.execute('SELECT DISTINCT id, model, res_id, author_id, parent_id FROM "%s" WHERE id = ANY (%%s)' % self._table, (ids,))
    for id, rmod, rid, author_id, parent_id in cr.fetchall():
        message_values[id] = {'model': rmod, 'res_id': rid, 'author_id': author_id, 'parent_id': parent_id}

    # Author condition (READ, WRITE, CREATE (private)) -> could become an ir.rule ?
    author_ids = []
    if operation == 'read' or operation == 'write':
        author_ids = [mid for mid, message in message_values.iteritems()
            if message.get('author_id') and message.get('author_id') == partner_id]
    elif operation == 'create':
        # a message without a related document is a private message
        author_ids = [mid for mid, message in message_values.iteritems()
            if not message.get('model') and not message.get('res_id')]

    # Parent condition, for create (check for received notifications for the created message parent)
    notified_ids = []
    if operation == 'create':
        parent_ids = [message.get('parent_id') for mid, message in message_values.iteritems()
            if message.get('parent_id')]
        not_ids = not_obj.search(cr, SUPERUSER_ID, [('message_id.id', 'in', parent_ids), ('partner_id', '=', partner_id)], context=context)
        not_parent_ids = [notif.message_id.id for notif in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
        notified_ids += [mid for mid, message in message_values.iteritems()
            if message.get('parent_id') in not_parent_ids]

    # Notification condition, for read (check for received notifications and create (in message_follower_ids)) -> could become an ir.rule, but not till we do not have a many2one variable field
    other_ids = set(ids).difference(set(author_ids), set(notified_ids))
    model_record_ids = _generate_model_record_ids(message_values, other_ids)
    if operation == 'read':
        not_ids = not_obj.search(cr, SUPERUSER_ID, [
            ('partner_id', '=', partner_id),
            ('message_id', 'in', ids),
        ], context=context)
        notified_ids = [notification.message_id.id for notification in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
    elif operation == 'create':
        # uid may create a message on a document he follows
        for doc_model, doc_dict in model_record_ids.items():
            fol_ids = fol_obj.search(cr, SUPERUSER_ID, [
                ('res_model', '=', doc_model),
                ('res_id', 'in', list(doc_dict.keys())),
                ('partner_id', '=', partner_id),
            ], context=context)
            fol_mids = [follower.res_id for follower in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context)]
            notified_ids += [mid for mid, message in message_values.iteritems()
                if message.get('model') == doc_model and message.get('res_id') in fol_mids]

    # CRUD: Access rights related to the document
    other_ids = other_ids.difference(set(notified_ids))
    model_record_ids = _generate_model_record_ids(message_values, other_ids)
    document_related_ids = []
    for model, doc_dict in model_record_ids.items():
        model_obj = self.pool.get(model)
        mids = model_obj.exists(cr, uid, doc_dict.keys())
        # create/write/unlink on a message all map to *write* access on the document
        if operation in ['create', 'write', 'unlink']:
            model_obj.check_access_rights(cr, uid, 'write')
            model_obj.check_access_rule(cr, uid, mids, 'write', context=context)
        else:
            model_obj.check_access_rights(cr, uid, operation)
            model_obj.check_access_rule(cr, uid, mids, operation, context=context)
        document_related_ids += [mid for mid, message in message_values.iteritems()
            if message.get('model') == model and message.get('res_id') in mids]

    # Calculate remaining ids: if not void, raise an error
    other_ids = other_ids.difference(set(document_related_ids))
    if not other_ids:
        return
    raise orm.except_orm(_('Access Denied'),
                        _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                        (self._description, operation))
def create(self, cr, uid, values, context=None):
    """ Override to generate a tracking ``message_id`` when missing, notify
        followers/recipients of the new message, and handle the
        ``default_starred`` context key (see inline comment below). """
    if context is None:
        context = {}
    default_starred = context.pop('default_starred', False)
    # Message-Id is document-based when attached to a record, 'private' otherwise
    if not values.get('message_id') and values.get('res_id') and values.get('model'):
        values['message_id'] = tools.generate_tracking_message_id('%(res_id)s-%(model)s' % values)
    elif not values.get('message_id'):
        values['message_id'] = tools.generate_tracking_message_id('private')
    newid = super(mail_message, self).create(cr, uid, values, context)
    self._notify(cr, uid, newid, context=context)
    # TDE FIXME: handle default_starred. Why not setting an inv on starred ?
    # Because starred will call set_message_starred, that looks for notifications.
    # When creating a new mail_message, it will create a notification to a message
    # that does not exist, leading to an error (key not existing). Also
    # this means unread notifications will be created, yet we can not assure
    # this is what we want.
    if default_starred:
        self.set_message_starred(cr, uid, [newid], True, context=context)
    return newid
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    """Override to explicitly enforce mail.message access rules on read.

    The ORM does not call check_access_rule for read (it directly fetches
    and applies ir.rules), so it has to be done by hand here.
    """
    self.check_access_rule(cr, uid, ids, 'read', context=context)
    return super(mail_message, self).read(cr, uid, ids, fields=fields, context=context, load=load)
def unlink(self, cr, uid, ids, context=None):
    """Delete messages, cascading to attachments that belong only to them.

    Attachments directly attached to a message (should only happen for
    mail.messages that act as parent for a standalone mail.mail record)
    would otherwise be orphaned, so they are deleted as well.
    """
    self.check_access_rule(cr, uid, ids, 'unlink', context=context)
    doomed_attachment_ids = [
        attach.id
        for message in self.browse(cr, uid, ids, context=context)
        for attach in message.attachment_ids
        if attach.res_model == self._name and attach.res_id == message.id
    ]
    if doomed_attachment_ids:
        self.pool.get('ir.attachment').unlink(cr, uid, doomed_attachment_ids, context=context)
    return super(mail_message, self).unlink(cr, uid, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate a message, resetting the fields unique to each email
    (``message_id`` and ``headers``)."""
    # keep mutating the caller-supplied dict, as the original did
    default = {} if default is None else default
    default.update(message_id=False, headers=False)
    return super(mail_message, self).copy(cr, uid, id, default=default, context=context)
#------------------------------------------------------
# Messaging API
#------------------------------------------------------
# TDE note: this code is not used currently, will be improved in a future merge, when quoted context
# will be added to email send for notifications. Currently only WIP.
MAIL_TEMPLATE = """<div>
% if message:
${display_message(message)}
% endif
% for ctx_msg in context_messages:
${display_message(ctx_msg)}
% endfor
% if add_expandable:
${display_expandable()}
% endif
${display_message(header_message)}
</div>
<%def name="display_message(message)">
<div>
Subject: ${message.subject}<br />
Body: ${message.body}
</div>
</%def>
<%def name="display_expandable()">
<div>This is an expandable.</div>
</%def>
"""
def message_quote_context(self, cr, uid, id, context=None, limit=3, add_original=False):
    """ Build the quoted-context HTML for a message, using MAIL_TEMPLATE.

        1. message.parent_id = False: new thread, no quote_context
        2. get the lasts messages in the thread before message
        3. get the message header
        4. add an expandable between them

        :param int limit: maximum number of previous thread messages quoted
        :param bool add_original: also render the message itself
        :return string: html quote ('' for new threads or on rendering error)
    """
    add_expandable = False

    message = self.browse(cr, uid, id, context=context)
    if not message.parent_id:
        return ''
    context_ids = self.search(cr, uid, [
        ('parent_id', '=', message.parent_id.id),
        ('id', '<', message.id),
        ], limit=limit, context=context)

    if len(context_ids) >= limit:
        # more messages exist than we display: drop one and show an expandable
        add_expandable = True
        context_ids = context_ids[0:-1]

    # the thread root is rendered as the header message
    context_ids.append(message.parent_id.id)
    context_messages = self.browse(cr, uid, context_ids, context=context)
    header_message = context_messages.pop()

    try:
        if not add_original:
            message = False
        result = MakoTemplate(self.MAIL_TEMPLATE).render_unicode(message=message,
                                                        context_messages=context_messages,
                                                        header_message=header_message,
                                                        add_expandable=add_expandable,
                                                        # context kw would clash with mako internals
                                                        ctx=context,
                                                        format_exceptions=True)
        result = result.strip()
        return result
    except Exception:
        _logger.exception("failed to render mako template for quoting message")
        return ''
    # NOTE: the original trailing `return result` was unreachable (both the
    # try and the except branches return) and has been removed.
def _notify(self, cr, uid, newid, context=None):
    """ Add the related record followers to the destination partner_ids if is not a private message.
        Call mail_notification.notify to manage the email sending
    """
    notification_obj = self.pool.get('mail.notification')
    message = self.browse(cr, uid, newid, context=context)
    partners_to_notify = set([])
    # message has no subtype_id: pure log message -> no partners, no one notified
    if not message.subtype_id:
        return True
    # all followers of the mail.message document have to be added as partners and notified
    if message.model and message.res_id:
        fol_obj = self.pool.get("mail.followers")
        # browse as SUPERUSER because rules could restrict the search results
        fol_ids = fol_obj.search(cr, SUPERUSER_ID, [
            ('res_model', '=', message.model),
            ('res_id', '=', message.res_id),
            ('subtype_ids', 'in', message.subtype_id.id)
            ], context=context)
        partners_to_notify |= set(fo.partner_id for fo in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context))
    # remove me from notified partners, unless the message is written on my own wall
    if message.author_id and message.model == "res.partner" and message.res_id == message.author_id.id:
        partners_to_notify |= set([message.author_id])
    elif message.author_id:
        partners_to_notify -= set([message.author_id])
    # all partner_ids of the mail.message have to be notified regardless of the above (even the author if explicitly added!)
    if message.partner_ids:
        partners_to_notify |= set(message.partner_ids)
    # notify
    if partners_to_notify:
        notification_obj._notify(cr, uid, newid, partners_to_notify=[p.id for p in partners_to_notify], context=context)
    # refresh the browse record: _notify just created notifications read below
    message.refresh()
    # An error appear when a user receive a notification without notifying
    # the parent message -> add a read notification for the parent
    if message.parent_id:
        # all notified_partner_ids of the mail.message have to be notified for the parented messages
        partners_to_parent_notify = set(message.notified_partner_ids).difference(message.parent_id.notified_partner_ids)
        for partner in partners_to_parent_notify:
            notification_obj.create(cr, uid, {
                'message_id': message.parent_id.id,
                'partner_id': partner.id,
                'read': True,
                }, context=context)
#------------------------------------------------------
# Tools
#------------------------------------------------------
def check_partners_email(self, cr, uid, partner_ids, context=None):
    """Verify that the selected partner_ids all have an email address set.

    :return dict: {} when everything is fine, otherwise a client 'warning'
        dict listing the partners without an email address
    """
    partners_missing_email = [
        partner
        for partner in self.pool.get('res.partner').browse(cr, uid, partner_ids, context=context)
        if not partner.email
    ]
    if not partners_missing_email:
        return {}
    warning_msg = _('The following partners chosen as recipients for the email have no email address linked :')
    for partner in partners_missing_email:
        warning_msg += '\n- %s' % (partner.name)
    return {'warning': {
                'title': _('Partners email addresses not found'),
                'message': warning_msg,
                }
            }
[MERGE] [FIX] mail: now starring messages also sets them as unread.
bzr revid: tde@openerp.com-20130327120019-4qs0lz1igketjtd8
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
from openerp import tools
from email.header import decode_header
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.tools import html_email_clean
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
try:
    from mako.template import Template as MakoTemplate
except ImportError:
    # Message quoting (message_quote_context) degrades gracefully when mako
    # is missing. The original warning was copy-pasted from the payment
    # acquirer module and wrongly referenced it; fixed to name this module.
    _logger.warning("mail: mako templates not available, message quoting will not work!")
""" Some tools for parsing / creating email fields """
def decode(text):
    """Return the unicode conversion of the given encoded smtp header text."""
    if not text:
        # mirror the original behavior: falsy input falls through to None
        return None
    decoded_parts = decode_header(text.replace('\r', ''))
    return ''.join([tools.ustr(raw, charset) for raw, charset in decoded_parts])
class mail_message(osv.Model):
    """ Messages model: system notification (replacing res.log notifications),
        comments (OpenChatter discussion) and incoming emails. """
    _name = 'mail.message'
    _description = 'Message'
    _inherit = ['ir.needaction_mixin']
    _order = 'id desc'  # newest messages first
    _rec_name = 'record_name'

    # default number of messages fetched by one message_read() call
    _message_read_limit = 30
    # fields serialized for the Chatter widget by message_read()
    _message_read_fields = ['id', 'parent_id', 'model', 'res_id', 'body', 'subject', 'date', 'to_read', 'email_from',
        'type', 'vote_user_ids', 'attachment_ids', 'author_id', 'partner_ids', 'record_name']
    # max length of the shortened related-document name (see _shorten_name)
    _message_record_name_length = 18
    _message_read_more_limit = 1024
def default_get(self, cr, uid, fields, context=None):
    """Drop a `default_type` context value that is not a valid message type.

    Menu action contexts (e.g. for invoices) may leak a `default_type`
    meant for another model; neutralize it before the standard defaults.
    """
    ctx = context or {}
    leaked_type = ctx.get('default_type')
    if leaked_type and leaked_type not in self._columns['type'].selection:
        context = dict(context, default_type=None)
    return super(mail_message, self).default_get(cr, uid, fields, context=context)
def _shorten_name(self, name):
if len(name) <= (self._message_record_name_length + 3):
return name
return name[:self._message_record_name_length] + '...'
def _get_record_name(self, cr, uid, ids, name, arg, context=None):
    """ Return the related document name, using name_get. It is done using
        SUPERUSER_ID, to be sure to have the record name correctly stored. """
    # TDE note: regroup by model/ids, to have less queries to perform
    result = dict.fromkeys(ids, False)
    for message in self.read(cr, uid, ids, ['model', 'res_id'], context=context):
        # skip messages without a related document, or whose model is no
        # longer present in the registry
        if not message.get('model') or not message.get('res_id') or not self.pool.get(message['model']):
            continue
        result[message['id']] = self.pool.get(message['model']).name_get(cr, SUPERUSER_ID, [message['res_id']], context=context)[0][1]
    return result
def _get_to_read(self, cr, uid, ids, name, arg, context=None):
    """Compute whether each message is unread by the current user.

    A message is 'to read' when an unread mail.notification exists for the
    current user's partner.
    """
    res = dict.fromkeys(ids, False)
    user_record = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)
    partner_id = user_record['partner_id'][0]
    notif_obj = self.pool.get('mail.notification')
    unread_domain = [
        ('partner_id', 'in', [partner_id]),
        ('message_id', 'in', ids),
        ('read', '=', False),
    ]
    notif_ids = notif_obj.search(cr, uid, unread_domain, context=context)
    for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
        res[notif.message_id.id] = True
    return res
def _search_to_read(self, cr, uid, obj, name, domain, context=None):
""" Search for messages to read by the current user. Condition is
inversed because we search unread message on a read column. """
return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.read', '=', not domain[0][2])]
def _get_starred(self, cr, uid, ids, name, arg, context=None):
    """ Compute if the message is starred by the current user.
        (Original docstring said 'unread': copy-paste from _get_to_read.) """
    res = dict((id, False) for id in ids)
    partner_id = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
    notif_obj = self.pool.get('mail.notification')
    # a message is starred when a starred notification exists for the
    # current user's partner
    notif_ids = notif_obj.search(cr, uid, [
        ('partner_id', 'in', [partner_id]),
        ('message_id', 'in', ids),
        ('starred', '=', True),
    ], context=context)
    for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
        res[notif.message_id.id] = True
    return res
def _search_starred(self, cr, uid, obj, name, domain, context=None):
    """ Search for starred messages of the current user.
        Unlike _search_to_read, the searched value maps directly onto the
        stored ``starred`` column, so no negation is needed here.
        (Original docstring was copy-pasted from _search_to_read.) """
    return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.starred', '=', domain[0][2])]
def name_get(self, cr, uid, ids, context=None):
    """ Display name is '<subject>: <body>', shortened (see _shorten_name). """
    # name_get may receive int id instead of an id list
    if isinstance(ids, (int, long)):
        ids = [ids]
    res = []
    for message in self.browse(cr, uid, ids, context=context):
        name = '%s: %s' % (message.subject or '', message.body or '')
        # lstrip drops the leading ': ' left when the subject is empty
        res.append((message.id, self._shorten_name(name.lstrip(' :'))))
    return res
_columns = {
    # --- message content and classification ---
    'type': fields.selection([
        ('email', 'Email'),
        ('comment', 'Comment'),
        ('notification', 'System notification'),
        ], 'Type',
        help="Message type: email for email message, notification for system "\
             "message, comment for other messages such as user replies"),
    'email_from': fields.char('From',
        help="Email address of the sender. This field is set when no matching partner is found for incoming emails."),
    'author_id': fields.many2one('res.partner', 'Author', select=1,
        ondelete='set null',
        help="Author of the message. If not set, email_from may hold an email address that did not match any partner."),
    # --- recipients and notifications ---
    'partner_ids': fields.many2many('res.partner', string='Recipients'),
    # notified_partner_ids shares the mail_notification table (see
    # notification_ids below)
    'notified_partner_ids': fields.many2many('res.partner', 'mail_notification',
        'message_id', 'partner_id', 'Notified partners',
        help='Partners that have a notification pushing this message in their mailboxes'),
    'attachment_ids': fields.many2many('ir.attachment', 'message_attachment_rel',
        'message_id', 'attachment_id', 'Attachments'),
    # --- threading ---
    'parent_id': fields.many2one('mail.message', 'Parent Message', select=True,
        ondelete='set null', help="Initial thread message."),
    'child_ids': fields.one2many('mail.message', 'parent_id', 'Child Messages'),
    # --- related document ---
    'model': fields.char('Related Document Model', size=128, select=1),
    'res_id': fields.integer('Related Document ID', select=1),
    'record_name': fields.function(_get_record_name, type='char',
        store=True, string='Message Record Name',
        help="Name get of the related document."),
    'notification_ids': fields.one2many('mail.notification', 'message_id',
        string='Notifications', auto_join=True,
        help='Technical field holding the message notifications. Use notified_partner_ids to access notified partners.'),
    'subject': fields.char('Subject'),
    'date': fields.datetime('Date'),
    'message_id': fields.char('Message-Id', help='Message unique identifier', select=1, readonly=1),
    'body': fields.html('Contents', help='Automatically sanitized HTML contents'),
    # --- per-user computed state (backed by mail.notification) ---
    'to_read': fields.function(_get_to_read, fnct_search=_search_to_read,
        type='boolean', string='To read',
        help='Current user has an unread notification linked to this message'),
    'starred': fields.function(_get_starred, fnct_search=_search_starred,
        type='boolean', string='Starred',
        help='Current user has a starred notification linked to this message'),
    'subtype_id': fields.many2one('mail.message.subtype', 'Subtype',
        ondelete='set null', select=1,),
    'vote_user_ids': fields.many2many('res.users', 'mail_vote',
        'message_id', 'user_id', string='Votes',
        help='Users that voted for this message'),
}
def _needaction_domain_get(self, cr, uid, context=None):
return [('to_read', '=', True)]
def _get_default_author(self, cr, uid, context=None):
    """ Default author is the partner of the current user. """
    return self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]

_defaults = {
    'type': 'email',
    'date': lambda *a: fields.datetime.now(),
    'author_id': lambda self, cr, uid, ctx={}: self._get_default_author(cr, uid, ctx),
    'body': '',
}
#------------------------------------------------------
# Vote/Like
#------------------------------------------------------
def vote_toggle(self, cr, uid, ids, context=None):
    ''' Toggles vote. Performed using read to avoid access rights issues.
        Done as SUPERUSER_ID because uid may vote for a message he cannot modify.

        :return bool: new vote state of the *last* processed message
            (False when ids is empty)
    '''
    # Fix: new_has_voted was previously unbound, so an empty `ids` raised
    # NameError at the return statement. Initialize it to False instead.
    new_has_voted = False
    for message in self.read(cr, uid, ids, ['vote_user_ids'], context=context):
        new_has_voted = not (uid in message.get('vote_user_ids'))
        if new_has_voted:
            # (4, id): link uid to the vote m2m
            self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(4, uid)]}, context=context)
        else:
            # (3, id): unlink uid from the vote m2m
            self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(3, uid)]}, context=context)
    return new_has_voted or False
#------------------------------------------------------
# download an attachment
#------------------------------------------------------
def download_attachment(self, cr, uid, id_message, attachment_id, context=None):
    """Return the content of an attachment linked to the given message.

    :return dict: {'base64': data, 'filename': name} when the attachment
        belongs to the message and has data, False otherwise
    """
    message = self.browse(cr, uid, id_message, context=context)
    linked_ids = set(attachment.id for attachment in message.attachment_ids)
    if attachment_id not in linked_ids:
        return False
    # read as SUPERUSER: being allowed to read the message implies being
    # allowed to fetch its attachments
    attachment = self.pool.get('ir.attachment').browse(cr, SUPERUSER_ID, attachment_id, context=context)
    if attachment.datas and attachment.datas_fname:
        return {
            'base64': attachment.datas,
            'filename': attachment.datas_fname,
        }
    return False
#------------------------------------------------------
# Notification API
#------------------------------------------------------
def set_message_read(self, cr, uid, msg_ids, read, create_missing=True, context=None):
    """ Set messages as (un)read. Technically, the notifications related
        to uid are set to (un)read. If for some msg_ids there are missing
        notifications (i.e. due to load more or thread parent fetching),
        they are created.

        :param bool read: set notification as (un)read
        :param bool create_missing: create notifications for missing entries
            (i.e. when acting on displayed messages not notified)
    """
    notification_obj = self.pool.get('mail.notification')
    user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
    domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
    if not create_missing:
        # only notifications whose state actually changes
        domain += [('read', '=', not read)]
    notif_ids = notification_obj.search(cr, uid, domain, context=context)

    # all message have notifications: already set them as (un)read
    if len(notif_ids) == len(msg_ids) or not create_missing:
        return notification_obj.write(cr, uid, notif_ids, {'read': read}, context=context)

    # some messages do not have notifications: find which one, create notification, update read status
    notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
    to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
    for msg_id in to_create_msg_ids:
        # created directly with the final read state: no further write needed
        notification_obj.create(cr, uid, {'partner_id': user_pid, 'read': read, 'message_id': msg_id}, context=context)
    return notification_obj.write(cr, uid, notif_ids, {'read': read}, context=context)
def set_message_starred(self, cr, uid, msg_ids, starred, create_missing=True, context=None):
    """ Set messages as (un)starred. Technically, the notifications related
        to uid are set to (un)starred. Starring a message also marks it as
        unread (values['read'] = False below).

        :param bool starred: set notification as (un)starred
        :param bool create_missing: create notifications for missing entries
            (i.e. when acting on displayed messages not notified)
        :return bool: the new starred state
    """
    notification_obj = self.pool.get('mail.notification')
    user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
    domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
    if not create_missing:
        # only notifications whose state actually changes
        domain += [('starred', '=', not starred)]
    values = {
        'starred': starred
    }
    if starred:
        # starring pushes the message back in the unread inbox
        values['read'] = False

    notif_ids = notification_obj.search(cr, uid, domain, context=context)

    # all message have notifications: already set them as (un)starred
    if len(notif_ids) == len(msg_ids) or not create_missing:
        notification_obj.write(cr, uid, notif_ids, values, context=context)
        return starred

    # some messages do not have notifications: find which one, create notification, update starred status
    notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
    to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
    for msg_id in to_create_msg_ids:
        # created directly with the final values: no further write needed
        notification_obj.create(cr, uid, dict(values, partner_id=user_pid, message_id=msg_id), context=context)
    notification_obj.write(cr, uid, notif_ids, values, context=context)
    return starred
#------------------------------------------------------
# Message loading for web interface
#------------------------------------------------------
def _message_read_dict_postprocess(self, cr, uid, messages, message_tree, context=None):
    """ Post-processing on values given by message_read. This method will
        handle partners in batch to avoid doing numerous queries.

        :param list messages: list of message, as get_dict result
        :param dict message_tree: {[msg.id]: msg browse record}
        :return bool: True
    """
    res_partner_obj = self.pool.get('res.partner')
    ir_attachment_obj = self.pool.get('ir.attachment')
    # NOTE(review): context=None is passed here instead of the incoming
    # ``context`` -- presumably deliberate for a plain partner_id lookup,
    # but confirm it is not a typo
    pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=None)['partner_id'][0]
    # 1. Aggregate partners (author_id and partner_ids) and attachments
    partner_ids = set()
    attachment_ids = set()
    for key, message in message_tree.iteritems():
        if message.author_id:
            partner_ids |= set([message.author_id.id])
        if message.notified_partner_ids:
            partner_ids |= set([partner.id for partner in message.notified_partner_ids])
        if message.attachment_ids:
            attachment_ids |= set([attachment.id for attachment in message.attachment_ids])
    # Read partners as SUPERUSER -> display the names like classic m2o even if no access
    partners = res_partner_obj.name_get(cr, SUPERUSER_ID, list(partner_ids), context=context)
    # partner_tree maps partner id -> (id, display_name) name_get tuple
    partner_tree = dict((partner[0], partner) for partner in partners)
    # 2. Attachments as SUPERUSER, because could receive msg and attachments for doc uid cannot see
    attachments = ir_attachment_obj.read(cr, SUPERUSER_ID, list(attachment_ids), ['id', 'datas_fname', 'name'], context=context)
    attachments_tree = dict((attachment['id'], {'id': attachment['id'], 'filename': attachment['datas_fname'], 'name': attachment['name']}) for attachment in attachments)
    # 3. Update message dictionaries
    for message_dict in messages:
        message_id = message_dict.get('id')
        message = message_tree[message_id]
        if message.author_id:
            author = partner_tree[message.author_id.id]
        else:
            # author is not a known partner: fall back on the raw email_from
            author = (0, message.email_from)
        partner_ids = []
        for partner in message.notified_partner_ids:
            if partner.id in partner_tree:
                partner_ids.append(partner_tree[partner.id])
        attachment_ids = []
        for attachment in message.attachment_ids:
            if attachment.id in attachments_tree:
                attachment_ids.append(attachments_tree[attachment.id])
        message_dict.update({
            'is_author': pid == author[0],
            'author_id': author,
            'partner_ids': partner_ids,
            'attachment_ids': attachment_ids,
        })
    return True
def _message_read_dict(self, cr, uid, message, parent_id=False, context=None):
    """ Return a dict representation of the message. This representation is
        used in the JS client code, to display the messages. Partners and
        attachments related stuff will be done in post-processing in batch.

        :param message: mail.message browse record
        :param parent_id: id of the thread header this message belongs to
        :return dict: message values for the Chatter widget
    """
    # private message: no model, no res_id
    is_private = False
    if not message.model or not message.res_id:
        is_private = True
    # votes and favorites: res.users ids, no prefetching should be done
    vote_nb = len(message.vote_user_ids)
    has_voted = uid in [user.id for user in message.vote_user_ids]
    try:
        body_html = html_email_clean(message.body)
    except Exception:
        body_html = '<p><b>Encoding Error : </b><br/>Unable to convert this message (id: %s).</p>' % message.id
        # Fixed: previously this logged the Exception *class* itself
        # (``_logger.exception(Exception)``). ``exception()`` already attaches
        # the active traceback; pass a meaningful message instead.
        _logger.exception('Unable to clean the body of mail.message %s', message.id)
    return {'id': message.id,
            'type': message.type,
            'body': body_html,
            'model': message.model,
            'res_id': message.res_id,
            'record_name': message.record_name,
            'subject': message.subject,
            'date': message.date,
            'to_read': message.to_read,
            'parent_id': parent_id,
            'is_private': is_private,
            'author_id': False,
            'is_author': False,
            'partner_ids': [],
            'vote_nb': vote_nb,
            'has_voted': has_voted,
            'is_favorite': message.starred,
            'attachment_ids': [],
        }
def _message_read_add_expandables(self, cr, uid, messages, message_tree, parent_tree,
        message_unload_ids=None, thread_level=0, domain=None, parent_id=False, context=None):
    """ Create expandables for message_read, to load new messages.
        1. get the expandable for new threads
            if display is flat (thread_level == 0):
                fetch message_ids < min(already displayed ids), because we
                want a flat display, ordered by id
            else:
                fetch message_ids that are not childs of already displayed
                messages
        2. get the expandables for new messages inside threads if display
            is not flat
            for each thread header, search for its childs
            for each hole in the child list based on message displayed,
            create an expandable

        :param list messages: list of message structure for the Chatter
            widget to which expandables are added
        :param dict message_tree: dict [id]: browse record of this message
        :param dict parent_tree: dict [parent_id]: [child_ids]
        :param list message_unload_ids: list of message_ids we do not want
            to load
        :return bool: True
    """
    # Fixed: mutable default arguments ([] shared across calls) replaced by
    # None sentinels; the empty-list semantics are preserved.
    if message_unload_ids is None:
        message_unload_ids = []
    if domain is None:
        domain = []

    def _get_expandable(domain, message_nb, parent_id, max_limit):
        # build one 'expandable' pseudo-message holding the 'read more' data
        return {
            'domain': domain,
            'nb_messages': message_nb,
            'type': 'expandable',
            'parent_id': parent_id,
            'max_limit': max_limit,
        }

    if not messages:
        return True
    message_ids = sorted(message_tree.keys())

    # 1. get the expandable for new threads
    if thread_level == 0:
        exp_domain = domain + [('id', '<', min(message_unload_ids + message_ids))]
    else:
        exp_domain = domain + ['!', ('id', 'child_of', message_unload_ids + parent_tree.keys())]
    ids = self.search(cr, uid, exp_domain, context=context, limit=1)
    if ids:
        # inside a thread: prepend
        if parent_id:
            messages.insert(0, _get_expandable(exp_domain, -1, parent_id, True))
        # new threads: append
        else:
            messages.append(_get_expandable(exp_domain, -1, parent_id, True))

    # 2. get the expandables for new messages inside threads if display is not flat
    if thread_level == 0:
        return True
    for message_id in message_ids:
        message = message_tree[message_id]
        # generate only for thread header messages (TDE note: parent_id may be False is uid cannot see parent_id, seems ok)
        if message.parent_id:
            continue
        # check there are message for expandable
        child_ids = set([child.id for child in message.child_ids]) - set(message_unload_ids)
        child_ids = sorted(list(child_ids), reverse=True)
        if not child_ids:
            continue
        # make groups of unread messages
        id_min, id_max, nb = max(child_ids), 0, 0
        for child_id in child_ids:
            if not child_id in message_ids:
                # child not displayed: extend the current hole
                nb += 1
                if id_min > child_id:
                    id_min = child_id
                if id_max < child_id:
                    id_max = child_id
            elif nb > 0:
                # displayed child ends a hole: emit an expandable right after it
                exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
                idx = [msg.get('id') for msg in messages].index(child_id) + 1
                # messages.append(_get_expandable(exp_domain, nb, message_id, False))
                messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))
                id_min, id_max, nb = max(child_ids), 0, 0
            else:
                id_min, id_max, nb = max(child_ids), 0, 0
        if nb > 0:
            # trailing hole: expandable goes right after the thread header
            exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
            idx = [msg.get('id') for msg in messages].index(message_id) + 1
            # messages.append(_get_expandable(exp_domain, nb, message_id, id_min))
            messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))

    return True
def message_read(self, cr, uid, ids=None, domain=None, message_unload_ids=None,
        thread_level=0, context=None, parent_id=False, limit=None):
    """ Read messages from mail.message, and get back a list of structured
        messages to be displayed as discussion threads. If IDs is set,
        fetch these records. Otherwise use the domain to fetch messages.
        After having fetch messages, their ancestors will be added to obtain
        well formed threads, if uid has access to them.

        After reading the messages, expandable messages are added in the
        message list (see ``_message_read_add_expandables``). It consists
        in messages holding the 'read more' data: number of messages to
        read, domain to apply.

        :param list ids: optional IDs to fetch
        :param list domain: optional domain for searching ids if ids not set
        :param list message_unload_ids: optional ids we do not want to fetch,
            because i.e. they are already displayed somewhere
        :param int parent_id: context of parent_id
            - if parent_id reached when adding ancestors, stop going further
              in the ancestor search
            - if set in flat mode, ancestor_id is set to parent_id
        :param int limit: number of messages to fetch, before adding the
            ancestors and expandables
        :return list: list of message structure for the Chatter widget
    """
    assert thread_level in [0, 1], 'message_read() thread_level should be 0 (flat) or 1 (1 level of thread); given %s.' % thread_level
    domain = domain if domain is not None else []
    message_unload_ids = message_unload_ids if message_unload_ids is not None else []
    if message_unload_ids:
        # never re-fetch what the client already displays
        domain += [('id', 'not in', message_unload_ids)]
    # NOTE(review): notification_obj appears unused in this method
    notification_obj = self.pool.get('mail.notification')
    limit = limit or self._message_read_limit
    message_tree = {}
    message_list = []
    parent_tree = {}

    # no specific IDS given: fetch messages according to the domain, add their parents if uid has access to
    if ids is None:
        ids = self.search(cr, uid, domain, context=context, limit=limit)

    # fetch parent if threaded, sort messages
    for message in self.browse(cr, uid, ids, context=context):
        message_id = message.id
        if message_id in message_tree:
            continue
        message_tree[message_id] = message

        # find parent_id
        if thread_level == 0:
            # flat display: everything is grouped under the given parent_id
            tree_parent_id = parent_id
        else:
            tree_parent_id = message_id
            parent = message
            # climb to the thread root (or stop once parent_id is reached),
            # registering every ancestor met on the way in message_tree
            while parent.parent_id and parent.parent_id.id != parent_id:
                parent = parent.parent_id
                tree_parent_id = parent.id
            if not parent.id in message_tree:
                message_tree[parent.id] = parent
        # newest messages first
        parent_tree.setdefault(tree_parent_id, [])
        if tree_parent_id != message_id:
            parent_tree[tree_parent_id].append(self._message_read_dict(cr, uid, message_tree[message_id], parent_id=tree_parent_id, context=context))

    if thread_level:
        # sort children by id and prepend the thread header message itself
        for key, message_id_list in parent_tree.iteritems():
            message_id_list.sort(key=lambda item: item['id'])
            message_id_list.insert(0, self._message_read_dict(cr, uid, message_tree[key], context=context))

    # create final ordered message_list based on parent_tree
    parent_list = parent_tree.items()
    # threads ordered newest-first, by the highest message id they contain
    parent_list = sorted(parent_list, key=lambda item: max([msg.get('id') for msg in item[1]]) if item[1] else item[0], reverse=True)
    message_list = [message for (key, msg_list) in parent_list for message in msg_list]

    # get the child expandable messages for the tree
    self._message_read_dict_postprocess(cr, uid, message_list, message_tree, context=context)
    self._message_read_add_expandables(cr, uid, message_list, message_tree, parent_tree,
        thread_level=thread_level, message_unload_ids=message_unload_ids, domain=domain, parent_id=parent_id, context=context)
    return message_list
#------------------------------------------------------
# mail_message internals
#------------------------------------------------------
def init(self, cr):
    """ Ensure the composite (model, res_id) index exists: most message
        lookups filter on the related document, and neither column is
        selective enough on its own. """
    cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'mail_message_model_res_id_idx'""")
    already_there = cr.fetchone()
    if not already_there:
        cr.execute("""CREATE INDEX mail_message_model_res_id_idx ON mail_message (model, res_id)""")
def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
    """ Return the message ids attached to the ``doc_model`` documents that
        ``uid`` may read (the search obeys record rules for uid). """
    readable_doc_ids = self.pool.get(doc_model).search(cr, uid, [('id', 'in', doc_dict.keys())], context=context)
    allowed = set()
    for doc_id in readable_doc_ids:
        allowed.update(doc_dict[doc_id])
    return allowed
def _find_allowed_doc_ids(self, cr, uid, model_ids, context=None):
    """ Return the subset of message ids whose related document is readable
        by ``uid``: first check the model-level ACL, then record rules per
        model via ``_find_allowed_model_wise``. """
    access_pool = self.pool.get('ir.model.access')
    allowed = set()
    for model_name, doc_dict in model_ids.iteritems():
        # raise_exception=False: silently skip models uid cannot read at all
        if access_pool.check(cr, uid, model_name, 'read', False):
            allowed |= self._find_allowed_model_wise(cr, uid, model_name, doc_dict, context=context)
    return allowed
def _search(self, cr, uid, args, offset=0, limit=None, order=None,
        context=None, count=False, access_rights_uid=None):
    """ Override that adds specific access rights of mail.message, to remove
        ids uid could not see according to our custom rules. Please refer
        to check_access_rule for more details about those rules.

        After having received ids of a classic search, keep only:
        - if author_id == pid, uid is the author, OR
        - a notification (id, pid) exists, uid has been notified, OR
        - uid have read access to the related document is model, res_id
        - otherwise: remove the id
    """
    # Rules do not apply to administrator
    if uid == SUPERUSER_ID:
        return super(mail_message, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
            context=context, count=count, access_rights_uid=access_rights_uid)
    # Perform a super with count as False, to have the ids, not a counter
    ids = super(mail_message, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
        context=context, count=False, access_rights_uid=access_rights_uid)
    if not ids and count:
        return 0
    elif not ids:
        return ids

    # partner id of the current user, used by all three conditions below
    pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'])['partner_id'][0]
    author_ids, partner_ids, allowed_ids = set([]), set([]), set([])
    model_ids = {}

    messages = super(mail_message, self).read(cr, uid, ids, ['author_id', 'model', 'res_id', 'notified_partner_ids'], context=context)
    for message in messages:
        if message.get('author_id') and message.get('author_id')[0] == pid:
            # uid is the author
            author_ids.add(message.get('id'))
        elif pid in message.get('notified_partner_ids'):
            # uid has been notified
            partner_ids.add(message.get('id'))
        elif message.get('model') and message.get('res_id'):
            # defer: readability of the related document decides below
            model_ids.setdefault(message.get('model'), {}).setdefault(message.get('res_id'), set()).add(message.get('id'))

    allowed_ids = self._find_allowed_doc_ids(cr, uid, model_ids, context=context)
    final_ids = author_ids | partner_ids | allowed_ids

    if count:
        return len(final_ids)
    else:
        # re-construct a list based on ids, because set did not keep the original order
        id_list = [id for id in ids if id in final_ids]
        return id_list
def check_access_rule(self, cr, uid, ids, operation, context=None):
    """ Access rules of mail.message:
        - read: if
            - author_id == pid, uid is the author, OR
            - mail_notification (id, pid) exists, uid has been notified, OR
            - uid have read access to the related document if model, res_id
            - otherwise: raise
        - create: if
            - no model, no res_id, I create a private message OR
            - pid in message_follower_ids if model, res_id OR
            - mail_notification (parent_id.id, pid) exists, uid has been notified of the parent, OR
            - uid have write access on the related document if model, res_id, OR
            - otherwise: raise
        - write: if
            - author_id == pid, uid is the author, OR
            - uid has write access on the related document if model, res_id
            - otherwise: raise
        - unlink: if
            - uid has write access on the related document if model, res_id
            - otherwise: raise
    """
    def _generate_model_record_ids(msg_val, msg_ids=None):
        """ Build {model_name: {res_id: set(message ids)}} for the messages
            that are related to a document.

            :param dict msg_val: {msg_id: {'model': .., 'res_id': .., ..}}
            :param msg_ids: iterable of message ids to take into account
        """
        # None sentinel instead of a mutable [] default (shared across calls)
        if msg_ids is None:
            msg_ids = []
        model_record_ids = {}
        for id in msg_ids:
            if msg_val[id]['model'] and msg_val[id]['res_id']:
                # Fixed: add the message id itself; previously res_id was
                # added, contradicting the mapping documented above (callers
                # only use the keys, so the fix cannot change their behavior)
                model_record_ids.setdefault(msg_val[id]['model'], dict()).setdefault(msg_val[id]['res_id'], set()).add(id)
        return model_record_ids

    if uid == SUPERUSER_ID:
        # rules do not apply to the administrator
        return
    if isinstance(ids, (int, long)):
        ids = [ids]
    not_obj = self.pool.get('mail.notification')
    fol_obj = self.pool.get('mail.followers')
    partner_id = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=None)['partner_id'][0]

    # Read mail_message.ids to have their values
    message_values = dict.fromkeys(ids)
    cr.execute('SELECT DISTINCT id, model, res_id, author_id, parent_id FROM "%s" WHERE id = ANY (%%s)' % self._table, (ids,))
    for id, rmod, rid, author_id, parent_id in cr.fetchall():
        message_values[id] = {'model': rmod, 'res_id': rid, 'author_id': author_id, 'parent_id': parent_id}

    # Author condition (READ, WRITE, CREATE (private)) -> could become an ir.rule ?
    author_ids = []
    if operation == 'read' or operation == 'write':
        author_ids = [mid for mid, message in message_values.iteritems()
            if message.get('author_id') and message.get('author_id') == partner_id]
    elif operation == 'create':
        author_ids = [mid for mid, message in message_values.iteritems()
            if not message.get('model') and not message.get('res_id')]

    # Parent condition, for create (check for received notifications for the created message parent)
    notified_ids = []
    if operation == 'create':
        parent_ids = [message.get('parent_id') for mid, message in message_values.iteritems()
            if message.get('parent_id')]
        not_ids = not_obj.search(cr, SUPERUSER_ID, [('message_id.id', 'in', parent_ids), ('partner_id', '=', partner_id)], context=context)
        not_parent_ids = [notif.message_id.id for notif in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
        notified_ids += [mid for mid, message in message_values.iteritems()
            if message.get('parent_id') in not_parent_ids]

    # Notification condition, for read (check for received notifications and create (in message_follower_ids)) -> could become an ir.rule, but not till we do not have a many2one variable field
    other_ids = set(ids).difference(set(author_ids), set(notified_ids))
    model_record_ids = _generate_model_record_ids(message_values, other_ids)
    if operation == 'read':
        not_ids = not_obj.search(cr, SUPERUSER_ID, [
            ('partner_id', '=', partner_id),
            ('message_id', 'in', ids),
        ], context=context)
        notified_ids = [notification.message_id.id for notification in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
    elif operation == 'create':
        for doc_model, doc_dict in model_record_ids.items():
            fol_ids = fol_obj.search(cr, SUPERUSER_ID, [
                ('res_model', '=', doc_model),
                ('res_id', 'in', list(doc_dict.keys())),
                ('partner_id', '=', partner_id),
            ], context=context)
            fol_mids = [follower.res_id for follower in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context)]
            notified_ids += [mid for mid, message in message_values.iteritems()
                if message.get('model') == doc_model and message.get('res_id') in fol_mids]

    # CRUD: Access rights related to the document
    other_ids = other_ids.difference(set(notified_ids))
    model_record_ids = _generate_model_record_ids(message_values, other_ids)
    document_related_ids = []
    for model, doc_dict in model_record_ids.items():
        model_obj = self.pool.get(model)
        mids = model_obj.exists(cr, uid, doc_dict.keys())
        if operation in ['create', 'write', 'unlink']:
            # any modification of a message requires write access on the document
            model_obj.check_access_rights(cr, uid, 'write')
            model_obj.check_access_rule(cr, uid, mids, 'write', context=context)
        else:
            model_obj.check_access_rights(cr, uid, operation)
            model_obj.check_access_rule(cr, uid, mids, operation, context=context)
        document_related_ids += [mid for mid, message in message_values.iteritems()
            if message.get('model') == model and message.get('res_id') in mids]

    # Calculate remaining ids: if not void, raise an error
    other_ids = other_ids.difference(set(document_related_ids))
    if not other_ids:
        return
    raise orm.except_orm(_('Access Denied'),
        _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
        (self._description, operation))
def create(self, cr, uid, values, context=None):
    """ Override of create: generate a tracking message_id when none is
        provided, then trigger notifications for the new message. """
    if context is None:
        context = {}
    # pop (not get): default_starred must not leak into the super() create
    default_starred = context.pop('default_starred', False)
    if not values.get('message_id'):
        if values.get('res_id') and values.get('model'):
            values['message_id'] = tools.generate_tracking_message_id('%(res_id)s-%(model)s' % values)
        else:
            # message not attached to a document: private tracking id
            values['message_id'] = tools.generate_tracking_message_id('private')
    newid = super(mail_message, self).create(cr, uid, values, context)
    self._notify(cr, uid, newid, context=context)
    # TDE FIXME: handle default_starred. Why not setting an inv on starred ?
    # Because starred will call set_message_starred, that looks for notifications.
    # When creating a new mail_message, it will create a notification to a message
    # that does not exist, leading to an error (key not existing). Also this
    # this means unread notifications will be created, yet we can not assure
    # this is what we want.
    if default_starred:
        self.set_message_starred(cr, uid, [newid], True, context=context)
    return newid
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    """ Override to explicitely call check_access_rule, that is not called
        by the ORM. It instead directly fetches ir.rules and apply them. """
    self.check_access_rule(cr, uid, ids, 'read', context=context)
    return super(mail_message, self).read(cr, uid, ids, fields=fields, context=context, load=load)
def unlink(self, cr, uid, ids, context=None):
    """ Override of unlink: cascade-delete attachments that are directly
        attached to the deleted messages (should only happen for
        mail.messages that act as parent for a standalone mail.mail
        record). """
    self.check_access_rule(cr, uid, ids, 'unlink', context=context)
    doomed_attachment_ids = [
        attachment.id
        for message in self.browse(cr, uid, ids, context=context)
        for attachment in message.attachment_ids
        if attachment.res_model == self._name and attachment.res_id == message.id
    ]
    if doomed_attachment_ids:
        self.pool.get('ir.attachment').unlink(cr, uid, doomed_attachment_ids, context=context)
    return super(mail_message, self).unlink(cr, uid, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
    """ Overridden to avoid duplicating fields that are unique to each email """
    default = {} if default is None else default
    # a copied mail must get fresh RFC2822 identifiers, never the original's
    default.update(message_id=False, headers=False)
    return super(mail_message, self).copy(cr, uid, id, default=default, context=context)
#------------------------------------------------------
# Messaging API
#------------------------------------------------------
# TDE note: this code is not used currently, will be improved in a future merge, when quoted context
# will be added to email send for notifications. Currently only WIP.
# Mako template used by message_quote_context() to render a quoted thread
# excerpt: the message itself (optional), some context messages, an optional
# "expandable" placeholder, then the thread header message.
MAIL_TEMPLATE = """<div>
% if message:
${display_message(message)}
% endif
% for ctx_msg in context_messages:
${display_message(ctx_msg)}
% endfor
% if add_expandable:
${display_expandable()}
% endif
${display_message(header_message)}
</div>
<%def name="display_message(message)">
<div>
Subject: ${message.subject}<br />
Body: ${message.body}
</div>
</%def>
<%def name="display_expandable()">
<div>This is an expandable.</div>
</%def>
"""
def message_quote_context(self, cr, uid, id, context=None, limit=3, add_original=False):
    """ Build an html quote of the thread context of a message.

        1. message.parent_id = False: new thread, no quote_context
        2. get the lasts messages in the thread before message
        3. get the message header
        4. add an expandable between them

        :param int limit: maximum number of context messages to fetch
        :param bool add_original: include the quoted message itself
        :return string: html quote ('' when there is nothing to quote or
            when template rendering fails)
    """
    add_expandable = False

    message = self.browse(cr, uid, id, context=context)
    if not message.parent_id:
        return ''
    context_ids = self.search(cr, uid, [
        ('parent_id', '=', message.parent_id.id),
        ('id', '<', message.id),
        ], limit=limit, context=context)

    if len(context_ids) >= limit:
        # thread longer than the quote window: show an expandable and drop
        # the oldest fetched message to make room for the header
        add_expandable = True
        context_ids = context_ids[0:-1]

    context_ids.append(message.parent_id.id)
    context_messages = self.browse(cr, uid, context_ids, context=context)
    header_message = context_messages.pop()

    try:
        if not add_original:
            message = False
        result = MakoTemplate(self.MAIL_TEMPLATE).render_unicode(message=message,
                                                        context_messages=context_messages,
                                                        header_message=header_message,
                                                        add_expandable=add_expandable,
                                                        # context kw would clash with mako internals
                                                        ctx=context,
                                                        format_exceptions=True)
        return result.strip()
    except Exception:
        _logger.exception("failed to render mako template for quoting message")
        return ''
    # Fixed: removed the unreachable trailing ``return result`` -- both the
    # try and except branches above already return.
def _notify(self, cr, uid, newid, context=None):
    """ Add the related record followers to the destination partner_ids if is not a private message.
        Call mail_notification.notify to manage the email sending
    """
    notification_obj = self.pool.get('mail.notification')
    message = self.browse(cr, uid, newid, context=context)

    partners_to_notify = set([])
    # message has no subtype_id: pure log message -> no partners, no one notified
    if not message.subtype_id:
        return True
    # all followers of the mail.message document have to be added as partners and notified
    if message.model and message.res_id:
        fol_obj = self.pool.get("mail.followers")
        # browse as SUPERUSER because rules could restrict the search results
        fol_ids = fol_obj.search(cr, SUPERUSER_ID, [
            ('res_model', '=', message.model),
            ('res_id', '=', message.res_id),
            ('subtype_ids', 'in', message.subtype_id.id)
            ], context=context)
        partners_to_notify |= set(fo.partner_id for fo in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context))
    # remove me from notified partners, unless the message is written on my own wall
    if message.author_id and message.model == "res.partner" and message.res_id == message.author_id.id:
        partners_to_notify |= set([message.author_id])
    elif message.author_id:
        partners_to_notify -= set([message.author_id])
    # all partner_ids of the mail.message have to be notified regardless of the above (even the author if explicitly added!)
    if message.partner_ids:
        partners_to_notify |= set(message.partner_ids)

    # notify
    if partners_to_notify:
        notification_obj._notify(cr, uid, newid, partners_to_notify=[p.id for p in partners_to_notify], context=context)
    # refresh so that message.notified_partner_ids below reflects the
    # notifications just created
    message.refresh()
    # An error appear when a user receive a notification without notifying
    # the parent message -> add a read notification for the parent
    if message.parent_id:
        # all notified_partner_ids of the mail.message have to be notified for the parented messages
        partners_to_parent_notify = set(message.notified_partner_ids).difference(message.parent_id.notified_partner_ids)
        for partner in partners_to_parent_notify:
            notification_obj.create(cr, uid, {
                    'message_id': message.parent_id.id,
                    'partner_id': partner.id,
                    'read': True,
                }, context=context)
#------------------------------------------------------
# Tools
#------------------------------------------------------
def check_partners_email(self, cr, uid, partner_ids, context=None):
    """ Verify that selected partner_ids have an email_address defined.
        Otherwise throw a warning. """
    partners_without_email = [
        partner
        for partner in self.pool.get('res.partner').browse(cr, uid, partner_ids, context=context)
        if not partner.email
    ]
    if not partners_without_email:
        # every recipient has an address: nothing to warn about
        return {}
    warning_msg = _('The following partners chosen as recipients for the email have no email address linked :')
    warning_msg += ''.join('\n- %s' % (partner.name) for partner in partners_without_email)
    return {'warning': {
                'title': _('Partners email addresses not found'),
                'message': warning_msg,
                }
            }
|
import pandas as pd
import numpy as np
import inspect
from matplotlib import pyplot as plt
plt.style.use('bmh')
import RuleSet
import r_funcs as rf
def make_rsets(df, params_dict,
               xtrap=False, return_sum=False, return_setting_sums=False,
               trim=False, ann_sum=False):
    '''Helper function to make rulesets objects based on input parameters

    Default just returns a dict of rulesets with the params added.

    Setting `xtrap` flag will return dict of rulesets populated with results of calling xtrap on each
     - i.e. it will actually do the extrapolation functions and assign to .past, .fut, .joined and .sum

    Setting `return_sum` or `return_setting_sums` flags will return summed datasets as expected.

    Setting `trim` with a tuple or list (start, end) will trim the output sum dfs
    '''
    cut_off = params_dict['cut_off']
    loe_delay = params_dict['loe_delay']
    n_pers = params_dict['n_pers']

    # Per-class (biol / non-biol) parameters, keyed by the biol flag.
    # Collapses the eight near-identical RuleSet constructions of the
    # original into two data-driven loops.
    class_params = {
        True: dict(prefix='biol',
                   shed=params_dict['biol_shed'],
                   term_gr=params_dict['biol_term_gr'],
                   coh_gr=params_dict['biol_coh_gr']),
        False: dict(prefix='nonbiol',
                    shed=params_dict['non_biol_shed'],
                    term_gr=params_dict['non_biol_term_gr'],
                    coh_gr=params_dict['non_biol_coh_gr']),
    }
    # (biol flag, setting, name suffix), in the original insertion order so
    # that downstream column ordering of the concatenated sums is unchanged
    combos = [(True, 'secondary', 'sec'), (False, 'secondary', 'sec'),
              (True, 'primary', 'prim'), (False, 'primary', 'prim')]

    rsets = {}
    # existing product rulesets - NB MUST SET CUTOFF TO MINUS 1 TO AVOID OVERLAP
    for biol, setting, abbr in combos:
        p = class_params[biol]
        name = '{}_{}'.format(p['prefix'], abbr)
        rsets[name] = RuleSet.RuleSet(df, name=name,
                index_slice=dict(biol=biol, setting=setting, start_month=slice(None, cut_off-1, None)),
                func=rf.r_trend,
                f_args=dict(shed=p['shed'], term_gr=p['term_gr'], loe_delay=loe_delay))

    # future launches rulesets
    for biol, setting, abbr in combos:
        p = class_params[biol]
        name = '{}_{}_fut'.format(p['prefix'], abbr)
        rsets[name] = RuleSet.RuleSet(df, name=name,
                index_slice=dict(biol=biol, setting=setting, start_month=slice(cut_off, None, None)),
                func=rf.r_fut_tr,
                f_args=dict(shed=p['shed'], loe_delay=loe_delay, term_gr=p['term_gr'],
                            coh_gr=p['coh_gr']/12, cut_off=cut_off))

    if xtrap or return_sum or return_setting_sums:
        # actually run the extrapolations so .summed is populated
        for r in rsets:
            rsets[r].xtrap(n_pers)

    if return_sum or return_setting_sums:
        sums = pd.concat([rsets[x].summed for x in rsets], axis=1)
        if trim:
            sums = sums.loc[slice(pd.Period(trim[0], freq='M'), pd.Period(trim[1], freq='M'), None), :]
        if ann_sum:
            sums = sums.groupby(sums.index.year).sum(axis=1)

    if return_sum:
        if return_setting_sums:
            print('Returning single sum only - to return sums by setting you need to turn off the `return_sum` flag')
        return sums
    elif return_setting_sums:
        # group columns by whether their name contains 'sec'
        by_setting = sums.groupby(lambda x: 'sec' in x, axis=1).sum()
        by_setting.columns = ['Primary', 'Secondary']
        return by_setting
    else:
        return rsets
##____________________________________________________________________________________________________________##
def plot_rset_projs(rs_dict_in, selection=None, agg_filename=None, num_plots=12, n_pers=120, out_folder='figs/test_plot_rset/',
                    save_fig=True, _debug=False):
    '''Helper function for plotting projections of individual products in rulesets.

    For a dict of rulesets, sorts by max sales and takes num_plots highest.
    Then for each generates a plot containing each product in the high sales set.

    selection:    pass a list of product names instead of finding products with max sales
    agg_filename: puts together in a single image file if pass a name (no need for a .png ending)

    NOTE(review): the save_fig flag is currently ignored - figures are always
    written to out_folder.  Confirm intent before wiring it up.
    '''
    if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
    if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")

    plt.close("all")
    pad = 35  # column width for aligning debug output

    # first exclude future rsets (their names contain 'fut')
    rs_dict = {x:rs_dict_in[x] for x in rs_dict_in.keys() if 'fut' not in x}

    # set up to make a single figure if aggregating
    if agg_filename is not None:
        if selection is not None:
            if _debug: print('aggregating for a selection')
            num_plots = len(selection)
            if _debug: print('num_plots'.ljust(pad), num_plots)
        else:
            if _debug: print('aggregating across rulesets')
            # BUG FIX: was `len(rs_dict * num_plots)`, which raises a TypeError
            # (a dict cannot be multiplied by an int).  The intent is
            # num_plots plots for each non-future ruleset.
            num_plots = len(rs_dict) * num_plots
            if _debug: print('num_plots'.ljust(pad), num_plots)
        fig, ax = plt.subplots(num_plots, figsize=(12, num_plots * 6))

    # iterate non future rsets
    for r in [x for x in rs_dict.keys() if not x.endswith('_fut')]:
        # select data by max sales
        print('\nin rset'.ljust(pad), r)
        selected = None
        # use selection if passed
        if selection is not None:
            selected = rs_dict[r].past.loc[selection]
        # otherwise take top by sales
        else:
            selected = rs_dict[r].past.loc[rs_dict[r].past.max(axis=1).sort_values(ascending=False).head(num_plots).index]
        if _debug: print('length of selection'.ljust(pad), len(selected))

        # get into £m annualised (assumes monthly data - TODO confirm units)
        selected *=12/10**8

        # get the LIST OF dfs from r_trend with an _out_type='df' flag
        df_list = rf.r_trend(selected, n_pers=n_pers,
                             shed = rs_dict[r].f_args['shed'],
                             term_gr = rs_dict[r].f_args['term_gr'],
                             loe_delay = rs_dict[r].f_args['loe_delay'],
                             _out_type='df')

        plat_dur = rs_dict[r].f_args['shed'].plat_dur
        total_dur = rs_dict[r].f_args['shed'].uptake_dur + plat_dur
        if _debug: print('plat length is '.ljust(pad), plat_dur)
        if _debug: print('total length is '.ljust(pad), total_dur)

        # make graph for this ruleset (remember in a loop here already), if not already made one for aggregate
        if agg_filename is None:
            fig, ax = plt.subplots(num_plots, figsize=(12, num_plots * 6))

        # now loop through the returned dataframes - one per product
        for i, df in enumerate(df_list):
            if _debug: print('\ndf number'.ljust(pad), i)
            if _debug: print('..corresponding product name'.ljust(pad), selected.iloc[i].name[0])
            # presumably index level 6 holds the loe / patent-expiry month - TODO confirm
            loe_date = pd.Period(selected.iloc[i].name[6], freq='M')
            if _debug: print('..with loe'.ljust(pad), loe_date)

            # make the index, from the selected input dataframe, adding n_pers
            ind_start = selected.columns[0]
            if _debug: print('ind start'.ljust(pad), ind_start)
            ind = pd.PeriodIndex(start=ind_start, periods = len(selected.columns) + n_pers).to_timestamp()
            if _debug: print('ind end'.ljust(pad), ind[-1])
            if _debug: print('total periods'.ljust(pad), len(ind))

            # snip df to length of index - THERE IS A STRAY PERIOD COMING FROM SOMEWHERE
            if _debug: print('length of dfs'.ljust(pad), len(df))
            if len(df) > len(ind):
                if _debug: print('snipping df.. ..')
                df = df.iloc[:len(ind), :]
                if _debug: print("length now".ljust(pad), len(df))

            ind_end = pd.Period(ind[-1], freq='M')
            if _debug: print('index end'.ljust(pad), ind_end)

            # make an axes iterator that works with case if single plot
            # (plt.subplots returns a bare Axes, not an array, when num_plots == 1)
            ax_it = None
            if num_plots == 1: ax_it = ax
            else: ax_it = ax[i]

            # and now loop through the actual columns in the dataframe for the product
            for col in df:
                ax_it.plot(ind, zero_to_nan(df[col]))

            ax_it.set_title(selected.iloc[i].name[0] + ", loe: " + str(loe_date))
            # only add a legend to every fourth plot to limit clutter
            if i%4 == 0:
                ax_it.legend(['actual', 'mov ave.', 'projected'])

            # shade uptake (green) and plateau (red) phases before patent expiry,
            # clipped to the plotted index range
            pat_exp = pd.Period(selected.iloc[i].name[6], freq='M')
            lim_0 = max(ind_start, (pat_exp - total_dur)).to_timestamp()
            lim_1 = max(ind_start, (pat_exp - plat_dur)).to_timestamp()
            lim_2 = min(ind_end, max(ind_start, (pat_exp))).to_timestamp()

            if lim_1 > ind_start.to_timestamp():
                ax_it.axvspan(lim_0, lim_1, facecolor='g', alpha=0.1)
                # only draw the line if in scope
                if lim_1 < ind_end.to_timestamp():
                    ax_it.axvline(x=lim_1, linestyle='--', color='gray')

            if lim_2 > ind_start.to_timestamp():
                ax_it.axvspan(lim_1, lim_2, facecolor='r', alpha=0.1)
                # only draw the line if in scope
                if lim_2 < ind_end.to_timestamp():
                    ax_it.axvline(x=lim_2, color='gray')

            ax_it.set_ylim(0)

        # one file per ruleset when not aggregating
        if agg_filename is None:
            fig.savefig(out_folder + r + '.png')

    # single combined file when aggregating
    if agg_filename is not None:
        fig.savefig(out_folder + agg_filename + '.png')

    if _debug: print("\nLEAVING: ", inspect.stack()[0][3])
##_________________________________________________________________________##
def zero_to_nan(values):
    """Return a new list copying *values*, with every 0 replaced by NaN."""
    out = []
    for v in values:
        out.append(float('nan') if v == 0 else v)
    return out
# temp changes, resolving conflict
import pandas as pd
import numpy as np
import inspect
from matplotlib import pyplot as plt
plt.style.use('bmh')
import RuleSet
import r_funcs as rf
def make_rsets(df, params_dict,
               xtrap=False, return_all_sums = False, return_setting_sums=False, return_sum = False,
               trim=False, _debug=False):
    '''Helper function to make rulesets objects based on input parameters

    Default just returns a dict of rulesets with the params added.
    Setting `xtrap` flag will return dict of rulesets populated with results of calling xtrap on each
     - i.e. it will actually do the extrapolation functions and assign to .past, .fut, .joined and .sum
    Setting `return_sum` or `return_setting_sums` flags will return summed datasets as expected.
    Setting `trim` with a tuple or list (start, end) will trim the output sum dfs

    Flag precedence when several return flags are set:
    return_all_sums > return_setting_sums > return_sum.
    '''
    # unpack the scenario parameters
    cut_off = params_dict['cut_off']
    biol_shed = params_dict['biol_shed']
    non_biol_shed = params_dict['non_biol_shed']
    loe_delay = params_dict['loe_delay']
    biol_term_gr = params_dict['biol_term_gr']
    non_biol_term_gr = params_dict['non_biol_term_gr']
    biol_coh_gr = params_dict['biol_coh_gr']
    non_biol_coh_gr = params_dict['non_biol_coh_gr']
    n_pers = params_dict['n_pers']

    rsets = {}

    # The eight rulesets follow a single pattern, varying only by setting
    # (secondary / primary), biol status, and existing vs future launches,
    # so build them in loops instead of eight hand-written copies.

    # existing product rulesets - NB MUST SET CUTOFF TO MINUS 1 TO AVOID OVERLAP
    for setting, tag in (('secondary', 'sec'), ('primary', 'prim')):
        for biol in (True, False):
            name = ('biol_' if biol else 'nonbiol_') + tag
            rsets[name] = RuleSet.RuleSet(df, name=name,
                              index_slice = dict(biol=biol, setting=setting,
                                                 start_month=slice(None, cut_off-1, None)),
                              func = rf.r_trend,
                              f_args = dict(shed=biol_shed if biol else non_biol_shed,
                                            term_gr=biol_term_gr if biol else non_biol_term_gr,
                                            loe_delay=loe_delay))

    # future launches rulesets - start at cut_off and use r_fut_tr, which
    # additionally needs a cohort growth rate and the cut_off itself
    for setting, tag in (('secondary', 'sec'), ('primary', 'prim')):
        for biol in (True, False):
            name = ('biol_' if biol else 'nonbiol_') + tag + '_fut'
            rsets[name] = RuleSet.RuleSet(df, name=name,
                              index_slice = dict(biol=biol, setting=setting,
                                                 start_month=slice(cut_off, None, None)),
                              func = rf.r_fut_tr,
                              f_args = dict(shed=biol_shed if biol else non_biol_shed,
                                            loe_delay=loe_delay,
                                            term_gr=biol_term_gr if biol else non_biol_term_gr,
                                            coh_gr=biol_coh_gr if biol else non_biol_coh_gr,
                                            cut_off=cut_off))

    # any of the sum flags implies we need the extrapolation done first
    if xtrap or return_all_sums or return_setting_sums or return_sum:
        for r in rsets:
            if _debug: print('xtrapping rset ', r, end=" ")
            rsets[r].xtrap(n_pers)
            if _debug: print(' ..OK')

    # if any sums reqd, make the full set
    if return_all_sums or return_setting_sums or return_sum:
        if _debug: print('making all sums')
        sums = pd.concat([rsets[x].summed for x in rsets], axis=1)
        if trim:
            sums = sums.loc[slice(pd.Period(trim[0], freq='M'), pd.Period(trim[1], freq='M'),None),:]

        # if all sums reqd, just return
        if return_all_sums:
            return sums
        # if sums by setting reqd
        elif return_setting_sums:
            if _debug: print('making sums by setting')
            # column names contain 'sec' for secondary-care rulesets; the
            # False group (primary) sorts first
            sums = sums.groupby(lambda x: 'sec' in x, axis=1).sum()
            sums.columns = ['Primary', 'Secondary']
            return sums
        # if a single sum reqd
        elif return_sum:
            if _debug: print('making single sum')
            sums = sums.sum(axis=1)
            # NOTE(review): removed an unreachable warning about combining
            # return_sum with the other sum flags - the earlier branches
            # return first, so it could never fire.
            return sums

    # default returns the whole rulesets (with or without dfs depending on if xtrap was called)
    else: return rsets
##____________________________________________________________________________________________________________##
def load_spend_dset(path='../spend_data_proc/consol_ey_dset/spend_dset_07FEB18a.pkl', phx_adj=1.6, add_start_m=True, _debug=False):
    '''Helper function to load the spend dataset.

    Optionally applies the general pharmex adjustment to secondary-care spend
    (multiplying by the passed phx_adj factor).
    Also (optionally, default True) appends a start_month level to the index,
    derived from the adjusted launch date.  This is needed to make slices of
    the df in the rulesets, but may be better done when generating the
    original spend dset. (TODO)
    '''
    pad = 30
    data = pd.read_pickle(path)

    if _debug:
        print('\noriginal sum, £m'.ljust(pad), "{:0,.3f}".format(data.sum().sum()/10**8))
        setting_sums = data.groupby(level=1).sum().sum(axis=1)/10**8
        print("by_setting.index[0]".ljust(pad), "{:0,.3f}".format(setting_sums[0]))
        print("by_setting.index[1]".ljust(pad), "{:0,.3f}".format(setting_sums[1]))

    # record secondary spend before adjusting, so the applied ratio can be reported
    if _debug:
        sec_before = data.groupby(level=1).sum().loc['secondary',:].sum()
        print('\napplying ratio'.ljust(pad), phx_adj, end='\n')

    # scale all secondary-care rows by the pharmex adjustment factor
    data.loc[pd.IndexSlice[:,'secondary'],:] *= phx_adj

    if _debug:
        setting_sums = data.groupby(level=1).sum().sum(axis=1)/10**8
        print('\nfinal sum, £m'.ljust(pad), "{:0,.3f}".format(setting_sums.sum()))
        print("by_setting.index[0]".ljust(pad), "{:0,.3f}".format(setting_sums[0]))
        print("by_setting.index[1]".ljust(pad), "{:0,.3f}".format(setting_sums[1]))
        print("\nratio actually applied ".ljust(pad), "{:0,.3f}".format(data.groupby(level=1).sum().loc['secondary',:].sum() / sec_before))
        print('len'.ljust(pad), len(data))

    # add a start month level derived from the adjusted launch date
    if add_start_m:
        start_index = pd.PeriodIndex(data.index.get_level_values(level='adjusted_launch_date'), freq='M', name='start_month')
        data.set_index(start_index, append=True, inplace=True)

    return data
##____________________________________________________________________________________________________________##
def plot_rset_projs(rs_dict_in, selection=None, agg_filename=None, num_plots=12, n_pers=120, xlims=None,
                    out_folder='figs/test_plot_rset/', save_fig=True, _debug=False):
    '''Helper function for plotting projections of individual products in rulesets.

    For a dict of rulesets, sorts by max sales and takes num_plots highest.
    Then for each generates a plot containing each product in the high sales set.

    Selection: pass a list of product names instead of finding products with max sales
    agg_filename: puts together in a single image file if pass a filename (no need for a .png ending)
        - most useful when passing a selection
        - will then find instances of the selection elements across all the rulesets
        - so need to extend selection names to include all instances, eg a name may occur in several.
          Do this with ext_selection.
        - Also use the length of this to set the number of plots, and
          use ext_selection to hold the annotated names (identifying rset they were found in)
        - also use an additional iterator, agg_i, which keeps incrementing across different rsets

    NOTE(review): the save_fig flag is currently ignored - figures are always
    written to out_folder.  Confirm intent before wiring it up.
    '''
    if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
    if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")

    plt.close("all")
    pad = 35  # column width for aligning debug output

    # first exclude future rsets (their names contain 'fut')
    rs_dict = {x:rs_dict_in[x] for x in rs_dict_in.keys() if 'fut' not in x}

    # ext_selection is only built when aggregating over a selection; keep a
    # None sentinel so the title logic below can tell whether it exists.
    ext_selection = None

    # set up to make a single figure if aggregating
    if agg_filename is not None:
        if selection is not None:
            # tricky. Need to know how many across all rsets
            ext_selection = []
            for r in rs_dict_in:
                ext_selection.extend([x + " - " + r for x in rs_dict_in[r].past.index.get_level_values('molecule')
                                      for s in selection if s in x])
            # NOTE(review): this scans rs_dict_in (which still contains the
            # '_fut' rulesets) while the plotting loop below only covers the
            # non-future rsets - if a selected product appears in a future
            # rset the plot count and names may disagree.  TODO confirm.
            if _debug: print('extended list\n', ext_selection)
            num_plots = len(ext_selection)
            if _debug: print('num_plots (selection)'.ljust(pad), num_plots)
        else:
            # BUG FIX: was `len(rs_dict * num_plots)`, which raises a TypeError
            # (a dict cannot be multiplied by an int).  The intent is
            # num_plots plots for each non-future ruleset.
            num_plots = len(rs_dict) * num_plots
            if _debug: print('num_plots (rulesets)'.ljust(pad), num_plots)
        fig, ax = plt.subplots(num_plots, figsize=(12, num_plots * 6))

    # make an iterator for if aggregating in a single fig
    agg_i = 0

    # iterate non future rsets
    for r in [x for x in rs_dict.keys() if not x.endswith('_fut')]:
        # select data by max sales
        print('\nin rset'.ljust(pad), r)
        selected = None
        # use selection if passed
        if selection is not None:
            selected = rs_dict[r].past.loc[selection]
        # otherwise take top by sales
        else:
            selected = rs_dict[r].past.loc[rs_dict[r].past.max(axis=1).sort_values(ascending=False).head(num_plots).index]
        if _debug: print('length of selection'.ljust(pad), len(selected))

        # get into £m annualised (assumes monthly data - TODO confirm units)
        selected *=12/10**8

        # get the LIST OF dfs from r_trend with an _out_type='df' flag
        df_list = rf.r_trend(selected, n_pers=n_pers,
                             shed = rs_dict[r].f_args['shed'],
                             term_gr = rs_dict[r].f_args['term_gr'],
                             loe_delay = rs_dict[r].f_args['loe_delay'],
                             _out_type='df')

        plat_dur = rs_dict[r].f_args['shed'].plat_dur
        total_dur = rs_dict[r].f_args['shed'].uptake_dur + plat_dur
        if _debug: print('plat length is '.ljust(pad), plat_dur)
        if _debug: print('total length is '.ljust(pad), total_dur)

        # make graph for this ruleset (remember in a loop here already), if not already made one for aggregate
        if agg_filename is None:
            fig, ax = plt.subplots(num_plots, figsize=(12, num_plots * 6))

        # now loop through the returned dataframes - one per product
        for i, df in enumerate(df_list):
            if _debug: print('\ndf number'.ljust(pad), i)
            if _debug: print('..corresponding product name'.ljust(pad), selected.iloc[i].name[0])
            # presumably index level 6 holds the loe / patent-expiry month - TODO confirm
            loe_date = pd.Period(selected.iloc[i].name[6], freq='M')
            if _debug: print('..with loe'.ljust(pad), loe_date)

            # make the index, from the selected input dataframe, adding n_pers
            ind_start = selected.columns[0]
            if _debug: print('ind start'.ljust(pad), ind_start)
            ind = pd.PeriodIndex(start=ind_start, periods = len(selected.columns) + n_pers).to_timestamp()
            if _debug: print('ind end'.ljust(pad), ind[-1])
            if _debug: print('total periods'.ljust(pad), len(ind))

            # snip df to length of index - THERE IS A STRAY PERIOD COMING FROM SOMEWHERE
            if _debug: print('length of dfs'.ljust(pad), len(df))
            if len(df) > len(ind):
                if _debug: print('snipping df.. ..')
                df = df.iloc[:len(ind), :]
                if _debug: print("length now".ljust(pad), len(df))

            ind_end = pd.Period(ind[-1], freq='M')
            if _debug: print('index end'.ljust(pad), ind_end)

            # make an axes iterator that works with case if single plot
            # (plt.subplots returns a bare Axes, not an array, when num_plots == 1)
            ax_it = None
            if num_plots == 1: ax_it = ax
            elif agg_filename: ax_it = ax[agg_i]
            else: ax_it = ax[i]

            # and now loop through the actual columns in the dataframe for the product
            for col in df:
                ax_it.plot(ind, zero_to_nan(df[col]))

            # BUG FIX: previously indexed ext_selection whenever agg_filename
            # was set, which raised a NameError when aggregating without a
            # selection (ext_selection was never built in that branch).
            if ext_selection is not None:
                plot_name = ext_selection[agg_i]
            else:
                plot_name = selected.iloc[i].name[0]
            ax_it.set_title(plot_name + ", loe: " + str(loe_date))

            # only add a legend to every fourth plot to limit clutter
            if i%4 == 0:
                ax_it.legend(['actual', 'mov ave.', 'projected'])

            # shade uptake (green) and plateau (red) phases before patent expiry,
            # clipped to the plotted index range
            pat_exp = pd.Period(selected.iloc[i].name[6], freq='M')
            lim_0 = max(ind_start, (pat_exp - total_dur)).to_timestamp()
            lim_1 = max(ind_start, (pat_exp - plat_dur)).to_timestamp()
            lim_2 = min(ind_end, max(ind_start, (pat_exp))).to_timestamp()

            if lim_1 > ind_start.to_timestamp():
                ax_it.axvspan(lim_0, lim_1, facecolor='g', alpha=0.1)
                # only draw the line if in scope
                if lim_1 < ind_end.to_timestamp():
                    ax_it.axvline(x=lim_1, linestyle='--', color='gray')

            if lim_2 > ind_start.to_timestamp():
                ax_it.axvspan(lim_1, lim_2, facecolor='r', alpha=0.1)
                # only draw the line if in scope
                if lim_2 < ind_end.to_timestamp():
                    ax_it.axvline(x=lim_2, color='gray')

            ax_it.set_ylim(0)
            if xlims is not None:
                ax_it.set_xlim(xlims)

            agg_i +=1

        # one file per ruleset when not aggregating
        if agg_filename is None:
            fig.savefig(out_folder + r + '.png')

    # single combined file when aggregating
    if agg_filename is not None:
        fig.savefig(out_folder + agg_filename + '.png')

    if _debug: print("\nLEAVING: ", inspect.stack()[0][3])
##_________________________________________________________________________##
def zero_to_nan(values):
    """Replace every 0 with 'nan' and return a copy.
    MAKE THIS A UFUNC"""
    def _swap(v):
        return float('nan') if v == 0 else v
    return list(map(_swap, values))
##_________________________________________________________________________##
def plot_hi_lo_base(hi, lo, df, scen_names, base_params=None, base_out=None,
                    trim=('1-2010', '12-2023'), colors=('darkred', 'seagreen', 'grey'), figsize=(12,6),
                    outfile=None, fig=None, ax=None, return_fig=False):
    '''Plot high, low and base scenario projections on a single axes.

    hi, lo:       parameter dicts for the high and low scenarios (passed to make_rsets)
    df:           the spend dataset
    scen_names:   legend labels, in plotting order [hi, lo, base]
    base_params:  parameter dict for the base scenario, used to compute base_out
    base_out:     precomputed base scenario sum (alternative to base_params)
    trim:         (start, end) period strings used to trim each scenario sum
    outfile:      if given, save the figure to this path
    fig, ax:      optionally draw onto an existing figure/axes
    return_fig:   if True, return the figure object
    '''
    if base_params is None and base_out is None:
        print("need either base parameters or actual data")
        return

    # BUG FIX: previously called `tst.make_rsets(df, hi, ...)` - the `tst.`
    # module prefix (left over from interactive use) raised a NameError, and
    # passing `hi` would have made the base line identical to the high
    # scenario instead of using base_params.
    if base_params: base_out = make_rsets(df, base_params, return_sum=True, trim=trim)

    hi_out = make_rsets(df, hi, return_sum=True, trim=trim)
    lo_out = make_rsets(df, lo, return_sum=True, trim=trim)

    if fig is None and ax is None:
        fig, ax = plt.subplots(figsize=figsize)

    ind = base_out.index.to_timestamp()
    # plot in hi, lo, base order to match scen_names
    # (the *12/10**11 scaling presumably annualises and converts units - TODO confirm)
    for i, s in enumerate([hi_out, lo_out, base_out]):
        ax.plot(ind, s*12/10**11, color=colors[i], alpha=0.5)
    ax.legend(scen_names)

    if outfile: fig.savefig(outfile)
    if return_fig: return fig
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model saving in the HDF5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import uuid
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training as training_module
from tensorflow.python.training.tracking import util as trackable
# h5py is an optional dependency (needed for the HDF5 weight-file tests);
# when it is not installed, tests below check `h5py is None` and return early.
try:
  import h5py  # pylint:disable=g-import-not-at-top
except ImportError:
  h5py = None
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
  """Tests for saving and loading model *weights* (as opposed to whole models)."""

  @keras_parameterized.run_with_all_saved_model_formats
  def test_weight_loading(self):
    """Weights saved then reloaded must reproduce the original predictions."""
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    saved_model_dir = os.path.join(temp_dir, 'saved_model')
    save_format = testing_utils.get_save_format()
    with self.cached_session():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3)(a)
      b = keras.layers.Dense(1)(x)
      model = keras.models.Model(a, b)

      x = np.random.random((3, 2))
      ref_y = model.predict(x)
      weights = model.get_weights()
      # Round-tripping through get/set_weights must not change predictions.
      model.set_weights(weights)
      y = model.predict(x)
      self.assertAllClose(ref_y, y)

      # A wrong-length or reordered weight list must be rejected.
      with self.assertRaises(ValueError):
        model.set_weights(weights[1:])
      with self.assertRaises(ValueError):
        model.set_weights(weights[::-1])

      model.save_weights(saved_model_dir, save_format=save_format)
      model.load_weights(saved_model_dir)
      y = model.predict(x)
      self.assertAllClose(ref_y, y)

  def test_weight_preprocessing(self):
    """preprocess_weights_for_loading accepts Keras-1-style weight lists."""
    input_dim = 3
    output_dim = 3
    size = 2
    # Each case: (layer, Keras-1-style weight list, input shape to build with).
    # Only checks that preprocessing runs without error, not the values.
    cases = [
        [
            (keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
            [np.random.random((2, 1)), np.random.random((2, 1))],
            (None, 3, 2),
        ],
        [
            (keras.layers.TimeDistributed(keras.layers.Dense(1))),
            [np.random.random((2, 1)), np.random.random((1,))],
            (None, 3, 2),
        ],
        [
            (keras.layers.Conv1D(output_dim, size, use_bias=False)),
            [np.random.random((output_dim, input_dim, size, 1))],
            (None, 4, input_dim),
        ],
        [
            (keras.layers.Conv2D(output_dim, size,
                                 use_bias=False, data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size))],
            (None, input_dim, 4, 4),
        ],
        [
            (keras.layers.Conv2DTranspose(output_dim, size,
                                          use_bias=False,
                                          data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size))],
            (None, input_dim, 4, 4),
        ],
        [
            (keras.layers.Conv2DTranspose(output_dim, size,
                                          use_bias=False,
                                          data_format='channels_last')),
            [np.random.random((size, size, input_dim, output_dim))],
            (None, 4, 4, input_dim),
        ],
        [
            (keras.layers.Conv3D(output_dim, size,
                                 use_bias=False, data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size, size))],
            (None, input_dim, 4, 4, 4),
        ],
        [
            (keras.layers.GRUV1(output_dim)),
            [np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,))],
            (None, 4, input_dim),
        ],
        [
            (keras.layers.LSTMV1(output_dim)),
            [np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,))],
            (None, 4, input_dim),
        ],
    ]
    for layer, weights, input_shape in cases:
      layer.build(input_shape)
      _ = hdf5_format.preprocess_weights_for_loading(
          layer, weights, original_keras_version='1')

    # Also run preprocessing on whole models (Sequential and functional).
    model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
    _ = hdf5_format.preprocess_weights_for_loading(
        model, model.weights, original_keras_version='1')

    x = keras.Input((2,))
    y = keras.layers.Dense(2)(x)
    model = keras.models.Model(x, y)
    _ = hdf5_format.preprocess_weights_for_loading(
        model, model.weights, original_keras_version='1')

  @parameterized.named_parameters(
      ('gru', keras.layers.GRU, {
          'units': 2,
          'input_shape': (3, 5)
      }),
      ('gru_with_reset_after', keras.layers.GRU, {
          'units': 2,
          'input_shape': (3, 5),
          'reset_after': True
      }),
      ('lstm', keras.layers.LSTM, {
          'units': 2,
          'input_shape': (3, 5)
      }),
      ('cudnngru', keras.layers.CuDNNGRU, {
          'units': 2,
          'input_shape': (3, 5)
      }),
      ('cudnnlstm', keras.layers.CuDNNLSTM, {
          'units': 2,
          'input_shape': (3, 5)
      }))
  def test_preprocess_weights_for_loading_rnn_should_be_idempotent(
      self, layer_class, layer_args):
    """Preprocessing already-current RNN weights must leave them unchanged."""
    with self.cached_session():
      layer = layer_class(**layer_args)
      layer.build(input_shape=layer_args.get('input_shape'))
      weights1 = layer.get_weights()
      weights2 = hdf5_format.preprocess_weights_for_loading(
          layer, weights1)
      _ = [
          self.assertAllClose(x, y, rtol=1e-05)
          for (x, y) in zip(weights1, weights2)
      ]

  def test_sequential_weight_loading(self):
    """Weights saved to an .h5 file load into an identically-shaped model."""
    if h5py is None:
      return  # HDF5 support is optional; nothing to test without h5py.

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    h5_path = os.path.join(temp_dir, 'test.h5')

    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
      model.add(keras.layers.Dense(num_classes))

      x = np.random.random((batch_size, input_dim))
      ref_y = model.predict(x)

      model.save_weights(h5_path)

      # Rebuild the same architecture from scratch and restore the weights.
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
      model.add(keras.layers.Dense(num_classes))
      model.load_weights(h5_path)
      y = model.predict(x)

      self.assertAllClose(y, ref_y)

  @keras_parameterized.run_with_all_saved_model_formats
  def test_nested_model_weight_loading(self):
    """Weight round-trip works for a model containing nested sub-models."""
    save_format = testing_utils.get_save_format()
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    saved_model_dir = os.path.join(temp_dir, 'saved_model')

    batch_size = 5
    shape = (None, None, 3)

    with self.cached_session():
      def gen_model():
        # Functional model wrapping an inner model built from two
        # Sequential sub-models.
        def seq_model():
          model = keras.models.Sequential([
              keras.layers.Conv2D(3, 1, input_shape=shape),
              keras.layers.BatchNormalization()])
          return model

        x = inner_inputs = keras.layers.Input((None, None, 3))
        x = seq_model()(x)
        x = seq_model()(x)
        inner_model = keras.models.Model(inner_inputs, x)

        inputs = keras.layers.Input(shape)
        return keras.models.Model(inputs, inner_model(inputs))

      model = gen_model()
      x = np.random.random((batch_size, 1, 1, 3))
      ref_y = model.predict(x)

      model.save_weights(saved_model_dir, save_format=save_format)

      model = gen_model()
      model.load_weights(saved_model_dir)
      y = model.predict(x)
      self.assertAllClose(y, ref_y)

  def test_sequential_weight_loading_group_name_with_incorrect_length(self):
    """Loading by name: a wrong weight count raises; skip_mismatch skips it."""
    if h5py is None:
      return  # HDF5 support is optional; nothing to test without h5py.

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    h5_path = os.path.join(temp_dir, 'test.h5')

    num_hidden = 5
    input_dim = 3
    num_classes = 2
    with self.cached_session():
      ref_model = keras.models.Sequential()
      ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
                                       name='d1'))
      ref_model.add(keras.layers.Dense(num_classes, name='d2'))
      ref_model.compile(loss=keras.losses.MSE,
                        optimizer='rmsprop',
                        metrics=[keras.metrics.categorical_accuracy])

      f_ref_model = h5py.File(h5_path, 'w')
      hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)

      f_model = h5py.File(h5_path, 'r')
      # Target 'd1' has no bias, so it expects 1 weight while 2 were saved.
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(num_hidden, use_bias=False,
                                   input_dim=input_dim, name='d1'))
      model.add(keras.layers.Dense(num_classes, name='d2'))
      model.compile(loss=keras.losses.MSE,
                    optimizer='rmsprop',
                    metrics=[keras.metrics.categorical_accuracy])
      with self.assertRaisesRegex(
          ValueError, r'Layer #0 \(named \"d1\"\) expects 1 '
          r'weight\(s\), but the saved weights have 2 '
          r'element\(s\)\.'):
        hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)

      # With skip_mismatch, the mismatched layer is skipped but the
      # compatible layer ('d2') still gets its weights loaded.
      hdf5_format.load_weights_from_hdf5_group_by_name(
          f_model, model.layers, skip_mismatch=True)
      self.assertAllClose(keras.backend.get_value(ref_model.layers[1].kernel),
                          keras.backend.get_value(model.layers[1].kernel))

  def test_sequential_weight_loading_group_name_with_incorrect_shape(self):
    """Loading by name: a shape mismatch raises; skip_mismatch still loads d2."""
    if h5py is None:
      return  # HDF5 support is optional; nothing to test without h5py.

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    h5_path = os.path.join(temp_dir, 'test.h5')

    num_hidden = 5
    input_dim = 3
    num_classes = 2
    with ops.Graph().as_default(), self.cached_session():
      ref_model = keras.models.Sequential()
      ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
                                       name='d1'))
      ref_model.add(keras.layers.Dense(num_classes, name='d2'))
      ref_model.compile(loss=keras.losses.MSE,
                        optimizer=keras.optimizers.RMSprop(lr=0.0001),
                        metrics=[keras.metrics.categorical_accuracy])

      f_ref_model = h5py.File(h5_path, 'w')
      # A recognisable bias value, to verify later that 'd2' was loaded.
      keras.backend.set_value(ref_model.layers[1].bias, [3.5] * num_classes)
      hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)

      f_model = h5py.File(h5_path, 'r')
      # Target 'd1' has more units than saved, so its kernel shape mismatches.
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(num_hidden + 5, input_dim=input_dim,
                                   name='d1'))
      model.add(keras.layers.Dense(num_classes, name='d2'))
      model.compile(loss=keras.losses.MSE,
                    optimizer=keras.optimizers.RMSprop(lr=0.0001),
                    metrics=[keras.metrics.categorical_accuracy])
      with self.assertRaisesRegex(
          ValueError, r'Layer #0 \(named "d1"\), weight '
          r'<tf\.Variable \'d1_1\/kernel:0\' '
          r'shape=\(3, 10\) dtype=float32> has '
          r'shape \(3, 10\), but the saved weight has '
          r'shape \(3, 5\)\.'):
        hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)

      hdf5_format.load_weights_from_hdf5_group_by_name(
          f_model, model.layers, skip_mismatch=True)
      self.assertAllClose([3.5] * num_classes,
                          keras.backend.get_value(model.layers[1].bias))
@keras_parameterized.run_with_all_saved_model_formats
class TestWholeModelSaving(keras_parameterized.TestCase):
def _save_model_dir(self, dirname='saved_model'):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
return os.path.join(temp_dir, dirname)
def _assert_same_weights_and_metrics(self, model, loaded_model):
"""Checks that the loaded weights and metrics are the same as the original.
Args:
model: original model
loaded_model: loaded model
"""
self.assertAllClose(model.weights, loaded_model.weights)
if loaded_model.optimizer:
if testing_utils.get_save_format() == 'tf':
# TODO(b/153110928): Keras TF format doesn't restore optimizer weights
# currently.
return
self.assertAllClose(model.optimizer.weights,
loaded_model.optimizer.weights)
# In V1/Graph mode, the model isn't built, so the metrics are not loaded
# immediately (requires model to be called on some data before building
# metrics).
check_metrics = tf2.enabled() and context.executing_eagerly()
if check_metrics:
self.assertAllEqual([m.name for m in model.metrics],
[m.name for m in loaded_model.metrics])
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_save_and_load(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
if save_format == 'h5' and testing_utils.get_model_type() == 'subclass':
return # HDF5 format currently does not allow saving classed models.
with self.cached_session():
model = testing_utils.get_model_from_layers(
[keras.layers.Dense(2),
keras.layers.RepeatVector(3),
keras.layers.TimeDistributed(keras.layers.Dense(3))],
input_shape=(3,))
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizer_v2.rmsprop.RMSprop(lr=0.0001),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalCrossentropy(
name='cce', label_smoothing=constant_op.constant(0.2)),
],
weighted_metrics=[
keras.metrics.categorical_crossentropy,
keras.metrics.CategoricalCrossentropy(
name='cce', label_smoothing=constant_op.constant(0.2)),
],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
loaded_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, loaded_model)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
eval_out = model.evaluate(x, y)
eval_out2 = loaded_model.evaluate(x, y)
self.assertArrayNear(eval_out, eval_out2, 0.001)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_sequential_model_saving_without_input_shape(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer='rmsprop',
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy(name='cat_acc')
],
weighted_metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy(name='cat_acc2')
],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
model.save(saved_model_dir, save_format=save_format)
new_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, new_model)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_sequential_model_saving_without_compile(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
x = np.random.random((1, 3))
out = model.predict(x)
# Save the model without any compilation or training.
keras.models.save_model(model, saved_model_dir, save_format=save_format)
new_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, new_model)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
  def test_sequential_model_saving_2(self):
    """Save/reload with a custom optimizer subclass and a custom loss fn."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    with ops.Graph().as_default(), self.cached_session():
      # test with custom optimizer, loss

      class CustomOp(keras.optimizers.RMSprop):
        pass

      def custom_loss(y_true, y_pred):
        return keras.losses.mse(y_true, y_pred)

      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])

      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      keras.models.save_model(model, saved_model_dir, save_format=save_format)

      # Custom classes cannot be deserialized automatically; they must be
      # supplied back through `custom_objects`.
      new_model = keras.models.load_model(
          saved_model_dir,
          custom_objects={'CustomOp': CustomOp,
                          'custom_loss': custom_loss})
      self._assert_same_weights_and_metrics(model, new_model)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)
  def test_saving_without_compilation(self):
    """Round-trips a freshly compiled, untrained model.

    NOTE(review): despite the name, this model *is* compiled; the test
    exercises saving before any training step has run — confirm intent.
    """
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(2, input_shape=(3,)))
    model.add(keras.layers.Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    model = keras.models.load_model(saved_model_dir)
  def test_saving_with_tf_optimizer(self):
    """Saving succeeds when the model is compiled with a raw TF v1 optimizer.

    Only checks that save/load complete without error; TF v1 optimizer state
    handling is presumably format-dependent — see the save/load implementation.
    """
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(2, input_shape=(3,)))
    model.add(keras.layers.Dense(3))
    model.compile(loss='mse',
                  optimizer=training_module.AdadeltaOptimizer(0.1),
                  metrics=['acc'])

    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    model = keras.models.load_model(saved_model_dir)
  def test_saving_right_after_compilation(self):
    """Saving works immediately after compile, before any training."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
      if not ops.executing_eagerly_outside_functions():
        # Graph mode only: force creation of the train function so saving
        # happens with the training graph already materialized.
        model._make_train_function()
      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      model = keras.models.load_model(saved_model_dir)
  def test_saving_lambda_numpy_array_arguments(self):
    """Numpy-array `arguments` of a Lambda layer survive a save/load cycle."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()

    if h5py is None:
      self.skipTest('h5py required to run this test')

    mean = np.random.random((4, 2, 3))
    std = np.abs(np.random.random((4, 2, 3))) + 1e-5
    inputs = keras.layers.Input(shape=(4, 2, 3))
    output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
                                 arguments={'mu': mean, 'std': std})(inputs)
    model = keras.models.Model(inputs, output)
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    keras.models.save_model(model, saved_model_dir, save_format=save_format)

    model = keras.models.load_model(saved_model_dir)

    # The numpy arrays must come back from serialization unchanged.
    self.assertAllClose(mean, model.layers[1].arguments['mu'])
    self.assertAllClose(std, model.layers[1].arguments['std'])
  def test_saving_model_with_long_layer_names(self):
    """A very long layer name triggers chunking of the HDF5 `layer_names` attr."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    with self.cached_session():
      # This layer name will make the `layers_name` HDF5 attribute blow
      # out of proportion. Note that it fits into the internal HDF5
      # attribute memory limit on its own but because h5py converts
      # the list of layer names into numpy array, which uses the same
      # amount of memory for every item, it increases the memory
      # requirements substantially.
      x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15)))
      f = x
      for i in range(4):
        f = keras.layers.Dense(2, name='dense_%d' % (i,))(f)
      model = keras.Model(inputs=[x], outputs=[f])
      model.compile(
          'adam', loss=keras.losses.MeanSquaredError(), metrics=['acc'])

      x = np.random.random((1, 2))
      y = np.random.random((1, 2))
      model.train_on_batch(x, y)
      out = model.predict(x)

      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      model = keras.models.load_model(saved_model_dir)

      # Chunking is an HDF5-only concern; nothing further to check for TF.
      if save_format in ['tf', 'tensorflow']:
        return
      # Check that the HDF5 files contains chunked array
      # of layer names.
      with h5py.File(saved_model_dir, 'r') as h5file:
        num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
                                if attr.startswith('layer_names')])
      # The chunking of layer names array should have happened.
      self.assertGreater(num_names_arrays, 0)
      out2 = model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)
  def test_saving_model_with_long_weights_names(self):
    """A very long weight name triggers chunking of the HDF5 `weight_names` attr."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()

    with self.cached_session():
      x = keras.Input(shape=(2,), name='nested_model_input')
      f = x
      for i in range(4):
        f = keras.layers.Dense(2, name='nested_model_dense_%d' % (i,))(f)
      # This layer name will make the `weights_name`
      # HDF5 attribute blow out of proportion.
      f = keras.layers.Dense(2, name='nested_model_output' + ('x' * (2**14)))(f)
      nested_model = keras.Model(inputs=[x], outputs=[f], name='nested_model')

      x = keras.Input(shape=(2,), name='outer_model_input')
      f = nested_model(x)
      f = keras.layers.Dense(2, name='outer_model_output')(f)

      model = keras.Model(inputs=[x], outputs=[f])
      model.compile(loss='mse', optimizer='adam', metrics=['acc'])

      x = np.random.random((1, 2))
      y = np.random.random((1, 2))
      model.train_on_batch(x, y)
      out = model.predict(x)

      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      model = keras.models.load_model(saved_model_dir)

      if save_format in ['h5', 'hdf5', 'keras']:
        # Check that the HDF5 files contains chunked array
        # of weight names.
        with h5py.File(saved_model_dir, 'r') as h5file:
          num_weight_arrays = len(
              [attr for attr in h5file['model_weights']['nested_model'].attrs
               if attr.startswith('weight_names')])
        # The chunking of layer names array should have happened.
        self.assertGreater(num_weight_arrays, 0)
      out2 = model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)
def test_model_saving_to_pre_created_h5py_file(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with ops.Graph().as_default(), self.cached_session():
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
outputs = keras.layers.Dense(3)(x)
model = keras.Model(inputs, outputs)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.Adam(),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
loaded_model = keras.models.load_model(saved_model_dir)
out1 = loaded_model.predict(x)
self.assertAllClose(out, out1, atol=1e-05)
if save_format in ['tf', 'tensorflow']:
return
# Test h5 format specifically
fd, fname = tempfile.mkstemp('.h5')
with h5py.File(fname, mode='r+') as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Test non-default options in h5
with h5py.File('_', driver='core',
backing_store=False) as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
  def test_saving_constant_initializer_with_numpy(self):
    """A Constant initializer built from a numpy array serializes cleanly."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            2,
            input_shape=(3,),
            kernel_initializer=keras.initializers.Constant(np.ones((3, 2)))))
    model.add(keras.layers.Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    model = keras.models.load_model(saved_model_dir)
def test_saving_group_naming_h5py(self):
# Test saving model with layer which name is prefix to a previous layer
# name.
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
input_layer = keras.layers.Input((None, None, 3), name='test_input')
x = keras.layers.Conv2D(1, 1, name='conv1/conv')(input_layer)
x = keras.layers.Activation('relu', name='conv1')(x)
model = keras.models.Model(inputs=input_layer, outputs=x)
model.save_weights(h5_path)
model.load_weights(h5_path)
def test_primitive_attrs_contain_no_extraneous_strings(self):
if h5py is None:
self.skipTest('h5py required to run this test')
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_shape=[2]))
model.save(saved_model_dir, save_format=save_format)
if save_format in ['tf', 'tensorflow']:
return
h5file = h5py.File(saved_model_dir, 'r')
self.assertRegex(h5file.attrs['keras_version'], r'^[\d]+\.[\d]+\.[\S]+$')
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_functional_model_with_custom_loss_and_metric(self):
    """add_loss/add_metric tensors survive save/load and evaluate identically."""
    def _make_model():
      inputs = keras.Input(shape=(4,))
      x = keras.layers.Dense(8, activation='relu')(inputs)
      outputs = keras.layers.Dense(3, activation='softmax')(x)
      model = keras.Model(inputs=inputs, outputs=outputs)
      # Extra loss/metric defined on an intermediate activation (no target).
      custom_loss = keras.layers.Lambda(lambda x: keras.backend.sum(x * x))(x)
      model.add_loss(custom_loss)
      model.add_metric(custom_loss, aggregation='mean', name='custom_loss')
      return model

    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()

    model = _make_model()
    model.compile(
        loss=keras.losses.SparseCategoricalCrossentropy(),
        optimizer=optimizers.gradient_descent_v2.SGD(),
        metrics=[keras.metrics.SparseCategoricalCrossentropy()])
    x = np.random.normal(size=(32, 4))
    y = np.random.randint(0, 3, size=32)
    model.train_on_batch(x, y)
    evaluation_results = model.evaluate(x, y)

    # Save and reload model.
    model.save(saved_model_dir, save_format=save_format)
    del model  # Prevent misuse.
    loaded_model = keras.models.load_model(saved_model_dir)
    loaded_model_eval_results = loaded_model.evaluate(x, y)

    # Assert all evaluation results are the same.
    self.assertAllClose(evaluation_results, loaded_model_eval_results, 1e-9)

    # Check correctness of the loss calculation.
    self.assertAllGreater(evaluation_results, 0.)
    evaluation_results = dict(
        zip(loaded_model.metrics_names, evaluation_results))
    # Total loss must equal compiled loss plus the add_loss contribution.
    self.assertNear(
        evaluation_results['sparse_categorical_crossentropy'] +
        evaluation_results['custom_loss'], evaluation_results['loss'], 1e-6)
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_save_uncompiled_model_with_optimizer(self):
    """An optimizer assigned directly to an uncompiled model is saved (TF format)."""
    with self.cached_session() as session:
      saved_model_dir = self._save_model_dir()
      save_format = testing_utils.get_save_format()
      model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(3,))])
      # Set the model's optimizer but don't compile. This can happen if the
      # model is trained with a custom training loop.
      model.optimizer = keras.optimizer_v2.rmsprop.RMSprop(lr=0.0001)

      if not context.executing_eagerly():
        session.run([v.initializer for v in model.variables])

      model.save(saved_model_dir, save_format=save_format)

      # Optimizer restoration is only asserted for the TF SavedModel format.
      if save_format in ['tf', 'tensorflow']:
        loaded = keras.models.load_model(saved_model_dir)
        self.assertIsInstance(loaded.optimizer,
                              keras.optimizer_v2.optimizer_v2.OptimizerV2)
  @combinations.generate(combinations.combine(mode=['eager']))
  def test_functional_model_with_getitem_op_layer(self):
    """A model whose output is a tensor `__getitem__` slice saves and loads."""
    inp = keras.Input(shape=(8))

    out = inp[:]
    model = keras.Model(
        inputs=[inp],
        outputs=out)
    batch_size = 7
    x = array_ops.stack([
        math_ops.range(8) for _ in range(batch_size)])
    args = [x]
    expected = x[:]

    self.assertAllEqual(model(args), expected)
    self.assertAllEqual(model.predict(args, batch_size=batch_size), expected)

    # Make sure it can be successfully saved and loaded
    save_format = testing_utils.get_save_format()
    saved_model_dir = self._save_model_dir()
    keras.models.save_model(model, saved_model_dir, save_format=save_format)

    loaded_model = keras.models.load_model(saved_model_dir)

    self.assertAllEqual(loaded_model(args), expected)
    self.assertAllEqual(loaded_model.predict(args, batch_size=batch_size),
                        expected)
# Factory functions to create models that will be serialized inside a Network.
def _make_graph_network(input_size, output_size):
  """Build a two-layer functional (graph) network."""
  model_input = keras.Input(input_size)
  hidden = keras.layers.Dense(8, activation='relu')(model_input)
  logits = keras.layers.Dense(output_size)(hidden)
  return keras.Model(inputs=model_input, outputs=logits)
def _make_sequential(input_size, output_size):
  """Build an unbuilt Sequential model; `input_size` is intentionally unused."""
  del input_size
  model = keras.Sequential()
  model.add(keras.layers.Dense(8, activation='relu'))
  model.add(keras.layers.Dense(output_size))
  return model
def _make_sequential_built(input_size, output_size):
  """Build a Sequential model and eagerly create its variables."""
  built_model = _make_sequential(input_size, output_size)
  built_model.build((None, input_size))
  return built_model
def _make_sequential_graph_network(input_size, output_size):
  """Sequential model with an explicit InputLayer (graph-network style)."""
  model = keras.Sequential()
  model.add(keras.layers.InputLayer(input_size))
  model.add(keras.layers.Dense(8, activation='relu'))
  model.add(keras.layers.Dense(output_size))
  return model
def _make_sequential_input_shape(input_size, output_size):
  """Sequential model shaped via `input_shape` on its first layer."""
  model = keras.Sequential()
  model.add(keras.layers.Dense(8, activation='relu',
                               input_shape=(input_size,)))
  model.add(keras.layers.Dense(output_size))
  return model
class _make_subclassed(keras.Model):  # pylint: disable=invalid-name
  """Subclassed two-layer model; snake_case name matches the factory API above."""

  def __init__(self, input_size, output_size):
    super(_make_subclassed, self).__init__()
    # Kept so get_config()/from_config() can round-trip the constructor args.
    self._config = {'input_size': input_size, 'output_size': output_size}
    self._hidden_layer = keras.layers.Dense(8, activation='relu', name='hidden')
    self._logits_layer = keras.layers.Dense(output_size, name='logits')

  def call(self, inputs):
    x = self._hidden_layer(inputs)
    return self._logits_layer(x)

  def get_config(self):
    return self._config

  @classmethod
  def from_config(cls, config):
    return cls(**config)
class _make_subclassed_built(_make_subclassed):  # pylint: disable=invalid-name
  """Like _make_subclassed, but variables are created in the constructor."""

  def __init__(self, input_size, output_size):
    super(_make_subclassed_built, self).__init__(input_size, output_size)
    self.build((None, input_size))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TestWholeModelSavingWithNesting(test.TestCase, parameterized.TestCase):
  """Tests saving a whole model that contains other models."""

  @parameterized.named_parameters([
      ('graph_network', _make_graph_network),
      ('sequential', _make_sequential),
      ('sequential_built', _make_sequential_built),
      ('sequential_graph_network', _make_sequential_graph_network),
      ('sequential_input_shape', _make_sequential_input_shape),
      ('subclassed', _make_subclassed),
      ('subclassed_built', _make_subclassed_built),
  ])
  def test_functional(self, model_fn):
    """Tests serializing a model that uses a nested model to share weights."""
    if h5py is None:
      self.skipTest('h5py required to run this test')

    def _make_model():
      # The same `base_model` instance is applied to both inputs, so both
      # branches share one set of weights.
      inputs = (keras.Input(shape=(4,), name='examples'),
                keras.Input(shape=(4,), name='neighbors'))
      base_model = model_fn(inputs[0].shape.as_list()[-1], 2)
      outputs = keras.layers.add([base_model(inputs[0]), base_model(inputs[1])])
      return keras.Model(inputs=inputs, outputs=outputs)

    with self.cached_session():
      x = (np.random.normal(size=(16, 4)).astype(np.float32),
           np.random.normal(size=(16, 4)).astype(np.float32))
      model = _make_model()
      predictions = model(x)
      # Save and reload.
      model_path = os.path.join(self.get_temp_dir(), 'model.h5')
      model.save(model_path)
      del model
      loaded_model = keras.models.load_model(
          model_path,
          custom_objects={
              '_make_subclassed': _make_subclassed,
              '_make_subclassed_built': _make_subclassed_built,
          },
          compile=False)
      self.assertAllClose(loaded_model(x), predictions, 1e-9)
class SubclassedModel(training.Model):
  """Minimal subclassed model: Dense(3) followed by Dense(1)."""

  def __init__(self):
    super(SubclassedModel, self).__init__()
    self.x_layer = keras.layers.Dense(3)
    self.b_layer = keras.layers.Dense(1)

  def call(self, a):
    return self.b_layer(self.x_layer(a))
class TestWeightSavingAndLoadingTFFormat(test.TestCase, parameterized.TestCase):
  """Tests for save_weights/load_weights using the TF checkpoint format."""

  def test_keras_optimizer_warning(self):
    """Saving weights of a model compiled with a Keras v1 optimizer warns."""
    graph = ops.Graph()
    with graph.as_default(), self.session(graph):
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss='mse', optimizer=optimizers.Adam(), metrics=['acc'])
      if not ops.executing_eagerly_outside_functions():
        model._make_train_function()
      temp_dir = self.get_temp_dir()
      prefix = os.path.join(temp_dir, 'ckpt')
      with test.mock.patch.object(logging, 'warning') as mock_log:
        model.save_weights(prefix)
        self.assertRegex(str(mock_log.call_args), 'Keras optimizer')

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_tensorflow_format_overwrite(self):
    """overwrite=True silently rewrites; overwrite=False prompts the user."""
    with self.cached_session() as session:
      model = SubclassedModel()
      temp_dir = self.get_temp_dir()
      prefix = os.path.join(temp_dir, 'ckpt')

      x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
      executing_eagerly = context.executing_eagerly()
      model(x)  # pylint: disable=not-callable
      if not executing_eagerly:
        session.run([v.initializer for v in model.variables])
      model.save_weights(prefix, save_format='tensorflow')
      model.save_weights(prefix, save_format='tensorflow', overwrite=True)
      # Reading from stdin in the test raises EOFError.
      with self.assertRaises(EOFError):
        # Indirectly tests that the user is prompted
        model.save_weights(prefix, save_format='tensorflow', overwrite=False)

  def test_no_default_session(self):
    """Weight saving/loading works with no default session in graph mode."""
    with ops.Graph().as_default():
      self.assertFalse(ops.get_default_session())
      data = np.random.random((1000, 32)).astype(np.float32)
      labels = np.random.random((1000, 10)).astype(np.float32)

      model = keras.models.Sequential([
          keras.layers.Dense(10, activation='softmax'),
          keras.layers.Dense(10, activation='softmax')])

      model.compile(optimizer=training_module.RMSPropOptimizer(0.001),
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

      model.fit(data, labels)
      fname = os.path.join(self.get_temp_dir(), 'weights', 'ckpt')
      model.save_weights(fname)
      model.load_weights(fname)

  def test_no_graph_pollution(self):
    """Repeated save/load must not add new ops to the graph."""
    with ops.get_default_graph().as_default():
      graph = ops.Graph()
      with graph.as_default(), self.session(graph) as session:
        model = SubclassedModel()
        temp_dir = self.get_temp_dir()
        prefix = os.path.join(temp_dir, 'ckpt')

        x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
        model(x)  # pylint: disable=not-callable
        session.run([v.initializer for v in model.variables])
        model.save_weights(prefix, save_format='tensorflow')
        op_count = len(graph.get_operations())
        model.save_weights(prefix, save_format='tensorflow')
        self.assertLen(graph.get_operations(), op_count)

        model.load_weights(prefix)
        op_count = len(graph.get_operations())
        model.load_weights(prefix)
        self.assertLen(graph.get_operations(), op_count)

  def _weight_loading_test_template(self, make_model_fn):
    """Shared body: save weights, scramble variables, reload, compare outputs."""
    with self.cached_session():
      model = make_model_fn()
      model.compile(
          loss='mse',
          optimizer=training_module.RMSPropOptimizer(0.1),
          metrics=['acc', keras.metrics.CategoricalAccuracy()])
      temp_dir = self.get_temp_dir()
      prefix = os.path.join(temp_dir, 'ckpt')
      train_x = np.random.random((3, 2))
      train_y = np.random.random((3,))
      x = constant_op.constant(train_x, dtype=dtypes.float32)

      model.train_on_batch(train_x, train_y)
      model.save_weights(prefix, save_format='tf')
      ref_y_before_train = model.predict(train_x)
      model.train_on_batch(train_x, train_y)
      ref_y_after_train = model.predict(train_x)
      # Scramble the variables so a successful restore is observable.
      for v in model.variables:
        self.evaluate(
            v.assign(random_ops.random_normal(shape=array_ops.shape(v))))

      self.addCleanup(shutil.rmtree, temp_dir)

      model.load_weights(prefix)
      self.assertAllClose(ref_y_before_train, self.evaluate(model(x)))

      # Test restore-on-create if this is a subclassed Model (graph Networks
      # will have already created their variables).
      load_model = make_model_fn()
      load_model.load_weights(prefix)
      self.assertAllClose(
          ref_y_before_train,
          self.evaluate(load_model(x)))
      load_model = make_model_fn()
      load_model.load_weights(prefix)
      # We need to run some of the restore ops for predict(), but not all
      # variables have been created yet (optimizer slot variables). Tests
      # incremental restore.
      load_model.predict(train_x)
      load_model.compile(
          loss='mse',
          optimizer=training_module.RMSPropOptimizer(0.1),
          metrics=['acc', keras.metrics.CategoricalAccuracy()])
      load_model.train_on_batch(train_x, train_y)
      self.assertAllClose(ref_y_after_train, self.evaluate(load_model(x)))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_graph_model(self):
    """Weight round-trip for a functional (graph) model."""
    def _make_graph_model():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3)(a)
      b = keras.layers.Dense(1)(x)
      return keras.models.Model(a, b)

    self._weight_loading_test_template(_make_graph_model)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_subclassed_model(self):
    """Weight round-trip for a subclassed model."""
    self._weight_loading_test_template(SubclassedModel)

  def _new_layer_weight_loading_test_template(
      self, first_model_fn, second_model_fn):
    """Shared body: a checkpoint loads between architecturally-related models."""
    with self.cached_session() as session:
      model = first_model_fn()
      temp_dir = self.get_temp_dir()
      prefix = os.path.join(temp_dir, 'ckpt')

      x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
      executing_eagerly = context.executing_eagerly()
      ref_y_tensor = model(x)
      if not executing_eagerly:
        session.run([v.initializer for v in model.variables])
      ref_y = self.evaluate(ref_y_tensor)
      model.save_weights(prefix)
      self.assertEqual(
          prefix,
          checkpoint_management.latest_checkpoint(temp_dir))
      # Scramble the variables so a successful restore is observable.
      for v in model.variables:
        self.evaluate(
            v.assign(random_ops.random_normal(shape=array_ops.shape(v))))

      self.addCleanup(shutil.rmtree, temp_dir)

      second_model = second_model_fn()
      status = second_model.load_weights(prefix)
      second_model(x)
      status.run_restore_ops()
      second_model.save_weights(prefix)
      # Check that the second model's checkpoint loads into the original model
      status = model.load_weights(prefix)
      status.run_restore_ops(session)
      y = self.evaluate(model(x))
      self.assertAllClose(ref_y, y)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_graph_model_added_layer(self):
    """Checkpoint loads when the restoring model appends an extra Dense layer."""
    def _save_graph_model():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3, name='first')(a)
      b = keras.layers.Dense(1, name='second')(x)
      return keras.models.Model(a, b)

    def _restore_graph_model():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3, name='first')(a)
      y = keras.layers.Dense(1, name='second')(x)
      b = keras.layers.Dense(3, name='secondjr')(y)
      return keras.models.Model(a, b)

    self._new_layer_weight_loading_test_template(
        _save_graph_model, _restore_graph_model)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_graph_model_added_no_weight_layer(self):
    """Checkpoint loads when the restoring model adds a weightless layer."""
    def _save_graph_model():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3, name='first')(a)
      b = keras.layers.Dense(1, name='second')(x)
      return keras.models.Model(a, b)

    def _restore_graph_model():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3, name='first')(a)
      b = keras.layers.Dense(1, name='second')(x)
      y = keras.layers.Dropout(rate=0.1)(b)
      return keras.models.Model(a, y)

    self._new_layer_weight_loading_test_template(
        _save_graph_model, _restore_graph_model)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_subclassed_model_added_layer(self):
    """Checkpoint loads between subclassed models with differing layer counts."""

    class SubclassedModelRestore(training.Model):

      def __init__(self):
        super(SubclassedModelRestore, self).__init__()
        self.x_layer = keras.layers.Dense(3)
        self.y_layer = keras.layers.Dense(3)
        self.b_layer = keras.layers.Dense(1)

      def call(self, a):
        return self.b_layer(self.y_layer(self.x_layer(a)))

    self._new_layer_weight_loading_test_template(
        SubclassedModel, SubclassedModelRestore)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_incompatible_checkpoint(self):
    """Loading a checkpoint with no matching objects raises AssertionError."""
    save_path = trackable.Checkpoint().save(
        os.path.join(self.get_temp_dir(), 'ckpt'))
    m = DummySubclassModel()
    with self.assertRaisesRegex(AssertionError, 'Nothing to load'):
      m.load_weights(save_path)
    m.dense = keras.layers.Dense(2)
    m.dense(constant_op.constant([[1.]]))
    with self.assertRaisesRegex(AssertionError,
                                'Nothing except the root object matched'):
      m.load_weights(save_path)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_directory_passed(self):
    """A trailing-slash (directory-like) prefix still works for save/load."""
    with self.cached_session():
      m = DummySubclassModel()
      v = m.add_weight(name='v', shape=[])
      self.evaluate(v.assign(42.))
      prefix = os.path.join(self.get_temp_dir(), str(uuid.uuid4()), 'ckpt/')
      m.save_weights(prefix)
      self.evaluate(v.assign(2.))
      m.load_weights(prefix)
      self.assertEqual(42., self.evaluate(v))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_relative_path(self):
    """Relative checkpoint prefixes (plain, subdir, trailing slash) all work."""
    with self.cached_session():
      m = DummySubclassModel()
      v = m.add_weight(name='v', shape=[])
      os.chdir(self.get_temp_dir())

      prefix = 'ackpt'
      self.evaluate(v.assign(42.))
      m.save_weights(prefix)
      self.assertTrue(file_io.file_exists('ackpt.index'))
      self.evaluate(v.assign(1.))
      m.load_weights(prefix)
      self.assertEqual(42., self.evaluate(v))

      prefix = 'subdir/ackpt'
      self.evaluate(v.assign(43.))
      m.save_weights(prefix)
      self.assertTrue(file_io.file_exists('subdir/ackpt.index'))
      self.evaluate(v.assign(2.))
      m.load_weights(prefix)
      self.assertEqual(43., self.evaluate(v))

      prefix = 'ackpt/'
      self.evaluate(v.assign(44.))
      m.save_weights(prefix)
      self.assertTrue(file_io.file_exists('ackpt/.index'))
      self.evaluate(v.assign(3.))
      m.load_weights(prefix)
      self.assertEqual(44., self.evaluate(v))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_nonexistent_prefix_directory(self):
    """save_weights creates missing parent directories of the prefix."""
    with self.cached_session():
      m = DummySubclassModel()
      v = m.add_weight(name='v', shape=[])
      self.evaluate(v.assign(42.))
      prefix = os.path.join(self.get_temp_dir(), str(uuid.uuid4()), 'bckpt')
      m.save_weights(prefix)
      self.evaluate(v.assign(2.))
      m.load_weights(prefix)
      self.assertEqual(42., self.evaluate(v))
class DummySubclassModel(training.Model):
  """Empty subclassed model used by checkpoint-loading edge-case tests."""
  pass
if __name__ == '__main__':
  # Run all tests in this file under the TensorFlow test runner.
  test.main()
# TODO: add test coverage
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model saving in the HDF5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import uuid
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training as training_module
from tensorflow.python.training.tracking import util as trackable
# h5py is an optional dependency; tests that need it skip when it is absent.
try:
  import h5py  # pylint:disable=g-import-not-at-top
except ImportError:
  h5py = None
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
  @keras_parameterized.run_with_all_saved_model_formats
  def test_weight_loading(self):
    """get/set_weights round-trip and save_weights/load_weights round-trip."""
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    saved_model_dir = os.path.join(temp_dir, 'saved_model')
    save_format = testing_utils.get_save_format()
    with self.cached_session():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3)(a)
      b = keras.layers.Dense(1)(x)
      model = keras.models.Model(a, b)

      x = np.random.random((3, 2))
      ref_y = model.predict(x)
      weights = model.get_weights()
      model.set_weights(weights)
      y = model.predict(x)
      self.assertAllClose(ref_y, y)

      # Mismatched weight lists (wrong count, wrong order) must be rejected.
      with self.assertRaises(ValueError):
        model.set_weights(weights[1:])
      with self.assertRaises(ValueError):
        model.set_weights(weights[::-1])

      model.save_weights(saved_model_dir, save_format=save_format)
      model.load_weights(saved_model_dir)
      y = model.predict(x)
      self.assertAllClose(ref_y, y)
  def test_weight_preprocessing(self):
    """preprocess_weights_for_loading accepts Keras-1-era weight layouts.

    Only checks the conversion runs without error for each layer type;
    numerical correctness is not asserted here.
    """
    input_dim = 3
    output_dim = 3
    size = 2
    # Each case: (layer, Keras-1-style weight list, input shape to build with).
    cases = [
        [
            (keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
            [np.random.random((2, 1)), np.random.random((2, 1))],
            (None, 3, 2),
        ],
        [
            (keras.layers.TimeDistributed(keras.layers.Dense(1))),
            [np.random.random((2, 1)), np.random.random((1,))],
            (None, 3, 2),
        ],
        [
            (keras.layers.Conv1D(output_dim, size, use_bias=False)),
            [np.random.random((output_dim, input_dim, size, 1))],
            (None, 4, input_dim),
        ],
        [
            (keras.layers.Conv2D(output_dim, size,
                                 use_bias=False, data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size))],
            (None, input_dim, 4, 4),
        ],
        [
            (keras.layers.Conv2DTranspose(output_dim, size,
                                          use_bias=False,
                                          data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size))],
            (None, input_dim, 4, 4),
        ],
        [
            (keras.layers.Conv2DTranspose(output_dim, size,
                                          use_bias=False,
                                          data_format='channels_last')),
            [np.random.random((size, size, input_dim, output_dim))],
            (None, 4, 4, input_dim),
        ],
        [
            (keras.layers.Conv3D(output_dim, size,
                                 use_bias=False, data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size, size))],
            (None, input_dim, 4, 4, 4),
        ],
        [
            (keras.layers.GRUV1(output_dim)),
            [np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,))],
            (None, 4, input_dim),
        ],
        [
            (keras.layers.LSTMV1(output_dim)),
            [np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,))],
            (None, 4, input_dim),
        ],
    ]
    for layer, weights, input_shape in cases:
      layer.build(input_shape)
      _ = hdf5_format.preprocess_weights_for_loading(
          layer, weights, original_keras_version='1')

    model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
    _ = hdf5_format.preprocess_weights_for_loading(
        model, model.weights, original_keras_version='1')

    x = keras.Input((2,))
    y = keras.layers.Dense(2)(x)
    model = keras.models.Model(x, y)
    _ = hdf5_format.preprocess_weights_for_loading(
        model, model.weights, original_keras_version='1')
  @parameterized.named_parameters(
      ('gru', keras.layers.GRU, {
          'units': 2,
          'input_shape': (3, 5)
      }),
      ('gru_with_reset_after', keras.layers.GRU, {
          'units': 2,
          'input_shape': (3, 5),
          'reset_after': True
      }),
      ('lstm', keras.layers.LSTM, {
          'units': 2,
          'input_shape': (3, 5)
      }),
      ('cudnngru', keras.layers.CuDNNGRU, {
          'units': 2,
          'input_shape': (3, 5)
      }),
      ('cudnnlstm', keras.layers.CuDNNLSTM, {
          'units': 2,
          'input_shape': (3, 5)
      }))
  def test_preprocess_weights_for_loading_rnn_should_be_idempotent(
      self, layer_class, layer_args):
    """Running preprocess_weights_for_loading on current weights is a no-op."""
    with self.cached_session():
      layer = layer_class(**layer_args)
      layer.build(input_shape=layer_args.get('input_shape'))
      weights1 = layer.get_weights()
      weights2 = hdf5_format.preprocess_weights_for_loading(
          layer, weights1)
      _ = [
          self.assertAllClose(x, y, rtol=1e-05)
          for (x, y) in zip(weights1, weights2)
      ]
def test_sequential_weight_loading(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
  @keras_parameterized.run_with_all_saved_model_formats
  def test_nested_model_weight_loading(self):
    """Weights of nested (model-in-model) structures survive a round trip."""
    save_format = testing_utils.get_save_format()
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    saved_model_dir = os.path.join(temp_dir, 'saved_model')

    batch_size = 5
    shape = (None, None, 3)

    with self.cached_session():

      def gen_model():
        # Inner model built from two instances of the same Sequential
        # factory, wrapped once more in an outer functional model.

        def seq_model():
          model = keras.models.Sequential([
              keras.layers.Conv2D(3, 1, input_shape=shape),
              keras.layers.BatchNormalization()])
          return model

        x = inner_inputs = keras.layers.Input((None, None, 3))
        x = seq_model()(x)
        x = seq_model()(x)
        inner_model = keras.models.Model(inner_inputs, x)

        inputs = keras.layers.Input(shape)
        return keras.models.Model(inputs, inner_model(inputs))

      model = gen_model()
      x = np.random.random((batch_size, 1, 1, 3))
      ref_y = model.predict(x)

      model.save_weights(saved_model_dir, save_format=save_format)

      # A structurally identical fresh model must reproduce the reference
      # predictions after loading the saved weights.
      model = gen_model()
      model.load_weights(saved_model_dir)
      y = model.predict(x)

      self.assertAllClose(y, ref_y)
  def test_sequential_weight_loading_group_name_with_incorrect_length(self):
    """Mismatched weight counts raise by-name, unless skip_mismatch is set."""
    if h5py is None:
      return

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    h5_path = os.path.join(temp_dir, 'test.h5')

    num_hidden = 5
    input_dim = 3
    num_classes = 2
    with self.cached_session():
      # Reference model: layer 'd1' has both kernel and bias (2 weights).
      ref_model = keras.models.Sequential()
      ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
                                       name='d1'))
      ref_model.add(keras.layers.Dense(num_classes, name='d2'))
      ref_model.compile(loss=keras.losses.MSE,
                        optimizer='rmsprop',
                        metrics=[keras.metrics.categorical_accuracy])

      f_ref_model = h5py.File(h5_path, 'w')
      hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)

      f_model = h5py.File(h5_path, 'r')
      # Loading model: 'd1' has use_bias=False, i.e. only 1 weight — a count
      # mismatch against the 2 saved weights.
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(num_hidden, use_bias=False,
                                   input_dim=input_dim, name='d1'))
      model.add(keras.layers.Dense(num_classes, name='d2'))
      model.compile(loss=keras.losses.MSE,
                    optimizer='rmsprop',
                    metrics=[keras.metrics.categorical_accuracy])
      with self.assertRaisesRegex(
          ValueError, r'Layer #0 \(named \"d1\"\) expects 1 '
          r'weight\(s\), but the saved weights have 2 '
          r'element\(s\)\.'):
        hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)

      # With skip_mismatch=True the mismatched layer is skipped, but the
      # matching layer 'd2' is still loaded from the reference weights.
      hdf5_format.load_weights_from_hdf5_group_by_name(
          f_model, model.layers, skip_mismatch=True)
      self.assertAllClose(keras.backend.get_value(ref_model.layers[1].kernel),
                          keras.backend.get_value(model.layers[1].kernel))
  def test_sequential_weight_loading_group_name_with_incorrect_shape(self):
    """Mismatched weight shapes raise by-name, unless skip_mismatch is set."""
    if h5py is None:
      return

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    h5_path = os.path.join(temp_dir, 'test.h5')

    num_hidden = 5
    input_dim = 3
    num_classes = 2
    with ops.Graph().as_default(), self.cached_session():
      ref_model = keras.models.Sequential()
      ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
                                       name='d1'))
      ref_model.add(keras.layers.Dense(num_classes, name='d2'))
      ref_model.compile(loss=keras.losses.MSE,
                        optimizer=keras.optimizers.RMSprop(lr=0.0001),
                        metrics=[keras.metrics.categorical_accuracy])

      f_ref_model = h5py.File(h5_path, 'w')
      # Give 'd2' a recognizable bias value so the skip_mismatch load below
      # can be verified to have actually copied it.
      keras.backend.set_value(ref_model.layers[1].bias, [3.5] * num_classes)
      hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)

      f_model = h5py.File(h5_path, 'r')
      # Loading model: 'd1' is wider (num_hidden + 5), so its kernel shape
      # (3, 10) conflicts with the saved shape (3, 5).
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(num_hidden + 5, input_dim=input_dim,
                                   name='d1'))
      model.add(keras.layers.Dense(num_classes, name='d2'))
      model.compile(loss=keras.losses.MSE,
                    optimizer=keras.optimizers.RMSprop(lr=0.0001),
                    metrics=[keras.metrics.categorical_accuracy])
      with self.assertRaisesRegex(
          ValueError, r'Layer #0 \(named "d1"\), weight '
          r'<tf\.Variable \'d1_1\/kernel:0\' '
          r'shape=\(3, 10\) dtype=float32> has '
          r'shape \(3, 10\), but the saved weight has '
          r'shape \(3, 5\)\.'):
        hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)

      # Mismatched 'd1' is skipped; matching 'd2' still gets the saved bias.
      hdf5_format.load_weights_from_hdf5_group_by_name(
          f_model, model.layers, skip_mismatch=True)
      self.assertAllClose([3.5] * num_classes,
                          keras.backend.get_value(model.layers[1].bias))
@keras_parameterized.run_with_all_saved_model_formats
class TestWholeModelSaving(keras_parameterized.TestCase):
  """End-to-end save/load tests for whole Keras models.

  The class decorator runs every test once per saved-model format;
  `testing_utils.get_save_format()` reports the format of the current run
  ('h5' or 'tf').
  """

  def _save_model_dir(self, dirname='saved_model'):
    """Returns a fresh save path under a temp dir removed on teardown."""
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    return os.path.join(temp_dir, dirname)

  def _assert_same_weights_and_metrics(self, model, loaded_model):
    """Checks that the loaded weights and metrics are the same as the original.

    Args:
      model: original model
      loaded_model: loaded model
    """
    self.assertAllClose(model.weights, loaded_model.weights)

    if loaded_model.optimizer:
      if testing_utils.get_save_format() == 'tf':
        # TODO(b/153110928): Keras TF format doesn't restore optimizer weights
        # currently.
        return
      self.assertAllClose(model.optimizer.weights,
                          loaded_model.optimizer.weights)

    # In V1/Graph mode, the model isn't built, so the metrics are not loaded
    # immediately (requires model to be called on some data before building
    # metrics).
    check_metrics = tf2.enabled() and context.executing_eagerly()

    if check_metrics:
      self.assertAllEqual([m.name for m in model.metrics],
                          [m.name for m in loaded_model.metrics])

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_save_and_load(self):
    """Full round trip: weights, metrics, predictions and evaluation agree."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()

    if save_format == 'h5' and testing_utils.get_model_type() == 'subclass':
      return  # HDF5 format currently does not allow saving subclassed models.

    with self.cached_session():
      model = testing_utils.get_model_from_layers(
          [keras.layers.Dense(2),
           keras.layers.RepeatVector(3),
           keras.layers.TimeDistributed(keras.layers.Dense(3))],
          input_shape=(3,))
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizer_v2.rmsprop.RMSprop(lr=0.0001),
          metrics=[
              keras.metrics.categorical_accuracy,
              keras.metrics.CategoricalCrossentropy(
                  name='cce', label_smoothing=constant_op.constant(0.2)),
          ],
          weighted_metrics=[
              keras.metrics.categorical_crossentropy,
              keras.metrics.CategoricalCrossentropy(
                  name='cce', label_smoothing=constant_op.constant(0.2)),
          ],
          sample_weight_mode='temporal')

      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      keras.models.save_model(model, saved_model_dir, save_format=save_format)

      loaded_model = keras.models.load_model(saved_model_dir)
      self._assert_same_weights_and_metrics(model, loaded_model)

      out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      eval_out = model.evaluate(x, y)
      eval_out2 = loaded_model.evaluate(x, y)
      self.assertArrayNear(eval_out, eval_out2, 0.001)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_sequential_model_saving_without_input_shape(self):
    """A deferred-build Sequential model can still be saved and reloaded."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model.compile(
          loss=keras.losses.MSE,
          optimizer='rmsprop',
          metrics=[
              keras.metrics.categorical_accuracy,
              keras.metrics.CategoricalAccuracy(name='cat_acc')
          ],
          weighted_metrics=[
              keras.metrics.categorical_accuracy,
              keras.metrics.CategoricalAccuracy(name='cat_acc2')
          ],
          sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      model.save(saved_model_dir, save_format=save_format)

      new_model = keras.models.load_model(saved_model_dir)

      self._assert_same_weights_and_metrics(model, new_model)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_sequential_model_saving_without_compile(self):
    """An uncompiled model saves and reloads with identical predictions."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))

      x = np.random.random((1, 3))
      out = model.predict(x)

      # Save the model without any compilation or training.
      keras.models.save_model(model, saved_model_dir, save_format=save_format)

      new_model = keras.models.load_model(saved_model_dir)
      self._assert_same_weights_and_metrics(model, new_model)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_sequential_model_saving_2(self):
    """Custom optimizer and loss round-trip via `custom_objects`."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    with ops.Graph().as_default(), self.cached_session():
      # test with custom optimizer, loss

      class CustomOp(keras.optimizers.RMSprop):
        pass

      def custom_loss(y_true, y_pred):
        return keras.losses.mse(y_true, y_pred)

      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])

      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      keras.models.save_model(model, saved_model_dir, save_format=save_format)

      new_model = keras.models.load_model(
          saved_model_dir,
          custom_objects={'CustomOp': CustomOp,
                          'custom_loss': custom_loss})
      self._assert_same_weights_and_metrics(model, new_model)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_saving_without_compilation(self):
    """Save/load must not require the model to have been compiled first."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(2, input_shape=(3,)))
    model.add(keras.layers.Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    model = keras.models.load_model(saved_model_dir)

  def test_saving_with_tf_optimizer(self):
    """A model compiled with a TF-v1 optimizer can be saved and reloaded."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(2, input_shape=(3,)))
    model.add(keras.layers.Dense(3))
    model.compile(loss='mse',
                  optimizer=training_module.AdadeltaOptimizer(0.1),
                  metrics=['acc'])

    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    model = keras.models.load_model(saved_model_dir)

  def test_saving_right_after_compilation(self):
    """Saving works immediately after compile, before any training step."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
      if not ops.executing_eagerly_outside_functions():
        model._make_train_function()
      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      model = keras.models.load_model(saved_model_dir)

  def test_saving_lambda_numpy_array_arguments(self):
    """Numpy-array Lambda arguments are serialized and restored intact."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()

    if h5py is None:
      self.skipTest('h5py required to run this test')

    mean = np.random.random((4, 2, 3))
    std = np.abs(np.random.random((4, 2, 3))) + 1e-5
    inputs = keras.layers.Input(shape=(4, 2, 3))
    output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
                                 arguments={'mu': mean, 'std': std})(inputs)
    model = keras.models.Model(inputs, output)
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    keras.models.save_model(model, saved_model_dir, save_format=save_format)

    model = keras.models.load_model(saved_model_dir)

    self.assertAllClose(mean, model.layers[1].arguments['mu'])
    self.assertAllClose(std, model.layers[1].arguments['std'])

  def test_saving_model_with_long_layer_names(self):
    """Very long layer names are chunked to fit HDF5 attribute limits."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    with self.cached_session():
      # This layer name will make the `layers_name` HDF5 attribute blow
      # out of proportion. Note that it fits into the internal HDF5
      # attribute memory limit on its own but because h5py converts
      # the list of layer names into numpy array, which uses the same
      # amount of memory for every item, it increases the memory
      # requirements substantially.
      x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15)))
      f = x
      for i in range(4):
        f = keras.layers.Dense(2, name='dense_%d' % (i,))(f)
      model = keras.Model(inputs=[x], outputs=[f])
      model.compile(
          'adam', loss=keras.losses.MeanSquaredError(), metrics=['acc'])

      x = np.random.random((1, 2))
      y = np.random.random((1, 2))
      model.train_on_batch(x, y)
      out = model.predict(x)

      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      model = keras.models.load_model(saved_model_dir)

      if save_format in ['tf', 'tensorflow']:
        return
      # Check that the HDF5 files contains chunked array
      # of layer names.
      with h5py.File(saved_model_dir, 'r') as h5file:
        num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
                                if attr.startswith('layer_names')])
      # The chunking of layer names array should have happened.
      self.assertGreater(num_names_arrays, 0)
      out2 = model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_saving_model_with_long_weights_names(self):
    """Very long weight names are chunked to fit HDF5 attribute limits."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()

    with self.cached_session():
      x = keras.Input(shape=(2,), name='nested_model_input')
      f = x
      for i in range(4):
        f = keras.layers.Dense(2, name='nested_model_dense_%d' % (i,))(f)
      # This layer name will make the `weights_name`
      # HDF5 attribute blow out of proportion.
      f = keras.layers.Dense(2, name='nested_model_output' + ('x' * (2**14)))(f)
      nested_model = keras.Model(inputs=[x], outputs=[f], name='nested_model')

      x = keras.Input(shape=(2,), name='outer_model_input')
      f = nested_model(x)
      f = keras.layers.Dense(2, name='outer_model_output')(f)

      model = keras.Model(inputs=[x], outputs=[f])
      model.compile(loss='mse', optimizer='adam', metrics=['acc'])

      x = np.random.random((1, 2))
      y = np.random.random((1, 2))
      model.train_on_batch(x, y)
      out = model.predict(x)

      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      model = keras.models.load_model(saved_model_dir)

      if save_format in ['h5', 'hdf5', 'keras']:
        # Check that the HDF5 files contains chunked array
        # of weight names.
        with h5py.File(saved_model_dir, 'r') as h5file:
          num_weight_arrays = len(
              [attr for attr in h5file['model_weights']['nested_model'].attrs
               if attr.startswith('weight_names')])
        # The chunking of layer names array should have happened.
        self.assertGreater(num_weight_arrays, 0)
      out2 = model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_model_saving_to_pre_created_h5py_file(self):
    """Models can be saved directly into a caller-supplied h5py.File."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    with ops.Graph().as_default(), self.cached_session():
      inputs = keras.Input(shape=(3,))
      x = keras.layers.Dense(2)(inputs)
      outputs = keras.layers.Dense(3)(x)

      model = keras.Model(inputs, outputs)
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.Adam(),
          metrics=[
              keras.metrics.categorical_accuracy,
              keras.metrics.CategoricalAccuracy()
          ])
      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)

      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      loaded_model = keras.models.load_model(saved_model_dir)
      out1 = loaded_model.predict(x)
      self.assertAllClose(out, out1, atol=1e-05)
      if save_format in ['tf', 'tensorflow']:
        return

      # Test h5 format specifically
      fd, fname = tempfile.mkstemp('.h5')
      with h5py.File(fname, mode='r+') as h5file:
        keras.models.save_model(model, h5file)
        loaded_model = keras.models.load_model(h5file)
        out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      # Test non-default options in h5
      with h5py.File('_', driver='core',
                     backing_store=False) as h5file:
        keras.models.save_model(model, h5file)
        loaded_model = keras.models.load_model(h5file)
        out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      # Cleanup
      os.close(fd)
      os.remove(fname)

  def test_model_saving_to_new_dir_path(self):
    """Saving creates intermediate directories that do not yet exist."""
    saved_model_dir = os.path.join(self._save_model_dir(), 'newdir',
                                   'saved_model')
    save_format = testing_utils.get_save_format()
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))

      x = np.random.random((1, 3))
      out = model.predict(x)

      keras.models.save_model(model, saved_model_dir, save_format=save_format)

      new_model = keras.models.load_model(saved_model_dir)
      self._assert_same_weights_and_metrics(model, new_model)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_model_raise_exception_with_failed_saving(self):
    """Saving to a path already held open as an HDF5 file raises OSError."""
    if h5py is None:
      self.skipTest('h5py required to run this test')

    saved_model_dir = self._save_model_dir()
    saved_model_path = os.path.join(saved_model_dir, 'saved_model.h5')

    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))

      x = np.random.random((1, 3))
      with self.assertRaises(OSError):
        # `f` keeps the target file locked open, so save_model cannot write.
        with h5py.File(saved_model_path, 'w') as f:
          keras.models.save_model(model, saved_model_path)

  def test_saving_constant_initializer_with_numpy(self):
    """A numpy-backed Constant initializer must serialize and load."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()

    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            2,
            input_shape=(3,),
            kernel_initializer=keras.initializers.Constant(np.ones((3, 2)))))
    model.add(keras.layers.Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    model = keras.models.load_model(saved_model_dir)

  def test_saving_group_naming_h5py(self):
    # Test saving model with layer which name is prefix to a previous layer
    # name.

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    h5_path = os.path.join(temp_dir, 'test.h5')

    input_layer = keras.layers.Input((None, None, 3), name='test_input')
    x = keras.layers.Conv2D(1, 1, name='conv1/conv')(input_layer)
    x = keras.layers.Activation('relu', name='conv1')(x)
    model = keras.models.Model(inputs=input_layer, outputs=x)

    model.save_weights(h5_path)
    model.load_weights(h5_path)

  def test_primitive_attrs_contain_no_extraneous_strings(self):
    """The keras_version HDF5 attribute must be a clean version string."""
    if h5py is None:
      self.skipTest('h5py required to run this test')

    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(1, input_shape=[2]))
    model.save(saved_model_dir, save_format=save_format)
    if save_format in ['tf', 'tensorflow']:
      return

    h5file = h5py.File(saved_model_dir, 'r')
    self.assertRegex(h5file.attrs['keras_version'], r'^[\d]+\.[\d]+\.[\S]+$')

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_functional_model_with_custom_loss_and_metric(self):
    """add_loss / add_metric on a functional model survive save/load."""

    def _make_model():
      inputs = keras.Input(shape=(4,))
      x = keras.layers.Dense(8, activation='relu')(inputs)
      outputs = keras.layers.Dense(3, activation='softmax')(x)
      model = keras.Model(inputs=inputs, outputs=outputs)
      # An activity-regularizer-style loss attached directly to the model.
      custom_loss = keras.layers.Lambda(lambda x: keras.backend.sum(x * x))(x)
      model.add_loss(custom_loss)
      model.add_metric(custom_loss, aggregation='mean', name='custom_loss')
      return model

    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()

    model = _make_model()
    model.compile(
        loss=keras.losses.SparseCategoricalCrossentropy(),
        optimizer=optimizers.gradient_descent_v2.SGD(),
        metrics=[keras.metrics.SparseCategoricalCrossentropy()])
    x = np.random.normal(size=(32, 4))
    y = np.random.randint(0, 3, size=32)
    model.train_on_batch(x, y)
    evaluation_results = model.evaluate(x, y)
    # Save and reload model.
    model.save(saved_model_dir, save_format=save_format)
    del model  # Prevent misuse.
    loaded_model = keras.models.load_model(saved_model_dir)
    loaded_model_eval_results = loaded_model.evaluate(x, y)
    # Assert all evaluation results are the same.
    self.assertAllClose(evaluation_results, loaded_model_eval_results, 1e-9)
    # Check correctness of the loss calculation.
    self.assertAllGreater(evaluation_results, 0.)
    evaluation_results = dict(
        zip(loaded_model.metrics_names, evaluation_results))
    self.assertNear(
        evaluation_results['sparse_categorical_crossentropy'] +
        evaluation_results['custom_loss'], evaluation_results['loss'], 1e-6)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_save_uncompiled_model_with_optimizer(self):
    """An optimizer set without compile() is saved in the TF format."""
    with self.cached_session() as session:
      saved_model_dir = self._save_model_dir()
      save_format = testing_utils.get_save_format()
      model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(3,))])
      # Set the model's optimizer but don't compile. This can happen if the
      # model is trained with a custom training loop.
      model.optimizer = keras.optimizer_v2.rmsprop.RMSprop(lr=0.0001)
      if not context.executing_eagerly():
        session.run([v.initializer for v in model.variables])
      model.save(saved_model_dir, save_format=save_format)

      if save_format in ['tf', 'tensorflow']:
        loaded = keras.models.load_model(saved_model_dir)
        self.assertIsInstance(loaded.optimizer,
                              keras.optimizer_v2.optimizer_v2.OptimizerV2)

  @combinations.generate(combinations.combine(mode=['eager']))
  def test_functional_model_with_getitem_op_layer(self):
    """A model whose output is produced by __getitem__ slicing round-trips."""
    inp = keras.Input(shape=(8))

    out = inp[:]
    model = keras.Model(
        inputs=[inp],
        outputs=out)
    batch_size = 7
    x = array_ops.stack([
        math_ops.range(8) for _ in range(batch_size)])
    args = [x]
    expected = x[:]

    self.assertAllEqual(model(args), expected)
    self.assertAllEqual(model.predict(args, batch_size=batch_size), expected)

    # Make sure it can be successfully saved and loaded
    save_format = testing_utils.get_save_format()
    saved_model_dir = self._save_model_dir()
    keras.models.save_model(model, saved_model_dir, save_format=save_format)

    loaded_model = keras.models.load_model(saved_model_dir)

    self.assertAllEqual(loaded_model(args), expected)
    self.assertAllEqual(loaded_model.predict(args, batch_size=batch_size),
                        expected)
# Factory functions to create models that will be serialized inside a Network.
def _make_graph_network(input_size, output_size):
  """Functional-API model: Dense(8, relu) followed by Dense(output_size)."""
  net_in = keras.Input(input_size)
  hidden = keras.layers.Dense(8, activation='relu')(net_in)
  net_out = keras.layers.Dense(output_size)(hidden)
  return keras.Model(inputs=net_in, outputs=net_out)
def _make_sequential(input_size, output_size):
  """Unbuilt Sequential model; `input_size` is intentionally unused."""
  del input_size
  stack = [
      keras.layers.Dense(8, activation='relu'),
      keras.layers.Dense(output_size),
  ]
  return keras.Sequential(stack)
def _make_sequential_built(input_size, output_size):
  """Sequential model whose variables are already created via build()."""
  net = _make_sequential(input_size, output_size)
  net.build((None, input_size))
  return net
def _make_sequential_graph_network(input_size, output_size):
  """Sequential model that is a graph network thanks to its InputLayer."""
  net = keras.Sequential([keras.layers.InputLayer(input_size)])
  net.add(keras.layers.Dense(8, activation='relu'))
  net.add(keras.layers.Dense(output_size))
  return net
def _make_sequential_input_shape(input_size, output_size):
  """Sequential model whose first layer declares `input_shape`."""
  net = keras.Sequential()
  net.add(keras.layers.Dense(8, activation='relu', input_shape=(input_size,)))
  net.add(keras.layers.Dense(output_size))
  return net
class _make_subclassed(keras.Model):  # pylint: disable=invalid-name
  """Subclassed model; lower-case name so it reads like the factories above."""

  def __init__(self, input_size, output_size):
    super(_make_subclassed, self).__init__()
    # Kept verbatim for get_config() so the model can be rebuilt from config.
    self._config = {'input_size': input_size, 'output_size': output_size}
    self._hidden_layer = keras.layers.Dense(8, activation='relu', name='hidden')
    self._logits_layer = keras.layers.Dense(output_size, name='logits')

  def call(self, inputs):
    x = self._hidden_layer(inputs)
    return self._logits_layer(x)

  def get_config(self):
    return self._config

  @classmethod
  def from_config(cls, config):
    return cls(**config)
class _make_subclassed_built(_make_subclassed):  # pylint: disable=invalid-name
  """Same as _make_subclassed, but variables are created eagerly via build."""

  def __init__(self, input_size, output_size):
    super(_make_subclassed_built, self).__init__(input_size, output_size)
    self.build((None, input_size))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TestWholeModelSavingWithNesting(test.TestCase, parameterized.TestCase):
  """Tests saving a whole model that contains other models."""

  @parameterized.named_parameters([
      ('graph_network', _make_graph_network),
      ('sequential', _make_sequential),
      ('sequential_built', _make_sequential_built),
      ('sequential_graph_network', _make_sequential_graph_network),
      ('sequential_input_shape', _make_sequential_input_shape),
      ('subclassed', _make_subclassed),
      ('subclassed_built', _make_subclassed_built),
  ])
  def test_functional(self, model_fn):
    """Tests serializing a model that uses a nested model to share weights."""
    if h5py is None:
      self.skipTest('h5py required to run this test')

    def _make_model():
      # Both inputs flow through the SAME base model instance, so its
      # weights are shared between the two branches.
      inputs = (keras.Input(shape=(4,), name='examples'),
                keras.Input(shape=(4,), name='neighbors'))
      base_model = model_fn(inputs[0].shape.as_list()[-1], 2)
      outputs = keras.layers.add([base_model(inputs[0]), base_model(inputs[1])])
      return keras.Model(inputs=inputs, outputs=outputs)

    with self.cached_session():
      x = (np.random.normal(size=(16, 4)).astype(np.float32),
           np.random.normal(size=(16, 4)).astype(np.float32))
      model = _make_model()
      predictions = model(x)
      # Save and reload.
      model_path = os.path.join(self.get_temp_dir(), 'model.h5')
      model.save(model_path)
      del model
      loaded_model = keras.models.load_model(
          model_path,
          custom_objects={
              '_make_subclassed': _make_subclassed,
              '_make_subclassed_built': _make_subclassed_built,
          },
          compile=False)
      self.assertAllClose(loaded_model(x), predictions, 1e-9)
class SubclassedModel(training.Model):
  """Minimal subclassed model: Dense(3) followed by Dense(1)."""

  def __init__(self):
    super(SubclassedModel, self).__init__()
    self.x_layer = keras.layers.Dense(3)
    self.b_layer = keras.layers.Dense(1)

  def call(self, a):
    return self.b_layer(self.x_layer(a))
class TestWeightSavingAndLoadingTFFormat(test.TestCase, parameterized.TestCase):
  def test_keras_optimizer_warning(self):
    """Saving TF-format weights with a Keras-v1 optimizer logs a warning."""
    graph = ops.Graph()
    with graph.as_default(), self.session(graph):
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss='mse', optimizer=optimizers.Adam(), metrics=['acc'])
      if not ops.executing_eagerly_outside_functions():
        model._make_train_function()
      temp_dir = self.get_temp_dir()
      prefix = os.path.join(temp_dir, 'ckpt')
      with test.mock.patch.object(logging, 'warning') as mock_log:
        model.save_weights(prefix)
        # The warning must mention the Keras optimizer (its state isn't saved
        # in TF-format checkpoints).
        self.assertRegex(str(mock_log.call_args), 'Keras optimizer')
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_tensorflow_format_overwrite(self):
    """overwrite=True rewrites silently; overwrite=False prompts the user."""
    with self.cached_session() as session:
      model = SubclassedModel()
      temp_dir = self.get_temp_dir()
      prefix = os.path.join(temp_dir, 'ckpt')

      x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
      executing_eagerly = context.executing_eagerly()
      model(x)  # pylint: disable=not-callable
      if not executing_eagerly:
        session.run([v.initializer for v in model.variables])
      model.save_weights(prefix, save_format='tensorflow')
      model.save_weights(prefix, save_format='tensorflow', overwrite=True)
      with self.assertRaises(EOFError):
        # Indirectly tests that the user is prompted: reading the answer from
        # stdin hits EOF in the test environment.
        model.save_weights(prefix, save_format='tensorflow', overwrite=False)
  def test_no_default_session(self):
    """TF-format weight save/load works with no default session installed."""
    with ops.Graph().as_default():
      self.assertFalse(ops.get_default_session())
      data = np.random.random((1000, 32)).astype(np.float32)
      labels = np.random.random((1000, 10)).astype(np.float32)

      model = keras.models.Sequential([
          keras.layers.Dense(10, activation='softmax'),
          keras.layers.Dense(10, activation='softmax')])

      model.compile(optimizer=training_module.RMSPropOptimizer(0.001),
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

      model.fit(data, labels)
      # Nested, not-yet-existing directory exercises path creation too.
      fname = os.path.join(self.get_temp_dir(), 'weights', 'ckpt')
      model.save_weights(fname)
      model.load_weights(fname)
  def test_no_graph_pollution(self):
    """Repeated save/load must not add new ops to the graph."""
    with ops.get_default_graph().as_default():
      graph = ops.Graph()
      with graph.as_default(), self.session(graph) as session:
        model = SubclassedModel()
        temp_dir = self.get_temp_dir()
        prefix = os.path.join(temp_dir, 'ckpt')

        x = constant_op.constant(np.random.random((3, 2)),
                                 dtype=dtypes.float32)
        model(x)  # pylint: disable=not-callable
        session.run([v.initializer for v in model.variables])
        model.save_weights(prefix, save_format='tensorflow')
        # A second save must reuse the ops created by the first.
        op_count = len(graph.get_operations())
        model.save_weights(prefix, save_format='tensorflow')
        self.assertLen(graph.get_operations(), op_count)

        model.load_weights(prefix)
        # Likewise for a second load.
        op_count = len(graph.get_operations())
        model.load_weights(prefix)
        self.assertLen(graph.get_operations(), op_count)
  def _weight_loading_test_template(self, make_model_fn):
    """Shared body for TF-format weight save/load tests.

    Args:
      make_model_fn: zero-argument callable returning a fresh model.
    """
    with self.cached_session():
      model = make_model_fn()
      model.compile(
          loss='mse',
          optimizer=training_module.RMSPropOptimizer(0.1),
          metrics=['acc', keras.metrics.CategoricalAccuracy()])
      temp_dir = self.get_temp_dir()
      prefix = os.path.join(temp_dir, 'ckpt')
      train_x = np.random.random((3, 2))
      train_y = np.random.random((3,))
      x = constant_op.constant(train_x, dtype=dtypes.float32)

      model.train_on_batch(train_x, train_y)
      model.save_weights(prefix, save_format='tf')
      ref_y_before_train = model.predict(train_x)
      model.train_on_batch(train_x, train_y)
      ref_y_after_train = model.predict(train_x)
      # Scramble the variables so that a successful load is observable.
      for v in model.variables:
        self.evaluate(
            v.assign(random_ops.random_normal(shape=array_ops.shape(v))))

      self.addCleanup(shutil.rmtree, temp_dir)

      model.load_weights(prefix)
      self.assertAllClose(ref_y_before_train, self.evaluate(model(x)))

      # Test restore-on-create if this is a subclassed Model (graph Networks
      # will have already created their variables).
      load_model = make_model_fn()
      load_model.load_weights(prefix)
      self.assertAllClose(
          ref_y_before_train,
          self.evaluate(load_model(x)))
      load_model = make_model_fn()
      load_model.load_weights(prefix)
      # We need to run some of the restore ops for predict(), but not all
      # variables have been created yet (optimizer slot variables). Tests
      # incremental restore.
      load_model.predict(train_x)
      load_model.compile(
          loss='mse',
          optimizer=training_module.RMSPropOptimizer(0.1),
          metrics=['acc', keras.metrics.CategoricalAccuracy()])
      load_model.train_on_batch(train_x, train_y)
      self.assertAllClose(ref_y_after_train, self.evaluate(load_model(x)))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weight_loading_graph_model(self):
def _make_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
return keras.models.Model(a, b)
self._weight_loading_test_template(_make_graph_model)
  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_subclassed_model(self):
    """Runs the weight-loading template against a subclassed model."""
    self._weight_loading_test_template(SubclassedModel)
def _new_layer_weight_loading_test_template(
    self, first_model_fn, second_model_fn):
  """Checks checkpoint compatibility between two related models.

  Saves weights from `first_model_fn()`, restores them into the larger
  `second_model_fn()` model, re-saves, and verifies the second model's
  checkpoint still loads back into the first model unchanged.
  """
  with self.cached_session() as session:
    model = first_model_fn()
    temp_dir = self.get_temp_dir()
    prefix = os.path.join(temp_dir, 'ckpt')
    x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
    executing_eagerly = context.executing_eagerly()
    ref_y_tensor = model(x)
    if not executing_eagerly:
      # Graph mode: variables must be initialized explicitly before use.
      session.run([v.initializer for v in model.variables])
    ref_y = self.evaluate(ref_y_tensor)
    model.save_weights(prefix)
    self.assertEqual(
        prefix,
        checkpoint_management.latest_checkpoint(temp_dir))
    # Scramble the weights so a successful restore is observable.
    for v in model.variables:
      self.evaluate(
          v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
    self.addCleanup(shutil.rmtree, temp_dir)
    second_model = second_model_fn()
    # Restore-on-create: status ops run after the call builds variables.
    status = second_model.load_weights(prefix)
    second_model(x)
    status.run_restore_ops()
    second_model.save_weights(prefix)
    # Check that the second model's checkpoint loads into the original model
    status = model.load_weights(prefix)
    status.run_restore_ops(session)
    y = self.evaluate(model(x))
    self.assertAllClose(ref_y, y)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weight_loading_graph_model_added_layer(self):
  """Restoring a checkpoint into a graph model with an extra layer."""

  def _save_graph_model():
    # Two-layer network whose weights are written to the checkpoint.
    inputs = keras.layers.Input(shape=(2,))
    hidden = keras.layers.Dense(3, name='first')(inputs)
    outputs = keras.layers.Dense(1, name='second')(hidden)
    return keras.models.Model(inputs, outputs)

  def _restore_graph_model():
    # The same two named layers plus a trailing Dense the checkpoint
    # knows nothing about; only the shared layers get restored.
    inputs = keras.layers.Input(shape=(2,))
    hidden = keras.layers.Dense(3, name='first')(inputs)
    mid = keras.layers.Dense(1, name='second')(hidden)
    outputs = keras.layers.Dense(3, name='secondjr')(mid)
    return keras.models.Model(inputs, outputs)

  self._new_layer_weight_loading_test_template(
      _save_graph_model, _restore_graph_model)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weight_loading_graph_model_added_no_weight_layer(self):
  """Restoring into a graph model extended only by a weight-less layer."""

  def _save_graph_model():
    inputs = keras.layers.Input(shape=(2,))
    hidden = keras.layers.Dense(3, name='first')(inputs)
    outputs = keras.layers.Dense(1, name='second')(hidden)
    return keras.models.Model(inputs, outputs)

  def _restore_graph_model():
    # Identical layers plus a Dropout, which owns no variables and so
    # cannot conflict with the saved checkpoint.
    inputs = keras.layers.Input(shape=(2,))
    hidden = keras.layers.Dense(3, name='first')(inputs)
    dense_out = keras.layers.Dense(1, name='second')(hidden)
    outputs = keras.layers.Dropout(rate=0.1)(dense_out)
    return keras.models.Model(inputs, outputs)

  self._new_layer_weight_loading_test_template(
      _save_graph_model, _restore_graph_model)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weight_loading_subclassed_model_added_layer(self):
  # Restoring into a subclassed model that inserts an extra Dense layer
  # between the two layers saved by SubclassedModel.

  class SubclassedModelRestore(training.Model):
    # x_layer/b_layer line up with SubclassedModel's layers; y_layer is
    # new and therefore absent from the checkpoint.

    def __init__(self):
      super(SubclassedModelRestore, self).__init__()
      self.x_layer = keras.layers.Dense(3)
      self.y_layer = keras.layers.Dense(3)
      self.b_layer = keras.layers.Dense(1)

    def call(self, a):
      return self.b_layer(self.y_layer(self.x_layer(a)))

  self._new_layer_weight_loading_test_template(
      SubclassedModel, SubclassedModelRestore)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_incompatible_checkpoint(self):
  # A checkpoint containing only a bare root object has nothing that can
  # match a model's variables; loading it must fail loudly.
  save_path = trackable.Checkpoint().save(
      os.path.join(self.get_temp_dir(), 'ckpt'))
  m = DummySubclassModel()
  with self.assertRaisesRegex(AssertionError, 'Nothing to load'):
    m.load_weights(save_path)
  m.dense = keras.layers.Dense(2)
  m.dense(constant_op.constant([[1.]]))
  # Now the model has variables, but none of them match the checkpoint.
  with self.assertRaisesRegex(AssertionError,
                              'Nothing except the root object matched'):
    m.load_weights(save_path)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_directory_passed(self):
  # A trailing-slash (directory-style) prefix must still save/restore.
  with self.cached_session():
    m = DummySubclassModel()
    v = m.add_weight(name='v', shape=[])
    self.evaluate(v.assign(42.))
    prefix = os.path.join(self.get_temp_dir(), str(uuid.uuid4()), 'ckpt/')
    m.save_weights(prefix)
    self.evaluate(v.assign(2.))
    m.load_weights(prefix)
    self.assertEqual(42., self.evaluate(v))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_relative_path(self):
  # save_weights/load_weights must accept paths relative to the CWD,
  # including nested ('subdir/ackpt') and directory-style ('ackpt/')
  # prefixes. Each round-trip writes a sentinel, clobbers it, restores.
  with self.cached_session():
    m = DummySubclassModel()
    v = m.add_weight(name='v', shape=[])
    os.chdir(self.get_temp_dir())
    prefix = 'ackpt'
    self.evaluate(v.assign(42.))
    m.save_weights(prefix)
    self.assertTrue(file_io.file_exists('ackpt.index'))
    self.evaluate(v.assign(1.))
    m.load_weights(prefix)
    self.assertEqual(42., self.evaluate(v))
    prefix = 'subdir/ackpt'
    self.evaluate(v.assign(43.))
    m.save_weights(prefix)
    self.assertTrue(file_io.file_exists('subdir/ackpt.index'))
    self.evaluate(v.assign(2.))
    m.load_weights(prefix)
    self.assertEqual(43., self.evaluate(v))
    prefix = 'ackpt/'
    self.evaluate(v.assign(44.))
    m.save_weights(prefix)
    # A trailing slash yields an index file named '.index' in the dir.
    self.assertTrue(file_io.file_exists('ackpt/.index'))
    self.evaluate(v.assign(3.))
    m.load_weights(prefix)
    self.assertEqual(44., self.evaluate(v))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_nonexistent_prefix_directory(self):
  # Saving must create intermediate directories that don't exist yet.
  with self.cached_session():
    m = DummySubclassModel()
    v = m.add_weight(name='v', shape=[])
    self.evaluate(v.assign(42.))
    prefix = os.path.join(self.get_temp_dir(), str(uuid.uuid4()), 'bckpt')
    m.save_weights(prefix)
    self.evaluate(v.assign(2.))
    m.load_weights(prefix)
    self.assertEqual(42., self.evaluate(v))
class DummySubclassModel(training.Model):
  """Empty subclassed Model; tests attach weights/layers ad hoc."""
  pass
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
"""
Copyright (c) 2008 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import time
from dbus import UInt64
# Module collaborators; populated by setup() before any command runs.
config = None  # tekka configuration accessor
gui = None  # main-window / tab handling object
com = None  # maki (D-Bus) communication layer
commands = {}  # command name -> handler function, filled by setup()
def parseInput(text):
    """Dispatch one line of user input.

    Plain text (or text escaped with a leading "//") is sent as a message
    to the current channel.  A "/command" line is looked up in the command
    table; unknown commands are forwarded raw to the current server.
    """
    if not text:
        return

    serverTab, channelTab = gui.tabs.getCurrentTabs()

    if text[:2] == "//" or text[0] != "/":
        # Ordinary message (or message escaped with a double slash).
        if not channelTab:
            return
        com.sendMessage(serverTab.name, channelTab.name, text)
    else:
        # Renamed from "list", which shadowed the builtin.
        parts = text[1:].split(" ")
        cmd = parts[0]

        if not cmd:
            return gui.myPrint("No command given.")
        elif cmd not in commands:  # was has_key(); "in" works on py2 and py3
            if not serverTab:
                return gui.myPrint("No server active.")
            gui.myPrint("Unknown command '%s'; Forwarding raw." % (cmd))
            # Bug fix: keep a space between the command and its arguments
            # (previously "/mode a b" was forwarded as "MODEa b").
            com.raw(serverTab.name, " ".join([cmd.upper()] + parts[1:]))
        else:
            commands[cmd](serverTab, channelTab, parts[1:])
def makiConnect(currentServer, currentChannel, args):
    """
    Connect to the given server.
    Usage: /connect <server>
    """
    if not args:
        return gui.myPrint("Usage: /connect <servername>")
    # maki resolves the name against its stored server list.
    com.connectServer(args[0])
def makiQuit(currentServer, currentChannel, args):
    """
    Quit the given server with an optional reason.
    If no server is given, the current server is quit.
    Usage: /quit <server> [<reason>]
           /quit [<reason>]
    """
    if args:
        # /quit <server> [<reason>]
        # If the first word names a known server tab, quit that server.
        if gui.tabs.searchTab(None, args[0]):
            com.quitServer(args[0], " ".join(args[1:]))
        else:
            # /quit [<reason>] - the whole argument list is the reason.
            if not currentServer:
                return gui.myPrint("Could not determine server.")
            com.quitServer(currentServer.name, " ".join(args))
    else:
        # /quit with no arguments: quit the current server, no reason.
        if not currentServer:
            return gui.myPrint("Could not determine server.")
        com.quitServer(currentServer.name,"")
def makiNick(currentServer, currentChannel, args):
    """
    Change your current nick to the given nick.
    Usage: /nick <new nick>
    """
    if not args:
        return gui.myPrint("Usage: /nick <new nick>")
    if not currentServer:
        return gui.myPrint("Can't determine my server.")
    # Only the first word is used as the new nick.
    com.nick(currentServer.name, args[0])
def makiPart(currentServer, currentChannel, args):
    """
    Part the given channel with an optional reason.
    If no channel is given, the current channel is parted.
    Usage: /part <channel> [<reason>]
           /part [<reason>]
    """
    if args and currentServer:
        if gui.tabs.searchTab(currentServer.name, args[0]):
            # /part <channel> [<reason>]
            # Bug fix: com.part() takes (server, channel, reason); the
            # server name was missing from this call.
            com.part(currentServer.name, args[0], " ".join(args[1:]))
        else:
            # /part [<reason>] - whole argument list is the reason.
            if not currentChannel:
                return gui.myPrint("Could not determine channel.")
            com.part(currentServer.name, currentChannel.name, " ".join(args))
    else:
        # /part with no arguments: part the current channel, no reason.
        if not currentChannel:
            return gui.myPrint("Could not determine channel.")
        com.part(currentServer.name, currentChannel.name, "")
def makiJoin(currentServer, currentChannel, args):
    """
    Joins the given channel with the optional key.
    Usage: /join <channel> [<key>]
    """
    if not currentServer:
        return gui.myPrint("Can't determine server.")
    if not args:
        return gui.myPrint("Usage: /join <channel> [<key>]")
    # Everything after the channel name is treated as the key.
    com.join(currentServer.name, args[0], " ".join(args[1:]))
def makiAction(currentServer, currentChannel, args):
    """
    Sends an action in third person view.
    Usage: /me <text>
    Example: nemo types: /me giggles.
    Results in: nemo giggles.
    """
    if not args:
        return gui.myPrint("Usage: /me <text>")
    if not currentChannel:
        return gui.myPrint("Can't find active channel.")
    com.action(currentServer.name, currentChannel.name, " ".join(args))
def makiKick(currentServer, currentTab, args):
    """
    Kick the given user with an optional reason from the
    current channel.
    Usage: /kick <user> [<reason>]
    """
    if not args:
        return gui.myPrint("Usage: /kick <user> [<reason>]")
    # Bug fix: this function referenced undefined names (channelTab,
    # serverTab, self.com); use its actual parameters and the module-level
    # com object instead.
    if not currentTab or not currentTab.is_channel():
        return gui.myPrint("You're not on a channel")
    com.kick(currentServer.name, currentTab.name, args[0], " ".join(args[1:]))
def makiMode(currentServer, currentChannel, args):
    """
    Sets a mode on the target.
    Usage: /mode <target> (+|-)<mode> [<param>]
    Example: /mode #xesio +o nemo
    OR: /mode nemo +x
    OR: /mode #xesio +m
    """
    if not args or len(args) < 2:
        return gui.myPrint("Usage: /mode <target> (+|-)<mode> [<param>]")
    if not currentServer:
        return gui.myPrint("Could not determine server.")
    if len(args) > 2:
        # a parameter is given
        param = " ".join(args[2:])
        com.mode(currentServer.name, args[0], "%s %s" % (args[1], param))
    else:
        # Bug fix: parameterless modes (e.g. "/mode nemo +x") previously
        # fell through without sending anything.
        com.mode(currentServer.name, args[0], args[1])
def makiTopic(serverTab, channelTab, args):
    """
    Sets the topic in the current channel.
    Usage: /topic <text>
    """
    if not args:
        return gui.myPrint("Usage: /topic <text>")
    else:
        topic = " ".join(args)
    if not channelTab or not channelTab.is_channel():
        return gui.myPrint("No channel active.")
    com.setTopic(serverTab.name, channelTab.name, topic)
def makiAway(serverTab, channelTab, args):
    """
    Sets you away with an optional reason.
    Usage: /away [<reason>]
    """
    if not serverTab:
        return gui.myPrint("Can't determine server.")
    # An empty args list yields an empty reason, which is allowed.
    com.setAway(serverTab.name, " ".join(args))
def makiBack(serverTab, channelTab, args):
    """
    Sets you back from being away.
    Usage: /back
    """
    if not serverTab:
        return gui.myPrint("Can't determine server.")
    com.setBack(serverTab.name)
def makiNickserv(serverTab, channelTab, args):
    """
    Authenticates you at NickServ with
    the data stored in maki.
    Usage: /nickserv
    """
    if not serverTab:
        return gui.myPrint("Can't determine server.")
    # Bug fix: "server" was an undefined name; pass the server name like
    # every other command in this module does.
    com.nickserv(serverTab.name)
def makiCTCP(serverTab, channelTab, args):
    """
    Sends a CTCP message to the given target.
    Usage: /ctcp <target> <message>
    """
    if not args or len(args) < 2:
        return gui.myPrint("Usage: /ctcp <target> <message>")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    # Bug fix: only the first word of the message was sent (args[1]);
    # join the rest like /notice and /msg do.
    com.ctcp(serverTab.name, args[0], " ".join(args[1:]))
def makiNotice(serverTab, channelTab, args):
    """
    Sends a notice to the given target.
    The difference between /ctcp and /notice
    is, that /ctcp sends directly to the user
    while /notice sends the message over the
    server.
    Usage: /notice <target> <message>
    """
    if not args or len(args) < 2:
        return gui.myPrint("Usage: /notice <target> <message>")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    com.notice(serverTab.name, args[0], " ".join(args[1:]))
def makiMessage(serverTab, channelTab, args):
    """
    Sends a message (PRIVMSG) to the target.
    The target can be a channel or a user.
    Usage: /msg <target> <message>
    """
    if not args or len(args) < 2:
        return gui.myPrint("Usage: /msg <target> <message>")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    # Bug fix: " ".join(args[1]) joined the *characters* of the first
    # message word ("hi" -> "h i"); join the word list args[1:] instead.
    com.sendMessage(serverTab.name, args[0], " ".join(args[1:]))
def makiOper(serverTab, channelTab, args):
    """
    Authentificate as IRC operator.
    Usage: /oper <user> <pass>
    """
    if not args or len(args) < 2:
        return gui.myPrint("Usage: /oper <user> <pass>")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    com.oper(serverTab.name, args[0], " ".join(args[1:]))
def makiKill(serverTab, channelTab, args):
    """
    Kill a user on the network with an optional reason.
    Usage: /kill <user> [<reason>]
    """
    if not args or len(args) < 1:
        return gui.myPrint("Usage: /kill <user> [<reason>]")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    com.kill(serverTab.name, args[0], " ".join(args[1:]))
def makiList(serverTab, channelTab, args):
    """
    Start a channel listing.
    If channel is given, only the channel
    is listed.
    Usage: /list [<channel>]
    """
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    try:
        # channel specific listing?
        channel = args[0]
    except IndexError:
        # start a complete list..
        channel = ""
    gui.serverPrint(time.time(), serverTab.name, "Start of list.")
    com.list(serverTab.name, channel)
def makiRaw(serverTab, channelTab, args):
    """
    Sends a command with optional args to maki
    which acts only as forwarder. The command
    goes unchanged to the server.
    Usage: /raw <command> [<further text>]
    """
    if not args:
        return gui.myPrint("Usage: /raw <command>")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    # upper-case the command
    args[0] = args[0].upper()
    # Bug fix: "server" was an undefined name; use the server tab's name
    # (matching the raw-forwarding call in parseInput).
    com.raw(serverTab.name, " ".join(args))
def makiWhois(currentServer, currentChannel, args):
    """
    Query a user's identity on the current server.
    Usage: /whois <user mask>
    """
    # Bug fix: the missing-argument branch printed "No server activated.";
    # show the usage string instead, and guard against a missing server
    # before dereferencing currentServer.name.
    if not args:
        return gui.myPrint("Usage: /whois <user mask>")
    if not currentServer:
        return gui.myPrint("Could not determine server.")
    com.sushi.whois(currentServer.name, args[0])
""" TEKKA USER COMMANDS """
def tekkaQuery(currentServer, currentTab, args):
    """
    Starts a query dialog with the given user.
    Usage: /query <nick>
    """
    if not args:
        return gui.myPrint("Usage: /query <nick>")
    if not currentServer:
        return gui.myPrint("Can't determine server.")
    nick = args[0]
    # Only create the query tab if one does not exist for this nick yet.
    if not gui.tabs.searchTab(currentServer.name, nick):
        # no query started
        tab = gui.tabs.createQuery(currentServer.name, nick)
        tab.connected = True
        gui.tabs.addTab(currentServer.name, tab)
        output = tab.buffer
        # fetch and write history to query (if any); the line count comes
        # from the "chatting/last_log_lines" config key (default "10").
        for line in com.fetchLog(currentServer.name, nick,
            UInt64(config.get("chatting","last_log_lines","10"))):
            output.insertHTML(output.get_end_iter(),
                "<font foreground='#DDDDDD'>%s</font>" % gui.escape(line))
def tekkaClear(currentServer, currentTab, args):
    """
    Clears the output of the current channel.
    Usage: /clear
    """
    # Prefer the active tab's buffer; fall back to the server buffer.
    if currentTab:
        currentTab.buffer.set_text("")
    elif currentServer:
        currentServer.buffer.set_text("")
def tekkaHelp(currentServer, currentTab, args):
    """
    Prints the doc-string of the given command.
    Usage: /help <command>
    """
    if not args:
        return gui.myPrint("Usage: /help <command>")
    # Idiom fix: dict.has_key() is Python-2-only; "in" works everywhere.
    if args[0] in commands:
        gui.myPrint(commands[args[0]].__doc__.replace("\t",""))
    else:
        gui.myPrint("No help for %s available." % (args[0]))
def setup(_config, _gui, _com):
    """
    Setup the command module.
    * Set modules
    * Set command mapping
    """
    # Inject the collaborating modules and build the dispatch table used
    # by parseInput(); keys are the command words typed after "/".
    global config, gui, com, commands
    config = _config
    gui = _gui
    com = _com
    commands = {
        "connect" : makiConnect,
        "nick" : makiNick,
        "part" : makiPart,
        "join" : makiJoin,
        "j" : makiJoin,
        "me" : makiAction,
        "kick" : makiKick,
        "mode" : makiMode,
        "topic": makiTopic,
        "quit" : makiQuit,
        "away" : makiAway,
        "back" : makiBack,
        "nickserv" : makiNickserv,
        "ctcp" : makiCTCP,
        "notice" : makiNotice,
        "msg" : makiMessage,
        "oper" : makiOper,
        "kill" : makiKill,
        "list" : makiList,
        "raw" : makiRaw,
        "whois" : makiWhois,
        "query": tekkaQuery,
        "clear": tekkaClear,
        "help": tekkaHelp
    }
Fix /kick.
"""
Copyright (c) 2008 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import time
from dbus import UInt64
# Module collaborators; populated by setup() before any command runs.
config = None  # tekka configuration accessor
gui = None  # main-window / tab handling object
com = None  # maki (D-Bus) communication layer
commands = {}  # command name -> handler function, filled by setup()
def parseInput(text):
    """Dispatch one line of user input.

    Plain text (or text escaped with a leading "//") is sent as a message
    to the current channel.  A "/command" line is looked up in the command
    table; unknown commands are forwarded raw to the current server.
    """
    if not text:
        return

    serverTab, channelTab = gui.tabs.getCurrentTabs()

    if text[:2] == "//" or text[0] != "/":
        # Ordinary message (or message escaped with a double slash).
        if not channelTab:
            return
        com.sendMessage(serverTab.name, channelTab.name, text)
    else:
        # Renamed from "list", which shadowed the builtin.
        parts = text[1:].split(" ")
        cmd = parts[0]

        if not cmd:
            return gui.myPrint("No command given.")
        elif cmd not in commands:  # was has_key(); "in" works on py2 and py3
            if not serverTab:
                return gui.myPrint("No server active.")
            gui.myPrint("Unknown command '%s'; Forwarding raw." % (cmd))
            # Bug fix: keep a space between the command and its arguments
            # (previously "/mode a b" was forwarded as "MODEa b").
            com.raw(serverTab.name, " ".join([cmd.upper()] + parts[1:]))
        else:
            commands[cmd](serverTab, channelTab, parts[1:])
def makiConnect(currentServer, currentChannel, args):
    """
    Connect to the given server.
    Usage: /connect <server>
    """
    if not args:
        return gui.myPrint("Usage: /connect <servername>")
    # maki resolves the name against its stored server list.
    com.connectServer(args[0])
def makiQuit(currentServer, currentChannel, args):
    """
    Quit the given server with an optional reason.
    If no server is given, the current server is quit.
    Usage: /quit <server> [<reason>]
           /quit [<reason>]
    """
    if args:
        # /quit <server> [<reason>]
        # If the first word names a known server tab, quit that server.
        if gui.tabs.searchTab(None, args[0]):
            com.quitServer(args[0], " ".join(args[1:]))
        else:
            # /quit [<reason>] - the whole argument list is the reason.
            if not currentServer:
                return gui.myPrint("Could not determine server.")
            com.quitServer(currentServer.name, " ".join(args))
    else:
        # /quit with no arguments: quit the current server, no reason.
        if not currentServer:
            return gui.myPrint("Could not determine server.")
        com.quitServer(currentServer.name,"")
def makiNick(currentServer, currentChannel, args):
    """
    Change your current nick to the given nick.
    Usage: /nick <new nick>
    """
    if not args:
        return gui.myPrint("Usage: /nick <new nick>")
    if not currentServer:
        return gui.myPrint("Can't determine my server.")
    # Only the first word is used as the new nick.
    com.nick(currentServer.name, args[0])
def makiPart(currentServer, currentChannel, args):
    """
    Part the given channel with an optional reason.
    If no channel is given, the current channel is parted.
    Usage: /part <channel> [<reason>]
           /part [<reason>]
    """
    if args and currentServer:
        if gui.tabs.searchTab(currentServer.name, args[0]):
            # /part <channel> [<reason>]
            # Bug fix: com.part() takes (server, channel, reason); the
            # server name was missing from this call.
            com.part(currentServer.name, args[0], " ".join(args[1:]))
        else:
            # /part [<reason>] - whole argument list is the reason.
            if not currentChannel:
                return gui.myPrint("Could not determine channel.")
            com.part(currentServer.name, currentChannel.name, " ".join(args))
    else:
        # /part with no arguments: part the current channel, no reason.
        if not currentChannel:
            return gui.myPrint("Could not determine channel.")
        com.part(currentServer.name, currentChannel.name, "")
def makiJoin(currentServer, currentChannel, args):
    """
    Joins the given channel with the optional key.
    Usage: /join <channel> [<key>]
    """
    if not currentServer:
        return gui.myPrint("Can't determine server.")
    if not args:
        return gui.myPrint("Usage: /join <channel> [<key>]")
    # Everything after the channel name is treated as the key.
    com.join(currentServer.name, args[0], " ".join(args[1:]))
def makiAction(currentServer, currentChannel, args):
    """
    Sends an action in third person view.
    Usage: /me <text>
    Example: nemo types: /me giggles.
    Results in: nemo giggles.
    """
    if not args:
        return gui.myPrint("Usage: /me <text>")
    if not currentChannel:
        return gui.myPrint("Can't find active channel.")
    com.action(currentServer.name, currentChannel.name, " ".join(args))
def makiKick(currentServer, currentTab, args):
    """
    Kick the given user with an optional reason from the
    current channel.
    Usage: /kick <user> [<reason>]
    """
    if not args:
        return gui.myPrint("Usage: /kick <user> [<reason>]")
    # Kicking only makes sense from a channel tab.
    if not currentTab or not currentTab.is_channel():
        return gui.myPrint("You're not on a channel")
    com.kick(currentServer.name, currentTab.name, args[0], " ".join(args[1:]))
def makiMode(currentServer, currentChannel, args):
    """
    Sets a mode on the target.
    Usage: /mode <target> (+|-)<mode> [<param>]
    Example: /mode #xesio +o nemo
    OR: /mode nemo +x
    OR: /mode #xesio +m
    """
    if not args or len(args) < 2:
        return gui.myPrint("Usage: /mode <target> (+|-)<mode> [<param>]")
    if not currentServer:
        return gui.myPrint("Could not determine server.")
    if len(args) > 2:
        # a parameter is given
        param = " ".join(args[2:])
        com.mode(currentServer.name, args[0], "%s %s" % (args[1], param))
    else:
        # Bug fix: parameterless modes (e.g. "/mode nemo +x") previously
        # fell through without sending anything.
        com.mode(currentServer.name, args[0], args[1])
def makiTopic(serverTab, channelTab, args):
    """
    Sets the topic in the current channel.
    Usage: /topic <text>
    """
    if not args:
        return gui.myPrint("Usage: /topic <text>")
    else:
        topic = " ".join(args)
    if not channelTab or not channelTab.is_channel():
        return gui.myPrint("No channel active.")
    com.setTopic(serverTab.name, channelTab.name, topic)
def makiAway(serverTab, channelTab, args):
    """
    Sets you away with an optional reason.
    Usage: /away [<reason>]
    """
    if not serverTab:
        return gui.myPrint("Can't determine server.")
    # An empty args list yields an empty reason, which is allowed.
    com.setAway(serverTab.name, " ".join(args))
def makiBack(serverTab, channelTab, args):
    """
    Sets you back from being away.
    Usage: /back
    """
    if not serverTab:
        return gui.myPrint("Can't determine server.")
    com.setBack(serverTab.name)
def makiNickserv(serverTab, channelTab, args):
    """
    Authenticates you at NickServ with
    the data stored in maki.
    Usage: /nickserv
    """
    if not serverTab:
        return gui.myPrint("Can't determine server.")
    # Bug fix: "server" was an undefined name; pass the server name like
    # every other command in this module does.
    com.nickserv(serverTab.name)
def makiCTCP(serverTab, channelTab, args):
    """
    Sends a CTCP message to the given target.
    Usage: /ctcp <target> <message>
    """
    if not args or len(args) < 2:
        return gui.myPrint("Usage: /ctcp <target> <message>")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    # Bug fix: only the first word of the message was sent (args[1]);
    # join the rest like /notice and /msg do.
    com.ctcp(serverTab.name, args[0], " ".join(args[1:]))
def makiNotice(serverTab, channelTab, args):
    """
    Sends a notice to the given target.
    The difference between /ctcp and /notice
    is, that /ctcp sends directly to the user
    while /notice sends the message over the
    server.
    Usage: /notice <target> <message>
    """
    if not args or len(args) < 2:
        return gui.myPrint("Usage: /notice <target> <message>")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    com.notice(serverTab.name, args[0], " ".join(args[1:]))
def makiMessage(serverTab, channelTab, args):
    """
    Sends a message (PRIVMSG) to the target.
    The target can be a channel or a user.
    Usage: /msg <target> <message>
    """
    if not args or len(args) < 2:
        return gui.myPrint("Usage: /msg <target> <message>")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    # Bug fix: " ".join(args[1]) joined the *characters* of the first
    # message word ("hi" -> "h i"); join the word list args[1:] instead.
    com.sendMessage(serverTab.name, args[0], " ".join(args[1:]))
def makiOper(serverTab, channelTab, args):
    """
    Authentificate as IRC operator.
    Usage: /oper <user> <pass>
    """
    if not args or len(args) < 2:
        return gui.myPrint("Usage: /oper <user> <pass>")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    com.oper(serverTab.name, args[0], " ".join(args[1:]))
def makiKill(serverTab, channelTab, args):
    """
    Kill a user on the network with an optional reason.
    Usage: /kill <user> [<reason>]
    """
    if not args or len(args) < 1:
        return gui.myPrint("Usage: /kill <user> [<reason>]")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    com.kill(serverTab.name, args[0], " ".join(args[1:]))
def makiList(serverTab, channelTab, args):
    """
    Start a channel listing.
    If channel is given, only the channel
    is listed.
    Usage: /list [<channel>]
    """
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    try:
        # channel specific listing?
        channel = args[0]
    except IndexError:
        # start a complete list..
        channel = ""
    gui.serverPrint(time.time(), serverTab.name, "Start of list.")
    com.list(serverTab.name, channel)
def makiRaw(serverTab, channelTab, args):
    """
    Sends a command with optional args to maki
    which acts only as forwarder. The command
    goes unchanged to the server.
    Usage: /raw <command> [<further text>]
    """
    if not args:
        return gui.myPrint("Usage: /raw <command>")
    if not serverTab:
        return gui.myPrint("Could not determine server.")
    # upper-case the command
    args[0] = args[0].upper()
    # Bug fix: "server" was an undefined name; use the server tab's name
    # (matching the raw-forwarding call in parseInput).
    com.raw(serverTab.name, " ".join(args))
def makiWhois(currentServer, currentChannel, args):
    """
    Query a user's identity on the current server.
    Usage: /whois <user mask>
    """
    # Bug fix: the missing-argument branch printed "No server activated.";
    # show the usage string instead, and guard against a missing server
    # before dereferencing currentServer.name.
    if not args:
        return gui.myPrint("Usage: /whois <user mask>")
    if not currentServer:
        return gui.myPrint("Could not determine server.")
    com.sushi.whois(currentServer.name, args[0])
""" TEKKA USER COMMANDS """
def tekkaQuery(currentServer, currentTab, args):
    """
    Starts a query dialog with the given user.
    Usage: /query <nick>
    """
    if not args:
        return gui.myPrint("Usage: /query <nick>")
    if not currentServer:
        return gui.myPrint("Can't determine server.")
    nick = args[0]
    # Only create the query tab if one does not exist for this nick yet.
    if not gui.tabs.searchTab(currentServer.name, nick):
        # no query started
        tab = gui.tabs.createQuery(currentServer.name, nick)
        tab.connected = True
        gui.tabs.addTab(currentServer.name, tab)
        output = tab.buffer
        # fetch and write history to query (if any); the line count comes
        # from the "chatting/last_log_lines" config key (default "10").
        for line in com.fetchLog(currentServer.name, nick,
            UInt64(config.get("chatting","last_log_lines","10"))):
            output.insertHTML(output.get_end_iter(),
                "<font foreground='#DDDDDD'>%s</font>" % gui.escape(line))
def tekkaClear(currentServer, currentTab, args):
    """
    Clears the output of the current channel.
    Usage: /clear
    """
    # Prefer the active tab's buffer; fall back to the server buffer.
    if currentTab:
        currentTab.buffer.set_text("")
    elif currentServer:
        currentServer.buffer.set_text("")
def tekkaHelp(currentServer, currentTab, args):
    """
    Prints the doc-string of the given command.
    Usage: /help <command>
    """
    if not args:
        return gui.myPrint("Usage: /help <command>")
    # Idiom fix: dict.has_key() is Python-2-only; "in" works everywhere.
    if args[0] in commands:
        gui.myPrint(commands[args[0]].__doc__.replace("\t",""))
    else:
        gui.myPrint("No help for %s available." % (args[0]))
def setup(_config, _gui, _com):
    """
    Setup the command module.
    * Set modules
    * Set command mapping
    """
    # Inject the collaborating modules and build the dispatch table used
    # by parseInput(); keys are the command words typed after "/".
    global config, gui, com, commands
    config = _config
    gui = _gui
    com = _com
    commands = {
        "connect" : makiConnect,
        "nick" : makiNick,
        "part" : makiPart,
        "join" : makiJoin,
        "j" : makiJoin,
        "me" : makiAction,
        "kick" : makiKick,
        "mode" : makiMode,
        "topic": makiTopic,
        "quit" : makiQuit,
        "away" : makiAway,
        "back" : makiBack,
        "nickserv" : makiNickserv,
        "ctcp" : makiCTCP,
        "notice" : makiNotice,
        "msg" : makiMessage,
        "oper" : makiOper,
        "kill" : makiKill,
        "list" : makiList,
        "raw" : makiRaw,
        "whois" : makiWhois,
        "query": tekkaQuery,
        "clear": tekkaClear,
        "help": tekkaHelp
    }
|
# Copyright (c) 2010-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import StringIO
import numpy
import os
import shutil
import tempfile
from nose.plugins.attrib import attr
from openquake.commonlib.writers import write_csv
from openquake.commonlib.tests import check_equal
from openquake.engine.db import models
from openquake.engine.export import core as hazard_export
from qa_tests import _utils as qa_utils
from qa_tests._utils import BaseQATestCase, compare_hazard_curve_with_csv
from openquake.qa_tests_data.classical import (
case_1, case_2, case_3, case_4, case_5, case_6, case_7, case_8, case_9,
case_10, case_11, case_12, case_13, case_14, case_15, case_16, case_17,
case_19)
# Short alias for the array-comparison helper used by the tests below.
aaae = numpy.testing.assert_array_almost_equal
class ClassicalHazardCase1TestCase(qa_utils.BaseQATestCase):
    """Classical PSHA QA case 1: checks curve poes in-db and the XML export."""

    # Canonical NRML exports the engine must reproduce.
    EXPECTED_PGA_XML = """<?xml version='1.0' encoding='UTF-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml" xmlns="http://openquake.org/xmlns/nrml/0.4">
<hazardCurves sourceModelTreePath="b1" gsimTreePath="b1" IMT="PGA" investigationTime="1.0">
<IMLs>1.000000000E-01 4.000000000E-01 6.000000000E-01</IMLs>
<hazardCurve>
<gml:Point>
<gml:pos>0.0 0.0</gml:pos>
</gml:Point>
<poEs>4.570134863E-01 5.862678774E-02 6.866164397E-03</poEs>
</hazardCurve>
</hazardCurves>
</nrml>
"""

    EXPECTED_SA_XML = """<?xml version='1.0' encoding='UTF-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml" xmlns="http://openquake.org/xmlns/nrml/0.4">
<hazardCurves sourceModelTreePath="b1" gsimTreePath="b1" IMT="SA" investigationTime="1.0" saPeriod="0.1" saDamping="5.0">
<IMLs>1.000000000E-01 4.000000000E-01 6.000000000E-01</IMLs>
<hazardCurve>
<gml:Point>
<gml:pos>0.0 0.0</gml:pos>
</gml:Point>
<poEs>6.086747647E-01 3.308304637E-01 2.014712169E-01</poEs>
</hazardCurve>
</hazardCurves>
</nrml>
"""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        # Runs the case_1 job, then checks one PGA and one SA(0.1) curve.
        result_dir = tempfile.mkdtemp()
        cfg = os.path.join(os.path.dirname(case_1.__file__), 'job.ini')
        expected_curve_pga = [0.4570, 0.0587, 0.0069]
        expected_curve_sa = [
            0.608675003748, 0.330831513139, 0.201472214825
        ]
        job = self.run_hazard(cfg)
        # Test the poe values of the single curve:
        curves = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id
        )
        [pga_curve] = curves.filter(hazard_curve__imt='PGA')
        numpy.testing.assert_array_almost_equal(
            expected_curve_pga, pga_curve.poes, decimal=4
        )
        [sa_curve] = curves.filter(
            hazard_curve__imt='SA', hazard_curve__sa_period=0.1
        )
        numpy.testing.assert_array_almost_equal(
            expected_curve_sa, sa_curve.poes, decimal=4
        )
        # Test the exports as well:
        exported_file = hazard_export.export(
            pga_curve.hazard_curve.output.id, result_dir)
        self.assert_xml_equal(
            StringIO.StringIO(self.EXPECTED_PGA_XML), exported_file)
        exported_file = hazard_export.export(
            sa_curve.hazard_curve.output.id, result_dir)
        self.assert_xml_equal(
            StringIO.StringIO(self.EXPECTED_SA_XML), exported_file)
        shutil.rmtree(result_dir)
class ClassicalHazardCase2TestCase(qa_utils.BaseQATestCase):
    """Classical PSHA QA case 2: single curve plus canonical XML export."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        job_cfg = os.path.join(os.path.dirname(case_2.__file__), 'job.ini')
        expected_poes = [0.0095, 0.00076, 0.000097, 0.0]

        job = self.run_hazard(job_cfg)

        # Exactly one hazard curve is expected for this job.
        [curve] = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id)
        aaae(expected_poes, curve.poes, decimal=3)

        # The XML export must match the stored expected file.
        exported_file = hazard_export.export(
            curve.hazard_curve.output.id, result_dir)
        check_equal(case_2.__file__, 'expected_hazard_curves.xml',
                    exported_file)
        shutil.rmtree(result_dir)
class ClassicalHazardCase3TestCase(qa_utils.BaseQATestCase):
    """Classical PSHA QA case 3: single curve plus canonical XML export."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        job_cfg = os.path.join(os.path.dirname(case_3.__file__), 'job.ini')
        expected_poes = [0.63212, 0.47291, 0.04084]

        job = self.run_hazard(job_cfg)

        # Exactly one hazard curve is expected for this job.
        [curve] = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id)
        aaae(expected_poes, curve.poes, decimal=2)

        # The XML export must match the stored expected file.
        exported_file = hazard_export.export(
            curve.hazard_curve.output.id, result_dir)
        check_equal(case_3.__file__, 'expected_hazard_curves.xml',
                    exported_file)
        shutil.rmtree(result_dir)
class ClassicalHazardCase4TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 4: a single hazard curve whose
    poes and XML export are checked against known-good values."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_4.__file__), 'job.ini')
            expected_curve_poes = [0.63212, 0.61186, 0.25110]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [actual_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes, actual_curve.poes, decimal=3)

            # Test the export as well:
            exported_file = hazard_export.export(
                actual_curve.hazard_curve.output.id, result_dir)
            check_equal(case_4.__file__, 'expected_hazard_curves.xml',
                        exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase5TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 5 (slow): a single hazard curve
    whose poes and XML export are checked against known-good values."""

    @attr('qa', 'hazard', 'classical', 'slow')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_5.__file__), 'job.ini')
            expected_curve_poes = [0.632120, 0.54811, 0.15241]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [actual_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes, actual_curve.poes, decimal=3)

            # Test the export as well:
            exported_file = hazard_export.export(
                actual_curve.hazard_curve.output.id, result_dir)
            check_equal(case_5.__file__, 'expected_hazard_curves.xml',
                        exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase6TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 6 (slow): a single hazard curve
    whose poes and XML export are checked against known-good values."""

    @attr('qa', 'hazard', 'classical', 'slow')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_6.__file__), 'job.ini')
            expected_curve_poes = [0.86466, 0.82460, 0.36525]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [actual_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes, actual_curve.poes, decimal=2)

            # Test the export as well:
            exported_file = hazard_export.export(
                actual_curve.hazard_curve.output.id, result_dir)
            check_equal(case_6.__file__, 'expected_hazard_curves.xml',
                        exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase7TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 7 (slow): two logic-tree
    realizations (b1, b2) plus their mean curve, each checked for poes
    and XML export."""

    @attr('qa', 'hazard', 'classical', 'slow')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_7.__file__), 'job.ini')
            expected_curve_poes_b1 = [0.86466, 0.82460, 0.36525]
            expected_curve_poes_b2 = [0.63212, 0.61186, 0.25110]
            expected_mean_poes = [0.794898, 0.760778, 0.331005]

            job = self.run_hazard(cfg)

            # Test the poe values for the two curves; the order_by on the
            # source-model logic-tree path makes the unpacking deterministic.
            actual_curve_b1, actual_curve_b2 = (
                models.HazardCurveData.objects
                .filter(hazard_curve__output__oq_job=job.id,
                        hazard_curve__lt_realization__isnull=False)
                .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')
            )

            # Sanity check, to make sure we have the curves ordered correctly:
            self.assertEqual(
                ['b1'], actual_curve_b1.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b2'], actual_curve_b2.hazard_curve.lt_realization.sm_lt_path)

            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1, actual_curve_b1.poes, decimal=3)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b2, actual_curve_b2.poes, decimal=3)

            # Test the mean curve:
            [mean_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='mean')
            numpy.testing.assert_array_almost_equal(
                expected_mean_poes, mean_curve.poes, decimal=3)

            # Test the exports as well:
            exported_file_b1 = hazard_export.export(
                actual_curve_b1.hazard_curve.output.id, result_dir)
            check_equal(case_7.__file__, 'expected_b1.xml', exported_file_b1)
            exported_file_b2 = hazard_export.export(
                actual_curve_b2.hazard_curve.output.id, result_dir)
            check_equal(case_7.__file__, 'expected_b2.xml', exported_file_b2)
            # mean:
            exported_file_mean = hazard_export.export(
                mean_curve.hazard_curve.output.id, result_dir)
            check_equal(case_7.__file__, 'expected_mean.xml',
                        exported_file_mean)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase8TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 8: three logic-tree realizations
    (b1_b2, b1_b3, b1_b4), each checked for poes and XML export."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_8.__file__), 'job.ini')
            expected_curve_poes_b1_b2 = [0.095163, 0.012362, 0.002262, 0.0]
            expected_curve_poes_b1_b3 = [0.009950, 0.00076, 9.99995E-6, 0.0]
            expected_curve_poes_b1_b4 = [0.0009995, 4.5489E-5, 4.07365E-6, 0.0]

            job = self.run_hazard(cfg)

            # Test the poe values for the three curves; the order_by on the
            # source-model logic-tree path makes the unpacking deterministic.
            curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
                models.HazardCurveData.objects
                .filter(hazard_curve__output__oq_job=job.id)
                .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')
            )

            # Sanity check, to make sure we have the curves ordered correctly:
            self.assertEqual(
                ['b1', 'b2'],
                curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b3'],
                curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b4'],
                curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=3)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=3)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b4, curve_b1_b4.poes, decimal=3)

            # Test the exports as well:
            for curve, expected_xml in [
                    (curve_b1_b2, 'expected_b1_b2.xml'),
                    (curve_b1_b3, 'expected_b1_b3.xml'),
                    (curve_b1_b4, 'expected_b1_b4.xml')]:
                exported_file = hazard_export.export(
                    curve.hazard_curve.output.id, result_dir)
                check_equal(case_8.__file__, expected_xml, exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase9TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 9: two logic-tree realizations
    (b1_b2, b1_b3), each checked for poes and XML export."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_9.__file__), 'job.ini')
            expected_curve_poes_b1_b2 = [0.00995, 0.00076, 9.7E-5, 0.0]
            expected_curve_poes_b1_b3 = [0.00995, 0.00076, 0.000104, 0.0]

            job = self.run_hazard(cfg)

            # Test the poe values for the two curves; the order_by on the
            # source-model logic-tree path makes the unpacking deterministic.
            curve_b1_b2, curve_b1_b3 = (
                models.HazardCurveData.objects
                .filter(hazard_curve__output__oq_job=job.id)
                .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')
            )

            # Sanity check, to make sure we have the curves ordered correctly:
            self.assertEqual(
                ['b1', 'b2'],
                curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b3'],
                curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)

            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)

            # Test the exports as well:
            for curve, expected_xml in [
                    (curve_b1_b2, 'expected_b1_b2.xml'),
                    (curve_b1_b3, 'expected_b1_b3.xml')]:
                exported_file = hazard_export.export(
                    curve.hazard_curve.output.id, result_dir)
                check_equal(case_9.__file__, expected_xml, exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase10TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 10: two logic-tree realizations
    (b1_b2, b1_b3), each checked for poes and XML export."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_10.__file__), 'job.ini')
            expected_curve_poes_b1_b2 = [0.00995, 0.00076, 9.7E-5, 0.0]
            expected_curve_poes_b1_b3 = [0.043, 0.0012, 7.394E-5, 0.0]

            job = self.run_hazard(cfg)

            # Test the poe values for the two curves; the order_by on the
            # source-model logic-tree path makes the unpacking deterministic.
            curve_b1_b2, curve_b1_b3 = (
                models.HazardCurveData.objects
                .filter(hazard_curve__output__oq_job=job.id)
                .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')
            )

            # Sanity check, to make sure we have the curves ordered correctly:
            self.assertEqual(
                ['b1', 'b2'],
                curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b3'],
                curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)

            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)

            # Test the exports as well:
            for curve, expected_xml in [
                    (curve_b1_b2, 'expected_b1_b2.xml'),
                    (curve_b1_b3, 'expected_b1_b3.xml')]:
                exported_file = hazard_export.export(
                    curve.hazard_curve.output.id, result_dir)
                check_equal(case_10.__file__, expected_xml, exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase11TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 11: three logic-tree realizations
    plus mean and 0.1/0.9 quantile curves, each checked for poes and
    XML export."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        current = case_11.__file__
        result_dir = tempfile.mkdtemp()
        try:
            aaae = numpy.testing.assert_array_almost_equal
            cfg = os.path.join(os.path.dirname(current), 'job.ini')
            expected_curve_poes_b1_b2 = [0.0055, 0.00042, 5.77E-5, 0.0]
            expected_curve_poes_b1_b3 = [0.00995, 0.00076, 9.7E-5, 0.0]
            expected_curve_poes_b1_b4 = [0.018, 0.0013, 0.00014, 0.0]
            expected_mean_poes = [0.01067, 0.0008, 9.774E-5, 0.0]
            expected_q0_1_poes = [0.0055, 0.00042, 5.77E-5, 0.0]
            expected_q0_9_poes = [0.013975, 0.00103, 0.0001185, 0.0]

            job = self.run_hazard(cfg)

            # Test the poe values for the three realization curves; the
            # order_by on the source-model logic-tree path makes the
            # unpacking deterministic.
            curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
                models.HazardCurveData.objects
                .filter(hazard_curve__output__oq_job=job.id,
                        hazard_curve__lt_realization__isnull=False)
                .order_by(
                    'hazard_curve__lt_realization__lt_model__sm_lt_path'))

            # Sanity check, to make sure we have the curves ordered correctly:
            self.assertEqual(
                ['b1', 'b2'],
                curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b3'],
                curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b4'],
                curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

            aaae(expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
            aaae(expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)
            aaae(expected_curve_poes_b1_b4, curve_b1_b4.poes, decimal=4)

            # Test the mean curve:
            [mean_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='mean')
            aaae(expected_mean_poes, mean_curve.poes, decimal=4)

            # Test the quantile curves (ordered 0.1 then 0.9 by the
            # order_by on the quantile value):
            quantile_0_1_curve, quantile_0_9_curve = (
                models.HazardCurveData.objects.filter(
                    hazard_curve__output__oq_job=job.id,
                    hazard_curve__statistics='quantile'
                ).order_by('hazard_curve__quantile'))
            aaae(expected_q0_1_poes, quantile_0_1_curve.poes, decimal=4)
            aaae(expected_q0_9_poes, quantile_0_9_curve.poes, decimal=4)

            # Test the exports as well:
            for curve, expected_xml in [
                    (curve_b1_b2, 'expected_b1_b2.xml'),
                    (curve_b1_b3, 'expected_b1_b3.xml'),
                    (curve_b1_b4, 'expected_b1_b4.xml'),
                    (mean_curve, 'expected_mean.xml'),
                    (quantile_0_1_curve, 'expected_quantile_0_1.xml'),
                    (quantile_0_9_curve, 'expected_quantile_0_9.xml')]:
                exported_file = hazard_export.export(
                    curve.hazard_curve.output.id, result_dir)
                check_equal(current, expected_xml, exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase12TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 12: a single hazard curve whose
    poes and XML export are checked against known-good values."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            aaae = numpy.testing.assert_array_almost_equal
            cfg = os.path.join(os.path.dirname(case_12.__file__), 'job.ini')
            expected_curve_poes = [0.75421006, 0.08098179, 0.00686616]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)
            aaae(expected_curve_poes, curve.poes, decimal=2)

            # Test the exports as well:
            exported_file = hazard_export.export(
                curve.hazard_curve.output.id, result_dir)
            check_equal(case_12.__file__, 'expected_hazard_curves.xml',
                        exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
def get_mean_curves(job, imt, period=None):
    """Return ``[lon, lat, poe1, ..., poeN]`` rows for the mean hazard
    curves of the given job and IMT, sorted by longitude then latitude.

    `period` is the SA period to match (None for non-SA IMTs).
    """
    mean_data = models.HazardCurveData.objects.filter(
        hazard_curve__output__oq_job=job, hazard_curve__statistics='mean',
        hazard_curve__imt=imt, hazard_curve__sa_period=period)
    rows = [[cd.location.x, cd.location.y] + cd.poes for cd in mean_data]
    return sorted(rows)  # by lon, lat
# this test is described in https://bugs.launchpad.net/oq-engine/+bug/1226061
# I am only comparing the mean curves
class ClassicalHazardCase13TestCase(BaseQATestCase):
    """Regression test for https://bugs.launchpad.net/oq-engine/+bug/1226061;
    only the mean curves (PGA and SA-0.2) are compared against CSV."""

    CURRENTDIR = os.path.dirname(case_13.__file__)

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(self.CURRENTDIR, 'job.ini')
            job = self.run_hazard(cfg)
            curves_PGA = get_mean_curves(job, 'PGA')
            actual = write_csv(os.path.join(result_dir, 'PGA.csv'), curves_PGA)
            check_equal(case_13.__file__, 'expected/mean-PGA.csv', actual)
            curves_SA = get_mean_curves(job, 'SA', 0.2)
            actual = write_csv(os.path.join(result_dir, 'SA.csv'), curves_SA)
            check_equal(case_13.__file__, 'expected/mean-SA.csv', actual)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
# this test is described in https://bugs.launchpad.net/oq-engine/+bug/1226102
# the CSV files with the expected hazard_curves were provided by Damiano
class ClassicalHazardCase14TestCase(BaseQATestCase):
    """Regression test for https://bugs.launchpad.net/oq-engine/+bug/1226102:
    the computed PGA curves are compared against Damiano's expected CSVs
    for both GSIMs."""

    CURRENTDIR = os.path.dirname(case_14.__file__)

    @attr('qa', 'hazard', 'classical')
    def test(self):
        job = self.run_hazard(os.path.join(self.CURRENTDIR, 'job.ini'))
        # Check the curves produced by each GSIM against its expected file.
        for gsim, expected_dat in [
                ('AbrahamsonSilva2008', 'AS2008_expected_curves.dat'),
                ('CampbellBozorgnia2008', 'CB2008_expected_curves.dat')]:
            compare_hazard_curve_with_csv(
                job, ['simple_fault'], [gsim],
                'PGA', None, None,
                os.path.join(self.CURRENTDIR, expected_dat), ' ',
                rtol=0.01)
# this test is described in https://bugs.launchpad.net/oq-engine/+bug/1226061
# the CSV files with the expected hazard_curves were provided by Damiano
class ClassicalHazardCase15TestCase(BaseQATestCase):
    """Regression test for https://bugs.launchpad.net/oq-engine/+bug/1226061:
    every logic-tree path is compared against Damiano's expected CSVs for
    both PGA and SA(0.1)."""

    CURRENTDIR = os.path.dirname(case_15.__file__)

    @attr('qa', 'hazard', 'classical')
    def test(self):
        job = self.run_hazard(os.path.join(self.CURRENTDIR, 'job.ini'))
        lt_paths = [
            [['SM1'], ['BA2008', 'C2003']],
            [['SM1'], ['BA2008', 'T2002']],
            [['SM1'], ['CB2008', 'C2003']],
            [['SM1'], ['CB2008', 'T2002']],
            [['SM2', 'a3pt2b0pt8'], ['BA2008', '@']],
            [['SM2', 'a3pt2b0pt8'], ['CB2008', '@']],
            [['SM2', 'a3b1'], ['BA2008', '@']],
            [['SM2', 'a3b1'], ['CB2008', '@']],
        ]
        csvdir = os.path.join(self.CURRENTDIR, 'expected_results')
        for sm_path, gsim_path in lt_paths:
            # Expected files are named after the joined logic-tree paths,
            # grouped in one subdirectory per IMT.
            basename = 'hazard_curve-smltp_%s-gsimltp_%s.csv' % (
                '_'.join(sm_path), '_'.join(gsim_path))
            for subdir, imt, period, damping in [
                    ('PGA', 'PGA', None, None),
                    ('SA-0.1', 'SA', 0.1, 5.0)]:
                compare_hazard_curve_with_csv(
                    job, sm_path, gsim_path, imt, period, damping,
                    os.path.join(csvdir, subdir, basename), ' ', rtol=1e-7)
# NB: this is a regression test to make sure that the sampling
# works well even for huge source model logic trees, since
# in the past we had issues, https://bugs.launchpad.net/oq-engine/+bug/1312020
class ClassicalHazardCase16TestCase(qa_utils.BaseQATestCase):
    """Regression test for sampling of huge source model logic trees
    (https://bugs.launchpad.net/oq-engine/+bug/1312020): mean and
    0.1/0.9 quantile curves are checked."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        expected_mean_poes = [0.327905527354, 0.324717826053, 0.316179020913]
        expected_q0_1_poes = [0.198642855479, 0.19587955512, 0.188594171735]
        expected_q0_9_poes = [0.585553284108, 0.581083306028, 0.568977776502]
        cfg = os.path.join(os.path.dirname(case_16.__file__), 'job.ini')
        job = self.run_hazard(cfg)

        # mean
        [mean_curve] = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__statistics='mean')
        aaae(expected_mean_poes, mean_curve.poes, decimal=7)

        # quantiles, ordered 0.1 then 0.9 by the order_by clause
        quantile_0_1_curve, quantile_0_9_curve = (
            models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='quantile'
            ).order_by('hazard_curve__quantile'))
        aaae(expected_q0_1_poes, quantile_0_1_curve.poes, decimal=7)
        aaae(expected_q0_9_poes, quantile_0_9_curve.poes, decimal=7)
# test with 5 samplings of 2 sources, with weight 20% and 80%, and 1 point
class ClassicalHazardCase17TestCase(qa_utils.BaseQATestCase):
    """QA test for case 17: 5 samplings of 2 sources (weights 20%/80%)
    at a single point; all five PGA curves are checked."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        expected_curves_pga = [
            [0.27612144, 0.035435631, 0.011434286],
            [1.0, 1.0, 0.0],
            [1.0, 1.0, 0.0],
            [1.0, 1.0, 0.0],
            [1.0, 1.0, 0.0]]
        job = self.run_hazard(
            os.path.join(os.path.dirname(case_17.__file__), 'job.ini'))
        pga_data = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__imt='PGA').order_by('hazard_curve')
        actual_curves = [curve.poes for curve in pga_data]
        numpy.testing.assert_array_almost_equal(
            expected_curves_pga, actual_curves, decimal=7)
class ClassicalHazardCase19TestCase(qa_utils.BaseQATestCase):
    """QA test for case 19: the mean PGA curve is exported to CSV and its
    poes are compared against the expected CSV file."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_19.__file__), 'job.ini')
            job = self.run_hazard(cfg)
            curves = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='mean'
            )
            [pga_curve] = curves.filter(hazard_curve__imt='PGA')
            exported_file = hazard_export.export(
                pga_curve.hazard_curve.output.id, result_dir, 'csv')
            # NB: the format of the exported file is 'lon lat poe1 ... poeN'
            # we discard lon and lat and extract the poes.
            # Use context managers so the file handles are closed
            # deterministically (the bare open() calls leaked them).
            with open(exported_file) as exported:
                actual = [' '.join(line.split(' ')[2:]).strip()
                          for line in exported]
            fname = os.path.join(os.path.dirname(case_19.__file__),
                                 'expected', 'hazard_curve-mean.csv')
            # NB: the format of the expected file is lon lat, poe1 ... poeN,
            # we extract the poes
            # TODO: unify the engine and oq-lite export formats
            with open(fname) as expected_csv:
                expected = [line.split(',')[1] for line in expected_csv]
            self.assertEqual(actual, expected)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
Fixed ClassicalHazardCase19TestCase
Former-commit-id: 06fb3a3187ad7c3927d0ee65da94fe96ba7c8e1c [formerly 22210460b3274d3ed4fc178ecc45f27d46bd91e1]
Former-commit-id: 821590fd8c468f1549c649ebe40506ec22853548
Former-commit-id: b26d2aa057d369eb4fd9982599831939c5e9a67b
# Copyright (c) 2010-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import StringIO
import numpy
import os
import shutil
import tempfile
from nose.plugins.attrib import attr
from openquake.commonlib.writers import write_csv
from openquake.commonlib.tests import check_equal
from openquake.engine.db import models
from openquake.engine.export import core as hazard_export
from qa_tests import _utils as qa_utils
from qa_tests._utils import BaseQATestCase, compare_hazard_curve_with_csv
from openquake.qa_tests_data.classical import (
case_1, case_2, case_3, case_4, case_5, case_6, case_7, case_8, case_9,
case_10, case_11, case_12, case_13, case_14, case_15, case_16, case_17,
case_19)
# Short alias for the numpy array-comparison helper used throughout this module.
aaae = numpy.testing.assert_array_almost_equal
class ClassicalHazardCase1TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 1: one PGA curve and one SA(0.1)
    curve, each checked for poes and for the exported NRML XML."""

    # Expected NRML payloads for the XML-export checks below.
    EXPECTED_PGA_XML = """<?xml version='1.0' encoding='UTF-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml" xmlns="http://openquake.org/xmlns/nrml/0.4">
  <hazardCurves sourceModelTreePath="b1" gsimTreePath="b1" IMT="PGA" investigationTime="1.0">
    <IMLs>1.000000000E-01 4.000000000E-01 6.000000000E-01</IMLs>
    <hazardCurve>
      <gml:Point>
        <gml:pos>0.0 0.0</gml:pos>
      </gml:Point>
      <poEs>4.570134863E-01 5.862678774E-02 6.866164397E-03</poEs>
    </hazardCurve>
  </hazardCurves>
</nrml>
"""

    EXPECTED_SA_XML = """<?xml version='1.0' encoding='UTF-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml" xmlns="http://openquake.org/xmlns/nrml/0.4">
  <hazardCurves sourceModelTreePath="b1" gsimTreePath="b1" IMT="SA" investigationTime="1.0" saPeriod="0.1" saDamping="5.0">
    <IMLs>1.000000000E-01 4.000000000E-01 6.000000000E-01</IMLs>
    <hazardCurve>
      <gml:Point>
        <gml:pos>0.0 0.0</gml:pos>
      </gml:Point>
      <poEs>6.086747647E-01 3.308304637E-01 2.014712169E-01</poEs>
    </hazardCurve>
  </hazardCurves>
</nrml>
"""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_1.__file__), 'job.ini')
            expected_curve_pga = [0.4570, 0.0587, 0.0069]
            expected_curve_sa = [
                0.608675003748, 0.330831513139, 0.201472214825
            ]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            curves = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id
            )
            [pga_curve] = curves.filter(hazard_curve__imt='PGA')
            numpy.testing.assert_array_almost_equal(
                expected_curve_pga, pga_curve.poes, decimal=4
            )
            [sa_curve] = curves.filter(
                hazard_curve__imt='SA', hazard_curve__sa_period=0.1
            )
            numpy.testing.assert_array_almost_equal(
                expected_curve_sa, sa_curve.poes, decimal=4
            )

            # Test the exports as well:
            exported_file = hazard_export.export(
                pga_curve.hazard_curve.output.id, result_dir)
            self.assert_xml_equal(
                StringIO.StringIO(self.EXPECTED_PGA_XML), exported_file)
            exported_file = hazard_export.export(
                sa_curve.hazard_curve.output.id, result_dir)
            self.assert_xml_equal(
                StringIO.StringIO(self.EXPECTED_SA_XML), exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase2TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 2: a single hazard curve whose
    poes and XML export are checked against known-good values."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_2.__file__), 'job.ini')
            expected_curve_poes = [0.0095, 0.00076, 0.000097, 0.0]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [actual_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes, actual_curve.poes, decimal=3)

            # Test the export as well:
            exported_file = hazard_export.export(
                actual_curve.hazard_curve.output.id, result_dir)
            check_equal(case_2.__file__, 'expected_hazard_curves.xml',
                        exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase3TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 3: a single hazard curve whose
    poes and XML export are checked against known-good values."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_3.__file__), 'job.ini')
            expected_curve_poes = [0.63212, 0.47291, 0.04084]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [actual_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes, actual_curve.poes, decimal=2)

            # Test the export as well:
            exported_file = hazard_export.export(
                actual_curve.hazard_curve.output.id, result_dir)
            check_equal(case_3.__file__, 'expected_hazard_curves.xml',
                        exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase4TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 4: a single hazard curve whose
    poes and XML export are checked against known-good values."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_4.__file__), 'job.ini')
            expected_curve_poes = [0.63212, 0.61186, 0.25110]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [actual_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes, actual_curve.poes, decimal=3)

            # Test the export as well:
            exported_file = hazard_export.export(
                actual_curve.hazard_curve.output.id, result_dir)
            check_equal(case_4.__file__, 'expected_hazard_curves.xml',
                        exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase5TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 5 (slow): a single hazard curve
    whose poes and XML export are checked against known-good values."""

    @attr('qa', 'hazard', 'classical', 'slow')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_5.__file__), 'job.ini')
            expected_curve_poes = [0.632120, 0.54811, 0.15241]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [actual_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes, actual_curve.poes, decimal=3)

            # Test the export as well:
            exported_file = hazard_export.export(
                actual_curve.hazard_curve.output.id, result_dir)
            check_equal(case_5.__file__, 'expected_hazard_curves.xml',
                        exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase6TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 6 (slow): a single hazard curve
    whose poes and XML export are checked against known-good values."""

    @attr('qa', 'hazard', 'classical', 'slow')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_6.__file__), 'job.ini')
            expected_curve_poes = [0.86466, 0.82460, 0.36525]

            job = self.run_hazard(cfg)

            # Test the poe values of the single curve:
            [actual_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes, actual_curve.poes, decimal=2)

            # Test the export as well:
            exported_file = hazard_export.export(
                actual_curve.hazard_curve.output.id, result_dir)
            check_equal(case_6.__file__, 'expected_hazard_curves.xml',
                        exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase7TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 7 (slow): two logic-tree
    realizations (b1, b2) plus their mean curve, each checked for poes
    and XML export."""

    @attr('qa', 'hazard', 'classical', 'slow')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_7.__file__), 'job.ini')
            expected_curve_poes_b1 = [0.86466, 0.82460, 0.36525]
            expected_curve_poes_b2 = [0.63212, 0.61186, 0.25110]
            expected_mean_poes = [0.794898, 0.760778, 0.331005]

            job = self.run_hazard(cfg)

            # Test the poe values for the two curves; the order_by on the
            # source-model logic-tree path makes the unpacking deterministic.
            actual_curve_b1, actual_curve_b2 = (
                models.HazardCurveData.objects
                .filter(hazard_curve__output__oq_job=job.id,
                        hazard_curve__lt_realization__isnull=False)
                .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')
            )

            # Sanity check, to make sure we have the curves ordered correctly:
            self.assertEqual(
                ['b1'], actual_curve_b1.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b2'], actual_curve_b2.hazard_curve.lt_realization.sm_lt_path)

            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1, actual_curve_b1.poes, decimal=3)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b2, actual_curve_b2.poes, decimal=3)

            # Test the mean curve:
            [mean_curve] = models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='mean')
            numpy.testing.assert_array_almost_equal(
                expected_mean_poes, mean_curve.poes, decimal=3)

            # Test the exports as well:
            exported_file_b1 = hazard_export.export(
                actual_curve_b1.hazard_curve.output.id, result_dir)
            check_equal(case_7.__file__, 'expected_b1.xml', exported_file_b1)
            exported_file_b2 = hazard_export.export(
                actual_curve_b2.hazard_curve.output.id, result_dir)
            check_equal(case_7.__file__, 'expected_b2.xml', exported_file_b2)
            # mean:
            exported_file_mean = hazard_export.export(
                mean_curve.hazard_curve.output.id, result_dir)
            check_equal(case_7.__file__, 'expected_mean.xml',
                        exported_file_mean)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase8TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 8: three logic-tree realizations
    (b1_b2, b1_b3, b1_b4), each checked for poes and XML export."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_8.__file__), 'job.ini')
            expected_curve_poes_b1_b2 = [0.095163, 0.012362, 0.002262, 0.0]
            expected_curve_poes_b1_b3 = [0.009950, 0.00076, 9.99995E-6, 0.0]
            expected_curve_poes_b1_b4 = [0.0009995, 4.5489E-5, 4.07365E-6, 0.0]

            job = self.run_hazard(cfg)

            # Test the poe values for the three curves; the order_by on the
            # source-model logic-tree path makes the unpacking deterministic.
            curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
                models.HazardCurveData.objects
                .filter(hazard_curve__output__oq_job=job.id)
                .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')
            )

            # Sanity check, to make sure we have the curves ordered correctly:
            self.assertEqual(
                ['b1', 'b2'],
                curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b3'],
                curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b4'],
                curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=3)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=3)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b4, curve_b1_b4.poes, decimal=3)

            # Test the exports as well:
            for curve, expected_xml in [
                    (curve_b1_b2, 'expected_b1_b2.xml'),
                    (curve_b1_b3, 'expected_b1_b3.xml'),
                    (curve_b1_b4, 'expected_b1_b4.xml')]:
                exported_file = hazard_export.export(
                    curve.hazard_curve.output.id, result_dir)
                check_equal(case_8.__file__, expected_xml, exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase9TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 9: two logic-tree realizations
    (b1_b2, b1_b3), each checked for poes and XML export."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_9.__file__), 'job.ini')
            expected_curve_poes_b1_b2 = [0.00995, 0.00076, 9.7E-5, 0.0]
            expected_curve_poes_b1_b3 = [0.00995, 0.00076, 0.000104, 0.0]

            job = self.run_hazard(cfg)

            # Test the poe values for the two curves; the order_by on the
            # source-model logic-tree path makes the unpacking deterministic.
            curve_b1_b2, curve_b1_b3 = (
                models.HazardCurveData.objects
                .filter(hazard_curve__output__oq_job=job.id)
                .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')
            )

            # Sanity check, to make sure we have the curves ordered correctly:
            self.assertEqual(
                ['b1', 'b2'],
                curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b3'],
                curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)

            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)

            # Test the exports as well:
            for curve, expected_xml in [
                    (curve_b1_b2, 'expected_b1_b2.xml'),
                    (curve_b1_b3, 'expected_b1_b3.xml')]:
                exported_file = hazard_export.export(
                    curve.hazard_curve.output.id, result_dir)
                check_equal(case_9.__file__, expected_xml, exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase10TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 10: two logic-tree realizations
    (b1_b2, b1_b3), each checked for poes and XML export."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        try:
            cfg = os.path.join(os.path.dirname(case_10.__file__), 'job.ini')
            expected_curve_poes_b1_b2 = [0.00995, 0.00076, 9.7E-5, 0.0]
            expected_curve_poes_b1_b3 = [0.043, 0.0012, 7.394E-5, 0.0]

            job = self.run_hazard(cfg)

            # Test the poe values for the two curves; the order_by on the
            # source-model logic-tree path makes the unpacking deterministic.
            curve_b1_b2, curve_b1_b3 = (
                models.HazardCurveData.objects
                .filter(hazard_curve__output__oq_job=job.id)
                .order_by('hazard_curve__lt_realization__lt_model__sm_lt_path')
            )

            # Sanity check, to make sure we have the curves ordered correctly:
            self.assertEqual(
                ['b1', 'b2'],
                curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
            self.assertEqual(
                ['b1', 'b3'],
                curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)

            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
            numpy.testing.assert_array_almost_equal(
                expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)

            # Test the exports as well:
            for curve, expected_xml in [
                    (curve_b1_b2, 'expected_b1_b2.xml'),
                    (curve_b1_b3, 'expected_b1_b3.xml')]:
                exported_file = hazard_export.export(
                    curve.hazard_curve.output.id, result_dir)
                check_equal(case_10.__file__, expected_xml, exported_file)
        finally:
            # Remove the scratch dir even when an assertion above fails,
            # so failing runs do not leak temporary directories.
            shutil.rmtree(result_dir)
class ClassicalHazardCase11TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 11.

    Checks the three realization curves (b1/b2, b1/b3, b1/b4), the mean
    curve, the 0.1 and 0.9 quantile curves, and each of their XML exports.
    """

    @attr('qa', 'hazard', 'classical')
    def test(self):
        current = case_11.__file__
        result_dir = tempfile.mkdtemp()
        aaae = numpy.testing.assert_array_almost_equal

        cfg = os.path.join(os.path.dirname(current), 'job.ini')
        # Expected PoEs for each realization and for the statistics.
        expected_curve_poes_b1_b2 = [0.0055, 0.00042, 5.77E-5, 0.0]
        expected_curve_poes_b1_b3 = [0.00995, 0.00076, 9.7E-5, 0.0]
        expected_curve_poes_b1_b4 = [0.018, 0.0013, 0.00014, 0.0]

        expected_mean_poes = [0.01067, 0.0008, 9.774E-5, 0.0]

        expected_q0_1_poes = [0.0055, 0.00042, 5.77E-5, 0.0]
        expected_q0_9_poes = [0.013975, 0.00103, 0.0001185, 0.0]

        job = self.run_hazard(cfg)

        # Test the poe values for the three realization curves:
        curve_b1_b2, curve_b1_b3, curve_b1_b4 = (
            models.HazardCurveData.objects
            .filter(hazard_curve__output__oq_job=job.id,
                    hazard_curve__lt_realization__isnull=False)
            .order_by(
                'hazard_curve__lt_realization__lt_model__sm_lt_path'))

        # Sanity check, to make sure we have the curves ordered correctly:
        self.assertEqual(
            ['b1', 'b2'],
            curve_b1_b2.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(
            ['b1', 'b3'],
            curve_b1_b3.hazard_curve.lt_realization.sm_lt_path)
        self.assertEqual(
            ['b1', 'b4'],
            curve_b1_b4.hazard_curve.lt_realization.sm_lt_path)

        aaae(expected_curve_poes_b1_b2, curve_b1_b2.poes, decimal=4)
        aaae(expected_curve_poes_b1_b3, curve_b1_b3.poes, decimal=4)
        aaae(expected_curve_poes_b1_b4, curve_b1_b4.poes, decimal=4)

        # Test the mean curve:
        [mean_curve] = models.HazardCurveData.objects\
            .filter(hazard_curve__output__oq_job=job.id,
                    hazard_curve__statistics='mean')
        aaae(expected_mean_poes, mean_curve.poes, decimal=4)

        # Test the quantile curves (ordered 0.1 then 0.9 by quantile):
        quantile_0_1_curve, quantile_0_9_curve = \
            models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='quantile'
            ).order_by('hazard_curve__quantile')
        aaae(expected_q0_1_poes, quantile_0_1_curve.poes, decimal=4)
        aaae(expected_q0_9_poes, quantile_0_9_curve.poes, decimal=4)

        # Test the exports as well:
        exported_file_b1_b2 = hazard_export.export(
            curve_b1_b2.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_b1_b2.xml', exported_file_b1_b2)
        exported_file_b1_b3 = hazard_export.export(
            curve_b1_b3.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_b1_b3.xml', exported_file_b1_b3)
        exported_file_b1_b4 = hazard_export.export(
            curve_b1_b4.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_b1_b4.xml', exported_file_b1_b4)
        exported_file_mean = hazard_export.export(
            mean_curve.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_mean.xml', exported_file_mean)
        q01_file = hazard_export.export(
            quantile_0_1_curve.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_quantile_0_1.xml', q01_file)
        q09_file = hazard_export.export(
            quantile_0_9_curve.hazard_curve.output.id, result_dir)
        check_equal(current, 'expected_quantile_0_9.xml', q09_file)

        shutil.rmtree(result_dir)
class ClassicalHazardCase12TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 12: a single hazard curve plus
    its XML export."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        cfg = os.path.join(os.path.dirname(case_12.__file__), 'job.ini')
        job = self.run_hazard(cfg)
        export_dir = tempfile.mkdtemp()

        # Exactly one curve is expected for this job.
        [curve] = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id)
        numpy.testing.assert_array_almost_equal(
            [0.75421006, 0.08098179, 0.00686616], curve.poes, decimal=2)

        # Check the XML export against the expected file.
        exported = hazard_export.export(
            curve.hazard_curve.output.id, export_dir)
        check_equal(case_12.__file__, 'expected_hazard_curves.xml',
                    exported)

        shutil.rmtree(export_dir)
def get_mean_curves(job, imt, period=None):
    """Return the mean hazard curves for the given job and IMT as rows of
    ``[lon, lat, poe1, ...]``, sorted by lon then lat.

    :param job: the hazard job (or its id) to query
    :param imt: intensity measure type string, e.g. 'PGA' or 'SA'
    :param period: SA period, or None for non-SA IMTs
    """
    queryset = models.HazardCurveData.objects.filter(
        hazard_curve__output__oq_job=job, hazard_curve__statistics='mean',
        hazard_curve__imt=imt, hazard_curve__sa_period=period)
    rows = [[record.location.x, record.location.y] + record.poes
            for record in queryset]
    rows.sort()  # by lon, lat
    return rows
# this test is described in https://bugs.launchpad.net/oq-engine/+bug/1226061
# I am only comparing the mean curves
class ClassicalHazardCase13TestCase(BaseQATestCase):
    """Compare the mean hazard curves (PGA and SA(0.2)) against expected
    CSV files; see https://bugs.launchpad.net/oq-engine/+bug/1226061."""

    # Directory holding job.ini and the expected result files.
    CURRENTDIR = os.path.dirname(case_13.__file__)

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        cfg = os.path.join(self.CURRENTDIR, 'job.ini')
        job = self.run_hazard(cfg)
        # Mean PGA curves, written to CSV and compared to the expected file.
        curves_PGA = get_mean_curves(job, 'PGA')
        actual = write_csv(os.path.join(result_dir, 'PGA.csv'), curves_PGA)
        check_equal(case_13.__file__, 'expected/mean-PGA.csv', actual)
        # Mean SA(0.2) curves.
        curves_SA = get_mean_curves(job, 'SA', 0.2)
        actual = write_csv(os.path.join(result_dir, 'SA.csv'), curves_SA)
        check_equal(case_13.__file__, 'expected/mean-SA.csv', actual)
        shutil.rmtree(result_dir)
# this test is described in https://bugs.launchpad.net/oq-engine/+bug/1226102
# the CSV files with the expected hazard_curves were provided by Damiano
class ClassicalHazardCase14TestCase(BaseQATestCase):
    """Compare per-GSIM PGA hazard curves (AbrahamsonSilva2008 and
    CampbellBozorgnia2008) with expected CSV files provided externally;
    see https://bugs.launchpad.net/oq-engine/+bug/1226102."""

    # Directory holding job.ini and the expected data files.
    CURRENTDIR = os.path.dirname(case_14.__file__)

    @attr('qa', 'hazard', 'classical')
    def test(self):
        cfg = os.path.join(self.CURRENTDIR, 'job.ini')
        job = self.run_hazard(cfg)
        # AbrahamsonSilva2008 curves, within 1% relative tolerance.
        compare_hazard_curve_with_csv(
            job, ['simple_fault'], ['AbrahamsonSilva2008'],
            'PGA', None, None,
            os.path.join(self.CURRENTDIR, 'AS2008_expected_curves.dat'), ' ',
            rtol=0.01)
        # CampbellBozorgnia2008 curves, same tolerance.
        compare_hazard_curve_with_csv(
            job, ['simple_fault'], ['CampbellBozorgnia2008'],
            'PGA', None, None,
            os.path.join(self.CURRENTDIR, 'CB2008_expected_curves.dat'), ' ',
            rtol=0.01)
# this test is described in https://bugs.launchpad.net/oq-engine/+bug/1226061
# the CSV files with the expected hazard_curves were provided by Damiano
class ClassicalHazardCase15TestCase(BaseQATestCase):
    """Logic-tree enumeration test: each (source-model path, GSIM path)
    realization's curves, for PGA and SA(0.1), are compared against
    pre-computed CSVs; see https://bugs.launchpad.net/oq-engine/+bug/1226061."""

    CURRENTDIR = os.path.dirname(case_15.__file__)

    @attr('qa', 'hazard', 'classical')
    def test(self):
        cfg = os.path.join(self.CURRENTDIR, 'job.ini')
        job = self.run_hazard(cfg)
        # All (source-model path, GSIM path) combinations of the logic tree.
        lt_paths = [
            [['SM1'], ['BA2008', 'C2003']],
            [['SM1'], ['BA2008', 'T2002']],
            [['SM1'], ['CB2008', 'C2003']],
            [['SM1'], ['CB2008', 'T2002']],
            [['SM2', 'a3pt2b0pt8'], ['BA2008', '@']],
            [['SM2', 'a3pt2b0pt8'], ['CB2008', '@']],
            [['SM2', 'a3b1'], ['BA2008', '@']],
            [['SM2', 'a3b1'], ['CB2008', '@']],
        ]
        csvdir = os.path.join(self.CURRENTDIR, 'expected_results')
        j = '_'.join  # joins path components into the expected file names
        for sm_path, gsim_path in lt_paths:
            # PGA curves for this realization.
            fname = 'PGA/hazard_curve-smltp_%s-gsimltp_%s.csv' % (
                j(sm_path), j(gsim_path))
            compare_hazard_curve_with_csv(
                job, sm_path, gsim_path, 'PGA', None, None,
                os.path.join(csvdir, fname), ' ', rtol=1e-7)
            # SA(0.1) curves for this realization.
            fname = 'SA-0.1/hazard_curve-smltp_%s-gsimltp_%s.csv' % (
                j(sm_path), j(gsim_path))
            compare_hazard_curve_with_csv(
                job, sm_path, gsim_path, 'SA', 0.1, 5.0,
                os.path.join(csvdir, fname), ' ', rtol=1e-7)
# NB: this is a regression test to make sure that the sampling
# works well even for huge source model logic trees, since
# in the past we had issues, https://bugs.launchpad.net/oq-engine/+bug/1312020
class ClassicalHazardCase16TestCase(qa_utils.BaseQATestCase):
    """Regression test for sampling of huge source-model logic trees
    (https://bugs.launchpad.net/oq-engine/+bug/1312020): checks the mean
    and the 0.1/0.9 quantile curves."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        # Bind the assertion helper locally, as the sibling test cases do.
        # Previously `aaae` was referenced here without being defined in
        # this scope, which raises NameError unless a module-level alias
        # happens to exist.
        aaae = numpy.testing.assert_array_almost_equal

        expected_mean_poes = [0.327905527354, 0.324717826053, 0.316179020913]

        expected_q0_1_poes = [0.198642855479, 0.19587955512, 0.188594171735]
        expected_q0_9_poes = [0.585553284108, 0.581083306028, 0.568977776502]

        job = self.run_hazard(
            os.path.join(os.path.dirname(case_16.__file__), 'job.ini'))

        # mean
        [mean_curve] = models.HazardCurveData.objects \
            .filter(hazard_curve__output__oq_job=job.id,
                    hazard_curve__statistics='mean')
        aaae(expected_mean_poes, mean_curve.poes, decimal=7)

        # quantiles (ordered 0.1 then 0.9 by quantile value)
        quantile_0_1_curve, quantile_0_9_curve = \
            models.HazardCurveData.objects.filter(
                hazard_curve__output__oq_job=job.id,
                hazard_curve__statistics='quantile').order_by(
                    'hazard_curve__quantile')
        aaae(expected_q0_1_poes, quantile_0_1_curve.poes, decimal=7)
        aaae(expected_q0_9_poes, quantile_0_9_curve.poes, decimal=7)
# test with 5 samplings of 2 sources, with weight 20% and 80%, and 1 point
class ClassicalHazardCase17TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 17: 5 samplings of 2 sources
    (weights 20%/80%) at a single point; checks the 5 PGA curves."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        job = self.run_hazard(
            os.path.join(os.path.dirname(case_17.__file__), 'job.ini'))

        # Collect the PGA curves in hazard_curve order.
        query = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id, hazard_curve__imt='PGA'
        ).order_by('hazard_curve')
        actual = [record.poes for record in query]

        expected = [
            [0.27612144, 0.035435631, 0.011434286],
            [1.0, 1.0, 0.0],
            [1.0, 1.0, 0.0],
            [1.0, 1.0, 0.0],
            [1.0, 1.0, 0.0],
        ]
        numpy.testing.assert_array_almost_equal(expected, actual, decimal=7)
class ClassicalHazardCase19TestCase(qa_utils.BaseQATestCase):
    """QA test for classical hazard case 19: checks the CSV export of the
    mean PGA hazard curve against an expected file."""

    @attr('qa', 'hazard', 'classical')
    def test(self):
        result_dir = tempfile.mkdtemp()
        cfg = os.path.join(os.path.dirname(case_19.__file__), 'job.ini')
        job = self.run_hazard(cfg)
        curves = models.HazardCurveData.objects.filter(
            hazard_curve__output__oq_job=job.id,
            hazard_curve__statistics='mean'
        )
        [pga_curve] = curves.filter(hazard_curve__imt='PGA')
        exported_file = hazard_export.export(
            pga_curve.hazard_curve.output.id, result_dir, 'csv')
        # NB: the format of the exported file is 'lon lat poe1 ... poeN'
        # we discard lon and lat and extract the poes.
        # Use context managers so the file handles are closed; the
        # previous version leaked both open() handles.
        with open(exported_file) as export_fh:
            actual = [' '.join(line.split(' ')[2:]).strip()
                      for line in export_fh]
        fname = os.path.join(os.path.dirname(case_19.__file__), 'expected',
                             'hazard_curve-mean.csv')
        # NB: the format of the expected file is lon lat, poe1 ... poeN, ...
        # we extract the poes
        # TODO: unify the engine and oq-lite export formats
        with open(fname) as expected_fh:
            expected = [line.split(',')[1] for line in expected_fh][1:]
        self.assertEqual(actual, expected)
        shutil.rmtree(result_dir)
|
#!/usr/bin/env python
# encoding: utf-8
""" executable to interact with notifo.com using the notifo.py library """
import notifo
from optparse import OptionParser
def init_parser():
    """Build the command-line option parser and parse sys.argv.

    Returns:
        (parser, options, args): the parser itself (so callers can report
        errors via parser.error), the parsed option values, and the
        positional arguments.
    """
    # Implicit string concatenation keeps the usage text well-formed;
    # the previous backslash continuation embedded the source line's
    # leading whitespace into the help output.
    usage = ("usage: %prog -u user -s secret -n name [-l label] "
             "[-t title] [-c callback] TEXT")
    parser = OptionParser(usage, version="%prog " + notifo.__version__)
    parser.add_option("-u", "--user", action="store", dest="user",
                      help="your notifo username")
    parser.add_option("-s", "--secret", action="store", dest="secret",
                      help="your notifo API secret")
    parser.add_option("-n", "--name", action="store", dest="name",
                      help="recipient for the notification")
    parser.add_option("-l", "--label", action="store", dest="label",
                      help="label for the notification")
    parser.add_option("-t", "--title", action="store", dest="title",
                      help="title of the notification")
    parser.add_option("-c", "--callback", action="store", dest="callback",
                      help="callback URL to call")
    (options, args) = parser.parse_args()
    return (parser, options, args)
def main():
""" main function """
# get options and arguments
(parser, options, args) = init_parser()
# initialize result variable
result = None
# check for values which are always needed
if not options.user:
parser.error("No user given.")
if not options.secret:
parser.error("No API secret given.")
if not options.name:
parser.error("No recipient given.")
# If there is no message, we probably want to subscribe a user
if len(args) < 1:
parser.error("No message text given.")
else:
params = {}
params["to"] = options.name
params["msg"] = args[0]
if options.label:
params["label"] = options.label
if options.title:
params["title"] = options.title
if options.callback:
params["uri"] = options.callback
# send notification
result = notifo.send_notification(options.user, options.secret, **params)
if result is None:
print "Something went wrong. Check parameters and try again."
if __name__ == '__main__':
main()
Fix whitespace in the usage string's line continuation.
#!/usr/bin/env python
# encoding: utf-8
""" executable to interact with notifo.com using the notifo.py library """
import notifo
from optparse import OptionParser
def init_parser():
    """Build the command-line option parser and parse sys.argv.

    Returns:
        (parser, options, args): the parser itself (so callers can report
        errors via parser.error), the parsed option values, and the
        positional arguments.
    """
    # NOTE(review): the backslash continuation makes the next source
    # line's leading whitespace part of the usage string, so the help
    # output contains a run of blanks -- presumably cosmetic; confirm.
    usage = "usage: %prog -u user -s secret -n name [-l label] \
[-t title] [-c callback] TEXT"
    parser = OptionParser(usage, version="%prog " + notifo.__version__)
    parser.add_option("-u", "--user", action="store", dest="user",
                      help="your notifo username")
    parser.add_option("-s", "--secret", action="store", dest="secret",
                      help="your notifo API secret")
    parser.add_option("-n", "--name", action="store", dest="name",
                      help="recipient for the notification")
    parser.add_option("-l", "--label", action="store", dest="label",
                      help="label for the notification")
    parser.add_option("-t", "--title", action="store", dest="title",
                      help="title of the notification")
    parser.add_option("-c", "--callback", action="store", dest="callback",
                      help="callback URL to call")
    (options, args) = parser.parse_args()
    return (parser, options, args)
def main():
    """Parse the command line, validate the required options and send a
    notification via notifo.send_notification."""
    # get options and arguments
    (parser, options, args) = init_parser()

    # initialize result variable
    result = None

    # check for values which are always needed
    if not options.user:
        parser.error("No user given.")
    if not options.secret:
        parser.error("No API secret given.")
    if not options.name:
        parser.error("No recipient given.")

    # If there is no message, we probably want to subscribe a user
    # NOTE(review): despite the comment above, a missing message is simply
    # treated as an error here; no subscribe path exists in this version.
    if len(args) < 1:
        parser.error("No message text given.")
    else:
        params = {}
        params["to"] = options.name
        params["msg"] = args[0]
        if options.label:
            params["label"] = options.label
        if options.title:
            params["title"] = options.title
        if options.callback:
            params["uri"] = options.callback
        # send notification
        result = notifo.send_notification(options.user, options.secret, **params)

    # send_notification returning None indicates failure.
    if result is None:
        print "Something went wrong. Check parameters and try again."


if __name__ == '__main__':
    main()
|
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT."""

    # Attention: tools parse and update these lines.
    # Do not reformat or rename TFRT_COMMIT / TFRT_SHA256.
    TFRT_COMMIT = "5a10618f10f9b35269aea32772afb017af27f81a"
    TFRT_SHA256 = "a06e0b59c4d5a7515bf1db2dfa5a4366ce22f7703d535585c25dda2828df2cf7"

    # Fetch the pinned TFRT archive (via the TF mirror URLs helper).
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
Update TFRT dependency to use revision
http://github.com/tensorflow/runtime/commit/95830591aaf17b79ee72708b05f8348fba32e518.
PiperOrigin-RevId: 419407707
Change-Id: I36e16826cd317e05fe73f594da45882b8252596d
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT."""

    # Attention: tools parse and update these lines.
    # Do not reformat or rename TFRT_COMMIT / TFRT_SHA256.
    TFRT_COMMIT = "95830591aaf17b79ee72708b05f8348fba32e518"
    TFRT_SHA256 = "e067db4c5eba6b2f272ebf397d0b9f0b2af5df1e42c714fc025b5a394c8cc45e"

    # Fetch the pinned TFRT archive (via the TF mirror URLs helper).
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
|
"""Base functionality of ffmpeg HA wrapper."""
import logging
import shlex
import subprocess
_LOGGER = logging.getLogger(__name__)
ITER_STDOUT = 'OUT'
ITER_STDERR = 'ERR'
class HAFFmpeg(object):
"""Base HA FFmpeg process.
Object is iterable but only for data streams! For other things use the
process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin, chunk_size=1024, iter_input=ITER_STDOUT):
"""Base initialize."""
self._ffmpeg = ffmpeg_bin
self._argv = [ffmpeg_bin]
self._proc = None
self._chunk_size = chunk_size
self._iter_input = iter_input
# pylint: disable=too-many-arguments
def open(self, cmd, output="-", extra_cmd=None, text=False,
stdout_pipe=True, stderr_pipe=False):
"""Start a ffmpeg instance and pipe output."""
stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL
if self._proc is not None:
_LOGGER.critical("FFmpeg is allready running!")
return
# add cmds
self._argv.append(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
# add output
self._argv.append(output)
# start ffmpeg
self._proc = subprocess.Popen(
self._argv,
stderr=stderr,
stdout=stdout,
universal_newlines=text
)
def close(self, timeout=15):
"""Stop a ffmpeg instance."""
if self._proc is None:
_LOGGER.error("FFmpeg isn't running!")
return
# send stop to ffmpeg
self._proc.terminate()
try:
self._proc.wait(timeout=timeout)
except subprocess.TimeoutExpired:
_LOGGER.warning("Timeout while waiting of FFmpeg.")
# clean ffmpeg cmd
self._argv = [self._ffmpeg]
self._proc = None
@property
def process(self):
"""Return a Popen object or None of not running."""
return self._proc
def __iter__(self):
"""Read data from ffmpeg PIPE/STDERR as iter."""
return self
def __next__(self):
"""Get next buffer data."""
if self._proc is None or self._proc.poll() is not None:
raise StopIteration
# generate reading from
if self._iter_input == ITER_STDERR:
read_from = self._proc.stderr
else:
read_from = self._proc.stdout
# check if reading from pipe
if read_from is None:
raise StopIteration
return read_from.read(self._chunk_size)
Add debug log messages.
"""Base functionality of ffmpeg HA wrapper."""
import logging
import shlex
import subprocess
_LOGGER = logging.getLogger(__name__)
ITER_STDOUT = 'OUT'
ITER_STDERR = 'ERR'
class HAFFmpeg(object):
"""Base HA FFmpeg process.
Object is iterable but only for data streams! For other things use the
process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin, chunk_size=1024, iter_input=ITER_STDOUT):
"""Base initialize."""
self._ffmpeg = ffmpeg_bin
self._argv = [ffmpeg_bin]
self._proc = None
self._chunk_size = chunk_size
self._iter_input = iter_input
# pylint: disable=too-many-arguments
def open(self, cmd, output="-", extra_cmd=None, text=False,
stdout_pipe=True, stderr_pipe=False):
"""Start a ffmpeg instance and pipe output."""
stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL
if self._proc is not None:
_LOGGER.critical("FFmpeg is allready running!")
return
# add cmds
self._argv.append(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
# add output
self._argv.append(output)
# start ffmpeg
_LOGGER.debug("Start FFmpeg with %s.", str(self._argv))
self._proc = subprocess.Popen(
self._argv,
stderr=stderr,
stdout=stdout,
universal_newlines=text
)
def close(self, timeout=15):
"""Stop a ffmpeg instance."""
if self._proc is None:
_LOGGER.error("FFmpeg isn't running!")
return
# send stop to ffmpeg
self._proc.terminate()
try:
self._proc.wait(timeout=timeout)
_LOGGER.debug("Close FFmpeg process.")
except subprocess.TimeoutExpired:
_LOGGER.warning("Timeout while waiting of FFmpeg.")
# clean ffmpeg cmd
self._argv = [self._ffmpeg]
self._proc = None
@property
def process(self):
"""Return a Popen object or None of not running."""
return self._proc
def __iter__(self):
"""Read data from ffmpeg PIPE/STDERR as iter."""
return self
def __next__(self):
"""Get next buffer data."""
if self._proc is None or self._proc.poll() is not None:
raise StopIteration
# generate reading from
if self._iter_input == ITER_STDERR:
read_from = self._proc.stderr
else:
read_from = self._proc.stdout
# check if reading from pipe
if read_from is None:
raise StopIteration
return read_from.read(self._chunk_size)
|
import ConfigParser
import sys
import time
import logging
import logging.handlers
import os
from campus_factory.ClusterStatus import ClusterStatus
from campus_factory.ClusterStatus import CondorConfig
from campus_factory.Parsers import RunExternal
from campus_factory.OfflineAds.OfflineAds import OfflineAds
class Factory:
    """
    The main class of the factory. Designed to be run inside the condor scheduler.

    @author: Derek Weitzel (dweitzel@cse.unl.edu)
    """

    def __init__(self, options):
        """
        Initialization function.

        @param options: A set of options in the form of an options parser
        Required options: config - location of configuration File
        """
        self.options = options

    def Intialize(self):
        """
        Function to initialize the factory's variables such as configuration
        and logging
        """
        # NOTE(review): the method name is misspelled ("Intialize"), but
        # Start() calls it with the same spelling; renaming would break
        # callers.
        # Read in the configuration file
        self.config_file = self.options.config
        self.config = ConfigParser.ConfigParser()
        files_read = self.config.read([self.config_file])

        # check if no files read in
        if len(files_read) < 1:
            sys.stderr.write("No configuration files found. Location = %s\n" % self.config_file)
            sys.exit(1)

        self._SetLogging()

        # Cache the offline-ads switch from config as a boolean.
        if self.config.get("general", "useoffline").lower() == "true":
            self.UseOffline = True
        else:
            self.UseOffline = False

        try:
            self.condor_config = CondorConfig()
        except EnvironmentError, inst:
            logging.exception(str(inst))
            raise inst

    def _SetLogging(self):
        """
        Setting the logging level and set the logging.
        """
        # Map the configured loglevel string to a logging constant.
        logging_levels = {'debug': logging.DEBUG,
                          'info': logging.INFO,
                          'warning': logging.WARNING,
                          'error': logging.ERROR,
                          'critical': logging.CRITICAL}
        level = logging_levels.get(self.config.get("general", "loglevel"))
        logdirectory = self.config.get("general", "logdirectory")
        # Rotate at ~10 MB, keeping 5 old log files.
        handler = logging.handlers.RotatingFileHandler(os.path.join(logdirectory, "campus_factory.log"),
                                                       maxBytes=10000000, backupCount=5)
        root_logger = logging.getLogger()
        root_logger.setLevel(level)
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        handler.setFormatter(formatter)
        root_logger.addHandler(handler)

    def Restart(self):
        # Restart the factory job by holding, then releasing, it in the
        # condor queue.
        status = ClusterStatus()

        # Get the factory id
        factoryID = status.GetFactoryID()

        # Hold then release the factory in the queue
        # NOTE(review): the tuple is unpacked as (stderr, stdout) here but
        # as (stdout, stderr) in SingleSubmit -- one of the two orders is
        # presumably wrong; verify RunExternal's return order.
        (stderr, stdout) = RunExternal("condor_hold %s" % factoryID)
        print "Stderr = %s" % stderr.strip()
        #print "Stdout = %s" % stdout.strip()
        (stderr, stdout) = RunExternal("condor_release %s" % factoryID)
        print "Stderr = %s" % stderr.strip()
        #print "Stdout = %s" % stdout.strip()

    def Stop(self):
        # Remove the factory job from the condor queue.
        status = ClusterStatus()

        # Get the factory id
        factoryID = status.GetFactoryID()

        # Remove the factory job
        (stderr, stdout) = RunExternal("condor_rm %s" % factoryID)
        print "Stderr = %s" % stderr.strip()

    def Start(self):
        """
        Start the Factory

        Main daemon loop: each iteration checks idle glideins, queued
        glidein jobs and idle user jobs, submits new glideins if needed,
        then sleeps for the configured iteration time.
        """
        self.Intialize()
        status = ClusterStatus(status_constraint="IsUndefined(Offline)")
        offline = OfflineAds()

        # First, daemonize?

        while 1:
            logging.info("Starting iteration...")
            if self.UseOffline:
                toSubmit = offline.Update( [self.GetClusterUnique()] )

            # Check for idle glideins (idle startd's)
            idleslots = status.GetIdleGlideins()
            if idleslots == None:
                logging.info("Received None from idle glideins, going to try later")
                self.SleepFactory()
                continue
            logging.debug("Idle glideins = %i" % idleslots)
            if idleslots >= int(self.config.get("general", "MAXIDLEGLIDEINS")):
                logging.info("Too many idle glideins")
                self.SleepFactory()
                continue

            # Check for idle glidein jobs
            idlejobs = status.GetIdleGlideinJobs()
            if idlejobs == None:
                logging.info("Received None from idle glidein jobs, going to try later")
                self.SleepFactory()
                continue
            logging.debug("Queued jobs = %i" % idlejobs)
            if idlejobs >= int(self.config.get("general", "maxqueuedjobs")):
                logging.info("Too many queued jobs")
                self.SleepFactory()
                continue

            # Get the offline ads to update.
            if self.UseOffline:
                num_submit = offline.GetDelinquentSites( [self.GetClusterUnique()] )
                logging.debug("toSubmit from offline %s", str(toSubmit))
                logging.debug("num_submit = %s\n", str(num_submit))
                if (len(toSubmit) > 0) or num_submit[self.GetClusterUnique()]:
                    # At least 5 glideins are requested in the offline path.
                    idleuserjobs = max([ num_submit[self.GetClusterUnique()], 5 ])
                    logging.debug("OFfline ads detected jobs should be submitted. Idle user jobs set to %i", idleuserjobs)
                else:
                    logging.debug("Offline ads did not detect any matches or Delinquencies.")
                    idleuserjobs = 0
            else:
                # Check for idle jobs to flock from
                if self.config.has_option("general", "FLOCK_FROM"):
                    schedds = self.config.get("general", "FLOCK_FROM").split(",")
                    logging.debug("Using FLOCK_FROM from the factory config.")
                else:
                    schedds = self.condor_config.get('FLOCK_FROM').split(",")
                    logging.debug("Using FLOCK_FROM from the condor configuration")
                logging.debug("Schedds to query: %s" % str(schedds))

                idleuserjobs = status.GetIdleJobs(schedds)
                if idleuserjobs == None:
                    logging.info("Received None from idle user jobs, going to try later")
                    self.SleepFactory()
                    continue
                logging.debug("Idle jobs = %i" % idleuserjobs)
                if idleuserjobs < 1:
                    logging.info("No idle jobs")
                    self.SleepFactory()
                    continue

            # Determine how many glideins to submit
            num_submit = self.GetNumSubmit(idleslots, idlejobs, idleuserjobs)
            logging.info("Submitting %i glidein jobs", num_submit)
            self.SubmitGlideins(num_submit)

            self.SleepFactory()

    def SleepFactory(self):
        # Sleep for the configured iteration time between factory cycles.
        sleeptime = int(self.config.get("general", "iterationtime"))
        logging.info("Sleeping for %i seconds" % sleeptime)
        time.sleep(sleeptime)

    def GetNumSubmit(self, idleslots, idlejobs, idleuserjobs):
        """
        Calculate the number of glideins to submit.

        @param idleslots: Number of idle startd's
        @param idlejobs: Number of glideins in queue, but not active
        @param idleuserjobs: Number of idle user jobs from FLOCK_FROM

        @return: int - Number of glideins to submit
        """
        # If we have already submitted enough glideins to fufill the request,
        # don't submit more.
        if max([idlejobs, idleslots]) >= idleuserjobs:
            logging.debug("The number of idlejobs or idleslots fufills the requested idleuserjobs, not submitting any glideins")
            return 0

        status = ClusterStatus()

        # Check that running glideins are reporting to the collector
        running_glidein_jobs = status.GetRunningGlideinJobs()
        logging.debug("Number of running_glidein_jobs = %i", running_glidein_jobs)
        running_glideins = status.GetRunningGlideins()
        logging.debug("Number of running glideins = %i", running_glideins)

        # If fewer than 90% of running glidein jobs show up as running
        # glideins, assume they are failing to report and submit nothing.
        if ((running_glidein_jobs * .9) > running_glideins):
            logging.error("I'm guessing glideins are not reporting to the collector, not submitting")
            return 0

        # Ok, so now submit until we can't submit any more, or there are less user jobs
        return min([int(self.config.get("general", "maxqueuedjobs")) - idlejobs, \
                    idleuserjobs,\
                    int(self.config.get("general", "MaxIdleGlideins")) - idleslots])

    def SubmitGlideins(self, numSubmit):
        """
        Submit numSubmit glideins.

        @param numSubmit: The number of glideins to submit.
        """
        # Substitute values in submit file
        file = "share/glidein_jobs/job.submit.template"

        # Submit jobs
        for i in range(numSubmit):
            self.SingleSubmit(file)

        # Delete the submit file

    def SingleSubmit(self, file):
        """
        Submit a single glidein job

        @param file: The file (string) to submit
        """
        # TODO: These options should be moved to a better location
        options = {"WN_TMP": self.config.get("general", "worker_tmp"), \
                   "GLIDEIN_HOST": self.condor_config.get("CONDOR_HOST"), \
                   "GLIDEIN_Site": self.GetClusterUnique()}

        # Build '-a key="value"' arguments for condor_submit.
        options_str = ""
        for key in options.keys():
            options_str += " -a %s=\"%s\"" % (key, options[key])

        (stdout, stderr) = RunExternal("condor_submit %s %s" % (file, options_str))
        logging.debug("stdout: %s" % stdout)
        logging.debug("stderr: %s" % stderr)

    def GetClusterUnique(self):
        """
        @return: str - The unique identifier for each cluster. Assuming only 1 cluster for now.
        """
        if self.config.has_option("general", "GLIDEIN_Site"):
            return self.config.get("general", "GLIDEIN_Site")
        else:
            return self.condor_config.get("COLLECTOR_NAME")
Fixed bug in detecting idle glidein startd's
git-svn-id: 85696d106266b6d060c615e9cd4fc3a3fcc590a5@133 1fda6f16-4416-0410-bfd6-867a04880151
import ConfigParser
import sys
import time
import logging
import logging.handlers
import os
from campus_factory.ClusterStatus import ClusterStatus
from campus_factory.ClusterStatus import CondorConfig
from campus_factory.Parsers import RunExternal
from campus_factory.OfflineAds.OfflineAds import OfflineAds
class Factory:
"""
The main class of the factory. Designed to be run inside the condor scheduler.
@author: Derek Weitzel (dweitzel@cse.unl.edu)
"""
def __init__(self, options):
"""
Initialization function.
@param options: A set of options in the form of an options parser
Required options: config - location of configuration File
"""
self.options = options
def Intialize(self):
"""
Function to initialize the factory's variables such as configuration
and logging
"""
# Read in the configuration file
self.config_file = self.options.config
self.config = ConfigParser.ConfigParser()
files_read = self.config.read([self.config_file])
# check if no files read in
if len(files_read) < 1:
sys.stderr.write("No configuration files found. Location = %s\n" % self.config_file)
sys.exit(1)
self._SetLogging()
if self.config.get("general", "useoffline").lower() == "true":
self.UseOffline = True
else:
self.UseOffline = False
try:
self.condor_config = CondorConfig()
except EnvironmentError, inst:
logging.exception(str(inst))
raise inst
def _SetLogging(self):
"""
Setting the logging level and set the logging.
"""
logging_levels = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
level = logging_levels.get(self.config.get("general", "loglevel"))
logdirectory = self.config.get("general", "logdirectory")
handler = logging.handlers.RotatingFileHandler(os.path.join(logdirectory, "campus_factory.log"),
maxBytes=10000000, backupCount=5)
root_logger = logging.getLogger()
root_logger.setLevel(level)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
root_logger.addHandler(handler)
def Restart(self):
status = ClusterStatus()
# Get the factory id
factoryID = status.GetFactoryID()
# Hold then release the factory in the queue
(stderr, stdout) = RunExternal("condor_hold %s" % factoryID)
print "Stderr = %s" % stderr.strip()
#print "Stdout = %s" % stdout.strip()
(stderr, stdout) = RunExternal("condor_release %s" % factoryID)
print "Stderr = %s" % stderr.strip()
#print "Stdout = %s" % stdout.strip()
def Stop(self):
status = ClusterStatus()
# Get the factory id
factoryID = status.GetFactoryID()
# Remove the factory job
(stderr, stdout) = RunExternal("condor_rm %s" % factoryID)
print "Stderr = %s" % stderr.strip()
def Start(self):
"""
Start the Factory
"""
self.Intialize()
status = ClusterStatus(status_constraint="IsUndefined(Offline)")
offline = OfflineAds()
# First, daemonize?
while 1:
logging.info("Starting iteration...")
if self.UseOffline:
toSubmit = offline.Update( [self.GetClusterUnique()] )
# Check for idle glideins (idle startd's)
idleslots = status.GetIdleGlideins()
if idleslots == None:
logging.info("Received None from idle glideins, going to try later")
self.SleepFactory()
continue
logging.debug("Idle glideins = %i" % idleslots)
if idleslots >= int(self.config.get("general", "MAXIDLEGLIDEINS")):
logging.info("Too many idle glideins")
self.SleepFactory()
continue
# Check for idle glidein jobs
idlejobs = status.GetIdleGlideinJobs()
if idlejobs == None:
logging.info("Received None from idle glidein jobs, going to try later")
self.SleepFactory()
continue
logging.debug("Queued jobs = %i" % idlejobs)
if idlejobs >= int(self.config.get("general", "maxqueuedjobs")):
logging.info("Too many queued jobs")
self.SleepFactory()
continue
# Get the offline ads to update.
if self.UseOffline:
num_submit = offline.GetDelinquentSites( [self.GetClusterUnique()] )
logging.debug("toSubmit from offline %s", str(toSubmit))
logging.debug("num_submit = %s\n", str(num_submit))
if (len(toSubmit) > 0) or num_submit[self.GetClusterUnique()]:
idleuserjobs = max([ num_submit[self.GetClusterUnique()], 5 ])
logging.debug("OFfline ads detected jobs should be submitted. Idle user jobs set to %i", idleuserjobs)
else:
logging.debug("Offline ads did not detect any matches or Delinquencies.")
idleuserjobs = 0
else:
# Check for idle jobs to flock from
if self.config.has_option("general", "FLOCK_FROM"):
schedds = self.config.get("general", "FLOCK_FROM").split(",")
logging.debug("Using FLOCK_FROM from the factory config.")
else:
schedds = self.condor_config.get('FLOCK_FROM').split(",")
logging.debug("Using FLOCK_FROM from the condor configuration")
logging.debug("Schedds to query: %s" % str(schedds))
idleuserjobs = status.GetIdleJobs(schedds)
if idleuserjobs == None:
logging.info("Received None from idle user jobs, going to try later")
self.SleepFactory()
continue
logging.debug("Idle jobs = %i" % idleuserjobs)
if idleuserjobs < 1:
logging.info("No idle jobs")
self.SleepFactory()
continue
# Determine how many glideins to submit
num_submit = self.GetNumSubmit(idleslots, idlejobs, idleuserjobs)
logging.info("Submitting %i glidein jobs", num_submit)
self.SubmitGlideins(num_submit)
self.SleepFactory()
def SleepFactory(self):
    """Pause the factory for the configured iteration interval."""
    # The interval comes from the [general] section of the factory config.
    duration = int(self.config.get("general", "iterationtime"))
    logging.info("Sleeping for %i seconds" % duration)
    time.sleep(duration)
def GetNumSubmit(self, idleslots, idlejobs, idleuserjobs):
    """
    Calculate the number of glideins to submit.
    @param idleslots: Number of idle startd's
    @param idlejobs: Number of glideins in queue, but not active
    @param idleuserjobs: Number of idle user jobs from FLOCK_FROM
    @return: int - Number of glideins to submit (0 when demand is already
        covered or running glideins do not appear in the collector)
    """
    # If we have already submitted enough glideins to fulfill the request,
    # don't submit more.
    if max([idlejobs, idleslots]) >= idleuserjobs:
        logging.debug("The number of idlejobs or idleslots fufills the requested idleuserjobs, not submitting any glideins")
        return 0
    # Only consider ads that have not been marked offline.
    status = ClusterStatus(status_constraint="IsUndefined(Offline)")
    # Check that running glideins are reporting to the collector
    running_glidein_jobs = status.GetRunningGlideinJobs()
    logging.debug("Number of running_glidein_jobs = %i", running_glidein_jobs)
    running_glideins = status.GetRunningGlideins()
    logging.debug("Number of running glideins = %i", running_glideins)
    # If fewer than ~90% of the running glidein jobs show up as running
    # glideins, assume they fail to report and hold off on new submissions.
    if ((running_glidein_jobs * .9) > running_glideins):
        logging.error("I'm guessing glideins are not reporting to the collector, not submitting")
        return 0
    # Ok, so now submit until we can't submit any more, or there are less user jobs.
    # The result is capped by remaining queue room, remaining idle-slot room,
    # and the actual user demand.
    return min([int(self.config.get("general", "maxqueuedjobs")) - idlejobs, \
                idleuserjobs,\
                int(self.config.get("general", "MaxIdleGlideins")) - idleslots])
def SubmitGlideins(self, numSubmit):
    """
    Submit numSubmit glideins.
    @param numSubmit: The number of glideins to submit.
    """
    # All glideins are submitted from the same templated submit file.
    submit_template = "share/glidein_jobs/job.submit.template"
    for _ in range(numSubmit):
        self.SingleSubmit(submit_template)
# Delete the submit file
def SingleSubmit(self, file):
    """
    Submit a single glidein job
    @param file: The file (string) to submit
    """
    # TODO: These options should be moved to a better location
    options = {"WN_TMP": self.config.get("general", "worker_tmp"),
               "GLIDEIN_HOST": self.condor_config.get("CONDOR_HOST"),
               "GLIDEIN_Site": self.GetClusterUnique()}
    # Render every option as a condor_submit "-a key=value" override.
    options_str = "".join(" -a %s=\"%s\"" % (key, options[key]) for key in options.keys())
    (stdout, stderr) = RunExternal("condor_submit %s %s" % (file, options_str))
    logging.debug("stdout: %s" % stdout)
    logging.debug("stderr: %s" % stderr)
def GetClusterUnique(self):
    """
    @return: str - The unique identifier for each cluster. Assuming only 1 cluster for now.
    """
    # An explicit GLIDEIN_Site in the factory config wins; otherwise fall
    # back to the collector name from the condor configuration.
    if not self.config.has_option("general", "GLIDEIN_Site"):
        return self.condor_config.get("COLLECTOR_NAME")
    return self.config.get("general", "GLIDEIN_Site")
|
from tests.utils import pool, storage
from tests.utils.wallet import create_and_open_wallet
from indy import ledger, wallet, signus
from indy.pool import close_pool_ledger
from indy.error import ErrorCode, IndyError
import json
import pytest
import logging
logging.basicConfig(level=logging.DEBUG)
@pytest.fixture(autouse=True)
def before_after_each():
    # Wipe local storage before and after every test so tests stay isolated.
    storage.cleanup()
    yield
    storage.cleanup()
@pytest.fixture
async def wallet_handle():
    # Open a fresh wallet for the test and make sure it is closed afterwards.
    handle = await create_and_open_wallet()
    yield handle
    await wallet.close_wallet(handle)
@pytest.fixture
async def pool_handle():
    # Open the test pool ledger and make sure it is closed afterwards.
    handle = await pool.create_and_open_pool_ledger("pool_1")
    yield handle
    await close_pool_ledger(handle)
@pytest.mark.asyncio
async def test_submit_request_works(pool_handle):
    # A fully formed, pre-signed GET_NYM request submitted raw should come
    # back as a REPLY with the expected result payload.
    request = {
        "reqId": 1491566332010860,
        "identifier": "Th7MpTaRZVRYnPiabds81Y",
        "operation": {
            "type": "105",
            "dest": "Th7MpTaRZVRYnPiabds81Y"
        },
        "signature": "4o86XfkiJ4e2r3J6Ufoi17UU3W5Zi9sshV6FjBjkVw4sgEQFQov9dxqDEtLbAJAWffCWd5KfAk164QVo7mYwKkiV"
    }
    expected_response = {
        "result": {
            "reqId": 1491566332010860,
            "identifier": "Th7MpTaRZVRYnPiabds81Y",
            "dest": "Th7MpTaRZVRYnPiabds81Y",
            # NOTE: the two adjacent string literals below are concatenated
            # into a single JSON-encoded value.
            "data": "{\"dest\":\"Th7MpTaRZVRYnPiabds81Y\",\"identifier\":\"V4SGRU86Z58d6TV7PBUe6f\",\"role\":\"2\""
                    ",\"verkey\":\"~7TYfekw4GUagBnBVCqPjiC\"}",
            "type": "105",
        },
        "op": "REPLY"
    }
    # submit_request returns bytes; decode and parse before comparing.
    response = json.loads((await ledger.submit_request(pool_handle, json.dumps(request))).decode())
    assert response == expected_response
@pytest.mark.asyncio
async def test_submit_request_works_for_invalid_pool_handle(pool_handle, wallet_handle):
    """submit_request with a bogus pool handle must raise PoolLedgerInvalidPoolHandle."""
    (my_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                          '{"seed":"000000000000000000000000Trustee1"}')
    get_nym_request = await ledger.build_get_nym_request(my_did.decode(), my_did.decode())
    # One past the real handle is guaranteed not to be registered.
    invalid_pool_handle = pool_handle + 1
    try:
        await ledger.submit_request(invalid_pool_handle, get_nym_request.decode())
    except Exception as e:
        # Compare type and args so a different IndyError does not slip through.
        assert type(IndyError(ErrorCode.PoolLedgerInvalidPoolHandle)) == type(e) and \
               IndyError(ErrorCode.PoolLedgerInvalidPoolHandle).args == e.args
    else:
        # The original raised this sentinel INSIDE the try block, where its
        # own broad `except` swallowed it; `else` runs only when no
        # exception occurred, which is the case we actually want to flag.
        raise Exception("Failed")
@pytest.mark.asyncio
async def test_nym_request_works_without_signature(pool_handle, wallet_handle):
    """An unsigned NYM request must be rejected as an invalid transaction."""
    (my_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                          '{"seed":"00000000000000000000000000000My1"}')
    nym_request = await ledger.build_nym_request(my_did.decode(), my_did.decode(), None, None, None)
    try:
        await ledger.submit_request(pool_handle, nym_request.decode())
    except Exception as e:
        assert type(IndyError(ErrorCode.LedgerInvalidTransaction)) == type(e) and \
               IndyError(ErrorCode.LedgerInvalidTransaction).args == e.args
    else:
        # The sentinel was previously raised inside the try block, where the
        # broad `except` swallowed it; `else` flags the no-exception case.
        raise Exception("Failed")
@pytest.mark.asyncio
async def test_send_get_nym_request_works(pool_handle, wallet_handle):
    # The Trustee1 seed yields a DID that is pre-provisioned on the test
    # ledger, so a GET_NYM for it must return data.
    (my_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                          '{"seed":"000000000000000000000000Trustee1"}')
    get_nym_request = await ledger.build_get_nym_request(my_did.decode(), my_did.decode())
    response = json.loads((await ledger.submit_request(pool_handle, get_nym_request.decode())).decode())
    assert response['result']['data'] is not None
@pytest.mark.asyncio
async def test_nym_requests_works(pool_handle, wallet_handle):
    # Publish a new DID signed by the trustee, then read it back via GET_NYM.
    (trustee_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                               '{"seed":"000000000000000000000000Trustee1"}')
    (my_did, my_ver_key, _) = await signus.create_and_store_my_did(wallet_handle,
                                                                   '{"seed":"00000000000000000000000000000My1"}')
    nym_request = await ledger.build_nym_request(trustee_did.decode(), my_did.decode(), my_ver_key.decode(), None, None)
    await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did.decode(), nym_request.decode())
    get_nym_request = await ledger.build_get_nym_request(my_did.decode(), my_did.decode())
    response = json.loads((await ledger.submit_request(pool_handle, get_nym_request.decode())).decode())
    assert response['result']['data'] is not None
@pytest.mark.asyncio
async def test_attrib_request_works_without_signature(pool_handle, wallet_handle):
    """An unsigned ATTRIB request must be rejected as an invalid transaction."""
    (my_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                          '{"seed":"00000000000000000000000000000My1"}')
    attrib_request = await ledger.build_attrib_request(my_did.decode(), my_did.decode(), None,
                                                       "{\"endpoint\":{\"ha\":\"127.0.0.1:5555\"}}", None)
    try:
        await ledger.submit_request(pool_handle, attrib_request.decode())
    except Exception as e:
        assert type(IndyError(ErrorCode.LedgerInvalidTransaction)) == type(e) and \
               IndyError(ErrorCode.LedgerInvalidTransaction).args == e.args
    else:
        # The sentinel was previously raised inside the try block, where the
        # broad `except` swallowed it; `else` flags the no-exception case.
        raise Exception("Failed")
@pytest.mark.asyncio
async def test_attrib_requests_works(pool_handle, wallet_handle):
    # Publish an endpoint ATTRIB for a DID, then read it back via GET_ATTRIB.
    (trustee_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                               '{"seed":"000000000000000000000000Trustee1"}')
    (my_did, my_ver_key, _) = await signus.create_and_store_my_did(wallet_handle,
                                                                   '{"seed":"00000000000000000000000000000My1"}')
    attrib_request = await ledger.build_attrib_request(my_did.decode(), my_did.decode(), None,
                                                       "{\"endpoint\":{\"ha\":\"127.0.0.1:5555\"}}", None)
    await ledger.sign_and_submit_request(pool_handle, wallet_handle, my_did.decode(), attrib_request.decode())
    get_attrib_request = await ledger.build_get_attrib_request(my_did.decode(), my_did.decode(), "endpoint")
    response = json.loads((await ledger.submit_request(pool_handle, get_attrib_request.decode())).decode())
    assert response['result']['data'] is not None
Added tests for schema_request.
from tests.utils import pool, storage
from tests.utils.wallet import create_and_open_wallet
from indy import ledger, wallet, signus
from indy.pool import close_pool_ledger
from indy.error import ErrorCode, IndyError
import json
import pytest
import logging
logging.basicConfig(level=logging.DEBUG)
@pytest.fixture(autouse=True)
def before_after_each():
    # Wipe local storage before and after every test so tests stay isolated.
    storage.cleanup()
    yield
    storage.cleanup()
@pytest.fixture
async def wallet_handle():
    # Open a fresh wallet for the test and make sure it is closed afterwards.
    handle = await create_and_open_wallet()
    yield handle
    await wallet.close_wallet(handle)
@pytest.fixture
async def pool_handle():
    # Open the test pool ledger and make sure it is closed afterwards.
    handle = await pool.create_and_open_pool_ledger("pool_1")
    yield handle
    await close_pool_ledger(handle)
@pytest.mark.asyncio
async def test_submit_request_works(pool_handle):
    # A fully formed, pre-signed GET_NYM request submitted raw should come
    # back as a REPLY with the expected result payload.
    request = {
        "reqId": 1491566332010860,
        "identifier": "Th7MpTaRZVRYnPiabds81Y",
        "operation": {
            "type": "105",
            "dest": "Th7MpTaRZVRYnPiabds81Y"
        },
        "signature": "4o86XfkiJ4e2r3J6Ufoi17UU3W5Zi9sshV6FjBjkVw4sgEQFQov9dxqDEtLbAJAWffCWd5KfAk164QVo7mYwKkiV"
    }
    expected_response = {
        "result": {
            "reqId": 1491566332010860,
            "identifier": "Th7MpTaRZVRYnPiabds81Y",
            "dest": "Th7MpTaRZVRYnPiabds81Y",
            # NOTE: the two adjacent string literals below are concatenated
            # into a single JSON-encoded value.
            "data": "{\"dest\":\"Th7MpTaRZVRYnPiabds81Y\",\"identifier\":\"V4SGRU86Z58d6TV7PBUe6f\",\"role\":\"2\""
                    ",\"verkey\":\"~7TYfekw4GUagBnBVCqPjiC\"}",
            "type": "105",
        },
        "op": "REPLY"
    }
    # submit_request returns bytes; decode and parse before comparing.
    response = json.loads((await ledger.submit_request(pool_handle, json.dumps(request))).decode())
    assert response == expected_response
@pytest.mark.asyncio
async def test_submit_request_works_for_invalid_pool_handle(pool_handle, wallet_handle):
    """submit_request with a bogus pool handle must raise PoolLedgerInvalidPoolHandle."""
    (my_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                          '{"seed":"000000000000000000000000Trustee1"}')
    get_nym_request = await ledger.build_get_nym_request(my_did.decode(), my_did.decode())
    # One past the real handle is guaranteed not to be registered.
    invalid_pool_handle = pool_handle + 1
    try:
        await ledger.submit_request(invalid_pool_handle, get_nym_request.decode())
    except Exception as e:
        # Compare type and args so a different IndyError does not slip through.
        assert type(IndyError(ErrorCode.PoolLedgerInvalidPoolHandle)) == type(e) and \
               IndyError(ErrorCode.PoolLedgerInvalidPoolHandle).args == e.args
    else:
        # The original raised this sentinel INSIDE the try block, where its
        # own broad `except` swallowed it; `else` runs only when no
        # exception occurred, which is the case we actually want to flag.
        raise Exception("Failed")
@pytest.mark.asyncio
async def test_nym_request_works_without_signature(pool_handle, wallet_handle):
    """An unsigned NYM request must be rejected as an invalid transaction."""
    (my_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                          '{"seed":"00000000000000000000000000000My1"}')
    nym_request = await ledger.build_nym_request(my_did.decode(), my_did.decode(), None, None, None)
    try:
        await ledger.submit_request(pool_handle, nym_request.decode())
    except Exception as e:
        assert type(IndyError(ErrorCode.LedgerInvalidTransaction)) == type(e) and \
               IndyError(ErrorCode.LedgerInvalidTransaction).args == e.args
    else:
        # The sentinel was previously raised inside the try block, where the
        # broad `except` swallowed it; `else` flags the no-exception case.
        raise Exception("Failed")
@pytest.mark.asyncio
async def test_send_get_nym_request_works(pool_handle, wallet_handle):
    # The Trustee1 seed yields a DID that is pre-provisioned on the test
    # ledger, so a GET_NYM for it must return data.
    (my_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                          '{"seed":"000000000000000000000000Trustee1"}')
    get_nym_request = await ledger.build_get_nym_request(my_did.decode(), my_did.decode())
    response = json.loads((await ledger.submit_request(pool_handle, get_nym_request.decode())).decode())
    assert response['result']['data'] is not None
@pytest.mark.asyncio
async def test_nym_requests_works(pool_handle, wallet_handle):
    # Publish a new DID signed by the trustee, then read it back via GET_NYM.
    (trustee_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                               '{"seed":"000000000000000000000000Trustee1"}')
    (my_did, my_ver_key, _) = await signus.create_and_store_my_did(wallet_handle,
                                                                   '{"seed":"00000000000000000000000000000My1"}')
    nym_request = await ledger.build_nym_request(trustee_did.decode(), my_did.decode(), my_ver_key.decode(), None, None)
    await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did.decode(), nym_request.decode())
    get_nym_request = await ledger.build_get_nym_request(my_did.decode(), my_did.decode())
    response = json.loads((await ledger.submit_request(pool_handle, get_nym_request.decode())).decode())
    assert response['result']['data'] is not None
@pytest.mark.asyncio
async def test_attrib_request_works_without_signature(pool_handle, wallet_handle):
    """An unsigned ATTRIB request must be rejected as an invalid transaction."""
    (my_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                          '{"seed":"00000000000000000000000000000My1"}')
    attrib_request = await ledger.build_attrib_request(my_did.decode(), my_did.decode(), None,
                                                       "{\"endpoint\":{\"ha\":\"127.0.0.1:5555\"}}", None)
    try:
        await ledger.submit_request(pool_handle, attrib_request.decode())
    except Exception as e:
        assert type(IndyError(ErrorCode.LedgerInvalidTransaction)) == type(e) and \
               IndyError(ErrorCode.LedgerInvalidTransaction).args == e.args
    else:
        # The sentinel was previously raised inside the try block, where the
        # broad `except` swallowed it; `else` flags the no-exception case.
        raise Exception("Failed")
@pytest.mark.asyncio
async def test_attrib_requests_works(pool_handle, wallet_handle):
    # Publish an endpoint ATTRIB for a DID, then read it back via GET_ATTRIB.
    (trustee_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                               '{"seed":"000000000000000000000000Trustee1"}')
    (my_did, my_ver_key, _) = await signus.create_and_store_my_did(wallet_handle,
                                                                   '{"seed":"00000000000000000000000000000My1"}')
    attrib_request = await ledger.build_attrib_request(my_did.decode(), my_did.decode(), None,
                                                       "{\"endpoint\":{\"ha\":\"127.0.0.1:5555\"}}", None)
    await ledger.sign_and_submit_request(pool_handle, wallet_handle, my_did.decode(), attrib_request.decode())
    get_attrib_request = await ledger.build_get_attrib_request(my_did.decode(), my_did.decode(), "endpoint")
    response = json.loads((await ledger.submit_request(pool_handle, get_attrib_request.decode())).decode())
    assert response['result']['data'] is not None
@pytest.mark.asyncio
async def test_schema_request_works_without_signature(pool_handle, wallet_handle):
    """An unsigned SCHEMA request must be rejected as an invalid transaction."""
    (my_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                          '{"seed":"00000000000000000000000000000My1"}')
    schema_data = {
        "name": "gvt2",
        "version": "2.0",
        "keys": ["name", "male"]
    }
    schema_request = await ledger.build_schema_request(my_did.decode(), json.dumps(schema_data))
    try:
        await ledger.submit_request(pool_handle, schema_request.decode())
    except Exception as e:
        assert type(IndyError(ErrorCode.LedgerInvalidTransaction)) == type(e) and \
               IndyError(ErrorCode.LedgerInvalidTransaction).args == e.args
    else:
        # The sentinel was previously raised inside the try block, where the
        # broad `except` swallowed it; `else` flags the no-exception case.
        raise Exception("Failed")
@pytest.mark.asyncio
async def test_schema_requests_works(pool_handle, wallet_handle):
    # Publish a schema signed by its owner DID, then read it back via
    # GET_SCHEMA using only name and version.
    (trustee_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
                                                               '{"seed":"000000000000000000000000Trustee1"}')
    (my_did, my_ver_key, _) = await signus.create_and_store_my_did(wallet_handle,
                                                                   '{"seed":"00000000000000000000000000000My1"}')
    schema_data = {
        "name": "gvt2",
        "version": "2.0",
        "keys": ["name", "male"]
    }
    schema_request = await ledger.build_schema_request(my_did.decode(), json.dumps(schema_data))
    await ledger.sign_and_submit_request(pool_handle, wallet_handle, my_did.decode(), schema_request.decode())
    get_schema_data = {
        "name": "gvt2",
        "version": "2.0"
    }
    get_schema_request = await ledger.build_get_schema_request(my_did.decode(), my_did.decode(), json.dumps(get_schema_data))
    response = json.loads((await ledger.submit_request(pool_handle, get_schema_request.decode())).decode())
    assert response['result']['data'] is not None
|
#!/usr/bin/env python3
# coding=utf-8
"""Download the rootfs source archive for an official Docker image.

Resolves ``image:tag`` through the docker-library/official-images index,
locates the referenced Dockerfile, extracts the rootfs tarball it ADDs,
and downloads that archive to the working directory.
"""
import sys
import urllib.error  # explicit import; don't rely on urllib.request pulling it in
import urllib.request

from utils import Fore, parse_image_arg, chunked_copy, clear_progress, handle_sigint, ensure_ca_load

# handle arguments
handle_sigint()
ensure_ca_load()

if len(sys.argv) < 2:
    print('usage: ./get-source.py image[:tag]')
    sys.exit(-1)

image, tag, fname, label = parse_image_arg(sys.argv[1], False)

dfurl = ''
tgurl = ''

# find the Dockerfile for the specified image and tag
print('%s[*]%s Fetching official-images info for %s%s%s:%s%s%s...' % (Fore.GREEN, Fore.RESET, Fore.YELLOW, image, Fore.RESET, Fore.YELLOW, tag, Fore.RESET))

try:
    with urllib.request.urlopen('https://raw.githubusercontent.com/docker-library/official-images/master/library/' + image) as f:
        data = f.read().decode('utf-8').splitlines() + ['']

        # there seems to be two versions for this file:
        # a) simplistic one-line per tag:
        #      latest: git://github.com/oracle/docker-images.git@a44844fe085a561ded44865eafb63f742e4250c1 OracleLinux/7.2
        # b) key-values spanning over multiple lines:
        #      GitRepo: https://github.com/CentOS/sig-cloud-instance-images.git
        #      Directory: docker
        #      Tags: latest, centos7, 7
        #      GitFetch: refs/heads/CentOS-7
        #      GitCommit: f5b919346432acc728078aa32ffb6dcf84d303a0

        # try a) first
        for line in data:
            if line.startswith(tag + ': '):
                # extract the parts: "<tag>: <repo>@<commit> <path>"
                line = line.split(': ', 1)
                line = line[1].split(' ', 1)
                path = line[1]
                line = line[0].split('@', 1)
                repo = line[0]
                commit = line[1]
                repo = repo[repo.find('github.com/') + len('github.com/'):]
                if repo.find('.git') != -1:
                    repo = repo[:repo.find('.git')]
                if len(path) != 0:
                    path = '/' + path
                # build direct URL to Dockerfile
                dfurl = 'https://raw.githubusercontent.com/%s/%s%s/Dockerfile' % (repo, commit, path)
                break

        # try b) second
        if not dfurl:
            repo = ''
            path = ''
            commit = ''
            isTag = False
            for line in data:
                if line == '':
                    # tags are separated by double new lines and we need to wait for all values
                    # before building the direct URL
                    if isTag and repo and commit:
                        dfurl = 'https://raw.githubusercontent.com/%s/%s%s/Dockerfile' % (repo, commit, path)
                        break
                    else:
                        continue
                line = line.split(': ', 1)
                # collect key-values
                if line[0] == 'GitRepo':
                    repo = line[1]
                    repo = repo[repo.find('github.com/') + len('github.com/'):]
                    # BUGFIX: the previous unconditional slice
                    # repo[... : repo.find('.git')] chopped the last character
                    # off URLs without a '.git' suffix (find() returns -1).
                    if repo.endswith('.git'):
                        repo = repo[:-len('.git')]
                elif line[0] == 'Tags':
                    tags = line[1].split(', ')
                    isTag = tag in tags
                elif line[0] == 'GitCommit':
                    commit = line[1]
                elif line[0] == 'Directory':
                    path = '/' + line[1]

        # otherwise, fail miserably
        if not dfurl:
            print('%s[!]%s Failed to find tag %s%s%s for image %s%s%s.' % (Fore.RED, Fore.RESET, Fore.BLUE, tag, Fore.RESET, Fore.BLUE, image, Fore.RESET))
            sys.exit(-1)

except urllib.error.HTTPError as err:
    print('%s[!]%s Failed to fetch official-images info for %s%s%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, image, Fore.RESET, err))
    print('%s[!]%s If this is not an official image, try getting it with %sget-prebuilt.py %s%s.' % (Fore.RED, Fore.RESET, Fore.GREEN, sys.argv[1].strip(), Fore.RESET))
    sys.exit(-1)

# process Dockerfile
print('%s[*]%s Fetching Dockerfile from repo %s%s%s...' % (Fore.GREEN, Fore.RESET, Fore.BLUE, dfurl[dfurl.find('.com/') + len('.com/') : dfurl.find('/Dockerfile')], Fore.RESET))

try:
    with urllib.request.urlopen(dfurl) as f:
        data = f.read().decode('utf-8').splitlines()

        for line in data:
            line = line.split(' ')
            # we are only interested in rootfs archives, generally specified like so:
            #   ADD oraclelinux-7.2-rootfs.tar.xz /
            # Guard against ADD lines with fewer tokens to avoid an IndexError.
            if line[0].lower() == 'add' and len(line) > 2 and line[2] == '/':
                tgurl = dfurl[:dfurl.rfind('/Dockerfile') + 1] + line[1]
                fname += line[1][line[1].find('.tar'):]

        # otherwise, fail miserably
        if not tgurl:
            print('%s[!]%s Failed to find a suitable rootfs specification in Dockerfile.' % (Fore.RED, Fore.RESET))
            sys.exit(-1)

except urllib.error.HTTPError as err:
    print('%s[!]%s Failed to fetch Dockerfile from %s%s%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, dfurl, Fore.RESET, err))
    sys.exit(-1)

# download rootfs archive
print('%s[*]%s Downloading archive %s%s%s...' % (Fore.GREEN, Fore.RESET, Fore.BLUE, tgurl, Fore.RESET))

try:
    with urllib.request.urlopen(tgurl) as u, open(fname, 'wb') as f:
        chunked_copy(fname, u, f)

except urllib.error.HTTPError as err:
    clear_progress()
    print('%s[!]%s Failed to download archive from %s%s%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, tgurl, Fore.RESET, err))
    sys.exit(-1)
except OSError as err:
    clear_progress()
    print('%s[!]%s Failed to open file %s%s%s for writing: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, fname, Fore.RESET, err))
    sys.exit(-1)

print('%s[*]%s Rootfs archive for %s%s%s:%s%s%s saved to %s%s%s.' % (Fore.GREEN, Fore.RESET, Fore.YELLOW, image, Fore.RESET, Fore.YELLOW, tag, Fore.RESET, Fore.GREEN, fname, Fore.RESET))
Add support for platform-specific (amd64-prefixed) manifest keys in get-source.
#!/usr/bin/env python3
# coding=utf-8
"""Download the rootfs source archive for an official Docker image.

Resolves ``image:tag`` through the docker-library/official-images index
(honoring platform-specific ``amd64-`` keys), locates the referenced
Dockerfile, extracts the rootfs tarball it ADDs, and downloads it.
"""
import sys
import urllib.error  # explicit import; don't rely on urllib.request pulling it in
import urllib.request

from utils import Fore, parse_image_arg, chunked_copy, clear_progress, handle_sigint, ensure_ca_load

# handle arguments
handle_sigint()
ensure_ca_load()

if len(sys.argv) < 2:
    print('usage: ./get-source.py image[:tag]')
    sys.exit(-1)

image, tag, fname, label = parse_image_arg(sys.argv[1], False)

dfurl = ''
tgurl = ''

# find the Dockerfile for the specified image and tag
print('%s[*]%s Fetching official-images info for %s%s%s:%s%s%s...' % (Fore.GREEN, Fore.RESET, Fore.YELLOW, image, Fore.RESET, Fore.YELLOW, tag, Fore.RESET))

try:
    with urllib.request.urlopen('https://raw.githubusercontent.com/docker-library/official-images/master/library/' + image) as f:
        data = f.read().decode('utf-8').splitlines() + ['']

        # there seems to be two versions for this file:
        # a) simplistic one-line per tag:
        #      latest: git://github.com/oracle/docker-images.git@a44844fe085a561ded44865eafb63f742e4250c1 OracleLinux/7.2
        # b) key-values spanning over multiple lines:
        #      GitRepo: https://github.com/CentOS/sig-cloud-instance-images.git
        #      Directory: docker
        #      Tags: latest, centos7, 7
        #      GitFetch: refs/heads/CentOS-7
        #      GitCommit: f5b919346432acc728078aa32ffb6dcf84d303a0

        # try a) first
        for line in data:
            if line.startswith(tag + ': '):
                # extract the parts: "<tag>: <repo>@<commit> <path>"
                line = line.split(': ', 1)
                line = line[1].split(' ', 1)
                path = line[1]
                line = line[0].split('@', 1)
                repo = line[0]
                commit = line[1]
                repo = repo[repo.find('github.com/') + len('github.com/'):]
                if repo.find('.git') != -1:
                    repo = repo[:repo.find('.git')]
                if len(path) != 0:
                    path = '/' + path
                # build direct URL to Dockerfile
                dfurl = 'https://raw.githubusercontent.com/%s/%s%s/Dockerfile' % (repo, commit, path)
                break

        # try b) second
        if not dfurl:
            repo = ''
            path = ''
            commit = ''
            isTag = False
            for line in data:
                if line == '':
                    # tags are separated by double new lines and we need to wait for all values
                    # before building the direct URL
                    if isTag and repo and commit:
                        dfurl = 'https://raw.githubusercontent.com/%s/%s%s/Dockerfile' % (repo, commit, path)
                        break
                    else:
                        continue
                line = line.split(': ', 1)
                # collect key-values; an amd64-prefixed key always wins over
                # the generic one regardless of the order they appear in
                if line[0] == 'GitRepo':
                    repo = line[1]
                    repo = repo[repo.find('github.com/') + len('github.com/'):]
                    # BUGFIX: the previous unconditional slice
                    # repo[... : repo.find('.git')] chopped the last character
                    # off URLs without a '.git' suffix (find() returns -1).
                    if repo.endswith('.git'):
                        repo = repo[:-len('.git')]
                elif line[0] == 'Tags':
                    tags = line[1].split(', ')
                    isTag = tag in tags
                elif line[0] == 'amd64-GitCommit':
                    commit = line[1]
                elif line[0] == 'GitCommit':
                    if not commit:
                        commit = line[1]
                elif line[0] == 'amd64-Directory':
                    path = '/' + line[1].strip('/')
                elif line[0] == 'Directory':
                    if not path:
                        path = '/' + line[1].strip('/')

        # otherwise, fail miserably
        if not dfurl:
            print('%s[!]%s Failed to find tag %s%s%s for image %s%s%s.' % (Fore.RED, Fore.RESET, Fore.BLUE, tag, Fore.RESET, Fore.BLUE, image, Fore.RESET))
            sys.exit(-1)

except urllib.error.HTTPError as err:
    print('%s[!]%s Failed to fetch official-images info for %s%s%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, image, Fore.RESET, err))
    print('%s[!]%s If this is not an official image, try getting it with %sget-prebuilt.py %s%s.' % (Fore.RED, Fore.RESET, Fore.GREEN, sys.argv[1].strip(), Fore.RESET))
    sys.exit(-1)

# process Dockerfile
print('%s[*]%s Fetching Dockerfile from repo %s%s%s...' % (Fore.GREEN, Fore.RESET, Fore.BLUE, dfurl[dfurl.find('.com/') + len('.com/') : dfurl.find('/Dockerfile')], Fore.RESET))

try:
    with urllib.request.urlopen(dfurl) as f:
        data = f.read().decode('utf-8').splitlines()

        for line in data:
            line = line.split(' ')
            # we are only interested in rootfs archives, generally specified like so:
            #   ADD oraclelinux-7.2-rootfs.tar.xz /
            # Guard against ADD lines with fewer tokens to avoid an IndexError.
            if line[0].lower() == 'add' and len(line) > 2 and line[2] == '/':
                tgurl = dfurl[:dfurl.rfind('/Dockerfile') + 1] + line[1]
                fname += line[1][line[1].find('.tar'):]

        # otherwise, fail miserably
        if not tgurl:
            print('%s[!]%s Failed to find a suitable rootfs specification in Dockerfile.' % (Fore.RED, Fore.RESET))
            sys.exit(-1)

except urllib.error.HTTPError as err:
    print('%s[!]%s Failed to fetch Dockerfile from %s%s%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, dfurl, Fore.RESET, err))
    sys.exit(-1)

# download rootfs archive
print('%s[*]%s Downloading archive %s%s%s...' % (Fore.GREEN, Fore.RESET, Fore.BLUE, tgurl, Fore.RESET))

try:
    with urllib.request.urlopen(tgurl) as u, open(fname, 'wb') as f:
        chunked_copy(fname, u, f)

except urllib.error.HTTPError as err:
    clear_progress()
    print('%s[!]%s Failed to download archive from %s%s%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, tgurl, Fore.RESET, err))
    sys.exit(-1)
except OSError as err:
    clear_progress()
    print('%s[!]%s Failed to open file %s%s%s for writing: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, fname, Fore.RESET, err))
    sys.exit(-1)

print('%s[*]%s Rootfs archive for %s%s%s:%s%s%s saved to %s%s%s.' % (Fore.GREEN, Fore.RESET, Fore.YELLOW, image, Fore.RESET, Fore.YELLOW, tag, Fore.RESET, Fore.GREEN, fname, Fore.RESET))
|
"""Output streaming, processing and formatting.
"""
import json
from functools import partial
from itertools import chain
import pygments
from pygments import token, lexer
from pygments.styles import get_style_by_name, STYLE_MAP
from pygments.lexers import get_lexer_for_mimetype, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.util import ClassNotFound
from requests.compat import is_windows
from .solarized import Solarized256Style
from .models import HTTPRequest, HTTPResponse, Environment
from .input import (OUT_REQ_BODY, OUT_REQ_HEAD,
OUT_RESP_HEAD, OUT_RESP_BODY)
# Colors on Windows via colorama don't look that
# great and fruity seems to give the best result there.
AVAILABLE_STYLES = set(STYLE_MAP.keys())  # all Pygments styles plus our own
AVAILABLE_STYLES.add('solarized')
DEFAULT_STYLE = 'solarized' if not is_windows else 'fruity'

# Placeholder emitted instead of a body that contains NUL bytes
# (see BinarySuppressedError below).
BINARY_SUPPRESSED_NOTICE = (
    b'\n'
    b'+-----------------------------------------+\n'
    b'| NOTE: binary data not shown in terminal |\n'
    b'+-----------------------------------------+'
)
class BinarySuppressedError(Exception):
    """An error indicating that the body is binary and won't be written,
    e.g., for terminal output)."""
    # The notice emitted in place of the suppressed binary body.
    message = BINARY_SUPPRESSED_NOTICE
###############################################################################
# Output Streams
###############################################################################
def write(stream, outfile, flush):
    """Write the output stream."""
    # Prefer the underlying binary buffer when `outfile` is a Python 3
    # text wrapper; otherwise write the bytes to `outfile` directly.
    sink = getattr(outfile, 'buffer', outfile)
    for chunk in stream:
        sink.write(chunk)
        if flush:
            outfile.flush()
def write_with_colors_win_p3k(stream, outfile, flush):
    """Like `write`, but colorized chunks are written as text
    directly to `outfile` to ensure it gets processed by colorama.

    Applies only to Windows with Python 3 and colorized terminal output.
    """
    ansi_marker = b'\x1b['
    encoding = outfile.encoding
    for chunk in stream:
        # Chunks carrying ANSI escapes go through the text interface so
        # colorama can translate them; everything else stays binary.
        if ansi_marker in chunk:
            outfile.write(chunk.decode(encoding))
        else:
            outfile.buffer.write(chunk)
        if flush:
            outfile.flush()
def output_stream(args, env, request, response):
    """Build and return a chain of iterators over the `request`-`response`
    exchange each of which yields `bytes` chunks.
    """
    Stream = make_stream(env, args)

    with_req_headers = OUT_REQ_HEAD in args.output_options
    with_req_body = OUT_REQ_BODY in args.output_options
    with_resp_headers = OUT_RESP_HEAD in args.output_options
    with_resp_body = OUT_RESP_BODY in args.output_options
    want_resp = with_resp_headers or with_resp_body

    pieces = []
    if with_req_headers or with_req_body:
        pieces.append(Stream(
            msg=HTTPRequest(request),
            with_headers=with_req_headers,
            with_body=with_req_body))
        # Request/Response separator.
        if with_req_body and want_resp:
            pieces.append([b'\n\n'])
    if want_resp:
        pieces.append(Stream(
            msg=HTTPResponse(response),
            with_headers=with_resp_headers,
            with_body=with_resp_body))
        # Ensure a blank line after the response body (terminal output only).
        if with_resp_body and env.stdout_isatty:
            pieces.append([b'\n\n'])
    return chain(*pieces)
def make_stream(env, args):
    """Pick the right stream type based on `env` and `args`.

    Wrap it in a partial with the type-specific args so that
    we don't need to think what stream we are dealing with.
    """
    if args.prettify:
        # Processed/colorized output; stream line-by-line when requested.
        stream_cls = PrettyStream if args.stream else BufferedPrettyStream
        return partial(
            stream_cls,
            env=env,
            processor=OutputProcessor(
                env=env, groups=args.prettify, pygments_style=args.style),
        )
    if not env.stdout_isatty:
        # Redirected and unprettified: pass the bytes through untouched.
        chunk_size = (RawStream.CHUNK_SIZE_BY_LINE
                      if args.stream
                      else RawStream.CHUNK_SIZE)
        return partial(RawStream, chunk_size=chunk_size)
    # Terminal without prettifying: only re-encode for the terminal.
    return partial(EncodedStream, env=env)
class BaseStream(object):
    """Base HTTP message stream class."""

    def __init__(self, msg, with_headers=True, with_body=True):
        """
        :param msg: a :class:`models.HTTPMessage` subclass
        :param with_headers: if `True`, headers will be included
        :param with_body: if `True`, body will be included
        """
        self.msg = msg
        self.with_headers = with_headers
        self.with_body = with_body

    def _headers(self):
        """Return the headers' bytes."""
        return self.msg.headers.encode('ascii')

    def _body(self):
        """Return an iterator over the message body."""
        raise NotImplementedError()

    def __iter__(self):
        """Return an iterator over `self.msg`."""
        if self.with_headers:
            yield self._headers()
            yield b'\r\n\r\n'
        if not self.with_body:
            return
        try:
            for chunk in self._body():
                yield chunk
        except BinarySuppressedError as e:
            # Binary body: emit the placeholder notice instead, separated
            # from the headers by a blank line when headers were shown.
            if self.with_headers:
                yield b'\n'
            yield e.message
class RawStream(BaseStream):
    """The message is streamed in chunks with no processing."""
    # Large chunks for plain dumps; smaller ones when streaming by line.
    CHUNK_SIZE = 1024 * 100
    CHUNK_SIZE_BY_LINE = 1024 * 5

    def __init__(self, chunk_size=CHUNK_SIZE, **kwargs):
        """
        :param chunk_size: read size handed to the message's `iter_body`.
        """
        super(RawStream, self).__init__(**kwargs)
        self.chunk_size = chunk_size

    def _body(self):
        # Chunking is delegated entirely to the message object.
        return self.msg.iter_body(self.chunk_size)
class EncodedStream(BaseStream):
    """Encoded HTTP message stream.

    The message bytes are converted to an encoding suitable for
    `self.env.stdout`. Unicode errors are replaced and binary data
    is suppressed. The body is always streamed by line.
    """
    CHUNK_SIZE = 1024 * 5

    def __init__(self, env=Environment(), **kwargs):
        # NOTE(review): the default `Environment()` is evaluated once at
        # definition time, so default-constructed streams share a single
        # instance — presumably intentional; confirm Environment carries no
        # per-stream state.
        super(EncodedStream, self).__init__(**kwargs)
        if env.stdout_isatty:
            # Use the encoding supported by the terminal.
            output_encoding = getattr(env.stdout, 'encoding', None)
        else:
            # Preserve the message encoding.
            output_encoding = self.msg.encoding
        # Default to utf8 when unsure.
        self.output_encoding = output_encoding or 'utf8'

    def _body(self):
        for line, lf in self.msg.iter_lines(self.CHUNK_SIZE):
            # A NUL byte marks the body as binary; refuse to emit it.
            if b'\0' in line:
                raise BinarySuppressedError()
            # Re-encode each line for the output target, replacing characters
            # the target encoding cannot represent.
            yield line.decode(self.msg.encoding)\
                .encode(self.output_encoding, 'replace') + lf
class PrettyStream(EncodedStream):
    """In addition to :class:`EncodedStream` behaviour, this stream applies
    content processing.

    Useful for long-lived HTTP responses that stream by lines
    such as the Twitter streaming API.
    """
    CHUNK_SIZE = 1024 * 5

    def __init__(self, processor, **kwargs):
        super(PrettyStream, self).__init__(**kwargs)
        self.processor = processor

    def _headers(self):
        processed = self.processor.process_headers(self.msg.headers)
        return processed.encode(self.output_encoding)

    def _body(self):
        for line, lf in self.msg.iter_lines(self.CHUNK_SIZE):
            # A NUL byte marks the payload as binary; refuse to prettify it.
            if b'\0' in line:
                raise BinarySuppressedError()
            yield self._process_body(line) + lf

    def _process_body(self, chunk):
        # Decode with the message's own encoding, run the content
        # processors, then re-encode for the output target.
        text = chunk.decode(self.msg.encoding, 'replace')
        processed = self.processor.process_body(text, self.msg.content_type)
        return processed.encode(self.output_encoding, 'replace')
class BufferedPrettyStream(PrettyStream):
    """The same as :class:`PrettyStream` except that the body is fully
    fetched before it's processed.

    Suitable regular HTTP responses.
    """
    CHUNK_SIZE = 1024 * 10

    def _body(self):
        # Collect the entire body first — processing (e.g. JSON indenting)
        # needs the whole document — but bail out on the first NUL byte,
        # which marks the payload as binary.
        chunks = []
        for chunk in self.msg.iter_body(self.CHUNK_SIZE):
            if b'\0' in chunk:
                raise BinarySuppressedError()
            chunks.append(chunk)
        yield self._process_body(b''.join(chunks))
###############################################################################
# Processing
###############################################################################
class HTTPLexer(lexer.RegexLexer):
    """Simplified HTTP lexer for Pygments.
    It only operates on headers and provides a stronger contrast between
    their names and values than the original one bundled with Pygments
    (:class:`pygments.lexers.text import HttpLexer`), especially when
    Solarized color scheme is used.
    """
    name = 'HTTP'
    aliases = ['http']
    filenames = ['*.http']
    # Single-state lexer: each line is matched independently as either a
    # Request-Line, a Status-Line, or a header.
    tokens = {
        'root': [
            # Request-Line, e.g. "GET /path HTTP/1.1"
            (r'([A-Z]+)( +)([^ ]+)( +)(HTTP)(/)(\d+\.\d+)',
             lexer.bygroups(
                 token.Name.Function,
                 token.Text,
                 token.Name.Namespace,
                 token.Text,
                 token.Keyword.Reserved,
                 token.Operator,
                 token.Number
             )),
            # Response Status-Line, e.g. "HTTP/1.1 200 OK"
            (r'(HTTP)(/)(\d+\.\d+)( +)(\d{3})( +)(.+)',
             lexer.bygroups(
                 token.Keyword.Reserved,  # 'HTTP'
                 token.Operator,  # '/'
                 token.Number,  # Version
                 token.Text,
                 token.Number,  # Status code
                 token.Text,
                 token.Name.Exception,  # Reason
             )),
            # Header, e.g. "Content-Type: text/plain"
            (r'(.*?)( *)(:)( *)(.+)', lexer.bygroups(
                token.Name.Attribute,  # Name
                token.Text,
                token.Operator,  # Colon
                token.Text,
                token.String  # Value
            ))
        ]
    }
class BaseProcessor(object):
    """Base, noop output processor class."""
    # Subclasses may set this to False in __init__ to opt out.
    enabled = True
    def __init__(self, env=Environment(), **kwargs):
        """
        :param env: an class:`Environment` instance
        :param kwargs: additional keyword argument that some
            processor might require.
        """
        # NOTE(review): the Environment() default is evaluated once at
        # class-definition time and shared by every instance -- confirm
        # that sharing is intended.
        self.env = env
        self.kwargs = kwargs
    def process_headers(self, headers):
        """Return processed `headers`
        :param headers: The headers as text.
        """
        return headers
    def process_body(self, content, content_type, subtype):
        """Return processed `content`.
        :param content: The body content as text
        :param content_type: Full content type, e.g., 'application/atom+xml'.
        :param subtype: E.g. 'xml'.
        """
        return content
class JSONProcessor(BaseProcessor):
    """JSON body processor."""
    def process_body(self, content, content_type, subtype):
        # Only touch bodies whose content subtype is JSON.
        if subtype == 'json':
            try:
                # Indent the JSON data, sort keys by name, and
                # avoid unicode escapes to improve readability.
                content = json.dumps(json.loads(content),
                                     sort_keys=True,
                                     ensure_ascii=False,
                                     indent=4)
            except ValueError:
                # Invalid JSON but we don't care.
                pass
        return content
class PygmentsProcessor(BaseProcessor):
    """A processor that applies syntax-highlighting using Pygments
    to the headers, and to the body as well if its content type is recognized.
    """
    def __init__(self, *args, **kwargs):
        super(PygmentsProcessor, self).__init__(*args, **kwargs)
        # Cache that speeds up when we process streamed body by line.
        self.lexers_by_type = {}
        # Disable the whole processor when colors are off for this env.
        if not self.env.colors:
            self.enabled = False
            return
        try:
            style = get_style_by_name(
                self.kwargs.get('pygments_style', DEFAULT_STYLE))
        except ClassNotFound:
            # Unknown style name -- fall back to the bundled Solarized.
            style = Solarized256Style
        # 256-color formatter on Windows / 256-color terminals, plain
        # terminal formatter otherwise.
        if self.env.is_windows or self.env.colors == 256:
            fmt_class = Terminal256Formatter
        else:
            fmt_class = TerminalFormatter
        self.formatter = fmt_class(style=style)
    def process_headers(self, headers):
        # Highlight with the simplified HTTP lexer; strip the trailing
        # newline that Pygments appends.
        return pygments.highlight(
            headers, HTTPLexer(), self.formatter).strip()
    def process_body(self, content, content_type, subtype):
        try:
            # Lexer lookups are cached per content type (useful when a
            # streamed body is processed line by line).
            lexer = self.lexers_by_type.get(content_type)
            if not lexer:
                try:
                    lexer = get_lexer_for_mimetype(content_type)
                except ClassNotFound:
                    lexer = get_lexer_by_name(subtype)
                self.lexers_by_type[content_type] = lexer
        except ClassNotFound:
            # No lexer found for this type: leave the body unhighlighted.
            pass
        else:
            content = pygments.highlight(content, lexer, self.formatter)
        return content.strip()
class HeadersProcessor(BaseProcessor):
    """Sorts headers by name retaining relative order of multiple headers
    with the same name.
    """
    def process_headers(self, headers):
        lines = headers.splitlines()
        # The first line is the Request/Status-Line and stays in place;
        # sorted() is stable, so duplicate header names keep their order.
        headers = sorted(lines[1:], key=lambda h: h.split(':')[0])
        return '\r\n'.join(lines[:1] + headers)
class OutputProcessor(object):
    """A delegate class that invokes the actual processors."""
    # Processor classes by option group name.
    installed_processors = {
        'format': [
            HeadersProcessor,
            JSONProcessor
        ],
        'colors': [
            PygmentsProcessor
        ]
    }
    def __init__(self, groups, env=Environment(), **kwargs):
        """
        :param env: a :class:`models.Environment` instance
        :param groups: the groups of processors to be applied
        :param kwargs: additional keyword arguments for processors
        """
        # Instantiate every processor of every requested group, keeping
        # only those that report themselves as enabled.
        self.processors = []
        for group in groups:
            for cls in self.installed_processors[group]:
                processor = cls(env, **kwargs)
                if processor.enabled:
                    self.processors.append(processor)
    def process_headers(self, headers):
        # Pipe the headers text through all enabled processors in order.
        for processor in self.processors:
            headers = processor.process_headers(headers)
        return headers
    def process_body(self, content, content_type):
        # e.g., 'application/atom+xml'
        content_type = content_type.split(';')[0]
        # e.g., 'xml'
        subtype = content_type.split('/')[-1].split('+')[-1]
        for processor in self.processors:
            content = processor.process_body(content, content_type, subtype)
        return content
Output stream refactoring.
"""Output streaming, processing and formatting.
"""
import json
from functools import partial
from itertools import chain
import pygments
from pygments import token, lexer
from pygments.styles import get_style_by_name, STYLE_MAP
from pygments.lexers import get_lexer_for_mimetype, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.util import ClassNotFound
from requests.compat import is_windows
from .solarized import Solarized256Style
from .models import HTTPRequest, HTTPResponse, Environment
from .input import (OUT_REQ_BODY, OUT_REQ_HEAD,
OUT_RESP_HEAD, OUT_RESP_BODY)
# Colors on Windows via colorama don't look that
# great and fruity seems to give the best result there.
AVAILABLE_STYLES = set(STYLE_MAP.keys())
AVAILABLE_STYLES.add('solarized')
DEFAULT_STYLE = 'solarized' if not is_windows else 'fruity'
# Notice written to the output in place of a suppressed binary body.
BINARY_SUPPRESSED_NOTICE = (
    b'\n'
    b'+-----------------------------------------+\n'
    b'| NOTE: binary data not shown in terminal |\n'
    b'+-----------------------------------------+'
)
class BinarySuppressedError(Exception):
    """Raised when a message body turns out to be binary and is therefore
    not written (e.g., for terminal output).
    """
    # The notice emitted in place of the suppressed binary body.
    message = BINARY_SUPPRESSED_NOTICE
###############################################################################
# Output Streams
###############################################################################
def write(stream, outfile, flush):
    """Consume `stream` and write each bytes chunk to `outfile`.

    :param stream: an iterable of `bytes` chunks
    :param outfile: a file-like object; its binary layer is used if any
    :param flush: if `True`, `outfile` is flushed after every chunk
    """
    # Text-mode files on Python 3 expose their raw bytes interface via
    # `.buffer`; otherwise the file object itself accepts bytes.
    buf = getattr(outfile, 'buffer', outfile)
    for chunk in stream:
        buf.write(chunk)
        if flush:
            outfile.flush()
def write_with_colors_win_p3k(stream, outfile, flush):
    """Like `write`, but chunks containing ANSI escape sequences are
    decoded and written as text directly to `outfile` so they get
    processed by colorama.
    Applies only to Windows with Python 3 and colorized terminal output.
    """
    ansi_prefix = b'\x1b['
    text_encoding = outfile.encoding
    binary_out = outfile.buffer
    for chunk in stream:
        # Colorized chunks must pass through the text layer; plain
        # chunks can go straight to the underlying binary buffer.
        if ansi_prefix in chunk:
            outfile.write(chunk.decode(text_encoding))
        else:
            binary_out.write(chunk)
        if flush:
            outfile.flush()
def output_stream(args, env, request, response):
    """Build and return a chain of iterators over the `request`-`response`
    exchange each of which yields `bytes` chunks.
    """
    Stream = make_stream(env, args)
    opts = args.output_options
    req_h = OUT_REQ_HEAD in opts
    req_b = OUT_REQ_BODY in opts
    resp_h = OUT_RESP_HEAD in opts
    resp_b = OUT_RESP_BODY in opts
    pieces = []
    if req_h or req_b:
        pieces.append(Stream(
            msg=HTTPRequest(request),
            with_headers=req_h,
            with_body=req_b))
        if req_b and (resp_h or resp_b):
            # Visually separate the request from the response.
            pieces.append([b'\n\n'])
    if resp_h or resp_b:
        pieces.append(Stream(
            msg=HTTPResponse(response),
            with_headers=resp_h,
            with_body=resp_b))
        if env.stdout_isatty and resp_b:
            # Ensure a blank line after the response body.
            # For terminal output only.
            pieces.append([b'\n\n'])
    return chain(*pieces)
def make_stream(env, args):
    """Pick the right stream type based on `env` and `args`.
    Wrap it in a partial with the type-specific args so that
    we don't need to think what stream we are dealing with.
    """
    if args.prettify:
        # Line-by-line processing for --stream, fully buffered otherwise.
        stream_class = PrettyStream if args.stream else BufferedPrettyStream
        return partial(
            stream_class,
            env=env,
            processor=OutputProcessor(
                env=env, groups=args.prettify, pygments_style=args.style),
        )
    if not env.stdout_isatty:
        # Redirected output with no prettifying requested: raw bytes.
        chunk_size = (RawStream.CHUNK_SIZE_BY_LINE
                      if args.stream
                      else RawStream.CHUNK_SIZE)
        return partial(RawStream, chunk_size=chunk_size)
    # Terminal output without prettifying: just re-encode.
    return partial(EncodedStream, env=env)
class BaseStream(object):
    """Base HTTP message stream class."""
    def __init__(self, msg, with_headers=True, with_body=True):
        """
        :param msg: a :class:`models.HTTPMessage` subclass
        :param with_headers: if `True`, headers will be included
        :param with_body: if `True`, body will be included
        """
        self.msg = msg
        self.with_headers = with_headers
        self.with_body = with_body
    def _get_headers(self):
        """Return the headers' bytes."""
        return self.msg.headers.encode('ascii')
    def _iter_body(self):
        """Return an iterator over the message body."""
        raise NotImplementedError()
    def __iter__(self):
        """Return an iterator over `self.msg`."""
        if self.with_headers:
            yield self._get_headers()
            yield b'\r\n\r\n'
        if not self.with_body:
            return
        try:
            for chunk in self._iter_body():
                yield chunk
        except BinarySuppressedError as e:
            # Binary bodies are replaced by a short notice.
            if self.with_headers:
                yield b'\n'
            yield e.message
class RawStream(BaseStream):
    """The message is streamed in chunks with no processing."""
    # Large chunks for plain pass-through; smaller ones for --stream.
    CHUNK_SIZE = 1024 * 100
    CHUNK_SIZE_BY_LINE = 1024 * 5
    def __init__(self, chunk_size=CHUNK_SIZE, **kwargs):
        super(RawStream, self).__init__(**kwargs)
        self.chunk_size = chunk_size
    def _iter_body(self):
        # Delegate chunking straight to the message object.
        return self.msg.iter_body(self.chunk_size)
class EncodedStream(BaseStream):
    """Encoded HTTP message stream.
    The message bytes are converted to an encoding suitable for
    `self.env.stdout`. Unicode errors are replaced and binary data
    is suppressed. The body is always streamed by line.
    """
    CHUNK_SIZE = 1024 * 5
    def __init__(self, env=Environment(), **kwargs):
        super(EncodedStream, self).__init__(**kwargs)
        if env.stdout_isatty:
            # The terminal dictates its own encoding.
            chosen = getattr(env.stdout, 'encoding', None)
        else:
            # Otherwise keep whatever encoding the message came with.
            chosen = self.msg.encoding
        # utf8 is the fallback when nothing could be determined.
        self.output_encoding = chosen or 'utf8'
    def _iter_body(self):
        source_encoding = self.msg.encoding
        for line, lf in self.msg.iter_lines(self.CHUNK_SIZE):
            if b'\0' in line:
                # NUL byte -> treat the body as binary and bail out.
                raise BinarySuppressedError()
            yield line.decode(source_encoding).encode(
                self.output_encoding, 'replace') + lf
class PrettyStream(EncodedStream):
    """An :class:`EncodedStream` that additionally runs the message
    through a content processor (formatting / colorizing).
    Useful for long-lived HTTP responses that stream by lines
    such as the Twitter streaming API.
    """
    CHUNK_SIZE = 1024 * 5
    def __init__(self, processor, **kwargs):
        super(PrettyStream, self).__init__(**kwargs)
        self.processor = processor
    def _get_headers(self):
        # Headers are processed as one text blob before re-encoding.
        processed = self.processor.process_headers(self.msg.headers)
        return processed.encode(self.output_encoding)
    def _iter_body(self):
        for raw_line, line_ending in self.msg.iter_lines(self.CHUNK_SIZE):
            if b'\0' in raw_line:
                # NUL byte -> binary body, suppress it.
                raise BinarySuppressedError()
            yield self._process_body(raw_line) + line_ending
    def _process_body(self, chunk):
        # decode -> run processors -> re-encode for the output target.
        text = chunk.decode(self.msg.encoding, 'replace')
        processed = self.processor.process_body(text, self.msg.content_type)
        return processed.encode(self.output_encoding, 'replace')
class BufferedPrettyStream(PrettyStream):
    """Like :class:`PrettyStream`, except the body is fully fetched
    first and processed in a single pass.
    Suitable for regular HTTP responses.
    """
    CHUNK_SIZE = 1024 * 10
    def _iter_body(self):
        # Accumulate the whole body before prettifying it, bailing out
        # immediately as soon as any chunk looks binary.
        accumulated = bytearray()
        for piece in self.msg.iter_body(self.CHUNK_SIZE):
            if b'\0' in piece:
                raise BinarySuppressedError()
            accumulated.extend(piece)
        yield self._process_body(accumulated)
###############################################################################
# Processing
###############################################################################
class HTTPLexer(lexer.RegexLexer):
    """Simplified HTTP lexer for Pygments.
    It only operates on headers and provides a stronger contrast between
    their names and values than the original one bundled with Pygments
    (:class:`pygments.lexers.text import HttpLexer`), especially when
    Solarized color scheme is used.
    """
    name = 'HTTP'
    aliases = ['http']
    filenames = ['*.http']
    # Single-state lexer: each line is matched independently as either a
    # Request-Line, a Status-Line, or a header.
    tokens = {
        'root': [
            # Request-Line, e.g. "GET /path HTTP/1.1"
            (r'([A-Z]+)( +)([^ ]+)( +)(HTTP)(/)(\d+\.\d+)',
             lexer.bygroups(
                 token.Name.Function,
                 token.Text,
                 token.Name.Namespace,
                 token.Text,
                 token.Keyword.Reserved,
                 token.Operator,
                 token.Number
             )),
            # Response Status-Line, e.g. "HTTP/1.1 200 OK"
            (r'(HTTP)(/)(\d+\.\d+)( +)(\d{3})( +)(.+)',
             lexer.bygroups(
                 token.Keyword.Reserved,  # 'HTTP'
                 token.Operator,  # '/'
                 token.Number,  # Version
                 token.Text,
                 token.Number,  # Status code
                 token.Text,
                 token.Name.Exception,  # Reason
             )),
            # Header, e.g. "Content-Type: text/plain"
            (r'(.*?)( *)(:)( *)(.+)', lexer.bygroups(
                token.Name.Attribute,  # Name
                token.Text,
                token.Operator,  # Colon
                token.Text,
                token.String  # Value
            ))
        ]
    }
class BaseProcessor(object):
    """Base, noop output processor class.
    Subclasses override :meth:`process_headers` and/or
    :meth:`process_body`; the base implementations return their input
    unchanged.
    """
    # Subclasses may set this to False in __init__ to opt out.
    enabled = True
    def __init__(self, env=None, **kwargs):
        """
        :param env: an :class:`Environment` instance; a fresh instance is
            created when not given. (The previous `env=Environment()`
            default was evaluated once at class-definition time and
            silently shared between all instances.)
        :param kwargs: additional keyword argument that some
            processor might require.
        """
        self.env = Environment() if env is None else env
        self.kwargs = kwargs
    def process_headers(self, headers):
        """Return processed `headers`
        :param headers: The headers as text.
        """
        return headers
    def process_body(self, content, content_type, subtype):
        """Return processed `content`.
        :param content: The body content as text
        :param content_type: Full content type, e.g., 'application/atom+xml'.
        :param subtype: E.g. 'xml'.
        """
        return content
class JSONProcessor(BaseProcessor):
    """JSON body processor.
    Pretty-prints bodies whose content subtype is JSON; everything else
    (including invalid JSON) passes through untouched.
    """
    def process_body(self, content, content_type, subtype):
        if subtype != 'json':
            return content
        try:
            parsed = json.loads(content)
        except ValueError:
            # Not valid JSON after all -- leave the body as-is.
            return content
        # Sorted keys, indentation and raw unicode improve readability.
        return json.dumps(parsed,
                          sort_keys=True,
                          ensure_ascii=False,
                          indent=4)
class PygmentsProcessor(BaseProcessor):
    """A processor that applies syntax-highlighting using Pygments
    to the headers, and to the body as well if its content type is recognized.
    """
    def __init__(self, *args, **kwargs):
        super(PygmentsProcessor, self).__init__(*args, **kwargs)
        # Cache that speeds up when we process streamed body by line.
        self.lexers_by_type = {}
        # Disable the whole processor when colors are off for this env.
        if not self.env.colors:
            self.enabled = False
            return
        try:
            style = get_style_by_name(
                self.kwargs.get('pygments_style', DEFAULT_STYLE))
        except ClassNotFound:
            # Unknown style name -- fall back to the bundled Solarized.
            style = Solarized256Style
        # 256-color formatter on Windows / 256-color terminals, plain
        # terminal formatter otherwise.
        if self.env.is_windows or self.env.colors == 256:
            fmt_class = Terminal256Formatter
        else:
            fmt_class = TerminalFormatter
        self.formatter = fmt_class(style=style)
    def process_headers(self, headers):
        # Highlight with the simplified HTTP lexer; strip the trailing
        # newline that Pygments appends.
        return pygments.highlight(
            headers, HTTPLexer(), self.formatter).strip()
    def process_body(self, content, content_type, subtype):
        try:
            # Lexer lookups are cached per content type (useful when a
            # streamed body is processed line by line).
            lexer = self.lexers_by_type.get(content_type)
            if not lexer:
                try:
                    lexer = get_lexer_for_mimetype(content_type)
                except ClassNotFound:
                    lexer = get_lexer_by_name(subtype)
                self.lexers_by_type[content_type] = lexer
        except ClassNotFound:
            # No lexer found for this type: leave the body unhighlighted.
            pass
        else:
            content = pygments.highlight(content, lexer, self.formatter)
        return content.strip()
class HeadersProcessor(BaseProcessor):
    """Sorts headers by name retaining relative order of multiple headers
    with the same name.
    """
    def process_headers(self, headers):
        lines = headers.splitlines()
        # The first line is the Request/Status-Line and must stay first;
        # sorted() is stable, so duplicate header names keep their order.
        status_line, header_lines = lines[:1], lines[1:]
        header_lines = sorted(header_lines, key=lambda h: h.split(':')[0])
        return '\r\n'.join(status_line + header_lines)
class OutputProcessor(object):
    """A delegate class that invokes the actual processors."""
    # Processor classes by option group name.
    installed_processors = {
        'format': [
            HeadersProcessor,
            JSONProcessor
        ],
        'colors': [
            PygmentsProcessor
        ]
    }
    def __init__(self, groups, env=None, **kwargs):
        """
        :param groups: the groups of processors to be applied
        :param env: a :class:`models.Environment` instance; a fresh one
            is created when not given. (The previous `env=Environment()`
            default was evaluated once at class-definition time and
            shared between all instances.)
        :param kwargs: additional keyword arguments for processors
        """
        if env is None:
            env = Environment()
        # Instantiate every processor of every requested group, keeping
        # only those that report themselves as enabled.
        self.processors = []
        for group in groups:
            for cls in self.installed_processors[group]:
                processor = cls(env, **kwargs)
                if processor.enabled:
                    self.processors.append(processor)
    def process_headers(self, headers):
        """Pipe `headers` text through all enabled processors in order."""
        for processor in self.processors:
            headers = processor.process_headers(headers)
        return headers
    def process_body(self, content, content_type):
        """Pipe body `content` through all enabled processors in order.
        :param content_type: full value, e.g., 'application/atom+xml'
        """
        # Strip parameters, e.g., '; charset=utf-8'.
        content_type = content_type.split(';')[0]
        # e.g., 'xml' (the part after the last '/' and '+').
        subtype = content_type.split('/')[-1].split('+')[-1]
        for processor in self.processors:
            content = processor.process_body(content, content_type, subtype)
        return content
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
# Lifecycle states a Program can be in.
ACTIVE_PROGRAM_STATUS = "active"
ENDED_PROGRAM_STATUS = "ended"
HIDDEN_PROGRAM_STATUS = "hidden"
UPCOMING_PROGRAM_STATUS = "upcoming"
# Choices for BaseProgram.program_status.
PROGRAM_STATUSES = ((UPCOMING_PROGRAM_STATUS, 'Upcoming'),
                    (ACTIVE_PROGRAM_STATUS, 'Active'),
                    (ENDED_PROGRAM_STATUS, 'Ended'),
                    (HIDDEN_PROGRAM_STATUS, 'Hidden'))
# Statuses considered "current" (i.e. not ended or hidden).
CURRENT_STATUSES = [ACTIVE_PROGRAM_STATUS, UPCOMING_PROGRAM_STATUS]
# Levels of refund-code support a program can offer.
REFUND_CODES_DISABLED = "disabled"
REFUND_CODES_ENABLED = "enabled"
REFUND_CODES_VIEW_ONLY = "view-submitted-only"
# Choices for BaseProgram.refund_code_support.
REFUND_CODE_SUPPORT_VALUES = (
    (REFUND_CODES_ENABLED, "Enabled"),
    (REFUND_CODES_VIEW_ONLY, "View Submitted Only"),
    (REFUND_CODES_DISABLED, "Disabled"),
)
# Validation messages for the overview start/deadline date pair.
INVALID_OVERVIEW_DEADLINE_MSG = ("Overview deadline date must be set"
                                 " if start date is set")
INVALID_OVERVIEW_START_MSG = ("Overview start date must be set"
                              " if deadline date is set")
INVALID_OVERVIEW_TIMESPAN_MSG = ("Overview deadline date must be"
                                 " after start date")
class BaseProgram(AcceleratorModel):
    """An Accelerator program"""
    # NOTE(review): the ForeignKey fields below omit on_delete, which is
    # only valid on Django < 2.0 -- confirm the targeted Django version.
    name = models.CharField(max_length=50)
    program_family = models.ForeignKey(
        swapper.get_model_name(AcceleratorModel.Meta.app_label,
                               "ProgramFamily"),
        related_name="programs",
    )
    cycle = models.ForeignKey(
        swapper.get_model_name(AcceleratorModel.Meta.app_label,
                               "ProgramCycle"),
        blank=True,
        null=True,
        related_name="programs")
    description = models.CharField(max_length=500, blank=True)
    start_date = models.DateTimeField(blank=True, null=True)
    end_date = models.DateTimeField(blank=True, null=True)
    location = models.CharField(max_length=50)
    # One of PROGRAM_STATUSES ('upcoming'/'active'/'ended'/'hidden').
    program_status = models.CharField(
        max_length=64,
        choices=PROGRAM_STATUSES,
    )
    alumni_eligible_program = models.BooleanField(
        default=False,
        help_text=('Finalists will be added to our Global ' +
                   'Alumni Program upon this program being set to "Ended"')
    )
    # Application fees; currency_code is a 3-letter code (presumably
    # ISO 4217 -- confirm against callers).
    currency_code = models.CharField(max_length=3)
    early_application_fee = models.DecimalField(
        max_digits=7,
        decimal_places=2
    )
    regular_application_fee = models.DecimalField(
        max_digits=7,
        decimal_places=2
    )
    regular_fee_suffix = models.CharField(max_length=20, blank=True)
    # Per-role messages shown to interested/approved participants.
    interested_judge_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links"
    )
    approved_judge_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links")
    interested_mentor_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links"
    )
    approved_mentor_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links")
    interested_speaker_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links"
    )
    approved_speaker_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links")
    interested_office_hours_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links"
    )
    approved_office_hours_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links")
    # One of REFUND_CODE_SUPPORT_VALUES.
    refund_code_support = models.CharField(
        max_length=64,
        choices=REFUND_CODE_SUPPORT_VALUES,
        default='enabled',
    )
    many_codes_per_partner = models.BooleanField(
        default=False,
        verbose_name="Allow multiple refund codes per partner",
        help_text=u"If true, then a given application may apply more than one "
                  u"refund code from the same partner for this program"
    )
    url_slug = models.CharField(
        max_length=30,
        default="",
    )
    accepting_mentors_and_goals = models.BooleanField(default=False)
    mentor_program_group = models.ForeignKey(
        swapper.get_model_name(AcceleratorModel.Meta.app_label, "NamedGroup"),
        blank=True,
        null=True)
    # Optional window during which the program overview is shown.
    overview_start_date = models.DateTimeField(
        blank=True, null=True,
        help_text="Time is in UTC")
    overview_deadline_date = models.DateTimeField(
        blank=True, null=True,
        help_text="Time is in UTC")
    eventbrite_organizer_id = models.CharField(
        max_length=20,
        blank=True,
        null=True)
    program_overview_link = models.URLField(
        blank=True,
        null=True,
        max_length=255,
        help_text=('URL of the program overview page, '
                   'ex: https://masschallenge.org/programs-boston')
    )
    class Meta(AcceleratorModel.Meta):
        verbose_name_plural = 'Programs'
        abstract = True
        db_table = '{}_program'.format(AcceleratorModel.Meta.app_label)
    def family_abbr(self):
        # Uppercased program-family slug, used as a short program code.
        return self.program_family.url_slug.upper()
[AC-7049] Remove new line at the end of the file
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
# Lifecycle states a Program can be in.
ACTIVE_PROGRAM_STATUS = "active"
ENDED_PROGRAM_STATUS = "ended"
HIDDEN_PROGRAM_STATUS = "hidden"
UPCOMING_PROGRAM_STATUS = "upcoming"
# Choices for BaseProgram.program_status.
PROGRAM_STATUSES = ((UPCOMING_PROGRAM_STATUS, 'Upcoming'),
                    (ACTIVE_PROGRAM_STATUS, 'Active'),
                    (ENDED_PROGRAM_STATUS, 'Ended'),
                    (HIDDEN_PROGRAM_STATUS, 'Hidden'))
# Statuses considered "current" (i.e. not ended or hidden).
CURRENT_STATUSES = [ACTIVE_PROGRAM_STATUS, UPCOMING_PROGRAM_STATUS]
# Levels of refund-code support a program can offer.
REFUND_CODES_DISABLED = "disabled"
REFUND_CODES_ENABLED = "enabled"
REFUND_CODES_VIEW_ONLY = "view-submitted-only"
# Choices for BaseProgram.refund_code_support.
REFUND_CODE_SUPPORT_VALUES = (
    (REFUND_CODES_ENABLED, "Enabled"),
    (REFUND_CODES_VIEW_ONLY, "View Submitted Only"),
    (REFUND_CODES_DISABLED, "Disabled"),
)
# Validation messages for the overview start/deadline date pair.
INVALID_OVERVIEW_DEADLINE_MSG = ("Overview deadline date must be set"
                                 " if start date is set")
INVALID_OVERVIEW_START_MSG = ("Overview start date must be set"
                              " if deadline date is set")
INVALID_OVERVIEW_TIMESPAN_MSG = ("Overview deadline date must be"
                                 " after start date")
class BaseProgram(AcceleratorModel):
    """An Accelerator program"""
    # NOTE(review): the ForeignKey fields below omit on_delete, which is
    # only valid on Django < 2.0 -- confirm the targeted Django version.
    name = models.CharField(max_length=50)
    program_family = models.ForeignKey(
        swapper.get_model_name(AcceleratorModel.Meta.app_label,
                               "ProgramFamily"),
        related_name="programs",
    )
    cycle = models.ForeignKey(
        swapper.get_model_name(AcceleratorModel.Meta.app_label,
                               "ProgramCycle"),
        blank=True,
        null=True,
        related_name="programs")
    description = models.CharField(max_length=500, blank=True)
    start_date = models.DateTimeField(blank=True, null=True)
    end_date = models.DateTimeField(blank=True, null=True)
    location = models.CharField(max_length=50)
    # One of PROGRAM_STATUSES ('upcoming'/'active'/'ended'/'hidden').
    program_status = models.CharField(
        max_length=64,
        choices=PROGRAM_STATUSES,
    )
    alumni_eligible_program = models.BooleanField(
        default=False,
        help_text=('Finalists will be added to our Global ' +
                   'Alumni Program upon this program being set to "Ended"')
    )
    # Application fees; currency_code is a 3-letter code (presumably
    # ISO 4217 -- confirm against callers).
    currency_code = models.CharField(max_length=3)
    early_application_fee = models.DecimalField(
        max_digits=7,
        decimal_places=2
    )
    regular_application_fee = models.DecimalField(
        max_digits=7,
        decimal_places=2
    )
    regular_fee_suffix = models.CharField(max_length=20, blank=True)
    # Per-role messages shown to interested/approved participants.
    interested_judge_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links"
    )
    approved_judge_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links")
    interested_mentor_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links"
    )
    approved_mentor_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links")
    interested_speaker_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links"
    )
    approved_speaker_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links")
    interested_office_hours_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links"
    )
    approved_office_hours_message = models.TextField(
        blank=True,
        help_text="You may use HTML, including links")
    # One of REFUND_CODE_SUPPORT_VALUES.
    refund_code_support = models.CharField(
        max_length=64,
        choices=REFUND_CODE_SUPPORT_VALUES,
        default='enabled',
    )
    many_codes_per_partner = models.BooleanField(
        default=False,
        verbose_name="Allow multiple refund codes per partner",
        help_text=u"If true, then a given application may apply more than one "
                  u"refund code from the same partner for this program"
    )
    url_slug = models.CharField(
        max_length=30,
        default="",
    )
    accepting_mentors_and_goals = models.BooleanField(default=False)
    mentor_program_group = models.ForeignKey(
        swapper.get_model_name(AcceleratorModel.Meta.app_label, "NamedGroup"),
        blank=True,
        null=True)
    # Optional window during which the program overview is shown.
    overview_start_date = models.DateTimeField(
        blank=True, null=True,
        help_text="Time is in UTC")
    overview_deadline_date = models.DateTimeField(
        blank=True, null=True,
        help_text="Time is in UTC")
    eventbrite_organizer_id = models.CharField(
        max_length=20,
        blank=True,
        null=True)
    program_overview_link = models.URLField(
        blank=True,
        null=True,
        max_length=255,
        help_text=('URL of the program overview page, '
                   'ex: https://masschallenge.org/programs-boston')
    )
    class Meta(AcceleratorModel.Meta):
        verbose_name_plural = 'Programs'
        abstract = True
        db_table = '{}_program'.format(AcceleratorModel.Meta.app_label)
    def family_abbr(self):
        # Uppercased program-family slug, used as a short program code.
        return self.program_family.url_slug.upper()
|
import requests
import logging
import os
import time
import threading
import queue
import json
from urllib.parse import quote_plus
from cis_profile.common import WellKnown
from cis_profile import User
from cis_publisher import secret
from cis_publisher import common
logger = logging.getLogger(__name__)
class PublisherError(Exception):
    """Raised when publishing profiles to the CIS APIs fails."""
class Publish:
def __init__(self, profiles, login_method, publisher_name, discovery_url=None):
"""
@profiles list of cis_profiles.User
@login_method str a valid login_method for the user (such as "ad")
@publisher_name str of your publisher name (such as 'ldap' or 'mozilliansorg')
@discovery_url a discovery URL for CIS (CIS_DISCOVERY_URL env var will be used otherwise)
"""
self.profiles = profiles
self.login_method = login_method
self.publisher_name = publisher_name
if discovery_url is None:
discovery_url = os.environ.get("CIS_DISCOVERY_URL", "https://auth.mozilla.com/.well-known/mozilla-iam")
self.__discovery_url = discovery_url
# Defaults
self.api_url = None
self.api_url_person = None
self.api_url_change = None
self.access_token = None
# Number of retries when calling CIS APIs, for robustness
self.max_retries = 3
self.max_threads = 50
# retry_delay is passed to time.sleep
self.retry_delay = 1
# known_cis_users is the output of the Person API query
# known_cis_users_by_user_id is a dict that maps user_id: email
# known_cis_users_by_email is a dict that maps email: user_id (instead of user_id: email)
self.cis_user_list = None
self.known_cis_users = None
self.known_cis_users_by_email = {}
self.known_cis_users_by_user_id = {}
self.all_known_profiles = {}
self.known_profiles = {}
self.__inited = False
    def __deferred_init(self):
        """
        Init all data that requires external resources
        (secrets, local config, and the CIS well-known discovery
        document). Safe to call repeatedly; only the first call does
        any work.
        """
        if self.__inited:
            return
        logger.info("Getting API URLs from well-known {}".format(self.__discovery_url))
        self.secret_manager = secret.Manager()
        self.config = common.get_config()
        self.__well_known = WellKnown(self.__discovery_url)
        wk = self.__well_known.get_well_known()
        self.api_url = wk["api"]["endpoints"]
        # XXX These are not currently used
        # self.api_audience = wk["api"]["audience"]
        # self.api_url_person = self.api_url["person"]
        # self.api_url_change = self.api_url["change"]
        # Audience and API hosts come from local config rather than the
        # well-known document (see XXX above).
        self.api_audience = self.config("api_identifier", namespace="cis", default="api.dev.sso.allizom.org")
        self.api_url_person = "https://" + self.config(
            "person_api_url", namespace="cis", default="person.api.dev.sso.allizom.org"
        )
        self.api_url_change = "https://" + self.config(
            "change_api_url", namespace="cis", default="change.api.dev.sso.allizom.org"
        )
        self.publisher_rules = self.__well_known.get_publisher_rules()
        self.__inited = True
def post_all(self, user_ids=None, create_users=False):
"""
Post all profiles
@user_ids list of str which are user ids like 'ad|test'
Returns list of failed users (empty if no failure)
"""
self.__deferred_init()
qs = "/v2/user"
threads = []
failed_users = queue.Queue()
logger.info("Received {} user profiles to post".format(len(self.profiles)))
if user_ids is not None:
logger.info(
"Requesting a specific list of user_id's to post {} (total user_ids: {}, total profiles: {})".format(
user_ids, len(user_ids), len(self.profiles)
)
)
if not isinstance(user_ids, list):
raise PublisherError("user_ids must be a list", user_ids)
# list what to delete, then delete instead of slower copy list operations or filters
# This is because some data sets are huge / GBs of data
if not create_users:
xlist = []
for idx, profile in enumerate(self.profiles):
if profile.user_id.value is None:
if self.known_cis_users_by_email.get(profile.primary_email.value) not in user_ids:
xlist.append(idx)
elif profile.user_id.value not in user_ids:
xlist.append(idx)
for i in reversed(xlist):
if self.profiles[i].active.value:
del self.profiles[i]
logger.info("After filtering, we have {} user profiles to post".format(len(self.profiles)))
for profile in self.profiles:
# If we have no user_id provided we need to find it here
# These are always considered updated users, not new users
if profile.user_id.value is None:
user_id = self.known_cis_users_by_email[profile.primary_email.value]
else:
user_id = profile.user_id.value
# Filter out non-updatable attributes as needed
self.filter_known_cis_users(profiles=[profile])
threads.append(threading.Thread(target=self._really_post, args=(user_id, qs, profile, failed_users)))
threads[-1].start()
num_threads = len(threading.enumerate())
while num_threads >= self.max_threads:
time.sleep(1)
num_threads = len(threading.enumerate())
logger.info("Too many concurrent threads, waiting a bit...")
logger.debug("Waiting for threads to terminate...")
for t in threads:
t.join()
logger.debug("Retrieving results from the queue...")
ret = []
while not failed_users.empty():
ret.append(failed_users.get())
failed_users.task_done()
return ret
def _really_post(self, user_id, qs, profile, failed_users):
# Existing users (i.e. users to update) have to be passed as argument
if user_id in self.known_cis_users_by_user_id:
qs = "/v2/user?user_id={}".format(user_id)
# New users do not
else:
qs = "/v2/user"
self._really_post_with_qs(user_id, qs, profile, failed_users)
    def _really_post_with_qs(self, user_id, qs, profile, failed_users):
        """POST a single `profile` to the Change API, retrying on failure.
        :param user_id: the user's id; may be None (primary_email is then
            used as the logged identifier).
        :param qs: path/query string appended to the Change API URL.
        :param profile: a profile object exposing as_dict().
        :param failed_users: queue collecting identifiers of profiles
            that exhausted all retries.
        """
        response_ok = False
        retries = 0
        access_token = self._get_authzero_token()
        # We don't always get a user_id set
        identifier = user_id
        if identifier is None:
            identifier = profile.primary_email.value
        if identifier is None:
            logger.critical("Could not find profile identifier!")
        logger.debug("Posting user profile: {}".format(profile.as_dict()))
        while not response_ok:
            logger.info(
                "Attempting to post profile (user_id: {}, primary_email: {} to API {}{}".format(
                    profile.user_id.value, profile.primary_email.value, self.api_url_change, qs
                )
            )
            response = self._request_post(
                url="{}{}".format(self.api_url_change, qs),
                payload=profile.as_dict(),
                headers={"authorization": "Bearer {}".format(access_token)},
            )
            response_ok = response.ok
            if not response_ok:
                logger.warning(
                    "Posting profile {} to API failed, retry is {} retry_delay is {} status_code is {} reason is {}"
                    "contents were {}".format(
                        identifier, retries, self.retry_delay, response.status_code, response.reason, response.text
                    )
                )
                retries = retries + 1
                # Back off between attempts; give up after max_retries.
                time.sleep(self.retry_delay)
                if retries >= self.max_retries:
                    logger.error(
                        "Maximum retries reached ({}), profile is not to be sent {}".format(retries, identifier)
                    )
                    failed_users.put(identifier)
                    break
            else:
                logger.info(
                    "Profile successfully posted to API {}, status_code: {}".format(identifier, response.status_code)
                )
    def _request_post(self, url, payload, headers):
        # Thin wrapper around requests.post (isolated so it can be
        # stubbed out in tests).
        return requests.post(url, json=payload, headers=headers)
    def _request_get(self, url, qs, headers):
        # Thin wrapper around requests.get; `qs` is appended to `url`.
        return requests.get("{}{}".format(url, qs), headers=headers)
def _get_authzero_client(self):
authzero = secret.AuthZero(
client_id=self.secret_manager.secret("client_id"),
client_secret=self.secret_manager.secret("client_secret"),
api_identifier=self.api_audience,
authzero_tenant=self.config("authzero_tenant", namespace="cis", default="auth.mozilla.auth0.com"),
)
return authzero
def _get_authzero_token(self):
# This could check for expiration
if self.access_token is not None:
return self.access_token
else:
authzero = self._get_authzero_client()
self.access_token = authzero.exchange_for_access_token()
return self.access_token
def get_known_cis_user_by_attribute_paginated(self, attributes):
"""
Call CIS Person API and return a list of known profiles matching the selected attribute
attributes: dict of attr: value keypairs (eg: {"staff_information.staff": True, "active": True} )
Endpoint: /v2/users/id/all/by_attribute_contains?staff_information.staff=True&active=True
return: list of dict JSON profiles
"""
self.__deferred_init()
hkey = hash(json.dumps(attributes))
if hkey in self.known_profiles and len(self.known_profiles[hkey]) > 0:
return self.known_profiles([hkey])
else:
self.known_profiles[hkey] = {}
logger.info(
"Requesting CIS Person API for a list of user profiles matching these attributes: {}".format(attributes)
)
args = "&".join("{}={}".format(key, value) for key, value in attributes.items())
qs = f"/v2/users/id/all/by_attribute_contains?fullProfiles=True&{args}"
access_token = self._get_authzero_token()
nextPage = ""
while nextPage is not None:
if nextPage != "":
real_qs = f"{qs}&nextPage={nextPage}"
else:
real_qs = qs
response = self._request_get(
self.api_url_person, real_qs, headers={"authorization": "Bearer {}".format(access_token)}
)
if not response.ok:
logger.error(f"Failed to query CIS Person API: {self.api_url_person}, {real_qs}, {response.text}")
raise PublisherError("Failed to query CIS Person API", response.text)
response_json = response.json()
for p in response_json["users"]:
self.known_profiles[hkey][p["id"]["value"]] = p
nextPage = response_json.get("nextPage")
logger.info("Got {} users known to CIS for these attributes".format(len(self.known_profiles[hkey])))
return self.known_profiles[hkey]
    def get_known_cis_users_paginated(self):
        """
        Call CIS Person API and return a list of all known profiles
        return: list of dict JSON profiles
        """
        self.__deferred_init()
        # Memoized: once populated, the cached dict is returned as-is.
        if len(self.all_known_profiles) > 0:
            return self.all_known_profiles
        logger.info("Requesting CIS Person API for a list of all user profiles")
        qs = "/v2/users"
        access_token = self._get_authzero_token()
        nextPage = ""
        while nextPage is not None:
            if nextPage != "":
                # This is an all users query and nextPage is the only arg.
                # ? is appropriate here.
                real_qs = "{}?nextPage={}".format(qs, nextPage)
            else:
                real_qs = qs
            response = self._request_get(
                self.api_url_person, real_qs, headers={"authorization": "Bearer {}".format(access_token)}
            )
            if not response.ok:
                logger.error(
                    "Failed to query CIS Person API: {}{} response: {}".format(
                        self.api_url_person, real_qs, response.text
                    )
                )
                raise PublisherError("Failed to query CIS Person API", response.text)
            response_json = response.json()
            # NOTE(review): this endpoint is parsed via "Items" while the other
            # paginated endpoints use "users" - confirm against the API schema.
            for p in response_json["Items"]:
                self.all_known_profiles[p["user_id"]["value"]] = p
            nextPage = response_json.get("nextPage")
        logger.info("Got {} users known to CIS".format(len(self.all_known_profiles)))
        return self.all_known_profiles
    def get_known_cis_users(self, include_inactive=False):
        # Backward-compatible alias for the paginated id/email lookup.
        return self.get_known_cis_userids_paginated(include_inactive)
def get_known_cis_userids_paginated(self, include_inactive=False):
"""
Call CIS Person API and return a list of existing user ids and/or emails
@include_inactive: bool include inactive users (active=False) in the results
return: list of str: cis user ids
"""
self.__deferred_init()
if self.known_cis_users is not None:
return self.known_cis_users
logger.info("Requesting CIS Person API for a list of existing users for method {}".format(self.login_method))
qs = "/v2/users/id/all?connectionMethod={}&active=True".format(self.login_method)
access_token = self._get_authzero_token()
nextPage = ""
self.known_cis_users = []
while nextPage is not None:
if nextPage != "":
real_qs = "{}&nextPage={}".format(qs, nextPage)
else:
real_qs = qs
response = self._request_get(
self.api_url_person, real_qs, headers={"authorization": "Bearer {}".format(access_token)}
)
if not response.ok:
logger.error(
"Failed to query CIS Person API: {}{} response: {}".format(
self.api_url_person, real_qs, response.text
)
)
raise PublisherError("Failed to query CIS Person API", response.text)
response_json = response.json()
# Rebuild response in a way that's backward compat with older code
for p in response_json["users"]:
self.known_cis_users.append(p)
self.known_cis_users_by_user_id[p["user_id"]] = p["primary_email"]
self.known_cis_users_by_email[p["primary_email"]] = p["user_id"]
nextPage = response_json.get("nextPage")
if include_inactive:
logger.info(
"Requesting CIS Person API for a list of existing inactive users for method {}".format(
self.login_method
)
)
qs = "/v2/users/id/all?connectionMethod={}&active=False".format(self.login_method)
while nextPage is not None:
if nextPage != "":
real_qs = "{}&nextPage={}".format(qs, nextPage)
else:
real_qs = qs
response = self._request_get(
self.api_url_person, real_qs, headers={"authorization": "Bearer {}".format(access_token)}
)
if not response.ok:
logger.error(
"Failed to query CIS Person API: {}{} response: {}".format(
self.api_url_person, real_qs, response.text
)
)
raise PublisherError("Failed to query CIS Person API", response.text)
response_json = response.json()
# Rebuild response in a way that's backward compat with older code
for p in response_json["users"]:
self.known_cis_users.append(p)
self.known_cis_users_by_user_id[p["user_id"]] = p["primary_email"]
self.known_cis_users_by_email[p["primary_email"]] = p["user_id"]
nextPage = response_json.get("nextPage")
logger.info(f"Got {len(self.known_cis_users)} users known to CIS with login_method {self.login_method}")
return self.known_cis_users
    def get_cis_user(self, user_id):
        """
        Call CIS Person API and return the matching user profile
        @user_id str a user_id
        """
        self.__deferred_init()
        logger.info("Requesting CIS Person API for a user profile {}".format(user_id))
        access_token = self._get_authzero_token()
        # user_id may contain characters such as '|', so it is URL-encoded.
        qs = "/v2/user/user_id/{}".format(quote_plus(user_id))
        response = self._request_get(
            self.api_url_person, qs, headers={"authorization": "Bearer {}".format(access_token)}
        )
        if not response.ok:
            logger.error(
                "Failed to query CIS Person API: {}{} response: {}".format(self.api_url_person, qs, response.text)
            )
            raise PublisherError("Failed to query CIS Person API", response.text)
        return User(response.json())
    def filter_known_cis_users(self, profiles=None, save=True):
        """
        Filters out fields that are not allowed to be updated by this publisher from the profile before posting.
        Existing users: fields this publisher may not update are nulled out.
        New users: empty (None/empty-list) fields are reset to the null profile
        so they are not sent at creation time.
        Profiles are mutated in place; when save=True the result also replaces self.profiles.
        """
        self.__deferred_init()
        self.get_known_cis_users()
        if profiles is None:
            profiles = self.profiles
        # Never NULL/None these fields during filtering as they're used for knowing where to post
        whitelist = ["user_id", "active"]
        null_user = User()
        allowed_updates = self.publisher_rules["update"]
        allowed_creates = self.publisher_rules["create"]
        for n in range(0, len(profiles)):
            p = profiles[n]
            if p.user_id.value is None:
                # No user_id on the profile: resolve it from the email mapping.
                user_id = self.known_cis_users_by_email[p.primary_email.value]
            else:
                user_id = p.user_id.value
            if user_id in self.known_cis_users_by_user_id:
                logger.debug(
                    "Filtering out non-updatable values from user {} because it already exist in CIS".format(user_id)
                )
                for pfield in p.__dict__:
                    # Skip? (see below for sub item)
                    if pfield in whitelist:
                        continue
                    if pfield not in allowed_updates:
                        continue
                    # sub-item?
                    elif pfield in ["identities", "staff_information", "access_information"]:
                        for subpfield in p.__dict__[pfield]:
                            # Skip?
                            if subpfield in whitelist:
                                continue
                            # XXX access_information.{hris,ldap, ...} - this needs refactor
                            exit_loop = False
                            if isinstance(allowed_updates[pfield], dict):
                                # Keep the sub-field if its signature names this publisher.
                                for sub_au in allowed_updates[pfield]:
                                    if (
                                        p.__dict__[pfield][subpfield]["signature"]["publisher"]["name"]
                                        == self.publisher_name
                                    ):
                                        exit_loop = True
                                        break
                            if exit_loop:
                                continue
                            if allowed_updates[pfield] != self.publisher_name:
                                p.__dict__[pfield][subpfield]["signature"]["publisher"]["value"] = ""
                                if "value" in p.__dict__[pfield][subpfield].keys():
                                    p.__dict__[pfield][subpfield]["value"] = None
                                elif "values" in p.__dict__[pfield][subpfield].keys():
                                    p.__dict__[pfield][subpfield]["values"] = None
                    else:
                        if allowed_updates[pfield] != self.publisher_name:
                            p.__dict__[pfield]["signature"]["publisher"]["value"] = ""
                            if "value" in p.__dict__[pfield].keys():
                                p.__dict__[pfield]["value"] = None
                            elif "values" in p.__dict__[pfield].keys():
                                p.__dict__[pfield]["values"] = None
            else:
                # User is not yet in CIS, its a new user
                logger.debug(f"Filtering out None/null fields from creation since these aren't needed for {user_id}")
                for pfield in p.__dict__:
                    if pfield in whitelist:
                        continue
                    if pfield not in allowed_creates:
                        continue
                    # XXX filter more sub-fields on create? ["staff_information", "access_information"]
                    # Refactor me to be recursive (just like above code)
                    if pfield == "identities":
                        for subpfield in p.__dict__[pfield]:
                            f = p.__dict__[pfield][subpfield]
                            if "value" in f.keys() and f["value"] is None:
                                p.__dict__[pfield][subpfield] = null_user.__dict__[pfield][subpfield] # reset
                            elif "values" in f.keys() and (f["values"] is None or len(f["values"]) == 0):
                                p.__dict__[pfield][subpfield] = null_user.__dict__[pfield][subpfield] # reset
                    else:
                        f = p.__dict__[pfield]
                        if "value" in f.keys() and f["value"] is None:
                            p.__dict__[pfield] = null_user.__dict__[pfield] # reset
                        elif "values" in f.keys() and (f["values"] is None or len(f["values"]) == 0):
                            p.__dict__[pfield] = null_user.__dict__[pfield] # reset
            logger.debug("Filtered fields for user {}".format(user_id))
            profiles[n] = p
        if save:
            self.profiles = profiles
        return profiles
    def validate(self):
        """
        Validates all profiles are from the correct provider.
        Mismatches are logged as errors; no exception is raised.
        """
        logger.info("Validating {} profiles".format(len(self.profiles)))
        # XXX ensure ldap2s3 use the right login_method
        # then remove this
        lm_map = {"ad": ["Mozilla-LDAP", "Mozilla-LDAP-Dev"]}
        if self.login_method in lm_map:
            local_login_method = lm_map[self.login_method]
        else:
            local_login_method = [self.login_method]
        for profile in self.profiles:
            if profile.login_method.value not in local_login_method:
                logger.error(
                    "Incorrect login method for this user {} - looking for {} but got {}".format(
                        profile.user_id.value, local_login_method, profile.login_method.value
                    )
                )
        logger.info("Validation completed for all profiles")
fix inactive users
import requests
import logging
import os
import time
import threading
import queue
import json
from urllib.parse import quote_plus
from cis_profile.common import WellKnown
from cis_profile import User
from cis_publisher import secret
from cis_publisher import common
logger = logging.getLogger(__name__)
class PublisherError(Exception):
    """Raised when a CIS API call fails or caller arguments are invalid."""
    pass
class Publish:
    """
    Posts profile changes to the CIS change API for a single publisher.

    Wraps Person API lookups (to distinguish new from existing users),
    publisher-rule based field filtering, and threaded posting with retries.
    """
    def __init__(self, profiles, login_method, publisher_name, discovery_url=None):
        """
        @profiles list of cis_profiles.User
        @login_method str a valid login_method for the user (such as "ad")
        @publisher_name str of your publisher name (such as 'ldap' or 'mozilliansorg')
        @discovery_url a discovery URL for CIS (CIS_DISCOVERY_URL env var will be used otherwise)
        """
        self.profiles = profiles
        self.login_method = login_method
        self.publisher_name = publisher_name
        if discovery_url is None:
            discovery_url = os.environ.get("CIS_DISCOVERY_URL", "https://auth.mozilla.com/.well-known/mozilla-iam")
        self.__discovery_url = discovery_url
        # Defaults
        self.api_url = None
        self.api_url_person = None
        self.api_url_change = None
        # Cached Auth0 bearer token (see _get_authzero_token).
        self.access_token = None
        # Number of retries when calling CIS APIs, for robustness
        self.max_retries = 3
        # Cap on concurrently live poster threads (see post_all).
        self.max_threads = 50
        # retry_delay is passed to time.sleep
        self.retry_delay = 1
        # known_cis_users is the output of the Person API query
        # known_cis_users_by_user_id is a dict that maps user_id: email
        # known_cis_users_by_email is a dict that maps email: user_id (instead of user_id: email)
        self.cis_user_list = None
        self.known_cis_users = None
        self.known_cis_users_by_email = {}
        self.known_cis_users_by_user_id = {}
        # Caches for full-profile Person API queries (see get_known_cis_*_paginated).
        self.all_known_profiles = {}
        self.known_profiles = {}
        # External resources are initialized lazily (see __deferred_init).
        self.__inited = False
    def __deferred_init(self):
        """
        Init all data that requires external resources
        (well-known discovery document, secret manager, config). Kept out of
        __init__ so constructing a Publish does no network/secret access;
        idempotent via the __inited flag.
        """
        if self.__inited:
            return
        logger.info("Getting API URLs from well-known {}".format(self.__discovery_url))
        self.secret_manager = secret.Manager()
        self.config = common.get_config()
        self.__well_known = WellKnown(self.__discovery_url)
        wk = self.__well_known.get_well_known()
        self.api_url = wk["api"]["endpoints"]
        # XXX These are not currently used
        # self.api_audience = wk["api"]["audience"]
        # self.api_url_person = self.api_url["person"]
        # self.api_url_change = self.api_url["change"]
        self.api_audience = self.config("api_identifier", namespace="cis", default="api.dev.sso.allizom.org")
        self.api_url_person = "https://" + self.config(
            "person_api_url", namespace="cis", default="person.api.dev.sso.allizom.org"
        )
        self.api_url_change = "https://" + self.config(
            "change_api_url", namespace="cis", default="change.api.dev.sso.allizom.org"
        )
        self.publisher_rules = self.__well_known.get_publisher_rules()
        self.__inited = True
    def post_all(self, user_ids=None, create_users=False):
        """
        Post all profiles
        @user_ids list of str which are user ids like 'ad|test'
        Returns list of failed users (empty if no failure)
        """
        self.__deferred_init()
        qs = "/v2/user"
        threads = []
        # Thread-safe collection of identifiers that exhausted their retries.
        failed_users = queue.Queue()
        logger.info("Received {} user profiles to post".format(len(self.profiles)))
        if user_ids is not None:
            logger.info(
                "Requesting a specific list of user_id's to post {} (total user_ids: {}, total profiles: {})".format(
                    user_ids, len(user_ids), len(self.profiles)
                )
            )
            if not isinstance(user_ids, list):
                raise PublisherError("user_ids must be a list", user_ids)
            # list what to delete, then delete instead of slower copy list operations or filters
            # This is because some data sets are huge / GBs of data
            if not create_users:
                xlist = []
                for idx, profile in enumerate(self.profiles):
                    if profile.user_id.value is None:
                        if self.known_cis_users_by_email.get(profile.primary_email.value) not in user_ids:
                            xlist.append(idx)
                    elif profile.user_id.value not in user_ids:
                        xlist.append(idx)
                # Delete from the end so earlier indices stay valid.
                for i in reversed(xlist):
                    # NOTE(review): only *active* out-of-scope profiles are dropped;
                    # inactive ones are kept and posted anyway (presumably so that
                    # deactivations always propagate) - confirm intent.
                    if self.profiles[i].active.value:
                        del self.profiles[i]
        logger.info("After filtering, we have {} user profiles to post".format(len(self.profiles)))
        for profile in self.profiles:
            # If we have no user_id provided we need to find it here
            # These are always considered updated users, not new users
            if profile.user_id.value is None:
                user_id = self.known_cis_users_by_email[profile.primary_email.value]
            else:
                user_id = profile.user_id.value
            # Filter out non-updatable attributes as needed
            self.filter_known_cis_users(profiles=[profile])
            threads.append(threading.Thread(target=self._really_post, args=(user_id, qs, profile, failed_users)))
            threads[-1].start()
            num_threads = len(threading.enumerate())
            # Throttle thread creation until the live-thread count drops below the cap.
            while num_threads >= self.max_threads:
                time.sleep(1)
                num_threads = len(threading.enumerate())
                logger.info("Too many concurrent threads, waiting a bit...")
        logger.debug("Waiting for threads to terminate...")
        for t in threads:
            t.join()
        logger.debug("Retrieving results from the queue...")
        ret = []
        while not failed_users.empty():
            ret.append(failed_users.get())
            failed_users.task_done()
        return ret
def _really_post(self, user_id, qs, profile, failed_users):
# Existing users (i.e. users to update) have to be passed as argument
if user_id in self.known_cis_users_by_user_id:
qs = "/v2/user?user_id={}".format(user_id)
# New users do not
else:
qs = "/v2/user"
self._really_post_with_qs(user_id, qs, profile, failed_users)
def _really_post_with_qs(self, user_id, qs, profile, failed_users):
response_ok = False
retries = 0
access_token = self._get_authzero_token()
# We don't always get a user_id set
identifier = user_id
if identifier is None:
identifier = profile.primary_email.value
if identifier is None:
logger.critical("Could not find profile identifier!")
logger.debug("Posting user profile: {}".format(profile.as_dict()))
while not response_ok:
logger.info(
"Attempting to post profile (user_id: {}, primary_email: {} to API {}{}".format(
profile.user_id.value, profile.primary_email.value, self.api_url_change, qs
)
)
response = self._request_post(
url="{}{}".format(self.api_url_change, qs),
payload=profile.as_dict(),
headers={"authorization": "Bearer {}".format(access_token)},
)
response_ok = response.ok
if not response_ok:
logger.warning(
"Posting profile {} to API failed, retry is {} retry_delay is {} status_code is {} reason is {}"
"contents were {}".format(
identifier, retries, self.retry_delay, response.status_code, response.reason, response.text
)
)
retries = retries + 1
time.sleep(self.retry_delay)
if retries >= self.max_retries:
logger.error(
"Maximum retries reached ({}), profile is not to be sent {}".format(retries, identifier)
)
failed_users.put(identifier)
break
else:
logger.info(
"Profile successfully posted to API {}, status_code: {}".format(identifier, response.status_code)
)
    def _request_post(self, url, payload, headers):
        # Thin wrapper around requests.post; kept separate so callers/tests can stub it.
        return requests.post(url, json=payload, headers=headers)
    def _request_get(self, url, qs, headers):
        # Thin wrapper around requests.get; `qs` is appended verbatim to `url`.
        return requests.get("{}{}".format(url, qs), headers=headers)
    def _get_authzero_client(self):
        """
        Build an Auth0 client from credentials held by the secret manager.
        """
        authzero = secret.AuthZero(
            client_id=self.secret_manager.secret("client_id"),
            client_secret=self.secret_manager.secret("client_secret"),
            api_identifier=self.api_audience,
            authzero_tenant=self.config("authzero_tenant", namespace="cis", default="auth.mozilla.auth0.com"),
        )
        return authzero
def _get_authzero_token(self):
# This could check for expiration
if self.access_token is not None:
return self.access_token
else:
authzero = self._get_authzero_client()
self.access_token = authzero.exchange_for_access_token()
return self.access_token
def get_known_cis_user_by_attribute_paginated(self, attributes):
"""
Call CIS Person API and return a list of known profiles matching the selected attribute
attributes: dict of attr: value keypairs (eg: {"staff_information.staff": True, "active": True} )
Endpoint: /v2/users/id/all/by_attribute_contains?staff_information.staff=True&active=True
return: list of dict JSON profiles
"""
self.__deferred_init()
hkey = hash(json.dumps(attributes))
if hkey in self.known_profiles and len(self.known_profiles[hkey]) > 0:
return self.known_profiles([hkey])
else:
self.known_profiles[hkey] = {}
logger.info(
"Requesting CIS Person API for a list of user profiles matching these attributes: {}".format(attributes)
)
args = "&".join("{}={}".format(key, value) for key, value in attributes.items())
qs = f"/v2/users/id/all/by_attribute_contains?fullProfiles=True&{args}"
access_token = self._get_authzero_token()
nextPage = ""
while nextPage is not None:
if nextPage != "":
real_qs = f"{qs}&nextPage={nextPage}"
else:
real_qs = qs
response = self._request_get(
self.api_url_person, real_qs, headers={"authorization": "Bearer {}".format(access_token)}
)
if not response.ok:
logger.error(f"Failed to query CIS Person API: {self.api_url_person}, {real_qs}, {response.text}")
raise PublisherError("Failed to query CIS Person API", response.text)
response_json = response.json()
for p in response_json["users"]:
self.known_profiles[hkey][p["id"]["value"]] = p
nextPage = response_json.get("nextPage")
logger.info("Got {} users known to CIS for these attributes".format(len(self.known_profiles[hkey])))
return self.known_profiles[hkey]
    def get_known_cis_users_paginated(self):
        """
        Call CIS Person API and return a list of all known profiles
        return: list of dict JSON profiles
        """
        self.__deferred_init()
        # Memoized: once populated, the cached dict is returned as-is.
        if len(self.all_known_profiles) > 0:
            return self.all_known_profiles
        logger.info("Requesting CIS Person API for a list of all user profiles")
        qs = "/v2/users"
        access_token = self._get_authzero_token()
        nextPage = ""
        while nextPage is not None:
            if nextPage != "":
                # This is an all users query and nextPage is the only arg.
                # ? is appropriate here.
                real_qs = "{}?nextPage={}".format(qs, nextPage)
            else:
                real_qs = qs
            response = self._request_get(
                self.api_url_person, real_qs, headers={"authorization": "Bearer {}".format(access_token)}
            )
            if not response.ok:
                logger.error(
                    "Failed to query CIS Person API: {}{} response: {}".format(
                        self.api_url_person, real_qs, response.text
                    )
                )
                raise PublisherError("Failed to query CIS Person API", response.text)
            response_json = response.json()
            # NOTE(review): this endpoint is parsed via "Items" while the other
            # paginated endpoints use "users" - confirm against the API schema.
            for p in response_json["Items"]:
                self.all_known_profiles[p["user_id"]["value"]] = p
            nextPage = response_json.get("nextPage")
        logger.info("Got {} users known to CIS".format(len(self.all_known_profiles)))
        return self.all_known_profiles
    def get_known_cis_users(self, include_inactive=False):
        # Backward-compatible alias for the paginated id/email lookup.
        return self.get_known_cis_userids_paginated(include_inactive)
    def get_known_cis_userids_paginated(self, include_inactive=False):
        """
        Call CIS Person API and return a list of existing user ids and/or emails
        @include_inactive: bool include inactive users (active=False) in the results
        return: list of str: cis user ids
        """
        self.__deferred_init()
        # Memoized for the lifetime of this Publish instance.
        if self.known_cis_users is not None:
            return self.known_cis_users
        logger.info("Requesting CIS Person API for a list of existing users for method {}".format(self.login_method))
        qs = "/v2/users/id/all?connectionMethod={}&active=True".format(self.login_method)
        access_token = self._get_authzero_token()
        nextPage = ""
        self.known_cis_users = []
        while nextPage is not None:
            if nextPage != "":
                real_qs = "{}&nextPage={}".format(qs, nextPage)
            else:
                real_qs = qs
            response = self._request_get(
                self.api_url_person, real_qs, headers={"authorization": "Bearer {}".format(access_token)}
            )
            if not response.ok:
                logger.error(
                    "Failed to query CIS Person API: {}{} response: {}".format(
                        self.api_url_person, real_qs, response.text
                    )
                )
                raise PublisherError("Failed to query CIS Person API", response.text)
            response_json = response.json()
            # Rebuild response in a way that's backward compat with older code
            for p in response_json["users"]:
                self.known_cis_users.append(p)
                self.known_cis_users_by_user_id[p["user_id"]] = p["primary_email"]
                self.known_cis_users_by_email[p["primary_email"]] = p["user_id"]
            nextPage = response_json.get("nextPage")
        if include_inactive:
            # Reset pagination state: nextPage is None after the loop above, so
            # without this the inactive-users query would never run.
            nextPage = ""
            logger.info(
                "Requesting CIS Person API for a list of existing inactive users for method {}".format(
                    self.login_method
                )
            )
            qs = "/v2/users/id/all?connectionMethod={}&active=False".format(self.login_method)
            while nextPage is not None:
                if nextPage != "":
                    real_qs = "{}&nextPage={}".format(qs, nextPage)
                else:
                    real_qs = qs
                response = self._request_get(
                    self.api_url_person, real_qs, headers={"authorization": "Bearer {}".format(access_token)}
                )
                if not response.ok:
                    logger.error(
                        "Failed to query CIS Person API: {}{} response: {}".format(
                            self.api_url_person, real_qs, response.text
                        )
                    )
                    raise PublisherError("Failed to query CIS Person API", response.text)
                response_json = response.json()
                # Rebuild response in a way that's backward compat with older code
                for p in response_json["users"]:
                    self.known_cis_users.append(p)
                    self.known_cis_users_by_user_id[p["user_id"]] = p["primary_email"]
                    self.known_cis_users_by_email[p["primary_email"]] = p["user_id"]
                nextPage = response_json.get("nextPage")
        logger.info(f"Got {len(self.known_cis_users)} users known to CIS with login_method {self.login_method}")
        return self.known_cis_users
    def get_cis_user(self, user_id):
        """
        Call CIS Person API and return the matching user profile
        @user_id str a user_id
        """
        self.__deferred_init()
        logger.info("Requesting CIS Person API for a user profile {}".format(user_id))
        access_token = self._get_authzero_token()
        # user_id may contain characters such as '|', so it is URL-encoded.
        qs = "/v2/user/user_id/{}".format(quote_plus(user_id))
        response = self._request_get(
            self.api_url_person, qs, headers={"authorization": "Bearer {}".format(access_token)}
        )
        if not response.ok:
            logger.error(
                "Failed to query CIS Person API: {}{} response: {}".format(self.api_url_person, qs, response.text)
            )
            raise PublisherError("Failed to query CIS Person API", response.text)
        return User(response.json())
    def filter_known_cis_users(self, profiles=None, save=True):
        """
        Filters out fields that are not allowed to be updated by this publisher from the profile before posting.
        Existing users: fields this publisher may not update are nulled out.
        New users: empty (None/empty-list) fields are reset to the null profile
        so they are not sent at creation time.
        Profiles are mutated in place; when save=True the result also replaces self.profiles.
        """
        self.__deferred_init()
        self.get_known_cis_users()
        if profiles is None:
            profiles = self.profiles
        # Never NULL/None these fields during filtering as they're used for knowing where to post
        whitelist = ["user_id", "active"]
        null_user = User()
        allowed_updates = self.publisher_rules["update"]
        allowed_creates = self.publisher_rules["create"]
        for n in range(0, len(profiles)):
            p = profiles[n]
            if p.user_id.value is None:
                # No user_id on the profile: resolve it from the email mapping.
                user_id = self.known_cis_users_by_email[p.primary_email.value]
            else:
                user_id = p.user_id.value
            if user_id in self.known_cis_users_by_user_id:
                logger.debug(
                    "Filtering out non-updatable values from user {} because it already exist in CIS".format(user_id)
                )
                for pfield in p.__dict__:
                    # Skip? (see below for sub item)
                    if pfield in whitelist:
                        continue
                    if pfield not in allowed_updates:
                        continue
                    # sub-item?
                    elif pfield in ["identities", "staff_information", "access_information"]:
                        for subpfield in p.__dict__[pfield]:
                            # Skip?
                            if subpfield in whitelist:
                                continue
                            # XXX access_information.{hris,ldap, ...} - this needs refactor
                            exit_loop = False
                            if isinstance(allowed_updates[pfield], dict):
                                # Keep the sub-field if its signature names this publisher.
                                for sub_au in allowed_updates[pfield]:
                                    if (
                                        p.__dict__[pfield][subpfield]["signature"]["publisher"]["name"]
                                        == self.publisher_name
                                    ):
                                        exit_loop = True
                                        break
                            if exit_loop:
                                continue
                            if allowed_updates[pfield] != self.publisher_name:
                                p.__dict__[pfield][subpfield]["signature"]["publisher"]["value"] = ""
                                if "value" in p.__dict__[pfield][subpfield].keys():
                                    p.__dict__[pfield][subpfield]["value"] = None
                                elif "values" in p.__dict__[pfield][subpfield].keys():
                                    p.__dict__[pfield][subpfield]["values"] = None
                    else:
                        if allowed_updates[pfield] != self.publisher_name:
                            p.__dict__[pfield]["signature"]["publisher"]["value"] = ""
                            if "value" in p.__dict__[pfield].keys():
                                p.__dict__[pfield]["value"] = None
                            elif "values" in p.__dict__[pfield].keys():
                                p.__dict__[pfield]["values"] = None
            else:
                # User is not yet in CIS, its a new user
                logger.debug(f"Filtering out None/null fields from creation since these aren't needed for {user_id}")
                for pfield in p.__dict__:
                    if pfield in whitelist:
                        continue
                    if pfield not in allowed_creates:
                        continue
                    # XXX filter more sub-fields on create? ["staff_information", "access_information"]
                    # Refactor me to be recursive (just like above code)
                    if pfield == "identities":
                        for subpfield in p.__dict__[pfield]:
                            f = p.__dict__[pfield][subpfield]
                            if "value" in f.keys() and f["value"] is None:
                                p.__dict__[pfield][subpfield] = null_user.__dict__[pfield][subpfield] # reset
                            elif "values" in f.keys() and (f["values"] is None or len(f["values"]) == 0):
                                p.__dict__[pfield][subpfield] = null_user.__dict__[pfield][subpfield] # reset
                    else:
                        f = p.__dict__[pfield]
                        if "value" in f.keys() and f["value"] is None:
                            p.__dict__[pfield] = null_user.__dict__[pfield] # reset
                        elif "values" in f.keys() and (f["values"] is None or len(f["values"]) == 0):
                            p.__dict__[pfield] = null_user.__dict__[pfield] # reset
            logger.debug("Filtered fields for user {}".format(user_id))
            profiles[n] = p
        if save:
            self.profiles = profiles
        return profiles
def validate(self):
"""
Validates all profiles are from the correct provider
"""
logger.info("Validating {} profiles".format(len(self.profiles)))
# XXX ensure ldap2s3 use the right login_method
# then remove this
lm_map = {"ad": ["Mozilla-LDAP", "Mozilla-LDAP-Dev"]}
if self.login_method in lm_map:
local_login_method = lm_map[self.login_method]
else:
local_login_method = [self.login_method]
for profile in self.profiles:
if profile.login_method.value not in local_login_method:
logger.error(
"Incorrect login method for this user {} - looking for {} but got {}".format(
profile.user_id.value, local_login_method, profile.login_method.value
)
)
logger.info("Validation completed for all profiles")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
ACUTE-Eval Analyzer.
FOR ANALYSIS!!
"""
import hashlib
import json
import os
from copy import deepcopy
from datetime import datetime
from typing import Dict, Any, List, Optional
import numpy as np
import pandas as pd
from IPython.core.display import HTML
from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.data_model.unit import Unit as MephistoUnit
from mephisto.data_model.worker import Worker
from scipy.stats import binom_test
from parlai.core.params import ParlaiParser
from parlai.crowdsourcing.tasks.acute_eval.acute_eval_blueprint import (
BLUEPRINT_TYPE as ACUTE_EVAL_BLUEPRINT_TYPE,
)
from parlai.crowdsourcing.tasks.acute_eval.fast_acute_blueprint import (
FAST_ACUTE_BLUEPRINT_TYPE,
)
# To register the ACUTE-Eval and Fast ACUTE blueprints
from parlai.crowdsourcing.tasks.acute_eval.util import get_hashed_combo_path
_ = ACUTE_EVAL_BLUEPRINT_TYPE
_ = FAST_ACUTE_BLUEPRINT_TYPE
# TODO: blueprint type strings need to be imported here to register the blueprints -
# find a better way to scale up when there are many more subclassed ACUTE blueprints
# throw away turkers below this threshold
AGREEMENT_THRESHOLD = 0.8
# do we count ties as agreements?
AGREEMENT_TIES_OKAY = False
# NOTE: these could be added as flags if desired
def setup_args():
    """
    Setup appropriate args.

    Returns a ParlaiParser with the analyzer's command-line options registered.
    """
    parser = ParlaiParser(False, False)
    # (flags, kwargs) specs, registered in order below.
    arg_specs = [
        (
            ('-ids', '--run-ids'),
            dict(type=str, default=None, help='Comma-separated list of run IDs to analyze'),
        ),
        (
            ('--root-dir',),
            dict(type=str, default=None, help='Optional root ACUTE-Eval save directory'),
        ),
        (
            ('--outdir',),
            dict(type=str, default=None, help='Where to save the results'),
        ),
        (
            ('--pairings-filepath',),
            dict(
                type=str,
                default=None,
                help='Path to the ACUTE analysis pairs for the corresponding run id',
            ),
        ),
        (
            ('--mephisto-root',),
            dict(
                type=str,
                default=None,
                help='Where to check for mephisto data (default own dir)',
            ),
        ),
        (
            ('--model-ordering',),
            dict(
                type=str,
                default=None,
                help='Comma-separated list of models, in the order in which to display them',
            ),
        ),
    ]
    for flags, kwargs in arg_specs:
        parser.add_argument(*flags, **kwargs)
    return parser
class AcuteAnalyzer(object):
    """
    Analyzer.
    Given a run_id, we can do lots of fun things!
    """
    # Class-level default; also copied to self.checkbox_prefix in __init__.
    CHECKBOX_PREFIX = 'checkbox: '
    # Prepended to checkbox columns in self.dataframe
    def __init__(self, opt: Dict, remove_failed: bool = True):
        """
        Initialize the analyzer.
        Builds up the dataframe
        :param opt:
            opt dict
        :param remove_failed:
            Whether to remove ratings from turkers who failed onboarding
        """
        assert ',' not in opt['run_ids'], "AcuteAnalyzer can only handle one run ID!"
        self.run_id = opt['run_ids']
        self.pairings_filepath = opt['pairings_filepath']
        self.outdir = opt['outdir']
        self.root_dir = opt['root_dir']
        # Get task for loading pairing files
        self.task = opt.get('task', 'q')
        if opt.get('model_ordering') is not None:
            self.custom_model_ordering = opt['model_ordering'].split(',')
        else:
            self.custom_model_ordering = None
        if not self.outdir or not self.pairings_filepath:
            # Default to using self.root_dir as the root directory for outputs
            assert self.root_dir is not None and os.path.isdir(
                self.root_dir
            ), '--root-dir must be a real directory!'
            if not self.pairings_filepath:
                # Will be set to a non-empty path later
                self.pairings_filepath = ''
            if not self.outdir:
                self.outdir = os.path.join(self.root_dir, f'{self.run_id}-results')
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir, exist_ok=True)
        mephisto_root_path = opt['mephisto_root']
        if not mephisto_root_path:
            # Empty string/None: let Mephisto use its default database location.
            mephisto_root_path = None
        self.mephisto_db = LocalMephistoDB(database_path=mephisto_root_path)
        self.mephisto_data_browser = MephistoDataBrowser(db=self.mephisto_db)
        self.checkbox_prefix = self.CHECKBOX_PREFIX
        # Prepended to checkbox columns in self.dataframe
        self.dataframe = self._extract_to_dataframe()
        self._check_eval_question()
        if remove_failed:
            self._remove_failed_onboarding()
        if self.dataframe.index.size == 0:
            raise ValueError('No valid results found!')
        self._get_model_nick_names()
        self._load_pairing_files()
def _extract_response_by_index(
    self, unit_details: Dict[str, Any], idx: int
) -> Optional[Dict[str, Any]]:
    """
    Extract response data from task data.

    :param unit_details:
        full extracted data from a unit
    :param idx:
        index of the singular evaluation within unit_details to extract
    :return response:
        Formatted worker's response data from the task, or None when the
        worker made no speaker choice for this evaluation
    """
    task_data = unit_details['data'][idx]
    response: Dict[str, Any] = {
        'run_id': self.run_id,
        'worker': unit_details['worker_id'],
        'worker_name': Worker.get(
            self.mephisto_db, unit_details['worker_id']
        ).worker_name,
        'time_taken': unit_details['task_end'] - unit_details['task_start'],
        'question': task_data['task_specs']['question'],
        'unit_id': unit_details['unit_id'],
        'task_start': unit_details['task_start'],
    }
    onboarding = task_data['task_specs'].get('is_onboarding', False)
    # Discard evaluations where the worker never picked a speaker
    if 'speakerChoice' not in task_data or task_data['speakerChoice'] == '':
        print('speakerChoice not in task data!')
        return
    choice = task_data['speakerChoice']
    if onboarding:
        # Onboarding pairs have a known correct answer
        response['correct'] = choice == task_data['pairing_dict']['correct_answer']
    else:
        # -1 marks "not applicable" on regular (non-onboarding) pairs
        response['correct'] = -1
    speakers_to_eval = sorted(task_data["pairing_dict"]["speakers_to_eval"])
    response.update(
        {
            'winner': choice,
            # The loser is whichever of the two speakers was not chosen
            'loser': speakers_to_eval[1 - (speakers_to_eval.index(choice))],
            'eval_choice_0': speakers_to_eval[0],
            'eval_choice_1': speakers_to_eval[1],
            'reason': task_data['textReason'],
            'is_onboarding': onboarding,
            'matchup': f"{'__vs__'.join(speakers_to_eval)}",
            'pairing_id': task_data['pair_id'],
        }
    )
    # If it exists, add in which checkboxes of possible reasons the Turkers checked
    if len(task_data.get('speakerReasons', {})) > 0:
        response.update(
            {
                self.checkbox_prefix + reason: checked
                for reason, checked in task_data['speakerReasons'].items()
            }
        )
    return response
def _parse_unit(self, unit: MephistoUnit) -> Optional[Dict[str, Any]]:
    """
    Fetch the formatted data for one completed unit.

    Prints a warning and returns None when the unit's data cannot be
    retrieved (e.g. the unit is corrupt or missing from the database).
    """
    try:
        unit_data = self.mephisto_data_browser.get_data_from_unit(unit)
    except AssertionError:
        print(
            f"WARNING: Data for run_id `{self.run_id}` not found for "
            f"unit id {unit.db_id}"
        )
        return None
    return unit_data
def _extract_to_dataframe(self) -> pd.DataFrame:
    """
    Collect every valid response for this run into a pandas dataframe.

    Raises ValueError when no valid responses are found at all.
    """
    responses: List[Dict[str, Any]] = []
    for unit in self.mephisto_data_browser.get_units_for_task_name(self.run_id):
        unit_details = self._parse_unit(unit)
        if unit_details is None:
            # Corrupt/missing unit; a warning was already printed
            continue
        extracted = (
            self._extract_response_by_index(unit_details, idx)
            for idx in range(len(unit_details['data']))
        )
        responses.extend(r for r in extracted if r is not None)
    if not responses:
        raise ValueError('No valid results found!')
    return pd.DataFrame(responses)
def _check_eval_question(self):
    """
    Verify that one and the same eval question was used for every result.
    """
    distinct_questions = set(self.dataframe['question'].unique())
    if len(distinct_questions) > 1:
        raise ValueError(
            'All results must share the same eval question for consistency!'
        )
def _remove_failed_onboarding(self):
    """
    Remove workers who failed onboarding.

    Drops both the onboarding rows themselves and every rating produced
    by a worker who answered any onboarding pair incorrectly, then
    reports how many ratings remain.
    """
    df = self.dataframe
    # Workers with at least one wrong onboarding answer. `== False` (not
    # `~`/`is False`) is deliberate: 'correct' holds -1 for
    # non-onboarding rows, and we must match only the boolean False.
    all_workers_failing_onboarding = df.loc[
        df['is_onboarding'] & (df['correct'] == False), 'worker'  # noqa: E712
    ].values
    workers_failing_onboarding = sorted(
        np.unique(all_workers_failing_onboarding).tolist()
    )
    self.dataframe = df[
        ~df["worker"].isin(workers_failing_onboarding) & ~df["is_onboarding"]
    ]
    # Bug fix: report remaining ratings (rows). `DataFrame.size` is
    # rows * columns and overstated the count by a factor of the column
    # count; `index.size` counts rows.
    print(
        f'{self.dataframe.index.size:d} dataframe entries remaining after removing users who failed onboarding.'
    )
def _load_pairing_files(self):
    """
    Locate and load the pairings file for this run.

    Tries several historical path layouts in order; if none exists the
    method returns early (and conversation visualizations are skipped).
    On success, attaches per-rating winner/loser dialogues to
    self.dataframe and caches self.pairings / self.pairs_to_eval.
    """
    df = self.dataframe
    if not os.path.exists(self.pairings_filepath):
        print('No valid pairings filepath was passed in: will extract likely path.')
        # Current layout: hashed combo path under root_dir/pairings_files
        self.pairings_filepath = get_hashed_combo_path(
            root_dir=self.root_dir,
            subdir='pairings_files',
            task=self.task,
            combos=self.combos,
        )
    if not os.path.exists(self.pairings_filepath):
        print(
            f'WARNING: Pairings filepath {self.pairings_filepath} could not be found.'
        )
        # Older layout: first 10 chars of the sha1 of the joined model names.
        # NOTE(review): the literal 'q' here (rather than self.task) looks
        # odd but presumably matches the historical hashing scheme —
        # confirm before "fixing".
        self.pairings_filepath = os.path.join(
            self.root_dir,
            'pairings_files',
            hashlib.sha1(
                '___vs___'.join(
                    [f"{m}.{'q'.replace(':', '_')}" for m in self.models]
                ).encode('utf-8')
            ).hexdigest()[:10],
        )
    if not os.path.exists(self.pairings_filepath):
        # For backward compatibility
        print(
            f'WARNING: Pairings filepath {self.pairings_filepath} could not be found.'
        )
        # Oldest layout: unhashed joined model/task names
        self.pairings_filepath = os.path.join(
            self.root_dir,
            'pairings_files',
            '___vs___'.join(
                [f"{m}.{self.task.replace(':', '_')}" for m in self.models]
            ),
        )
    if not os.path.exists(self.pairings_filepath):
        print(
            f'NOTE: Pairings filepath {self.pairings_filepath} could not be found!'
        )
        return
    # One JSON pairing per line; re-key the two dialogues by model name
    self.pairings = []
    with open(self.pairings_filepath, 'r') as f:
        for line in f:
            pair = json.loads(line)
            model1, model2 = pair['speakers_to_eval']
            pair[model1] = pair['dialogue_dicts'][0]
            pair[model2] = pair['dialogue_dicts'][1]
            del pair['dialogue_dicts']
            self.pairings.append(pair)
    # Each rating row references its pairing by index
    self.pairs_to_eval = [self.pairings[i] for i in df.pairing_id.values.tolist()]
    # Build dialogue_ids => dialogue mappings
    winner_dialogues = []
    loser_dialogues = []
    for i, (_, row) in enumerate(df.iterrows()):
        winner = row['winner']
        loser = row['loser']
        winner_dialogues.append(self.pairs_to_eval[i][winner])
        loser_dialogues.append(self.pairs_to_eval[i][loser])
    df['pairs_to_eval'] = pd.Series(self.pairs_to_eval, index=df.index)
    df['winner_dialogue'] = pd.Series(winner_dialogues, index=df.index)
    df['loser_dialogue'] = pd.Series(loser_dialogues, index=df.index)
    self.dataframe = df
def _get_model_nick_names(self):
    """
    Derive the sorted model list and model-pair combos from the matchups.

    Sets self.models (sorted model names) and self.combos (sorted,
    canonically-ordered model pairs).
    """
    current_run = self.dataframe[self.dataframe['run_id'] == self.run_id]
    models = set()
    combos = set()
    for matchup in current_run.matchup.unique():
        model1, model2 = matchup.split('__vs__')
        models.update((model1, model2))
        combos.add(tuple(sorted((model1, model2))))
    self.models = sorted(models)
    self.combos = sorted(combos)
def get_reasons(self) -> List[str]:
    """
    Return the free-text reason from every rating as a list.
    """
    return list(self.dataframe['reason'].values)
def get_max_hits_per_worker(self) -> int:
    """
    Return the maximum number of ratings performed by any single worker.
    """
    # groupby('worker')['run_id'].count() gives ratings per worker; .max()
    # reduces that to one scalar, so the original List[int] return
    # annotation was inaccurate and has been corrected to int.
    return self.dataframe.groupby('worker')['run_id'].count().max()
def get_wins_per_model_matchup(self) -> pd.DataFrame:
    """
    Return the wins for each model by matchup.

    Side effects: caches self.matchup_total_df (total ratings per
    ordered model pair) and self.win_total_df (wins per winner/loser
    within each pair), which other aggregation methods reuse.
    """
    # Total ratings per (eval_choice_0, eval_choice_1) pair; counting the
    # 'run_id' column is just a convenient always-present column
    self.matchup_total_df = (
        self.dataframe.groupby(['eval_choice_0', 'eval_choice_1'])['run_id']
        .count()
        .to_frame('matchup_total')
    )
    # Number of wins per (winner, loser) combination within each pair
    self.win_total_df = (
        self.dataframe.groupby(
            ['eval_choice_0', 'eval_choice_1', 'winner', 'loser']
        )['loser']
        .count()
        .to_frame('win_total')
        .reset_index()
        .set_index(['eval_choice_0', 'eval_choice_1'])
    )
    return self.win_total_df
def get_win_fractions(self) -> pd.DataFrame:
    """
    Return the joined matchup + win totals, get win fractions.

    Sorted according to win percentage. Also caches
    self.win_fraction_df, self.model_ordering, and
    self.sorted_win_frac_df for reuse by other methods.
    """
    # Lazily compute the prerequisite matchup/win totals
    if not hasattr(self, 'win_total_df'):
        self.get_wins_per_model_matchup()
    self.win_fraction_df = self.matchup_total_df.join(self.win_total_df).assign(
        win_frac=lambda df: df['win_total'] / df['matchup_total']
    )
    # Grid with rows = losing model, columns = winning model
    pivoted_df = self.win_fraction_df.pivot(
        index="loser", columns="winner", values="win_frac"
    )
    if self.custom_model_ordering is not None:
        # Use the ordering of the models supplied by the user
        assert set(self.custom_model_ordering) == set(pivoted_df.columns)
        self.model_ordering = self.custom_model_ordering
    else:
        # Otherwise order models by mean win fraction, ascending
        self.model_ordering = (
            self.win_fraction_df.groupby("winner")["win_frac"]
            .mean()
            .sort_values()
            .index.values.tolist()
        )
    self.sorted_win_frac_df = pivoted_df.reindex(
        index=self.model_ordering, columns=self.model_ordering
    )
    return self.sorted_win_frac_df
def get_num_hits_per_matchup(self):
    """
    Return a grid of the number of hits per model matchup.

    The grid is made symmetric in the two eval choices and reindexed by
    self.model_ordering.
    """
    # Robustness/consistency fix: lazily compute the prerequisites,
    # mirroring the hasattr guard in get_win_fractions(). Previously this
    # method raised AttributeError (no matchup_total_df/model_ordering)
    # when called before get_win_fractions().
    if not hasattr(self, 'model_ordering'):
        self.get_win_fractions()
    matchup_total_1_df = self.matchup_total_df.reset_index()
    # Mirror the frame so both (A, B) and (B, A) appear in the grid
    matchup_total_2_df = matchup_total_1_df.rename(
        columns={'eval_choice_0': 'eval_choice_1', 'eval_choice_1': 'eval_choice_0'}
    )
    self.num_hits_per_matchup_df = (
        pd.concat([matchup_total_1_df, matchup_total_2_df], axis=0)
        .pivot(
            index='eval_choice_0', columns='eval_choice_1', values='matchup_total'
        )
        .reindex(index=self.model_ordering, columns=self.model_ordering)
    )
    return self.num_hits_per_matchup_df
def _compile_checkbox_stats(self) -> Dict[str, pd.DataFrame]:
    """
    Return the fraction of time that Turkers selected each checkbox.

    Results are cut both (1) by matchup and winner and (2) by just the winner. Each
    checkbox represents one reason that the Turkers could have chosen the speaker
    that they did.
    """
    # Checkbox-selection columns all carry self.checkbox_prefix
    checkbox_columns = [
        col
        for col in self.dataframe.columns
        if col.startswith(self.checkbox_prefix)
    ]
    # Two cuts of the same statistics
    group_column_types = {
        'matchup_and_winner': ['matchup', 'winner'],
        'winner': ['winner'],
    }
    grouped_dataframes = {}
    for group_type, group_columns in group_column_types.items():
        selected_columns = (
            self.dataframe[group_columns + checkbox_columns]
            # Strip the prefix so output columns read naturally
            .rename(
                columns={
                    col: col[len(self.checkbox_prefix) :]
                    for col in checkbox_columns
                }
            )
            .set_index(group_columns)
            # Missing entries mean the checkbox was not checked
            .fillna(False)
        )
        # Mean over booleans = fraction of ratings with the box checked
        grouped_dataframes[group_type] = selected_columns.groupby(
            group_columns
        ).mean()
    return grouped_dataframes
def _compile_convos_and_reasons(self) -> str:
    """
    Create a human-readable string of all pairs of conversations, as well as which
    conversation each Turker chose and their reason for choosing it.
    """
    pairing_outputs = []
    for _, pairing_sr in self.dataframe.iterrows():
        winning_dialogue = self._dialogue_to_string(
            pairing_sr['winner_dialogue']['dialogue']
        )
        loser_dialogue = self._dialogue_to_string(
            pairing_sr['loser_dialogue']['dialogue']
        )
        # The f-string's continuation lines are intentionally flush left so
        # no indentation leaks into the rendered text
        pairing_output = f"""CONVO PAIR ID: {pairing_sr['pairing_id']}
WINNING DIALOGUE: {pairing_sr['winner']}
{winning_dialogue}
LOSING DIALOGUE: {pairing_sr['loser']}
{loser_dialogue}
QUESTION: {pairing_sr['question']}
TURKER'S CHOICE: {pairing_sr['winner']}
REASON: {pairing_sr['reason']}
"""
        pairing_outputs.append(pairing_output)
    return ''.join(pairing_outputs)
@staticmethod
def _dialogue_to_string(dialogue: List[dict]) -> str:
    """
    Render a dialogue (a list of utterance dicts) as readable text.

    Each utterance becomes one "[SPEAKER]: text" line; the evaluator's
    own turns are labeled HUMAN.
    """
    rendered_turns = []
    for turn in dialogue:
        speaker = "HUMAN" if turn["id"] == "human_evaluator" else turn["id"]
        rendered_turns.append(f"[{speaker}]: {turn['text']}")
    return "\n".join(rendered_turns)
def get_matchup_totals_with_significance(self) -> pd.DataFrame:
    """
    Return dataframe with matchup win totals + significance.

    Caches the result as self.significance_df.
    """

    def _signf_level(p):
        # Map a p-value to conventional star notation
        if p < 0.001:
            return "***", "p<.001"
        elif p < 0.01:
            return "**", "p<.01"
        elif p < 0.05:
            return "*", "p<.05"
        else:
            return "", "p>.05"

    output = []
    for _, run_annotations in self.dataframe.groupby('run_id'):
        # All rows of a run share one question (see _check_eval_question)
        question = list(run_annotations.question)[0]
        for matchup, annotations in run_annotations.groupby('matchup'):
            model1, model2 = matchup.split('__vs__')
            wincount1 = np.sum(annotations['winner'] == model1)
            wincount2 = np.sum(annotations['winner'] == model2)
            numratings = wincount1 + wincount2
            winrate1 = np.mean(annotations['winner'] == model1)
            winrate2 = np.mean(annotations['winner'] == model2)
            # Binomial test against an even-split null hypothesis.
            # NOTE(review): scipy.stats.binom_test is deprecated (removed
            # in SciPy 1.12) in favor of binomtest — confirm the pinned
            # SciPy version.
            p = binom_test([wincount1, wincount2])
            stars, plevel = _signf_level(p)
            # Inter-annotator agreement: fraction of ratings of a pairing
            # that match that pairing's majority choice
            agreements = []
            for _, pairing_annotations in annotations.groupby('pairing_id'):
                pair_wincount1 = np.sum(pairing_annotations['winner'] == model1)
                pair_wincount2 = np.sum(pairing_annotations['winner'] == model2)
                if pair_wincount1 < 2 and pair_wincount2 < 2:
                    if pair_wincount1 == 1 and pair_wincount2 == 1:
                        # One vote each way: zero agreement
                        agreements.append(0)
                else:
                    majority_wincount = max(pair_wincount1, pair_wincount2)
                    num_pair_annotations = pair_wincount1 + pair_wincount2
                    pair_agreement = majority_wincount / num_pair_annotations
                    agreements.append(pair_agreement)
            total_agreement = np.mean(agreements)
            output.append(
                {
                    'question': question,
                    'matchup': matchup,
                    'model1': model1,
                    'model2': model2,
                    'numwins1': wincount1,
                    'numwins2': wincount2,
                    'winrate1': winrate1,
                    'winrate2': winrate2,
                    'numratings': numratings,
                    'p': p,
                    'stars': stars,
                    'sigf': plevel,
                    'agree': total_agreement,
                }
            )
    output = pd.DataFrame(output)
    # order the columns how we want
    self.significance_df = output[
        [
            'question',
            'matchup',
            'model1',
            'numwins1',
            'winrate1',
            'model2',
            'numwins2',
            'winrate2',
            'numratings',
            'sigf',
            'stars',
            'p',
            'agree',
        ]
    ]
    return self.significance_df
def save_results(self, path: Optional[str] = None):
    """
    Save results to a certain path.

    Writes the full dataframe, the significance and win-fraction grids,
    per-worker rating counts, optional checkbox statistics, and — when a
    pairings file was found — HTML/text conversation visualizations.

    :param path:
        output directory; defaults to self.outdir
    """
    # The significance table is a prerequisite for the outputs below
    if not hasattr(self, 'significance_df'):
        self.get_matchup_totals_with_significance()
    if path is None:
        path = self.outdir
    # Save raw dataframe
    self.dataframe.to_csv(f'{path}/{self.run_id}.full.csv', index=False)
    with open('{}/{}.significance.csv'.format(path, self.run_id), 'w') as f:
        f.write(self.significance_df.to_csv(index=False))
    print(
        'To visualize significance result, try cat {} | column -t -s, | less -S'.format(
            '{}/{}.significance.csv'.format(path, self.run_id)
        )
    )
    with open('{}/{}.grid.csv'.format(path, self.run_id), 'w') as f:
        f.write(self.get_win_fractions().to_csv(index=True))
    # Same grid transposed, for readers who prefer winners as rows
    with open(f'{path}/{self.run_id}.grid.winners_as_rows.csv', 'w') as f:
        f.write(self.get_win_fractions().transpose().to_csv(index=True))
    print(
        'To visualize grid result, try cat {} | column -t -s, | less -S'.format(
            '{}/{}.grid.csv'.format(path, self.run_id)
        )
    )
    # Save stats on how many ratings each worker did
    ratings_per_worker = (
        self.dataframe.groupby('worker')['run_id']
        .count()
        .sort_values(ascending=False)
    )
    ratings_per_worker.to_csv(f'{path}/{self.run_id}.ratings_per_worker.csv')
    # Save stats on how often Turkers selected each checkbox that represents one
    # reason to pick the speaker they did
    if any(col.startswith(self.checkbox_prefix) for col in self.dataframe.columns):
        checkbox_stats_dataframes = self._compile_checkbox_stats()
        for group_type, stats in checkbox_stats_dataframes.items():
            stats.to_csv(f'{path}/{self.run_id}.checkbox_stats.{group_type}.csv')
    # self.pairings is only set when _load_pairing_files found a file
    if not hasattr(self, 'pairings'):
        print('No pairing file found, skipping conversation visualizations.')
    else:
        with open('{}/{}.reason.html'.format(path, self.run_id), 'w') as f:
            f.write(render_conversations_per_matchups(self.dataframe, True).data)
        print(
            'To visualize conversations with reasons only result, '
            'try scp username@devfair:{} to your local machine'.format(
                ' {}/{}.reason.html'.format(path, self.run_id)
            )
        )
        with open('{}/{}.all.html'.format(path, self.run_id), 'w') as f:
            f.write(render_conversations_per_matchups(self.dataframe, False).data)
        print(
            'To visualize conversations result, try scp username@devfair:{}'
            ' to your local machine'.format(
                '{}/{}.all.html'.format(path, self.run_id)
            )
        )
        # Write all pairs of dialogues, as well as the Turkers' choices and reasons, as
        # a text file
        compiled_text = self._compile_convos_and_reasons()
        with open(f'{path}/{self.run_id}.all_convo_pairs.txt', 'w') as f:
            f.write(compiled_text)
class MultiRunAcuteAnalyzer(AcuteAnalyzer):
    """
    Combine results from different ACUTE-Eval runs.
    """

    def __init__(self, opt: Dict, dataframes: Dict[str, pd.DataFrame]):
        """
        Read in and combine the dataframes of other already-analyzed ACUTE-Eval runs.
        """
        self.outdir = opt['outdir']
        ordering = opt.get('model_ordering')
        self.custom_model_ordering = (
            ordering.split(',') if ordering is not None else None
        )
        self.run_id = 'combined'
        # Prepended to checkbox columns in self.dataframe
        self.checkbox_prefix = self.CHECKBOX_PREFIX
        # Overwrite each run's run_id so that results will combine across runs
        for single_run_df in dataframes.values():
            single_run_df.loc[:, 'run_id'] = self.run_id
        self.dataframe = pd.concat(dataframes.values(), axis=0)
        # Check that all results across all runs share the same eval question
        self._check_eval_question()
def get_multi_run_analyzer(opt) -> MultiRunAcuteAnalyzer:
    """
    Return an object to analyze the results of multiple runs simultaneously.

    Load HITs from each run into a separate dataframe, and then pass all dataframes into
    a separate analyzer class that will concatenate them.
    """
    assert (
        opt['outdir'] is not None
    ), '--outdir must be specified when combining results of multiple runs!'
    run_ids = opt['run_ids'].split(',')
    # Each combined analysis gets its own timestamped output folder
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    opt['outdir'] = os.path.join(opt['outdir'], f'combined_runs_{timestamp}')
    os.makedirs(opt['outdir'], exist_ok=True)
    # Save a simple list of all run IDs stitched together
    run_id_list_path = os.path.join(opt['outdir'], 'run_ids.txt')
    with open(run_id_list_path, 'w') as f:
        f.write('\n'.join(run_ids) + '\n')
    # Load the HITs of every run into its own dataframe
    dataframes = {}
    for run_id in run_ids:
        print(f'\nStarting to load HITs for run ID {run_id}.')
        single_run_opt = deepcopy(opt)
        single_run_opt['run_ids'] = run_id
        dataframes[run_id] = AcuteAnalyzer(single_run_opt).dataframe
    return MultiRunAcuteAnalyzer(opt=opt, dataframes=dataframes)
def render_dialogue_html(dialogue):
    """
    Render one dialogue (list of utterance dicts) as an HTML chat transcript.

    Bot turns are right-aligned blue bubbles; human_evaluator /
    other_speaker turns are left-aligned gray bubbles. From the fourth
    turn on, bot speakers are collapsed to the label 'bot'.
    """
    bubbles = []
    for i, turn in enumerate(dialogue):
        speakername = turn['id']
        text = turn['text']
        is_bot = (speakername != 'human_evaluator') and (speakername != 'other_speaker')
        if i > 2 and is_bot:
            speakername = 'bot'
        align = 'right' if is_bot else 'left'
        color = "white" if is_bot else "black"
        bgcolor = '#2391f7' if is_bot else '#e1e1e7'
        bubbles.append(
            (
                '<div style="overflow: auto; padding: 1ex 0;">'
                '<div style="clear: both; float: {}; color: {}; background-color: {}; padding: 0.5em 1em; border-radius: 1em; max-width: 80%">'
                '<p style="margin: 0">{}: {}</p>'
                '</div>'
                '</div>'
            ).format(align, color, bgcolor, speakername, text)
        )
    return (
        '<div style="background-color: white; margin: 0em; padding: 0.5em; '
        'font-family: sans-serif; font-size: 9pt; width: 99%;">'
        + ''.join(bubbles)
        + '</div>'
    )


def render_row(row):
    """
    Render one dataframe row as an HTML table row showing the winning
    conversation, the losing conversation, and the Turker's reason.
    """
    # Deduplication: the two dialogues were previously rendered by two
    # near-identical 20-line loops; both now share render_dialogue_html
    winner_dialogue = render_dialogue_html(row['winner_dialogue']['dialogue'])
    loser_dialogue = render_dialogue_html(row['loser_dialogue']['dialogue'])
    return HTML(
        '<tr><td>{}</td><td>{}</td><td>{}</td></tr>'.format(
            winner_dialogue, loser_dialogue, row['reason']
        )
    )
def render_many_conversations(table):
    """
    Render every row of a dataframe as one HTML comparison table.
    """
    header = '<tr><th>Winner Conversation</th><th>Loser Conversation</th><th>Reason</th></tr>'
    body = ''.join(render_row(row).data for _, row in table.iterrows())
    return HTML('<table>{}</table>'.format(header + body))
def render_conversations_per_matchups(table, force_reasons=True):
    """
    Render up to 10 example conversation pairs for every matchup.

    :param force_reasons:
        if True, only ratings with a non-empty reason are shown
    """
    matchups = list(table.matchup.unique())
    if force_reasons:
        table = table[table['reason'] != '']
    rendered = ''
    for matchup in matchups:
        matchup_rows = table[table['matchup'] == matchup]
        sample = matchup_rows[: min(10, len(matchup_rows))]
        rendered += '<h2>{}</h2><body>{}</body>'.format(
            matchup, render_many_conversations(sample).data
        )
    return HTML(rendered)
if __name__ == "__main__":
parser = setup_args()
opt_ = parser.parse_args()
if ',' not in opt_['run_ids']:
analyzer = AcuteAnalyzer(opt_)
else:
analyzer = get_multi_run_analyzer(opt_)
analyzer.save_results()
# Print win fractions
results = pd.DataFrame(analyzer.get_win_fractions())
print(results.round(2).to_string())
# Print matchup totals with significance
result_ = pd.DataFrame(analyzer.get_matchup_totals_with_significance())
result_ = result_.drop(columns=['matchup', 'agree'])
print(result_.round(2).to_string())
# Fix (#4413): corrected revision of this analyzer module follows below.
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
ACUTE-Eval Analyzer.
FOR ANALYSIS!!
"""
import hashlib
import json
import os
from copy import deepcopy
from datetime import datetime
from typing import Dict, Any, List, Optional
import numpy as np
import pandas as pd
from IPython.core.display import HTML
from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.data_model.unit import Unit as MephistoUnit
from mephisto.data_model.worker import Worker
from scipy.stats import binom_test
from parlai.core.params import ParlaiParser
from parlai.crowdsourcing.tasks.acute_eval.acute_eval_blueprint import (
BLUEPRINT_TYPE as ACUTE_EVAL_BLUEPRINT_TYPE,
)
from parlai.crowdsourcing.tasks.acute_eval.fast_acute_blueprint import (
FAST_ACUTE_BLUEPRINT_TYPE,
)
# To register the ACUTE-Eval and Fast ACUTE blueprints
from parlai.crowdsourcing.tasks.acute_eval.util import get_hashed_combo_path
_ = ACUTE_EVAL_BLUEPRINT_TYPE
_ = FAST_ACUTE_BLUEPRINT_TYPE
# TODO: blueprint type strings need to be imported here to register the blueprints -
# find a better way to scale up when there are many more subclassed ACUTE blueprints
# throw away turkers below this threshold
AGREEMENT_THRESHOLD = 0.8
# do we count ties as agreements?
AGREEMENT_TIES_OKAY = False
# NOTE: these could be added as flags if desired
def setup_args():
    """
    Build the argument parser for ACUTE-Eval analysis.
    """
    parser = ParlaiParser(False, False)
    # All arguments share type=str and default=None, so declare them
    # table-driven: (flags, help message)
    string_args = [
        (['-ids', '--run-ids'], 'Comma-separated list of run IDs to analyze'),
        (['--root-dir'], 'Optional root ACUTE-Eval save directory'),
        (['--outdir'], 'Where to save the results'),
        (
            ['--pairings-filepath'],
            'Path to the ACUTE analysis pairs for the corresponding run id',
        ),
        (['--mephisto-root'], 'Where to check for mephisto data (default own dir)'),
        (
            ['--model-ordering'],
            'Comma-separated list of models, in the order in which to display them',
        ),
    ]
    for flags, help_msg in string_args:
        parser.add_argument(*flags, type=str, default=None, help=help_msg)
    return parser
class AcuteAnalyzer(object):
"""
Analyzer.
Given a run_id, we can do lots of fun things!
"""
CHECKBOX_PREFIX = 'checkbox: '
# Prepended to checkbox columns in self.dataframe
def __init__(self, opt: Dict, remove_failed: bool = True):
"""
Initialize the analyzer.
Builds up the dataframe
:param opt:
opt dict
:param remove_failed:
Whether to remove ratings from turkers who failed onboarding
"""
assert ',' not in opt['run_ids'], "AcuteAnalyzer can only handle one run ID!"
self.run_id = opt['run_ids']
self.pairings_filepath = opt['pairings_filepath']
self.outdir = opt['outdir']
self.root_dir = opt['root_dir']
# Get task for loading pairing files
self.task = opt.get('task', 'q')
if opt.get('model_ordering') is not None:
self.custom_model_ordering = opt['model_ordering'].split(',')
else:
self.custom_model_ordering = None
if not self.outdir or not self.pairings_filepath:
# Default to using self.root_dir as the root directory for outputs
assert self.root_dir is not None and os.path.isdir(
self.root_dir
), '--root-dir must be a real directory!'
if not self.pairings_filepath:
# Will be set to a non-empty path later
self.pairings_filepath = ''
if not self.outdir:
self.outdir = os.path.join(self.root_dir, f'{self.run_id}-results')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir, exist_ok=True)
mephisto_root_path = opt['mephisto_root']
if not mephisto_root_path:
mephisto_root_path = None
self.mephisto_db = LocalMephistoDB(database_path=mephisto_root_path)
self.mephisto_data_browser = MephistoDataBrowser(db=self.mephisto_db)
self.checkbox_prefix = self.CHECKBOX_PREFIX
# Prepended to checkbox columns in self.dataframe
self.dataframe = self._extract_to_dataframe()
self._check_eval_question()
if remove_failed:
self._remove_failed_onboarding()
if self.dataframe.index.size == 0:
raise ValueError('No valid results found!')
self._get_model_nick_names()
self._load_pairing_files()
def _extract_response_by_index(
self, unit_details: Dict[str, Any], idx: int
) -> Optional[Dict[str, Any]]:
"""
Extract response data from task data.
:param unit_details:
full extracted data from a unit
:param idx:
index of the singular evaluation within unit_details to extract
:return response:
Formatted worker's response data from the task
"""
task_data = unit_details['data'][idx]
response: Dict[str, Any] = {
'run_id': self.run_id,
'worker': unit_details['worker_id'],
'worker_name': Worker.get(
self.mephisto_db, unit_details['worker_id']
).worker_name,
'time_taken': unit_details['task_end'] - unit_details['task_start'],
'question': task_data['task_specs']['question'],
'unit_id': unit_details['unit_id'],
'task_start': unit_details['task_start'],
}
onboarding = task_data['task_specs'].get('is_onboarding', False)
if 'speakerChoice' not in task_data or task_data['speakerChoice'] == '':
print('speakerChoice not in task data!')
return
choice = task_data['speakerChoice']
if onboarding:
response['correct'] = choice == task_data['pairing_dict']['correct_answer']
else:
response['correct'] = -1
speakers_to_eval = sorted(task_data["pairing_dict"]["speakers_to_eval"])
response.update(
{
'winner': choice,
'loser': speakers_to_eval[1 - (speakers_to_eval.index(choice))],
'eval_choice_0': speakers_to_eval[0],
'eval_choice_1': speakers_to_eval[1],
'reason': task_data['textReason'],
'is_onboarding': onboarding,
'matchup': f"{'__vs__'.join(speakers_to_eval)}",
'pairing_id': task_data['pair_id'],
}
)
# If it exists, add in which checkboxes of possible reasons the Turkers checked
if len(task_data.get('speakerReasons', {})) > 0:
response.update(
{
self.checkbox_prefix + reason: checked
for reason, checked in task_data['speakerReasons'].items()
}
)
return response
def _parse_unit(self, unit: MephistoUnit) -> Optional[Dict[str, Any]]:
    """
    Fetch the formatted data for one completed unit.

    Prints a warning and returns None when the unit's data cannot be
    retrieved (e.g. the unit is corrupt or missing from the database).
    """
    try:
        unit_data = self.mephisto_data_browser.get_data_from_unit(unit)
    except AssertionError:
        print(
            f"WARNING: Data for run_id `{self.run_id}` not found for "
            f"unit id {unit.db_id}"
        )
        return None
    return unit_data
def _extract_to_dataframe(self) -> pd.DataFrame:
    """
    Collect every valid response for this run into a pandas dataframe.

    Raises ValueError when no valid responses are found at all.
    """
    responses: List[Dict[str, Any]] = []
    for unit in self.mephisto_data_browser.get_units_for_task_name(self.run_id):
        unit_details = self._parse_unit(unit)
        if unit_details is None:
            # Corrupt/missing unit; a warning was already printed
            continue
        extracted = (
            self._extract_response_by_index(unit_details, idx)
            for idx in range(len(unit_details['data']))
        )
        responses.extend(r for r in extracted if r is not None)
    if not responses:
        raise ValueError('No valid results found!')
    return pd.DataFrame(responses)
def _check_eval_question(self):
    """
    Verify that one and the same eval question was used for every result.
    """
    distinct_questions = set(self.dataframe['question'].unique())
    if len(distinct_questions) > 1:
        raise ValueError(
            'All results must share the same eval question for consistency!'
        )
def _remove_failed_onboarding(self):
    """
    Remove workers who failed onboarding.

    Drops both the onboarding rows themselves and every rating produced
    by a worker who answered any onboarding pair incorrectly.
    """
    df = self.dataframe
    # 'correct' holds -1 for non-onboarding rows, so compare against the
    # boolean False explicitly (hence the noqa for E712)
    all_workers_failing_onboarding = df.loc[
        df['is_onboarding'] & (df['correct'] == False), 'worker'  # noqa: E712
    ].values
    workers_failing_onboarding = sorted(
        np.unique(all_workers_failing_onboarding).tolist()
    )
    self.dataframe = df[
        ~df["worker"].isin(workers_failing_onboarding) & ~df["is_onboarding"]
    ]
    # index.size counts remaining rows (ratings), not cells
    print(
        f'{self.dataframe.index.size:d} dataframe entries remaining after removing users who failed onboarding.'
    )
def _load_pairing_files(self):
df = self.dataframe
if not os.path.exists(self.pairings_filepath):
print('No valid pairings filepath was passed in: will extract likely path.')
self.pairings_filepath = get_hashed_combo_path(
root_dir=self.root_dir,
subdir='pairings_files',
task=self.task,
combos=self.combos,
)
if not os.path.exists(self.pairings_filepath):
print(
f'WARNING: Pairings filepath {self.pairings_filepath} could not be found.'
)
self.pairings_filepath = os.path.join(
self.root_dir,
'pairings_files',
hashlib.sha1(
'___vs___'.join(
[f"{m}.{'q'.replace(':', '_')}" for m in self.models]
).encode('utf-8')
).hexdigest()[:10],
)
if not os.path.exists(self.pairings_filepath):
# For backward compatibility
print(
f'WARNING: Pairings filepath {self.pairings_filepath} could not be found.'
)
self.pairings_filepath = os.path.join(
self.root_dir,
'pairings_files',
'___vs___'.join(
[f"{m}.{self.task.replace(':', '_')}" for m in self.models]
),
)
if not os.path.exists(self.pairings_filepath):
print(
f'NOTE: Pairings filepath {self.pairings_filepath} could not be found!'
)
return
self.pairings = []
with open(self.pairings_filepath, 'r') as f:
for line in f:
pair = json.loads(line)
model1, model2 = pair['speakers_to_eval']
pair[model1] = pair['dialogue_dicts'][0]
pair[model2] = pair['dialogue_dicts'][1]
del pair['dialogue_dicts']
self.pairings.append(pair)
self.pairs_to_eval = [self.pairings[i] for i in df.pairing_id.values.tolist()]
# Build dialogue_ids => dialogue mappings
winner_dialogues = []
loser_dialogues = []
for i, (_, row) in enumerate(df.iterrows()):
winner = row['winner']
loser = row['loser']
winner_dialogues.append(self.pairs_to_eval[i][winner])
loser_dialogues.append(self.pairs_to_eval[i][loser])
df['pairs_to_eval'] = pd.Series(self.pairs_to_eval, index=df.index)
df['winner_dialogue'] = pd.Series(winner_dialogues, index=df.index)
df['loser_dialogue'] = pd.Series(loser_dialogues, index=df.index)
self.dataframe = df
def _get_model_nick_names(self):
    """
    Derive the sorted model list and model-pair combos from the matchups.

    Sets self.models (sorted model names) and self.combos (sorted,
    canonically-ordered model pairs).
    """
    current_run = self.dataframe[self.dataframe['run_id'] == self.run_id]
    models = set()
    combos = set()
    for matchup in current_run.matchup.unique():
        model1, model2 = matchup.split('__vs__')
        models.update((model1, model2))
        combos.add(tuple(sorted((model1, model2))))
    self.models = sorted(models)
    self.combos = sorted(combos)
def get_reasons(self) -> List[str]:
    """
    Return the free-text reason from every rating as a list.
    """
    return list(self.dataframe['reason'].values)
def get_max_hits_per_worker(self) -> int:
    """
    Return the maximum number of ratings performed by any single worker.
    """
    # groupby('worker')['run_id'].count() gives ratings per worker; .max()
    # reduces that to one scalar, so the original List[int] return
    # annotation was inaccurate and has been corrected to int.
    return self.dataframe.groupby('worker')['run_id'].count().max()
def get_wins_per_model_matchup(self) -> pd.DataFrame:
"""
Return the wins for each model by matchup.
"""
self.matchup_total_df = (
self.dataframe.groupby(['eval_choice_0', 'eval_choice_1'])['run_id']
.count()
.to_frame('matchup_total')
)
self.win_total_df = (
self.dataframe.groupby(
['eval_choice_0', 'eval_choice_1', 'winner', 'loser']
)['loser']
.count()
.to_frame('win_total')
.reset_index()
.set_index(['eval_choice_0', 'eval_choice_1'])
)
return self.win_total_df
def get_win_fractions(self) -> pd.DataFrame:
"""
Return the joined matchup + win totals, get win fractions.
Sorted according to win percentage
"""
if not hasattr(self, 'win_total_df'):
self.get_wins_per_model_matchup()
self.win_fraction_df = self.matchup_total_df.join(self.win_total_df).assign(
win_frac=lambda df: df['win_total'] / df['matchup_total']
)
pivoted_df = self.win_fraction_df.pivot(
index="loser", columns="winner", values="win_frac"
)
if self.custom_model_ordering is not None:
# Use the ordering of the models supplied by the user
assert set(self.custom_model_ordering) == set(pivoted_df.columns)
self.model_ordering = self.custom_model_ordering
else:
self.model_ordering = (
self.win_fraction_df.groupby("winner")["win_frac"]
.mean()
.sort_values()
.index.values.tolist()
)
self.sorted_win_frac_df = pivoted_df.reindex(
index=self.model_ordering, columns=self.model_ordering
)
return self.sorted_win_frac_df
def get_num_hits_per_matchup(self):
"""
Return the number of hits per matchup.
"""
matchup_total_1_df = self.matchup_total_df.reset_index()
matchup_total_2_df = matchup_total_1_df.rename(
columns={'eval_choice_0': 'eval_choice_1', 'eval_choice_1': 'eval_choice_0'}
)
self.num_hits_per_matchup_df = (
pd.concat([matchup_total_1_df, matchup_total_2_df], axis=0)
.pivot(
index='eval_choice_0', columns='eval_choice_1', values='matchup_total'
)
.reindex(index=self.model_ordering, columns=self.model_ordering)
)
return self.num_hits_per_matchup_df
    def _compile_checkbox_stats(self) -> Dict[str, pd.DataFrame]:
        """
        Return the fraction of time that Turkers selected each checkbox.

        Results are cut both (1) by matchup and winner and (2) by just the
        winner; the returned dict maps the cut name to its DataFrame. Each
        checkbox represents one reason that the Turkers could have chosen the
        speaker that they did.
        """
        checkbox_columns = [
            col
            for col in self.dataframe.columns
            if col.startswith(self.checkbox_prefix)
        ]
        # Two cuts of the same statistics: per (matchup, winner) and per winner.
        group_column_types = {
            'matchup_and_winner': ['matchup', 'winner'],
            'winner': ['winner'],
        }
        grouped_dataframes = {}
        for group_type, group_columns in group_column_types.items():
            # Strip the checkbox prefix from column names; unanswered
            # checkboxes (NaN) count as not selected.
            selected_columns = (
                self.dataframe[group_columns + checkbox_columns]
                .rename(
                    columns={
                        col: col[len(self.checkbox_prefix) :]
                        for col in checkbox_columns
                    }
                )
                .set_index(group_columns)
                .fillna(False)
            )
            # Mean of booleans = selection rate per group.
            grouped_dataframes[group_type] = selected_columns.groupby(
                group_columns
            ).mean()
        return grouped_dataframes
    def _compile_convos_and_reasons(self) -> str:
        """
        Create a human-readable string of all pairs of conversations, as well
        as which conversation each Turker chose and their reason for choosing
        it.
        """
        pairing_outputs = []
        for _, pairing_sr in self.dataframe.iterrows():
            winning_dialogue = self._dialogue_to_string(
                pairing_sr['winner_dialogue']['dialogue']
            )
            loser_dialogue = self._dialogue_to_string(
                pairing_sr['loser_dialogue']['dialogue']
            )
            # NOTE: the template lines below are deliberately unindented so the
            # emitted text file has no leading whitespace.
            pairing_output = f"""CONVO PAIR ID: {pairing_sr['pairing_id']}
WINNING DIALOGUE: {pairing_sr['winner']}
{winning_dialogue}
LOSING DIALOGUE: {pairing_sr['loser']}
{loser_dialogue}
QUESTION: {pairing_sr['question']}
TURKER'S CHOICE: {pairing_sr['winner']}
REASON: {pairing_sr['reason']}
"""
            pairing_outputs.append(pairing_output)
        return ''.join(pairing_outputs)
@staticmethod
def _dialogue_to_string(dialogue: List[dict]) -> str:
"""
Convert a list of dictionaries into a human-readable conversation.
Each dictionary represents one utterance.
"""
utterance_strings = []
for utterance_dict in dialogue:
if utterance_dict["id"] == "human_evaluator":
speaker_string = "HUMAN"
else:
speaker_string = utterance_dict["id"]
utterance = utterance_dict["text"]
utterance_strings.append(f"[{speaker_string}]: {utterance}")
return "\n".join(utterance_strings)
    def get_matchup_totals_with_significance(self) -> pd.DataFrame:
        """
        Return dataframe with matchup win totals + significance.

        For every (run, matchup) the win counts, win rates, a two-model
        binomial test p-value, significance stars, and mean within-pair
        annotator agreement are computed. The result is cached on
        `self.significance_df`.
        """
        def _signf_level(p):
            # Map a p-value to (stars, human-readable level).
            if p < 0.001:
                return "***", "p<.001"
            elif p < 0.01:
                return "**", "p<.01"
            elif p < 0.05:
                return "*", "p<.05"
            else:
                return "", "p>.05"
        output = []
        for _, run_annotations in self.dataframe.groupby('run_id'):
            # All rows of a run share one eval question; take the first.
            question = list(run_annotations.question)[0]
            for matchup, annotations in run_annotations.groupby('matchup'):
                model1, model2 = matchup.split('__vs__')
                wincount1 = np.sum(annotations['winner'] == model1)
                wincount2 = np.sum(annotations['winner'] == model2)
                numratings = wincount1 + wincount2
                winrate1 = np.mean(annotations['winner'] == model1)
                winrate2 = np.mean(annotations['winner'] == model2)
                # Two-sided binomial test against a 50/50 split.
                # NOTE(review): presumably scipy.stats.binom_test — imported
                # above this chunk; deprecated in newer scipy. Confirm import.
                p = binom_test([wincount1, wincount2])
                stars, plevel = _signf_level(p)
                # Per-pairing agreement: fraction of annotators who picked the
                # majority side of each conversation pair.
                agreements = []
                for _, pairing_annotations in annotations.groupby('pairing_id'):
                    pair_wincount1 = np.sum(pairing_annotations['winner'] == model1)
                    pair_wincount2 = np.sum(pairing_annotations['winner'] == model2)
                    if pair_wincount1 < 2 and pair_wincount2 < 2:
                        # With <=1 vote per side, agreement is only defined for
                        # an exact 1-1 split (counted as full disagreement, 0).
                        if pair_wincount1 == 1 and pair_wincount2 == 1:
                            agreements.append(0)
                    else:
                        majority_wincount = max(pair_wincount1, pair_wincount2)
                        num_pair_annotations = pair_wincount1 + pair_wincount2
                        pair_agreement = majority_wincount / num_pair_annotations
                        agreements.append(pair_agreement)
                total_agreement = np.mean(agreements)
                output.append(
                    {
                        'question': question,
                        'matchup': matchup,
                        'model1': model1,
                        'model2': model2,
                        'numwins1': wincount1,
                        'numwins2': wincount2,
                        'winrate1': winrate1,
                        'winrate2': winrate2,
                        'numratings': numratings,
                        'p': p,
                        'stars': stars,
                        'sigf': plevel,
                        'agree': total_agreement,
                    }
                )
        output = pd.DataFrame(output)
        # order the columns how we want
        self.significance_df = output[
            [
                'question',
                'matchup',
                'model1',
                'numwins1',
                'winrate1',
                'model2',
                'numwins2',
                'winrate2',
                'numratings',
                'sigf',
                'stars',
                'p',
                'agree',
            ]
        ]
        return self.significance_df
    def save_results(self, path: str = None):
        """
        Save all analysis artifacts (CSV, HTML, and text files) under `path`.

        Falls back to `self.outdir` when `path` is None. Writes: the raw
        dataframe, significance table, win-fraction grids, per-worker rating
        counts, checkbox stats (if present), and — when pairing data is
        available — HTML/text conversation visualizations.
        """
        if not hasattr(self, 'significance_df'):
            self.get_matchup_totals_with_significance()
        if path is None:
            path = self.outdir
        # Save raw dataframe
        self.dataframe.to_csv(f'{path}/{self.run_id}.full.csv', index=False)
        # Significance table, plus a shell one-liner hint for viewing it.
        with open('{}/{}.significance.csv'.format(path, self.run_id), 'w') as f:
            f.write(self.significance_df.to_csv(index=False))
        print(
            'To visualize significance result, try cat {} | column -t -s, | less -S'.format(
                '{}/{}.significance.csv'.format(path, self.run_id)
            )
        )
        # Win-fraction grid, in both orientations.
        with open('{}/{}.grid.csv'.format(path, self.run_id), 'w') as f:
            f.write(self.get_win_fractions().to_csv(index=True))
        with open(f'{path}/{self.run_id}.grid.winners_as_rows.csv', 'w') as f:
            f.write(self.get_win_fractions().transpose().to_csv(index=True))
        print(
            'To visualize grid result, try cat {} | column -t -s, | less -S'.format(
                '{}/{}.grid.csv'.format(path, self.run_id)
            )
        )
        # Save stats on how many ratings each worker did
        ratings_per_worker = (
            self.dataframe.groupby('worker')['run_id']
            .count()
            .sort_values(ascending=False)
        )
        ratings_per_worker.to_csv(f'{path}/{self.run_id}.ratings_per_worker.csv')
        # Save stats on how often Turkers selected each checkbox that represents one
        # reason to pick the speaker they did
        if any(col.startswith(self.checkbox_prefix) for col in self.dataframe.columns):
            checkbox_stats_dataframes = self._compile_checkbox_stats()
            for group_type, stats in checkbox_stats_dataframes.items():
                stats.to_csv(f'{path}/{self.run_id}.checkbox_stats.{group_type}.csv')
        if not hasattr(self, 'pairings'):
            print('No pairing file found, skipping conversation visualizations.')
        else:
            # HTML of conversation pairs where a reason was given.
            with open('{}/{}.reason.html'.format(path, self.run_id), 'w') as f:
                f.write(render_conversations_per_matchups(self.dataframe, True).data)
            print(
                'To visualize conversations with reasons only result, '
                'try scp username@devfair:{} to your local machine'.format(
                    ' {}/{}.reason.html'.format(path, self.run_id)
                )
            )
            # HTML of all conversation pairs.
            with open('{}/{}.all.html'.format(path, self.run_id), 'w') as f:
                f.write(render_conversations_per_matchups(self.dataframe, False).data)
            print(
                'To visualize conversations result, try scp username@devfair:{}'
                ' to your local machine'.format(
                    '{}/{}.all.html'.format(path, self.run_id)
                )
            )
            # Write all pairs of dialogues, as well as the Turkers' choices and reasons, as
            # a text file
            compiled_text = self._compile_convos_and_reasons()
            with open(f'{path}/{self.run_id}.all_convo_pairs.txt', 'w') as f:
                f.write(compiled_text)
class MultiRunAcuteAnalyzer(AcuteAnalyzer):
    """
    Analyzer that combines results from several ACUTE-Eval runs into one.
    """

    def __init__(self, opt: Dict, dataframes: Dict[str, pd.DataFrame]):
        """
        Concatenate the dataframes of already-analyzed ACUTE-Eval runs.
        """
        self.outdir = opt['outdir']
        ordering = opt.get('model_ordering')
        self.custom_model_ordering = (
            ordering.split(',') if ordering is not None else None
        )
        self.run_id = 'combined'
        # Prepended to checkbox columns in self.dataframe
        self.checkbox_prefix = self.CHECKBOX_PREFIX
        # Overwrite each run's run_id so that results combine across runs.
        for frame in dataframes.values():
            frame.loc[:, 'run_id'] = self.run_id
        self.dataframe = pd.concat(dataframes.values(), axis=0)
        # Combining only makes sense if every run used the same eval question.
        self._check_eval_question()
def get_multi_run_analyzer(opt) -> MultiRunAcuteAnalyzer:
    """
    Return an object that analyzes the results of multiple runs at once.

    Loads each run's HITs into its own dataframe via AcuteAnalyzer, then hands
    all dataframes to MultiRunAcuteAnalyzer, which concatenates them.
    """
    run_ids = opt['run_ids'].split(',')

    # A combined output directory is mandatory; results go in a timestamped subdir.
    assert (
        opt['outdir'] is not None
    ), '--outdir must be specified when combining results of multiple runs!'
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    opt['outdir'] = os.path.join(opt['outdir'], f'combined_runs_{timestamp}')
    os.makedirs(opt['outdir'], exist_ok=True)

    # Record which run IDs went into this combined analysis.
    with open(os.path.join(opt['outdir'], 'run_ids.txt'), 'w') as f:
        f.writelines(run_id + '\n' for run_id in run_ids)

    # Load the HITs of every run into its own dataframe.
    dataframes = {}
    for run_id in run_ids:
        print(f'\nStarting to load HITs for run ID {run_id}.')
        single_run_opt = deepcopy(opt)
        single_run_opt['run_ids'] = run_id
        dataframes[run_id] = AcuteAnalyzer(single_run_opt).dataframe

    return MultiRunAcuteAnalyzer(opt=opt, dataframes=dataframes)
def render_row(row):
    """
    Render one winner/loser conversation pair as an HTML table row.

    `row` is a dataframe row with 'winner_dialogue', 'loser_dialogue', and
    'reason' entries; returns an IPython HTML object wrapping a <tr>.
    """

    def _render_dialogue(dialogue):
        # Fix: the original duplicated this loop nearly verbatim for the winner
        # and loser dialogues; render both through one helper instead.
        # Bot turns are right-aligned blue bubbles; human turns left-aligned gray.
        bubbles = []
        for i, turn in enumerate(dialogue):
            speakername = turn['id']
            text = turn['text']
            is_bot = (speakername != 'human_evaluator') and (
                speakername != 'other_speaker'
            )
            if i > 2 and is_bot:
                # Anonymize the bot's name after the first few turns.
                speakername = 'bot'
            align = 'right' if is_bot else 'left'
            color = "white" if is_bot else "black"
            bgcolor = '#2391f7' if is_bot else '#e1e1e7'
            bubbles.append(
                (
                    '<div style="overflow: auto; padding: 1ex 0;">'
                    '<div style="clear: both; float: {}; color: {}; background-color: {}; padding: 0.5em 1em; border-radius: 1em; max-width: 80%">'
                    '<p style="margin: 0">{}: {}</p>'
                    '</div>'
                    '</div>'
                ).format(align, color, bgcolor, speakername, text)
            )
        return (
            '<div style="background-color: white; margin: 0em; padding: 0.5em; '
            'font-family: sans-serif; font-size: 9pt; width: 99%;">'
            + ''.join(bubbles)
            + '</div>'
        )

    winner_dialogue = _render_dialogue(row['winner_dialogue']['dialogue'])
    loser_dialogue = _render_dialogue(row['loser_dialogue']['dialogue'])
    return HTML(
        '<tr><td>{}</td><td>{}</td><td>{}</td></tr>'.format(
            winner_dialogue, loser_dialogue, row['reason']
        )
    )
def render_many_conversations(table):
    """
    Render every row of `table` as one HTML table of winner/loser pairs.
    """
    rows_html = ''.join(render_row(row).data for _, row in table.iterrows())
    header = (
        '<table><tr><th>Winner Conversation</th>'
        '<th>Loser Conversation</th><th>Reason</th></tr>'
    )
    return HTML(header + rows_html + '</table>')
def render_conversations_per_matchups(table, force_reasons=True):
    """
    Render up to 10 conversation pairs per matchup as HTML.

    When `force_reasons` is True, only pairs where the Turker gave a
    non-empty reason are shown.
    """
    # Capture the matchup list before filtering so all matchups get a section.
    matchups = list(table.matchup.unique())
    if force_reasons:
        table = table[table['reason'] != '']
    sections = []
    for matchup in matchups:
        subset = table[table['matchup'] == matchup]
        length = min(10, len(subset))
        sections.append(
            '<h2>{}</h2><body>{}</body>'.format(
                matchup, render_many_conversations(subset[:length]).data
            )
        )
    return HTML(''.join(sections))
if __name__ == "__main__":
    opt_ = setup_args().parse_args()
    # Multiple comma-separated run IDs get the combined multi-run analyzer.
    if ',' in opt_['run_ids']:
        analyzer = get_multi_run_analyzer(opt_)
    else:
        analyzer = AcuteAnalyzer(opt_)
    analyzer.save_results()
    # Print win fractions
    win_fractions = pd.DataFrame(analyzer.get_win_fractions())
    print(win_fractions.round(2).to_string())
    # Print matchup totals with significance
    significance = pd.DataFrame(analyzer.get_matchup_totals_with_significance())
    significance = significance.drop(columns=['matchup', 'agree'])
    print(significance.round(2).to_string())
|
#!/usr/bin/env AFDKOPython
# encoding: UTF-8
from __future__ import division, absolute_import, print_function, unicode_literals
import os, collections, itertools, re
import WriteFeaturesKernFDK, WriteFeaturesMarkFDK
import hindkit as kit
class BaseFeature(kit.BaseFile):
    """Base class for generated FEA feature files."""

    def __init__(self, project, name, style, filename_group):
        # Style-specific features live in the style's directory; shared ones
        # in the project-wide features directory.
        if style:
            directory = style.abstract_directory
        else:
            directory = kit.Project.directories["features"]
        super(BaseFeature, self).__init__(
            name,
            file_format = "FEA",
            project = project,
            filename_group = filename_group,
            abstract_directory = directory,
        )
        self.style = style

    @staticmethod
    def sort_names(names, order):
        """Sort `names` by `order`; names missing from `order` go last, as-is."""
        known = [i for i in order if i in names]
        unknown = [i for i in names if i not in order]
        return known + unknown

    @staticmethod
    def compose_glyph_class_def_lines(class_name, glyph_names):
        """Return FEA lines defining @class_name; a commented stub when empty."""
        if not glyph_names:
            return ["# @{} = [];".format(class_name), ""]
        lines = ["@{} = [".format(class_name)]
        lines.extend("  {}".format(glyph_name) for glyph_name in glyph_names)
        lines.extend(["];", ""])
        return lines
class FeatureClasses(BaseFeature):
    """
    Generates the FEA glyph-class definitions (classes) file.
    """
    def generate(self):
        """
        Build glyph classes from the first product's font and write them out.

        Class membership is computed with the kit.filters predicates, sorted
        by the project's glyph order, saved into every product font's groups,
        and written to `self.path` as FEA class definitions.
        """
        lines = []
        if self.project.options["prepare_mark_positioning"]:
            glyph_classes = []
            glyph_classes.extend([(WriteFeaturesMarkFDK.kCombMarksClassName, kit.filters.marks)])
            if self.project.options["match_mI_variants"]:
                # Extra classes needed for mI-variant matching.
                glyph_classes.extend([
                    (FeatureMatches.CLASS_NAME_mI_VARIANTS, kit.filters.mI_variants),
                    (FeatureMatches.CLASS_NAME_BASES_ALIVE, kit.filters.bases_alive),
                    (FeatureMatches.CLASS_NAME_BASES_DEAD, kit.filters.bases_dead),
                    (FeatureMatches.CLASS_NAME_BASES_FOR_LONG_mI, kit.filters.bases_for_long_mII),
                ])
            # Classes are derived from the first product's font only, then
            # copied into the remaining fonts below.
            font_0 = self.project.products[0].style.open()
            glyph_order = self.project.glyph_data.glyph_order
            for class_name, filter_function in glyph_classes:
                glyph_names = [
                    glyph.name for glyph in filter(
                        lambda glyph: filter_function(self.project.family, glyph),
                        font_0,
                    )
                ]
                glyph_names = self.sort_names(glyph_names, glyph_order)
                font_0.groups.update({class_name: glyph_names})
                lines.extend(
                    self.compose_glyph_class_def_lines(class_name, glyph_names)
                )
            font_0.save()
            # Propagate the computed groups to every other product's font.
            for style in (i.style for i in self.project.products[1:]):
                font = style.open()
                font.groups.update(font_0.groups)
                font.save()
        if lines:
            with open(self.path, "w") as f:
                f.writelines(i + "\n" for i in lines)
class FeatureTables(BaseFeature):
    """
    Generates the FEA `table` blocks (hhea, OS/2, GDEF, name).
    """
    def generate(self):
        """
        Assemble per-table FEA lines from family/client data and write them.

        Vertical metrics are filled in with defaults and derived per the
        client's strategy ("Google Fonts" or "ITF") when any metric is set.
        """
        info = self.project.family.info
        client = self.project.family.client
        lines = []
        tables = collections.OrderedDict([
            ("hhea", []),
            ("OS/2", []),
            ("GDEF", []),
            ("name", []),
        ])
        tables["OS/2"].append("include (WeightClass.fea);")
        tables["OS/2"].append("fsType {};".format(client.tables["OS/2"]["fsType"]))
        # Union of the script's Unicode range bit plus any configured extras;
        # falsy entries are dropped.
        unicode_range_bits = set(
            i for i in
            [self.project.family.script.unicode_range_bit] +
            self.project.options["additional_unicode_range_bits"]
            if i
        )
        if unicode_range_bits:
            tables["OS/2"].append(
                "UnicodeRange {};".format(
                    " ".join(str(i) for i in sorted(unicode_range_bits))
                )
            )
        vender_id = client.tables["OS/2"]["Vendor"]
        if vender_id:
            tables["OS/2"].append("Vendor \"{}\";".format(vender_id))
        # Only emit vertical metrics if at least one of them is explicitly set.
        set_vertical_metrics = False
        for field in (
            info.openTypeHheaAscender,
            info.openTypeHheaDescender,
            info.openTypeHheaLineGap,
            info.openTypeOS2TypoAscender,
            info.openTypeOS2TypoDescender,
            info.openTypeOS2TypoLineGap,
            info.openTypeOS2WinAscent,
            info.openTypeOS2WinDescent,
        ):
            if field is not None:
                set_vertical_metrics = True
                break
        if set_vertical_metrics:
            if info.unitsPerEm is None:
                raise SystemExit("`family.info.unitsPerEm` is unavailable.")
            # Fall back to fixed hhea defaults for anything unset.
            if info.openTypeHheaAscender is None:
                info.openTypeHheaAscender = 800
            if info.openTypeHheaDescender is None:
                info.openTypeHheaDescender = -200
            if info.openTypeHheaLineGap is None:
                info.openTypeHheaLineGap = 0
            if client.vertical_metrics_strategy == "Google Fonts":
                # Typo metrics mirror the hhea metrics directly.
                if info.openTypeOS2TypoAscender is None:
                    info.openTypeOS2TypoAscender = info.openTypeHheaAscender
                if info.openTypeOS2TypoDescender is None:
                    info.openTypeOS2TypoDescender = info.openTypeHheaDescender
                if info.openTypeOS2TypoLineGap is None:
                    info.openTypeOS2TypoLineGap = info.openTypeHheaLineGap
            elif client.vertical_metrics_strategy == "ITF":
                # Distribute the overshoot beyond one em between typo ascender
                # and line gap.
                extra_height = info.openTypeHheaAscender - info.openTypeHheaDescender - info.unitsPerEm
                if info.openTypeOS2TypoAscender is None:
                    info.openTypeOS2TypoAscender = info.openTypeHheaAscender - int(round(extra_height / 2))
                if info.openTypeOS2TypoDescender is None:
                    info.openTypeOS2TypoDescender = info.openTypeOS2TypoAscender - info.unitsPerEm
                if info.openTypeOS2TypoLineGap is None:
                    info.openTypeOS2TypoLineGap = info.openTypeHheaLineGap + extra_height
            if info.openTypeOS2WinAscent is None:
                info.openTypeOS2WinAscent = info.openTypeHheaAscender
            if info.openTypeOS2WinDescent is None:
                # winDescent is expressed as a positive magnitude.
                info.openTypeOS2WinDescent = abs(info.openTypeHheaDescender)
            tables["hhea"].extend([
                "Ascender {};".format(info.openTypeHheaAscender),
                "Descender {};".format(info.openTypeHheaDescender),
                "LineGap {};".format(info.openTypeHheaLineGap),
            ])
            tables["OS/2"].extend([
                "TypoAscender {};".format(info.openTypeOS2TypoAscender),
                "TypoDescender {};".format(info.openTypeOS2TypoDescender),
                "TypoLineGap {};".format(info.openTypeOS2TypoLineGap),
                "winAscent {};".format(info.openTypeOS2WinAscent),
                "winDescent {};".format(info.openTypeOS2WinDescent),
            ])
        code_pages = set(
            i for i in self.project.options["additional_code_pages"] if i
        )
        if code_pages:
            tables["OS/2"].append(
                "CodePageRange {};".format(
                    " ".join(str(i) for i in sorted(code_pages))
                )
            )
        if self.project.options["override_GDEF"]:
            GDEF_records = {
                "bases": "",
                "ligatures": "",
                "marks": "",
                "components": "",
            }
            if self.project.options["prepare_mark_positioning"] or os.path.exists(os.path.join(self.directory, "classes.fea")):
                GDEF_records["marks"] = "@{}".format(WriteFeaturesMarkFDK.kCombMarksClassName)
            if os.path.exists(os.path.join(self.directory, "classes_suffixing.fea")):
                GDEF_records["marks"] = "@{}".format("COMBINING_MARKS_GDEF")
            tables["GDEF"].extend([
                "GlyphClassDef {bases}, {ligatures}, {marks}, {components};".format(**GDEF_records)
            ])
        # name table: convert Python escapes to FEA-style escapes.
        # NOTE(review): .encode("unicode_escape") then str.replace assumes
        # Python 2 str semantics (AFDKOPython) — under Python 3 encode()
        # returns bytes and this would fail; confirm runtime.
        tables["name"].extend(
            "nameid {} \"{}\";".format(
                name_id,
                content.encode("unicode_escape").replace("\\x", "\\00").replace("\\u", "\\")
            )
            for name_id, content in sorted(client.tables["name"].items())
            if content
        )
        # Emit only non-empty tables, preserving the OrderedDict order.
        for name, entries in tables.items():
            if entries:
                lines.append("table %s {" % name)
                lines.extend("  " + i for i in entries)
                lines.append("} %s;" % name)
        if lines:
            with open(self.path, "w") as f:
                f.writelines(i + "\n" for i in lines)
class FeatureLanguagesystems(BaseFeature):
    """Generates the FEA languagesystem declarations file."""

    def generate(self):
        # DFLT first, then one declaration per script tag of the family.
        lines = ["languagesystem DFLT dflt;"]
        lines.extend(
            "languagesystem {} dflt;".format(tag)
            for tag in self.project.family.script.tags
        )
        if lines:
            with open(self.path, "w") as f:
                f.writelines(i + "\n" for i in lines)
class FeatureMark(BaseFeature):
    """Generates the mark (and optionally mkmk) feature via WriteFeaturesMarkFDK."""

    def generate(self):
        # The FDK module writes to a globally configured filename.
        WriteFeaturesMarkFDK.kMarkFeatureFileName = self.filename_with_extension
        options = self.project.options
        script = self.project.family.script
        WriteFeaturesMarkFDK.MarkDataClass(
            font = self.style.open(),
            folderPath = self.style.directory,
            trimCasingTags = False,
            genMkmkFeature = options["prepare_mark_to_mark_positioning"],
            writeClassesFile = True,
            indianScriptsFormat = script.is_indic,
        )
class FeatureKern(BaseFeature):
    """Generates the kern feature via WriteFeaturesKernFDK, with optional post-processing."""

    def generate(self):
        # The FDK module writes to a globally configured filename.
        WriteFeaturesKernFDK.kKernFeatureFileName = self.filename_with_extension
        WriteFeaturesKernFDK.KernDataClass(
            font = self.style.open(),
            folderPath = self.style.directory,
        )
        # Only post-process when the project defines a hook for it.
        if hasattr(self.project, "postprocess_kerning"):
            kern_path = self.path
            if os.path.exists(kern_path):
                with open(kern_path) as f:
                    original_content = f.read()
                with open(kern_path, "w") as f:
                    f.write(self.project.postprocess_kerning(original_content))
class FeatureWeightClass(BaseFeature):
    """Generates the WeightClass.fea snippet for this style."""

    def generate(self):
        content = "WeightClass {};\n".format(str(self.style.weight_class))
        with open(self.path, "w") as f:
            f.write(content)
class FeatureMatches(BaseFeature):
    """
    Generates the lookup that substitutes the default mI matra with the
    variant whose overhang best matches each base (sequence)'s stem position.
    """
    class Base(object):
        # One base glyph sequence that an mI must reach across.
        def __init__(self, feature, base_glyph_sequence):
            self.glyphs = base_glyph_sequence
            # target: stem position of the first glyph plus the widths of the
            # following glyphs in the sequence.
            self.target = None
            for g in self.glyphs:
                if self.target is None:
                    self.target = feature._get_stem_position(g)
                else:
                    self.target += g.width
    class Match(object):
        # One mI variant glyph (or the "not matched" placeholder when the
        # variant name is None).
        def __init__(self, feature, mI_variant_name):
            self.name = mI_variant_name
            if self.name:
                self.mI_variant = feature.font[self.name]
                # Suffix after the dot, e.g. "05" in "dvmI.05".
                self.number = self.mI_variant.name.partition(".")[2]
                # How far the variant reaches past its right edge.
                self.overhanging = abs(self.mI_variant.rightMargin)
            # Bases assigned to this variant during matching.
            self.bases = []
    # POTENTIAL_MODES = [
    #     "single glyph", "glyph sequence",
    #     "position marks", "ligate marks",
    # ]
    CLASS_NAME_mI_VARIANTS = "mI_VARIANTS"
    CLASS_NAME_BASES_ALIVE = "BASES_ALIVE"
    CLASS_NAME_BASES_DEAD = "BASES_DEAD"
    CLASS_NAME_BASES_FOR_LONG_mI = "BASES_FOR_LONG_mI"
    CONSONANTS_ALIVE = [
        i + "A" for i in kit.constants.CONSONANT_STEMS
    ] + "GAbar JAbar DDAbar BAbar ZHA YAheavy DDAmarwari".split()
    CONSONANTS_DEAD = kit.constants.CONSONANT_STEMS
    mI_NAME_STEM = "mI"
    # Optional explicit overrides for the base-name groups; when None, the
    # font's groups (see the properties below) are used instead.
    BASE_NAMES_ALIVE = None
    BASE_NAMES_DEAD = None
    mI_ANCHOR_NAME = "abvm.i"
    def __init__(self, project, name, style, filename_group):
        super(FeatureMatches, self).__init__(project, name, style, filename_group)
        self._bases_alive = None
        self._bases_dead = None
    def generate(self):
        """
        Match every base (sequence) to its best mI variant and write the
        substitution lookup; optionally rewrite abvm mark positioning.
        """
        self.font = self.style.open()
        self.matches = [self.Match(self, i) for i in self.font.groups[self.CLASS_NAME_mI_VARIANTS]]
        if not self.matches:
            raise ValueError("[WARNING] No variants for mI.")
        self.not_matched = self.Match(self, None)
        # The mE glyph's mark-attachment anchor gives the fallback right
        # margin used by _get_stem_position for anchor-less glyphs.
        abvm_position_in_mE = self._get_abvm_position(
            self.font[self.style.family.script.abbr + "mE"],
            in_base = False,
        )
        if abvm_position_in_mE is None:
            raise SystemExit("[WARNING] Can't find the abvm anchor in glyph `mE`!")
        else:
            self.abvm_right_margin = abs(abvm_position_in_mE)
        self.bases = [self.Base(self, i) for i in self._base_glyph_sequences()]
        if not self.bases:
            raise ValueError("[WARNING] No bases.")
        self.adjustment_extremes = self._get_adjustment_extremes()
        if self.adjustment_extremes:
            # Linearly interpolate an adjustment across the target range.
            # NOTE(review): only the local `targets` list is adjusted here;
            # base.target (used by match_mI_variants below) is NOT updated —
            # confirm whether this is intended.
            targets = [base.target for base in self.bases]
            target_min = min(targets)
            target_max = max(targets)
            for i, target in enumerate(targets):
                # print("Old:", target, end=", ")
                ratio = (target - target_min) / (target_max - target_min)
                ae = self.adjustment_extremes
                adjustment = ae[0] + (ae[-1] - ae[0]) * ratio
                targets[i] += adjustment
                # print("New:", targets[i], end="; ")
            print()
        # Allowed overshoot beyond the longest variant: half of VA's stem position.
        self.tolerance = self._get_stem_position(
            self.font[self.style.family.script.abbr + "VA"]
        ) * 0.5
        for base in self.bases:
            match = self.match_mI_variants(base)
            match.bases.append(base)
        self.name_default = self.style.family.script.abbr + self.mI_NAME_STEM
        self.substitute_rule_lines = []
        for match in self.matches:
            self.output_mI_variant_matches(match)
        with open(self.path, "w") as f:
            f.writelines([
                "lookup %s {\n" % self.name,
                # " lookupflag IgnoreMarks;\n",
            ])
            f.writelines(" " + l + "\n" for l in self.substitute_rule_lines)
            f.writelines([
                # " lookupflag 0;\n",
                "} %s;\n" % self.name,
            ])
        if self.project.options["position_marks_for_mI_variants"] and \
                self.project.options["match_mI_variants"] == "single":
            self.output_mark_positioning_for_mI_variants()
    def _get_adjustment_extremes(self):
        """
        Return (min, max) adjustment interpolated for this style's weight,
        or None when the project defines no adjustment.
        """
        try:
            light, bold = self.project.adjustment_for_matching_mI_variants
        except AttributeError:
            return None
        else:
            light_min, light_max = light
            bold_min, bold_max = bold
            # Interpolate between the light and bold extremes along the
            # weight axis position of this style.
            axis_start = self.project.family.masters[0].weight_location
            axis_end = self.project.family.masters[-1].weight_location
            axis_range = axis_end - axis_start
            if axis_range == 0:
                ratio = 1
            else:
                ratio = (self.style.weight_location - axis_start) / axis_range
            return (
                light_min + (bold_min - light_min) * ratio,
                light_max + (bold_max - light_max) * ratio,
            )
    def _get_abvm_position(self, glyph, in_base=True):
        """
        Return the x of the glyph's abvm(-ish) anchor, or None when absent.
        Mark glyphs use the underscore-prefixed attachment anchor.
        """
        anchor_name_prefix = "" if in_base else "_"
        for potential_anchor_name in ["abvm.candra", "abvm.e", "abvm"]:
            for anchor in glyph.anchors:
                if anchor.name == anchor_name_prefix + potential_anchor_name:
                    return anchor.x
    def _get_stem_position(self, glyph):
        """Return the stem x position: abvm anchor, or width minus the mE margin."""
        abvm_position = self._get_abvm_position(glyph)
        if abvm_position is None:
            return glyph.width - self.abvm_right_margin
        else:
            return abvm_position
    @property
    def bases_alive(self):
        # NOTE(review): the computed list is returned without being stored in
        # self._bases_alive, so it is recomputed on every access unless the
        # setter was used — confirm whether caching was intended.
        if self._bases_alive is None:
            base_names = kit.fallback(
                self.BASE_NAMES_ALIVE,
                self.font.groups[self.CLASS_NAME_BASES_ALIVE],
            )
            return [self.font[i] for i in base_names]
        else:
            return self._bases_alive
    @bases_alive.setter
    def bases_alive(self, value):
        self._bases_alive = value
    @property
    def bases_dead(self):
        # Same non-caching pattern as bases_alive above.
        if self._bases_dead is None:
            base_names = kit.fallback(
                self.BASE_NAMES_DEAD,
                self.font.groups[self.CLASS_NAME_BASES_DEAD],
            )
            return [self.font[i] for i in base_names]
        else:
            return self._bases_dead
    @bases_dead.setter
    def bases_dead(self, value):
        self._bases_dead = value
    def _base_glyph_sequences(self):
        """
        Yield base glyph sequences: each is zero or more dead consonants
        (sequence mode only) followed by one alive base.
        """
        LENGTH = 2
        bases_alive = self.bases_alive
        if self.project.options["match_mI_variants"] == "single":
            bases_dead = [None]
        elif self.project.options["match_mI_variants"] == "sequence":
            bases_dead = [None] + self.bases_dead
        else:
            raise ValueError("[WARNING] Project.options[\"match_mI_variants\"] is not set to \"single\" or \"sequence\".")
        seeds = [bases_dead] * (LENGTH - 1) + [bases_alive]
        for raw_sequence in itertools.product(*seeds):
            # None placeholders collapse, yielding shorter sequences too.
            sequence = [i for i in raw_sequence if i is not None]
            if sequence:
                yield sequence
    def match_mI_variants(self, base):
        """
        Return the Match whose overhang best fits base.target.

        Relies on self.matches being ordered by increasing overhang; targets
        beyond the longest variant plus tolerance get self.not_matched.
        """
        if base.target <= self.matches[0].overhanging:
            return self.matches[0]
        elif base.target < self.matches[-1].overhanging:
            # Walk up to the first variant that reaches the target, then pick
            # whichever neighbor is closer.
            i = 0
            while self.matches[i].overhanging < base.target:
                candidate_short = self.matches[i]
                i += 1
            candidate_enough = self.matches[i]
            if (
                abs(candidate_enough.overhanging - base.target) <
                abs(candidate_short.overhanging - base.target)
            ):
                return candidate_enough
            else:
                return candidate_short
        elif base.target <= self.matches[-1].overhanging + self.tolerance:
            return self.matches[-1]
        else:
            return self.not_matched
    def output_mI_variant_matches(self, match):
        """Append GSUB substitution rule lines for one variant's matched bases."""
        if not match.bases:
            print("\t\t`{}` is not used.".format(match.name))
            self.substitute_rule_lines.append(
                "# sub {}' _ by {};".format(self.name_default, match.name),
            )
            return
        # Single-glyph bases collapse into one rule with a glyph class;
        # multi-glyph sequences each need their own contextual rule.
        single_glyph_bases = []
        multiple_glyph_bases = []
        for base in match.bases:
            if len(base.glyphs) == 1:
                single_glyph_bases.append(base)
            else:
                multiple_glyph_bases.append(base)
        if single_glyph_bases:
            self.substitute_rule_lines.append(
                "sub {}' [{}] by {};".format(
                    self.name_default,
                    " ".join(i.glyphs[0].name for i in single_glyph_bases),
                    match.name,
                ),
            )
        for base in multiple_glyph_bases:
            self.substitute_rule_lines.append(
                "sub {}' {} by {};".format(
                    self.name_default,
                    " ".join(i.name for i in base.glyphs),
                    match.name,
                ),
            )
    def output_mark_positioning_for_mI_variants(self):
        """
        Rewrite the generated abvm feature so each mI variant's mark-base
        rules apply only to the bases actually matched to it.
        """
        abvm_backup_path = os.path.join(
            self.style.directory,
            "backup--" + WriteFeaturesMarkFDK.kAbvmFeatureFileName,
        )
        abvm_path = os.path.join(
            self.style.directory,
            WriteFeaturesMarkFDK.kAbvmFeatureFileName,
        )
        # Always rewrite from the pristine copy: restore from backup if one
        # exists, otherwise create the backup first.
        if os.path.exists(abvm_backup_path):
            kit.copy(abvm_backup_path, abvm_path)
        else:
            kit.copy(abvm_path, abvm_backup_path)
        pattern_begin = re.compile(r"lookup MARK_BASE_%s \{$" % self.mI_ANCHOR_NAME)
        pattern_end = re.compile(r"\} MARK_BASE_%s;$" % self.mI_ANCHOR_NAME)
        match_dict = {match.number: match for match in self.matches}
        def _modify(matchobj):
            # Replace the variant glyph with the class of its matched bases;
            # comment the rule out when the variant matched nothing.
            match = match_dict[matchobj.group(1)]
            if match.bases:
                prefix = ""
                names = "[{}]".format(" ".join(i.glyphs[0].name for i in match.bases))
            else:
                prefix = "# "
                names = "_"
            x = int(matchobj.group(2))
            # NOTE(review): the offset uses matches[0]'s variant width for
            # every variant — confirm this is intended.
            x_with_offset = x - self.matches[0].mI_variant.width
            return "{}pos base {} <anchor {}".format(prefix, names, x_with_offset)
        with open(abvm_path, "r") as f:
            lines_modified = []
            is_inside_the_lookup = False
            # Only rewrite `pos base` lines inside the MARK_BASE_<anchor> lookup.
            for line in f:
                if is_inside_the_lookup:
                    if pattern_end.match(line):
                        is_inside_the_lookup = False
                        line_modified = line
                    else:
                        line_modified = re.sub(
                            r"pos base {}\.(\d\d) <anchor (-?\d+)".format(self.name_default),
                            _modify,
                            line,
                        )
                else:
                    if pattern_begin.match(line):
                        is_inside_the_lookup = True
                    line_modified = line
            lines_modified.append(line_modified)
        with open(abvm_path, "w") as f:
            f.writelines(lines_modified)
class FeatureReferences(BaseFeature):
    """
    Generates the top-level features file that includes all generated pieces.
    """
    def generate(self):
        """
        Write the root FEA: a head table stub, includes for every existing
        generated file, and feature wrappers for kern/dist and mark features.
        """
        with open(self.path, "w") as f:
            lines = ["table head { FontRevision 1.000; } head;"]
            # Include every generated file that actually exists on disk.
            for feature in [
                self.project.feature_classes,
                self.project.feature_tables,
                self.project.feature_languagesystems,
                self.project.feature_gsub,
            ]:
                for i in feature.file_group:
                    if os.path.exists(i.path):
                        lines.append(
                            "include ({});".format(
                                os.path.relpath(i.path, self.style.directory)
                            )
                        )
            # Kerning is exposed as `dist` for Indic scripts, `kern` otherwise.
            if os.path.exists(self.project.feature_kern.path):
                lines.append(
                    "feature %(tag)s { include (%(path)s); } %(tag)s;" % {
                        "tag": "dist" if self.project.family.script.is_indic else "kern",
                        "path": os.path.relpath(self.project.feature_kern.path, self.style.directory),
                    }
                )
            if os.path.exists(os.path.join(self.style.directory, WriteFeaturesMarkFDK.kMarkClassesFileName)):
                lines.append("include ({});".format(WriteFeaturesMarkFDK.kMarkClassesFileName))
            # Wrap each FDK-generated mark file in its feature tag if present.
            for feature_name, filename in [
                ("mark", WriteFeaturesMarkFDK.kMarkFeatureFileName),
                ("mkmk", WriteFeaturesMarkFDK.kMkmkFeatureFileName),
                ("abvm", WriteFeaturesMarkFDK.kAbvmFeatureFileName),
                ("blwm", WriteFeaturesMarkFDK.kBlwmFeatureFileName),
            ]:
                if os.path.exists(os.path.join(self.style.directory, filename)):
                    lines.append(
                        "feature %(tag)s { include (%(path)s); } %(tag)s;" % {
                            "tag": feature_name,
                            "path": filename,
                        }
                    )
            f.writelines(i + "\n" for i in lines)
class Feature(object):
    """Factory: dispatches a feature name to its implementing class."""

    NAME_TO_CLASS_MAP = {
        "classes": FeatureClasses,
        "tables": FeatureTables,
        "languagesystems": FeatureLanguagesystems,
        "kern": FeatureKern,
        "mark": FeatureMark,
        "mI_variant_matches": FeatureMatches,
        "WeightClass": FeatureWeightClass,
        "features": FeatureReferences,
    }

    def __new__(cls, project, name, style=None, filename_group=None):
        # Unknown names fall back to the generic BaseFeature.
        feature_class = cls.NAME_TO_CLASS_MAP.get(name, BaseFeature)
        return feature_class(project, name, style, filename_group)
Compress sequence matching
#!/usr/bin/env AFDKOPython
# encoding: UTF-8
from __future__ import division, absolute_import, print_function, unicode_literals
import os, collections, itertools, re
import WriteFeaturesKernFDK, WriteFeaturesMarkFDK
import hindkit as kit
class BaseFeature(kit.BaseFile):
    """
    Base class for generated FEA feature files.
    """
    def __init__(self, project, name, style, filename_group):
        # Style-specific features live in the style's directory; shared ones
        # in the project-wide features directory.
        if style:
            abstract_directory = style.abstract_directory
        else:
            abstract_directory = kit.Project.directories["features"]
        super(BaseFeature, self).__init__(
            name,
            file_format = "FEA",
            project = project,
            filename_group = filename_group,
            abstract_directory = abstract_directory,
        )
        self.style = style
    @staticmethod
    def sort_names(names, order):
        """Sort `names` by `order`; names missing from `order` go last, as-is."""
        return (
            [i for i in order if i in names] +
            [i for i in names if i not in order]
        )
    @staticmethod
    def compose_glyph_class_def_lines(class_name, glyph_names):
        """Return FEA lines defining @class_name; a commented stub when empty."""
        if glyph_names:
            glyph_class_def_lines = (
                ["@{} = [".format(class_name)] +
                ["  {}".format(glyph_name) for glyph_name in glyph_names] +
                ["];", ""]
            )
        else:
            glyph_class_def_lines = ["# @{} = [];".format(class_name), ""]
        return glyph_class_def_lines
class FeatureClasses(BaseFeature):
    """
    Generates the FEA glyph-class definitions (classes) file.
    """
    def generate(self):
        """
        Build glyph classes from the first product's font and write them out.

        Class membership is computed with the kit.filters predicates, sorted
        by the project's glyph order, saved into every product font's groups,
        and written to `self.path` as FEA class definitions.
        """
        lines = []
        if self.project.options["prepare_mark_positioning"]:
            glyph_classes = []
            glyph_classes.extend([(WriteFeaturesMarkFDK.kCombMarksClassName, kit.filters.marks)])
            if self.project.options["match_mI_variants"]:
                # Extra classes needed for mI-variant matching.
                glyph_classes.extend([
                    (FeatureMatches.CLASS_NAME_mI_VARIANTS, kit.filters.mI_variants),
                    (FeatureMatches.CLASS_NAME_BASES_ALIVE, kit.filters.bases_alive),
                    (FeatureMatches.CLASS_NAME_BASES_DEAD, kit.filters.bases_dead),
                    (FeatureMatches.CLASS_NAME_BASES_FOR_LONG_mI, kit.filters.bases_for_long_mII),
                ])
            # Classes are derived from the first product's font only, then
            # copied into the remaining fonts below.
            font_0 = self.project.products[0].style.open()
            glyph_order = self.project.glyph_data.glyph_order
            for class_name, filter_function in glyph_classes:
                glyph_names = [
                    glyph.name for glyph in filter(
                        lambda glyph: filter_function(self.project.family, glyph),
                        font_0,
                    )
                ]
                glyph_names = self.sort_names(glyph_names, glyph_order)
                font_0.groups.update({class_name: glyph_names})
                lines.extend(
                    self.compose_glyph_class_def_lines(class_name, glyph_names)
                )
            font_0.save()
            # Propagate the computed groups to every other product's font.
            for style in (i.style for i in self.project.products[1:]):
                font = style.open()
                font.groups.update(font_0.groups)
                font.save()
        if lines:
            with open(self.path, "w") as f:
                f.writelines(i + "\n" for i in lines)
class FeatureTables(BaseFeature):

    """Generates the feature file that fills static OpenType tables.

    Emits `table ... { ... } ...;` blocks for hhea, OS/2, GDEF, and name,
    driven by the family's UFO info, the client configuration, and
    project options. Missing vertical metrics are filled in with
    defaults before being written out.
    """

    def generate(self):
        info = self.project.family.info
        client = self.project.family.client
        lines = []
        # Ordered so the table blocks appear in a stable sequence in the
        # output file.
        tables = collections.OrderedDict([
            ("hhea", []),
            ("OS/2", []),
            ("GDEF", []),
            ("name", []),
        ])
        # WeightClass.fea is generated per style by FeatureWeightClass.
        tables["OS/2"].append("include (WeightClass.fea);")
        tables["OS/2"].append("fsType {};".format(client.tables["OS/2"]["fsType"]))
        # Falsy entries (None, 0) are dropped; note this also drops a
        # legitimate bit value of 0 -- presumably intentional here.
        unicode_range_bits = set(
            i for i in
            [self.project.family.script.unicode_range_bit] +
            self.project.options["additional_unicode_range_bits"]
            if i
        )
        if unicode_range_bits:
            tables["OS/2"].append(
                "UnicodeRange {};".format(
                    " ".join(str(i) for i in sorted(unicode_range_bits))
                )
            )
        # (sic) "vender" -- kept to avoid touching code in a doc pass.
        vender_id = client.tables["OS/2"]["Vendor"]
        if vender_id:
            tables["OS/2"].append("Vendor \"{}\";".format(vender_id))
        # Vertical metrics are only written if at least one of the eight
        # fields has been set explicitly; otherwise the block is omitted
        # entirely and the compiler's defaults apply.
        set_vertical_metrics = False
        for field in (
            info.openTypeHheaAscender,
            info.openTypeHheaDescender,
            info.openTypeHheaLineGap,
            info.openTypeOS2TypoAscender,
            info.openTypeOS2TypoDescender,
            info.openTypeOS2TypoLineGap,
            info.openTypeOS2WinAscent,
            info.openTypeOS2WinDescent,
        ):
            if field is not None:
                set_vertical_metrics = True
                break
        if set_vertical_metrics:
            if info.unitsPerEm is None:
                raise SystemExit("`family.info.unitsPerEm` is unavailable.")
            # Fallback hhea metrics (per mille of UPM by convention --
            # TODO confirm these defaults against the foundry spec).
            if info.openTypeHheaAscender is None:
                info.openTypeHheaAscender = 800
            if info.openTypeHheaDescender is None:
                info.openTypeHheaDescender = -200
            if info.openTypeHheaLineGap is None:
                info.openTypeHheaLineGap = 0
            if client.vertical_metrics_strategy == "Google Fonts":
                # Typo metrics mirror the hhea metrics.
                if info.openTypeOS2TypoAscender is None:
                    info.openTypeOS2TypoAscender = info.openTypeHheaAscender
                if info.openTypeOS2TypoDescender is None:
                    info.openTypeOS2TypoDescender = info.openTypeHheaDescender
                if info.openTypeOS2TypoLineGap is None:
                    info.openTypeOS2TypoLineGap = info.openTypeHheaLineGap
            elif client.vertical_metrics_strategy == "ITF":
                # Distribute the height exceeding one em between the typo
                # ascender/descender and push the rest into the line gap.
                extra_height = info.openTypeHheaAscender - info.openTypeHheaDescender - info.unitsPerEm
                if info.openTypeOS2TypoAscender is None:
                    info.openTypeOS2TypoAscender = info.openTypeHheaAscender - int(round(extra_height / 2))
                if info.openTypeOS2TypoDescender is None:
                    info.openTypeOS2TypoDescender = info.openTypeOS2TypoAscender - info.unitsPerEm
                if info.openTypeOS2TypoLineGap is None:
                    info.openTypeOS2TypoLineGap = info.openTypeHheaLineGap + extra_height
            if info.openTypeOS2WinAscent is None:
                info.openTypeOS2WinAscent = info.openTypeHheaAscender
            if info.openTypeOS2WinDescent is None:
                # winDescent is expressed as a positive distance.
                info.openTypeOS2WinDescent = abs(info.openTypeHheaDescender)
            tables["hhea"].extend([
                "Ascender {};".format(info.openTypeHheaAscender),
                "Descender {};".format(info.openTypeHheaDescender),
                "LineGap {};".format(info.openTypeHheaLineGap),
            ])
            tables["OS/2"].extend([
                "TypoAscender {};".format(info.openTypeOS2TypoAscender),
                "TypoDescender {};".format(info.openTypeOS2TypoDescender),
                "TypoLineGap {};".format(info.openTypeOS2TypoLineGap),
                "winAscent {};".format(info.openTypeOS2WinAscent),
                "winDescent {};".format(info.openTypeOS2WinDescent),
            ])
        code_pages = set(
            i for i in self.project.options["additional_code_pages"] if i
        )
        if code_pages:
            tables["OS/2"].append(
                "CodePageRange {};".format(
                    " ".join(str(i) for i in sorted(code_pages))
                )
            )
        if self.project.options["override_GDEF"]:
            # GlyphClassDef slots; empty string means "leave class empty".
            GDEF_records = {
                "bases": "",
                "ligatures": "",
                "marks": "",
                "components": "",
            }
            if self.project.options["prepare_mark_positioning"] or os.path.exists(os.path.join(self.directory, "classes.fea")):
                GDEF_records["marks"] = "@{}".format(WriteFeaturesMarkFDK.kCombMarksClassName)
            # A suffixing classes file overrides the marks class name.
            if os.path.exists(os.path.join(self.directory, "classes_suffixing.fea")):
                GDEF_records["marks"] = "@{}".format("COMBINING_MARKS_GDEF")
            tables["GDEF"].extend([
                "GlyphClassDef {bases}, {ligatures}, {marks}, {components};".format(**GDEF_records)
            ])
        # NOTE(review): `bytes.replace` with str arguments is a Python 2
        # idiom; under Python 3 `content.encode(...).replace("\\x", ...)`
        # would raise TypeError -- confirm the tool still runs on py2.
        # The escaping rewrites unicode_escape output into the \XXXX form
        # expected by the .fea name table syntax.
        tables["name"].extend(
            "nameid {} \"{}\";".format(
                name_id,
                content.encode("unicode_escape").replace("\\x", "\\00").replace("\\u", "\\")
            )
            for name_id, content in sorted(client.tables["name"].items())
            if content
        )
        # Emit only the tables that accumulated at least one entry.
        for name, entries in tables.items():
            if entries:
                lines.append("table %s {" % name)
                lines.extend(" " + i for i in entries)
                lines.append("} %s;" % name)
        if lines:
            with open(self.path, "w") as f:
                f.writelines(i + "\n" for i in lines)
class FeatureLanguagesystems(BaseFeature):

    """Generates the languagesystem declarations feature file."""

    def generate(self):
        # DFLT always comes first, followed by each of the script's tags.
        lines = ["languagesystem DFLT dflt;"] + [
            "languagesystem {} dflt;".format(tag)
            for tag in self.project.family.script.tags
        ]
        if lines:
            with open(self.path, "w") as f:
                f.writelines(line + "\n" for line in lines)
class FeatureMark(BaseFeature):

    """Generates mark positioning features via the AFDKO mark writer."""

    def generate(self):
        # The FDK writer is configured through module-level globals:
        # point its output filename at this feature's file first.
        WriteFeaturesMarkFDK.kMarkFeatureFileName = self.filename_with_extension
        # Instantiating MarkDataClass performs the generation as a side
        # effect (writes the mark/mkmk/abvm/blwm files into folderPath).
        WriteFeaturesMarkFDK.MarkDataClass(
            font=self.style.open(),
            folderPath=self.style.directory,
            trimCasingTags=False,
            genMkmkFeature=self.project.options["prepare_mark_to_mark_positioning"],
            writeClassesFile=True,
            indianScriptsFormat=self.project.family.script.is_indic,
        )
class FeatureKern(BaseFeature):

    """Generates the kerning feature file via the AFDKO kern writer."""

    def generate(self):
        # The FDK writer is configured through a module-level global.
        WriteFeaturesKernFDK.kKernFeatureFileName = self.filename_with_extension
        # Instantiation writes the kern feature file as a side effect.
        WriteFeaturesKernFDK.KernDataClass(
            font=self.style.open(),
            folderPath=self.style.directory,
        )
        # A project may optionally define a postprocess_kerning hook;
        # when present, run the generated file's text through it.
        if hasattr(self.project, "postprocess_kerning"):
            kern_path = self.path
            if os.path.exists(kern_path):
                with open(kern_path) as f:
                    original_content = f.read()
                with open(kern_path, "w") as f:
                    f.write(self.project.postprocess_kerning(original_content))
class FeatureWeightClass(BaseFeature):

    """Generates the per-style WeightClass.fea snippet."""

    def generate(self):
        weight = self.style.weight_class
        with open(self.path, "w") as feature_file:
            feature_file.write("WeightClass {};\n".format(str(weight)))
class FeatureMatches(BaseFeature):

    """Generates substitution rules matching mI matra variants to bases.

    For an Indic (presumably Devanagari-like -- TODO confirm) script,
    the default mI vowel sign is substituted by the variant whose arm
    length best reaches the stem of the following base (or base
    sequence). Optionally it also rewrites the generated abvm mark
    positioning to match the chosen variants.
    """

    class Base(object):
        # A base glyph sequence together with the horizontal distance
        # ("target") from the mI attachment point to the stem.
        def __init__(self, feature, base_glyph_sequence):
            self.glyphs = base_glyph_sequence
            self.target = None
            for g in self.glyphs:
                #TODO: Kerning.
                if self.target is None:
                    # First glyph: distance to its stem position.
                    self.target = feature._get_stem_position(g)
                else:
                    # Subsequent glyphs push the stem further right.
                    self.target += g.width

    class Match(object):
        # One mI variant glyph and the bases assigned to it. A falsy
        # name produces the sentinel "not matched" bucket.
        def __init__(self, feature, mI_variant_name):
            self.name = mI_variant_name
            if self.name:
                self.mI_variant = feature.font[self.name]
                # Variant index, e.g. "dvmI.05" -> "05".
                self.number = self.mI_variant.name.partition(".")[2]
                # How far the variant's arm hangs over following glyphs.
                self.overhanging = abs(self.mI_variant.rightMargin)
            self.bases = []

    # POTENTIAL_MODES = [
    #     "single glyph", "glyph sequence",
    #     "position marks", "ligate marks",
    # ]

    # Group names shared with FeatureClasses.
    CLASS_NAME_mI_VARIANTS = "mI_VARIANTS"
    CLASS_NAME_BASES_ALIVE = "BASES_ALIVE"
    CLASS_NAME_BASES_DEAD = "BASES_DEAD"
    CLASS_NAME_BASES_FOR_LONG_mI = "BASES_FOR_LONG_mI"
    CONSONANTS_ALIVE = [
        i + "A" for i in kit.constants.CONSONANT_STEMS
    ] + "GAbar JAbar DDAbar BAbar ZHA YAheavy DDAmarwari".split()
    CONSONANTS_DEAD = kit.constants.CONSONANT_STEMS
    mI_NAME_STEM = "mI"
    # Overrides for the base name lists; None means "read from groups".
    BASE_NAMES_ALIVE = None
    BASE_NAMES_DEAD = None
    mI_ANCHOR_NAME = "abvm.i"
    # Length of the base sequences considered (dead consonants + one
    # live consonant).
    BASE_GLYPH_SEQUENCE_LENGTH = 2

    def __init__(self, project, name, style, filename_group):
        super(FeatureMatches, self).__init__(project, name, style, filename_group)
        self._bases_alive = None
        self._bases_dead = None

    def generate(self):
        self.font = self.style.open()
        # NOTE(review): messages prefixed "[WARNING]" are raised as hard
        # errors here -- confirm that is intended.
        self.matches = [self.Match(self, i) for i in self.font.groups[self.CLASS_NAME_mI_VARIANTS]]
        if not self.matches:
            raise ValueError("[WARNING] No variants for mI.")
        self.not_matched = self.Match(self, None)
        # The mE glyph's mark-side abvm anchor defines the reference
        # right margin for stem positions.
        abvm_position_in_mE = self._get_abvm_position(
            self.font[self.style.family.script.abbr + "mE"],
            in_base=False,
        )
        if abvm_position_in_mE is None:
            raise SystemExit("[WARNING] Can't find the abvm anchor in glyph `mE`!")
        else:
            self.abvm_right_margin = abs(abvm_position_in_mE)
        self.bases = [self.Base(self, i) for i in self._base_glyph_sequences()]
        if not self.bases:
            raise ValueError("[WARNING] No bases.")
        self.adjustment_extremes = self._get_adjustment_extremes()
        if self.adjustment_extremes:
            # Interpolate an adjustment for each base between the two
            # extremes, proportional to its position in the target range.
            # NOTE(review): the adjusted values are stored only in the
            # local `targets` list and never written back to
            # `base.target`, so this loop has no effect beyond the bare
            # print() below -- confirm whether that is a latent bug.
            targets = [base.target for base in self.bases]
            target_min = min(targets)
            target_max = max(targets)
            for i, target in enumerate(targets):
                ratio = (target - target_min) / (target_max - target_min)
                ae = self.adjustment_extremes
                adjustment = ae[0] + (ae[-1] - ae[0]) * ratio
                targets[i] += adjustment
            print()
        # Tolerance for accepting the longest variant: half the stem
        # position of VA (an arbitrary reference glyph, presumably).
        self.tolerance = self._get_stem_position(
            self.font[self.style.family.script.abbr + "VA"]
        ) * 0.5
        for base in self.bases:
            match = self.match_mI_variants(base)
            match.bases.append(base)
        self.name_default = self.style.family.script.abbr + self.mI_NAME_STEM
        self.substitute_rule_lines = []
        for match in self.matches:
            self.output_mI_variant_matches(match)
        with open(self.path, "w") as f:
            f.writelines([
                "lookup %s {\n" % self.name,
                # " lookupflag IgnoreMarks;\n",
            ])
            f.writelines(" " + l + "\n" for l in self.substitute_rule_lines)
            f.writelines([
                # " lookupflag 0;\n",
                "} %s;\n" % self.name,
            ])
        if self.project.options["position_marks_for_mI_variants"] and \
                self.project.options["match_mI_variants"] == "single":
            self.output_mark_positioning_for_mI_variants()

    def _get_adjustment_extremes(self):
        """Interpolate per-style (min, max) target adjustments, or None.

        The project may define `adjustment_for_matching_mI_variants` as
        ((light_min, light_max), (bold_min, bold_max)); the result is
        interpolated along the weight axis for this style.
        """
        try:
            light, bold = self.project.adjustment_for_matching_mI_variants
        except AttributeError:
            return None
        else:
            light_min, light_max = light
            bold_min, bold_max = bold
            axis_start = self.project.family.masters[0].weight_location
            axis_end = self.project.family.masters[-1].weight_location
            axis_range = axis_end - axis_start
            if axis_range == 0:
                # Single-master family: use the bold extreme.
                ratio = 1
            else:
                ratio = (self.style.weight_location - axis_start) / axis_range
            return (
                light_min + (bold_min - light_min) * ratio,
                light_max + (bold_max - light_max) * ratio,
            )

    def _get_abvm_position(self, glyph, in_base=True):
        """Return the x of the glyph's abvm anchor, or None.

        Mark glyphs carry the underscore-prefixed mate of the base
        anchor. Candidate names are tried most-specific first.
        """
        anchor_name_prefix = "" if in_base else "_"
        for potential_anchor_name in ["abvm.candra", "abvm.e", "abvm"]:
            for anchor in glyph.anchors:
                if anchor.name == anchor_name_prefix + potential_anchor_name:
                    return anchor.x

    def _get_stem_position(self, glyph):
        """Return the glyph's stem x; fall back to width minus the
        reference abvm right margin when no abvm anchor exists."""
        abvm_position = self._get_abvm_position(glyph)
        if abvm_position is None:
            return glyph.width - self.abvm_right_margin
        else:
            return abvm_position

    @property
    def bases_alive(self):
        # Explicitly-set list wins; otherwise resolve names from the
        # font's groups on every access (not cached).
        if self._bases_alive is None:
            base_names = kit.fallback(
                self.BASE_NAMES_ALIVE,
                self.font.groups[self.CLASS_NAME_BASES_ALIVE],
            )
            return [self.font[i] for i in base_names]
        else:
            return self._bases_alive

    @bases_alive.setter
    def bases_alive(self, value):
        self._bases_alive = value

    @property
    def bases_dead(self):
        # Same resolution strategy as bases_alive.
        if self._bases_dead is None:
            base_names = kit.fallback(
                self.BASE_NAMES_DEAD,
                self.font.groups[self.CLASS_NAME_BASES_DEAD],
            )
            return [self.font[i] for i in base_names]
        else:
            return self._bases_dead

    @bases_dead.setter
    def bases_dead(self, value):
        self._bases_dead = value

    def _base_glyph_sequences(self):
        """Yield base glyph sequences to match mI variants against.

        In "single" mode only lone live bases are produced; in
        "sequence" mode each live base may be preceded by up to
        BASE_GLYPH_SEQUENCE_LENGTH - 1 dead consonants (None acts as
        "no glyph in this slot" and is filtered out).
        """
        bases_alive = self.bases_alive
        if self.project.options["match_mI_variants"] == "single":
            bases_dead = [None]
        elif self.project.options["match_mI_variants"] == "sequence":
            bases_dead = [None] + self.bases_dead
        else:
            raise ValueError("[WARNING] Project.options[\"match_mI_variants\"] is not set to \"single\" or \"sequence\".")
        seeds = [bases_dead] * (self.BASE_GLYPH_SEQUENCE_LENGTH - 1) + [bases_alive]
        for raw_sequence in itertools.product(*seeds):
            sequence = [i for i in raw_sequence if i is not None]
            if sequence:
                yield sequence

    def match_mI_variants(self, base):
        """Return the Match whose overhang best fits base.target.

        Assumes self.matches is ordered by increasing overhang --
        TODO confirm the mI_VARIANTS group is sorted that way.
        """
        if base.target <= self.matches[0].overhanging:
            # Even the shortest variant reaches far enough.
            return self.matches[0]
        elif base.target < self.matches[-1].overhanging:
            # Walk to the first variant that reaches the target, then
            # pick whichever of it and its predecessor is closer.
            i = 0
            while self.matches[i].overhanging < base.target:
                candidate_short = self.matches[i]
                i += 1
            candidate_enough = self.matches[i]
            if (
                abs(candidate_enough.overhanging - base.target) <
                abs(candidate_short.overhanging - base.target)
            ):
                return candidate_enough
            else:
                return candidate_short
        elif base.target <= self.matches[-1].overhanging + self.tolerance:
            # Slightly out of reach: accept the longest variant.
            return self.matches[-1]
        else:
            return self.not_matched

    def output_mI_variant_matches(self, match):
        """Append .fea substitution rule lines for one variant."""
        if not match.bases:
            print("\t\t`{}` is not used.".format(match.name))
            # Emit a commented-out rule so the unused variant is visible
            # in the generated file.
            self.substitute_rule_lines.append(
                "# sub {}' _ by {};".format(self.name_default, match.name),
            )
            return
        single_glyph_bases = []
        multiple_glyph_bases = []
        for base in match.bases:
            if len(base.glyphs) == 1:
                single_glyph_bases.append(base)
            else:
                multiple_glyph_bases.append(base)
        if single_glyph_bases:
            # All one-glyph contexts collapse into a single class rule.
            self.substitute_rule_lines.append(
                "sub {}' [{}] by {};".format(
                    self.name_default,
                    " ".join(i.glyphs[0].name for i in single_glyph_bases),
                    match.name,
                ),
            )
        def compress(raw):
            # Merge values that share a key; values are mutable lists.
            compressed = {}
            for k, v in raw:
                if k in compressed:
                    compressed[k].extend(v)
                else:
                    compressed[k] = v
            return compressed
        # self.BASE_GLYPH_SEQUENCE_LENGTH = 2
        # First group by leading glyph, then regroup by the merged set
        # of trailing glyphs, so sequences sharing either side collapse
        # into class-based rules.
        compressed = compress(
            (tuple(i.glyphs[:1]), i.glyphs[1:]) for i in multiple_glyph_bases
        )
        compressed = compress(
            (tuple(v), list(k)) for k, v in compressed.items()
        )
        for rule in ([v, list(k)] for k, v in compressed.items()):
            self.substitute_rule_lines.append(
                "sub {}' {} by {};".format(
                    self.name_default,
                    " ".join(
                        i[0].name if len(i) == 1
                        else "[{}]".format(" ".join(j.name for j in i))
                        for i in rule
                    ),
                    match.name,
                ),
            )

    def output_mark_positioning_for_mI_variants(self):
        """Rewrite the generated abvm feature for the matched variants.

        Works on a backed-up copy of the FDK-generated abvm file,
        replacing `pos base <default mI>.<nn> ...` rules inside the
        MARK_BASE lookup with rules that target the bases matched to
        each variant (x shifted by the first variant's width).
        """
        abvm_backup_path = os.path.join(
            self.style.directory,
            "backup--" + WriteFeaturesMarkFDK.kAbvmFeatureFileName,
        )
        abvm_path = os.path.join(
            self.style.directory,
            WriteFeaturesMarkFDK.kAbvmFeatureFileName,
        )
        # Always rewrite from the pristine copy: restore it if it
        # exists, otherwise create it from the freshly generated file.
        if os.path.exists(abvm_backup_path):
            kit.copy(abvm_backup_path, abvm_path)
        else:
            kit.copy(abvm_path, abvm_backup_path)
        pattern_begin = re.compile(r"lookup MARK_BASE_%s \{$" % self.mI_ANCHOR_NAME)
        pattern_end = re.compile(r"\} MARK_BASE_%s;$" % self.mI_ANCHOR_NAME)
        match_dict = {match.number: match for match in self.matches}
        def _modify(matchobj):
            # matchobj.group(1): variant number; group(2): anchor x.
            match = match_dict[matchobj.group(1)]
            if match.bases:
                prefix = ""
                names = "[{}]".format(" ".join(i.glyphs[0].name for i in match.bases))
            else:
                # Unused variant: comment the rule out.
                prefix = "# "
                names = "_"
            x = int(matchobj.group(2))
            # NOTE(review): the offset uses the FIRST variant's width for
            # every variant -- confirm all variants share one width.
            x_with_offset = x - self.matches[0].mI_variant.width
            return "{}pos base {} <anchor {}".format(prefix, names, x_with_offset)
        with open(abvm_path, "r") as f:
            lines_modified = []
            is_inside_the_lookup = False
            for line in f:
                if is_inside_the_lookup:
                    if pattern_end.match(line):
                        is_inside_the_lookup = False
                        line_modified = line
                    else:
                        line_modified = re.sub(
                            r"pos base {}\.(\d\d) <anchor (-?\d+)".format(self.name_default),
                            _modify,
                            line,
                        )
                else:
                    if pattern_begin.match(line):
                        is_inside_the_lookup = True
                        line_modified = line
                    else:
                        line_modified = line
                lines_modified.append(line_modified)
        with open(abvm_path, "w") as f:
            f.writelines(lines_modified)
class FeatureReferences(BaseFeature):

    """Generates the top-level features file that includes the others."""

    def generate(self):
        lines = ["table head { FontRevision 1.000; } head;"]
        # Plain includes for the shared, per-family feature files.
        referenced_features = [
            self.project.feature_classes,
            self.project.feature_tables,
            self.project.feature_languagesystems,
            self.project.feature_gsub,
        ]
        for feature in referenced_features:
            for member in feature.file_group:
                if os.path.exists(member.path):
                    relative_path = os.path.relpath(member.path, self.style.directory)
                    lines.append("include ({});".format(relative_path))
        # Kerning goes into `dist` for Indic scripts, `kern` otherwise.
        kern_feature = self.project.feature_kern
        if os.path.exists(kern_feature.path):
            lines.append(
                "feature %(tag)s { include (%(path)s); } %(tag)s;" % {
                    "tag": "dist" if self.project.family.script.is_indic else "kern",
                    "path": os.path.relpath(kern_feature.path, self.style.directory),
                }
            )
        # Mark classes file sits next to the style and is included bare.
        if os.path.exists(os.path.join(self.style.directory, WriteFeaturesMarkFDK.kMarkClassesFileName)):
            lines.append("include ({});".format(WriteFeaturesMarkFDK.kMarkClassesFileName))
        # Per-style FDK-generated positioning features, wrapped in their
        # respective feature tags when present.
        for feature_name, filename in [
            ("mark", WriteFeaturesMarkFDK.kMarkFeatureFileName),
            ("mkmk", WriteFeaturesMarkFDK.kMkmkFeatureFileName),
            ("abvm", WriteFeaturesMarkFDK.kAbvmFeatureFileName),
            ("blwm", WriteFeaturesMarkFDK.kBlwmFeatureFileName),
        ]:
            if os.path.exists(os.path.join(self.style.directory, filename)):
                lines.append(
                    "feature %(tag)s { include (%(path)s); } %(tag)s;" % {
                        "tag": feature_name,
                        "path": filename,
                    }
                )
        with open(self.path, "w") as f:
            f.writelines(line + "\n" for line in lines)
class Feature(object):

    """Factory dispatching a feature name to its specialized class.

    Unknown names fall back to the plain BaseFeature.
    """

    NAME_TO_CLASS_MAP = {
        "classes": FeatureClasses,
        "tables": FeatureTables,
        "languagesystems": FeatureLanguagesystems,
        "kern": FeatureKern,
        "mark": FeatureMark,
        "mI_variant_matches": FeatureMatches,
        "WeightClass": FeatureWeightClass,
        "features": FeatureReferences,
    }

    def __new__(cls, project, name, style=None, filename_group=None):
        # Instantiate the mapped class directly; Feature itself is
        # never the resulting object.
        feature_class = cls.NAME_TO_CLASS_MAP.get(name, BaseFeature)
        return feature_class(project, name, style, filename_group)
|
import logging
log = logging.getLogger(__name__)
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from qcodes.instrument.parameter import (
ManualParameter, InstrumentRefParameter)
from qcodes.utils import validators as vals
from pycqed.analysis_v2.readout_analysis import Singleshot_Readout_Analysis_Qutrit
from pycqed.measurement import detector_functions as det
from pycqed.measurement import awg_sweep_functions as awg_swf
from pycqed.measurement import awg_sweep_functions_multi_qubit as awg_swf2
from pycqed.measurement import sweep_functions as swf
from pycqed.measurement.sweep_points import SweepPoints
from pycqed.measurement.calibration_points import CalibrationPoints
from pycqed.analysis_v3.processing_pipeline import ProcessingPipeline
from pycqed.measurement.pulse_sequences import single_qubit_tek_seq_elts as sq
from pycqed.measurement.pulse_sequences import fluxing_sequences as fsqs
from pycqed.analysis_v3 import pipeline_analysis as pla
from pycqed.analysis import measurement_analysis as ma
from pycqed.analysis_v2 import timedomain_analysis as tda
import pycqed.analysis.randomized_benchmarking_analysis as rbma
from pycqed.analysis import analysis_toolbox as a_tools
from pycqed.utilities.general import add_suffix_to_dict_keys
from pycqed.utilities.general import temporary_value
from pycqed.instrument_drivers.meta_instrument.qubit_objects.qubit_object \
import Qubit
from pycqed.measurement import optimization as opti
from pycqed.measurement import mc_parameter_wrapper
import pycqed.analysis_v2.spectroscopy_analysis as sa
from pycqed.utilities import math
try:
import pycqed.simulations.readout_mode_simulations_for_CLEAR_pulse \
as sim_CLEAR
except ModuleNotFoundError:
log.warning('"readout_mode_simulations_for_CLEAR_pulse" not imported.')
class QuDev_transmon(Qubit):
def __init__(self, name, **kw):
    """Register all qcodes parameters, operations and pulse parameters
    of a QuDev transmon qubit.

    Parameters are grouped below: referenced instruments, user-facing
    device characterization values, readout pulse, acquisition, drive
    pulses (ge and ef), spectroscopy, dc flux, and state preparation.
    """
    super().__init__(name, **kw)
    # Referenced instruments (resolved by name at runtime).
    self.add_parameter('instr_mc',
                       parameter_class=InstrumentRefParameter)
    self.add_parameter('instr_ge_lo',
                       parameter_class=InstrumentRefParameter)
    self.add_parameter('instr_pulsar',
                       parameter_class=InstrumentRefParameter)
    self.add_parameter('instr_uhf',
                       parameter_class=InstrumentRefParameter)
    self.add_parameter('instr_ro_lo',
                       parameter_class=InstrumentRefParameter)
    self.add_parameter('instr_trigger',
                       parameter_class=InstrumentRefParameter)
    # device parameters for user only
    # could be cleaned up
    self.add_parameter('f_RO_resonator', label='RO resonator frequency',
                       unit='Hz', initial_value=0,
                       parameter_class=ManualParameter)
    self.add_parameter('f_RO_purcell', label='RO purcell filter frequency',
                       unit='Hz', initial_value=0,
                       parameter_class=ManualParameter)
    self.add_parameter('RO_purcell_kappa', label='Purcell filter kappa',
                       unit='Hz', initial_value=0,
                       parameter_class=ManualParameter)
    self.add_parameter('RO_J_coupling', label='J coupling of RO resonator'
                                              'and purcell filter',
                       unit='Hz', initial_value=0,
                       parameter_class=ManualParameter)
    self.add_parameter('Q_RO_resonator', label='RO resonator Q factor',
                       initial_value=0, parameter_class=ManualParameter)
    self.add_parameter('ssro_contrast', unit='arb.', initial_value=0,
                       label='integrated g-e trace contrast',
                       parameter_class=ManualParameter)
    self.add_parameter('optimal_acquisition_delay', label='Optimal '
                       'acquisition delay', unit='s', initial_value=0,
                       parameter_class=ManualParameter)
    self.add_parameter('T1', label='Qubit relaxation', unit='s',
                       initial_value=0, parameter_class=ManualParameter)
    self.add_parameter('T1_ef', label='Qubit relaxation', unit='s',
                       initial_value=0, parameter_class=ManualParameter)
    self.add_parameter('T2', label='Qubit dephasing Echo', unit='s',
                       initial_value=0, parameter_class=ManualParameter)
    self.add_parameter('T2_ef', label='Qubit dephasing Echo', unit='s',
                       initial_value=0, parameter_class=ManualParameter)
    self.add_parameter('T2_star', label='Qubit dephasing', unit='s',
                       initial_value=0, parameter_class=ManualParameter)
    self.add_parameter('T2_star_ef', label='Qubit dephasing', unit='s',
                       initial_value=0, parameter_class=ManualParameter)
    self.add_parameter('anharmonicity', label='Qubit anharmonicity',
                       unit='Hz', initial_value=0,
                       parameter_class=ManualParameter)
    self.add_parameter('dynamic_phase', label='CZ dynamic phase',
                       unit='deg', initial_value=0,
                       parameter_class=ManualParameter)
    self.add_parameter('EC_qubit', label='Qubit EC', unit='Hz',
                       initial_value=0, parameter_class=ManualParameter)
    self.add_parameter('EJ_qubit', label='Qubit EJ', unit='Hz',
                       initial_value=0, parameter_class=ManualParameter)
    self.add_parameter('chi', unit='Hz', parameter_class=ManualParameter,
                       label='Chi')
    # readout pulse parameters
    self.add_parameter('ro_freq', unit='Hz',
                       parameter_class=ManualParameter,
                       label='Readout frequency')
    self.add_parameter('ro_I_offset', unit='V', initial_value=0,
                       parameter_class=ManualParameter,
                       label='DC offset for the readout I channel')
    self.add_parameter('ro_Q_offset', unit='V', initial_value=0,
                       parameter_class=ManualParameter,
                       label='DC offset for the readout Q channel')
    self.add_parameter('ro_lo_power', unit='dBm',
                       parameter_class=ManualParameter,
                       label='Readout pulse upconversion mixer LO power')
    # The 'RO' operation and its pulse parameters. Several accept a
    # list as well as a number to support multichromatic readout.
    self.add_operation('RO')
    self.add_pulse_parameter('RO', 'ro_pulse_type', 'pulse_type',
                             vals=vals.Enum('GaussFilteredCosIQPulse',
                                            'GaussFilteredCosIQPulseMultiChromatic'),
                             initial_value='GaussFilteredCosIQPulse')
    self.add_pulse_parameter('RO', 'ro_I_channel', 'I_channel',
                             initial_value=None, vals=vals.Strings())
    self.add_pulse_parameter('RO', 'ro_Q_channel', 'Q_channel',
                             initial_value=None, vals=vals.Strings())
    self.add_pulse_parameter('RO', 'ro_amp', 'amplitude',
                             initial_value=0.001,
                             vals=vals.MultiType(vals.Numbers(), vals.Lists()))
    self.add_pulse_parameter('RO', 'ro_length', 'pulse_length',
                             initial_value=2e-6, vals=vals.Numbers())
    self.add_pulse_parameter('RO', 'ro_delay', 'pulse_delay',
                             initial_value=0, vals=vals.Numbers())
    self.add_pulse_parameter('RO', 'ro_mod_freq', 'mod_frequency',
                             initial_value=100e6,
                             vals=vals.MultiType(vals.Numbers(), vals.Lists()))
    self.add_pulse_parameter('RO', 'ro_phase', 'phase',
                             initial_value=0,
                             vals=vals.MultiType(vals.Numbers(), vals.Lists()))
    self.add_pulse_parameter('RO', 'ro_phi_skew', 'phi_skew',
                             initial_value=0,
                             vals=vals.MultiType(vals.Numbers(), vals.Lists()))
    self.add_pulse_parameter('RO', 'ro_alpha', 'alpha',
                             initial_value=1,
                             vals=vals.MultiType(vals.Numbers(), vals.Lists()))
    self.add_pulse_parameter('RO', 'ro_sigma',
                             'gaussian_filter_sigma',
                             initial_value=10e-9, vals=vals.Numbers())
    self.add_pulse_parameter('RO', 'ro_nr_sigma', 'nr_sigma',
                             initial_value=5, vals=vals.Numbers())
    self.add_pulse_parameter('RO', 'ro_phase_lock', 'phase_lock',
                             initial_value=True, vals=vals.Bool())
    self.add_pulse_parameter('RO', 'ro_basis_rotation',
                             'basis_rotation', initial_value={},
                             docstring='Dynamic phase acquired by other '
                                       'qubits due to a measurement tone on'
                                       ' this qubit.',
                             label='RO pulse basis rotation dictionary',
                             vals=vals.Dict())
    # acquisition parameters
    self.add_parameter('acq_I_channel', initial_value=0,
                       vals=vals.Enum(0, 1, 2, 3, 4, 5, 6, 7, 8),
                       parameter_class=ManualParameter)
    self.add_parameter('acq_Q_channel', initial_value=1,
                       vals=vals.Enum(0, 1, 2, 3, 4, 5, 6, 7, 8, None),
                       parameter_class=ManualParameter)
    self.add_parameter('acq_averages', initial_value=1024,
                       vals=vals.Ints(0, 1000000),
                       parameter_class=ManualParameter)
    self.add_parameter('acq_shots', initial_value=4094,
                       docstring='Number of single shot measurements to do'
                                 'in single shot experiments.',
                       vals=vals.Ints(0, 1048576),
                       parameter_class=ManualParameter)
    # Max acquisition length is bounded by the digitizer memory
    # (4097 samples at 1.2 GS/s -- TODO confirm against UHF specs).
    self.add_parameter('acq_length', initial_value=2.2e-6,
                       vals=vals.Numbers(min_value=1e-8,
                                         max_value=4097/1.2e9),
                       parameter_class=ManualParameter)
    self.add_parameter('acq_IQ_angle', initial_value=0,
                       docstring='The phase of the integration weights '
                                 'when using SSB, DSB or square_rot '
                                 'integration weights',
                       label='Acquisition IQ angle', unit='rad',
                       parameter_class=ManualParameter)
    self.add_parameter('acq_weights_I', vals=vals.Arrays(),
                       label='Optimized weights for I channel',
                       parameter_class=ManualParameter)
    self.add_parameter('acq_weights_Q', vals=vals.Arrays(),
                       label='Optimized weights for Q channel',
                       parameter_class=ManualParameter)
    self.add_parameter('acq_weights_type', initial_value='SSB',
                       vals=vals.Enum('SSB', 'DSB', 'optimal',
                                      'square_rot', 'manual',
                                      'optimal_qutrit'),
                       docstring=(
                           'Determines what type of integration weights to '
                           'use: \n\tSSB: Single sideband demodulation\n\t'
                           'DSB: Double sideband demodulation\n\toptimal: '
                           'waveforms specified in "ro_acq_weight_func_I" '
                           'and "ro_acq_weight_func_Q"\n\tsquare_rot: uses '
                           'a single integration channel with boxcar '
                           'weights'),
                       parameter_class=ManualParameter)
    self.add_parameter('acq_weights_I2', vals=vals.Arrays(),
                       label='Optimized weights for second integration '
                             'channel I',
                       docstring=("Used for double weighted integration "
                                  "during qutrit readout"),
                       parameter_class=ManualParameter)
    self.add_parameter('acq_weights_Q2', vals=vals.Arrays(),
                       label='Optimized weights for second integration '
                             'channel Q',
                       docstring=("Used for double weighted integration "
                                  "during qutrit readout"),
                       parameter_class=ManualParameter)
    self.add_parameter('acq_weights_basis', vals=vals.Lists(),
                       label="weight basis used",
                       docstring=("Used to log the weights basis for "
                                  "integration during qutrit readout. E.g."
                                  " ['ge', 'gf'] or ['ge', 'ortho']."),
                       parameter_class=ManualParameter)
    self.add_parameter('acq_classifier_params', vals=vals.Dict(),
                       label='Parameters for the qutrit classifier.',
                       docstring=("Used in the int_avg_classif_det to "
                                  "classify single shots into g, e, f."),
                       parameter_class=ManualParameter)
    self.add_parameter('acq_state_prob_mtx', vals=vals.Arrays(),
                       label='SSRO correction matrix.',
                       docstring=("Matrix of measured vs prepared qubit "
                                  "states."),
                       parameter_class=ManualParameter)
    # qubit drive pulse parameters
    self.add_parameter('ge_freq', label='Qubit drive frequency', unit='Hz',
                       initial_value=0, parameter_class=ManualParameter)
    self.add_parameter('ge_lo_power', unit='dBm',
                       parameter_class=ManualParameter,
                       label='Qubit drive pulse mixer LO power')
    self.add_parameter('ge_I_offset', unit='V', initial_value=0,
                       parameter_class=ManualParameter,
                       label='DC offset for the drive line I channel')
    self.add_parameter('ge_Q_offset', unit='V', initial_value=0,
                       parameter_class=ManualParameter,
                       label='DC offset for the drive line Q channel')
    # add drive pulse parameters
    self.add_operation('X180')
    self.add_pulse_parameter('X180', 'ge_pulse_type', 'pulse_type',
                             initial_value='SSB_DRAG_pulse',
                             vals=vals.Enum('SSB_DRAG_pulse'))
    self.add_pulse_parameter('X180', 'ge_I_channel', 'I_channel',
                             initial_value=None, vals=vals.Strings())
    self.add_pulse_parameter('X180', 'ge_Q_channel', 'Q_channel',
                             initial_value=None, vals=vals.Strings())
    self.add_pulse_parameter('X180', 'ge_amp180', 'amplitude',
                             initial_value=0.001, vals=vals.Numbers())
    self.add_pulse_parameter('X180', 'ge_amp90_scale', 'amp90_scale',
                             initial_value=0.5, vals=vals.Numbers(0, 1))
    self.add_pulse_parameter('X180', 'ge_delay', 'pulse_delay',
                             initial_value=0, vals=vals.Numbers())
    self.add_pulse_parameter('X180', 'ge_sigma', 'sigma',
                             initial_value=10e-9, vals=vals.Numbers())
    self.add_pulse_parameter('X180', 'ge_nr_sigma', 'nr_sigma',
                             initial_value=5, vals=vals.Numbers())
    self.add_pulse_parameter('X180', 'ge_motzoi', 'motzoi',
                             initial_value=0, vals=vals.Numbers())
    self.add_pulse_parameter('X180', 'ge_mod_freq', 'mod_frequency',
                             initial_value=-100e6, vals=vals.Numbers())
    self.add_pulse_parameter('X180', 'ge_phi_skew', 'phi_skew',
                             initial_value=0, vals=vals.Numbers())
    self.add_pulse_parameter('X180', 'ge_alpha', 'alpha',
                             initial_value=1, vals=vals.Numbers())
    self.add_pulse_parameter('X180', 'ge_X_phase', 'phase',
                             initial_value=0, vals=vals.Numbers())
    # qubit 2nd excitation drive pulse parameters
    self.add_parameter('ef_freq', label='Qubit ef drive frequency',
                       unit='Hz', initial_value=0,
                       parameter_class=ManualParameter)
    self.add_operation('X180_ef')
    self.add_pulse_parameter('X180_ef', 'ef_pulse_type', 'pulse_type',
                             initial_value='SSB_DRAG_pulse',
                             vals=vals.Enum('SSB_DRAG_pulse'))
    self.add_pulse_parameter('X180_ef', 'ef_amp180', 'amplitude',
                             initial_value=0.001, vals=vals.Numbers())
    self.add_pulse_parameter('X180_ef', 'ef_amp90_scale', 'amp90_scale',
                             initial_value=0.5, vals=vals.Numbers(0, 1))
    self.add_pulse_parameter('X180_ef', 'ef_delay', 'pulse_delay',
                             initial_value=0, vals=vals.Numbers())
    self.add_pulse_parameter('X180_ef', 'ef_sigma', 'sigma',
                             initial_value=10e-9, vals=vals.Numbers())
    self.add_pulse_parameter('X180_ef', 'ef_nr_sigma', 'nr_sigma',
                             initial_value=5, vals=vals.Numbers())
    self.add_pulse_parameter('X180_ef', 'ef_motzoi', 'motzoi',
                             initial_value=0, vals=vals.Numbers())
    self.add_pulse_parameter('X180_ef', 'ef_X_phase', 'phase',
                             initial_value=0, vals=vals.Numbers())
    # add qubit spectroscopy parameters
    self.add_parameter('spec_power', unit='dBm', initial_value=-20,
                       parameter_class=ManualParameter,
                       label='Qubit spectroscopy power')
    self.add_operation('Spec')
    self.add_pulse_parameter('Spec', 'spec_pulse_type', 'pulse_type',
                             initial_value='SquarePulse',
                             vals=vals.Enum('SquarePulse'))
    self.add_pulse_parameter('Spec', 'spec_marker_channel', 'channel',
                             initial_value=None, vals=vals.Strings())
    self.add_pulse_parameter('Spec', 'spec_marker_amp', 'amplitude',
                             vals=vals.Numbers(), initial_value=1)
    self.add_pulse_parameter('Spec', 'spec_marker_length', 'length',
                             initial_value=5e-6, vals=vals.Numbers())
    self.add_pulse_parameter('Spec', 'spec_marker_delay', 'pulse_delay',
                             vals=vals.Numbers(), initial_value=0)
    # dc flux parameters
    self.add_parameter('dc_flux_parameter', initial_value=None,
                       label='QCoDeS parameter to sweep the dc flux',
                       parameter_class=ManualParameter)
    # Pulse preparation parameters
    DEFAULT_PREP_PARAMS = dict(preparation_type='wait',
                               post_ro_wait=1e-6, reset_reps=1,
                               final_reset_pulse=True,
                               threshold_mapping={
                                   self.name: {0: 'g', 1: 'e'}})
    self.add_parameter('preparation_params', parameter_class=ManualParameter,
                       initial_value=DEFAULT_PREP_PARAMS, vals=vals.Dict())
def get_idn(self):
    """Return the instrument identification dict for this qubit object."""
    identification = {
        'driver': str(self.__class__),
        'name': self.name,
    }
    return identification
def update_detector_functions(self):
    """(Re)build the UHFQC detector objects from current parameters.

    Creates int_log_det, int_avg_classif_det, int_avg_det, dig_avg_det,
    inp_avg_det, dig_log_det and int_avg_det_spec so that later changes
    to acquisition parameters take effect on the next call.
    """
    # A single acquisition channel is enough unless the weights type
    # explicitly needs both quadratures.
    if self.acq_Q_channel() is None or \
            self.acq_weights_type() not in ['SSB', 'DSB', 'optimal_qutrit']:
        channels = [self.acq_I_channel()]
    else:
        channels = [self.acq_I_channel(), self.acq_Q_channel()]
    # Raw single-shot logging.
    self.int_log_det = det.UHFQC_integration_logging_det(
        UHFQC=self.instr_uhf.get_instr(),
        AWG=self.instr_pulsar.get_instr(),
        channels=channels, nr_shots=self.acq_shots(),
        integration_length=self.acq_length(),
        result_logging_mode='raw')
    # Shot-by-shot classification into g/e/f using the stored
    # classifier parameters and assignment-correction matrix.
    self.int_avg_classif_det = det.UHFQC_classifier_detector(
        UHFQC=self.instr_uhf.get_instr(),
        AWG=self.instr_pulsar.get_instr(),
        channels=channels, nr_shots=self.acq_averages(),
        integration_length=self.acq_length(),
        get_values_function_kwargs={
            'classifier_params': self.acq_classifier_params(),
            'state_prob_mtx': self.acq_state_prob_mtx()
        })
    # Averaged integrated readout, raw values.
    self.int_avg_det = det.UHFQC_integrated_average_detector(
        UHFQC=self.instr_uhf.get_instr(),
        AWG=self.instr_pulsar.get_instr(),
        channels=channels, nr_averages=self.acq_averages(),
        integration_length=self.acq_length(),
        result_logging_mode='raw')
    # Averaged integrated readout after thresholding (digitized).
    self.dig_avg_det = det.UHFQC_integrated_average_detector(
        UHFQC=self.instr_uhf.get_instr(),
        AWG=self.instr_pulsar.get_instr(),
        channels=channels, nr_averages=self.acq_averages(),
        integration_length=self.acq_length(),
        result_logging_mode='digitized')
    # Time-trace (input) averaging; sample count derives from the
    # acquisition length and the UHF clock frequency.
    nr_samples = int(self.acq_length() *
                     self.instr_uhf.get_instr().clock_freq())
    self.inp_avg_det = det.UHFQC_input_average_detector(
        UHFQC=self.instr_uhf.get_instr(),
        AWG=self.instr_pulsar.get_instr(),
        nr_averages=self.acq_averages(),
        nr_samples=nr_samples)
    # Thresholded single-shot logging.
    self.dig_log_det = det.UHFQC_integration_logging_det(
        UHFQC=self.instr_uhf.get_instr(),
        AWG=self.instr_pulsar.get_instr(),
        channels=channels, nr_shots=self.acq_shots(),
        integration_length=self.acq_length(),
        result_logging_mode='digitized')
    # Spectroscopy detector: both quadratures, single soft average per
    # point. NOTE(review): AWG is the UHF itself here, not the pulsar --
    # presumably intentional for spectroscopy; confirm.
    self.int_avg_det_spec = det.UHFQC_integrated_average_detector(
        UHFQC=self.instr_uhf.get_instr(),
        AWG=self.instr_uhf.get_instr(),
        channels=[self.acq_I_channel(), self.acq_Q_channel()],
        nr_averages=self.acq_averages(),
        integration_length=self.acq_length(),
        result_logging_mode='raw', real_imag=False, single_int_avg=True)
    def prepare(self, drive='timedomain'):
        """Prepare the physical instruments for a measurement.

        Configures the readout and qubit-drive local oscillators according
        to the requested drive mode, sets the AWG channel DC offsets and
        refreshes the detector functions and readout integration weights.

        Args:
            drive: drive configuration. One of None (drive LO switched
                off), 'continuous_spec', 'pulsed_spec' or 'timedomain'
                (default).

        Raises:
            ValueError: if drive is not one of the valid options.
        """
        # configure readout local oscillators
        lo = self.instr_ro_lo
        if lo() is not None:
            lo.get_instr().pulsemod_state('Off')
            lo.get_instr().power(self.ro_lo_power())
            # in case of multichromatic readout, take first ro freq, else just
            # wrap the frequency in a list and take the first
            if np.ndim(self.ro_freq()) == 0:
                ro_freq = [self.ro_freq()]
            else:
                ro_freq = self.ro_freq()
            if np.ndim(self.ro_mod_freq()) == 0:
                ro_mod_freq = [self.ro_mod_freq()]
            else:
                ro_mod_freq = self.ro_mod_freq()
            # LO is placed below the readout frequency by the modulation freq
            lo.get_instr().frequency(ro_freq[0] - ro_mod_freq[0])
            lo.get_instr().on()
        # configure qubit drive local oscillator
        lo = self.instr_ge_lo
        if lo() is not None:
            if drive is None:
                lo.get_instr().off()
            elif drive == 'continuous_spec':
                lo.get_instr().pulsemod_state('Off')
                lo.get_instr().power(self.spec_power())
                # spectroscopy drives directly at the qubit frequency
                lo.get_instr().frequency(self.ge_freq())
                lo.get_instr().on()
            elif drive == 'pulsed_spec':
                lo.get_instr().pulsemod_state('On')
                lo.get_instr().power(self.spec_power())
                lo.get_instr().frequency(self.ge_freq())
                lo.get_instr().on()
            elif drive == 'timedomain':
                lo.get_instr().pulsemod_state('Off')
                lo.get_instr().power(self.ge_lo_power())
                # LO below the qubit frequency by the drive modulation freq
                lo.get_instr().frequency(self.ge_freq() - self.ge_mod_freq())
                lo.get_instr().on()
            else:
                raise ValueError("Invalid drive parameter '{}'".format(drive)
                                 + ". Valid options are None, 'continuous_spec"
                                 + "', 'pulsed_spec' and 'timedomain'.")
        # set awg channel dc offsets
        offset_list = [('ro_I_channel', 'ro_I_offset'),
                       ('ro_Q_channel', 'ro_Q_offset')]
        if drive == 'timedomain':
            # drive channel offsets only matter when actually driving
            offset_list += [('ge_I_channel', 'ge_I_offset'),
                            ('ge_Q_channel', 'ge_Q_offset')]
        for channel_par, offset_par in offset_list:
            self.instr_pulsar.get_instr().set(
                self.get(channel_par) + '_offset', self.get(offset_par))
        # other preparations
        self.update_detector_functions()
        self.set_readout_weights()
    def set_readout_weights(self, weights_type=None, f_mod=None):
        """Program the UHF integration weights for this qubit's readout.

        Args:
            weights_type: one of 'manual' (leave the currently programmed
                weights untouched), 'optimal' (stored optimal weights on
                the I channel only), 'optimal_qutrit' (two sets of stored
                optimal weights on the I and Q channels), 'SSB', 'DSB' or
                'square_rot' (cosine/sine demodulation weights generated
                at f_mod). Defaults to self.acq_weights_type().
            f_mod: modulation frequency used to generate the demodulation
                weights. Defaults to self.ro_mod_freq().

        Raises:
            KeyError: if weights_type is not one of the recognized types.
        """
        if weights_type is None:
            weights_type = self.acq_weights_type()
        if f_mod is None:
            f_mod = self.ro_mod_freq()
        if weights_type == 'manual':
            pass
        elif weights_type == 'optimal':
            if (self.acq_weights_I() is None or self.acq_weights_Q() is None):
                log.warning('Optimal weights are None, not setting '
                            'integration weights')
                return
            # When optimal weights are used, only the RO I weight
            # channel is used
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_real'.format(
                self.acq_I_channel()), self.acq_weights_I().copy())
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_imag'.format(
                self.acq_I_channel()), self.acq_weights_Q().copy())
            self.instr_uhf.get_instr().set('qas_0_rotations_{}'.format(
                self.acq_I_channel()), 1.0-1.0j)
        elif weights_type == 'optimal_qutrit':
            # all four stored weight functions must be present
            for w_f in [self.acq_weights_I, self.acq_weights_Q,
                        self.acq_weights_I2, self.acq_weights_Q2]:
                if w_f() is None:
                    log.warning('The optimal weights {} are None. '
                                '\nNot setting integration weights.'
                                .format(w_f.name))
                    return
            # if all weights are not None, set first integration weights (real
            # and imag) on channel I and second integration weights on channel
            # Q.
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_real'.format(
                self.acq_I_channel()),
                self.acq_weights_I().copy())
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_imag'.format(
                self.acq_I_channel()),
                self.acq_weights_Q().copy())
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_real'.format(
                self.acq_Q_channel()),
                self.acq_weights_I2().copy())
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_imag'.format(
                self.acq_Q_channel()),
                self.acq_weights_Q2().copy())
            self.instr_uhf.get_instr().set('qas_0_rotations_{}'.format(
                self.acq_I_channel()), 1.0-1.0j)
            self.instr_uhf.get_instr().set('qas_0_rotations_{}'.format(
                self.acq_Q_channel()), 1.0-1.0j)
        else:
            # generate cosine/sine demodulation weights; 4097 samples at the
            # 1.8e9 Sa/s rate used here — presumably the maximal UHF
            # integration window (TODO confirm against instrument specs)
            tbase = np.arange(0, 4097 / 1.8e9, 1 / 1.8e9)
            theta = self.acq_IQ_angle()
            cosI = np.array(np.cos(2 * np.pi * f_mod * tbase + theta))
            sinI = np.array(np.sin(2 * np.pi * f_mod * tbase + theta))
            c1 = self.acq_I_channel()
            c2 = self.acq_Q_channel()
            uhf = self.instr_uhf.get_instr()
            if weights_type == 'SSB':
                uhf.set('qas_0_integration_weights_{}_real'.format(c1), cosI)
                uhf.set('qas_0_rotations_{}'.format(c1), 1.0+1.0j)
                uhf.set('qas_0_integration_weights_{}_real'.format(c2), sinI)
                uhf.set('qas_0_rotations_{}'.format(c2), 1.0-1.0j)
                uhf.set('qas_0_integration_weights_{}_imag'.format(c1), sinI)
                uhf.set('qas_0_integration_weights_{}_imag'.format(c2), cosI)
            elif weights_type == 'DSB':
                uhf.set('qas_0_integration_weights_{}_real'.format(c1), cosI)
                uhf.set('qas_0_rotations_{}'.format(c1), 1.0+0j)
                uhf.set('qas_0_integration_weights_{}_real'.format(c2), sinI)
                uhf.set('qas_0_rotations_{}'.format(c2), 1.0+0j)
            elif weights_type == 'square_rot':
                # single-channel demodulation with rotated square weights
                uhf.set('qas_0_integration_weights_{}_real'.format(c1), cosI)
                uhf.set('qas_0_rotations_{}'.format(c1), 1.0+1.0j)
                uhf.set('qas_0_integration_weights_{}_imag'.format(c1), sinI)
            else:
                raise KeyError('Invalid weights type: {}'.format(weights_type))
def get_spec_pars(self):
return self.get_operation_dict()['Spec ' + self.name]
def get_ro_pars(self):
return self.get_operation_dict()['RO ' + self.name]
def get_acq_pars(self):
return self.get_operation_dict()['Acq ' + self.name]
def get_ge_pars(self):
return self.get_operation_dict()['X180 ' + self.name]
def get_ef_pars(self):
return self.get_operation_dict()['X180_ef ' + self.name]
    def get_operation_dict(self, operation_dict=None):
        """Build the dictionary of pulse parameters for this qubit's
        operations, extending the dict produced by the parent class.

        Args:
            operation_dict: optional dict to update; a fresh dict is
                created when None.

        Returns:
            The updated operation dictionary.
        """
        if operation_dict is None:
            operation_dict = {}
        operation_dict = super().get_operation_dict(operation_dict)
        # tag each operation with its type
        operation_dict['Spec ' + self.name]['operation_type'] = 'Other'
        operation_dict['RO ' + self.name]['operation_type'] = 'RO'
        operation_dict['X180 ' + self.name]['operation_type'] = 'MW'
        operation_dict['X180_ef ' + self.name]['operation_type'] = 'MW'
        operation_dict['X180 ' + self.name]['basis'] = self.name
        operation_dict['X180_ef ' + self.name]['basis'] = self.name + \
                                                          '_ef'
        # the ef pulse shares the drive line and mixer corrections with
        # the ge pulse
        operation_dict['X180_ef ' + self.name]['I_channel'] = \
            operation_dict['X180 ' + self.name]['I_channel']
        operation_dict['X180_ef ' + self.name]['Q_channel'] = \
            operation_dict['X180 ' + self.name]['Q_channel']
        operation_dict['X180_ef ' + self.name]['phi_skew'] = \
            operation_dict['X180 ' + self.name]['phi_skew']
        operation_dict['X180_ef ' + self.name]['alpha'] = \
            operation_dict['X180 ' + self.name]['alpha']
        # the acquisition operation is a zero-amplitude copy of the readout
        operation_dict['Acq ' + self.name] = deepcopy(
            operation_dict['RO ' + self.name])
        operation_dict['Acq ' + self.name]['amplitude'] = 0
        if self.ef_freq() == 0:
            operation_dict['X180_ef ' + self.name]['mod_frequency'] = None
        else:
            # ef modulation frequency relative to the shared drive LO
            operation_dict['X180_ef ' + self.name]['mod_frequency'] = \
                self.ef_freq() - self.ge_freq() + self.ge_mod_freq()
        # expand the pi pulses into the full single-qubit pulse dictionaries
        operation_dict.update(add_suffix_to_dict_keys(
            sq.get_pulse_dict_from_pars(
                operation_dict['X180 ' + self.name]), ' ' + self.name))
        operation_dict.update(add_suffix_to_dict_keys(
            sq.get_pulse_dict_from_pars(
                operation_dict['X180_ef ' + self.name]), '_ef ' + self.name))
        if np.ndim(self.ro_freq()) != 0:
            # multichromatic readout: one modulation frequency per ro_freq
            delta_freqs = np.diff(self.ro_freq(), prepend=self.ro_freq()[0])
            mods = [self.ro_mod_freq() + d for d in delta_freqs]
            operation_dict['RO ' + self.name]['mod_frequency'] = mods
        return operation_dict
def swf_ro_freq_lo(self):
return swf.Offset_Sweep(
self.instr_ro_lo.get_instr().frequency,
-self.ro_mod_freq(),
name='Readout frequency',
parameter_name='Readout frequency')
def swf_ro_mod_freq(self):
return swf.Offset_Sweep(
self.ro_mod_freq,
self.instr_ro_lo.get_instr().frequency(),
name='Readout frequency',
parameter_name='Readout frequency')
    def measure_resonator_spectroscopy(self, freqs, sweep_points_2D=None,
                                       sweep_function_2D=None,
                                       trigger_separation=3e-6,
                                       upload=True, analyze=True,
                                       close_fig=True, label=None):
        """Varies the frequency of the microwave source to the resonator and
        measures the transmittance.

        Args:
            freqs: array of readout frequencies in Hz
            sweep_points_2D: optional sweep points for a 2D measurement
            sweep_function_2D: optional second sweep function; if given the
                measurement runs in '2D' mode
            trigger_separation: trigger period (in s) set temporarily on the
                trigger instrument during the scan
            upload: whether to upload the readout sequence to the AWG
            analyze: whether to run MeasurementAnalysis afterwards
            close_fig: whether the analysis closes its figure
            label: measurement label; auto-generated if None
        """
        if np.any(freqs < 500e6):
            log.warning(('Some of the values in the freqs array might be '
                         'too small. The units should be Hz.'))
        if label is None:
            if sweep_function_2D is not None:
                label = 'resonator_scan_2d' + self.msmt_suffix
            else:
                label = 'resonator_scan' + self.msmt_suffix
        # no qubit drive needed for a resonator scan
        self.prepare(drive=None)
        if upload:
            sq.pulse_list_list_seq([[self.get_ro_pars()]])
        MC = self.instr_mc.get_instr()
        # sweep the readout frequency via the LO offset sweep function
        MC.set_sweep_function(self.swf_ro_freq_lo())
        if sweep_function_2D is not None:
            MC.set_sweep_function_2D(sweep_function_2D)
            mode = '2D'
        else:
            mode = '1D'
        MC.set_sweep_points(freqs)
        if sweep_points_2D is not None:
            MC.set_sweep_points_2D(sweep_points_2D)
        MC.set_detector_function(self.int_avg_det_spec)
        # temporarily slow down the trigger to the requested separation
        with temporary_value(self.instr_trigger.get_instr().pulse_period,
                             trigger_separation):
            self.instr_pulsar.get_instr().start(exclude=[self.instr_uhf()])
            MC.run(name=label, mode=mode)
            self.instr_pulsar.get_instr().stop()
        if analyze:
            ma.MeasurementAnalysis(close_fig=close_fig, qb_name=self.name,
                                   TwoD=(mode == '2D'))
    def measure_qubit_spectroscopy(self, freqs, sweep_points_2D=None,
            sweep_function_2D=None, pulsed=True, trigger_separation=13e-6,
            upload=True, analyze=True, close_fig=True, label=None):
        """Varies the qubit drive frequency and measures the resonator
        transmittance.

        Args:
            freqs: array of drive frequencies in Hz
            sweep_points_2D: optional sweep points for a 2D measurement
            sweep_function_2D: optional second sweep function; if given the
                measurement runs in '2D' mode
            pulsed: whether to use pulsed (True) or continuous (False)
                spectroscopy
            trigger_separation: trigger period (in s) set temporarily on the
                trigger instrument during the scan
            upload: whether to upload the sequence to the AWG
            analyze: whether to run MeasurementAnalysis afterwards
            close_fig: whether the analysis closes its figure
            label: measurement label; auto-generated if None
        """
        if np.any(freqs < 500e6):
            log.warning(('Some of the values in the freqs array might be '
                         'too small. The units should be Hz.'))
        if pulsed:
            if label is None:
                if sweep_function_2D is not None:
                    label = 'pulsed_spec_2d' + self.msmt_suffix
                else:
                    label = 'pulsed_spec' + self.msmt_suffix
            self.prepare(drive='pulsed_spec')
            if upload:
                # spectroscopy pulse followed by the readout pulse
                sq.pulse_list_list_seq([[self.get_spec_pars(),
                                         self.get_ro_pars()]])
        else:
            if label is None:
                if sweep_function_2D is not None:
                    label = 'continuous_spec_2d' + self.msmt_suffix
                else:
                    label = 'continuous_spec' + self.msmt_suffix
            self.prepare(drive='continuous_spec')
            if upload:
                # continuous drive: only the readout pulse is sequenced
                sq.pulse_list_list_seq([[self.get_ro_pars()]])
        MC = self.instr_mc.get_instr()
        # sweep the drive LO frequency directly
        MC.set_sweep_function(self.instr_ge_lo.get_instr().frequency)
        if sweep_function_2D is not None:
            MC.set_sweep_function_2D(sweep_function_2D)
            mode = '2D'
        else:
            mode = '1D'
        MC.set_sweep_points(freqs)
        if sweep_points_2D is not None:
            MC.set_sweep_points_2D(sweep_points_2D)
        MC.set_detector_function(self.int_avg_det_spec)
        # temporarily slow down the trigger to the requested separation
        with temporary_value(self.instr_trigger.get_instr().pulse_period,
                             trigger_separation):
            self.instr_pulsar.get_instr().start(exclude=[self.instr_uhf()])
            MC.run(name=label, mode=mode)
            self.instr_pulsar.get_instr().stop()
        if analyze:
            ma.MeasurementAnalysis(close_fig=close_fig, qb_name=self.name,
                                   TwoD=(mode == '2D'))
    def measure_rabi(self, amps, analyze=True, upload=True, label=None, n=1,
                     last_ge_pulse=False, n_cal_points_per_state=2,
                     cal_states='auto', for_ef=False, classified_ro=False,
                     prep_params=None, exp_metadata=None):
        """
        Varies the amplitude of the qubit drive pulse and measures the readout
        resonator transmission.

        Args:
            amps: array of drive pulse amplitudes
            analyze: whether to run MultiQubit_TimeDomain_Analysis at the end
            upload: whether or not to upload the sequence to the AWG
            label: the measurement label; auto-generated from the settings
                if None
            n: the number of times the drive pulses with the same
                amplitude should be repeated in each measurement
            last_ge_pulse: whether to append a final ge pulse (relevant for
                ef measurements)
            n_cal_points_per_state: number of calibration segments per
                calibration state
            cal_states: which calibration states to use; 'auto' lets
                CalibrationPoints guess based on for_ef
            for_ef: whether to perform the Rabi experiment on the ef
                transition
            classified_ro: whether to use the classifier detector
            prep_params: qubit preparation parameters; defaults to
                self.preparation_params()
            exp_metadata: dict of additional experiment metadata
        """
        if prep_params is None:
            prep_params = self.preparation_params()
        # Define the measurement label
        if label is None:
            label = 'Rabi_ef' if for_ef else 'Rabi'
            if n != 1:
                label += f'-n{n}'
            if classified_ro:
                label += '_classified'
            if 'active' in prep_params['preparation_type']:
                label += '_reset'
            label += self.msmt_suffix
        # Prepare the physical instruments for a time domain measurement
        self.prepare(drive='timedomain')
        MC = self.instr_mc.get_instr()
        cal_states = CalibrationPoints.guess_cal_states(cal_states, for_ef)
        cp = CalibrationPoints.single_qubit(self.name, cal_states,
                                            n_per_state=n_cal_points_per_state)
        seq, sweep_points = sq.rabi_seq_active_reset(
            amps=amps, qb_name=self.name, cal_points=cp, n=n, for_ef=for_ef,
            operation_dict=self.get_operation_dict(), upload=False,
            last_ge_pulse=last_ge_pulse, prep_params=prep_params)
        # Specify the sweep function, the sweep points,
        # and the detector function, and run the measurement
        MC.set_sweep_function(awg_swf.SegmentHardSweep(
            sequence=seq, upload=upload, parameter_name='Amplitude', unit='V'))
        MC.set_sweep_points(sweep_points)
        MC.set_detector_function(self.int_avg_classif_det if classified_ro else
                                 self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'sweep_points_dict': {self.name: amps},
                             'preparation_params': prep_params,
                             'cal_points': repr(cp),
                             'rotate': False if classified_ro else
                                len(cp.states) != 0,
                             'last_ge_pulses': [last_ge_pulse],
                             'data_to_fit': {self.name: 'pf' if for_ef else 'pe'},
                             "sweep_name": "Amplitude",
                             "sweep_unit": "V"})
        MC.run(label, exp_metadata=exp_metadata)
        # Create a MeasurementAnalysis object for this measurement
        if analyze:
            tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
    def measure_rabi_amp90(self, scales=np.linspace(0.3, 0.7, 31), n=1,
                           MC=None, analyze=True, close_fig=True, upload=True):
        """Sweep the amp90 scaling of the drive pulse and measure the readout.

        Args:
            scales: array of amp90 scaling factors to sweep
            n: number of pulse repetitions per segment
            MC: MeasurementControl instance; defaults to self.instr_mc
            analyze: currently unused in this method
            close_fig: currently unused in this method
            upload: whether to upload the sequence to the AWG
        """
        self.prepare(drive='timedomain')
        if MC is None:
            MC = self.instr_mc.get_instr()
        MC.set_sweep_function(awg_swf.Rabi_amp90(
            pulse_pars=self.get_ge_pars(), RO_pars=self.get_ro_pars(), n=n,
            upload=upload))
        MC.set_sweep_points(scales)
        MC.set_detector_function(self.int_avg_det)
        MC.run('Rabi_amp90_scales_n{}'.format(n)+ self.msmt_suffix)
    def measure_T1(self, times=None, analyze=True, upload=True,
                   last_ge_pulse=False, n_cal_points_per_state=2,
                   cal_states='auto', for_ef=False, classified_ro=False,
                   prep_params=None, label=None,
                   exp_metadata=None):
        """Measure the qubit relaxation time T1.

        Args:
            times: array of wait times (in seconds) after the pi pulse
            analyze: whether to run MultiQubit_TimeDomain_Analysis afterwards
            upload: whether to upload the sequence to the AWG
            last_ge_pulse: whether to append a final ge pulse (relevant for
                ef measurements)
            n_cal_points_per_state: number of calibration segments per state
            cal_states: which calibration states to use; 'auto' lets
                CalibrationPoints guess based on for_ef
            for_ef: whether to measure T1 of the ef transition
            classified_ro: whether to use the classifier detector
            prep_params: qubit preparation parameters; defaults to
                self.preparation_params()
            label: measurement label; auto-generated if None
            exp_metadata: dict of additional experiment metadata

        Raises:
            ValueError: if times is None.
        """
        if times is None:
            raise ValueError("Unspecified times for measure_T1")
        if np.any(times > 1e-3):
            log.warning('The values in the times array might be too large.'
                        'The units should be seconds.')
        self.prepare(drive='timedomain')
        MC = self.instr_mc.get_instr()
        if prep_params is None:
            prep_params = self.preparation_params()
        # Define the measurement label
        if label is None:
            label = f'T1{"_ef" if for_ef else ""}' + self.msmt_suffix
        cal_states = CalibrationPoints.guess_cal_states(cal_states, for_ef)
        cp = CalibrationPoints.single_qubit(self.name, cal_states,
                                            n_per_state=n_cal_points_per_state)
        seq, sweep_points = sq.t1_active_reset(
            times=times, qb_name=self.name, cal_points=cp, for_ef=for_ef,
            operation_dict=self.get_operation_dict(), upload=False,
            last_ge_pulse=last_ge_pulse, prep_params=prep_params)
        MC.set_sweep_function(awg_swf.SegmentHardSweep(
            sequence=seq, upload=upload, parameter_name='Time', unit='s'))
        MC.set_sweep_points(sweep_points)
        MC.set_detector_function(self.int_avg_classif_det if classified_ro else
                                 self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'sweep_points_dict': {self.name: times},
                             'preparation_params': prep_params,
                             'cal_points': repr(cp),
                             'rotate': False if classified_ro else
                                len(cp.states) != 0,
                             'last_ge_pulses': [last_ge_pulse],
                             'data_to_fit': {self.name: 'pf' if for_ef else 'pe'},
                             "sweep_name": "Time",
                             "sweep_unit": "s"})
        MC.run(label, exp_metadata=exp_metadata)
        if analyze:
            tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
    def measure_qscale(self, qscales=None, analyze=True, upload=True, label=None,
                       cal_states="auto", n_cal_points_per_state=2,
                       last_ge_pulse=False, for_ef=False, classified_ro=False,
                       prep_params=None, exp_metadata=None):
        """Calibrate the DRAG qscale parameter of the drive pulse.

        Args:
            qscales: array of qscale values; each value must appear three
                times in a row (the sequence applies three different pulse
                combinations per value). Only the first triple is validated.
            analyze: whether to run MultiQubit_TimeDomain_Analysis afterwards
            upload: whether to upload the sequence to the AWG
            label: measurement label; auto-generated if None
            cal_states: which calibration states to use; 'auto' lets
                CalibrationPoints guess based on for_ef
            n_cal_points_per_state: number of calibration segments per state
            last_ge_pulse: whether to append a final ge pulse (relevant for
                ef measurements)
            for_ef: whether to calibrate the ef-transition pulse
            classified_ro: whether to use the classifier detector
            prep_params: qubit preparation parameters; defaults to
                self.preparation_params()
            exp_metadata: dict of additional experiment metadata

        Raises:
            ValueError: if qscales is None or the first three values differ.
        """
        if qscales is None:
            raise ValueError("Unspecified qscale values for measure_qscale")
        uniques = np.unique(qscales[range(3)])
        if uniques.size > 1:
            raise ValueError("The values in the qscales array are not repeated "
                             "3 times.")
        if prep_params is None:
            prep_params = self.preparation_params()
        log.debug(f"Preparation Parameters:\n{prep_params}")
        # Define the measurement label
        if label is None:
            label = f'Qscale{"_ef" if for_ef else ""}'
            if classified_ro:
                label += '_classified'
            if 'active' in prep_params['preparation_type']:
                label += '_reset'
            label += self.msmt_suffix
        MC = self.instr_mc.get_instr()
        self.prepare(drive='timedomain')
        # create cal points
        cal_states = CalibrationPoints.guess_cal_states(cal_states, for_ef)
        cp = CalibrationPoints.single_qubit(self.name, cal_states,
                                            n_per_state=n_cal_points_per_state)
        # create sequence
        seq, sweep_points = sq.qscale_active_reset(qscales=qscales,
            qb_name=self.name, cal_points=cp, for_ef=for_ef,
            operation_dict=self.get_operation_dict(), upload=False,
            last_ge_pulse=last_ge_pulse, prep_params=prep_params)
        MC.set_sweep_function(awg_swf.SegmentHardSweep(
            sequence=seq, upload=upload, parameter_name='Qscale factor'))
        MC.set_sweep_points(sweep_points)
        MC.set_detector_function(self.int_avg_classif_det if classified_ro else
                                 self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'sweep_points_dict': {self.name: qscales},
                             'sweep_name': 'Qscale factor',
                             'sweep_unit': '',
                             'preparation_params': prep_params,
                             'cal_points': repr(cp),
                             'rotate': False if classified_ro else len(cp.states) != 0,
                             'last_ge_pulses': [last_ge_pulse],
                             'data_to_fit': {self.name: 'pf' if for_ef else 'pe'}})
        MC.run(label, exp_metadata=exp_metadata)
        if analyze:
            tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
    def measure_ramsey_multiple_detunings(self, times=None,
                                          artificial_detunings=None, label='',
                                          MC=None, analyze=True, close_fig=True,
                                          cal_points=True, upload=True,
                                          exp_metadata=None):
        """Deprecated: use measure_ramsey() instead.

        Ramsey experiment in which each delay is measured with several
        artificial detunings.

        Args:
            times: array of Ramsey delays (in seconds); each delay must be
                repeated len(artificial_detunings) times
            artificial_detunings: sequence of artificial detunings in Hz.
                NOTE(review): passing None only logs a warning and then
                raises a TypeError at the len() call below — verify intent.
            label: measurement label; auto-generated if empty
            MC: MeasurementControl instance; defaults to self.instr_mc
            analyze: whether to run MeasurementAnalysis afterwards
            close_fig: whether the analysis closes its figure
            cal_points: whether to append 4 calibration segments
            upload: whether to upload the sequence to the AWG
            exp_metadata: dict of additional experiment metadata

        Raises:
            ValueError: if times is None or the delays are not repeated
                len(artificial_detunings) times.
        """
        log.error("This function is deprecated, please use measure_ramsey()")
        if times is None:
            raise ValueError("Unspecified times for measure_ramsey")
        if artificial_detunings is None:
            log.warning('Artificial detuning is 0.')
        uniques = np.unique(times[range(len(artificial_detunings))])
        if uniques.size>1:
            raise ValueError("The values in the times array are not repeated "
                             "len(artificial_detunings) times.")
        if np.any(np.asarray(np.abs(artificial_detunings))<1e3):
            log.warning('The artificial detuning is too small. The units '
                        'should be Hz.')
        if np.any(times>1e-3):
            log.warning('The values in the times array might be too large.'
                        'The units should be seconds.')
        self.prepare(drive='timedomain')
        if MC is None:
            MC = self.instr_mc.get_instr()
        # Define the measurement label
        if label == '':
            label = 'Ramsey_mult_det' + self.msmt_suffix
        if cal_points:
            # append 4 calibration segments spaced by the last time step
            len_art_det = len(artificial_detunings)
            step = np.abs(times[-1] - times[-len_art_det-1])
            sweep_points = np.concatenate(
                [times, [times[-1] + step, times[-1] + 2*step,
                         times[-1] + 3*step, times[-1] + 4*step]])
        else:
            sweep_points = times
        Rams_swf = awg_swf.Ramsey_multiple_detunings(
            pulse_pars=self.get_ge_pars(), RO_pars=self.get_ro_pars(),
            artificial_detunings=artificial_detunings, cal_points=cal_points,
            upload=upload)
        MC.set_sweep_function(Rams_swf)
        MC.set_sweep_points(sweep_points)
        MC.set_detector_function(self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'sweep_points_dict': {self.name: sweep_points},
                             'use_cal_points': cal_points,
                             'artificial_detunings': artificial_detunings})
        MC.run(label, exp_metadata=exp_metadata)
        if analyze:
            ma.MeasurementAnalysis(auto=True, close_fig=close_fig,
                                   qb_name=self.name)
def measure_ramsey_dyn_decoupling(self, times=None, artificial_detuning=0,
label='', MC=None, analyze=True,
close_fig=True, cal_points=True,
upload=True, nr_echo_pulses=4,
seq_func=None, cpmg_scheme=True,
exp_metadata=None):
if times is None:
raise ValueError("Unspecified times for measure_ramsey")
if np.any(times > 1e-3):
log.warning('The values in the times array might be too large.'
'The units should be seconds.')
if artificial_detuning is None:
log.warning('Artificial detuning is 0.')
if np.abs(artificial_detuning) < 1e3:
log.warning('The artificial detuning is too small. The units'
'should be Hz.')
if seq_func is None:
seq_func = sq.ramsey_seq
self.prepare(drive='timedomain')
if MC is None:
MC = self.instr_mc.get_instr()
# Define the measurement label
if label == '':
label = 'Ramsey' + self.msmt_suffix
if cal_points:
step = np.abs(times[-1]-times[-2])
sweep_points = np.concatenate(
[times, [times[-1]+step, times[-1]+2*step,
times[-1]+3*step, times[-1]+4*step]])
else:
sweep_points = times
Rams_swf = awg_swf.Ramsey_decoupling_swf(
seq_func=seq_func,
pulse_pars=self.get_ge_pars(), RO_pars=self.get_ro_pars(),
artificial_detuning=artificial_detuning, cal_points=cal_points,
upload=upload, nr_echo_pulses=nr_echo_pulses, cpmg_scheme=cpmg_scheme)
MC.set_sweep_function(Rams_swf)
MC.set_sweep_points(sweep_points)
MC.set_detector_function(self.int_avg_det)
if exp_metadata is None:
exp_metadata = {}
exp_metadata.update({'sweep_points_dict': {self.name: sweep_points},
'use_cal_points': cal_points,
'cpmg_scheme': cpmg_scheme,
'nr_echo_pulses': nr_echo_pulses,
'seq_func': seq_func,
'artificial_detuning': artificial_detuning})
MC.run(label, exp_metadata=exp_metadata)
if analyze:
RamseyA = ma.Ramsey_Analysis(
auto=True,
label=label,
qb_name=self.name,
NoCalPoints=4,
artificial_detuning=artificial_detuning,
close_fig=close_fig)
    def measure_ramsey(self, times, artificial_detunings=None, label=None,
                       analyze=True, close_fig=True,
                       cal_states="auto", n_cal_points_per_state=2,
                       n=1, upload=True, last_ge_pulse=False, for_ef=False,
                       classified_ro=False, prep_params=None, exp_metadata=None):
        """Ramsey experiment with active-reset support.

        Args:
            times: array of Ramsey delays (in seconds)
            artificial_detunings: artificial detuning(s) in Hz passed to the
                sequence generator
            label: measurement label; auto-generated if None
            analyze: whether to run MultiQubit_TimeDomain_Analysis afterwards
            close_fig: currently unused in this method
            cal_states: which calibration states to use; 'auto' lets
                CalibrationPoints guess based on for_ef
            n_cal_points_per_state: number of calibration segments per state
            n: number of pulse repetitions passed to the sequence generator
            upload: whether to upload the sequence to the AWG
            last_ge_pulse: whether to append a final ge pulse (relevant for
                ef measurements)
            for_ef: whether to measure the ef transition
            classified_ro: whether to use the classifier detector
            prep_params: qubit preparation parameters; defaults to
                self.preparation_params()
            exp_metadata: dict of additional experiment metadata
        """
        if prep_params is None:
            prep_params = self.preparation_params()
        # Define the measurement label
        if label is None:
            label = f'Ramsey{"_ef" if for_ef else ""}'
            if classified_ro:
                label += '_classified'
            if 'active' in prep_params['preparation_type']:
                label += '_reset'
            label += self.msmt_suffix
        MC = self.instr_mc.get_instr()
        self.prepare(drive='timedomain')
        # create cal points
        cal_states = CalibrationPoints.guess_cal_states(cal_states, for_ef)
        cp = CalibrationPoints.single_qubit(self.name, cal_states,
                                            n_per_state=n_cal_points_per_state)
        # create sequence
        seq, sweep_points = sq.ramsey_active_reset(
            times=times, artificial_detunings=artificial_detunings,
            qb_name=self.name, cal_points=cp, n=n, for_ef=for_ef,
            operation_dict=self.get_operation_dict(), upload=False,
            last_ge_pulse=last_ge_pulse, prep_params=prep_params)
        MC.set_sweep_function(awg_swf.SegmentHardSweep(
            sequence=seq, upload=upload, parameter_name='Delay', unit='s'))
        MC.set_sweep_points(sweep_points)
        MC.set_detector_function(self.int_avg_classif_det if classified_ro else
                                 self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update(
            {'sweep_points_dict': {self.name: times},
             'sweep_name': 'Delay',
             'sweep_unit': 's',
             'cal_points': repr(cp),
             'preparation_params': prep_params,
             'last_ge_pulses': [last_ge_pulse],
             'artificial_detuning': artificial_detunings,
             'rotate': False if classified_ro else len(cp.states) != 0,
             'data_to_fit': {self.name: 'pf' if for_ef else 'pe'}})
        MC.run(label, exp_metadata=exp_metadata)
        if analyze:
            tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
def measure_echo(self, times=None, artificial_detuning=None,
upload=True, analyze=True, close_fig=True, cal_points=True,
label=None, exp_metadata=None):
if times is None:
raise ValueError("Unspecified times for measure_echo")
# Define the measurement label
if label == '':
label = 'Echo' + self.msmt_suffix
if cal_points:
step = np.abs(times[-1]-times[-2])
sweep_points = np.concatenate(
[times, [times[-1]+step, times[-1]+2*step,
times[-1]+3*step, times[-1]+4*step]])
cal_states_dict = {'g': [-4, -3], 'e': [-2, -1]}
cal_states_rotations = {'g': 0, 'e': 1}
else:
sweep_points = times
cal_states_dict = None
cal_states_rotations = {}
self.prepare(drive='timedomain')
MC = self.instr_mc.get_instr()
Echo_swf = awg_swf.Echo(
pulse_pars=self.get_ge_pars(), RO_pars=self.get_ro_pars(),
artificial_detuning=artificial_detuning, upload=upload)
MC.set_sweep_function(Echo_swf)
MC.set_sweep_points(sweep_points)
MC.set_detector_function(self.int_avg_classif_det if
self.acq_weights_type() == 'optimal_qutrit'
else self.int_avg_det)
if exp_metadata is None:
exp_metadata = {}
exp_metadata.update({'sweep_points_dict': {self.name: sweep_points},
'use_cal_points': cal_points,
'rotate': cal_points,
'cal_states_dict': cal_states_dict,
'cal_states_rotations': cal_states_rotations if
self.acq_weights_type() != 'optimal_qutrit'
else None,
'data_to_fit': {self.name: 'pe'},
'artificial_detuning': artificial_detuning})
MC.run(label, exp_metadata=exp_metadata)
if analyze:
tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
def measure_echo_2nd_exc(self, times=None, artificial_detuning=None,
label=None, analyze=True,
cal_points=True, no_cal_points=6, upload=True,
last_ge_pulse=False, exp_metadata=None):
if times is None:
raise ValueError("Unspecified times for measure_ramsey")
if artificial_detuning is None:
log.warning('Artificial detuning is 0.')
if np.abs(artificial_detuning) < 1e3:
log.warning('The artificial detuning is too small. The units'
'should be Hz.')
if np.any(times > 1e-3):
log.warning('The values in the times array might be too large.'
'The units should be seconds.')
if label is None:
label = 'Echo_ef' + self.msmt_suffix
self.prepare(drive='timedomain')
MC = self.instr_mc.get_instr()
cal_states_dict = None
cal_states_rotations = {}
if cal_points:
step = np.abs(times[-1]-times[-2])
if no_cal_points == 6:
sweep_points = np.concatenate(
[times, [times[-1]+step, times[-1]+2*step,
times[-1]+3*step, times[-1]+4*step,
times[-1]+5*step, times[-1]+6*step]])
cal_states_dict = {'g': [-6, -5], 'e': [-4, -3], 'f': [-2, -1]}
cal_states_rotations = {'g': 0, 'f': 1} if last_ge_pulse else \
{'e': 0, 'f': 1}
elif no_cal_points == 4:
sweep_points = np.concatenate(
[times, [times[-1]+step, times[-1]+2*step,
times[-1]+3*step, times[-1]+4*step]])
cal_states_dict = {'g': [-4, -3], 'e': [-2, -1]}
cal_states_rotations = {'g': 0, 'e': 1}
elif no_cal_points == 2:
sweep_points = np.concatenate(
[times, [times[-1]+step, times[-1]+2*step]])
cal_states_dict = {'g': [-2, -1]}
cal_states_rotations = {'g': 0}
else:
sweep_points = times
else:
sweep_points = times
Echo_2nd_swf = awg_swf.Echo_2nd_exc(
pulse_pars=self.get_ge_pars(),
pulse_pars_2nd=self.get_ef_pars(),
RO_pars=self.get_ro_pars(),
artificial_detuning=artificial_detuning,
cal_points=cal_points, upload=upload,
no_cal_points=no_cal_points,
last_ge_pulse=last_ge_pulse)
MC.set_sweep_function(Echo_2nd_swf)
MC.set_sweep_points(sweep_points)
MC.set_detector_function(self.int_avg_classif_det if
self.acq_weights_type() == 'optimal_qutrit'
else self.int_avg_det)
if exp_metadata is None:
exp_metadata = {}
exp_metadata.update({'sweep_points_dict': {self.name: sweep_points},
'rotate': cal_points,
'last_ge_pulse': last_ge_pulse,
'data_to_fit': {self.name: 'pf'},
'cal_states_dict': cal_states_dict,
'cal_states_rotations': cal_states_rotations if
self.acq_weights_type() != 'optimal_qutrit'
else None,
'artificial_detuning': artificial_detuning})
MC.run(label, exp_metadata=exp_metadata)
if analyze:
tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
    def measure_randomized_benchmarking(
            self, cliffords, nr_seeds,
            gate_decomp='HZ', interleaved_gate=None,
            n_cal_points_per_state=2, cal_states=(),
            classified_ro=False, thresholded=True, label=None,
            upload=True, analyze=True, prep_params=None,
            exp_metadata=None, **kw):
        '''
        Performs a randomized benchmarking experiment on 1 qubit.

        Args:
            cliffords: array of numbers of Cliffords per sequence (hard
                sweep dimension)
            nr_seeds: number of random seeds (soft sweep dimension)
            gate_decomp: Clifford gate decomposition to use
            interleaved_gate: gate to interleave for interleaved RB; None
                for standard RB
            n_cal_points_per_state: number of calibration segments per state
            cal_states: which calibration states to use; () lets
                CalibrationPoints guess
            classified_ro: use the classifier detector (if not thresholded)
            thresholded: use the digitized (thresholded) detector; takes
                precedence over classified_ro
            label: measurement label; auto-generated if None
            upload: whether to upload the sequences to the AWG
            analyze: whether to run PipelineDataAnalysis afterwards
            prep_params: qubit preparation parameters; defaults to
                self.preparation_params()
            exp_metadata: dict of additional experiment metadata
            **kw: unused here; accepted for interface compatibility
        '''
        # Define the measurement label
        if label is None:
            if interleaved_gate is None:
                label = 'RB_{}_{}_seeds_{}_cliffords'.format(
                    gate_decomp, nr_seeds, cliffords[-1]) + self.msmt_suffix
            else:
                label = 'IRB_{}_{}_{}_seeds_{}_cliffords'.format(
                    interleaved_gate, gate_decomp, nr_seeds, cliffords[-1]) + \
                        self.msmt_suffix
        if prep_params is None:
            prep_params = self.preparation_params()
        # Prepare the physical instruments for a time domain measurement
        self.prepare(drive='timedomain')
        MC = self.instr_mc.get_instr()
        cal_states = CalibrationPoints.guess_cal_states(cal_states)
        cp = CalibrationPoints.single_qubit(self.name, cal_states,
                                            n_per_state=n_cal_points_per_state)
        sequences, hard_sweep_points, soft_sweep_points = \
            sq.randomized_renchmarking_seqs(
                qb_name=self.name, operation_dict=self.get_operation_dict(),
                cliffords=cliffords, nr_seeds=np.arange(nr_seeds),
                gate_decomposition=gate_decomp,
                interleaved_gate=interleaved_gate, upload=False,
                cal_points=cp, prep_params=prep_params)
        hard_sweep_func = awg_swf.SegmentHardSweep(
            sequence=sequences[0], upload=upload,
            parameter_name='Nr. Cliffords', unit='')
        MC.set_sweep_function(hard_sweep_func)
        MC.set_sweep_points(hard_sweep_points)
        # seeds are swept softly over the pre-generated sequences
        MC.set_sweep_function_2D(awg_swf.SegmentSoftSweep(
            hard_sweep_func, sequences, 'Nr. Seeds', ''))
        MC.set_sweep_points_2D(soft_sweep_points)
        # thresholded takes precedence over classified readout
        if thresholded:
            det_func = self.dig_avg_det
        elif classified_ro:
            det_func = self.int_avg_classif_det
        else:
            det_func = self.int_avg_det
        MC.set_detector_function(det_func)
        # create sweep points object
        sp = SweepPoints('nr_seeds', np.arange(nr_seeds), '', 'Nr. Seeds')
        sp.add_sweep_dimension()
        sp.add_sweep_parameter('cliffords', cliffords, '',
                               'Number of applied Cliffords, $m$')
        # create analysis pipeline object
        pp = ProcessingPipeline(
            'average', keys_in=det_func.value_names,
            num_bins=[len(cliffords)]*len(det_func.value_names))
        pp.add_node(
            'get_std_deviation', keys_in=det_func.value_names,
            num_bins=[len(cliffords)]*len(det_func.value_names))
        pp.add_node('SingleQubitRBAnalysis', keys_in='previous',
                    std_keys=[k+' std' for k in det_func.value_names],
                    meas_obj_name=self.name, do_plotting=True)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'preparation_params': prep_params,
                             'cal_points': repr(cp),
                             'sweep_points': sp,
                             'meas_obj_sweep_points_map':
                                 sp.get_sweep_points_map([self.name]),
                             'meas_obj_value_names_map': {
                                 self.name: det_func.value_names},
                             'processing_pipe': pp})
        MC.run_2D(label, exp_metadata=exp_metadata)
        if analyze:
            pla.PipelineDataAnalysis()
    def measure_transients(self, states=('g', 'e'), upload=True,
                           analyze=True, acq_length=4097/1.8e9,
                           prep_params=None, exp_metadata=None, **kw):
        """Measure input-averaged readout transients for the given states.

        If the resulting transients will be used to caclulate the optimal
        weight functions, then it is important that the UHFQC iavg_delay and
        wint_delay are calibrated such that the weights and traces are
        aligned: iavg_delay = 2*wint_delay.

        Args:
            states: iterable of state labels; each must be 'g', 'e' or 'f'
            upload: whether to upload the sequence to the AWG
            analyze: currently unused in this method
            acq_length: acquisition length (in s) set temporarily for the
                measurement
            prep_params: qubit preparation parameters; defaults to
                self.preparation_params()
            exp_metadata: dict of additional experiment metadata
            **kw: supports 'name_extra' to extend the measurement name

        Raises:
            ValueError: if a state label is not 'g', 'e' or 'f'.
        """
        MC = self.instr_mc.get_instr()
        name_extra = kw.get('name_extra', None)
        if prep_params is None:
            prep_params = self.preparation_params()
        if exp_metadata is None:
            exp_metadata = dict()
        exp_metadata.update(
            {'sweep_name': 'time',
             'sweep_unit': ['s']})
        with temporary_value(self.acq_length, acq_length):
            self.prepare(drive='timedomain')
            npoints = self.inp_avg_det.nr_samples
            # sample times at the 1.8e9 Sa/s rate used by the detector
            sweep_points = np.linspace(0, npoints / 1.8e9, npoints,
                                       endpoint=False)
            for state in states:
                if state not in ['g', 'e', 'f']:
                    raise ValueError("Unrecognized state: {}. Must be 'g', 'e' "
                                     "or 'f'.".format(state))
                base_name = 'timetrace_{}'.format(state)
                name = base_name + "_" + name_extra if name_extra is not None \
                    else base_name
                seq, _ = sq.single_state_active_reset(
                    operation_dict=self.get_operation_dict(),
                    qb_name=self.name, state=state, prep_params=prep_params,
                    upload=False)
                # set sweep function and run measurement
                MC.set_sweep_function(awg_swf.SegmentHardSweep(sequence=seq,
                                                               upload=upload))
                MC.set_sweep_points(sweep_points)
                MC.set_detector_function(self.inp_avg_det)
                exp_metadata.update(dict(sweep_points_dict=sweep_points))
                MC.run(name=name + self.msmt_suffix, exp_metadata=exp_metadata)
    def measure_readout_pulse_scope(self, delays, freqs, RO_separation=None,
                                    prep_pulses=None, comm_freq=225e6,
                                    analyze=True, label=None,
                                    close_fig=True, upload=True, verbose=False,
                                    cal_points=((-4, -3), (-2, -1)), MC=None):
        """
        From the documentation of the used sequence function:

        Prepares the AWGs for a readout pulse shape and timing measurement.

        The sequence consists of two readout pulses where the drive pulse start
        time is swept through the first readout pulse. Because the photons in
        the readout resonator induce an ac-Stark shift of the qubit frequency,
        we can determine the readout pulse shape by sweeping the drive frequency
        in an outer loop to determine the qubit frequency.

        Important: This sequence includes two readouts per segment. For this
        reason the calibration points are also duplicated.

        Args:
            delays: A list of delays between the start of the first readout pulse
                    and the center of the drive pulse. Must not be None.
            freqs: Drive frequencies for the outer (2D) sweep; if None,
                   defaults to a 100 MHz span around ``self.f_qubit()``.
            RO_separation: Separation between the starts of the two readout pulses.
                           If the comm_freq parameter is not None, the used value
                           is increased to satisfy the commensurability constraint.
                           If None, derived from the readout length, the maximal
                           delay and 200 ns of slack.
            prep_pulses: Optional preparation pulses passed to the sweep function.
            cal_points: True for default calibration points, False for no
                        calibration points or a list of two lists, containing
                        the indices of the calibration segments for the ground
                        and excited state.
            comm_freq: The readout pulse separation will be a multiple of
                       1/comm_freq
            analyze: Whether to run MeasurementAnalysis afterwards.
            label: Measurement label; auto-generated if None.
            close_fig: Whether the analysis closes its figures.
            upload: Whether to upload the sequence to the AWGs.
            verbose: Verbosity flag forwarded to the sweep function.
            MC: MeasurementControl instance; defaults to ``self.instr_mc``.

        Raises:
            ValueError: if ``delays`` is None.
        """
        if delays is None:
            raise ValueError("Unspecified delays for "
                             "measure_readout_pulse_scope")
        if label is None:
            label = 'Readout_pulse_scope' + self.msmt_suffix
        if MC is None:
            MC = self.instr_mc.get_instr()
        if freqs is None:
            freqs = self.f_qubit() + np.linspace(-50e6, 50e6, 201)
        if RO_separation is None:
            RO_separation = 2 * self.ro_length()
            RO_separation += np.max(delays)
            RO_separation += 200e-9  # for slack
        self.prepare(drive='timedomain')
        MC.set_sweep_function(awg_swf.Readout_pulse_scope_swf(
            delays=delays,
            pulse_pars=self.get_ge_pars(),
            RO_pars=self.get_ro_pars(),
            RO_separation=RO_separation,
            cal_points=cal_points,
            prep_pulses=prep_pulses,
            comm_freq=comm_freq,
            verbose=verbose,
            upload=upload))
        MC.set_sweep_points(delays)
        # Outer sweep: step the drive LO so that sweep points are the actual
        # drive frequencies (LO is offset by -ge_mod_freq).
        MC.set_sweep_function_2D(swf.Offset_Sweep(
            mc_parameter_wrapper.wrap_par_to_swf(
                self.instr_ge_lo.get_instr().frequency),
            -self.ge_mod_freq(),
            parameter_name=self.name + ' drive frequency'))
        MC.set_sweep_points_2D(freqs)
        # Two readouts per segment -> two values per sweep point.
        d = det.UHFQC_integrated_average_detector(
            self.instr_uhf.get_instr(), self.instr_pulsar.get_instr(),
            nr_averages=self.acq_averages(),
            channels=self.int_avg_det.channels,
            integration_length=self.acq_length(),
            values_per_point=2, values_per_point_suffex=['_probe', '_measure'])
        MC.set_detector_function(d)
        MC.run_2D(label)
        # Create a MeasurementAnalysis object for this measurement
        if analyze:
            ma.MeasurementAnalysis(TwoD=True, auto=True, close_fig=close_fig,
                                   qb_name=self.name)
    def measure_residual_readout_photons(
            self, delays_to_relax, ramsey_times, delay_buffer=0,
            cal_points=((-4, -3), (-2, -1)), verbose=False,
            artificial_detuning=None, analyze=True,
            label=None, close_fig=True, MC=None):
        """
        From the documentation of the used sequence function:

        The sequence consists of two readout pulses sandwitching two ramsey
        pulses inbetween. The delay between the first readout pulse and first
        ramsey pulse is swept, to measure the ac stark shift and dephasing
        from any residual photons.

        Important: This sequence includes two readouts per segment. For this
        reason the calibration points are also duplicated.

        Args:
            delays_to_relax: delay between the end of the first readout
                             pulse and the start of the first ramsey pulse.
            pulse_pars: Pulse dictionary for the ramsey pulse.
            RO_pars: Pulse dictionary for the readout pulse.
            ramsey_times: delays between ramsey pulses
            delay_buffer: delay between the start of the last ramsey pulse and
                          the start of the second readout pulse.
            cal_points: True for default calibration points, False for no
                        calibration points or a list of two lists,
                        containing the indices of the calibration
                        segments for the ground and excited state.
            artificial_detuning: optional detuning forwarded to the sweep
                        function and later to the analysis.
            analyze: whether to run ReadoutROPhotonsAnalysis afterwards.
            label: measurement label; auto-generated if None.
            close_fig: whether analysis figures are closed.
            MC: MeasurementControl instance; defaults to ``self.instr_mc``.
        """
        if label is None:
            label = 'residual_readout_photons' + self.msmt_suffix
        if MC is None:
            MC = self.instr_mc.get_instr()
        # duplicate sweep points for the two preparation states
        ramsey_times = np.vstack((ramsey_times, ramsey_times)).\
            reshape((-1,), order='F')
        self.prepare(drive='timedomain')
        sf1 = awg_swf.readout_photons_in_resonator_swf(
            delay_to_relax=delays_to_relax[0],
            delay_buffer=delay_buffer,
            ramsey_times=ramsey_times,
            pulse_pars=self.get_ge_pars(),
            RO_pars=self.get_ro_pars(),
            cal_points=cal_points,
            verbose=verbose,
            artificial_detuning=artificial_detuning,
            upload=False)
        MC.set_sweep_function(sf1)
        MC.set_sweep_points(ramsey_times)
        # Outer soft sweep steps delay_to_relax on the same sweep object.
        sf2 = awg_swf.readout_photons_in_resonator_soft_swf(sf1)
        MC.set_sweep_function_2D(sf2)
        MC.set_sweep_points_2D(delays_to_relax)
        d = det.UHFQC_integrated_average_detector(
            self.instr_uhf.get_instr(), self.instr_pulsar.get_instr(),
            nr_averages=self.acq_averages(),
            channels=self.int_avg_det.channels,
            integration_length=self.acq_length(),
            values_per_point=2, values_per_point_suffex=['_test', '_measure'])
        MC.set_detector_function(d)
        MC.run_2D(label)
        # NOTE(review): this shadows any attribute/parameter named
        # 'artificial_detuning' on self with the raw argument, and the
        # options_dict below calls self.artificial_detuning() — that only
        # works if the passed value is callable (e.g. a qcodes parameter).
        # Confirm the expected type with the callers.
        self.artificial_detuning = artificial_detuning
        # Create a MeasurementAnalysis object for this measurement
        if analyze:
            # Effective kappa of the two dressed readout-resonator modes at
            # f_RO_resonator +- chi (input-output theory expression).
            kappa = list(map(lambda w:0.5*(self.RO_purcell_kappa() - np.real(
                np.sqrt(-16*self.RO_J_coupling()*self.RO_J_coupling() +
                (self.RO_purcell_kappa()-2j*(np.abs(w-self.f_RO_purcell())))*
                (self.RO_purcell_kappa()-2j*(np.abs(w-self.f_RO_purcell())))
                ))),
                [self.f_RO_resonator() - self.chi(),
                 self.f_RO_resonator() + self.chi()]))
            # Prefer the ef dephasing time; fall back to the ge value.
            if not (self.T2_star_ef() == 0):
                T2star = self.T2_star_ef()
            else:
                if self.T2_star() == 0:
                    raise ValueError('T2star is not given.')
                else:
                    T2star = self.T2_star()
            tda.ReadoutROPhotonsAnalysis(t_start=None,
                close_figs=close_fig, options_dict={
                    'f_qubit': self.f_qubit(),
                    'chi': self.chi(),
                    'kappa-effective': kappa,
                    'T2echo': T2star ,
                    'do_analysis': True,
                    'artif_detuning': self.artificial_detuning() },
                do_fitting=True)
def measure_multi_element_segment_timing(
self, phases, ramsey_time=4e-6, nr_wait_elems=16,
elem_type='interleaved', cal_points=((-4, -3), (-2, -1)),
label=None, MC=None, upload=True, analyze=True, close_fig=True):
if label is None:
label = 'Multi_element_segment_timing' + self.msmt_suffix
if MC is None:
MC = self.instr_mc.get_instr()
self.prepare(drive='timedomain')
MC.set_sweep_function(awg_swf.MultiElemSegmentTimingSwf(
phases=phases,
qbn=self.name,
op_dict=self.get_operation_dict(),
ramsey_time=ramsey_time,
nr_wait_elems=nr_wait_elems,
elem_type=elem_type,
cal_points=cal_points,
upload=upload))
MC.set_sweep_points(phases)
d = det.UHFQC_integrated_average_detector(
self.instr_uhf.get_instr(), self.instr_pulsar.get_instr(), nr_averages=self.acq_averages(),
channels=self.int_avg_det.channels,
integration_length=self.acq_length(),
values_per_point=2, values_per_point_suffex=['_single_elem',
'_multi_elem'])
MC.set_detector_function(d)
metadata = dict(
ramsey_time=ramsey_time,
nr_wait_elems=nr_wait_elems,
elem_type=elem_type,
cal_points=cal_points
)
MC.run(label, exp_metadata=metadata)
# Create a MeasurementAnalysis object for this measurement
if analyze:
ma.MeasurementAnalysis(auto=True, close_fig=close_fig,
qb_name=self.name)
def measure_drive_mixer_spectrum(self, if_freqs, amplitude=0.5,
trigger_sep=5e-6, align_frequencies=True):
MC = self.instr_mc.get_instr()
if align_frequencies:
if_freqs = (if_freqs*trigger_sep).astype(np.int)/trigger_sep
s = swf.Offset_Sweep(
self.instr_ro_lo.get_instr().frequency,
self.ge_freq() - self.ro_mod_freq() - self.ge_mod_freq(),
name='Drive intermediate frequency',
parameter_name='Drive intermediate frequency')
MC.set_sweep_function(s)
MC.set_sweep_points(if_freqs)
MC.set_detector_function(self.int_avg_det_spec)
drive_pulse = dict(
pulse_type='GaussFilteredCosIQPulse',
pulse_length=self.acq_length(),
ref_point='start',
amplitude=amplitude,
I_channel=self.ge_I_channel(),
Q_channel=self.ge_Q_channel(),
mod_frequency=self.ge_mod_freq(),
phase_lock=True,
)
sq.pulse_list_list_seq([[self.get_acq_pars(), drive_pulse]])
with temporary_value(
(self.acq_weights_type, 'SSB'),
(self.instr_trigger.get_instr().pulse_period, trigger_sep),
):
self.prepare(drive='timedomain')
self.instr_pulsar.get_instr().start()
MC.run('ge_uc_spectrum' + self.msmt_suffix)
a = ma.MeasurementAnalysis(plot_args=dict(log=True, marker=''))
return a
    def calibrate_drive_mixer_carrier(self, update=True, x0=(0., 0.),
                                      initial_stepsize=0.01, trigger_sep=5e-6):
        """Calibrate the drive-mixer DC offsets to minimize carrier leakage.

        Runs a Nelder-Mead optimization over the I and Q AWG channel offsets
        while measuring the leaked carrier power.

        Args:
            update: if True, store the optimal offsets in ``ge_I_offset``
                and ``ge_Q_offset``.
            x0: initial (I, Q) offset guess for the optimizer.
            initial_stepsize: initial Nelder-Mead step, used for both axes.
            trigger_sep: trigger period (s) during the measurement.

        Returns:
            Tuple ``(ch_1_min, ch_2_min)`` with the optimal I and Q offsets.
        """
        MC = self.instr_mc.get_instr()
        ad_func_pars = {'adaptive_function': opti.nelder_mead,
                        'x0': x0,
                        'initial_step': [initial_stepsize, initial_stepsize],
                        'no_improv_break': 15,
                        'minimize': True,
                        'maxiter': 500}
        chI_par = self.instr_pulsar.get_instr().parameters['{}_offset'.format(
            self.ge_I_channel())]
        chQ_par = self.instr_pulsar.get_instr().parameters['{}_offset'.format(
            self.ge_Q_channel())]
        MC.set_sweep_functions([chI_par, chQ_par])
        MC.set_adaptive_function_parameters(ad_func_pars)
        # Zero-amplitude pulse: only the leaked carrier reaches the output.
        sq.pulse_list_list_seq([[self.get_acq_pars(), dict(
            pulse_type='GaussFilteredCosIQPulse',
            pulse_length=self.acq_length(),
            ref_point='start',
            amplitude=0,
            I_channel=self.ge_I_channel(),
            Q_channel=self.ge_Q_channel(),
        )]])
        # Detect at the carrier (LO) frequency with SSB weights; all three
        # settings are restored when the context exits.
        with temporary_value(
                (self.ro_freq, self.ge_freq() - self.ge_mod_freq()),
                (self.acq_weights_type, 'SSB'),
                (self.instr_trigger.get_instr().pulse_period, trigger_sep),
        ):
            self.prepare(drive='timedomain')
            MC.set_detector_function(det.IndexDetector(
                self.int_avg_det_spec, 0))
            self.instr_pulsar.get_instr().start(exclude=[self.instr_uhf()])
            MC.run(name='drive_carrier_calibration' + self.msmt_suffix,
                   mode='adaptive')
        a = ma.OptimizationAnalysis(label='drive_carrier_calibration')
        # v2 creates a pretty picture of the optimizations
        ma.OptimizationAnalysis_v2(label='drive_carrier_calibration')
        ch_1_min = a.optimization_result[0][0]
        ch_2_min = a.optimization_result[0][1]
        if update:
            self.ge_I_offset(ch_1_min)
            self.ge_Q_offset(ch_2_min)
        return ch_1_min, ch_2_min
    def calibrate_drive_mixer_skewness(self, update=True, amplitude=0.5,
                                       trigger_sep=5e-6,
                                       initial_stepsize=(0.15, 10)):
        """Calibrate the drive-mixer amplitude ratio and phase skew.

        Runs a Nelder-Mead optimization over ``ge_alpha`` and
        ``ge_phi_skew`` while detecting the power of the unwanted sideband.

        Args:
            update: if True, store the optimized alpha and phi_skew on the
                qubit parameters.
            amplitude: amplitude of the calibration drive pulse.
            trigger_sep: trigger period (s) during the measurement.
            initial_stepsize: initial Nelder-Mead steps for (alpha, phi).

        Returns:
            Tuple ``(alpha, phi)`` with the optimized predistortion values.
        """
        MC = self.instr_mc.get_instr()
        ad_func_pars = {'adaptive_function': opti.nelder_mead,
                        'x0': [self.ge_alpha(), self.ge_phi_skew()],
                        'initial_step': initial_stepsize,
                        'no_improv_break': 12,
                        'minimize': True,
                        'maxiter': 500}
        MC.set_sweep_functions([self.ge_alpha, self.ge_phi_skew])
        MC.set_adaptive_function_parameters(ad_func_pars)
        # Snapshot ge_alpha/ge_phi_skew so the optimizer's intermediate
        # values are rolled back; detect at the unwanted sideband
        # (ge_freq - 2*ge_mod_freq) with SSB weights.
        with temporary_value(
                (self.ge_alpha, self.ge_alpha()),
                (self.ge_phi_skew, self.ge_phi_skew()),
                (self.ro_freq, self.ge_freq() - 2*self.ge_mod_freq()),
                (self.acq_weights_type, 'SSB'),
                (self.instr_trigger.get_instr().pulse_period, trigger_sep),
        ):
            self.prepare(drive='timedomain')
            detector = self.int_avg_det_spec
            # Re-upload the pulse before every acquisition so each detector
            # call uses the alpha/phi values the optimizer just set. The
            # lambda binds the qcodes parameters as default arguments and
            # reads their current values at call time.
            detector.always_prepare = True
            detector.AWG = self.instr_pulsar.get_instr()
            detector.prepare_function = lambda \
                alphaparam=self.ge_alpha, skewparam=self.ge_phi_skew: \
                sq.pulse_list_list_seq([[self.get_acq_pars(), dict(
                    pulse_type='GaussFilteredCosIQPulse',
                    pulse_length=self.acq_length(),
                    ref_point='start',
                    amplitude=amplitude,
                    I_channel=self.ge_I_channel(),
                    Q_channel=self.ge_Q_channel(),
                    mod_frequency=self.ge_mod_freq(),
                    phase_lock=True,
                    alpha=alphaparam(),
                    phi_skew=skewparam(),
                )]])
            MC.set_detector_function(det.IndexDetector(detector, 0))
            MC.run(name='drive_skewness_calibration' + self.msmt_suffix,
                   mode='adaptive')
        a = ma.OptimizationAnalysis(label='drive_skewness_calibration')
        # v2 creates a pretty picture of the optimizations
        ma.OptimizationAnalysis_v2(label='drive_skewness_calibration')
        # phi and alpha are the coefficients that go in the predistortion matrix
        alpha = a.optimization_result[0][0]
        phi = a.optimization_result[0][1]
        if update:
            self.ge_alpha(alpha)
            self.ge_phi_skew(phi)
        return alpha, phi
    def calibrate_drive_mixer_skewness_NN(
            self, update=True,make_fig=True, meas_grid=None, n_meas=100,
            amplitude=0.1, trigger_sep=5e-6, two_rounds=False,
            estimator='GRNN_neupy', hyper_parameter_dict=None,
            first_round_limits=(0.6, 1.2, -50, 35), **kwargs):
        """Calibrate mixer skewness with a learned (neural-network) model.

        Measures the unwanted-sideband power on randomly sampled
        (alpha, phi_skew) grids and lets OptimizationAnalysisNN predict the
        optimum; two (or three, if ``two_rounds``) rounds are run, each
        sampling around the previous round's optimum.

        Args:
            update: if True, store the final alpha and phi_skew.
            make_fig: forwarded to the NN analysis.
            meas_grid: accepted for API compatibility; the grid is
                regenerated internally every round and this value is
                overwritten.
            n_meas: number of samples per round (first round uses half).
            amplitude: amplitude of the calibration drive pulse.
            trigger_sep: trigger period (s) during the measurement.
            two_rounds: if True, run a third, narrower sampling round.
            estimator: estimator name passed to the analysis.
            hyper_parameter_dict: estimator hyperparameters; defaults are
                used (with a warning) if None.
            first_round_limits: (alpha_min, alpha_max, phi_min, phi_max)
                uniform sampling box for the first round.

        Keyword Args:
            std_devs: sampling std deviations for (alpha, phi); default
                [0.1, 10].
            second_round_std_scale: shrink factor for the final round's
                std deviations; default 0.4.

        Returns:
            Tuple ``(_alpha, _phi, a)`` with the final predicted optimum
            and the last OptimizationAnalysisNN object.
        """
        if not len(first_round_limits) == 4:
            log.error('Input variable `first_round_limits` in function call '
                      '`calibrate_drive_mixer_skewness_NN` needs to be a list '
                      'or 1D array of length 4.\nFound length '
                      '{} object instead!'.format(len(first_round_limits)))
        if hyper_parameter_dict is None:
            log.warning('No hyperparameters passed to predictive mixer '
                        'calibration routine. Default values for the estimator'
                        'will be used!\n')
            hyper_parameter_dict = {'hidden_layers': [10],
                                    'learning_rate': 1e-3,
                                    'regularization_coefficient': 0.,
                                    'std_scaling': 0.6,
                                    'learning_steps': 5000,
                                    'cv_n_fold': 5,
                                    'polynomial_dimension': 2}
        std_devs = kwargs.get('std_devs', [0.1, 10])
        c = kwargs.pop('second_round_std_scale', 0.4)
        # Could make sample size variable (maxiter) for better adapting)
        if isinstance(std_devs, (list, np.ndarray)):
            if len(std_devs) != 2:
                log.error('std_devs passed in kwargs of `calibrate_drive_'
                          'mixer_NN` is of length: {}. '
                          'Requires length 2 instead.'.format(len(std_devs)))
        MC = self.instr_mc.get_instr()
        _alpha = self.ge_alpha()
        _phi = self.ge_phi_skew()
        for runs in range(3 if two_rounds else 2):
            if runs == 0:
                # half as many points from a uniform distribution at first run
                meas_grid = np.array([
                    np.random.uniform(first_round_limits[0],
                                      first_round_limits[1], n_meas//2),
                    np.random.uniform(first_round_limits[2],
                                      first_round_limits[3], n_meas//2)])
            else:
                # Later rounds: Gaussian sampling around the current best
                # estimate; the last round is narrowed by factor c.
                k = 1. if runs == 1 else c
                meas_grid = np.array([
                    np.random.normal(_alpha, k*std_devs[0], n_meas),
                    np.random.normal(_phi, k*std_devs[1], n_meas)])
            s1 = swf.Hard_Sweep()
            s1.name = 'Amplitude ratio hardware sweep'
            s1.label = r'Amplitude ratio, $\alpha$'
            s1.unit = ''
            s2 = swf.Hard_Sweep()
            s2.name = 'Phase skew hardware sweep'
            s2.label = r'Phase skew, $\phi$'
            s2.unit = 'deg'
            MC.set_sweep_functions([s1, s2])
            MC.set_sweep_points(meas_grid.T)
            # One segment per sampled (alpha, phi_skew) point.
            pulse_list_list = []
            for alpha, phi_skew in meas_grid.T:
                pulse_list_list.append([self.get_acq_pars(), dict(
                    pulse_type='GaussFilteredCosIQPulse',
                    pulse_length=self.acq_length(),
                    ref_point='start',
                    amplitude=amplitude,
                    I_channel=self.ge_I_channel(),
                    Q_channel=self.ge_Q_channel(),
                    mod_frequency=self.ge_mod_freq(),
                    phase_lock=True,
                    alpha=alpha,
                    phi_skew=phi_skew,
                )])
            sq.pulse_list_list_seq(pulse_list_list)
            # Detect the unwanted sideband with SSB weights; settings are
            # restored after each round.
            with temporary_value(
                    (self.ro_freq, self.ge_freq() - 2*self.ge_mod_freq()),
                    (self.acq_weights_type, 'SSB'),
                    (self.instr_trigger.get_instr().pulse_period, trigger_sep),
            ):
                self.prepare(drive='timedomain')
                MC.set_detector_function(self.int_avg_det)
                MC.run(name='drive_skewness_calibration' + self.msmt_suffix)
            a = ma.OptimizationAnalysisNN(
                label='drive_skewness_calibration',
                hyper_parameter_dict=hyper_parameter_dict,
                meas_grid=meas_grid.T,
                estimator=estimator,
                two_rounds=two_rounds,
                round=runs, make_fig=make_fig)
            _alpha = a.optimization_result[0]
            _phi = a.optimization_result[1]
        if update:
            self.ge_alpha(_alpha)
            self.ge_phi_skew(_phi)
        return _alpha, _phi, a
    def find_optimized_weights(self, update=True, measure=True,
                               qutrit=False, acq_length=4097/1.8e9, **kw):
        """Determine optimal integration weights from measured transients.

        Uses the (optionally re-measured) averaged time traces for the
        states 'g', 'e' (and 'f' when ``qutrit``) to construct integration
        weight functions: for a qubit, the normalized e-g difference trace;
        for a qutrit, an orthonormal two-vector basis obtained by
        Gram-Schmidt on the state-difference traces.

        Args:
            update: if True, write the weights (and, for qutrit, the basis
                labels) to the acq_weights_* parameters.
            measure: if True, run ``measure_transients`` first.
            qutrit: use three-level (g/e/f) weights instead of two-level.
            acq_length: acquisition length (s) for the transients.

        Keyword Args:
            name_extra: extra label fragment used to locate/record traces.
            ref_state: qutrit reference state for the basis (default 'g').
            non_ortho_basis: qutrit: use the two (non-orthogonal)
                difference vectors directly.
            basis_2nd: qutrit: use the basis built from the second
                difference vector as the primary vector.
            plot: plot the traces and weights (default True).
            tmax: x-axis limit of the plots in ns (default 300).
        """
        # FIXME: Make a proper analysis class for this (Ants, 04.12.2017)
        # I agree (Christian, 07.11.2018 -- around 1 year later)
        levels = ('g', 'e', 'f') if qutrit else ('g', 'e')
        if measure:
            self.measure_transients(analyze=True, states=levels,
                                    acq_length=acq_length, **kw)
        # create label, measurement analysis and data for each level
        if kw.get("name_extra", False):
            labels = {l: 'timetrace_{}_'.format(l) + kw.get('name_extra')
                         + "_{}".format(self.name) for l in levels}
        else:
            labels = {l: 'timetrace_{}'.format(l)
                         + "_{}".format(self.name) for l in levels}
        m_a = {l: ma.MeasurementAnalysis(label=labels[l]) for l in levels}
        # Complex-valued transient per level: I + 1j*Q.
        iq_traces = {l: m_a[l].measured_values[0]
                        + 1j * m_a[l].measured_values[1] for l in levels}
        final_basis_labels = ['ge']  # default basis vector if only qubit ro
        if qutrit:
            ref_state = kw.get('ref_state', 'g')
            # Difference traces w.r.t. the reference state span the signal
            # subspace that distinguishes the three levels.
            basis = [iq_traces[l] - iq_traces[ref_state] for l in levels
                     if l != ref_state]
            basis_labels = [l + ref_state for l in levels if l != ref_state]
            final_basis = math.gram_schmidt(np.array(basis).transpose())
            final_basis = final_basis.transpose()  # obtain basis vect as rows
            # basis using second vector as primary vector
            basis_2nd = list(reversed(basis))
            final_basis_2nd = math.gram_schmidt(np.array(basis_2nd).transpose())
            final_basis_2nd = final_basis_2nd.transpose()
            if kw.get('non_ortho_basis', False):
                final_basis_labels = basis_labels
                final_basis = np.array([final_basis[0], final_basis_2nd[0]])
            elif kw.get('basis_2nd', False):
                final_basis_labels = [basis_labels[1]] + ['ortho']
                final_basis = final_basis_2nd
            else:
                final_basis_labels = [basis_labels[0]] + ['ortho']
            log.info(f"Setting Basis: {final_basis_labels}")
        if update:
            # FIXME: could merge qutrit and non qutrit although normalization is not
            # the same but would be a good thing to do. First test if qutrit works
            # well. idem in plot
            if qutrit:
                self.acq_weights_I(final_basis[0].real)
                self.acq_weights_Q(final_basis[0].imag)
                self.acq_weights_I2(final_basis[1].real)
                self.acq_weights_Q2(final_basis[1].imag)
                self.acq_weights_basis(final_basis_labels)
            else:
                # Normalize the e-g difference trace to unit max amplitude.
                wre = np.real(iq_traces['e'] - iq_traces['g'])
                wim = np.imag(iq_traces['e'] - iq_traces['g'])
                k = max(np.max(np.abs(wre)), np.max(np.abs(wim)))
                wre /= k
                wim /= k
                self.acq_weights_I(wre)
                self.acq_weights_Q(wim)
        if kw.get('plot', True):
            # TODO: Nathan: plot amplitude instead of I, Q ?
            npoints = len(m_a['g'].sweep_points)
            plot_ylabels = dict(g='d.c. voltage,\nNo pulse (V)',
                                e='d.c. voltage,\nPi_ge pulse (V)',
                                f='d.c. voltage,\nPi_gf pulse (V)')
            tbase = np.linspace(0, npoints/1.8e9, npoints, endpoint=False)
            # Demodulate the traces at the readout modulation frequency.
            modulation = np.exp(2j * np.pi * self.ro_mod_freq() * tbase)
            fig, ax = plt.subplots(len(levels) + 1, figsize=(20,20))
            ax[0].set_title('optimized weights ' + self.name +
                            "".join('\n' + m_a[l].timestamp_string for l in levels)
                            + f'\nWeight Basis: {final_basis_labels}')
            for i, l in enumerate(levels):
                ax[i].plot(tbase / 1e-9, np.real(iq_traces[l] * modulation), '-',
                           label='I_' + l)
                ax[i].plot(tbase / 1e-9, np.imag(iq_traces[l] * modulation), '-',
                           label='Q_' + l)
                ax[i].set_ylabel(plot_ylabels[l])
                ax[i].set_xlim(0, kw.get('tmax', 300))
                ax[i].legend(loc='upper right')
            if qutrit:
                for i, vect in enumerate(final_basis):
                    ax[-1].plot(tbase / 1e-9, np.real(vect * modulation), '-',
                                label='I_' + str(i))
                    ax[-1].plot(tbase / 1e-9, np.imag(vect * modulation), '-',
                                label='Q_' + str(i))
            else:
                ax[-1].plot(tbase / 1e-9,
                            np.real((iq_traces['e'] - iq_traces['g']) * modulation), '-',
                            label='I')
                ax[-1].plot(tbase / 1e-9,
                            np.imag((iq_traces['e'] - iq_traces['g']) * modulation), '-',
                            label='Q')
            ax[-1].set_ylabel('d.c. voltage\ndifference (V)')
            ax[-1].set_xlim(0, kw.get('tmax', 300))
            ax[-1].legend(loc='upper right')
            ax[-1].set_xlabel('Time (ns)')
            m_a['g'].save_fig(plt.gcf(), 'timetraces', xlabel='time',
                              ylabel='voltage')
            plt.tight_layout()
            plt.close()
    def find_ssro_fidelity(self, analyze=True, close_fig=True, no_fits=False,
                           upload=True, thresholded=False, label=None,
                           RO_comm=3 / 225e6, RO_slack=150e-9,
                           qutrit=False, update=False, prep_params=None):
        """
        Conduct an off-on measurement on the qubit recording single-shot
        results and determine the single shot readout fidelity.

        Calculates the assignment fidelity `F_a` which is the average
        probability of correctly guessing the state that was prepared. If
        `no_fits` is `False` also finds the discrimination fidelity F_d, that
        takes into account the probability of an bit flip after state
        preparation, by fitting double gaussians to both |0> prepared and |1>
        prepared datasets.

        Args:
            analyze: Boolean flag, whether to analyse the measurement results.
                     Default `True`.
            close_fig: Boolean flag to close the matplotlib's figure. If
                       `False`, then the plots can be viewed with `plt.show()`
                       Default `True`.
            no_fits: Boolean flag to disable finding the discrimination
                     fidelity. Default `False`.
            upload: Whether to upload the sequences to the AWGs.
            thresholded: Use the thresholded (digitized) detector instead of
                         the integration-logging detector.
            label: Measurement label; auto-generated if None.
            RO_comm: Readout commensurability period; RO spacing is rounded
                     up to a multiple of this.
            RO_slack: Extra slack (s) added to the readout spacing.
            qutrit: SSRO for 3 levels readout
            update: Qutrit only: store the state-probability matrix and
                    classifier parameters on the qubit.
            prep_params: Preparation parameters; must use preparation_type
                         'preselection' or 'wait'.

        Returns:
            Qutrit: the state-probability matrix and classifier parameters.
            Otherwise, if `no_fits` is `False` returns assigment fidelity,
            discrimination fidelity and
            SNR = 2 |mu00 - mu11| / (sigma00 + sigma11). Else
            returns just assignment fidelity.

        Raises:
            NotImplementedError: for unsupported preparation types.
        """
        MC = self.instr_mc.get_instr()
        if label is None:
            label = 'SSRO_fidelity'
            if thresholded:
                label += '_thresh'
        if prep_params is None:
            prep_params = self.preparation_params()
        self.prepare(drive='timedomain')
        # Spacing between consecutive readouts: UHF holdoff + acquisition
        # length + slack, rounded up to a multiple of RO_comm.
        RO_spacing = self.instr_uhf.get_instr().qas_0_delay() / 1.8e9
        RO_spacing += self.acq_length()
        RO_spacing += RO_slack  # for slack
        RO_spacing = np.ceil(RO_spacing / RO_comm) * RO_comm
        if prep_params['preparation_type'] not in ['preselection', 'wait']:
            raise NotImplementedError()
        preselection = prep_params['preparation_type'] == 'preselection'
        if thresholded:
            det_func = self.dig_log_det
        else:
            det_func = self.int_log_det
        if qutrit:
            # One single-shot measurement per prepared state.
            states = ('g', 'e', 'f')
            for state in states:
                seq, swp = sq.single_state_active_reset(
                    operation_dict=self.get_operation_dict(),
                    qb_name=self.name, state=state,
                    prep_params=prep_params, upload=False)
                # set sweep function and run measurement
                MC.set_sweep_function(awg_swf.SegmentHardSweep(sequence=seq,
                                                               upload=upload))
                MC.set_sweep_points(swp)
                MC.set_detector_function(det_func)
                # Single-shot data must not be soft-averaged.
                with temporary_value(MC.soft_avg, 1):
                    MC.run(name=label + '_{}'.format(state) + self.msmt_suffix)
        else:
            MC.set_sweep_function(awg_swf2.n_qubit_off_on(
                pulse_pars_list=[self.get_ge_pars()],
                RO_pars_list=[self.get_ro_pars()],
                upload=upload,
                preselection=preselection,
                RO_spacing=RO_spacing))
            # With preselection each segment contains two readouts,
            # doubling the number of sweep points.
            MC.set_sweep_points(np.arange(4 if preselection else 2))
            MC.set_detector_function(det_func)
            with temporary_value(MC.soft_avg, 1):
                MC.run(name=label + self.msmt_suffix)
        if analyze:
            if qutrit:
                # TODO Nathan: could try and merge this with no qutrit to
                #  avoid logical branching
                options = \
                    dict(classif_method='threshold' if thresholded else 'gmm',
                         pre_selection=preselection)
                # options = 'gmm'
                labels = [label + '_{}'.format(l) for l in states]
                ssqtro = \
                    Singleshot_Readout_Analysis_Qutrit(label=labels,
                                                       options_dict=options)
                state_prob_mtx = ssqtro.proc_data_dict[
                    'analysis_params']['state_prob_mtx_masked']
                classifier_params = ssqtro.proc_data_dict[
                    'analysis_params'].get('classifier_params', None)
                if update:
                    self.acq_classifier_params(classifier_params)
                    self.acq_state_prob_mtx(state_prob_mtx)
                return state_prob_mtx, classifier_params
            else:
                # Rotation is only meaningful for two-channel (IQ) weights.
                rotate = self.acq_weights_type() in {'SSB', 'DSB'}
                preselection = prep_params['preparation_type'] == 'preselection'
                channels = det_func.value_names
                if preselection:
                    nr_samples = 4
                    sample_0 = 0
                    sample_1 = 2
                else:
                    nr_samples = 2
                    sample_0 = 0
                    sample_1 = 1
                ana = ma.SSRO_Analysis(auto=True, close_fig=close_fig,
                                       qb_name=self.name,
                                       rotate=rotate, no_fits=no_fits,
                                       channels=channels, nr_samples=nr_samples,
                                       sample_0=sample_0, sample_1=sample_1,
                                       preselection=preselection)
                if not no_fits:
                    return ana.F_a, ana.F_d, ana.SNR
                else:
                    return ana.F_a
def find_readout_angle(self, MC=None, upload=True, close_fig=True, update=True, nreps=10):
"""
Finds the optimal angle on the IQ plane for readout (optimal phase for
the boxcar integration weights)
If the Q wint channel is set to `None`, sets it to the next channel
after I.
Args:
MC: MeasurementControl object to use. Default `None`.
upload: Whether to update the AWG sequence. Default `True`.
close_fig: Wheter to close the figures in measurement analysis.
Default `True`.
update: Whether to update the integration weights and the Default `True`.
nreps: Default 10.
"""
if MC is None:
MC = self.instr_mc.get_instr()
label = 'RO_theta'
if self.acq_weights_Q() is None:
self.acq_weights_Q(
(self.acq_weights_I() + 1) % 9)
self.set_readout_weights(weights_type='SSB')
prev_shots = self.acq_shots()
self.acq_shots(2*(self.acq_shots()//2))
self.prepare(drive='timedomain')
MC.set_sweep_function(awg_swf.SingleLevel(
pulse_pars=self.get_ge_pars(),
RO_pars=self.get_ro_pars(),
upload=upload,
preselection=False))
spoints = np.arange(self.acq_shots())
MC.set_sweep_points(np.arange(self.acq_shots()))
MC.set_detector_function(self.int_log_det)
prev_avg = MC.soft_avg()
MC.soft_avg(1)
mode = '1D'
if nreps > 1:
MC.set_sweep_function_2D(swf.None_Sweep())
MC.set_sweep_points_2D(np.arange(nreps))
mode = '2D'
MC.run(name=label+self.msmt_suffix, mode=mode)
MC.soft_avg(prev_avg)
self.acq_shots(prev_shots)
rotate = self.acq_weights_Q() is not None
channels = self.int_log_det.value_names
ana = ma.SSRO_Analysis(auto=True, close_fig=close_fig,
rotate=rotate, no_fits=True,
channels=channels,
preselection=False)
if update:
self.acq_IQ_angle(self.acq_IQ_angle() + ana.theta)
return ana.theta
def find_qubit_frequency(self, freqs, method='cw_spectroscopy',
update=False, trigger_separation=3e-6,
close_fig=True, analyze_ef=False, analyze=True,
upload=True, label=None, **kw):
"""
WARNING: Does not automatically update the qubit frequency parameter.
Set update=True if you want this!
Args:
method: the spectroscopy type; options: 'pulsed',
'spectrsocopy'
update: whether to update the relevant qubit
parameters with the found frequency(ies)
MC: the measurement control object
close_fig: whether or not to close the figure
analyze_ef: whether or not to also look for the gf/2
Keyword Args:
interactive_plot: (default=False)
whether to plot with plotly or not
analyze_ef: (default=False)
whether to look for another f_ge/2 peak/dip
percentile: (default=20)
percentile of the data that is considered background noise
num_sigma_threshold: (default=5)
used to define the threshold above(below) which to look for
peaks(dips); threshold = background_mean +
num_sigma_threshold * background_std
window_len (default=3)
filtering window length; uses a_tools.smooth
analysis_window (default=10)
how many data points (calibration points) to remove before
sending data to peak_finder; uses a_tools.cut_edges,
data = data[(analysis_window//2):-(analysis_window//2)]
amp_only (default=False)
whether only I data exists
save_name (default='Source Frequency')
figure name with which it will be saved
auto (default=True)
automatically perform the entire analysis upon call
label (default=none?)
label of the analysis routine
folder (default=working folder)
working folder
NoCalPoints (default=4)
number of calibration points
print_fit_results (default=True)
print the fit report
print_frequency (default=False)
whether to print the f_ge and f_gf/2
make_fig {default=True)
whether or not to make a figure
show (default=True)
show the plots
show_guess (default=False)
plot with initial guess values
close_file (default=True)
close the hdf5 file
Returns:
the peak frequency(ies).
"""
if not update:
log.warning("Does not automatically update the qubit "
"frequency parameter. "
"Set update=True if you want this!")
if np.any(freqs<500e6):
log.warning(('Some of the values in the freqs array might be '
'too small. The units should be Hz.'))
if freqs is None:
f_span = kw.get('f_span', 100e6)
f_mean = kw.get('f_mean', self.f_qubit())
nr_points = kw.get('nr_points', 100)
if f_mean == 0:
log.warning("find_frequency does not know where to "
"look for the qubit. Please specify the "
"f_mean or the freqs function parameter.")
return 0
else:
freqs = np.linspace(f_mean - f_span/2, f_mean + f_span/2,
nr_points)
if 'pulse' not in method.lower():
if label is None:
label = 'spectroscopy' + self.msmt_suffix
if analyze_ef:
label = 'high_power_' + label
self.measure_qubit_spectroscopy(freqs, pulsed=False,
trigger_separation=trigger_separation,
label=label, close_fig=close_fig)
else:
if label is None:
label = 'pulsed_spec' + self.msmt_suffix
if analyze_ef:
label = 'high_power_' + label
self.measure_qubit_spectroscopy(freqs, pulsed=True, label=label,
close_fig=close_fig, upload=upload)
if analyze:
SpecA = ma.Qubit_Spectroscopy_Analysis(
qb_name=self.name,
analyze_ef=analyze_ef,
label=label,
close_fig=close_fig, **kw)
f0 = SpecA.fitted_freq
if update:
if not analyze_ef:
self.ge_freq(f0)
else:
f0_ef = 2*SpecA.fitted_freq_gf_over_2 - f0
self.ef_freq(f0_ef)
if analyze_ef:
return f0, f0_ef
else:
return f0
else:
return
    def find_amplitudes(self, rabi_amps=None, label=None, for_ef=False,
                        n_cal_points_per_state=2, cal_states='auto',
                        upload=True, last_ge_pulse=False, classified_ro=False,
                        prep_params=None, analyze=True, update=False,
                        exp_metadata=None, **kw):
        """
        Finds the pi and pi/2 pulse amplitudes from the fit to a Rabi
        experiment. Uses the Rabi_Analysis(_new)
        class from measurement_analysis.py

        WARNING: Does not automatically update the qubit amplitudes.
        Set update=True if you want this!

        Analysis script for the Rabi measurement:
        1. The I and Q data are rotated and normalized based on the calibration
            points. In most analysis routines, the latter are typically 4:
            2 X180 measurements, and 2 identity measurements, which get
            averaged resulting in one X180 point and one identity point.
            However, the default for Rabi is 2 (2 identity measurements)
            because we typically do Rabi in order to find the correct amplitude
            for an X180 pulse. However, if a previous such value exists, this
            routine also accepts 4 cal pts. If X180_ef pulse was also
            previously calibrated, this routine also accepts 6 cal pts.
        2. The normalized data is fitted to a cosine function.
        3. The pi-pulse and pi/2-pulse amplitudes are calculated from the fit.
        4. The normalized data, the best fit results, and the pi and pi/2
            pulses are plotted.

        The ef analysis assumes the the e population is zero (because of the
        ge X180 pulse at the end).

        Arguments:
            rabi_amps:          amplitude sweep points for the
                                Rabi experiment
            label:              label of the analysis routine
            for_ef:             find amplitudes for the ef transition
            update:             update the qubit amp180 and amp90 parameters
            MC:                 the measurement control object
            close_fig:          close the resulting figure?
            cal_points          whether to used calibration points of not
            no_cal_points       number of calibration points to use; if it's
                                the first time rabi is run
                                then 2 cal points (two I pulses at the end)
                                should be used for the ge Rabi,
                                and 4 (two I pulses and 2 ge X180 pulses at
                                the end) for the ef Rabi
            last_ge_pulse       whether to map the population to the ground
                                state after each run of the Rabi experiment
                                on the ef level

        Keyword arguments:
            other keyword arguments. The Rabi sweep parameters 'amps_mean',
            'amps_span', and 'nr_poinys' should be passed here. This will
            result in a sweep over rabi_amps = np.linspace(amps_mean -
            amps_span/2, amps_mean + amps_span/2, nr_points)

            auto              (default=True)
                automatically perform the entire analysis upon call
            print_fit_results (default=True)
                print the fit report
            make_fig          {default=True)
                whether or not to make a figure
            show              (default=True)
                show the plots
            show_guess        (default=False)
                plot with initial guess values
            show_amplitudes   (default=True)
                print the pi&piHalf pulses amplitudes
            plot_amplitudes   (default=True)
                plot the pi&piHalf pulses amplitudes
            no_of_columns     (default=1)
                number of columns in your paper; figure sizes will be adjusted
                accordingly (1 col: figsize = ( 7in , 4in ) 2 cols: figsize =
                ( 3.375in , 2.25in ), PRL guidelines)

        Returns:
            pi and pi/2 pulses amplitudes + their stderr as a dictionary with
            keys 'piPulse', 'piHalfPulse', 'piPulse_std', 'piHalfPulse_std'.

        Raises:
            ValueError: if rabi_amps is None.
        """
        if not update:
            log.warning("Does not automatically update the qubit pi and "
                        "pi/2 amplitudes. Set update=True if you want this!")
        #how many times to apply the Rabi pulse
        n = kw.get('n', 1)
        if rabi_amps is None:
            raise ValueError('rabi_amps is None.')
        #Perform Rabi
        self.measure_rabi(amps=rabi_amps, analyze=False,
                          upload=upload, label=label, n=n,
                          n_cal_points_per_state=n_cal_points_per_state,
                          cal_states=cal_states, last_ge_pulse=last_ge_pulse,
                          for_ef=for_ef, classified_ro=classified_ro,
                          prep_params=prep_params, exp_metadata=exp_metadata)
        #get pi and pi/2 amplitudes from the analysis results
        if analyze:
            rabi_ana = tda.RabiAnalysis(qb_names=[self.name])
            if update:
                amp180 = rabi_ana.proc_data_dict['analysis_params_dict'][
                    self.name]['piPulse']
                if not for_ef:
                    # A freshly calibrated pi pulse implies amp90 = amp180/2.
                    self.ge_amp180(amp180)
                    self.ge_amp90_scale(0.5)
                else:
                    self.ef_amp180(amp180)
                    self.ef_amp90_scale(0.5)
        return
def find_T1(self, times, n_cal_points_per_state=2, cal_states='auto',
upload=True, last_ge_pulse=False, classified_ro=False,
prep_params=None, analyze=True, update=False, label=None,
for_ef=False, exp_metadata=None, **kw):
"""
Finds the relaxation time T1 from the fit to an exponential
decay function.
WARNING: Does not automatically update the qubit T1 parameter.
Set update=True if you want this!
Routine:
1. Apply pi pulse to get population in the excited state.
2. Wait for different amounts of time before doing a measurement.
Uses the T1_Analysis class from measurement_analysis.py.
The ef analysis assumes the the e population is zero (because of the
ge X180 pulse at the end).
Arguments:
times: array of times to wait before measurement
label: label of the analysis routine
for_ef: find T1 for the 2nd excitation (ef)
update: update the qubit T1 parameter
MC: the measurement control object
close_fig: close the resulting figure?
Keyword Arguments:
other keyword arguments. The the parameters times_mean, times_span,
nr_points should be passed here. These are an alternative to
passing the times array.
auto (default=True)
automatically perform the entire analysis upon call
print_fit_results (default=True)
print the fit report
make_fig (default=True)
whether to make the figures or not
show_guess (default=False)
plot with initial guess values
show_T1 (default=True)
print the T1 and T1_stderr
no_of_columns (default=1)
number of columns in your paper; figure sizes will be adjusted
accordingly (1 col: figsize = ( 7in , 4in ) 2 cols:
figsize = ( 3.375in , 2.25in ), PRL guidelines)
Returns:
the relaxation time T1 + standard deviation as a dictionary with
keys: 'T1', and 'T1_std'
! Specify either the times array or the times_mean value (defaults to
5 micro-s) and the span around it (defaults to 10 micro-s) as kw.
Then the script will construct the sweep points as
np.linspace(times_mean - times_span/2, times_mean + times_span/2,
nr_points)
"""
if not update:
log.warning("Does not automatically update the qubit "
"T1 parameter. Set update=True if you want this!")
if np.any(times > 1e-3):
raise ValueError('Some of the values in the times array might be too '
'large. The units should be seconds.')
if times is None:
times_span = kw.get('times_span', 10e-6)
times_mean = kw.get('times_mean', 5e-6)
nr_points = kw.get('nr_points', 50)
if times_mean == 0:
log.warning("find_T1 does not know how long to wait before"
"doing the read out. Please specify the "
"times_mean or the times function parameter.")
return 0
else:
times = np.linspace(times_mean - times_span / 2, times_mean +
times_span / 2, nr_points)
# Perform measurement
self.measure_T1(times=times,
analyze=False, upload=upload,
last_ge_pulse=last_ge_pulse, for_ef=for_ef,
n_cal_points_per_state=n_cal_points_per_state,
cal_states=cal_states, classified_ro=classified_ro,
prep_params=prep_params, label=label,
exp_metadata=exp_metadata)
# Extract T1 and T1_stddev from ma.T1_Analysis
if analyze:
T1_ana = tda.T1Analysis(qb_names=[self.name])
if update:
T1 = T1_ana.proc_data_dict['analysis_params_dict'][
self.name]['T1']
if for_ef:
self.T1_ef(T1)
else:
self.T1(T1)
return T1
def find_rb_gate_fidelity(self, cliffords, nr_seeds, label=None,
gate_decomposition='HZ', interleaved_gate=None,
thresholded=True, classified_ro=False,
n_cal_points_per_state=2, cal_states=(),
upload=True, analyze=True,
prep_params=None, exp_metadata=None, **kw):
if cliffords is None:
raise ValueError("Unspecified cliffords array")
if label is None:
if interleaved_gate is None:
label = 'RB_{}_{}_seeds_{}_cliffords'.format(
gate_decomposition, nr_seeds,
cliffords[-1]) + self.msmt_suffix
else:
label = 'IRB_{}_{}_{}_seeds_{}_cliffords'.format(
interleaved_gate, gate_decomposition,
nr_seeds, cliffords[-1]) + self.msmt_suffix
#Perform measurement
self.measure_randomized_benchmarking(
cliffords=cliffords, nr_seeds=nr_seeds,
gate_decomp=gate_decomposition, interleaved_gate=interleaved_gate,
n_cal_points_per_state=n_cal_points_per_state, cal_states=cal_states,
classified_ro=classified_ro, thresholded=thresholded, label=label,
upload=upload, analyze=False, prep_params=prep_params,
exp_metadata=exp_metadata)
#Analysis
if analyze:
pla.PipelineDataAnalysis()
def find_frequency_T2_ramsey(self, times, artificial_detunings=None,
upload=True, label=None, n=1,
cal_states="auto", n_cal_points_per_state=2,
analyze=True, update=False, for_ef=False,
last_ge_pulse=False, classified_ro=False,
prep_params=None, exp_metadata=None, **kw):
"""
Finds the real qubit GE or EF transition frequencies and the dephasing
rates T2* or T2*_ef from the fit to a Ramsey experiment.
Uses the Ramsey_Analysis class for Ramsey with one artificial detuning,
and the Ramsey_Analysis_multiple_detunings class for Ramsey with 2
artificial detunings.
Has support only for 1 or 2 artifical detunings.
WARNING: Does not automatically update the qubit freq and T2_star
parameters. Set update=True if you want this!
Arguments:
times array of times over which to sweep in
the Ramsey measurement
artificial_detunings: difference between drive frequency and
qubit frequency estimated from
qubit spectroscopy. Must be a list with
one or two entries.
upload: upload sequence to AWG
update: update the qubit frequency and T2*
parameters
label: measurement label
cal_points: use calibration points or not
no_cal_points: number of cal_points (4 for ge;
2,4,6 for ef)
analyze: perform analysis
close_fig: close the resulting figure
update: update relevant parameters
for_ef: perform msmt and analysis on ef transition
last_ge_pulse: ge pi pulse at the end of each sequence
Keyword arguments:
For one artificial detuning, the Ramsey sweep time delays array
'times', or the parameter 'times_mean' should be passed
here (in seconds).
Returns:
The real qubit frequency + stddev, the dephasing rate T2* + stddev.
For 1 artificial_detuning:
! Specify either the times array or the times_mean value (defaults
to 2.5 micro-s) and the span around it (times_mean; defaults to 5
micro-s) as kw. Then the script will construct the sweep points as
times = np.linspace(times_mean - times_span/2, times_mean +
times_span/2, nr_points).
"""
if not update:
log.warning("Does not automatically update the qubit frequency "
"and T2_star parameters. "
"Set update=True if you want this!")
if artificial_detunings is None:
log.warning('Artificial_detuning is None; qubit driven at "%s" '
'estimated with spectroscopy' %self.f_qubit())
if np.any(np.asarray(np.abs(artificial_detunings)) < 1e3):
log.warning('The artificial detuning is too small.')
if np.any(times > 1e-3):
log.warning('The values in the times array might be too large.')
self.measure_ramsey(times, artificial_detunings=artificial_detunings,
label=label, cal_states=cal_states, n=n,
n_cal_points_per_state=n_cal_points_per_state,
last_ge_pulse=last_ge_pulse, for_ef=for_ef,
classified_ro=classified_ro, upload=upload,
prep_params=prep_params, exp_metadata=exp_metadata,
analyze=False)
# # Check if one or more artificial detunings
if (hasattr(artificial_detunings, '__iter__') and
(len(artificial_detunings) > 1)):
multiple_detunings = True
else:
multiple_detunings = False
if analyze:
if multiple_detunings:
ramsey_ana = ma.Ramsey_Analysis(
auto=True,
label=label,
qb_name=self.name,
NoCalPoints=len(cal_states)*n_cal_points_per_state,
for_ef=for_ef,
last_ge_pulse=last_ge_pulse,
artificial_detuning=artificial_detunings, **kw)
# get new freq and T2* from analysis results
new_qubit_freq = ramsey_ana.qubit_frequency # value
T2_star = ramsey_ana.T2_star['T2_star'] # dict
else:
ramsey_ana = tda.RamseyAnalysis(
qb_names=[self.name], options_dict=dict(
fit_gaussian_decay=kw.get('fit_gaussian_decay', True)))
new_qubit_freq = ramsey_ana.proc_data_dict[
'analysis_params_dict'][self.name]['exp_decay_' + self.name][
'new_qb_freq']
T2_star = ramsey_ana.proc_data_dict[
'analysis_params_dict'][self.name]['exp_decay_' + self.name][
'T2_star']
if update:
if for_ef:
try:
self.ef_freq(new_qubit_freq)
except AttributeError as e:
log.warning('%s. This parameter will not be '
'updated.'%e)
try:
self.T2_star_ef(T2_star)
except AttributeError as e:
log.warning('%s. This parameter will not be '
'updated.'%e)
else:
try:
self.ge_freq(new_qubit_freq)
except AttributeError as e:
log.warning('%s. This parameter will not be '
'updated.'%e)
try:
self.T2_star(T2_star)
except AttributeError as e:
log.warning('%s. This parameter will not be '
'updated.'%e)
def find_T2_echo(self, times, artificial_detuning=None,
upload=True, label=None,
cal_points=True, no_cal_points=None,
analyze=True, for_ef=False,
close_fig=True, update=False,
last_ge_pulse=False, **kw):
"""
Finds the qubit T2 Echo.
Uses the EchoAnalysis class in timedomain_analysis.py.
WARNING: Does not automatically update the qubit freq and T2_star
parameters. Set update=True if you want this!
Arguments:
times array of times over which to sweep in
the Ramsey measurement
artificial_detuning: difference between drive frequency and
qubit frequency estimated from
qubit spectroscopy. Must be a list with
one or two entries.
upload: upload sequence to AWG
update: update the qubit frequency and T2*
parameters
label: measurement label
cal_points: use calibration points or not
analyze: perform analysis
close_fig: close the resulting figure
update: update relevant parameters
Keyword arguments:
The time delays array 'times', or the parameter 'times_mean'
should be passed here (in seconds).
Returns:
Nothing
"""
if not update:
log.warning("Does not automatically update the qubit "
"T2_echo parameter. "
"Set update=True if you want this!")
if artificial_detuning == None:
log.warning('Artificial_detuning is None; applying resonant '
'drive.')
else:
if np.any(np.asarray(np.abs(artificial_detuning)) < 1e3):
log.warning('The artificial detuning is too small.')
if np.any(times > 1e-3):
log.warning('The values in the times array might be too large.')
if cal_points and no_cal_points is None:
log.warning('no_cal_points is None. Defaults to 4 if '
'for_ef==False, or to 6 if for_ef==True.')
if for_ef:
no_cal_points = 6
else:
no_cal_points = 4
if not cal_points:
no_cal_points = 0
if label is None:
if for_ef:
label = 'Echo_ef' + self.msmt_suffix
else:
label = 'Echo' + self.msmt_suffix
if times is None:
times_span = kw.get('times_span', 5e-6)
times_mean = kw.get('times_mean', 2.5e-6)
nr_points = kw.get('nr_points', 50)
if times_mean == 0:
log.warning("find_T2_echo does not know "
"over which times to do Ramsey. Please "
"specify the times_mean or the times "
"function parameter.")
return 0
else:
times = np.linspace(times_mean - times_span/2,
times_mean + times_span/2,
nr_points)
# perform measurement
if for_ef:
self.measure_echo_2nd_exc(times=times,
artificial_detuning=artificial_detuning,
label=label, cal_points=cal_points,
no_cal_points=no_cal_points, upload=upload,
last_ge_pulse=last_ge_pulse)
else:
self.measure_echo(
times=times, artificial_detuning=artificial_detuning,
cal_points=cal_points,
close_fig=close_fig, upload=upload, label=label)
if analyze:
echo_ana = tda.EchoAnalysis(
qb_names=[self.name],
options_dict={
'artificial_detuning': artificial_detuning,
'fit_gaussian_decay':
kw.get('fit_gaussian_decay', True)})
if update:
T2_echo = echo_ana.proc_data_dict[
'analysis_params_dict'][self.name]['T2_echo']
try:
self.T2(T2_echo)
except AttributeError as e:
log.warning('%s. This parameter will not be '
'updated.'%e)
return
def find_qscale(self, qscales, label=None, for_ef=False,
last_ge_pulse=False, upload=True, analyze=True,
cal_states="auto", n_cal_points_per_state=2,
classified_ro=False, prep_params=None,
exp_metadata=None, update=False, **kw):
'''
Performs the QScale calibration measurement ( (xX)-(xY)-(xmY) ) and
extracts the optimal QScale parameter
from the fits (ma.QScale_Analysis).
WARNING: Does not automatically update the qubit qscale parameter. Set
update=True if you want this!
ma.QScale_Analysis:
1. The I and Q data are rotated and normalized based on the calibration
points. In most
analysis routines, the latter are typically 4: 2 X180 measurements,
and 2 identity measurements, which get averaged resulting in one
X180 point and one identity point.
2. The data points for the same qscale value are extracted (every other
3rd point because the sequence
used for this measurement applies the 3 sets of pulses
( (xX)-(xY)-(xmY) ) consecutively for each qscale value).
3. The xX data is fitted to a lmfit.models.ConstantModel(), and the
other 2 to an lmfit.models.LinearModel().
4. The data and the resulting fits are all plotted on the same graph
(self.make_figures).
5. The optimal qscale parameter is obtained from the point where the 2
linear fits intersect.
Other possible input parameters:
qscales
array of qscale values over which to sweep...
or qscales_mean and qscales_span
...or the mean qscale value and the span around it
(defaults to 3) as kw. Then the script will construct the sweep
points as np.linspace(qscales_mean - qscales_span/2,
qscales_mean + qscales_span/2, nr_points)
Keyword parameters:
label (default=none?)
label of the analysis routine
for_ef (default=False)
whether to obtain the drag_qscale_ef parameter
update (default=True)
whether or not to update the qubit drag_qscale parameter with
the found value
MC (default=self.MC)
the measurement control object
close_fig (default=True)
close the resulting figure
last_ge_pulse (default=True)
whether to apply an X180 ge pulse at the end
Keyword parameters:
qscale_mean (default=self.drag_qscale()
mean of the desired qscale sweep values
qscale_span (default=3)
span around the qscale mean
nr_points (default=30)
number of sweep points between mean-span/2 and mean+span/2
auto (default=True)
automatically perform the entire analysis upon call
folder (default=working folder)
Working folder
NoCalPoints (default=4)
Number of calibration points
cal_points (default=[[-4, -3], [-2, -1]])
The indices of the calibration points
show (default=True)
show the plot
show_guess (default=False)
plot with initial guess values
plot_title (default=measurementstring)
the title for the plot as a string
xlabel (default=self.xlabel)
the label for the x axis as a string
ylabel (default=r'$F|1\rangle$')
the label for the x axis as a string
close_file (default=True)
close the hdf5 file
Returns:
the optimal DRAG QScale parameter + its stderr as a dictionary with
keys 'qscale' and 'qscale_std'.
'''
if not update:
log.warning("Does not automatically update the qubit qscale "
"parameter. "
"Set update=True if you want this!")
qscales = np.repeat(qscales, 3)
#Perform the qscale calibration measurement
self.measure_qscale(qscales=qscales, upload=upload, label=label,
cal_states=cal_states, exp_metadata=exp_metadata,
n_cal_points_per_state=n_cal_points_per_state,
last_ge_pulse=last_ge_pulse, for_ef=for_ef,
classified_ro=classified_ro,
prep_params=prep_params, analyze=False)
# Perform analysis and extract the optimal qscale parameter
# Returns the optimal qscale parameter
if analyze:
qscale_ana = tda.QScaleAnalysis(qb_names=[self.name])
if update:
qscale = qscale_ana.proc_data_dict['analysis_params_dict'][
self.name]['qscale']
if for_ef:
self.ef_motzoi(qscale)
else:
self.ge_motzoi(qscale)
return
def calculate_anharmonicity(self, update=False):
"""
Computes the qubit anaharmonicity using f_ef (self.f_ef_qubit)
and f_ge (self.f_qubit).
It is assumed that the latter values exist.
WARNING: Does not automatically update the qubit anharmonicity
parameter. Set update=True if you want this!
"""
if not update:
log.warning("Does not automatically update the qubit "
"anharmonicity parameter. "
"Set update=True if you want this!")
if self.ge_freq() == 0:
log.warning('f_ge = 0. Run qubit spectroscopy or Ramsey.')
if self.ef_freq() == 0:
log.warning('f_ef = 0. Run qubit spectroscopy or Ramsey.')
anharmonicity = self.ef_freq() - self.ge_freq()
if update:
self.anharmonicity(anharmonicity)
return anharmonicity
def calculate_EC_EJ(self, update=True, **kw):
"""
Extracts EC and EJ from a least squares fit to the transmon
Hamiltonian solutions. It uses a_tools.calculate_transmon_transitions,
f_ge and f_ef.
WARNING: Does not automatically update the qubit EC and EJ parameters.
Set update=True if you want this!
Keyword Arguments:
asym: (default=0)
asymmetry d (Koch (2007), eqn 2.18) for asymmetric junctions
reduced_flux: (default=0)
reduced magnetic flux through SQUID
no_transitions (default=2)
how many transitions (levels) are you interested in
dim: (default=None)
dimension of Hamiltonian will be (2*dim+1,2*dim+1)
"""
if not update:
log.warning("Does not automatically update the qubit EC and EJ "
"parameters. "
"Set update=True if you want this!")
(EC,EJ) = a_tools.fit_EC_EJ(self.f_qubit(), self.f_ef_qubit(), **kw)
if update:
self.EC_qubit(EC)
self.EJ_qubit(EJ)
return EC, EJ
    def find_readout_frequency(self, freqs=None, update=False, MC=None,
                               qutrit=False, **kw):
        """
        Find readout frequency at which contrast between the states of the
        qubit is the highest.

        You need a working pi-pulse for this to work, as well as a pi_ef
        pulse if you intend to use `for_3_level_ro`. Also, if your
        readout pulse length is much longer than the T1, the results will not
        be nice as the excited state spectrum will be mixed with the ground
        state spectrum.

        Args:
            freqs: frequencies to sweep; if None, a window of +-f_span
                (kw, default 20e6) around the current f_RO with n_freq
                (kw, default 401) points is used
            update: write the fitted readout parameters to the qubit
            MC: MeasurementControl instance; taken from self.instr_mc if None
            qutrit (bool): find optimal frequency for 3-level readout.
                Default is False.
            **kw: also 'analyze' (default True) and 'get_CLEAR_params'
                (default False)
        Returns:
            None
        """
        # FIXME: Make proper analysis class for this (Ants, 04.12.2017)
        if not update:
            log.info("Does not automatically update the RO resonator "
                     "parameters. Set update=True if you want this!")
        if freqs is None:
            if self.f_RO() is not None:
                f_span = kw.pop('f_span', 20e6)
                fmin = self.f_RO() - f_span
                fmax = self.f_RO() + f_span
                n_freq = kw.pop('n_freq', 401)
                freqs = np.linspace(fmin, fmax, n_freq)
            else:
                raise ValueError("Unspecified frequencies for find_resonator_"
                                 "frequency and no previous value exists")
        if np.any(freqs < 500e6):
            log.warning('Some of the values in the freqs array might be '
                        'too small. The units should be Hz.')
        if MC is None:
            MC = self.instr_mc.get_instr()

        # one readout-frequency spectroscopy trace per prepared qubit state
        levels = ('g', 'e', 'f') if qutrit else ('g', 'e')
        self.measure_dispersive_shift(freqs, states=levels, analyze=False)
        labels = {l: '{}-spec'.format(l) + self.msmt_suffix for l in levels}
        m_a = {l: ma.MeasurementAnalysis(label=labels[l]) for l in levels}
        # complex transmission reconstructed from the two measured channels
        # (amplitude and phase; phase presumably in degrees — see the /180.)
        trace = {l: m_a[l].measured_values[0] *
                 np.exp(1j * np.pi * m_a[l].measured_values[1] / 180.)
                 for l in levels}
        # FIXME: make something that doesn't require a conditional branching
        if qutrit:
            # the optimal frequency maximizes the summed pairwise distance
            # of the three state traces in the IQ plane
            total_dist = np.abs(trace['e'] - trace['g']) + \
                         np.abs(trace['f'] - trace['g']) + \
                         np.abs(trace['f'] - trace['e'])
            fmax = freqs[np.argmax(total_dist)]
            # FIXME: just as debug plotting for now
            fig, ax = plt.subplots(2)
            ax[0].plot(freqs, np.abs(trace['g']), label='g')
            ax[0].plot(freqs, np.abs(trace['e']), label='e')
            ax[0].plot(freqs, np.abs(trace['f']), label='f')
            ax[0].set_ylabel('Amplitude')
            ax[0].legend()
            ax[1].plot(freqs, np.abs(trace['e'] - trace['g']), label='eg')
            ax[1].plot(freqs, np.abs(trace['f'] - trace['g']), label='fg')
            ax[1].plot(freqs, np.abs(trace['e'] - trace['f']), label='ef')
            ax[1].plot(freqs, total_dist, label='total distance')
            ax[1].set_xlabel("Freq. [Hz]")
            ax[1].set_ylabel('Distance in IQ plane')
            ax[0].set_title("Current RO_freq: {} Hz\nOptimal Freq: {} Hz".format(
                self.ro_freq(),
                fmax))
            plt.legend()
            m_a['g'].save_fig(fig, 'IQplane_distance')
            plt.show()
            if kw.get('analyze', True):
                sa.ResonatorSpectroscopy_v2(labels=[l for l in labels.values()])
        else:
            # two-level case: maximize the g-e distance in the IQ plane
            fmax = freqs[np.argmax(np.abs(trace['e'] - trace['g']))]
            log.info("Optimal RO frequency to distinguish states {}: {} Hz"
                     .format(levels, fmax))
            if kw.get('analyze', True):
                # simultaneous fit of the g and e traces
                SA = sa.ResonatorSpectroscopy(t_start=[m_a['g'].timestamp_string,
                                                       m_a['e'].timestamp_string],
                                              options_dict=dict(simultan=True,
                                                                fit_options=dict(
                                                                    model='hanger_with_pf'),
                                                                scan_label=''),
                                              do_fitting=True)
        # FIXME Nathan: remove 3 level dependency; fix this analysis:
        # if qutrit:
        #     SA2 = sa.ResonatorSpectroscopy(t_start=m_a['f'].timestamp_string,
        #                                  options_dict=dict(simultan=False,
        #                                            fit_options = dict(
        #                                              model='hanger_with_pf'),
        #                                            scan_label=''),
        #                                  do_fitting=True)
        if update:
            # FIXME Nathan: update parameters accordingly
            # NOTE(review): SA is only assigned in the non-qutrit branch when
            # kw['analyze'] is True; other combinations raise a NameError on
            # the lines below — confirm intended usage.
            self.ro_freq(SA.f_RO if not qutrit else fmax)
            self.chi(SA.chi)
            self.f_RO_resonator(SA.f_RO_res)
            self.f_RO_purcell(SA.f_PF)
            self.RO_purcell_kappa(SA.kappa)
            self.RO_J_coupling(SA.J_)
        if kw.pop('get_CLEAR_params', False):
            # derive the CLEAR readout-pulse segment amplitudes from the
            # fitted readout-chain parameters
            if self.ro_CLEAR_segment_length is None:
                self.ro_CLEAR_segment_length = self.ro_length/10
            if kw.get('max_amp_difference', False) :
                '''this gives the ratio of the maximal hight for'''
                '''the segments to the base amplitude'''
                max_diff = kw.pop('max_amp_difference')
            else:
                max_diff = 3
            self.ro_CLEAR_delta_amp_segment = \
                sim_CLEAR.get_CLEAR_amplitudes(
                    self.f_RO_purcell, self.f_RO_resonator,
                    self.ro_freq, self.RO_purcell_kappa,
                    self.RO_J_coupling, self.chi, 1, self.ro_length,
                    length_segments=self.ro_CLEAR_segment_length,
                    sigma=self.ro_sigma,
                    max_amp_diff=max_diff) * self.ro_amp
    def measure_dispersive_shift(self, freqs, analyze=True, close_fig=True,
                                 upload=True, states=('g','e'), prep_params=None):
        """ Varies the frequency of the microwave source to the resonator and
        measures the transmittance, once for each qubit state in `states`.

        Args:
            freqs: array of readout frequencies to sweep (in Hz)
            analyze: run ma.MeasurementAnalysis after the measurements
            close_fig: close the analysis figure
            upload: upload the state-preparation sequence to the AWG
            states: tuple of state labels (e.g. 'g', 'e', 'f') to prepare
                before each spectroscopy scan
            prep_params: preparation parameters; taken from
                self.preparation_params() if None

        Raises:
            ValueError: if freqs is None
            AssertionError: if states is not a tuple
        """
        if freqs is None:
            raise ValueError("Unspecified frequencies for "
                             "measure_resonator_spectroscopy")
        if np.any(freqs < 500e6):
            log.warning(('Some of the values in the freqs array '
                         'might be too small. The units should be Hz.'))
        if prep_params is None:
            prep_params = self.preparation_params()

        assert isinstance(states, tuple), \
            "states should be a tuple, not {}".format(type(states))

        self.prepare(drive='timedomain')
        MC = self.instr_mc.get_instr()

        # one readout-frequency sweep per prepared state
        for state in states:
            # upload the sequence preparing `state` (with the reset/prep
            # scheme configured in prep_params)
            sq.single_state_active_reset(
                operation_dict=self.get_operation_dict(),
                qb_name=self.name,
                state=state, prep_params=prep_params, upload=upload)
            MC.set_sweep_function(self.swf_ro_freq_lo())
            MC.set_sweep_points(freqs)
            MC.set_detector_function(self.int_avg_det_spec)
            # start all AWGs except the UHF (presumably started by the
            # acquisition — confirm)
            self.instr_pulsar.get_instr().start(exclude=[self.instr_uhf()])
            MC.run(name=f"{state}-spec" + self.msmt_suffix)
            self.instr_pulsar.get_instr().stop()

        if analyze:
            ma.MeasurementAnalysis(auto=True, close_fig=close_fig,
                                   qb_name=self.name)
    def calibrate_flux_pulse_timing(self, freqs=None, delays=None, MC=None,
                                    analyze=False, update=False,**kw):
        """
        Flux pulse timing calibration.

        Does a 2D measurement of the type:

            -------|X180| ---------------- |RO|
                      <----->
                   |  fluxpulse |

        where the flux pulse delay and the drive pulse frequency of the X180
        pulse are swept.

        Args:
            freqs: numpy array of drive frequencies in Hz for the flux pulse
                scope type experiment; defaults to a 100 MHz window around
                f_qubit
            delays: numpy array with delays (in s) swept through as delay of
                the drive pulse; defaults to a window around the pulse length
            MC: measurement control object; taken from self.instr_mc if None
            analyze: bool, if True the measured data gets analyzed (for
                detailed documentation of the analysis see the
                FluxPulse_timing_calibration class)
            update: bool, if True the AWG channel delay gets corrected such
                that single qubit gates and flux pulses have no relative
                delay (only takes effect when analyze is True)

        Keyword arguments:
            pulse_length (default=100e-9): flux pulse length, written to the
                qubit parameter before the measurement
            amplitude (default=0.5): flux pulse amplitude, written to the
                qubit parameter before the measurement

        Returns:
            fitted_delay: float, only returned if analyze is True.
        """
        if MC is None:
            MC = self.instr_mc.get_instr()
        channel = self.flux_pulse_channel()
        # the kw values are stored in the qubit parameters as a side effect
        pulse_length = kw.pop('pulse_length', 100e-9)
        self.flux_pulse_length(pulse_length)
        amplitude = kw.pop('amplitude', 0.5)
        self.flux_pulse_amp(amplitude)
        measurement_string = 'Flux_pulse_delay_calibration_{}'.format(self.name)
        if freqs is None:
            freqs = self.f_qubit() + np.linspace(-50e6, 50e6, 20, endpoint=False)
        if delays is None:
            delays = np.linspace(-100e-9, pulse_length + 100e-9, 40, endpoint=False)
        self.prepare(drive='timedomain')
        detector_fun = self.int_avg_det
        # hard sweep: drive-pulse delay; soft sweep: drive frequency
        s1 = awg_swf.Fluxpulse_scope_swf(self)
        s2 = awg_swf.Fluxpulse_scope_drive_freq_sweep(self)
        MC.set_sweep_function(s1)
        MC.set_sweep_points(delays)
        MC.set_sweep_function_2D(s2)
        MC.set_sweep_points_2D(freqs)
        MC.set_detector_function(detector_fun)
        MC.run_2D(measurement_string)
        if analyze:
            flux_pulse_timing_ma = ma.FluxPulse_timing_calibration(
                label=measurement_string,
                flux_pulse_length=pulse_length,
                qb_name=self.name,
                auto=True,
                plot=True)
            if update:
                # shift the AWG channel delay by the fitted relative delay
                new_delay = self.instr_pulsar.get_instr().get('{}_delay'.format(channel)) + \
                            flux_pulse_timing_ma.fitted_delay
                self.instr_pulsar.get_instr().set('{}_delay'.format(channel), new_delay)
                print('updated delay of channel {}.'.format(channel))
            else:
                log.warning('Not updated, since update was disabled.')
            return flux_pulse_timing_ma.fitted_delay
        else:
            return
def calibrate_flux_pulse_frequency(self, MC=None, thetas=None, ampls=None,
analyze=False,
plot=False,
ampls_bidirectional = False,
**kw):
"""
flux pulse frequency calibration
does a 2D measuement of the type:
X90_separation
< -- ---- ----------- --->
|X90| -------------- |X90| --- |RO|
| fluxpulse |
where the flux pulse amplitude and the angle of the second X90 pulse
are swept.
Args:
MC: measurement control object
thetas: numpy array with angles (in rad) for the Ramsey type
ampls: numpy array with amplitudes (in V) swept through
as flux pulse amplitudes
ampls_bidirectional: bool, for use if the qubit is parked at sweetspot.
If true, the flux pulse amplitudes are swept to positive
and negative voltages and the frequency model fit is ]
performed on the combined dataset
analyze: bool, if True, then the measured data
gets analyzed ( ma.fit_qubit_frequency() )
"""
if MC is None:
MC = self.instr_mc.get_instr()
channel = self.flux_pulse_channel()
clock_rate = MC.station.pulsar.clock(channel)
X90_separation = kw.pop('X90_separation', 200e-9)
distorted = kw.pop('distorted', False)
distortion_dict = kw.pop('distortion_dict', None)
pulse_length = kw.pop('pulse_length', 30e-9)
self.flux_pulse_length(pulse_length)
pulse_delay = kw.pop('pulse_delay', 50e-9)
self.flux_pulse_delay(pulse_delay)
if thetas is None:
thetas = np.linspace(0, 2*np.pi, 8, endpoint=False)
if ampls is None:
ampls = np.linspace(0, 1, 21)
ampls_flag = True
self.prepare(drive='timedomain')
detector_fun = self.int_avg_det
s1 = awg_swf.Ramsey_interleaved_fluxpulse_sweep(
self,
X90_separation=X90_separation,
distorted=distorted,
distortion_dict=distortion_dict)
s2 = awg_swf.Ramsey_fluxpulse_ampl_sweep(self, s1)
MC.set_sweep_function(s1)
MC.set_sweep_points(thetas)
MC.set_sweep_function_2D(s2)
MC.set_sweep_points_2D(ampls)
MC.set_detector_function(detector_fun)
measurement_string_1 = 'Flux_pulse_frequency_calibration_{}_1'.format(self.name)
MC.run_2D(measurement_string_1)
if ampls_bidirectional:
MC.set_sweep_function(s1)
MC.set_sweep_points(thetas)
MC.set_sweep_function_2D(s2)
MC.set_sweep_points_2D(-ampls)
MC.set_detector_function(detector_fun)
measurement_string_2 = 'Flux_pulse_frequency_calibration_{}_2'.format(self.name)
MC.run_2D(measurement_string_2)
if analyze:
flux_pulse_ma_1 = ma.Fluxpulse_Ramsey_2D_Analysis(
label=measurement_string_1,
X90_separation=X90_separation,
flux_pulse_length=pulse_length,
qb_name=self.name,
auto=False)
flux_pulse_ma_1.fit_all(extrapolate_phase=True, plot=True)
phases = flux_pulse_ma_1.fitted_phases
ampls = flux_pulse_ma_1.sweep_points_2D
if ampls_bidirectional:
flux_pulse_ma_2 = ma.Fluxpulse_Ramsey_2D_Analysis(
label=measurement_string_2,
X90_separation=X90_separation,
flux_pulse_length=pulse_length,
qb_name=self.name,
auto=False)
flux_pulse_ma_2.fit_all(extrapolate_phase=True, plot=True)
phases = np.concatenate(flux_pulse_ma_2.fitted_phases[-1:0:-1],
flux_pulse_ma_1.fitted_phases)
ampls = np.concatenate(flux_pulse_ma_2.sweep_points_2D[-1:0:-1],
flux_pulse_ma_1.sweep_points_2D)
instrument_settings = flux_pulse_ma_1.data_file['Instrument settings']
qubit_attrs = instrument_settings[self.name].attrs
E_c = kw.pop('E_c', qubit_attrs.get('E_c', 0.3e9))
f_max = kw.pop('f_max', qubit_attrs.get('f_max', self.f_qubit()))
V_per_phi0 = kw.pop('V_per_phi0',
qubit_attrs.get('V_per_phi0', 1.))
dac_sweet_spot = kw.pop('dac_sweet_spot',
qubit_attrs.get('dac_sweet_spot', 0))
freqs = f_max - phases/(2*np.pi*pulse_length)
fit_res = ma.fit_qubit_frequency(ampls, freqs, E_c=E_c, f_max=f_max,
V_per_phi0=V_per_phi0,
dac_sweet_spot=dac_sweet_spot
)
print(fit_res.fit_report())
if plot and ampls_bidirectional:
fit_res.plot()
if ampls_bidirectional:
return fit_res
def calibrate_CPhase_dynamic_phases(self,
flux_pulse_length=None,
flux_pulse_amp=None,
flux_pulse_delay=None,
thetas=None,
X90_separation=None,
flux_pulse_channel=None,
MC=None, label=None,
analyze=True, update=True, **kw):
"""
CPhase dynamic phase calibration
does a measuement of the type:
X90_separation
< -- ---- ----------- --->
|X90| -------------- |X90| --- |RO|
| fluxpulse |
where the angle of the second X90 pulse is swept for
the flux pulse amplitude in [0,cphase_ampl].
Args:
MC: measurement control object
thetas: numpy array with angles (in rad) for the Ramsey type
ampls: numpy array with amplitudes (in V) swept through
as flux pulse amplitudes
analyze: bool, if True, then the measured data
gets analyzed (
"""
if MC is None:
MC = self.instr_mc.get_instr()
if flux_pulse_amp is None:
flux_pulse_amp = self.flux_pulse_amp()
log.warning('flux_pulse_amp is not specified. Using the value'
'in the flux_pulse_amp parameter.')
if flux_pulse_length is None:
flux_pulse_length = self.flux_pulse_length()
log.warning('flux_pulse_length is not specified. Using the value'
'in the flux_pulse_length parameter.')
if flux_pulse_delay is None:
flux_pulse_delay = self.flux_pulse_delay()
log.warning('flux_pulse_delay is not specified. Using the value'
'in the flux_pulse_delay parameter.')
if flux_pulse_channel is None:
flux_pulse_channel = self.flux_pulse_channel()
log.warning('flux_pulse_channel is not specified. Using the value'
'in the flux_pulse_channel parameter.')
if thetas is None:
thetas = np.linspace(0, 4*np.pi, 16)
print('Sweeping over phases thata=np.linspace(0, 4*np.pi, 16).')
if label is None:
label = 'Dynamic_phase_measurement_{}_{}_filter'.format(
self.name, self.flux_pulse_channel())
self.measure_dynamic_phase(flux_pulse_length=flux_pulse_length,
flux_pulse_amp=flux_pulse_amp,
flux_pulse_channel=flux_pulse_channel,
flux_pulse_delay=flux_pulse_delay,
X90_separation=X90_separation,
thetas=thetas,
MC=MC,
label=label)
if analyze:
MA = ma.Dynamic_phase_Analysis(
TwoD=True,
flux_pulse_amp=flux_pulse_amp,
flux_pulse_length=flux_pulse_length,
qb_name=self.name, **kw)
dynamic_phase = MA.dyn_phase
print('fitted dynamic phase on {}: {:0.3f} [deg]'.format(self.name,
dynamic_phase))
if update:
try:
self.dynamic_phase(dynamic_phase)
except Exception:
log.warning('Could not update '
'{}.dynamic_phase().'.format(self.name))
return dynamic_phase
else:
return
    def measure_flux_pulse_scope(self, freqs, delays, cz_pulse_name,
                                 analyze=True, cal_points=True,
                                 upload=True, label=None,
                                 n_cal_points_per_state=2, cal_states='auto',
                                 prep_params=None, exp_metadata=None):
        '''
        Flux pulse scope measurement used to determine the shape of flux
        pulses. Set up as a 2D measurement (delay and drive pulse frequency
        are being swept).

        pulse sequence:
                      <- delay ->
           |    -------------    |X180|  ----------------------------  |RO|
           |    ---   | ---- fluxpulse ----- |

        Args:
            freqs (numpy array): array of drive frequencies
            delays (numpy array): array of delays of the drive pulse w.r.t
                the flux pulse
            cz_pulse_name (str): name of the flux pulse operation to scope
            analyze (bool): run the time-domain analysis afterwards
            cal_points (bool): add calibration segments to the sequence
            upload (bool): upload the sequence to the AWG
            label (str): measurement label; constructed if None
            n_cal_points_per_state (int): calibration segments per state
            cal_states: calibration states, or 'auto'
            prep_params (dict): preparation parameters; taken from
                self.preparation_params() if None
            exp_metadata (dict): extra metadata stored with the data

        Returns: None
        '''
        if label is None:
            label = 'Flux_scope_{}'.format(self.name)
        MC = self.instr_mc.get_instr()
        self.prepare(drive='timedomain')
        if cal_points:
            cal_states = CalibrationPoints.guess_cal_states(cal_states)
            cp = CalibrationPoints.single_qubit(
                self.name, cal_states, n_per_state=n_cal_points_per_state)
        else:
            cp = None
        if prep_params is None:
            prep_params = self.preparation_params()
        seq, sweep_points, sweep_points_2D = \
            fsqs.fluxpulse_scope_sequence(
                delays=delays, freqs=freqs, qb_name=self.name,
                operation_dict=self.get_operation_dict(),
                cz_pulse_name=cz_pulse_name, cal_points=cp,
                prep_params=prep_params, upload=False)
        # hard sweep: drive pulse delay within the uploaded sequence
        MC.set_sweep_function(awg_swf.SegmentHardSweep(
            sequence=seq, upload=upload, parameter_name='Delay', unit='s'))
        MC.set_sweep_points(sweep_points)
        # soft sweep: drive LO frequency, offset by -ge_mod_freq so the
        # sweep points correspond to the actual drive frequency
        MC.set_sweep_function_2D(swf.Offset_Sweep(
            self.instr_ge_lo.get_instr().frequency,
            -self.ge_mod_freq(),
            name='Drive frequency',
            parameter_name='Drive frequency', unit='Hz'))
        MC.set_sweep_points_2D(sweep_points_2D)
        MC.set_detector_function(self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'sweep_points_dict': {self.name: delays},
                             'sweep_points_dict_2D': {self.name: freqs},
                             'use_cal_points': cal_points,
                             'preparation_params': prep_params,
                             'cal_points': repr(cp),
                             'rotate': cal_points,
                             'data_to_fit': {self.name: 'pe'},
                             "sweep_name": "Delay",
                             "sweep_unit": "s"})
        MC.run_2D(label, exp_metadata=exp_metadata)
        if analyze:
            # fall back to the generic 2D analysis if the time-domain
            # analysis raises
            try:
                tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name],
                                                   options_dict=dict(TwoD=True))
            except Exception:
                ma.MeasurementAnalysis(TwoD=True)
    def measure_flux_pulse_scope_nzcz_alpha(
            self, nzcz_alphas, delays, CZ_pulse_name=None,
            cal_points=True, upload=True, upload_all=True,
            spacing=30e-9, MC=None):
        """2D flux-pulse-scope measurement: drive-pulse delay (hard sweep)
        vs. the NZCZ pulse parameter alpha (soft sweep).

        Args:
            nzcz_alphas (array): alpha values for the 2D (soft) sweep; the
                first entry is used when constructing the hard sweep.
            delays (array): delays of the drive pulse w.r.t. the flux pulse.
            CZ_pulse_name (str): name of the CZ pulse operation to use.
            cal_points (bool): if True, append four extra delay points that
                hold the calibration segments.
            upload (bool): passed to the soft sweep function (controls
                sequence upload).
            upload_all (bool): passed to the hard sweep function.
            spacing (float): pulse spacing passed to the hard sweep function.
            MC (MeasurementControl): if None, self.instr_mc is used.
        """
        if MC is None:
            MC = self.instr_mc.get_instr()
        self.prepare(drive='timedomain')
        if cal_points:
            # extend the delay axis by four points, spaced like the last
            # delay step, to make room for the calibration segments
            step = np.abs(delays[-1] - delays[-2])
            sweep_points = np.concatenate(
                [delays, [delays[-1]+step, delays[-1]+2*step,
                          delays[-1]+3*step, delays[-1]+4*step]])
        else:
            sweep_points = delays
        # hard sweep over delays; upload is deferred to the soft sweep (s2)
        s1 = awg_swf.Fluxpulse_scope_nzcz_alpha_hard_swf(
            qb_name=self.name, nzcz_alpha=nzcz_alphas[0],
            CZ_pulse_name=CZ_pulse_name,
            operation_dict=self.get_operation_dict(),
            cal_points=cal_points, upload=False,
            upload_all=upload_all, spacing=spacing)
        s2 = awg_swf.Fluxpulse_scope_nzcz_alpha_soft_sweep(
            s1, upload=upload)
        MC.set_sweep_function(s1)
        MC.set_sweep_points(sweep_points)
        MC.set_sweep_function_2D(s2)
        MC.set_sweep_points_2D(nzcz_alphas)
        MC.set_detector_function(self.int_avg_det)
        MC.run_2D('Flux_scope_nzcz_alpha' + self.msmt_suffix)
        ma.MeasurementAnalysis(TwoD=True)
def add_CZ_pulse(qbc, qbt):
    """Add an 'upCZ <qbt>' flux-pulse operation to the control qubit.

    Registers the operation on ``qbc`` together with all its pulse
    parameters (pulse type, channel, amplitude, timing, buffers, ...).

    Args:
        qbc: Control qubit. A QudevTransmon object corresponding to the qubit
            that we apply the flux pulse on.
        qbt: Target qubit. A QudevTransmon object corresponding to the qubit
            we induce the conditional phase on.

    Raises:
        ValueError: if the operation already exists on ``qbc``.
    """
    # add flux pulse parameters
    op_name = 'upCZ ' + qbt.name
    ps_name = 'upCZ_' + qbt.name
    # do not try to add it again if operation already exists
    if op_name in qbc.get_operation_dict():
        raise ValueError('Operation {} already exists.'.format(op_name))
    qbc.add_operation(op_name)
    qbc.add_pulse_parameter(op_name, ps_name + '_cz_target_qb',
                            'cz_target_qb',
                            initial_value=qbt.name,
                            vals=vals.Enum(qbt.name))
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_type', 'pulse_type',
                            initial_value='BufferedCZPulse',
                            vals=vals.Enum('BufferedSquarePulse',
                                           'BufferedCZPulse',
                                           'NZBufferedCZPulse',
                                           'BufferedCZPulseEffectiveTime'))
    qbc.add_pulse_parameter(op_name, ps_name + '_channel', 'channel',
                            initial_value='', vals=vals.Strings())
    qbc.add_pulse_parameter(op_name, ps_name + '_aux_channels_dict',
                            'aux_channels_dict',
                            initial_value={}, vals=vals.Dict())
    qbc.add_pulse_parameter(op_name, ps_name + '_amplitude', 'amplitude',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_frequency', 'frequency',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_phase', 'phase',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_length',
                            'pulse_length',
                            initial_value=0, vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_alpha', 'alpha',
                            initial_value=1, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_buffer_length_start',
                            'buffer_length_start', initial_value=10e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_buffer_length_end',
                            'buffer_length_end', initial_value=10e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_extra_buffer_aux_pulse',
                            'extra_buffer_aux_pulse', initial_value=5e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_delay',
                            'pulse_delay',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_basis_rotation',
                            'basis_rotation', initial_value={},
                            vals=vals.Dict())
    qbc.add_pulse_parameter(op_name, ps_name + '_gaussian_filter_sigma',
                            'gaussian_filter_sigma', initial_value=2e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_chevron_func',
                            'chevron_func', initial_value=None,
                            vals=vals.Callable(),
                            docstring="Callable required when using "
                                      "effective time CZ pulse to "
                                      "straighten Chevron.")
def add_CZ_MG_pulse(qbc, qbt):
    """Add a Martinis-Gellar-style 'CZ <qbt>' flux-pulse operation to qbc.

    Registers the operation on ``qbc`` together with all its pulse
    parameters (pulse type, channel, theta_f/lambda_2 shape parameters,
    qubit frequencies, timing, buffers, ...).

    Args:
        qbc: Control qubit. A QudevTransmon object corresponding to the qubit
            that we apply the flux pulse on.
        qbt: Target qubit. A QudevTransmon object corresponding to the qubit
            we induce the conditional phase on.

    Raises:
        ValueError: if the operation already exists on ``qbc``.
    """
    # add flux pulse parameters
    op_name = 'CZ ' + qbt.name
    ps_name = 'CZ_' + qbt.name
    # do not try to add it again if operation already exists
    if op_name in qbc.get_operation_dict():
        raise ValueError('Operation {} already exists.'.format(op_name))
    qbc.add_operation(op_name)
    qbc.add_pulse_parameter(op_name, ps_name + '_cz_target_qb',
                            'cz_target_qb',
                            initial_value=qbt.name,
                            vals=vals.Enum(qbt.name))
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_type', 'pulse_type',
                            initial_value='NZMartinisGellarPulse',
                            vals=vals.Enum('NZMartinisGellarPulse',
                                           'BufferedCZPulse'))
    qbc.add_pulse_parameter(op_name, ps_name + '_channel', 'channel',
                            initial_value='', vals=vals.Strings())
    qbc.add_pulse_parameter(op_name, ps_name + '_aux_channels_dict',
                            'aux_channels_dict',
                            initial_value={}, vals=vals.Dict())
    qbc.add_pulse_parameter(op_name, ps_name + '_theta_f', 'theta_f',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_lambda_2', 'lambda_2',
                            initial_value=0, vals=vals.Numbers(0))
    # current qubit frequencies are captured as initial values
    qbc.add_pulse_parameter(op_name, ps_name + '_qbc_freq', 'qbc_freq',
                            initial_value=qbc.ge_freq(),
                            vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_qbt_freq', 'qbt_freq',
                            initial_value=qbt.ge_freq(),
                            vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_length',
                            'pulse_length',
                            initial_value=0, vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_alpha', 'alpha',
                            initial_value=1, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_buffer_length_start',
                            'buffer_length_start', initial_value=10e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_buffer_length_end',
                            'buffer_length_end', initial_value=10e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_extra_buffer_aux_pulse',
                            'extra_buffer_aux_pulse', initial_value=5e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_anharmonicity',
                            'anharmonicity',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_J', 'J',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_basis_rotation',
                            'basis_rotation', initial_value={},
                            vals=vals.Dict())
    qbc.add_pulse_parameter(op_name, ps_name + '_dv_dphi', 'dv_dphi',
                            initial_value=0, vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_loop_asym', 'loop_asym',
                            initial_value=0, vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_wave_generation_func',
                            'wave_generation_func', initial_value=None,
                            vals=vals.Callable())
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_delay',
                            'pulse_delay',
                            initial_value=0, vals=vals.Numbers())
# NOTE(review): stray non-code line (looks like an orphaned changelog/commit
# message): "Increase no_improve_break to 12."
import logging
log = logging.getLogger(__name__)
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from qcodes.instrument.parameter import (
ManualParameter, InstrumentRefParameter)
from qcodes.utils import validators as vals
from pycqed.analysis_v2.readout_analysis import Singleshot_Readout_Analysis_Qutrit
from pycqed.measurement import detector_functions as det
from pycqed.measurement import awg_sweep_functions as awg_swf
from pycqed.measurement import awg_sweep_functions_multi_qubit as awg_swf2
from pycqed.measurement import sweep_functions as swf
from pycqed.measurement.sweep_points import SweepPoints
from pycqed.measurement.calibration_points import CalibrationPoints
from pycqed.analysis_v3.processing_pipeline import ProcessingPipeline
from pycqed.measurement.pulse_sequences import single_qubit_tek_seq_elts as sq
from pycqed.measurement.pulse_sequences import fluxing_sequences as fsqs
from pycqed.analysis_v3 import pipeline_analysis as pla
from pycqed.analysis import measurement_analysis as ma
from pycqed.analysis_v2 import timedomain_analysis as tda
import pycqed.analysis.randomized_benchmarking_analysis as rbma
from pycqed.analysis import analysis_toolbox as a_tools
from pycqed.utilities.general import add_suffix_to_dict_keys
from pycqed.utilities.general import temporary_value
from pycqed.instrument_drivers.meta_instrument.qubit_objects.qubit_object \
import Qubit
from pycqed.measurement import optimization as opti
from pycqed.measurement import mc_parameter_wrapper
import pycqed.analysis_v2.spectroscopy_analysis as sa
from pycqed.utilities import math
try:
import pycqed.simulations.readout_mode_simulations_for_CLEAR_pulse \
as sim_CLEAR
except ModuleNotFoundError:
log.warning('"readout_mode_simulations_for_CLEAR_pulse" not imported.')
class QuDev_transmon(Qubit):
    def __init__(self, name, **kw):
        """Create a QuDev transmon qubit object.

        Registers all instrument references, user-bookkeeping parameters
        (coherence times, resonator parameters, ...), the readout ('RO'),
        acquisition, drive ('X180', 'X180_ef') and spectroscopy ('Spec')
        operations with their pulse parameters, and the pulse-preparation
        defaults.

        Args:
            name (str): qubit/instrument name.
            **kw: forwarded to the Qubit base-class constructor.
        """
        super().__init__(name, **kw)
        # references to the instruments this qubit uses (measurement
        # control, LOs, AWG pulsar, UHF acquisition unit, trigger source)
        self.add_parameter('instr_mc',
                           parameter_class=InstrumentRefParameter)
        self.add_parameter('instr_ge_lo',
                           parameter_class=InstrumentRefParameter)
        self.add_parameter('instr_pulsar',
                           parameter_class=InstrumentRefParameter)
        self.add_parameter('instr_uhf',
                           parameter_class=InstrumentRefParameter)
        self.add_parameter('instr_ro_lo',
                           parameter_class=InstrumentRefParameter)
        self.add_parameter('instr_trigger',
                           parameter_class=InstrumentRefParameter)
        # device parameters for user only (bookkeeping; not used by the
        # measurement code in this class directly)
        # could be cleaned up
        self.add_parameter('f_RO_resonator', label='RO resonator frequency',
                           unit='Hz', initial_value=0,
                           parameter_class=ManualParameter)
        self.add_parameter('f_RO_purcell', label='RO purcell filter frequency',
                           unit='Hz', initial_value=0,
                           parameter_class=ManualParameter)
        self.add_parameter('RO_purcell_kappa', label='Purcell filter kappa',
                           unit='Hz', initial_value=0,
                           parameter_class=ManualParameter)
        self.add_parameter('RO_J_coupling', label='J coupling of RO resonator'
                                                 'and purcell filter',
                           unit='Hz', initial_value=0,
                           parameter_class=ManualParameter)
        self.add_parameter('Q_RO_resonator', label='RO resonator Q factor',
                           initial_value=0, parameter_class=ManualParameter)
        self.add_parameter('ssro_contrast', unit='arb.', initial_value=0,
                           label='integrated g-e trace contrast',
                           parameter_class=ManualParameter)
        self.add_parameter('optimal_acquisition_delay', label='Optimal '
                           'acquisition delay', unit='s', initial_value=0,
                           parameter_class=ManualParameter)
        # coherence times (ge and ef transitions)
        self.add_parameter('T1', label='Qubit relaxation', unit='s',
                           initial_value=0, parameter_class=ManualParameter)
        self.add_parameter('T1_ef', label='Qubit relaxation', unit='s',
                           initial_value=0, parameter_class=ManualParameter)
        self.add_parameter('T2', label='Qubit dephasing Echo', unit='s',
                           initial_value=0, parameter_class=ManualParameter)
        self.add_parameter('T2_ef', label='Qubit dephasing Echo', unit='s',
                           initial_value=0, parameter_class=ManualParameter)
        self.add_parameter('T2_star', label='Qubit dephasing', unit='s',
                           initial_value=0, parameter_class=ManualParameter)
        self.add_parameter('T2_star_ef', label='Qubit dephasing', unit='s',
                           initial_value=0, parameter_class=ManualParameter)
        self.add_parameter('anharmonicity', label='Qubit anharmonicity',
                           unit='Hz', initial_value=0,
                           parameter_class=ManualParameter)
        self.add_parameter('dynamic_phase', label='CZ dynamic phase',
                           unit='deg', initial_value=0,
                           parameter_class=ManualParameter)
        self.add_parameter('EC_qubit', label='Qubit EC', unit='Hz',
                           initial_value=0, parameter_class=ManualParameter)
        self.add_parameter('EJ_qubit', label='Qubit EJ', unit='Hz',
                           initial_value=0, parameter_class=ManualParameter)
        self.add_parameter('chi', unit='Hz', parameter_class=ManualParameter,
                           label='Chi')
        # readout pulse parameters
        self.add_parameter('ro_freq', unit='Hz',
                           parameter_class=ManualParameter,
                           label='Readout frequency')
        self.add_parameter('ro_I_offset', unit='V', initial_value=0,
                           parameter_class=ManualParameter,
                           label='DC offset for the readout I channel')
        self.add_parameter('ro_Q_offset', unit='V', initial_value=0,
                           parameter_class=ManualParameter,
                           label='DC offset for the readout Q channel')
        self.add_parameter('ro_lo_power', unit='dBm',
                           parameter_class=ManualParameter,
                           label='Readout pulse upconversion mixer LO power')
        # the 'RO' operation and its pulse parameters; several accept lists
        # in addition to numbers to support multichromatic readout
        self.add_operation('RO')
        self.add_pulse_parameter('RO', 'ro_pulse_type', 'pulse_type',
                                 vals=vals.Enum('GaussFilteredCosIQPulse',
                                     'GaussFilteredCosIQPulseMultiChromatic'),
                                 initial_value='GaussFilteredCosIQPulse')
        self.add_pulse_parameter('RO', 'ro_I_channel', 'I_channel',
                                 initial_value=None, vals=vals.Strings())
        self.add_pulse_parameter('RO', 'ro_Q_channel', 'Q_channel',
                                 initial_value=None, vals=vals.Strings())
        self.add_pulse_parameter('RO', 'ro_amp', 'amplitude',
                                 initial_value=0.001,
                                 vals=vals.MultiType(vals.Numbers(), vals.Lists()))
        self.add_pulse_parameter('RO', 'ro_length', 'pulse_length',
                                 initial_value=2e-6, vals=vals.Numbers())
        self.add_pulse_parameter('RO', 'ro_delay', 'pulse_delay',
                                 initial_value=0, vals=vals.Numbers())
        self.add_pulse_parameter('RO', 'ro_mod_freq', 'mod_frequency',
                                 initial_value=100e6,
                                 vals=vals.MultiType(vals.Numbers(), vals.Lists()))
        self.add_pulse_parameter('RO', 'ro_phase', 'phase',
                                 initial_value=0,
                                 vals=vals.MultiType(vals.Numbers(), vals.Lists()))
        self.add_pulse_parameter('RO', 'ro_phi_skew', 'phi_skew',
                                 initial_value=0,
                                 vals=vals.MultiType(vals.Numbers(), vals.Lists()))
        self.add_pulse_parameter('RO', 'ro_alpha', 'alpha',
                                 initial_value=1,
                                 vals=vals.MultiType(vals.Numbers(), vals.Lists()))
        self.add_pulse_parameter('RO', 'ro_sigma',
                                 'gaussian_filter_sigma',
                                 initial_value=10e-9, vals=vals.Numbers())
        self.add_pulse_parameter('RO', 'ro_nr_sigma', 'nr_sigma',
                                 initial_value=5, vals=vals.Numbers())
        self.add_pulse_parameter('RO', 'ro_phase_lock', 'phase_lock',
                                 initial_value=True, vals=vals.Bool())
        self.add_pulse_parameter('RO', 'ro_basis_rotation',
                                 'basis_rotation', initial_value={},
                                 docstring='Dynamic phase acquired by other '
                                           'qubits due to a measurement tone on'
                                           ' this qubit.',
                                 label='RO pulse basis rotation dictionary',
                                 vals=vals.Dict())
        # acquisition parameters (UHF integration channels and weights)
        self.add_parameter('acq_I_channel', initial_value=0,
                           vals=vals.Enum(0, 1, 2, 3, 4, 5, 6, 7, 8),
                           parameter_class=ManualParameter)
        self.add_parameter('acq_Q_channel', initial_value=1,
                           vals=vals.Enum(0, 1, 2, 3, 4, 5, 6, 7, 8, None),
                           parameter_class=ManualParameter)
        self.add_parameter('acq_averages', initial_value=1024,
                           vals=vals.Ints(0, 1000000),
                           parameter_class=ManualParameter)
        self.add_parameter('acq_shots', initial_value=4094,
                           docstring='Number of single shot measurements to do'
                                     'in single shot experiments.',
                           vals=vals.Ints(0, 1048576),
                           parameter_class=ManualParameter)
        self.add_parameter('acq_length', initial_value=2.2e-6,
                           vals=vals.Numbers(min_value=1e-8,
                                             max_value=4097/1.2e9),
                           parameter_class=ManualParameter)
        self.add_parameter('acq_IQ_angle', initial_value=0,
                           docstring='The phase of the integration weights '
                                     'when using SSB, DSB or square_rot '
                                     'integration weights',
                           label='Acquisition IQ angle', unit='rad',
                           parameter_class=ManualParameter)
        self.add_parameter('acq_weights_I', vals=vals.Arrays(),
                           label='Optimized weights for I channel',
                           parameter_class=ManualParameter)
        self.add_parameter('acq_weights_Q', vals=vals.Arrays(),
                           label='Optimized weights for Q channel',
                           parameter_class=ManualParameter)
        self.add_parameter('acq_weights_type', initial_value='SSB',
                           vals=vals.Enum('SSB', 'DSB', 'optimal',
                                          'square_rot', 'manual',
                                          'optimal_qutrit'),
                           docstring=(
                               'Determines what type of integration weights to '
                               'use: \n\tSSB: Single sideband demodulation\n\t'
                               'DSB: Double sideband demodulation\n\toptimal: '
                               'waveforms specified in "ro_acq_weight_func_I" '
                               'and "ro_acq_weight_func_Q"\n\tsquare_rot: uses '
                               'a single integration channel with boxcar '
                               'weights'),
                           parameter_class=ManualParameter)
        self.add_parameter('acq_weights_I2', vals=vals.Arrays(),
                           label='Optimized weights for second integration '
                                 'channel I',
                           docstring=("Used for double weighted integration "
                                      "during qutrit readout"),
                           parameter_class=ManualParameter)
        self.add_parameter('acq_weights_Q2', vals=vals.Arrays(),
                           label='Optimized weights for second integration '
                                 'channel Q',
                           docstring=("Used for double weighted integration "
                                      "during qutrit readout"),
                           parameter_class=ManualParameter)
        self.add_parameter('acq_weights_basis', vals=vals.Lists(),
                           label="weight basis used",
                           docstring=("Used to log the weights basis for "
                                      "integration during qutrit readout. E.g."
                                      " ['ge', 'gf'] or ['ge', 'ortho']."),
                           parameter_class=ManualParameter)
        self.add_parameter('acq_classifier_params', vals=vals.Dict(),
                           label='Parameters for the qutrit classifier.',
                           docstring=("Used in the int_avg_classif_det to "
                                      "classify single shots into g, e, f."),
                           parameter_class=ManualParameter)
        self.add_parameter('acq_state_prob_mtx', vals=vals.Arrays(),
                           label='SSRO correction matrix.',
                           docstring=("Matrix of measured vs prepared qubit "
                                      "states."),
                           parameter_class=ManualParameter)
        # qubit drive pulse parameters
        self.add_parameter('ge_freq', label='Qubit drive frequency', unit='Hz',
                           initial_value=0, parameter_class=ManualParameter)
        self.add_parameter('ge_lo_power', unit='dBm',
                           parameter_class=ManualParameter,
                           label='Qubit drive pulse mixer LO power')
        self.add_parameter('ge_I_offset', unit='V', initial_value=0,
                           parameter_class=ManualParameter,
                           label='DC offset for the drive line I channel')
        self.add_parameter('ge_Q_offset', unit='V', initial_value=0,
                           parameter_class=ManualParameter,
                           label='DC offset for the drive line Q channel')
        # add drive pulse parameters ('X180' ge rotation)
        self.add_operation('X180')
        self.add_pulse_parameter('X180', 'ge_pulse_type', 'pulse_type',
                                 initial_value='SSB_DRAG_pulse',
                                 vals=vals.Enum('SSB_DRAG_pulse'))
        self.add_pulse_parameter('X180', 'ge_I_channel', 'I_channel',
                                 initial_value=None, vals=vals.Strings())
        self.add_pulse_parameter('X180', 'ge_Q_channel', 'Q_channel',
                                 initial_value=None, vals=vals.Strings())
        self.add_pulse_parameter('X180', 'ge_amp180', 'amplitude',
                                 initial_value=0.001, vals=vals.Numbers())
        self.add_pulse_parameter('X180', 'ge_amp90_scale', 'amp90_scale',
                                 initial_value=0.5, vals=vals.Numbers(0, 1))
        self.add_pulse_parameter('X180', 'ge_delay', 'pulse_delay',
                                 initial_value=0, vals=vals.Numbers())
        self.add_pulse_parameter('X180', 'ge_sigma', 'sigma',
                                 initial_value=10e-9, vals=vals.Numbers())
        self.add_pulse_parameter('X180', 'ge_nr_sigma', 'nr_sigma',
                                 initial_value=5, vals=vals.Numbers())
        self.add_pulse_parameter('X180', 'ge_motzoi', 'motzoi',
                                 initial_value=0, vals=vals.Numbers())
        self.add_pulse_parameter('X180', 'ge_mod_freq', 'mod_frequency',
                                 initial_value=-100e6, vals=vals.Numbers())
        self.add_pulse_parameter('X180', 'ge_phi_skew', 'phi_skew',
                                 initial_value=0, vals=vals.Numbers())
        self.add_pulse_parameter('X180', 'ge_alpha', 'alpha',
                                 initial_value=1, vals=vals.Numbers())
        self.add_pulse_parameter('X180', 'ge_X_phase', 'phase',
                                 initial_value=0, vals=vals.Numbers())
        # qubit 2nd excitation drive pulse parameters ('X180_ef'); channel
        # and mixer parameters are inherited from 'X180' in
        # get_operation_dict
        self.add_parameter('ef_freq', label='Qubit ef drive frequency',
                           unit='Hz', initial_value=0,
                           parameter_class=ManualParameter)
        self.add_operation('X180_ef')
        self.add_pulse_parameter('X180_ef', 'ef_pulse_type', 'pulse_type',
                                 initial_value='SSB_DRAG_pulse',
                                 vals=vals.Enum('SSB_DRAG_pulse'))
        self.add_pulse_parameter('X180_ef', 'ef_amp180', 'amplitude',
                                 initial_value=0.001, vals=vals.Numbers())
        self.add_pulse_parameter('X180_ef', 'ef_amp90_scale', 'amp90_scale',
                                 initial_value=0.5, vals=vals.Numbers(0, 1))
        self.add_pulse_parameter('X180_ef', 'ef_delay', 'pulse_delay',
                                 initial_value=0, vals=vals.Numbers())
        self.add_pulse_parameter('X180_ef', 'ef_sigma', 'sigma',
                                 initial_value=10e-9, vals=vals.Numbers())
        self.add_pulse_parameter('X180_ef', 'ef_nr_sigma', 'nr_sigma',
                                 initial_value=5, vals=vals.Numbers())
        self.add_pulse_parameter('X180_ef', 'ef_motzoi', 'motzoi',
                                 initial_value=0, vals=vals.Numbers())
        self.add_pulse_parameter('X180_ef', 'ef_X_phase', 'phase',
                                 initial_value=0, vals=vals.Numbers())
        # add qubit spectroscopy parameters ('Spec' marker pulse)
        self.add_parameter('spec_power', unit='dBm', initial_value=-20,
                           parameter_class=ManualParameter,
                           label='Qubit spectroscopy power')
        self.add_operation('Spec')
        self.add_pulse_parameter('Spec', 'spec_pulse_type', 'pulse_type',
                                 initial_value='SquarePulse',
                                 vals=vals.Enum('SquarePulse'))
        self.add_pulse_parameter('Spec', 'spec_marker_channel', 'channel',
                                 initial_value=None, vals=vals.Strings())
        self.add_pulse_parameter('Spec', 'spec_marker_amp', 'amplitude',
                                 vals=vals.Numbers(), initial_value=1)
        self.add_pulse_parameter('Spec', 'spec_marker_length', 'length',
                                 initial_value=5e-6, vals=vals.Numbers())
        self.add_pulse_parameter('Spec', 'spec_marker_delay', 'pulse_delay',
                                 vals=vals.Numbers(), initial_value=0)
        # dc flux parameters
        self.add_parameter('dc_flux_parameter', initial_value=None,
                           label='QCoDeS parameter to sweep the dc flux',
                           parameter_class=ManualParameter)
        # Pulse preparation parameters (default: plain wait-based reset)
        DEFAULT_PREP_PARAMS = dict(preparation_type='wait',
                                   post_ro_wait=1e-6, reset_reps=1,
                                   final_reset_pulse=True,
                                   threshold_mapping={
                                       self.name: {0: 'g', 1: 'e'}})
        self.add_parameter('preparation_params', parameter_class=ManualParameter,
                           initial_value=DEFAULT_PREP_PARAMS, vals=vals.Dict())
def get_idn(self):
return {'driver': str(self.__class__), 'name': self.name}
    def update_detector_functions(self):
        """(Re)create the detector-function objects from the current
        acquisition parameters.

        Builds int_log_det, int_avg_classif_det, int_avg_det, dig_avg_det,
        inp_avg_det, dig_log_det and int_avg_det_spec, using the UHF and
        pulsar instrument references and the acq_* parameters. Must be
        called (e.g. via prepare()) after changing acquisition settings.
        """
        # single-channel acquisition unless a Q channel exists and the
        # weights type produces two integration results
        if self.acq_Q_channel() is None or \
                self.acq_weights_type() not in ['SSB', 'DSB', 'optimal_qutrit']:
            channels = [self.acq_I_channel()]
        else:
            channels = [self.acq_I_channel(), self.acq_Q_channel()]
        # single-shot logging of raw integration results
        self.int_log_det = det.UHFQC_integration_logging_det(
            UHFQC=self.instr_uhf.get_instr(),
            AWG=self.instr_pulsar.get_instr(),
            channels=channels, nr_shots=self.acq_shots(),
            integration_length=self.acq_length(),
            result_logging_mode='raw')
        # shot-by-shot state classification (g/e/f) with optional
        # readout-correction matrix
        self.int_avg_classif_det = det.UHFQC_classifier_detector(
            UHFQC=self.instr_uhf.get_instr(),
            AWG=self.instr_pulsar.get_instr(),
            channels=channels, nr_shots=self.acq_averages(),
            integration_length=self.acq_length(),
            get_values_function_kwargs={
                'classifier_params': self.acq_classifier_params(),
                'state_prob_mtx': self.acq_state_prob_mtx()
            })
        # averaged integration results, raw and thresholded ('digitized')
        self.int_avg_det = det.UHFQC_integrated_average_detector(
            UHFQC=self.instr_uhf.get_instr(),
            AWG=self.instr_pulsar.get_instr(),
            channels=channels, nr_averages=self.acq_averages(),
            integration_length=self.acq_length(),
            result_logging_mode='raw')
        self.dig_avg_det = det.UHFQC_integrated_average_detector(
            UHFQC=self.instr_uhf.get_instr(),
            AWG=self.instr_pulsar.get_instr(),
            channels=channels, nr_averages=self.acq_averages(),
            integration_length=self.acq_length(),
            result_logging_mode='digitized')
        # averaged time traces: number of samples from the UHF sample clock
        nr_samples = int(self.acq_length() *
                         self.instr_uhf.get_instr().clock_freq())
        self.inp_avg_det = det.UHFQC_input_average_detector(
            UHFQC=self.instr_uhf.get_instr(),
            AWG=self.instr_pulsar.get_instr(),
            nr_averages=self.acq_averages(),
            nr_samples=nr_samples)
        self.dig_log_det = det.UHFQC_integration_logging_det(
            UHFQC=self.instr_uhf.get_instr(),
            AWG=self.instr_pulsar.get_instr(),
            channels=channels, nr_shots=self.acq_shots(),
            integration_length=self.acq_length(),
            result_logging_mode='digitized')
        # spectroscopy detector: note it uses the UHF itself as the AWG and
        # always both I and Q channels; returns a single averaged point
        # (single_int_avg) in magnitude/phase form (real_imag=False)
        self.int_avg_det_spec = det.UHFQC_integrated_average_detector(
            UHFQC=self.instr_uhf.get_instr(),
            AWG=self.instr_uhf.get_instr(),
            channels=[self.acq_I_channel(), self.acq_Q_channel()],
            nr_averages=self.acq_averages(),
            integration_length=self.acq_length(),
            result_logging_mode='raw', real_imag=False, single_int_avg=True)
    def prepare(self, drive='timedomain'):
        """Configure the instruments for a measurement on this qubit.

        Sets up the readout and drive local oscillators, the AWG channel DC
        offsets, and refreshes the detector functions and integration
        weights.

        Args:
            drive: one of None (drive LO off), 'continuous_spec',
                'pulsed_spec' or 'timedomain'.

        Raises:
            ValueError: for any other value of ``drive``.
        """
        # configure readout local oscillators
        lo = self.instr_ro_lo
        if lo() is not None:
            lo.get_instr().pulsemod_state('Off')
            lo.get_instr().power(self.ro_lo_power())
            # in case of multichromatic readout, take first ro freq, else just
            # wrap the frequency in a list and take the first
            if np.ndim(self.ro_freq()) == 0:
                ro_freq = [self.ro_freq()]
            else:
                ro_freq = self.ro_freq()
            if np.ndim(self.ro_mod_freq()) == 0:
                ro_mod_freq = [self.ro_mod_freq()]
            else:
                ro_mod_freq = self.ro_mod_freq()
            # LO below the readout frequency by the modulation frequency
            lo.get_instr().frequency(ro_freq[0] - ro_mod_freq[0])
            lo.get_instr().on()
        # configure qubit drive local oscillator
        lo = self.instr_ge_lo
        if lo() is not None:
            if drive is None:
                lo.get_instr().off()
            elif drive == 'continuous_spec':
                # CW spectroscopy: LO directly at the qubit frequency
                lo.get_instr().pulsemod_state('Off')
                lo.get_instr().power(self.spec_power())
                lo.get_instr().frequency(self.ge_freq())
                lo.get_instr().on()
            elif drive == 'pulsed_spec':
                # pulsed spectroscopy: LO gated by the pulse-modulation input
                lo.get_instr().pulsemod_state('On')
                lo.get_instr().power(self.spec_power())
                lo.get_instr().frequency(self.ge_freq())
                lo.get_instr().on()
            elif drive == 'timedomain':
                # time domain: LO offset by the modulation frequency; the
                # AWG supplies the sideband
                lo.get_instr().pulsemod_state('Off')
                lo.get_instr().power(self.ge_lo_power())
                lo.get_instr().frequency(self.ge_freq() - self.ge_mod_freq())
                lo.get_instr().on()
            else:
                raise ValueError("Invalid drive parameter '{}'".format(drive)
                                 + ". Valid options are None, 'continuous_spec"
                                 + "', 'pulsed_spec' and 'timedomain'.")
        # set awg channel dc offsets (drive offsets only needed in
        # time domain)
        offset_list = [('ro_I_channel', 'ro_I_offset'),
                       ('ro_Q_channel', 'ro_Q_offset')]
        if drive == 'timedomain':
            offset_list += [('ge_I_channel', 'ge_I_offset'),
                            ('ge_Q_channel', 'ge_Q_offset')]
        for channel_par, offset_par in offset_list:
            self.instr_pulsar.get_instr().set(
                self.get(channel_par) + '_offset', self.get(offset_par))
        # other preparations
        self.update_detector_functions()
        self.set_readout_weights()
    def set_readout_weights(self, weights_type=None, f_mod=None):
        """Upload the integration weights to the UHF acquisition unit.

        Args:
            weights_type: 'manual', 'optimal', 'optimal_qutrit', 'SSB',
                'DSB' or 'square_rot'; defaults to self.acq_weights_type().
            f_mod: modulation frequency used to generate the demodulation
                weights (SSB/DSB/square_rot); defaults to
                self.ro_mod_freq().

        Raises:
            KeyError: for an unknown weights type.
        """
        if weights_type is None:
            weights_type = self.acq_weights_type()
        if f_mod is None:
            f_mod = self.ro_mod_freq()
        if weights_type == 'manual':
            # leave whatever weights are currently on the instrument
            pass
        elif weights_type == 'optimal':
            if (self.acq_weights_I() is None or self.acq_weights_Q() is None):
                log.warning('Optimal weights are None, not setting '
                            'integration weights')
                return
            # When optimal weights are used, only the RO I weight
            # channel is used
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_real'.format(
                self.acq_I_channel()), self.acq_weights_I().copy())
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_imag'.format(
                self.acq_I_channel()), self.acq_weights_Q().copy())
            self.instr_uhf.get_instr().set('qas_0_rotations_{}'.format(
                self.acq_I_channel()), 1.0-1.0j)
        elif weights_type == 'optimal_qutrit':
            for w_f in [self.acq_weights_I, self.acq_weights_Q,
                        self.acq_weights_I2, self.acq_weights_Q2]:
                if w_f() is None:
                    log.warning('The optimal weights {} are None. '
                                '\nNot setting integration weights.'
                                .format(w_f.name))
                    return
            # if all weights are not None, set first integration weights (real
            # and imag) on channel I and second integration weights on channel
            # Q.
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_real'.format(
                self.acq_I_channel()),
                self.acq_weights_I().copy())
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_imag'.format(
                self.acq_I_channel()),
                self.acq_weights_Q().copy())
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_real'.format(
                self.acq_Q_channel()),
                self.acq_weights_I2().copy())
            self.instr_uhf.get_instr().set('qas_0_integration_weights_{}_imag'.format(
                self.acq_Q_channel()),
                self.acq_weights_Q2().copy())
            self.instr_uhf.get_instr().set('qas_0_rotations_{}'.format(
                self.acq_I_channel()), 1.0-1.0j)
            self.instr_uhf.get_instr().set('qas_0_rotations_{}'.format(
                self.acq_Q_channel()), 1.0-1.0j)
        else:
            # generate cos/sin demodulation weights over the maximum
            # weight length (4097 samples at 1.8 GS/s — presumably the UHF
            # sample rate; note acq_length validation above uses 1.2e9 —
            # TODO confirm which clock applies)
            tbase = np.arange(0, 4097 / 1.8e9, 1 / 1.8e9)
            theta = self.acq_IQ_angle()
            cosI = np.array(np.cos(2 * np.pi * f_mod * tbase + theta))
            sinI = np.array(np.sin(2 * np.pi * f_mod * tbase + theta))
            c1 = self.acq_I_channel()
            c2 = self.acq_Q_channel()
            uhf = self.instr_uhf.get_instr()
            if weights_type == 'SSB':
                uhf.set('qas_0_integration_weights_{}_real'.format(c1), cosI)
                uhf.set('qas_0_rotations_{}'.format(c1), 1.0+1.0j)
                uhf.set('qas_0_integration_weights_{}_real'.format(c2), sinI)
                uhf.set('qas_0_rotations_{}'.format(c2), 1.0-1.0j)
                uhf.set('qas_0_integration_weights_{}_imag'.format(c1), sinI)
                uhf.set('qas_0_integration_weights_{}_imag'.format(c2), cosI)
            elif weights_type == 'DSB':
                uhf.set('qas_0_integration_weights_{}_real'.format(c1), cosI)
                uhf.set('qas_0_rotations_{}'.format(c1), 1.0+0j)
                uhf.set('qas_0_integration_weights_{}_real'.format(c2), sinI)
                uhf.set('qas_0_rotations_{}'.format(c2), 1.0+0j)
            elif weights_type == 'square_rot':
                # single-channel demodulation with rotation by acq_IQ_angle
                uhf.set('qas_0_integration_weights_{}_real'.format(c1), cosI)
                uhf.set('qas_0_rotations_{}'.format(c1), 1.0+1.0j)
                uhf.set('qas_0_integration_weights_{}_imag'.format(c1), sinI)
            else:
                raise KeyError('Invalid weights type: {}'.format(weights_type))
def get_spec_pars(self):
return self.get_operation_dict()['Spec ' + self.name]
def get_ro_pars(self):
return self.get_operation_dict()['RO ' + self.name]
def get_acq_pars(self):
return self.get_operation_dict()['Acq ' + self.name]
def get_ge_pars(self):
return self.get_operation_dict()['X180 ' + self.name]
def get_ef_pars(self):
return self.get_operation_dict()['X180_ef ' + self.name]
def get_operation_dict(self, operation_dict=None):
if operation_dict is None:
operation_dict = {}
operation_dict = super().get_operation_dict(operation_dict)
operation_dict['Spec ' + self.name]['operation_type'] = 'Other'
operation_dict['RO ' + self.name]['operation_type'] = 'RO'
operation_dict['X180 ' + self.name]['operation_type'] = 'MW'
operation_dict['X180_ef ' + self.name]['operation_type'] = 'MW'
operation_dict['X180 ' + self.name]['basis'] = self.name
operation_dict['X180_ef ' + self.name]['basis'] = self.name + \
'_ef'
operation_dict['X180_ef ' + self.name]['I_channel'] = \
operation_dict['X180 ' + self.name]['I_channel']
operation_dict['X180_ef ' + self.name]['Q_channel'] = \
operation_dict['X180 ' + self.name]['Q_channel']
operation_dict['X180_ef ' + self.name]['phi_skew'] = \
operation_dict['X180 ' + self.name]['phi_skew']
operation_dict['X180_ef ' + self.name]['alpha'] = \
operation_dict['X180 ' + self.name]['alpha']
operation_dict['Acq ' + self.name] = deepcopy(
operation_dict['RO ' + self.name])
operation_dict['Acq ' + self.name]['amplitude'] = 0
if self.ef_freq() == 0:
operation_dict['X180_ef ' + self.name]['mod_frequency'] = None
else:
operation_dict['X180_ef ' + self.name]['mod_frequency'] = \
self.ef_freq() - self.ge_freq() + self.ge_mod_freq()
operation_dict.update(add_suffix_to_dict_keys(
sq.get_pulse_dict_from_pars(
operation_dict['X180 ' + self.name]), ' ' + self.name))
operation_dict.update(add_suffix_to_dict_keys(
sq.get_pulse_dict_from_pars(
operation_dict['X180_ef ' + self.name]), '_ef ' + self.name))
if np.ndim(self.ro_freq()) != 0:
delta_freqs = np.diff(self.ro_freq(), prepend=self.ro_freq()[0])
mods = [self.ro_mod_freq() + d for d in delta_freqs]
operation_dict['RO ' + self.name]['mod_frequency'] = mods
return operation_dict
def swf_ro_freq_lo(self):
return swf.Offset_Sweep(
self.instr_ro_lo.get_instr().frequency,
-self.ro_mod_freq(),
name='Readout frequency',
parameter_name='Readout frequency')
def swf_ro_mod_freq(self):
return swf.Offset_Sweep(
self.ro_mod_freq,
self.instr_ro_lo.get_instr().frequency(),
name='Readout frequency',
parameter_name='Readout frequency')
    def measure_resonator_spectroscopy(self, freqs, sweep_points_2D=None,
                                       sweep_function_2D=None,
                                       trigger_separation=3e-6,
                                       upload=True, analyze=True,
                                       close_fig=True, label=None):
        """Varies the frequency of the microwave source to the resonator and
        measures the transmittance.

        Args:
            freqs (array): readout frequencies, in Hz.
            sweep_points_2D: optional points for a second sweep dimension.
            sweep_function_2D: optional sweep function for the second
                dimension; if given, the measurement runs in 2D mode.
            trigger_separation (float): trigger period used during the scan.
            upload (bool): whether to upload the readout-pulse sequence.
            analyze (bool): whether to run MeasurementAnalysis afterwards.
            close_fig (bool): passed to the analysis.
            label (str): measurement label; auto-generated if None.
        """
        if np.any(freqs < 500e6):
            log.warning(('Some of the values in the freqs array might be '
                         'too small. The units should be Hz.'))
        if label is None:
            if sweep_function_2D is not None:
                label = 'resonator_scan_2d' + self.msmt_suffix
            else:
                label = 'resonator_scan' + self.msmt_suffix
        # readout only, no qubit drive
        self.prepare(drive=None)
        if upload:
            sq.pulse_list_list_seq([[self.get_ro_pars()]])
        MC = self.instr_mc.get_instr()
        MC.set_sweep_function(self.swf_ro_freq_lo())
        if sweep_function_2D is not None:
            MC.set_sweep_function_2D(sweep_function_2D)
            mode = '2D'
        else:
            mode = '1D'
        MC.set_sweep_points(freqs)
        if sweep_points_2D is not None:
            MC.set_sweep_points_2D(sweep_points_2D)
        MC.set_detector_function(self.int_avg_det_spec)
        # run with a temporarily overridden trigger period; start all AWGs
        # except the UHF (which is triggered by the detector)
        with temporary_value(self.instr_trigger.get_instr().pulse_period,
                             trigger_separation):
            self.instr_pulsar.get_instr().start(exclude=[self.instr_uhf()])
            MC.run(name=label, mode=mode)
            self.instr_pulsar.get_instr().stop()
        if analyze:
            ma.MeasurementAnalysis(close_fig=close_fig, qb_name=self.name,
                                   TwoD=(mode == '2D'))
    def measure_qubit_spectroscopy(self, freqs, sweep_points_2D=None,
            sweep_function_2D=None, pulsed=True, trigger_separation=13e-6,
            upload=True, analyze=True, close_fig=True, label=None):
        """Sweep the qubit drive frequency (via the drive LO) and measure the
        resonator transmittance (pulsed or continuous qubit spectroscopy).

        Args:
            freqs: absolute drive frequencies in Hz (1D sweep points).
            sweep_points_2D: optional points for a second sweep dimension.
            sweep_function_2D: optional sweep function for a 2D scan.
            pulsed: use a pulsed spectroscopy sequence instead of CW drive.
            trigger_separation: trigger period (s) used during the scan.
            upload, analyze, close_fig, label: as in
                measure_resonator_spectroscopy.
        """
        # Guard against passing frequencies in the wrong unit (expects Hz).
        if np.any(freqs < 500e6):
            log.warning(('Some of the values in the freqs array might be '
                         'too small. The units should be Hz.'))
        if pulsed:
            if label is None:
                if sweep_function_2D is not None:
                    label = 'pulsed_spec_2d' + self.msmt_suffix
                else:
                    label = 'pulsed_spec' + self.msmt_suffix
            self.prepare(drive='pulsed_spec')
            if upload:
                sq.pulse_list_list_seq([[self.get_spec_pars(),
                                         self.get_ro_pars()]])
        else:
            if label is None:
                if sweep_function_2D is not None:
                    label = 'continuous_spec_2d' + self.msmt_suffix
                else:
                    label = 'continuous_spec' + self.msmt_suffix
            self.prepare(drive='continuous_spec')
            if upload:
                sq.pulse_list_list_seq([[self.get_ro_pars()]])
        MC = self.instr_mc.get_instr()
        MC.set_sweep_function(self.instr_ge_lo.get_instr().frequency)
        if sweep_function_2D is not None:
            MC.set_sweep_function_2D(sweep_function_2D)
            mode = '2D'
        else:
            mode = '1D'
        MC.set_sweep_points(freqs)
        if sweep_points_2D is not None:
            MC.set_sweep_points_2D(sweep_points_2D)
        MC.set_detector_function(self.int_avg_det_spec)
        # Temporarily change the trigger period for the duration of the scan.
        with temporary_value(self.instr_trigger.get_instr().pulse_period,
                             trigger_separation):
            self.instr_pulsar.get_instr().start(exclude=[self.instr_uhf()])
            MC.run(name=label, mode=mode)
            self.instr_pulsar.get_instr().stop()
        if analyze:
            ma.MeasurementAnalysis(close_fig=close_fig, qb_name=self.name,
                                   TwoD=(mode == '2D'))
    def measure_rabi(self, amps, analyze=True, upload=True, label=None, n=1,
                     last_ge_pulse=False, n_cal_points_per_state=2,
                     cal_states='auto', for_ef=False, classified_ro=False,
                     prep_params=None, exp_metadata=None):
        """
        Varies the amplitude of the qubit drive pulse and measures the readout
        resonator transmission.

        Args:
            amps: array of drive pulse amplitudes (in V)
            analyze: whether to run MultiQubit_TimeDomain_Analysis at the end
            upload: whether or not to upload the sequence to the AWG
            label: the measurement label; auto-generated if None
            n: the number of times the drive pulses with the same
                amplitude should be repeated in each measurement
            last_ge_pulse: append a final ge pulse (relevant for for_ef)
            n_cal_points_per_state: calibration segments per cal state
            cal_states: calibration states, or 'auto' to guess them
            for_ef: drive the ef transition instead of ge
            classified_ro: use the classifying detector
            prep_params: state-preparation parameters; defaults to
                self.preparation_params()
            exp_metadata: optional dict of extra experiment metadata
        """
        if prep_params is None:
            prep_params = self.preparation_params()
        # Define the measurement label
        if label is None:
            label = 'Rabi_ef' if for_ef else 'Rabi'
            if n != 1:
                label += f'-n{n}'
            if classified_ro:
                label += '_classified'
            if 'active' in prep_params['preparation_type']:
                label += '_reset'
            label += self.msmt_suffix
        # Prepare the physical instruments for a time domain measurement
        self.prepare(drive='timedomain')
        MC = self.instr_mc.get_instr()
        cal_states = CalibrationPoints.guess_cal_states(cal_states, for_ef)
        cp = CalibrationPoints.single_qubit(self.name, cal_states,
                                            n_per_state=n_cal_points_per_state)
        seq, sweep_points = sq.rabi_seq_active_reset(
            amps=amps, qb_name=self.name, cal_points=cp, n=n, for_ef=for_ef,
            operation_dict=self.get_operation_dict(), upload=False,
            last_ge_pulse=last_ge_pulse, prep_params=prep_params)
        # Specify the sweep function, the sweep points,
        # and the detector function, and run the measurement
        MC.set_sweep_function(awg_swf.SegmentHardSweep(
            sequence=seq, upload=upload, parameter_name='Amplitude', unit='V'))
        MC.set_sweep_points(sweep_points)
        MC.set_detector_function(self.int_avg_classif_det if classified_ro else
                                 self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'sweep_points_dict': {self.name: amps},
                             'preparation_params': prep_params,
                             'cal_points': repr(cp),
                             'rotate': False if classified_ro else
                                len(cp.states) != 0,
                             'last_ge_pulses': [last_ge_pulse],
                             'data_to_fit': {self.name: 'pf' if for_ef else 'pe'},
                             "sweep_name": "Amplitude",
                             "sweep_unit": "V"})
        MC.run(label, exp_metadata=exp_metadata)
        # Create a MeasurementAnalysis object for this measurement
        if analyze:
            tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
def measure_rabi_amp90(self, scales=np.linspace(0.3, 0.7, 31), n=1,
MC=None, analyze=True, close_fig=True, upload=True):
self.prepare(drive='timedomain')
if MC is None:
MC = self.instr_mc.get_instr()
MC.set_sweep_function(awg_swf.Rabi_amp90(
pulse_pars=self.get_ge_pars(), RO_pars=self.get_ro_pars(), n=n,
upload=upload))
MC.set_sweep_points(scales)
MC.set_detector_function(self.int_avg_det)
MC.run('Rabi_amp90_scales_n{}'.format(n)+ self.msmt_suffix)
    def measure_T1(self, times=None, analyze=True, upload=True,
                   last_ge_pulse=False, n_cal_points_per_state=2,
                   cal_states='auto', for_ef=False, classified_ro=False,
                   prep_params=None, label=None,
                   exp_metadata=None):
        """Measure the qubit relaxation time T1 (ge, or ef if for_ef).

        Sweeps the delay between the excitation pulse and the readout.

        Args:
            times: delays in seconds (required).
            Other arguments as in measure_rabi.

        Raises:
            ValueError: if times is None.
        """
        if times is None:
            raise ValueError("Unspecified times for measure_T1")
        # Guard against passing times in the wrong unit (expects seconds).
        if np.any(times > 1e-3):
            log.warning('The values in the times array might be too large.'
                        'The units should be seconds.')
        self.prepare(drive='timedomain')
        MC = self.instr_mc.get_instr()
        if prep_params is None:
            prep_params = self.preparation_params()
        # Define the measurement label
        if label is None:
            label = f'T1{"_ef" if for_ef else ""}' + self.msmt_suffix
        cal_states = CalibrationPoints.guess_cal_states(cal_states, for_ef)
        cp = CalibrationPoints.single_qubit(self.name, cal_states,
                                            n_per_state=n_cal_points_per_state)
        seq, sweep_points = sq.t1_active_reset(
            times=times, qb_name=self.name, cal_points=cp, for_ef=for_ef,
            operation_dict=self.get_operation_dict(), upload=False,
            last_ge_pulse=last_ge_pulse, prep_params=prep_params)
        MC.set_sweep_function(awg_swf.SegmentHardSweep(
            sequence=seq, upload=upload, parameter_name='Time', unit='s'))
        MC.set_sweep_points(sweep_points)
        MC.set_detector_function(self.int_avg_classif_det if classified_ro else
                                 self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'sweep_points_dict': {self.name: times},
                             'preparation_params': prep_params,
                             'cal_points': repr(cp),
                             'rotate': False if classified_ro else
                                len(cp.states) != 0,
                             'last_ge_pulses': [last_ge_pulse],
                             'data_to_fit': {self.name: 'pf' if for_ef else 'pe'},
                             "sweep_name": "Time",
                             "sweep_unit": "s"})
        MC.run(label, exp_metadata=exp_metadata)
        if analyze:
            tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
    def measure_qscale(self, qscales=None, analyze=True, upload=True, label=None,
                       cal_states="auto", n_cal_points_per_state=2,
                       last_ge_pulse=False, for_ef=False, classified_ro=False,
                       prep_params=None, exp_metadata=None):
        """Measure the DRAG quadrature-scaling (qscale) calibration curve.

        Each qscale value must appear three times in a row in qscales,
        because the sequence applies three different pulse combinations
        per value.

        Args:
            qscales: qscale sweep values, each repeated 3 times (required).
            Other arguments as in measure_rabi.

        Raises:
            ValueError: if qscales is None or not repeated 3 times.
        """
        if qscales is None:
            raise ValueError("Unspecified qscale values for measure_qscale")
        # Sanity check: the first three entries must be identical.
        uniques = np.unique(qscales[range(3)])
        if uniques.size > 1:
            raise ValueError("The values in the qscales array are not repeated "
                             "3 times.")
        if prep_params is None:
            prep_params = self.preparation_params()
        log.debug(f"Preparation Parameters:\n{prep_params}")
        # Define the measurement label
        if label is None:
            label = f'Qscale{"_ef" if for_ef else ""}'
            if classified_ro:
                label += '_classified'
            if 'active' in prep_params['preparation_type']:
                label += '_reset'
            label += self.msmt_suffix
        MC = self.instr_mc.get_instr()
        self.prepare(drive='timedomain')
        # create cal points
        cal_states = CalibrationPoints.guess_cal_states(cal_states, for_ef)
        cp = CalibrationPoints.single_qubit(self.name, cal_states,
                                            n_per_state=n_cal_points_per_state)
        # create sequence
        seq, sweep_points = sq.qscale_active_reset(qscales=qscales,
            qb_name=self.name, cal_points=cp, for_ef=for_ef,
            operation_dict=self.get_operation_dict(), upload=False,
            last_ge_pulse=last_ge_pulse, prep_params=prep_params)
        MC.set_sweep_function(awg_swf.SegmentHardSweep(
            sequence=seq, upload=upload, parameter_name='Qscale factor'))
        MC.set_sweep_points(sweep_points)
        MC.set_detector_function(self.int_avg_classif_det if classified_ro else
                                 self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'sweep_points_dict': {self.name: qscales},
                             'sweep_name': 'Qscale factor',
                             'sweep_unit': '',
                             'preparation_params': prep_params,
                             'cal_points': repr(cp),
                             'rotate': False if classified_ro else len(cp.states) != 0,
                             'last_ge_pulses': [last_ge_pulse],
                             'data_to_fit': {self.name: 'pf' if for_ef else 'pe'}})
        MC.run(label, exp_metadata=exp_metadata)
        if analyze:
            tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
def measure_ramsey_multiple_detunings(self, times=None,
artificial_detunings=None, label='',
MC=None, analyze=True, close_fig=True,
cal_points=True, upload=True,
exp_metadata=None):
log.error("This function is deprecated, please use measure_ramsey()")
if times is None:
raise ValueError("Unspecified times for measure_ramsey")
if artificial_detunings is None:
log.warning('Artificial detuning is 0.')
uniques = np.unique(times[range(len(artificial_detunings))])
if uniques.size>1:
raise ValueError("The values in the times array are not repeated "
"len(artificial_detunings) times.")
if np.any(np.asarray(np.abs(artificial_detunings))<1e3):
log.warning('The artificial detuning is too small. The units '
'should be Hz.')
if np.any(times>1e-3):
log.warning('The values in the times array might be too large.'
'The units should be seconds.')
self.prepare(drive='timedomain')
if MC is None:
MC = self.instr_mc.get_instr()
# Define the measurement label
if label == '':
label = 'Ramsey_mult_det' + self.msmt_suffix
if cal_points:
len_art_det = len(artificial_detunings)
step = np.abs(times[-1] - times[-len_art_det-1])
sweep_points = np.concatenate(
[times, [times[-1] + step, times[-1] + 2*step,
times[-1] + 3*step, times[-1] + 4*step]])
else:
sweep_points = times
Rams_swf = awg_swf.Ramsey_multiple_detunings(
pulse_pars=self.get_ge_pars(), RO_pars=self.get_ro_pars(),
artificial_detunings=artificial_detunings, cal_points=cal_points,
upload=upload)
MC.set_sweep_function(Rams_swf)
MC.set_sweep_points(sweep_points)
MC.set_detector_function(self.int_avg_det)
if exp_metadata is None:
exp_metadata = {}
exp_metadata.update({'sweep_points_dict': {self.name: sweep_points},
'use_cal_points': cal_points,
'artificial_detunings': artificial_detunings})
MC.run(label, exp_metadata=exp_metadata)
if analyze:
ma.MeasurementAnalysis(auto=True, close_fig=close_fig,
qb_name=self.name)
    def measure_ramsey_dyn_decoupling(self, times=None, artificial_detuning=0,
                                      label='', MC=None, analyze=True,
                                      close_fig=True, cal_points=True,
                                      upload=True, nr_echo_pulses=4,
                                      seq_func=None, cpmg_scheme=True,
                                      exp_metadata=None):
        """Ramsey measurement with interleaved dynamical-decoupling echoes.

        Args:
            times: Ramsey delays in seconds (required).
            artificial_detuning: artificial detuning in Hz.
            nr_echo_pulses: number of echo pulses inserted in the delay.
            seq_func: sequence function; defaults to sq.ramsey_seq.
            cpmg_scheme: use CPMG spacing for the echo pulses.
            Other arguments as in the other measure_* methods.

        Raises:
            ValueError: if times is None.
        """
        if times is None:
            raise ValueError("Unspecified times for measure_ramsey")
        # Guard against passing times in the wrong unit (expects seconds).
        if np.any(times > 1e-3):
            log.warning('The values in the times array might be too large.'
                        'The units should be seconds.')
        if artificial_detuning is None:
            log.warning('Artificial detuning is 0.')
        if np.abs(artificial_detuning) < 1e3:
            log.warning('The artificial detuning is too small. The units'
                        'should be Hz.')
        if seq_func is None:
            seq_func = sq.ramsey_seq
        self.prepare(drive='timedomain')
        if MC is None:
            MC = self.instr_mc.get_instr()
        # Define the measurement label
        if label == '':
            label = 'Ramsey' + self.msmt_suffix
        if cal_points:
            # Append four calibration segments after the last delay.
            step = np.abs(times[-1]-times[-2])
            sweep_points = np.concatenate(
                [times, [times[-1]+step, times[-1]+2*step,
                         times[-1]+3*step, times[-1]+4*step]])
        else:
            sweep_points = times
        Rams_swf = awg_swf.Ramsey_decoupling_swf(
            seq_func=seq_func,
            pulse_pars=self.get_ge_pars(), RO_pars=self.get_ro_pars(),
            artificial_detuning=artificial_detuning, cal_points=cal_points,
            upload=upload, nr_echo_pulses=nr_echo_pulses, cpmg_scheme=cpmg_scheme)
        MC.set_sweep_function(Rams_swf)
        MC.set_sweep_points(sweep_points)
        MC.set_detector_function(self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'sweep_points_dict': {self.name: sweep_points},
                             'use_cal_points': cal_points,
                             'cpmg_scheme': cpmg_scheme,
                             'nr_echo_pulses': nr_echo_pulses,
                             'seq_func': seq_func,
                             'artificial_detuning': artificial_detuning})
        MC.run(label, exp_metadata=exp_metadata)
        if analyze:
            RamseyA = ma.Ramsey_Analysis(
                auto=True,
                label=label,
                qb_name=self.name,
                NoCalPoints=4,
                artificial_detuning=artificial_detuning,
                close_fig=close_fig)
    def measure_ramsey(self, times, artificial_detunings=None, label=None,
                       analyze=True, close_fig=True,
                       cal_states="auto", n_cal_points_per_state=2,
                       n=1, upload=True, last_ge_pulse=False, for_ef=False,
                       classified_ro=False, prep_params=None, exp_metadata=None):
        """Ramsey measurement (ge, or ef if for_ef) with optional artificial
        detuning(s).

        Args:
            times: Ramsey delays in seconds.
            artificial_detunings: artificial detuning(s) in Hz.
            Other arguments as in measure_rabi.
        """
        if prep_params is None:
            prep_params = self.preparation_params()
        # Define the measurement label
        if label is None:
            label = f'Ramsey{"_ef" if for_ef else ""}'
            if classified_ro:
                label += '_classified'
            if 'active' in prep_params['preparation_type']:
                label += '_reset'
            label += self.msmt_suffix
        MC = self.instr_mc.get_instr()
        self.prepare(drive='timedomain')
        # create cal points
        cal_states = CalibrationPoints.guess_cal_states(cal_states, for_ef)
        cp = CalibrationPoints.single_qubit(self.name, cal_states,
                                            n_per_state=n_cal_points_per_state)
        # create sequence
        seq, sweep_points = sq.ramsey_active_reset(
            times=times, artificial_detunings=artificial_detunings,
            qb_name=self.name, cal_points=cp, n=n, for_ef=for_ef,
            operation_dict=self.get_operation_dict(), upload=False,
            last_ge_pulse=last_ge_pulse, prep_params=prep_params)
        MC.set_sweep_function(awg_swf.SegmentHardSweep(
            sequence=seq, upload=upload, parameter_name='Delay', unit='s'))
        MC.set_sweep_points(sweep_points)
        MC.set_detector_function(self.int_avg_classif_det if classified_ro else
                                 self.int_avg_det)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update(
            {'sweep_points_dict': {self.name: times},
             'sweep_name': 'Delay',
             'sweep_unit': 's',
             'cal_points': repr(cp),
             'preparation_params': prep_params,
             'last_ge_pulses': [last_ge_pulse],
             'artificial_detuning': artificial_detunings,
             'rotate': False if classified_ro else len(cp.states) != 0,
             'data_to_fit': {self.name: 'pf' if for_ef else 'pe'}})
        MC.run(label, exp_metadata=exp_metadata)
        if analyze:
            tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
def measure_echo(self, times=None, artificial_detuning=None,
upload=True, analyze=True, close_fig=True, cal_points=True,
label=None, exp_metadata=None):
if times is None:
raise ValueError("Unspecified times for measure_echo")
# Define the measurement label
if label == '':
label = 'Echo' + self.msmt_suffix
if cal_points:
step = np.abs(times[-1]-times[-2])
sweep_points = np.concatenate(
[times, [times[-1]+step, times[-1]+2*step,
times[-1]+3*step, times[-1]+4*step]])
cal_states_dict = {'g': [-4, -3], 'e': [-2, -1]}
cal_states_rotations = {'g': 0, 'e': 1}
else:
sweep_points = times
cal_states_dict = None
cal_states_rotations = {}
self.prepare(drive='timedomain')
MC = self.instr_mc.get_instr()
Echo_swf = awg_swf.Echo(
pulse_pars=self.get_ge_pars(), RO_pars=self.get_ro_pars(),
artificial_detuning=artificial_detuning, upload=upload)
MC.set_sweep_function(Echo_swf)
MC.set_sweep_points(sweep_points)
MC.set_detector_function(self.int_avg_classif_det if
self.acq_weights_type() == 'optimal_qutrit'
else self.int_avg_det)
if exp_metadata is None:
exp_metadata = {}
exp_metadata.update({'sweep_points_dict': {self.name: sweep_points},
'use_cal_points': cal_points,
'rotate': cal_points,
'cal_states_dict': cal_states_dict,
'cal_states_rotations': cal_states_rotations if
self.acq_weights_type() != 'optimal_qutrit'
else None,
'data_to_fit': {self.name: 'pe'},
'artificial_detuning': artificial_detuning})
MC.run(label, exp_metadata=exp_metadata)
if analyze:
tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
def measure_echo_2nd_exc(self, times=None, artificial_detuning=None,
label=None, analyze=True,
cal_points=True, no_cal_points=6, upload=True,
last_ge_pulse=False, exp_metadata=None):
if times is None:
raise ValueError("Unspecified times for measure_ramsey")
if artificial_detuning is None:
log.warning('Artificial detuning is 0.')
if np.abs(artificial_detuning) < 1e3:
log.warning('The artificial detuning is too small. The units'
'should be Hz.')
if np.any(times > 1e-3):
log.warning('The values in the times array might be too large.'
'The units should be seconds.')
if label is None:
label = 'Echo_ef' + self.msmt_suffix
self.prepare(drive='timedomain')
MC = self.instr_mc.get_instr()
cal_states_dict = None
cal_states_rotations = {}
if cal_points:
step = np.abs(times[-1]-times[-2])
if no_cal_points == 6:
sweep_points = np.concatenate(
[times, [times[-1]+step, times[-1]+2*step,
times[-1]+3*step, times[-1]+4*step,
times[-1]+5*step, times[-1]+6*step]])
cal_states_dict = {'g': [-6, -5], 'e': [-4, -3], 'f': [-2, -1]}
cal_states_rotations = {'g': 0, 'f': 1} if last_ge_pulse else \
{'e': 0, 'f': 1}
elif no_cal_points == 4:
sweep_points = np.concatenate(
[times, [times[-1]+step, times[-1]+2*step,
times[-1]+3*step, times[-1]+4*step]])
cal_states_dict = {'g': [-4, -3], 'e': [-2, -1]}
cal_states_rotations = {'g': 0, 'e': 1}
elif no_cal_points == 2:
sweep_points = np.concatenate(
[times, [times[-1]+step, times[-1]+2*step]])
cal_states_dict = {'g': [-2, -1]}
cal_states_rotations = {'g': 0}
else:
sweep_points = times
else:
sweep_points = times
Echo_2nd_swf = awg_swf.Echo_2nd_exc(
pulse_pars=self.get_ge_pars(),
pulse_pars_2nd=self.get_ef_pars(),
RO_pars=self.get_ro_pars(),
artificial_detuning=artificial_detuning,
cal_points=cal_points, upload=upload,
no_cal_points=no_cal_points,
last_ge_pulse=last_ge_pulse)
MC.set_sweep_function(Echo_2nd_swf)
MC.set_sweep_points(sweep_points)
MC.set_detector_function(self.int_avg_classif_det if
self.acq_weights_type() == 'optimal_qutrit'
else self.int_avg_det)
if exp_metadata is None:
exp_metadata = {}
exp_metadata.update({'sweep_points_dict': {self.name: sweep_points},
'rotate': cal_points,
'last_ge_pulse': last_ge_pulse,
'data_to_fit': {self.name: 'pf'},
'cal_states_dict': cal_states_dict,
'cal_states_rotations': cal_states_rotations if
self.acq_weights_type() != 'optimal_qutrit'
else None,
'artificial_detuning': artificial_detuning})
MC.run(label, exp_metadata=exp_metadata)
if analyze:
tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name])
    def measure_randomized_benchmarking(
            self, cliffords, nr_seeds,
            gate_decomp='HZ', interleaved_gate=None,
            n_cal_points_per_state=2, cal_states=(),
            classified_ro=False, thresholded=True, label=None,
            upload=True, analyze=True, prep_params=None,
            exp_metadata=None, **kw):
        '''
        Performs a randomized benchmarking experiment on 1 qubit.

        Args:
            cliffords: numbers of Cliffords per sequence (hard sweep).
            nr_seeds: number of random seeds per Clifford number
                (soft sweep).
            gate_decomp: Clifford decomposition scheme (e.g. 'HZ').
            interleaved_gate: gate name for interleaved RB, or None for
                standard RB.
            thresholded: use the thresholded (digitized) detector; takes
                precedence over classified_ro.
            Other arguments as in measure_rabi.
        '''
        # Define the measurement label
        if label is None:
            if interleaved_gate is None:
                label = 'RB_{}_{}_seeds_{}_cliffords'.format(
                    gate_decomp, nr_seeds, cliffords[-1]) + self.msmt_suffix
            else:
                label = 'IRB_{}_{}_{}_seeds_{}_cliffords'.format(
                    interleaved_gate, gate_decomp, nr_seeds, cliffords[-1]) + \
                        self.msmt_suffix
        if prep_params is None:
            prep_params = self.preparation_params()
        # Prepare the physical instruments for a time domain measurement
        self.prepare(drive='timedomain')
        MC = self.instr_mc.get_instr()
        cal_states = CalibrationPoints.guess_cal_states(cal_states)
        cp = CalibrationPoints.single_qubit(self.name, cal_states,
                                            n_per_state=n_cal_points_per_state)
        # NOTE: "renchmarking" is the (misspelled) real name of the sequence
        # function in the sequence module.
        sequences, hard_sweep_points, soft_sweep_points = \
            sq.randomized_renchmarking_seqs(
                qb_name=self.name, operation_dict=self.get_operation_dict(),
                cliffords=cliffords, nr_seeds=np.arange(nr_seeds),
                gate_decomposition=gate_decomp,
                interleaved_gate=interleaved_gate, upload=False,
                cal_points=cp, prep_params=prep_params)
        hard_sweep_func = awg_swf.SegmentHardSweep(
            sequence=sequences[0], upload=upload,
            parameter_name='Nr. Cliffords', unit='')
        MC.set_sweep_function(hard_sweep_func)
        MC.set_sweep_points(hard_sweep_points)
        MC.set_sweep_function_2D(awg_swf.SegmentSoftSweep(
            hard_sweep_func, sequences, 'Nr. Seeds', ''))
        MC.set_sweep_points_2D(soft_sweep_points)
        # Detector priority: thresholded > classified > plain averaging.
        if thresholded:
            det_func = self.dig_avg_det
        elif classified_ro:
            det_func = self.int_avg_classif_det
        else:
            det_func = self.int_avg_det
        MC.set_detector_function(det_func)
        # create sweep points object
        sp = SweepPoints('nr_seeds', np.arange(nr_seeds), '', 'Nr. Seeds')
        sp.add_sweep_dimension()
        sp.add_sweep_parameter('cliffords', cliffords, '',
                               'Number of applied Cliffords, $m$')
        # create analysis pipeline object
        pp = ProcessingPipeline(
            'average', keys_in=det_func.value_names,
            num_bins=[len(cliffords)]*len(det_func.value_names))
        pp.add_node(
            'get_std_deviation', keys_in=det_func.value_names,
            num_bins=[len(cliffords)]*len(det_func.value_names))
        pp.add_node('SingleQubitRBAnalysis', keys_in='previous',
                    std_keys=[k+' std' for k in det_func.value_names],
                    meas_obj_name=self.name, do_plotting=True)
        if exp_metadata is None:
            exp_metadata = {}
        exp_metadata.update({'preparation_params': prep_params,
                             'cal_points': repr(cp),
                             'sweep_points': sp,
                             'meas_obj_sweep_points_map':
                                 sp.get_sweep_points_map([self.name]),
                             'meas_obj_value_names_map': {
                                 self.name: det_func.value_names},
                             'processing_pipe': pp})
        MC.run_2D(label, exp_metadata=exp_metadata)
        if analyze:
            pla.PipelineDataAnalysis()
    def measure_transients(self, states=('g', 'e'), upload=True,
                           analyze=True, acq_length=4097/1.8e9,
                           prep_params=None, exp_metadata=None, **kw):
        """Record readout time traces for each of the given prepared states.

        If the resulting transients will be used to calculate the optimal
        weight functions, then it is important that the UHFQC iavg_delay and
        wint_delay are calibrated such that the weights and traces are
        aligned: iavg_delay = 2*wint_delay.

        Args:
            states: iterable of states to prepare; each must be 'g', 'e'
                or 'f'. One measurement is run per state.
            acq_length: acquisition length in seconds (set temporarily).
            kw: may contain 'name_extra', appended to the measurement name.

        Raises:
            ValueError: for an unrecognized state.
        """
        MC = self.instr_mc.get_instr()
        name_extra = kw.get('name_extra', None)
        if prep_params is None:
            prep_params = self.preparation_params()
        if exp_metadata is None:
            exp_metadata = dict()
        exp_metadata.update(
            {'sweep_name': 'time',
             'sweep_unit': ['s']})
        with temporary_value(self.acq_length, acq_length):
            self.prepare(drive='timedomain')
            npoints = self.inp_avg_det.nr_samples
            # Time axis: one point per sample at the 1.8 GSa/s rate.
            sweep_points = np.linspace(0, npoints / 1.8e9, npoints,
                                       endpoint=False)
            for state in states:
                if state not in ['g', 'e', 'f']:
                    raise ValueError("Unrecognized state: {}. Must be 'g', 'e' "
                                     "or 'f'.".format(state))
                base_name = 'timetrace_{}'.format(state)
                name = base_name + "_" + name_extra if name_extra is not None \
                    else base_name
                seq, _ = sq.single_state_active_reset(
                    operation_dict=self.get_operation_dict(),
                    qb_name=self.name, state=state, prep_params=prep_params,
                    upload=False)
                # set sweep function and run measurement
                MC.set_sweep_function(awg_swf.SegmentHardSweep(sequence=seq,
                                                               upload=upload))
                MC.set_sweep_points(sweep_points)
                MC.set_detector_function(self.inp_avg_det)
                exp_metadata.update(dict(sweep_points_dict=sweep_points))
                MC.run(name=name + self.msmt_suffix, exp_metadata=exp_metadata)
    def measure_readout_pulse_scope(self, delays, freqs, RO_separation=None,
                                    prep_pulses=None, comm_freq=225e6,
                                    analyze=True, label=None,
                                    close_fig=True, upload=True, verbose=False,
                                    cal_points=((-4, -3), (-2, -1)), MC=None):
        """
        From the documentation of the used sequence function:

        Prepares the AWGs for a readout pulse shape and timing measurement.

        The sequence consists of two readout pulses where the drive pulse start
        time is swept through the first readout pulse. Because the photons in
        the readout resonator induce an ac-Stark shift of the qubit frequency,
        we can determine the readout pulse shape by sweeping the drive frequency
        in an outer loop to determine the qubit frequency.

        Important: This sequence includes two readouts per segment. For this
        reason the calibration points are also duplicated.

        Args:
            delays: A list of delays between the start of the first readout
                pulse and the center of the drive pulse.
            freqs: drive frequencies for the outer (2D) sweep; if None, a
                100 MHz span around self.f_qubit() is used.
            RO_separation: Separation between the starts of the two readout
                pulses. If the comm_freq parameter is not None, the used
                value is increased to satisfy the commensurability
                constraint.
            cal_points: True for default calibration points, False for no
                calibration points or a list of two lists, containing
                the indices of the calibration segments for the ground
                and excited state.
            comm_freq: The readout pulse separation will be a multiple of
                1/comm_freq
        """
        if delays is None:
            raise ValueError("Unspecified delays for "
                             "measure_readout_pulse_scope")
        if label is None:
            label = 'Readout_pulse_scope' + self.msmt_suffix
        if MC is None:
            MC = self.instr_mc.get_instr()
        if freqs is None:
            freqs = self.f_qubit() + np.linspace(-50e6, 50e6, 201)
        if RO_separation is None:
            # Default: room for both readout pulses plus the largest delay.
            RO_separation = 2 * self.ro_length()
            RO_separation += np.max(delays)
            RO_separation += 200e-9  # for slack
        self.prepare(drive='timedomain')
        MC.set_sweep_function(awg_swf.Readout_pulse_scope_swf(
            delays=delays,
            pulse_pars=self.get_ge_pars(),
            RO_pars=self.get_ro_pars(),
            RO_separation=RO_separation,
            cal_points=cal_points,
            prep_pulses=prep_pulses,
            comm_freq=comm_freq,
            verbose=verbose,
            upload=upload))
        MC.set_sweep_points(delays)
        # Outer sweep: drive LO frequency, offset by -ge_mod_freq so the
        # axis shows the actual drive frequency.
        MC.set_sweep_function_2D(swf.Offset_Sweep(
            mc_parameter_wrapper.wrap_par_to_swf(
                self.instr_ge_lo.get_instr().frequency),
            -self.ge_mod_freq(),
            parameter_name=self.name + ' drive frequency'))
        MC.set_sweep_points_2D(freqs)
        # Two acquisitions per segment: the swept probe and the reference
        # measurement readout.
        d = det.UHFQC_integrated_average_detector(
            self.instr_uhf.get_instr(), self.instr_pulsar.get_instr(),
            nr_averages=self.acq_averages(),
            channels=self.int_avg_det.channels,
            integration_length=self.acq_length(),
            values_per_point=2, values_per_point_suffex=['_probe', '_measure'])
        MC.set_detector_function(d)
        MC.run_2D(label)
        # Create a MeasurementAnalysis object for this measurement
        if analyze:
            ma.MeasurementAnalysis(TwoD=True, auto=True, close_fig=close_fig,
                                   qb_name=self.name)
def measure_residual_readout_photons(
self, delays_to_relax, ramsey_times, delay_buffer=0,
cal_points=((-4, -3), (-2, -1)), verbose=False,
artificial_detuning=None, analyze=True,
label=None, close_fig=True, MC=None):
"""
From the documentation of the used sequence function:
The sequence consists of two readout pulses sandwitching two ramsey
pulses inbetween. The delay between the first readout pulse and first
ramsey pulse is swept, to measure the ac stark shift and dephasing
from any residual photons.
Important: This sequence includes two readouts per segment. For this
reason the calibration points are also duplicated.
Args:
delays_to_relax: delay between the end of the first readout
pulse and the start of the first ramsey pulse.
pulse_pars: Pulse dictionary for the ramsey pulse.
RO_pars: Pulse dictionary for the readout pulse.
ramsey_times: delays between ramsey pulses
delay_buffer: delay between the start of the last ramsey pulse and
the start of the second readout pulse.
cal_points: True for default calibration points, False for no
calibration points or a list of two lists,
containing the indices of the calibration
segments for the ground and excited state.
"""
if label is None:
label = 'residual_readout_photons' + self.msmt_suffix
if MC is None:
MC = self.instr_mc.get_instr()
# duplicate sweep points for the two preparation states
ramsey_times = np.vstack((ramsey_times, ramsey_times)).\
reshape((-1,), order='F')
self.prepare(drive='timedomain')
sf1 = awg_swf.readout_photons_in_resonator_swf(
delay_to_relax=delays_to_relax[0],
delay_buffer=delay_buffer,
ramsey_times=ramsey_times,
pulse_pars=self.get_ge_pars(),
RO_pars=self.get_ro_pars(),
cal_points=cal_points,
verbose=verbose,
artificial_detuning=artificial_detuning,
upload=False)
MC.set_sweep_function(sf1)
MC.set_sweep_points(ramsey_times)
sf2 = awg_swf.readout_photons_in_resonator_soft_swf(sf1)
MC.set_sweep_function_2D(sf2)
MC.set_sweep_points_2D(delays_to_relax)
d = det.UHFQC_integrated_average_detector(
self.instr_uhf.get_instr(), self.instr_pulsar.get_instr(),
nr_averages=self.acq_averages(),
channels=self.int_avg_det.channels,
integration_length=self.acq_length(),
values_per_point=2, values_per_point_suffex=['_test', '_measure'])
MC.set_detector_function(d)
MC.run_2D(label)
self.artificial_detuning = artificial_detuning
# Create a MeasurementAnalysis object for this measurement
if analyze:
kappa = list(map(lambda w:0.5*(self.RO_purcell_kappa() - np.real(
np.sqrt(-16*self.RO_J_coupling()*self.RO_J_coupling() +
(self.RO_purcell_kappa()-2j*(np.abs(w-self.f_RO_purcell())))*
(self.RO_purcell_kappa()-2j*(np.abs(w-self.f_RO_purcell())))
))),
[self.f_RO_resonator() - self.chi(),
self.f_RO_resonator() + self.chi()]))
if not (self.T2_star_ef() == 0):
T2star = self.T2_star_ef()
else:
if self.T2_star() == 0:
raise ValueError('T2star is not given.')
else:
T2star = self.T2_star()
tda.ReadoutROPhotonsAnalysis(t_start=None,
close_figs=close_fig, options_dict={
'f_qubit': self.f_qubit(),
'chi': self.chi(),
'kappa-effective': kappa,
'T2echo': T2star ,
'do_analysis': True,
'artif_detuning': self.artificial_detuning() },
do_fitting=True)
    def measure_multi_element_segment_timing(
            self, phases, ramsey_time=4e-6, nr_wait_elems=16,
            elem_type='interleaved', cal_points=((-4, -3), (-2, -1)),
            label=None, MC=None, upload=True, analyze=True, close_fig=True):
        """Check multi-element segment timing with a fixed-delay Ramsey
        phase sweep.

        Each sweep point is measured twice (single-element and multi-element
        segment version), hence two detector values per point.

        Args:
            phases: phases of the second Ramsey pulse (sweep points).
            ramsey_time: fixed delay between the two Ramsey pulses.
            nr_wait_elems: number of wait elements inserted in the delay.
            elem_type: element arrangement, e.g. 'interleaved'.
            cal_points: calibration-segment indices (ground, excited).
        """
        if label is None:
            label = 'Multi_element_segment_timing' + self.msmt_suffix
        if MC is None:
            MC = self.instr_mc.get_instr()
        self.prepare(drive='timedomain')
        MC.set_sweep_function(awg_swf.MultiElemSegmentTimingSwf(
            phases=phases,
            qbn=self.name,
            op_dict=self.get_operation_dict(),
            ramsey_time=ramsey_time,
            nr_wait_elems=nr_wait_elems,
            elem_type=elem_type,
            cal_points=cal_points,
            upload=upload))
        MC.set_sweep_points(phases)
        d = det.UHFQC_integrated_average_detector(
            self.instr_uhf.get_instr(), self.instr_pulsar.get_instr(), nr_averages=self.acq_averages(),
            channels=self.int_avg_det.channels,
            integration_length=self.acq_length(),
            values_per_point=2, values_per_point_suffex=['_single_elem',
                                                         '_multi_elem'])
        MC.set_detector_function(d)
        metadata = dict(
            ramsey_time=ramsey_time,
            nr_wait_elems=nr_wait_elems,
            elem_type=elem_type,
            cal_points=cal_points
        )
        MC.run(label, exp_metadata=metadata)
        # Create a MeasurementAnalysis object for this measurement
        if analyze:
            ma.MeasurementAnalysis(auto=True, close_fig=close_fig,
                                   qb_name=self.name)
def measure_drive_mixer_spectrum(self, if_freqs, amplitude=0.5,
trigger_sep=5e-6, align_frequencies=True):
MC = self.instr_mc.get_instr()
if align_frequencies:
if_freqs = (if_freqs*trigger_sep).astype(np.int)/trigger_sep
s = swf.Offset_Sweep(
self.instr_ro_lo.get_instr().frequency,
self.ge_freq() - self.ro_mod_freq() - self.ge_mod_freq(),
name='Drive intermediate frequency',
parameter_name='Drive intermediate frequency')
MC.set_sweep_function(s)
MC.set_sweep_points(if_freqs)
MC.set_detector_function(self.int_avg_det_spec)
drive_pulse = dict(
pulse_type='GaussFilteredCosIQPulse',
pulse_length=self.acq_length(),
ref_point='start',
amplitude=amplitude,
I_channel=self.ge_I_channel(),
Q_channel=self.ge_Q_channel(),
mod_frequency=self.ge_mod_freq(),
phase_lock=True,
)
sq.pulse_list_list_seq([[self.get_acq_pars(), drive_pulse]])
with temporary_value(
(self.acq_weights_type, 'SSB'),
(self.instr_trigger.get_instr().pulse_period, trigger_sep),
):
self.prepare(drive='timedomain')
self.instr_pulsar.get_instr().start()
MC.run('ge_uc_spectrum' + self.msmt_suffix)
a = ma.MeasurementAnalysis(plot_args=dict(log=True, marker=''))
return a
    def calibrate_drive_mixer_carrier(self, update=True, x0=(0., 0.),
                                      initial_stepsize=0.01, trigger_sep=5e-6):
        """Minimize drive-mixer carrier leakage by Nelder-Mead optimization
        of the DC offsets of the drive I and Q channels.

        Args:
            update: store the optimal offsets in ge_I_offset / ge_Q_offset.
            x0: starting point (I offset, Q offset).
            initial_stepsize: initial Nelder-Mead step for both offsets.
            trigger_sep: trigger period (s) during the calibration.

        Returns:
            Tuple (I offset, Q offset) found by the optimizer.
        """
        MC = self.instr_mc.get_instr()
        ad_func_pars = {'adaptive_function': opti.nelder_mead,
                        'x0': x0,
                        'initial_step': [initial_stepsize, initial_stepsize],
                        'no_improv_break': 15,
                        'minimize': True,
                        'maxiter': 500}
        chI_par = self.instr_pulsar.get_instr().parameters['{}_offset'.format(
            self.ge_I_channel())]
        chQ_par = self.instr_pulsar.get_instr().parameters['{}_offset'.format(
            self.ge_Q_channel())]
        MC.set_sweep_functions([chI_par, chQ_par])
        MC.set_adaptive_function_parameters(ad_func_pars)
        # Zero-amplitude drive pulse: only the carrier leakage reaches the
        # mixer output.
        sq.pulse_list_list_seq([[self.get_acq_pars(), dict(
            pulse_type='GaussFilteredCosIQPulse',
            pulse_length=self.acq_length(),
            ref_point='start',
            amplitude=0,
            I_channel=self.ge_I_channel(),
            Q_channel=self.ge_Q_channel(),
        )]])
        # Detect at the carrier frequency (ge_freq - ge_mod_freq).
        with temporary_value(
            (self.ro_freq, self.ge_freq() - self.ge_mod_freq()),
            (self.acq_weights_type, 'SSB'),
            (self.instr_trigger.get_instr().pulse_period, trigger_sep),
        ):
            self.prepare(drive='timedomain')
            MC.set_detector_function(det.IndexDetector(
                self.int_avg_det_spec, 0))
            self.instr_pulsar.get_instr().start(exclude=[self.instr_uhf()])
            MC.run(name='drive_carrier_calibration' + self.msmt_suffix,
                   mode='adaptive')
        a = ma.OptimizationAnalysis(label='drive_carrier_calibration')
        # v2 creates a pretty picture of the optimizations
        ma.OptimizationAnalysis_v2(label='drive_carrier_calibration')
        ch_1_min = a.optimization_result[0][0]
        ch_2_min = a.optimization_result[0][1]
        if update:
            self.ge_I_offset(ch_1_min)
            self.ge_Q_offset(ch_2_min)
        return ch_1_min, ch_2_min
def calibrate_drive_mixer_skewness(self, update=True, amplitude=0.5,
                                   trigger_sep=5e-6, no_improv_break=50,
                                   initial_stepsize=(0.15, 10)):
    """Calibrate the drive-mixer skewness parameters (alpha, phi_skew).

    Runs a Nelder-Mead optimization over ge_alpha (amplitude ratio) and
    ge_phi_skew (phase skew) while measuring at
    ro_freq = ge_freq - 2*ge_mod_freq, i.e. at the unwanted image
    sideband of the drive, and minimizes the measured signal.

    Args:
        update: if True, store the optimum in ge_alpha/ge_phi_skew.
        amplitude: amplitude of the calibration drive pulse.
        trigger_sep: trigger period to use during the measurement.
        no_improv_break: Nelder-Mead termination criterion (iterations
            without improvement).
        initial_stepsize: initial Nelder-Mead steps for (alpha, phi_skew).

    Returns:
        Tuple (alpha, phi): the optimal amplitude ratio and phase skew.
    """
    MC = self.instr_mc.get_instr()
    # Start the optimizer from the currently configured values.
    ad_func_pars = {'adaptive_function': opti.nelder_mead,
                    'x0': [self.ge_alpha(), self.ge_phi_skew()],
                    'initial_step': initial_stepsize,
                    'no_improv_break': no_improv_break,
                    'minimize': True,
                    'maxiter': 500}
    MC.set_sweep_functions([self.ge_alpha, self.ge_phi_skew])
    MC.set_adaptive_function_parameters(ad_func_pars)
    with temporary_value(
            # Snapshot alpha/phi_skew so the sweep restores them on exit.
            (self.ge_alpha, self.ge_alpha()),
            (self.ge_phi_skew, self.ge_phi_skew()),
            # Park the readout on the image sideband of the drive.
            (self.ro_freq, self.ge_freq() - 2*self.ge_mod_freq()),
            (self.acq_weights_type, 'SSB'),
            (self.instr_trigger.get_instr().pulse_period, trigger_sep),
    ):
        self.prepare(drive='timedomain')
        detector = self.int_avg_det_spec
        detector.always_prepare = True
        detector.AWG = self.instr_pulsar.get_instr()
        # Re-upload the calibration pulse before every acquisition so the
        # alpha/phi_skew values set by the sweep take effect. The params
        # are bound as lambda defaults and read out at call time.
        detector.prepare_function = lambda \
            alphaparam=self.ge_alpha, skewparam=self.ge_phi_skew: \
            sq.pulse_list_list_seq([[self.get_acq_pars(), dict(
                pulse_type='GaussFilteredCosIQPulse',
                pulse_length=self.acq_length(),
                ref_point='start',
                amplitude=amplitude,
                I_channel=self.ge_I_channel(),
                Q_channel=self.ge_Q_channel(),
                mod_frequency=self.ge_mod_freq(),
                phase_lock=True,
                alpha=alphaparam(),
                phi_skew=skewparam(),
            )]])
        MC.set_detector_function(det.IndexDetector(detector, 0))
        MC.run(name='drive_skewness_calibration' + self.msmt_suffix,
               mode='adaptive')
    a = ma.OptimizationAnalysis(label='drive_skewness_calibration')
    # v2 creates a pretty picture of the optimizations
    ma.OptimizationAnalysis_v2(label='drive_skewness_calibration')
    # phi and alpha are the coefficients that go in the predistortion matrix
    alpha = a.optimization_result[0][0]
    phi = a.optimization_result[0][1]
    if update:
        self.ge_alpha(alpha)
        self.ge_phi_skew(phi)
    return alpha, phi
def calibrate_drive_mixer_skewness_NN(
        self, update=True, make_fig=True, meas_grid=None, n_meas=100,
        amplitude=0.1, trigger_sep=5e-6, two_rounds=False,
        estimator='GRNN_neupy', hyper_parameter_dict=None,
        first_round_limits=(0.6, 1.2, -50, 35), **kwargs):
    """Predictive (regression-based) drive-mixer skewness calibration.

    Measures the image sideband on randomized grids of
    (alpha, phi_skew) points and lets a regression model (`estimator`)
    predict the optimum. Two rounds are performed (three if
    `two_rounds`): round 0 samples uniformly within
    `first_round_limits`; later rounds sample normally around the
    previous optimum with shrinking spread.

    Args:
        update: if True, store the final optimum in ge_alpha/ge_phi_skew.
        make_fig: forward to the analysis to produce figures.
        meas_grid: unused as an input; overwritten with the sampled grid
            each round.
        n_meas: number of measurement points per round (round 0 uses half).
        amplitude: amplitude of the calibration drive pulse.
        trigger_sep: trigger period to use during the measurement.
        two_rounds: add a third, refined measurement round.
        estimator: estimator name passed to OptimizationAnalysisNN.
        hyper_parameter_dict: estimator hyperparameters; defaults used
            if None.
        first_round_limits: (alpha_min, alpha_max, phi_min, phi_max)
            uniform sampling box for round 0.

    Keyword Args:
        std_devs: (alpha, phi) std deviations for the normal sampling
            rounds. Default [0.1, 10].
        second_round_std_scale: shrink factor for the final round's std
            deviations. Default 0.4.

    Returns:
        Tuple (_alpha, _phi, a): the optimum and the last analysis object.
    """
    # Input validation only logs errors (does not raise) by design of the
    # surrounding calibration flow.
    if not len(first_round_limits) == 4:
        log.error('Input variable `first_round_limits` in function call '
                  '`calibrate_drive_mixer_skewness_NN` needs to be a list '
                  'or 1D array of length 4.\nFound length '
                  '{} object instead!'.format(len(first_round_limits)))
    if hyper_parameter_dict is None:
        log.warning('No hyperparameters passed to predictive mixer '
                    'calibration routine. Default values for the estimator'
                    'will be used!\n')
        hyper_parameter_dict = {'hidden_layers': [10],
                                'learning_rate': 1e-3,
                                'regularization_coefficient': 0.,
                                'std_scaling': 0.6,
                                'learning_steps': 5000,
                                'cv_n_fold': 5,
                                'polynomial_dimension': 2}
    std_devs = kwargs.get('std_devs', [0.1, 10])
    c = kwargs.pop('second_round_std_scale', 0.4)
    # Could make sample size variable (maxiter) for better adapting)
    if isinstance(std_devs, (list, np.ndarray)):
        if len(std_devs) != 2:
            log.error('std_devs passed in kwargs of `calibrate_drive_'
                      'mixer_NN` is of length: {}. '
                      'Requires length 2 instead.'.format(len(std_devs)))
    MC = self.instr_mc.get_instr()
    # Seed the sampling centers with the currently configured values.
    _alpha = self.ge_alpha()
    _phi = self.ge_phi_skew()
    for runs in range(3 if two_rounds else 2):
        if runs == 0:
            # half as many points from a uniform distribution at first run
            meas_grid = np.array([
                np.random.uniform(first_round_limits[0],
                                  first_round_limits[1], n_meas//2),
                np.random.uniform(first_round_limits[2],
                                  first_round_limits[3], n_meas//2)])
        else:
            # Normal sampling around the last optimum; the final round
            # (if two_rounds) shrinks the spread by factor c.
            k = 1. if runs == 1 else c
            meas_grid = np.array([
                np.random.normal(_alpha, k*std_devs[0], n_meas),
                np.random.normal(_phi, k*std_devs[1], n_meas)])
        # Hardware sweeps: the points are encoded in the uploaded
        # sequence, the sweep functions are placeholders for bookkeeping.
        s1 = swf.Hard_Sweep()
        s1.name = 'Amplitude ratio hardware sweep'
        s1.label = r'Amplitude ratio, $\alpha$'
        s1.unit = ''
        s2 = swf.Hard_Sweep()
        s2.name = 'Phase skew hardware sweep'
        s2.label = r'Phase skew, $\phi$'
        s2.unit = 'deg'
        MC.set_sweep_functions([s1, s2])
        MC.set_sweep_points(meas_grid.T)
        # One segment per (alpha, phi_skew) sample point.
        pulse_list_list = []
        for alpha, phi_skew in meas_grid.T:
            pulse_list_list.append([self.get_acq_pars(), dict(
                pulse_type='GaussFilteredCosIQPulse',
                pulse_length=self.acq_length(),
                ref_point='start',
                amplitude=amplitude,
                I_channel=self.ge_I_channel(),
                Q_channel=self.ge_Q_channel(),
                mod_frequency=self.ge_mod_freq(),
                phase_lock=True,
                alpha=alpha,
                phi_skew=phi_skew,
            )])
        sq.pulse_list_list_seq(pulse_list_list)
        with temporary_value(
                # Park the readout on the image sideband of the drive.
                (self.ro_freq, self.ge_freq() - 2*self.ge_mod_freq()),
                (self.acq_weights_type, 'SSB'),
                (self.instr_trigger.get_instr().pulse_period, trigger_sep),
        ):
            self.prepare(drive='timedomain')
            MC.set_detector_function(self.int_avg_det)
            MC.run(name='drive_skewness_calibration' + self.msmt_suffix)
        # Fit the measured landscape and predict the optimum for the
        # next round (or the final result).
        a = ma.OptimizationAnalysisNN(
            label='drive_skewness_calibration',
            hyper_parameter_dict=hyper_parameter_dict,
            meas_grid=meas_grid.T,
            estimator=estimator,
            two_rounds=two_rounds,
            round=runs, make_fig=make_fig)
        _alpha = a.optimization_result[0]
        _phi = a.optimization_result[1]
    if update:
        self.ge_alpha(_alpha)
        self.ge_phi_skew(_phi)
    return _alpha, _phi, a
def find_optimized_weights(self, update=True, measure=True,
                           qutrit=False, acq_length=4097/1.8e9, **kw):
    """Derive optimal integration weights from state time traces.

    Measures (or reloads) readout transients for each prepared state and
    builds integration weights from the differences between the traces.
    For qutrit readout, a two-vector weight basis is constructed via
    Gram-Schmidt orthogonalization of the trace differences.

    Args:
        update: store the resulting weights on the qubit object.
        measure: if True, run measure_transients first; otherwise reuse
            the latest matching timetrace measurements.
        qutrit: use three states (g, e, f) and a two-vector weight basis.
        acq_length: acquisition window length for the transients.

    Keyword Args:
        name_extra: extra string inserted into the measurement labels.
        ref_state: qutrit reference state subtracted from the others
            (default 'g').
        non_ortho_basis: (qutrit) use the two difference vectors directly.
        basis_2nd: (qutrit) use the basis orthogonalized starting from
            the second difference vector.
        plot: make the summary plot (default True).
        tmax: x-axis limit of the plots in ns (default 300).
    """
    # FIXME: Make a proper analysis class for this (Ants, 04.12.2017)
    # I agree (Christian, 07.11.2018 -- around 1 year later)
    levels = ('g', 'e', 'f') if qutrit else ('g', 'e')
    if measure:
        self.measure_transients(analyze=True, states=levels,
                                acq_length=acq_length, **kw)
    # create label, measurement analysis and data for each level
    if kw.get("name_extra", False):
        labels = {l: 'timetrace_{}_'.format(l) + kw.get('name_extra')
                  + "_{}".format(self.name) for l in levels}
    else:
        labels = {l: 'timetrace_{}'.format(l)
                  + "_{}".format(self.name) for l in levels}
    m_a = {l: ma.MeasurementAnalysis(label=labels[l]) for l in levels}
    # Complex traces: I + 1j*Q from the two measured channels.
    iq_traces = {l: m_a[l].measured_values[0]
                 + 1j * m_a[l].measured_values[1] for l in levels}
    final_basis_labels = ['ge']  # default basis vector if only qubit ro
    if qutrit:
        ref_state = kw.get('ref_state', 'g')
        # Difference traces relative to the reference state.
        basis = [iq_traces[l] - iq_traces[ref_state] for l in levels
                 if l != ref_state]
        basis_labels = [l + ref_state for l in levels if l != ref_state]
        # Orthonormalize; gram_schmidt works on column vectors, hence the
        # transposes. (project-local `math` module, not stdlib math)
        final_basis = math.gram_schmidt(np.array(basis).transpose())
        final_basis = final_basis.transpose()  # obtain basis vect as rows
        # basis using second vector as primary vector
        basis_2nd = list(reversed(basis))
        final_basis_2nd = math.gram_schmidt(np.array(basis_2nd).transpose())
        final_basis_2nd = final_basis_2nd.transpose()
        if kw.get('non_ortho_basis', False):
            # Use the raw (non-orthogonal) difference vectors.
            final_basis_labels = basis_labels
            final_basis = np.array([final_basis[0], final_basis_2nd[0]])
        elif kw.get('basis_2nd', False):
            final_basis_labels = [basis_labels[1]] + ['ortho']
            final_basis = final_basis_2nd
        else:
            final_basis_labels = [basis_labels[0]] + ['ortho']
        log.info(f"Setting Basis: {final_basis_labels}")
    if update:
        # FIXME: could merge qutrit and non qutrit although normalization is not
        # the same but would be a good thing to do. First test if qutrit works
        # well. idem in plot
        if qutrit:
            self.acq_weights_I(final_basis[0].real)
            self.acq_weights_Q(final_basis[0].imag)
            self.acq_weights_I2(final_basis[1].real)
            self.acq_weights_Q2(final_basis[1].imag)
            self.acq_weights_basis(final_basis_labels)
        else:
            # Weights = normalized g/e trace difference (max-abs norm).
            wre = np.real(iq_traces['e'] - iq_traces['g'])
            wim = np.imag(iq_traces['e'] - iq_traces['g'])
            k = max(np.max(np.abs(wre)), np.max(np.abs(wim)))
            wre /= k
            wim /= k
            self.acq_weights_I(wre)
            self.acq_weights_Q(wim)
    if kw.get('plot', True):
        # TODO: Nathan: plot amplitude instead of I, Q ?
        npoints = len(m_a['g'].sweep_points)
        plot_ylabels = dict(g='d.c. voltage,\nNo pulse (V)',
                            e='d.c. voltage,\nPi_ge pulse (V)',
                            f='d.c. voltage,\nPi_gf pulse (V)')
        # Time base assuming 1.8 GSa/s sampling; demodulate traces to
        # baseband for plotting.
        tbase = np.linspace(0, npoints/1.8e9, npoints, endpoint=False)
        modulation = np.exp(2j * np.pi * self.ro_mod_freq() * tbase)
        fig, ax = plt.subplots(len(levels) + 1, figsize=(20, 20))
        ax[0].set_title('optimized weights ' + self.name +
                        "".join('\n' + m_a[l].timestamp_string for l in levels)
                        + f'\nWeight Basis: {final_basis_labels}')
        # One panel per state trace ...
        for i, l in enumerate(levels):
            ax[i].plot(tbase / 1e-9, np.real(iq_traces[l] * modulation), '-',
                       label='I_' + l)
            ax[i].plot(tbase / 1e-9, np.imag(iq_traces[l] * modulation), '-',
                       label='Q_' + l)
            ax[i].set_ylabel(plot_ylabels[l])
            ax[i].set_xlim(0, kw.get('tmax', 300))
            ax[i].legend(loc='upper right')
        # ... plus a final panel with the resulting weight vectors.
        if qutrit:
            for i, vect in enumerate(final_basis):
                ax[-1].plot(tbase / 1e-9, np.real(vect * modulation), '-',
                            label='I_' + str(i))
                ax[-1].plot(tbase / 1e-9, np.imag(vect * modulation), '-',
                            label='Q_' + str(i))
        else:
            ax[-1].plot(tbase / 1e-9,
                        np.real((iq_traces['e'] - iq_traces['g']) * modulation), '-',
                        label='I')
            ax[-1].plot(tbase / 1e-9,
                        np.imag((iq_traces['e'] - iq_traces['g']) * modulation), '-',
                        label='Q')
        ax[-1].set_ylabel('d.c. voltage\ndifference (V)')
        ax[-1].set_xlim(0, kw.get('tmax', 300))
        ax[-1].legend(loc='upper right')
        ax[-1].set_xlabel('Time (ns)')
        m_a['g'].save_fig(plt.gcf(), 'timetraces', xlabel='time',
                          ylabel='voltage')
        plt.tight_layout()
        plt.close()
def find_ssro_fidelity(self, analyze=True, close_fig=True, no_fits=False,
                       upload=True, thresholded=False, label=None,
                       RO_comm=3 / 225e6, RO_slack=150e-9,
                       qutrit=False, update=False, prep_params=None):
    """
    Conduct an off-on measurement on the qubit recording single-shot
    results and determine the single shot readout fidelity.

    Calculates the assignment fidelity `F_a` which is the average
    probability of correctly guessing the state that was prepared. If
    `no_fits` is `False` also finds the discrimination fidelity F_d, that
    takes into account the probability of a bit flip after state
    preparation, by fitting double gaussians to both |0> prepared and |1>
    prepared datasets.

    Args:
        analyze: Boolean flag, whether to analyse the measurement results.
            Default `True`.
        close_fig: Boolean flag to close the matplotlib's figure. If
            `False`, then the plots can be viewed with `plt.show()`
            Default `True`.
        no_fits: Boolean flag to disable finding the discrimination
            fidelity. Default `False`.
        upload: Whether to upload the sequence to the AWG. Default `True`.
        thresholded: Use the hardware-thresholded (digital) log detector
            instead of the integration log detector. Default `False`.
        label: Measurement label; auto-generated if `None`.
        RO_comm: Granularity the readout spacing is rounded up to.
        RO_slack: Extra wait time added after the acquisition window.
        qutrit: SSRO for 3 levels readout.
        update: Whether to store the qutrit classifier results on the
            qubit object (qutrit mode only). Default `False`.
        prep_params: State-preparation parameters; defaults to
            `self.preparation_params()`.

    Returns:
        Qutrit mode: (state_prob_mtx, classifier_params).
        Otherwise, if `no_fits` is `False` returns assignment fidelity,
        discrimination fidelity and SNR = 2 |mu00 - mu11| /
        (sigma00 + sigma11). Else returns just assignment fidelity.
        Returns nothing when `analyze` is `False`.
    """
    MC = self.instr_mc.get_instr()
    if label is None:
        label = 'SSRO_fidelity'
        if thresholded:
            label += '_thresh'
    if prep_params is None:
        prep_params = self.preparation_params()
    self.prepare(drive='timedomain')
    # Readout spacing: UHF holdoff (in samples at 1.8 GSa/s) plus the
    # acquisition window plus slack, rounded up to a multiple of RO_comm.
    RO_spacing = self.instr_uhf.get_instr().qas_0_delay() / 1.8e9
    RO_spacing += self.acq_length()
    RO_spacing += RO_slack  # for slack
    RO_spacing = np.ceil(RO_spacing / RO_comm) * RO_comm
    if prep_params['preparation_type'] not in ['preselection', 'wait']:
        raise NotImplementedError()
    preselection = prep_params['preparation_type'] == 'preselection'
    if thresholded:
        det_func = self.dig_log_det
    else:
        det_func = self.int_log_det
    if qutrit:
        states = ('g', 'e', 'f')
        # One single-shot measurement per prepared state.
        for state in states:
            seq, swp = sq.single_state_active_reset(
                operation_dict=self.get_operation_dict(),
                qb_name=self.name, state=state,
                prep_params=prep_params, upload=False)
            # set sweep function and run measurement
            MC.set_sweep_function(awg_swf.SegmentHardSweep(sequence=seq,
                                                           upload=upload))
            MC.set_sweep_points(swp)
            MC.set_detector_function(det_func)
            with temporary_value(MC.soft_avg, 1):
                MC.run(name=label + '_{}'.format(state) + self.msmt_suffix)
    else:
        MC.set_sweep_function(awg_swf2.n_qubit_off_on(
            pulse_pars_list=[self.get_ge_pars()],
            RO_pars_list=[self.get_ro_pars()],
            upload=upload,
            preselection=preselection,
            RO_spacing=RO_spacing))
        # With preselection each prepared state takes two segments.
        MC.set_sweep_points(np.arange(4 if preselection else 2))
        MC.set_detector_function(det_func)
        with temporary_value(MC.soft_avg, 1):
            MC.run(name=label + self.msmt_suffix)
    if analyze:
        if qutrit:
            # TODO Nathan: could try and merge this with no qutrit to
            # avoid logical branching
            options = \
                dict(classif_method='threshold' if thresholded else 'gmm',
                     pre_selection=preselection)
            # options = 'gmm'
            labels = [label + '_{}'.format(l) for l in states]
            ssqtro = \
                Singleshot_Readout_Analysis_Qutrit(label=labels,
                                                   options_dict=options)
            state_prob_mtx = ssqtro.proc_data_dict[
                'analysis_params']['state_prob_mtx_masked']
            classifier_params = ssqtro.proc_data_dict[
                'analysis_params'].get('classifier_params', None)
            if update:
                self.acq_classifier_params(classifier_params)
                self.acq_state_prob_mtx(state_prob_mtx)
            return state_prob_mtx, classifier_params
        else:
            # Rotate the IQ data only for the soft-modulated weight types.
            rotate = self.acq_weights_type() in {'SSB', 'DSB'}
            preselection = prep_params['preparation_type'] == 'preselection'
            channels = det_func.value_names
            # With preselection the state readouts are samples 0 and 2
            # (samples 1 and 3 are the preselection readouts).
            if preselection:
                nr_samples = 4
                sample_0 = 0
                sample_1 = 2
            else:
                nr_samples = 2
                sample_0 = 0
                sample_1 = 1
            ana = ma.SSRO_Analysis(auto=True, close_fig=close_fig,
                                   qb_name=self.name,
                                   rotate=rotate, no_fits=no_fits,
                                   channels=channels, nr_samples=nr_samples,
                                   sample_0=sample_0, sample_1=sample_1,
                                   preselection=preselection)
            if not no_fits:
                return ana.F_a, ana.F_d, ana.SNR
            else:
                return ana.F_a
def find_readout_angle(self, MC=None, upload=True, close_fig=True, update=True, nreps=10):
    """
    Finds the optimal angle on the IQ plane for readout (optimal phase for
    the boxcar integration weights).

    If the Q weight channel is set to `None`, sets it to the next channel
    after I and switches to SSB weights.

    Args:
        MC: MeasurementControl object to use. Default `None`
            (uses self.instr_mc).
        upload: Whether to update the AWG sequence. Default `True`.
        close_fig: Whether to close the figures in measurement analysis.
            Default `True`.
        update: Whether to add the fitted angle to `acq_IQ_angle`.
            Default `True`.
        nreps: Number of repetitions (2nd sweep dimension). Default 10.

    Returns:
        The fitted IQ rotation angle theta.
    """
    if MC is None:
        MC = self.instr_mc.get_instr()
    label = 'RO_theta'
    if self.acq_weights_Q() is None:
        # NOTE(review): wraps at 9 -- confirm against the number of
        # available weight channels (UHF typically has 10, i.e. % 10).
        self.acq_weights_Q(
            (self.acq_weights_I() + 1) % 9)
        self.set_readout_weights(weights_type='SSB')
    # Force an even number of shots (|0>/|1> segment pairs); restore the
    # previous setting after the measurement.
    prev_shots = self.acq_shots()
    self.acq_shots(2*(self.acq_shots()//2))
    self.prepare(drive='timedomain')
    MC.set_sweep_function(awg_swf.SingleLevel(
        pulse_pars=self.get_ge_pars(),
        RO_pars=self.get_ro_pars(),
        upload=upload,
        preselection=False))
    MC.set_sweep_points(np.arange(self.acq_shots()))
    MC.set_detector_function(self.int_log_det)
    # Single-shot measurement: disable soft averaging for the run.
    prev_avg = MC.soft_avg()
    MC.soft_avg(1)
    mode = '1D'
    if nreps > 1:
        MC.set_sweep_function_2D(swf.None_Sweep())
        MC.set_sweep_points_2D(np.arange(nreps))
        mode = '2D'
    MC.run(name=label + self.msmt_suffix, mode=mode)
    # Restore the settings changed for this measurement.
    MC.soft_avg(prev_avg)
    self.acq_shots(prev_shots)
    rotate = self.acq_weights_Q() is not None
    channels = self.int_log_det.value_names
    ana = ma.SSRO_Analysis(auto=True, close_fig=close_fig,
                           rotate=rotate, no_fits=True,
                           channels=channels,
                           preselection=False)
    if update:
        self.acq_IQ_angle(self.acq_IQ_angle() + ana.theta)
    return ana.theta
def find_qubit_frequency(self, freqs, method='cw_spectroscopy',
                         update=False, trigger_separation=3e-6,
                         close_fig=True, analyze_ef=False, analyze=True,
                         upload=True, label=None, **kw):
    """
    Find the qubit frequency from a CW or pulsed spectroscopy measurement.

    WARNING: Does not automatically update the qubit frequency parameter.
    Set update=True if you want this!

    Args:
        freqs: array of drive frequencies to sweep (in Hz). If None, the
            sweep points are constructed from the kwargs 'f_mean'
            (default self.f_qubit()), 'f_span' (default 100 MHz) and
            'nr_points' (default 100).
        method: the spectroscopy type; any string containing 'pulse'
            selects pulsed spectroscopy, otherwise CW spectroscopy.
        update: whether to update the relevant qubit
            parameters with the found frequency(ies)
        trigger_separation: trigger period for the CW measurement
        close_fig: whether or not to close the figure
        analyze_ef: whether or not to also look for the gf/2 peak
        analyze: whether to run the spectroscopy analysis
        upload: whether to upload the sequence (pulsed only)
        label: measurement label; auto-generated if None

    Keyword Args:
        interactive_plot: (default=False)
            whether to plot with plotly or not
        percentile: (default=20)
            percentile of the data that is considered background noise
        num_sigma_threshold: (default=5)
            used to define the threshold above(below) which to look for
            peaks(dips); threshold = background_mean +
            num_sigma_threshold * background_std
        window_len: (default=3)
            filtering window length; uses a_tools.smooth
        analysis_window: (default=10)
            how many data points (calibration points) to remove before
            sending data to peak_finder; uses a_tools.cut_edges,
            data = data[(analysis_window//2):-(analysis_window//2)]
        amp_only: (default=False)
            whether only I data exists
        save_name: (default='Source Frequency')
            figure name with which it will be saved
        auto: (default=True)
            automatically perform the entire analysis upon call
        folder: (default=working folder)
            working folder
        NoCalPoints: (default=4)
            number of calibration points
        print_fit_results: (default=True)
            print the fit report
        print_frequency: (default=False)
            whether to print the f_ge and f_gf/2
        make_fig: (default=True)
            whether or not to make a figure
        show: (default=True)
            show the plots
        show_guess: (default=False)
            plot with initial guess values
        close_file: (default=True)
            close the hdf5 file

    Returns:
        The peak frequency f0 (and f0_ef if analyze_ef); 0 if freqs is
        None and no f_mean could be determined; None if analyze is False.
    """
    if not update:
        log.warning("Does not automatically update the qubit "
                    "frequency parameter. "
                    "Set update=True if you want this!")
    # Construct the sweep points BEFORE the sanity check below: the
    # original order raised a TypeError (None < float) when freqs=None.
    if freqs is None:
        f_span = kw.get('f_span', 100e6)
        f_mean = kw.get('f_mean', self.f_qubit())
        nr_points = kw.get('nr_points', 100)
        if f_mean == 0:
            log.warning("find_frequency does not know where to "
                        "look for the qubit. Please specify the "
                        "f_mean or the freqs function parameter.")
            return 0
        else:
            freqs = np.linspace(f_mean - f_span/2, f_mean + f_span/2,
                                nr_points)
    if np.any(freqs < 500e6):
        log.warning(('Some of the values in the freqs array might be '
                     'too small. The units should be Hz.'))
    if 'pulse' not in method.lower():
        if label is None:
            label = 'spectroscopy' + self.msmt_suffix
        if analyze_ef:
            label = 'high_power_' + label
        self.measure_qubit_spectroscopy(freqs, pulsed=False,
                                        trigger_separation=trigger_separation,
                                        label=label, close_fig=close_fig)
    else:
        if label is None:
            label = 'pulsed_spec' + self.msmt_suffix
        if analyze_ef:
            label = 'high_power_' + label
        self.measure_qubit_spectroscopy(freqs, pulsed=True, label=label,
                                        close_fig=close_fig, upload=upload)
    if analyze:
        SpecA = ma.Qubit_Spectroscopy_Analysis(
            qb_name=self.name,
            analyze_ef=analyze_ef,
            label=label,
            close_fig=close_fig, **kw)
        f0 = SpecA.fitted_freq
        f0_ef = None
        if analyze_ef:
            # Bug fix: compute f0_ef independently of `update`; it used
            # to be defined only inside the update branch, causing a
            # NameError on return when update=False.
            f0_ef = 2*SpecA.fitted_freq_gf_over_2 - f0
        if update:
            if analyze_ef:
                self.ef_freq(f0_ef)
            else:
                self.ge_freq(f0)
        if analyze_ef:
            return f0, f0_ef
        else:
            return f0
    else:
        return
def find_amplitudes(self, rabi_amps=None, label=None, for_ef=False,
                    n_cal_points_per_state=2, cal_states='auto',
                    upload=True, last_ge_pulse=False, classified_ro=False,
                    prep_params=None, analyze=True, update=False,
                    exp_metadata=None, **kw):
    """
    Finds the pi and pi/2 pulse amplitudes from the fit to a Rabi
    experiment. Uses the RabiAnalysis class from timedomain_analysis.py.

    WARNING: Does not automatically update the qubit amplitudes.
    Set update=True if you want this!

    Analysis steps for the Rabi measurement:
    1. The I and Q data are rotated and normalized based on the calibration
       points. In most analysis routines, the latter are typically 4:
       2 X180 measurements, and 2 identity measurements, which get
       averaged resulting in one X180 point and one identity point.
       However, the default for Rabi is 2 (2 identity measurements)
       because we typically do Rabi in order to find the correct amplitude
       for an X180 pulse. However, if a previous such value exists, this
       routine also accepts 4 cal pts. If X180_ef pulse was also
       previously calibrated, this routine also accepts 6 cal pts.
    2. The normalized data is fitted to a cosine function.
    3. The pi-pulse and pi/2-pulse amplitudes are calculated from the fit.
    4. The normalized data, the best fit results, and the pi and pi/2
       pulses are plotted.

    The ef analysis assumes that the e population is zero (because of the
    ge X180 pulse at the end).

    Args:
        rabi_amps: amplitude sweep points for the Rabi experiment
            (required; a ValueError is raised if None).
        label: label of the analysis routine
        for_ef: find amplitudes for the ef transition
        n_cal_points_per_state: calibration points per cal state
        cal_states: which calibration states to use ('auto' or explicit)
        upload: whether to upload the sequence
        last_ge_pulse: whether to map the population to the ground
            state after each run of the Rabi experiment on the ef level
        classified_ro: use the classified-readout detector
        prep_params: state-preparation parameters
        analyze: run the Rabi analysis
        update: update the qubit amp180 and amp90_scale parameters
        exp_metadata: extra metadata stored with the measurement

    Keyword Args:
        n: (default=1) how many times to apply the Rabi pulse per segment.
        Further kwargs are accepted for backwards compatibility.

    Returns:
        None. The fitted amplitudes are stored on the qubit object when
        update=True.
    """
    if not update:
        log.warning("Does not automatically update the qubit pi and "
                    "pi/2 amplitudes. Set update=True if you want this!")
    # how many times to apply the Rabi pulse
    n = kw.get('n', 1)
    if rabi_amps is None:
        raise ValueError('rabi_amps is None.')
    # Perform Rabi
    self.measure_rabi(amps=rabi_amps, analyze=False,
                      upload=upload, label=label, n=n,
                      n_cal_points_per_state=n_cal_points_per_state,
                      cal_states=cal_states, last_ge_pulse=last_ge_pulse,
                      for_ef=for_ef, classified_ro=classified_ro,
                      prep_params=prep_params, exp_metadata=exp_metadata)
    # get pi and pi/2 amplitudes from the analysis results
    if analyze:
        rabi_ana = tda.RabiAnalysis(qb_names=[self.name])
        if update:
            amp180 = rabi_ana.proc_data_dict['analysis_params_dict'][
                self.name]['piPulse']
            # amp90 is always taken as half the pi-pulse amplitude.
            if not for_ef:
                self.ge_amp180(amp180)
                self.ge_amp90_scale(0.5)
            else:
                self.ef_amp180(amp180)
                self.ef_amp90_scale(0.5)
    return
def find_T1(self, times, n_cal_points_per_state=2, cal_states='auto',
            upload=True, last_ge_pulse=False, classified_ro=False,
            prep_params=None, analyze=True, update=False, label=None,
            for_ef=False, exp_metadata=None, **kw):
    """
    Finds the relaxation time T1 from the fit to an exponential
    decay function. Uses the T1Analysis class from timedomain_analysis.py.

    WARNING: Does not automatically update the qubit T1 parameter.
    Set update=True if you want this!

    Routine:
    1. Apply pi pulse to get population in the excited state.
    2. Wait for different amounts of time before doing a measurement.

    The ef analysis assumes that the e population is zero (because of the
    ge X180 pulse at the end).

    Args:
        times: array of delays (in seconds) before the measurement. If
            None, constructed from the kwargs 'times_mean' (default
            5 us), 'times_span' (default 10 us) and 'nr_points'
            (default 50) as np.linspace(times_mean - times_span/2,
            times_mean + times_span/2, nr_points).
        n_cal_points_per_state: calibration points per cal state
        cal_states: which calibration states to use ('auto' or explicit)
        upload: whether to upload the sequence
        last_ge_pulse: map ef population back down after each segment
        classified_ro: use the classified-readout detector
        prep_params: state-preparation parameters
        analyze: run the T1 analysis
        update: update the qubit T1 (or T1_ef) parameter
        label: measurement label
        for_ef: find T1 for the 2nd excitation (ef)
        exp_metadata: extra metadata stored with the measurement

    Returns:
        The fitted relaxation time T1 (None if analyze is False; 0 if
        times is None and no times_mean could be determined).
    """
    if not update:
        log.warning("Does not automatically update the qubit "
                    "T1 parameter. Set update=True if you want this!")
    # Construct the sweep points BEFORE the sanity check below: the
    # original order raised a TypeError (None > float) when times=None.
    if times is None:
        times_span = kw.get('times_span', 10e-6)
        times_mean = kw.get('times_mean', 5e-6)
        nr_points = kw.get('nr_points', 50)
        if times_mean == 0:
            log.warning("find_T1 does not know how long to wait before"
                        "doing the read out. Please specify the "
                        "times_mean or the times function parameter.")
            return 0
        else:
            times = np.linspace(times_mean - times_span / 2, times_mean +
                                times_span / 2, nr_points)
    if np.any(times > 1e-3):
        raise ValueError('Some of the values in the times array might be too '
                         'large. The units should be seconds.')
    # Perform measurement
    self.measure_T1(times=times,
                    analyze=False, upload=upload,
                    last_ge_pulse=last_ge_pulse, for_ef=for_ef,
                    n_cal_points_per_state=n_cal_points_per_state,
                    cal_states=cal_states, classified_ro=classified_ro,
                    prep_params=prep_params, label=label,
                    exp_metadata=exp_metadata)
    # Extract T1 from the analysis
    T1 = None
    if analyze:
        T1_ana = tda.T1Analysis(qb_names=[self.name])
        # Bug fix: extract T1 regardless of `update`; previously it was
        # only defined inside the update branch, so `return T1` raised a
        # NameError when update=False.
        T1 = T1_ana.proc_data_dict['analysis_params_dict'][
            self.name]['T1']
        if update:
            if for_ef:
                self.T1_ef(T1)
            else:
                self.T1(T1)
    return T1
def find_rb_gate_fidelity(self, cliffords, nr_seeds, label=None,
                          gate_decomposition='HZ', interleaved_gate=None,
                          thresholded=True, classified_ro=False,
                          n_cal_points_per_state=2, cal_states=(),
                          upload=True, analyze=True,
                          prep_params=None, exp_metadata=None, **kw):
    """Run (interleaved) randomized benchmarking and its analysis.

    Args:
        cliffords: array of Clifford-sequence lengths (required).
        nr_seeds: number of random seeds per sequence length.
        label: measurement label; auto-generated from the RB settings
            if None.
        gate_decomposition: Clifford decomposition to use.
        interleaved_gate: gate for interleaved RB, or None for plain RB.
        thresholded: use the thresholded detector.
        classified_ro: use the classified-readout detector.
        n_cal_points_per_state: calibration points per cal state.
        cal_states: calibration states to include.
        upload: whether to upload the sequence.
        analyze: run the pipeline analysis after the measurement.
        prep_params: state-preparation parameters.
        exp_metadata: extra metadata stored with the measurement.
    """
    # Guard clause: a sequence-length array is mandatory.
    if cliffords is None:
        raise ValueError("Unspecified cliffords array")
    if label is None:
        max_cliffords = cliffords[-1]
        if interleaved_gate is None:
            base = (f'RB_{gate_decomposition}_{nr_seeds}_seeds_'
                    f'{max_cliffords}_cliffords')
        else:
            base = (f'IRB_{interleaved_gate}_{gate_decomposition}_'
                    f'{nr_seeds}_seeds_{max_cliffords}_cliffords')
        label = base + self.msmt_suffix
    # Run the measurement; the analysis is triggered separately below.
    self.measure_randomized_benchmarking(
        cliffords=cliffords, nr_seeds=nr_seeds,
        gate_decomp=gate_decomposition, interleaved_gate=interleaved_gate,
        n_cal_points_per_state=n_cal_points_per_state,
        cal_states=cal_states, classified_ro=classified_ro,
        thresholded=thresholded, label=label, upload=upload,
        analyze=False, prep_params=prep_params,
        exp_metadata=exp_metadata)
    if analyze:
        pla.PipelineDataAnalysis()
def find_frequency_T2_ramsey(self, times, artificial_detunings=None,
                             upload=True, label=None, n=1,
                             cal_states="auto", n_cal_points_per_state=2,
                             analyze=True, update=False, for_ef=False,
                             last_ge_pulse=False, classified_ro=False,
                             prep_params=None, exp_metadata=None, **kw):
    """
    Finds the real qubit GE or EF transition frequencies and the dephasing
    rates T2* or T2*_ef from the fit to a Ramsey experiment.

    Uses the legacy Ramsey_Analysis when more than one artificial
    detuning is given, and tda.RamseyAnalysis otherwise.
    Has support only for 1 or 2 artificial detunings.

    WARNING: Does not automatically update the qubit freq and T2_star
    parameters. Set update=True if you want this!

    Args:
        times: array of Ramsey delays (in seconds) to sweep.
        artificial_detunings: difference between drive frequency and
            qubit frequency estimated from qubit spectroscopy. A scalar,
            or a list with one or two entries.
        upload: upload sequence to AWG
        label: measurement label
        n: number of pulse repetitions per segment
        cal_states: which calibration states to use ('auto' or explicit)
        n_cal_points_per_state: calibration points per cal state
        analyze: perform analysis
        update: update the qubit frequency and T2* parameters
        for_ef: perform msmt and analysis on the ef transition
        last_ge_pulse: ge pi pulse at the end of each sequence
        classified_ro: use the classified-readout detector
        prep_params: state-preparation parameters
        exp_metadata: extra metadata stored with the measurement

    Keyword Args:
        fit_gaussian_decay: (default=True) passed to tda.RamseyAnalysis.
        Remaining kwargs are forwarded to the multiple-detunings
        analysis.

    Returns:
        None. The fitted frequency and T2* are written to the qubit
        parameters when update=True.
    """
    if not update:
        log.warning("Does not automatically update the qubit frequency "
                    "and T2_star parameters. "
                    "Set update=True if you want this!")
    if artificial_detunings is None:
        log.warning('Artificial_detuning is None; qubit driven at "%s" '
                    'estimated with spectroscopy' % self.f_qubit())
    # Bug fix: only check the detuning magnitude when a detuning was
    # given -- np.abs(None) raised a TypeError before.
    elif np.any(np.asarray(np.abs(artificial_detunings)) < 1e3):
        log.warning('The artificial detuning is too small.')
    if np.any(times > 1e-3):
        log.warning('The values in the times array might be too large.')
    self.measure_ramsey(times, artificial_detunings=artificial_detunings,
                        label=label, cal_states=cal_states, n=n,
                        n_cal_points_per_state=n_cal_points_per_state,
                        last_ge_pulse=last_ge_pulse, for_ef=for_ef,
                        classified_ro=classified_ro, upload=upload,
                        prep_params=prep_params, exp_metadata=exp_metadata,
                        analyze=False)
    # Check if one or more artificial detunings were given.
    if (hasattr(artificial_detunings, '__iter__') and
            (len(artificial_detunings) > 1)):
        multiple_detunings = True
    else:
        multiple_detunings = False
    if analyze:
        if multiple_detunings:
            # Legacy analysis with multiple-detunings support.
            ramsey_ana = ma.Ramsey_Analysis(
                auto=True,
                label=label,
                qb_name=self.name,
                NoCalPoints=len(cal_states)*n_cal_points_per_state,
                for_ef=for_ef,
                last_ge_pulse=last_ge_pulse,
                artificial_detuning=artificial_detunings, **kw)
            # get new freq and T2* from analysis results
            new_qubit_freq = ramsey_ana.qubit_frequency  # value
            T2_star = ramsey_ana.T2_star['T2_star']  # dict
        else:
            ramsey_ana = tda.RamseyAnalysis(
                qb_names=[self.name], options_dict=dict(
                    fit_gaussian_decay=kw.get('fit_gaussian_decay', True)))
            new_qubit_freq = ramsey_ana.proc_data_dict[
                'analysis_params_dict'][self.name]['exp_decay_' + self.name][
                'new_qb_freq']
            T2_star = ramsey_ana.proc_data_dict[
                'analysis_params_dict'][self.name]['exp_decay_' + self.name][
                'T2_star']
        if update:
            # Each parameter update is wrapped individually so a missing
            # parameter on the qubit object does not abort the others.
            if for_ef:
                try:
                    self.ef_freq(new_qubit_freq)
                except AttributeError as e:
                    log.warning('%s. This parameter will not be '
                                'updated.' % e)
                try:
                    self.T2_star_ef(T2_star)
                except AttributeError as e:
                    log.warning('%s. This parameter will not be '
                                'updated.' % e)
            else:
                try:
                    self.ge_freq(new_qubit_freq)
                except AttributeError as e:
                    log.warning('%s. This parameter will not be '
                                'updated.' % e)
                try:
                    self.T2_star(T2_star)
                except AttributeError as e:
                    log.warning('%s. This parameter will not be '
                                'updated.' % e)
def find_T2_echo(self, times, artificial_detuning=None,
                 upload=True, label=None,
                 cal_points=True, no_cal_points=None,
                 analyze=True, for_ef=False,
                 close_fig=True, update=False,
                 last_ge_pulse=False, **kw):
    """
    Finds the qubit T2 echo time.

    Runs an echo measurement over `times` and fits the decay using
    tda.EchoAnalysis (timedomain_analysis.py).
    WARNING: Does not automatically update the qubit T2 parameter.
    Set update=True if you want this!

    Args:
        times: array of pulse-separation times (in s) to sweep. If None,
            the sweep points are built from the keyword arguments
            times_mean/times_span/nr_points.
        artificial_detuning: difference between drive frequency and the
            qubit frequency estimated from spectroscopy; None applies a
            resonant drive.
        upload: upload the sequence to the AWG.
        label: measurement label; auto-generated if None.
        cal_points: whether to use calibration points.
        no_cal_points: number of calibration points; if None, defaults
            to 4 (6 if for_ef).
        analyze: run the EchoAnalysis on the measured data.
        for_ef: measure the echo on the ef transition.
        close_fig: close the resulting figure.
        update: update the qubit T2 parameter with the fitted value.
        last_ge_pulse: apply an X180 ge pulse at the end (ef only).

    Keyword Arguments:
        times_span (float): span of auto-generated times (default 5e-6).
        times_mean (float): mean of auto-generated times (default 2.5e-6).
        nr_points (int): number of auto-generated points (default 50).
        fit_gaussian_decay (bool): fit a Gaussian decay (default True).

    Returns:
        None (0 if neither `times` nor a usable times_mean was given).
    """
    if not update:
        log.warning("Does not automatically update the qubit "
                    "T2_echo parameter. "
                    "Set update=True if you want this!")
    # PEP 8: compare to None with `is`, not `==` (which may invoke a
    # custom __eq__).
    if artificial_detuning is None:
        log.warning('Artificial_detuning is None; applying resonant '
                    'drive.')
    else:
        if np.any(np.asarray(np.abs(artificial_detuning)) < 1e3):
            log.warning('The artificial detuning is too small.')
    # BUG FIX: previously `np.any(times > 1e-3)` ran unconditionally and
    # raised a TypeError when times is None (the None handling only
    # happened further below); only sanity-check units when times is given.
    if times is not None and np.any(np.asarray(times) > 1e-3):
        log.warning('The values in the times array might be too large.')

    if cal_points and no_cal_points is None:
        log.warning('no_cal_points is None. Defaults to 4 if '
                    'for_ef==False, or to 6 if for_ef==True.')
        no_cal_points = 6 if for_ef else 4
    if not cal_points:
        no_cal_points = 0

    if label is None:
        label = ('Echo_ef' if for_ef else 'Echo') + self.msmt_suffix

    if times is None:
        times_span = kw.get('times_span', 5e-6)
        times_mean = kw.get('times_mean', 2.5e-6)
        nr_points = kw.get('nr_points', 50)
        if times_mean == 0:
            log.warning("find_T2_echo does not know "
                        "over which times to do Ramsey. Please "
                        "specify the times_mean or the times "
                        "function parameter.")
            return 0
        else:
            times = np.linspace(times_mean - times_span/2,
                                times_mean + times_span/2,
                                nr_points)

    # perform measurement
    if for_ef:
        self.measure_echo_2nd_exc(times=times,
                                  artificial_detuning=artificial_detuning,
                                  label=label, cal_points=cal_points,
                                  no_cal_points=no_cal_points,
                                  upload=upload,
                                  last_ge_pulse=last_ge_pulse)
    else:
        self.measure_echo(
            times=times, artificial_detuning=artificial_detuning,
            cal_points=cal_points,
            close_fig=close_fig, upload=upload, label=label)

    if analyze:
        echo_ana = tda.EchoAnalysis(
            qb_names=[self.name],
            options_dict={
                'artificial_detuning': artificial_detuning,
                'fit_gaussian_decay':
                    kw.get('fit_gaussian_decay', True)})
        if update:
            T2_echo = echo_ana.proc_data_dict[
                'analysis_params_dict'][self.name]['T2_echo']
            try:
                self.T2(T2_echo)
            except AttributeError as e:
                log.warning('%s. This parameter will not be '
                            'updated.' % e)
    return
def find_qscale(self, qscales, label=None, for_ef=False,
                last_ge_pulse=False, upload=True, analyze=True,
                cal_states="auto", n_cal_points_per_state=2,
                classified_ro=False, prep_params=None,
                exp_metadata=None, update=False, **kw):
    '''
    Performs the QScale calibration measurement ( (xX)-(xY)-(xmY) ) and
    extracts the optimal QScale (DRAG) parameter from the fits
    (tda.QScaleAnalysis).

    WARNING: Does not automatically update the qubit qscale parameter.
    Set update=True if you want this!

    Analysis outline (QScale analysis):
        1. The I and Q data are rotated and normalized based on the
           calibration points.
        2. The data points for the same qscale value are extracted
           (every 3rd point, because the sequence applies the 3 sets of
           pulses (xX), (xY), (xmY) consecutively for each qscale value).
        3. The xX data is fitted to a constant model, the other two to
           linear models.
        4. The data and the resulting fits are plotted together.
        5. The optimal qscale parameter is obtained from the point where
           the two linear fits intersect.

    Args:
        qscales: array of qscale values over which to sweep, or pass
            qscales_mean and qscales_span via kw to construct
            np.linspace(mean - span/2, mean + span/2, nr_points).
        label: measurement label.
        for_ef: calibrate the ef-transition DRAG parameter instead of ge.
        last_ge_pulse: apply an X180 ge pulse at the end (ef only).
        upload: upload the sequence to the AWG.
        analyze: run the QScaleAnalysis after the measurement.
        cal_states: calibration states specification, or "auto".
        n_cal_points_per_state: calibration segments per state.
        classified_ro: use classified readout.
        prep_params: preparation parameters.
        exp_metadata: extra metadata stored with the experiment.
        update: update the qubit motzoi parameter with the found value.

    Keyword Arguments:
        qscales_mean / qscales_span / nr_points: alternative sweep-point
            specification (see `qscales` above).

    Returns:
        None; the optimal qscale is written to the qubit parameter when
        update=True.
    '''
    if not update:
        log.warning("Does not automatically update the qubit qscale "
                    "parameter. "
                    "Set update=True if you want this!")

    # Each qscale value is measured with 3 pulse combinations
    # ((xX), (xY), (xmY)), hence every sweep point is tripled.
    qscales = np.repeat(qscales, 3)

    # Perform the qscale calibration measurement
    self.measure_qscale(qscales=qscales, upload=upload, label=label,
                        cal_states=cal_states, exp_metadata=exp_metadata,
                        n_cal_points_per_state=n_cal_points_per_state,
                        last_ge_pulse=last_ge_pulse, for_ef=for_ef,
                        classified_ro=classified_ro,
                        prep_params=prep_params, analyze=False)

    # Perform analysis and extract the optimal qscale parameter
    if analyze:
        qscale_ana = tda.QScaleAnalysis(qb_names=[self.name])
        if update:
            qscale = qscale_ana.proc_data_dict['analysis_params_dict'][
                self.name]['qscale']
            if for_ef:
                self.ef_motzoi(qscale)
            else:
                self.ge_motzoi(qscale)
    return
def calculate_anharmonicity(self, update=False):
    """
    Compute the qubit anharmonicity as f_ef - f_ge.

    Both transition frequencies are read from the qubit parameters
    (self.ge_freq and self.ef_freq) and are assumed to have been
    measured already.
    WARNING: Does not automatically update the qubit anharmonicity
    parameter. Set update=True if you want this!

    Args:
        update: store the computed value in self.anharmonicity.

    Returns:
        The anharmonicity f_ef - f_ge (same units as the frequency
        parameters).
    """
    if not update:
        log.warning("Does not automatically update the qubit "
                    "anharmonicity parameter. "
                    "Set update=True if you want this!")

    # Warn about obviously-unset frequencies before computing.
    if self.ge_freq() == 0:
        log.warning('f_ge = 0. Run qubit spectroscopy or Ramsey.')
    if self.ef_freq() == 0:
        log.warning('f_ef = 0. Run qubit spectroscopy or Ramsey.')

    alpha = self.ef_freq() - self.ge_freq()
    if update:
        self.anharmonicity(alpha)
    return alpha
def calculate_EC_EJ(self, update=True, **kw):
    """
    Fit the transmon charging energy EC and Josephson energy EJ.

    Performs a least-squares fit of the transmon Hamiltonian transition
    frequencies (a_tools.fit_EC_EJ) to the measured f_ge and f_ef.
    WARNING: Does not automatically update the qubit EC and EJ
    parameters. Set update=True if you want this!

    Args:
        update: store the fitted values in EC_qubit and EJ_qubit.

    Keyword Arguments:
        asym: (default=0) asymmetry d (Koch (2007), eqn 2.18) for
            asymmetric junctions.
        reduced_flux: (default=0) reduced magnetic flux through SQUID.
        no_transitions: (default=2) number of transitions (levels) of
            interest.
        dim: (default=None) Hamiltonian dimension (2*dim+1, 2*dim+1).

    Returns:
        Tuple (EC, EJ) of the fitted energies.
    """
    if not update:
        log.warning("Does not automatically update the qubit EC and EJ "
                    "parameters. "
                    "Set update=True if you want this!")

    EC, EJ = a_tools.fit_EC_EJ(self.f_qubit(), self.f_ef_qubit(), **kw)
    if update:
        self.EC_qubit(EC)
        self.EJ_qubit(EJ)
    return EC, EJ
def find_readout_frequency(self, freqs=None, update=False, MC=None,
                           qutrit=False, **kw):
    """
    Find the readout frequency at which the contrast between the qubit
    states is highest.

    You need a working pi-pulse for this to work, as well as a pi_ef
    pulse if you intend to use 3-level readout. Also, if your
    readout pulse length is much longer than the T1, the results will
    not be nice as the excited state spectrum will be mixed with the
    ground state spectrum.

    Args:
        freqs: frequencies to sweep in Hz. If None, a window of
            +/- kw['f_span'] (default 20e6) around the current f_RO
            with kw['n_freq'] (default 401) points is used.
        update: update the RO resonator parameters with the results.
        MC: MeasurementControl instance; defaults to self.instr_mc.
        qutrit (bool): find optimal frequency for 3-level readout.
            Default is False.
        **kw: f_span, n_freq, analyze, get_CLEAR_params,
            max_amp_difference (see body).

    Returns:
        Nothing.

    Raises:
        ValueError: if freqs is None and no previous f_RO exists.
    """
    # FIXME: Make proper analysis class for this (Ants, 04.12.2017)
    if not update:
        log.info("Does not automatically update the RO resonator "
                 "parameters. Set update=True if you want this!")
    if freqs is None:
        if self.f_RO() is not None:
            f_span = kw.pop('f_span', 20e6)
            fmin = self.f_RO() - f_span
            fmax = self.f_RO() + f_span
            n_freq = kw.pop('n_freq', 401)
            freqs = np.linspace(fmin, fmax, n_freq)
        else:
            raise ValueError("Unspecified frequencies for find_resonator_"
                             "frequency and no previous value exists")
    if np.any(freqs < 500e6):
        log.warning('Some of the values in the freqs array might be '
                    'too small. The units should be Hz.')
    if MC is None:
        MC = self.instr_mc.get_instr()

    # Measure the resonator response with the qubit prepared in each
    # relevant state.
    levels = ('g', 'e', 'f') if qutrit else ('g', 'e')
    self.measure_dispersive_shift(freqs, states=levels, analyze=False)
    labels = {l: '{}-spec'.format(l) + self.msmt_suffix for l in levels}
    m_a = {l: ma.MeasurementAnalysis(label=labels[l]) for l in levels}
    # Complex transmission trace per state: amplitude * exp(i*phase),
    # with the measured phase given in degrees.
    trace = {l: m_a[l].measured_values[0] *
                np.exp(1j * np.pi * m_a[l].measured_values[1] / 180.)
             for l in levels}
    # FIXME: make something that doesn't require a conditional branching
    if qutrit:
        # Maximize the summed pairwise distance between the three state
        # traces in the IQ plane.
        total_dist = np.abs(trace['e'] - trace['g']) + \
                     np.abs(trace['f'] - trace['g']) + \
                     np.abs(trace['f'] - trace['e'])
        fmax = freqs[np.argmax(total_dist)]
        # FIXME: just as debug plotting for now
        fig, ax = plt.subplots(2)
        ax[0].plot(freqs, np.abs(trace['g']), label='g')
        ax[0].plot(freqs, np.abs(trace['e']), label='e')
        ax[0].plot(freqs, np.abs(trace['f']), label='f')
        ax[0].set_ylabel('Amplitude')
        ax[0].legend()
        ax[1].plot(freqs, np.abs(trace['e'] - trace['g']), label='eg')
        ax[1].plot(freqs, np.abs(trace['f'] - trace['g']), label='fg')
        ax[1].plot(freqs, np.abs(trace['e'] - trace['f']), label='ef')
        ax[1].plot(freqs, total_dist, label='total distance')
        ax[1].set_xlabel("Freq. [Hz]")
        ax[1].set_ylabel('Distance in IQ plane')
        ax[0].set_title("Current RO_freq: {} Hz\nOptimal Freq: {} Hz".format(
            self.ro_freq(),
            fmax))
        plt.legend()
        m_a['g'].save_fig(fig, 'IQplane_distance')
        plt.show()
        if kw.get('analyze', True):
            sa.ResonatorSpectroscopy_v2(labels=[l for l in labels.values()])
    else:
        # Two-level case: maximize the g-e distance in the IQ plane.
        fmax = freqs[np.argmax(np.abs(trace['e'] - trace['g']))]
    log.info("Optimal RO frequency to distinguish states {}: {} Hz"
             .format(levels, fmax))
    if kw.get('analyze', True):
        SA = sa.ResonatorSpectroscopy(t_start=[m_a['g'].timestamp_string,
                                               m_a['e'].timestamp_string],
                                      options_dict=dict(simultan=True,
                                                        fit_options=dict(
                                                            model='hanger_with_pf'),
                                                        scan_label=''),
                                      do_fitting=True)
        # FIXME Nathan: remove 3 level dependency; fix this analysis:
        # if qutrit:
        #     SA2 = sa.ResonatorSpectroscopy(t_start=m_a['f'].timestamp_string,
        #                                    options_dict=dict(simultan=False,
        #                                    fit_options = dict(
        #                                        model='hanger_with_pf'),
        #                                    scan_label=''),
        #                                    do_fitting=True)
        if update:
            # FIXME Nathan: update parameters accordingly
            self.ro_freq(SA.f_RO if not qutrit else fmax)
            self.chi(SA.chi)
            self.f_RO_resonator(SA.f_RO_res)
            self.f_RO_purcell(SA.f_PF)
            self.RO_purcell_kappa(SA.kappa)
            self.RO_J_coupling(SA.J_)
        if kw.pop('get_CLEAR_params', False):
            if self.ro_CLEAR_segment_length is None:
                self.ro_CLEAR_segment_length = self.ro_length/10
            if kw.get('max_amp_difference', False) :
                # Ratio of the maximal height of the segments
                # to the base amplitude.
                max_diff = kw.pop('max_amp_difference')
            else:
                max_diff = 3
            self.ro_CLEAR_delta_amp_segment = \
                sim_CLEAR.get_CLEAR_amplitudes(
                    self.f_RO_purcell, self.f_RO_resonator,
                    self.ro_freq, self.RO_purcell_kappa,
                    self.RO_J_coupling, self.chi, 1, self.ro_length,
                    length_segments=self.ro_CLEAR_segment_length,
                    sigma=self.ro_sigma,
                    max_amp_diff=max_diff) * self.ro_amp
def measure_dispersive_shift(self, freqs, analyze=True, close_fig=True,
                             upload=True, states=('g','e'), prep_params=None):
    """
    Measure the resonator transmission versus readout frequency with the
    qubit prepared in each of the given states.

    For every state one active-reset preparation sequence is uploaded
    and the RO LO is swept over `freqs`, producing one measurement run
    named '<state>-spec' + msmt_suffix per state.

    Args:
        freqs: numpy array of readout frequencies in Hz.
        analyze: run a MeasurementAnalysis after the sweeps.
        close_fig: close the analysis figure.
        upload: upload the preparation sequence to the AWG.
        states: tuple of state labels to prepare (e.g. ('g', 'e', 'f')).
        prep_params: preparation parameters; defaults to
            self.preparation_params().

    Raises:
        ValueError: if freqs is None.
        AssertionError: if states is not a tuple.
    """
    if freqs is None:
        raise ValueError("Unspecified frequencies for "
                         "measure_resonator_spectroscopy")
    if np.any(freqs < 500e6):
        log.warning(('Some of the values in the freqs array '
                     'might be too small. The units should be Hz.'))
    if prep_params is None:
        prep_params = self.preparation_params()

    assert isinstance(states, tuple), \
        "states should be a tuple, not {}".format(type(states))

    self.prepare(drive='timedomain')
    MC = self.instr_mc.get_instr()
    for state in states:
        # Upload a sequence that prepares the qubit in `state`, then
        # sweep the RO LO frequency and record the averaged response.
        sq.single_state_active_reset(
            operation_dict=self.get_operation_dict(),
            qb_name=self.name,
            state=state, prep_params=prep_params, upload=upload)
        MC.set_sweep_function(self.swf_ro_freq_lo())
        MC.set_sweep_points(freqs)
        MC.set_detector_function(self.int_avg_det_spec)
        self.instr_pulsar.get_instr().start(exclude=[self.instr_uhf()])
        MC.run(name=f"{state}-spec" + self.msmt_suffix)
        self.instr_pulsar.get_instr().stop()

    if analyze:
        ma.MeasurementAnalysis(auto=True, close_fig=close_fig,
                               qb_name=self.name)
def calibrate_flux_pulse_timing(self, freqs=None, delays=None, MC=None,
                                analyze=False, update=False,**kw):
    """
    Flux pulse timing calibration.

    Does a 2D measurement of the type:

        -------|X180| ---------------- |RO|
                  <----->
               | fluxpulse |

    where the flux pulse delay and the drive pulse frequency of the
    X180 pulse are swept.

    Args:
        freqs: numpy array of drive frequencies in Hz for the flux pulse
            scope type experiment; defaults to a window around f_qubit.
        delays: numpy array with delays (in s) swept through as delay
            of the drive pulse; defaults to a window around the pulse.
        MC: measurement control object; defaults to self.instr_mc.
        analyze: bool; if True, the measured data gets analyzed (for
            detailed documentation of the analysis see the
            FluxPulse_timing_calibration class).
        update: bool; if True, the AWG channel delay gets corrected,
            such that single qubit gates and flux pulses have no
            relative delay.

    Keyword Arguments:
        pulse_length: flux pulse length in s (default 100e-9).
        amplitude: flux pulse amplitude in V (default 0.5).

    Returns:
        fitted_delay: float; only returned if analyze is True.
    """
    if MC is None:
        MC = self.instr_mc.get_instr()

    channel = self.flux_pulse_channel()
    # Configure the flux pulse used for the calibration.
    pulse_length = kw.pop('pulse_length', 100e-9)
    self.flux_pulse_length(pulse_length)
    amplitude = kw.pop('amplitude', 0.5)
    self.flux_pulse_amp(amplitude)

    measurement_string = 'Flux_pulse_delay_calibration_{}'.format(self.name)

    if freqs is None:
        freqs = self.f_qubit() + np.linspace(-50e6, 50e6, 20, endpoint=False)
    if delays is None:
        delays = np.linspace(-100e-9, pulse_length + 100e-9, 40, endpoint=False)

    self.prepare(drive='timedomain')

    # Hard sweep: drive-pulse delay; soft sweep: drive frequency.
    detector_fun = self.int_avg_det
    s1 = awg_swf.Fluxpulse_scope_swf(self)
    s2 = awg_swf.Fluxpulse_scope_drive_freq_sweep(self)
    MC.set_sweep_function(s1)
    MC.set_sweep_points(delays)
    MC.set_sweep_function_2D(s2)
    MC.set_sweep_points_2D(freqs)
    MC.set_detector_function(detector_fun)
    MC.run_2D(measurement_string)

    if analyze:
        flux_pulse_timing_ma = ma.FluxPulse_timing_calibration(
            label=measurement_string,
            flux_pulse_length=pulse_length,
            qb_name=self.name,
            auto=True,
            plot=True)
        if update:
            # Fold the fitted delay into the AWG channel delay.
            new_delay = self.instr_pulsar.get_instr().get('{}_delay'.format(channel)) + \
                        flux_pulse_timing_ma.fitted_delay
            self.instr_pulsar.get_instr().set('{}_delay'.format(channel), new_delay)
            print('updated delay of channel {}.'.format(channel))
        else:
            log.warning('Not updated, since update was disabled.')
        return flux_pulse_timing_ma.fitted_delay
    else:
        return
def calibrate_flux_pulse_frequency(self, MC=None, thetas=None, ampls=None,
                                   analyze=False,
                                   plot=False,
                                   ampls_bidirectional=False,
                                   **kw):
    """
    Flux pulse frequency calibration.

    Does a 2D measurement of the type:

               X90_separation
        <------------------------->
        |X90| --------------- |X90| --- |RO|
              |  fluxpulse  |

    where the flux pulse amplitude and the angle of the second X90
    pulse are swept.

    Args:
        MC: measurement control object; defaults to self.instr_mc.
        thetas: numpy array with angles (in rad) for the Ramsey-type
            second X90 pulse; defaults to 8 points in [0, 2*pi).
        ampls: numpy array with flux pulse amplitudes (in V);
            defaults to np.linspace(0, 1, 21).
        analyze: bool; if True, the measured data gets analyzed and a
            qubit frequency model is fitted (ma.fit_qubit_frequency()).
        plot: bool; plot the frequency fit (bidirectional case only).
        ampls_bidirectional: bool, for use if the qubit is parked at the
            sweet spot. If True, the flux pulse amplitudes are swept to
            positive and negative voltages and the frequency model fit
            is performed on the combined dataset.

    Keyword Arguments:
        X90_separation, distorted, distortion_dict, pulse_length,
        pulse_delay, E_c, f_max, V_per_phi0, dac_sweet_spot.

    Returns:
        The lmfit result of the frequency model fit, when analyze and
        ampls_bidirectional are both True.
    """
    if MC is None:
        MC = self.instr_mc.get_instr()

    channel = self.flux_pulse_channel()
    # NOTE(review): clock_rate appears unused, but the call is kept in
    # case querying the pulsar clock has side effects — confirm.
    clock_rate = MC.station.pulsar.clock(channel)

    X90_separation = kw.pop('X90_separation', 200e-9)
    distorted = kw.pop('distorted', False)
    distortion_dict = kw.pop('distortion_dict', None)

    pulse_length = kw.pop('pulse_length', 30e-9)
    self.flux_pulse_length(pulse_length)
    pulse_delay = kw.pop('pulse_delay', 50e-9)
    self.flux_pulse_delay(pulse_delay)

    if thetas is None:
        thetas = np.linspace(0, 2*np.pi, 8, endpoint=False)
    if ampls is None:
        ampls = np.linspace(0, 1, 21)

    self.prepare(drive='timedomain')

    detector_fun = self.int_avg_det
    s1 = awg_swf.Ramsey_interleaved_fluxpulse_sweep(
        self,
        X90_separation=X90_separation,
        distorted=distorted,
        distortion_dict=distortion_dict)
    s2 = awg_swf.Ramsey_fluxpulse_ampl_sweep(self, s1)
    MC.set_sweep_function(s1)
    MC.set_sweep_points(thetas)
    MC.set_sweep_function_2D(s2)
    MC.set_sweep_points_2D(ampls)
    MC.set_detector_function(detector_fun)
    measurement_string_1 = 'Flux_pulse_frequency_calibration_{}_1'.format(self.name)
    MC.run_2D(measurement_string_1)

    if ampls_bidirectional:
        # Repeat the sweep with negated amplitudes.
        MC.set_sweep_function(s1)
        MC.set_sweep_points(thetas)
        MC.set_sweep_function_2D(s2)
        MC.set_sweep_points_2D(-ampls)
        MC.set_detector_function(detector_fun)
        measurement_string_2 = 'Flux_pulse_frequency_calibration_{}_2'.format(self.name)
        MC.run_2D(measurement_string_2)

    if analyze:
        flux_pulse_ma_1 = ma.Fluxpulse_Ramsey_2D_Analysis(
            label=measurement_string_1,
            X90_separation=X90_separation,
            flux_pulse_length=pulse_length,
            qb_name=self.name,
            auto=False)
        flux_pulse_ma_1.fit_all(extrapolate_phase=True, plot=True)
        phases = flux_pulse_ma_1.fitted_phases
        ampls = flux_pulse_ma_1.sweep_points_2D
        if ampls_bidirectional:
            flux_pulse_ma_2 = ma.Fluxpulse_Ramsey_2D_Analysis(
                label=measurement_string_2,
                X90_separation=X90_separation,
                flux_pulse_length=pulse_length,
                qb_name=self.name,
                auto=False)
            flux_pulse_ma_2.fit_all(extrapolate_phase=True, plot=True)
            # BUG FIX: np.concatenate takes a single sequence of arrays;
            # the previous two-positional-argument form passed the second
            # array as the `axis` argument and raised a TypeError.
            # The [-1:0:-1] slice reverses the negative-amplitude data,
            # presumably dropping its first (zero-amplitude) point to
            # avoid duplication — TODO confirm.
            phases = np.concatenate(
                (flux_pulse_ma_2.fitted_phases[-1:0:-1],
                 flux_pulse_ma_1.fitted_phases))
            ampls = np.concatenate(
                (flux_pulse_ma_2.sweep_points_2D[-1:0:-1],
                 flux_pulse_ma_1.sweep_points_2D))

        instrument_settings = flux_pulse_ma_1.data_file['Instrument settings']
        qubit_attrs = instrument_settings[self.name].attrs
        E_c = kw.pop('E_c', qubit_attrs.get('E_c', 0.3e9))
        f_max = kw.pop('f_max', qubit_attrs.get('f_max', self.f_qubit()))
        V_per_phi0 = kw.pop('V_per_phi0',
                            qubit_attrs.get('V_per_phi0', 1.))
        dac_sweet_spot = kw.pop('dac_sweet_spot',
                                qubit_attrs.get('dac_sweet_spot', 0))

        # Convert the fitted Ramsey phases to frequencies.
        freqs = f_max - phases/(2*np.pi*pulse_length)
        fit_res = ma.fit_qubit_frequency(ampls, freqs, E_c=E_c, f_max=f_max,
                                         V_per_phi0=V_per_phi0,
                                         dac_sweet_spot=dac_sweet_spot
                                         )
        print(fit_res.fit_report())

        if plot and ampls_bidirectional:
            fit_res.plot()
        if ampls_bidirectional:
            return fit_res
def calibrate_CPhase_dynamic_phases(self,
                                    flux_pulse_length=None,
                                    flux_pulse_amp=None,
                                    flux_pulse_delay=None,
                                    thetas=None,
                                    X90_separation=None,
                                    flux_pulse_channel=None,
                                    MC=None, label=None,
                                    analyze=True, update=True, **kw):
    """
    CPhase dynamic phase calibration.

    Does a measurement of the type:

               X90_separation
        <------------------------->
        |X90| --------------- |X90| --- |RO|
              |  fluxpulse  |

    where the angle of the second X90 pulse is swept for the flux pulse
    amplitude in [0, flux_pulse_amp].

    Args:
        flux_pulse_length: flux pulse length in s; defaults to the
            self.flux_pulse_length() parameter.
        flux_pulse_amp: flux pulse amplitude in V; defaults to the
            self.flux_pulse_amp() parameter.
        flux_pulse_delay: flux pulse delay in s; defaults to the
            self.flux_pulse_delay() parameter.
        thetas: numpy array with angles (in rad) for the second X90
            pulse; defaults to np.linspace(0, 4*np.pi, 16).
        X90_separation: separation of the two X90 pulses in s.
        flux_pulse_channel: AWG channel of the flux pulse; defaults to
            the self.flux_pulse_channel() parameter.
        MC: measurement control object; defaults to self.instr_mc.
        label: measurement label; auto-generated if None.
        analyze: bool; if True, the measured data gets analyzed with
            ma.Dynamic_phase_Analysis.
        update: bool; if True, store the fitted phase in
            self.dynamic_phase.

    Returns:
        The fitted dynamic phase in degrees if analyze, else None.
    """
    if MC is None:
        MC = self.instr_mc.get_instr()

    # BUG FIX (messages): the concatenated warning strings below were
    # missing a separating space ('the value' + 'in the').
    if flux_pulse_amp is None:
        flux_pulse_amp = self.flux_pulse_amp()
        log.warning('flux_pulse_amp is not specified. Using the value '
                    'in the flux_pulse_amp parameter.')
    if flux_pulse_length is None:
        flux_pulse_length = self.flux_pulse_length()
        log.warning('flux_pulse_length is not specified. Using the value '
                    'in the flux_pulse_length parameter.')
    if flux_pulse_delay is None:
        flux_pulse_delay = self.flux_pulse_delay()
        log.warning('flux_pulse_delay is not specified. Using the value '
                    'in the flux_pulse_delay parameter.')
    if flux_pulse_channel is None:
        flux_pulse_channel = self.flux_pulse_channel()
        log.warning('flux_pulse_channel is not specified. Using the value '
                    'in the flux_pulse_channel parameter.')
    if thetas is None:
        thetas = np.linspace(0, 4*np.pi, 16)
        # BUG FIX (message): 'thata' -> 'theta'.
        print('Sweeping over phases theta=np.linspace(0, 4*np.pi, 16).')
    if label is None:
        label = 'Dynamic_phase_measurement_{}_{}_filter'.format(
            self.name, self.flux_pulse_channel())

    self.measure_dynamic_phase(flux_pulse_length=flux_pulse_length,
                               flux_pulse_amp=flux_pulse_amp,
                               flux_pulse_channel=flux_pulse_channel,
                               flux_pulse_delay=flux_pulse_delay,
                               X90_separation=X90_separation,
                               thetas=thetas,
                               MC=MC,
                               label=label)
    if analyze:
        MA = ma.Dynamic_phase_Analysis(
            TwoD=True,
            flux_pulse_amp=flux_pulse_amp,
            flux_pulse_length=flux_pulse_length,
            qb_name=self.name, **kw)
        dynamic_phase = MA.dyn_phase
        print('fitted dynamic phase on {}: {:0.3f} [deg]'.format(self.name,
                                                                 dynamic_phase))
        if update:
            try:
                self.dynamic_phase(dynamic_phase)
            except Exception:
                log.warning('Could not update '
                            '{}.dynamic_phase().'.format(self.name))
        return dynamic_phase
    else:
        return
def measure_flux_pulse_scope(self, freqs, delays, cz_pulse_name,
                             analyze=True, cal_points=True,
                             upload=True, label=None,
                             n_cal_points_per_state=2, cal_states='auto',
                             prep_params=None, exp_metadata=None):
    '''
    Flux pulse scope measurement used to determine the shape of flux
    pulses. Set up as a 2D measurement (delay and drive pulse frequency
    are being swept).

    Pulse sequence:
                  <- delay ->
         |    ------------- |X180|  ---------------------  |RO|
         |    ---   | ---- fluxpulse ----- |

    Args:
        freqs (numpy array): array of drive frequencies in Hz.
        delays (numpy array): array of delays (in s) of the drive pulse
            w.r.t. the flux pulse.
        cz_pulse_name (str): name of the flux (CZ) pulse operation.
        analyze (bool): run MultiQubit_TimeDomain_Analysis afterwards
            (falls back to a plain 2D MeasurementAnalysis on failure).
        cal_points (bool): append calibration segments.
        upload (bool): upload the sequence to the AWG.
        label (str): measurement label; auto-generated if None.
        n_cal_points_per_state (int): calibration segments per state.
        cal_states: calibration states specification, or 'auto'.
        prep_params (dict): preparation parameters; defaults to
            self.preparation_params().
        exp_metadata (dict): extra metadata stored with the experiment.

    Returns:
        None
    '''
    if label is None:
        label = 'Flux_scope_{}'.format(self.name)
    MC = self.instr_mc.get_instr()
    self.prepare(drive='timedomain')

    if cal_points:
        cal_states = CalibrationPoints.guess_cal_states(cal_states)
        cp = CalibrationPoints.single_qubit(
            self.name, cal_states, n_per_state=n_cal_points_per_state)
    else:
        cp = None
    if prep_params is None:
        prep_params = self.preparation_params()

    seq, sweep_points, sweep_points_2D = \
        fsqs.fluxpulse_scope_sequence(
            delays=delays, freqs=freqs, qb_name=self.name,
            operation_dict=self.get_operation_dict(),
            cz_pulse_name=cz_pulse_name, cal_points=cp,
            prep_params=prep_params, upload=False)
    MC.set_sweep_function(awg_swf.SegmentHardSweep(
        sequence=seq, upload=upload, parameter_name='Delay', unit='s'))
    MC.set_sweep_points(sweep_points)
    # The soft sweep offsets the drive LO by -ge_mod_freq so that the
    # effective drive frequency equals the requested sweep value.
    MC.set_sweep_function_2D(swf.Offset_Sweep(
        self.instr_ge_lo.get_instr().frequency,
        -self.ge_mod_freq(),
        name='Drive frequency',
        parameter_name='Drive frequency', unit='Hz'))
    MC.set_sweep_points_2D(sweep_points_2D)
    MC.set_detector_function(self.int_avg_det)

    if exp_metadata is None:
        exp_metadata = {}
    exp_metadata.update({'sweep_points_dict': {self.name: delays},
                         'sweep_points_dict_2D': {self.name: freqs},
                         'use_cal_points': cal_points,
                         'preparation_params': prep_params,
                         'cal_points': repr(cp),
                         'rotate': cal_points,
                         'data_to_fit': {self.name: 'pe'},
                         "sweep_name": "Delay",
                         "sweep_unit": "s"})
    MC.run_2D(label, exp_metadata=exp_metadata)

    if analyze:
        try:
            tda.MultiQubit_TimeDomain_Analysis(qb_names=[self.name],
                                               options_dict=dict(TwoD=True))
        except Exception:
            ma.MeasurementAnalysis(TwoD=True)
def measure_flux_pulse_scope_nzcz_alpha(
        self, nzcz_alphas, delays, CZ_pulse_name=None,
        cal_points=True, upload=True, upload_all=True,
        spacing=30e-9, MC=None):
    """
    Flux-pulse-scope style 2D measurement sweeping the drive-pulse delay
    (hard sweep) against the NZCZ alpha parameter (soft sweep), followed
    by a 2D MeasurementAnalysis.

    Args:
        nzcz_alphas: values of the NZCZ alpha parameter (2nd sweep dim).
        delays: drive-pulse delays in s (1st sweep dim).
        CZ_pulse_name: name of the CZ pulse operation.
        cal_points: if True, 4 extra segments are appended after the
            delay sweep points for calibration.
        upload: upload the sequence (handled by the soft sweep function).
        upload_all: passed to the hard sweep function.
        spacing: pulse spacing in s.
        MC: measurement control object; defaults to self.instr_mc.
    """
    if MC is None:
        MC = self.instr_mc.get_instr()

    self.prepare(drive='timedomain')

    if cal_points:
        # Append 4 calibration sweep points after the delays, spaced by
        # the last delay step.
        step = np.abs(delays[-1] - delays[-2])
        sweep_points = np.concatenate(
            [delays, [delays[-1]+step, delays[-1]+2*step,
                      delays[-1]+3*step, delays[-1]+4*step]])
    else:
        sweep_points = delays

    s1 = awg_swf.Fluxpulse_scope_nzcz_alpha_hard_swf(
        qb_name=self.name, nzcz_alpha=nzcz_alphas[0],
        CZ_pulse_name=CZ_pulse_name,
        operation_dict=self.get_operation_dict(),
        cal_points=cal_points, upload=False,
        upload_all=upload_all, spacing=spacing)
    s2 = awg_swf.Fluxpulse_scope_nzcz_alpha_soft_sweep(
        s1, upload=upload)
    MC.set_sweep_function(s1)
    MC.set_sweep_points(sweep_points)
    MC.set_sweep_function_2D(s2)
    MC.set_sweep_points_2D(nzcz_alphas)
    MC.set_detector_function(self.int_avg_det)
    MC.run_2D('Flux_scope_nzcz_alpha' + self.msmt_suffix)
    ma.MeasurementAnalysis(TwoD=True)
def add_CZ_pulse(qbc, qbt):
    """
    Add an 'upCZ <qbt>' flux pulse operation and its pulse parameters to
    the control qubit.

    Args:
        qbc: Control qubit. A QudevTransmon object corresponding to the
            qubit that we apply the flux pulse on.
        qbt: Target qubit. A QudevTransmon object corresponding to the
            qubit we induce the conditional phase on.

    Raises:
        ValueError: if the operation already exists on qbc.
    """
    # add flux pulse parameters
    op_name = 'upCZ ' + qbt.name
    ps_name = 'upCZ_' + qbt.name
    # Direct membership test instead of np.any over a list of equality
    # comparisons; guard clause avoids indenting all the additions.
    if op_name in qbc.get_operation_dict():
        # do not try to add it again if operation already exists
        raise ValueError('Operation {} already exists.'.format(op_name))

    qbc.add_operation(op_name)
    qbc.add_pulse_parameter(op_name, ps_name + '_cz_target_qb',
                            'cz_target_qb',
                            initial_value=qbt.name,
                            vals=vals.Enum(qbt.name))
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_type', 'pulse_type',
                            initial_value='BufferedCZPulse',
                            vals=vals.Enum('BufferedSquarePulse',
                                           'BufferedCZPulse',
                                           'NZBufferedCZPulse',
                                           'BufferedCZPulseEffectiveTime'))
    qbc.add_pulse_parameter(op_name, ps_name + '_channel', 'channel',
                            initial_value='', vals=vals.Strings())
    qbc.add_pulse_parameter(op_name, ps_name + '_aux_channels_dict',
                            'aux_channels_dict',
                            initial_value={}, vals=vals.Dict())
    qbc.add_pulse_parameter(op_name, ps_name + '_amplitude', 'amplitude',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_frequency', 'frequency',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_phase', 'phase',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_length',
                            'pulse_length',
                            initial_value=0, vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_alpha', 'alpha',
                            initial_value=1, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_buffer_length_start',
                            'buffer_length_start', initial_value=10e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_buffer_length_end',
                            'buffer_length_end', initial_value=10e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_extra_buffer_aux_pulse',
                            'extra_buffer_aux_pulse', initial_value=5e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_delay',
                            'pulse_delay',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_basis_rotation',
                            'basis_rotation', initial_value={},
                            vals=vals.Dict())
    qbc.add_pulse_parameter(op_name, ps_name + '_gaussian_filter_sigma',
                            'gaussian_filter_sigma', initial_value=2e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_chevron_func',
                            'chevron_func', initial_value=None,
                            vals=vals.Callable(),
                            docstring="Callable required when using "
                                      "effective time CZ pulse to "
                                      "straighten Chevron.")
def add_CZ_MG_pulse(qbc, qbt):
    """
    Add a 'CZ <qbt>' Martinis-Geller flux pulse operation and its pulse
    parameters to the control qubit.

    Args:
        qbc: Control qubit. A QudevTransmon object corresponding to the
            qubit that we apply the flux pulse on.
        qbt: Target qubit. A QudevTransmon object corresponding to the
            qubit we induce the conditional phase on.

    Raises:
        ValueError: if the operation already exists on qbc.
    """
    # add flux pulse parameters
    op_name = 'CZ ' + qbt.name
    ps_name = 'CZ_' + qbt.name
    # Direct membership test instead of np.any over a list of equality
    # comparisons; guard clause avoids indenting all the additions.
    if op_name in qbc.get_operation_dict():
        # do not try to add it again if operation already exists
        raise ValueError('Operation {} already exists.'.format(op_name))

    qbc.add_operation(op_name)
    qbc.add_pulse_parameter(op_name, ps_name + '_cz_target_qb',
                            'cz_target_qb',
                            initial_value=qbt.name,
                            vals=vals.Enum(qbt.name))
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_type', 'pulse_type',
                            initial_value='NZMartinisGellarPulse',
                            vals=vals.Enum('NZMartinisGellarPulse',
                                           'BufferedCZPulse'))
    qbc.add_pulse_parameter(op_name, ps_name + '_channel', 'channel',
                            initial_value='', vals=vals.Strings())
    qbc.add_pulse_parameter(op_name, ps_name + '_aux_channels_dict',
                            'aux_channels_dict',
                            initial_value={}, vals=vals.Dict())
    qbc.add_pulse_parameter(op_name, ps_name + '_theta_f', 'theta_f',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_lambda_2', 'lambda_2',
                            initial_value=0, vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_qbc_freq', 'qbc_freq',
                            initial_value=qbc.ge_freq(),
                            vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_qbt_freq', 'qbt_freq',
                            initial_value=qbt.ge_freq(),
                            vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_length',
                            'pulse_length',
                            initial_value=0, vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_alpha', 'alpha',
                            initial_value=1, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_buffer_length_start',
                            'buffer_length_start', initial_value=10e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_buffer_length_end',
                            'buffer_length_end', initial_value=10e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_extra_buffer_aux_pulse',
                            'extra_buffer_aux_pulse', initial_value=5e-9,
                            vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_anharmonicity',
                            'anharmonicity',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_J', 'J',
                            initial_value=0, vals=vals.Numbers())
    qbc.add_pulse_parameter(op_name, ps_name + '_basis_rotation',
                            'basis_rotation', initial_value={},
                            vals=vals.Dict())
    qbc.add_pulse_parameter(op_name, ps_name + '_dv_dphi', 'dv_dphi',
                            initial_value=0, vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_loop_asym', 'loop_asym',
                            initial_value=0, vals=vals.Numbers(0))
    qbc.add_pulse_parameter(op_name, ps_name + '_wave_generation_func',
                            'wave_generation_func', initial_value=None,
                            vals=vals.Callable())
    qbc.add_pulse_parameter(op_name, ps_name + '_pulse_delay',
                            'pulse_delay',
                            initial_value=0, vals=vals.Numbers())
|
"""
Serve test runner pages and included JavaScript files on a local port.
"""
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
import threading
import re
import pkg_resources
import os.path
import logging
import json
import time
import mimetypes
import shutil
import socket
from StringIO import StringIO
from abc import ABCMeta, abstractmethod
from js_test_tool.coverage import SrcInstrumenter, SrcInstrumenterError, CoverageData
LOGGER = logging.getLogger(__name__)
class TimeoutError(Exception):
    """Raised when the server timed out while waiting."""
class DuplicateSuiteNameError(Exception):
    """Raised when two or more suites share the same name."""
class RequestRangeError(Exception):
    """
    Raised when a client requested an invalid byte range
    (e.g. starting byte > ending byte).
    """
class SuitePageServer(ThreadingMixIn, HTTPServer):
    """
    Serve test suite pages and included JavaScript files.
    """

    protocol_version = 'HTTP/1.1'

    # Request response timeout (seconds)
    timeout = 5

    # Amount of time (in seconds) to wait for clients to POST
    # coverage info back to the server before timing out.
    COVERAGE_TIMEOUT = 2.0

    # Amount of time (in seconds) to wait between checks that we
    # have all the coverage info.
    COVERAGE_WAIT_TIME = 0.1

    # The `CoverageData` instance used by the server to store
    # coverage data received from the test suites.
    # Since `CoverageData` is thread-safe, it is okay for
    # other threads to write to it asynchronously.
    # Remains None when coverage is not being collected.
    coverage_data = None

    def __init__(self, suite_desc_list, suite_renderer, jscover_path=None):
        """
        Initialize the server to serve test runner pages
        and dependencies described by `suite_desc_list`
        (list of `SuiteDescription` instances).

        `jscover_path` is the path to the JSCover JAR file.  If not
        specified, no coverage information will be collected.

        Use `suite_renderer` (a `SuiteRenderer` instance) to
        render the test suite pages.

        Raises a `DuplicateSuiteNameError` if two or more suites
        share the same name.
        """
        # Store dependencies
        self.desc_dict = self._suite_dict_from_list(suite_desc_list)
        self.renderer = suite_renderer
        self._jscover_path = jscover_path

        # Create a dict for source instrumenter services
        # (one for each suite description)
        self.src_instr_dict = {}

        # Using port 0 assigns us an unused port
        address = ('127.0.0.1', 0)
        HTTPServer.__init__(self, address, SuitePageRequestHandler)

    def start(self):
        """
        Start serving pages on an open local port.
        """
        server_thread = threading.Thread(target=self.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        # If we're collecting coverage information
        if self._jscover_path is not None:

            # Create an object to store coverage data we receive
            self.coverage_data = CoverageData()

            # Start each SrcInstrumenter instance if we know where JSCover is
            for suite_name, desc in self.desc_dict.iteritems():

                # Inform the coverage data that we expect this source
                # (report it as 0% if no info received).
                for rel_path in desc.src_paths():
                    self.coverage_data.add_expected_src(desc.root_dir(), rel_path)

                # Create an instrumenter serving files
                # in the suite description root directory
                instr = SrcInstrumenter(desc.root_dir(),
                                        tool_path=self._jscover_path)

                # Start the instrumenter service
                instr.start()

                # Associate the instrumenter with its suite description
                self.src_instr_dict[suite_name] = instr

        else:
            self.src_instr_dict = {}

    def stop(self):
        """
        Stop the server and free the port.
        """
        # Stop each instrumenter service that we started
        for instr in self.src_instr_dict.values():
            instr.stop()

        # Stop the page server and free the port
        self.shutdown()
        self.socket.close()

    def suite_url_list(self):
        """
        Return a list of URLs (unicode strings), where each URL
        is a test suite page containing the JS code to run
        the JavaScript tests.
        """
        return [self.root_url() + u'suite/{}'.format(suite_name)
                for suite_name in self.desc_dict.keys()]

    def root_url(self):
        """
        Return the root URL (including host and port) for the server
        as a unicode string.
        """
        host, port = self.server_address
        return u"http://{}:{}/".format(host, port)

    def all_coverage_data(self):
        """
        Returns a `CoverageData` instance containing all coverage data
        received from running the tests.

        Blocks until all suites have reported coverage data.  If it
        times out waiting for all data, raises a `TimeoutError`.

        If we are not collecting coverage, returns None.
        """
        if self.coverage_data is not None:
            self._block_until(self._has_all_coverage)
            return self.coverage_data
        else:
            return None

    def _block_until(self, success_func):
        """
        Block until `success_func` returns True.
        `success_func` should be a lambda with no argument.

        Raises a `TimeoutError` after `COVERAGE_TIMEOUT` seconds.
        """
        # Remember when we started
        start_time = time.time()

        # Until we are successful
        while not success_func():

            # See if we've timed out
            if time.time() - start_time > self.COVERAGE_TIMEOUT:
                raise TimeoutError()

            # Wait a little bit before checking again
            time.sleep(self.COVERAGE_WAIT_TIME)

    def _has_all_coverage(self):
        """
        Returns True if and only if every suite
        has coverage information.
        """
        # Retrieve the names of each suite for which coverage
        # information was reported.
        suite_name_list = self.coverage_data.suite_name_list()

        # Check that we have an entry for every suite
        # (This is not the most efficient way to do this --
        # if it becomes a bottleneck, we can revisit.)
        return (sorted(suite_name_list) == sorted(self.desc_dict.keys()))

    @classmethod
    def _suite_dict_from_list(cls, suite_desc_list):
        """
        Given a list of `SuiteDescription` instances, construct
        a dictionary mapping suite names to the instances.

        Raises a `DuplicateSuiteNameError` naming the offending
        suites if two or more of them share a name.
        """
        suite_dict = {
            suite.suite_name(): suite
            for suite in suite_desc_list
        }

        # Report *which* names are repeated, rather than just that a
        # repeat occurred, so the error is actionable for the user.
        duplicates = cls._duplicates(
            [suite.suite_name() for suite in suite_desc_list])

        if len(duplicates) > 0:
            msg = "Duplicate suite name(s): {}".format(",".join(duplicates))
            raise DuplicateSuiteNameError(msg)

        return suite_dict

    @classmethod
    def _duplicates(cls, name_list):
        """
        Given a list of strings, return the set of duplicates in the list.
        """
        seen = set()
        duplicates = set()

        for name in name_list:

            # Check if we've already seen the name; if so, record it
            # as a duplicate
            if name in seen:
                duplicates.add(name)

            # Otherwise remember that we've seen it once
            else:
                seen.add(name)

        return duplicates
class BasePageHandler(object):
    """
    Abstract base class for page handlers.  A handler decides whether
    it can serve a given URL path; if it can, it produces the page
    contents and the MIME type to send with them.
    """

    __metaclass__ = ABCMeta

    # HTTP methods handled by this class.
    # The default is to handle only GET requests.
    HTTP_METHODS = ["GET"]

    # Subclasses override this to provide a compiled `re` pattern
    # that matches the URL paths this handler serves.
    PATH_REGEX = None

    def page_contents(self, path, method, content):
        """
        Return a `(content, mime_type)` tuple if this handler can
        serve `path`; otherwise return `(None, None)`.

        In the result, `content` is a file-like object with the page
        contents and `mime_type` is the value to send as the
        Content-Type header.

        `method` is the HTTP method used to load the page
        (e.g. "GET" or "POST"); the `content` argument is the body
        of the HTTP request.
        """
        # Decline requests whose HTTP method we do not handle.
        if method not in self.HTTP_METHODS:
            return (None, None)

        match = self.PATH_REGEX.match(path)

        # Decline paths that do not match this handler's pattern.
        if match is None:
            return (None, None)

        # Attempt to load the page, keeping the original call order
        # (contents first, then MIME type).
        captured = match.groups()
        page = self.load_page(method, content, *captured)
        mime_type = self.mime_type(method, content, *captured)
        return (page, mime_type)

    @abstractmethod
    def load_page(self, method, content, *args):
        """
        Subclasses override this to load the page.

        `args` is the tuple of groups captured by `PATH_REGEX`.
        `method` is the HTTP method used to load the page
        (e.g. "GET" or "POST"); `content` is the body of the
        HTTP request.

        Returns a file-like object with the page contents, or None
        if the page cannot be loaded (e.g. a missing file).
        """
        pass

    @abstractmethod
    def mime_type(self, method, content, *args):
        """
        Subclasses override this to return the MIME type for the page.
        Arguments have the same meaning as in `load_page()`.
        """
        pass

    @staticmethod
    def guess_mime_type(url):
        """
        Guess the MIME type of `url` from its file extension,
        defaulting to text/plain when the extension is unknown.
        """
        guessed, _ = mimetypes.guess_type(url)
        return guessed if guessed is not None else 'text/plain'

    @staticmethod
    def safe_str_buffer(content):
        """
        Return a file-like object with the contents of `content`,
        first encoding unicode input as a UTF-8 bytestring.
        """
        data = content.encode('utf-8') if isinstance(content, unicode) else content
        return StringIO(data)
class SuitePageHandler(BasePageHandler):
    """
    Handle requests for paths of the form `/suite/SUITE_NAME`, where
    `SUITE_NAME` is the name of the test suite description.
    Serves the suite runner page.
    """

    # Match /suite/NAME and /suite/NAME/, ignoring GET parameters.
    PATH_REGEX = re.compile(r'^/suite/([^?/]+)/?(\?.*)?$')

    def __init__(self, renderer, desc_dict):
        """
        Initialize the `SuitePageHandler` to use `renderer`
        (a `SuiteRenderer` instance) and `desc_dict` (a dict
        mapping suite names to `SuiteDescription` instances).
        """
        super(SuitePageHandler, self).__init__()
        self._renderer = renderer
        self._desc_dict = desc_dict

    def load_page(self, method, content, *args):
        """
        Render the suite runner page, or return None when the
        suite name is unknown.
        """
        # The only capture group is the suite name.
        suite_name = args[0]

        description = self._desc_dict.get(suite_name)

        # Unknown suite: decline the request.
        if description is None:
            return None

        rendered = self._renderer.render_to_string(suite_name, description)
        return self.safe_str_buffer(rendered)

    def mime_type(self, method, content, *args):
        """
        The suite runner page is always HTML.
        """
        return 'text/html'
class RunnerPageHandler(BasePageHandler):
    """
    Handle requests for paths of the form '/runner/RUNNER_PATH', where
    `RUNNER_PATH` is a page that runs JavaScript tests.
    """

    # Handle requests to /runner/ pages, ignoring GET parameters.
    PATH_REGEX = re.compile(r'^/runner/([^\?]+).*$')

    def load_page(self, method, content, *args):
        """
        Load the runner file from this package's resources.

        Returns a file-like object with the resource contents,
        or None if the resource could not be loaded.
        """
        # Only arg should be the relative path within the package
        rel_path = os.path.join('runner', args[0])

        # Attempt to load the package resource
        try:
            content = pkg_resources.resource_string('js_test_tool', rel_path)

        # Catch Exception rather than BaseException so that
        # process-control exceptions such as KeyboardInterrupt and
        # SystemExit propagate instead of being swallowed as a 404.
        except Exception:
            return None

        # If we successfully loaded it, return the content
        # as a file-like object.
        else:
            return self.safe_str_buffer(content)

    def mime_type(self, method, content, *args):
        """
        Guess the MIME type from the requested file's extension.
        """
        return self.guess_mime_type(args[0])
class DependencyPageHandler(BasePageHandler):
    """
    Load dependencies required by the test suite description.
    """

    # Capture the suite name and the dependency's relative path,
    # ignoring any GET parameters in the URL.
    PATH_REGEX = re.compile('^/suite/([^/]+)/include/([^?]+).*$')

    # MIME types (in addition to text/* that we serve as UTF-8 encoded)
    TEXT_MIME_TYPES = [
        'application/json',
        'application/javascript',
        'application/ecmascript',
        'application/xml',
    ]

    def __init__(self, desc_dict):
        """
        Initialize the dependency page handler to serve dependencies
        specified by `desc_dict` (a dict mapping suite names to
        `SuiteDescription` instances).
        """
        super(DependencyPageHandler, self).__init__()
        self._desc_dict = desc_dict

    def load_page(self, method, content, *args):
        """
        Open the test suite dependency file named in the URL, resolved
        relative to the suite description's root directory.

        Returns an open file handle, or None if the dependency is not
        listed in the suite description or cannot be opened.
        """
        # Interpret the arguments (from the regex)
        suite_name, rel_path = args

        located = self._dependency_path(suite_name, rel_path)

        # Not one of our listed dependencies: decline the request.
        if located is None:
            return None

        try:
            return open(located, 'rb')

        # Listed but missing or unreadable: decline the request.
        except IOError:
            return None

    def mime_type(self, method, content, *args):
        """
        Guess the MIME type from the dependency's file extension.
        """
        _, rel_path = args
        return self.guess_mime_type(rel_path)

    def _dependency_path(self, suite_name, path):
        """
        Return the full filesystem path to the dependency, if it is
        listed in the test suite description named `suite_name`;
        otherwise return None.
        """
        suite_desc = self._desc_dict.get(suite_name)

        # Unknown suite: nothing to serve.
        if suite_desc is None:
            return None

        # Gather every path the description declares.
        listed = (suite_desc.lib_paths() +
                  suite_desc.src_paths() +
                  suite_desc.spec_paths() +
                  suite_desc.fixture_paths())

        # Only serve paths the description explicitly lists.
        if path not in listed:
            return None

        return os.path.join(suite_desc.root_dir(), path)
class InstrumentedSrcPageHandler(BasePageHandler):
    """
    Instrument the JavaScript source file to collect coverage information.
    """

    PATH_REGEX = re.compile('^/suite/([^/]+)/include/([^?]+).*$')

    def __init__(self, desc_dict, instr_dict):
        """
        Initialize the handler to serve instrumented versions of the
        sources specified by `desc_dict` (a dict mapping suite names
        to `SuiteDescription` instances).

        `instr_dict` is a dict mapping suite names to
        `SrcInstrumenter` instances.  There should be one
        instrumenter for each suite.
        """
        super(InstrumentedSrcPageHandler, self).__init__()
        self._desc_dict = desc_dict
        self._instr_dict = instr_dict

    def load_page(self, method, content, *args):
        """
        Load an instrumented version of the JS source file.

        Returns None for non-source files, or when instrumentation
        fails, so that the non-instrumenting `DependencyPageHandler`
        serves the file instead.
        """
        # Interpret the arguments (from the regex)
        suite_name, rel_path = args

        # Check that this is a source file (not a lib or spec)
        if self._is_src_file(suite_name, rel_path):

            # Send the instrumented source (delegating to JSCover)
            contents = self._send_instrumented_src(suite_name, rel_path)

            # BUG FIX: `_send_instrumented_src()` returns None on
            # failure.  Wrapping None in a StringIO buffer would serve
            # the literal string "None" to the client; instead decline
            # the request so the un-instrumented source is served by
            # the fallback handler.
            if contents is None:
                return None

            return self.safe_str_buffer(contents)

        # If not a source file, do not handle it.
        # Expect the non-instrumenting page handler to serve
        # the page instead.
        else:
            return None

    def mime_type(self, method, content, *args):
        """
        Guess the MIME type from the source file's extension.
        """
        _, rel_path = args
        return self.guess_mime_type(rel_path)

    def _send_instrumented_src(self, suite_name, rel_path):
        """
        Return an instrumented version of the JS source file at `rel_path`
        for the suite with name `suite_name`, or None if the source
        could not be instrumented.
        """
        # Try to retrieve the instrumenter
        instr = self._instr_dict.get(suite_name)

        if instr is None:
            msg = "Could not find instrumenter for '{}'".format(suite_name)
            LOGGER.warning(msg)
            return None

        try:
            # This performs a synchronous call to the instrumenter
            # service, raising an exception if it cannot retrieve
            # the instrumented version of the source.
            return instr.instrumented_src(rel_path)

        # If we cannot get the instrumented source, return None;
        # the caller then declines the request so the un-instrumented
        # version of the source is served by another handler.
        except SrcInstrumenterError as err:
            msg = "Could not retrieve instrumented version of '{}': {}".format(rel_path, err)
            LOGGER.warning(msg)
            return None

    def _is_src_file(self, suite_name, rel_path):
        """
        Returns True only if the file at `rel_path` is a source file
        in the suite named `suite_name`.
        """
        suite_desc = self._desc_dict.get(suite_name)

        if suite_desc is None:
            return False

        return (rel_path in suite_desc.src_paths())
class StoreCoveragePageHandler(BasePageHandler):
    """
    Store coverage reports POSTed back to the server
    by clients running instrumented JavaScript sources.
    """

    PATH_REGEX = re.compile('^/jscoverage-store/([^/]+)/?$')

    # This handler responds only to POST requests.
    HTTP_METHODS = ["POST"]

    def __init__(self, desc_dict, coverage_data):
        """
        Initialize the handler.  `desc_dict` is a dict mapping suite
        names to `SuiteDescription` instances; `coverage_data` is the
        `CoverageData` instance in which to store any received
        coverage information.
        """
        super(StoreCoveragePageHandler, self).__init__()
        self._desc_dict = desc_dict
        self._coverage_data = coverage_data

    def load_page(self, method, content, *args):
        """
        Record the coverage information sent by the client.
        """
        # The URL's only capture group is the suite name.
        return self._store_coverage_data(args[0], content)

    def mime_type(self, method, content, *args):
        """
        The acknowledgement sent back to the client is plain text.
        """
        return 'text/plain'

    def _store_coverage_data(self, suite_name, request_content):
        """
        Store received coverage data for the JS source file
        in the suite with name `suite_name`.

        `request_content` is the body of the HTTP POST request.

        Returns None if any errors occur; returns a success message
        otherwise.
        """
        # Record that we got a coverage report for this suite
        self._coverage_data.add_suite_name(suite_name)

        # Without the suite description we cannot resolve the root
        # directory, so give up.
        suite_desc = self._desc_dict.get(suite_name)
        if suite_desc is None:
            return None

        try:
            # The POST body must be a JSON object.
            coverage_dict = json.loads(request_content)
            if not isinstance(coverage_dict, dict):
                raise ValueError()

            # `CoverageData.load_from_dict()` is thread-safe, so it
            # is okay to write to it even though request handlers
            # may run concurrently.
            self._coverage_data.load_from_dict(suite_desc.root_dir(),
                                               suite_desc.prepend_path(),
                                               coverage_dict)

        except ValueError:
            msg = ("Could not interpret coverage data in POST request " +
                   "to suite {}: {}".format(suite_name, request_content))
            LOGGER.warning(msg)
            return None

        return StringIO("Success: coverage data received")
class SuitePageRequestHandler(BaseHTTPRequestHandler):
    """
    Handle HTTP requests to the `SuitePageServer`.
    """

    # NOTE(review): `BaseHTTPRequestHandler` reads `protocol_version`
    # (set on the server class above); this `protocol` attribute does
    # not appear to be consulted anywhere visible -- confirm before
    # removing it.
    protocol = "HTTP/1.0"

    def __init__(self, request, client_address, server):
        """
        Build the chain of page handlers, then delegate to the base
        class (which immediately dispatches the request).
        """
        # We always handle suite runner pages, and
        # the runner dependencies (e.g. jasmine.js)
        self._page_handlers = [SuitePageHandler(server.renderer, server.desc_dict),
                               RunnerPageHandler()]

        # If we are configured for coverage, add another handler
        # to serve instrumented versions of the source files.
        if len(server.src_instr_dict) > 0:

            # Create the handler to serve instrumented JS pages
            instr_src_handler = InstrumentedSrcPageHandler(server.desc_dict,
                                                           server.src_instr_dict)
            self._page_handlers.append(instr_src_handler)

            # Create a handler to store coverage data POSTed back
            # to the server from the client.
            store_coverage_handler = StoreCoveragePageHandler(server.desc_dict,
                                                              server.coverage_data)
            self._page_handlers.append(store_coverage_handler)

        # We always serve dependencies.  If running with coverage,
        # the instrumented src handler will intercept source files.
        # Serving the un-instrumented version is the fallback, and
        # will still be used for library/spec dependencies.
        self._page_handlers.append(DependencyPageHandler(server.desc_dict))

        # Call the superclass implementation.
        # This will immediately call do_GET() if the request is a GET.
        BaseHTTPRequestHandler.__init__(self, request, client_address, server)

    def finish(self):
        """
        Finish processing a request.
        Override the superclass implementation to silence disconnect errors.
        """
        try:
            BaseHTTPRequestHandler.finish(self)
        except socket.error:
            LOGGER.debug('client disconnected: {}'.format(self.path))

    def handle_one_request(self):
        """
        Handle a request.
        Override the superclass implementation to silence disconnect errors.
        """
        try:
            BaseHTTPRequestHandler.handle_one_request(self)
        except socket.error:
            LOGGER.debug('client disconnected: {}'.format(self.path))

    def do_GET(self):
        """
        Serve suite runner pages and JavaScript dependencies.
        """
        self._handle_request("GET")

    def do_POST(self):
        """
        Respond to POST requests providing coverage information.
        """
        self._handle_request("POST")

    def log_message(self, format_str, *args):
        """
        Override the base-class logger to avoid
        spamming the console.
        """
        LOGGER.debug("{} -- [{}] {}".format(self.client_address[0],
                                            self.log_date_time_string(),
                                            format_str % args))

    def _handle_request(self, method):
        """
        Handle an HTTP request of type `method` (e.g. "GET" or "POST").

        Asks each page handler in turn; the first one that serves the
        path wins.  Sends a 404 if no handler serves it.
        """
        # Get the request content
        request_content = self._content()

        for handler in self._page_handlers:

            # Try to retrieve the page
            content, mime_type = handler.page_contents(
                self.path, method, request_content
            )

            # If we got a page, send the contents
            if content is not None:

                try:
                    byte_range = self._requested_byte_range(self.headers, content)

                # The requested range is not satisfiable.
                # BUG FIX: RFC 2616 sec. 14.35.1 (and this class's own
                # docstrings) call for a 416 (Requested Range Not
                # Satisfiable); 406 means Not Acceptable.
                except RequestRangeError:
                    self._send_response(416, None, 'text/plain')
                    return

                # If no byte range requested, send all the content
                if byte_range is None:
                    self._send_response(200, content, mime_type)
                    return

                # If a byte range was requested, send partial content
                else:
                    self._send_response(
                        206, content, mime_type,
                        byte_range=byte_range
                    )
                    return

        # If we could not retrieve the contents (e.g. because
        # the file does not exist), send an error response
        self._send_response(404, None, 'text/plain')

    def _requested_byte_range(self, headers, content_file):
        """
        Parse the requested byte range ('Range' header)
        and return a `(start_pos, end_pos)` tuple indicating
        the start/end bytes to transmit (inclusive).

        `headers` represents the request headers
        (a `mimetools.Message` instance).

        `content_file` is the file to transmit
        (used to determine the file size).

        If no byte range requested, returns None.

        Raises a `RequestRangeError` if the byte range is not satisfiable
        (in which case the server should send a 416 response).

        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.1
        """
        range_header = headers.get('Range')

        # No byte range specified, so send the whole file
        if range_header is None or range_header == '':
            return None

        # Otherwise, parse the header
        # Expect it to have the form: "bytes=byte-ranges"
        elif range_header.startswith('bytes='):

            # Get the file size
            file_size = self._file_size(content_file)

            # Chop off the "bytes=" part, so we just get the ranges
            # Then split into individual ranges
            # Example: "bytes=0-10,22-43" --> ["0-10", "22-43"]
            range_str_list = range_header[len('bytes='):].split(",")

            # We don't implement multiple byte ranges
            # Just respond with a 200 and the full file instead
            if len(range_str_list) > 1:
                return None

            # Parse the range
            return self._parse_byte_range(range_str_list[0], file_size)

        # Don't recognize the format
        # The RFC says to return a 200 with the full file
        else:
            return None

    @staticmethod
    def _file_size(file_handle):
        """
        Return the size of `file_handle` (a file-like object) in bytes.
        Preserves the handle's current position.
        """
        old_pos = file_handle.tell()

        # Seek to the end of the file to find the last byte position
        # (2 means relative to the end of the file)
        file_handle.seek(0, 2)
        size = file_handle.tell()

        # Reset the old position
        file_handle.seek(old_pos)

        return size

    def _parse_byte_range(self, range_str, file_size):
        """
        Return a `(start_pos, end_pos)` tuple by parsing `range_str`
        which can take the form:

        * START-END
        * START-
        * -LENGTH

        `file_size` is the size of the file to serve in bytes.

        Raises a `RequestRangeError` if the byte range is not satisfiable.

        Returns None if the byte range could not be parsed (invalid format),
        triggering a 200 with the full file.
        """
        try:
            start_pos, end_pos = range_str.split("-")
            start_pos = int(start_pos) if start_pos != '' else None
            end_pos = int(end_pos) if end_pos != '' else None

        # Can't interpret the start/end position,
        # so trigger a 200 with the full file instead.
        except ValueError:
            return None

        # There are three cases to handle here:
        # 1) Both start and end specified: "0-10" (start/end byte indices)
        # 2) Only start specified: "0-" (start index to end of file)
        # 3) Only end specified: "-10" (length from the end of the file)
        if start_pos is not None and end_pos is not None:

            # Verify that start <= end
            if start_pos > end_pos:
                msg = "Start byte > end byte in range {0}".format(range_str)
                raise RequestRangeError(msg)

            return (start_pos, min(end_pos, file_size - 1))

        elif start_pos is not None and end_pos is None:
            return (start_pos, file_size - 1)

        elif start_pos is None and end_pos is not None:

            # Interpret `end_pos` as a length from the end of the file.
            # BUG FIX: a suffix longer than the file used to yield a
            # negative start position; per RFC 2616 the whole file
            # should be sent instead, so clamp the start at zero.
            return (max(0, file_size - end_pos), file_size - 1)

        # Neither start nor end specified -- invalid byte range, so
        # trigger a 200 with the full file instead.
        else:
            return None

    def _send_response(self, status_code, content, mime_type, byte_range=None):
        """
        Send a response to an HTTP request.

        `content` is a file-like object; if None, a response with no
        body is sent.

        `mime_type` is sent as the Content-Type header.

        `byte_range` is a `(start_pos, end_pos)` tuple indicating the
        first and last byte (indexed from 0, inclusive) to send; if
        None, the entire file is sent.
        """
        self.send_response(status_code)
        self.send_header('Content-Type', mime_type + '; charset=utf-8')
        self.send_header('Content-Language', 'en')
        self.send_header('Accept-Ranges', 'bytes')

        if byte_range is not None:
            start_pos, end_pos = byte_range
            self.send_header(
                'Content-Range',
                'bytes {0}-{1}/{2}'.format(start_pos, end_pos, self._file_size(content))
            )
            self.send_header('Content-Length', end_pos - start_pos + 1)
        else:
            content_length = self._file_size(content) if content is not None else 0
            self.send_header('Content-Length', content_length)

        self.end_headers()

        # Send the content
        if content:

            # If no byte range specified, send the whole file.
            # Copying the file object ensures we don't hold huge
            # files in memory or overload the network buffer.
            if byte_range is None:
                shutil.copyfileobj(content, self.wfile)

            # Otherwise, send just the range requested
            else:
                start_pos, end_pos = byte_range

                # The range is inclusive, so send (end - start + 1) bytes.
                # BUG FIX: `shutil.copyfileobj`'s third argument is a
                # *buffer size*, not a byte count -- the old code copied
                # from `start_pos` all the way to EOF, exceeding the
                # declared Content-Length.  Read the exact count instead.
                content.seek(start_pos)
                self.wfile.write(content.read(end_pos - start_pos + 1))

    def _content(self):
        """
        Retrieve the body of the request, or "" if there is no
        (valid) Content-Length header.
        """
        try:
            length = int(self.headers.getheader('content-length'))

        except (TypeError, ValueError):
            return ""
        else:
            return self.rfile.read(length)
# Better error message for duplicate suite names
"""
Serve test runner pages and included JavaScript files on a local port.
"""
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
import threading
import re
import pkg_resources
import os.path
import logging
import json
import time
import mimetypes
import shutil
import socket
from StringIO import StringIO
from abc import ABCMeta, abstractmethod
from js_test_tool.coverage import SrcInstrumenter, SrcInstrumenterError, CoverageData
LOGGER = logging.getLogger(__name__)
class TimeoutError(Exception):
    """
    Raised when the server gives up waiting for an event
    (e.g. coverage data that never arrives).
    """
class DuplicateSuiteNameError(Exception):
    """
    Raised when two or more test suites share the same name.
    """
class RequestRangeError(Exception):
    """
    Raised when a client requests an invalid byte range
    (for example, a starting byte greater than the ending byte).
    """
class SuitePageServer(ThreadingMixIn, HTTPServer):
    """
    Serve test suite pages and included JavaScript files.
    """

    protocol_version = 'HTTP/1.1'

    # Request response timeout (seconds)
    timeout = 5

    # Amount of time (in seconds) to wait for clients to POST
    # coverage info back to the server before timing out.
    COVERAGE_TIMEOUT = 2.0

    # Amount of time (in seconds) to wait between checks that we
    # have all the coverage info.
    COVERAGE_WAIT_TIME = 0.1

    # The `CoverageData` instance used by the server to store
    # coverage data received from the test suites.
    # Since `CoverageData` is thread-safe, it is okay for
    # other threads to write to it asynchronously.
    # Remains None when coverage is not being collected.
    coverage_data = None

    def __init__(self, suite_desc_list, suite_renderer, jscover_path=None):
        """
        Initialize the server to serve test runner pages
        and dependencies described by `suite_desc_list`
        (list of `SuiteDescription` instances).

        `jscover_path` is the path to the JSCover JAR file.  If not
        specified, no coverage information will be collected.

        Use `suite_renderer` (a `SuiteRenderer` instance) to
        render the test suite pages.

        Raises a `DuplicateSuiteNameError` if two or more suites
        share the same name.
        """
        # Store dependencies
        self.desc_dict = self._suite_dict_from_list(suite_desc_list)
        self.renderer = suite_renderer
        self._jscover_path = jscover_path

        # One source instrumenter service per suite description
        self.src_instr_dict = {}

        # Using port 0 assigns us an unused port
        HTTPServer.__init__(self, ('127.0.0.1', 0), SuitePageRequestHandler)

    def start(self):
        """
        Start serving pages on an open local port.
        """
        worker = threading.Thread(target=self.serve_forever)
        worker.daemon = True
        worker.start()

        # Not collecting coverage: no instrumenters to run.
        if self._jscover_path is None:
            self.src_instr_dict = {}
            return

        # Collecting coverage: create an object to store the
        # coverage data the clients POST back.
        self.coverage_data = CoverageData()

        # Start one SrcInstrumenter per suite, since we know where JSCover is
        for suite_name, desc in self.desc_dict.iteritems():

            # Inform the coverage data that we expect each source
            # (reported as 0% if no info is received).
            for rel_path in desc.src_paths():
                self.coverage_data.add_expected_src(desc.root_dir(), rel_path)

            # The instrumenter serves files from the suite
            # description's root directory.
            instrumenter = SrcInstrumenter(desc.root_dir(),
                                           tool_path=self._jscover_path)
            instrumenter.start()

            # Associate the instrumenter with its suite description
            self.src_instr_dict[suite_name] = instrumenter

    def stop(self):
        """
        Stop the server and free the port.
        """
        # Shut down every instrumenter service that we started
        for instrumenter in self.src_instr_dict.values():
            instrumenter.stop()

        # Stop the page server and free the port
        self.shutdown()
        self.socket.close()

    def suite_url_list(self):
        """
        Return a list of URLs (unicode strings), where each URL
        is a test suite page containing the JS code to run
        the JavaScript tests.
        """
        return [self.root_url() + u'suite/{}'.format(name)
                for name in self.desc_dict.keys()]

    def root_url(self):
        """
        Return the root URL (including host and port) for the server
        as a unicode string.
        """
        host, port = self.server_address
        return u"http://{}:{}/".format(host, port)

    def all_coverage_data(self):
        """
        Returns a `CoverageData` instance containing all coverage data
        received from running the tests.

        Blocks until all suites have reported coverage data.  If it
        times out waiting for all data, raises a `TimeoutError`.

        If we are not collecting coverage, returns None.
        """
        if self.coverage_data is None:
            return None

        self._block_until(self._has_all_coverage)
        return self.coverage_data

    def _block_until(self, success_func):
        """
        Block until `success_func` (a zero-argument callable)
        returns True, raising a `TimeoutError` after
        `COVERAGE_TIMEOUT` seconds.
        """
        started = time.time()

        while not success_func():

            # Give up once the timeout has elapsed
            if time.time() - started > self.COVERAGE_TIMEOUT:
                raise TimeoutError()

            # Wait a little bit before checking again
            time.sleep(self.COVERAGE_WAIT_TIME)

    def _has_all_coverage(self):
        """
        Returns True if and only if every suite
        has coverage information.
        """
        # Names of suites for which coverage was reported.
        reported = self.coverage_data.suite_name_list()

        # Check that every suite has reported.
        # (Not the most efficient approach -- revisit if it
        # becomes a bottleneck.)
        return sorted(reported) == sorted(self.desc_dict.keys())

    @classmethod
    def _suite_dict_from_list(cls, suite_desc_list):
        """
        Given a list of `SuiteDescription` instances, construct
        a dictionary mapping suite names to the instances.

        Raises a `DuplicateSuiteNameError` naming the offending
        suites if two or more of them share a name.
        """
        names = [suite.suite_name() for suite in suite_desc_list]

        # Refuse to continue if any name is repeated
        duplicates = cls._duplicates(names)
        if len(duplicates) > 0:
            msg = "Duplicate suite name(s): {}".format(",".join(duplicates))
            raise DuplicateSuiteNameError(msg)

        return dict(zip(names, suite_desc_list))

    @classmethod
    def _duplicates(cls, name_list):
        """
        Given a list of strings, return the set of duplicates in the list.
        """
        seen = set()
        repeated = set()

        for name in name_list:
            # Names seen before go in the duplicate set;
            # first occurrences are merely remembered.
            target = repeated if name in seen else seen
            target.add(name)

        return repeated
class BasePageHandler(object):
    """
    Abstract base class for page handlers.

    A handler decides whether it can serve a given URL path and,
    if so, produces the page contents and MIME type.
    """

    # Python 2 style abstract base class.
    __metaclass__ = ABCMeta

    # HTTP methods this handler responds to (GET only by default).
    HTTP_METHODS = ["GET"]

    # Subclasses set this to a compiled `re` pattern that matches
    # the URL paths they serve.
    PATH_REGEX = None

    def page_contents(self, path, method, content):
        """
        Try to serve `path`.

        Returns a `(content, mime_type)` tuple on success, where
        `content` is a file-like object with the page contents and
        `mime_type` is the value for the Content-Type response header.
        Returns `(None, None)` if this handler does not serve the page.

        `method` is the HTTP method (e.g. "GET" or "POST");
        `content` is the body of the HTTP request.
        """
        # Bail out early if we don't speak this HTTP method.
        if method not in self.HTTP_METHODS:
            return (None, None)
        match = self.PATH_REGEX.match(path)
        # Bail out if the URL path is not one of ours.
        if match is None:
            return (None, None)
        # Delegate to the subclass to build the response.
        groups = match.groups()
        return (
            self.load_page(method, content, *groups),
            self.mime_type(method, content, *groups),
        )

    @abstractmethod
    def load_page(self, method, content, *args):
        """
        Subclasses override this to load the page.

        `args` is the tuple of groups captured by `PATH_REGEX`.
        `method` is the HTTP method used to load the page; `content`
        is the content of the HTTP request.

        Returns a file-like object from which to read the page
        content, or None if the page cannot be loaded (e.g. a file
        that does not exist).
        """
        pass

    @abstractmethod
    def mime_type(self, method, content, *args):
        """
        Subclasses override this to return the MIME type for the page.
        Arguments have the same meaning as in `load_page()`.
        """
        pass

    @staticmethod
    def guess_mime_type(url):
        """
        Guess the MIME type of `url` from its extension, falling
        back to text/plain when the extension is unknown.
        """
        guessed, _ = mimetypes.guess_type(url)
        return guessed if guessed is not None else 'text/plain'

    @staticmethod
    def safe_str_buffer(content):
        """
        Wrap `content` in a file-like buffer, first encoding it as a
        UTF-8 bytestring if it is unicode.
        """
        if isinstance(content, unicode):
            return StringIO(content.encode('utf-8'))
        return StringIO(content)
class SuitePageHandler(BasePageHandler):
    """
    Serve the suite runner page for URLs of the form
    `/suite/SUITE_NAME`, where `SUITE_NAME` names a test
    suite description.
    """

    # Match /suite/NAME and /suite/NAME/, ignoring GET parameters.
    PATH_REGEX = re.compile(r'^/suite/([^?/]+)/?(\?.*)?$')

    def __init__(self, renderer, desc_dict):
        """
        `renderer` is a `SuiteRenderer` instance; `desc_dict` maps
        suite names to `SuiteDescription` instances.
        """
        super(SuitePageHandler, self).__init__()
        self._renderer = renderer
        self._desc_dict = desc_dict

    def load_page(self, method, content, *args):
        """
        Render the suite runner page for the requested suite.
        Returns None (don't serve) if the suite name is unknown.
        """
        # The first regex group is the suite name.
        suite_name = args[0]
        suite_desc = self._desc_dict.get(suite_name)
        if suite_desc is None:
            return None
        rendered = self._renderer.render_to_string(suite_name, suite_desc)
        return self.safe_str_buffer(rendered)

    def mime_type(self, method, content, *args):
        """The runner page is always HTML."""
        return 'text/html'
class RunnerPageHandler(BasePageHandler):
    """
    Serve test-runner pages for URLs of the form
    `/runner/RUNNER_PATH`.
    """

    # Capture everything after /runner/ up to any GET parameters.
    PATH_REGEX = re.compile(r'^/runner/([^\?]+).*$')

    def load_page(self, method, content, *args):
        """
        Load the runner file from this package's resources;
        return None if the resource does not exist.
        """
        # The only regex group is the relative path.
        rel_path = os.path.join('runner', args[0])
        try:
            resource = pkg_resources.resource_string('js_test_tool', rel_path)
        except BaseException:
            # pkg_resources raises a variety of exception types for a
            # missing resource, so catch everything and refuse to serve.
            return None
        return self.safe_str_buffer(resource)

    def mime_type(self, method, content, *args):
        """Guess the MIME type from the requested path."""
        return self.guess_mime_type(args[0])
class DependencyPageHandler(BasePageHandler):
    """
    Serve dependencies required by a test suite description.
    """

    # Capture the suite name and the dependency's relative path,
    # ignoring any GET parameters in the URL.
    PATH_REGEX = re.compile('^/suite/([^/]+)/include/([^?]+).*$')

    # MIME types (in addition to text/*) that are served as
    # UTF-8 encoded text.
    TEXT_MIME_TYPES = [
        'application/json',
        'application/javascript',
        'application/ecmascript',
        'application/xml',
    ]

    def __init__(self, desc_dict):
        """
        `desc_dict` maps suite names to `SuiteDescription` instances.
        """
        super(DependencyPageHandler, self).__init__()
        self._desc_dict = desc_dict

    def load_page(self, method, content, *args):
        """
        Open the dependency file named by the URL (relative to the
        suite description) and return its handle.  Returns None if
        the file is not a listed dependency or cannot be read.
        """
        suite_name, rel_path = args
        full_path = self._dependency_path(suite_name, rel_path)
        # Not one of our listed dependencies: refuse to serve it.
        if full_path is None:
            return None
        try:
            return open(full_path, 'rb')
        except IOError:
            # Listed, but unreadable (probably missing on disk).
            return None

    def mime_type(self, method, content, *args):
        """Guess the MIME type from the dependency's path."""
        _, rel_path = args
        return self.guess_mime_type(rel_path)

    def _dependency_path(self, suite_name, path):
        """
        Resolve `path` to a full filesystem path if it is listed as a
        dependency of the suite named `suite_name`; otherwise None.
        """
        suite_desc = self._desc_dict.get(suite_name)
        if suite_desc is None:
            return None
        # Every path the suite is allowed to serve.
        listed = (suite_desc.lib_paths() +
                  suite_desc.src_paths() +
                  suite_desc.spec_paths() +
                  suite_desc.fixture_paths())
        if path not in listed:
            return None
        return os.path.join(suite_desc.root_dir(), path)
class InstrumentedSrcPageHandler(BasePageHandler):
    """
    Serve JavaScript sources instrumented to collect coverage
    information.
    """

    PATH_REGEX = re.compile('^/suite/([^/]+)/include/([^?]+).*$')

    def __init__(self, desc_dict, instr_dict):
        """
        `desc_dict` maps suite names to `SuiteDescription` instances;
        `instr_dict` maps suite names to `SrcInstrumenter` instances
        (one instrumenter per suite).
        """
        super(InstrumentedSrcPageHandler, self).__init__()
        self._desc_dict = desc_dict
        self._instr_dict = instr_dict

    def load_page(self, method, content, *args):
        """
        Return an instrumented version of the requested JS source.
        Returns None for anything that is not a source file (lib or
        spec), so the non-instrumenting handler serves it instead.
        """
        suite_name, rel_path = args
        if not self._is_src_file(suite_name, rel_path):
            return None
        # Delegate to JSCover for the instrumented source.
        contents = self._send_instrumented_src(suite_name, rel_path)
        return self.safe_str_buffer(contents)

    def mime_type(self, method, content, *args):
        """Guess the MIME type from the source path."""
        _, rel_path = args
        return self.guess_mime_type(rel_path)

    def _send_instrumented_src(self, suite_name, rel_path):
        """
        Ask the suite's instrumenter for the instrumented source at
        `rel_path`.  Returns None on any failure, which lets the
        un-instrumented fallback handler serve the file instead.
        """
        instr = self._instr_dict.get(suite_name)
        if instr is None:
            LOGGER.warning(
                "Could not find instrumenter for '{}'".format(suite_name))
            return None
        try:
            # Synchronous call to the instrumenter service; raises if
            # the instrumented source cannot be retrieved.
            return instr.instrumented_src(rel_path)
        except SrcInstrumenterError as err:
            LOGGER.warning(
                "Could not retrieve instrumented version of '{}': {}".format(
                    rel_path, err))
            return None

    def _is_src_file(self, suite_name, rel_path):
        """
        True only if `rel_path` is a source file of the suite named
        `suite_name`.
        """
        suite_desc = self._desc_dict.get(suite_name)
        return (suite_desc is not None and
                rel_path in suite_desc.src_paths())
class StoreCoveragePageHandler(BasePageHandler):
    """
    Accept coverage reports POSTed back by clients running
    instrumented JavaScript sources.
    """

    PATH_REGEX = re.compile('^/jscoverage-store/([^/]+)/?$')

    # Coverage reports arrive via POST only.
    HTTP_METHODS = ["POST"]

    def __init__(self, desc_dict, coverage_data):
        """
        `desc_dict` maps suite names to `SuiteDescription` instances;
        `coverage_data` is the `CoverageData` instance that received
        reports are written to.
        """
        super(StoreCoveragePageHandler, self).__init__()
        self._desc_dict = desc_dict
        self._coverage_data = coverage_data

    def load_page(self, method, content, *args):
        """
        Record the POSTed coverage report for the suite named in
        the URL.
        """
        # The only regex group is the suite name.
        return self._store_coverage_data(args[0], content)

    def mime_type(self, method, content, *args):
        """Responses to coverage POSTs are plain text."""
        return 'text/plain'

    def _store_coverage_data(self, suite_name, request_content):
        """
        Parse `request_content` (the POST body) as a JSON coverage
        dict and store it for the suite named `suite_name`.
        Returns a success-message buffer, or None on any error.
        """
        # Record that we got a coverage report for this suite, even
        # if parsing fails below.
        self._coverage_data.add_suite_name(suite_name)
        # Without a suite description we have no root dir to resolve
        # the reported paths against, so give up.
        suite_desc = self._desc_dict.get(suite_name)
        if suite_desc is None:
            return None
        try:
            coverage_dict = json.loads(request_content)
            if not isinstance(coverage_dict, dict):
                raise ValueError()
            # `CoverageData.load_from_dict()` is thread-safe, so this
            # is okay even when the request handler runs asynchronously.
            self._coverage_data.load_from_dict(suite_desc.root_dir(),
                                               suite_desc.prepend_path(),
                                               coverage_dict)
        except ValueError:
            LOGGER.warning(
                "Could not interpret coverage data in POST request " +
                "to suite {}: {}".format(suite_name, request_content))
            return None
        return StringIO("Success: coverage data received")
class SuitePageRequestHandler(BaseHTTPRequestHandler):
    """
    Handle HTTP requests to the `SuitePageServer`.

    Dispatches each GET/POST to a chain of page handlers; the first
    handler that returns content wins.  Supports single-part HTTP
    byte ranges.
    """

    protocol = "HTTP/1.0"
    # NOTE(review): `BaseHTTPRequestHandler` consults `protocol_version`,
    # not `protocol` -- confirm whether this attribute is intentional.

    def __init__(self, request, client_address, server):
        """
        Build the page-handler chain, then delegate to the superclass
        (which immediately dispatches the request).
        """
        # We always handle suite runner pages, and
        # the runner dependencies (e.g. jasmine.js)
        self._page_handlers = [SuitePageHandler(server.renderer, server.desc_dict),
                               RunnerPageHandler()]
        # If we are configured for coverage, add another handler
        # to serve instrumented versions of the source files.
        if len(server.src_instr_dict) > 0:
            # Create the handler to serve instrumented JS pages
            instr_src_handler = InstrumentedSrcPageHandler(server.desc_dict,
                                                           server.src_instr_dict)
            self._page_handlers.append(instr_src_handler)
            # Create a handler to store coverage data POSTed back
            # to the server from the client.
            store_coverage_handler = StoreCoveragePageHandler(server.desc_dict,
                                                              server.coverage_data)
            self._page_handlers.append(store_coverage_handler)
        # We always serve dependencies.  If running with coverage,
        # the instrumented src handler will intercept source files.
        # Serving the un-instrumented version is the fallback, and
        # will still be used for library/spec dependencies.
        self._page_handlers.append(DependencyPageHandler(server.desc_dict))
        # Call the superclass implementation.
        # This will immediately call do_GET()/do_POST() as appropriate.
        BaseHTTPRequestHandler.__init__(self, request, client_address, server)

    def finish(self):
        """
        Finish processing a request.
        Override the superclass implementation to silence disconnect errors.
        """
        try:
            BaseHTTPRequestHandler.finish(self)
        except socket.error:
            LOGGER.debug('client disconnected: {}'.format(self.path))

    def handle_one_request(self):
        """
        Handle a request.
        Override the superclass implementation to silence disconnect errors.
        """
        try:
            BaseHTTPRequestHandler.handle_one_request(self)
        except socket.error:
            LOGGER.debug('client disconnected: {}'.format(self.path))

    def do_GET(self):
        """
        Serve suite runner pages and JavaScript dependencies.
        """
        self._handle_request("GET")

    def do_POST(self):
        """
        Respond to POST requests providing coverage information.
        """
        self._handle_request("POST")

    def log_message(self, format_str, *args):
        """
        Override the base-class logger to avoid spamming the console.
        """
        LOGGER.debug("{} -- [{}] {}".format(self.client_address[0],
                                            self.log_date_time_string(),
                                            format_str % args))

    def _handle_request(self, method):
        """
        Handle an HTTP request of type `method` (e.g. "GET" or "POST"),
        delegating to the first page handler that returns content.
        Sends 404 if no handler can serve the path.
        """
        request_content = self._content()
        for handler in self._page_handlers:
            # Try to retrieve the page
            content, mime_type = handler.page_contents(
                self.path, method, request_content
            )
            # If we got a page, send the contents
            if content is not None:
                try:
                    byte_range = self._requested_byte_range(self.headers, content)
                # The requested range is not satisfiable.
                # Bug fix: RFC 7233 defines this status as 416
                # (Range Not Satisfiable); the old code sent 406.
                except RequestRangeError:
                    self._send_response(416, None, 'text/plain')
                    return
                # If no byte range requested, send all the content
                if byte_range is None:
                    self._send_response(200, content, mime_type)
                    return
                # If a byte range was requested, send partial content
                else:
                    self._send_response(
                        206, content, mime_type,
                        byte_range=byte_range
                    )
                    return
        # If we could not retrieve the contents (e.g. because
        # the file does not exist), send an error response
        self._send_response(404, None, 'text/plain')

    def _requested_byte_range(self, headers, content_file):
        """
        Parse the requested byte range ('Range' header) and return a
        `(start_pos, end_pos)` tuple of inclusive byte indices to
        transmit.

        `headers` represents the request headers; `content_file` is
        the file to transmit (used to determine the file size).

        Returns None if no (single, recognizable) byte range was
        requested, in which case the whole file should be sent.
        Raises a `RequestRangeError` if the byte range is not
        satisfiable (in which case the server should send a 416
        response).

        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.1
        """
        range_header = headers.get('Range')
        # No byte range specified, so send the whole file
        if range_header is None or range_header == '':
            return None
        # Otherwise, parse the header.
        # Expect it to have the form: "bytes=byte-ranges"
        elif range_header.startswith('bytes='):
            file_size = self._file_size(content_file)
            # Chop off the "bytes=" part and split into ranges.
            # Example: "bytes=0-10,22-43" --> ["0-10", "22-43"]
            range_str_list = range_header[len('bytes='):].split(",")
            # We don't implement multiple byte ranges;
            # just respond with a 200 and the full file instead.
            if len(range_str_list) > 1:
                return None
            return self._parse_byte_range(range_str_list[0], file_size)
        # Don't recognize the format.
        # The RFC says to return a 200 with the full file.
        else:
            return None

    @staticmethod
    def _file_size(file_handle):
        """
        Return the size of `file_handle` (a file-like object) in bytes,
        preserving the handle's current position.
        """
        old_pos = file_handle.tell()
        # Seek to the end of the file to find the last byte position
        # (2 means relative to the end of the file)
        file_handle.seek(0, 2)
        size = file_handle.tell()
        # Reset the old position
        file_handle.seek(old_pos)
        return size

    def _parse_byte_range(self, range_str, file_size):
        """
        Return a `(start_pos, end_pos)` tuple by parsing `range_str`,
        which can take the form:

        * START-END
        * START-
        * -LENGTH

        `file_size` is the size of the file to serve in bytes.
        Raises a `RequestRangeError` if the byte range is not
        satisfiable.  Returns None for an unparseable range,
        triggering a 200 with the full file.
        """
        try:
            start_pos, end_pos = range_str.split("-")
            start_pos = int(start_pos) if start_pos != '' else None
            end_pos = int(end_pos) if end_pos != '' else None
        # Can't interpret the start/end position,
        # so trigger a 200 with the full file instead.
        except ValueError:
            return None
        # Case 1: "START-END" -- explicit start/end byte indices.
        if start_pos is not None and end_pos is not None:
            # Verify that start <= end
            if start_pos > end_pos:
                msg = "Start byte > end byte in range {0}".format(range_str)
                raise RequestRangeError(msg)
            return (start_pos, min(end_pos, file_size - 1))
        # Case 2: "START-" -- from the start index to the end of file.
        elif start_pos is not None and end_pos is None:
            return (start_pos, file_size - 1)
        # Case 3: "-LENGTH" -- the last LENGTH bytes of the file.
        elif start_pos is None and end_pos is not None:
            len_from_end = end_pos
            return (file_size - len_from_end, file_size - 1)
        # Neither start nor end specified -- invalid byte range, so
        # trigger a 200 with the full file instead.
        else:
            return None

    def _send_response(self, status_code, content, mime_type, byte_range=None):
        """
        Send a response to an HTTP request.

        `content` is a file-like object (or None for an empty body);
        `mime_type` is sent as the Content-Type header.  `byte_range`
        is an inclusive `(start_pos, end_pos)` tuple; when provided,
        only that slice of `content` is transmitted (206 responses).
        """
        self.send_response(status_code)
        self.send_header('Content-Type', mime_type + '; charset=utf-8')
        self.send_header('Content-Language', 'en')
        self.send_header('Accept-Ranges', 'bytes')
        if byte_range is not None:
            start_pos, end_pos = byte_range
            self.send_header(
                'Content-Range',
                'bytes {0}-{1}/{2}'.format(start_pos, end_pos, self._file_size(content))
            )
            self.send_header('Content-Length', end_pos - start_pos + 1)
        else:
            content_length = self._file_size(content) if content is not None else 0
            self.send_header('Content-Length', content_length)
        self.end_headers()
        # Send the content.
        # Copying the file objects ensures that
        # (a) we don't store huge files in memory, and
        # (b) we don't overload the network buffer
        if content:
            # If no byte range specified, send the whole file
            if byte_range is None:
                shutil.copyfileobj(content, self.wfile)
            # Otherwise, send just the range requested
            else:
                start_pos, end_pos = byte_range
                # Bug fix: byte ranges are inclusive, so the number of
                # bytes to send is end - start + 1.  This now agrees
                # with the Content-Length header sent above (the old
                # code dropped the final byte of every 206 response).
                copy_len = end_pos - start_pos + 1
                content.seek(start_pos)
                shutil.copyfileobj(content, self.wfile, copy_len)

    def _content(self):
        """
        Return the body of the request, or "" if there is no
        (valid) content-length header.
        """
        try:
            length = int(self.headers.getheader('content-length'))
        except (TypeError, ValueError):
            return ""
        else:
            return self.rfile.read(length)
|
Needs some work before publishing this
|
#! /usr/bin/env python
import praw
import re
import os
def reply(comment, text):
    """Post `text` (plus the bot's footer) as a reply to `comment`
    and remember the comment id so it is not answered twice."""
    message = text + "\n\n Got any problems with this bot? Message /u/faerbit"
    print("Replying:")
    print(message)
    comment.reply(message)
    # Mark this comment as handled (module-level set).
    already_done.add(comment.id)
# Identify the bot to the Reddit API.
user_agent=("Non-mobile link 0.1 by /u/faerbit")
r = praw.Reddit(user_agent=user_agent)
# Comment ids we have already replied to.  In-memory only, so the
# bot forgets them on restart -- TODO confirm duplicates don't matter.
already_done = set()
# Matches links to the mobile ("m.") Wikipedia site.
find_expression = re.compile("https?://[a-zA-Z]*\.m\.wikipedia\.org/wiki/[a-zA-Z0-9_#%]*")
# NOTE(review): the listing is built before login; this presumably
# works only because praw evaluates it lazily -- confirm.
comments = r.get_subreddit('test').get_comments(limit=500)
r.login("non-mobile-linkbot", os.environ["NON_MOBILE_LINKBOT_PASSWORD"])
for comment in comments:
    if comment.id not in already_done:
        text =""
        links = []
        # Collect every mobile Wikipedia link, rewritten to the
        # desktop URL by dropping the "m." subdomain component.
        for i in re.findall(find_expression, comment.body):
            i = re.sub("m\.", "", i)
            links.append(i)
        if len(links) == 1:
            text = "Non-mobile link: "
            text += links[0]
            reply(comment, text)
        elif len(links) > 1:
            # NOTE(review): the first link is appended directly after
            # the "links:" label with no separator -- looks like a
            # formatting bug; confirm intended output.
            text = "Non-mobile links:"
            for i in links:
                text += i
                text += "\n\n"
            reply(comment, text)
Changed disclaimer
#! /usr/bin/env python
import praw
import re
import os
def reply(comment, text):
    """Post `text` plus the bot disclaimer as a reply to `comment`,
    then record the comment id as handled."""
    message = text + "\n\n ^Got ^any ^problems/suggestions ^with ^this ^bot? ^Message ^/u/faerbit ^or ^check ^out ^the ^[code](https://github.com/Faerbit/non-mobile-link)!"
    print("Replying:")
    print(message)
    comment.reply(message)
    # Remember that this comment has been answered (module-level set).
    already_done.add(comment.id)
# Identify the bot to the Reddit API.
user_agent = "Non-mobile link 0.1 by /u/faerbit"
r = praw.Reddit(user_agent=user_agent)

# Comment ids we have already replied to.  In-memory only, so the
# bot forgets them on restart -- TODO: persist if reposts matter.
already_done = set()

# Matches links to the mobile ("m.") Wikipedia site.
find_expression = re.compile(r"https?://[a-zA-Z]*\.m\.wikipedia\.org/wiki/[a-zA-Z0-9_#%]*")

# Log in *before* building the listing so authentication problems
# surface immediately rather than mid-iteration.
r.login("non-mobile-linkbot", os.environ["NON_MOBILE_LINKBOT_PASSWORD"])
comments = r.get_subreddit('test').get_comments(limit=500)

for comment in comments:
    if comment.id in already_done:
        continue
    # Rewrite each mobile link to its desktop equivalent.  Matching
    # ".m." (instead of any "m.") avoids mangling other parts of the
    # URL that happen to contain "m.".
    links = [re.sub(r"\.m\.", ".", link)
             for link in re.findall(find_expression, comment.body)]
    if len(links) == 1:
        reply(comment, "Non-mobile link: " + links[0])
    elif len(links) > 1:
        # Bug fix: the original appended the first link directly onto
        # the "Non-mobile links:" label with no separator.
        reply(comment, "Non-mobile links:\n\n" + "\n\n".join(links))
|
"""
Python representations of the JSON Schema Test Suite tests.
"""
import json
import os
import re
import subprocess
import sys
from bp.filepath import FilePath
from pyrsistent import pmap
import attr
from jsonschema.compat import PY3
from jsonschema.validators import validators
import jsonschema
def _find_suite():
    """Locate the JSON-Schema-Test-Suite checkout.

    Uses the JSON_SCHEMA_TEST_SUITE environment variable when set;
    otherwise falls back to a "json" directory alongside the
    jsonschema package.  Raises ValueError if neither exists.
    """
    env_root = os.environ.get("JSON_SCHEMA_TEST_SUITE")
    if env_root is not None:
        return FilePath(env_root)
    root = FilePath(jsonschema.__file__).parent().sibling("json")
    if root.isdir():
        return root
    raise ValueError(
        (
            "Can't find the JSON-Schema-Test-Suite directory. "
            "Set the 'JSON_SCHEMA_TEST_SUITE' environment "
            "variable or run the tests from alongside a checkout "
            "of the suite."
        ),
    )
@attr.s(hash=True)
class Suite(object):
    """
    The whole JSON-Schema-Test-Suite checkout.
    """
    # Root directory of the suite checkout (a FilePath).
    _root = attr.ib(default=attr.Factory(_find_suite))
    def _remotes(self):
        """
        Return the suite's remote schemas as a dict mapping localhost
        ref URLs to schema dicts, by shelling out to the suite's own
        `bin/jsonschema_suite remotes` command.
        """
        jsonschema_suite = self._root.descendant(["bin", "jsonschema_suite"])
        remotes = subprocess.check_output(
            [sys.executable, jsonschema_suite.path, "remotes"],
        )
        return {
            "http://localhost:1234/" + name: schema
            for name, schema in json.loads(remotes.decode("utf-8")).items()
        }
    def benchmark(self, runner):
        """Benchmark every collection (one per known validator)."""
        for name in validators:
            self.collection(name=name).benchmark(runner=runner)
    def collection(self, name):
        """Return the `Collection` for the draft directory `name`."""
        return Collection(
            name=name,
            path=self._root.descendant(["tests", name]),
            remotes=self._remotes(),
        )
@attr.s(hash=True)
class Collection(object):
    """
    The tests for a single draft (one directory of the test suite).
    """
    # Filesystem path of the draft's test directory (a FilePath).
    _path = attr.ib()
    # Mapping of remote ref URLs to schemas, shared by all tests.
    _remotes = attr.ib()
    # The draft name, e.g. "draft4".
    name = attr.ib()
    def benchmark(self, runner):
        """Benchmark every test in this collection using `runner`."""
        for test in self.tests():
            runner.bench_func(
                name=test.fully_qualified_name,
                func=test.validate_ignoring_errors,
            )
    def tests(self):
        """Yield a `_Test` for every case in every *.json file."""
        return (
            test
            for child in self._path.globChildren("*.json")
            for test in self._tests_in(
                # Strip the ".json" suffix to get the subject name.
                subject=child.basename()[:-5],
                path=child,
            )
        )
    def format_tests(self):
        """Yield the tests from the optional/format directory."""
        path = self._path.descendant(["optional", "format"])
        return (
            test
            for child in path.globChildren("*.json")
            for test in self._tests_in(
                subject=child.basename()[:-5],
                path=child,
            )
        )
    def tests_of(self, name):
        """Yield the tests in the file `name`.json."""
        return self._tests_in(
            subject=name,
            path=self._path.child(name + ".json"),
        )
    def optional_tests_of(self, name):
        """Yield the tests in optional/`name`.json."""
        return self._tests_in(
            subject=name,
            path=self._path.descendant(["optional", name + ".json"]),
        )
    def _tests_in(self, subject, path):
        """Yield a `_Test` for every test of every case in `path`."""
        for each in json.loads(path.getContent().decode("utf-8")):
            for test in each["tests"]:
                yield _Test(
                    collection=self,
                    subject=subject,
                    case_description=each["description"],
                    schema=each["schema"],
                    remotes=self._remotes,
                    **test
                )
@attr.s(hash=True, repr=False)
class _Test(object):
    """
    A single suite test: validate `data` against `schema` and
    expect success iff `valid` is true.
    """
    # The owning `Collection`.
    collection = attr.ib()
    # Name of the *.json file the test came from (without suffix).
    subject = attr.ib()
    case_description = attr.ib()
    description = attr.ib()
    # The instance to validate.
    data = attr.ib()
    schema = attr.ib(repr=False)
    # Whether validation is expected to succeed.
    valid = attr.ib()
    # Mapping of remote ref URLs to schemas.
    _remotes = attr.ib()
    def __repr__(self):  # pragma: no cover
        return "<Test {}>".format(self.fully_qualified_name)
    @property
    def fully_qualified_name(self):  # pragma: no cover
        """Human-readable "collection > subject > case > test" name."""
        return " > ".join(
            [
                self.collection.name,
                self.subject,
                self.case_description,
                self.description,
            ]
        )
    def to_unittest_method(self, **kwargs):
        """Return a function usable as a unittest test method."""
        # Build an identifier-safe method name from the descriptions.
        name = "test_%s_%s_%s" % (
            self.subject,
            re.sub(r"[\W ]+", "_", self.case_description),
            re.sub(r"[\W ]+", "_", self.description),
        )
        if not PY3:  # pragma: no cover
            # Method names must be bytestrings on Python 2.
            name = name.encode("utf-8")
        if self.valid:
            def fn(this):
                self.validate(**kwargs)
        else:
            def fn(this):
                with this.assertRaises(jsonschema.ValidationError):
                    self.validate(**kwargs)
        fn.__name__ = name
        return fn
    def validate(self, Validator=None, **kwargs):
        """Validate `data` against `schema`, resolving remote refs
        from the in-memory `_remotes` store."""
        resolver = jsonschema.RefResolver.from_schema(
            schema=self.schema, store=self._remotes,
        )
        jsonschema.validate(
            instance=self.data,
            schema=self.schema,
            cls=Validator,
            resolver=resolver,
            **kwargs
        )
    def validate_ignoring_errors(self, **kwargs):
        """Validate, swallowing validation failures (for benchmarks)."""
        try:
            self.validate(**kwargs)
        except jsonschema.ValidationError:
            pass
Split this up in prep.
"""
Python representations of the JSON Schema Test Suite tests.
"""
import json
import os
import re
import subprocess
import sys
from bp.filepath import FilePath
from pyrsistent import pmap
import attr
from jsonschema.compat import PY3
from jsonschema.validators import validators
import jsonschema
def _find_suite():
    """Find the JSON-Schema-Test-Suite checkout directory.

    Prefers the JSON_SCHEMA_TEST_SUITE environment variable; failing
    that, looks for a "json" directory next to the jsonschema
    package.  Raises ValueError when neither is present.
    """
    from_env = os.environ.get("JSON_SCHEMA_TEST_SUITE")
    if from_env is not None:
        return FilePath(from_env)
    sibling = FilePath(jsonschema.__file__).parent().sibling("json")
    if sibling.isdir():
        return sibling
    raise ValueError(
        (
            "Can't find the JSON-Schema-Test-Suite directory. "
            "Set the 'JSON_SCHEMA_TEST_SUITE' environment "
            "variable or run the tests from alongside a checkout "
            "of the suite."
        ),
    )
@attr.s(hash=True)
class Suite(object):
    """
    The whole JSON-Schema-Test-Suite checkout.
    """
    # Root directory of the suite checkout (a FilePath).
    _root = attr.ib(default=attr.Factory(_find_suite))
    def _remotes(self):
        """
        Return the suite's remote schemas (localhost ref URL ->
        schema dict) via the suite's `bin/jsonschema_suite remotes`
        helper command.
        """
        jsonschema_suite = self._root.descendant(["bin", "jsonschema_suite"])
        remotes = subprocess.check_output(
            [sys.executable, jsonschema_suite.path, "remotes"],
        )
        return {
            "http://localhost:1234/" + name: schema
            for name, schema in json.loads(remotes.decode("utf-8")).items()
        }
    def benchmark(self, runner):
        """Benchmark every collection (one per known validator)."""
        for name in validators:
            self.collection(name=name).benchmark(runner=runner)
    def collection(self, name):
        """Return the `Collection` for the draft directory `name`."""
        return Collection(
            name=name,
            path=self._root.descendant(["tests", name]),
            remotes=self._remotes(),
        )
@attr.s(hash=True)
class Collection(object):
    """
    The tests for a single draft (one directory of the test suite).
    """
    # Filesystem path of the draft's test directory (a FilePath).
    _path = attr.ib()
    # Mapping of remote ref URLs to schemas, shared by all tests.
    _remotes = attr.ib()
    # The draft name, e.g. "draft4".
    name = attr.ib()
    def benchmark(self, runner):
        """Benchmark every test in this collection using `runner`."""
        for test in self.tests():
            runner.bench_func(
                name=test.fully_qualified_name,
                func=test.validate_ignoring_errors,
            )
    def tests(self):
        """Yield a `_Test` for every case in every *.json file."""
        return (
            test
            for child in self._path.globChildren("*.json")
            for test in self._tests_in(
                # Strip the ".json" suffix to get the subject name.
                subject=child.basename()[:-5],
                path=child,
            )
        )
    def format_tests(self):
        """Yield the tests from the optional/format directory."""
        path = self._path.descendant(["optional", "format"])
        return (
            test
            for child in path.globChildren("*.json")
            for test in self._tests_in(
                subject=child.basename()[:-5],
                path=child,
            )
        )
    def tests_of(self, name):
        """Yield the tests in the file `name`.json."""
        return self._tests_in(
            subject=name,
            path=self._path.child(name + ".json"),
        )
    def optional_tests_of(self, name):
        """Yield the tests in optional/`name`.json."""
        return self._tests_in(
            subject=name,
            path=self._path.descendant(["optional", name + ".json"]),
        )
    def _tests_in(self, subject, path):
        """Yield a `_Test` for every test of every case in `path`."""
        for each in json.loads(path.getContent().decode("utf-8")):
            for test in each["tests"]:
                yield _Test(
                    collection=self,
                    subject=subject,
                    case_description=each["description"],
                    schema=each["schema"],
                    remotes=self._remotes,
                    **test
                )
@attr.s(hash=True, repr=False)
class _Test(object):
    """
    A single suite test: validate `data` against `schema` and
    expect success iff `valid` is true.
    """
    # The owning `Collection`.
    collection = attr.ib()
    # Name of the *.json file the test came from (without suffix).
    subject = attr.ib()
    case_description = attr.ib()
    description = attr.ib()
    # The instance to validate.
    data = attr.ib()
    schema = attr.ib(repr=False)
    # Whether validation is expected to succeed.
    valid = attr.ib()
    # Mapping of remote ref URLs to schemas.
    _remotes = attr.ib()
    def __repr__(self):  # pragma: no cover
        return "<Test {}>".format(self.fully_qualified_name)
    @property
    def fully_qualified_name(self):  # pragma: no cover
        """Human-readable "collection > subject > case > test" name."""
        return " > ".join(
            [
                self.collection.name,
                self.subject,
                self.case_description,
                self.description,
            ]
        )
    @property
    def method_name(self):
        """An identifier-safe unittest method name built from the
        subject and descriptions."""
        name = "test_%s_%s_%s" % (
            self.subject,
            re.sub(r"[\W ]+", "_", self.case_description),
            re.sub(r"[\W ]+", "_", self.description),
        )
        if not PY3:  # pragma: no cover
            # Method names must be bytestrings on Python 2.
            name = name.encode("utf-8")
        return name
    def to_unittest_method(self, **kwargs):
        """Return a function usable as a unittest test method."""
        if self.valid:
            def fn(this):
                self.validate(**kwargs)
        else:
            def fn(this):
                with this.assertRaises(jsonschema.ValidationError):
                    self.validate(**kwargs)
        fn.__name__ = self.method_name
        return fn
    def validate(self, Validator=None, **kwargs):
        """Validate `data` against `schema`, resolving remote refs
        from the in-memory `_remotes` store."""
        resolver = jsonschema.RefResolver.from_schema(
            schema=self.schema, store=self._remotes,
        )
        jsonschema.validate(
            instance=self.data,
            schema=self.schema,
            cls=Validator,
            resolver=resolver,
            **kwargs
        )
    def validate_ignoring_errors(self, **kwargs):
        """Validate, swallowing validation failures (for benchmarks)."""
        try:
            self.validate(**kwargs)
        except jsonschema.ValidationError:
            pass
|
import collections
import os
import h5py
import numpy as np
from shutil import copyfile
from annotypes import add_call_types, TYPE_CHECKING, Anno
from malcolm.core import APartName, Info, PartRegistrar
from malcolm.modules import builtin, scanning, pmac, pandablocks
if TYPE_CHECKING:
from typing import List, Dict
PartInfo = Dict[str, List[Info]]
# Pull re-used annotypes into our namespace in case we are subclassed
APartName = APartName
AMri = builtin.parts.AMri
# Annotated parameter types for this part's constructor.
with Anno("name of CS port"):
    ACsPort = str
with Anno("mri suffix of malcolm CS block [$(pmac_mri):$(suffix)]"):
    ACsMriSuffix = str
with Anno("mri suffix of malcolm Status block [$(pmac_mri):$(suffix)]"):
    AStatusMriSuffix = str
# A recorded P-variable: HDF dataset path, source file, and P number.
PVar = collections.namedtuple('PVar', 'path file p_number')
def MRES_VAR(axis):
    """Name of the pmac P-variable holding the MRES value for `axis`
    (axis number zero-padded to two digits, e.g. 1 -> "P4801")."""
    return "P48{:02d}".format(axis)
def OFFSET_VAR(axis):
    """Name of the pmac P-variable holding the offset value for `axis`
    (axis number zero-padded to two digits, e.g. 1 -> "P4901")."""
    return "P49{:02d}".format(axis)
# We will set these attributes on the child block, so don't save them
class KinematicsSavuPart(builtin.parts.ChildPart):
"""Part for writing out files to send to Savu for post processing
of forward kinematics. Creates the following files:
- <ID>-savu.nxs - Input data file for Savu. Links to Panda data, and
datasets which contain the kinematics code and variables.
- <ID>-savu_pl.nxs - Savu process list, copied from /kinematics directory
- <ID>-vds.nxs - VDS file linking to Savu processed data (when processed)
"""
def __init__(self, name, mri, cs_port=None, cs_mri_suffix=":CS", status_mri_suffix=":STATUS"):
    # type: (APartName, AMri, ACsPort, ACsMriSuffix, AStatusMriSuffix) -> None
    """Create the part.

    `cs_port` names the coordinate system port to use (when None,
    one is chosen during configure); `cs_mri_suffix` and
    `status_mri_suffix` give the mri suffixes of the malcolm CS and
    Status blocks relative to the pmac mri.
    """
    super(KinematicsSavuPart, self).__init__(name, mri, stateful=False)
    # Output file paths, filled in during configure().
    self.nxs_full_filename = ""
    self.vds_full_filename = ""
    self.savu_pl_filename = ""
    self.savu_full_filename = ""
    # Kinematics program lines and variables gathered at configure time.
    self.savu_code_lines = []
    self.savu_variables = {}
    self.q_value_mapping = {}
    self.p_vars = []
    self.use_min_max = True
    self.savu_file = None
    # Tables read from the pmac block during configure().
    self.layout_table = None
    self.pos_table = None
    # Caller-supplied configuration.
    self.cs_port = cs_port
    self.cs_mri_suffix = cs_mri_suffix
    self.status_mri_suffix = status_mri_suffix
    # Scan state, resolved during configure().
    self.shape = None
    self.pmac_mri = None
    self.panda_mri = None
    self.axis_numbers = {}
    self.generator = None
def setup(self, registrar):
    # type: (PartRegistrar) -> None
    """Register this part's hooks and configure parameters with
    `registrar`."""
    super(KinematicsSavuPart, self).setup(registrar)
    # Tell the controller to expose some extra configure parameters
    registrar.report(scanning.hooks.ConfigureHook.create_info(
        self.configure))
    # Hooks: run configure() at configure time and post_configure()
    # afterwards.
    registrar.hook(scanning.hooks.ConfigureHook, self.configure)
    registrar.hook(scanning.hooks.PostConfigureHook, self.post_configure)
# Allow CamelCase as these parameters will be serialized
# noinspection PyPep8Naming
@add_call_types
def configure(self,
context, # type: scanning.hooks.AContext
fileDir, # type: scanning.hooks.AFileDir
generator, # type: scanning.hooks.AGenerator
axesToMove, # type: scanning.hooks.AAxesToMove
part_info, # type: scanning.hooks.APartInfo
fileTemplate="%s.nxs", # type: scanning.hooks.AFileTemplate
):
# type: (...) -> scanning.hooks.UInfos
self.p_vars = []
self.use_min_max = True
self.savu_variables = {}
self.savu_code_lines = []
self.shape = generator.shape
self.q_value_mapping = {}
self.generator = generator
# On initial configure, expect to get the demanded number of frames
child = context.block_view(self.mri)
self.pmac_mri = child.pmac.value
self.panda_mri = child.panda.value
# Derive file path from template
baseTemplate = os.path.splitext(fileTemplate)[0]
# Create the various nexus files to pass to Savu and expected output
fileName = (baseTemplate % "savu") + ".nxs"
vds_fileName = (baseTemplate % "kinematics-vds") + ".nxs"
savu_pl_fileName = (baseTemplate % "savu_pl") + ".nxs"
savu_fileName = (baseTemplate % "savu_processed") + ".nxs"
# This is path to the file to pass to Savu
self.nxs_full_filename = os.path.join(fileDir, fileName)
# This is path to the process list file to pass to Savu
self.savu_pl_filename = os.path.join(fileDir, savu_pl_fileName)
# This is the path to the VDS file which links to the processed Savu
# file with the output datasets
self.vds_full_filename = os.path.join(fileDir, vds_fileName)
# This is the path the the processed file created by Savu after having
# done the processing
savu_rel_path = os.path.join((baseTemplate % "savuproc"), savu_fileName)
self.savu_full_filename = os.path.join(fileDir, savu_rel_path)
# Get the cs port mapping for this PMAC
# {scannable: MotorInfo}
self.layout_table = context.block_view(self.pmac_mri).layout.value
axis_mapping = pmac.util.cs_axis_mapping(
context, self.layout_table, axesToMove
)
if self.cs_port is None:
# All axes will be in the same cs_port so just use the first
for mapping in axis_mapping.values():
self.cs_port = mapping.cs_port
break
# Create the mapping of output q variables to axis names
for mapping in axis_mapping.values():
if mapping.cs_axis in pmac.util.CS_AXIS_NAMES:
q_value = pmac.util.CS_AXIS_NAMES.index(mapping.cs_axis) + 1
self.q_value_mapping[q_value] = mapping.scannable
assert "." in self.nxs_full_filename, \
"File extension for %r should be supplied" % self.nxs_full_filename
self.pos_table = context.block_view(self.panda_mri).positions.value
# Get the axis number for the inverse kinematics mapped in this cs_port
self.axis_numbers = pmac.util.cs_axis_numbers(
context, self.layout_table, self.cs_port
)
produced_datasets = []
print(self.q_value_mapping)
print(self.axis_numbers)
dtypes = ["mean"]
for scannable, axis_num in self.axis_numbers.items():
dataset_i = None
for ind, name in enumerate(self.pos_table.datasetName):
if name == scannable:
pos_type = self.pos_table.capture[ind]
if pos_type == pandablocks.util.PositionCapture.MIN_MAX_MEAN:
dataset_i = ind
elif pos_type == pandablocks.util.PositionCapture.MEAN or\
pos_type == pandablocks.util.PositionCapture.VALUE:
dataset_i = ind
self.use_min_max = False
# Check there was a dataset for the axis
assert dataset_i, "No value dataset for %s" % scannable
if self.use_min_max:
dtypes += ["min", "max"]
for axis in self.q_value_mapping[axis_num + 1].values():
for dtype in dtypes:
PATH='/entry/' + axis + "." + dtype
produced_datasets += [
scanning.infos.DatasetProducedInfo(
name + "." + dtype,
savu_rel_path, info.type,
info.rank, PATH, None
)]
print(produced_datasets)
return produced_datasets
@add_call_types
def post_configure(self, context, part_info):
# type: (scanning.hooks.AContext, scanning.hooks.APartInfo) -> None
# Get the axis number for the inverse kinematics mapped in this cs_port
self.axis_numbers = pmac.util.cs_axis_numbers(
context, self.layout_table, self.cs_port
)
# Map these in the file
dataset_infos = scanning.infos.DatasetProducedInfo.filter_values(
part_info
)
for scannable, axis_num in self.axis_numbers.items():
min_i, max_i, value_i = None, None, None
for info in dataset_infos:
if info.name.startswith(scannable + "."):
if info.type == scanning.infos.DatasetType.POSITION_MIN:
min_i = info
elif info.type == scanning.infos.DatasetType.POSITION_MAX:
max_i = info
elif info.type == scanning.infos.DatasetType.POSITION_VALUE:
value_i = info
# Always make sure .value is there
assert value_i, "No value dataset for %s" % scannable
self.p_vars.append(PVar(
path=value_i.path, file=value_i.filename,
p_number="p%dmean" % axis_num)
)
if min_i and max_i:
self.p_vars.append(
PVar(
path=min_i.path, file=min_i.filename,
p_number="p%dmin" % axis_num
)
)
self.p_vars.append(PVar(path=max_i.path, file=max_i.filename,
p_number="p%dmax" % axis_num))
else:
self.use_min_max = False
# Get Forward Kinematics code lines and I,P,M,Q input variables
pmac_status_child = context.block_view(self.pmac_mri + ":STATUS")
raw_input_vars = " ".join([pmac_status_child.iVariables.value,
pmac_status_child.pVariables.value,
pmac_status_child.mVariables.value])
pmac_cs_child = context.block_view(
self.pmac_mri + ":" + self.cs_mri_suffix)
raw_kinematics_program_code = pmac_cs_child.forwardKinematic.value
raw_input_vars += " " + pmac_cs_child.qVariables.value
self.savu_code_lines = raw_kinematics_program_code.splitlines()
self.parse_input_variables(raw_input_vars)
self.create_files()
def check_mres_and_pos(self, split_var):
# if PandA has MRES/Offset, clear P variable so it isn't applied twice
for scannable, axis_num in self.axis_numbers.items():
posbus_ind = self.pos_table.datasetName.index(scannable)
panda_mres = self.pos_table.scale[posbus_ind]
panda_offset = self.pos_table.offset[posbus_ind]
if split_var[0] == MRES_VAR(axis_num):
if panda_mres != 1.0:
split_var[1] = "1.0"
elif split_var[0] == OFFSET_VAR(axis_num):
if panda_offset != 0.0:
split_var[1] = "0.0"
return split_var
def parse_input_variables(self, raw_input_vars):
try:
for var in raw_input_vars.split(' '):
if var:
split_var = var.split('=')
# ignore any values in hex
if not split_var[1].startswith('$'):
split_var = self.check_mres_and_pos(split_var)
self.savu_variables[split_var[0]] = split_var[1]
except IndexError:
raise ValueError("Error getting kinematic input variables from %s"
% raw_input_vars)
    def create_files(self):
        """ Create the files that will be used by Savu
        - <ID>-savu.nxs - Input data file for Savu. Links to Panda data, and
          datasets which contain the kinematics code and variables, and
          whether to use min, mean and max datasets, or just the mean.
        - <ID>-savu_pl.nxs - Savu process list
        - <ID>-vds.nxs - VDS file linking to Savu processed data
        """
        # Create the -savu.nxs file which contains the input data for Savu
        with h5py.File(self.nxs_full_filename, 'w',
                       libver="latest") as savu_file:
            savu_file.attrs['default'] = 'entry'
            nxentry = savu_file.create_group('entry')
            nxentry.attrs["NX_class"] = 'NXentry'
            nxentry.attrs['default'] = 'inputs'
            nxcollection = nxentry.create_group('inputs')
            nxcollection.attrs["NX_class"] = 'NXcollection'
            # Program code lines dataset (variable-length strings)
            program_dset = nxcollection.create_dataset(
                'program', (len(self.savu_code_lines),),
                h5py.special_dtype(vlen=str)
            )
            program_dset[...] = self.savu_code_lines
            program_dset.attrs['long_name'] = 'Kinematic Program lines'
            # Fixed variables dataset: compound (name, float value) records
            comp_type = np.dtype(
                [('Name', h5py.special_dtype(vlen=str)), ('Value', 'f')]
            )
            data = np.array(list(self.savu_variables.items()), dtype=comp_type)
            variables_dset = nxcollection.create_dataset("variables",
                                                         (len(data),),
                                                         comp_type)
            variables_dset.attrs['long_name'] = 'Fixed program variables'
            variables_dset[...] = data
            # Use MinMax dataset: single bool flag read by the Savu plugin
            minmax_data = np.array([self.use_min_max])
            minmax_dset = nxcollection.create_dataset("use_minmax",
                                                      data=minmax_data)
            minmax_dset.attrs['long_name'] = 'Use min and max dataset'
            # Link to external P values (datasets live in the PandA files)
            for p_var in self.p_vars:
                savu_file[
                    u"/entry/inputs/" + p_var.p_number] = h5py.ExternalLink(
                    p_var.file, p_var.path)
        # Create Savu plugin list file: copy the template process list that
        # ships in the kinematics directory next to this package
        src = os.path.realpath(__file__)
        src = os.path.dirname(src)
        if self.use_min_max:
            kinematics_file = "min_mean_max.nxs"
        else:
            kinematics_file = "only_mean.nxs"
        src = os.path.join(src, "..", "kinematics", kinematics_file)
        copyfile(src, self.savu_pl_filename)
        # Create the finished VDS file which links to the processed Savu data
        self.create_vds_file()
def create_vds_file(self):
"""Create the VDS file that points to the processed savu files.
Assumes that savu is called with the argument to specify the location
of the processed data is in a data folder with the suffix '-savuproc'
"""
virtual_shape = (9,) + self.shape
with h5py.File(self.vds_full_filename, 'w', libver='latest') as f:
f.require_group('/entry/')
if self.use_min_max:
datatypes = ['min', 'mean', 'max']
else:
datatypes = ['mean']
for datatype in datatypes:
for i in range(9):
layout = \
h5py.VirtualLayout(shape=self.shape, dtype=np.float)
v_source = h5py.VirtualSource(
self.savu_full_filename,
'/entry/final_result_q%s/data' % datatype,
shape=virtual_shape
)
layout[:] = v_source[i]
# Use axis name if have it, otherwise use raw Q number
if i + 1 in self.q_value_mapping:
f.create_virtual_dataset(
'/entry/' + self.q_value_mapping[i + 1] + datatype,
layout, fillvalue=-1
)
# Add any setpoint dimensions
for dim in self.generator.dimensions:
for axis in dim.axes:
f.create_dataset(
name="/entry/%s_set/%s.value_set" % (axis, axis),
dtype=np.float64,
data=[p for p in dim.get_positions(axis)]
)
# NOTE: stray VCS commit message ("fix erroneous dict index") fused into the
# file during extraction - kept as a comment so the module parses
import collections
import os
import h5py
import numpy as np
from shutil import copyfile
from annotypes import add_call_types, TYPE_CHECKING, Anno
from malcolm.core import APartName, Info, PartRegistrar
from malcolm.modules import builtin, scanning, pmac, pandablocks
# Typing-only imports (kept out of the runtime path, annotypes convention)
if TYPE_CHECKING:
    from typing import List, Dict
    PartInfo = Dict[str, List[Info]]
# Pull re-used annotypes into our namespace in case we are subclassed
APartName = APartName
AMri = builtin.parts.AMri
# Anno declarations generate argument metadata for __init__/configure
with Anno("name of CS port"):
    ACsPort = str
with Anno("mri suffix of malcolm CS block [$(pmac_mri):$(suffix)]"):
    ACsMriSuffix = str
with Anno("mri suffix of malcolm Status block [$(pmac_mri):$(suffix)]"):
    AStatusMriSuffix = str
# One Savu "p variable" input: dataset location (file + HDF5 path) and the
# identifier (e.g. "p1mean") it is exposed as in the Savu input file
PVar = collections.namedtuple('PVar', 'path file p_number')
def MRES_VAR(axis):
    """Name of the PMAC P variable carrying the MRES of CS axis *axis*."""
    return "P48" + format(axis, "02d")
def OFFSET_VAR(axis):
    """Name of the PMAC P variable carrying the offset of CS axis *axis*."""
    return "P49" + format(axis, "02d")
# We will set these attributes on the child block, so don't save them
class KinematicsSavuPart(builtin.parts.ChildPart):
"""Part for writing out files to send to Savu for post processing
of forward kinematics. Creates the following files:
- <ID>-savu.nxs - Input data file for Savu. Links to Panda data, and
datasets which contain the kinematics code and variables.
- <ID>-savu_pl.nxs - Savu process list, copied from /kinematics directory
- <ID>-vds.nxs - VDS file linking to Savu processed data (when processed)
"""
    def __init__(self, name, mri, cs_port=None, cs_mri_suffix=":CS", status_mri_suffix=":STATUS"):
        # type: (APartName, AMri, ACsPort, ACsMriSuffix, AStatusMriSuffix) -> None
        """Create the part.

        Args:
            name: Part name
            mri: Malcolm resource id of the child block
            cs_port: CS port name; if None, taken from the axis mapping
                during configure()
            cs_mri_suffix: Suffix appended to the pmac mri for its CS block
            status_mri_suffix: Suffix appended to the pmac mri for its
                Status block
        """
        super(KinematicsSavuPart, self).__init__(name, mri, stateful=False)
        # Output file paths, derived in configure()
        self.nxs_full_filename = ""
        self.vds_full_filename = ""
        self.savu_pl_filename = ""
        self.savu_full_filename = ""
        # Kinematics program lines and fixed input variables for Savu
        self.savu_code_lines = []
        self.savu_variables = {}
        # Maps Q variable number -> scannable name
        self.q_value_mapping = {}
        # PVar records linking Savu inputs to captured datasets
        self.p_vars = []
        # Cleared when any axis captures only mean/value (no min/max)
        self.use_min_max = True
        self.savu_file = None
        # Cached pmac layout table and PandA positions table
        self.layout_table = None
        self.pos_table = None
        self.cs_port = cs_port
        self.cs_mri_suffix = cs_mri_suffix
        self.status_mri_suffix = status_mri_suffix
        # Scan shape and child mris, filled in by configure()
        self.shape = None
        self.pmac_mri = None
        self.panda_mri = None
        # Maps scannable name -> CS axis number
        self.axis_numbers = {}
        self.generator = None
    def setup(self, registrar):
        # type: (PartRegistrar) -> None
        """Attach configure/post-configure hooks and report extra parameters."""
        super(KinematicsSavuPart, self).setup(registrar)
        # Tell the controller to expose some extra configure parameters
        registrar.report(scanning.hooks.ConfigureHook.create_info(
            self.configure))
        # Hooks
        registrar.hook(scanning.hooks.ConfigureHook, self.configure)
        registrar.hook(scanning.hooks.PostConfigureHook, self.post_configure)
# Allow CamelCase as these parameters will be serialized
# noinspection PyPep8Naming
    @add_call_types
    def configure(self,
                  context,  # type: scanning.hooks.AContext
                  fileDir,  # type: scanning.hooks.AFileDir
                  generator,  # type: scanning.hooks.AGenerator
                  axesToMove,  # type: scanning.hooks.AAxesToMove
                  part_info,  # type: scanning.hooks.APartInfo
                  fileTemplate="%s.nxs",  # type: scanning.hooks.AFileTemplate
                  ):
        # type: (...) -> scanning.hooks.UInfos
        """Derive Savu file paths, build axis mappings and report datasets.

        Resets all per-scan state, then returns DatasetProducedInfo entries
        for the datasets Savu will eventually produce.
        """
        # Reset per-scan state so a re-configure starts clean
        self.p_vars = []
        self.use_min_max = True
        self.savu_variables = {}
        self.savu_code_lines = []
        self.shape = generator.shape
        self.q_value_mapping = {}
        self.generator = generator
        # On initial configure, expect to get the demanded number of frames
        child = context.block_view(self.mri)
        self.pmac_mri = child.pmac.value
        self.panda_mri = child.panda.value
        # Derive file path from template
        baseTemplate = os.path.splitext(fileTemplate)[0]
        # Create the various nexus files to pass to Savu and expected output
        fileName = (baseTemplate % "savu") + ".nxs"
        vds_fileName = (baseTemplate % "kinematics-vds") + ".nxs"
        savu_pl_fileName = (baseTemplate % "savu_pl") + ".nxs"
        savu_fileName = (baseTemplate % "savu_processed") + ".nxs"
        # This is path to the file to pass to Savu
        self.nxs_full_filename = os.path.join(fileDir, fileName)
        # This is path to the process list file to pass to Savu
        self.savu_pl_filename = os.path.join(fileDir, savu_pl_fileName)
        # This is the path to the VDS file which links to the processed Savu
        # file with the output datasets
        self.vds_full_filename = os.path.join(fileDir, vds_fileName)
        # This is the path the the processed file created by Savu after having
        # done the processing
        savu_rel_path = os.path.join((baseTemplate % "savuproc"), savu_fileName)
        self.savu_full_filename = os.path.join(fileDir, savu_rel_path)
        # Get the cs port mapping for this PMAC
        # {scannable: MotorInfo}
        self.layout_table = context.block_view(self.pmac_mri).layout.value
        axis_mapping = pmac.util.cs_axis_mapping(
            context, self.layout_table, axesToMove
        )
        if self.cs_port is None:
            # All axes will be in the same cs_port so just use the first
            for mapping in axis_mapping.values():
                self.cs_port = mapping.cs_port
                break
        # Create the mapping of output q variables to axis names
        for mapping in axis_mapping.values():
            if mapping.cs_axis in pmac.util.CS_AXIS_NAMES:
                q_value = pmac.util.CS_AXIS_NAMES.index(mapping.cs_axis) + 1
                self.q_value_mapping[q_value] = mapping.scannable
        assert "." in self.nxs_full_filename, \
            "File extension for %r should be supplied" % self.nxs_full_filename
        self.pos_table = context.block_view(self.panda_mri).positions.value
        # Get the axis number for the inverse kinematics mapped in this cs_port
        self.axis_numbers = pmac.util.cs_axis_numbers(
            context, self.layout_table, self.cs_port
        )
        produced_datasets = []
        # NOTE(review): debug prints left in - consider removing or using a
        # logger before this goes to production
        print(self.q_value_mapping)
        print(self.axis_numbers)
        dtypes = ["mean"]
        for scannable, axis_num in self.axis_numbers.items():
            dataset_i = None
            for ind, name in enumerate(self.pos_table.datasetName):
                if name == scannable:
                    pos_type = self.pos_table.capture[ind]
                    if pos_type == pandablocks.util.PositionCapture.MIN_MAX_MEAN:
                        dataset_i = ind
                    elif pos_type == pandablocks.util.PositionCapture.MEAN or\
                            pos_type == pandablocks.util.PositionCapture.VALUE:
                        dataset_i = ind
                        self.use_min_max = False
            # Check there was a dataset for the axis
            # NOTE(review): row 0 is falsy, so this assert wrongly rejects a
            # match in the first table row - should be "is not None"
            assert dataset_i, "No value dataset for %s" % scannable
            # NOTE(review): dtypes accumulates "min"/"max" once per scannable,
            # producing duplicates for multi-axis scans - confirm intended
            if self.use_min_max:
                dtypes += ["min", "max"]
            # NOTE(review): "name" below leaks from the enumerate loop above
            # and "info" is not defined anywhere in this scope, so this branch
            # raises NameError if executed - confirm the intended source of
            # the dataset name/type/rank
            for axis in self.q_value_mapping.values():
                for dtype in dtypes:
                    PATH='/entry/' + axis + "." + dtype
                    produced_datasets += [
                        scanning.infos.DatasetProducedInfo(
                            name + "." + dtype,
                            savu_rel_path, info.type,
                            info.rank, PATH, None
                        )]
        print(produced_datasets)
        return produced_datasets
    @add_call_types
    def post_configure(self, context, part_info):
        # type: (scanning.hooks.AContext, scanning.hooks.APartInfo) -> None
        """Collect dataset infos and PMAC kinematics, then write the files.

        Runs after all parts have configured, so the PandA's
        DatasetProducedInfo entries are available in part_info.
        """
        # Get the axis number for the inverse kinematics mapped in this cs_port
        self.axis_numbers = pmac.util.cs_axis_numbers(
            context, self.layout_table, self.cs_port
        )
        # Map these in the file
        dataset_infos = scanning.infos.DatasetProducedInfo.filter_values(
            part_info
        )
        for scannable, axis_num in self.axis_numbers.items():
            min_i, max_i, value_i = None, None, None
            # Locate the min/max/value dataset infos for this scannable
            for info in dataset_infos:
                if info.name.startswith(scannable + "."):
                    if info.type == scanning.infos.DatasetType.POSITION_MIN:
                        min_i = info
                    elif info.type == scanning.infos.DatasetType.POSITION_MAX:
                        max_i = info
                    elif info.type == scanning.infos.DatasetType.POSITION_VALUE:
                        value_i = info
            # Always make sure .value is there
            assert value_i, "No value dataset for %s" % scannable
            self.p_vars.append(PVar(
                path=value_i.path, file=value_i.filename,
                p_number="p%dmean" % axis_num)
            )
            if min_i and max_i:
                self.p_vars.append(
                    PVar(
                        path=min_i.path, file=min_i.filename,
                        p_number="p%dmin" % axis_num
                    )
                )
                self.p_vars.append(PVar(path=max_i.path, file=max_i.filename,
                                        p_number="p%dmax" % axis_num))
            else:
                # No min/max captured for this axis: fall back to mean-only
                self.use_min_max = False
        # Get Forward Kinematics code lines and I,P,M,Q input variables
        # NOTE(review): the Status suffix is hard-coded here although
        # __init__ takes status_mri_suffix (default ":STATUS") - confirm
        # whether self.status_mri_suffix should be used instead
        pmac_status_child = context.block_view(self.pmac_mri + ":STATUS")
        raw_input_vars = " ".join([pmac_status_child.iVariables.value,
                                   pmac_status_child.pVariables.value,
                                   pmac_status_child.mVariables.value])
        pmac_cs_child = context.block_view(
            self.pmac_mri + ":" + self.cs_mri_suffix)
        raw_kinematics_program_code = pmac_cs_child.forwardKinematic.value
        raw_input_vars += " " + pmac_cs_child.qVariables.value
        self.savu_code_lines = raw_kinematics_program_code.splitlines()
        self.parse_input_variables(raw_input_vars)
        self.create_files()
def check_mres_and_pos(self, split_var):
# if PandA has MRES/Offset, clear P variable so it isn't applied twice
for scannable, axis_num in self.axis_numbers.items():
posbus_ind = self.pos_table.datasetName.index(scannable)
panda_mres = self.pos_table.scale[posbus_ind]
panda_offset = self.pos_table.offset[posbus_ind]
if split_var[0] == MRES_VAR(axis_num):
if panda_mres != 1.0:
split_var[1] = "1.0"
elif split_var[0] == OFFSET_VAR(axis_num):
if panda_offset != 0.0:
split_var[1] = "0.0"
return split_var
def parse_input_variables(self, raw_input_vars):
try:
for var in raw_input_vars.split(' '):
if var:
split_var = var.split('=')
# ignore any values in hex
if not split_var[1].startswith('$'):
split_var = self.check_mres_and_pos(split_var)
self.savu_variables[split_var[0]] = split_var[1]
except IndexError:
raise ValueError("Error getting kinematic input variables from %s"
% raw_input_vars)
    def create_files(self):
        """ Create the files that will be used by Savu
        - <ID>-savu.nxs - Input data file for Savu. Links to Panda data, and
          datasets which contain the kinematics code and variables, and
          whether to use min, mean and max datasets, or just the mean.
        - <ID>-savu_pl.nxs - Savu process list
        - <ID>-vds.nxs - VDS file linking to Savu processed data
        """
        # Create the -savu.nxs file which contains the input data for Savu
        with h5py.File(self.nxs_full_filename, 'w',
                       libver="latest") as savu_file:
            savu_file.attrs['default'] = 'entry'
            nxentry = savu_file.create_group('entry')
            nxentry.attrs["NX_class"] = 'NXentry'
            nxentry.attrs['default'] = 'inputs'
            nxcollection = nxentry.create_group('inputs')
            nxcollection.attrs["NX_class"] = 'NXcollection'
            # Program code lines dataset (variable-length strings)
            program_dset = nxcollection.create_dataset(
                'program', (len(self.savu_code_lines),),
                h5py.special_dtype(vlen=str)
            )
            program_dset[...] = self.savu_code_lines
            program_dset.attrs['long_name'] = 'Kinematic Program lines'
            # Fixed variables dataset: compound (name, float value) records
            comp_type = np.dtype(
                [('Name', h5py.special_dtype(vlen=str)), ('Value', 'f')]
            )
            data = np.array(list(self.savu_variables.items()), dtype=comp_type)
            variables_dset = nxcollection.create_dataset("variables",
                                                         (len(data),),
                                                         comp_type)
            variables_dset.attrs['long_name'] = 'Fixed program variables'
            variables_dset[...] = data
            # Use MinMax dataset: single flag read by the Savu plugin
            minmax_data = np.array([self.use_min_max])
            minmax_dset = nxcollection.create_dataset("use_minmax",
                                                      data=minmax_data)
            minmax_dset.attrs['long_name'] = 'Use min and max dataset'
            # Link to external P values (datasets live in the PandA files)
            for p_var in self.p_vars:
                savu_file[
                    u"/entry/inputs/" + p_var.p_number] = h5py.ExternalLink(
                    p_var.file, p_var.path)
        # Create Savu plugin list file: copy the template process list that
        # ships in the kinematics directory next to this package
        src = os.path.realpath(__file__)
        src = os.path.dirname(src)
        if self.use_min_max:
            kinematics_file = "min_mean_max.nxs"
        else:
            kinematics_file = "only_mean.nxs"
        src = os.path.join(src, "..", "kinematics", kinematics_file)
        copyfile(src, self.savu_pl_filename)
        # Create the finished VDS file which links to the processed Savu data
        self.create_vds_file()
def create_vds_file(self):
"""Create the VDS file that points to the processed savu files.
Assumes that savu is called with the argument to specify the location
of the processed data is in a data folder with the suffix '-savuproc'
"""
virtual_shape = (9,) + self.shape
with h5py.File(self.vds_full_filename, 'w', libver='latest') as f:
f.require_group('/entry/')
if self.use_min_max:
datatypes = ['min', 'mean', 'max']
else:
datatypes = ['mean']
for datatype in datatypes:
for i in range(9):
layout = \
h5py.VirtualLayout(shape=self.shape, dtype=np.float)
v_source = h5py.VirtualSource(
self.savu_full_filename,
'/entry/final_result_q%s/data' % datatype,
shape=virtual_shape
)
layout[:] = v_source[i]
# Use axis name if have it, otherwise use raw Q number
if i + 1 in self.q_value_mapping:
f.create_virtual_dataset(
'/entry/' + self.q_value_mapping[i + 1] + datatype,
layout, fillvalue=-1
)
# Add any setpoint dimensions
for dim in self.generator.dimensions:
for axis in dim.axes:
f.create_dataset(
name="/entry/%s_set/%s.value_set" % (axis, axis),
dtype=np.float64,
data=[p for p in dim.get_positions(axis)]
)
# ---- (file boundary marker; was a stray "|" which is not valid Python) ----
# The MIT License (MIT)
#
# Copyright (c) 2015 WUSTL ZPLAB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors: Erik Hvatum <ice.rikh@gmail.com>
import datetime
import numpy
from PyQt5 import Qt
from string import Template
import textwrap
import time
from .basic_image import BasicImage
class _Property(property):
    # Derived from "property" for the sole reason that IPython's question-mark magic is special-cased for
    # properties. Deriving from property causes _Property to receive the same treatment, providing
    # useful output for something.prop? in IPython (where prop is a _Property instance).
    def __init__(self, properties, name, default_value_callback, transform_callback=None, pre_set_callback=None, post_set_callback=None, doc=None):
        """Descriptor with per-instance defaults and Qt change signalling.

        Args:
            properties: class-level registry list; self is appended to it
            name: public property name; backing attributes are "_<name>" and
                "_default_<name>", the change signal is "<name>_changed"
            default_value_callback: image -> default value
            transform_callback: optional (image, v) -> v applied on every set
            pre_set_callback: optional hook called before a real value change
            post_set_callback: optional hook called after a real value change
            doc: optional docstring shown by IPython's ? magic
        """
        self.name = name
        self.var_name = '_' + name
        self.default_val_var_name = '_default_' + name
        self.changed_signal_name = name + '_changed'
        self.default_value_callback = default_value_callback
        self.transform_callback = transform_callback
        self.pre_set_callback = pre_set_callback
        self.post_set_callback = post_set_callback
        if doc is not None:
            self.__doc__ = doc
        properties.append(self)

    @staticmethod
    def eq(a, b):
        # Equality that also copes with array-likes whose == is elementwise:
        # reduce a non-bool comparison result with all()
        r = a == b
        if isinstance(r, bool):
            return r
        else:
            return all(r)

    def instantiate(self, image):
        # Compute the initial per-instance default and forward this
        # property's change signal to the image's aggregate "changed" signal
        setattr(image, self.default_val_var_name, self.default_value_callback(image))
        getattr(image, self.changed_signal_name).connect(image.changed)

    def update_default(self, image):
        if hasattr(image, self.var_name):
            # An explicitly set value is overriding the default, so even if the default has changed, the apparent value of the property has not
            setattr(image, self.default_val_var_name, self.default_value_callback(image))
        else:
            # The default value is the apparent value, meaning that we must check if the default has changed and signal an apparent value change
            # if it has
            old_default = getattr(image, self.default_val_var_name)
            new_default = self.default_value_callback(image)
            if not self.eq(new_default, old_default):
                setattr(image, self.default_val_var_name, new_default)
                getattr(image, self.changed_signal_name).emit(image)

    def __get__(self, image, _=None):
        # Access via the class returns the descriptor itself (introspection)
        if image is None:
            return self
        try:
            return getattr(image, self.var_name)
        except AttributeError:
            # No explicit override set: fall back to the per-instance default
            return getattr(image, self.default_val_var_name)

    def __set__(self, image, v):
        if self.transform_callback is not None:
            v = self.transform_callback(image, v)
        # Only run callbacks and emit the signal on a real value change
        if not hasattr(image, self.var_name) or not self.eq(v, getattr(image, self.var_name)):
            if self.pre_set_callback is not None:
                self.pre_set_callback(image, v)
            setattr(image, self.var_name, v)
            if self.post_set_callback is not None:
                self.post_set_callback(image, v)
            getattr(image, self.changed_signal_name).emit(image)

    def __delete__(self, image):
        """Reset to default value by way of removing the explicitly set override, causing the apparent value to be default."""
        try:
            old_value = getattr(image, self.var_name)
            delattr(image, self.var_name)
            new_value = getattr(image, self.default_val_var_name)
            if not self.eq(old_value, new_value):
                if self.post_set_callback is not None:
                    self.post_set_callback(image, new_value)
                getattr(image, self.changed_signal_name).emit(image)
        except AttributeError:
            # Property was already using default value
            pass
class Image(BasicImage, Qt.QObject):
"""BasicImage's properties are all either computed from that ndarray, provide views into that ndarray's data (in the case of .data
and .data_T), or, in the special cases of .is_twelve_bit for uint16 images and .range for floating-point images, represent unenforced
constraints limiting the domain of valid values that are expected to be assumed by elements of the ndarray.
Image adds properties such as min/max/gamma scaling that control presentation of the image data contained by BasicImage, which
is a base class of Image.
In summary,
BasicImage: raw image data and essential information for interpreting that data in any context
Image: BasicImage + presentation data and metadata for RisWidget such as rescaling min/max/gamma values and an informative name
The changed signal is emitted when any property impacting image presentation is modified or image data is explicitly changed or refreshed.
In the case where any image appearence change should cause a function to be executed, do changed.connect(your_function) rather than
min_changed.connect(your_function); max_changed.connect(your_function); etc.
Although Image uses _Property descriptors, subclasses adding properties are not obligated
to use _Property to represent the additional properties. The regular @property decorator syntax or property(..) builtin
remain available - _Property provides an abstraction that is potentially convenient and worth understanding and using when
defining a large number of properties."""
    # Closed interval of gamma values accepted for presentation scaling
    GAMMA_RANGE = (0.0625, 16.0)
    # Maps image type string -> GLSL expression turning a sample "s" into RGBA
    IMAGE_TYPE_TO_GETCOLOR_EXPRESSION = {
        'G' : 'vec4(s.rrr, 1.0f)',
        'Ga' : 'vec4(s.rrr, s.g)',
        'rgb' : 'vec4(s.rgb, 1.0f)',
        'rgba': 's'}
    # GLSL snippet applying min/max rescale, gamma and tint to a fragment
    DEFAULT_TRANSFORM_SECTION = 'out_.rgb = pow((in_.rgb - rescale_min) / (rescale_range), gamma); out_.rgba *= tint;'
    # Blend functions adapted from http://dev.w3.org/SVG/modules/compositing/master/
    BLEND_FUNCTIONS = {
        'src' : ('dca = sca;',
                 'da = s.a;'),
        'src-over' : ('dca = sca + dca * (1.0f - s.a);',
                      'da = s.a + da - s.a * da;'),
        'dst-over' : ('dca = dca + sca * (1.0f - da);',
                      'da = s.a + da - s.a * da;'),
        'plus' : ('dca += sca;',
                  'da += s.a;'),
        'multiply' : ('dca = sca * dca + sca * (1.0f - da) + dca * (1.0f - s.a);',
                      'da = s.a + da - s.a * da;'),
        'screen' : ('dca = sca + dca - sca * dca;',
                    'da = s.a + da - s.a * da;'),
        'overlay' : ('isa = 1.0f - s.a; osa = 1.0f + s.a;',
                     'ida = 1.0f - da; oda = 1.0f + da;',
                     'sada = s.a * da;',
                     'for(i = 0; i < 3; ++i){',
                     ' dca[i] = (dca[i] + dca[i] <= da) ?',
                     ' (sca[i] + sca[i]) * dca[i] + sca[i] * ida + dca[i] * isa :',
                     ' sca[i] * oda + dca[i] * osa - (dca[i] + dca[i]) * sca[i] - sada;}',
                     'da = s.a + da - sada;'),
        'difference':('dca = (sca * da + dca * s.a - (sca + sca) * dca) + sca * (1.0f - da) + dca * (1.0f - s.a);',
                      'da = s.a + da - s.a * da;')}
    # Join each tuple of GLSL lines into a single annotated code string
    # (mutating values while iterating items() is safe: no keys are added)
    for k, v in BLEND_FUNCTIONS.items():
        BLEND_FUNCTIONS[k] = ' // blending function name: {}\n '.format(k) + '\n '.join(v)
    del k, v
    # A call to .set_data or a change to any mutable property potentially impacts image presentation. For convenience, changed is emitted whenever
    # .set_data or .refresh is called or any of the more specific mutable-property-changed signals are emitted.
    #
    # For example, this single call supports extensibility by subclassing:
    # image_instance.changed.connect(something.refresh)
    # And that single call replaces the following set of calls, which is not even complete if Image is subclassed:
    # image_instance.name_changed.connect(something.refresh)
    # image_instance.data_changed.connect(something.refresh)
    # image_instance.min_changed.connect(something.refresh)
    # image_instance.max_changed.connect(something.refresh)
    # image_instance.gamma_changed.connect(something.refresh)
    # image_instance.trilinear_filtering_enabled_changed.connect(something.refresh)
    # image_instance.auto_getcolor_expression_enabled_changed.connect(something.refresh)
    # image_instance.getcolor_expression_changed.connect(something.refresh)
    # image_instance.extra_transformation_expression_changed.connect(something.refresh)
    # image_instance.global_alpha_changed.connect(something.refresh)
    # image_instance.mute_enabled_changed.connect(something.refresh)
    #
    # In the __init__ function of any Image subclass that adds presentation-affecting properties
    # and associated change notification signals, do not forget to connect the subclass's change signals to changed.
    changed = Qt.pyqtSignal(object)
    data_changed = Qt.pyqtSignal(object)
    def __init__(self, data, is_twelve_bit=False, float_range=None, shape_is_width_height=True, name=None, parent=None):
        """Build an Image from raw data and wire up change signalling.

        data/is_twelve_bit/float_range/shape_is_width_height are forwarded to
        BasicImage.set_data; name becomes the Qt object name; parent is the
        Qt parent object.
        """
        Qt.QObject.__init__(self, parent)
        BasicImage.set_data(self, data, is_twelve_bit, float_range, shape_is_width_height)
        self._retain_auto_min_max_enabled_on_min_max_change = False
        # Give every registered _Property its per-instance default and route
        # its change signal into the aggregate "changed" signal
        for property in self.properties:
            property.instantiate(self)
        if name:
            self.setObjectName(name)
        if self.auto_min_max_enabled:
            self.do_auto_min_max()
        # Cache the GLSL blend snippet for the current blend function
        # (blend_function is presumably a _Property defined further down
        # the class - outside this view)
        self._blend_function_impl = self.BLEND_FUNCTIONS[self.blend_function]
        self.objectNameChanged.connect(lambda: self.name_changed.emit(self))
        self.name_changed.connect(self.changed)
        self.data_changed.connect(self.changed)
    def set_data(self, data, is_twelve_bit=False, float_range=None, shape_is_width_height=True, keep_name=True, name=None):
        """If keep_name is True, the existing name is not changed, and the value supplied for the name argument is ignored.
        If keep_name is False, the existing name is replaced with the supplied name or is cleared if supplied name is None
        or an empty string."""
        BasicImage.set_data(self, data, is_twelve_bit, float_range, shape_is_width_height)
        if not keep_name:
            self.name = name
        # New data may change each property's computed default, so refresh
        # them (signals fire only for apparent-value changes)
        for property in self.properties:
            property.update_default(self)
        if self.auto_min_max_enabled:
            self.do_auto_min_max()
        self.data_changed.emit(self)
    def refresh(self):
        """Re-run BasicImage.refresh, re-apply auto min/max if enabled, and
        emit data_changed (which also triggers the aggregate changed signal).
        """
        BasicImage.refresh(self)
        if self.auto_min_max_enabled:
            self.do_auto_min_max()
        self.data_changed.emit(self)
def generate_contextual_info_for_pos(self, x, y, idx=None):
if not self.visible:
return
sz = self.size
if 0 <= x < sz.width() and 0 <= y < sz.height():
type_ = self.type
num_channels = self.num_channels
name = self.name
mst = '' if idx is None else '{: 3}, '.format(idx)
if name:
mst += '"' + name + '", '
mst+= 'x:{} y:{} '.format(x, y)
vt = '(' + ' '.join((c + ':{}' for c in self.type)) + ')'
if num_channels == 1:
vt = vt.format(self.data[x, y])
else:
vt = vt.format(*self.data[x, y])
return mst+vt
    # Class-level registry of _Property descriptors; _Property.__init__ appends
    # each descriptor declared below to this list.
    properties = []
    visible = _Property(
        properties, 'visible',
        doc = textwrap.dedent(
            """\
            Generally, a non-visible image is not visible in the "main view" but does remain visible in specialized views,
            such as the histogram view and image stack table widget.
            In more detail:
            If an Image's visible property is False, that Image does not contribute to mixed output. For example,
            any single pixel in an ImageStackItem rendering may represent the result of blending a number of Images,
            whereas only one Image at a time may be associated with a HistogramItem; no HistogramItem pixel in the
            rendering of a HistogramItem is a function of more than one Image. Therefore, a non-visible Image that is part
            of a SignalingList that is associated with an ImageStackItem will not be visible in the output of that
            ImageStackItem's render function, although the histogram of the Image will still be visible in the output
            of the render function of a HistogramItem associated with the Image."""),
        default_value_callback = lambda image: True,
        transform_callback = lambda image, v: bool(v))
    def _auto_min_max_enabled_post_set(self, v):
        # Enabling auto min/max takes effect immediately rather than waiting
        # for the next data change.
        if v:
            self.do_auto_min_max()
    auto_min_max_enabled = _Property(
        properties, 'auto_min_max_enabled',
        default_value_callback = lambda image: False,
        transform_callback = lambda image, v: bool(v),
        post_set_callback = _auto_min_max_enabled_post_set)
    def _min_max_pre_set(self, v):
        # Validation hook shared by .min and .max: reject values outside the
        # image's representable range.
        r = self.range
        if not r[0] <= v <= r[1]:
            raise ValueError('min/max values for this image must be in the closed interval [{}, {}].'.format(*r))
    def _min_max_post_set(self, v, is_max):
        # Maintain the min <= max invariant by dragging the other endpoint along.
        if is_max:
            if v < self.min:
                self.min = v
        else:
            if v > self.max:
                self.max = v
        # A manual min/max edit disables auto scaling — unless the write came
        # from do_auto_min_max itself, which sets the retain flag first.
        if not self._retain_auto_min_max_enabled_on_min_max_change:
            self.auto_min_max_enabled = False
    min = _Property(
        properties, 'min',
        default_value_callback = lambda image: float(image.range[0]),
        transform_callback = lambda image, v: float(v),
        pre_set_callback = _min_max_pre_set,
        post_set_callback = lambda image, v, f=_min_max_post_set: f(image, v, False))
    max = _Property(
        properties, 'max',
        default_value_callback = lambda image: float(image.range[1]),
        transform_callback = lambda image, v: float(v),
        pre_set_callback = _min_max_pre_set,
        post_set_callback = lambda image, v, f=_min_max_post_set: f(image, v, True))
    def _gamma_pre_set(self, v):
        # Gamma is validated (raise, not clip) against GAMMA_RANGE.
        r = self.GAMMA_RANGE
        if not r[0] <= v <= r[1]:
            raise ValueError('gamma value must be in the closed interval [{}, {}].'.format(*r))
    gamma = _Property(
        properties, 'gamma',
        default_value_callback = lambda image: 1.0,
        transform_callback = lambda image, v: float(v),
        pre_set_callback = _gamma_pre_set)
    trilinear_filtering_enabled = _Property(
        properties, 'trilinear_filtering_enabled',
        default_value_callback = lambda image: True,
        transform_callback = lambda image, v: bool(v))
SHAD_PROP_HELP = textwrap.dedent("""\
The GLSL fragment shader used to render an ImageStackItem is generated by iterating through ImageStackItem.image_stack,
replacing the ${values} in the following template with with those of the Image (or
ImageStackItem.BLEND_FUNCTIONS[Image.blend_function] in the case of ${blend_function}) at each iteration and
appending the resulting text to a string. The accumulated string is the GLSL fragment shader's source code.
// image_stack[${idx}]
s = texture2D(tex_${idx}, tex_coord);
${getcolor_channel_mapping_expression};
s = ${getcolor_expression};
sa = clamp(s.a, 0, 1) * global_alpha_${idx};
sc = min_max_gamma_transform(s.rgb, rescale_min_${idx}, rescale_range_${idx}, gamma_${idx});
${extra_transformation_expression}; // extra_transformation_expression
sca = sc * sa;
${blend_function}
da = clamp(da, 0, 1);
dca = clamp(dca, 0, 1);
So, the value stored in an Image's .getcolor_expression property replaces ${getcolor_expression}. Supplying
None or an empty string would create a GLSL syntax error that must be rectified before an ImageStackItem
containing the Image in question can be successfully rendered (unless the Image's .visible property is False).
In order to revert .getcolor_expression to something appropriate for the Image's .type, simply assign True
to that Image's .auto_getcolor_expression_enabled property (likewise, replacing the contents of
.getcolor_expression causes .auto_getcolor_expression_enabled to assume the value False).
Unlike .getcolor_expression, .extra_transformation_expression does accept None or an empty string. Supplying
either results in a GLSL line consisting of "; // extra_transformation_expression", which compiles to nothing,
with the effect that None or an empty string result in no extra transformation expression being applied (this
is the default).""")
getcolor_expression = _Property(
properties, 'getcolor_expression',
default_value_callback = lambda image: image.IMAGE_TYPE_TO_GETCOLOR_EXPRESSION[image.type],
transform_callback = lambda image, v: '' if v is None else str(v),
doc = SHAD_PROP_HELP)
def _tint_transform(self, v):
v = list(map(float, v))
if len(v) not in (3,4) or not all(map(lambda v_: 0 <= v_ <= 1, v)):
raise ValueError('The iteraterable assigned to .tint must represent 3 or 4 real numbers in the interval [0, 1].')
if len(v) == 3:
v.append(1)
return v
    tint = _Property(
        properties, 'tint',
        default_value_callback = lambda image: numpy.array((1,1,1,1), dtype=numpy.float32),
        transform_callback = lambda image, v, f=_tint_transform: f(image, v),
        doc = textwrap.dedent("""\
            I.tint: This property is used by the default I.transform_section, and with that default, has
            the following meaning: I.tint contains 0-1 normalized RGBA component values by which the results
            of applying I.getcolor_expression are scaled."""))
    transform_section = _Property(
        properties, 'transform_section',
        default_value_callback = lambda image: image.DEFAULT_TRANSFORM_SECTION,
        transform_callback = lambda image, v: '' if v is None else str(v))
    def _blend_function_pre_set(self, v):
        # blend_function is restricted to the keys of BLEND_FUNCTIONS.
        if v not in self.BLEND_FUNCTIONS:
            raise ValueError('The string assigned to blend_function must be one of:\n' + '\n'.join("'" + s + "'" for s in sorted(self.BLEND_FUNCTIONS.keys())))
    blend_function = _Property(
        properties, 'blend_function',
        default_value_callback = lambda image: 'screen',
        transform_callback = lambda image, v: str(v),
        pre_set_callback = lambda image, v, f=_blend_function_pre_set: f(image, v),
        doc = SHAD_PROP_HELP + '\n\nSupported blend_functions:\n\n    ' + '\n    '.join("'" + s + "'" for s in sorted(BLEND_FUNCTIONS.keys())))
    # Declare one <prop>_changed pyqtSignal per registered property. exec is
    # used because the signal attributes must exist at class-creation time
    # under names computed from the property names.
    for property in properties:
        exec(property.changed_signal_name + ' = Qt.pyqtSignal(object)')
    del property
    del SHAD_PROP_HELP
    # NB: This a property, not a _Property. There is already a change signal, setter, and a getter for objectName, which
    # we proxy/use.
    name_changed = Qt.pyqtSignal(object)
    name = property(
        Qt.QObject.objectName,
        lambda self, name: self.setObjectName('' if name is None else name),
        doc='Property proxy for QObject::objectName Qt property, which is directly accessible via the objectName getter and '
        'setObjectName setter. Upon change, objectNameChanged is emitted.')
def __repr__(self):
name = self.name
return '{}, {}{}>'.format(
super().__repr__()[:-1],
'with name "{}"'.format(name) if name else 'unnamed',
', visible=False' if not self.visible else '')
    def do_auto_min_max(self):
        """Set .min and .max from the data's extrema (ignoring the alpha channel
        if present) without disabling .auto_min_max_enabled."""
        # Suppress _min_max_post_set's "manual edit disables auto mode" rule
        # while this method writes min/max itself.
        self._retain_auto_min_max_enabled_on_min_max_change = True
        try:
            extremae = self.extremae
            if self.has_alpha_channel:
                # Per-channel (min, max) rows; drop the trailing alpha row.
                eae = extremae[:-1, 0].min(), extremae[:-1, 1].max()
            elif self.num_channels > 1:
                eae = extremae[:, 0].min(), extremae[:, 1].max()
            else:
                eae = extremae
            self.min, self.max = eae
        finally:
            self._retain_auto_min_max_enabled_on_min_max_change = False
    def do_auto_getcolor_expression(self):
        """Reset .getcolor_expression to the default for this image's type,
        without disabling .auto_getcolor_expression_enabled."""
        self._retain_auto_getcolor_expression_enabled_on_getcolor_expression_change = True
        try:
            self.getcolor_expression = self.IMAGE_TYPE_TO_GETCOLOR_EXPRESSION[self.type]
        finally:
            self._retain_auto_getcolor_expression_enabled_on_getcolor_expression_change = False
Add Image property copying function.
# The MIT License (MIT)
#
# Copyright (c) 2015 WUSTL ZPLAB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors: Erik Hvatum <ice.rikh@gmail.com>
import datetime
import numpy
from PyQt5 import Qt
from string import Template
import textwrap
import time
from .basic_image import BasicImage
class _Property(property):
    # Derived from "property" for the sole reason that IPython's question-mark magic is special-cased for
    # properties. Deriving from property causes _Property to receive the same treatment, providing
    # useful output for something.prop? in IPython (where prop is a _Property instance).
    def __init__(self, properties, name, default_value_callback, transform_callback=None, pre_set_callback=None, post_set_callback=None, doc=None):
        """Register this descriptor in the owning class's ``properties`` list
        and record the callbacks used by __get__/__set__/__delete__:

        default_value_callback(image) -> the per-instance default value;
        transform_callback(image, v) -> canonicalized value (optional);
        pre_set_callback(image, v) -> validation hook, may raise (optional);
        post_set_callback(image, v) -> reaction hook after storing (optional).
        """
        self.name = name
        # Instance attribute holding an explicitly set value; its absence means
        # "use the stored default".
        self.var_name = '_' + name
        self.default_val_var_name = '_default_' + name
        self.changed_signal_name = name + '_changed'
        self.default_value_callback = default_value_callback
        self.transform_callback = transform_callback
        self.pre_set_callback = pre_set_callback
        self.post_set_callback = post_set_callback
        if doc is not None:
            self.__doc__ = doc
        properties.append(self)
    @staticmethod
    def eq(a, b):
        # Equality helper that also copes with array-like values, whose ==
        # yields an element-wise result rather than a single bool.
        r = a == b
        if isinstance(r, bool):
            return r
        else:
            return all(r)
    def instantiate(self, image):
        # Store the per-instance default and chain this property's change
        # signal into the image's aggregate "changed" signal.
        setattr(image, self.default_val_var_name, self.default_value_callback(image))
        getattr(image, self.changed_signal_name).connect(image.changed)
    def update_default(self, image):
        """Recompute the stored default (e.g. after the image data changed),
        emitting the change signal if the apparent value changed as a result."""
        if hasattr(image, self.var_name):
            # An explicitly set value is overriding the default, so even if the default has changed, the apparent value of the property has not
            setattr(image, self.default_val_var_name, self.default_value_callback(image))
        else:
            # The default value is the apparent value, meaning that we must check if the default has changed and signal an apparent value change
            # if it has
            old_default = getattr(image, self.default_val_var_name)
            new_default = self.default_value_callback(image)
            if not self.eq(new_default, old_default):
                setattr(image, self.default_val_var_name, new_default)
                getattr(image, self.changed_signal_name).emit(image)
    def copy_instance_value(self, src_image, dst_image):
        """Replace value for this property in dst_image if src_image has a non-default value
        for this property."""
        try:
            v = getattr(src_image, self.var_name)
        except AttributeError:
            # src_image is using the default; nothing to copy.
            return
        # NB: writes the backing attribute directly, bypassing __set__ (no
        # transform/validation/signal).
        setattr(dst_image, self.var_name, v)
    def __get__(self, image, _=None):
        # Class-level access returns the descriptor itself (useful for
        # introspection); instance access returns the override or the default.
        if image is None:
            return self
        try:
            return getattr(image, self.var_name)
        except AttributeError:
            return getattr(image, self.default_val_var_name)
    def __set__(self, image, v):
        if self.transform_callback is not None:
            v = self.transform_callback(image, v)
        # Only validate, store, react, and signal on an actual value change.
        if not hasattr(image, self.var_name) or not self.eq(v, getattr(image, self.var_name)):
            if self.pre_set_callback is not None:
                self.pre_set_callback(image, v)
            setattr(image, self.var_name, v)
            if self.post_set_callback is not None:
                self.post_set_callback(image, v)
            getattr(image, self.changed_signal_name).emit(image)
    def __delete__(self, image):
        """Reset to default value by way of removing the explicitly set override, causing the apparent value to be default."""
        try:
            old_value = getattr(image, self.var_name)
            delattr(image, self.var_name)
            new_value = getattr(image, self.default_val_var_name)
            if not self.eq(old_value, new_value):
                if self.post_set_callback is not None:
                    self.post_set_callback(image, new_value)
                getattr(image, self.changed_signal_name).emit(image)
        except AttributeError:
            # Property was already using default value
            pass
class Image(BasicImage, Qt.QObject):
    """BasicImage's properties are all either computed from that ndarray, provide views into that ndarray's data (in the case of .data
    and .data_T), or, in the special cases of .is_twelve_bit for uint16 images and .range for floating-point images, represent unenforced
    constraints limiting the domain of valid values that are expected to be assumed by elements of the ndarray.
    Image adds properties such as min/max/gamma scaling that control presentation of the image data contained by BasicImage, which
    is a base class of Image.
    In summary,
    BasicImage: raw image data and essential information for interpreting that data in any context
    Image: BasicImage + presentation data and metadata for RisWidget such as rescaling min/max/gamma values and an informative name
    The changed signal is emitted when any property impacting image presentation is modified or image data is explicitly changed or refreshed.
    In the case where any image appearence change should cause a function to be executed, do changed.connect(your_function) rather than
    min_changed.connect(your_function); max_changed.connect(your_function); etc.
    Although Image uses _Property descriptors, subclasses adding properties are not obligated
    to use _Property to represent the additional properties. The regular @property decorator syntax or property(..) builtin
    remain available - _Property provides an abstraction that is potentially convenient and worth understanding and using when
    defining a large number of properties."""
    # Inclusive bounds for the gamma property (enforced by _gamma_pre_set).
    GAMMA_RANGE = (0.0625, 16.0)
    # Default GLSL expression mapping a sampled texel to RGBA, per image type.
    IMAGE_TYPE_TO_GETCOLOR_EXPRESSION = {
        'G'   : 'vec4(s.rrr, 1.0f)',
        'Ga'  : 'vec4(s.rrr, s.g)',
        'rgb' : 'vec4(s.rgb, 1.0f)',
        'rgba': 's'}
    DEFAULT_TRANSFORM_SECTION = 'out_.rgb = pow((in_.rgb - rescale_min) / (rescale_range), gamma); out_.rgba *= tint;'
    # Blend functions adapted from http://dev.w3.org/SVG/modules/compositing/master/
    BLEND_FUNCTIONS = {
        'src'      : ('dca = sca;',
                      'da = s.a;'),
        'src-over' : ('dca = sca + dca * (1.0f - s.a);',
                      'da = s.a + da - s.a * da;'),
        'dst-over' : ('dca = dca + sca * (1.0f - da);',
                      'da = s.a + da - s.a * da;'),
        'plus'     : ('dca += sca;',
                      'da += s.a;'),
        'multiply' : ('dca = sca * dca + sca * (1.0f - da) + dca * (1.0f - s.a);',
                      'da = s.a + da - s.a * da;'),
        'screen'   : ('dca = sca + dca - sca * dca;',
                      'da = s.a + da - s.a * da;'),
        'overlay'  : ('isa = 1.0f - s.a; osa = 1.0f + s.a;',
                      'ida = 1.0f - da; oda = 1.0f + da;',
                      'sada = s.a * da;',
                      'for(i = 0; i < 3; ++i){',
                      '    dca[i] = (dca[i] + dca[i] <= da) ?',
                      '        (sca[i] + sca[i]) * dca[i] + sca[i] * ida + dca[i] * isa :',
                      '        sca[i] * oda + dca[i] * osa - (dca[i] + dca[i]) * sca[i] - sada;}',
                      'da = s.a + da - sada;'),
        'difference':('dca = (sca * da + dca * s.a - (sca + sca) * dca) + sca * (1.0f - da) + dca * (1.0f - s.a);',
                      'da = s.a + da - s.a * da;')}
    # Pre-join each blend function's GLSL lines into one labeled code section.
    for k, v in BLEND_FUNCTIONS.items():
        BLEND_FUNCTIONS[k] = '    // blending function name: {}\n    '.format(k) + '\n    '.join(v)
    del k, v
# A call to .set_data or a change to any mutable property potentially impacts image presentation. For convenience, changed is emitted whenever
# .set_data or .refresh is called or any of the more specific mutable-property-changed signals are emitted.
#
# For example, this single call supports extensibility by subclassing:
# image_instance.changed.connect(something.refresh)
# And that single call replaces the following set of calls, which is not even complete if Image is subclassed:
# image_instance.name_changed.connect(something.refresh)
# image_instance.data_changed.connect(something.refresh)
# image_instance.min_changed.connect(something.refresh)
# image_instance.max_changed.connect(something.refresh)
# image_instance.gamma_changed.connect(something.refresh)
# image_instance.trilinear_filtering_enabled_changed.connect(something.refresh)
# image_instance.auto_getcolor_expression_enabled_changed.connect(something.refresh)
# image_instance.getcolor_expression_changed.connect(something.refresh)
# image_instance.extra_transformation_expression_changed.connect(something.refresh)
# image_instance.global_alpha_changed.connect(something.refresh)
# image_instance.mute_enabled_changed.connect(something.refresh)
#
# In the __init__ function of any Image subclass that adds presentation-affecting properties
# and associated change notification signals, do not forget to connect the subclass's change signals to changed.
    # Aggregate change signal (argument: this Image); every more specific
    # *_changed signal is chained into it.
    changed = Qt.pyqtSignal(object)
    data_changed = Qt.pyqtSignal(object)
    def __init__(self, data, is_twelve_bit=False, float_range=None, shape_is_width_height=True, name=None, parent=None):
        """Initialize Qt state, load the pixel data, and give every registered
        _Property its per-instance default before wiring the change signals."""
        Qt.QObject.__init__(self, parent)
        BasicImage.set_data(self, data, is_twelve_bit, float_range, shape_is_width_height)
        # Guard consumed by _min_max_post_set: while True, programmatic min/max
        # writes (from do_auto_min_max) do not clear auto_min_max_enabled.
        self._retain_auto_min_max_enabled_on_min_max_change = False
        for property in self.properties:
            property.instantiate(self)
        if name:
            self.setObjectName(name)
        if self.auto_min_max_enabled:
            self.do_auto_min_max()
        self._blend_function_impl = self.BLEND_FUNCTIONS[self.blend_function]
        # Re-emit objectNameChanged as name_changed(self) so listeners receive
        # the image itself, consistent with the other *_changed signals.
        self.objectNameChanged.connect(lambda: self.name_changed.emit(self))
        self.name_changed.connect(self.changed)
        self.data_changed.connect(self.changed)
    def set_data(self, data, is_twelve_bit=False, float_range=None, shape_is_width_height=True, keep_name=True, name=None):
        """If keep_name is True, the existing name is not changed, and the value supplied for the name argument is ignored.
        If keep_name is False, the existing name is replaced with the supplied name or is cleared if supplied name is None
        or an empty string."""
        BasicImage.set_data(self, data, is_twelve_bit, float_range, shape_is_width_height)
        if not keep_name:
            self.name = name
        # Data-dependent property defaults (e.g. min/max range) may have changed
        # with the new data; let each property re-evaluate and signal if needed.
        for property in self.properties:
            property.update_default(self)
        if self.auto_min_max_enabled:
            self.do_auto_min_max()
        self.data_changed.emit(self)
def copy_property_values_from(self, source):
for property in self.properties:
property.copy_instance_value(source, self)
sname = source.name
if sname:
self.name = sname + ' dupe'
else:
self.name = 'dupe'
def refresh(self):
BasicImage.refresh(self)
if self.auto_min_max_enabled:
self.do_auto_min_max()
self.data_changed.emit(self)
def generate_contextual_info_for_pos(self, x, y, idx=None):
if not self.visible:
return
sz = self.size
if 0 <= x < sz.width() and 0 <= y < sz.height():
type_ = self.type
num_channels = self.num_channels
name = self.name
mst = '' if idx is None else '{: 3}, '.format(idx)
if name:
mst += '"' + name + '", '
mst+= 'x:{} y:{} '.format(x, y)
vt = '(' + ' '.join((c + ':{}' for c in self.type)) + ')'
if num_channels == 1:
vt = vt.format(self.data[x, y])
else:
vt = vt.format(*self.data[x, y])
return mst+vt
    # Class-level registry of _Property descriptors; _Property.__init__ appends
    # each descriptor declared below to this list.
    properties = []
    visible = _Property(
        properties, 'visible',
        doc = textwrap.dedent(
            """\
            Generally, a non-visible image is not visible in the "main view" but does remain visible in specialized views,
            such as the histogram view and image stack table widget.
            In more detail:
            If an Image's visible property is False, that Image does not contribute to mixed output. For example,
            any single pixel in an ImageStackItem rendering may represent the result of blending a number of Images,
            whereas only one Image at a time may be associated with a HistogramItem; no HistogramItem pixel in the
            rendering of a HistogramItem is a function of more than one Image. Therefore, a non-visible Image that is part
            of a SignalingList that is associated with an ImageStackItem will not be visible in the output of that
            ImageStackItem's render function, although the histogram of the Image will still be visible in the output
            of the render function of a HistogramItem associated with the Image."""),
        default_value_callback = lambda image: True,
        transform_callback = lambda image, v: bool(v))
    def _auto_min_max_enabled_post_set(self, v):
        # Enabling auto min/max takes effect immediately rather than waiting
        # for the next data change.
        if v:
            self.do_auto_min_max()
    auto_min_max_enabled = _Property(
        properties, 'auto_min_max_enabled',
        default_value_callback = lambda image: False,
        transform_callback = lambda image, v: bool(v),
        post_set_callback = _auto_min_max_enabled_post_set)
    def _min_max_pre_set(self, v):
        # Validation hook shared by .min and .max: reject values outside the
        # image's representable range.
        r = self.range
        if not r[0] <= v <= r[1]:
            raise ValueError('min/max values for this image must be in the closed interval [{}, {}].'.format(*r))
    def _min_max_post_set(self, v, is_max):
        # Maintain the min <= max invariant by dragging the other endpoint along.
        if is_max:
            if v < self.min:
                self.min = v
        else:
            if v > self.max:
                self.max = v
        # A manual min/max edit disables auto scaling — unless the write came
        # from do_auto_min_max itself, which sets the retain flag first.
        if not self._retain_auto_min_max_enabled_on_min_max_change:
            self.auto_min_max_enabled = False
    min = _Property(
        properties, 'min',
        default_value_callback = lambda image: float(image.range[0]),
        transform_callback = lambda image, v: float(v),
        pre_set_callback = _min_max_pre_set,
        post_set_callback = lambda image, v, f=_min_max_post_set: f(image, v, False))
    max = _Property(
        properties, 'max',
        default_value_callback = lambda image: float(image.range[1]),
        transform_callback = lambda image, v: float(v),
        pre_set_callback = _min_max_pre_set,
        post_set_callback = lambda image, v, f=_min_max_post_set: f(image, v, True))
    def _gamma_pre_set(self, v):
        # Gamma is validated (raise, not clip) against GAMMA_RANGE.
        r = self.GAMMA_RANGE
        if not r[0] <= v <= r[1]:
            raise ValueError('gamma value must be in the closed interval [{}, {}].'.format(*r))
    gamma = _Property(
        properties, 'gamma',
        default_value_callback = lambda image: 1.0,
        transform_callback = lambda image, v: float(v),
        pre_set_callback = _gamma_pre_set)
    trilinear_filtering_enabled = _Property(
        properties, 'trilinear_filtering_enabled',
        default_value_callback = lambda image: True,
        transform_callback = lambda image, v: bool(v))
SHAD_PROP_HELP = textwrap.dedent("""\
The GLSL fragment shader used to render an ImageStackItem is generated by iterating through ImageStackItem.image_stack,
replacing the ${values} in the following template with with those of the Image (or
ImageStackItem.BLEND_FUNCTIONS[Image.blend_function] in the case of ${blend_function}) at each iteration and
appending the resulting text to a string. The accumulated string is the GLSL fragment shader's source code.
// image_stack[${idx}]
s = texture2D(tex_${idx}, tex_coord);
${getcolor_channel_mapping_expression};
s = ${getcolor_expression};
sa = clamp(s.a, 0, 1) * global_alpha_${idx};
sc = min_max_gamma_transform(s.rgb, rescale_min_${idx}, rescale_range_${idx}, gamma_${idx});
${extra_transformation_expression}; // extra_transformation_expression
sca = sc * sa;
${blend_function}
da = clamp(da, 0, 1);
dca = clamp(dca, 0, 1);
So, the value stored in an Image's .getcolor_expression property replaces ${getcolor_expression}. Supplying
None or an empty string would create a GLSL syntax error that must be rectified before an ImageStackItem
containing the Image in question can be successfully rendered (unless the Image's .visible property is False).
In order to revert .getcolor_expression to something appropriate for the Image's .type, simply assign True
to that Image's .auto_getcolor_expression_enabled property (likewise, replacing the contents of
.getcolor_expression causes .auto_getcolor_expression_enabled to assume the value False).
Unlike .getcolor_expression, .extra_transformation_expression does accept None or an empty string. Supplying
either results in a GLSL line consisting of "; // extra_transformation_expression", which compiles to nothing,
with the effect that None or an empty string result in no extra transformation expression being applied (this
is the default).""")
getcolor_expression = _Property(
properties, 'getcolor_expression',
default_value_callback = lambda image: image.IMAGE_TYPE_TO_GETCOLOR_EXPRESSION[image.type],
transform_callback = lambda image, v: '' if v is None else str(v),
doc = SHAD_PROP_HELP)
def _tint_transform(self, v):
v = list(map(float, v))
if len(v) not in (3,4) or not all(map(lambda v_: 0 <= v_ <= 1, v)):
raise ValueError('The iteraterable assigned to .tint must represent 3 or 4 real numbers in the interval [0, 1].')
if len(v) == 3:
v.append(1)
return v
    tint = _Property(
        properties, 'tint',
        default_value_callback = lambda image: numpy.array((1,1,1,1), dtype=numpy.float32),
        transform_callback = lambda image, v, f=_tint_transform: f(image, v),
        doc = textwrap.dedent("""\
            I.tint: This property is used by the default I.transform_section, and with that default, has
            the following meaning: I.tint contains 0-1 normalized RGBA component values by which the results
            of applying I.getcolor_expression are scaled."""))
    transform_section = _Property(
        properties, 'transform_section',
        default_value_callback = lambda image: image.DEFAULT_TRANSFORM_SECTION,
        transform_callback = lambda image, v: '' if v is None else str(v))
    def _blend_function_pre_set(self, v):
        # blend_function is restricted to the keys of BLEND_FUNCTIONS.
        if v not in self.BLEND_FUNCTIONS:
            raise ValueError('The string assigned to blend_function must be one of:\n' + '\n'.join("'" + s + "'" for s in sorted(self.BLEND_FUNCTIONS.keys())))
    blend_function = _Property(
        properties, 'blend_function',
        default_value_callback = lambda image: 'screen',
        transform_callback = lambda image, v: str(v),
        pre_set_callback = lambda image, v, f=_blend_function_pre_set: f(image, v),
        doc = SHAD_PROP_HELP + '\n\nSupported blend_functions:\n\n    ' + '\n    '.join("'" + s + "'" for s in sorted(BLEND_FUNCTIONS.keys())))
    # Declare one <prop>_changed pyqtSignal per registered property. exec is
    # used because the signal attributes must exist at class-creation time
    # under names computed from the property names.
    for property in properties:
        exec(property.changed_signal_name + ' = Qt.pyqtSignal(object)')
    del property
    del SHAD_PROP_HELP
    # NB: This a property, not a _Property. There is already a change signal, setter, and a getter for objectName, which
    # we proxy/use.
    name_changed = Qt.pyqtSignal(object)
    name = property(
        Qt.QObject.objectName,
        lambda self, name: self.setObjectName('' if name is None else name),
        doc='Property proxy for QObject::objectName Qt property, which is directly accessible via the objectName getter and '
        'setObjectName setter. Upon change, objectNameChanged is emitted.')
def __repr__(self):
name = self.name
return '{}, {}{}>'.format(
super().__repr__()[:-1],
'with name "{}"'.format(name) if name else 'unnamed',
', visible=False' if not self.visible else '')
    def do_auto_min_max(self):
        """Set .min and .max from the data's extrema (ignoring the alpha channel
        if present) without disabling .auto_min_max_enabled."""
        # Suppress _min_max_post_set's "manual edit disables auto mode" rule
        # while this method writes min/max itself.
        self._retain_auto_min_max_enabled_on_min_max_change = True
        try:
            extremae = self.extremae
            if self.has_alpha_channel:
                # Per-channel (min, max) rows; drop the trailing alpha row.
                eae = extremae[:-1, 0].min(), extremae[:-1, 1].max()
            elif self.num_channels > 1:
                eae = extremae[:, 0].min(), extremae[:, 1].max()
            else:
                eae = extremae
            self.min, self.max = eae
        finally:
            self._retain_auto_min_max_enabled_on_min_max_change = False
    def do_auto_getcolor_expression(self):
        """Reset .getcolor_expression to the default for this image's type,
        without disabling .auto_getcolor_expression_enabled."""
        self._retain_auto_getcolor_expression_enabled_on_getcolor_expression_change = True
        try:
            self.getcolor_expression = self.IMAGE_TYPE_TO_GETCOLOR_EXPRESSION[self.type]
        finally:
            self._retain_auto_getcolor_expression_enabled_on_getcolor_expression_change = False
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import attrgetter
from openerp.osv import fields, osv
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
class purchase_order(osv.osv):
    def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
        """Function-field: compute untaxed, tax, and total amounts per order.

        Returns {order_id: {'amount_untaxed', 'amount_tax', 'amount_total'}},
        with taxes computed line by line via account.tax.compute_all and both
        subtotals rounded in the order's pricelist currency.
        """
        res = {}
        cur_obj=self.pool.get('res.currency')
        for order in self.browse(cr, uid, ids, context=context):
            res[order.id] = {
                'amount_untaxed': 0.0,
                'amount_tax': 0.0,
                'amount_total': 0.0,
            }
            val = val1 = 0.0
            cur = order.pricelist_id.currency_id
            for line in order.order_line:
               val1 += line.price_subtotal
               for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
                    val += c.get('amount', 0.0)
            res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
            res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
            res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
        return res
    def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
        """Inverse of the minimum_planned_date function field.

        Pushes the new date down to order lines planned earlier than it (or
        equal to the order's previous minimum), then stores it on the order
        row itself. Both statements use parameterized SQL.
        """
        if not value: return False
        if type(ids)!=type([]):
            ids=[ids]
        for po in self.browse(cr, uid, ids, context=context):
            if po.order_line:
                cr.execute("""update purchase_order_line set
                    date_planned=%s
                where
                    order_id=%s and
                    (date_planned=%s or date_planned<%s)""", (value,po.id,po.minimum_planned_date,value))
            cr.execute("""update purchase_order set
                minimum_planned_date=%s where id=%s""", (value, po.id))
        return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
    def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
        """Function-field: percentage of ordered quantity already received.

        Aggregates stock move quantities per order directly in SQL: 'done'
        moves count toward both received and total quantity; any other
        non-cancelled state counts toward total only.
        """
        if not ids: return {}
        res = {}
        # res[id] accumulates [received_qty, total_qty] before conversion to %.
        for id in ids:
            res[id] = [0.0,0.0]
        cr.execute('''SELECT
                p.purchase_id,sum(m.product_qty), m.state
            FROM
                stock_move m
            LEFT JOIN
                stock_picking p on (p.id=m.picking_id)
            WHERE
                p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
        for oid,nbr,state in cr.fetchall():
            if state=='cancel':
                continue
            if state=='done':
                res[oid][0] += nbr or 0.0
                res[oid][1] += nbr or 0.0
            else:
                res[oid][1] += nbr or 0.0
        for r in res:
            # Orders with no (non-cancelled) moves report 0.0%.
            if not res[r][1]:
                res[r] = 0.0
            else:
                res[r] = 100.0 * res[r][0] / res[r][1]
        return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
invoiced = False
if purchase.invoiced_rate == 100.00:
invoiced = True
res[purchase.id] = invoiced
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ Sent'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
},
}
_columns = {
'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'warehouse_id': fields.many2one('stock.warehouse', 'Destination Warehouse'),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]} ),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency",readonly=True, required=True),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user, the status switch to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status becomes in exception.", select=True),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines', states={'approved':[('readonly',True)],'done':[('readonly',True)]}),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id', 'invoice_id', 'Invoices', help="Invoices generated for a purchase order"),
'picking_ids': fields.one2many('stock.picking.in', 'purchase_id', 'Picking List', readonly=True, help="This is the list of incoming shipments that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', help="It indicates that an invoice has been paid"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control > Based on P.O. lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Bases on incoming shipments: let you create an invoice when receptions are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums",help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'product_id': fields.related('order_line','product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company','Company',required=True,select=1, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
}
_defaults = {
'date_order': fields.date.context_today,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = "name desc"
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order') or '/'
order = super(purchase_order, self).create(cr, uid, vals, context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
self.signal_purchase_cancel(cr, uid, unlink_ids)
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
def onchange_dest_address_id(self, cr, uid, ids, address_id):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {'warehouse_id': False}
supplier = address.browse(cr, uid, address_id)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id):
if not warehouse_id:
return {}
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id)
return {'value':{'location_id': warehouse.lot_input_id.id, 'dest_address_id': False}}
def onchange_partner_id(self, cr, uid, ids, partner_id):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'])
supplier = partner.browse(cr, uid, partner_id)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing invoices of given sales order ids. It can either be a in a list or in a form view, if there is only one invoice to show.
'''
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line]})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
    def view_picking(self, cr, uid, ids, context=None):
        '''
        This function returns an action that display existing pîcking orders of given purchase order ids.
        '''
        mod_obj = self.pool.get('ir.model.data')
        pick_ids = []
        for po in self.browse(cr, uid, ids, context=context):
            pick_ids += [picking.id for picking in po.picking_ids]
        # base window action for incoming shipments
        action_model, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree4'))
        action = self.pool.get(action_model).read(cr, uid, action_id, context=context)
        # NOTE(review): the action context is a string evaluated with eval();
        # it comes from trusted XML data, not user input
        ctx = eval(action['context'])
        ctx.update({
            'search_default_purchase_id': ids[0]
        })
        if pick_ids and len(pick_ids) == 1:
            # single picking: open it directly in form view
            form_view_ids = [view_id for view_id, view in action['views'] if view == 'form']
            view_id = form_view_ids and form_view_ids[0] or False
            action.update({
                'views': [],
                'view_mode': 'form',
                'view_id': view_id,
                'res_id': pick_ids[0]
            })
        action.update({
            'context': ctx,
        })
        return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def print_confirm(self,cr,uid,ids,context=None):
print "Confirmed"
def print_double(self,cr,uid,ids,context=None):
print "double Approval"
def print_router(self,cr,uid,ids,context=None):
print "Routed"
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
This function prints the request for quotation and mark it as sent, so that we can see more easily the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_send_rfq(cr, uid, ids)
datas = {
'model': 'purchase.order',
'ids': ids,
'form': self.read(cr, uid, ids[0], context=context),
}
return {'type': 'ir.actions.report.xml', 'report_name': 'purchase.quotation', 'datas': datas, 'nodestroy': True}
#TODO: implement messages system
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not po.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
}
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id]) # TODO is it necessary to interleave the calls?
self.create_workflow(cr, uid, [p_id])
return True
    def action_invoice_create(self, cr, uid, ids, context=None):
        """Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
        :param ids: list of ids of purchase orders.
        :return: ID of created invoice.
        :rtype: int
        """
        res = False
        journal_obj = self.pool.get('account.journal')
        inv_obj = self.pool.get('account.invoice')
        inv_line_obj = self.pool.get('account.invoice.line')
        fiscal_obj = self.pool.get('account.fiscal.position')
        property_obj = self.pool.get('ir.property')
        for order in self.browse(cr, uid, ids, context=context):
            pay_acc_id = order.partner_id.property_account_payable.id
            journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)
            if not journal_ids:
                raise osv.except_osv(_('Error!'),
                    _('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
            # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
            inv_lines = []
            for po_line in order.order_line:
                if po_line.product_id:
                    # expense account: product first, then its category as fallback
                    acc_id = po_line.product_id.property_account_expense.id
                    if not acc_id:
                        acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
                    if not acc_id:
                        raise osv.except_osv(_('Error!'), _('Define expense account for this company: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
                else:
                    # no product on the line: use the company-level default property
                    acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id
                fpos = order.fiscal_position or False
                acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)
                inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
                inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
                inv_lines.append(inv_line_id)
                po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)
            # get invoice data and create invoice
            inv_data = {
                'name': order.partner_ref or order.name,
                'reference': order.partner_ref or order.name,
                'account_id': pay_acc_id,
                'type': 'in_invoice',
                'partner_id': order.partner_id.id,
                'currency_id': order.pricelist_id.currency_id.id,
                'journal_id': len(journal_ids) and journal_ids[0] or False,
                'invoice_line': [(6, 0, inv_lines)],
                'origin': order.name,
                'fiscal_position': order.fiscal_position.id or False,
                'payment_term': order.payment_term_id.id or False,
                'company_id': order.company_id.id,
            }
            inv_id = inv_obj.create(cr, uid, inv_data, context=context)
            # compute the invoice
            inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
            # Link this new invoice to related purchase order
            order.write({'invoice_ids': [(4, inv_id)]}, context=context)
            # NOTE: when several orders are passed, only the last invoice id is returned
            res = inv_id
        return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
    def action_cancel(self, cr, uid, ids, context=None):
        """Cancel the orders after cancelling their pickings and invoices.

        Raises when a picking or invoice is already in progress; otherwise
        signals cancellation down to pickings, invoices and the PO workflow.
        """
        for purchase in self.browse(cr, uid, ids, context=context):
            for pick in purchase.picking_ids:
                # any picking beyond draft/cancel blocks the cancellation
                if pick.state not in ('draft','cancel'):
                    raise osv.except_osv(
                        _('Unable to cancel this purchase order.'),
                        _('First cancel all receptions related to this purchase order.'))
            self.pool.get('stock.picking') \
                .signal_button_cancel(cr, uid, map(attrgetter('id'), purchase.picking_ids))
            for inv in purchase.invoice_ids:
                # any invoice beyond cancel/draft blocks the cancellation
                if inv and inv.state not in ('cancel','draft'):
                    raise osv.except_osv(
                        _('Unable to cancel this purchase order.'),
                        _('You must first cancel all receptions related to this purchase order.'))
            self.pool.get('account.invoice') \
                .signal_invoice_cancel(cr, uid, map(attrgetter('id'), purchase.invoice_ids))
        self.write(cr,uid,ids,{'state':'cancel'})
        # propagate cancellation through the purchase workflow
        self.signal_purchase_cancel(cr, uid, ids)
        return True
def _prepare_order_picking(self, cr, uid, order, context=None):
return {
'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),
'origin': order.name + ((order.origin and (':' + order.origin)) or ''),
'date': order.date_order,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'invoice_state': '2binvoiced' if order.invoice_method == 'picking' else 'none',
'type': 'in',
'partner_id': order.dest_address_id.id or order.partner_id.id,
'purchase_id': order.id,
'company_id': order.company_id.id,
'move_lines' : [],
}
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
return {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_qty': order_line.product_qty,
'product_uos_qty': order_line.product_qty,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': order_line.date_planned,
'date_expected': order_line.date_planned,
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'move_dest_id': order_line.move_dest_id.id,
'state': 'draft',
'type':'in',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': order_line.price_unit
}
    def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
        """Creates pickings and appropriate stock moves for given order lines, then
        confirms the moves, makes them available, and confirms the picking.
        If ``picking_id`` is provided, the stock moves will be added to it, otherwise
        a standard outgoing picking will be created to wrap the stock moves, as returned
        by :meth:`~._prepare_order_picking`.
        Modules that wish to customize the procurements or partition the stock moves over
        multiple stock pickings may override this method and call ``super()`` with
        different subsets of ``order_lines`` and/or preset ``picking_id`` values.
        :param browse_record order: purchase order to which the order lines belong
        :param list(browse_record) order_lines: purchase order line records for which picking
                                                and moves should be created.
        :param int picking_id: optional ID of a stock picking to which the created stock moves
                               will be added. A new picking will be created if omitted.
        :return: list of IDs of pickings used/created for the given order lines (usually just one)
        """
        stock_picking = self.pool.get('stock.picking')
        if not picking_id:
            picking_id = stock_picking.create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
        todo_moves = []
        stock_move = self.pool.get('stock.move')
        for order_line in order_lines:
            # service lines (no product, or non-stockable type) get no stock move
            if not order_line.product_id:
                continue
            if order_line.product_id.type in ('product', 'consu'):
                move = stock_move.create(cr, uid, self._prepare_order_line_move(cr, uid, order, order_line, picking_id, context=context))
                if order_line.move_dest_id:
                    # chain the destination move to the order's receiving location
                    order_line.move_dest_id.write({'location_id': order.location_id.id})
                todo_moves.append(move)
        # confirm, force-reserve, then confirm the picking itself
        stock_move.action_confirm(cr, uid, todo_moves)
        stock_move.force_assign(cr, uid, todo_moves)
        stock_picking.signal_button_confirm(cr, uid, [picking_id])
        return [picking_id]
def action_picking_create(self, cr, uid, ids, context=None):
picking_ids = []
for order in self.browse(cr, uid, ids):
picking_ids.extend(self._create_pickings(cr, uid, order, order.order_line, None, context=context))
# Must return one unique picking ID: the one to connect in the subflow of the purchase order.
# In case of multiple (split) pickings, we should return the ID of the critical one, i.e. the
# one that should trigger the advancement of the purchase workflow.
# By default we will consider the first one as most important, but this behavior can be overridden.
return picking_ids[0] if picking_ids else False
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
self.message_post(cr, uid, ids, body=_("Products <b>Received.</b>"), context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'state':'draft',
'shipped':False,
'invoiced':False,
'invoice_ids': [],
'picking_ids': [],
'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order'),
})
return super(purchase_order, self).copy(cr, uid, id, default, context)
    def do_merge(self, cr, uid, ids, context=None):
        """
        To merge similar type of purchase orders.
        Orders will only be merged if:
        * Purchase Orders are in draft
        * Purchase Orders belong to the same partner
        * Purchase Orders are have same stock location, same pricelist
        Lines will only be merged if:
        * Order lines are exactly the same except for the quantity and unit
         @param self: The object pointer.
         @param cr: A database cursor
         @param uid: ID of the user currently logged in
         @param ids: the ID or list of IDs
         @param context: A standard dictionary
         @return: new purchase order id
        """
        #TOFIX: merged order line should be unlink
        def make_key(br, fields):
            # Build a hashable, sorted tuple of (field, value) pairs that
            # identifies mergeable records; relational values collapse to ids.
            list_key = []
            for field in fields:
                field_val = getattr(br, field)
                if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
                    if not field_val:
                        field_val = False
                if isinstance(field_val, browse_record):
                    field_val = field_val.id
                elif isinstance(field_val, browse_null):
                    field_val = False
                elif isinstance(field_val, list):
                    field_val = ((6, 0, tuple([v.id for v in field_val])),)
                list_key.append((field, field_val))
            list_key.sort()
            return tuple(list_key)
        # Compute what the new orders should contain
        new_orders = {}
        # only draft orders participate; group them by partner/location/pricelist
        for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
            order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id'))
            new_order = new_orders.setdefault(order_key, ({}, []))
            new_order[1].append(porder.id)
            order_infos = new_order[0]
            if not order_infos:
                # first order of the group seeds the merged order's values
                order_infos.update({
                    'origin': porder.origin,
                    'date_order': porder.date_order,
                    'partner_id': porder.partner_id.id,
                    'dest_address_id': porder.dest_address_id.id,
                    'warehouse_id': porder.warehouse_id.id,
                    'location_id': porder.location_id.id,
                    'pricelist_id': porder.pricelist_id.id,
                    'state': 'draft',
                    'order_line': {},
                    'notes': '%s' % (porder.notes or '',),
                    'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
                })
            else:
                # subsequent orders: keep earliest date, concatenate notes/origins
                if porder.date_order < order_infos['date_order']:
                    order_infos['date_order'] = porder.date_order
                if porder.notes:
                    order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
                if porder.origin:
                    order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
            for order_line in porder.order_line:
                line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'product_id', 'move_dest_id', 'account_analytic_id'))
                o_line = order_infos['order_line'].setdefault(line_key, {})
                if o_line:
                    # merge the line with an existing line
                    o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
                else:
                    # append a new "standalone" line
                    for field in ('product_qty', 'product_uom'):
                        field_val = getattr(order_line, field)
                        if isinstance(field_val, browse_record):
                            field_val = field_val.id
                        o_line[field] = field_val
                    o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0
        allorders = []
        orders_info = {}
        for order_key, (order_data, old_ids) in new_orders.iteritems():
            # skip merges with only one order
            if len(old_ids) < 2:
                allorders += (old_ids or [])
                continue
            # cleanup order line data
            for key, value in order_data['order_line'].iteritems():
                del value['uom_factor']
                value.update(dict(key))
            order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]
            # create the new order
            neworder_id = self.create(cr, uid, order_data)
            orders_info.update({neworder_id: old_ids})
            allorders.append(neworder_id)
            # make triggers pointing to the old orders point to the new order
            for old_id in old_ids:
                self.redirect_workflow(cr, uid, [(old_id, neworder_id)])
                self.signal_purchase_cancel(cr, uid, [old_id]) # TODO Is it necessary to interleave the calls?
        return orders_info
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for line in self.browse(cr, uid, ids, context=context):
taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'move_dest_id': fields.many2one('stock.move', 'Reservation Destination', ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', required=True, readonly=True,
help=' * The \'Draft\' status is set automatically when purchase order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when purchase order in confirm status. \
\n* The \'Done\' status is set automatically when purchase order is set as done. \
\n* The \'Cancelled\' status is set automatically when user cancel purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.boolean('Invoiced', readonly=True),
'partner_id': fields.related('order_id','partner_id',string='Partner',readonly=True,type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id','date_order',string='Order Date',readonly=True,type="date")
}
_defaults = {
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'state':'draft', 'move_ids':[],'invoiced':0,'invoice_lines':[]})
return super(purchase_order_line, self).copy_data(cr, uid, id, default, context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_uom.
"""
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
    """Compute the Schedule Date (``date_planned``) of a PO line.

    :param browse_record | False supplier_info: product.supplierinfo used
        to read the supplier delivery lead time (no record means 0 days)
    :param str date_order_str: order date, formatted with
        DEFAULT_SERVER_DATE_FORMAT
    :rtype: datetime
    :return: the order date shifted by the supplier delay, in days
    """
    delay_days = int(supplier_info.delay) if supplier_info else 0
    order_date = datetime.strptime(date_order_str, DEFAULT_SERVER_DATE_FORMAT)
    return order_date + relativedelta(days=delay_days)
def _check_product_uom_group(self, cr, uid, context=None):
    """Return True if the current user belongs to the UoM feature group.

    Used to decide whether UoM-consistency warnings are relevant: they
    only make sense when units of measure are enabled for the user.
    """
    group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
    # any() replaces the old list comprehension plus the
    # `len(res) and True or False` hack: same boolean result, without
    # materializing an intermediate list of matching users.
    return any(user.id == uid for user in group_uom.users)
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
        partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
        name=False, price_unit=False, context=None):
    """
    onchange handler of product_id.

    Recomputes, in order: the line description (in the supplier's
    language), the unit of measure (with a category-consistency warning),
    the quantity and schedule date from the supplier info, then the unit
    price and the applicable taxes.  Returns the usual onchange dict with
    'value', 'domain' and possibly 'warning' keys.
    """
    if context is None:
        context = {}

    res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
    if not product_id:
        # No product selected: echo back the current values unchanged.
        return res

    product_product = self.pool.get('product.product')
    product_uom = self.pool.get('product.uom')
    res_partner = self.pool.get('res.partner')
    product_supplierinfo = self.pool.get('product.supplierinfo')
    product_pricelist = self.pool.get('product.pricelist')
    account_fiscal_position = self.pool.get('account.fiscal.position')
    account_tax = self.pool.get('account.tax')

    # - check for the presence of partner_id and pricelist_id
    #if not partner_id:
    #    raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
    #if not pricelist_id:
    #    raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))

    # - determine name and notes based on product in partner lang.
    context_partner = context.copy()
    if partner_id:
        lang = res_partner.browse(cr, uid, partner_id).lang
        context_partner.update( {'lang': lang, 'partner_id': partner_id} )
    product = product_product.browse(cr, uid, product_id, context=context_partner)
    name = product.name
    if product.description_purchase:
        name += '\n' + product.description_purchase
    res['value'].update({'name': name})

    # - set a domain on product_uom
    res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}

    # - check that uom and product uom belong to the same category
    product_uom_po_id = product.uom_po_id.id
    if not uom_id:
        uom_id = product_uom_po_id

    if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
        # Only warn when the user can actually see UoM fields; either way
        # the UoM is forced back to the product's purchase UoM.
        if self._check_product_uom_group(cr, uid, context=context):
            res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
        uom_id = product_uom_po_id

    res['value'].update({'product_uom': uom_id})

    # - determine product_qty and date_planned based on seller info
    if not date_order:
        date_order = fields.date.context_today(self,cr,uid,context=context)
    supplierinfo = False
    # Pick the supplierinfo record matching the PO's supplier (if any) and
    # enforce its UoM and minimal quantity with warnings.
    for supplier in product.seller_ids:
        if partner_id and (supplier.name.id == partner_id):
            supplierinfo = supplier
            if supplierinfo.product_uom.id != uom_id:
                res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
            min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
            if (qty or 0.0) < min_qty: # If the supplier quantity is greater than entered from user, set minimal.
                if qty:
                    res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
                qty = min_qty
    dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    qty = qty or 1.0
    res['value'].update({'date_planned': date_planned or dt})
    if qty:
        res['value'].update({'product_qty': qty})

    # - determine price_unit and taxes_id
    if pricelist_id:
        price = product_pricelist.price_get(cr, uid, [pricelist_id],
                product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order})[pricelist_id]
    else:
        # No pricelist: fall back to the product's cost price.
        price = product.standard_price

    taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
    fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
    taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
    res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})

    return res
# Backward-compatibility aliases: existing views/XML still call the old
# *_change method names.
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom

def action_confirm(self, cr, uid, ids, context=None):
    """Workflow action: move the line(s) to the 'confirmed' state."""
    self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
    return True

# Instantiation registers the model with the ORM (pre-OpenERP-7 style).
purchase_order_line()
class procurement_order(osv.osv):
    """Extend procurements with the 'buy' route: validate that a product
    can be purchased and create the purchase order that fulfils the
    procurement."""
    _inherit = 'procurement.order'
    _columns = {
        'purchase_id': fields.many2one('purchase.order', 'Purchase Order'),
    }

    def check_buy(self, cr, uid, ids, context=None):
        ''' return True if the supply method of the mto product is 'buy'
        '''
        # Fixed: dropped an unused res.users browse (dead code) and
        # replaced the deprecated `<>` operator with `!=`.
        for procurement in self.browse(cr, uid, ids, context=context):
            if procurement.product_id.supply_method != 'buy':
                return False
        return True

    def check_supplier_info(self, cr, uid, ids, context=None):
        """Check that each procured product has a usable supplier: a
        seller list, a default supplier distinct from our own company, and
        a delivery address.  Posts a message on the procurement and
        returns False at the first failure."""
        partner_obj = self.pool.get('res.partner')
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        for procurement in self.browse(cr, uid, ids, context=context):
            if not procurement.product_id.seller_ids:
                message = _('No supplier defined for this product !')
                self.message_post(cr, uid, [procurement.id], body=message)
                cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
                return False
            partner = procurement.product_id.seller_id #Taken Main Supplier of Product of Procurement.
            if not partner:
                message = _('No default supplier defined for this product')
                self.message_post(cr, uid, [procurement.id], body=message)
                cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
                return False
            if user.company_id and user.company_id.partner_id:
                if partner.id == user.company_id.partner_id.id:
                    # Bug fix: interpolate AFTER translation.  The old code
                    # did _('... %s ...' % name), looking up the already
                    # interpolated string, so no translation ever matched.
                    raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!') % procurement.product_id.name)
            address_id = partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']
            if not address_id:
                message = _('No address defined for the supplier')
                self.message_post(cr, uid, [procurement.id], body=message)
                cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
                return False
        return True

    def action_po_assign(self, cr, uid, ids, context=None):
        """ This is action which call from workflow to assign purchase order to procurements
        @return: True
        """
        res = self.make_po(cr, uid, ids, context=context)
        res = res.values()
        return len(res) and res[0] or 0 #TO CHECK: why workflow is generated error if return not integer value

    def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
        """Create the purchase order from the procurement, using
        the provided field values, after adding the given purchase
        order line in the purchase order.

        :params procurement: the procurement object generating the purchase order
        :params dict po_vals: field values for the new purchase order (the
                              ``order_line`` field will be overwritten with one
                              single line, as passed in ``line_vals``).
        :params dict line_vals: field values of the single purchase order line that
                                the purchase order will contain.
        :return: id of the newly created purchase order
        :rtype: int
        """
        po_vals.update({'order_line': [(0,0,line_vals)]})
        return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)

    def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
        """Return the datetime value to use as Schedule Date (``date_planned``) for the
        Purchase Order Lines created to satisfy the given procurement.

        :param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong to.
        :rtype: datetime
        :return: the desired Schedule Date for the PO lines
        """
        procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
        # Anticipate by the company-level purchase lead time.
        schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
        return schedule_date

    def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
        """Return the datetime value to use as Order Date (``date_order``) for the
        Purchase Order created to satisfy the given procurement.

        :param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong to.
        :param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
        :rtype: datetime
        :return: the desired Order Date for the PO
        """
        seller_delay = int(procurement.product_id.seller_delay)
        return schedule_date - relativedelta(days=seller_delay)

    def make_po(self, cr, uid, ids, context=None):
        """ Make purchase order from procurement
        @return: New created Purchase Orders procurement wise
        """
        res = {}
        if context is None:
            context = {}
        company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
        partner_obj = self.pool.get('res.partner')
        uom_obj = self.pool.get('product.uom')
        pricelist_obj = self.pool.get('product.pricelist')
        prod_obj = self.pool.get('product.product')
        acc_pos_obj = self.pool.get('account.fiscal.position')
        seq_obj = self.pool.get('ir.sequence')
        warehouse_obj = self.pool.get('stock.warehouse')
        for procurement in self.browse(cr, uid, ids, context=context):
            res_id = procurement.move_id.id
            partner = procurement.product_id.seller_id # Taken Main Supplier of Product of Procurement.
            seller_qty = procurement.product_id.seller_qty
            partner_id = partner.id
            # NOTE(review): address_id is currently unused by po_vals below;
            # kept as-is pending confirmation it has no other purpose.
            address_id = partner_obj.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
            pricelist_id = partner.property_product_pricelist_purchase.id
            warehouse_id = warehouse_obj.search(cr, uid, [('company_id', '=', procurement.company_id.id or company.id)], context=context)
            uom_id = procurement.product_id.uom_po_id.id
            qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
            if seller_qty:
                # Never order below the supplier's usual quantity.
                qty = max(qty, seller_qty)
            price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner_id, {'uom': uom_id})[pricelist_id]
            schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
            purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
            #Passing partner_id to context for purchase order line integrity of Line name
            new_context = context.copy()
            new_context.update({'lang': partner.lang, 'partner_id': partner_id})
            product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
            taxes_ids = procurement.product_id.supplier_taxes_id
            taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position, taxes_ids)
            name = product.partner_ref
            if product.description_purchase:
                name += '\n'+ product.description_purchase
            line_vals = {
                'name': name,
                'product_qty': qty,
                'product_id': procurement.product_id.id,
                'product_uom': uom_id,
                'price_unit': price or 0.0,
                'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'move_dest_id': res_id,
                'taxes_id': [(6,0,taxes)],
            }
            name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
            po_vals = {
                'name': name,
                'origin': procurement.origin,
                'partner_id': partner_id,
                'location_id': procurement.location_id.id,
                'warehouse_id': warehouse_id and warehouse_id[0] or False,
                'pricelist_id': pricelist_id,
                'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'company_id': procurement.company_id.id,
                'fiscal_position': partner.property_account_position and partner.property_account_position.id or False,
                'payment_term_id': partner.property_supplier_payment_term.id or False,
            }
            res[procurement.id] = self.create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=new_context)
            self.write(cr, uid, [procurement.id], {'state': 'running', 'purchase_id': res[procurement.id]})
        self.message_post(cr, uid, ids, body=_("Draft Purchase Order created"), context=context)
        return res

    def _product_virtual_get(self, cr, uid, order_point):
        """Skip the virtual stock computation for order points whose
        procurement is already covered by a live purchase order."""
        procurement = order_point.procurement_id
        if procurement and procurement.state != 'exception' and procurement.purchase_id and procurement.purchase_id.state in ('draft', 'confirmed'):
            return None
        return super(procurement_order, self)._product_virtual_get(cr, uid, order_point)
class mail_mail(osv.Model):
    _name = 'mail.mail'
    _inherit = 'mail.mail'

    def _postprocess_sent_message(self, cr, uid, mail, context=None):
        # A sent email attached to a purchase order means the RFQ went
        # out: push the order forward in its workflow.
        if mail.model == 'purchase.order':
            po_model = self.pool.get('purchase.order')
            po_model.signal_send_rfq(cr, uid, [mail.res_id])
        return super(mail_mail, self)._postprocess_sent_message(
            cr, uid, mail=mail, context=context)
class product_template(osv.Model):
    _name = 'product.template'
    _inherit = 'product.template'
    _columns = {
        # Flag controlling whether the product may be selected on PO lines.
        'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
    }
    _defaults = {
        # Products are purchasable by default.
        'purchase_ok': 1,
    }
class mail_compose_message(osv.Model):
    _inherit = 'mail.compose.message'

    def send_mail(self, cr, uid, ids, context=None):
        """Send the composed email; when composed from a purchase order,
        also fire the 'send RFQ' workflow signal and make the recipients
        follow the order."""
        context = context or {}
        po_id = context.get('default_res_id')
        if context.get('default_model') == 'purchase.order' and po_id:
            context = dict(context, mail_post_autofollow=True)
            self.pool.get('purchase.order').signal_send_rfq(cr, uid, [po_id])
        return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
    """Notify the related purchase orders when a supplier invoice is
    validated."""
    _inherit = 'account.invoice'

    def invoice_validate(self, cr, uid, ids, context=None):
        po_obj = self.pool.get('purchase.order')
        # Bug fix: pass the context by keyword.  The 4th positional
        # argument of search() is `offset`, so the old call handed the
        # context dict to `offset` instead.
        po_ids = po_obj.search(cr, uid, [('invoice_ids', 'in', ids)], context=context)
        # Bug fix: forward the caller's context instead of hard-coding
        # context=None in the super call.
        res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
        po_obj.message_post(cr, uid, po_ids, body=_("Invoice <b>Received.</b>"), context=context)
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
[IMP] Added message post for 'Invoice Paid.' when the invoice is paid in PO.
bzr revid: psa@tinyerp.com-20130214063655-15kjqgc67fwcqw9f
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import attrgetter
from openerp.osv import fields, osv
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
    """Function field: compute the untaxed, tax and total amounts of each
    order, rounded in the pricelist currency."""
    res = {}
    cur_obj=self.pool.get('res.currency')
    for order in self.browse(cr, uid, ids, context=context):
        res[order.id] = {
            'amount_untaxed': 0.0,
            'amount_tax': 0.0,
            'amount_total': 0.0,
        }
        # val1 accumulates line subtotals, val the computed tax amounts.
        val = val1 = 0.0
        cur = order.pricelist_id.currency_id
        for line in order.order_line:
            val1 += line.price_subtotal
            for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
                val += c.get('amount', 0.0)
        # Round in the order currency; total = rounded untaxed + rounded tax.
        res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
        res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
        res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
    return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
    """Inverse of the ``minimum_planned_date`` function field.

    Pushes the chosen date down to every order line whose scheduled date
    equals the old minimum or precedes the new value, then stores the new
    minimum on the order itself.

    :return: True on success, False when no date was provided
    """
    if not value:
        return False
    # isinstance() replaces the old `type(ids) != type([])` comparison.
    if not isinstance(ids, list):
        ids = [ids]
    for po in self.browse(cr, uid, ids, context=context):
        if po.order_line:
            cr.execute("""update purchase_order_line set
                    date_planned=%s
                where
                    order_id=%s and
                    (date_planned=%s or date_planned<%s)""",
                (value, po.id, po.minimum_planned_date, value))
        cr.execute("""update purchase_order set
                minimum_planned_date=%s where id=%s""", (value, po.id))
    return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
    """Function field: earliest ``date_planned`` among the order lines,
    or False for orders without any line."""
    res = {}
    for order in self.browse(cr, uid, ids, context=context):
        if order.order_line:
            res[order.id] = min(line.date_planned for line in order.order_line)
        else:
            res[order.id] = False
    return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
    """Function field: percentage of the untaxed amount already covered
    by non-draft, non-cancelled invoices."""
    res = {}
    for order in self.browse(cursor, user, ids, context=context):
        invoiced_total = sum(
            invoice.amount_untaxed
            for invoice in order.invoice_ids
            if invoice.state not in ('draft', 'cancel'))
        if order.amount_untaxed:
            res[order.id] = invoiced_total * 100.0 / order.amount_untaxed
        else:
            res[order.id] = 0.0
    return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
    """Function field: percentage of ordered quantities already received.

    Aggregates stock move quantities per purchase order and move state in
    a single SQL query: 'done' moves count as received, 'cancel' moves
    are ignored, and every non-cancelled move counts toward the total.
    """
    if not ids: return {}
    res = {}
    for id in ids:
        # [received quantity, total expected quantity]
        res[id] = [0.0,0.0]
    cr.execute('''SELECT
            p.purchase_id,sum(m.product_qty), m.state
        FROM
            stock_move m
        LEFT JOIN
            stock_picking p on (p.id=m.picking_id)
        WHERE
            p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
    for oid,nbr,state in cr.fetchall():
        if state=='cancel':
            continue
        if state=='done':
            res[oid][0] += nbr or 0.0
            res[oid][1] += nbr or 0.0
        else:
            res[oid][1] += nbr or 0.0
    for r in res:
        if not res[r][1]:
            # No (non-cancelled) moves at all: report 0%.
            res[r] = 0.0
        else:
            res[r] = 100.0 * res[r][0] / res[r][1]
    return res
def _get_order(self, cr, uid, ids, context=None):
    """store-trigger helper: map modified PO line ids to the ids of their
    purchase orders (deduplicated)."""
    order_ids = {}
    line_obj = self.pool.get('purchase.order.line')
    for line in line_obj.browse(cr, uid, ids, context=context):
        order_ids[line.order_id.id] = True
    return order_ids.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
    """Function field: True once the invoiced rate reaches 100%."""
    res = {}
    for purchase in self.browse(cursor, user, ids, context=context):
        res[purchase.id] = purchase.invoiced_rate == 100.00
    return res
def _get_journal(self, cr, uid, context=None):
    """Default journal: the first purchase journal of the current company
    (taken from the context when present), or False when none exists."""
    if context is None:
        context = {}
    user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    company_id = context.get('company_id', user.company_id.id)
    domain = [('type', '=', 'purchase'), ('company_id', '=', company_id)]
    journal_ids = self.pool.get('account.journal').search(cr, uid, domain, limit=1)
    return journal_ids[0] if journal_ids else False
# Workflow states of a purchase order, from quotation to completion.
STATE_SELECTION = [
    ('draft', 'Draft PO'),
    ('sent', 'RFQ Sent'),
    ('confirmed', 'Waiting Approval'),
    ('approved', 'Purchase Order'),
    ('except_picking', 'Shipping Exception'),
    ('except_invoice', 'Invoice Exception'),
    ('done', 'Done'),
    ('cancel', 'Cancelled')
]
# mail.thread tracking: post a tracked message when the order reaches the
# 'confirmed' or 'approved' state.
_track = {
    'state': {
        'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
        'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
    },
}
_columns = {
    # --- identification and dates ---
    'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
    'origin': fields.char('Source Document', size=64,
        help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
    ),
    'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
        help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
    'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
    'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
    # --- partner and delivery ---
    'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
        change_default=True, track_visibility='always'),
    'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
        states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
        help="Put an address if you want to deliver directly from the supplier to the customer. " \
            "Otherwise, keep empty to deliver to your own company."
    ),
    'warehouse_id': fields.many2one('stock.warehouse', 'Destination Warehouse'),
    'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]} ),
    # --- pricing ---
    'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
    'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency",readonly=True, required=True),
    # --- workflow and content ---
    'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user, the status switch to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status becomes in exception.", select=True),
    'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines', states={'approved':[('readonly',True)],'done':[('readonly',True)]}),
    'validator' : fields.many2one('res.users', 'Validated by', readonly=True),
    'notes': fields.text('Terms and Conditions'),
    # --- invoicing and reception tracking ---
    'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id', 'invoice_id', 'Invoices', help="Invoices generated for a purchase order"),
    'picking_ids': fields.one2many('stock.picking.in', 'purchase_id', 'Picking List', readonly=True, help="This is the list of incoming shipments that have been generated for this purchase order."),
    'shipped':fields.boolean('Received', readonly=True, select=True, help="It indicates that a picking has been done"),
    'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
    'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', help="It indicates that an invoice has been paid"),
    'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
    'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
        readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
        help="Based on Purchase Order lines: place individual lines in 'Invoice Control > Based on P.O. lines' from where you can selectively create an invoice.\n" \
            "Based on generated invoice: create a draft invoice you can validate later.\n" \
            "Bases on incoming shipments: let you create an invoice when receptions are validated."
    ),
    # --- computed amounts (recomputed when lines change, priority 10) ---
    'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
        store = {
            'purchase.order.line': (_get_order, ['date_planned'], 10),
        }
    ),
    'amount_untaxed': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Untaxed Amount',
        store={
            'purchase.order.line': (_get_order, None, 10),
        }, multi="sums", help="The amount without tax", track_visibility='always'),
    'amount_tax': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Taxes',
        store={
            'purchase.order.line': (_get_order, None, 10),
        }, multi="sums", help="The tax amount"),
    'amount_total': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Total',
        store={
            'purchase.order.line': (_get_order, None, 10),
        }, multi="sums",help="The total amount"),
    # --- accounting ---
    'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
    'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
    'product_id': fields.related('order_line','product_id', type='many2one', relation='product.product', string='Product'),
    'create_uid': fields.many2one('res.users', 'Responsible'),
    'company_id': fields.many2one('res.company','Company',required=True,select=1, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}),
    'journal_id': fields.many2one('account.journal', 'Journal'),
}
_defaults = {
    'date_order': fields.date.context_today,
    'state': 'draft',
    # '/' is a placeholder replaced by the sequence in create().
    'name': lambda obj, cr, uid, context: '/',
    'shipped': 0,
    'invoice_method': 'order',
    'invoiced': 0,
    # Default pricelist comes from the supplier preset in the context.
    'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
    'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
    'journal_id': _get_journal,
}
_sql_constraints = [
    ('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = "name desc"
def create(self, cr, uid, vals, context=None):
    """Assign the next purchase-order sequence number when the caller did
    not provide an explicit reference."""
    if vals.get('name', '/') == '/':
        next_ref = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order')
        vals['name'] = next_ref or '/'
    return super(purchase_order, self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
    """Delete purchase orders, refusing any order that is not draft or
    cancelled, and fire the cancel workflow signal beforehand."""
    deletable_ids = []
    for record in self.read(cr, uid, ids, ['state'], context=context):
        if record['state'] not in ('draft', 'cancel'):
            raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
        deletable_ids.append(record['id'])
    # automatically sending subflow.delete upon deletion
    self.signal_purchase_cancel(cr, uid, deletable_ids)
    return super(purchase_order, self).unlink(cr, uid, deletable_ids, context=context)
def button_dummy(self, cr, uid, ids, context=None):
    # No-op button used by views to force a record save/refresh.
    return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
    """Keep the order currency in sync with the selected pricelist."""
    if not pricelist_id:
        return {}
    pricelist = self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context)
    return {'value': {'currency_id': pricelist.currency_id.id}}
def onchange_dest_address_id(self, cr, uid, ids, address_id):
    """Direct-delivery address selected: clear the warehouse and ship to
    the customer's stock location instead."""
    if not address_id:
        return {}
    partner_obj = self.pool.get('res.partner')
    values = {'warehouse_id': False}
    customer = partner_obj.browse(cr, uid, address_id)
    if customer:
        values['location_id'] = customer.property_stock_customer.id
    return {'value': values}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id):
    """Warehouse selected: deliver to its input location and clear any
    direct-delivery address."""
    if not warehouse_id:
        return {}
    warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id)
    new_values = {
        'location_id': warehouse.lot_input_id.id,
        'dest_address_id': False,
    }
    return {'value': new_values}
def onchange_partner_id(self, cr, uid, ids, partner_id):
    """Refresh pricelist, fiscal position and payment term from the
    selected supplier; clear them when no supplier is set."""
    if not partner_id:
        return {'value': {
            'fiscal_position': False,
            'payment_term_id': False,
        }}
    # Dead code removed: the old `supplier_address = partner.address_get(...)`
    # result was never used, and address_get() only reads addresses.
    supplier = self.pool.get('res.partner').browse(cr, uid, partner_id)
    return {'value': {
        'pricelist_id': supplier.property_product_pricelist_purchase.id,
        'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
        'payment_term_id': supplier.property_supplier_payment_term.id or False,
    }}
def invoice_open(self, cr, uid, ids, context=None):
    """Open the supplier invoices linked to the given purchase orders.

    :raises osv.except_osv: when none of the orders has an invoice yet
    :return: an act_window dict (list view for several invoices, form
        view for a single one)
    """
    mod_obj = self.pool.get('ir.model.data')
    act_obj = self.pool.get('ir.actions.act_window')

    result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
    id = result and result[1] or False
    result = act_obj.read(cr, uid, [id], context=context)[0]
    inv_ids = []
    for po in self.browse(cr, uid, ids, context=context):
        inv_ids+= [invoice.id for invoice in po.invoice_ids]
    if not inv_ids:
        raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
    #choose the view_mode accordingly
    if len(inv_ids)>1:
        result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
    else:
        res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
        result['views'] = [(res and res[1] or False, 'form')]
        result['res_id'] = inv_ids and inv_ids[0] or False
    return result
def view_invoice(self, cr, uid, ids, context=None):
    '''
    Return an action displaying the invoices of the given purchase
    orders, creating them first (via the line-invoicing wizard) for
    'manual' invoicing orders that have none yet.
    '''
    mod_obj = self.pool.get('ir.model.data')
    wizard_obj = self.pool.get('purchase.order.line_invoice')
    #compute the number of invoices to display
    inv_ids = []
    for po in self.browse(cr, uid, ids, context=context):
        if po.invoice_method == 'manual':
            if not po.invoice_ids:
                # NOTE(review): mutates the caller's context dict in place
                # to pass active_ids to the wizard -- side effect is
                # visible to the caller.
                context.update({'active_ids' : [line.id for line in po.order_line]})
                wizard_obj.makeInvoices(cr, uid, [], context=context)

    # Re-browse: the wizard above may have attached fresh invoices.
    for po in self.browse(cr, uid, ids, context=context):
        inv_ids+= [invoice.id for invoice in po.invoice_ids]
    res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
    res_id = res and res[1] or False

    return {
        'name': _('Supplier Invoices'),
        'view_type': 'form',
        'view_mode': 'form',
        'view_id': [res_id],
        'res_model': 'account.invoice',
        'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
        'type': 'ir.actions.act_window',
        'nodestroy': True,
        'target': 'current',
        'res_id': inv_ids and inv_ids[0] or False,
    }
def view_picking(self, cr, uid, ids, context=None):
    '''
    Return an action displaying the existing picking orders of the given
    purchase order ids (form view when there is exactly one picking).
    '''
    mod_obj = self.pool.get('ir.model.data')
    pick_ids = []
    for po in self.browse(cr, uid, ids, context=context):
        pick_ids += [picking.id for picking in po.picking_ids]

    action_model, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree4'))
    action = self.pool.get(action_model).read(cr, uid, action_id, context=context)
    # The stored action context is a string; eval() turns it back into a
    # dict.  The string comes from server-side ir.actions data, not from
    # user input.
    ctx = eval(action['context'])
    ctx.update({
        'search_default_purchase_id': ids[0]
    })
    # A single picking is opened directly in its form view.
    if pick_ids and len(pick_ids) == 1:
        form_view_ids = [view_id for view_id, view in action['views'] if view == 'form']
        view_id = form_view_ids and form_view_ids[0] or False
        action.update({
            'views': [],
            'view_mode': 'form',
            'view_id': view_id,
            'res_id': pick_ids[0]
        })
    action.update({
        'context': ctx,
    })
    return action
def wkf_approve_order(self, cr, uid, ids, context=None):
    """Workflow action: move the orders to 'approved' and stamp today as
    the approval date (in the user's timezone)."""
    approve_date = fields.date.context_today(self, cr, uid, context=context)
    self.write(cr, uid, ids, {'state': 'approved', 'date_approve': approve_date})
    return True
def print_confirm(self, cr, uid, ids, context=None):
    # Debug trace for the workflow transition; the parenthesized form prints
    # the same text under Python 2.
    print("Confirmed")
def print_double(self, cr, uid, ids, context=None):
    # Debug trace for the double-approval workflow transition.
    print("double Approval")
def print_router(self, cr, uid, ids, context=None):
    # Debug trace for the routing workflow transition.
    print("Routed")
def wkf_send_rfq(self, cr, uid, ids, context=None):
    '''
    This function opens a window to compose an email, with the edi purchase template message loaded by default
    :return: action dict opening the mail.compose.message wizard in a popup
    '''
    ir_model_data = self.pool.get('ir.model.data')
    # Template and wizard form are optional: fall back gracefully when the
    # XML ids are missing (e.g. module data not fully loaded).
    try:
        template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
    except ValueError:
        template_id = False
    try:
        compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
    except ValueError:
        compose_form_id = False
    # Bug fix: dict(None) raises TypeError -- guard against a missing
    # context like the other public methods of this model do.
    ctx = dict(context or {})
    ctx.update({
        'default_model': 'purchase.order',
        'default_res_id': ids[0],
        'default_use_template': bool(template_id),
        'default_template_id': template_id,
        'default_composition_mode': 'comment',
    })
    return {
        'type': 'ir.actions.act_window',
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'mail.compose.message',
        'views': [(compose_form_id, 'form')],
        'view_id': compose_form_id,
        'target': 'new',
        'context': ctx,
    }
def print_quotation(self, cr, uid, ids, context=None):
    '''
    This function prints the request for quotation and mark it as sent, so that we can see more easily the next step of the workflow
    '''
    assert len(ids) == 1, 'This option should only be used for a single id at a time'
    # Mark the RFQ as sent before handing the data to the report engine.
    self.signal_send_rfq(cr, uid, ids)
    report_data = {
        'model': 'purchase.order',
        'ids': ids,
        'form': self.read(cr, uid, ids[0], context=context),
    }
    return {
        'type': 'ir.actions.report.xml',
        'report_name': 'purchase.quotation',
        'datas': report_data,
        'nodestroy': True,
    }
#TODO: implement messages system
def wkf_confirm_order(self, cr, uid, ids, context=None):
    """Workflow action: confirm the orders.

    Refuses to confirm an order without lines, confirms every draft line,
    then moves the orders to the 'confirmed' state recording the current
    user as validator.
    :raises osv.except_osv: when an order has no order line
    :return: True
    """
    todo = []
    for po in self.browse(cr, uid, ids, context=context):
        if not po.order_line:
            raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
        for line in po.order_line:
            if line.state=='draft':
                todo.append(line.id)
    self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
    # Improvement: one batched write over all ids instead of a write per id
    # (the original loop also shadowed the `id` builtin).
    self.write(cr, uid, ids, {'state' : 'confirmed', 'validator' : uid})
    return True
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
}
def action_cancel_draft(self, cr, uid, ids, context=None):
    """Reset the given orders back to draft and restart their workflow.

    :return: True on success, False when `ids` is empty
    """
    # Idiom fix: truth-test the list directly instead of `not len(ids)`.
    if not ids:
        return False
    self.write(cr, uid, ids, {'state':'draft','shipped':0})
    for p_id in ids:
        # Deleting the existing instance of workflow for PO
        self.delete_workflow(cr, uid, [p_id]) # TODO is it necessary to interleave the calls?
        self.create_workflow(cr, uid, [p_id])
    return True
def action_invoice_create(self, cr, uid, ids, context=None):
    """Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
    :param ids: list of ids of purchase orders.
    :return: ID of created invoice.
    :rtype: int
    """
    res = False
    journal_obj = self.pool.get('account.journal')
    inv_obj = self.pool.get('account.invoice')
    inv_line_obj = self.pool.get('account.invoice.line')
    fiscal_obj = self.pool.get('account.fiscal.position')
    property_obj = self.pool.get('ir.property')
    for order in self.browse(cr, uid, ids, context=context):
        pay_acc_id = order.partner_id.property_account_payable.id
        # Exactly one purchase journal per company is expected; fail loudly otherwise.
        journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)
        if not journal_ids:
            raise osv.except_osv(_('Error!'),
                _('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
        # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
        inv_lines = []
        for po_line in order.order_line:
            # Expense account resolution: product's own account, then its
            # category's, else the generic category-level property.
            if po_line.product_id:
                acc_id = po_line.product_id.property_account_expense.id
                if not acc_id:
                    acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
                if not acc_id:
                    raise osv.except_osv(_('Error!'), _('Define expense account for this company: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
            else:
                acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id
            # Remap the account through the order's fiscal position, if any.
            fpos = order.fiscal_position or False
            acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)
            inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
            inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
            inv_lines.append(inv_line_id)
            po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)
        # get invoice data and create invoice
        inv_data = {
            'name': order.partner_ref or order.name,
            'reference': order.partner_ref or order.name,
            'account_id': pay_acc_id,
            'type': 'in_invoice',
            'partner_id': order.partner_id.id,
            'currency_id': order.pricelist_id.currency_id.id,
            'journal_id': len(journal_ids) and journal_ids[0] or False,
            'invoice_line': [(6, 0, inv_lines)],
            'origin': order.name,
            'fiscal_position': order.fiscal_position.id or False,
            'payment_term': order.payment_term_id.id or False,
            'company_id': order.company_id.id,
        }
        inv_id = inv_obj.create(cr, uid, inv_data, context=context)
        # compute the invoice
        inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
        # Link this new invoice to related purchase order
        order.write({'invoice_ids': [(4, inv_id)]}, context=context)
        res = inv_id
    # NOTE(review): when several orders are passed only the id of the last
    # created invoice is returned -- presumably callers pass a single id.
    return res
def invoice_done(self, cr, uid, ids, context=None):
    """Workflow action fired when the supplier invoice is paid: keep the
    order approved and log a note in the chatter."""
    vals = {'state': 'approved'}
    self.write(cr, uid, ids, vals, context=context)
    self.message_post(cr, uid, ids, body=_("Invoice <b>Paid.</b>"), context=context)
    return True
def has_stockable_product(self, cr, uid, ids, *args):
    """Return True as soon as any line of any of the given orders carries a
    stockable or consumable product, False otherwise."""
    return any(
        line.product_id and line.product_id.type in ('product', 'consu')
        for order in self.browse(cr, uid, ids)
        for line in order.order_line
    )
def action_cancel(self, cr, uid, ids, context=None):
    """Cancel the given purchase orders.

    Refuses to cancel when a related reception or supplier invoice has
    already progressed beyond draft; otherwise cancels the remaining
    pickings and invoices, sets the orders to 'cancel' and fires the
    purchase-cancel workflow signal.
    :raises osv.except_osv: when a reception or invoice is in progress
    :return: True
    """
    for purchase in self.browse(cr, uid, ids, context=context):
        # Any reception already in progress blocks the cancellation.
        for pick in purchase.picking_ids:
            if pick.state not in ('draft','cancel'):
                raise osv.except_osv(
                    _('Unable to cancel this purchase order.'),
                    _('First cancel all receptions related to this purchase order.'))
        self.pool.get('stock.picking') \
            .signal_button_cancel(cr, uid, map(attrgetter('id'), purchase.picking_ids))
        # Likewise, a validated invoice blocks the cancellation.
        for inv in purchase.invoice_ids:
            if inv and inv.state not in ('cancel','draft'):
                raise osv.except_osv(
                    _('Unable to cancel this purchase order.'),
                    _('You must first cancel all receptions related to this purchase order.'))
        self.pool.get('account.invoice') \
            .signal_invoice_cancel(cr, uid, map(attrgetter('id'), purchase.invoice_ids))
    self.write(cr,uid,ids,{'state':'cancel'})
    self.signal_purchase_cancel(cr, uid, ids)
    return True
def _prepare_order_picking(self, cr, uid, order, context=None):
    """Prepare the values used to create the incoming picking of `order`.

    :param browse_record order: purchase order the picking will receive
    :return: field values for the new stock.picking
    :rtype: dict
    """
    # Fix: the original dict literal listed 'partner_id' twice with the same
    # expression; a single entry is kept (behavior unchanged, intent clear).
    return {
        'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),
        'origin': order.name + ((order.origin and (':' + order.origin)) or ''),
        'date': order.date_order,
        'partner_id': order.dest_address_id.id or order.partner_id.id,
        # Pickings must be invoiced only when the order is invoiced on reception.
        'invoice_state': '2binvoiced' if order.invoice_method == 'picking' else 'none',
        'type': 'in',
        'purchase_id': order.id,
        'company_id': order.company_id.id,
        'move_lines' : [],
    }
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
return {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_qty': order_line.product_qty,
'product_uos_qty': order_line.product_qty,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': order_line.date_planned,
'date_expected': order_line.date_planned,
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'move_dest_id': order_line.move_dest_id.id,
'state': 'draft',
'type':'in',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': order_line.price_unit
}
def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
    """Creates pickings and appropriate stock moves for given order lines, then
    confirms the moves, makes them available, and confirms the picking.

    If ``picking_id`` is provided, the stock moves will be added to it, otherwise
    a standard outgoing picking will be created to wrap the stock moves, as returned
    by :meth:`~._prepare_order_picking`.

    Modules that wish to customize the procurements or partition the stock moves over
    multiple stock pickings may override this method and call ``super()`` with
    different subsets of ``order_lines`` and/or preset ``picking_id`` values.

    :param browse_record order: purchase order to which the order lines belong
    :param list(browse_record) order_lines: purchase order line records for which picking
                                            and moves should be created.
    :param int picking_id: optional ID of a stock picking to which the created stock moves
                           will be added. A new picking will be created if omitted.
    :return: list of IDs of pickings used/created for the given order lines (usually just one)
    """
    stock_picking = self.pool.get('stock.picking')
    if not picking_id:
        picking_id = stock_picking.create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
    todo_moves = []
    stock_move = self.pool.get('stock.move')
    for order_line in order_lines:
        # Lines without a product (e.g. free-text lines) produce no move.
        if not order_line.product_id:
            continue
        # Only stockable and consumable products generate stock moves.
        if order_line.product_id.type in ('product', 'consu'):
            move = stock_move.create(cr, uid, self._prepare_order_line_move(cr, uid, order, order_line, picking_id, context=context))
            if order_line.move_dest_id:
                # Retarget the chained destination move to the order's location.
                order_line.move_dest_id.write({'location_id': order.location_id.id})
            todo_moves.append(move)
    # Confirm and force-assign the moves, then confirm the whole picking.
    stock_move.action_confirm(cr, uid, todo_moves)
    stock_move.force_assign(cr, uid, todo_moves)
    stock_picking.signal_button_confirm(cr, uid, [picking_id])
    return [picking_id]
def action_picking_create(self, cr, uid, ids, context=None):
    """Create the incoming pickings of the given orders and return the id
    of the picking that drives the purchase workflow."""
    picking_ids = []
    for order in self.browse(cr, uid, ids):
        created = self._create_pickings(cr, uid, order, order.order_line, None, context=context)
        picking_ids.extend(created)
    # Must return one unique picking ID: the one to connect in the subflow of the purchase order.
    # In case of multiple (split) pickings, we should return the ID of the critical one, i.e. the
    # one that should trigger the advancement of the purchase workflow.
    # By default we will consider the first one as most important, but this behavior can be overridden.
    if not picking_ids:
        return False
    return picking_ids[0]
def picking_done(self, cr, uid, ids, context=None):
    """Workflow action fired when all products are received: flag the
    orders shipped, keep them approved and log a note."""
    vals = {'shipped': 1, 'state': 'approved'}
    self.write(cr, uid, ids, vals, context=context)
    self.message_post(cr, uid, ids, body=_("Products <b>Received.</b>"), context=context)
    return True
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate a purchase order as a fresh draft: new reference from the
    sequence, no invoices, no pickings, nothing shipped or invoiced."""
    if not default:
        default = {}
    new_reference = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order')
    default.update(
        state='draft',
        shipped=False,
        invoiced=False,
        invoice_ids=[],
        picking_ids=[],
        name=new_reference,
    )
    return super(purchase_order, self).copy(cr, uid, id, default, context)
def do_merge(self, cr, uid, ids, context=None):
    """
    To merge similar type of purchase orders.
    Orders will only be merged if:
    * Purchase Orders are in draft
    * Purchase Orders belong to the same partner
    * Purchase Orders are have same stock location, same pricelist
    Lines will only be merged if:
    * Order lines are exactly the same except for the quantity and unit

    @param self: The object pointer.
    @param cr: A database cursor
    @param uid: ID of the user currently logged in
    @param ids: the ID or list of IDs
    @param context: A standard dictionary

    @return: new purchase order id
    """
    #TOFIX: merged order line should be unlink
    def make_key(br, fields):
        # Build a hashable, order-independent key from the listed fields of
        # a browse record so equivalent orders/lines compare equal.
        list_key = []
        for field in fields:
            field_val = getattr(br, field)
            if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
                if not field_val:
                    field_val = False
            if isinstance(field_val, browse_record):
                field_val = field_val.id
            elif isinstance(field_val, browse_null):
                field_val = False
            elif isinstance(field_val, list):
                # many2many values keyed as a (6, 0, ids) command tuple.
                field_val = ((6, 0, tuple([v.id for v in field_val])),)
            list_key.append((field, field_val))
        list_key.sort()
        return tuple(list_key)

    # Compute what the new orders should contain
    new_orders = {}
    for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
        order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id'))
        new_order = new_orders.setdefault(order_key, ({}, []))
        new_order[1].append(porder.id)
        order_infos = new_order[0]
        if not order_infos:
            # First order seen for this key: seed the merged order values.
            order_infos.update({
                'origin': porder.origin,
                'date_order': porder.date_order,
                'partner_id': porder.partner_id.id,
                'dest_address_id': porder.dest_address_id.id,
                'warehouse_id': porder.warehouse_id.id,
                'location_id': porder.location_id.id,
                'pricelist_id': porder.pricelist_id.id,
                'state': 'draft',
                'order_line': {},
                'notes': '%s' % (porder.notes or '',),
                'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
            })
        else:
            # Subsequent orders: keep the earliest order date, concatenate
            # the notes and origins.
            if porder.date_order < order_infos['date_order']:
                order_infos['date_order'] = porder.date_order
            if porder.notes:
                order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
            if porder.origin:
                order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin

        for order_line in porder.order_line:
            line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'product_id', 'move_dest_id', 'account_analytic_id'))
            o_line = order_infos['order_line'].setdefault(line_key, {})
            if o_line:
                # merge the line with an existing line
                o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
            else:
                # append a new "standalone" line
                for field in ('product_qty', 'product_uom'):
                    field_val = getattr(order_line, field)
                    if isinstance(field_val, browse_record):
                        field_val = field_val.id
                    o_line[field] = field_val
                o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0

    allorders = []
    orders_info = {}
    for order_key, (order_data, old_ids) in new_orders.iteritems():
        # skip merges with only one order
        if len(old_ids) < 2:
            allorders += (old_ids or [])
            continue

        # cleanup order line data
        for key, value in order_data['order_line'].iteritems():
            del value['uom_factor']
            value.update(dict(key))
        order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]

        # create the new order
        neworder_id = self.create(cr, uid, order_data)
        orders_info.update({neworder_id: old_ids})
        allorders.append(neworder_id)

        # make triggers pointing to the old orders point to the new order
        for old_id in old_ids:
            self.redirect_workflow(cr, uid, [(old_id, neworder_id)])
            self.signal_purchase_cancel(cr, uid, [old_id]) # TODO Is it necessary to interleave the calls?
    return orders_info
class purchase_order_line(osv.osv):
    """One line of a purchase order: product, quantity, price, taxes and
    the stock moves reserved to fulfil it."""

    def _amount_line(self, cr, uid, ids, prop, arg, context=None):
        # Function field: untaxed subtotal of each line, rounded in the
        # currency of the order's pricelist.
        res = {}
        cur_obj=self.pool.get('res.currency')
        tax_obj = self.pool.get('account.tax')
        for line in self.browse(cr, uid, ids, context=context):
            taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
            cur = line.order_id.pricelist_id.currency_id
            res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
        return res

    _columns = {
        'name': fields.text('Description', required=True),
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'date_planned': fields.date('Scheduled Date', required=True, select=True),
        'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
        'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
        'move_dest_id': fields.many2one('stock.move', 'Reservation Destination', ondelete='set null'),
        'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
        'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
        'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
        'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
        'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
        'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', required=True, readonly=True,
                                  help=' * The \'Draft\' status is set automatically when purchase order in draft status. \
                                       \n* The \'Confirmed\' status is set automatically as confirm when purchase order in confirm status. \
                                       \n* The \'Done\' status is set automatically when purchase order is set as done. \
                                       \n* The \'Cancelled\' status is set automatically when user cancel purchase order.'),
        'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
        'invoiced': fields.boolean('Invoiced', readonly=True),
        'partner_id': fields.related('order_id','partner_id',string='Partner',readonly=True,type="many2one", relation="res.partner", store=True),
        'date_order': fields.related('order_id','date_order',string='Order Date',readonly=True,type="date")
    }
    _defaults = {
        'product_qty': lambda *a: 1.0,
        'state': lambda *args: 'draft',
        'invoiced': lambda *a: 0,
    }
    _table = 'purchase_order_line'
    _name = 'purchase.order.line'
    _description = 'Purchase Order Line'

    def copy_data(self, cr, uid, id, default=None, context=None):
        # Duplicated lines start over as draft, without reservations or
        # invoice links.
        if not default:
            default = {}
        default.update({'state':'draft', 'move_ids':[],'invoiced':0,'invoice_lines':[]})
        return super(purchase_order_line, self).copy_data(cr, uid, id, default, context)

    def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
            partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
            name=False, price_unit=False, context=None):
        """
        onchange handler of product_uom.
        Delegates to onchange_product_id once a unit of measure is set.
        """
        if not uom_id:
            return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
        return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
            partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
            name=name, price_unit=price_unit, context=context)

    def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
        """Return the datetime value to use as Schedule Date (``date_planned``) for
        PO Lines that correspond to the given product.supplierinfo,
        when ordered at `date_order_str`.

        :param browse_record | False supplier_info: product.supplierinfo, used to
            determine delivery delay (if False, default delay = 0)
        :param str date_order_str: date of order, as a string in
            DEFAULT_SERVER_DATE_FORMAT
        :rtype: datetime
        :return: desired Schedule Date for the PO line
        """
        supplier_delay = int(supplier_info.delay) if supplier_info else 0
        return datetime.strptime(date_order_str, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(days=supplier_delay)

    def _check_product_uom_group(self, cr, uid, context=None):
        # True when the current user belongs to the "Manage Unit of Measure"
        # group (controls whether UoM warnings are shown).
        group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
        res = [user for user in group_uom.users if user.id == uid]
        return len(res) and True or False

    def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
            partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
            name=False, price_unit=False, context=None):
        """
        onchange handler of product_id.
        Computes description, UoM, quantity, schedule date, unit price and
        taxes from the selected product and supplier information.
        """
        if context is None:
            context = {}

        res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
        if not product_id:
            return res

        product_product = self.pool.get('product.product')
        product_uom = self.pool.get('product.uom')
        res_partner = self.pool.get('res.partner')
        product_supplierinfo = self.pool.get('product.supplierinfo')
        product_pricelist = self.pool.get('product.pricelist')
        account_fiscal_position = self.pool.get('account.fiscal.position')
        account_tax = self.pool.get('account.tax')

        # - check for the presence of partner_id and pricelist_id
        #if not partner_id:
        #    raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
        #if not pricelist_id:
        #    raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))

        # - determine name and notes based on product in partner lang.
        context_partner = context.copy()
        if partner_id:
            lang = res_partner.browse(cr, uid, partner_id).lang
            context_partner.update( {'lang': lang, 'partner_id': partner_id} )
        product = product_product.browse(cr, uid, product_id, context=context_partner)
        name = product.name
        if product.description_purchase:
            name += '\n' + product.description_purchase
        res['value'].update({'name': name})

        # - set a domain on product_uom
        res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}

        # - check that uom and product uom belong to the same category
        product_uom_po_id = product.uom_po_id.id
        if not uom_id:
            uom_id = product_uom_po_id

        if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
            if self._check_product_uom_group(cr, uid, context=context):
                res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
            uom_id = product_uom_po_id

        res['value'].update({'product_uom': uom_id})

        # - determine product_qty and date_planned based on seller info
        if not date_order:
            date_order = fields.date.context_today(self,cr,uid,context=context)

        supplierinfo = False
        # NOTE(review): no break -- when the partner appears several times in
        # seller_ids, the last matching supplierinfo wins.
        for supplier in product.seller_ids:
            if partner_id and (supplier.name.id == partner_id):
                supplierinfo = supplier
                if supplierinfo.product_uom.id != uom_id:
                    res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
                min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
                if (qty or 0.0) < min_qty: # If the supplier quantity is greater than entered from user, set minimal.
                    if qty:
                        res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
                    qty = min_qty

        dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        qty = qty or 1.0
        res['value'].update({'date_planned': date_planned or dt})
        if qty:
            res['value'].update({'product_qty': qty})

        # - determine price_unit and taxes_id
        if pricelist_id:
            price = product_pricelist.price_get(cr, uid, [pricelist_id],
                    product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order})[pricelist_id]
        else:
            price = product.standard_price

        taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
        fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
        taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
        res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})

        return res

    # Legacy aliases kept for views/modules still calling the old names.
    product_id_change = onchange_product_id
    product_uom_change = onchange_product_uom

    def action_confirm(self, cr, uid, ids, context=None):
        # Workflow helper: flag the given lines as confirmed.
        self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
        return True
# Pre-v7 style explicit instantiation that registers the model in the ORM.
purchase_order_line()
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_id': fields.many2one('purchase.order', 'Purchase Order'),
}
def check_buy(self, cr, uid, ids, context=None):
''' return True if the supply method of the mto product is 'buy'
'''
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.product_id.supply_method <> 'buy':
return False
return True
def check_supplier_info(self, cr, uid, ids, context=None):
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
if not procurement.product_id.seller_ids:
message = _('No supplier defined for this product !')
self.message_post(cr, uid, [procurement.id], body=message)
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
partner = procurement.product_id.seller_id #Taken Main Supplier of Product of Procurement.
if not partner:
message = _('No default supplier defined for this product')
self.message_post(cr, uid, [procurement.id], body=message)
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!' % procurement.product_id.name))
address_id = partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']
if not address_id:
message = _('No address defined for the supplier')
self.message_post(cr, uid, [procurement.id], body=message)
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
return True
def action_po_assign(self, cr, uid, ids, context=None):
""" This is action which call from workflow to assign purchase order to procurements
@return: True
"""
res = self.make_po(cr, uid, ids, context=context)
res = res.values()
return len(res) and res[0] or 0 #TO CHECK: why workflow is generated error if return not integer value
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
:params procurement: the procurement object generating the purchase order
:params dict po_vals: field values for the new purchase order (the
``order_line`` field will be overwritten with one
single line, as passed in ``line_vals``).
:params dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_report company: the company to which the new PO will belong to.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_report company: the company to which the new PO will belong to.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
def make_po(self, cr, uid, ids, context=None):
""" Make purchase order from procurement
@return: New created Purchase Orders procurement wise
"""
res = {}
if context is None:
context = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
partner_obj = self.pool.get('res.partner')
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
seq_obj = self.pool.get('ir.sequence')
warehouse_obj = self.pool.get('stock.warehouse')
for procurement in self.browse(cr, uid, ids, context=context):
res_id = procurement.move_id.id
partner = procurement.product_id.seller_id # Taken Main Supplier of Product of Procurement.
seller_qty = procurement.product_id.seller_qty
partner_id = partner.id
address_id = partner_obj.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
pricelist_id = partner.property_product_pricelist_purchase.id
warehouse_id = warehouse_obj.search(cr, uid, [('company_id', '=', procurement.company_id.id or company.id)], context=context)
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty,seller_qty)
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner_id, {'uom': uom_id})[pricelist_id]
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
#Passing partner_id to context for purchase order line integrity of Line name
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner_id})
product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
taxes_ids = procurement.product_id.supplier_taxes_id
taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position, taxes_ids)
name = product.partner_ref
if product.description_purchase:
name += '\n'+ product.description_purchase
line_vals = {
'name': name,
'product_qty': qty,
'product_id': procurement.product_id.id,
'product_uom': uom_id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'move_dest_id': res_id,
'taxes_id': [(6,0,taxes)],
}
name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': partner_id,
'location_id': procurement.location_id.id,
'warehouse_id': warehouse_id and warehouse_id[0] or False,
'pricelist_id': pricelist_id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position': partner.property_account_position and partner.property_account_position.id or False,
'payment_term_id': partner.property_supplier_payment_term.id or False,
}
res[procurement.id] = self.create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=new_context)
self.write(cr, uid, [procurement.id], {'state': 'running', 'purchase_id': res[procurement.id]})
self.message_post(cr, uid, ids, body=_("Draft Purchase Order created"), context=context)
return res
def _product_virtual_get(self, cr, uid, order_point):
    """Suppress virtual-stock computation for an order point whose
    procurement already has a pending purchase order.

    Returns None when the linked procurement is healthy (not in
    'exception') and its purchase order is still 'draft' or 'confirmed',
    so no duplicate procurement is generated; otherwise defers to the
    standard implementation.
    """
    proc = order_point.procurement_id
    pending_po = (
        proc
        and proc.state != 'exception'
        and proc.purchase_id
        and proc.purchase_id.state in ('draft', 'confirmed')
    )
    if pending_po:
        return None
    return super(procurement_order, self)._product_virtual_get(cr, uid, order_point)
class mail_mail(osv.Model):
    """Hook into outgoing-mail post-processing: when a sent mail is
    attached to a purchase order, fire the 'send_rfq' workflow signal on
    that order, then fall back to the standard behaviour."""
    _name = 'mail.mail'
    _inherit = 'mail.mail'

    def _postprocess_sent_message(self, cr, uid, mail, context=None):
        if mail.model == 'purchase.order':
            # Move the RFQ forward in its workflow once the mail went out.
            po_obj = self.pool.get('purchase.order')
            po_obj.signal_send_rfq(cr, uid, [mail.res_id])
        return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context)
class product_template(osv.Model):
    """Add the purchasability flag to product templates."""
    _name = 'product.template'
    _inherit = 'product.template'
    _columns = {
        'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
    }
    _defaults = {
        # Fixed: use a proper boolean literal instead of the integer 1 for a
        # fields.boolean default (same truth value, correct type).
        'purchase_ok': True,
    }
class mail_compose_message(osv.Model):
    """When the mail composer is opened on a purchase order, sending the
    mail also triggers the order's 'send_rfq' workflow signal and enables
    auto-follow of the posted message."""
    _inherit = 'mail.compose.message'

    def send_mail(self, cr, uid, ids, context=None):
        ctx = context or {}
        composing_po = (
            ctx.get('default_model') == 'purchase.order'
            and ctx.get('default_res_id')
        )
        if composing_po:
            ctx = dict(ctx, mail_post_autofollow=True)
            self.pool.get('purchase.order').signal_send_rfq(cr, uid, [ctx['default_res_id']])
        return super(mail_compose_message, self).send_mail(cr, uid, ids, context=ctx)
class account_invoice(osv.Model):
    """Notify related purchase orders when a supplier invoice is validated."""
    _inherit = 'account.invoice'

    def invoice_validate(self, cr, uid, ids, context=None):
        """Validate the invoices, then post a note on every purchase order
        linked to them.

        Fixed two context-propagation bugs:
        - `context` was passed positionally to search(), where it landed in
          the `offset` parameter instead of `context`;
        - the super() call hard-coded `context=None`, dropping the caller's
          context (e.g. language) for the actual validation.
        """
        po_obj = self.pool.get('purchase.order')
        po_ids = po_obj.search(cr, uid, [('invoice_ids', 'in', ids)], context=context)
        res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
        po_obj.message_post(cr, uid, po_ids, body=_("Invoice <b>Received.</b>"), context=context)
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
#!/usr/bin/env python
"""
Created as part of the StratusLab project (http://stratuslab.eu),
co-funded by the European Commission under the Grant Agreement
INSFO-RI-261552.
Copyright (c) 2011, Centre National de la Recherche Scientifique (CNRS)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Script used by StratusLab pdisk to manage iSCSI LUNs
"""
import sys
import os
import os.path
import re
from subprocess import *
import StringIO
from optparse import OptionParser
import logging
import logging.handlers
import syslog
import ConfigParser
import uuid
# Initializations
# Module-level state shared with the debug()/abort() helpers defined below.
verbosity = 0          # raised by -v/--debug/--verbose command-line flags
logger = None          # configured in the main code section
action_default = ''
status = 0 # Assume success
# Supported iSCSI proxy variants
iscsi_supported_variants = [ 'lvm', 'netapp' ]
# Keys are supported actions, values are the number of arguments required for the each action
valid_actions = { 'check':1, 'create':2, 'delete':1, 'rebase':1, 'snapshot':3 , 'getturl':1 , 'map':1 , 'unmap':1}
valid_actions_str = ', '.join(valid_actions.keys())
config_file_default = '/opt/stratuslab/etc/persistent-disk-backend.conf'
config_main_section = 'main'
# Built-in configuration defaults, read with ConfigParser.readfp() before the
# real configuration file so the file can override any of them.
config_defaults = StringIO.StringIO("""
# Options commented out are configuration options available for which no
# sensible default value can be defined.
[main]
# Define the list of iSCSI proxies that can be used.
# One section per proxy must also exists to define parameters specific to the proxy.
#iscsi_proxies=netapp.example.org
# Log file for persistent disk management
log_file=/var/log/stratuslab-persistent-disk.log
# User name to use to connect the filer (may also be defined in the filer section)
mgt_user_name=root
# SSH private key to use for 'mgt_user_name' authorisation
#mgt_user_private_key=/some/file.rsa
#[netapp.example.org]
# iSCSI back-end type (case insensitive)
#type=NetApp
# Initiator group the LUN must be mapped to
#initiator_group = linux_servers
# Name appended to the volume name to build the LUN path (a / will be appended)
#lun_namespace=stratuslab
# Volume name where LUNs will be created
#volume_name = /vol/iscsi
# Name prefix to use to build the volume snapshot used as a LUN clone snapshot parent
# (a _ will be appended)
#volume_snapshot_prefix=pdisk_clone
#[lvm.example.org]
# iSCSI back-end type (case insensitive)
#type=LVM
# LVM volume group to use for creating LUNs
#volume_name = /dev/iscsi.01
""")
####################################################################
# Superclass describing common aspect of every iSCSI backends #
# Inherited by all iSCSI backend classes. #
# Variable required in all backend implementationss are documented #
# here but generally have empty defaults. #
####################################################################
class Backend:
    """Superclass describing common aspects of every iSCSI back-end.

    Concrete back-ends (NetApp, LVM, File) override the class-level tables
    below; this class provides the generic machinery that turns one LUN
    action into the sequence of backend commands to execute.
    """
    # Command prefix to use to connect through ssh
    ssh_cmd_prefix = [ 'ssh', '-x', '-i', '%%PRIVKEY%%','%%ISCSI_PROXY%%' ]
    # Table defining mapping of LUN actions to backend actions.
    # This is a 1 to n mapping (several backend commands may be needed for one LUN action).
    # map and unmap are necessary as separate actions as they are not necessarily executed on
    # the same LUN as the other operations (eg. snapshot action).
    # Special values for commands are:
    #  - None: action is not implemented
    #  - empty list: action does nothing
    lun_backend_cmd_mapping = {'check':None,
                               'create':None,
                               'delete':None,
                               'map':None,
                               'rebase':None,
                               'size':None,
                               'snapshot':None,
                               'unmap':None,
                               'getturl':None,
                               }
    # Definitions of backend commands used to implement actions.
    backend_cmds = {
                    }
    # Commands to execute when a given backend command fails.
    # This is a dictionary where key is one of the backend_cmds key and the value is another
    # dictionary whose key is the name of a LUN action (as defined in
    # lun_backend_cmd_mapping) that defines the context of the backend command, and whose
    # value is the key of another command in backend_cmds.
    # If the context is '-', the alternate command will be executed in any context (LUN action)
    # in case of errors.
    # If the value (alternate command) is an empty string, further backend commands part of
    # the same LUN action are skipped.
    # If a backend command fails and has no entry in this dictionary, execution continues
    # with the next command if any.
    backend_failure_cmds = {
                            }
    # Most commands are expected to return nothing when they succeeded. The following
    # dictionary lists exceptions and provides a pattern matching output in case of
    # success.
    # Keys must match an existing key in backend_cmds
    success_msg_pattern = {
                           }
    # The creation of a new LUN may be required by some operations
    # on some backends (e.g. rebase with LVM backend).
    # This dictionary allows to define which LUN actions (same keys
    # as in lun_backend_cmd_mapping, value ignored).
    # By default, this variable is empty: redefine it appropriately in
    # the context of a particular backend if needed.
    new_lun_required = {
                        }

    def getCmd(self,lun_action):
        """Generator yielding, for each backend command implementing
        lun_action, a tuple (parsed_command, success_patterns,
        failure_command):
          - parsed_command: command to run, as a list of tokens with the
            proxy-level variables already substituted;
          - success_patterns: list of regexps matching a successful output,
            or None when success means empty output;
          - failure_command: optional parsed rollback command.
        A single (None, None, None) tuple is yielded when the action is not
        implemented by this back-end.
        """
        if lun_action in self.lun_backend_cmd_mapping:
            backend_actions = self.lun_backend_cmd_mapping[lun_action]
        else:
            abort("Internal error: LUN action '%s' unknown" % (lun_action))
        # If None, means that the action is not implemented: signal the caller once.
        if backend_actions is None:
            yield backend_actions,None,None
            # BUGFIX: stop the generator here. Previously execution fell
            # through to the loop below, and resuming the generator raised
            # "TypeError: 'NoneType' object is not iterable".
            return
        # Initialize parsed_command and success_patterns in case backend_actions is an empty list
        parsed_command = []
        success_patterns = None
        failure_command = None
        for action in backend_actions:
            if action in self.backend_cmds.keys():
                parsed_command = self.parse(self.backend_cmds[action])
            else:
                abort("Internal error: action '%s' unknown" % (action))
            if action in self.success_msg_pattern:
                success_patterns = self.success_msg_pattern[action]
                if isinstance(success_patterns,str):
                    success_patterns = [ success_patterns ]
            else:
                success_patterns = None
            if action in self.backend_failure_cmds.keys():
                failure_actions = self.backend_failure_cmds[action]
                if lun_action in failure_actions:
                    command = failure_actions[lun_action]
                # '-' is a special key value meaning the alternate command applies to all LUN actions
                elif '-' in failure_actions:
                    command = failure_actions['-']
                else:
                    command = None
                if command:
                    failure_command = self.parse(command)
            # NOTE(review): failure_command is not reset to None when the current
            # action has no backend_failure_cmds entry, so a previous action's
            # rollback command leaks to subsequent actions — confirm intended.
            yield parsed_command,success_patterns,failure_command

    def newLunRequired(self,action):
        """Return True when this LUN action requires a brand new LUN to be
        created first (see new_lun_required); creating it is the caller's
        responsibility."""
        return action in self.new_lun_required
############################################
# Class describing a NetApp iSCSI back-end #
############################################
class NetAppBackend(Backend):
    """iSCSI back-end driving a NetApp filer over SSH.

    Maps the generic LUN actions onto sequences of NetApp 'lun'/'snap'
    commands; see the Backend superclass for the meaning of each table.
    """
    lun_backend_cmd_mapping = {
        'check':    ['check'],
        'create':   ['create'],
        # Attempt to delete the volume snapshot associated with the LUN if it
        # is no longer used (no more LUN clone exists)
        'delete':   ['delete', 'snapdel'],
        'map':      ['map'],
        'rebase':   [],
        'size':     None,
        'snapshot': ['snapshot', 'clone'],
        'unmap':    ['unmap'],
        'getturl':  [],
    }
    backend_cmds = {
        'check':    ['lun', 'show', '%%NAME%%'],
        'clone':    ['lun', 'clone', 'create', '%%SNAP_NAME%%', '-b', '%%NAME%%', '%%SNAP_PARENT%%'],
        'create':   ['lun', 'create', '-s', '%%SIZE%%g', '-t', '%%LUNOS%%', '%%NAME%%'],
        'delete':   ['lun', 'destroy', '%%NAME%%'],
        'map':      ['lun', 'map', '-f', '%%NAME%%', '%%INITIATORGRP%%'],
        'snapdel':  ['snap', 'delete', '%%VOLUME_NAME%%', '%%SNAP_PARENT%%'],
        'snapshot': ['snap', 'create', '%%VOLUME_NAME%%', '%%SNAP_PARENT%%'],
        'unmap':    ['lun', 'unmap', '%%NAME%%', '%%INITIATORGRP%%'],
    }
    backend_failure_cmds = {
    }
    success_msg_pattern = {
        'check': 'online',
        # snapdel is expected to fail if there is still a LUN clone using it
        # or if the snapshot doesn't exist (LUN never cloned or is a clone).
        # These are not considered as an error.
        'snapdel': ['deleting snapshot\.\.\.', 'Snapshot [\w\-]+ is busy because of LUN clone', 'No such snapshot'],
        'snapshot': ['^creating snapshot', '^Snapshot already exists.'],
    }
    # Would be great to have it configurable as NetApp needs to know the client OS
    lunOS = 'linux'

    def __init__(self,proxy,mgtUser,mgtPrivKey,volume,namespace,initiatorGroup,snapshotPrefix):
        self.proxyHost = proxy
        self.mgtUser = mgtUser
        self.mgtPrivKey = mgtPrivKey
        self.volumePath = volume
        # Bare volume name (last path component) used by the 'snap' commands
        self.volumeName = volume.split('/')[-1]
        self.namespace = "%s/%s" % (self.volumePath,namespace)
        self.initiatorGroup = initiatorGroup
        self.snapshotPrefix = snapshotPrefix
        # Command to connect to NetApp filer (always ssh)
        self.cmd_prefix = self.ssh_cmd_prefix

    def parse(self,command):
        """Prepend the ssh connection prefix and substitute the proxy-level
        tokens; LUN-level tokens (%%UUID%%, %%SNAP_UUID%%) are left in place
        for LUN.parseCmd(). Returns the parsed command as a new token list."""
        substitutions = {
            '%%INITIATORGRP%%': self.initiatorGroup,
            '%%LUNOS%%':        self.lunOS,
            '%%PRIVKEY%%':      self.mgtPrivKey,
            '%%ISCSI_PROXY%%':  "%s@%s" % (self.mgtUser,self.proxyHost),
            '%%SNAP_PARENT%%':  self.snapshotPrefix + '_%%UUID%%',
            '%%NAME%%':         self.namespace + "/%%UUID%%",
            '%%SNAP_NAME%%':    self.namespace + "/%%SNAP_UUID%%",
            '%%VOLUME_NAME%%':  self.volumeName,
        }
        return [substitutions.get(token, token)
                for token in list(self.cmd_prefix) + list(command)]

    def getType(self):
        """Back-end type identifier used in error messages."""
        return 'NetApp'
####################################
# Class describing a File back-end #
####################################
class FileBackend(Backend):
    """Back-end keeping each 'LUN' as a plain file inside a directory
    (volumeName). Commands run locally, or over SSH when management
    credentials are configured."""
    lun_backend_cmd_mapping = {
        'check':    ['check'],
        'create':   ['create', 'chown'],
        'delete':   ['delete'],
        'map':      [],
        'rebase':   [],
        'size':     [],
        'snapshot': ['copy'],
        'unmap':    [],
        'getturl':  ['getturl'],
    }
    backend_cmds = {
        'check':   ['/usr/bin/test','-f','%%LOGVOL_PATH%%'],
        'create':  ['/bin/dd','if=/dev/zero','of=%%LOGVOL_PATH%%','bs=1024','count=%%SIZE%%M'],
        'chown':   ['/bin/chown','oneadmin:cloud','%%LOGVOL_PATH%%'],
        'delete':  ['/bin/rm','-rf','%%LOGVOL_PATH%%'],
        'copy':    ['/bin/cp','%%NEW_LOGVOL_PATH%%','%%LOGVOL_PATH%%'],
        'getturl': ['/bin/echo','file://%%LOGVOL_PATH%%'],
    }
    success_msg_pattern = {
        'create':  '.*',
        'getturl': '(.*://.*)',
    }

    def __init__(self,proxy,volume,mgtUser=None,mgtPrivKey=None):
        self.volumeName = volume
        self.proxyHost = proxy
        self.mgtUser = mgtUser
        self.mgtPrivKey = mgtPrivKey
        # Only go through ssh when both user and key are configured.
        if self.mgtUser and self.mgtPrivKey:
            debug(1,'SSH will be used to connect to File backend')
            self.cmd_prefix = self.ssh_cmd_prefix
        else:
            self.cmd_prefix = []

    def parse(self,command):
        """Prepend the connection prefix and substitute proxy-level tokens;
        LUN-level tokens are handled later by LUN.parseCmd(). Returns a new
        token list."""
        tokens = list(self.cmd_prefix) + list(command)
        for idx, tok in enumerate(tokens):
            if tok == '%%ISCSI_PROXY%%':
                tokens[idx] = "%s@%s" % (self.mgtUser,self.proxyHost)
            elif re.search('%%LOGVOL_PATH%%',tok):
                tokens[idx] = re.sub('%%LOGVOL_PATH%%',self.volumeName+"/%%UUID%%",tok)
            elif re.search('%%NEW_LOGVOL_PATH%%',tok):
                tokens[idx] = re.sub('%%NEW_LOGVOL_PATH%%',self.volumeName+"/%%SNAP_UUID%%",tok)
            elif tok == '%%PRIVKEY%%':
                tokens[idx] = self.mgtPrivKey
            elif tok == '%%VOLUME_NAME%%':
                tokens[idx] = self.volumeName
        return tokens

    def getType(self):
        """Back-end type identifier used in error messages."""
        return 'File'
#########################################
# Class describing a LVM iSCSI back-end #
#########################################
class LVMBackend (Backend):
    """iSCSI back-end backed by LVM logical volumes, exported through tgtd
    (targets registered in /etc/stratuslab/iscsi.conf)."""
    # The following variables define which command to execute for each action.
    # They are documented in the superclass Backend.
    lun_backend_cmd_mapping = {'check':['check'],
                               'create':['create','add_device','reload_iscsi'],
                               'delete':['remove_device','reload_iscsi','wait2','dmremove','remove'],
                               # map is a required action for snapshot action but does nothing in LVM
                               'map':[],
                               'rebase':['rebase','add_device','reload_iscsi'],
                               'size':['size'],
                               'snapshot':['snapshot','add_device','reload_iscsi'],
                               'unmap':[],
                               'getturl' : ['getturl'],
                               }
    # FIXED: 'dmremove' used to be defined twice in this dictionary; the first
    # definition referenced a '%%DM_VOLUME_PATH%%' token that parse() never
    # substitutes. Python keeps only the last duplicate key, so removing the
    # first definition preserves the effective behaviour.
    backend_cmds = {'check':[ '/usr/bin/test', '-b', '%%LOGVOL_PATH%%' ],
                    'create':[ '/sbin/lvcreate', '-L', '%%SIZE%%G', '-n', '%%UUID%%', '%%VOLUME_NAME%%' ],
                    'add_device':[ '/bin/sed', '-i', '1i<target iqn.2011-01.eu.stratuslab:%%UUID%%> \\n backing-store %%LOGVOL_PATH%% \\n </target>','/etc/stratuslab/iscsi.conf' ],
                    'reload_iscsi': [ '/usr/sbin/tgt-admin','--update','iqn.2011-01.eu.stratuslab:%%UUID%%'],
                    'remove_device': [ '/bin/sed', '-i', '/<target iqn.2011-01.eu.stratuslab:.*%%UUID%%.*/,+2d', '/etc/stratuslab/iscsi.conf' ],
                    'dmremove':['/sbin/dmsetup','remove','%%LOGVOL_PATH%%'],
                    'wait2':['/bin/sleep','2'],
                    'remove':[ '/sbin/lvremove', '-f', '%%LOGVOL_PATH%%' ],
                    'rebase':[ '/bin/dd', 'if=%%LOGVOL_PATH%%', 'of=%%NEW_LOGVOL_PATH%%'],
                    # lvchange doesn't work with clone. Normally unneeded as lvremove -f (remove) does the same
                    'setinactive':[ '/sbin/lvchange', '-a', 'n', '%%LOGVOL_PATH%%' ],
                    'size':['/sbin/lvs', '-o', 'lv_size', '--noheadings', '%%LOGVOL_PATH%%'],
                    'snapshot':[ '/sbin/lvcreate', '--snapshot', '-p', 'rw', '--size', '%%SIZE%%G', '-n', '%%UUID%%', '%%NEW_LOGVOL_PATH%%' ],
                    'getturl' : ['/bin/echo', 'iscsi://%%PORTAL%%/iqn.2011-01.eu.stratuslab:%%UUID%%:1' ],
                    }
    # Rollback: if 'remove' fails, re-register the target in iscsi.conf.
    backend_failure_cmds = {'remove': {'add_device' : [ 'sed', '-i', '1i<target iqn.2011-01.eu.stratuslab:%%UUID%%> \\n backing-store %%LOGVOL_PATH%% \\n </target>','/etc/stratuslab/iscsi.conf' ]}
                            #{'delete' : 'add_device'}
                            }
    success_msg_pattern = {'create':'Logical volume "[\w\-]+" created',
                           'remove':'Logical volume "[\w\-]+" successfully removed',
                           'rebase':'\d+ bytes .* copied',
                           'setinactive':[ '^$', 'File descriptor .* leaked on lvchange invocation' ],
                           'size':['([\d\.]+)g'],
                           'reload_iscsi':'.*',
                           'snapshot':'Logical volume "[\w\-]+" created',
                           'getturl' : '(.*://.*/.*)'
                           }
    # Rebasing with LVM requires creating a brand new LV first (see LUN.rebase()).
    new_lun_required = {'rebase':True
                        }

    def __init__(self,proxy,volume,mgtUser=None,mgtPrivKey=None):
        self.volumeName = volume
        self.proxyHost = proxy
        self.mgtUser = mgtUser
        self.mgtPrivKey = mgtPrivKey
        # Only go through ssh when both user and key are configured.
        if self.mgtUser and self.mgtPrivKey:
            debug(1,'SSH will be used to connect to LVM backend')
            self.cmd_prefix = self.ssh_cmd_prefix
        else:
            self.cmd_prefix = [ ]

    # Add command prefix and parse all variables related to iSCSI proxy in the
    # command (passed as a list of tokens). Return parsed command as a list of tokens.
    def parse(self,command):
        # Build command to execute
        action_cmd = []
        action_cmd.extend(self.cmd_prefix)
        action_cmd.extend(command)
        for i in range(len(action_cmd)):
            if action_cmd[i] == '%%ISCSI_PROXY%%':
                action_cmd[i] = "%s@%s" % (self.mgtUser,self.proxyHost)
            elif re.search('%%LOGVOL_PATH%%',action_cmd[i]):
                action_cmd[i] = re.sub('%%LOGVOL_PATH%%',self.volumeName+"/%%UUID%%",action_cmd[i])
            elif re.search('%%NEW_LOGVOL_PATH%%',action_cmd[i]):
                action_cmd[i] = re.sub('%%NEW_LOGVOL_PATH%%',self.volumeName+"/%%SNAP_UUID%%",action_cmd[i])
            elif action_cmd[i] == '%%PRIVKEY%%':
                action_cmd[i] = self.mgtPrivKey
            elif action_cmd[i] == '%%VOLUME_NAME%%':
                action_cmd[i] = self.volumeName
            elif re.search('%%PORTAL%%',action_cmd[i]):
                action_cmd[i] = re.sub('%%PORTAL%%',self.proxyHost+":3260",action_cmd[i])
        return action_cmd

    # Return iSCSI back-end type
    def getType(self):
        return 'LVM'
#################################################################
# Class describing a LUN and implementing the supported actions #
#################################################################
class LUN:
    """A LUN (persistent disk volume) identified by a UUID.

    Each public method implements one LUN action by delegating to the
    backend proxy object: the proxy's getCmd() generator supplies the
    actual commands, which are run through the Command class.
    """
    # Some LUN commands (e.g. rebase) needs to return information as a string on stdout
    # that will be captured by pdisk. The string is defined using the same tokens as commands.
    # By default, a command returns nothing on stdout.
    # This information is returned only on successful execution of action.
    action_output = {'rebase':'%%SNAP_UUID%%',
                     }

    def __init__(self,uuid,size=None,proxy=None):
        # uuid: LUN identifier (string)
        # size: LUN size as a string; unit interpretation is backend-specific
        # proxy: Backend subclass instance used to run the commands
        self.uuid = uuid
        self.size = size
        self.proxy = proxy
        # Another LUN involved in actions like rebase or snapshot
        self.associatedLUN = None

    def getUuid(self):
        return self.uuid

    def check(self):
        # Return the backend status of the existence check (0 = success)
        status,optInfo = self.__executeAction__('check')
        return status

    def create(self):
        # Create the LUN on the backend; return the command status
        status,optInfo = self.__executeAction__('create')
        return status

    def delete(self):
        # Delete the LUN on the backend; return the command status
        status,optInfo = self.__executeAction__('delete')
        return status

    def getSize(self):
        # Retrieve the LUN size from the backend into self.size; aborts on failure.
        status,self.size = self.__executeAction__('size')
        if status != 0:
            abort('Failure to retrieve size of LUN %s' % (self.uuid))
        return status

    def getTurl(self):
        # Retrieve the transport URL used to attach the LUN; aborts on failure.
        status,self.turl = self.__executeAction__('getturl')
        if status != 0:
            abort('Failure to retrieve Transport URL of %s' % (self.uuid))
        return self.turl

    def map(self):
        status,optInfo = self.__executeAction__('map')
        return status

    def rebase(self):
        # Rebase (detach from its parent) this LUN. Some backends (e.g. LVM)
        # require a brand new LUN to be created first; its UUID is reported on
        # stdout via action_output after a successful rebase.
        if self.proxy.newLunRequired('rebase'):
            #TODO: generate a UUID based on creation timestamp as in PDisk
            new_lun_uuid = str(uuid.uuid4())
            self.getSize()
            self.associatedLUN = LUN(new_lun_uuid,size=self.size,proxy=self.proxy)
            if self.associatedLUN.create() != 0:
                abort('An error occured creating a new LUN for rebasing %s' % (self.uuid))
        else:
            self.associatedLUN = self # To simplify returned value
        status,optInfo = self.__executeAction__('rebase')
        return status

    def snapshot(self,snapshot_lun):
        # Snapshot this LUN into snapshot_lun (used as %%SNAP_UUID%% in commands)
        self.associatedLUN = snapshot_lun
        status,optInfo = self.__executeAction__('snapshot')
        return status

    def unmap(self):
        status,optInfo = self.__executeAction__('unmap')
        return status

    # Execute an action on a LUN.
    # An action may involve several actual commands : getCmd() method of proxy is a generator returning
    # the commands to execute one by one.
    # In case an error occurs during one command, try to continue...
    # Return the status of the last command executed and an optional additional value returned by the command.
    # Optionally a string is printed on stdout to allow the script to return information to the caller.
    # Special values for commands are:
    #  - None: action is not implemented
    #  - empty list: action does nothing
    def __executeAction__(self,action):
        status = 0 # Assume success
        optInfo = None
        for cmd_toks,successMsg,failure_cmd_toks in self.proxy.getCmd(action):
            # When returned command for action is None, it means that the action is not implemented
            if cmd_toks == None:
                abort("Action '%s' not implemented by back-end type '%s'" % (action,self.proxy.getType()))
            command = Command(action,self.parseCmd(cmd_toks),successMsg)
            command.execute()
            status,optInfo = command.checkStatus()
            if status != 0 and failure_cmd_toks:
                # The command failed and a rollback command is defined: run it,
                # then stop processing further commands for this LUN action.
                command = Command(action,self.parseCmd(failure_cmd_toks),successMsg)
                command.execute()
                status_,optInfo_ = command.checkStatus()
                if status_ != 0:
                    print "Rollback command",failure_cmd_toks,"failed:", optInfo_
                break
            # If failure_cmd_toks is an empty string, stop LUN action processing
            elif failure_cmd_toks == '':
                break
        if status == 0 and action in self.action_output:
            # Report action-specific information (e.g. the new UUID after a
            # rebase) to the calling pdisk service through stdout.
            print self.__parse__(self.action_output[action])
        return status,optInfo

    # Parse all variables related to current LUN in the command (passed and returned as a list of tokens).
    def parseCmd(self,action_cmd):
        for i in range(len(action_cmd)):
            action_cmd[i] = self.__parse__(action_cmd[i])
        return action_cmd

    # Do the actual string parsing (LUN-level token substitution).
    # NOTE(review): the elif chain substitutes only the first matching token
    # kind per string — assumes no command token mixes e.g. %%SIZE%% and
    # %%UUID%%; confirm against the backend command tables.
    def __parse__(self,string):
        if re.search('%%SIZE%%',string):
            string = re.sub('%%SIZE%%',self.size,string)
        elif re.search('%%UUID%%',string):
            string = re.sub('%%UUID%%',self.getUuid(),string)
        elif re.search('%%SNAP_UUID%%',string):
            string = re.sub('%%SNAP_UUID%%',self.associatedLUN.getUuid(),string)
        return string
#######################################################
# Class representing a command passed to the back-end #
#######################################################
class Command:
    """One backend command: wraps subprocess execution of an already-parsed
    token list and the interpretation of its exit code and output."""
    # Markers delimiting the raw command output inside log messages
    cmd_output_start = '<<<<<<<<<<'
    cmd_output_end = '>>>>>>>>>>'

    def __init__(self,action,cmd,successMsgs=None):
        # action: name of the LUN action this command belongs to (for messages)
        # cmd: command to execute, as a list of tokens (run with shell=False)
        # successMsgs: optional list of regexps matching a successful output
        self.action = action
        self.action_cmd = cmd
        self.successMsgs = successMsgs
        self.proc = None

    def execute(self):
        # Launch the command asynchronously; completion and output analysis
        # happen in checkStatus().
        status = 0
        # Execute command: NetApp command don't return an exit code. When a command is sucessful,
        # its output is empty.
        #action_cmd = 'echo ' + self.action_cmd
        debug(1,"Executing command: '%s'" % (' '.join(self.action_cmd)))
        try:
            self.proc = Popen(self.action_cmd, shell=False, stdout=PIPE, stderr=STDOUT)
        except OSError, details:
            abort('Failed to execute %s action: %s' % (self.action,details))
            # NOTE(review): unreachable — abort() calls sys.exit(); status = 1
            # below can never be returned.
            status = 1
        return status

    def checkStatus(self):
        """Wait for command completion; return (retcode, optInfo) where
        optInfo is the first capturing group of the matched success pattern,
        if any. retcode is forced to -1 when the output does not match the
        expected success patterns."""
        optInfo = None
        try:
            # NOTE(review): wait() before communicate() can deadlock if the
            # child fills the stdout pipe buffer — assumes backend output stays
            # small; confirm.
            retcode = self.proc.wait()
            output = self.proc.communicate()[0]
            if retcode != 0:
                abort('An error occured during %s action (error=%s). Command output:\n%s\n%s\n%s' % (self.action,retcode,self.cmd_output_start,output,self.cmd_output_end))
            else:
                # Need to check if the command is expected to return an output when successfull
                success = False
                if self.successMsgs:
                    for successPattern in self.successMsgs:
                        output_regexp = re.compile(successPattern)
                        matcher = output_regexp.search(output)
                        if matcher:
                            # Return only the first capturing group
                            if output_regexp.groups > 0:
                                optInfo = matcher.group(1)
                            success = True
                            break
                else:
                    # No pattern defined: success means empty output
                    if len(output) == 0:
                        success = True
                if success:
                    debug(1,'%s action completed successfully.' % (self.action))
                    if len(output) > 0:
                        debug(2,'Command output:\n%s\n%s\n%s' % (self.cmd_output_start,output,self.cmd_output_end))
                else:
                    retcode = -1
                    debug(0,'An error occured during %s action. Command output:\n%s\n%s\n%s' % (self.action,self.cmd_output_start,output,self.cmd_output_end))
        except OSError, details:
            abort('Failed to execute %s action: %s' % (self.action,details))
        return retcode,optInfo
###############################
# Functions to handle logging #
###############################
def abort(msg):
    """Log a fatal persistent-disk error and terminate the script with exit
    code 2 (never returns)."""
    logger.error("Persistent disk operation failed:\n%s" % msg)
    sys.exit(2)
def debug(level, msg):
    """Emit msg when the requested level is within the configured verbosity.

    Level 0 messages go through logger.info (always shown), higher levels
    through logger.debug.
    """
    if level > verbosity:
        return
    emit = logger.info if level == 0 else logger.debug
    emit(msg)
#############
# Main code #
#############

# Configure loggers and handlers.
# Initially configure only syslog and stderr handler; the rotating file
# handler is added later once the configuration file has been read.
logging_source = 'stratuslab-pdisk'
logger = logging.getLogger(logging_source)
logger.setLevel(logging.DEBUG)
#fmt=logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fmt=logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
# Handler used to report to SVN must display only the message to allow proper XML formatting
# NOTE(review): svn_fmt appears unused in this script — confirm before removing.
svn_fmt=logging.Formatter("%(message)s")
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
# Parse configuration and options
usage_text = """usage: %prog [options] action_parameters
Parameters:
action=check: LUN_UUID
action=create: LUN_UUID LUN_Size
action=delete: LUN_UUID
action=rebase: LUN_UUID (will return the rebased LUN UUID on stdout)
action=snapshot: LUN_UUID New_LUN_UUID Snapshot_Size
action=getturl: LUN_UUID
action=map: LUN_UUID
action=unmap: LUN_UUID
"""
parser = OptionParser(usage=usage_text)
parser.add_option('--config', dest='config_file', action='store', default=config_file_default, help='Name of the configuration file to use (D: %s)' % (config_file_default))
parser.add_option('--action', dest='action', action='store', default=action_default, help='Action to execute. Valid actions: %s'%(valid_actions_str))
parser.add_option('-v', '--debug', '--verbose', dest='verbosity', action='count', default=0, help='Increase verbosity level for debugging (multiple allowed)')
parser.add_option('--version', dest='version', action='store_true', default=False, help='Display various information about this script')
options, args = parser.parse_args()

if options.version:
    # NOTE(review): __version__ and __author__ are not defined anywhere in
    # this file — --version would raise NameError; confirm they are expected
    # to be set elsewhere.
    debug (0,"Version %s written by %s" % (__version__,__author__))
    debug (0,__doc__)
    sys.exit(0)

if options.verbosity:
    verbosity = options.verbosity

# Validate the requested action and its argument count (see valid_actions).
if options.action in valid_actions:
    if len(args) < valid_actions[options.action]:
        debug(0,"Insufficient argument provided (%d required)" % (valid_actions[options.action]))
        parser.print_help()
        abort("")
else:
    if options.action:
        debug(0,"Invalid action requested (%s)\n" % (options.action))
    else:
        debug(0,"No action specified\n")
    parser.print_help()
    abort("")
# Read configuration file.
# The file must exist as there is no sensible default value for several options.
config = ConfigParser.ConfigParser()
# Load built-in defaults first so the real file only has to override them.
config.readfp(config_defaults)
try:
    config.readfp(open(options.config_file))
except IOError, (errno,errmsg):
    if errno == 2:
        abort('Configuration file (%s) is missing.' % (options.config_file))
    else:
        abort('Error opening configuration file (%s): %s (errno=%s)' % (options.config_file,errmsg,errno))

# Add a rotating file handler when a log file is configured; otherwise fall
# back to syslog (warnings and above only).
logfile_handler = None
try:
    log_file = config.get(config_main_section,'log_file')
    if log_file:
        logfile_handler = logging.handlers.RotatingFileHandler(log_file,'a',100000,10)
        logfile_handler.setLevel(logging.DEBUG)
        logfile_handler.setFormatter(fmt)
        logger.addHandler(logfile_handler)
except ValueError:
    abort("Invalid value specified for 'log_file' (section %s)" % (config_main_section))

if logfile_handler == None or not log_file:
    # Use standard log destination in case a log file is not defined
    syslog_handler = logging.handlers.SysLogHandler('/dev/log')
    syslog_handler.setLevel(logging.WARNING)
    logger.addHandler(syslog_handler)

# Select the iSCSI proxy: only the first entry of the iscsi_proxies list is used.
# NOTE(review): a missing 'iscsi_proxies' option raises
# ConfigParser.NoOptionError, not ValueError — this except clause may never
# fire; confirm intended.
try:
    iscsi_proxies_list = config.get(config_main_section,'iscsi_proxies')
    iscsi_proxies = iscsi_proxies_list.split(',')
    iscsi_proxy_name = iscsi_proxies[0]
except ValueError:
    abort("Invalid value specified for 'iscsi_proxies' (section %s) (must be a comma-separated list)" % (config_main_section))

# Each proxy has its own section whose 'type' attribute selects the back-end variant.
try:
    backend_variant=config.get(iscsi_proxy_name,'type')
except:
    abort("Section '%s' or required attribute 'type' missing" % (iscsi_proxy_name))
# NetApp back-end configuration
if backend_variant.lower() == 'netapp':
    # Retrieve NetApp back-end mandatory attributes.
    # Mandatory attributes should be defined as keys of backend_attributes with an arbitrary value.
    # Key name must match the attribute name in the configuration file.
    backend_attributes = {'initiator_group':'',
                          'lun_namespace':'',
                          'volume_name':'',
                          'volume_snapshot_prefix':''
                          }
    try:
        for attribute in backend_attributes.keys():
            backend_attributes[attribute]=config.get(iscsi_proxy_name,attribute)
    except:
        # Relies on 'attribute' leaking out of the for loop to name the missing option.
        abort("Section '%s' or required attribute '%s' missing" % (iscsi_proxy_name,attribute))
    # User name: per-proxy section first, then [main] section as fallback.
    try:
        backend_attributes['mgt_user_name']=config.get(iscsi_proxy_name,'mgt_user_name')
    except:
        try:
            backend_attributes['mgt_user_name']=config.get(config_main_section,'mgt_user_name')
        except:
            abort("User name to use for connecting to iSCSI proxy undefined")
    # SSH private key: per-proxy section first, then [main] section as fallback.
    try:
        backend_attributes['mgt_user_private_key']=config.get(iscsi_proxy_name,'mgt_user_private_key')
    except:
        try:
            backend_attributes['mgt_user_private_key']=config.get(config_main_section,'mgt_user_private_key')
        except:
            abort("SSH private key to use for connecting to iSCSI proxy undefined")
    # Create iSCSI back-end object
    iscsi_proxy = NetAppBackend(iscsi_proxy_name,
                                backend_attributes['mgt_user_name'],
                                backend_attributes['mgt_user_private_key'],
                                backend_attributes['volume_name'],
                                backend_attributes['lun_namespace'],
                                backend_attributes['initiator_group'],
                                backend_attributes['volume_snapshot_prefix']
                                )
# LVM back-end configuration
elif backend_variant.lower() == 'lvm':
    # Retrieve LVM back-end mandatory attributes.
    # Mandatory attributes should be defined as keys of backend_attributes with an arbitrary value.
    # Key name must match the attribute name in the configuration file.
    backend_attributes = {'volume_name':'',
                          }
    try:
        for attribute in backend_attributes.keys():
            backend_attributes[attribute]=config.get(iscsi_proxy_name,attribute)
    except:
        abort("Section '%s' or required attribute '%s' missing" % (iscsi_proxy_name,attribute))
    # 'local' is a reserved name to designate the local machine: in this case, don't use ssh to
    # connect to backend
    if iscsi_proxy_name == 'local':
        backend_attributes['mgt_user_name'] = None
        backend_attributes['mgt_user_private_key'] = None
    else:
        try:
            backend_attributes['mgt_user_name']=config.get(iscsi_proxy_name,'mgt_user_name')
        except:
            try:
                backend_attributes['mgt_user_name']=config.get(config_main_section,'mgt_user_name')
            except:
                abort("User name to use for connecting to iSCSI proxy undefined")
        try:
            backend_attributes['mgt_user_private_key']=config.get(iscsi_proxy_name,'mgt_user_private_key')
        except:
            try:
                backend_attributes['mgt_user_private_key']=config.get(config_main_section,'mgt_user_private_key')
            except:
                abort("SSH private key to use for connecting to iSCSI proxy undefined")
    # Create iSCSI back-end object
    iscsi_proxy = LVMBackend(iscsi_proxy_name,
                             backend_attributes['volume_name'],
                             backend_attributes['mgt_user_name'],
                             backend_attributes['mgt_user_private_key'],
                             )
# File back-end configuration (same attribute resolution as LVM).
elif backend_variant.lower() == 'file':
    backend_attributes = {'volume_name':'',
                          }
    try:
        for attribute in backend_attributes.keys():
            backend_attributes[attribute]=config.get(iscsi_proxy_name,attribute)
    except:
        abort("Section '%s' or required attribute '%s' missing" % (iscsi_proxy_name,attribute))
    if iscsi_proxy_name == 'local':
        backend_attributes['mgt_user_name'] = None
        backend_attributes['mgt_user_private_key'] = None
    else:
        try:
            backend_attributes['mgt_user_name']=config.get(iscsi_proxy_name,'mgt_user_name')
        except:
            try:
                backend_attributes['mgt_user_name']=config.get(config_main_section,'mgt_user_name')
            except:
                abort("User name to use for connecting to iSCSI proxy undefined")
        try:
            backend_attributes['mgt_user_private_key']=config.get(iscsi_proxy_name,'mgt_user_private_key')
        except:
            try:
                backend_attributes['mgt_user_private_key']=config.get(config_main_section,'mgt_user_private_key')
            except:
                abort("SSH private key to use for connecting to iSCSI proxy undefined")
    # Create iSCSI back-end object
    iscsi_proxy = FileBackend(iscsi_proxy_name,
                              backend_attributes['volume_name'],
                              backend_attributes['mgt_user_name'],
                              backend_attributes['mgt_user_private_key'],
                              )
# Abort if iSCSI back-end variant specified is not supported
else:
    abort("Unsupported iSCSI back-end variant '%s' (supported variants: %s)" % (backend_variant,','.join(iscsi_supported_variants)))
# Execute requested action
# Positional arguments per action are documented in usage_text above.
if options.action == 'check':
    debug(1,"Checking LUN existence...")
    lun = LUN(args[0],proxy=iscsi_proxy)
    status = lun.check()
elif options.action == 'create':
    debug(1,"Creating LUN...")
    lun = LUN(args[0],size=args[1],proxy=iscsi_proxy)
    status = lun.create()
elif options.action == 'delete':
    debug(1,"Deleting LUN...")
    lun = LUN(args[0],proxy=iscsi_proxy)
    status = lun.delete()
elif options.action == 'rebase':
    debug(1,"Rebasing LUN...")
    lun = LUN(args[0],proxy=iscsi_proxy)
    status = lun.rebase()
elif options.action == 'snapshot':
    # args: existing LUN UUID, new LUN UUID, snapshot size
    debug(1,"Doing a LUN snapshot...")
    lun = LUN(args[1],size=args[2],proxy=iscsi_proxy)
    snapshot_lun = LUN(args[0],proxy=iscsi_proxy)
    # Only the last error is returned
    status = lun.snapshot(snapshot_lun)
    status = snapshot_lun.map()
elif options.action == 'map':
    debug(1,"Mapping LUN...")
    lun = LUN(args[0],proxy=iscsi_proxy)
    status = lun.map()
elif options.action == 'unmap':
    debug(1,"Unmapping LUN...")
    lun = LUN(args[0],proxy=iscsi_proxy)
    status = lun.unmap()
elif options.action == 'getturl' :
    debug(1,"Returning Transport URL...")
    lun = LUN(args[0], proxy=iscsi_proxy)
    turl = lun.getTurl()
    # The transport URL is the script's stdout result for this action.
    if turl == "":
        status = 1
    else:
        status = 0
        print turl
else:
    abort ("Internal error: unimplemented action (%s)" % (options.action))
# Exit with the status of the last executed command (0 = success).
sys.exit(status)
# NOTE: stray VCS commit-message line ("remove double definition of dmremove
# command") — kept as a comment so the file parses.
#!/usr/bin/env python
"""
Created as part of the StratusLab project (http://stratuslab.eu),
co-funded by the European Commission under the Grant Agreement
INSFO-RI-261552.
Copyright (c) 2011, Centre National de la Recherche Scientifique (CNRS)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Script used by StratusLab pdisk to manage iSCSI LUNs
"""
import sys
import os
import os.path
import re
from subprocess import *
import StringIO
from optparse import OptionParser
import logging
import logging.handlers
import syslog
import ConfigParser
import uuid
# Initializations
verbosity = 0
logger = None
action_default = ''
status = 0 # Assume success

# Supported iSCSI proxy variants
iscsi_supported_variants = [ 'lvm', 'netapp' ]

# Keys are supported actions, values are the number of arguments required for each action
valid_actions = { 'check':1, 'create':2, 'delete':1, 'rebase':1, 'snapshot':3 , 'getturl':1 , 'map':1 , 'unmap':1}
valid_actions_str = ', '.join(valid_actions.keys())

# Default configuration file location and the name of its main section
config_file_default = '/opt/stratuslab/etc/persistent-disk-backend.conf'
config_main_section = 'main'

# Built-in configuration defaults; merged first, then overridden by the
# configuration file read at startup.
config_defaults = StringIO.StringIO("""
# Options commented out are configuration options available for which no
# sensible default value can be defined.
[main]
# Define the list of iSCSI proxies that can be used.
# One section per proxy must also exists to define parameters specific to the proxy.
#iscsi_proxies=netapp.example.org
# Log file for persistent disk management
log_file=/var/log/stratuslab-persistent-disk.log
# User name to use to connect the filer (may also be defined in the filer section)
mgt_user_name=root
# SSH private key to use for 'mgt_user_name' authorisation
#mgt_user_private_key=/some/file.rsa
#[netapp.example.org]
# iSCSI back-end type (case insensitive)
#type=NetApp
# Initiator group the LUN must be mapped to
#initiator_group = linux_servers
# Name appended to the volume name to build the LUN path (a / will be appended)
#lun_namespace=stratuslab
# Volume name where LUNs will be created
#volume_name = /vol/iscsi
# Name prefix to use to build the volume snapshot used as a LUN clone snapshot parent
# (a _ will be appended)
#volume_snapshot_prefix=pdisk_clone
#[lvm.example.org]
# iSCSI back-end type (case insensitive)
#type=LVM
# LVM volume group to use for creating LUNs
#volume_name = /dev/iscsi.01
""")
####################################################################
# Superclass describing common aspect of every iSCSI backends #
# Inherited by all iSCSI backend classes. #
# Variable required in all backend implementationss are documented #
# here but generally have empty defaults. #
####################################################################
class Backend:
    """Superclass describing the common behaviour of every iSCSI back-end.

    Concrete back-ends (NetApp, LVM, File) redefine the class variables
    below to describe which low-level commands implement each LUN action.
    """

    # Command prefix to use to connect to the back-end through ssh
    ssh_cmd_prefix = [ 'ssh', '-x', '-i', '%%PRIVKEY%%','%%ISCSI_PROXY%%' ]

    # Table defining mapping of LUN actions to back-end actions.
    # This is a 1 to n mapping (several back-end commands may be needed for one LUN action).
    # map and unmap are necessary as separate actions as they are not necessarily executed on
    # the same LUN as the other operations (eg. snapshot action).
    # Special values for commands are:
    #  - None: action is not implemented
    #  - empty list: action does nothing
    lun_backend_cmd_mapping = {'check':None,
                               'create':None,
                               'delete':None,
                               'map':None,
                               'rebase':None,
                               'size':None,
                               'snapshot':None,
                               'unmap':None,
                               'getturl':None,
                               }

    # Definitions of the back-end commands used to implement actions.
    backend_cmds = {
                    }

    # Commands to execute when a given backend command fails.
    # Key is one of the backend_cmds keys; the value is another dictionary whose
    # key is the name of the LUN action (as defined in lun_backend_cmd_mapping)
    # that defines the context of the backend command, and whose value is the key
    # of another command in backend_cmds.
    # If the context is '-', the alternate command is executed for any LUN action.
    # If the alternate command is an empty string, further backend commands that are
    # part of the same LUN action are skipped.  If it is None, processing continues
    # as if there was no entry for the command.  If a backend command fails and has
    # no entry in this dictionary, execution continues with the next command if any.
    backend_failure_cmds = {
                            }

    # Most commands are expected to return nothing when they succeed.  This
    # dictionary lists exceptions and provides a pattern matching the output in
    # case of success.  Keys must match an existing key in backend_cmds.
    success_msg_pattern = {
                           }

    # LUN actions (same keys as lun_backend_cmd_mapping, value ignored) that
    # require the creation of a new LUN on this back-end (e.g. rebase with LVM).
    # Empty by default: redefine in a back-end subclass if needed.
    new_lun_required = {
                        }

    # Generator function returning, for each back-end command implementing 'lun_action':
    #  - the command as a list of tokens, with iSCSI-proxy-related variables parsed
    #  - the expected output patterns in case of success (a list of regexps, or None
    #    when the command must produce no output on success)
    #  - the parsed alternate (rollback) command to run if the command fails, or None
    # This function must be called from an iteration loop control statement.
    def getCmd(self,lun_action):
        if lun_action in self.lun_backend_cmd_mapping:
            backend_actions = self.lun_backend_cmd_mapping[lun_action]
        else:
            abort("Internal error: LUN action '%s' unknown" % (lun_action))
        # If None, means that the action is not implemented
        if backend_actions == None:
            yield backend_actions,None,None
            # Bug fix: stop the generator here.  Without this return, resuming the
            # generator would fall through and iterate over None below (TypeError).
            return
        # Initialize parsed_command and success_patterns in case backend_actions
        # is an empty list
        parsed_command = []
        success_patterns = None
        for action in backend_actions:
            # Bug fix: reset per command.  Previously the failure command of an
            # earlier action leaked into subsequent actions without one.
            failure_command = None
            if action in self.backend_cmds.keys():
                parsed_command = self.parse(self.backend_cmds[action])
            else:
                abort("Internal error: action '%s' unknown" % (action))
            if action in self.success_msg_pattern:
                success_patterns = self.success_msg_pattern[action]
                # Normalize a single pattern string to a one-element list
                if isinstance(success_patterns,str):
                    success_patterns = [ success_patterns ]
            else:
                success_patterns = None
            if action in self.backend_failure_cmds.keys():
                failure_actions = self.backend_failure_cmds[action]
                if lun_action in failure_actions:
                    command = failure_actions[lun_action]
                # '-' is a special key value meaning the alternate command applies to all LUN actions
                elif '-' in failure_actions:
                    command = failure_actions['-']
                else:
                    command = None
                if command:
                    failure_command = self.parse(command)
            yield parsed_command,success_patterns,failure_command

    # Return True if creation of a new LUN is required for this LUN action.
    # LUN creation is the responsibility of the caller.
    def newLunRequired(self,action):
        return action in self.new_lun_required
############################################
# Class describing a NetApp iSCSI back-end #
############################################
class NetAppBackend(Backend):
    """iSCSI back-end driving a NetApp filer (always reached over ssh)."""

    # Mapping of LUN actions to NetApp commands (see superclass Backend).
    lun_backend_cmd_mapping = {'check':['check'],
                               'create':['create'],
                               # Attempt to delete the volume snapshot associated with the LUN if it is no longer used (no more LUN clone exists)
                               'delete':['delete','snapdel'],
                               'map':['map'],
                               'rebase':[],
                               'size':None,
                               'snapshot':['snapshot','clone'],
                               'unmap':['unmap'],
                               'getturl':[],
                               }

    # Definitions of the NetApp commands used to implement actions.
    backend_cmds = {'check':[ 'lun', 'show', '%%NAME%%' ],
                    'clone':[ 'lun', 'clone', 'create', '%%SNAP_NAME%%', '-b', '%%NAME%%', '%%SNAP_PARENT%%' ],
                    'create':[ 'lun', 'create', '-s', '%%SIZE%%g', '-t', '%%LUNOS%%', '%%NAME%%' ],
                    'delete':[ 'lun', 'destroy', '%%NAME%%' ],
                    'map':[ 'lun', 'map', '-f', '%%NAME%%', '%%INITIATORGRP%%' ],
                    'snapdel':[ 'snap', 'delete', '%%VOLUME_NAME%%', '%%SNAP_PARENT%%' ],
                    'snapshot':[ 'snap', 'create', '%%VOLUME_NAME%%', '%%SNAP_PARENT%%' ],
                    'unmap':[ 'lun', 'unmap', '%%NAME%%', '%%INITIATORGRP%%' ]
                    }

    backend_failure_cmds = {
                            }

    success_msg_pattern = { 'check':'online',
                            # snapdel is expected to fail if there is still a LUN clone using it or if the snapshot doesn't exist
                            # (LUN never cloned or is a clone). These are not considered as an error.
                            'snapdel':[ 'deleting snapshot\.\.\.', 'Snapshot [\w\-]+ is busy because of LUN clone','No such snapshot' ],
                            'snapshot':['^creating snapshot','^Snapshot already exists.']
                            }

    # Guest OS hint passed to 'lun create'.  Would be great to have it
    # configurable as NetApp needs to know the client OS.
    lunOS = 'linux'

    def __init__(self,proxy,mgtUser,mgtPrivKey,volume,namespace,initiatorGroup,snapshotPrefix):
        self.proxyHost = proxy
        self.mgtUser = mgtUser
        self.mgtPrivKey = mgtPrivKey
        self.volumePath = volume
        self.volumeName = volume.split('/')[-1]
        self.namespace = "%s/%s" % (self.volumePath,namespace)
        self.initiatorGroup = initiatorGroup
        self.snapshotPrefix = snapshotPrefix
        # Command to connect to NetApp filer (always ssh)
        self.cmd_prefix = self.ssh_cmd_prefix

    # Prepend the ssh connection prefix and substitute every proxy-related
    # variable in 'command' (a list of tokens); return the new token list.
    def parse(self,command):
        replacements = {
            '%%INITIATORGRP%%': self.initiatorGroup,
            '%%LUNOS%%': self.lunOS,
            '%%PRIVKEY%%': self.mgtPrivKey,
            '%%ISCSI_PROXY%%': "%s@%s" % (self.mgtUser,self.proxyHost),
            '%%SNAP_PARENT%%': self.snapshotPrefix + '_%%UUID%%',
            '%%NAME%%': self.namespace + "/%%UUID%%",
            '%%SNAP_NAME%%': self.namespace + "/%%SNAP_UUID%%",
            '%%VOLUME_NAME%%': self.volumeName,
        }
        tokens = list(self.cmd_prefix) + list(command)
        return [ replacements.get(token,token) for token in tokens ]

    # Return iSCSI back-end type
    def getType(self):
        return 'NetApp'
####################################
# Class describing a File back-end #
####################################
class FileBackend(Backend):
    """iSCSI back-end storing each LUN as a plain file under a directory.

    Commands run locally unless both a management user and an ssh private
    key are supplied, in which case they run over ssh.
    """

    # Mapping of LUN actions to back-end commands (see superclass Backend).
    lun_backend_cmd_mapping = {'check':['check'],
                               'create':['create','chown'],
                               'delete':['delete'],
                               'map':[],
                               'rebase':[],
                               'size':[],
                               'snapshot':['copy'],
                               'unmap':[],
                               'getturl':['getturl'],
                               }

    # Definitions of the shell commands used to implement actions.
    backend_cmds = {'check':['/usr/bin/test','-f','%%LOGVOL_PATH%%'],
                    'create':['/bin/dd','if=/dev/zero','of=%%LOGVOL_PATH%%','bs=1024','count=%%SIZE%%M'],
                    'chown' :['/bin/chown','oneadmin:cloud','%%LOGVOL_PATH%%'],
                    'delete':['/bin/rm','-rf','%%LOGVOL_PATH%%'],
                    'copy':['/bin/cp','%%NEW_LOGVOL_PATH%%','%%LOGVOL_PATH%%'],
                    'getturl':['/bin/echo','file://%%LOGVOL_PATH%%'],
                    }

    success_msg_pattern = {'create' : '.*',
                           'getturl' : '(.*://.*)',
                           }

    def __init__(self,proxy,volume,mgtUser=None,mgtPrivKey=None):
        self.volumeName = volume
        self.proxyHost = proxy
        self.mgtUser = mgtUser
        self.mgtPrivKey = mgtPrivKey
        # Use ssh only when full credentials are available.
        if self.mgtUser and self.mgtPrivKey:
            debug(1,'SSH will be used to connect to File backend')
            self.cmd_prefix = self.ssh_cmd_prefix
        else:
            self.cmd_prefix = []

    # Prepend the connection prefix and substitute every proxy-related
    # variable in 'command' (a list of tokens); return the new token list.
    def parse(self,command):
        expanded = []
        for token in list(self.cmd_prefix) + list(command):
            if token == '%%ISCSI_PROXY%%':
                token = "%s@%s" % (self.mgtUser,self.proxyHost)
            elif re.search('%%LOGVOL_PATH%%',token):
                token = re.sub('%%LOGVOL_PATH%%',self.volumeName+"/%%UUID%%",token)
            elif re.search('%%NEW_LOGVOL_PATH%%',token):
                token = re.sub('%%NEW_LOGVOL_PATH%%',self.volumeName+"/%%SNAP_UUID%%",token)
            elif token == '%%PRIVKEY%%':
                token = self.mgtPrivKey
            elif token == '%%VOLUME_NAME%%':
                token = self.volumeName
            expanded.append(token)
        return expanded

    # Return iSCSI back-end type
    def getType(self):
        return 'File'
#########################################
# Class describing a LVM iSCSI back-end #
#########################################
class LVMBackend (Backend):
    """iSCSI back-end managing LUNs as LVM logical volumes exported by tgtd.

    Commands run locally unless both a management user and an ssh private
    key are supplied, in which case they run over ssh.
    """

    # Mapping of LUN actions to back-end commands (see superclass Backend).
    lun_backend_cmd_mapping = {'check':['check'],
                               'create':['create','add_device','reload_iscsi'],
                               'delete':['remove_device','reload_iscsi','wait2','dmremove','remove'],
                               # map is a required action for snapshot action but does nothing in LVM
                               'map':[],
                               'rebase':['rebase','add_device','reload_iscsi'],
                               'size':['size'],
                               'snapshot':['snapshot','add_device','reload_iscsi'],
                               'unmap':[],
                               'getturl' : ['getturl'],
                               }

    # Definitions of the shell commands used to implement actions.
    backend_cmds = {'check':[ '/usr/bin/test', '-b', '%%LOGVOL_PATH%%' ],
                    'create':[ '/sbin/lvcreate', '-L', '%%SIZE%%G', '-n', '%%UUID%%', '%%VOLUME_NAME%%' ],
                    'add_device':[ '/bin/sed', '-i', '1i<target iqn.2011-01.eu.stratuslab:%%UUID%%> \\n backing-store %%LOGVOL_PATH%% \\n </target>','/etc/stratuslab/iscsi.conf' ],
                    'reload_iscsi': [ '/usr/sbin/tgt-admin','--update','iqn.2011-01.eu.stratuslab:%%UUID%%'],
                    'remove_device': [ '/bin/sed', '-i', '/<target iqn.2011-01.eu.stratuslab:.*%%UUID%%.*/,+2d', '/etc/stratuslab/iscsi.conf' ],
                    'dmremove':['/sbin/dmsetup','remove','%%LOGVOL_PATH%%'],
                    'wait2':['/bin/sleep','2'],
                    'remove':[ '/sbin/lvremove', '-f', '%%LOGVOL_PATH%%' ],
                    'rebase':[ '/bin/dd', 'if=%%LOGVOL_PATH%%', 'of=%%NEW_LOGVOL_PATH%%'],
                    # lvchange doesn't work with clone. Normally unneeded as lvremove -f (remove) does the same
                    'setinactive':[ '/sbin/lvchange', '-a', 'n', '%%LOGVOL_PATH%%' ],
                    'size':['/sbin/lvs', '-o', 'lv_size', '--noheadings', '%%LOGVOL_PATH%%'],
                    'snapshot':[ '/sbin/lvcreate', '--snapshot', '-p', 'rw', '--size', '%%SIZE%%G', '-n', '%%UUID%%', '%%NEW_LOGVOL_PATH%%' ],
                    'getturl' : ['/bin/echo', 'iscsi://%%PORTAL%%/iqn.2011-01.eu.stratuslab:%%UUID%%:1' ],
                    }

    # Rollback: if 'remove' fails, re-add the target definition removed earlier.
    backend_failure_cmds = {'remove': {'add_device' : [ 'sed', '-i', '1i<target iqn.2011-01.eu.stratuslab:%%UUID%%> \\n backing-store %%LOGVOL_PATH%% \\n </target>','/etc/stratuslab/iscsi.conf' ]}
                            #{'delete' : 'add_device'}
                            }

    success_msg_pattern = {'create':'Logical volume "[\w\-]+" created',
                           'remove':'Logical volume "[\w\-]+" successfully removed',
                           'rebase':'\d+ bytes .* copied',
                           'setinactive':[ '^$', 'File descriptor .* leaked on lvchange invocation' ],
                           'size':['([\d\.]+)g'],
                           'reload_iscsi':'.*',
                           'snapshot':'Logical volume "[\w\-]+" created',
                           'getturl' : '(.*://.*/.*)'
                           }

    # Rebase needs a brand-new LUN to copy into (see Backend.newLunRequired).
    new_lun_required = {'rebase':True
                        }

    def __init__(self,proxy,volume,mgtUser=None,mgtPrivKey=None):
        self.volumeName = volume
        self.proxyHost = proxy
        self.mgtUser = mgtUser
        self.mgtPrivKey = mgtPrivKey
        # Use ssh only when full credentials are available.
        if self.mgtUser and self.mgtPrivKey:
            debug(1,'SSH will be used to connect to LVM backend')
            self.cmd_prefix = self.ssh_cmd_prefix
        else:
            self.cmd_prefix = [ ]

    # Prepend the connection prefix and substitute every proxy-related
    # variable in 'command' (a list of tokens); return the new token list.
    def parse(self,command):
        expanded = []
        for token in list(self.cmd_prefix) + list(command):
            if token == '%%ISCSI_PROXY%%':
                token = "%s@%s" % (self.mgtUser,self.proxyHost)
            elif re.search('%%LOGVOL_PATH%%',token):
                token = re.sub('%%LOGVOL_PATH%%',self.volumeName+"/%%UUID%%",token)
            elif re.search('%%NEW_LOGVOL_PATH%%',token):
                token = re.sub('%%NEW_LOGVOL_PATH%%',self.volumeName+"/%%SNAP_UUID%%",token)
            elif token == '%%PRIVKEY%%':
                token = self.mgtPrivKey
            elif token == '%%VOLUME_NAME%%':
                token = self.volumeName
            elif re.search('%%PORTAL%%',token):
                token = re.sub('%%PORTAL%%',self.proxyHost+":3260",token)
            expanded.append(token)
        return expanded

    # Return iSCSI back-end type
    def getType(self):
        return 'LVM'
#################################################################
# Class describing a LUN and implementing the supported actions #
#################################################################
class LUN:
    """A LUN (logical unit) and the actions supported on it.

    All actual work is delegated to the iSCSI back-end object ('proxy')
    given at construction time; this class sequences the back-end commands
    returned by proxy.getCmd() and handles rollback on failure.
    """

    # Some LUN commands (e.g. rebase) need to return information as a string on stdout
    # that will be captured by pdisk. The string is defined using the same tokens as commands.
    # By default, a command returns nothing on stdout.
    # This information is returned only on successful execution of the action.
    action_output = {'rebase':'%%SNAP_UUID%%',
                     }

    def __init__(self,uuid,size=None,proxy=None):
        # uuid: LUN unique identifier (string)
        # size: LUN size as a string (unit decided by the back-end command)
        # proxy: back-end object (Backend subclass) that executes commands
        self.uuid = uuid
        self.size = size
        self.proxy = proxy
        # Another LUN involved in actions like rebase or snapshot
        self.associatedLUN = None

    def getUuid(self):
        return self.uuid

    # The following thin wrappers run one LUN action each and return the
    # status of the last back-end command executed (0 = success).
    def check(self):
        status,optInfo = self.__executeAction__('check')
        return status

    def create(self):
        status,optInfo = self.__executeAction__('create')
        return status

    def delete(self):
        status,optInfo = self.__executeAction__('delete')
        return status

    def getSize(self):
        # Side effect: stores the retrieved size in self.size
        status,self.size = self.__executeAction__('size')
        if status != 0:
            abort('Failure to retrieve size of LUN %s' % (self.uuid))
        return status

    def getTurl(self):
        # Returns the transport URL itself (not a status), aborting on failure
        status,self.turl = self.__executeAction__('getturl')
        if status != 0:
            abort('Failure to retrieve Transport URL of %s' % (self.uuid))
        return self.turl

    def map(self):
        status,optInfo = self.__executeAction__('map')
        return status

    def rebase(self):
        # On back-ends that need it (e.g. LVM), create a brand-new LUN to
        # receive the rebased contents; otherwise rebase in place.
        if self.proxy.newLunRequired('rebase'):
            #TODO: generate a UUID based on creation timestamp as in PDisk
            new_lun_uuid = str(uuid.uuid4())
            self.getSize()
            self.associatedLUN = LUN(new_lun_uuid,size=self.size,proxy=self.proxy)
            if self.associatedLUN.create() != 0:
                abort('An error occured creating a new LUN for rebasing %s' % (self.uuid))
        else:
            self.associatedLUN = self # To simplify returned value
        status,optInfo = self.__executeAction__('rebase')
        return status

    def snapshot(self,snapshot_lun):
        # snapshot_lun: the LUN object that will hold the snapshot
        self.associatedLUN = snapshot_lun
        status,optInfo = self.__executeAction__('snapshot')
        return status

    def unmap(self):
        status,optInfo = self.__executeAction__('unmap')
        return status

    # Execute an action on a LUN.
    # An action may involve several actual commands: getCmd() method of proxy is a generator
    # returning the commands to execute one by one.
    # In case an error occurs during one command, try to continue...
    # Return the status of the last command executed and an optional additional value returned
    # by the command.
    # Optionally a string is printed on stdout to allow the script to return information to
    # the caller.
    # Special values for commands are:
    #  - None: action is not implemented
    #  - empty list: action does nothing
    def __executeAction__(self,action):
        status = 0 # Assume success
        optInfo = None
        for cmd_toks,successMsg,failure_cmd_toks in self.proxy.getCmd(action):
            # When returned command for action is None, it means that the action is not implemented
            if cmd_toks == None:
                abort("Action '%s' not implemented by back-end type '%s'" % (action,self.proxy.getType()))
            command = Command(action,self.parseCmd(cmd_toks),successMsg)
            command.execute()
            status,optInfo = command.checkStatus()
            if status != 0 and failure_cmd_toks:
                # Attempt the rollback command associated with the failed command,
                # then stop processing this LUN action.
                command = Command(action,self.parseCmd(failure_cmd_toks),successMsg)
                command.execute()
                status_,optInfo_ = command.checkStatus()
                if status_ != 0:
                    print "Rollback command",failure_cmd_toks,"failed:", optInfo_
                break
            # If failure_cmd_toks is an empty string, stop LUN action processing
            elif failure_cmd_toks == '':
                break
        if status == 0 and action in self.action_output:
            # Report the action's declared output string (tokens expanded) to the caller
            print self.__parse__(self.action_output[action])
        return status,optInfo

    # Parse all variables related to current LUN in the command
    # (passed and returned as a list of tokens).
    def parseCmd(self,action_cmd):
        for i in range(len(action_cmd)):
            action_cmd[i] = self.__parse__(action_cmd[i])
        return action_cmd

    # Do the actual string parsing: substitute LUN-related tokens
    # (%%SIZE%%, %%UUID%%, %%SNAP_UUID%%) in one token.
    def __parse__(self,string):
        if re.search('%%SIZE%%',string):
            string = re.sub('%%SIZE%%',self.size,string)
        elif re.search('%%UUID%%',string):
            string = re.sub('%%UUID%%',self.getUuid(),string)
        elif re.search('%%SNAP_UUID%%',string):
            string = re.sub('%%SNAP_UUID%%',self.associatedLUN.getUuid(),string)
        return string
#######################################################
# Class representing a command passed to the back-end #
#######################################################
class Command:
    """A single command passed to the back-end: execution and status checking."""

    # Markers used to delimit command output in log messages
    cmd_output_start = '<<<<<<<<<<'
    cmd_output_end = '>>>>>>>>>>'

    def __init__(self,action,cmd,successMsgs=None):
        # action: the LUN action this command belongs to (used in messages)
        # cmd: command to execute, as a list of tokens
        # successMsgs: list of regexp patterns matching a successful output,
        # or None when the command is expected to produce no output on success
        self.action = action
        self.action_cmd = cmd
        self.successMsgs = successMsgs
        self.proc = None

    def execute(self):
        status = 0
        # Execute command: NetApp commands don't return an exit code. When a command is
        # successful, its output is empty.
        #action_cmd = 'echo ' + self.action_cmd
        debug(1,"Executing command: '%s'" % (' '.join(self.action_cmd)))
        try:
            self.proc = Popen(self.action_cmd, shell=False, stdout=PIPE, stderr=STDOUT)
        except OSError, details:
            # NOTE(review): abort() exits the script, so the two lines below
            # are unreachable — confirm whether a non-fatal failure was intended.
            abort('Failed to execute %s action: %s' % (self.action,details))
            status = 1
        return status

    def checkStatus(self):
        # Wait for command completion and decide success based on the exit code
        # and, when defined, on the expected output patterns.
        # Returns (retcode, optInfo) where optInfo is the first capturing group
        # of the matching success pattern, if any.
        optInfo = None
        try:
            retcode = self.proc.wait()
            output = self.proc.communicate()[0]
            if retcode != 0:
                abort('An error occured during %s action (error=%s). Command output:\n%s\n%s\n%s' % (self.action,retcode,self.cmd_output_start,output,self.cmd_output_end))
            else:
                # Need to check if the command is expected to return an output when successful
                success = False
                if self.successMsgs:
                    for successPattern in self.successMsgs:
                        output_regexp = re.compile(successPattern)
                        matcher = output_regexp.search(output)
                        if matcher:
                            # Return only the first capturing group
                            if output_regexp.groups > 0:
                                optInfo = matcher.group(1)
                            success = True
                            break
                else:
                    # No pattern defined: success means empty output
                    if len(output) == 0:
                        success = True
                if success:
                    debug(1,'%s action completed successfully.' % (self.action))
                    if len(output) > 0:
                        debug(2,'Command output:\n%s\n%s\n%s' % (self.cmd_output_start,output,self.cmd_output_end))
                else:
                    retcode = -1
                    debug(0,'An error occured during %s action. Command output:\n%s\n%s\n%s' % (self.action,self.cmd_output_start,output,self.cmd_output_end))
        except OSError, details:
            abort('Failed to execute %s action: %s' % (self.action,details))
        return retcode,optInfo
###############################
# Functions to handle logging #
###############################
def abort(msg):
    """Log a fatal error and terminate the script with exit code 2."""
    message = "Persistent disk operation failed:\n%s" % (msg)
    logger.error(message)
    sys.exit(2)
def debug(level,msg):
    """Log 'msg' when 'level' does not exceed the configured verbosity.

    Level 0 messages are logged at INFO; higher levels are logged at DEBUG.
    """
    if level > verbosity:
        return
    if level == 0:
        logger.info(msg)
    else:
        logger.debug(msg)
#############
# Main code #
#############

# Configure loggers and handlers.
# Initially configure only syslog and stderr handler.
logging_source = 'stratuslab-pdisk'
logger = logging.getLogger(logging_source)
logger.setLevel(logging.DEBUG)
#fmt=logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fmt=logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
# Handler used to report to SVN must display only the message to allow proper XML formatting
svn_fmt=logging.Formatter("%(message)s")
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)

# Parse configuration and options
usage_text = """usage: %prog [options] action_parameters
Parameters:
action=check: LUN_UUID
action=create: LUN_UUID LUN_Size
action=delete: LUN_UUID
action=rebase: LUN_UUID (will return the rebased LUN UUID on stdout)
action=snapshot: LUN_UUID New_LUN_UUID Snapshot_Size
action=getturl: LUN_UUID
action=map: LUN_UUID
action=unmap: LUN_UUID
"""
parser = OptionParser(usage=usage_text)
parser.add_option('--config', dest='config_file', action='store', default=config_file_default, help='Name of the configuration file to use (D: %s)' % (config_file_default))
parser.add_option('--action', dest='action', action='store', default=action_default, help='Action to execute. Valid actions: %s'%(valid_actions_str))
parser.add_option('-v', '--debug', '--verbose', dest='verbosity', action='count', default=0, help='Increase verbosity level for debugging (multiple allowed)')
parser.add_option('--version', dest='version', action='store_true', default=False, help='Display various information about this script')
options, args = parser.parse_args()

# --version: display information and exit.
# NOTE(review): __version__ and __author__ are not defined in this file, so
# this branch would raise a NameError — confirm where they are expected to
# come from.
if options.version:
    debug (0,"Version %s written by %s" % (__version__,__author__))
    debug (0,__doc__)
    sys.exit(0)
if options.verbosity:
    verbosity = options.verbosity

# Validate the requested action and its number of arguments
if options.action in valid_actions:
    if len(args) < valid_actions[options.action]:
        debug(0,"Insufficient argument provided (%d required)" % (valid_actions[options.action]))
        parser.print_help()
        abort("")
else:
    if options.action:
        debug(0,"Invalid action requested (%s)\n" % (options.action))
    else:
        debug(0,"No action specified\n")
    parser.print_help()
    abort("")
# Read configuration file.
# The file must exists as there is no sensible default value for several options.
config = ConfigParser.ConfigParser()
config.readfp(config_defaults)
try:
    config.readfp(open(options.config_file))
except IOError, (errno,errmsg):
    # errno 2 = ENOENT (file missing)
    if errno == 2:
        abort('Configuration file (%s) is missing.' % (options.config_file))
    else:
        abort('Error opening configuration file (%s): %s (errno=%s)' % (options.config_file,errmsg,errno))

# Add a rotating file handler when a log file is configured; otherwise fall
# back to syslog for warnings and above.
logfile_handler = None
try:
    log_file = config.get(config_main_section,'log_file')
    if log_file:
        logfile_handler = logging.handlers.RotatingFileHandler(log_file,'a',100000,10)
        logfile_handler.setLevel(logging.DEBUG)
        logfile_handler.setFormatter(fmt)
        logger.addHandler(logfile_handler)
except ValueError:
    abort("Invalid value specified for 'log_file' (section %s)" % (config_main_section))
if logfile_handler == None or not log_file:
    # Use standard log destination in case a log file is not defined
    syslog_handler = logging.handlers.SysLogHandler('/dev/log')
    syslog_handler.setLevel(logging.WARNING)
    logger.addHandler(syslog_handler)

# Only the first iSCSI proxy of the configured list is actually used
try:
    iscsi_proxies_list = config.get(config_main_section,'iscsi_proxies')
    iscsi_proxies = iscsi_proxies_list.split(',')
    iscsi_proxy_name = iscsi_proxies[0]
except ValueError:
    abort("Invalid value specified for 'iscsi_proxies' (section %s) (must be a comma-separated list)" % (config_main_section))

# The proxy section must define the back-end type (netapp, lvm, file)
try:
    backend_variant=config.get(iscsi_proxy_name,'type')
except:
    abort("Section '%s' or required attribute 'type' missing" % (iscsi_proxy_name))
# NetApp back-end configuration
if backend_variant.lower() == 'netapp':
    # Retrieve NetApp back-end mandatory attributes.
    # Mandatory attributes should be defined as keys of backend_attributes with an arbitrary value.
    # Key name must match the attribute name in the configuration file.
    backend_attributes = {'initiator_group':'',
                          'lun_namespace':'',
                          'volume_name':'',
                          'volume_snapshot_prefix':''
                          }
    try:
        for attribute in backend_attributes.keys():
            backend_attributes[attribute]=config.get(iscsi_proxy_name,attribute)
    except:
        abort("Section '%s' or required attribute '%s' missing" % (iscsi_proxy_name,attribute))
    # User name and private key: the proxy section overrides the main section
    try:
        backend_attributes['mgt_user_name']=config.get(iscsi_proxy_name,'mgt_user_name')
    except:
        try:
            backend_attributes['mgt_user_name']=config.get(config_main_section,'mgt_user_name')
        except:
            abort("User name to use for connecting to iSCSI proxy undefined")
    try:
        backend_attributes['mgt_user_private_key']=config.get(iscsi_proxy_name,'mgt_user_private_key')
    except:
        try:
            backend_attributes['mgt_user_private_key']=config.get(config_main_section,'mgt_user_private_key')
        except:
            abort("SSH private key to use for connecting to iSCSI proxy undefined")
    # Create iSCSI back-end object
    iscsi_proxy = NetAppBackend(iscsi_proxy_name,
                                backend_attributes['mgt_user_name'],
                                backend_attributes['mgt_user_private_key'],
                                backend_attributes['volume_name'],
                                backend_attributes['lun_namespace'],
                                backend_attributes['initiator_group'],
                                backend_attributes['volume_snapshot_prefix']
                                )
# LVM back-end configuration
elif backend_variant.lower() == 'lvm':
    # Retrieve LVM back-end mandatory attributes.
    # Mandatory attributes should be defined as keys of backend_attributes with an arbitrary value.
    # Key name must match the attribute name in the configuration file.
    backend_attributes = {'volume_name':'',
                          }
    try:
        for attribute in backend_attributes.keys():
            backend_attributes[attribute]=config.get(iscsi_proxy_name,attribute)
    except:
        abort("Section '%s' or required attribute '%s' missing" % (iscsi_proxy_name,attribute))
    # 'local' is a reserved name to designate the local machine: in this case, don't use ssh to
    # connect to backend
    if iscsi_proxy_name == 'local':
        backend_attributes['mgt_user_name'] = None
        backend_attributes['mgt_user_private_key'] = None
    else:
        try:
            backend_attributes['mgt_user_name']=config.get(iscsi_proxy_name,'mgt_user_name')
        except:
            try:
                backend_attributes['mgt_user_name']=config.get(config_main_section,'mgt_user_name')
            except:
                abort("User name to use for connecting to iSCSI proxy undefined")
        try:
            backend_attributes['mgt_user_private_key']=config.get(iscsi_proxy_name,'mgt_user_private_key')
        except:
            try:
                backend_attributes['mgt_user_private_key']=config.get(config_main_section,'mgt_user_private_key')
            except:
                abort("SSH private key to use for connecting to iSCSI proxy undefined")
    # Create iSCSI back-end object
    iscsi_proxy = LVMBackend(iscsi_proxy_name,
                             backend_attributes['volume_name'],
                             backend_attributes['mgt_user_name'],
                             backend_attributes['mgt_user_private_key'],
                             )
# File back-end configuration
elif backend_variant.lower() == 'file':
    backend_attributes = {'volume_name':'',
                          }
    try:
        for attribute in backend_attributes.keys():
            backend_attributes[attribute]=config.get(iscsi_proxy_name,attribute)
    except:
        abort("Section '%s' or required attribute '%s' missing" % (iscsi_proxy_name,attribute))
    # 'local' is a reserved name to designate the local machine: don't use ssh in this case
    if iscsi_proxy_name == 'local':
        backend_attributes['mgt_user_name'] = None
        backend_attributes['mgt_user_private_key'] = None
    else:
        try:
            backend_attributes['mgt_user_name']=config.get(iscsi_proxy_name,'mgt_user_name')
        except:
            try:
                backend_attributes['mgt_user_name']=config.get(config_main_section,'mgt_user_name')
            except:
                abort("User name to use for connecting to iSCSI proxy undefined")
        try:
            backend_attributes['mgt_user_private_key']=config.get(iscsi_proxy_name,'mgt_user_private_key')
        except:
            try:
                backend_attributes['mgt_user_private_key']=config.get(config_main_section,'mgt_user_private_key')
            except:
                abort("SSH private key to use for connecting to iSCSI proxy undefined")
    # Create iSCSI back-end object
    iscsi_proxy = FileBackend(iscsi_proxy_name,
                              backend_attributes['volume_name'],
                              backend_attributes['mgt_user_name'],
                              backend_attributes['mgt_user_private_key'],
                              )
# Abort if iSCSI back-end variant specified is not supported
else:
    abort("Unsupported iSCSI back-end variant '%s' (supported variants: %s)" % (backend_variant,','.join(iscsi_supported_variants)))
# Execute requested action.
# Each branch builds a LUN object from the positional arguments and runs
# the corresponding action; 'status' becomes the process exit code.
if options.action == 'check':
    debug(1,"Checking LUN existence...")
    lun = LUN(args[0],proxy=iscsi_proxy)
    status = lun.check()
elif options.action == 'create':
    debug(1,"Creating LUN...")
    lun = LUN(args[0],size=args[1],proxy=iscsi_proxy)
    status = lun.create()
elif options.action == 'delete':
    debug(1,"Deleting LUN...")
    lun = LUN(args[0],proxy=iscsi_proxy)
    status = lun.delete()
elif options.action == 'rebase':
    debug(1,"Rebasing LUN...")
    lun = LUN(args[0],proxy=iscsi_proxy)
    status = lun.rebase()
elif options.action == 'snapshot':
    debug(1,"Doing a LUN snapshot...")
    lun = LUN(args[1],size=args[2],proxy=iscsi_proxy)
    snapshot_lun = LUN(args[0],proxy=iscsi_proxy)
    # Only the last error is returned
    status = lun.snapshot(snapshot_lun)
    status = snapshot_lun.map()
elif options.action == 'map':
    debug(1,"Mapping LUN...")
    lun = LUN(args[0],proxy=iscsi_proxy)
    status = lun.map()
elif options.action == 'unmap':
    debug(1,"Unmapping LUN...")
    lun = LUN(args[0],proxy=iscsi_proxy)
    status = lun.unmap()
elif options.action == 'getturl' :
    debug(1,"Returning Transport URL...")
    lun = LUN(args[0], proxy=iscsi_proxy)
    turl = lun.getTurl()
    # An empty transport URL is reported as a failure
    if turl == "":
        status = 1
    else:
        status = 0
        print turl
else:
    abort ("Internal error: unimplemented action (%s)" % (options.action))
sys.exit(status)
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contexts and constructors for integration testing."""
import asyncio
import contextlib
import functools
from typing import Sequence
from absl.testing import parameterized
import grpc
import portpicker
import tensorflow_federated as tff
from tensorflow_federated.python.tests import remote_runtime_test_utils
WORKER_PORTS = [portpicker.pick_unused_port() for _ in range(2)]
AGGREGATOR_PORTS = [portpicker.pick_unused_port() for _ in range(2)]
_GRPC_MAX_MESSAGE_LENGTH_BYTES = 1024 * 1024 * 1024
_GRPC_CHANNEL_OPTIONS = [
('grpc.max_message_length', _GRPC_MAX_MESSAGE_LENGTH_BYTES),
('grpc.max_receive_message_length', _GRPC_MAX_MESSAGE_LENGTH_BYTES),
('grpc.max_send_message_length', _GRPC_MAX_MESSAGE_LENGTH_BYTES)
]
def create_native_local_caching_context():
  """Builds a local execution context whose executors are wrapped in caches."""
  base_factory = tff.framework.local_executor_factory()

  def _caching_stack(cardinalities):
    # Build the plain local executor first, then layer a cache on top of it.
    inner = base_factory.create_executor(cardinalities)
    return tff.framework.CachingExecutor(inner)

  caching_factory = tff.framework.ResourceManagingExecutorFactory(
      _caching_stack)
  return tff.framework.ExecutionContext(caching_factory)
def _get_remote_executors_for_ports(ports):
  """Returns one `RemoteExecutor` per port, over an insecure local channel."""

  def _executor_for(port):
    endpoint = f'[::]:{port}'
    channel = grpc.insecure_channel(endpoint, options=_GRPC_CHANNEL_OPTIONS)
    return tff.framework.RemoteExecutor(channel=channel)

  return [_executor_for(port) for port in ports]
def create_localhost_remote_tf_context(
    tf_serving_ports: Sequence[str]) -> tff.framework.ExecutionContext:
  """Creates an execution context which pushes TensorFlow to remote workers.

  Args:
    tf_serving_ports: Ports of already-running worker services on localhost;
      one remote executor is created per port.

  Returns:
    A `tff.framework.ExecutionContext` whose clients are the remote workers.

  Raises:
    ValueError: If more clients are requested than there are remote workers.
  """
  remote_executors = _get_remote_executors_for_ports(tf_serving_ports)
  workers = [
      tff.framework.ThreadDelegatingExecutor(ex) for ex in remote_executors
  ]
  def _stack_fn(cardinalities):
    # Built fresh per invocation; `set_cardinalities` is a coroutine, so we
    # drive it to completion on a dedicated event loop.
    event_loop = asyncio.new_event_loop()
    for ex in remote_executors:
      # Configure each remote worker to have a single client.
      event_loop.run_until_complete(ex.set_cardinalities({tff.CLIENTS: 1}))
    if cardinalities.get(tff.CLIENTS) is not None and cardinalities[
        tff.CLIENTS] > len(remote_executors):
      raise ValueError(
          'Requested {} clients but this stack can only support at most {}.'
          .format(cardinalities.get(tff.CLIENTS), len(remote_executors)))
    # No explicit client cardinality means "use every worker".
    if cardinalities.get(tff.CLIENTS) is None:
      requested_workers = workers
    else:
      requested_workers = workers[:cardinalities[tff.CLIENTS]]
    federating_strategy_factory = tff.framework.FederatedResolvingStrategy.factory(
        {
            tff.CLIENTS: requested_workers,
            tff.SERVER: tff.framework.EagerTFExecutor()
        })
    fed_ex = tff.framework.FederatingExecutor(federating_strategy_factory,
                                              tff.framework.EagerTFExecutor())
    top_rre = tff.framework.ReferenceResolvingExecutor(fed_ex)
    return top_rre
  # `ensure_closed` makes the factory responsible for closing the remote
  # executors' channels when the context is torn down.
  ex_factory = tff.framework.ResourceManagingExecutorFactory(
      _stack_fn, ensure_closed=remote_executors)
  # When the RRE goes in we won't need this anymore
  compiler_fn = tff.backends.native.transform_mathematical_functions_to_tensorflow
  return tff.framework.ExecutionContext(
      executor_fn=ex_factory, compiler_fn=compiler_fn)
def _get_all_contexts():
  """Returns a list of (name, context[, server_contexts]) tuples for tests.

  Entries with a third element carry the server contexts that must be
  entered while the test runs (see `with_environment`).
  """
  # pyformat: disable
  return [
      ('native_local', tff.backends.native.create_local_execution_context()),
      ('native_local_caching', create_native_local_caching_context()),
      ('native_remote',
       remote_runtime_test_utils.create_localhost_remote_context(WORKER_PORTS),
       remote_runtime_test_utils.create_inprocess_worker_contexts(WORKER_PORTS)),
      ('native_remote_intermediate_aggregator',
       remote_runtime_test_utils.create_localhost_remote_context(AGGREGATOR_PORTS),
       remote_runtime_test_utils.create_inprocess_aggregator_contexts(WORKER_PORTS, AGGREGATOR_PORTS)),
      ('native_sizing', tff.backends.native.create_sizing_execution_context()),
      ('native_thread_debug',
       tff.backends.native.create_thread_debugging_execution_context()),
      ('reference', tff.backends.reference.create_reference_context()),
      ('test', tff.backends.test.create_test_execution_context()),
  ]
  # pyformat: enable
def with_context(context):
  """A decorator for running tests in the given `context`."""

  def _decorate(fn):

    @functools.wraps(fn)
    def _run_in_context(self):
      # Install the context for the duration of the test only.
      stack = tff.framework.get_context_stack()
      with stack.install(context):
        fn(self)

    return _run_in_context

  return _decorate
def with_environment(server_contexts):
  """A decorator for running tests with the given server contexts entered."""

  def _decorate(fn):

    @functools.wraps(fn)
    def _run_in_environment(self):
      # Enter every server context in order; ExitStack unwinds them in
      # reverse once the test body finishes (or raises).
      with contextlib.ExitStack() as env:
        for ctx in server_contexts:
          env.enter_context(ctx)
        fn(self)

    return _run_in_environment

  return _decorate
def with_contexts(*args):
  """A decorator for creating tests parameterized by context."""

  def _decorate(fn, *named_contexts):
    contexts = named_contexts if named_contexts else _get_all_contexts()

    @parameterized.named_parameters(*contexts)
    def _parameterized_test(self, context, server_contexts=None):
      test_fn = with_context(context)(fn)
      if server_contexts is not None:
        test_fn = with_environment(server_contexts)(test_fn)
      test_fn(self)

    return _parameterized_test

  # Support both bare usage (`@with_contexts`) and parameterized usage
  # (`@with_contexts(('name', context), ...)`).
  if len(args) == 1 and callable(args[0]):
    return _decorate(args[0])
  return lambda fn: _decorate(fn, *args)
Remove unused context-testing functions.
PiperOrigin-RevId: 366461834
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contexts and constructors for integration testing."""
import contextlib
import functools
from absl.testing import parameterized
import portpicker
import tensorflow_federated as tff
from tensorflow_federated.python.tests import remote_runtime_test_utils
WORKER_PORTS = [portpicker.pick_unused_port() for _ in range(2)]
AGGREGATOR_PORTS = [portpicker.pick_unused_port() for _ in range(2)]
def create_native_local_caching_context():
  """Builds a local execution context whose executors are wrapped in caches."""
  base_factory = tff.framework.local_executor_factory()

  def _caching_stack(cardinalities):
    # Construct the plain local executor, then layer a cache on top of it.
    return tff.framework.CachingExecutor(
        base_factory.create_executor(cardinalities))

  return tff.framework.ExecutionContext(
      tff.framework.ResourceManagingExecutorFactory(_caching_stack))
def _get_all_contexts():
  """Returns a list of (name, context[, server_contexts]) tuples for tests.

  Entries with a third element carry the server contexts that must be
  entered while the test runs (see `with_environment`).
  """
  # pyformat: disable
  return [
      ('native_local', tff.backends.native.create_local_execution_context()),
      ('native_local_caching', create_native_local_caching_context()),
      ('native_remote',
       remote_runtime_test_utils.create_localhost_remote_context(WORKER_PORTS),
       remote_runtime_test_utils.create_inprocess_worker_contexts(WORKER_PORTS)),
      ('native_remote_intermediate_aggregator',
       remote_runtime_test_utils.create_localhost_remote_context(AGGREGATOR_PORTS),
       remote_runtime_test_utils.create_inprocess_aggregator_contexts(WORKER_PORTS, AGGREGATOR_PORTS)),
      ('native_sizing', tff.backends.native.create_sizing_execution_context()),
      ('native_thread_debug',
       tff.backends.native.create_thread_debugging_execution_context()),
      ('reference', tff.backends.reference.create_reference_context()),
      ('test', tff.backends.test.create_test_execution_context()),
  ]
  # pyformat: enable
def with_context(context):
  """A decorator for running tests in the given `context`."""

  def _decorate(fn):

    @functools.wraps(fn)
    def _run_in_context(self):
      # The context is installed only for the duration of the test body.
      with tff.framework.get_context_stack().install(context):
        fn(self)

    return _run_in_context

  return _decorate
def with_environment(server_contexts):
  """A decorator for running tests with the given server contexts entered."""

  def _decorate(fn):

    @functools.wraps(fn)
    def _run_in_environment(self):
      # Enter every server context in order; ExitStack unwinds them in
      # reverse once the test body finishes (or raises).
      with contextlib.ExitStack() as env:
        for ctx in server_contexts:
          env.enter_context(ctx)
        fn(self)

    return _run_in_environment

  return _decorate
def with_contexts(*args):
  """A decorator for creating tests parameterized by context."""

  def _decorate(fn, *named_contexts):
    contexts = named_contexts if named_contexts else _get_all_contexts()

    @parameterized.named_parameters(*contexts)
    def _parameterized_test(self, context, server_contexts=None):
      test_fn = with_context(context)(fn)
      if server_contexts is not None:
        test_fn = with_environment(server_contexts)(test_fn)
      test_fn(self)

    return _parameterized_test

  # Support both bare usage (`@with_contexts`) and parameterized usage
  # (`@with_contexts(('name', context), ...)`).
  if len(args) == 1 and callable(args[0]):
    return _decorate(args[0])
  return lambda fn: _decorate(fn, *args)
|
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT.

    Pins the tensorflow/runtime (TFRT) repository to a fixed commit and
    SHA-256 checksum so builds are reproducible. The commit/sha lines are
    machine-edited — do not reformat them.
    """

    # Attention: tools parse and update these lines.
    TFRT_COMMIT = "c67d273ef0b41c041eb2f3e29be4f9b08326055e"
    TFRT_SHA256 = "ec584108567e85cdfeb9208f0b4d22b9e1f63d7e8d87e1836f3b6af720d4c2d0"

    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
Update TFRT dependency to use revision
http://github.com/tensorflow/runtime/commit/73ef596c0dba3638242bcb57e895d4163e31da64.
PiperOrigin-RevId: 439429888
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT.

    Pins the tensorflow/runtime (TFRT) repository to a fixed commit and
    SHA-256 checksum so builds are reproducible. The commit/sha lines are
    machine-edited — do not reformat them.
    """

    # Attention: tools parse and update these lines.
    TFRT_COMMIT = "73ef596c0dba3638242bcb57e895d4163e31da64"
    TFRT_SHA256 = "b27b63008b46d41f5dc320790c3ca122a03867fe39f3840deeae82ed45d3b3b7"

    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
|
##########################################################################
#
# Copyright (c) 2013-2015, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import imath
import inspect
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
import GafferOSL
import GafferOSLTest
class OSLImageTest( GafferImageTest.ImageTestCase ) :
representativeDeepImagePath = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/representativeDeepImage.exr" )
def test( self ) :
	"""End-to-end OSLImage check, run twice : once driving the node via a
	closure plug, once via a color plug. Verifies pass-through behaviour,
	dirtiness propagation, channel substitution and new-channel creation."""

	for useClosure in [ False, True ]:

		# Shader network that reads R/G/B and reassembles them as B/G/R.
		getRed = GafferOSL.OSLShader()
		getRed.loadShader( "ImageProcessing/InChannel" )
		getRed["parameters"]["channelName"].setValue( "R" )

		getGreen = GafferOSL.OSLShader()
		getGreen.loadShader( "ImageProcessing/InChannel" )
		getGreen["parameters"]["channelName"].setValue( "G" )

		getBlue = GafferOSL.OSLShader()
		getBlue.loadShader( "ImageProcessing/InChannel" )
		getBlue["parameters"]["channelName"].setValue( "B" )

		floatToColor = GafferOSL.OSLShader()
		floatToColor.loadShader( "Conversion/FloatToColor" )
		floatToColor["parameters"]["r"].setInput( getBlue["out"]["channelValue"] )
		floatToColor["parameters"]["g"].setInput( getGreen["out"]["channelValue"] )
		floatToColor["parameters"]["b"].setInput( getRed["out"]["channelValue"] )

		reader = GafferImage.ImageReader()
		reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" ) )

		# Add an extra channel that the shader never touches, so we can check
		# untouched channels are passed through unaltered.
		shuffle = GafferImage.Shuffle()
		shuffle["in"].setInput( reader["out"] )
		shuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "channel" ) )
		shuffle["channels"]["channel"]["out"].setValue( 'unchangedR' )
		shuffle["channels"]["channel"]["in"].setValue( 'R' )

		image = GafferOSL.OSLImage()
		image["in"].setInput( shuffle["out"] )

		# we haven't connected the shader yet, so the node should act as a pass through
		self.assertEqual( GafferImage.ImageAlgo.image( image["out"] ), GafferImage.ImageAlgo.image( shuffle["out"] ) )

		# that should all change when we hook up a shader
		if useClosure:

			outRGB = GafferOSL.OSLShader()
			outRGB.loadShader( "ImageProcessing/OutLayer" )
			outRGB["parameters"]["layerColor"].setInput( floatToColor["out"]["c"] )

			imageShader = GafferOSL.OSLShader()
			imageShader.loadShader( "ImageProcessing/OutImage" )
			imageShader["parameters"]["in0"].setInput( outRGB["out"]["layer"] )

			image["channels"].addChild( Gaffer.NameValuePlug( "", GafferOSL.ClosurePlug(), "testClosure" ) )

		else:

			image["channels"].addChild( Gaffer.NameValuePlug( "", imath.Color3f(), "testColor" ) )

		cs = GafferTest.CapturingSlot( image.plugDirtiedSignal() )

		# Asserts that exactly `expected` plugs (relative to the OSLImage node)
		# were dirtied since the last check, then clears the capture.
		def checkDirtiness( expected):
			self.assertEqual( [ i[0].fullName() for i in cs ], [ "OSLImage." + i for i in expected ] )
			del cs[:]

		if useClosure:
			image["channels"]["testClosure"]["value"].setInput( imageShader["out"]["out"] )
			channelsDirtied = ["channels.testClosure.value", "channels.testClosure"]
		else:
			image["channels"]["testColor"]["value"].setInput( floatToColor["out"]["c"] )
			channelsDirtied = [
				"channels.testColor.value.r", "channels.testColor.value.g", "channels.testColor.value.b",
				"channels.testColor.value", "channels.testColor"
			]

		checkDirtiness( channelsDirtied + [
			"channels", "__shader", "__shading",
			"__affectedChannels", "out.channelNames", "out.channelData", "out"
		] )

		inputImage = GafferImage.ImageAlgo.image( shuffle["out"] )

		with Gaffer.ContextMonitor( image["__shading"] ) as monitor :

			self.assertEqual( image["out"].channelNames(), IECore.StringVectorData( [ "A", "B", "G", "R", "unchangedR" ] ) )
			# Evaluating channel names only requires evaluating the shading plug if we have a closure
			self.assertEqual( monitor.combinedStatistics().numUniqueContexts(), 1 if useClosure else 0 )

			# Channels we don't touch should be passed through unaltered
			for channel, changed in [('B',True), ('G',True), ('R',True), ('A',False), ('unchangedR',False) ]:
				self.assertEqual(
					image["out"].channelDataHash( channel, imath.V2i( 0, 0 ) ) ==
					shuffle["out"].channelDataHash( channel, imath.V2i( 0, 0 ) ),
					not changed
				)
				image["out"].channelData( channel, imath.V2i( 0, 0 ) )

			# Should only need one shading evaluate for all channels
			self.assertEqual( monitor.combinedStatistics().numUniqueContexts(), 1 )

		# The shader swaps R and B, and leaves G alone.
		outputImage = GafferImage.ImageAlgo.image( image["out"] )
		self.assertNotEqual( inputImage, outputImage )
		self.assertEqual( outputImage["R"], inputImage["B"] )
		self.assertEqual( outputImage["G"], inputImage["G"] )
		self.assertEqual( outputImage["B"], inputImage["R"] )

		# changes in the shader network should signal more dirtiness
		getGreen["parameters"]["channelName"].setValue( "R" )
		checkDirtiness( channelsDirtied + [
			"channels", "__shader", "__shading",
			"__affectedChannels", "out.channelNames", "out.channelData", "out"
		] )

		floatToColor["parameters"]["r"].setInput( getRed["out"]["channelValue"] )
		checkDirtiness( channelsDirtied + [
			"channels", "__shader", "__shading",
			"__affectedChannels", "out.channelNames", "out.channelData", "out"
		] )

		# With every input rewired to R, all three output channels read R.
		inputImage = GafferImage.ImageAlgo.image( shuffle["out"] )
		outputImage = GafferImage.ImageAlgo.image( image["out"] )
		self.assertEqual( outputImage["R"], inputImage["R"] )
		self.assertEqual( outputImage["G"], inputImage["R"] )
		self.assertEqual( outputImage["B"], inputImage["R"] )
		self.assertEqual( outputImage["A"], inputImage["A"] )
		self.assertEqual( outputImage["unchangedR"], inputImage["unchangedR"] )

		# Disconnecting the input falls back to the default format.
		image["in"].setInput( None )
		checkDirtiness( [
			'in.viewNames', 'in.format', 'in.dataWindow', 'in.metadata', 'in.deep', 'in.sampleOffsets', 'in.channelNames', 'in.channelData', 'in',
			'out.viewNames', '__shading', '__affectedChannels',
			'out.channelNames', 'out.channelData', 'out.format', 'out.dataWindow', 'out.metadata', 'out.deep', 'out.sampleOffsets', 'out'
		] )

		image["defaultFormat"]["displayWindow"]["max"]["x"].setValue( 200 )
		checkDirtiness( [
			'defaultFormat.displayWindow.max.x', 'defaultFormat.displayWindow.max', 'defaultFormat.displayWindow', 'defaultFormat',
			'__defaultIn.format', '__defaultIn.dataWindow', '__defaultIn', '__shading', '__affectedChannels',
			'out.channelNames', 'out.channelData', 'out.format', 'out.dataWindow', 'out'
		] )

		constant = GafferImage.Constant()
		image["in"].setInput( constant["out"] )
		checkDirtiness( [
			'in.viewNames', 'in.format', 'in.dataWindow', 'in.metadata', 'in.deep', 'in.sampleOffsets', 'in.channelNames', 'in.channelData', 'in',
			'out.viewNames', '__shading', '__affectedChannels',
			'out.channelNames', 'out.channelData', 'out.format', 'out.dataWindow', 'out.metadata', 'out.deep', 'out.sampleOffsets', 'out'
		] )

		image["in"].setInput( shuffle["out"] )

		# Renaming the layer should create new channels.
		if useClosure:
			outRGB["parameters"]["layerName"].setValue( "newLayer" )
		else:
			image["channels"][0]["name"].setValue( "newLayer" )

		self.assertEqual( image["out"].channelNames(), IECore.StringVectorData(
			[ "A", "B", "G", "R", "newLayer.B", "newLayer.G", "newLayer.R", "unchangedR" ]
		) )

		# All original channels are now exact pass-throughs.
		for channel in ['B', 'G', 'R', 'A', 'unchangedR' ]:
			self.assertEqual(
				image["out"].channelDataHash( channel, imath.V2i( 0, 0 ) ),
				shuffle["out"].channelDataHash( channel, imath.V2i( 0, 0 ) )
			)
			self.assertEqual(
				image["out"].channelData( channel, imath.V2i( 0, 0 ) ),
				shuffle["out"].channelData( channel, imath.V2i( 0, 0 ) )
			)

		crop = GafferImage.Crop()
		crop["area"].setValue( imath.Box2i( imath.V2i( 0, 0 ), imath.V2i( 0, 0 ) ) )
		crop["in"].setInput( shuffle["out"] )

		image["in"].setInput( crop["out"] )

		if useClosure:
			# When using closures, we can't find out about the new channels being added if the datawindow is
			# empty
			self.assertEqual( image["out"].channelNames(), IECore.StringVectorData(
				[ "A", "B", "G", "R", "unchangedR" ]
			) )
		else:
			self.assertEqual( image["out"].channelNames(), IECore.StringVectorData(
				[ "A", "B", "G", "R", "newLayer.B", "newLayer.G", "newLayer.R", "unchangedR" ]
			) )
def testAcceptsShaderSwitch( self ) :
	"""Legacy `shader` plug inputs from a Switch are redirected to
	`channels.legacyClosure.value` when loading a script."""

	script = Gaffer.ScriptNode()
	script["image"] = GafferOSL.OSLImage()
	script["switch"] = Gaffer.Switch()
	script["switch"].setup( Gaffer.Plug() )

	# We're testing a backwards compatibility special case that is
	# only enabled when loading a script, hence the use of `execute()`.
	script.execute( """script["image"]["shader"].setInput( script["switch"]["out"] )""" )

	self.assertTrue( script["image"]["channels"]["legacyClosure"]["value"].getInput().isSame( script["switch"]["out"] ) )
def testAcceptsDot( self ) :
	"""As `testAcceptsShaderSwitch`, but with a Dot between the Switch and
	the legacy `shader` plug."""

	script = Gaffer.ScriptNode()
	script["image"] = GafferOSL.OSLImage()
	script["switch"] = Gaffer.Switch()
	script["switch"].setup( Gaffer.Plug() )
	script["dot"] = Gaffer.Dot()
	script["dot"].setup( script["switch"]["out"] )

	# We're testing a backwards compatibility special case that is
	# only enabled when loading a script, hence the use of `execute()`.
	script.execute( """script["image"]["shader"].setInput( script["dot"]["out"] )""" )

	self.assertTrue( script["image"]["channels"]["legacyClosure"]["value"].getInput().isSame( script["dot"]["out"] ) )
def testChannelWithZeroValue( self ) :
	"""A shader writing a constant 0 to R must actually zero the channel,
	not be mistaken for 'channel untouched'."""

	outR = GafferOSL.OSLShader()
	outR.loadShader( "ImageProcessing/OutChannel" )
	outR["parameters"]["channelName"].setValue( "R" )
	outR["parameters"]["channelValue"].setValue( 0 )

	imageShader = GafferOSL.OSLShader()
	imageShader.loadShader( "ImageProcessing/OutImage" )
	imageShader["parameters"]["in0"].setInput( outR["out"]["channel"] )

	reader = GafferImage.ImageReader()
	reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" ) )

	image = GafferOSL.OSLImage()
	image["in"].setInput( reader["out"] )
	image["shader"].setInput( imageShader["out"]["out"] )

	inputImage = GafferImage.ImageAlgo.image( reader["out"] )
	outputImage = GafferImage.ImageAlgo.image( image["out"] )

	# R is zeroed; untouched channels pass through.
	self.assertEqual( outputImage["R"], IECore.FloatVectorData( [ 0 ] * inputImage["R"].size() ) )
	self.assertEqual( outputImage["G"], inputImage["G"] )
	self.assertEqual( outputImage["B"], inputImage["B"] )
def testPassThrough( self ) :
	"""Format, data window and metadata are perfect pass-throughs — the
	hashes (not just the values) must match the input's."""

	outR = GafferOSL.OSLShader()
	outR.loadShader( "ImageProcessing/OutChannel" )
	outR["parameters"]["channelName"].setValue( "R" )
	outR["parameters"]["channelValue"].setValue( 0 )

	imageShader = GafferOSL.OSLShader()
	imageShader.loadShader( "ImageProcessing/OutImage" )
	imageShader["parameters"]["in0"].setInput( outR["out"]["channel"] )

	reader = GafferImage.ImageReader()
	reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" ) )

	image = GafferOSL.OSLImage()
	image["in"].setInput( reader["out"] )
	image["shader"].setInput( imageShader["out"]["out"] )

	# Identical hashes prove the values are reused, not recomputed.
	self.assertEqual( image["out"]["format"].hash(), reader["out"]["format"].hash() )
	self.assertEqual( image["out"]["dataWindow"].hash(), reader["out"]["dataWindow"].hash() )
	self.assertEqual( image["out"]["metadata"].hash(), reader["out"]["metadata"].hash() )

	self.assertEqual( image["out"]["format"].getValue(), reader["out"]["format"].getValue() )
	self.assertEqual( image["out"]["dataWindow"].getValue(), reader["out"]["dataWindow"].getValue() )
	self.assertEqual( image["out"]["metadata"].getValue(), reader["out"]["metadata"].getValue() )
def testReferencePromotedPlug( self ) :
	"""A promoted closure plug survives export/load via a Reference node and
	still accepts a shader input."""

	s = Gaffer.ScriptNode()
	s["b"] = Gaffer.Box()
	s["b"]["i"] = GafferOSL.OSLImage()
	s["b"]["i"]["channels"].addChild( Gaffer.NameValuePlug( "", GafferOSL.ClosurePlug(), "testClosure", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
	p = Gaffer.PlugAlgo.promote( s["b"]["i"]["channels"]["testClosure"]["value"] )
	p.setName( "p" )

	s["b"].exportForReference( self.temporaryDirectory() + "/test.grf" )

	s["r"] = Gaffer.Reference()
	s["r"].load( self.temporaryDirectory() + "/test.grf" )

	s["s"] = GafferOSL.OSLShader()
	s["s"].loadShader( "ImageProcessing/OutImage" )

	# Succeeding without raising is the test — the referenced promoted plug
	# must accept the connection.
	s["r"]["p"].setInput( s["s"]["out"]["out"] )
def testDirtyPropagation( self ) :
	"""Dirtying an upstream plug must dirty `out.channelData`."""

	c = GafferImage.Constant()
	o = GafferOSL.OSLImage()
	o["in"].setInput( c["out"] )

	cs = GafferTest.CapturingSlot( o.plugDirtiedSignal() )
	c["color"]["r"].setValue( 1 )

	self.assertTrue( o["out"]["channelData"] in set( x[0] for x in cs ) )
def testNegativeTileCoordinates( self ) :
	"""Shading must work for tiles at negative pixel coordinates too."""

	constant = GafferImage.Constant()
	constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( -128 ), imath.V2i( 128 ) ) ) )

	outR = GafferOSL.OSLShader()
	outR.loadShader( "ImageProcessing/OutChannel" )
	outR["parameters"]["channelName"].setValue( "R" )
	outR["parameters"]["channelValue"].setValue( 1 )

	imageShader = GafferOSL.OSLShader()
	imageShader.loadShader( "ImageProcessing/OutImage" )
	imageShader["parameters"]["in0"].setInput( outR["out"]["channel"] )

	image = GafferOSL.OSLImage()
	image["in"].setInput( constant["out"] )
	image["shader"].setInput( imageShader["out"]["out"] )

	sampler = GafferImage.Sampler( image["out"], "R", image["out"]["dataWindow"].getValue() )
	# Every pixel of the (-128, 128) window must have been shaded to 1.
	for y in range( -128, 128 ) :
		for x in range( -128, 128 ) :
			self.assertEqual( sampler.sample( x, y ), 1, "Pixel {},{}".format( x, y ) )
def testDeep( self ) :
	"""OSLImage on a deep image must match OSLImage on the pre-flattened
	image, and shader globals (u/v) must be usable on deep data."""

	# Simple network to swap channels
	inLayer = GafferOSL.OSLShader()
	inLayer.loadShader( "ImageProcessing/InLayer" )

	colorToFloat = GafferOSL.OSLShader()
	colorToFloat.loadShader( "Conversion/ColorToFloat" )
	colorToFloat["parameters"]["c"].setInput( inLayer["out"]["layerColor"] )

	floatToColor = GafferOSL.OSLShader()
	floatToColor.loadShader( "Conversion/FloatToColor" )
	floatToColor["parameters"]["r"].setInput( colorToFloat["out"]["b"] )
	floatToColor["parameters"]["g"].setInput( colorToFloat["out"]["r"] )
	floatToColor["parameters"]["b"].setInput( colorToFloat["out"]["g"] )

	# Read in a deep image
	imageReader = GafferImage.ImageReader()
	imageReader["fileName"].setValue( self.representativeDeepImagePath )

	# Try running OSLImage on deep image, then flattening
	oslImageDeep = GafferOSL.OSLImage()
	oslImageDeep["channels"].addChild( Gaffer.NameValuePlug( "", Gaffer.Color3fPlug( "value", defaultValue = imath.Color3f( 1, 1, 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "channel", Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
	oslImageDeep["in"].setInput( imageReader["out"] )
	oslImageDeep["channels"]["channel"]["value"].setInput( floatToColor["out"]["c"] )

	postFlatten = GafferImage.DeepToFlat()
	postFlatten["in"].setInput( oslImageDeep["out"] )

	# Try running OSLImage on already flattened image
	preFlatten = GafferImage.DeepToFlat()
	preFlatten["in"].setInput( imageReader["out"] )

	oslImageFlat = GafferOSL.OSLImage()
	oslImageFlat["channels"].addChild( Gaffer.NameValuePlug( "", Gaffer.Color3fPlug( "value", defaultValue = imath.Color3f( 1, 1, 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "channel", Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
	oslImageFlat["in"].setInput( preFlatten["out"] )
	oslImageFlat["channels"]["channel"]["value"].setInput( floatToColor["out"]["c"] )

	# Results should match
	self.assertImagesEqual( postFlatten["out"], oslImageFlat["out"] )

	# Also test reading from UV
	shaderGlobals = GafferOSL.OSLShader( "Globals" )
	shaderGlobals.loadShader( "Utility/Globals" )

	uvToColor = GafferOSL.OSLShader()
	uvToColor.loadShader( "Conversion/FloatToColor" )
	uvToColor["parameters"]["r"].setInput( shaderGlobals["out"]["globalU"] )
	uvToColor["parameters"]["g"].setInput( shaderGlobals["out"]["globalV"] )

	inAlpha = GafferOSL.OSLShader()
	inAlpha.loadShader( "ImageProcessing/InChannel" )
	inAlpha["parameters"]["channelName"].setValue( 'A' )

	multiplyAlpha = GafferOSL.OSLShader()
	multiplyAlpha.loadShader( "Maths/MultiplyColor" )
	multiplyAlpha["parameters"]["a"].setInput( uvToColor["out"]["c"] )
	multiplyAlpha["parameters"]["b"]["r"].setInput( inAlpha["out"]["channelValue"] )
	multiplyAlpha["parameters"]["b"]["g"].setInput( inAlpha["out"]["channelValue"] )
	multiplyAlpha["parameters"]["b"]["b"].setInput( inAlpha["out"]["channelValue"] )

	oslImageDeep["channels"]["channel"]["value"].setInput( multiplyAlpha["out"]["out"] )

	outImage = GafferImage.ImageAlgo.image( postFlatten["out"] )
	size = outImage.dataWindow.size() + imath.V2i( 1 )
	# After flattening, R/G should be u/v premultiplied by alpha.
	i = 0
	for y in range( size.y ):
		for x in range( size.x ):
			self.assertAlmostEqual( outImage["R"][i], (x + 0.5) / size.x * outImage["A"][i], places = 5 )
			self.assertAlmostEqual( outImage["G"][i], (size.y - y - 0.5) / size.y * outImage["A"][i], places = 5 )
			i += 1
def testGlobals( self ) :
	"""The P, u and v shader globals must hold pixel centres and normalized
	image coordinates respectively."""

	constant = GafferImage.Constant()
	constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( -10 ), imath.V2i( 10 ) ) ) )

	globals = GafferOSL.OSLShader()
	globals.loadShader( "Utility/Globals" )

	# P goes into the default layer; u and v into their own channels.
	outP = GafferOSL.OSLShader()
	outP.loadShader( "ImageProcessing/OutLayer" )
	outP["parameters"]["layerColor"].setInput( globals["out"]["globalP"] )

	outU = GafferOSL.OSLShader()
	outU.loadShader( "ImageProcessing/OutChannel" )
	outU["parameters"]["channelName"].setValue( "u" )
	outU["parameters"]["channelValue"].setInput( globals["out"]["globalU"] )

	outV = GafferOSL.OSLShader()
	outV.loadShader( "ImageProcessing/OutChannel" )
	outV["parameters"]["channelName"].setValue( "v" )
	outV["parameters"]["channelValue"].setInput( globals["out"]["globalV"] )

	imageShader = GafferOSL.OSLShader()
	imageShader.loadShader( "ImageProcessing/OutImage" )
	imageShader["parameters"]["in0"].setInput( outP["out"]["layer"] )
	imageShader["parameters"]["in1"].setInput( outU["out"]["channel"] )
	imageShader["parameters"]["in2"].setInput( outV["out"]["channel"] )

	image = GafferOSL.OSLImage()
	image["in"].setInput( constant["out"] )
	image["shader"].setInput( imageShader["out"]["out"] )

	displayWindow = image["out"]["format"].getValue().getDisplayWindow()

	samplerR = GafferImage.Sampler( image["out"], "R", displayWindow )
	samplerG = GafferImage.Sampler( image["out"], "G", displayWindow )
	samplerB = GafferImage.Sampler( image["out"], "B", displayWindow )
	samplerU = GafferImage.Sampler( image["out"], "u", displayWindow )
	samplerV = GafferImage.Sampler( image["out"], "v", displayWindow )

	size = imath.V2f( displayWindow.size() )
	uvStep = imath.V2f( 1.0 ) / size
	# uv is sampled at pixel centres, so the minimum is half a step in.
	uvMin = 0.5 * uvStep

	for y in range( displayWindow.min().y, displayWindow.max().y ) :
		for x in range( displayWindow.min().x, displayWindow.max().x ) :
			# P.x/P.y (written to R/G) are the pixel centre coordinates.
			self.assertEqual( samplerR.sample( x, y ), x + 0.5, "Pixel {},{}".format( x, y ) )
			self.assertEqual( samplerG.sample( x, y ), y + 0.5, "Pixel {},{}".format( x, y ) )
			self.assertEqual( samplerB.sample( x, y ), 0, "Pixel {},{}".format( x, y ) )
			uv = uvMin + uvStep * imath.V2f( imath.V2i( x, y ) - displayWindow.min() )
			self.assertAlmostEqual( samplerU.sample( x, y ), uv.x, delta = 0.0000001, msg = "Pixel {},{}".format( x, y ) )
			self.assertAlmostEqual( samplerV.sample( x, y ), uv.y, delta = 0.0000001, msg = "Pixel {},{}".format( x, y ) )
def testTextureOrientation( self ) :
	"""A texture sampled via u/v must keep its vertical orientation — a
	vertical ramp texture should produce a matching vertical ramp in R."""

	constant = GafferImage.Constant()
	constant["format"].setValue( GafferImage.Format( 32, 32 ) )

	textureFileName = os.path.dirname( __file__ ) + "/images/vRamp.tx"

	outLayer = GafferOSL.OSLCode()
	outLayer["out"]["layer"] = GafferOSL.ClosurePlug(
		direction = Gaffer.Plug.Direction.Out,
		flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
	)
	outLayer["code"].setValue( 'layer = outLayer( "", texture( "{}", u, v ) )'.format( textureFileName ) )

	outImage = GafferOSL.OSLShader()
	outImage.loadShader( "ImageProcessing/OutImage" )
	outImage["parameters"]["in0"].setInput( outLayer["out"]["layer"] )

	oslImage = GafferOSL.OSLImage()
	oslImage["in"].setInput( constant["out"] )
	oslImage["shader"].setInput( outImage["out"]["out"] )

	sampler = GafferImage.Sampler( oslImage["out"], "R", oslImage["out"]["dataWindow"].getValue() )
	# Sample a single column; the value should increase with y.
	for y in range( 0, 31 ) :
		self.assertAlmostEqual( sampler.sample( 5, y ), (y + 0.5) / 32.0, delta = 0.02 )
def testPullsMinimalSetOfInputChannels( self ) :
	"""OSLImage should only compute the input channels the shader needs."""

	constant = GafferImage.Constant()
	constant["color"].setValue( imath.Color4f( 0.1101, 0.1224, 0.1353, 0.135 ) )
	# One tile exactly, so per-channel compute counts are easy to reason about.
	constant["format"].setValue(
		GafferImage.Format( GafferImage.ImagePlug.tileSize(), GafferImage.ImagePlug.tileSize() )
	)

	outLayer = GafferOSL.OSLShader()
	outLayer.loadShader( "ImageProcessing/OutLayer" )

	outImage = GafferOSL.OSLShader()
	outImage.loadShader( "ImageProcessing/OutImage" )
	outImage["parameters"][0].setInput( outLayer["out"]["layer"] )

	oslImage = GafferOSL.OSLImage()
	oslImage["in"].setInput( constant["out"] )
	oslImage["shader"].setInput( outImage["out"]["out"] )

	with Gaffer.PerformanceMonitor() as pm :
		GafferImage.ImageAlgo.image( oslImage["out"] )

	# Because the shader doesn't use any input channels,
	# the OSLImage node shouldn't have needed to pull on
	# any of the RGB channels. Because the shader doesn't
	# write to alpha, it does need to pull on alpha to pass
	# it through. Hence we expect a single computation for
	# the Constant's channelData.

	s = pm.plugStatistics( constant["out"]["channelData"] )
	self.assertEqual( s.computeCount, 1 )
def testShaderNetworkGeneratedInGlobalContext( self ) :
	"""The internal shader network must be generated in a clean global context, without per-tile variables."""

	constant = GafferImage.Constant()

	# Dynamic closure output on an OSLCode node, driving an OutImage shader.
	outLayer = GafferOSL.OSLCode()
	outLayer["out"]["layer"] = GafferOSL.ClosurePlug(
		direction = Gaffer.Plug.Direction.Out,
		flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
	)
	outLayer["code"].setValue( 'layer = outLayer( "", color( 0, 1, 0) )' )

	outImage = GafferOSL.OSLShader()
	outImage.loadShader( "ImageProcessing/OutImage" )
	outImage["parameters"]["in0"].setInput( outLayer["out"]["layer"] )

	oslImage = GafferOSL.OSLImage()
	oslImage["in"].setInput( constant["out"] )
	oslImage["shader"].setInput( outImage["out"]["out"] )

	# Monitor the contexts in which the internal `__oslCode` node is evaluated
	# while all output tiles are processed.
	with Gaffer.ContextMonitor( oslImage["__oslCode"] ) as cm :
		GafferImageTest.processTiles( oslImage["out"] )

	cs = cm.combinedStatistics()
	# A single unique context with no image-specific variables shows the
	# network was generated once globally rather than per tile/channel.
	self.assertEqual( cs.numUniqueContexts(), 1 )
	self.assertNotIn( "image:tileOrigin", cs.variableNames() )
	self.assertNotIn( "image:channelName", cs.variableNames() )
def testAllTypes( self ) :
	"""Color3f and float channel values should all appear in the output image."""

	node = GafferOSL.OSLImage()
	# 5x5 default format : 25 pixels per channel.
	node["defaultFormat"].setValue( GafferImage.Format( imath.Box2i( imath.V2i(0), imath.V2i( 5 ) ) ) )

	for channelName, channelValue in [
		( "", imath.Color3f(1,3,5) ),
		( "testFloat", 42.42 ),
		( "testColor", imath.Color3f(12,13,14) ),
	] :
		node["channels"].addChild( Gaffer.NameValuePlug( channelName, channelValue ) )

	result = GafferImage.ImageAlgo.image( node['out'] )

	# Every channel is constant across the whole 25-pixel image.
	for channel, expected in [
		( "R", 1 ), ( "G", 3 ), ( "B", 5 ),
		( "testFloat", 42.42 ),
		( "testColor.R", 12 ), ( "testColor.G", 13 ), ( "testColor.B", 14 ),
	] :
		self.assertEqual( result[channel], IECore.FloatVectorData( [expected]*25 ) )
def testClosure( self ) :
	"""A closure channel driven by an OSLCode output should create all the layered channels it emits."""

	image = GafferOSL.OSLImage()
	image["defaultFormat"].setValue( GafferImage.Format( imath.Box2i( imath.V2i(0), imath.V2i( 5 ) ) ) )
	image["channels"].addChild( Gaffer.NameValuePlug( "testClosure", GafferOSL.ClosurePlug() ) )

	# One closure writing both a layer and a single channel.
	code = GafferOSL.OSLCode( "OSLCode" )
	code["out"].addChild( GafferOSL.ClosurePlug( "output1", direction = Gaffer.Plug.Direction.Out ) )
	code["code"].setValue( 'output1 = outLayer( "blah", color( 0.1, 0.2, 0.3 ) ) + outChannel( "foo", 0.5 );' )

	image["channels"][0]["value"].setInput( code["out"]["output1"] )

	result = GafferImage.ImageAlgo.image( image['out'] )
	for channelName, expected in [
		( "blah.R", 0.1 ), ( "blah.G", 0.2 ), ( "blah.B", 0.3 ), ( "foo", 0.5 ),
	] :
		self.assertEqual( result[channelName], IECore.FloatVectorData( [expected]*25 ) )
def testUndo( self ) :
	"""Check that internal `__oslCode` parameters track channel add/remove through undo."""

	s = Gaffer.ScriptNode()

	i = GafferOSL.OSLImage()
	s.addChild( i )

	self.assertFalse( s.undoAvailable() )
	self.assertEqual( len( i["__oslCode"]["parameters"].children() ), 0 )

	# Adding a color channel and a float channel creates 4 internal
	# parameters ( see assertion below ).
	with Gaffer.UndoScope( s ) :
		i["channels"].addChild( Gaffer.NameValuePlug( "testColor", imath.Color3f( 42 ) ) )
		i["channels"].addChild( Gaffer.NameValuePlug( "testFloat", 42.42 ) )

	self.assertTrue( s.undoAvailable() )
	self.assertEqual( len( i["__oslCode"]["parameters"].children() ), 4 )

	with Gaffer.UndoScope( s ) :
		del i["channels"][0]
		del i["channels"][0]

	self.assertEqual( len( i["__oslCode"]["parameters"].children() ), 0 )

	# Test that the internal connections are recreated correctly when undoing adding and removing channels
	s.undo()
	self.assertEqual( len( i["__oslCode"]["parameters"].children() ), 4 )

	s.undo()
	self.assertEqual( len( i["__oslCode"]["parameters"].children() ), 0 )
def testDefaultFormat( self ):
	"""`defaultFormat` drives the output when `in` is unconnected; a connected input takes precedence."""

	constant = GafferImage.Constant()

	oslImage = GafferOSL.OSLImage()
	oslImage["channels"].addChild( Gaffer.NameValuePlug( "", imath.Color3f( 0.5, 0.6, 0.7 ) ) )

	# With no input, the default value of `defaultFormat` ( 1920x1080 ) applies.
	self.assertEqual( oslImage["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 1920, 1080 ) ) )
	self.assertEqual( oslImage["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 1920, 1080 ) ) )

	# Setting defaultFormat changes the output windows and pixel count ( 5x5 = 25 ).
	oslImage["defaultFormat"].setValue( GafferImage.Format( imath.Box2i( imath.V2i(0), imath.V2i( 5 ) ) ) )
	self.assertEqual( oslImage["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 5, 5 ) ) )
	self.assertEqual( oslImage["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 5, 5 ) ) )
	self.assertEqual( GafferImage.ImageAlgo.image( oslImage["out"] )["G"], IECore.FloatVectorData( [0.6] * 25 ) )

	# Connecting an input overrides defaultFormat entirely.
	oslImage["in"].setInput( constant["out"] )
	self.assertEqual( oslImage["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 1920, 1080 ) ) )
	self.assertEqual( oslImage["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 1920, 1080 ) ) )

	# And the output follows the input's format from then on ( 4x4 = 16 pixels ).
	constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i(0), imath.V2i( 4 ) ) ) )
	self.assertEqual( oslImage["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 4, 4 ) ) )
	self.assertEqual( oslImage["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 4, 4 ) ) )
	self.assertEqual( GafferImage.ImageAlgo.image( oslImage["out"] )["G"], IECore.FloatVectorData( [0.6] * 16 ) )
# Extreme example of doing something very expensive in OSLImage
def mandelbrotNode( self ):
	"""Return an OSLCode node computing a mandelbrot set, with an `iterations` parameter controlling cost.

	Used by the cache-policy and performance tests below as a deliberately
	slow-to-evaluate shader.
	"""

	mandelbrotCode = GafferOSL.OSLCode()
	mandelbrotCode["parameters"].addChild( Gaffer.IntPlug( "iterations", defaultValue = 0, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ) )
	mandelbrotCode["out"].addChild( Gaffer.FloatPlug( "outFloat", direction = Gaffer.Plug.Direction.Out ) )
	mandelbrotCode["code"].setValue( inspect.cleandoc(
		"""
		// Basic mandelbrot adapted from surface shader here:
		// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage/blob/master/src/shaders/mandelbrot.osl
		point center = point (0,0,0);
		float scale = 2;
		point cent = center;
		point c = scale * point(2*(u-0.5), 2*((1-v)-0.5), 0) + cent;
		point z = c;
		int i;
		for (i = 1; i < iterations && dot(z,z) < 4.0; ++i) {
			float x = z[0], y = z[1];
			z = point (x*x - y*y, 2*x*y, 0) + c;
		}
		if (i < iterations) {
			float f = pow(float(i)/iterations, 1/log10(float(iterations)));
			outFloat = f;
		} else {
			outFloat = 0;
		}
		"""
	) )
	return mandelbrotCode
def testBadCachePolicyHang( self ):
	"""Regression test for a tbb task-stealing deadlock when OSLImage.shadingPlug used the legacy cache policy."""

	# Using the legacy cache policy for OSLImage.shadingPlug creates a hang due to tbb task stealing,
	# though it's a bit hard to actually demonstrate

	constant = GafferImage.Constant()
	constant["format"].setValue( GafferImage.Format( 128, 128, 1.000 ) )

	# Need a slow to compute OSL code in order to trigger hang
	mandelbrotCode = self.mandelbrotNode()

	# In order to trigger the hang, we need to mix threads which are stuck waiting for an expression which
	# uses the Standard policy with threads that are actually finishing, so that tbb tries to start up new
	# threads while we're waiting for the expression result. To do this, we use the "var" context variable
	# to create two versions of this OSLCode
	mandelbrotCode["varExpression"] = Gaffer.Expression()
	mandelbrotCode["varExpression"].setExpression( 'parent.parameters.iterations = 100000 + context( "var", 0 );', "OSL" )

	oslImage = GafferOSL.OSLImage()
	oslImage["channels"].addChild( Gaffer.NameValuePlug( "", Gaffer.Color3fPlug( "value", defaultValue = imath.Color3f( 1, 1, 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "channel", Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
	oslImage["in"].setInput( constant["out"] )
	oslImage["channels"]["channel"]["value"][0].setInput( mandelbrotCode["out"]["outFloat"] )
	oslImage["channels"]["channel"]["value"][1].setInput( mandelbrotCode["out"]["outFloat"] )
	oslImage["channels"]["channel"]["value"][2].setInput( mandelbrotCode["out"]["outFloat"] )

	# This imageStats is use to create non-blocking slow calculations
	imageStats = GafferImage.ImageStats()
	imageStats["in"].setInput( oslImage["out"] )
	imageStats["area"].setValue( imath.Box2i( imath.V2i( 0, 0 ), imath.V2i( 64, 64 ) ) )

	# This box does the non-blocking slow calculation, followed by a blocking slow calculation.
	# This ensures that tasks which do just the non-block calculation will start finishing while
	# the blocking slow calculation is still running, allowing tbb to try running more threads
	# on the blocking calcluation, realizing they can't run, and stealing tasks onto those threads
	# which can hit the Standard policy lock on the expression upstream and deadlock, unless the
	# OSLImage isolates its threads correctly
	expressionBox = Gaffer.Box()
	expressionBox.addChild( Gaffer.FloatVectorDataPlug( "inChannelData", defaultValue = IECore.FloatVectorData( [ ] ) ) )
	expressionBox.addChild( Gaffer.FloatPlug( "inStat" ) )
	expressionBox.addChild( Gaffer.FloatPlug( "out", direction = Gaffer.Plug.Direction.Out ) )
	expressionBox["inChannelData"].setInput( oslImage["out"]["channelData"] )
	expressionBox["inStat"].setInput( imageStats["average"]["r"] )

	expressionBox["contextVariables"] = Gaffer.ContextVariables()
	expressionBox["contextVariables"].setup( Gaffer.FloatVectorDataPlug( "in", defaultValue = IECore.FloatVectorData( [ ] ) ) )
	expressionBox["contextVariables"]["variables"].addChild( Gaffer.NameValuePlug( "image:tileOrigin", Gaffer.V2iPlug( "value" ), True, "member1" ) )
	expressionBox["contextVariables"]["variables"].addChild( Gaffer.NameValuePlug( "image:channelName", Gaffer.StringPlug( "value", defaultValue = 'R' ), True, "member2" ) )
	expressionBox["contextVariables"]["variables"].addChild( Gaffer.NameValuePlug( "var", Gaffer.IntPlug( "value", defaultValue = 1 ), True, "member3" ) )
	expressionBox["contextVariables"]["in"].setInput( expressionBox["inChannelData"] )

	expressionBox["expression"] = Gaffer.Expression()
	expressionBox["expression"].setExpression( inspect.cleandoc(
		"""
		d = parent["contextVariables"]["out"]
		parent["out"] = d[0] + parent["inStat"]
		"""
	) )

	# Create a switch to mix which tasks perform the non-blocking or blocking calculation - we need a mixture
	# to trigger the hang
	switch = Gaffer.Switch()
	switch.setup( Gaffer.IntPlug( "in", defaultValue = 0, ) )
	switch["in"][0].setInput( expressionBox["out"] )
	switch["in"][1].setInput( imageStats["average"]["r"] )

	switch["switchExpression"] = Gaffer.Expression()
	switch["switchExpression"].setExpression( 'parent.index = ( stoi( context( "testContext", "0" ) ) % 10 ) > 5;', "OSL" )

	# In order to evaluate this expression a bunch of times at once with different values of "testContext",
	# we set up a simple scene that can be evaluated with GafferSceneTest.traversScene.
	# In theory, we could use a simple function that used a parallel_for to evaluate switch["out"], but for
	# some reason we don't entirely understand, this does not trigger the hang
	import GafferSceneTest
	import GafferScene

	sphere = GafferScene.Sphere()

	pathFilter = GafferScene.PathFilter()
	pathFilter["paths"].setValue( IECore.StringVectorData( [ '/sphere' ] ) )

	customAttributes = GafferScene.CustomAttributes()
	customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "foo", Gaffer.FloatPlug( "value" ), True, "member1" ) )
	customAttributes["attributes"]["member1"]["value"].setInput( switch["out"] )
	customAttributes["in"].setInput( sphere["out"] )
	customAttributes["filter"].setInput( pathFilter["out"] )

	collectScenes = GafferScene.CollectScenes()
	collectScenes["in"].setInput( customAttributes["out"] )
	collectScenes["rootNames"].setValue( IECore.StringVectorData( [ str(i) for i in range(1000) ] ) )
	collectScenes["rootNameVariable"].setValue( 'testContext' )

	# When OSLImage.shadingPlug is not correctly isolated, and grain size on ShadingEngine is smaller than the
	# image tile size, this fails about 50% of the time. Running it 5 times makes the failure pretty consistent.
	for i in range( 5 ):
		Gaffer.ValuePlug.clearCache()
		Gaffer.ValuePlug.clearHashCache()
		GafferSceneTest.traverseScene( collectScenes["out"] )
@GafferTest.TestRunner.PerformanceTestMethod()
def testMinimalPerf( self ) :
	"""Measure the constant per-tile overhead of the cheapest possible OSLImage."""

	constant = GafferImage.Constant()
	constant["format"].setValue( GafferImage.Format( 4096, 4096 ) )

	floatToColor = GafferOSL.OSLShader()
	floatToColor.loadShader( "Conversion/FloatToColor" )

	oslImage = GafferOSL.OSLImage()
	oslImage["in"].setInput( constant["out"] )
	oslImage["channels"].addChild( Gaffer.NameValuePlug( "", Gaffer.Color3fPlug( "value" ), True, "channel" ) )
	oslImage["channels"]["channel"]["value"].setInput( floatToColor["out"]["c"] )

	# Pre-pull the input so the timed section below measures only the OSLImage itself.
	GafferImage.ImageAlgo.image( constant["out"] )

	# Run the fastest possible OSLImage on lots of tiles, to highlight any constant overhead
	with GafferTest.TestRunner.PerformanceScope() :
		GafferImage.ImageAlgo.image( oslImage["out"] )
@GafferTest.TestRunner.PerformanceTestMethod( repeat = 1)
def testCollaboratePerf( self ) :
	"""Measure an expensive OSLImage whose tiles are pulled repeatedly downstream."""

	# Test an expensive OSLImage, with many output tiles depending on the same input tiles,
	# which should give TaskCollaborate a chance to show some benefit

	constant = GafferImage.Constant()
	constant["format"].setValue( GafferImage.Format( 128, 128 ) )

	# Keep just R so the shader only needs to produce a single channel.
	deleteChannels = GafferImage.DeleteChannels( "DeleteChannels" )
	deleteChannels["in"].setInput( constant["out"] )
	deleteChannels["mode"].setValue( GafferImage.DeleteChannels.Mode.Keep )
	deleteChannels["channels"].setValue( 'R' )

	# Deliberately slow shader ( see mandelbrotNode above ).
	mandelbrotCode = self.mandelbrotNode()
	mandelbrotCode["parameters"]["iterations"].setValue( 500000 )

	oslImage = GafferOSL.OSLImage()
	oslImage["in"].setInput( deleteChannels["out"] )
	oslImage["channels"].addChild( Gaffer.NameValuePlug( "R", Gaffer.FloatPlug( "value" ), True, "channel" ) )
	oslImage["channels"]["channel"]["value"].setInput( mandelbrotCode["out"]["outFloat"] )

	resize = GafferImage.Resize()
	resize["in"].setInput( oslImage["out"] )
	resize["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 2048 ) ), 1 ) )
	# We use a resize because it pulls the input tiles repeatedly, we don't want to spend time on resizing
	# pixels, so use a fast filter
	resize["filter"].setValue( 'box' )

	with GafferTest.TestRunner.PerformanceScope() :
		GafferImage.ImageAlgo.image( resize["out"] )
# Allow the test file to be run directly as a script.
if __name__ == "__main__":
	unittest.main()
OSLImageTest : Add test that OSL splines match Cortex splines
##########################################################################
#
# Copyright (c) 2013-2015, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import imath
import inspect
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
import GafferOSL
import GafferOSLTest
class OSLImageTest( GafferImageTest.ImageTestCase ) :
	"""Tests for the GafferOSL.OSLImage node."""

	# Deep test image shared by tests that need representative deep data.
	representativeDeepImagePath = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/representativeDeepImage.exr" )
def test( self ) :
	"""End-to-end check of OSLImage : pass-through behaviour, shader-driven channels and dirty propagation.

	Everything runs twice : once driving channels from a closure-based
	network, once from a plain color plug.
	"""

	for useClosure in [ False, True ]:

		# Network that reads R, G and B from the input image...
		getRed = GafferOSL.OSLShader()
		getRed.loadShader( "ImageProcessing/InChannel" )
		getRed["parameters"]["channelName"].setValue( "R" )

		getGreen = GafferOSL.OSLShader()
		getGreen.loadShader( "ImageProcessing/InChannel" )
		getGreen["parameters"]["channelName"].setValue( "G" )

		getBlue = GafferOSL.OSLShader()
		getBlue.loadShader( "ImageProcessing/InChannel" )
		getBlue["parameters"]["channelName"].setValue( "B" )

		# ...and recombines them with R and B swapped.
		floatToColor = GafferOSL.OSLShader()
		floatToColor.loadShader( "Conversion/FloatToColor" )
		floatToColor["parameters"]["r"].setInput( getBlue["out"]["channelValue"] )
		floatToColor["parameters"]["g"].setInput( getGreen["out"]["channelValue"] )
		floatToColor["parameters"]["b"].setInput( getRed["out"]["channelValue"] )

		reader = GafferImage.ImageReader()
		reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" ) )

		# Add an extra channel the shader never touches, to check pass-through.
		shuffle = GafferImage.Shuffle()
		shuffle["in"].setInput( reader["out"] )
		shuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "channel" ) )
		shuffle["channels"]["channel"]["out"].setValue( 'unchangedR' )
		shuffle["channels"]["channel"]["in"].setValue( 'R' )

		image = GafferOSL.OSLImage()
		image["in"].setInput( shuffle["out"] )

		# we haven't connected the shader yet, so the node should act as a pass through
		self.assertEqual( GafferImage.ImageAlgo.image( image["out"] ), GafferImage.ImageAlgo.image( shuffle["out"] ) )

		# that should all change when we hook up a shader
		if useClosure:

			outRGB = GafferOSL.OSLShader()
			outRGB.loadShader( "ImageProcessing/OutLayer" )
			outRGB["parameters"]["layerColor"].setInput( floatToColor["out"]["c"] )

			imageShader = GafferOSL.OSLShader()
			imageShader.loadShader( "ImageProcessing/OutImage" )
			imageShader["parameters"]["in0"].setInput( outRGB["out"]["layer"] )

			image["channels"].addChild( Gaffer.NameValuePlug( "", GafferOSL.ClosurePlug(), "testClosure" ) )

		else:

			image["channels"].addChild( Gaffer.NameValuePlug( "", imath.Color3f(), "testColor" ) )

		cs = GafferTest.CapturingSlot( image.plugDirtiedSignal() )

		def checkDirtiness( expected):
			# Compare the dirtied plugs captured so far against the
			# expected names, then reset the capture for the next check.
			self.assertEqual( [ i[0].fullName() for i in cs ], [ "OSLImage." + i for i in expected ] )
			del cs[:]

		if useClosure:
			image["channels"]["testClosure"]["value"].setInput( imageShader["out"]["out"] )
			channelsDirtied = ["channels.testClosure.value", "channels.testClosure"]
		else:
			image["channels"]["testColor"]["value"].setInput( floatToColor["out"]["c"] )
			channelsDirtied = [
				"channels.testColor.value.r", "channels.testColor.value.g", "channels.testColor.value.b",
				"channels.testColor.value", "channels.testColor"
			]

		checkDirtiness( channelsDirtied + [
			"channels", "__shader", "__shading",
			"__affectedChannels", "out.channelNames", "out.channelData", "out"
		] )

		inputImage = GafferImage.ImageAlgo.image( shuffle["out"] )

		with Gaffer.ContextMonitor( image["__shading"] ) as monitor :

			self.assertEqual( image["out"].channelNames(), IECore.StringVectorData( [ "A", "B", "G", "R", "unchangedR" ] ) )
			# Evaluating channel names only requires evaluating the shading plug if we have a closure
			self.assertEqual( monitor.combinedStatistics().numUniqueContexts(), 1 if useClosure else 0 )

			# Channels we don't touch should be passed through unaltered
			for channel, changed in [('B',True), ('G',True), ('R',True), ('A',False), ('unchangedR',False) ]:
				self.assertEqual(
					image["out"].channelDataHash( channel, imath.V2i( 0, 0 ) ) ==
					shuffle["out"].channelDataHash( channel, imath.V2i( 0, 0 ) ),
					not changed
				)
				image["out"].channelData( channel, imath.V2i( 0, 0 ) )

			# Should only need one shading evaluate for all channels
			self.assertEqual( monitor.combinedStatistics().numUniqueContexts(), 1 )

		# The shader swaps R and B; G is rewritten but identical.
		outputImage = GafferImage.ImageAlgo.image( image["out"] )
		self.assertNotEqual( inputImage, outputImage )
		self.assertEqual( outputImage["R"], inputImage["B"] )
		self.assertEqual( outputImage["G"], inputImage["G"] )
		self.assertEqual( outputImage["B"], inputImage["R"] )

		# changes in the shader network should signal more dirtiness
		getGreen["parameters"]["channelName"].setValue( "R" )
		checkDirtiness( channelsDirtied + [
			"channels", "__shader", "__shading",
			"__affectedChannels", "out.channelNames", "out.channelData", "out"
		] )

		floatToColor["parameters"]["r"].setInput( getRed["out"]["channelValue"] )
		checkDirtiness( channelsDirtied + [
			"channels", "__shader", "__shading",
			"__affectedChannels", "out.channelNames", "out.channelData", "out"
		] )

		# Now the whole network reads R everywhere, so RGB all equal input R.
		inputImage = GafferImage.ImageAlgo.image( shuffle["out"] )
		outputImage = GafferImage.ImageAlgo.image( image["out"] )
		self.assertEqual( outputImage["R"], inputImage["R"] )
		self.assertEqual( outputImage["G"], inputImage["R"] )
		self.assertEqual( outputImage["B"], inputImage["R"] )
		self.assertEqual( outputImage["A"], inputImage["A"] )
		self.assertEqual( outputImage["unchangedR"], inputImage["unchangedR"] )

		# Disconnecting the input dirties everything image-related.
		image["in"].setInput( None )
		checkDirtiness( [
			'in.viewNames', 'in.format', 'in.dataWindow', 'in.metadata', 'in.deep', 'in.sampleOffsets', 'in.channelNames', 'in.channelData', 'in',
			'out.viewNames', '__shading', '__affectedChannels',
			'out.channelNames', 'out.channelData', 'out.format', 'out.dataWindow', 'out.metadata', 'out.deep', 'out.sampleOffsets', 'out'
		] )

		# With no input, defaultFormat feeds the output via the internal __defaultIn.
		image["defaultFormat"]["displayWindow"]["max"]["x"].setValue( 200 )
		checkDirtiness( [
			'defaultFormat.displayWindow.max.x', 'defaultFormat.displayWindow.max', 'defaultFormat.displayWindow', 'defaultFormat',
			'__defaultIn.format', '__defaultIn.dataWindow', '__defaultIn', '__shading', '__affectedChannels',
			'out.channelNames', 'out.channelData', 'out.format', 'out.dataWindow', 'out'
		] )

		constant = GafferImage.Constant()
		image["in"].setInput( constant["out"] )
		checkDirtiness( [
			'in.viewNames', 'in.format', 'in.dataWindow', 'in.metadata', 'in.deep', 'in.sampleOffsets', 'in.channelNames', 'in.channelData', 'in',
			'out.viewNames', '__shading', '__affectedChannels',
			'out.channelNames', 'out.channelData', 'out.format', 'out.dataWindow', 'out.metadata', 'out.deep', 'out.sampleOffsets', 'out'
		] )

		image["in"].setInput( shuffle["out"] )

		# Renaming the written layer should rename the output channels...
		if useClosure:
			outRGB["parameters"]["layerName"].setValue( "newLayer" )
		else:
			image["channels"][0]["name"].setValue( "newLayer" )

		self.assertEqual( image["out"].channelNames(), IECore.StringVectorData(
			[ "A", "B", "G", "R", "newLayer.B", "newLayer.G", "newLayer.R", "unchangedR" ]
		) )

		# ...leaving all the original channels as exact pass-throughs.
		for channel in ['B', 'G', 'R', 'A', 'unchangedR' ]:
			self.assertEqual(
				image["out"].channelDataHash( channel, imath.V2i( 0, 0 ) ),
				shuffle["out"].channelDataHash( channel, imath.V2i( 0, 0 ) )
			)
			self.assertEqual(
				image["out"].channelData( channel, imath.V2i( 0, 0 ) ),
				shuffle["out"].channelData( channel, imath.V2i( 0, 0 ) )
			)

		crop = GafferImage.Crop()
		crop["area"].setValue( imath.Box2i( imath.V2i( 0, 0 ), imath.V2i( 0, 0 ) ) )
		crop["in"].setInput( shuffle["out"] )

		image["in"].setInput( crop["out"] )

		if useClosure:
			# When using closures, we can't find out about the new channels being added if the datawindow is
			# empty
			self.assertEqual( image["out"].channelNames(), IECore.StringVectorData(
				[ "A", "B", "G", "R", "unchangedR" ]
			) )
		else:
			self.assertEqual( image["out"].channelNames(), IECore.StringVectorData(
				[ "A", "B", "G", "R", "newLayer.B", "newLayer.G", "newLayer.R", "unchangedR" ]
			) )
def testAcceptsShaderSwitch( self ) :
	"""A legacy `shader` input from a Switch is redirected to the legacy closure channel on script load."""

	s = Gaffer.ScriptNode()
	s["image"] = GafferOSL.OSLImage()
	s["switch"] = Gaffer.Switch()
	s["switch"].setup( Gaffer.Plug() )

	# We're testing a backwards compatibility special case that is
	# only enabled when loading a script, hence the use of `execute()`.
	s.execute( """script["image"]["shader"].setInput( script["switch"]["out"] )""" )

	legacyInput = s["image"]["channels"]["legacyClosure"]["value"].getInput()
	self.assertTrue( legacyInput.isSame( s["switch"]["out"] ) )
def testAcceptsDot( self ) :
	"""A legacy `shader` input from a Dot is redirected to the legacy closure channel on script load."""

	s = Gaffer.ScriptNode()
	s["image"] = GafferOSL.OSLImage()
	s["switch"] = Gaffer.Switch()
	s["switch"].setup( Gaffer.Plug() )
	s["dot"] = Gaffer.Dot()
	s["dot"].setup( s["switch"]["out"] )

	# We're testing a backwards compatibility special case that is
	# only enabled when loading a script, hence the use of `execute()`.
	s.execute( """script["image"]["shader"].setInput( script["dot"]["out"] )""" )

	legacyInput = s["image"]["channels"]["legacyClosure"]["value"].getInput()
	self.assertTrue( legacyInput.isSame( s["dot"]["out"] ) )
def testChannelWithZeroValue( self ) :
	"""A channel explicitly written with value 0 must override the input, not be treated as untouched."""

	writeZeroR = GafferOSL.OSLShader()
	writeZeroR.loadShader( "ImageProcessing/OutChannel" )
	writeZeroR["parameters"]["channelName"].setValue( "R" )
	writeZeroR["parameters"]["channelValue"].setValue( 0 )

	network = GafferOSL.OSLShader()
	network.loadShader( "ImageProcessing/OutImage" )
	network["parameters"]["in0"].setInput( writeZeroR["out"]["channel"] )

	reader = GafferImage.ImageReader()
	reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" ) )

	oslImage = GafferOSL.OSLImage()
	oslImage["in"].setInput( reader["out"] )
	oslImage["shader"].setInput( network["out"]["out"] )

	before = GafferImage.ImageAlgo.image( reader["out"] )
	after = GafferImage.ImageAlgo.image( oslImage["out"] )

	# R is forced to zero everywhere ; G and B pass through unchanged.
	self.assertEqual( after["R"], IECore.FloatVectorData( [ 0 ] * before["R"].size() ) )
	self.assertEqual( after["G"], before["G"] )
	self.assertEqual( after["B"], before["B"] )
def testPassThrough( self ) :
	"""Plugs the shader cannot affect ( format, dataWindow, metadata ) must be exact pass-throughs."""

	writeZeroR = GafferOSL.OSLShader()
	writeZeroR.loadShader( "ImageProcessing/OutChannel" )
	writeZeroR["parameters"]["channelName"].setValue( "R" )
	writeZeroR["parameters"]["channelValue"].setValue( 0 )

	network = GafferOSL.OSLShader()
	network.loadShader( "ImageProcessing/OutImage" )
	network["parameters"]["in0"].setInput( writeZeroR["out"]["channel"] )

	reader = GafferImage.ImageReader()
	reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" ) )

	oslImage = GafferOSL.OSLImage()
	oslImage["in"].setInput( reader["out"] )
	oslImage["shader"].setInput( network["out"]["out"] )

	# Identical hashes prove a true pass-through rather than a recomputed copy.
	for plugName in ( "format", "dataWindow", "metadata" ) :
		self.assertEqual( oslImage["out"][plugName].hash(), reader["out"][plugName].hash() )
		self.assertEqual( oslImage["out"][plugName].getValue(), reader["out"][plugName].getValue() )
def testReferencePromotedPlug( self ) :
	"""A promoted closure plug should survive export/reference and accept a new shader input."""

	s = Gaffer.ScriptNode()
	s["b"] = Gaffer.Box()
	s["b"]["i"] = GafferOSL.OSLImage()
	s["b"]["i"]["channels"].addChild( Gaffer.NameValuePlug( "", GafferOSL.ClosurePlug(), "testClosure", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
	p = Gaffer.PlugAlgo.promote( s["b"]["i"]["channels"]["testClosure"]["value"] )
	p.setName( "p" )

	s["b"].exportForReference( self.temporaryDirectory() + "/test.grf" )

	s["r"] = Gaffer.Reference()
	s["r"].load( self.temporaryDirectory() + "/test.grf" )

	s["s"] = GafferOSL.OSLShader()
	s["s"].loadShader( "ImageProcessing/OutImage" )

	# Connecting to the promoted plug on the Reference must succeed.
	s["r"]["p"].setInput( s["s"]["out"]["out"] )
def testDirtyPropagation( self ) :
	"""Changing an upstream plug must dirty the OSLImage output channel data."""

	constant = GafferImage.Constant()

	oslImage = GafferOSL.OSLImage()
	oslImage["in"].setInput( constant["out"] )

	dirtied = GafferTest.CapturingSlot( oslImage.plugDirtiedSignal() )
	constant["color"]["r"].setValue( 1 )

	dirtiedPlugs = { slotArgs[0] for slotArgs in dirtied }
	self.assertTrue( oslImage["out"]["channelData"] in dirtiedPlugs )
def testNegativeTileCoordinates( self ) :
	"""The shader must be evaluated correctly for tiles with negative origins."""

	constant = GafferImage.Constant()
	# Data window spans negative and positive coordinates.
	constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( -128 ), imath.V2i( 128 ) ) ) )

	outR = GafferOSL.OSLShader()
	outR.loadShader( "ImageProcessing/OutChannel" )
	outR["parameters"]["channelName"].setValue( "R" )
	outR["parameters"]["channelValue"].setValue( 1 )

	imageShader = GafferOSL.OSLShader()
	imageShader.loadShader( "ImageProcessing/OutImage" )
	imageShader["parameters"]["in0"].setInput( outR["out"]["channel"] )

	image = GafferOSL.OSLImage()
	image["in"].setInput( constant["out"] )
	image["shader"].setInput( imageShader["out"]["out"] )

	sampler = GafferImage.Sampler( image["out"], "R", image["out"]["dataWindow"].getValue() )
	# Every pixel, including those in negative-coordinate tiles, must have been shaded.
	for y in range( -128, 128 ) :
		for x in range( -128, 128 ) :
			self.assertEqual( sampler.sample( x, y ), 1, "Pixel {},{}".format( x, y ) )
def testDeep( self ) :
	"""OSLImage on a deep image then flattened should match flattening first, then shading."""

	# Simple network to swap channels
	inLayer = GafferOSL.OSLShader()
	inLayer.loadShader( "ImageProcessing/InLayer" )

	colorToFloat = GafferOSL.OSLShader()
	colorToFloat.loadShader( "Conversion/ColorToFloat" )
	colorToFloat["parameters"]["c"].setInput( inLayer["out"]["layerColor"] )

	floatToColor = GafferOSL.OSLShader()
	floatToColor.loadShader( "Conversion/FloatToColor" )
	floatToColor["parameters"]["r"].setInput( colorToFloat["out"]["b"] )
	floatToColor["parameters"]["g"].setInput( colorToFloat["out"]["r"] )
	floatToColor["parameters"]["b"].setInput( colorToFloat["out"]["g"] )

	# Read in a deep image
	imageReader = GafferImage.ImageReader()
	imageReader["fileName"].setValue( self.representativeDeepImagePath )

	# Try running OSLImage on deep image, then flattening
	oslImageDeep = GafferOSL.OSLImage()
	oslImageDeep["channels"].addChild( Gaffer.NameValuePlug( "", Gaffer.Color3fPlug( "value", defaultValue = imath.Color3f( 1, 1, 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "channel", Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
	oslImageDeep["in"].setInput( imageReader["out"] )
	oslImageDeep["channels"]["channel"]["value"].setInput( floatToColor["out"]["c"] )

	postFlatten = GafferImage.DeepToFlat()
	postFlatten["in"].setInput( oslImageDeep["out"] )

	# Try running OSLImage on already flattened image
	preFlatten = GafferImage.DeepToFlat()
	preFlatten["in"].setInput( imageReader["out"] )

	oslImageFlat = GafferOSL.OSLImage()
	oslImageFlat["channels"].addChild( Gaffer.NameValuePlug( "", Gaffer.Color3fPlug( "value", defaultValue = imath.Color3f( 1, 1, 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "channel", Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
	oslImageFlat["in"].setInput( preFlatten["out"] )
	oslImageFlat["channels"]["channel"]["value"].setInput( floatToColor["out"]["c"] )

	# Results should match
	self.assertImagesEqual( postFlatten["out"], oslImageFlat["out"] )

	# Also test reading from UV
	shaderGlobals = GafferOSL.OSLShader( "Globals" )
	shaderGlobals.loadShader( "Utility/Globals" )

	uvToColor = GafferOSL.OSLShader()
	uvToColor.loadShader( "Conversion/FloatToColor" )
	uvToColor["parameters"]["r"].setInput( shaderGlobals["out"]["globalU"] )
	uvToColor["parameters"]["g"].setInput( shaderGlobals["out"]["globalV"] )

	inAlpha = GafferOSL.OSLShader()
	inAlpha.loadShader( "ImageProcessing/InChannel" )
	inAlpha["parameters"]["channelName"].setValue( 'A' )

	# Premultiply the UV ramp by the incoming alpha.
	multiplyAlpha = GafferOSL.OSLShader()
	multiplyAlpha.loadShader( "Maths/MultiplyColor" )
	multiplyAlpha["parameters"]["a"].setInput( uvToColor["out"]["c"] )
	multiplyAlpha["parameters"]["b"]["r"].setInput( inAlpha["out"]["channelValue"] )
	multiplyAlpha["parameters"]["b"]["g"].setInput( inAlpha["out"]["channelValue"] )
	multiplyAlpha["parameters"]["b"]["b"].setInput( inAlpha["out"]["channelValue"] )

	oslImageDeep["channels"]["channel"]["value"].setInput( multiplyAlpha["out"]["out"] )

	# Check per-pixel that R and G hold the alpha-weighted UV ramp.
	outImage = GafferImage.ImageAlgo.image( postFlatten["out"] )
	size = outImage.dataWindow.size() + imath.V2i( 1 )
	i = 0
	for y in range( size.y ):
		for x in range( size.x ):
			self.assertAlmostEqual( outImage["R"][i], (x + 0.5) / size.x * outImage["A"][i], places = 5 )
			self.assertAlmostEqual( outImage["G"][i], (size.y - y - 0.5) / size.y * outImage["A"][i], places = 5 )
			i += 1
def testGlobals( self ) :

	# Verify that the OSL globals P, u and v are exposed to image shaders:
	# P holds pixel-centre coordinates in pixel space, and u/v hold the
	# normalised position within the display window.
	# NOTE(review): indentation was lost in this dump and has been
	# reconstructed from the statement order.

	constant = GafferImage.Constant()
	constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( -10 ), imath.V2i( 10 ) ) ) )

	# Renamed from `globals`, which shadowed the Python builtin of the same name.
	globalsShader = GafferOSL.OSLShader()
	globalsShader.loadShader( "Utility/Globals" )

	outP = GafferOSL.OSLShader()
	outP.loadShader( "ImageProcessing/OutLayer" )
	outP["parameters"]["layerColor"].setInput( globalsShader["out"]["globalP"] )

	outU = GafferOSL.OSLShader()
	outU.loadShader( "ImageProcessing/OutChannel" )
	outU["parameters"]["channelName"].setValue( "u" )
	outU["parameters"]["channelValue"].setInput( globalsShader["out"]["globalU"] )

	outV = GafferOSL.OSLShader()
	outV.loadShader( "ImageProcessing/OutChannel" )
	outV["parameters"]["channelName"].setValue( "v" )
	outV["parameters"]["channelValue"].setInput( globalsShader["out"]["globalV"] )

	imageShader = GafferOSL.OSLShader()
	imageShader.loadShader( "ImageProcessing/OutImage" )
	imageShader["parameters"]["in0"].setInput( outP["out"]["layer"] )
	imageShader["parameters"]["in1"].setInput( outU["out"]["channel"] )
	imageShader["parameters"]["in2"].setInput( outV["out"]["channel"] )

	image = GafferOSL.OSLImage()
	image["in"].setInput( constant["out"] )
	image["shader"].setInput( imageShader["out"]["out"] )

	displayWindow = image["out"]["format"].getValue().getDisplayWindow()

	samplerR = GafferImage.Sampler( image["out"], "R", displayWindow )
	samplerG = GafferImage.Sampler( image["out"], "G", displayWindow )
	samplerB = GafferImage.Sampler( image["out"], "B", displayWindow )
	samplerU = GafferImage.Sampler( image["out"], "u", displayWindow )
	samplerV = GafferImage.Sampler( image["out"], "v", displayWindow )

	size = imath.V2f( displayWindow.size() )
	uvStep = imath.V2f( 1.0 ) / size
	# UV of the first pixel centre; subsequent pixels advance by uvStep.
	uvMin = 0.5 * uvStep

	for y in range( displayWindow.min().y, displayWindow.max().y ) :
		for x in range( displayWindow.min().x, displayWindow.max().x ) :
			# P is sampled at pixel centres, hence the 0.5 offsets.
			self.assertEqual( samplerR.sample( x, y ), x + 0.5, "Pixel {},{}".format( x, y ) )
			self.assertEqual( samplerG.sample( x, y ), y + 0.5, "Pixel {},{}".format( x, y ) )
			self.assertEqual( samplerB.sample( x, y ), 0, "Pixel {},{}".format( x, y ) )
			uv = uvMin + uvStep * imath.V2f( imath.V2i( x, y ) - displayWindow.min() )
			self.assertAlmostEqual( samplerU.sample( x, y ), uv.x, delta = 0.0000001, msg = "Pixel {},{}".format( x, y ) )
			self.assertAlmostEqual( samplerV.sample( x, y ), uv.y, delta = 0.0000001, msg = "Pixel {},{}".format( x, y ) )
# Checks that texture() lookups in an image shader use the expected vertical
# orientation, by sampling a vertical-ramp texture and comparing against the
# pixel row. NOTE(review): indentation was lost in this dump.
def testTextureOrientation( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( 32, 32 ) )
textureFileName = os.path.dirname( __file__ ) + "/images/vRamp.tx"
outLayer = GafferOSL.OSLCode()
outLayer["out"]["layer"] = GafferOSL.ClosurePlug(
direction = Gaffer.Plug.Direction.Out,
flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
)
# The OSL snippet samples the ramp texture at the shading point's (u, v).
outLayer["code"].setValue( 'layer = outLayer( "", texture( "{}", u, v ) )'.format( textureFileName ) )
outImage = GafferOSL.OSLShader()
outImage.loadShader( "ImageProcessing/OutImage" )
outImage["parameters"]["in0"].setInput( outLayer["out"]["layer"] )
oslImage = GafferOSL.OSLImage()
oslImage["in"].setInput( constant["out"] )
oslImage["shader"].setInput( outImage["out"]["out"] )
sampler = GafferImage.Sampler( oslImage["out"], "R", oslImage["out"]["dataWindow"].getValue() )
# A column of the 32-pixel ramp should increase with y at pixel centres.
for y in range( 0, 31 ) :
self.assertAlmostEqual( sampler.sample( 5, y ), (y + 0.5) / 32.0, delta = 0.02 )
# Uses a PerformanceMonitor to prove that OSLImage only pulls the input
# channels the shader actually needs. NOTE(review): indentation was lost
# in this dump.
def testPullsMinimalSetOfInputChannels( self ) :
constant = GafferImage.Constant()
constant["color"].setValue( imath.Color4f( 0.1101, 0.1224, 0.1353, 0.135 ) )
# A single-tile image, so channelData compute counts map 1:1 to pulls.
constant["format"].setValue(
GafferImage.Format( GafferImage.ImagePlug.tileSize(), GafferImage.ImagePlug.tileSize() )
)
outLayer = GafferOSL.OSLShader()
outLayer.loadShader( "ImageProcessing/OutLayer" )
outImage = GafferOSL.OSLShader()
outImage.loadShader( "ImageProcessing/OutImage" )
outImage["parameters"][0].setInput( outLayer["out"]["layer"] )
oslImage = GafferOSL.OSLImage()
oslImage["in"].setInput( constant["out"] )
oslImage["shader"].setInput( outImage["out"]["out"] )
with Gaffer.PerformanceMonitor() as pm :
GafferImage.ImageAlgo.image( oslImage["out"] )
# Because the shader doesn't use any input channels,
# the OSLImage node shouldn't have needed to pull on
# any of the RGB channels. Because the shader doesn't
# write to alpha, it does need to pull on alpha to pass
# it through. Hence we expect a single computation for
# the Constant's channelData.
s = pm.plugStatistics( constant["out"]["channelData"] )
self.assertEqual( s.computeCount, 1 )
# Verifies that the internal OSLCode node is evaluated in a single, clean
# context - free of image-specific variables such as image:tileOrigin -
# so the generated shader network is shared across tiles/channels.
# NOTE(review): indentation was lost in this dump.
def testShaderNetworkGeneratedInGlobalContext( self ) :
constant = GafferImage.Constant()
outLayer = GafferOSL.OSLCode()
outLayer["out"]["layer"] = GafferOSL.ClosurePlug(
direction = Gaffer.Plug.Direction.Out,
flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
)
outLayer["code"].setValue( 'layer = outLayer( "", color( 0, 1, 0) )' )
outImage = GafferOSL.OSLShader()
outImage.loadShader( "ImageProcessing/OutImage" )
outImage["parameters"]["in0"].setInput( outLayer["out"]["layer"] )
oslImage = GafferOSL.OSLImage()
oslImage["in"].setInput( constant["out"] )
oslImage["shader"].setInput( outImage["out"]["out"] )
with Gaffer.ContextMonitor( oslImage["__oslCode"] ) as cm :
GafferImageTest.processTiles( oslImage["out"] )
cs = cm.combinedStatistics()
self.assertEqual( cs.numUniqueContexts(), 1 )
self.assertNotIn( "image:tileOrigin", cs.variableNames() )
self.assertNotIn( "image:channelName", cs.variableNames() )
# Exercises every channel value type the `channels` plug supports: an
# unnamed colour (maps to R/G/B), a named float channel, and a named colour
# (maps to name.R/G/B). NOTE(review): indentation was lost in this dump.
def testAllTypes( self ) :
i = GafferOSL.OSLImage()
# 5x5 image, hence the 25-element expectations below.
i["defaultFormat"].setValue( GafferImage.Format( imath.Box2i( imath.V2i(0), imath.V2i( 5 ) ) ) )
i["channels"].addChild( Gaffer.NameValuePlug( "", imath.Color3f(1,3,5) ) )
i["channels"].addChild( Gaffer.NameValuePlug( "testFloat", 42.42 ) )
i["channels"].addChild( Gaffer.NameValuePlug( "testColor", imath.Color3f(12,13,14) ) )
image = GafferImage.ImageAlgo.image( i['out'] )
self.assertEqual( image["R"], IECore.FloatVectorData( [1]*25 ) )
self.assertEqual( image["G"], IECore.FloatVectorData( [3]*25 ) )
self.assertEqual( image["B"], IECore.FloatVectorData( [5]*25 ) )
self.assertEqual( image["testFloat"], IECore.FloatVectorData( [42.42]*25 ) )
self.assertEqual( image["testColor.R"], IECore.FloatVectorData( [12]*25 ) )
self.assertEqual( image["testColor.G"], IECore.FloatVectorData( [13]*25 ) )
self.assertEqual( image["testColor.B"], IECore.FloatVectorData( [14]*25 ) )
# A ClosurePlug channel lets one shader output drive multiple named
# channels at once (here "blah" RGB plus a float channel "foo").
# NOTE(review): indentation was lost in this dump.
def testClosure( self ) :
i = GafferOSL.OSLImage()
i["defaultFormat"].setValue( GafferImage.Format( imath.Box2i( imath.V2i(0), imath.V2i( 5 ) ) ) )
i["channels"].addChild( Gaffer.NameValuePlug( "testClosure", GafferOSL.ClosurePlug() ) )
code = GafferOSL.OSLCode( "OSLCode" )
code["out"].addChild( GafferOSL.ClosurePlug( "output1", direction = Gaffer.Plug.Direction.Out ) )
# Closure sum: a colour layer plus a single float channel.
code["code"].setValue( 'output1 = outLayer( "blah", color( 0.1, 0.2, 0.3 ) ) + outChannel( "foo", 0.5 );' )
i["channels"][0]["value"].setInput( code["out"]["output1"] )
image = GafferImage.ImageAlgo.image( i['out'] )
self.assertEqual( image["blah.R"], IECore.FloatVectorData( [0.1]*25 ) )
self.assertEqual( image["blah.G"], IECore.FloatVectorData( [0.2]*25 ) )
self.assertEqual( image["blah.B"], IECore.FloatVectorData( [0.3]*25 ) )
self.assertEqual( image["foo"], IECore.FloatVectorData( [0.5]*25 ) )
# Adding/removing channels creates parameters on the internal __oslCode
# node; this checks those stay in sync through undo.
# NOTE(review): indentation was lost in this dump.
def testUndo( self ) :
s = Gaffer.ScriptNode()
i = GafferOSL.OSLImage()
s.addChild( i )
self.assertFalse( s.undoAvailable() )
self.assertEqual( len( i["__oslCode"]["parameters"].children() ), 0 )
with Gaffer.UndoScope( s ) :
i["channels"].addChild( Gaffer.NameValuePlug( "testColor", imath.Color3f( 42 ) ) )
i["channels"].addChild( Gaffer.NameValuePlug( "testFloat", 42.42 ) )
self.assertTrue( s.undoAvailable() )
# Each channel contributes two parameters (value + enable, presumably -
# TODO confirm against OSLImage's implementation).
self.assertEqual( len( i["__oslCode"]["parameters"].children() ), 4 )
with Gaffer.UndoScope( s ) :
del i["channels"][0]
del i["channels"][0]
self.assertEqual( len( i["__oslCode"]["parameters"].children() ), 0 )
# Test that the internal connections are recreated correctly when undoing adding and removing channels
s.undo()
self.assertEqual( len( i["__oslCode"]["parameters"].children() ), 4 )
s.undo()
self.assertEqual( len( i["__oslCode"]["parameters"].children() ), 0 )
# With no input image connected, OSLImage generates an image sized by
# `defaultFormat` (falling back to the default 1920x1080); once an input
# is connected, the input's format wins. NOTE(review): indentation was
# lost in this dump.
def testDefaultFormat( self ):
constant = GafferImage.Constant()
oslImage = GafferOSL.OSLImage()
oslImage["channels"].addChild( Gaffer.NameValuePlug( "", imath.Color3f( 0.5, 0.6, 0.7 ) ) )
self.assertEqual( oslImage["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 1920, 1080 ) ) )
self.assertEqual( oslImage["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 1920, 1080 ) ) )
oslImage["defaultFormat"].setValue( GafferImage.Format( imath.Box2i( imath.V2i(0), imath.V2i( 5 ) ) ) )
self.assertEqual( oslImage["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 5, 5 ) ) )
self.assertEqual( oslImage["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 5, 5 ) ) )
self.assertEqual( GafferImage.ImageAlgo.image( oslImage["out"] )["G"], IECore.FloatVectorData( [0.6] * 25 ) )
# Connecting an input overrides defaultFormat entirely.
oslImage["in"].setInput( constant["out"] )
self.assertEqual( oslImage["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 1920, 1080 ) ) )
self.assertEqual( oslImage["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 1920, 1080 ) ) )
constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i(0), imath.V2i( 4 ) ) ) )
self.assertEqual( oslImage["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 4, 4 ) ) )
self.assertEqual( oslImage["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 4, 4 ) ) )
self.assertEqual( GafferImage.ImageAlgo.image( oslImage["out"] )["G"], IECore.FloatVectorData( [0.6] * 16 ) )
# Extreme example of doing something very expensive in OSLImage
# Helper used by the performance/deadlock tests below: returns an OSLCode
# node with an `iterations` parameter and a float output whose cost scales
# with `iterations`. NOTE(review): indentation was lost in this dump; the
# OSL source string is reproduced exactly.
def mandelbrotNode( self ):
mandelbrotCode = GafferOSL.OSLCode()
mandelbrotCode["parameters"].addChild( Gaffer.IntPlug( "iterations", defaultValue = 0, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ) )
mandelbrotCode["out"].addChild( Gaffer.FloatPlug( "outFloat", direction = Gaffer.Plug.Direction.Out ) )
mandelbrotCode["code"].setValue( inspect.cleandoc(
"""
// Basic mandelbrot adapted from surface shader here:
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage/blob/master/src/shaders/mandelbrot.osl
point center = point (0,0,0);
float scale = 2;
point cent = center;
point c = scale * point(2*(u-0.5), 2*((1-v)-0.5), 0) + cent;
point z = c;
int i;
for (i = 1; i < iterations && dot(z,z) < 4.0; ++i) {
float x = z[0], y = z[1];
z = point (x*x - y*y, 2*x*y, 0) + c;
}
if (i < iterations) {
float f = pow(float(i)/iterations, 1/log10(float(iterations)));
outFloat = f;
} else {
outFloat = 0;
}
"""
) )
return mandelbrotCode
# Regression test for a tbb task-stealing deadlock: OSLImage's shadingPlug
# must use an isolating cache policy. The setup below is deliberately
# order-dependent; see the inline comments for why each piece exists.
# NOTE(review): indentation was lost in this dump.
def testBadCachePolicyHang( self ):
# Using the legacy cache policy for OSLImage.shadingPlug creates a hang due to tbb task stealing,
# though it's a bit hard to actually demonstrate
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( 128, 128, 1.000 ) )
# Need a slow to compute OSL code in order to trigger hang
mandelbrotCode = self.mandelbrotNode()
# In order to trigger the hang, we need to mix threads which are stuck waiting for an expression which
# uses the Standard policy with threads that are actually finishing, so that tbb tries to start up new
# threads while we're waiting for the expression result. To do this, we use the "var" context variable
# to create two versions of this OSLCode
mandelbrotCode["varExpression"] = Gaffer.Expression()
mandelbrotCode["varExpression"].setExpression( 'parent.parameters.iterations = 100000 + context( "var", 0 );', "OSL" )
oslImage = GafferOSL.OSLImage()
oslImage["channels"].addChild( Gaffer.NameValuePlug( "", Gaffer.Color3fPlug( "value", defaultValue = imath.Color3f( 1, 1, 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "channel", Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
oslImage["in"].setInput( constant["out"] )
oslImage["channels"]["channel"]["value"][0].setInput( mandelbrotCode["out"]["outFloat"] )
oslImage["channels"]["channel"]["value"][1].setInput( mandelbrotCode["out"]["outFloat"] )
oslImage["channels"]["channel"]["value"][2].setInput( mandelbrotCode["out"]["outFloat"] )
# This imageStats is use to create non-blocking slow calculations
imageStats = GafferImage.ImageStats()
imageStats["in"].setInput( oslImage["out"] )
imageStats["area"].setValue( imath.Box2i( imath.V2i( 0, 0 ), imath.V2i( 64, 64 ) ) )
# This box does the non-blocking slow calculation, followed by a blocking slow calculation.
# This ensures that tasks which do just the non-block calculation will start finishing while
# the blocking slow calculation is still running, allowing tbb to try running more threads
# on the blocking calcluation, realizing they can't run, and stealing tasks onto those threads
# which can hit the Standard policy lock on the expression upstream and deadlock, unless the
# OSLImage isolates its threads correctly
expressionBox = Gaffer.Box()
expressionBox.addChild( Gaffer.FloatVectorDataPlug( "inChannelData", defaultValue = IECore.FloatVectorData( [ ] ) ) )
expressionBox.addChild( Gaffer.FloatPlug( "inStat" ) )
expressionBox.addChild( Gaffer.FloatPlug( "out", direction = Gaffer.Plug.Direction.Out ) )
expressionBox["inChannelData"].setInput( oslImage["out"]["channelData"] )
expressionBox["inStat"].setInput( imageStats["average"]["r"] )
expressionBox["contextVariables"] = Gaffer.ContextVariables()
expressionBox["contextVariables"].setup( Gaffer.FloatVectorDataPlug( "in", defaultValue = IECore.FloatVectorData( [ ] ) ) )
expressionBox["contextVariables"]["variables"].addChild( Gaffer.NameValuePlug( "image:tileOrigin", Gaffer.V2iPlug( "value" ), True, "member1" ) )
expressionBox["contextVariables"]["variables"].addChild( Gaffer.NameValuePlug( "image:channelName", Gaffer.StringPlug( "value", defaultValue = 'R' ), True, "member2" ) )
expressionBox["contextVariables"]["variables"].addChild( Gaffer.NameValuePlug( "var", Gaffer.IntPlug( "value", defaultValue = 1 ), True, "member3" ) )
expressionBox["contextVariables"]["in"].setInput( expressionBox["inChannelData"] )
expressionBox["expression"] = Gaffer.Expression()
expressionBox["expression"].setExpression( inspect.cleandoc(
"""
d = parent["contextVariables"]["out"]
parent["out"] = d[0] + parent["inStat"]
"""
) )
# Create a switch to mix which tasks perform the non-blocking or blocking calculation - we need a mixture
# to trigger the hang
switch = Gaffer.Switch()
switch.setup( Gaffer.IntPlug( "in", defaultValue = 0, ) )
switch["in"][0].setInput( expressionBox["out"] )
switch["in"][1].setInput( imageStats["average"]["r"] )
switch["switchExpression"] = Gaffer.Expression()
switch["switchExpression"].setExpression( 'parent.index = ( stoi( context( "testContext", "0" ) ) % 10 ) > 5;', "OSL" )
# In order to evaluate this expression a bunch of times at once with different values of "testContext",
# we set up a simple scene that can be evaluated with GafferSceneTest.traversScene.
# In theory, we could use a simple function that used a parallel_for to evaluate switch["out"], but for
# some reason we don't entirely understand, this does not trigger the hang
import GafferSceneTest
import GafferScene
sphere = GafferScene.Sphere()
pathFilter = GafferScene.PathFilter()
pathFilter["paths"].setValue( IECore.StringVectorData( [ '/sphere' ] ) )
customAttributes = GafferScene.CustomAttributes()
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "foo", Gaffer.FloatPlug( "value" ), True, "member1" ) )
customAttributes["attributes"]["member1"]["value"].setInput( switch["out"] )
customAttributes["in"].setInput( sphere["out"] )
customAttributes["filter"].setInput( pathFilter["out"] )
collectScenes = GafferScene.CollectScenes()
collectScenes["in"].setInput( customAttributes["out"] )
collectScenes["rootNames"].setValue( IECore.StringVectorData( [ str(i) for i in range(1000) ] ) )
collectScenes["rootNameVariable"].setValue( 'testContext' )
# When OSLImage.shadingPlug is not correctly isolated, and grain size on ShadingEngine is smaller than the
# image tile size, this fails about 50% of the time. Running it 5 times makes the failure pretty consistent.
for i in range( 5 ):
Gaffer.ValuePlug.clearCache()
Gaffer.ValuePlug.clearHashCache()
GafferSceneTest.traverseScene( collectScenes["out"] )
@GafferTest.TestRunner.PerformanceTestMethod()
# Benchmarks constant per-tile overhead: the cheapest possible shader over
# many tiles. The input image is evaluated once up front so only the
# OSLImage cost is timed. NOTE(review): indentation was lost in this dump.
def testMinimalPerf( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( 4096, 4096 ) )
floatToColor = GafferOSL.OSLShader()
floatToColor.loadShader( "Conversion/FloatToColor" )
oslImage = GafferOSL.OSLImage()
oslImage["in"].setInput( constant["out"] )
oslImage["channels"].addChild( Gaffer.NameValuePlug( "", Gaffer.Color3fPlug( "value" ), True, "channel" ) )
oslImage["channels"]["channel"]["value"].setInput( floatToColor["out"]["c"] )
# Pre-pull the input so its cost is excluded from the timed scope below.
GafferImage.ImageAlgo.image( constant["out"] )
# Run the fastest possible OSLImage on lots of tiles, to highlight any constant overhead
with GafferTest.TestRunner.PerformanceScope() :
GafferImage.ImageAlgo.image( oslImage["out"] )
@GafferTest.TestRunner.PerformanceTestMethod( repeat = 1)
# Benchmarks the TaskCollaborate cache policy: many downstream tiles (via a
# box-filter upscale) depend on the same expensive upstream tiles.
# NOTE(review): indentation was lost in this dump.
def testCollaboratePerf( self ) :
# Test an expensive OSLImage, with many output tiles depending on the same input tiles,
# which should give TaskCollaborate a chance to show some benefit
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( 128, 128 ) )
deleteChannels = GafferImage.DeleteChannels( "DeleteChannels" )
deleteChannels["in"].setInput( constant["out"] )
deleteChannels["mode"].setValue( GafferImage.DeleteChannels.Mode.Keep )
deleteChannels["channels"].setValue( 'R' )
mandelbrotCode = self.mandelbrotNode()
mandelbrotCode["parameters"]["iterations"].setValue( 500000 )
oslImage = GafferOSL.OSLImage()
oslImage["in"].setInput( deleteChannels["out"] )
oslImage["channels"].addChild( Gaffer.NameValuePlug( "R", Gaffer.FloatPlug( "value" ), True, "channel" ) )
oslImage["channels"]["channel"]["value"].setInput( mandelbrotCode["out"]["outFloat"] )
resize = GafferImage.Resize()
resize["in"].setInput( oslImage["out"] )
resize["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 2048 ) ), 1 ) )
# We use a resize because it pulls the input tiles repeatedly, we don't want to spend time on resizing
# pixels, so use a fast filter
resize["filter"].setValue( 'box' )
with GafferTest.TestRunner.PerformanceScope() :
GafferImage.ImageAlgo.image( resize["out"] )
# Renders a ColorSpline across u and compares every pixel against Cortex's
# evaluation of the same spline, for every interpolation mode. The knot
# values are deliberately chosen - see the comment below.
# NOTE(review): indentation was lost in this dump.
def testOSLSplineMatch( self ):
g = GafferOSL.OSLShader()
g.loadShader( "Utility/Globals" )
colorSpline = GafferOSL.OSLShader()
colorSpline.loadShader( "Pattern/ColorSpline" )
colorSpline["parameters"]["x"].setInput( g["out"]["globalU"] )
# Values chosen to trigger a precision issue with OSL's splineinverse on a constant basis
# if it is not avoided. The values have also been selected a little to avoid showing
# issues we don't want to deal with:
# * The X values are non-decreasing when evaluated as catmullRom ( non-monotonic X values
# are a weird special case that can't be handled well, and OSL and Cortex deal with it
# differently badly
# * The values are chosen so that the discontinuities in "constant" mode don't lie
# directly on a pixel center - if they did, it's comes down solely to floating point
# precision which side we lie on.
colorSpline["parameters"]["spline"].setValue(
Gaffer.SplineDefinitionfColor3f(
(
( 0.1580, imath.Color3f( 0.71, 0.21, 0.39 ) ),
( 0.2249, imath.Color3f( 0, 0.30, 0 ) ),
( 0.2631, imath.Color3f( 0, 0.46, 0 ) ),
( 0.3609, imath.Color3f( 0.71, 0.39, 0.054 ) ),
( 0.3826, imath.Color3f( 0, 0, 0 ) ),
( 0.4116, imath.Color3f( 0.87, 0.31, 1 ) ),
( 0.4300, imath.Color3f( 0, 0, 0 ) ),
( 0.4607, imath.Color3f( 0.71, 0.21, 0.39 ) ),
( 0.5996, imath.Color3f( 0, 1, 1 ) ),
( 0.9235, imath.Color3f( 1, 0.25, 0.25 ) )
), Gaffer.SplineDefinitionInterpolation.Constant
)
)
oslImage = GafferOSL.OSLImage( "OSLImage" )
oslImage["channels"].addChild( Gaffer.NameValuePlug( "", Gaffer.Color3fPlug( "value" ), True, "channel" ) )
oslImage["channels"]["channel"]["value"].setInput( colorSpline["out"]["c"] )
# 3000 pixels wide, so u advances by 1/3000 per pixel in the loop below.
oslImage["defaultFormat"].setValue( GafferImage.Format( 3000, 64, 1.000 ) )
for i in Gaffer.SplineDefinitionInterpolation.names.values():
colorSpline["parameters"]["spline"]["interpolation"].setValue( i )
cortexSpline = colorSpline["parameters"]["spline"].getValue().spline()
samplers = [
GafferImage.Sampler( oslImage["out"], c, imath.Box2i( imath.V2i( 0 ), imath.V2i( 3000, 1 ) ) )
for c in [ "R", "G", "B" ]
]
for x in range( 3000 ):
result = cortexSpline( ( x + 0.5 ) / 3000.0 )
for c in range(3):
self.assertAlmostEqual( samplers[c].sample( x, 0 ), result[c], places = 3 )
# Standard unittest entry point when this test file is run directly.
if __name__ == "__main__":
unittest.main()
|
import utils
def on_mode(irc, conn, event):
    """Keep the bot's cached channel state in sync with a MODE event.

    Tracks the mask lists (+b/-b bans, +q/-q quiets, +e/-e excepts,
    +I/-I invites), the channel key (+k/-k) and the status lists
    (+o/-o ops, +v/-v voices). When the bot itself is opped, it requests
    the +e and +I lists so they can be synced.

    NOTE(review): `log` is not defined in this module's visible scope -
    confirm it is provided elsewhere. The key is stored on `irc.channels`
    while everything else lives in `irc.state["channels"]` - presumably
    intentional, but worth confirming.
    """
    if not irc.is_channel(event.target):
        return
    channel = event.target
    # Mask-style list modes: mode letter -> key of the mask list in state.
    mask_lists = {"b": "bans", "q": "quiets", "e": "excepts", "I": "invites"}
    # Status modes: mode letter -> key of the nick list in state.
    status_lists = {"o": "ops", "v": "voices"}
    for mode in utils.split_modes(event.arguments):
        if len(mode) < 2 or mode[0] not in "+-":
            continue  # not a +x/-x mode string; nothing to track
        adding = mode[0] == "+"
        letter = mode[1]
        if letter in mask_lists:
            mask = mode.split()[1]
            masks = irc.state["channels"][channel][mask_lists[letter]]
            if adding:
                masks.append(mask)
            elif mask in masks:
                masks.remove(mask)
        elif letter == "k":
            if adding:
                irc.channels[channel]["key"] = mode.split()[1]
            else:
                # Bug fix: this previously did `irc.channels[channel][key] = ""`
                # (bare name `key`), which raised NameError unless a +k had
                # been processed earlier in the same call.
                irc.channels[channel]["key"] = ""
        elif letter in status_lists:
            nick = mode.split()[1]
            nicks = irc.state["channels"][channel][status_lists[letter]]
            if adding:
                if letter == "o" and nick == irc.get_nick():
                    # We were just opped: the exempt and invite lists are
                    # typically only visible to ops, so request them now.
                    log.info("Recieved op in {} from {}".format(channel, event.source))
                    log.info("Syncing {} exempts".format(channel))
                    irc.mode(channel, "e")
                    log.info("Syncing {} invites".format(channel))
                    irc.mode(channel, "I")
                if nick not in nicks:
                    nicks.append(nick)
            elif nick in nicks:
                nicks.remove(nick)
Update MODE.py [ci skip]
oops
import utils
def on_mode(irc, conn, event):
    """Update cached channel state in response to a channel MODE event.

    Handles mask lists (bans/quiets/excepts/invites), the channel key and
    the op/voice lists; requests the exempt and invite lists when the bot
    itself receives ops.
    """
    if not irc.is_channel(event.target):
        return
    channel = event.target
    state = irc.state["channels"][channel]
    # Dispatch tables replacing the original if/elif ladder.
    mask_map = {"b": "bans", "q": "quiets", "e": "excepts", "I": "invites"}
    nick_map = {"o": "ops", "v": "voices"}
    for mode in utils.split_modes(event.arguments):
        sign, letter = mode[:1], mode[1:2]
        if sign not in ("+", "-"):
            continue
        if letter in mask_map:
            masks = state[mask_map[letter]]
            mask = mode.split()[1]
            if sign == "+":
                masks.append(mask)
            elif mask in masks:
                masks.remove(mask)
        elif letter == "k":
            # The key lives on irc.channels rather than irc.state.
            if sign == "+":
                irc.channels[channel]["key"] = mode.split()[1]
            else:
                irc.channels[channel]["key"] = ""
        elif letter in nick_map:
            members = state[nick_map[letter]]
            nick = mode.split()[1]
            if sign == "-":
                if nick in members:
                    members.remove(nick)
                continue
            if letter == "o" and nick == irc.get_nick():
                log.info("Recieved op in {} from {}".format(channel, event.source))
                log.info("Syncing {} exempts".format(channel))
                irc.mode(channel, "e")
                log.info("Syncing {} invites".format(channel))
                irc.mode(channel, "I")
            if nick not in members:
                members.append(nick)
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import logging
import swapper
from django.conf import settings
from django.core.validators import RegexValidator
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from embed_video.fields import EmbedVideoField
from sorl.thumbnail import ImageField
from accelerator_abstract.models.accelerator_model import AcceleratorModel
from accelerator_abstract.models.base_startup_role import BaseStartupRole
logger = logging.getLogger(__name__)
# Hex colour strings (no leading '#') used as defaults for the deprecated
# profile colour fields on BaseStartup.
DEFAULT_PROFILE_BACKGROUND_COLOR = '217181' # default dark blue
DEFAULT_PROFILE_TEXT_COLOR = 'FFFFFF'
# Choices for BaseStartup.community: (stored value, display label).
STARTUP_COMMUNITIES = (
('red', 'Red'),
('blue', 'Blue'),
('green', 'Green'),
)
# Logged (with the startup pk) whenever an organization-backed attribute is
# accessed on a startup that has no organization.
STARTUP_NO_ORG_WARNING_MSG = "Startup {} has no organization"
@python_2_unicode_compatible
class BaseStartup(AcceleratorModel):
    """Abstract base model for a startup profile.

    ``name``, ``website_url``, ``twitter_handle`` and
    ``public_inquiry_email`` are not stored on this model: they are
    delegated to the related ``Organization`` via the properties below.
    When no organization is attached, reads return ``None`` and writes are
    dropped, with a warning logged in both cases.

    NOTE(review): indentation was lost in the dump this was recovered from
    and has been reconstructed; string fixes: "the the" -> "the" in the
    ``is_visible`` help text and "hexecimal" -> "hexadecimal" in both
    colour-validator error messages.
    """

    organization = models.ForeignKey(swapper.get_model_name(
        AcceleratorModel.Meta.app_label, 'Organization'), blank=True,
        null=True, related_name='startups')
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    is_visible = models.BooleanField(
        default=True,
        help_text=('Startup Profiles will be published to external websites '
                   'through the API.'))
    primary_industry = models.ForeignKey(
        swapper.get_model_name(AcceleratorModel.Meta.app_label, 'Industry'),
        verbose_name='Primary Industry categorization',
        related_name='startups')
    additional_industries = models.ManyToManyField(
        swapper.get_model_name(AcceleratorModel.Meta.app_label, 'Industry'),
        verbose_name='Additional Industries',
        related_name='secondary_startups',
        db_table="{}_startup_related_industry".format(
            AcceleratorModel.Meta.app_label),
        blank=True,
        help_text=(
            'You may select up to 5 related industries.'
        ),
    )
    short_pitch = models.CharField(
        max_length=140,
        blank=False,
        help_text='Your startup in 140 characters or less.')
    full_elevator_pitch = models.TextField(
        max_length=500,
        blank=False,
        help_text='Your startup in 500 characters or less.')
    linked_in_url = models.URLField(max_length=100, blank=True)
    facebook_url = models.URLField(max_length=100, blank=True)
    high_resolution_logo = ImageField(
        upload_to='startup_pics',
        verbose_name='High Resolution Logo',
        blank=True)
    video_elevator_pitch_url = EmbedVideoField(
        max_length=100,
        blank=True,
        help_text=(
            'Upload your 1-3 minute video pitch to Vimeo or Youtube. '
            'Paste the shared link here.')
    )
    acknowledgement = models.BooleanField(
        default=False,
        help_text=(
            'I understand that my Startup Profile is a pre-requisite '
            'for applying to any MassChallenge Program'
        )
    )
    created_datetime = models.DateTimeField(blank=True, null=True)
    last_updated_datetime = models.DateTimeField(blank=True, null=True)
    community = models.CharField(
        max_length=64,
        choices=STARTUP_COMMUNITIES,
        blank=True,
    )
    # profile color fields are deprecated - do not delete until we know
    # what the marketing site is doing with startup display
    profile_background_color = models.CharField(
        max_length=7,
        blank=True,
        default=DEFAULT_PROFILE_BACKGROUND_COLOR,
        validators=[RegexValidator(
            '^([0-9a-fA-F]{3}|[0-9a-fA-F]{6}|)$',
            'Color must be 3 or 6-digit hexadecimal number, '
            'such as FF0000 for red.'), ])
    profile_text_color = models.CharField(
        max_length=7,
        blank=True,
        default=DEFAULT_PROFILE_TEXT_COLOR,
        validators=[RegexValidator('^([0-9a-fA-F]{3}|[0-9a-fA-F]{6}|)$',
                                   'Color must be 3 or 6-digit hexadecimal '
                                   'number, such as FF0000 for red.'), ])
    recommendation_tags = models.ManyToManyField(
        swapper.get_model_name('accelerator', 'RecommendationTag'),
        blank=True)
    currency = models.ForeignKey(swapper.get_model_name(
        'accelerator', 'Currency'), blank=True,
        null=True)
    location_national = models.CharField(
        max_length=100,
        blank=True,
        default='',
        help_text=('Please specify the country where your main office '
                   '(headquarters) is located'))
    location_regional = models.CharField(
        max_length=100,
        blank=True,
        default='',
        help_text=('Please specify the state, region or province where your '
                   'main office (headquarters) is located (if applicable).'))
    location_city = models.CharField(
        max_length=100,
        blank=True,
        default='',
        help_text=('Please specify the city where your main '
                   'office (headquarters) is located. (e.g. Boston)'))
    location_postcode = models.CharField(
        max_length=100,
        blank=True,
        default='',
        help_text=('Please specify the postal code for your main office '
                   '(headquarters). (ZIP code, Postcode, codigo postal, '
                   'etc.)'))
    date_founded = models.CharField(
        max_length=100,
        blank=True,
        help_text='Month and Year when your startup was founded.'
    )
    landing_page = models.CharField(max_length=255, null=True, blank=True)

    class Meta(AcceleratorModel.Meta):
        db_table = '{}_startup'.format(AcceleratorModel.Meta.app_label)
        abstract = True
        verbose_name_plural = 'Startups'
        ordering = ['organization__name']

    def __str__(self):
        # `name` is delegated to the organization and may be None.
        return self.name or ""

    @property
    def name(self):
        """The organization's name, or None when there is no organization."""
        return self._get_org_attr("name")

    @name.setter
    def name(self, value):
        self._set_org_attr("name", value)

    @property
    def website_url(self):
        """The organization's website URL, or None without an organization."""
        return self._get_org_attr("website_url")

    @website_url.setter
    def website_url(self, website_url):
        self._set_org_attr("website_url", website_url)

    @property
    def twitter_handle(self):
        """The organization's twitter handle, or None without an organization."""
        return self._get_org_attr("twitter_handle")

    @twitter_handle.setter
    def twitter_handle(self, twitter_handle):
        self._set_org_attr("twitter_handle", twitter_handle)

    @property
    def public_inquiry_email(self):
        """The organization's public inquiry email, or None without an organization."""
        return self._get_org_attr("public_inquiry_email")

    @public_inquiry_email.setter
    def public_inquiry_email(self, public_inquiry_email):
        self._set_org_attr("public_inquiry_email", public_inquiry_email)

    def _get_org_attr(self, attr):
        """Read `attr` from the organization; log and return None if absent."""
        if self.organization:
            return getattr(self.organization, attr)
        else:
            logger.warning(STARTUP_NO_ORG_WARNING_MSG.format(self.pk))
            return None

    def _set_org_attr(self, attr, value):
        """Write `attr` on the organization and save it; log if absent."""
        if self.organization:
            setattr(self.organization, attr, value)
            self.organization.save()
            return
        else:
            logger.warning(STARTUP_NO_ORG_WARNING_MSG.format(self.pk))
            return None

    def program_startup_statuses(self):
        """Queryset of ProgramStartupStatus rows linked to this startup."""
        # Local import - presumably to avoid a circular dependency with the
        # concrete `accelerator` app; confirm before moving to module level.
        from accelerator.models.program_startup_status import (
            ProgramStartupStatus
        )
        return ProgramStartupStatus.objects.filter(
            startupstatus__startup=self)

    def is_finalist(self, program=None):
        """if program is given, check whether this startup is a finalist
        in that program. Otherwise, check whether this startup is a finalist
        in any program"""
        if program is None:
            return self.program_startup_statuses().filter(
                startup_role__name=BaseStartupRole.FINALIST).exists()
        return self.program_startup_statuses().filter(
            startup_role__name=BaseStartupRole.FINALIST,
            program__exact=program
        ).exists()
    # Django admin flag: render is_finalist as a boolean icon in list views.
    is_finalist.boolean = True
[AC-1] add 'finalist_startup_statuses' property to startup model class to give the StartupIndex the values it needs
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import logging
import swapper
from django.conf import settings
from django.core.validators import RegexValidator
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from embed_video.fields import EmbedVideoField
from sorl.thumbnail import ImageField
from accelerator_abstract.models.accelerator_model import AcceleratorModel
from accelerator_abstract.models.base_startup_role import BaseStartupRole
logger = logging.getLogger(__name__)
DEFAULT_PROFILE_BACKGROUND_COLOR = '217181' # default dark blue
DEFAULT_PROFILE_TEXT_COLOR = 'FFFFFF'
STARTUP_COMMUNITIES = (
('red', 'Red'),
('blue', 'Blue'),
('green', 'Green'),
)
STARTUP_NO_ORG_WARNING_MSG = "Startup {} has no organization"
@python_2_unicode_compatible
class BaseStartup(AcceleratorModel):
    """Abstract base model for a startup profile.

    Identity fields (name, website_url, twitter_handle,
    public_inquiry_email) live on the related Organization and are
    exposed here as pass-through properties.
    """

    organization = models.ForeignKey(swapper.get_model_name(
        AcceleratorModel.Meta.app_label, 'Organization'), blank=True,
        null=True, related_name='startups')
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    is_visible = models.BooleanField(
        default=True,
        # Typo fix: "through the the API" -> "through the API"
        help_text=('Startup Profiles will be published to external websites '
                   'through the API.'))
    primary_industry = models.ForeignKey(
        swapper.get_model_name(AcceleratorModel.Meta.app_label, 'Industry'),
        verbose_name='Primary Industry categorization',
        related_name='startups')
    additional_industries = models.ManyToManyField(
        swapper.get_model_name(AcceleratorModel.Meta.app_label, 'Industry'),
        verbose_name='Additional Industries',
        related_name='secondary_startups',
        db_table="{}_startup_related_industry".format(
            AcceleratorModel.Meta.app_label),
        blank=True,
        help_text=(
            'You may select up to 5 related industries.'
        ),
    )
    short_pitch = models.CharField(
        max_length=140,
        blank=False,
        help_text='Your startup in 140 characters or less.')
    full_elevator_pitch = models.TextField(
        max_length=500,
        blank=False,
        help_text='Your startup in 500 characters or less.')
    linked_in_url = models.URLField(max_length=100, blank=True)
    facebook_url = models.URLField(max_length=100, blank=True)
    high_resolution_logo = ImageField(
        upload_to='startup_pics',
        verbose_name='High Resolution Logo',
        blank=True)
    video_elevator_pitch_url = EmbedVideoField(
        max_length=100,
        blank=True,
        help_text=(
            'Upload your 1-3 minute video pitch to Vimeo or Youtube. '
            'Paste the shared link here.')
    )
    acknowledgement = models.BooleanField(
        default=False,
        help_text=(
            'I understand that my Startup Profile is a pre-requisite '
            'for applying to any MassChallenge Program'
        )
    )
    created_datetime = models.DateTimeField(blank=True, null=True)
    last_updated_datetime = models.DateTimeField(blank=True, null=True)
    community = models.CharField(
        max_length=64,
        choices=STARTUP_COMMUNITIES,
        blank=True,
    )
    # profile color fields are deprecated - do not delete until we know
    # what the marketing site is doing with startup display
    profile_background_color = models.CharField(
        max_length=7,
        blank=True,
        default=DEFAULT_PROFILE_BACKGROUND_COLOR,
        validators=[RegexValidator(
            '^([0-9a-fA-F]{3}|[0-9a-fA-F]{6}|)$',
            # Typo fix: "hexecimal" -> "hexadecimal"
            'Color must be 3 or 6-digit hexadecimal number, '
            'such as FF0000 for red.'), ])
    profile_text_color = models.CharField(
        max_length=7,
        blank=True,
        default=DEFAULT_PROFILE_TEXT_COLOR,
        validators=[RegexValidator('^([0-9a-fA-F]{3}|[0-9a-fA-F]{6}|)$',
                                   'Color must be 3 or 6-digit hexadecimal '
                                   'number, such as FF0000 for red.'), ])
    recommendation_tags = models.ManyToManyField(
        swapper.get_model_name('accelerator', 'RecommendationTag'),
        blank=True)
    currency = models.ForeignKey(swapper.get_model_name(
        'accelerator', 'Currency'), blank=True,
        null=True)
    location_national = models.CharField(
        max_length=100,
        blank=True,
        default='',
        help_text=('Please specify the country where your main office '
                   '(headquarters) is located'))
    location_regional = models.CharField(
        max_length=100,
        blank=True,
        default='',
        help_text=('Please specify the state, region or province where your '
                   'main office (headquarters) is located (if applicable).'))
    location_city = models.CharField(
        max_length=100,
        blank=True,
        default='',
        help_text=('Please specify the city where your main '
                   'office (headquarters) is located. (e.g. Boston)'))
    location_postcode = models.CharField(
        max_length=100,
        blank=True,
        default='',
        help_text=('Please specify the postal code for your main office '
                   '(headquarters). (ZIP code, Postcode, codigo postal, '
                   'etc.)'))
    date_founded = models.CharField(
        max_length=100,
        blank=True,
        help_text='Month and Year when your startup was founded.'
    )
    landing_page = models.CharField(max_length=255, null=True, blank=True)

    class Meta(AcceleratorModel.Meta):
        db_table = '{}_startup'.format(AcceleratorModel.Meta.app_label)
        abstract = True
        verbose_name_plural = 'Startups'
        ordering = ['organization__name']

    def __str__(self):
        return self.name or ""

    # ---- Organization pass-through accessors -------------------------
    @property
    def name(self):
        return self._get_org_attr("name")

    @name.setter
    def name(self, value):
        self._set_org_attr("name", value)

    @property
    def website_url(self):
        return self._get_org_attr("website_url")

    @website_url.setter
    def website_url(self, website_url):
        self._set_org_attr("website_url", website_url)

    @property
    def twitter_handle(self):
        return self._get_org_attr("twitter_handle")

    @twitter_handle.setter
    def twitter_handle(self, twitter_handle):
        self._set_org_attr("twitter_handle", twitter_handle)

    @property
    def public_inquiry_email(self):
        return self._get_org_attr("public_inquiry_email")

    @public_inquiry_email.setter
    def public_inquiry_email(self, public_inquiry_email):
        self._set_org_attr("public_inquiry_email", public_inquiry_email)

    def _get_org_attr(self, attr):
        """Read ``attr`` from the related organization; None + warning if unset."""
        if self.organization:
            return getattr(self.organization, attr)
        else:
            logger.warning(STARTUP_NO_ORG_WARNING_MSG.format(self.pk))
            return None

    def _set_org_attr(self, attr, value):
        """Write ``attr`` on the related organization and save; warn if unset."""
        if self.organization:
            setattr(self.organization, attr, value)
            self.organization.save()
            return
        else:
            logger.warning(STARTUP_NO_ORG_WARNING_MSG.format(self.pk))
            return None

    def program_startup_statuses(self):
        """Queryset of ProgramStartupStatus rows linked to this startup."""
        # Local import — presumably avoids a circular import; confirm.
        from accelerator.models.program_startup_status import (
            ProgramStartupStatus
        )
        return ProgramStartupStatus.objects.filter(
            startupstatus__startup=self)

    @property
    def finalist_startup_statuses(self):
        """Distinct names of programs in which this startup is a finalist."""
        statuses = self.program_startup_statuses().filter(
            startup_role__name=BaseStartupRole.FINALIST).values_list(
            'program__name', flat=True).distinct()
        return list(statuses)

    def is_finalist(self, program=None):
        """if program is given, check whether this startup is a finalist
        in that program. Otherwise, check whether this startup is a finalist
        in any program"""
        # DRY: build the finalist queryset once, then narrow by program.
        statuses = self.program_startup_statuses().filter(
            startup_role__name=BaseStartupRole.FINALIST)
        if program is not None:
            statuses = statuses.filter(program__exact=program)
        return statuses.exists()
    # Django admin convention: render the method as an on/off icon.
    is_finalist.boolean = True
|
"""
Range of numeric values
"""
import datetime
import pscheduler
from jsonval import json_validate
class NumericRange():
"Range of numbers"
def __init__(self, nrange):
"""Construct a range from a JSON NumericRange."""
# TODO: Would be nice if this class could treat missing
# lower/upper as -/+ infinity.
# TODO: Figure out why this can't just point to a NumericRange
valid, message = pscheduler.json_validate(nrange, {
"type": "object",
"properties": {
"lower": { "$ref": "#/pScheduler/Numeric" },
"upper": { "$ref": "#/pScheduler/Numeric" }
},
"additionalProperties": False,
"required": [ "lower", "upper" ]
})
if not valid:
raise ValueError("Invalid numeric range: %s" % message)
lower = nrange['lower']
if type(lower) in [ str, unicode ]:
self.lower = pscheduler.si_as_integer(lower)
else:
self.lower = lower
self.lower_str = str(lower)
upper = nrange['upper']
if type(upper) in [ str, unicode ]:
self.upper = pscheduler.si_as_integer(upper)
else:
self.upper = upper
self.upper_str = str(upper)
def __contains__(self, number):
"""See if the range contains the specified number, which can be any Numeric."""
if type(number) == float:
test_value = number
else:
test_value = pscheduler.si_as_integer(number)
return self.lower <= test_value <= self.upper
def contains(self, number, invert=False):
"""Like __contains__, but can do inversion and returns a message stub
Return value is (contains, stub), where 'contains' is a boolean
and 'stub' describes why the check failed (e.g., "is not in PT1M..PT1H")
"""
in_range = number in self
if (in_range and invert) or (not in_range and not invert):
return False, ("not %s %s..%s" %
( "outside" if invert else "in",
self.lower_str, self.upper_str ))
return True, None
# Test program
if __name__ == "__main__":
nrange = NumericRange({
"lower": 3.14,
"upper": "100K"
})
for value in [
1,
6.78,
"10K",
"100K",
"1M"
]:
result = value in nrange
print value, result
for invert in [ False, True ]:
print "%s Invert=%s %s" % (value, invert,
nrange.contains(value, invert=invert))
print
Update calls to use si_as_number following the rename from si_as_integer.
"""
Range of numeric values
"""
import datetime
import pscheduler
from jsonval import json_validate
class NumericRange():
"Range of numbers"
def __init__(self, nrange):
"""Construct a range from a JSON NumericRange."""
# TODO: Would be nice if this class could treat missing
# lower/upper as -/+ infinity.
# TODO: Figure out why this can't just point to a NumericRange
valid, message = pscheduler.json_validate(nrange, {
"type": "object",
"properties": {
"lower": { "$ref": "#/pScheduler/Numeric" },
"upper": { "$ref": "#/pScheduler/Numeric" }
},
"additionalProperties": False,
"required": [ "lower", "upper" ]
})
if not valid:
raise ValueError("Invalid numeric range: %s" % message)
lower = nrange['lower']
if type(lower) in [ str, unicode ]:
self.lower = pscheduler.si_as_number(lower)
else:
self.lower = lower
self.lower_str = str(lower)
upper = nrange['upper']
if type(upper) in [ str, unicode ]:
self.upper = pscheduler.si_as_number(upper)
else:
self.upper = upper
self.upper_str = str(upper)
def __contains__(self, number):
"""See if the range contains the specified number, which can be any Numeric."""
if type(number) == float:
test_value = number
else:
test_value = pscheduler.si_as_number(number)
return self.lower <= test_value <= self.upper
def contains(self, number, invert=False):
"""Like __contains__, but can do inversion and returns a message stub
Return value is (contains, stub), where 'contains' is a boolean
and 'stub' describes why the check failed (e.g., "is not in PT1M..PT1H")
"""
in_range = number in self
if (in_range and invert) or (not in_range and not invert):
return False, ("not %s %s..%s" %
( "outside" if invert else "in",
self.lower_str, self.upper_str ))
return True, None
# Test program
if __name__ == "__main__":
nrange = NumericRange({
"lower": 3.14,
"upper": "100K"
})
for value in [
1,
6.78,
"10K",
"100K",
"1M"
]:
result = value in nrange
print value, result
for invert in [ False, True ]:
print "%s Invert=%s %s" % (value, invert,
nrange.contains(value, invert=invert))
print
|
from datetime import datetime, date
import six
from uuid import UUID
import delorean
from radar.serializers.core import Field
from radar.validation.core import ValidationError
class StringField(Field):
    """Field coercing raw input to (optionally trimmed) unicode text."""

    default_error_messages = {
        'invalid': 'A valid string is required.'
    }

    def __init__(self, **kwargs):
        # Pop our own option before handing the rest to Field.
        self.trim_whitespace = kwargs.pop('trim_whitespace', True)
        super(StringField, self).__init__(**kwargs)

    def to_value(self, data):
        """Deserialize to unicode text; None passes through."""
        if data is None:
            return None

        # Containers and booleans would stringify "successfully" but are
        # almost certainly client errors, so reject them explicitly.
        if isinstance(data, dict) or isinstance(data, list) or isinstance(data, bool):
            self.fail('invalid')

        value = six.text_type(data)

        if self.trim_whitespace:
            value = value.strip()

        return value

    def to_data(self, value):
        """Serialize back to unicode text; None passes through."""
        if value is None:
            return None

        data = six.text_type(value)

        return data


class BooleanField(Field):
    """Field accepting common truthy/falsy spellings ('t', 'yes', 1, ...)."""

    default_error_messages = {
        'invalid': 'A valid boolean is required.'
    }

    TRUE_VALUES = {'t', 'true', 'y', 'yes', '1', 1, True}
    FALSE_VALUES = {'f', 'false', 'n', 'no', '0', 0, False}

    def to_value(self, data):
        """Map raw input onto True/False; None passes through.

        NOTE(review): assumes self.fail raises (per Field) — confirm;
        otherwise the invalid path would implicitly return None.
        """
        if data is None:
            return None

        # Case-insensitive match for string input.
        if hasattr(data, 'lower'):
            data = data.lower()

        # Check for TypeError as list and dict aren't hashable
        try:
            if data in self.TRUE_VALUES:
                return True
            elif data in self.FALSE_VALUES:
                return False
            else:
                self.fail('invalid')
        except TypeError:
            self.fail('invalid')

    def to_data(self, value):
        if value is None:
            return None

        return bool(value)


class IntegerField(Field):
    """Field parsing integers; rejects fractional values like '1.5'."""

    default_error_messages = {
        'invalid': 'A valid integer is required.'
    }

    def to_value(self, data):
        """Parse an integer; empty/whitespace strings become None."""
        if data is None:
            return None

        if isinstance(data, basestring):
            data = data.strip()

            if len(data) == 0:
                return None

        try:
            value = int(data)
            value_f = float(data)

            # No floats: reject input whose float value differs from its
            # integer value (e.g. 1.5).
            if value != value_f:
                self.fail('invalid')
        except (ValueError, TypeError):
            self.fail('invalid')

        return value

    def to_data(self, value):
        if value is None:
            return None

        return int(value)


class FloatField(Field):
    """Field parsing floating point numbers."""

    default_error_messages = {
        'invalid': 'A valid number is required.'
    }

    def to_value(self, data):
        """Parse a float; empty/whitespace strings become None."""
        if data is None:
            return None

        if isinstance(data, basestring):
            data = data.strip()

            if len(data) == 0:
                return None

        try:
            value = float(data)
        except (ValueError, TypeError):
            self.fail('invalid')

        return value

    def to_data(self, value):
        if value is None:
            return None

        return float(value)
class DateField(Field):
    """Field (de)serializing dates as ISO 8601 strings."""

    default_error_messages = {
        'invalid': 'Date has wrong format.',
        'datetime': 'Expected a date but got a datetime.',
    }

    def to_value(self, data):
        """Parse raw input into a date; None passes through."""
        if data is None:
            return None
        elif isinstance(data, datetime):
            # datetime is a subclass of date, so this check must come first.
            self.fail('datetime')
        elif isinstance(data, date):
            # Already a date
            return data
        elif not isinstance(data, six.string_types):
            # Not a string
            self.fail('invalid')
        else:
            try:
                value = delorean.parse(data).date
            except ValueError:
                self.fail('invalid')

            return value

    def to_data(self, value):
        """Serialize a date to ISO 8601; None passes through."""
        if value is None:
            return None

        # TODO always %Y-%m-%d
        return value.isoformat()
class DateTimeField(Field):
    """Field (de)serializing datetimes as ISO 8601 strings."""

    default_error_messages = {
        'invalid': 'Datetime has wrong format.',
        # Fixed copy-paste from DateField: this error fires when a *date*
        # is supplied where a datetime was expected.
        'date': 'Expected a datetime but got a date.',
    }

    def to_value(self, data):
        """Parse raw input into a datetime; None passes through."""
        if data is None:
            return None
        elif isinstance(data, datetime):
            # Already a datetime
            return data
        elif isinstance(data, date):
            self.fail('date')
        elif not isinstance(data, six.string_types):
            # Not a string
            self.fail('invalid')
        else:
            try:
                value = delorean.parse(data).datetime
            except ValueError:
                self.fail('invalid')

            return value

    def to_data(self, value):
        """Serialize a datetime to ISO 8601; None passes through."""
        # Consistency fix: every other field's to_data returns None for
        # None; previously this raised AttributeError.
        if value is None:
            return None

        # TODO always %Y-%m-%dT%H:%M:%S+00:00
        return value.isoformat()
class ListField(Field):
    """Field wrapping another field, (de)serializing a list of values."""

    default_error_messages = {
        'not_a_list': 'Expected a list.'
    }

    def __init__(self, field, **kwargs):
        super(ListField, self).__init__(**kwargs)
        # Field used for each element of the list.
        self.field = field

    def to_value(self, data):
        """Deserialize each element, collecting errors keyed by index."""
        if data is None:
            return None

        if not isinstance(data, list):
            self.fail('not_a_list')

        values = []
        errors = {}

        for i, x in enumerate(data):
            try:
                value = self.field.to_value(x)
            except ValidationError as e:
                errors[i] = e.errors
            else:
                values.append(value)

        # BUG FIX: was `if any(errors):`, which iterates the dict KEYS —
        # an error only at index 0 made any() return False and the error
        # was silently dropped. Truthiness of the dict is correct.
        if errors:
            raise ValidationError(errors)

        return values

    def to_data(self, values):
        """Serialize each element with the wrapped field; None passes through."""
        if values is None:
            return None

        data = []

        for value in values:
            if value is None:
                data.append(None)
            else:
                data.append(self.field.to_data(value))

        return data

    def transform_errors(self, errors):
        """Transform per-index child errors through the wrapped field."""
        transformed_errors = {}

        # Errors on the list field
        if '_' in errors:
            transformed_errors['_'] = errors['_']

        for i, field_errors in errors.items():
            if isinstance(i, int):
                transformed_field_errors = self.field.transform_errors(field_errors)
                transformed_errors[i] = transformed_field_errors

        return transformed_errors
class CommaSeparatedStringField(Field):
    """Field for a list of strings represented as one comma-separated string."""

    def to_value(self, data):
        """Split incoming data on commas, trimming each item; None passes through."""
        if data is None:
            return None

        return [x.strip() for x in data.split(',')]

    def to_data(self, value):
        """Join the list into a comma-separated string; None passes through."""
        # Consistency fix: every other field's to_data handles None;
        # previously ','.join(None) raised TypeError.
        if value is None:
            return None

        return ','.join(value)
class UUIDField(Field):
    """Field validating UUIDs and (de)serializing them as text."""

    default_error_messages = {
        'invalid': 'A valid UUID is required.'
    }

    def __init__(self, **kwargs):
        super(UUIDField, self).__init__(**kwargs)

    def to_value(self, data):
        """Validate incoming data as a UUID, returning its text form."""
        if data is None:
            return None

        if isinstance(data, dict) or isinstance(data, list) or isinstance(data, bool):
            self.fail('invalid')

        value = six.text_type(data)

        # Validate the *text* form. Previously UUID(data) was called on
        # the raw input, so non-string input (e.g. an int or a UUID
        # instance) raised an uncaught TypeError/AttributeError instead
        # of failing validation cleanly.
        try:
            UUID(value)
        except ValueError:
            self.fail('invalid')

        return value

    def to_data(self, value):
        if value is None:
            return None

        data = six.text_type(value)

        return data
class EnumField(Field):
    """Field mapping raw values onto members of an enum."""

    default_error_messages = {
        'invalid': 'Not a valid value.'
    }

    def __init__(self, enum, **kwargs):
        # BUG FIX: `self` was previously passed as an extra positional
        # argument to Field.__init__ — no sibling field does this.
        super(EnumField, self).__init__(**kwargs)
        self.enum = enum

    def to_value(self, data):
        """Convert raw data to an enum member; None passes through."""
        if data is None:
            return None

        try:
            value = self.enum(data)
        except ValueError:
            self.fail('invalid')

        return value

    def to_data(self, value):
        """Serialize an enum member to its text representation."""
        if value is None:
            return None

        data = six.text_type(value)

        return data
Fix ListField bug
from datetime import datetime, date
import six
from uuid import UUID
import delorean
from radar.serializers.core import Field
from radar.validation.core import ValidationError
class StringField(Field):
    """Field coercing raw input to (optionally trimmed) unicode text."""

    default_error_messages = {
        'invalid': 'A valid string is required.'
    }

    def __init__(self, **kwargs):
        # Pop our own option before handing the rest to Field.
        self.trim_whitespace = kwargs.pop('trim_whitespace', True)
        super(StringField, self).__init__(**kwargs)

    def to_value(self, data):
        """Deserialize to unicode text; None passes through."""
        if data is None:
            return None

        # Containers and booleans would stringify "successfully" but are
        # almost certainly client errors, so reject them explicitly.
        if isinstance(data, dict) or isinstance(data, list) or isinstance(data, bool):
            self.fail('invalid')

        value = six.text_type(data)

        if self.trim_whitespace:
            value = value.strip()

        return value

    def to_data(self, value):
        """Serialize back to unicode text; None passes through."""
        if value is None:
            return None

        data = six.text_type(value)

        return data


class BooleanField(Field):
    """Field accepting common truthy/falsy spellings ('t', 'yes', 1, ...)."""

    default_error_messages = {
        'invalid': 'A valid boolean is required.'
    }

    TRUE_VALUES = {'t', 'true', 'y', 'yes', '1', 1, True}
    FALSE_VALUES = {'f', 'false', 'n', 'no', '0', 0, False}

    def to_value(self, data):
        """Map raw input onto True/False; None passes through.

        NOTE(review): assumes self.fail raises (per Field) — confirm;
        otherwise the invalid path would implicitly return None.
        """
        if data is None:
            return None

        # Case-insensitive match for string input.
        if hasattr(data, 'lower'):
            data = data.lower()

        # Check for TypeError as list and dict aren't hashable
        try:
            if data in self.TRUE_VALUES:
                return True
            elif data in self.FALSE_VALUES:
                return False
            else:
                self.fail('invalid')
        except TypeError:
            self.fail('invalid')

    def to_data(self, value):
        if value is None:
            return None

        return bool(value)


class IntegerField(Field):
    """Field parsing integers; rejects fractional values like '1.5'."""

    default_error_messages = {
        'invalid': 'A valid integer is required.'
    }

    def to_value(self, data):
        """Parse an integer; empty/whitespace strings become None."""
        if data is None:
            return None

        if isinstance(data, basestring):
            data = data.strip()

            if len(data) == 0:
                return None

        try:
            value = int(data)
            value_f = float(data)

            # No floats: reject input whose float value differs from its
            # integer value (e.g. 1.5).
            if value != value_f:
                self.fail('invalid')
        except (ValueError, TypeError):
            self.fail('invalid')

        return value

    def to_data(self, value):
        if value is None:
            return None

        return int(value)


class FloatField(Field):
    """Field parsing floating point numbers."""

    default_error_messages = {
        'invalid': 'A valid number is required.'
    }

    def to_value(self, data):
        """Parse a float; empty/whitespace strings become None."""
        if data is None:
            return None

        if isinstance(data, basestring):
            data = data.strip()

            if len(data) == 0:
                return None

        try:
            value = float(data)
        except (ValueError, TypeError):
            self.fail('invalid')

        return value

    def to_data(self, value):
        if value is None:
            return None

        return float(value)
class DateField(Field):
    """Field (de)serializing dates as ISO 8601 strings."""

    default_error_messages = {
        'invalid': 'Date has wrong format.',
        'datetime': 'Expected a date but got a datetime.',
    }

    def to_value(self, data):
        """Parse raw input into a date; None passes through."""
        if data is None:
            return None
        elif isinstance(data, datetime):
            # datetime is a subclass of date, so this check must come first.
            self.fail('datetime')
        elif isinstance(data, date):
            # Already a date
            return data
        elif not isinstance(data, six.string_types):
            # Not a string
            self.fail('invalid')
        else:
            try:
                value = delorean.parse(data).date
            except ValueError:
                self.fail('invalid')

            return value

    def to_data(self, value):
        """Serialize a date to ISO 8601; None passes through."""
        if value is None:
            return None

        # TODO always %Y-%m-%d
        return value.isoformat()
class DateTimeField(Field):
    """Field (de)serializing datetimes as ISO 8601 strings."""

    default_error_messages = {
        'invalid': 'Datetime has wrong format.',
        # Fixed copy-paste from DateField: this error fires when a *date*
        # is supplied where a datetime was expected.
        'date': 'Expected a datetime but got a date.',
    }

    def to_value(self, data):
        """Parse raw input into a datetime; None passes through."""
        if data is None:
            return None
        elif isinstance(data, datetime):
            # Already a datetime
            return data
        elif isinstance(data, date):
            self.fail('date')
        elif not isinstance(data, six.string_types):
            # Not a string
            self.fail('invalid')
        else:
            try:
                value = delorean.parse(data).datetime
            except ValueError:
                self.fail('invalid')

            return value

    def to_data(self, value):
        """Serialize a datetime to ISO 8601; None passes through."""
        # Consistency fix: every other field's to_data returns None for
        # None; previously this raised AttributeError.
        if value is None:
            return None

        # TODO always %Y-%m-%dT%H:%M:%S+00:00
        return value.isoformat()
class ListField(Field):
    """Field wrapping another field, (de)serializing a list of values."""

    default_error_messages = {
        'not_a_list': 'Expected a list.'
    }

    def __init__(self, field, **kwargs):
        super(ListField, self).__init__(**kwargs)
        # Field used for each element of the list.
        self.field = field

    def to_value(self, data):
        """Deserialize each element, collecting errors keyed by index."""
        if data is None:
            return None

        if not isinstance(data, list):
            self.fail('not_a_list')

        values = []
        errors = {}

        for i, x in enumerate(data):
            try:
                value = self.field.to_value(x)
            except ValidationError as e:
                # Removed leftover debug `print i, e` — it wrote to
                # stdout on every element error (and is Python-2-only
                # syntax).
                errors[i] = e.errors
            else:
                values.append(value)

        if errors:
            raise ValidationError(errors)

        return values

    def to_data(self, values):
        """Serialize each element with the wrapped field; None passes through."""
        if values is None:
            return None

        data = []

        for value in values:
            if value is None:
                data.append(None)
            else:
                data.append(self.field.to_data(value))

        return data

    def transform_errors(self, errors):
        """Transform per-index child errors through the wrapped field."""
        transformed_errors = {}

        # Errors on the list field
        if '_' in errors:
            transformed_errors['_'] = errors['_']

        for i, field_errors in errors.items():
            if isinstance(i, int):
                transformed_field_errors = self.field.transform_errors(field_errors)
                transformed_errors[i] = transformed_field_errors

        return transformed_errors
class CommaSeparatedStringField(Field):
    """Field for a list of strings represented as one comma-separated string."""

    def to_value(self, data):
        """Split incoming data on commas, trimming each item; None passes through."""
        if data is None:
            return None

        return [x.strip() for x in data.split(',')]

    def to_data(self, value):
        """Join the list into a comma-separated string; None passes through."""
        # Consistency fix: every other field's to_data handles None;
        # previously ','.join(None) raised TypeError.
        if value is None:
            return None

        return ','.join(value)
class UUIDField(Field):
    """Field validating UUIDs and (de)serializing them as text."""

    default_error_messages = {
        'invalid': 'A valid UUID is required.'
    }

    def __init__(self, **kwargs):
        super(UUIDField, self).__init__(**kwargs)

    def to_value(self, data):
        """Validate incoming data as a UUID, returning its text form."""
        if data is None:
            return None

        if isinstance(data, dict) or isinstance(data, list) or isinstance(data, bool):
            self.fail('invalid')

        value = six.text_type(data)

        # Validate the *text* form. Previously UUID(data) was called on
        # the raw input, so non-string input (e.g. an int or a UUID
        # instance) raised an uncaught TypeError/AttributeError instead
        # of failing validation cleanly.
        try:
            UUID(value)
        except ValueError:
            self.fail('invalid')

        return value

    def to_data(self, value):
        if value is None:
            return None

        data = six.text_type(value)

        return data
class EnumField(Field):
    """Field mapping raw values onto members of an enum."""

    default_error_messages = {
        'invalid': 'Not a valid value.'
    }

    def __init__(self, enum, **kwargs):
        # BUG FIX: `self` was previously passed as an extra positional
        # argument to Field.__init__ — no sibling field does this.
        super(EnumField, self).__init__(**kwargs)
        self.enum = enum

    def to_value(self, data):
        """Convert raw data to an enum member; None passes through."""
        if data is None:
            return None

        try:
            value = self.enum(data)
        except ValueError:
            self.fail('invalid')

        return value

    def to_data(self, value):
        """Serialize an enum member to its text representation."""
        if value is None:
            return None

        data = six.text_type(value)

        return data
|
import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
from parsl.monitoring.web_app.app import app, get_db, close_db
from parsl.monitoring.web_app.utils import dropdown
def display_workflow(workflow_name):
    """Layout for one workflow: title, run selector and a content
    container filled in by the `workflow` callback."""
    sql_conn = get_db()
    df_workflows = pd.read_sql_query('SELECT workflow_name, time_began, rundir, run_id FROM workflows WHERE workflow_name=(?)',
                                     sql_conn, params=(workflow_name, ))

    return html.Div(children=[
        html.H2(id='workflow_name', children=df_workflows['workflow_name'][0]),
        # Most recent run first.
        dropdown(id='run_info_dropdown', dataframe=df_workflows.sort_values(by='time_began', ascending=False), field='rundir'),
        html.Div(id='workflow_content')
        # html.Div(id='tables')
    ])


@app.callback(Output('workflow_content', 'children'),
              [Input('run_info_dropdown', 'value')])
def workflow(run_id):
    """Populate the content area when a run is selected.

    The hidden anchor carries run_id for other callbacks to read.
    """
    return [html.A(id='run_id', children=run_id, hidden=True),
            load_radio_items(),
            dcc.Graph(id='workflow_details'),
            total_tasks_graph(run_id)]


# TODO: task_resources is not created for all workflows. Throws error
@app.callback(Output('workflow_details', 'figure'),
              [Input('radio', 'value')],
              [State('run_info_dropdown', 'value')])
def workflow_details(field, run_id):
    """Plot the selected resource metric summed over active tasks."""
    sql_conn = get_db()
    df_resources = pd.read_sql_query('SELECT {field}, timestamp, task_id FROM task_resources WHERE run_id=(?)'.format(field=field), sql_conn, params=(run_id, ))
    df_task = pd.read_sql_query('SELECT task_id, task_time_completed FROM task WHERE run_id=(?)', sql_conn, params=(run_id, ))
    close_db()

    def count_running():
        # Running total of `field` per sample: each task contributes its
        # latest reported value; tasks whose task_time_completed has
        # passed the current timestamp are evicted from the total.
        dic = dict()
        count = 0
        n = []
        for i in range(len(df_resources)):
            task_id = df_resources.iloc[i]['task_id']
            value = float(df_resources.iloc[i][field])
            if task_id in dic:
                count -= dic[task_id][0]
            dic[task_id] = (value, df_task[df_task['task_id'] == task_id]['task_time_completed'].iloc[0])
            count += value
            remove = []
            for k, v in dic.items():
                # NOTE(review): string comparison of timestamps — works
                # only for a fixed zero-padded format; confirm.
                if v[1] < df_resources.iloc[i]['timestamp']:
                    count -= v[0]
                    remove.append(k)
            for k in remove: del dic[k]
            n.append(count)
        return n

    return go.Figure(
        data=[go.Scatter(x=df_resources['timestamp'],
                         y=count_running(),
                         name='tasks')],
        layout=go.Layout(xaxis=dict(tickformat='%H:%M:%S', range=[min(df_resources.timestamp), max(df_resources.timestamp)]),
                         title="Resource usage")
    )
def total_tasks_graph(run_id):
    """Graph of concurrently-running task counts, per app and overall."""
    sql_conn = get_db()
    df_status = pd.read_sql_query('SELECT run_id, task_id, task_status_name, timestamp FROM task_status WHERE run_id=(?)', sql_conn, params=(run_id, ))
    df_task = pd.read_sql_query('SELECT task_id, task_fn_hash FROM task WHERE run_id=(?)', sql_conn, params=(run_id,))
    close_db()

    def count_running(array):
        # Treat each True ('running' transition) as +1 and each other
        # status as -1 (floored at 0), yielding a running concurrency count.
        count = 0
        n = []
        for i in array:
            if i:
                count += 1
            elif count > 0:
                count -= 1
            n.append(count)
        return n

    # Fill up dict "apps" like: {app1: [#task1, #task2], app2: [#task4], app3: [#task3]}
    apps = dict()
    for i in range(len(df_task)):
        row = df_task.iloc[i]
        if row['task_fn_hash'] in apps:
            apps[row['task_fn_hash']].append(row['task_id'])
        else:
            apps[row['task_fn_hash']] = [row['task_id']]

    return dcc.Graph(id='total_tasks',
                     figure=go.Figure(
                         data=[go.Scatter(x=df_status[df_status['task_id'].isin(tasks)]['timestamp'],
                                          y=count_running(df_status[df_status['task_id'].isin(tasks)]['task_status_name'] == 'running'),
                                          name=app)
                               for app, tasks in apps.items()] +
                              [go.Scatter(x=df_status['timestamp'],
                                          y=count_running(df_status['task_status_name'] == 'running'),
                                          name='all')],
                         layout=go.Layout(xaxis=dict(tickformat='%m-%d\n%H:%M:%S',
                                                     range=[min(df_status['timestamp']), max(df_status['timestamp'])]),
                                          title="Total tasks")
                     ))


#
# @app.callback(
#     Output('tables', 'children'),
#     [Input('graph', 'clickData')],
#     [State('task_id', 'children')])
# def load_task_table(clicked, task_id):
#     if not clicked:
#         return
#     sql_conn = get_db()
#
#     df_resources = pd.read_sql_query("SELECT * FROM task_resources WHERE run_id=(?)", sql_conn, params=(task_id, ))
#     df_task = pd.read_sql_query("SELECT * FROM task WHERE run_id=(?)", sql_conn, params=(task_id, ))
#
#     return [html.Table(
#         [html.Tr([html.Th(col) for col in df_resources.columns])] + \
#         [html.Tr([html.Td(html.A(df_resources.loc[df_resources['task_id'] == str(point['curveNumber'])].iloc[i][col])) for col in df_resources.loc[df_resources['task_id'] == str(point['curveNumber'])].columns]) for i in range(len(df_resources.loc[df_resources['task_id'] == str(point['curveNumber'])]))]) for point in clicked['points']]
#


def load_radio_items():
    """Radio selector for which resource metric to plot."""
    return dcc.RadioItems(
        id='radio',
        options=[{'label': 'psutil_process_time_user', 'value': 'psutil_process_time_user'},
                 {'label': 'psutil_process_time_system', 'value': 'psutil_process_time_system'},
                 {'label': 'psutil_process_memory_percent', 'value': 'psutil_process_memory_percent'}],
        value='psutil_process_memory_percent',
    )
Added total tasks plot showing done/failed task counts per time bin.
import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
from parsl.monitoring.web_app.app import app, get_db, close_db
from parsl.monitoring.web_app.utils import dropdown
from datetime import datetime
def display_workflow(workflow_name):
    """Layout for one workflow: title, run selector and a content
    container filled in by the `workflow_content` callback."""
    sql_conn = get_db()
    df_workflows = pd.read_sql_query('SELECT workflow_name, time_began, rundir, run_id FROM workflows WHERE workflow_name=(?)',
                                     sql_conn, params=(workflow_name, ))

    return html.Div(children=[
        html.H2(id='workflow_name', children=df_workflows['workflow_name'][0]),
        # Most recent run first.
        dropdown(id='run_number_dropdown', dataframe=df_workflows.sort_values(by='time_began', ascending=False), field='rundir'),
        html.Div(id='workflow_content')
    ])


@app.callback(Output('workflow_content', 'children'),
              [Input('run_number_dropdown', 'value')])
def workflow_content(run_id):
    """Populate the content area when a run is selected.

    The hidden anchor carries run_id for other callbacks to read.
    """
    return [html.A(id='run_id', children=run_id, hidden=True),
            dcc.RadioItems(
                id='resource_usage_radio_items',
                options=[{'label': 'User time', 'value': 'psutil_process_time_user'},
                         {'label': 'System time', 'value': 'psutil_process_time_system'},
                         {'label': 'Memory usage', 'value': 'psutil_process_memory_percent'}],
                value='psutil_process_time_user',
            ),
            dcc.Graph(id='resource_usage_plot'),
            tasks_per_app_plot(run_id),
            total_tasks_plot(run_id)]


# TODO: task_resources is not created for all workflows. Throws error
@app.callback(Output('resource_usage_plot', 'figure'),
              [Input('resource_usage_radio_items', 'value')],
              [State('run_number_dropdown', 'value')])
def resource_usage_plot(field, run_id):
    """Plot the selected resource metric summed over active tasks."""
    sql_conn = get_db()
    df_resources = pd.read_sql_query('SELECT {field}, timestamp, task_id FROM task_resources WHERE run_id=(?)'.format(field=field), sql_conn, params=(run_id, ))
    df_task = pd.read_sql_query('SELECT task_id, task_time_completed FROM task WHERE run_id=(?)', sql_conn, params=(run_id, ))
    close_db()

    def y_axis_setup():
        # Running total of `field` per sample: each task contributes its
        # latest reported value; tasks whose task_time_completed has
        # passed the current timestamp are evicted from the total.
        dic = dict()
        count = 0
        items = []
        for i in range(len(df_resources)):
            task_id = df_resources.iloc[i]['task_id']
            value = float(df_resources.iloc[i][field])
            if task_id in dic:
                count -= dic[task_id][0]
            dic[task_id] = (value, df_task[df_task['task_id'] == task_id]['task_time_completed'].iloc[0])
            count += value
            remove = []
            for k, v in dic.items():
                # NOTE(review): string comparison of timestamps — works
                # only for a fixed zero-padded format; confirm.
                if v[1] < df_resources.iloc[i]['timestamp']:
                    count -= v[0]
                    remove.append(k)
            for k in remove: del dic[k]
            items.append(count)
        return items

    return go.Figure(
        data=[go.Scatter(x=df_resources['timestamp'],
                         y=y_axis_setup(),
                         name='tasks')],
        layout=go.Layout(xaxis=dict(tickformat='%m-%d\n%H:%M:%S', range=[min(df_resources.timestamp), max(df_resources.timestamp)]),
                         title="Resource usage")
    )


def tasks_per_app_plot(run_id):
    """Graph of concurrently-running task counts, per app and overall."""
    sql_conn = get_db()
    df_status = pd.read_sql_query('SELECT run_id, task_id, task_status_name, timestamp FROM task_status WHERE run_id=(?)', sql_conn, params=(run_id, ))
    df_task = pd.read_sql_query('SELECT task_id, task_fn_hash FROM task WHERE run_id=(?)', sql_conn, params=(run_id,))
    close_db()

    def y_axis_setup(array):
        # Treat each True ('running' transition) as +1 and each other
        # status as -1 (floored at 0), yielding a running concurrency count.
        count = 0
        items = []
        for n in array:
            if n:
                count += 1
            elif count > 0:
                count -= 1
            items.append(count)
        return items

    # Fill up dict "apps" like: {app1: [#task1, #task2], app2: [#task4], app3: [#task3]}
    apps = dict()
    for i in range(len(df_task)):
        row = df_task.iloc[i]
        if row['task_fn_hash'] in apps:
            apps[row['task_fn_hash']].append(row['task_id'])
        else:
            apps[row['task_fn_hash']] = [row['task_id']]

    return dcc.Graph(id='total_tasks',
                     figure=go.Figure(
                         data=[go.Scatter(x=df_status[df_status['task_id'].isin(tasks)]['timestamp'],
                                          y=y_axis_setup(df_status[df_status['task_id'].isin(tasks)]['task_status_name'] == 'running'),
                                          name=app)
                               for app, tasks in apps.items()] +
                              [go.Scatter(x=df_status['timestamp'],
                                          y=y_axis_setup(df_status['task_status_name'] == 'running'),
                                          name='all')],
                         layout=go.Layout(xaxis=dict(tickformat='%m-%d\n%H:%M:%S', range=[min(df_status['timestamp']), max(df_status['timestamp'])]),
                                          title="Tasks per app")
                     ))
def total_tasks_plot(run_id):
    """Stacked bar plot of done/failed task counts in fixed time bins."""
    sql_conn = get_db()
    df_status = pd.read_sql_query('SELECT run_id, task_id, task_status_name, timestamp FROM task_status WHERE run_id=(?)', sql_conn, params=(run_id, ))
    df_task = pd.read_sql_query('SELECT task_id, task_fn_hash FROM task WHERE run_id=(?)', sql_conn, params=(run_id,))
    close_db()

    columns = 20  # number of time bins along the x axis

    # Timestamps look like: 2018-10-09 13:47:03
    def timestamp_to_int(time):
        return int(datetime.strptime(time, '%Y-%m-%d %H:%M:%S').timestamp())

    def int_to_timestamp(n):
        return datetime.fromtimestamp(n)

    min_time = timestamp_to_int(min(df_status['timestamp']))
    max_time = timestamp_to_int(max(df_status['timestamp']))
    # BUG FIX: for runs shorter than `columns` seconds the integer step
    # was 0 and range() raised ValueError; clamp to at least 1 second.
    time_step = max(1, int((max_time - min_time) / columns))

    x_axis = []
    for i in range(min_time, max_time, time_step):
        x_axis.append(int_to_timestamp(i).strftime('%Y-%m-%d %H:%M:%S'))

    def y_axis_setup(value):
        # Count rows with the given status within each [x_axis[i], x_axis[i+1]) bin.
        items = []
        for i in range(len(x_axis) - 1):
            x = df_status['timestamp'] >= x_axis[i]
            y = df_status['timestamp'] < x_axis[i+1]
            items.append(sum(df_status.loc[[a and b for a, b in zip(x, y)]]['task_status_name'] == value))
        return items

    return dcc.Graph(id='total_tasks_plot',
                     figure=go.Figure(data=[go.Bar(x=x_axis,
                                                   y=y_axis_setup('done'),
                                                   name='done'),
                                            go.Bar(x=x_axis,
                                                   y=y_axis_setup('failed'),
                                                   name='failed')],
                                      layout=go.Layout(xaxis=dict(tickformat='%m-%d\n%H:%M:%S', range=[min(df_status['timestamp']), max(df_status['timestamp'])]),
                                                       barmode='stack',
                                                       title="Total tasks")))
#
# @app.callback(
# Output('tables', 'children'),
# [Input('graph', 'clickData')],
# [State('task_id', 'children')])
# def load_task_table(clicked, task_id):
# if not clicked:
# return
# sql_conn = get_db()
#
# df_resources = pd.read_sql_query("SELECT * FROM task_resources WHERE run_id=(?)", sql_conn, params=(task_id, ))
# df_task = pd.read_sql_query("SELECT * FROM task WHERE run_id=(?)", sql_conn, params=(task_id, ))
#
# return [html.Table(
# [html.Tr([html.Th(col) for col in df_resources.columns])] + \
# [html.Tr([html.Td(html.A(df_resources.loc[df_resources['task_id'] == str(point['curveNumber'])].iloc[i][col])) for col in df_resources.loc[df_resources['task_id'] == str(point['curveNumber'])].columns]) for i in range(len(df_resources.loc[df_resources['task_id'] == str(point['curveNumber'])]))]) for point in clicked['points']]
# |
"""Utility to write simple reports in HTML format."""
import re
import subprocess
import sys
from jinja2 import Environment, PackageLoader
from sympy.printing.latex import LatexPrinter
class Report:
    """Simple report for output drudge results.

    This class helps to write symbolic results to files for batch processing
    jobs.  It is not recommended to be used directly.  Users should use the
    method provided by the drudge class instead in ``with`` statements.
    """

    def __init__(self, filename: str, title):
        """Initialize the report object.

        The output format is inferred from the filename extension, which
        must be one of ``html``, ``tex`` or ``pdf``.
        """
        self._filename = filename
        filename_parts = filename.split('.')
        # The extension selects the template and post-processing; a bare
        # name with no dot cannot be dispatched.
        if len(filename_parts) < 2:
            raise ValueError(
                'Invalid filename, unable to determine type', filename
            )
        ext = filename_parts[-1].lower()
        if ext not in {'html', 'tex', 'pdf'}:
            raise ValueError('Invalid extension', ext, 'in', filename)
        self._ext = ext
        self._basename = '.'.join(filename_parts[:-1])
        # Sections accumulated by add(); rendered by the template in write().
        self._sects = []
        self._ctx = {
            'title': title,
            'sects': self._sects
        }

    def add(
            self, title=None, content=None, description=None,
            env='[', **kwargs
    ):
        r"""Add a section to the result.

        Parameters
        ----------

        title
            The title of the equation.  It will be used as a section header.
            When it is given as a None, the section header will not be added.

        content
            The actual tensor or tensor definition to be printed.  It can be
            given as a None to skip any equation rendering.

        description
            A verbal description of the content.  It will be typeset before
            the actual equation as normal text.  A None value will cause it
            to be suppressed.

        env
            The environment to put the equation in.  A value of ``'['`` will
            use ``\[`` and ``\]`` as the deliminator of the math environment.
            Other values will be put inside the common ``\begin{}`` and
            ``\end{}`` tags of LaTeX.

        kwargs
            All the rest of the keyword arguments are forwarded to the
            :py:meth:`Drudge.format_latex` method.

        Note
        ----

        **For long equations in LaTeX environments,** normally
        ``env='align'`` and ``sep_lines=True`` can be set to allow each term
        to occupy separate lines, automatic page break will be inserted, or
        ``env='dmath'`` and ``sep_lines=False`` can be used to use the
        ``breqn`` package to automatically flow the terms.
        """
        if env == '[':
            opening, closing = r'\[', r'\]'
        else:
            # Wrap in \begin{env} ... \end{env} for any named environment.
            opening, closing = [
                r'\{}{{{}}}'.format(i, env)
                for i in ['begin', 'end']
            ]
        self._sects.append({
            'title': title,
            'description': description,
            'expr': content.latex(**kwargs) if content is not None else None,
            'opening': opening,
            'closing': closing
        })

    def write(self):
        """Write the report.

        Note that this method also closes the output file.
        """
        env = Environment(
            loader=PackageLoader('drudge'),
            lstrip_blocks=True, trim_blocks=True
        )
        if self._ext == 'html':
            templ_name = 'report.html'
            filename = self._filename
        elif self._ext == 'tex' or self._ext == 'pdf':
            # PDF output is produced by first rendering the .tex source.
            templ_name = 'report.tex'
            filename = self._basename + '.tex'
        else:
            assert False
        templ = env.get_template(templ_name)
        with open(filename, 'w') as fp:
            templ.stream(self._ctx).dump(fp)
        if self._ext == 'pdf':
            # BUG FIX: when pdflatex is not installed, subprocess.run raises
            # FileNotFoundError and crashed the whole program.  The .tex
            # source is still perfectly usable, so just warn instead.
            try:
                stat = subprocess.run(
                    ['pdflatex', filename],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
            except FileNotFoundError:
                print('pdflatex cannot be found.', file=sys.stderr)
                return
            # Do not crash program only because LaTeX does not compile.
            if stat.returncode != 0:
                err_msg = 'pdflatex failed for {}. Error: \n{}\n{}'.format(
                    filename, stat.stdout, stat.stderr
                )
                print(err_msg, file=sys.stderr)
class ScalarLatexPrinter(LatexPrinter):
    """Specialized LaTeX printer for drudge scalar expressions.

    This printer adjusts SymPy's stock LaTeX printer for common drudge
    tasks.  In particular, when the base of an indexed object already
    carries a subscript, that subscript is lifted into a parenthesized
    superscript so that it cannot collide with the actual indices.
    """

    def _print_Indexed(self, expr):
        # Render the base symbol with the parent printer, then relocate any
        # existing subscript before attaching the indices.
        rendered = self._print(expr.base)
        found = re.match(r'(.*)_\{(.*)\}', rendered)
        if found and found.group(2):
            # Promote the pre-existing subscript into ^{(...)}.
            rendered = '{}^{{({})}}'.format(found.group(1), found.group(2))
        if rendered.startswith('_'):
            # A leading underscore would yield invalid LaTeX; drop it.
            rendered = rendered[1:]
        subscript = ','.join(self._print(idx) for idx in expr.indices)
        return '{}_{{{}}}'.format(rendered, subscript)
Give warning when pdflatex cannot be found for report
There is no need to crash the program since the TeX source code is still
perfectly usable.
"""Utility to write simple reports in HTML format."""
import re
import shutil
import subprocess
import warnings
from jinja2 import Environment, PackageLoader
from sympy.printing.latex import LatexPrinter
class Report:
    """Simple report for output drudge results.
    This class helps to write symbolic results to files for batch processing
    jobs. It is not recommended to be used directly. Users should use the
    method provided by drudge class instead in ``with`` statements.
    """
    def __init__(self, filename: str, title):
        """Initialize the report object."""
        self._filename = filename
        filename_parts = filename.split('.')
        # The extension selects the template and post-processing; a bare
        # name with no dot cannot be dispatched.
        if len(filename_parts) < 2:
            raise ValueError(
                'Invalid filename, unable to determine type', filename
            )
        ext = filename_parts[-1].lower()
        if ext not in {'html', 'tex', 'pdf'}:
            raise ValueError('Invalid extension', ext, 'in', filename)
        self._ext = ext
        self._basename = '.'.join(filename_parts[:-1])
        # Sections accumulated by add(); rendered by the template in write().
        self._sects = []
        self._ctx = {
            'title': title,
            'sects': self._sects
        }
    def add(
            self, title=None, content=None, description=None,
            env='[', **kwargs
    ):
        r"""Add a section to the result.
        Parameters
        ----------
        title
            The title of the equation. It will be used as a section header.
            When it is given as a None, the section header will not be added.
        content
            The actual tensor or tensor definition to be printed. It can be
            given as a None to skip any equation rendering.
        description
            A verbal description of the content. It will be typeset before the
            actual equation as normal text. A None value will cause it to be
            suppressed.
        env
            The environment to put the equation in. A value of ``'['`` will use
            ``\[`` and ``\]`` as the deliminator of the math environment. Other
            values will be put inside the common ``\begin{}`` and ``\end{}``
            tags of LaTeX.
        kwargs
            All the rest of the keyword arguments are forwarded to the
            :py:meth:`Drudge.format_latex` method.
        Note
        ----
        **For long equations in LaTeX environments,** normally ``env='align'``
        and ``sep_lines=True`` can be set to allow each term to occupy separate
        lines, automatic page break will be inserted, or ``env='dmath'`` and
        ``sep_lines=False`` can be used to use ``breqn`` package to
        automatically flow the terms.
        """
        if env == '[':
            opening, closing = r'\[', r'\]'
        else:
            # Wrap the equation in \begin{env} ... \end{env} for any named
            # LaTeX environment.
            opening, closing = [
                r'\{}{{{}}}'.format(i, env)
                for i in ['begin', 'end']
            ]
        self._sects.append({
            'title': title,
            'description': description,
            'expr': content.latex(**kwargs) if content is not None else None,
            'opening': opening,
            'closing': closing
        })
    def write(self):
        """Write the report.
        Note that this method also closes the output file.
        """
        env = Environment(
            loader=PackageLoader('drudge'),
            lstrip_blocks=True, trim_blocks=True
        )
        if self._ext == 'html':
            templ_name = 'report.html'
            filename = self._filename
        elif self._ext == 'tex' or self._ext == 'pdf':
            # PDF output is produced by first rendering the .tex source.
            templ_name = 'report.tex'
            filename = self._basename + '.tex'
        else:
            assert False
        templ = env.get_template(templ_name)
        with open(filename, 'w') as fp:
            templ.stream(self._ctx).dump(fp)
        if self._ext == 'pdf':
            # Only invoke LaTeX when the executable is actually on PATH.
            # ``_PDFLATEX`` is a module-level constant defined after this
            # class; it is resolved at call time, so this is safe.
            if shutil.which(_PDFLATEX) is not None:
                stat = subprocess.run(
                    [_PDFLATEX, filename],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
                # Do not crash program only because LaTeX does not compile;
                # the rendered .tex source is still usable.
                if stat.returncode != 0:
                    err_msg = '{} failed for {}. Error: \n{}\n{}'.format(
                        _PDFLATEX, filename, stat.stdout, stat.stderr
                    )
                    warnings.warn(err_msg)
            else:
                warnings.warn('{} cannot be found.'.format(_PDFLATEX))
# Name of the LaTeX executable that Report.write uses to compile PDF output.
_PDFLATEX = 'pdflatex'
class ScalarLatexPrinter(LatexPrinter):
    """Specialized LaTeX printers for usage in drudge.
    Basically this class tries to fix some problems with using the original
    LaTeX printer from SymPy in common drudge tasks.
    Specifically, for indexed objects, if the base already contains a subscript,
    it will be raised into a superscript wrapped inside a pair of parenthesis.
    """
    def _print_Indexed(self, expr):
        # Render the base symbol with the parent printer first.
        base = self._print(expr.base)
        # Detect an existing subscript of the shape ``<head>_{<sub>}``.
        # NOTE(review): the greedy ``.*`` means only the last ``_{...}``
        # group is captured for bases with multiple subscripts — presumably
        # acceptable for the symbols drudge generates; confirm.
        match = re.match(r'(.*)_\{(.*)\}', base)
        if match and len(match.group(2)) > 0:
            # Promote the existing subscript into a parenthesized
            # superscript so it cannot clash with the new indices.
            base = ''.join([
                match.group(1), '^{(', match.group(2), ')}'
            ])
        if base.startswith('_'):
            # A leading underscore would produce invalid LaTeX; drop it.
            base = base[1:]
        indices = ','.join(self._print(i) for i in expr.indices)
        # Attach the index list as the (single) subscript of the base.
        return ''.join([
            base, '_{', indices, '}'
        ])
|
#!/usr/bin/env python
import csv
import sqlite3
class GffReader:
    """Reader for GFF annotation files.

    ``load`` imports a tab-separated GFF file into an in-memory SQLite
    database; ``summary_stats`` reports simple per-feature-type counts.
    """

    ## Loads the gff file into an in-memory sqlite database
    def load(self, filename):
        """Load ``filename`` into a fresh in-memory SQLite DB and return it."""
        database = sqlite3.connect(':memory:')
        c = database.cursor()
        # Schema: id | seq_id | source | type | start | stop | score | strand | phase | name | parent
        c.execute('CREATE TABLE gff(id TEXT PRIMARY KEY, seq_id TEXT, source TEXT, type TEXT, start INTEGER, stop INTEGER, score TEXT, strand TEXT, phase TEXT, name TEXT, parent TEXT)')
        # BUG FIX: csv.reader requires a text-mode file object in Python 3;
        # the original opened in binary mode ('rb'), which fails on the
        # first row with "iterator should return strings, not bytes".
        with open(filename, 'r', newline='') as gff:
            reader = csv.reader(gff, delimiter='\t', quotechar='|')
            entry_id = 0
            for line in reader:
                entry_id = entry_id + 1
                entry_name = 'hello'  # placeholder until attributes are parsed
                entry_parent = '1'    # placeholder until attributes are parsed
                # BUG FIX: parameterized insert instead of string-concatenated
                # SQL, which broke on quotes in the data and allowed SQL
                # injection from file contents.
                c.execute(
                    'INSERT INTO gff VALUES(?,?,?,?,?,?,?,?,?,?,?)',
                    (str(entry_id), line[0], line[1], line[2], line[3],
                     line[4], line[5], line[6], line[7], entry_name,
                     entry_parent))
        return database

    ## mostly useless function, just here to demonstrate gff-reading and unit test setup...
    def summary_stats(self, filename):
        """Return a one-line text summary of feature-type counts in the file."""
        import sys  # BUG FIX: ``sys`` was used below but never imported
        line_count = 0
        gene_count = 0
        mRNA_count = 0
        exon_count = 0
        CDS_count = 0
        start_codon_count = 0
        stop_codon_count = 0
        # BUG FIX: text mode instead of 'rb' (csv needs str rows in Python 3).
        # NOTE(review): the delimiter here is a space, unlike the tab used in
        # load() — presumably intentional for this demo input; confirm.
        with open(filename, 'r', newline='') as gff:
            reader = csv.reader(gff, delimiter=' ', quotechar='|')
            for line in reader:
                line_count += 1
                feature_type = line[2]
                if feature_type == "gene":
                    gene_count += 1
                elif feature_type == "mRNA":
                    mRNA_count += 1
                elif feature_type == "exon":
                    exon_count += 1
                elif feature_type == "CDS":
                    CDS_count += 1
                elif feature_type == "start_codon":
                    start_codon_count += 1
                elif feature_type == "stop_codon":
                    stop_codon_count += 1
                else:
                    sys.stderr.write("Warning: unknown feature type")
        return (filename + ": " + str(line_count) + " lines, " + str(gene_count) + " genes, " + str(mRNA_count) + " mRNA, " +
                str(exon_count) + " exons, " + str(CDS_count) + " CDS, " + str(start_codon_count) + " start codons, " +
                str(stop_codon_count) + " stop codons")
Fixed the GFF reader.
#!/usr/bin/env python
import csv
import sqlite3
class GffReader:
    """Reader for GFF annotation files.

    ``load`` imports a tab-separated GFF file into an in-memory SQLite
    database, extracting ID/Name/Parent from the attributes column;
    ``summary_stats`` reports simple per-feature-type counts.
    """

    ## Loads the gff file into an in-memory sqlite database
    def load(self, filename):
        """Load ``filename`` into a fresh in-memory SQLite DB and return it."""
        database = sqlite3.connect(':memory:')
        c = database.cursor()
        # Schema: id | seq_id | source | type | start | stop | score | strand | phase | name | parent
        c.execute('CREATE TABLE gff(id TEXT PRIMARY KEY, seq_id TEXT, source TEXT, type TEXT, start INTEGER, stop INTEGER, score TEXT, strand TEXT, phase TEXT, name TEXT, parent TEXT)')
        # BUG FIX: csv.reader requires a text-mode file object in Python 3;
        # the original opened in binary mode ('rb'), which fails on the
        # first row with "iterator should return strings, not bytes".
        with open(filename, 'r', newline='') as gff:
            reader = csv.reader(gff, delimiter='\t', quotechar='|')
            for line in reader:
                # Column 9 holds ';'-separated key=value attribute pairs.
                attributes = line[8].split(';')
                # None maps to SQL NULL via the parameterized insert below
                # (the original spliced the literal token NULL into the SQL).
                entry_id = None
                entry_name = None
                entry_parent = None
                for attr in attributes:
                    # partition (rather than split) keeps values that
                    # themselves contain '=' intact.
                    key, _, val = attr.partition('=')
                    if key == 'ID':
                        entry_id = val
                    elif key == 'Name':
                        entry_name = val
                    elif key == 'Parent':
                        entry_parent = val
                # BUG FIX: parameterized insert instead of string-concatenated
                # SQL, which broke on quotes in the data and allowed SQL
                # injection from file contents.
                c.execute(
                    'INSERT INTO gff VALUES(?,?,?,?,?,?,?,?,?,?,?)',
                    (entry_id, line[0], line[1], line[2], line[3], line[4],
                     line[5], line[6], line[7], entry_name, entry_parent))
        return database

    ## mostly useless function, just here to demonstrate gff-reading and unit test setup...
    def summary_stats(self, filename):
        """Return a one-line text summary of feature-type counts in the file."""
        import sys  # BUG FIX: ``sys`` was used below but never imported
        line_count = 0
        gene_count = 0
        mRNA_count = 0
        exon_count = 0
        CDS_count = 0
        start_codon_count = 0
        stop_codon_count = 0
        # BUG FIX: text mode instead of 'rb' (csv needs str rows in Python 3).
        # NOTE(review): the delimiter here is a space, unlike the tab used in
        # load() — presumably intentional for this demo input; confirm.
        with open(filename, 'r', newline='') as gff:
            reader = csv.reader(gff, delimiter=' ', quotechar='|')
            for line in reader:
                line_count += 1
                feature_type = line[2]
                if feature_type == "gene":
                    gene_count += 1
                elif feature_type == "mRNA":
                    mRNA_count += 1
                elif feature_type == "exon":
                    exon_count += 1
                elif feature_type == "CDS":
                    CDS_count += 1
                elif feature_type == "start_codon":
                    start_codon_count += 1
                elif feature_type == "stop_codon":
                    stop_codon_count += 1
                else:
                    sys.stderr.write("Warning: unknown feature type")
        return (filename + ": " + str(line_count) + " lines, " + str(gene_count) + " genes, " + str(mRNA_count) + " mRNA, " +
                str(exon_count) + " exons, " + str(CDS_count) + " CDS, " + str(start_codon_count) + " start codons, " +
                str(stop_codon_count) + " stop codons")
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 14:48:19 2017
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
from statsmodels.stats.moment_helpers import cov2corr
from statsmodels.stats.base import HolderTuple
# shortcut function
logdet = lambda x: np.linalg.slogdet(x)[1] # noqa: E731
def test_mvmean(data, mean_null=0, return_results=True):
    """Hotelling's T^2 test for a multivariate mean in one sample.

    Parameters
    ----------
    data : array_like
        Observations in rows, variables in columns.
    mean_null : array_like
        Mean vector under the null hypothesis.
    return_results : bool
        When true, a results instance is returned; otherwise only the
        test statistic and p-value.

    Returns
    -------
    results : instance of a results class with attributes
        statistic, pvalue, t2 and df
    (statistic, pvalue) : tuple
        Returned instead when ``return_results`` is false.
    """
    arr = np.asarray(data)
    nobs, k_vars = arr.shape
    delta = arr.mean(0) - mean_null
    sample_cov = np.cov(arr, rowvar=False, ddof=1)
    # Hotelling's T^2 via a linear solve instead of an explicit inverse.
    t2 = nobs * delta.dot(np.linalg.solve(sample_cov, delta))
    # Scale T^2 to an F statistic with (k, n - k) degrees of freedom.
    factor = (nobs - 1) * k_vars / (nobs - k_vars)
    statistic = t2 / factor
    df = (k_vars, nobs - k_vars)
    pvalue = stats.f.sf(statistic, df[0], df[1])
    if not return_results:
        return statistic, pvalue
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       t2=t2,
                       distr="F")
def test_mvmean_two_sample(data_x, data_y, return_results=True):
    """Hotelling's T^2 test for equality of two multivariate means.

    Parameters
    ----------
    data_x : array_like
        First sample; observations in rows, variables in columns.
    data_y : array_like
        Second sample; observations in rows, variables in columns.
    return_results : bool
        When true, a results instance is returned; otherwise only the
        test statistic and p-value.

    Returns
    -------
    results : instance of a results class with attributes
        statistic, pvalue, t2 and df
    (statistic, pvalue) : tuple
        Returned instead when ``return_results`` is false.
    """
    x = np.asarray(data_x)
    y = np.asarray(data_y)
    nobs_x, k_vars = x.shape
    nobs_y, k_vars = y.shape
    diff = x.mean(0) - y.mean(0)
    # Pooled covariance, weighting each sample by its degrees of freedom.
    df_pooled = nobs_x - 1 + nobs_y - 1
    combined_cov = ((nobs_x - 1) * np.cov(x, rowvar=False, ddof=1) +
                    (nobs_y - 1) * np.cov(y, rowvar=False, ddof=1)) / df_pooled
    t2 = (nobs_x * nobs_y) / (nobs_x + nobs_y) * diff.dot(
        np.linalg.solve(combined_cov, diff))
    # Scale T^2 to an F statistic.
    factor = ((nobs_x + nobs_y - 2) * k_vars) / (nobs_x + nobs_y - k_vars - 1)
    statistic = t2 / factor
    df = (k_vars, nobs_x + nobs_y - 1 - k_vars)
    pvalue = stats.f.sf(statistic, df[0], df[1])
    if not return_results:
        return statistic, pvalue
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       t2=t2,
                       distr="F")
def confint_mvmean(data, lin_transf=None, alpha=0.05, simult=False):
    """Confidence interval for linear transformation of a multivariate mean

    Either pointwise or simultaneous confidence intervals are returned.

    Parameters
    ----------
    data : array_like
        data with observations in rows and variables in columns
    lin_transf : array_like or None
        The linear transformation or contrast matrix for transforming the
        vector of means. If this is None, then the identity matrix is used
        which specifies the means themselves.
    alpha : float in (0, 1)
        confidence level for the confidence interval, commonly used is
        alpha=0.05.
    simult : bool
        If ``simult`` is False (default), then the pointwise confidence
        interval is returned.
        Otherwise, a simultaneous confidence interval is returned.
        Warning: additional simultaneous confidence intervals might be added
        and the default for those might change.

    Returns
    -------
    low : ndarray
        lower confidence bound on the linear transformed
    upp : ndarray
        upper confidence bound on the linear transformed
    values : ndarray
        mean or their linear transformation, center of the confidence region

    Notes
    -----
    Pointwise confidence interval is based on Johnson and Wichern
    equation (5-21) page 224.
    Simultaneous confidence interval is based on Johnson and Wichern
    Result 5.3 page 225.
    This looks like Sheffe simultaneous confidence intervals.

    Bonferroni corrected simultaneous confidence interval might be added in
    future

    References
    ----------
    Johnson, Richard A., and Dean W. Wichern. 2007. Applied Multivariate
    Statistical Analysis. 6th ed. Upper Saddle River, N.J: Pearson Prentice
    Hall.
    """
    # BUG FIX: the default was alpha=0.5 (a 50% interval), contradicting both
    # this docstring ("commonly used is alpha=0.05") and the alpha=0.05
    # default of the companion ``confint_mvmean_fromstats``.
    x = np.asarray(data)
    nobs, k_vars = x.shape
    if lin_transf is None:
        lin_transf = np.eye(k_vars)
    mean = x.mean(0)
    # NOTE(review): covariance uses ddof=0 here while the test functions in
    # this module use ddof=1 — confirm this matches the textbook formula
    # used by confint_mvmean_fromstats.
    cov = np.cov(x, rowvar=False, ddof=0)
    ci = confint_mvmean_fromstats(mean, cov, nobs, lin_transf=lin_transf,
                                  alpha=alpha, simult=simult)
    return ci
def confint_mvmean_fromstats(mean, cov, nobs, lin_transf=None, alpha=0.05,
                             simult=False):
    """Confidence interval for linear transformation of a multivariate mean

    Either pointwise or simultaneous confidence intervals are returned, with
    the data supplied as summary statistics ``mean``, ``cov`` and ``nobs``.

    Parameters
    ----------
    mean : ndarray
    cov : ndarray
    nobs : int
    lin_transf : array_like or None
        The linear transformation or contrast matrix for transforming the
        vector of means. If this is None, then the identity matrix is used
        which specifies the means themselves.
    alpha : float in (0, 1)
        confidence level for the confidence interval, commonly used is
        alpha=0.05.
    simult : bool
        If simult is False (default), then pointwise confidence interval is
        returned. Otherwise, a simultaneous confidence interval is returned.
        Warning: additional simultaneous confidence intervals might be added
        and the default for those might change.

    Notes
    -----
    Pointwise confidence interval is based on Johnson and Wichern
    equation (5-21) page 224; the simultaneous (Scheffe-like) interval on
    their Result 5.3 page 225. A Bonferroni corrected simultaneous interval
    might be added in the future.

    References
    ----------
    Johnson, Richard A., and Dean W. Wichern. 2007. Applied Multivariate
    Statistical Analysis. 6th ed. Upper Saddle River, N.J: Pearson Prentice
    Hall.
    """
    mean = np.asarray(mean)
    cov = np.asarray(cov)
    contrast = np.atleast_2d(lin_transf)
    k_vars = len(mean)
    # Center of the confidence region and the quadratic form c' Sigma c,
    # one entry per row of the contrast matrix.
    values = contrast.dot(mean)
    quad_form = (contrast * cov.dot(contrast.T).T).sum(1)
    if simult:
        # Scheffe-type simultaneous interval based on the F distribution.
        scale = (nobs - 1) * k_vars / (nobs - k_vars) / nobs
        f_critval = stats.f.isf(alpha, k_vars, nobs - k_vars)
        half_width = np.sqrt(scale * quad_form * f_critval)
    else:
        # Pointwise interval based on the t distribution.
        dof = nobs - 1
        t_critval = stats.t.isf(alpha / 2, dof)
        half_width = np.sqrt(quad_form / dof) * t_critval
    return values - half_width, values + half_width, values
"""
Created on Tue Nov 7 13:22:44 2017
Author: Josef Perktold
References
----------
Stata manual for mvtest covariances
Rencher and Christensen 2012
Bartlett 1954
Stata refers to Rencher and Christensen for the formulas. Those correspond
to the formula collection in Bartlett 1954 for several of them.
""" # pylint: disable=W0105
def cov_test(cov, nobs, cov_null):
    """One sample hypothesis test for covariance equal to null covariance

    The Null hypothesis is that cov = cov_null, against the alternative that
    it is not equal to cov_null.

    Parameters
    ----------
    cov : array_like
        Covariance matrix of the data, estimated with denominator ``(N - 1)``,
        i.e. `ddof=1`.
    nobs : int
        number of observations used in the estimation of the covariance
    cov_null : nd_array
        covariance under the null hypothesis

    Returns
    -------
    res : instance of HolderTuple
        results with ``statistic, pvalue`` and other attributes like ``df``

    References
    ----------
    Bartlett, M. S. 1954. “A Note on the Multiplying Factors for Various Χ2
    Approximations.” Journal of the Royal Statistical Society. Series B
    (Methodological) 16 (2): 296–98.

    Rencher, Alvin C., and William F. Christensen. 2012. Methods of
    Multivariate Analysis. Wiley. https://doi.org/10.1002/9781118391686.

    StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.
    Stata Press Publication.
    """
    # BUG FIX: convert to ndarray before touching ``.shape`` so that plain
    # nested lists are accepted, as the ``array_like`` docstring promises
    # (the original read ``cov.shape`` on the raw argument).
    cov = np.asarray(cov)
    # using Stata formulas where cov_sample uses nobs in the denominator;
    # Bartlett 1954 has fewer terms
    S = cov * (nobs - 1) / nobs
    S0 = np.asarray(cov_null)
    k = cov.shape[0]
    n = nobs
    # Bartlett correction factor for the likelihood-ratio statistic.
    fact = nobs - 1.
    fact *= 1 - (2 * k + 1 - 2 / (k + 1)) / (6 * (n - 1) - 1)
    fact2 = logdet(S0) - logdet(n / (n - 1) * S)
    fact2 += np.trace(n / (n - 1) * np.linalg.solve(S0, S)) - k
    statistic = fact * fact2
    df = k * (k + 1) / 2
    pvalue = stats.chi2.sf(statistic, df)
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       distr="chi2",
                       null="equal value",
                       cov_null=cov_null
                       )
def cov_test_spherical(cov, nobs):
    r"""One sample hypothesis test that the covariance matrix is spherical.

    The Null and alternative hypotheses are

    $H0 : \Sigma = \sigma I \\
    H1 : \Sigma \neq \sigma I$

    where the common variance $\sigma$ is left unspecified.

    Parameters
    ----------
    cov : array_like
        Covariance matrix of the data, estimated with denominator ``(N - 1)``,
        i.e. `ddof=1`.
    nobs : int
        number of observations used in the estimation of the covariance

    Returns
    -------
    res : instance of HolderTuple
        results with ``statistic, pvalue`` and other attributes like ``df``

    References
    ----------
    Bartlett, M. S. 1954. “A Note on the Multiplying Factors for Various Χ2
    Approximations.” JRSS B 16 (2): 296–98.

    Rencher, Alvin C., and William F. Christensen. 2012. Methods of
    Multivariate Analysis. Wiley. https://doi.org/10.1002/9781118391686.

    StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.
    Stata Press Publication.
    """
    # Stata's formula; the denominator convention of ``cov`` cancels out.
    # The leading term is Bartlett's 1954 correction factor (section IIIc).
    cov = np.asarray(cov)
    k = cov.shape[0]
    bartlett_factor = nobs - 1 - (2 * k**2 + k + 2) / (6 * k)
    lr_term = k * np.log(np.trace(cov)) - logdet(cov) - k * np.log(k)
    statistic = bartlett_factor * lr_term
    df = k * (k + 1) / 2 - 1
    pvalue = stats.chi2.sf(statistic, df)
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       distr="chi2",
                       null="spherical"
                       )
def cov_test_diagonal(cov, nobs):
    r"""One sample hypothesis test that the covariance matrix is diagonal.

    The Null and alternative hypotheses are

    $H0 : \Sigma = diag(\sigma_i) \\
    H1 : \Sigma \neq diag(\sigma_i)$

    where the variances $\sigma_i$ are left unspecified.

    Parameters
    ----------
    cov : array_like
        Covariance matrix of the data, estimated with denominator ``(N - 1)``,
        i.e. `ddof=1`.
    nobs : int
        number of observations used in the estimation of the covariance

    Returns
    -------
    res : instance of HolderTuple
        results with ``statistic, pvalue`` and other attributes like ``df``

    References
    ----------
    Rencher, Alvin C., and William F. Christensen. 2012. Methods of
    Multivariate Analysis. Wiley. https://doi.org/10.1002/9781118391686.

    StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.
    Stata Press Publication.
    """
    cov = np.asarray(cov)
    k = cov.shape[0]
    # A diagonal covariance is equivalent to an identity correlation matrix,
    # so the statistic is built from the log-determinant of the correlation.
    corr = cov2corr(cov)
    statistic = -(nobs - 1 - (2 * k + 5) / 6) * logdet(corr)
    df = k * (k - 1) / 2
    pvalue = stats.chi2.sf(statistic, df)
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       distr="chi2",
                       null="diagonal"
                       )
def _get_blocks(mat, block_len):
"""get diagonal blocks from matrix
"""
k = len(mat)
idx = np.cumsum(block_len)
if idx[-1] == k:
idx = idx[:-1]
elif idx[-1] > k:
raise ValueError("sum of block_len larger than shape of mat")
else:
# allow one missing block that is the remainder
pass
idx_blocks = np.split(np.arange(k), idx)
blocks = []
for ii in idx_blocks:
blocks.append(mat[ii[:, None], ii])
return blocks, idx_blocks
def cov_test_blockdiagonal(cov, nobs, block_len):
    r"""One sample hypothesis test that the covariance is block diagonal.

    The Null and alternative hypotheses are

    $H0 : \Sigma = diag(\Sigma_i) \\
    H1 : \Sigma \neq diag(\Sigma_i)$

    where the covariance blocks $\Sigma_i$ are left unspecified.

    Parameters
    ----------
    cov : array_like
        Covariance matrix of the data, estimated with denominator ``(N - 1)``,
        i.e. `ddof=1`.
    nobs : int
        number of observations used in the estimation of the covariance
    block_len : list
        list of length of each square block

    Returns
    -------
    res : instance of HolderTuple
        results with ``statistic, pvalue`` and other attributes like ``df``

    References
    ----------
    Rencher, Alvin C., and William F. Christensen. 2012. Methods of
    Multivariate Analysis. Wiley. https://doi.org/10.1002/9781118391686.

    StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.
    Stata Press Publication.
    """
    cov = np.asarray(cov)
    k = cov.shape[0]
    blocks = _get_blocks(cov, block_len)[0]
    block_dims = [blk.shape[0] for blk in blocks]
    if sum(block_dims) != k:
        raise ValueError(
            "sample covariances and blocks do not have matching shape")
    # Likelihood-ratio term: sum of block log-determinants minus the full one.
    logdet_blocks = sum(logdet(blk) for blk in blocks)
    a2 = k**2 - sum(dim**2 for dim in block_dims)
    a3 = k**3 - sum(dim**3 for dim in block_dims)
    scale = nobs - 1 - (2 * a3 + 3 * a2) / (6. * a2)
    statistic = scale * (logdet_blocks - logdet(cov))
    df = a2 / 2
    pvalue = stats.chi2.sf(statistic, df)
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       distr="chi2",
                       null="block-diagonal"
                       )
def cov_test_oneway(cov_list, nobs_list):
    r"""Multiple sample hypothesis test that covariance matrices are equal.

    This is commonly known as Box-M test.

    The Null and alternative hypotheses are

    $H0 : \Sigma_i = \Sigma_j for all i and j \\
    H1 : \Sigma_i \neq \Sigma_j for at least one i and j$

    where $\Sigma_i$ is the covariance of sample $i$.

    Parameters
    ----------
    cov_list : list of array_like
        Covariance matrices of the sample, estimated with denominator
        ``(N - 1)``, i.e. `ddof=1`.
    nobs_list : list
        List of the number of observations used in the estimation of the
        covariance for each sample.

    Returns
    -------
    res : instance of HolderTuple
        Results contains test statistic and pvalues for both chisquare and F
        distribution based tests, identified by the name ending "_chi2" and
        "_f".
        Attributes ``statistic, pvalue`` refer to the F-test version.

    Notes
    -----
    approximations to distribution of test statistic is by Box

    References
    ----------
    Rencher, Alvin C., and William F. Christensen. 2012. Methods of
    Multivariate Analysis. Wiley. https://doi.org/10.1002/9781118391686.

    StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.
    Stata Press Publication.
    """
    # Note stata uses nobs in cov, this uses nobs - 1
    cov_list = list(map(np.asarray, cov_list))
    m = len(cov_list)
    nobs = sum(nobs_list)  # total number of observations
    k = cov_list[0].shape[0]
    # Pooled covariance across samples, weighted by degrees of freedom.
    cov_pooled = sum((n - 1) * c for (n, c) in zip(nobs_list, cov_list))
    cov_pooled /= (nobs - m)
    stat0 = (nobs - m) * logdet(cov_pooled)
    stat0 -= sum((n - 1) * logdet(c) for (n, c) in zip(nobs_list, cov_list))
    # Box's chi2 approximation.
    c1 = sum(1 / (n - 1) for n in nobs_list) - 1 / (nobs - m)
    c1 *= (2 * k*k + 3 * k - 1) / (6 * (k + 1) * (m - 1))
    df_chi2 = (m - 1) * k * (k + 1) / 2
    statistic_chi2 = (1 - c1) * stat0
    pvalue_chi2 = stats.chi2.sf(statistic_chi2, df_chi2)
    # Box's F approximation; which of the two forms applies depends on the
    # sign of (c2 - c1**2).
    c2 = sum(1 / (n - 1)**2 for n in nobs_list) - 1 / (nobs - m)**2
    c2 *= (k - 1) * (k + 2) / (6 * (m - 1))
    a1 = df_chi2
    a2 = (a1 + 2) / abs(c2 - c1**2)
    b1 = (1 - c1 - a1 / a2) / a1
    b2 = (1 - c1 + 2 / a2) / a2
    if c2 > c1**2:
        statistic_f = b1 * stat0
    else:
        # BUG FIX: removed a leftover debug ``print("in branch 2")`` that
        # polluted stdout on every call taking this branch.
        tmp = b2 * stat0
        statistic_f = a2 / a1 * tmp / (1 + tmp)
    df_f = (a1, a2)
    pvalue_f = stats.f.sf(statistic_f, *df_f)
    return HolderTuple(statistic=statistic_f,  # name convention, using F here
                       pvalue=pvalue_f,  # name convention, using F here
                       statistic_base=stat0,
                       statistic_chi2=statistic_chi2,
                       pvalue_chi2=pvalue_chi2,
                       df_chi2=df_chi2,
                       distr_chi2='chi2',
                       statistic_f=statistic_f,
                       pvalue_f=pvalue_f,
                       df_f=df_f,
                       distr_f='F')
ENH: Address Review Comments
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 14:48:19 2017
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
from statsmodels.stats.moment_helpers import cov2corr
from statsmodels.stats.base import HolderTuple
from statsmodels.tools.validation import array_like
# shortcut function
logdet = lambda x: np.linalg.slogdet(x)[1] # noqa: E731
def test_mvmean(data, mean_null=0, return_results=True):
    """Hotelling's T^2 test for a multivariate mean in one sample.

    Parameters
    ----------
    data : array_like
        Observations in rows, variables in columns.
    mean_null : array_like
        Mean vector under the null hypothesis.
    return_results : bool
        When true, a results instance is returned; otherwise only the
        test statistic and p-value.

    Returns
    -------
    results : instance of a results class with attributes
        statistic, pvalue, t2 and df
    (statistic, pvalue) : tuple
        Returned instead when ``return_results`` is false.
    """
    sample = np.asarray(data)
    nobs, k_vars = sample.shape
    deviation = sample.mean(0) - mean_null
    cov_hat = np.cov(sample, rowvar=False, ddof=1)
    # Hotelling's T^2 computed via a linear solve rather than inversion.
    t2 = nobs * deviation.dot(np.linalg.solve(cov_hat, deviation))
    # Convert T^2 to an F statistic with (k, n - k) degrees of freedom.
    factor = (nobs - 1) * k_vars / (nobs - k_vars)
    statistic = t2 / factor
    df = (k_vars, nobs - k_vars)
    pvalue = stats.f.sf(statistic, df[0], df[1])
    if return_results:
        return HolderTuple(statistic=statistic,
                           pvalue=pvalue,
                           df=df,
                           t2=t2,
                           distr="F")
    return statistic, pvalue
def test_mvmean_two_sample(x, y, return_results=True):
    """Hotellings test for multivariate mean in two samples

    Parameters
    ----------
    x : array_like
        first sample data with observations in rows and variables in columns
    y : array_like
        second sample data with observations in rows and variables in columns
    return_results : bool
        If true, then a results instance is returned. If False, then only
        the test statistic and pvalue are returned.

    Returns
    -------
    results : instance of a results class with attributes
        statistic, pvalue, t2 and df
    (statistic, pvalue) : tuple
        If return_results is false, then only the test statistic and the
        pvalue are returned.
    """
    x = array_like(x, "x", ndim=2)
    # BUG FIX: the validation label for the second argument said "x", which
    # produced misleading error messages.
    y = array_like(y, "y", ndim=2)
    nobs_x, k_vars = x.shape
    nobs_y, k_vars = y.shape
    mean_x = x.mean(0)
    mean_y = y.mean(0)
    cov_x = np.cov(x, rowvar=False, ddof=1)
    cov_y = np.cov(y, rowvar=False, ddof=1)
    # Pooled covariance weighted by each sample's degrees of freedom.
    combined_cov = ((nobs_x - 1) * cov_x + (nobs_y - 1) * cov_y) / (nobs_x - 1 + nobs_y - 1)
    diff = mean_x - mean_y
    t2 = (nobs_x * nobs_y) / (nobs_x + nobs_y) * diff @ (np.linalg.solve(combined_cov, diff))
    factor = ((nobs_x + nobs_y - 2) * k_vars) / (nobs_x + nobs_y - k_vars - 1)
    statistic = t2 / factor
    df = (k_vars, nobs_x + nobs_y - 1 - k_vars)
    pvalue = stats.f.sf(statistic, df[0], df[1])
    # BUG FIX: ``return_results`` was accepted but silently ignored; honor
    # it for consistency with the one-sample ``test_mvmean`` above.
    if not return_results:
        return statistic, pvalue
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       t2=t2,
                       distr="F")
def confint_mvmean(data, lin_transf=None, alpha=0.05, simult=False):
    """Confidence interval for linear transformation of a multivariate mean

    Either pointwise or simultaneous confidence intervals are returned.

    Parameters
    ----------
    data : array_like
        data with observations in rows and variables in columns
    lin_transf : array_like or None
        The linear transformation or contrast matrix for transforming the
        vector of means. If this is None, then the identity matrix is used
        which specifies the means themselves.
    alpha : float in (0, 1)
        confidence level for the confidence interval, commonly used is
        alpha=0.05.
    simult : bool
        If ``simult`` is False (default), then the pointwise confidence
        interval is returned.
        Otherwise, a simultaneous confidence interval is returned.
        Warning: additional simultaneous confidence intervals might be added
        and the default for those might change.

    Returns
    -------
    low : ndarray
        lower confidence bound on the linear transformed
    upp : ndarray
        upper confidence bound on the linear transformed
    values : ndarray
        mean or their linear transformation, center of the confidence region

    Notes
    -----
    Pointwise confidence interval is based on Johnson and Wichern
    equation (5-21) page 224.

    Simultaneous confidence interval is based on Johnson and Wichern
    Result 5.3 page 225. This looks like Scheffe simultaneous confidence
    intervals.

    Bonferroni corrected simultaneous confidence interval might be added in
    future.

    References
    ----------
    Johnson, Richard A., and Dean W. Wichern. 2007. Applied Multivariate
    Statistical Analysis. 6th ed. Upper Saddle River, N.J: Pearson Prentice
    Hall.
    """
    # BUG FIX: the default was ``alpha=0.5`` (a 50% confidence level), which
    # contradicted both this docstring and the ``alpha=0.05`` default of the
    # companion ``confint_mvmean_fromstats``.
    x = np.asarray(data)
    nobs, k_vars = x.shape
    if lin_transf is None:
        lin_transf = np.eye(k_vars)
    mean = x.mean(0)
    # ddof=0 on purpose: the scaling inside ``confint_mvmean_fromstats``
    # (division by nobs - 1) expects the covariance with denominator nobs.
    cov = np.cov(x, rowvar=False, ddof=0)
    ci = confint_mvmean_fromstats(mean, cov, nobs, lin_transf=lin_transf,
                                  alpha=alpha, simult=simult)
    return ci
def confint_mvmean_fromstats(mean, cov, nobs, lin_transf=None, alpha=0.05,
                             simult=False):
    """Confidence interval for a linear transformation of a multivariate mean.

    Works from summary statistics (``mean``, ``cov``, ``nobs``) and returns
    either pointwise or simultaneous (Scheffe-type) confidence intervals.

    Parameters
    ----------
    mean : ndarray
        Estimated mean vector.
    cov : ndarray
        Estimated covariance matrix.
    nobs : int
        Number of observations underlying the estimates.
    lin_transf : array_like or None
        Linear transformation or contrast matrix applied to the mean vector.
        NOTE(review): unlike ``confint_mvmean``, this function does not
        substitute an identity matrix when it is None, so a matrix should
        be provided explicitly.
    alpha : float in (0, 1)
        Significance level for the confidence interval, commonly 0.05.
    simult : bool
        If False (default), pointwise confidence intervals are returned,
        otherwise simultaneous ones.

    Returns
    -------
    low : ndarray
        Lower confidence bounds.
    upp : ndarray
        Upper confidence bounds.
    values : ndarray
        Transformed means, the centers of the confidence intervals.

    References
    ----------
    Johnson, Richard A., and Dean W. Wichern. 2007. Applied Multivariate
    Statistical Analysis. 6th ed. Pearson Prentice Hall. Equation (5-21)
    page 224 (pointwise) and Result 5.3 page 225 (simultaneous).
    """
    mean = np.asarray(mean)
    cov = np.asarray(cov)
    contrast = np.atleast_2d(lin_transf)
    k_vars = len(mean)

    values = contrast.dot(mean)
    # Row-wise quadratic forms c_i' Cov c_i, one per contrast row.
    quad_form = (contrast * cov.dot(contrast.T).T).sum(1)

    if simult is False:
        # Pointwise t-based interval, Johnson & Wichern eq. (5-21).
        df_t = nobs - 1
        crit = stats.t.isf(alpha / 2, df_t)
        half_width = np.sqrt(quad_form / df_t) * crit
    else:
        # Simultaneous Scheffe-type interval, Johnson & Wichern Result 5.3.
        scale = (nobs - 1) * k_vars / (nobs - k_vars) / nobs
        df_f = (k_vars, nobs - k_vars)
        crit = stats.f.isf(alpha, df_f[0], df_f[1])
        half_width = np.sqrt(scale * quad_form * crit)

    return values - half_width, values + half_width, values
"""
Created on Tue Nov 7 13:22:44 2017
Author: Josef Perktold
References
----------
Stata manual for mvtest covariances
Rencher and Christensen 2012
Bartlett 1954
Stata refers to Rencher and Christensen for the formulas. Those correspond
to the formula collection in Bartlett 1954 for several of them.
""" # pylint: disable=W0105
def cov_test(cov, nobs, cov_null):
    """One sample hypothesis test for covariance equal to null covariance

    The Null hypothesis is that cov = cov_null, against the alternative
    that it is not equal to cov_null.

    Parameters
    ----------
    cov : array_like
        Covariance matrix of the data, estimated with denominator ``(N - 1)``,
        i.e. `ddof=1`.
    nobs : int
        number of observations used in the estimation of the covariance
    cov_null : nd_array
        covariance under the null hypothesis

    Returns
    -------
    res : instance of HolderTuple
        results with ``statistic, pvalue`` and other attributes like ``df``

    References
    ----------
    Bartlett, M. S. 1954. "A Note on the Multiplying Factors for Various Chi2
    Approximations." Journal of the Royal Statistical Society. Series B
    (Methodological) 16 (2): 296-98.

    Rencher, Alvin C., and William F. Christensen. 2012. Methods of
    Multivariate Analysis. Wiley Series in Probability and Statistics.
    Hoboken, NJ, USA: John Wiley & Sons, Inc.
    https://doi.org/10.1002/9781118391686.

    StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.
    Stata Press Publication.
    """
    # using Stata formulas where cov_sample use nobs in denominator
    # Bartlett 1954 has fewer terms
    # BUG FIX: convert ``cov`` to an ndarray up front; the docstring allows
    # array_like but ``cov.shape`` below required an ndarray.
    cov = np.asarray(cov)
    # Rescale to the denominator-nobs covariance used by the Stata formulas.
    S = cov * (nobs - 1) / nobs
    S0 = np.asarray(cov_null)
    k = cov.shape[0]
    n = nobs
    # Bartlett-style small-sample correction factor.
    fact = nobs - 1.
    fact *= 1 - (2 * k + 1 - 2 / (k + 1)) / (6 * (n - 1) - 1)
    # n / (n - 1) * S recovers the ddof=1 covariance estimate.
    fact2 = logdet(S0) - logdet(n / (n - 1) * S)
    fact2 += np.trace(n / (n - 1) * np.linalg.solve(S0, S)) - k
    statistic = fact * fact2
    df = k * (k + 1) / 2
    pvalue = stats.chi2.sf(statistic, df)
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       distr="chi2",
                       null="equal value",
                       cov_null=cov_null
                       )
def cov_test_spherical(cov, nobs):
    r"""One sample hypothesis test that the covariance matrix is spherical.

    The Null and alternative hypotheses are

    $H0 : \Sigma = \sigma I \\
    H1 : \Sigma \neq \sigma I$

    where $\sigma$ is the common variance with unspecified value.

    Parameters
    ----------
    cov : array_like
        Covariance matrix of the data, estimated with denominator ``(N - 1)``,
        i.e. `ddof=1`.
    nobs : int
        number of observations used in the estimation of the covariance

    Returns
    -------
    res : instance of HolderTuple
        results with ``statistic, pvalue`` and other attributes like ``df``

    References
    ----------
    Bartlett, M. S. 1954. "A Note on the Multiplying Factors for Various Chi2
    Approximations." Journal of the Royal Statistical Society. Series B
    (Methodological) 16 (2): 296-98.

    Rencher, Alvin C., and William F. Christensen. 2012. Methods of
    Multivariate Analysis. Wiley Series in Probability and Statistics.
    Hoboken, NJ, USA: John Wiley & Sons, Inc.
    https://doi.org/10.1002/9781118391686.

    StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.
    Stata Press Publication.
    """
    # Unchanged Stata formula; the denominator of cov cancels, AFAICS.
    # Bartlett 1954 correction factor in section IIIc.
    cov = np.asarray(cov)
    k_vars = cov.shape[0]
    correction = nobs - 1 - (2 * k_vars**2 + k_vars + 2) / (6 * k_vars)
    loglr = (k_vars * np.log(np.trace(cov)) - logdet(cov)
             - k_vars * np.log(k_vars))
    statistic = correction * loglr
    df = k_vars * (k_vars + 1) / 2 - 1
    pvalue = stats.chi2.sf(statistic, df)
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       distr="chi2",
                       null="spherical"
                       )
def cov_test_diagonal(cov, nobs):
    r"""One sample hypothesis test that the covariance matrix is diagonal.

    The Null and alternative hypotheses are

    $H0 : \Sigma = diag(\sigma_i) \\
    H1 : \Sigma \neq diag(\sigma_i)$

    where $\sigma_i$ are the variances with unspecified values.

    Parameters
    ----------
    cov : array_like
        Covariance matrix of the data, estimated with denominator ``(N - 1)``,
        i.e. `ddof=1`.
    nobs : int
        number of observations used in the estimation of the covariance

    Returns
    -------
    res : instance of HolderTuple
        results with ``statistic, pvalue`` and other attributes like ``df``

    References
    ----------
    Rencher, Alvin C., and William F. Christensen. 2012. Methods of
    Multivariate Analysis. Wiley Series in Probability and Statistics.
    Hoboken, NJ, USA: John Wiley & Sons, Inc.
    https://doi.org/10.1002/9781118391686.

    StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.
    Stata Press Publication.
    """
    cov = np.asarray(cov)
    k_vars = cov.shape[0]
    corr = cov2corr(cov)
    # LR-type statistic: log determinant of the correlation matrix with a
    # Bartlett-style correction factor.
    statistic = -(nobs - 1 - (2 * k_vars + 5) / 6) * logdet(corr)
    df = k_vars * (k_vars - 1) / 2
    pvalue = stats.chi2.sf(statistic, df)
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       distr="chi2",
                       null="diagonal"
                       )
def _get_blocks(mat, block_len):
"""get diagonal blocks from matrix
"""
k = len(mat)
idx = np.cumsum(block_len)
if idx[-1] == k:
idx = idx[:-1]
elif idx[-1] > k:
raise ValueError("sum of block_len larger than shape of mat")
else:
# allow one missing block that is the remainder
pass
idx_blocks = np.split(np.arange(k), idx)
blocks = []
for ii in idx_blocks:
blocks.append(mat[ii[:, None], ii])
return blocks, idx_blocks
def cov_test_blockdiagonal(cov, nobs, block_len):
    r"""One sample hypothesis test that covariance is block diagonal.

    The Null and alternative hypotheses are

    $H0 : \Sigma = diag(\Sigma_i) \\
    H1 : \Sigma \neq diag(\Sigma_i)$

    where $\Sigma_i$ are covariance blocks with unspecified values.

    Parameters
    ----------
    cov : array_like
        Covariance matrix of the data, estimated with denominator ``(N - 1)``,
        i.e. `ddof=1`.
    nobs : int
        number of observations used in the estimation of the covariance
    block_len : list
        list of length of each square block

    Returns
    -------
    res : instance of HolderTuple
        results with ``statistic, pvalue`` and other attributes like ``df``

    References
    ----------
    Rencher, Alvin C., and William F. Christensen. 2012. Methods of
    Multivariate Analysis: Rencher/Methods. Wiley Series in Probability and
    Statistics. Hoboken, NJ, USA: John Wiley & Sons, Inc.
    https://doi.org/10.1002/9781118391686.

    StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.
    Stata Press Publication.
    """
    cov = np.asarray(cov)
    # Diagonal blocks hypothesized under H0.
    cov_blocks = _get_blocks(cov, block_len)[0]
    k = cov.shape[0]
    k_blocks = [c.shape[0] for c in cov_blocks]
    if k != sum(k_blocks):
        msg = "sample covariances and blocks do not have matching shape"
        raise ValueError(msg)
    logdet_blocks = sum(logdet(c) for c in cov_blocks)
    # a2, a3: differences of squared/cubed dimensions, used in the
    # Bartlett-style correction factor and the degrees of freedom.
    a2 = k**2 - sum(ki**2 for ki in k_blocks)
    a3 = k**3 - sum(ki**3 for ki in k_blocks)
    # LR-type statistic with small-sample correction.
    statistic = (nobs - 1 - (2 * a3 + 3 * a2) / (6. * a2))
    statistic *= logdet_blocks - logdet(cov)
    df = a2 / 2
    pvalue = stats.chi2.sf(statistic, df)
    return HolderTuple(statistic=statistic,
                       pvalue=pvalue,
                       df=df,
                       distr="chi2",
                       null="block-diagonal"
                       )
def cov_test_oneway(cov_list, nobs_list):
    r"""Multiple sample hypothesis test that covariance matrices are equal.

    This is commonly known as Box's M test.

    The Null and alternative hypotheses are

    $H0 : \Sigma_i = \Sigma_j for all i and j \\
    H1 : \Sigma_i \neq \Sigma_j for at least one i and j$

    where $\Sigma_i$ is the covariance of sample $i$.

    Parameters
    ----------
    cov_list : list of array_like
        Covariance matrices of the sample, estimated with denominator
        ``(N - 1)``, i.e. `ddof=1`.
    nobs_list : list
        List of the number of observations used in the estimation of the
        covariance for each sample.

    Returns
    -------
    res : instance of HolderTuple
        Results contains test statistic and pvalues for both chisquare and F
        distribution based tests, identified by the name ending "_chi2" and
        "_f".
        Attributes ``statistic, pvalue`` refer to the F-test version.

    Notes
    -----
    The approximations to the distribution of the test statistic are by Box.

    References
    ----------
    Rencher, Alvin C., and William F. Christensen. 2012. Methods of
    Multivariate Analysis. Wiley Series in Probability and Statistics.
    Hoboken, NJ, USA: John Wiley & Sons, Inc.
    https://doi.org/10.1002/9781118391686.

    StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.
    Stata Press Publication.
    """
    # Note stata uses nobs in cov, this uses nobs - 1
    cov_list = list(map(np.asarray, cov_list))
    m = len(cov_list)
    nobs = sum(nobs_list)  # total number of observations
    k = cov_list[0].shape[0]
    # Pooled covariance across samples, weighted by degrees of freedom.
    cov_pooled = sum((n - 1) * c for (n, c) in zip(nobs_list, cov_list))
    cov_pooled /= (nobs - m)
    # Base log-likelihood-ratio style statistic.
    stat0 = (nobs - m) * logdet(cov_pooled)
    stat0 -= sum((n - 1) * logdet(c) for (n, c) in zip(nobs_list, cov_list))
    # Box's chi2 approximation.
    c1 = sum(1 / (n - 1) for n in nobs_list) - 1 / (nobs - m)
    c1 *= (2 * k * k + 3 * k - 1) / (6 * (k + 1) * (m - 1))
    df_chi2 = (m - 1) * k * (k + 1) / 2
    statistic_chi2 = (1 - c1) * stat0
    pvalue_chi2 = stats.chi2.sf(statistic_chi2, df_chi2)
    # Box's F approximation.
    c2 = sum(1 / (n - 1)**2 for n in nobs_list) - 1 / (nobs - m)**2
    c2 *= (k - 1) * (k + 2) / (6 * (m - 1))
    a1 = df_chi2
    a2 = (a1 + 2) / abs(c2 - c1**2)
    b1 = (1 - c1 - a1 / a2) / a1
    b2 = (1 - c1 + 2 / a2) / a2
    if c2 > c1**2:
        statistic_f = b1 * stat0
    else:
        # BUG FIX: removed leftover debug ``print("in branch 2")``.
        tmp = b2 * stat0
        statistic_f = a2 / a1 * tmp / (1 + tmp)
    df_f = (a1, a2)
    pvalue_f = stats.f.sf(statistic_f, *df_f)
    return HolderTuple(statistic=statistic_f,  # name convention, using F here
                       pvalue=pvalue_f,  # name convention, using F here
                       statistic_base=stat0,
                       statistic_chi2=statistic_chi2,
                       pvalue_chi2=pvalue_chi2,
                       df_chi2=df_chi2,
                       distr_chi2='chi2',
                       statistic_f=statistic_f,
                       pvalue_f=pvalue_f,
                       df_f=df_f,
                       distr_f='F')
|
# -*- encoding: utf-8 -*-
"""Highlighted unittest command line interface.
"""
import sys
import argparse
import os
import unittest
import operator
import time
from textwrap import dedent
import subprocess
from hunittest.line_printer import LinePrinter
from hunittest.unittestresultlib import HTestResult
from hunittest.filter_rules import RuleOperator
from hunittest.filter_rules import FilterAction
from hunittest.filter_rules import PatternType
from hunittest.filter_rules import FilterRules
from hunittest.collectlib import collect_all
from hunittest.completionlib import test_spec_completer
from hunittest.collectlib import setup_top_level_directory
try:
import argcomplete
except ImportError:
sys.stderr.write("info: you can get shell completion by installing "
"'argcomplete'\n")
ARGCOMPLETE_ENABLED = False
else:
ARGCOMPLETE_ENABLED = True
def reported_collect(printer, test_specs, pattern, filter_rules):
    """Collect test names matching *pattern*, reporting progress on *printer*.

    Returns the list of collected test names after applying *filter_rules*.
    """
    collected = []
    for test_name in filter_rules(collect_all(test_specs, pattern)):
        collected.append(test_name)
        printer.overwrite("collecting {:d}: {}"
                          .format(len(collected), test_name))
    if collected:
        printer.overwrite("collected {:d} test(s)".format(len(collected)))
    else:
        printer.overwrite("no test collected")
    return collected
def complete_arg(arg, completer):
    """Attach *completer* to *arg* when argcomplete is available."""
    if not ARGCOMPLETE_ENABLED:
        return arg
    arg.completer = completer
    return arg
EPILOG = \
"""
Copyright (c) 2015, Nicolas Desprès
All rights reserved.
"""
def git_describe(cwd="."):
    """Return the description of this repository.

    Runs git-describe(1) in *cwd* because the feature is not available
    in pygit2 version 0.22.0.
    """
    # TODO(Nicolas Despres): Use pygit2 ASAP.
    output = subprocess.check_output(
        ["git", "describe", "--always", "--dirty", "--match", "v*"],
        cwd=cwd)
    return output.decode().strip()
def get_version():
    """Version string of the repository containing this module."""
    here = os.path.dirname(os.path.realpath(__file__))
    return git_describe(cwd=here)
def build_cli():
    """Build and return the argparse command line parser for hunittest."""
    def top_level_directory_param(param_str):
        """Argparse ``type=`` hook: expand and validate a directory path."""
        top_level_directory = param_str
        # Expand "~", then environment variables, then make absolute.
        for preproc in (os.path.expanduser,
                        os.path.expandvars,
                        os.path.abspath):
            top_level_directory = preproc(top_level_directory)
        if not os.path.isdir(top_level_directory):
            raise argparse.ArgumentTypeError("must be a directory: '{}'"
                                             .format(param_str))
        return top_level_directory
    parser = argparse.ArgumentParser(
        description=__doc__,
        epilog=dedent(EPILOG),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="Disable smart terminal output.")
    parser.add_argument(
        "-p", "--pattern",
        action="store",
        default=r"^test_",
        help="Only module name matching this pattern gets collected")
    # Glob-based include/exclude filter rules.
    parser.add_argument(
        "-e", "--exclude",
        metavar="GLOB_PATTERN",
        action=FilterAction,
        filter_rule_operator=RuleOperator.exclude,
        pattern_type=PatternType.glob,
        help="Add an exclude glob pattern filter rule.")
    parser.add_argument(
        "-i", "--include",
        metavar="GLOB_PATTERN",
        action=FilterAction,
        filter_rule_operator=RuleOperator.include,
        pattern_type=PatternType.glob,
        help="Add an include glob pattern filter rule.")
    # Regex-based include/exclude filter rules.
    parser.add_argument(
        "--re",
        metavar="REGEX_PATTERN",
        action=FilterAction,
        filter_rule_operator=RuleOperator.exclude,
        pattern_type=PatternType.regex,
        help="Add an exclude regex pattern filter rule.")
    parser.add_argument(
        "--ri",
        metavar="REGEX_PATTERN",
        action=FilterAction,
        filter_rule_operator=RuleOperator.include,
        pattern_type=PatternType.regex,
        help="Add an include regex pattern filter rule.")
    parser.add_argument(
        "-c", "--collect-only",
        action="store_true",
        help="Only collect test (do not run anything).")
    parser.add_argument(
        "-f", "--failfast",
        action="store_true",
        help="Stop on first failure")
    parser.add_argument(
        "-q", "--quiet",
        action="store_true",
        help="Print nothing. Exit status is the outcome.")
    parser.add_argument(
        "-t", "--top-level-directory",
        type=top_level_directory_param,
        action="store",
        default=os.getcwd(),
        help="Top level directory of project")
    parser.add_argument(
        "--version",
        action="store_true",
        help="Print version information and exit")
    arg = parser.add_argument(
        "test_specs",
        action="store",
        nargs=argparse.REMAINDER,
        default=None,
        help="Test directory/module/TestCase/test_method.")
    # Attach shell completion for test specs when argcomplete is available.
    complete_arg(arg, test_spec_completer)
    return parser
def main(argv):
    """Command line entry point.

    Exit codes: 0 when the test suite succeeds (or only collection/version
    printing was requested), 1 when the suite fails, 2 when an internal
    error happens.
    """
    cli = build_cli()
    if ARGCOMPLETE_ENABLED:
        argcomplete.autocomplete(cli)
    options = cli.parse_args(argv[1:])
    if options.version:
        print(get_version())
        return 0
    setup_top_level_directory(options.top_level_directory)
    filter_rules = options.filter_rules
    if filter_rules is None:
        filter_rules = FilterRules()
    test_specs = options.test_specs
    if not test_specs:
        # NOTE(review): get_current_packages is not imported in this module,
        # so this branch raises NameError — confirm where it should come from.
        test_specs = list(get_current_packages())
    isatty = False if options.verbose else None
    printer = LinePrinter(isatty=isatty, quiet=options.quiet)
    try:
        test_names = reported_collect(printer, test_specs, options.pattern,
                                      filter_rules)
        if options.collect_only:
            printer.new_line()
            return 0
        test_suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
        result = HTestResult(printer, len(test_names),
                             failfast=options.failfast)
        test_suite.run(result)
        result.print_summary()
        printer.new_line()
        return 0 if result.wasSuccessful() else 1
    except Exception:
        # Fixed: the bound exception variable was unused; the printer
        # formats the active exception itself.
        printer.write_exception()
        return 2
if __name__ == "__main__":
sys.exit(main(sys.argv))
Document exit codes.
# -*- encoding: utf-8 -*-
"""Highlighted unittest command line interface.
"""
import sys
import argparse
import os
import unittest
import operator
import time
from textwrap import dedent
import subprocess
from hunittest.line_printer import LinePrinter
from hunittest.unittestresultlib import HTestResult
from hunittest.filter_rules import RuleOperator
from hunittest.filter_rules import FilterAction
from hunittest.filter_rules import PatternType
from hunittest.filter_rules import FilterRules
from hunittest.collectlib import collect_all
from hunittest.completionlib import test_spec_completer
from hunittest.collectlib import setup_top_level_directory
try:
import argcomplete
except ImportError:
sys.stderr.write("info: you can get shell completion by installing "
"'argcomplete'\n")
ARGCOMPLETE_ENABLED = False
else:
ARGCOMPLETE_ENABLED = True
def reported_collect(printer, test_specs, pattern, filter_rules):
    """Collect test names matching *pattern*, reporting progress on *printer*.

    Returns the list of collected test names after applying *filter_rules*.
    """
    collected = []
    for test_name in filter_rules(collect_all(test_specs, pattern)):
        collected.append(test_name)
        printer.overwrite("collecting {:d}: {}"
                          .format(len(collected), test_name))
    if collected:
        printer.overwrite("collected {:d} test(s)".format(len(collected)))
    else:
        printer.overwrite("no test collected")
    return collected
def complete_arg(arg, completer):
    """Attach *completer* to *arg* when argcomplete is available."""
    if not ARGCOMPLETE_ENABLED:
        return arg
    arg.completer = completer
    return arg
EPILOG = \
"""
Exit code:
0 - test suite was successful
1 - test suite was not successful
2 - an internal error happened.
Copyright (c) 2015, Nicolas Desprès
All rights reserved.
"""
def git_describe(cwd="."):
    """Return the description of this repository.

    Runs git-describe(1) in *cwd* because the feature is not available
    in pygit2 version 0.22.0.
    """
    # TODO(Nicolas Despres): Use pygit2 ASAP.
    output = subprocess.check_output(
        ["git", "describe", "--always", "--dirty", "--match", "v*"],
        cwd=cwd)
    return output.decode().strip()
def get_version():
    """Version string of the repository containing this module."""
    here = os.path.dirname(os.path.realpath(__file__))
    return git_describe(cwd=here)
def build_cli():
    """Build and return the argparse command line parser for hunittest."""
    def top_level_directory_param(param_str):
        """Argparse ``type=`` hook: expand and validate a directory path."""
        top_level_directory = param_str
        # Expand "~", then environment variables, then make absolute.
        for preproc in (os.path.expanduser,
                        os.path.expandvars,
                        os.path.abspath):
            top_level_directory = preproc(top_level_directory)
        if not os.path.isdir(top_level_directory):
            raise argparse.ArgumentTypeError("must be a directory: '{}'"
                                             .format(param_str))
        return top_level_directory
    parser = argparse.ArgumentParser(
        description=__doc__,
        epilog=dedent(EPILOG),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="Disable smart terminal output.")
    parser.add_argument(
        "-p", "--pattern",
        action="store",
        default=r"^test_",
        help="Only module name matching this pattern gets collected")
    # Glob-based include/exclude filter rules.
    parser.add_argument(
        "-e", "--exclude",
        metavar="GLOB_PATTERN",
        action=FilterAction,
        filter_rule_operator=RuleOperator.exclude,
        pattern_type=PatternType.glob,
        help="Add an exclude glob pattern filter rule.")
    parser.add_argument(
        "-i", "--include",
        metavar="GLOB_PATTERN",
        action=FilterAction,
        filter_rule_operator=RuleOperator.include,
        pattern_type=PatternType.glob,
        help="Add an include glob pattern filter rule.")
    # Regex-based include/exclude filter rules.
    parser.add_argument(
        "--re",
        metavar="REGEX_PATTERN",
        action=FilterAction,
        filter_rule_operator=RuleOperator.exclude,
        pattern_type=PatternType.regex,
        help="Add an exclude regex pattern filter rule.")
    parser.add_argument(
        "--ri",
        metavar="REGEX_PATTERN",
        action=FilterAction,
        filter_rule_operator=RuleOperator.include,
        pattern_type=PatternType.regex,
        help="Add an include regex pattern filter rule.")
    parser.add_argument(
        "-c", "--collect-only",
        action="store_true",
        help="Only collect test (do not run anything).")
    parser.add_argument(
        "-f", "--failfast",
        action="store_true",
        help="Stop on first failure")
    parser.add_argument(
        "-q", "--quiet",
        action="store_true",
        help="Print nothing. Exit status is the outcome.")
    parser.add_argument(
        "-t", "--top-level-directory",
        type=top_level_directory_param,
        action="store",
        default=os.getcwd(),
        help="Top level directory of project")
    parser.add_argument(
        "--version",
        action="store_true",
        help="Print version information and exit")
    arg = parser.add_argument(
        "test_specs",
        action="store",
        nargs=argparse.REMAINDER,
        default=None,
        help="Test directory/module/TestCase/test_method.")
    # Attach shell completion for test specs when argcomplete is available.
    complete_arg(arg, test_spec_completer)
    return parser
def main(argv):
    """Command line entry point.

    Exit codes: 0 when the test suite succeeds (or only collection/version
    printing was requested), 1 when the suite fails, 2 when an internal
    error happens.
    """
    cli = build_cli()
    if ARGCOMPLETE_ENABLED:
        argcomplete.autocomplete(cli)
    options = cli.parse_args(argv[1:])
    if options.version:
        print(get_version())
        return 0
    setup_top_level_directory(options.top_level_directory)
    filter_rules = options.filter_rules
    if filter_rules is None:
        filter_rules = FilterRules()
    test_specs = options.test_specs
    if not test_specs:
        # NOTE(review): get_current_packages is not imported in this module,
        # so this branch raises NameError — confirm where it should come from.
        test_specs = list(get_current_packages())
    isatty = False if options.verbose else None
    printer = LinePrinter(isatty=isatty, quiet=options.quiet)
    try:
        test_names = reported_collect(printer, test_specs, options.pattern,
                                      filter_rules)
        if options.collect_only:
            printer.new_line()
            return 0
        test_suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
        result = HTestResult(printer, len(test_names),
                             failfast=options.failfast)
        test_suite.run(result)
        result.print_summary()
        printer.new_line()
        return 0 if result.wasSuccessful() else 1
    except Exception:
        # Fixed: the bound exception variable was unused; the printer
        # formats the active exception itself.
        printer.write_exception()
        return 2
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
# Giles: chat.py
# Copyright 2012 Phil Bordelon
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from giles.state import State
from giles.utils import name_is_valid
import traceback
CHANNEL = "channel"
PLAYER = "player"
TABLE = "table"
class Chat(object):
def __init__(self, server):
    """Create the chat handler bound to *server*."""
    # Keep a reference to the server so handlers can reach shared state
    # (spaces, channel manager, logging).
    self.server = server
def handle(self, player):
    """Advance the chat state machine for *player* by one step.

    The substate cycles: None (fresh arrival) -> "prompt" (print the
    prompt) -> "input" (read and dispatch one command) -> "prompt" ...
    """
    state = player.state
    substate = state.get_sub()
    # IDIOM FIX: compare to None with ``is`` rather than ``==``.
    if substate is None:
        # The player just entered chat. Welcome them, place them, subscribe
        # them to the global channel.
        player.tell("\nWelcome to chat. For help, type 'help' (without the quotes).\n\n")
        player.move(self.server.get_space("main"),
                    custom_join="^!%s^. has connected to the server.\n" % player)
        self.list_players_in_space(player.location, player)
        self.server.channel_manager.connect(player, "global")
        # Turn timestamps on for them.
        player.config["timestamps"] = True
        state.set_sub("prompt")
    elif substate == "prompt":
        player.prompt()
        state.set_sub("input")
    elif substate == "input":
        command = player.client.get_command()
        if command:
            # Wipe out extraneous whitespace.
            command = command.strip()
            if len(command):
                # We got what might be a legitimate command. Parse and
                # manage it. First, see if the player is focused or not.
                # If they are, we direct everything that doesn't begin
                # with a '/' to their table; otherwise we send it to the
                # standard parser. If they're not focused, we just punt
                # them to the standard parser to begin with.
                focus_table = player.config["focus_table"]
                if focus_table:
                    if command[0] in ('/',):
                        # Make sure the subcommand is actually something.
                        possible_command = command[1:].strip()
                        if len(possible_command):
                            self.parse(command[1:], player)
                        else:
                            state.set_sub("prompt")
                    else:
                        self.table("%s %s" % (focus_table, command), player)
                        # We have to reprompt here.
                        state.set_sub("prompt")
                else:
                    self.parse(command, player)
            else:
                # Just whitespace. Reprompt.
                state.set_sub("prompt")
def parse(self, command, player):
    """Dispatch one chat *command* entered by *player*.

    Single leading punctuation characters are shortcuts for longer
    commands; everything else is tokenized and dispatched on its first
    word.  Unless the player quit, their state is returned to the
    "prompt" substate afterwards.
    """
    did_quit = False
    # First, handle the weird cases: starting characters with text
    # immediately after. These are shortcuts for longer commands.
    # Everything else is either a token of its own or will be
    # tokenized further by another handler.
    if command[0] in ('"', "'"):
        # It's a say. Handle it that way.
        self.say(command[1:].strip(), player)
    elif command[0] in ('-', ','):
        # It's an emote.
        self.emote(command[1:].strip(), player)
    elif command[0] in (':',):
        # It's a send to a channel.
        self.send(command[1:].strip(), player)
    elif command[0] in (';',):
        # It's a send to the last channel.
        self.last_send(command[1:].strip(), player)
    elif command[0] in ('>',):
        # It's a tell.
        self.tell(command[1:].strip(), player)
    elif command[0] in ('/',):
        # It's a command for a game table.
        self.table(command[1:].strip(), player)
    elif command[0] in ('\\',):
        # It's a command for the last game table.
        self.last_table(command[1:].strip(), player)
    else:
        # All right, now we're into actual commands. Split into components,
        # lowercase the first one, and pass the rest off as necessary.
        command_elements = command.split()
        primary = command_elements[0].lower()
        if len(command_elements) > 1:
            secondary = " ".join(command_elements[1:])
        else:
            secondary = None
        # Word commands and their abbreviations, dispatched to the
        # matching handler method.
        if primary in ('say',):
            self.say(secondary, player)
        elif primary in ('emote', 'me', 'em'):
            self.emote(secondary, player)
        elif primary in ('connect', 'co'):
            self.connect(secondary, player)
        elif primary in ('disconnect', 'dc'):
            self.disconnect(secondary, player)
        elif primary in ('channels', 'chan'):
            self.channels(player)
        elif primary in ('invite', 'inv'):
            self.invite(secondary, player)
        elif primary in ('send',):
            self.send(secondary, player)
        elif primary in ('tell', 't'):
            self.tell(secondary, player)
        elif primary in ('move', 'm'):
            self.move(secondary, player)
        elif primary in ('who', 'w'):
            self.who(player)
        elif primary in ('game', 'games', 'g'):
            self.game(secondary, player)
        elif primary in ('table', 'tab'):
            self.table(secondary, player)
        elif primary in ('roll', 'r'):
            self.roll(secondary, player, secret=False)
        elif primary in ('sroll', 'sr'):
            self.roll(secondary, player, secret=True)
        elif primary in ('set',):
            self.config(secondary, player)
        elif primary in ('alias',):
            self.alias(secondary, player)
        elif primary in ('become',):
            self.become(secondary, player)
        elif primary in ('help', 'h', '?'):
            self.show_help(player)
        elif primary in ('admin',):
            self.admin(secondary, player)
        elif primary in ('focus', 'f'):
            self.focus(secondary, player)
        elif primary in ('unfocus', 'unf'):
            self.unfocus(player)
        elif primary in ('quit', 'exit',):
            self.quit(player)
            did_quit = True
        else:
            player.tell_cc("Unknown command. Type ^!help^. for help.\n")
    # Unless the player quit, we'll want to go back to the prompt.
    if not did_quit:
        player.state.set_sub("prompt")
def say(self, message, player):
    """Broadcast a spoken *message* from *player* to their location."""
    if not message:
        player.tell("You must actually say something worthwhile.\n")
        return
    player.location.notify_cc("^Y%s^~: %s^~\n" % (player, message))
    self.server.log.log("[%s] %s: %s" % (player.location.name, player, message))
def emote(self, message, player):
    """Broadcast an emote (action text) from *player* to their location."""
    if not message:
        player.tell("You must actually emote something worthwhile.\n")
        return
    player.location.notify_cc("^Y%s^~ %s^~\n" % (player, message))
    self.server.log.log("[%s] %s %s" % (player.location.name, player, message))
def connect(self, connect_str, player):
    """Join *player* to a channel, optionally with a key.

    The first word of *connect_str* is the channel (possibly an alias);
    any remaining words form the channel key.
    """
    if not connect_str:
        player.tell("You must give a channel to connect to.\n")
        return
    connect_bits = connect_str.split()
    # De-alias; bail if it fails.
    channel_name = self.de_alias(player, connect_bits[0], CHANNEL)
    if not channel_name:
        return
    manager = self.server.channel_manager
    if len(connect_bits) == 1:
        # Channel with no key.
        did_connect = manager.connect(player, channel_name)
    else:
        # Everything after the channel name is the key.
        did_connect = manager.connect(player, channel_name,
                                      " ".join(connect_bits[1:]))
    if did_connect:
        player.config["last_channel"] = channel_name
    else:
        player.tell("Failed to connect to channel.\n")
def disconnect(self, disconnect_str, player):
    """Remove *player* from the channel named in *disconnect_str*."""
    if not disconnect_str:
        player.tell("You must give a channel to disconnect from.\n")
        return
    # De-alias; bail if it fails.
    channel_name = self.de_alias(player, disconnect_str, CHANNEL)
    if channel_name:
        self.server.channel_manager.disconnect(player, channel_name)
def channels(self, player):
    """List the channels *player* is currently connected to."""
    names = self.server.channel_manager.list_player_channel_names(player)
    if not names:
        player.tell("You are not connected to any channels.\n")
        return
    player.tell("Channels you're connected to:\n\n")
    for name in names:
        player.tell_cc(" ^G%s^~\n" % name)
def invite(self, payload, player):
    """Invite another player to a channel: payload is "<player> <channel>".

    Validates, in order, that the channel exists, the invitee is online,
    the inviter is actually in the channel, the invitee is not already
    in it, and that the inviter is not inviting emself.  On success both
    parties are notified; the invitation includes the channel key when
    one is set.
    """
    # Need, at a minimum, two bits: the invitee and the channel.
    if payload:
        elements = payload.split()
        if len(elements) < 2:
            player.tell("You must give a player and a channel.\n")
            return
        target = elements[0]
        intended_channel = elements[1]
        invite_channel = self.server.channel_manager.has_channel(intended_channel)
        invite_player = self.server.get_player(target)
        if not invite_channel:
            player.tell_cc("^!%s^~ doesn't even exist.\n" % (intended_channel))
            self.server.log.log("%s invited to nonextant channel :%s" %
                (player, intended_channel))
        elif not invite_player:
            # Bug fix: invite_player is falsy in this branch, so print the
            # name the player actually typed instead.
            player.tell_cc("^!%s^~ does not appear to be connected.\n" %
                (target))
            self.server.log.log("Non-extant player %s invited to %s by %s" %
                (target, intended_channel, player))
        elif not invite_channel.is_connected(player):
            player.tell("You can't invite to a channel you're not in.\n")
            self.server.log.log("%s wasn't in %s but tried to invite %s there anyhow" %
                (player, invite_channel, invite_player))
        elif invite_channel.is_connected(invite_player):
            player.tell_cc("^!%s^~ is already in that channel.\n" %
                (invite_player))
            self.server.log.log("%s invited %s to %s, where ey already was." %
                (player, invite_player, invite_channel))
        elif invite_player == player:
            player.tell("Sending an invitation to yourself would be a waste of 47 cents.\n")
            self.server.log.log("%s invited emself to %s." %
                (player, invite_channel))
        else:
            # Okay, the player is on the channel, and the other player is online and not already in the channel.
            msg_first = ("You invite ^!%s^~ to :^!%s^~.\n" %
                (invite_player, invite_channel))
            # Bug fix: the invitation comes from the inviting player, not
            # from the invitee emself.
            msg_second = ("You have been invited to :^!%s^~ by ^!%s^~.\n" %
                (invite_channel, player))
            msg_second += ("To join, type: ^!connect %s " %
                (invite_channel))
            # Let's see whether the channel's keyed or not.
            if invite_channel.key:
                msg_second += invite_channel.key
            msg_second += "^~\n"
            msg_log = ("%s invites %s to :%s" %
                (player, invite_player, invite_channel))
            player.tell_cc(msg_first)
            invite_player.tell_cc(msg_second)
            self.server.log.log(msg_log)
    else:
        player.tell("You must give a player and a channel.\n")
def send(self, send_str, player):
    """Send a message to a channel: send_str is "<channel> <message...>".

    The channel may be a numeric alias; on success the player's
    "last_channel" is updated so ';' can reuse it.
    """
    # Need, at a minimum, two bits: the channel and the message.
    if send_str:
        send_str_bits = send_str.split()
        if len(send_str_bits) < 2:
            player.tell("You must give both a channel and a message.\n")
            return
        # De-alias the channel name; bail if it fails.
        channel_name = self.de_alias(player, send_str_bits[0], CHANNEL)
        if not channel_name:
            return
        success = self.server.channel_manager.send(player, " ".join(send_str_bits[1:]),
            channel_name)
        if not success:
            player.tell("Failed to send.\n")
        else:
            player.config["last_channel"] = channel_name
    else:
        # Consistency fix: previously an empty send was a silent no-op;
        # every sibling command reports empty input, so do the same here.
        player.tell("You must give both a channel and a message.\n")
def last_send(self, send_str, player):
    """Send text to the channel the player most recently used."""
    channel_name = player.config["last_channel"]
    if not channel_name:
        player.tell("You must have a last channel to use this command.\n")
        return
    # Collapse runs of whitespace in the outgoing text.
    to_send = " ".join(send_str.split())
    if not to_send:
        player.tell("You must actually send some text.\n")
        return
    self.server.channel_manager.send(player, to_send, channel_name)
def tell(self, payload, player):
    """Send a private message: payload is "<target> <message...>".

    The target may carry a trailing comma ("tell bob, hi") and may be a
    numeric player alias.
    """
    # Need, at a minimum, two bits: the target and the message.
    if payload:
        elements = payload.split()
        if len(elements) < 2:
            player.tell("You must give both a target and a message.\n")
            return
        target = elements[0]
        if target[-1] == ',':
            # Strip comma from target; allows "Tell bob, y helo there"
            target = target[:-1]
        # De-alias the target. Return if dealiasing failed.
        target = self.de_alias(player, target, PLAYER)
        if not target:
            return
        other = self.server.get_player(target)
        if other == player:
            player.tell("Talking to yourself?\n")
        elif other:
            msg = " ".join(elements[1:])
            other.tell_cc("^R%s^~ tells you: %s\n" % (player, msg))
            player.tell_cc("You tell ^R%s^~: %s\n" % (other, msg))
            self.server.log.log("%s tells %s: %s" % (player, other, msg))
        else:
            player.tell_cc("Player ^R%s^~ not found.\n" % target)
    else:
        player.tell("You must give a player and a message.\n")
def list_players_in_space(self, location, player):
    """Show the players in location, alternating bold/regular highlighting."""
    player.tell_cc("Players in ^Y%s^~:\n" % location.name)
    pieces = ["   "]
    for position, other in enumerate(location.players):
        # Even positions are bold, odd positions regular.
        if position % 2 == 0:
            pieces.append("^!%s^. " % other)
        else:
            pieces.append("%s " % other)
    player.tell_cc("".join(pieces) + "\n\n")
def list_players_not_in_space(self, location, player):
    """Show every connected player who is not in location, alternating highlight."""
    player.tell_cc("Players elsewhere:\n")
    pieces = ["   "]
    shown = 0
    for other in self.server.players:
        if other.location != location:
            # Alternate bold/regular based on how many we have printed.
            if shown % 2 == 0:
                pieces.append("^!%s^. " % other)
            else:
                pieces.append("%s " % other)
            shown += 1
    player.tell_cc("".join(pieces) + "\n\n")
def move(self, space_name, player):
    """Move the player to the named space and list who is there."""
    if space_name:
        old_space_name = player.location.name
        player.move(self.server.get_space(space_name))
        self.list_players_in_space(player.location, player)
        self.server.log.log("%s moved from %s to %s." % (player, old_space_name, space_name))
    else:
        player.tell("You must give a space to move to.\n")
def who(self, player):
    """Show who shares the player's space and who is elsewhere."""
    player.tell("\n")
    here = player.location
    self.list_players_in_space(here, player)
    self.list_players_not_in_space(here, player)
def roll(self, roll_string, player, secret=False):
    """Roll dice described by roll_string; secret=True hides the result."""
    if roll_string:
        self.server.die_roller.roll(roll_string, player, secret)
        self.server.log.log("%s rolled %s." % (player, roll_string))
    else:
        player.tell("Invalid roll.\n")
# List of shortcuts for "list" and "new".
# Class-level tuples so game() can cheaply recognize the command variants.
_GAME_LIST_COMMANDS = ('list', 'ls', 'l')
_GAME_NEW_COMMANDS = ('new', 'n')
def game(self, game_string, player):
    """Handle the "game" command family.

    Forms: no args or "list [tag]" -> list games; "active" -> list
    tables; "new <game> <table>" and
    "new [private] <scope> <game> <table>" -> create a table.  Creating
    a table also updates the player's last table/channel.
    """
    valid = False
    made_new_table = False
    if game_string:
        string_bits = game_string.split()
        primary = string_bits[0].lower()
        if len(string_bits) == 1:
            if primary in self._GAME_LIST_COMMANDS:
                self.server.game_master.list_games(player)
                valid = True
            elif primary in ('active', 'ac', 'a'):
                self.server.game_master.list_tables(player, show_private=False)
                valid = True
        elif len(string_bits) == 2:
            # Possibly a request to list games with a tag.
            if primary in self._GAME_LIST_COMMANDS:
                tag = string_bits[1].lower()
                self.server.game_master.list_games(player, tag)
                valid = True
        elif len(string_bits) == 3:
            # First is new, second is game, third is table.
            if primary in self._GAME_NEW_COMMANDS:
                # De-alias the table; bail if it fails.
                table_name = self.de_alias(player, string_bits[2], TABLE)
                if not table_name:
                    return
                valid = self.server.game_master.new_table(player,
                    string_bits[1], table_name)
                if valid:
                    made_new_table = True
        elif len(string_bits) == 4 or len(string_bits) == 5:
            # New, [private], scope, game, table.
            # Assume we didn't get a private command...
            valid_so_far = True
            private = False
            offset = 0
            if len(string_bits) == 5:
                # Ah, we did. Set the private flag and move the scope over.
                if string_bits[1].lower() in ('private', 'pr', 'p'):
                    private = True
                    offset = 1
                    valid_so_far = True
                else:
                    valid_so_far = False
            if valid_so_far:
                # Normalize the scope word; reject anything unrecognized.
                scope = string_bits[1 + offset].lower()
                if scope in ('personal', 'p'):
                    scope = "personal"
                elif scope in ('global', 'g'):
                    if private:
                        # A private global game? Makes no sense.
                        valid_so_far = False
                    scope = "global"
                elif scope in ('local', 'l'):
                    scope = "local"
                else:
                    valid_so_far = False
            if valid_so_far and primary in self._GAME_NEW_COMMANDS:
                # De-alias the table; bail if it fails.
                table_name = self.de_alias(player, string_bits[3 + offset], TABLE)
                if not table_name:
                    return
                valid = self.server.game_master.new_table(player,
                    string_bits[2 + offset], table_name, scope, private)
                if valid:
                    made_new_table = True
    else:
        self.server.game_master.list_games(player)
        valid = True
    if not valid:
        player.tell("Invalid game command.\n")
    # If we made a new table, set the player's last table and channel.
    if made_new_table:
        player.config["last_table"] = table_name
        player.config["last_channel"] = table_name
        player.tell_cc("Your last table and channel have been set to ^R%s^~.\n" % table_name)
def table(self, table_string, player):
    """Send a command to a table: table_string is "<table> <cmd...>".

    The table may be a numeric alias; on success "last_table" is
    updated so '\\' can reuse it.
    """
    valid = False
    if table_string:
        # There must be at least two bits: the table name and a command.
        string_bits = table_string.split()
        if len(string_bits) > 1:
            # De-alias the table name and bail if it fails.
            table_name = self.de_alias(player, string_bits[0], TABLE)
            if not table_name:
                return
            self.server.game_master.handle(player, table_name,
                " ".join(string_bits[1:]))
            player.config["last_table"] = table_name
            valid = True
    if not valid:
        player.tell("Invalid table command.\n")
def last_table(self, command_string, player):
    """Send command_string to the table the player used most recently."""
    table_name = player.config["last_table"]
    if not table_name:
        player.tell("You must have a last table to use this command.\n")
        return
    if not command_string:
        player.tell("Invalid table command.\n")
        return
    # Collapse whitespace and pass it on to the game master.
    normalized = " ".join(command_string.split())
    self.server.game_master.handle(player, table_name, normalized)
def focus(self, table_name, player):
    """Focus the player on a table so bare input goes straight to it."""
    if not table_name:
        player.tell("You must have a table to focus on.\n")
        return
    table = self.server.game_master.get_table(table_name)
    if table:
        # Store the table's canonical name, not what the player typed.
        player.config["focus_table"] = table.table_name
        player.tell_cc("You are now focused on ^G%s^~.\n" % table.table_name)
    else:
        player.tell("You cannot focus on a nonexistent table.\n")
def unfocus(self, player):
    """Clear the player's focused table, if any."""
    if player.config["focus_table"]:
        player.config["focus_table"] = None
        player.tell("You are no longer focused on a table.\n")
    else:
        player.tell("You are already unfocused.\n")
def config(self, config_string, player):
    """Hand a "set" command to the server's configurator.

    Failures are caught and logged so a bad config command cannot
    crash the chat loop.
    """
    try:
        self.server.configurator.handle(config_string, player)
    except Exception as e:
        player.tell("Something went horribly awry with configuration.\n")
        self.server.log.log("Configuration failed: %s" % e)
def de_alias(self, player, alias_str, alias_type):
    """Resolve a numeric alias of the given type for this player.

    Returns alias_str unchanged when it is not numeric, the aliased
    name when the number is mapped, or None (after telling the player)
    when the number is unmapped or the type is unknown.
    """
    # Non-numeric strings pass straight through untouched.
    if not alias_str.isdigit():
        return alias_str
    # Map the alias type onto the player's config dictionary for it.
    config_keys = {
        CHANNEL: "channel_aliases",
        PLAYER: "player_aliases",
        TABLE: "table_aliases",
    }
    if alias_type not in config_keys:
        return None
    alias_dict = player.config[config_keys[alias_type]]
    alias_num = int(alias_str)
    if alias_num in alias_dict:
        return alias_dict[alias_num]
    player.tell_cc("^R%d^~ is not aliased!\n" % alias_num)
    return None
def alias(self, alias_string, player):
    """Create a numeric alias: alias_string is "<type> <name> <number>".

    Type is channel/player/table (with abbreviations); number must be
    0-99.  Returns True on success, False on any validation failure.
    """
    if not alias_string:
        player.tell("Invalid alias command.\n")
        return False
    alias_bits = alias_string.split()
    # Bail if we didn't get three bits.
    if len(alias_bits) != 3:
        player.tell("Invalid alias command.\n")
        return False
    # Extract the values from the bits.
    a_type = alias_bits[0]
    a_name = alias_bits[1]
    a_num = alias_bits[2]
    # Bail if the name isn't valid.
    if not name_is_valid(a_name):
        player.tell("Cannot alias an invalid name.\n")
        return False
    # Bail if the number isn't a number or is > 99. Convert otherwise.
    if not a_num.isdigit():
        player.tell("Cannot alias to a non-number.\n")
        return False
    a_num = int(a_num)
    if a_num > 99:
        player.tell("Cannot alias to a number greater than 99.\n")
        return False
    # Get the type that we're aliasing. If it's invalid, we'll bail.
    if a_type in ("channel", "chan", "ch", "c",):
        alias_dict = player.config["channel_aliases"]
        type_str = "channel"
    elif a_type in ("player", "pl", "p",):
        alias_dict = player.config["player_aliases"]
        type_str = "player"
    elif a_type in ("table", "tab", "ta", "t",):
        alias_dict = player.config["table_aliases"]
        type_str = "table"
    else:
        player.tell("Invalid type to alias to. Must be one of channel, player, or table.\n")
        return False
    # Is this already an alias?
    addendum_str = ""
    if a_num in alias_dict:
        addendum_str = ", ^Rreplacing^~ ^c%s^~" % alias_dict[a_num]
    # Either way, add the new alias.
    alias_dict[a_num] = a_name
    player.tell_cc("^C%d^~ is now a ^M%s^~ alias for ^G%s^~%s.\n" % (a_num, type_str, a_name, addendum_str))
    return True
def become(self, new_name, player):
    """Attempt to rename the player, announcing the change on success."""
    success = False
    if new_name:
        previous_name = player.display_name
        success = player.set_name(new_name)
        if success:
            player.location.notify_cc("^Y%s^~ has become ^Y%s^~.\n" % (previous_name, player))
    if not success:
        player.tell("Failed to become.\n")
def show_help(self, player):
    """Print the general command reference to the player and log the request."""
    player.tell("\n\nCOMMUNICATION:\n")
    player.tell_cc(" ^!'^.<message>, ^!\"^. Say <message>.\n")
    player.tell_cc(" ^!-^.<emote>, ^!,^. Emote <emote>.\n")
    player.tell_cc(" ^!tell^. <player> <msg>, ^!t^., ^!>^. Tell <player> <msg> privately.\n")
    player.tell_cc(" ^!connect^. <channel> [<k>], ^!co^. Connect to <channel> [with key <k>].\n")
    player.tell_cc(" ^!disconnect^. <channel>, ^!dc^. Disconnect from <channel>.\n")
    player.tell_cc(" ^!invite^. <player> <channel> Invite <player> to <channel>.\n")
    player.tell_cc(" ^!send^. <channel> <message>, ^!:^. Send <channel> <message>.\n")
    player.tell_cc(" ^!;^.<message> Send the last channel used <message>.\n")
    player.tell("\nWORLD INTERACTION:\n")
    player.tell_cc(" ^!move^. <space>, ^!m^. Move to space <space>.\n")
    player.tell_cc(" ^!who^., ^!w^. List players in your space/elsewhere.\n")
    player.tell("\nGAMING:\n")
    player.tell_cc(" ^!game^. list, ^!g^. ls List available games.\n")
    player.tell_cc(" ^!game^. active, ^!g^. ac List active tables.\n")
    player.tell_cc(" ^!game^. new <game> <tablename> New table of <game> named <tablename>.\n")
    player.tell_cc(" ^!table^. <table> <cmd>, ^!/^. Send <table> <cmd>.\n")
    player.tell_cc(" ^!\\^.<cmd> Send the last table played <cmd>.\n")
    player.tell_cc(" ^!roll^. [X]d<Y>[+/-/*<Z>], ^!r^. Roll [X] Y-sided/F/% dice [modified].\n")
    player.tell_cc(" ^!sroll^. [X]d<Y>[+/-/*<Z>], ^!sr^. Secret roll.\n")
    player.tell("\nCONFIGURATION:\n")
    player.tell_cc("^!set timestamp^. on|off, ^!set ts^. Enable/disable timestamps.\n")
    player.tell_cc(" ^!set color^. on|off, ^!set c^. Enable/disable color.\n")
    player.tell("\nMETA:\n")
    player.tell_cc(" ^!become^. <newname> Set name to <newname>.\n")
    player.tell_cc(" ^!alias^. <type> <name> <num> Alias table/channel <name> to <num>.\n")
    player.tell_cc(" ^!help^., ^!?^. Print this help.\n")
    player.tell_cc(" ^!quit^. Disconnect.\n")
    self.server.log.log("%s asked for general help." % player)
def admin(self, admin_str, player):
    """Pass admin_str to the admin manager, logging a traceback on any crash."""
    try:
        self.server.admin_manager.handle(player, admin_str)
    except Exception as e:
        player.tell_cc("The admin manager crashed. ^RAlert an admin^~.\n")
        self.server.log.log("Admin manager crashed.\n" + traceback.format_exc())
def quit(self, player):
    """Deactivate the player's client and switch them to the logout state."""
    player.client.deactivate()
    player.state = State("logout")
    self.server.log.log("%s logged out." % player)
Add 'defocus' as a synonym for 'unfocus'.
It is probably the more accurate word, and so the one that should be preferred.
# Giles: chat.py
# Copyright 2012 Phil Bordelon
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from giles.state import State
from giles.utils import name_is_valid
import traceback
# Alias-type identifiers shared by de_alias() and alias().
CHANNEL = "channel"
PLAYER = "player"
TABLE = "table"
class Chat(object):
def __init__(self, server):
    # Keep a reference to the owning server for access to its managers
    # (channels, games, admin, logging, etc.).
    self.server = server
def handle(self, player):
    """Advance the chat state machine one step for player.

    Substates: None -> first entry (welcome, place in "main", subscribe
    to the global channel); "prompt" -> print the prompt; "input" ->
    read a command and dispatch it to the focused table or the parser.
    """
    state = player.state
    substate = state.get_sub()
    if substate == None:
        # The player just entered chat. Welcome them, place them, subscribe
        # them to the global channel.
        player.tell("\nWelcome to chat. For help, type 'help' (without the quotes).\n\n")
        player.move(self.server.get_space("main"),
            custom_join="^!%s^. has connected to the server.\n" % player)
        self.list_players_in_space(player.location, player)
        self.server.channel_manager.connect(player, "global")
        # Turn timestamps on for them.
        player.config["timestamps"] = True
        state.set_sub("prompt")
    elif substate == "prompt":
        player.prompt()
        state.set_sub("input")
    elif substate == "input":
        command = player.client.get_command()
        if command:
            # Wipe out extraneous whitespace.
            command = command.strip()
            if len(command):
                # We got what might be a legitimate command. Parse and
                # manage it. First, see if the player is focused or not.
                # If they are, we direct everything that doesn't begin
                # with a '/' to their table; otherwise we send it to the
                # standard parser. If they're not focused, we just punt
                # them to the standard parser to begin with.
                focus_table = player.config["focus_table"]
                if focus_table:
                    if command[0] in ('/',):
                        # Make sure the subcommand is actually something.
                        possible_command = command[1:].strip()
                        if len(possible_command):
                            self.parse(command[1:], player)
                        else:
                            state.set_sub("prompt")
                    else:
                        self.table("%s %s" % (focus_table, command), player)
                        # We have to reprompt here.
                        state.set_sub("prompt")
                else:
                    self.parse(command, player)
            else:
                # Just whitespace. Reprompt.
                state.set_sub("prompt")
def parse(self, command, player):
    """Dispatch one stripped command line to the proper handler.

    Leading punctuation characters are shortcuts for longer commands;
    anything else is split into a primary command word plus an optional
    argument string.  Unless the player quit, the prompt substate is
    restored afterwards.
    """
    did_quit = False
    # First, handle the weird cases: starting characters with text
    # immediately after. These are shortcuts for longer commands.
    # Everything else is either a token of its own or will be
    # tokenized further by another handler.
    if command[0] in ('"', "'"):
        # It's a say. Handle it that way.
        self.say(command[1:].strip(), player)
    elif command[0] in ('-', ','):
        # It's an emote.
        self.emote(command[1:].strip(), player)
    elif command[0] in (':',):
        # It's a send to a channel.
        self.send(command[1:].strip(), player)
    elif command[0] in (';',):
        # It's a send to the last channel.
        self.last_send(command[1:].strip(), player)
    elif command[0] in ('>',):
        # It's a tell.
        self.tell(command[1:].strip(), player)
    elif command[0] in ('/',):
        # It's a command for a game table.
        self.table(command[1:].strip(), player)
    elif command[0] in ('\\',):
        # It's a command for the last game table.
        self.last_table(command[1:].strip(), player)
    else:
        # All right, now we're into actual commands. Split into components,
        # lowercase the first one, and pass the rest off as necessary.
        command_elements = command.split()
        primary = command_elements[0].lower()
        if len(command_elements) > 1:
            secondary = " ".join(command_elements[1:])
        else:
            secondary = None
        if primary in ('say',):
            self.say(secondary, player)
        elif primary in ('emote', 'me', 'em'):
            self.emote(secondary, player)
        elif primary in ('connect', 'co'):
            self.connect(secondary, player)
        elif primary in ('disconnect', 'dc'):
            self.disconnect(secondary, player)
        elif primary in ('channels', 'chan'):
            self.channels(player)
        elif primary in ('invite', 'inv'):
            self.invite(secondary, player)
        elif primary in ('send',):
            self.send(secondary, player)
        elif primary in ('tell', 't'):
            self.tell(secondary, player)
        elif primary in ('move', 'm'):
            self.move(secondary, player)
        elif primary in ('who', 'w'):
            self.who(player)
        elif primary in ('game', 'games', 'g'):
            self.game(secondary, player)
        elif primary in ('table', 'tab'):
            self.table(secondary, player)
        elif primary in ('roll', 'r'):
            self.roll(secondary, player, secret=False)
        elif primary in ('sroll', 'sr'):
            self.roll(secondary, player, secret=True)
        elif primary in ('set',):
            self.config(secondary, player)
        elif primary in ('alias',):
            self.alias(secondary, player)
        elif primary in ('become',):
            self.become(secondary, player)
        elif primary in ('help', 'h', '?'):
            self.show_help(player)
        elif primary in ('admin',):
            self.admin(secondary, player)
        elif primary in ('focus', 'f'):
            self.focus(secondary, player)
        elif primary in ('unfocus', 'defocus', 'unf'):
            self.unfocus(player)
        elif primary in ('quit', 'exit',):
            self.quit(player)
            did_quit = True
        else:
            player.tell_cc("Unknown command. Type ^!help^. for help.\n")
    # Unless the player quit, we'll want to go back to the prompt.
    if not did_quit:
        player.state.set_sub("prompt")
def say(self, message, player):
    """Speak a message aloud to everyone in the player's current location."""
    if not message:
        player.tell("You must actually say something worthwhile.\n")
        return
    player.location.notify_cc("^Y%s^~: %s^~\n" % (player, message))
    self.server.log.log("[%s] %s: %s" % (player.location.name, player, message))
def emote(self, message, player):
    """Perform an emote visible to everyone in the player's current location."""
    if not message:
        player.tell("You must actually emote something worthwhile.\n")
        return
    player.location.notify_cc("^Y%s^~ %s^~\n" % (player, message))
    self.server.log.log("[%s] %s %s" % (player.location.name, player, message))
def connect(self, connect_str, player):
    """Connect the player to a channel, optionally with a key.

    connect_str is "<channel> [<key words...>]"; the channel may be a
    numeric alias.  On success the player's "last_channel" is updated.
    """
    # If the string has a single element, it's a channel with no key.
    if connect_str:
        connect_bits = connect_str.split()
        # De-alias; bail if it fails.
        channel_name = self.de_alias(player, connect_bits[0], CHANNEL)
        if not channel_name:
            return
        if len(connect_bits) == 1:
            did_connect = self.server.channel_manager.connect(player, channel_name)
        else:
            # Everything after the channel name is treated as the key.
            did_connect = self.server.channel_manager.connect(player, channel_name, " ".join(connect_bits[1:]))
        if did_connect:
            player.config["last_channel"] = channel_name
        else:
            player.tell("Failed to connect to channel.\n")
    else:
        player.tell("You must give a channel to connect to.\n")
def disconnect(self, disconnect_str, player):
    """Disconnect the player from the named (possibly alias-numbered) channel."""
    if disconnect_str:
        # De-alias; bail if it fails.
        channel_name = self.de_alias(player, disconnect_str, CHANNEL)
        if not channel_name:
            return
        self.server.channel_manager.disconnect(player, channel_name)
    else:
        player.tell("You must give a channel to disconnect from.\n")
def channels(self, player):
    """List the channels the player is currently connected to."""
    channel_list = self.server.channel_manager.list_player_channel_names(player)
    if not channel_list:
        player.tell("You are not connected to any channels.\n")
        return
    player.tell("Channels you're connected to:\n\n")
    for channel in channel_list:
        player.tell_cc("   ^G%s^~\n" % channel)
def invite(self, payload, player):
    """Invite another player to a channel: payload is "<player> <channel>".

    Validates, in order, that the channel exists, the invitee is online,
    the inviter is actually in the channel, the invitee is not already
    in it, and that the inviter is not inviting emself.  On success both
    parties are notified; the invitation includes the channel key when
    one is set.
    """
    # Need, at a minimum, two bits: the invitee and the channel.
    if payload:
        elements = payload.split()
        if len(elements) < 2:
            player.tell("You must give a player and a channel.\n")
            return
        target = elements[0]
        intended_channel = elements[1]
        invite_channel = self.server.channel_manager.has_channel(intended_channel)
        invite_player = self.server.get_player(target)
        if not invite_channel:
            player.tell_cc("^!%s^~ doesn't even exist.\n" % (intended_channel))
            self.server.log.log("%s invited to nonextant channel :%s" %
                (player, intended_channel))
        elif not invite_player:
            # Bug fix: invite_player is falsy in this branch, so print the
            # name the player actually typed instead.
            player.tell_cc("^!%s^~ does not appear to be connected.\n" %
                (target))
            self.server.log.log("Non-extant player %s invited to %s by %s" %
                (target, intended_channel, player))
        elif not invite_channel.is_connected(player):
            player.tell("You can't invite to a channel you're not in.\n")
            self.server.log.log("%s wasn't in %s but tried to invite %s there anyhow" %
                (player, invite_channel, invite_player))
        elif invite_channel.is_connected(invite_player):
            player.tell_cc("^!%s^~ is already in that channel.\n" %
                (invite_player))
            self.server.log.log("%s invited %s to %s, where ey already was." %
                (player, invite_player, invite_channel))
        elif invite_player == player:
            player.tell("Sending an invitation to yourself would be a waste of 47 cents.\n")
            self.server.log.log("%s invited emself to %s." %
                (player, invite_channel))
        else:
            # Okay, the player is on the channel, and the other player is online and not already in the channel.
            msg_first = ("You invite ^!%s^~ to :^!%s^~.\n" %
                (invite_player, invite_channel))
            # Bug fix: the invitation comes from the inviting player, not
            # from the invitee emself.
            msg_second = ("You have been invited to :^!%s^~ by ^!%s^~.\n" %
                (invite_channel, player))
            msg_second += ("To join, type: ^!connect %s " %
                (invite_channel))
            # Let's see whether the channel's keyed or not.
            if invite_channel.key:
                msg_second += invite_channel.key
            msg_second += "^~\n"
            msg_log = ("%s invites %s to :%s" %
                (player, invite_player, invite_channel))
            player.tell_cc(msg_first)
            invite_player.tell_cc(msg_second)
            self.server.log.log(msg_log)
    else:
        player.tell("You must give a player and a channel.\n")
def send(self, send_str, player):
    """Send a message to a channel: send_str is "<channel> <message...>".

    The channel may be a numeric alias; on success the player's
    "last_channel" is updated so ';' can reuse it.
    """
    # Need, at a minimum, two bits: the channel and the message.
    if send_str:
        send_str_bits = send_str.split()
        if len(send_str_bits) < 2:
            player.tell("You must give both a channel and a message.\n")
            return
        # De-alias the channel name; bail if it fails.
        channel_name = self.de_alias(player, send_str_bits[0], CHANNEL)
        if not channel_name:
            return
        success = self.server.channel_manager.send(player, " ".join(send_str_bits[1:]),
            channel_name)
        if not success:
            player.tell("Failed to send.\n")
        else:
            player.config["last_channel"] = channel_name
    else:
        # Consistency fix: previously an empty send was a silent no-op;
        # every sibling command reports empty input, so do the same here.
        player.tell("You must give both a channel and a message.\n")
def last_send(self, send_str, player):
    """Send text to the channel the player most recently used."""
    channel_name = player.config["last_channel"]
    if not channel_name:
        player.tell("You must have a last channel to use this command.\n")
        return
    # Collapse runs of whitespace in the outgoing text.
    to_send = " ".join(send_str.split())
    if not to_send:
        player.tell("You must actually send some text.\n")
        return
    self.server.channel_manager.send(player, to_send, channel_name)
def tell(self, payload, player):
    """Send a private message: payload is "<target> <message...>".

    The target may carry a trailing comma ("tell bob, hi") and may be a
    numeric player alias.
    """
    # Need, at a minimum, two bits: the target and the message.
    if payload:
        elements = payload.split()
        if len(elements) < 2:
            player.tell("You must give both a target and a message.\n")
            return
        target = elements[0]
        if target[-1] == ',':
            # Strip comma from target; allows "Tell bob, y helo there"
            target = target[:-1]
        # De-alias the target. Return if dealiasing failed.
        target = self.de_alias(player, target, PLAYER)
        if not target:
            return
        other = self.server.get_player(target)
        if other == player:
            player.tell("Talking to yourself?\n")
        elif other:
            msg = " ".join(elements[1:])
            other.tell_cc("^R%s^~ tells you: %s\n" % (player, msg))
            player.tell_cc("You tell ^R%s^~: %s\n" % (other, msg))
            self.server.log.log("%s tells %s: %s" % (player, other, msg))
        else:
            player.tell_cc("Player ^R%s^~ not found.\n" % target)
    else:
        player.tell("You must give a player and a message.\n")
def list_players_in_space(self, location, player):
    """Show the players in location, alternating bold/regular highlighting."""
    player.tell_cc("Players in ^Y%s^~:\n" % location.name)
    pieces = ["   "]
    for position, other in enumerate(location.players):
        # Even positions are bold, odd positions regular.
        if position % 2 == 0:
            pieces.append("^!%s^. " % other)
        else:
            pieces.append("%s " % other)
    player.tell_cc("".join(pieces) + "\n\n")
def list_players_not_in_space(self, location, player):
    """Show every connected player who is not in location, alternating highlight."""
    player.tell_cc("Players elsewhere:\n")
    pieces = ["   "]
    shown = 0
    for other in self.server.players:
        if other.location != location:
            # Alternate bold/regular based on how many we have printed.
            if shown % 2 == 0:
                pieces.append("^!%s^. " % other)
            else:
                pieces.append("%s " % other)
            shown += 1
    player.tell_cc("".join(pieces) + "\n\n")
player.tell_cc(list_str + "\n\n")
def move(self, space_name, player):
if space_name:
old_space_name = player.location.name
player.move(self.server.get_space(space_name))
self.list_players_in_space(player.location, player)
self.server.log.log("%s moved from %s to %s." % (player, old_space_name, space_name))
else:
player.tell("You must give a space to move to.\n")
def who(self, player):
    """Show who shares the player's space and who is elsewhere."""
    player.tell("\n")
    here = player.location
    self.list_players_in_space(here, player)
    self.list_players_not_in_space(here, player)
def roll(self, roll_string, player, secret=False):
    """Roll dice described by roll_string; secret=True hides the result."""
    if roll_string:
        self.server.die_roller.roll(roll_string, player, secret)
        self.server.log.log("%s rolled %s." % (player, roll_string))
    else:
        player.tell("Invalid roll.\n")
# List of shortcuts for "list" and "new".
# Class-level tuples so game() can cheaply recognize the command variants.
_GAME_LIST_COMMANDS = ('list', 'ls', 'l')
_GAME_NEW_COMMANDS = ('new', 'n')
def game(self, game_string, player):
    """Handle the "game" command family.

    Forms: no args or "list [tag]" -> list games; "active" -> list
    tables; "new <game> <table>" and
    "new [private] <scope> <game> <table>" -> create a table.  Creating
    a table also updates the player's last table/channel.
    """
    valid = False
    made_new_table = False
    if game_string:
        string_bits = game_string.split()
        primary = string_bits[0].lower()
        if len(string_bits) == 1:
            if primary in self._GAME_LIST_COMMANDS:
                self.server.game_master.list_games(player)
                valid = True
            elif primary in ('active', 'ac', 'a'):
                self.server.game_master.list_tables(player, show_private=False)
                valid = True
        elif len(string_bits) == 2:
            # Possibly a request to list games with a tag.
            if primary in self._GAME_LIST_COMMANDS:
                tag = string_bits[1].lower()
                self.server.game_master.list_games(player, tag)
                valid = True
        elif len(string_bits) == 3:
            # First is new, second is game, third is table.
            if primary in self._GAME_NEW_COMMANDS:
                # De-alias the table; bail if it fails.
                table_name = self.de_alias(player, string_bits[2], TABLE)
                if not table_name:
                    return
                valid = self.server.game_master.new_table(player,
                    string_bits[1], table_name)
                if valid:
                    made_new_table = True
        elif len(string_bits) == 4 or len(string_bits) == 5:
            # New, [private], scope, game, table.
            # Assume we didn't get a private command...
            valid_so_far = True
            private = False
            offset = 0
            if len(string_bits) == 5:
                # Ah, we did. Set the private flag and move the scope over.
                if string_bits[1].lower() in ('private', 'pr', 'p'):
                    private = True
                    offset = 1
                    valid_so_far = True
                else:
                    valid_so_far = False
            if valid_so_far:
                # Normalize the scope word; reject anything unrecognized.
                scope = string_bits[1 + offset].lower()
                if scope in ('personal', 'p'):
                    scope = "personal"
                elif scope in ('global', 'g'):
                    if private:
                        # A private global game? Makes no sense.
                        valid_so_far = False
                    scope = "global"
                elif scope in ('local', 'l'):
                    scope = "local"
                else:
                    valid_so_far = False
            if valid_so_far and primary in self._GAME_NEW_COMMANDS:
                # De-alias the table; bail if it fails.
                table_name = self.de_alias(player, string_bits[3 + offset], TABLE)
                if not table_name:
                    return
                valid = self.server.game_master.new_table(player,
                    string_bits[2 + offset], table_name, scope, private)
                if valid:
                    made_new_table = True
    else:
        self.server.game_master.list_games(player)
        valid = True
    if not valid:
        player.tell("Invalid game command.\n")
    # If we made a new table, set the player's last table and channel.
    if made_new_table:
        player.config["last_table"] = table_name
        player.config["last_channel"] = table_name
        player.tell_cc("Your last table and channel have been set to ^R%s^~.\n" % table_name)
def table(self, table_string, player):
    """Send a command to a table: table_string is "<table> <cmd...>".

    The table may be a numeric alias; on success "last_table" is
    updated so '\\' can reuse it.
    """
    valid = False
    if table_string:
        # There must be at least two bits: the table name and a command.
        string_bits = table_string.split()
        if len(string_bits) > 1:
            # De-alias the table name and bail if it fails.
            table_name = self.de_alias(player, string_bits[0], TABLE)
            if not table_name:
                return
            self.server.game_master.handle(player, table_name,
                " ".join(string_bits[1:]))
            player.config["last_table"] = table_name
            valid = True
    if not valid:
        player.tell("Invalid table command.\n")
def last_table(self, command_string, player):
    """Send command_string to the table the player used most recently."""
    table_name = player.config["last_table"]
    if not table_name:
        player.tell("You must have a last table to use this command.\n")
        return
    if not command_string:
        player.tell("Invalid table command.\n")
        return
    # Collapse whitespace and pass it on to the game master.
    normalized = " ".join(command_string.split())
    self.server.game_master.handle(player, table_name, normalized)
def focus(self, table_name, player):
    """Focus the player on a table so bare input goes straight to it."""
    if not table_name:
        player.tell("You must have a table to focus on.\n")
        return
    table = self.server.game_master.get_table(table_name)
    if table:
        # Store the table's canonical name, not what the player typed.
        player.config["focus_table"] = table.table_name
        player.tell_cc("You are now focused on ^G%s^~.\n" % table.table_name)
    else:
        player.tell("You cannot focus on a nonexistent table.\n")
def unfocus(self, player):
    """Clear the player's focused table, if any."""
    if player.config["focus_table"]:
        player.config["focus_table"] = None
        player.tell("You are no longer focused on a table.\n")
    else:
        player.tell("You are already unfocused.\n")
def config(self, config_string, player):
    """Hand a "set" command to the server's configurator.

    Failures are caught and logged so a bad config command cannot
    crash the chat loop.
    """
    try:
        self.server.configurator.handle(config_string, player)
    except Exception as e:
        player.tell("Something went horribly awry with configuration.\n")
        self.server.log.log("Configuration failed: %s" % e)
def de_alias(self, player, alias_str, alias_type):
    """Resolve a numeric alias of the given type for this player.

    Returns alias_str unchanged when it is not numeric, the aliased
    name when the number is mapped, or None (after telling the player)
    when the number is unmapped or the type is unknown.
    """
    # Non-numeric strings pass straight through untouched.
    if not alias_str.isdigit():
        return alias_str
    # Map the alias type onto the player's config dictionary for it.
    config_keys = {
        CHANNEL: "channel_aliases",
        PLAYER: "player_aliases",
        TABLE: "table_aliases",
    }
    if alias_type not in config_keys:
        return None
    alias_dict = player.config[config_keys[alias_type]]
    alias_num = int(alias_str)
    if alias_num in alias_dict:
        return alias_dict[alias_num]
    player.tell_cc("^R%d^~ is not aliased!\n" % alias_num)
    return None
def alias(self, alias_string, player):
    """Bind a small number (0-99) as shorthand for a channel, player, or table.

    Expects "<type> <name> <number>".  Returns True when the alias was
    stored, False on any validation failure (the player is told why).
    """
    if not alias_string:
        player.tell("Invalid alias command.\n")
        return False
    pieces = alias_string.split()
    # Exactly three tokens: type, name, number.
    if len(pieces) != 3:
        player.tell("Invalid alias command.\n")
        return False
    a_type, a_name, a_num = pieces
    if not name_is_valid(a_name):
        player.tell("Cannot alias an invalid name.\n")
        return False
    if not a_num.isdigit():
        player.tell("Cannot alias to a non-number.\n")
        return False
    number = int(a_num)
    if number > 99:
        player.tell("Cannot alias to a number greater than 99.\n")
        return False
    # Map the type token (and its abbreviations) onto the alias table.
    known_types = (
        (("channel", "chan", "ch", "c"), "channel_aliases", "channel"),
        (("player", "pl", "p"), "player_aliases", "player"),
        (("table", "tab", "ta", "t"), "table_aliases", "table"),
    )
    for shorthands, config_key, type_str in known_types:
        if a_type in shorthands:
            alias_dict = player.config[config_key]
            break
    else:
        player.tell("Invalid type to alias to. Must be one of channel, player, or table.\n")
        return False
    # Mention a replacement if the number was already bound.
    addendum_str = ""
    if number in alias_dict:
        addendum_str = ", ^Rreplacing^~ ^c%s^~" % alias_dict[number]
    alias_dict[number] = a_name
    player.tell_cc("^C%d^~ is now a ^M%s^~ alias for ^G%s^~%s.\n" % (number, type_str, a_name, addendum_str))
    return True
def become(self, new_name, player):
    """Attempt a rename, announcing success to the player's location."""
    if not new_name:
        player.tell("Failed to become.\n")
        return
    previous = player.display_name
    if not player.set_name(new_name):
        player.tell("Failed to become.\n")
        return
    player.location.notify_cc("^Y%s^~ has become ^Y%s^~.\n" % (previous, player))
def show_help(self, player):
    """Print the general command reference to the player, then log the request."""
    # Communication: say/emote/tell plus channel management.
    player.tell("\n\nCOMMUNICATION:\n")
    player.tell_cc(" ^!'^.<message>, ^!\"^. Say <message>.\n")
    player.tell_cc(" ^!-^.<emote>, ^!,^. Emote <emote>.\n")
    player.tell_cc(" ^!tell^. <player> <msg>, ^!t^., ^!>^. Tell <player> <msg> privately.\n")
    player.tell_cc(" ^!connect^. <channel> [<k>], ^!co^. Connect to <channel> [with key <k>].\n")
    player.tell_cc(" ^!disconnect^. <channel>, ^!dc^. Disconnect from <channel>.\n")
    player.tell_cc(" ^!invite^. <player> <channel> Invite <player> to <channel>.\n")
    player.tell_cc(" ^!send^. <channel> <message>, ^!:^. Send <channel> <message>.\n")
    player.tell_cc(" ^!;^.<message> Send the last channel used <message>.\n")
    # Movement and presence.
    player.tell("\nWORLD INTERACTION:\n")
    player.tell_cc(" ^!move^. <space>, ^!m^. Move to space <space>.\n")
    player.tell_cc(" ^!who^., ^!w^. List players in your space/elsewhere.\n")
    # Table/game management and dice.
    player.tell("\nGAMING:\n")
    player.tell_cc(" ^!game^. list, ^!g^. ls List available games.\n")
    player.tell_cc(" ^!game^. active, ^!g^. ac List active tables.\n")
    player.tell_cc(" ^!game^. new <game> <tablename> New table of <game> named <tablename>.\n")
    player.tell_cc(" ^!table^. <table> <cmd>, ^!/^. Send <table> <cmd>.\n")
    player.tell_cc(" ^!\\^.<cmd> Send the last table played <cmd>.\n")
    player.tell_cc(" ^!roll^. [X]d<Y>[+/-/*<Z>], ^!r^. Roll [X] Y-sided/F/% dice [modified].\n")
    player.tell_cc(" ^!sroll^. [X]d<Y>[+/-/*<Z>], ^!sr^. Secret roll.\n")
    # Per-player settings.
    player.tell("\nCONFIGURATION:\n")
    player.tell_cc("^!set timestamp^. on|off, ^!set ts^. Enable/disable timestamps.\n")
    player.tell_cc(" ^!set color^. on|off, ^!set c^. Enable/disable color.\n")
    # Meta commands: rename, aliasing, help, quit.
    player.tell("\nMETA:\n")
    player.tell_cc(" ^!become^. <newname> Set name to <newname>.\n")
    player.tell_cc(" ^!alias^. <type> <name> <num> Alias table/channel <name> to <num>.\n")
    player.tell_cc(" ^!help^., ^!?^. Print this help.\n")
    player.tell_cc(" ^!quit^. Disconnect.\n")
    self.server.log.log("%s asked for general help." % player)
def admin(self, admin_str, player):
    """Hand an admin command to the admin manager, surviving its crashes."""
    try:
        self.server.admin_manager.handle(player, admin_str)
    except Exception:
        # Keep the session alive; log the full traceback for the admins.
        player.tell_cc("The admin manager crashed. ^RAlert an admin^~.\n")
        self.server.log.log("Admin manager crashed.\n" + traceback.format_exc())
def quit(self, player):
    """Disconnect the player: deactivate the client, enter the logout state, log it."""
    player.client.deactivate()
    player.state = State("logout")
    self.server.log.log("%s logged out." % player)
|
from unittest import TestCase
import json
import subprocess
import sys
from jsonschema import Draft4Validator, ValidationError, cli, __version__
from jsonschema.validators import _LATEST_VERSION as LatestValidator
from jsonschema.tests._helpers import captured_output
from jsonschema.compat import JSONDecodeError, NativeIO
def fake_validator(*errors):
    """Build a stand-in validator class that replays *errors* in batches.

    Each successive iter_errors() call returns the next batch passed in,
    in call order; once exhausted it reports no errors.  check_schema()
    always succeeds.
    """
    remaining = list(reversed(errors))

    class FakeValidator(object):
        def __init__(self, *args, **kwargs):
            pass

        def iter_errors(self, instance):
            # Pop from the tail so batches come back in original order.
            if remaining:
                return remaining.pop()
            return []

        def check_schema(self, schema):
            pass

    return FakeValidator
class TestParser(TestCase):
    """Tests for cli.parse_args: validator resolution and option validation."""

    FakeValidator = fake_validator()
    instance_file = "foo.json"
    schema_file = "schema.json"

    def setUp(self):
        # Route cli's open() through fake_open so no real files are touched.
        cli.open = self.fake_open
        self.addCleanup(delattr, cli, "open")

    def fake_open(self, path):
        if path == self.instance_file:
            contents = ""
        elif path == self.schema_file:
            contents = {}
        else:  # pragma: no cover
            self.fail("What is {!r}".format(path))
        return NativeIO(json.dumps(contents))

    def test_find_validator_by_fully_qualified_object_name(self):
        arguments = cli.parse_args(
            [
                "--validator",
                "jsonschema.tests.test_cli.TestParser.FakeValidator",
                "--instance", self.instance_file,
                self.schema_file,
            ]
        )
        self.assertIs(arguments["validator"], self.FakeValidator)

    def test_find_validator_in_jsonschema(self):
        arguments = cli.parse_args(
            [
                "--validator", "Draft4Validator",
                "--instance", self.instance_file,
                self.schema_file,
            ]
        )
        self.assertIs(arguments["validator"], Draft4Validator)

    def test_none_validator(self):
        # Omitting --validator falls back to the latest-draft validator.
        arguments = cli.parse_args(
            [
                "--instance", self.instance_file,
                self.schema_file,
            ]
        )
        self.assertIs(arguments["validator"], LatestValidator)

    def test_none_instance(self):
        arguments = cli.parse_args(
            [
                self.schema_file,
            ]
        )
        self.assertEqual(arguments["instances"], [])

    def test_unknown_output(self):
        # Capture streams to keep argparse's usage message off real stdout.
        with captured_output() as (stdout, stderr):
            with self.assertRaises(SystemExit):
                cli.parse_args(
                    [
                        "--output", "foo",
                        self.schema_file,
                    ]
                )
        # Assertions deliberately sit outside the with block: if a context
        # manager had a bug and failed to re-raise an exception, assertions
        # placed inside it would silently never run.
        self.assertIn("invalid choice: 'foo'", stderr.getvalue())
        self.assertFalse(stdout.getvalue())

    def test_useless_error_format(self):
        # Capture streams to keep the help message off real stdout.
        with captured_output() as (stdout, stderr):
            with self.assertRaises(SystemExit):
                cli.parse_args(
                    [
                        "--output", "pretty",
                        "--error-format", "foo",
                        self.schema_file,
                    ]
                )
        # See test_unknown_output: assert only after the managers exit.
        self.assertIn(
            "--error-format can only be used with --output plain",
            stderr.getvalue(),
        )
        self.assertFalse(stdout.getvalue())
class TestCLI(TestCase):
    """End-to-end tests for cli.run: exit codes and stream contents for
    both the "plain" and "pretty" output modes."""

    # Well-known fake paths served by fake_open below.
    instance_file_1 = "foo1.json"
    instance_file_2 = "foo2.json"
    schema_file = "schema.json"
    schema_error_file = "schema_error.json"
    bad_json_file_1 = "bad1.json"
    bad_json_file_2 = "bad2.json"

    # Markers the "pretty" output mode wraps around each outcome.
    pretty_parsing_error_tag = "===[" + JSONDecodeError.__name__ + "]==="
    pretty_validation_error_tag = "===[ValidationError]==="
    pretty_success_tag = "===[SUCCESS]==="

    def setUp(self):
        # Replace cli.open with the in-memory fake; removed via cleanup.
        cli.open = self.fake_open
        self.addCleanup(delattr, cli, "open")

    def fake_open(self, path):
        # instance_file_1 ("1") fails the schema below; instance_file_2
        # ("25") satisfies the minimum-20 branch.
        if path == self.instance_file_1:
            contents = "1"
        elif path == self.instance_file_2:
            contents = "25"
        elif path == self.schema_file:
            contents = """
{
"anyOf": [
{"minimum": 20},
{"type": "string"},
{"required": true}
]
}
"""
        elif path == self.schema_error_file:
            # An invalid schema: "title" is required to be a string.
            contents = """
{
"title": 1
}
"""
        elif path == self.bad_json_file_1:
            contents = "{bad_key: val}"
        elif path == self.bad_json_file_2:
            contents = "{1 []}"
        else:  # pragma: no cover
            self.fail("What is {!r}".format(path))
        return NativeIO(contents)

    def test_draft3_schema_draft4_validator(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": Draft4Validator,
                "schema": "schema.json",
                "instances": ["foo1.json"],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertTrue(stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_successful_validation(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": ["foo2.json"],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertFalse(stderr.getvalue())
        self.assertEqual(exit_code, 0)

    def test_unsuccessful_validation(self):
        error = ValidationError("I am an error!", instance=1)
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator([error]),
                "schema": "schema.json",
                "instances": ["foo1.json"],
                "error_format": "{error.instance} - {error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertEqual(stderr.getvalue(), "1 - I am an error!")
        self.assertEqual(exit_code, 1)

    def test_unsuccessful_validation_multiple_instances(self):
        # Errors from every instance are concatenated onto stderr.
        first_errors = [
            ValidationError("9", instance=1),
            ValidationError("8", instance=1),
        ]
        second_errors = [ValidationError("7", instance=2)]
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(first_errors, second_errors),
                "schema": "schema.json",
                "instances": ["foo1.json", "foo2.json"],
                "error_format": "{error.instance} - {error.message}\t",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t")
        self.assertEqual(exit_code, 1)

    def test_license(self):
        # Spawns a real subprocess; checks packaging metadata.
        output = subprocess.check_output(
            [sys.executable, "-m", "pip", "show", "jsonschema"],
            stderr=subprocess.STDOUT,
        )
        self.assertIn(b"License: MIT", output)

    def test_version(self):
        version = subprocess.check_output(
            [sys.executable, "-m", "jsonschema", "--version"],
            stderr=subprocess.STDOUT,
        )
        version = version.decode("utf-8").strip()
        self.assertEqual(version, __version__)

    def test_piping(self):
        # No instance files: the instance is read from stdin.
        stdout, stderr, stdin = NativeIO(), NativeIO(), NativeIO("{}")
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": [],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
            stdin=stdin,
        )
        self.assertFalse(stdout.getvalue())
        self.assertFalse(stderr.getvalue())
        self.assertEqual(exit_code, 0)

    def test_schema_parsing_error(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "bad1.json",
                "instances": ["foo1.json"],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn("Failed to parse bad1.json", stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_instance_parsing_error(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": ["bad1.json", "bad2.json"],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        output_err = stderr.getvalue()
        self.assertFalse(stdout.getvalue())
        self.assertIn("Failed to parse bad1.json", output_err)
        self.assertIn("Failed to parse bad2.json", output_err)
        self.assertEqual(exit_code, 1)

    def test_stdin_parsing_error(self):
        stdout, stderr, stdin = NativeIO(), NativeIO(), NativeIO("{foo}")
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": [],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
            stdin=stdin,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn("Failed to parse <stdin>", stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_stdin_pretty_parsing_error(self):
        stdout, stderr, stdin = NativeIO(), NativeIO(), NativeIO("{foo}")
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": [],
                "output": "pretty",
            },
            stdout=stdout,
            stderr=stderr,
            stdin=stdin,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn(self.pretty_parsing_error_tag, stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_parsing_error(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "bad1.json",
                "instances": ["foo1.json"],
                "error_format": "",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn("Failed to parse bad1.json", stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_pretty_parsing_error(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "bad1.json",
                "instances": ["foo1.json"],
                "error_format": "",
                "output": "pretty",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn(self.pretty_parsing_error_tag, stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_pretty_successful_validation(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": ["foo2.json"],
                "error_format": "",
                "output": "pretty",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertIn(self.pretty_success_tag, stdout.getvalue())
        self.assertFalse(stderr.getvalue())
        self.assertEqual(exit_code, 0)

    def test_pretty_unsuccessful_validation(self):
        error = ValidationError("I am an error!", instance=1)
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator([error]),
                "schema": "schema.json",
                "instances": ["foo1.json"],
                "error_format": "",
                "output": "pretty",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn(self.pretty_validation_error_tag, stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_schema_validation(self):
        # An invalid schema itself is reported as an error on stderr.
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": LatestValidator,
                "schema": "schema_error.json",
                "instances": [],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr
        )
        self.assertFalse(stdout.getvalue())
        self.assertTrue(stderr.getvalue())
        self.assertEqual(exit_code, 1)
Move the asserts out of the with block.
Not in this case, but in many others, keeping the assertions inside the
with block is dangerous: if the context manager has a bug and fails to
re-raise its exception, the assertions will silently never run.
from unittest import TestCase
import json
import subprocess
import sys
from jsonschema import Draft4Validator, ValidationError, cli, __version__
from jsonschema.compat import JSONDecodeError, NativeIO
from jsonschema.tests._helpers import captured_output
from jsonschema.validators import _LATEST_VERSION as LatestValidator
def fake_validator(*errors):
    """Build a stand-in validator class that replays *errors* in batches.

    Each successive iter_errors() call returns the next batch passed in,
    in call order; once exhausted it reports no errors.  check_schema()
    always succeeds.
    """
    remaining = list(reversed(errors))

    class FakeValidator(object):
        def __init__(self, *args, **kwargs):
            pass

        def iter_errors(self, instance):
            # Pop from the tail so batches come back in original order.
            if remaining:
                return remaining.pop()
            return []

        def check_schema(self, schema):
            pass

    return FakeValidator
class TestParser(TestCase):
    """Tests for cli.parse_args: validator resolution and option validation."""

    FakeValidator = fake_validator()
    instance_file = "foo.json"
    schema_file = "schema.json"

    def setUp(self):
        # Route cli's open() through fake_open so no real files are touched.
        cli.open = self.fake_open
        self.addCleanup(delattr, cli, "open")

    def fake_open(self, path):
        if path == self.instance_file:
            contents = ""
        elif path == self.schema_file:
            contents = {}
        else:  # pragma: no cover
            self.fail("What is {!r}".format(path))
        return NativeIO(json.dumps(contents))

    def test_find_validator_by_fully_qualified_object_name(self):
        arguments = cli.parse_args(
            [
                "--validator",
                "jsonschema.tests.test_cli.TestParser.FakeValidator",
                "--instance", self.instance_file,
                self.schema_file,
            ]
        )
        self.assertIs(arguments["validator"], self.FakeValidator)

    def test_find_validator_in_jsonschema(self):
        arguments = cli.parse_args(
            [
                "--validator", "Draft4Validator",
                "--instance", self.instance_file,
                self.schema_file,
            ]
        )
        self.assertIs(arguments["validator"], Draft4Validator)

    def test_none_validator(self):
        # Omitting --validator falls back to the latest-draft validator.
        arguments = cli.parse_args(
            [
                "--instance", self.instance_file,
                self.schema_file,
            ]
        )
        self.assertIs(arguments["validator"], LatestValidator)

    def test_none_instance(self):
        arguments = cli.parse_args(
            [
                self.schema_file,
            ]
        )
        self.assertEqual(arguments["instances"], [])

    def test_unknown_output(self):
        # Capture streams to keep argparse's usage message off real stdout.
        with captured_output() as (stdout, stderr):
            with self.assertRaises(SystemExit):
                cli.parse_args(
                    [
                        "--output", "foo",
                        self.schema_file,
                    ]
                )
        # Assertions deliberately sit outside the with block: if a context
        # manager had a bug and failed to re-raise an exception, assertions
        # placed inside it would silently never run.
        self.assertIn("invalid choice: 'foo'", stderr.getvalue())
        self.assertFalse(stdout.getvalue())

    def test_useless_error_format(self):
        # Capture streams to keep the help message off real stdout.
        with captured_output() as (stdout, stderr):
            with self.assertRaises(SystemExit):
                cli.parse_args(
                    [
                        "--output", "pretty",
                        "--error-format", "foo",
                        self.schema_file,
                    ]
                )
        # See test_unknown_output: assert only after the managers exit.
        self.assertIn(
            "--error-format can only be used with --output plain",
            stderr.getvalue(),
        )
        self.assertFalse(stdout.getvalue())
class TestCLI(TestCase):
    """End-to-end tests for cli.run: exit codes and stream contents for
    both the "plain" and "pretty" output modes."""

    # Well-known fake paths served by fake_open below.
    instance_file_1 = "foo1.json"
    instance_file_2 = "foo2.json"
    schema_file = "schema.json"
    schema_error_file = "schema_error.json"
    bad_json_file_1 = "bad1.json"
    bad_json_file_2 = "bad2.json"

    # Markers the "pretty" output mode wraps around each outcome.
    pretty_parsing_error_tag = "===[" + JSONDecodeError.__name__ + "]==="
    pretty_validation_error_tag = "===[ValidationError]==="
    pretty_success_tag = "===[SUCCESS]==="

    def setUp(self):
        # Replace cli.open with the in-memory fake; removed via cleanup.
        cli.open = self.fake_open
        self.addCleanup(delattr, cli, "open")

    def fake_open(self, path):
        # instance_file_1 ("1") fails the schema below; instance_file_2
        # ("25") satisfies the minimum-20 branch.
        if path == self.instance_file_1:
            contents = "1"
        elif path == self.instance_file_2:
            contents = "25"
        elif path == self.schema_file:
            contents = """
{
"anyOf": [
{"minimum": 20},
{"type": "string"},
{"required": true}
]
}
"""
        elif path == self.schema_error_file:
            # An invalid schema: "title" is required to be a string.
            contents = """
{
"title": 1
}
"""
        elif path == self.bad_json_file_1:
            contents = "{bad_key: val}"
        elif path == self.bad_json_file_2:
            contents = "{1 []}"
        else:  # pragma: no cover
            self.fail("What is {!r}".format(path))
        return NativeIO(contents)

    def test_draft3_schema_draft4_validator(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": Draft4Validator,
                "schema": "schema.json",
                "instances": ["foo1.json"],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertTrue(stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_successful_validation(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": ["foo2.json"],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertFalse(stderr.getvalue())
        self.assertEqual(exit_code, 0)

    def test_unsuccessful_validation(self):
        error = ValidationError("I am an error!", instance=1)
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator([error]),
                "schema": "schema.json",
                "instances": ["foo1.json"],
                "error_format": "{error.instance} - {error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertEqual(stderr.getvalue(), "1 - I am an error!")
        self.assertEqual(exit_code, 1)

    def test_unsuccessful_validation_multiple_instances(self):
        # Errors from every instance are concatenated onto stderr.
        first_errors = [
            ValidationError("9", instance=1),
            ValidationError("8", instance=1),
        ]
        second_errors = [ValidationError("7", instance=2)]
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(first_errors, second_errors),
                "schema": "schema.json",
                "instances": ["foo1.json", "foo2.json"],
                "error_format": "{error.instance} - {error.message}\t",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t")
        self.assertEqual(exit_code, 1)

    def test_license(self):
        # Spawns a real subprocess; checks packaging metadata.
        output = subprocess.check_output(
            [sys.executable, "-m", "pip", "show", "jsonschema"],
            stderr=subprocess.STDOUT,
        )
        self.assertIn(b"License: MIT", output)

    def test_version(self):
        version = subprocess.check_output(
            [sys.executable, "-m", "jsonschema", "--version"],
            stderr=subprocess.STDOUT,
        )
        version = version.decode("utf-8").strip()
        self.assertEqual(version, __version__)

    def test_piping(self):
        # No instance files: the instance is read from stdin.
        stdout, stderr, stdin = NativeIO(), NativeIO(), NativeIO("{}")
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": [],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
            stdin=stdin,
        )
        self.assertFalse(stdout.getvalue())
        self.assertFalse(stderr.getvalue())
        self.assertEqual(exit_code, 0)

    def test_schema_parsing_error(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "bad1.json",
                "instances": ["foo1.json"],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn("Failed to parse bad1.json", stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_instance_parsing_error(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": ["bad1.json", "bad2.json"],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        output_err = stderr.getvalue()
        self.assertFalse(stdout.getvalue())
        self.assertIn("Failed to parse bad1.json", output_err)
        self.assertIn("Failed to parse bad2.json", output_err)
        self.assertEqual(exit_code, 1)

    def test_stdin_parsing_error(self):
        stdout, stderr, stdin = NativeIO(), NativeIO(), NativeIO("{foo}")
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": [],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
            stdin=stdin,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn("Failed to parse <stdin>", stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_stdin_pretty_parsing_error(self):
        stdout, stderr, stdin = NativeIO(), NativeIO(), NativeIO("{foo}")
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": [],
                "output": "pretty",
            },
            stdout=stdout,
            stderr=stderr,
            stdin=stdin,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn(self.pretty_parsing_error_tag, stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_parsing_error(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "bad1.json",
                "instances": ["foo1.json"],
                "error_format": "",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn("Failed to parse bad1.json", stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_pretty_parsing_error(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "bad1.json",
                "instances": ["foo1.json"],
                "error_format": "",
                "output": "pretty",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn(self.pretty_parsing_error_tag, stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_pretty_successful_validation(self):
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator(),
                "schema": "schema.json",
                "instances": ["foo2.json"],
                "error_format": "",
                "output": "pretty",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertIn(self.pretty_success_tag, stdout.getvalue())
        self.assertFalse(stderr.getvalue())
        self.assertEqual(exit_code, 0)

    def test_pretty_unsuccessful_validation(self):
        error = ValidationError("I am an error!", instance=1)
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": fake_validator([error]),
                "schema": "schema.json",
                "instances": ["foo1.json"],
                "error_format": "",
                "output": "pretty",
            },
            stdout=stdout,
            stderr=stderr,
        )
        self.assertFalse(stdout.getvalue())
        self.assertIn(self.pretty_validation_error_tag, stderr.getvalue())
        self.assertEqual(exit_code, 1)

    def test_schema_validation(self):
        # An invalid schema itself is reported as an error on stderr.
        stdout, stderr = NativeIO(), NativeIO()
        exit_code = cli.run(
            {
                "validator": LatestValidator,
                "schema": "schema_error.json",
                "instances": [],
                "error_format": "{error.message}",
                "output": "plain",
            },
            stdout=stdout,
            stderr=stderr
        )
        self.assertFalse(stdout.getvalue())
        self.assertTrue(stderr.getvalue())
        self.assertEqual(exit_code, 1)
|
import webnotes
def execute():
    """Re-run the April 2013 file_list migration for every table that still
    has a 'file_list' column, then drop the leftover Custom Field.

    Fix over the earlier revision: only the *custom* 'file_list' field is
    deleted.  The old version also deleted matching rows from `tabDocField`,
    which strips the standard field definition out of the DocType itself.
    """
    from patches.april_2013.p05_update_file_data import update_file_list, get_single_doctypes
    singles = get_single_doctypes()
    for doctype in webnotes.conn.sql_list("""select table_name from `information_schema`.`columns`
where table_schema=%s and column_name='file_list'""", webnotes.conn.cur_db_name):
        # Table names come back as `tabSomething`; strip the "tab" prefix.
        doctype = doctype[3:]
        if not webnotes.conn.exists("DocType", doctype):
            continue
        update_file_list(doctype, singles)
        # Remove only the custom field definition; never touch tabDocField.
        webnotes.conn.sql("""delete from `tabCustom Field` where fieldname='file_list'
and parent=%s""", doctype)
[patches] [fix] Fix an old patch: stop deleting standard `tabDocField` rows when removing the leftover 'file_list' field.
import webnotes
def execute():
    """Re-run the April 2013 file_list migration for every table that still
    has a 'file_list' column, then drop the leftover custom field."""
    from patches.april_2013.p05_update_file_data import update_file_list, get_single_doctypes
    singles = get_single_doctypes()
    for doctype in webnotes.conn.sql_list("""select table_name from `information_schema`.`columns`
where table_schema=%s and column_name='file_list'""", webnotes.conn.cur_db_name):
        # Table names come back as `tabSomething`; strip the "tab" prefix.
        doctype = doctype[3:]
        if not webnotes.conn.exists("DocType", doctype): continue
        update_file_list(doctype, singles)
        # Only the *custom* field definition is removed; standard
        # tabDocField rows are deliberately left alone.
        webnotes.conn.sql("""delete from `tabCustom Field` where fieldname='file_list'
and parent=%s""", doctype)
|
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 Yu Xiong Wei(try.dash.now@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = 'sean yu (Yu, Xiongwei)'
__doc__ = '''
it's GUI of DasH aka Do as Human
created 2017-05-06 by Sean Yu
'''
from datetime import datetime
import wx.grid as gridlib
import traceback
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
from lib.common import send_mail_smtp_without_login
#from dut import dut
class RedirectText(object):
    """Stream replacement for sys.stdout/sys.stderr that tees output to a
    wx text control (color-coded by content) and, optionally, a log file.

    The original console streams are remembered and still written to, so
    nothing is lost if the GUI misbehaves.
    """

    font_point_size = 10
    old_stdout = None
    old_stderr = None
    write_lock = None
    log_file = None

    def __init__(self, aWxTextCtrl, log_path=None):
        # Keep the real streams so console output continues to work.
        self.old_stderr, self.old_stdout = sys.stderr, sys.stdout
        self.out = aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize
        self.write_lock = threading.Lock()
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            # Expose the log file's fileno so callers that need a real fd
            # (e.g. subprocess redirection) can use this object.
            self.fileno = self.log_file.fileno

    def write(self, string):
        # Serialize writers.  Fix over the original: the lock is now held
        # via a with-statement, so an exception inside the wx/file calls can
        # no longer leave the lock permanently held (deadlocking all
        # subsequent writes).
        with self.write_lock:
            self.old_stdout.write(string)
            if re.search('error|\s+err\s+|fail|wrong', string.lower()):
                # Error-ish lines are highlighted red-on-yellow, slightly larger.
                self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW, font =wx.Font(self.font_point_size+2, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
            else:
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
            # Append on the GUI thread; write() may be called from workers.
            wx.CallAfter(self.out.AppendText, string)
            if self.log_file:
                self.log_file.write(string)
                self.log_file.flush()

    def close(self):
        """Flush and close the log file, if one was opened."""
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
class process_info(object):
    """Lightweight record of a spawned process (name, pid, live returncode).

    `returncode` is a read-only property that proxies the wrapped process
    object, so callers always observe its current value.
    """

    process = None
    pid = None
    full_name = None

    def __init__(self, name, process):
        self.process = process
        self.pid = process.pid
        self.full_name = name
        # Bug fix: the original also did `self.returncode = process.returncode`
        # here, but `returncode` is a read-only property below, so that
        # assignment raised AttributeError and the constructor could never
        # complete.  The property already exposes the live value.

    @property
    def returncode(self):
        # Always reflect the current status of the wrapped process.
        return self.process.returncode
class FileEditor(wx.Panel):
    """Notebook page that edits either a plain text file or a function grid.

    type == 'text' loads full_file_name into a rich text control; any other
    type builds a 50x5 grid whose first column holds a function name and the
    remaining columns its arguments.  Ctrl+mouse-wheel zooms the font.
    """

    editor = None            # the wx.TextCtrl or gridlib.Grid widget
    font_size = 10           # current font size, changed by Ctrl+wheel
    parent = None
    type = None              # NOTE: intentionally mirrors the ctor arg; shadows builtin `type`
    sessions_node = None
    function_node = None
    case_suite_node = None
    full_file_name = None    # path backing this page, if any
    file_instance = None

    def on_close(self):
        """Flush the editor contents back to the backing file, if there is one."""
        if self.full_file_name:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
        # done 2017-9-12: handle close tab in edit_area

    def __init__(self, parent, title='pageOne', type='grid', file_name=None):
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        if type in ['text']:
            # Rich, multi-line, auto-URL text control (Enter not consumed).
            self.editor = wx.TextCtrl(self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL | wx.VSCROLL | wx.TE_RICH | wx.TE_MULTILINE & (~wx.TE_PROCESS_ENTER))
            # Load the backing file line by line.
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            # Grid mode: 50 rows x 5 columns; col 0 = function, cols 1.. = args.
            self.editor = gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color = 'black'
            arg_color = 'blue'
            for c in range(0, col):
                if c < 1:
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range(0, row):
                    self.editor.SetCellTextColour(r, c, function_color if c < 1 else arg_color)
            # Bold the function-name column.
            for r in range(0, row):
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size, wx.SWISS, wx.NORMAL, wx.BOLD))
        # Wheel handler covers both editor kinds (it branches on self.type).
        self.editor.Bind(wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel)
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)

    def editor_OnMouseWheel(self, event):
        """Ctrl+wheel zoom: wheel-down shrinks (floor 5), wheel-up grows."""
        min_font_size = 5
        interval_step = 2
        if event.ControlDown():
            pass
        else:
            return
        # NOTE(review): original indentation was ambiguous; this pairing of
        # the else with the rotation test (shrink on wheel-down, grow on
        # wheel-up) is the reconstruction that makes behavioral sense.
        if event.GetWheelRotation() < 0:
            if self.font_size > min_font_size:
                self.font_size -= interval_step
        else:
            self.font_size += 1
        if self.type in ['text']:
            f = self.editor.GetFont()
            f.PointSize = self.font_size
            self.editor.SetFont(f)
        else:
            # Grids carry a font per cell; update them all.
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range(0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
#wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
class DasHFrame(MainFrame):#wx.Frame
ini_setting = None
#m_left_navigator =None
redir = None
edit_area=None
tabs_in_edit_area = None
src_path = None
sessions_alive=None
sequence_queue=None
history_cmd = []
history_cmd_index = -1
import_modules={'TC':'TC'}
lib_path ='./lib'
log_path = '../log'
session_path = './sessions'
suite_path = '../test_suite'
dict_test_report= None
alive =True
mail_server=None
mail_to_list=None
mail_from=None
mail_read_url= 'outlook.office365.com'
mail_password = None
mail_usre =None
case_queue =None
check_case_running_status_lock = None
case_list=None
def __init__(self,parent=None, ini_file = './gDasH.ini'):
    """Build the whole DasH main window: read settings from *ini_file*,
    redirect stdout/stderr into the log pane, assemble menus, navigator
    trees and the editor notebook, then start the background pollers."""
    #wx.Frame.__init__(self, None, title="DasH")
    # --- per-instance state (shadows the class-level defaults) ---
    self.case_list= []
    self.case_queue = Queue.Queue()
    self.dict_test_report={}
    self.check_case_running_status_lock = threading.Lock()
    self.tabs_in_edit_area=[]
    self.sessions_alive={}
    MainFrame.__init__(self, parent=parent)
    self.sequence_queue= Queue.Queue()
    #self.sequence_queue.put()
    # --- configuration from the [dash] section of gDasH.ini ---
    self.ini_setting = ConfigParser.ConfigParser()
    self.ini_setting.read(ini_file)
    self.src_path = os.path.abspath(self.ini_setting.get('dash','src_path'))
    self.lib_path = os.path.abspath(self.ini_setting.get('dash','lib_path'))
    self.log_path = os.path.abspath(self.ini_setting.get('dash','log_path'))
    self.suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
    self.mail_server = self.ini_setting.get('dash', 'mail_server')
    self.mail_from =self.ini_setting.get('dash', 'mail_from')
    self.mail_to_list =self.ini_setting.get('dash', 'mail_to_list')
    self.mail_read_url =self.ini_setting.get('dash', 'mail_read_url')
    self.mail_user = self.ini_setting.get('dash','mail_user')
    self.mail_password =self.ini_setting.get('dash', 'mail_password')
    from lib.common import create_case_folder, create_dir
    # NOTE(review): '-l <log_path>' appended to sys.argv, presumably consumed
    # by lib.common helpers — confirm.
    sys.argv.append('-l')
    sys.argv.append('{}'.format(self.log_path))
    self.log_path = create_case_folder(self.log_path)
    self.suite_path = create_dir(self.suite_path)
    self.lib_path = create_dir(self.lib_path)
    self.src_path = create_dir(self.src_path)
    if not os.path.exists(self.log_path):
        os.mkdir(self.log_path)
    self.add_src_path_to_python_path(self.src_path)
    # --- route stdout/stderr into the in-window log pane ---
    self.redir = RedirectText(self.m_log, self.log_path)
    sys.stdout = self.redir
    sys.stderr = self.redir
    self.m_log.SetBackgroundColour('Black')
    self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font =wx.Font(9, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
    #self.m_editor.WriteText('welcome to dash world')
    self.m_log.WriteText('Welcome to DasH!\n')
    # pre-fill the command box with an example invocation
    self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')
    # --- menus ---
    fileMenu = wx.Menu()
    open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
    open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
    mail_test_report = fileMenu.Append(wx.NewId(), "Mail Test Report", "Mail Test Report")
    get_case_queue = fileMenu.Append(wx.NewId(), "Get Case Queue", "Get Case Queue") #done
    clear_case_queue = fileMenu.Append(wx.NewId(), "Clear Case Queue", "Clear Case Queue")
    kill_running_case = fileMenu.Append(wx.NewId(), "Kill Running Case(s)", "Kill Running Case(s)")
    self.m_menubar_main.Append(fileMenu, "&Open")
    # NOTE(review): open_test_suite / open_test_case have no EVT_MENU binding
    self.Bind(wx.EVT_MENU,self.on_mail_test_report ,mail_test_report)
    self.Bind(wx.EVT_MENU,self.get_case_queue ,get_case_queue)
    self.Bind(wx.EVT_MENU,self.on_clear_case_queue ,clear_case_queue)
    self.Bind(wx.EVT_MENU,self.on_kill_running_case ,kill_running_case)
    self.Bind(wx.EVT_CLOSE, self.on_close)
    self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
    self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
    self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
    # --- left navigator: three tree pages in an AUI notebook ---
    from wx.aui import AuiNotebook
    bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE &(~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
    self.navigator = AuiNotebook(self.m_left_navigator, style= bookStyle )
    self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.function_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.session_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.navigator.AddPage(self.session_page, 'SESSION')
    self.navigator.AddPage(self.function_page, 'FUNCTION')
    self.navigator.AddPage(self.case_suite_page, 'CASE')
    # --- right side: editor notebook above log above command box ---
    self.edit_area = AuiNotebook(self.m_file_editor, style = wx.aui.AUI_NB_DEFAULT_STYLE)
    self.edit_area.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_close_tab_in_edit_area, self.edit_area)
    if False:  # disabled demo page
        new_page = FileEditor(self.edit_area, 'a', type= type)
        self.edit_area.AddPage(new_page, 'test')
        self.tabs_in_edit_area.append(('test'))
    self.edit_area.Enable(True)
    right_sizer = wx.BoxSizer(wx.VERTICAL)
    #right_sizer =wx.GridSizer( 3, 1, 0, 0 )
    left_sizer = wx.BoxSizer(wx.HORIZONTAL)
    left_sizer.Add(self.m_left_navigator, 1, wx.EXPAND)
    # --- tree event bindings ---
    self.case_suite_page.Bind(wx.EVT_LEFT_DCLICK, self.m_case_treeOnLeftDClick)
    #self.case_suite_page.Bind(wx.EVT_MOUSEWHEEL, self.case_tree_OnMouseWheel)
    self.case_suite_page.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.m_case_treeOnTreeItemExpanding)
    self.session_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Session_tab)
    self.function_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Function_tab)
    self.function_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_function_tab)
    self.case_suite_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_case_tab)
    main_sizer = wx.BoxSizer(wx.HORIZONTAL)
    #main_sizer = wx.GridSizer( 1, 2, 0, 0 )
    nav_sizer = wx.BoxSizer()
    nav_sizer.Add(self.navigator, 1, wx.EXPAND, 1)
    self.m_left_navigator.SetSizer(nav_sizer)
    #main_sizer = wx.BoxSizer(wx.HORIZONTAL)
    #main_sizer.Add(left_sizer, 3, wx.EXPAND)
    main_sizer.Add(left_sizer, 2, wx.EXPAND)
    edit_sizer = wx.BoxSizer()
    edit_sizer.Add(self.edit_area, 1, wx.EXPAND, 1)
    self.m_file_editor.SetSizer(edit_sizer)
    right_sizer.Add(self.m_file_editor, 6, wx.ALL|wx.EXPAND, 1)
    right_sizer.Add(self.m_log, 3, wx.ALL|wx.EXPAND, 2)
    right_sizer.Add(self.m_command_box, 0, wx.ALL|wx.EXPAND, 3)
    main_sizer.Add(right_sizer, 8, wx.EXPAND)
    self.SetSizer(main_sizer)
    # --- populate the three navigator trees ---
    self.build_session_tab()
    self.build_suite_tree()
    self.build_function_tab()
    # NOTE(review): a .bmp is loaded with BITMAP_TYPE_ICO — confirm this works
    ico = wx.Icon('./gui/dash.bmp', wx.BITMAP_TYPE_ICO)
    self.SetIcon(ico)
    # --- background pollers (stopped via self.alive in on_close) ---
    th= threading.Thread(target=self.polling_running_cases)
    th.start()
    th = threading.Thread(target=self.polling_request_via_mail)
    th.start()
def on_close(self, event):
    """EVT_CLOSE handler: stop the pollers, dump the recorded script, mail a
    final report, close every tab and restore stdout/stderr."""
    self.alive =False  # signals both polling threads to exit
    time.sleep(0.01)
    self.generate_code(file_name='{}/test_script.py'.format(self.suite_path))
    self.mail_test_report("DASH TEST REPORT")
    for index in range(0,self.edit_area.GetPageCount()): #len(self.tabs_in_edit_area)):
        closing_page = self.edit_area.GetPage(index)
        if isinstance(closing_page, (SessionTab)):
            if closing_page:
                name = closing_page.name
                # drop the session's bookkeeping entry
                self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(name))
        # NOTE(review): reconstructed as "every page gets on_close()" —
        # indentation was ambiguous in the original; confirm.
        closing_page.on_close()
    # restore the real streams before the RedirectText goes away
    self.redir.close()
    sys.stderr =self.redir.old_stderr
    sys.stdout = self.redir.old_stdout
    event.Skip()
def generate_report(self, filename):
    """Append a plain-text summary of every tracked case to *filename* and
    return the summary string (header included even when there are no cases)."""
    header = '''Test Report
RESULT,\tStart_Time,\tEnd_Time,\tPID,\tDuration,\tCase_Name,\tLog\n'''
    report = header
    if len(self.dict_test_report):
        with open(filename, 'a+') as out:
            out.write(header)
            # order rows by start time (field 1 of each record)
            for pid in sorted(self.dict_test_report, key=lambda k: self.dict_test_report[k][1]):
                case_name, start_time, end_time, duration, return_code, proc, log_path = self.dict_test_report[pid][:7]
                # a case with no return code yet is In Progress
                result = 'IP' if return_code is None else return_code
                columns = [result, start_time, end_time, pid, duration, case_name, '<{}>'.format(log_path)]
                record = '\t'.join('{},\t'.format(col) for col in columns)
                report += record + '\n'
                out.write(record + '\n')
    return report
def on_close_tab_in_edit_area(self, event):
    """AUI page-close handler: shut the page down and, for session tabs,
    drop its bookkeeping entry and the global session variable."""
    #self.edit_area.GetPage(self.edit_area.GetSelection()).on_close()
    closing_page = self.edit_area.GetPage(self.edit_area.GetSelection())
    closing_page.on_close()
    if isinstance(closing_page, (SessionTab)):
        ses_name = closing_page.name
        self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(ses_name))
        # the session was published into globals(); close and remove it
        # (has_key: this file is Python 2)
        if globals().has_key(ses_name):
            #g = dict(globals())
            #globals()[ses_name]=None
            #del g[ses_name]
            globals()[ses_name].close_session()
            del globals()[ses_name]
def add_item_to_subfolder_in_tree(self, node):
    """Populate tree *node* with one child per entry of the folder stored in
    its item data; folders are marked expandable (children load lazily)."""
    subfolder_path_name = self.case_suite_page.GetPyData(node)['path_name']
    items = get_folder_item(subfolder_path_name)
    if items is None:
        # BUG FIX: the original referenced self.m_case_tree here, an attribute
        # that does not exist on this class (every other access uses
        # self.case_suite_page), so a missing folder raised AttributeError
        # instead of being flagged in red.
        self.case_suite_page.SetItemText(node, self.case_suite_page.GetItemText(node) + ' Not Exists!!!')
        self.case_suite_page.SetItemTextColour(node, wx.Colour(255, 0, 0))
        return
    for i in items:
        path_name = '{}/{}'.format(subfolder_path_name, i)
        base_name = os.path.basename(i)
        item_info = wx.TreeItemData({'path_name': path_name})
        self.case_list.append(path_name)
        new_item = self.case_suite_page.InsertItem(node, node, base_name)
        self.case_suite_page.SetItemData(new_item, item_info)
        if os.path.isdir(path_name):
            # show the expand button; contents are added on EVT_TREE_ITEM_EXPANDING
            self.case_suite_page.SetItemHasChildren(new_item)
    #self.m_case_tree.ItemHasChildren()
    #self.m_case_tree.InsertItem(new_item,new_item,'')
def build_suite_tree(self):
    """Create the CASE tree root from suite_path and load its first level."""
    root_path = self.suite_path  # falls back to the current directory
    if not os.path.exists(root_path):
        root_path = os.path.abspath(os.path.curdir)
    root = self.case_suite_page.AddRoot(os.path.basename(root_path))
    root_info = wx.TreeItemData({'path_name': root_path})
    self.case_suite_page.SetItemData(root, root_info)
    self.add_item_to_subfolder_in_tree(root)
    self.case_suite_page.Expand(root)
# def OnSelChanged(self, event):
# item = event.GetItem()
# self.display.SetLabel(self.tree.GetItemText(item))
#def case_tree_OnMouseWheel(self, event):
def m_case_treeOnLeftDClick(self, event):
    """Double-click in the CASE tree: toggle folder nodes, open file nodes in
    a new editor tab (grid view for spreadsheet files, text otherwise)."""
    ht_item = self.case_suite_page.GetSelection()
    #ht_item = self.HitTest(event.GetPosition())
    item_name = self.case_suite_page.GetItemText(ht_item)
    item_data = self.case_suite_page.GetItemData(ht_item)
    if self.case_suite_page.ItemHasChildren(ht_item):
        if self.case_suite_page.IsExpanded(ht_item):
            self.case_suite_page.Collapse(ht_item)
        else:
            self.case_suite_page.ExpandAllChildren(ht_item)
    else:
        # BUG FIX: the original tested `item_name.lower() in ['.csv', ...]`,
        # which only matches a file literally named ".csv"; spreadsheet files
        # therefore always opened as plain text. Test the extension instead.
        if item_name.lower().endswith(('.csv', '.xlsx', '.xls')):
            type = 'grid'
        else:
            type = 'text'
        file_name = item_data.Data['path_name']
        new_page = FileEditor(self.edit_area, 'a', type=type, file_name=file_name)
        self.edit_area.AddPage(new_page, item_name)
        index = self.edit_area.GetPageIndex(new_page)
        self.edit_area.SetSelection(index)
def m_case_treeOnTreeItemExpanding(self, event):
    """Lazily populate a folder node the first time it is expanded."""
    tree = self.case_suite_page
    node = tree.GetSelection()
    try:
        node_info = tree.GetPyData(node)
        not_loaded_yet = 0 == tree.GetChildrenCount(node)
        if not_loaded_yet and os.path.isdir(node_info['path_name']):
            self.add_item_to_subfolder_in_tree(node)
    except Exception as e:
        # best-effort: nodes without data or vanished folders are ignored
        pass
def build_session_tab(self):
    """(Re)build the SESSION tree from every *.csv bench file under
    session_path: one node per bench file, one child per session in it."""
    if self.session_page.RootItem:
        self.session_page.DeleteAllItems()
    session_path = os.path.abspath(self.ini_setting.get('dash','session_path'))
    self.session_path= session_path
    if not os.path.exists(session_path):
        session_path= os.path.abspath(os.path.curdir)
    base_name = os.path.basename(session_path)
    sessions = {}
    root =self.session_page.AddRoot(base_name)
    item_info = wx.TreeItemData({'path_name':session_path})
    self.session_page.SetItemData(root, item_info)
    self.session_page.Expand(root)
    item_list = get_folder_item(session_path)
    session_files=[]
    for item in item_list:
        if os.path.isfile('{}/{}'.format(session_path,item)) and '{}'.format(item).lower().strip().endswith('.csv'):
            session_files.append(item)
    for csv_file in sorted(session_files):
        try:
            ses_in_bench = load_bench(os.path.abspath('{}/{}'.format(session_path, csv_file)))
            for bench in ses_in_bench:
                for ses in ses_in_bench[bench]:
                    # resolve a relative login_step to an absolute path
                    if ses_in_bench[bench][ses].has_key('login_step') and ses_in_bench[bench][ses]['login_step'].strip() not in ['', None]:
                        ses_in_bench[bench][ses].update(
                            {'login_step': os.path.abspath('{}/{}'.format(session_path, ses_in_bench[bench][ses]['login_step'].strip()))}
                        )
            sessions.update(ses_in_bench)
        except Exception as e:
            pass  # skip malformed bench files
    root =self.session_page.GetRootItem()
    for file_name in sorted(sessions.keys()):
        item_name = os.path.basename(file_name)
        item_info = wx.TreeItemData({'file_name':file_name})
        new_bench = self.session_page.InsertItem(root, root, item_name)
        # BUG FIX: item data was attached to self.case_suite_page (the CASE
        # tree) instead of the SESSION tree the items actually live in, so
        # double-clicking a session found no 'attribute' data.
        self.session_page.SetItemData(new_bench, item_info)
        for ses in sorted(sessions[file_name]):
            item_name = ses
            item_info = wx.TreeItemData({'attribute':sessions[file_name][ses]})
            new_item = self.session_page.InsertItem(new_bench, new_bench, item_name)
            self.session_page.SetItemData(new_item, item_info)  # BUG FIX: same as above
    self.session_page.Expand(root)
    first_child = self.session_page.GetFirstChild(root)
    self.session_page.Expand(first_child[0])
def on_LeftDClick_in_Session_tab(self, event):
    """Double-click a session leaf: open a SessionTab for it and publish the
    session under a unique global variable name."""
    event.Skip()
    ses_name = self.session_page.GetItemText(self.session_page.GetSelection())
    self.session_page.GetItemText(self.session_page.GetSelection())  # (duplicate call, result unused)
    session_attribute = self.session_page.GetItemData(self.session_page.GetSelection())
    # only session leaves carry an 'attribute' dict (bench nodes carry 'file_name')
    if session_attribute.Data.has_key('attribute'):
        info(session_attribute.Data['attribute'])
        counter =1
        original_ses_name = ses_name
        # suffix _1, _2, ... until the tab name is unique among open tabs
        while ses_name in self.tabs_in_edit_area:
            ses_name= '{}_{}'.format(original_ses_name,counter)
            counter+=1
        # avoid clobbering an existing global: try a leading-underscore alias
        if globals().has_key(ses_name):
            if not globals().has_key('_{}'.format(ses_name)):
                info("variable '{}' is existed in global, change the name to _{}".format(ses_name, ses_name))
                ses_name='_{}'.format(ses_name)
                self.session_page.SetItemText(self.session_page.GetSelection(), ses_name)
            else:
                error(("variable '{}' is existed in global, please change the name".format(ses_name)))
                return
        new_page = SessionTab(self.edit_area, ses_name, session_attribute.Data['attribute'], self.sequence_queue, log_path=self.log_path)
        window_id = self.edit_area.AddPage(new_page, ses_name)
        index = self.edit_area.GetPageIndex(new_page)
        self.edit_area.SetSelection(index)
        self.tabs_in_edit_area.append(ses_name)
        self.sessions_alive.update({ses_name: new_page.name})
        attribute = session_attribute.Data['attribute']
        # placeholder path: swapped for the real log_path variable when the
        # call is replayed from a generated script
        log_path='a_fake_log_path_for_auto_script'
        attribute['log_path']=log_path
        self.add_new_session_to_globals(new_page, '{}'.format(attribute))
        #globals().update({ses_name: new_page.session})
def add_new_session_to_globals(self, new_page, args_str):
    """Expose the new session tab as a global named after it, then record the
    equivalent scripted `dut.dut(...)` call in the sequence queue (with the
    fake log path swapped for the script's `log_path` variable and
    not_call_open forced off so the replay actually opens the session)."""
    if globals().has_key(new_page.name):
        # BUG FIX: identity test (`is None`) instead of `== None`
        if globals()[new_page.name] is None:
            pass
        else:
            # BUG FIX: the original message was truncated ("{} already ")
            error('{} already exists'.format(new_page.name))
    else:
        globals().update({new_page.name: new_page})
    self.add_cmd_to_sequence_queue('{} = dut.dut(name= "{}", **{})'.format(new_page.name,new_page.name,args_str.replace("'a_fake_log_path_for_auto_script'",'log_path').replace("'not_call_open': True,", "'not_call_open': False,") ), 'dut')
    #session = dut(name, **attributes)
def on_command_enter(self, event):
    """Parse the command box content as `module[.class].function args...` and
    invoke it on a worker thread so long calls don't freeze the UI."""
    info('called on_command_enter')
    cmd = self.m_command_box.GetValue()
    self.m_command_box.Clear()
    if cmd.strip()=='':
        return
    module,class_name, function,args = parse_command_line(cmd)
    #args[0]=self.sessions_alive['test_ssh'].session
    if module !='' or class_name!='' or function!='':
        # resolves the target (instantiating the class if needed) and returns
        # the replayable source line for the sequence queue
        instance_name, function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module,class_name,function,args, globals())
        call_function = None
        if class_name!="":
            call_function = getattr(instance_name, function_name)
            #(*new_argvs,**new_kwargs)
        else:
            call_function = instance_name#(*new_argvs,**new_kwargs)
        th =threading.Thread(target=call_function, args=new_argvs, kwargs=new_kwargs)
        th.start()
        self.add_cmd_to_history(cmd, module, str_code)
    else:
        error('"{}" is NOT a valid call in format:\n\tmodule.class.function call or \n\tmodule.function'.format(cmd))
def add_src_path_to_python_path(self, path):
    """Prepend each ';'-separated entry of *path* to sys.path if it exists
    and is not already present."""
    paths = path.split(';')
    # NOTE(review): this is an alias of sys.path, not a copy, so entries
    # inserted below are visible to later membership checks — confirm intended.
    old_path = sys.path
    for p in paths:
        if p in old_path:
            info('path {} already in sys.path'.format(p))
        else:
            abspath = os.path.abspath(p)
            if os.path.exists(abspath):
                sys.path.insert(0,abspath)
            else:
                warn('path {} is not existed, ignored to add it into sys.path'.format(p))
def on_key_down(self, event):
    """Intercept keys in the command box: Tab and Shift+'/' (i.e. '?') append
    their character and submit the command immediately."""
    #error(event.KeyCode)
    keycode = event.KeyCode
    if keycode == wx.WXK_TAB:
        self.m_command_box.AppendText('\t')
        self.on_command_enter(event)
    elif keycode == ord('/') and wx.GetKeyState(wx.WXK_SHIFT):
        # FIX: the original compared against wx.PAPER_ENV_INVITE — a
        # paper-size enum that merely happens to equal 47 == ord('/').
        # Spell the intent directly; behavior is unchanged.
        self.m_command_box.AppendText('?')
        self.on_command_enter(event)
    else:
        event.Skip()
def on_key_up(self, event):
    """Up/Down arrows cycle through command history; Tab is swallowed here
    (handled by on_key_down); everything else propagates."""
    keycode = event.KeyCode
    if keycode in (wx.WXK_UP, wx.WXK_DOWN):
        going_forward = keycode == wx.WXK_DOWN
        self.m_command_box.Clear()
        self.history_cmd_index, recalled = get_next_in_ring_list(
            self.history_cmd_index, self.history_cmd, increase=going_forward)
        self.m_command_box.AppendText(recalled)
    if keycode not in (wx.WXK_TAB,):
        event.Skip()
def add_cmd_to_history(self, cmd, module_name, str_code):
    """Remember *cmd* (skipping immediate repeats) and queue its generated
    source line for the auto-script."""
    is_immediate_repeat = bool(self.history_cmd) and self.history_cmd[-1] == cmd
    if not is_immediate_repeat:
        self.history_cmd.append(cmd)
    # point one past the end so the next Up-arrow recalls the newest entry
    self.history_cmd_index = len(self.history_cmd)
    self.add_cmd_to_sequence_queue(str_code, module_name)
    #self.sequence_queue.put([cmd, datetime.now()])
def build_function_tab(self):
    """Build the FUNCTION tree: one node per module file under src_path, with
    its top-level functions and each class's methods as children. Item data
    stores the dotted name used to form commands on double-click."""
    src_path = os.path.abspath(self.src_path)
    if not os.path.exists(src_path):
        src_path = os.path.abspath(os.path.curdir)
    base_name = os.path.basename(src_path)
    root = self.function_page.AddRoot(base_name)
    item_info = wx.TreeItemData({'name': src_path})
    self.function_page.SetItemData(root, item_info)
    modules = get_folder_item(src_path)
    if modules is None:
        self.function_page.SetItemText(root, self.function_page.GetItemText(root) + ' Not Exists!!!')
        self.function_page.SetItemTextColour(root, wx.Colour(255, 0, 0))
        return
    for module_file in modules:
        path_name = '{}'.format(os.path.abspath(self.src_path))
        module_name = os.path.basename(module_file).split('.')[0]
        new_module = self.function_page.InsertItem(root, root, module_name)
        file, path_name, description = imp.find_module(module_name)
        lmod = imp.load_module(module_name, file, path_name, description)
        if file:
            # BUG FIX: imp.find_module returns an open file object that the
            # caller must close; it was previously leaked for every module.
            file.close()
        for attr in sorted(dir(lmod)):
            if attr.startswith('__'):
                continue
            attr_obj = getattr(lmod, attr)
            attr_type = type(attr_obj)
            if attr_type == types.FunctionType:
                new_item = self.function_page.InsertItem(new_module, new_module, '{}'.format(attr))
                item_info = wx.TreeItemData({'name': '{}.{}'.format(module_name, attr)})
                self.function_page.SetItemData(new_item, item_info)
            elif attr_type == types.TypeType:
                class_obj = getattr(lmod, attr)
                new_class = self.function_page.InsertItem(new_module, new_module, attr)
                item_info = wx.TreeItemData({'name': '{}.{}'.format(module_name, attr)})
                # BUG FIX: the original attached this data to new_item — the
                # previous function's node (or an unbound name for a
                # class-first module) — instead of the class node just made.
                self.function_page.SetItemData(new_class, item_info)
                for attr_in_class in sorted(dir(class_obj)):
                    if attr_in_class.startswith('__'):
                        continue
                    attr_obj = getattr(class_obj, attr_in_class)
                    attr_type = type(attr_obj)
                    if attr_type == types.MethodType:
                        item_info = wx.TreeItemData({'name': '{}.{}.{}'.format(module_name, attr, attr_in_class)})
                        new_item = self.function_page.InsertItem(new_class, new_class, attr_in_class)
                        self.function_page.SetItemData(new_item, item_info)
    self.function_page.Expand(root)
    first_child = self.function_page.GetFirstChild(root)
    self.function_page.Expand(first_child[0])
def on_LeftDClick_in_Function_tab(self, event):
    """Copy the clicked function's dotted name into the command box, ready
    for the user to append arguments."""
    event.Skip()
    selected = self.function_page.GetSelection()
    fun_name = self.function_page.GetItemData(selected)
    text_in_tree = self.function_page.GetItemText(selected)
    if fun_name is not None and fun_name.Data.has_key('name'):
        cmd = fun_name.Data['name']
        info('click item in Functions tab: {}'.format(fun_name.Data['name']))
        # all UI mutations are deferred onto the event loop
        wx.CallAfter(self.m_command_box.Clear)
        wx.CallAfter(self.m_command_box.AppendText, cmd + ' ')
        wx.CallAfter(self.m_command_box.SetFocus)
        wx.CallAfter(self.m_command_box.SetInsertionPointEnd)
        wx.CallAfter(self.m_command_box.Refresh)
def on_right_down_in_function_tab(self, event):
    """Show a context menu on the FUNCTION tree with a single Refresh action."""
    context_menu = wx.Menu()
    refresh_item = wx.MenuItem(context_menu, wx.NewId(), "Refresh")
    #acc = wx.AcceleratorEntry()
    #acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
    #item.SetAccel(acc)
    context_menu.AppendItem(refresh_item)
    self.Bind(wx.EVT_MENU, self.on_refresh_function_page, refresh_item)
    self.PopupMenu(context_menu, event.GetPosition())
def on_refresh_function_page(self, event):
    """Context-menu handler: rebuild the FUNCTION tree from scratch."""
    self.function_page.DeleteAllItems()
    self.build_function_tab()
    info('Refresh Function tab done!')
def add_cmd_to_sequence_queue(self, cmd, module_name):
    """Record *cmd* with a timestamp for later script generation, and remember
    its module so generate_code() can emit the matching import."""
    if not self.import_modules.has_key(module_name):
        self.import_modules.update({module_name: module_name})
    self.sequence_queue.put([cmd, datetime.now()])
def generate_code(self, file_name ):
    """Drain sequence_queue and append a standalone, replayable Python script
    of every recorded command to *file_name* (skipped when nothing was run).

    NOTE(review): the template strings below are emitted verbatim; their
    internal leading whitespace is load-bearing for the generated script's
    indentation — confirm against a produced script.
    """
    str_code ="""#created by DasH
if __name__ == "__main__":
import sys, traceback
sys.path.insert(0,r'{}')
sys.path.insert(0,r'{}')
import lib.common
log_path= '../log/tmp'
log_path= lib.common.create_case_folder()
try:
""".format(self.src_path,self.lib_path )
    sessions =[]
    # one import per module ever invoked from the command box
    for module in self.import_modules:
        str_code+=' import {mod}\n'.format(mod=module)#\n {mod}_instance = {mod}()
    no_operation = True
    # drain the queue; each entry is [command_string, timestamp]
    while True:
        try:
            cmd, timestamp =self.sequence_queue.get(block=False)[:2]
            str_code +=' {} #{}\n'.format(cmd, timestamp.isoformat( ' '))
            if cmd.find('dut.dut(')!=-1:
                # remember created sessions so the script can close them
                sessions.append(cmd.split('=')[0].strip())
            no_operation=False
            #datetime.now().isoformat()
        except Exception as e:
            # queue exhausted (Queue.Empty)
            break
    close_session=''
    # error path: dump the traceback, close sessions, exit non-zero
    str_code+=''' except Exception as e:
print(traceback.format_exc())\n'''
    for ses in sessions:
        str_code+=''' {}.close_session()\n'''.format(ses)
    str_code+=' sys.exit(-1)\n'#, sys.exit(-1)
    # success path: close sessions
    for ses in sessions:
        str_code+=''' {}.close_session()\n'''.format(ses)
    info(str_code)
    if not no_operation:
        with open(file_name, 'a+') as f:
            f.write(str_code)
def on_right_down_in_case_tab(self, event):
    """Show a context menu on the CASE tree: run or kill the selected test."""
    context_menu = wx.Menu()
    run_item = wx.MenuItem(context_menu, wx.NewId(), "Run Test")
    kill_item = wx.MenuItem(context_menu, wx.NewId(), "Kill Test")
    #acc = wx.AcceleratorEntry()
    #acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
    #item.SetAccel(acc)
    context_menu.AppendItem(run_item)
    context_menu.AppendItem(kill_item)
    self.Bind(wx.EVT_MENU, self.on_run_script, run_item)
    self.Bind(wx.EVT_MENU, self.on_kill_script, kill_item)
    self.PopupMenu(context_menu, event.GetPosition())
def on_kill_script(self,event):
    """Context-menu handler: terminate the selected case if still running,
    otherwise just record its final PASS/FAIL result."""
    hit_item = self.case_suite_page.GetSelection()
    item_name = self.case_suite_page.GetItemText(hit_item)
    item_data = self.case_suite_page.GetItemData(hit_item).Data
    # only items previously started by on_run_script carry a PROCESS entry
    if item_data.has_key('PROCESS'):
        p = item_data['PROCESS']
        name= item_data['FULL_NAME']
        info('script:{}, returncode:{}'.format(name,p.returncode))
        # NOTE(review): Popen.returncode is only refreshed by poll()/wait();
        # without a poll() here a finished process may still read as running.
        if p.returncode is None:
            #if p.is_alive():
            info('Terminate alive process {}:{}'.format(item_name, p.pid))
            result ='KILL'
            self.update_case_status(p.pid, result)
            self.mail_test_report("DASH TEST REPORT-updating")
            p.terminate()
        else:
            result ='FAIL' if p.returncode else 'PASS'
            info('{}:{} completed with returncode {}'.format(item_name, p.pid, result))
            self.update_case_status(p.pid, result)
def run_script(self, script_name):
    """Launch *script_name* (plus any arguments) as a detached subprocess via
    script_runner and register it in the test report.

    Returns (Popen, case_log_path)."""
    from lib.common import run_script
    from multiprocessing import Process, Queue
    import subprocess
    import shlex
    old_script_name = script_name
    # split "script arg1 \"arg two\"" shell-style, honoring double quotes only
    lex = shlex.shlex(script_name)
    lex.quotes = '"'
    lex.whitespace_split = True
    script_name_and_args = list(lex)
    script_args = script_name_and_args[1:]
    script_name = script_name_and_args[0]
    if script_name.find(os.path.sep)!=-1:
        pass
    else:
        # bare names are resolved relative to the suite folder
        script_name= '{}/{}'.format(self.suite_path,script_name)
    from lib.common import create_case_folder
    # sys.argv swap around the (currently disabled) create_case_folder call
    old_sys_argv = sys.argv
    sys.argv= [script_name]+script_args
    case_log_path = self.log_path #create_case_folder()
    sys.argv= old_sys_argv
    try:
        # prefer a frozen runner when present; otherwise run via the interpreter
        if os.path.exists('script_runner.exe'):
            execute = 'script_runner.exe'
            cmd = [execute,script_name ]+script_args + ['-l','{}'.format(case_log_path)]
            #p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
        else:
            cmd = [sys.executable,'./script_runner.py', script_name ]+script_args+ ['-l','{}'.format(case_log_path)]
        # CREATE_NEW_CONSOLE is Windows-only. NOTE(review): indentation was
        # ambiguous in the original; reconstructed so this Popen runs for
        # both branches above — confirm.
        p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)#, stdin=pipe_input, stdout=pipe_output,stderr=pipe_output)
        self.add_new_case_to_report(p.pid, old_script_name, p, case_log_path)
    except:
        error(traceback.format_exc())
        # NOTE(review): if Popen itself raised, `p` is unbound and the return
        # below raises NameError — confirm intended.
    return p, case_log_path
def on_run_script(self,event):
    """Context-menu handler: (re)start the selected case as a subprocess and
    attach the Popen handle to the tree item for later kill/status checks."""
    hit_item = self.case_suite_page.GetSelection()
    item_name = self.case_suite_page.GetItemText(hit_item)
    item_data = self.case_suite_page.GetItemData(hit_item).Data
    script_name = self.case_suite_page.GetItemData(hit_item).Data['path_name']
    # kill any previous run of this item first
    self.on_kill_script(event)
    try:
        p, case_log_path = self.run_script('{} {}'.format(script_name, item_name))
        self.case_suite_page.GetItemData(hit_item).Data['PROCESS']=p
        self.case_suite_page.GetItemData(hit_item).Data['FULL_NAME']= item_name
        info('start process {} :{}'.format(item_name, p.pid))
        #p.join() # this blocks until the process terminates
        time.sleep(1)
    except Exception as e :
        error(traceback.format_exc())
    #p = Process(target=run_script, args=[script_name, script_and_args])
    #p.start()
def check_case_status(self):
    """Poll every tracked process once: record PASS/FAIL for processes that
    just finished and, when nothing is running, start the next queued case.

    Returns True when at least one case changed state (a report is mailed)."""
    self.check_case_running_status_lock.acquire()
    changed = False
    try:
        running_case = 0
        for pid in self.dict_test_report.keys():
            case_name, start_time, end_time, duration, return_code, proc, log_path = self.dict_test_report[pid]
            if return_code is None:
                if proc.poll() is None:
                    running_case += 1
                    debug('RUNNING', start_time, end_time, duration, return_code, proc, log_path)
                else:
                    changed = True
                    return_code = 'FAIL' if proc.returncode else 'PASS'
                    self.update_case_status(pid, return_code)
        if running_case:
            pass  # one case at a time: wait for it before starting the next
        elif not self.case_queue.empty():  # self.case_queue.qsize():
            case_name_with_args = self.case_queue.get()
            p, case_log_path = self.run_script(case_name_with_args)
    finally:
        # BUG FIX: the lock was previously released only on the happy path;
        # any exception above left it held forever, freezing every poller.
        self.check_case_running_status_lock.release()
    if changed:
        #test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
        self.mail_test_report('DasH Test Report-updating')
    return changed
def polling_running_cases(self):
    """Background thread: every 10 seconds poll case status until the frame
    is closed (self.alive cleared) or the frame object is gone."""
    while True:
        time.sleep(10)
        try:
            still_alive = self.alive
        except:
            break  # frame already torn down
        if not still_alive:
            break
        #self.check_case_running_status_lock.acquire()
        self.check_case_status()
        #self.check_case_running_status_lock.release()
def add_new_case_to_report(self, pid, case_name, proc, log_path):
    """Register a newly started case under its PID in the test report table.

    Record layout: [case_name, start_time, end_time, duration, return_code,
    proc, log_path]; return_code None means the case is still running."""
    start_time = datetime.now()
    duration = 0
    end_time = None
    return_code = None
    #self.check_case_running_status_lock.acquire()
    # BUG FIX: on PID reuse the old code called .update(...) on the stored
    # *list*, which raises AttributeError (lists have no update()); a plain
    # assignment is what was intended in both branches.
    self.dict_test_report[pid] = [case_name, start_time, end_time, duration, return_code, proc, log_path]
    #self.check_case_running_status_lock.release()
def update_case_status(self, pid, return_code=None):
    """Finalize the report record for *pid* with *return_code* and its
    duration — but never overwrite a result that was already recorded."""
    now = datetime.now()
    case_name, start_time, end_time, duration, previous_result, proc, log_path = self.dict_test_report[pid]
    if previous_result is not None:
        return  # don't update one case result twice
    duration = (now - start_time).total_seconds()
    self.dict_test_report[pid] = [case_name, start_time, end_time, duration, return_code, proc, log_path]
def mail_test_report(self, subject="DASH TEST REPORT-updating"):
    """Generate the current report file and best-effort mail it; failures are
    logged but never propagated."""
    try:
        #self.check_case_status()
        report_file = '{}/dash_report.txt'.format(self.log_path)
        test_report = self.generate_report(filename=report_file)
        # args: TO, SUBJECT, TEXT, SERVER, FROM
        send_mail_smtp_without_login(self.mail_to_list, subject, test_report,
                                     self.mail_server, self.mail_from)
    except Exception as e:
        error(traceback.format_exc())
def on_mail_test_report(self, event):
    """Menu handler: mail the current test report."""
    self.mail_test_report('DasH Test Report-updating')
    #p.terminate()
def on_handle_request_via_mail(self):
    """Poll the IMAP inbox once and act on unread 'dash-*' command mails
    (queue cases, mail reports, kill running cases, etc.)."""
    import imaplib
    url, user, password = self.mail_read_url,self.mail_user, self.mail_password
    conn = imaplib.IMAP4_SSL(url,993)
    conn.login(user,password)
    conn.select('INBOX')
    results,data = conn.search(None,'(UNSEEN)') # #'ALL')
    msg_ids = data[0]
    msg_id_list = msg_ids.split()
    from email.parser import Parser
    def process_multipart_message(message):
        # flatten a (possibly nested) multipart payload into one string;
        # str/list payloads pass through unchanged (Python 2 basestring)
        if isinstance(message, basestring) or isinstance(message , list):
            return message
        rtn = ''
        try:
            if message.is_multipart():
                for m in message.get_payload():
                    rtn += process_multipart_message(m)
            else:
                rtn += message.get_payload()
        except Exception as e:
            pass
        return rtn
    # newest first, bounded so a flooded inbox cannot stall the poller
    MAX_UNREAD_MAIL = 50
    for unread_mail_id in msg_id_list[::-1][:MAX_UNREAD_MAIL]:
        result,data = conn.fetch(unread_mail_id,"(RFC822)")
        raw_email = data[0][1]
        p = Parser()
        msg = p.parsestr(raw_email)
        #msg = process_multipart_message(msg )
        from1 = msg.get('From')
        sub = msg.get('Subject')
        sub = sub.strip().lower()
        support_list='''
###############################
mail subject below is supported:
dash-request-case-queue : request the cases in queue which to be executed
dash-request-case : request cases which are under suite_path
dash-request-report : request a test report by now
dash-request-kill-running : to kill all running test cases
dash-request-clear-queue : to clear/remove all cases which are in case queue
dash-request-run : to run script(s), each line is a script with arguments if it has
--------------------------------
***non-case-sensitive***
###############################
'''
        # dispatch on the (lowercased) mail subject
        if sub in ['dash']:
            send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH Support List',support_list,self.mail_server,self.mail_from)
        elif sub in ['dash-request-case-queue']:
            case_in_queue =self.get_case_queue(None)
            send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case In Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
        elif sub in ['dash-request-case']:
            cases_string = '\n\t'.join(self.case_list)
            send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case List',cases_string+support_list,self.mail_server,self.mail_from)
        elif sub in ['dash-request-report']:
            self.mail_test_report('DasH Test Report-requested')
        elif sub in ['dash-request-kill-running']:
            killed= self.on_kill_running_case()
            send_mail_smtp_without_login(self.mail_to_list, 'DONE-[DasH]:Killed Running Case(s)',killed+support_list,self.mail_server,self.mail_from)
        elif sub in ['dash-request-clear-queue']:
            case_in_queue = self.on_clear_case_queue()
            send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Clear Case Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
        elif sub in ['dash-request-run']:
            # only trusted senders may queue scripts for execution
            if from1 in ['dash@calix.com', 'yu_silence@163.com',self.mail_to_list]:
                # mark the mail read so it isn't processed again
                conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                #conn.uid('STORE', '-FLAGS', '(\Seen)')
                payload = msg.get_payload()
                payload = process_multipart_message(payload )
                from lib.html2text import html2text
                txt = html2text(payload)
                # one case (script + args) per non-comment, non-empty line
                cases = txt.replace('\r\n','\n').split('\n')
                for line in cases:
                    line = line.strip()
                    if line.strip().startswith('#') or len(line)==0:
                        pass
                    else:
                        self.case_queue.put(line)
                        info('adding case to queue: {}'.format(line))
def polling_request_via_mail(self):
    """Background thread: every 10 seconds check the mailbox for requests
    until the frame is closed or gone."""
    while True:
        time.sleep(10)
        try:
            still_alive = self.alive
        except:
            break  # frame already torn down
        if not still_alive:
            break
        try:
            self.on_handle_request_via_mail()
        except:
            pass  # mailbox problems must not kill the poller
def get_case_queue(self, item=None):
    """Log and return a printable summary of the cases waiting in the queue."""
    queued = list(self.case_queue.queue)
    if queued:
        summary = '\ntotal {} case(s) in Queue\n'.format(len(queued)) \
            + '\n'.join('{}'.format(entry) for entry in queued)
    else:
        summary = '\nNo Case in Queue'
    info('Case(s) in Queue', summary)
    return summary
def on_clear_case_queue(self, event=None):
    """Empty the case queue and return a summary of what it contained."""
    snapshot = self.get_case_queue(None)
    self.case_queue.queue.clear()
    self.get_case_queue(None)  # log the now-empty queue
    return snapshot
def on_kill_running_case(self, event=None):
    """Terminate every still-running case and return a 'name:pid' summary of
    what was killed (their status is recorded as KILL)."""
    killed_case = ''
    for case in self.dict_test_report:
        # BUG FIX: the original unpacked `self.dict_test_report[:7]` — slicing
        # the dict itself, which raises TypeError; it must index the per-case
        # record first.
        case_name, start_time, end_time, duration, return_code, proc, log_path = self.dict_test_report[case][:7]
        if return_code is None:
            if proc.poll() is None:
                killed_case += '{}:{}\n'.format(case_name, proc.pid)
                info('Terminate alive process {}:{}'.format(case_name, proc.pid))
                result = 'KILL'
                self.update_case_status(proc.pid, result)
                proc.terminate()
    info('Killed All Running cases', killed_case)
    return killed_case
#done: 2017-08-22, 2017-08-19 save main log window to a file
#todo: 2017-08-19 add timestamps to log message
#done: 2017-08-22, 2017-08-19 mail to someone
#todo: 2017-08-19 run a script in DasH
#todo: 2017-08-19 generate test report
#todo: 2017-08-19 publish all test cases in a web page
#todo: 2017-08-19 trigger a test remote via web page
#todo: 2017-08-19 re-run failed cases
#todo: 2017-08-19 build executable packege for DasH
#todo: 2017-08-19 a popup window to get email address/password/mail_server...
#todo: 2017-08-22 output in m_log window has a lot of empty line, need remove them
#todo: 2017-08-23 in common.call_function_in_module, should end all threads which are started in previous instance
#todo: 2017-08-23 add tips for all tree items in the left
#todo: run a test suite file with extension .csv/txt
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 Yu Xiong Wei(try.dash.now@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = 'sean yu (Yu, Xiongwei)'
__doc__ = '''
it's GUI of DasH aka Do as Human
created 2017-05-06 by Sean Yu
'''
from datetime import datetime
import wx.grid as gridlib
import traceback
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
from lib.common import send_mail_smtp_without_login
from lib.common import run_script
from multiprocessing import Process
import subprocess
import shlex
#from dut import dut
class RedirectText(object):
    """File-like object that tees stdout/stderr into a wx text control
    (highlighting error-looking lines) and optionally into a log file.

    Instances replace sys.stdout/sys.stderr; the originals are kept so
    they still receive every message and can be restored at shutdown.
    """
    font_point_size = 10
    old_stdout = None
    old_stderr = None
    write_lock = None
    log_file = None
    def __init__(self,aWxTextCtrl, log_path=None):
        self.old_stderr , self.old_stdout=sys.stderr , sys.stdout
        self.out=aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize
        self.write_lock = threading.Lock()
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            # Expose fileno so code treating us as a real file keeps working.
            self.fileno = self.log_file.fileno
    def write(self,string):
        """Write `string` to the original stdout, the wx control and the
        log file. Serialized by a lock because several threads print
        concurrently.

        BUG FIX: the lock is now released in a finally block, so an
        exception inside a wx call can no longer leave the lock held and
        deadlock all subsequent logging.
        """
        self.write_lock.acquire()
        try:
            self.old_stdout.write(string)
            # Error-looking lines get bold red-on-yellow styling.
            if re.search('error|\s+err\s+|fail|wrong',string.lower()):
                self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW, font =wx.Font(self.font_point_size+2, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
            else:
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
            # AppendText must run on the GUI thread.
            wx.CallAfter(self.out.AppendText, string)
            if self.log_file:
                self.log_file.write(string)
                self.log_file.flush()
        finally:
            self.write_lock.release()
    def close(self):
        """Flush and close the backing log file, if one was opened."""
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
class process_info(object):
    """Lightweight record about a spawned test-case process.

    `returncode` is a read-only property delegating to the wrapped
    process so callers always see its *current* state.

    BUG FIX: the original __init__ also did `self.returncode =
    process.returncode`, which raises AttributeError because the
    class-level property has no setter; that assignment (and the shadowed
    class attribute) are removed.
    """
    process = None
    pid=None
    full_name=None
    def __init__(self,name, process):
        self.process= process
        self.pid = process.pid
        self.full_name =name
    @property
    def returncode(self):
        # Always reflect the live process, not a stale snapshot.
        return self.process.returncode
class FileEditor(wx.Panel):
    """One tab of the edit area: shows a file either as plain text or as
    a function/argument grid, and writes it back to disk on tab close."""
    editor =None            # wx.TextCtrl or wx.grid.Grid, set in __init__
    font_size=10            # current grid font size (mouse-wheel zoom)
    parent=None
    type = None             # 'text' or 'grid'
    sessions_node =None
    function_node =None
    case_suite_node =None
    full_file_name = None   # path the tab was opened from / saves to
    file_instance = None
    def on_close(self):
        """Persist the current editor contents back to the source file."""
        # NOTE(review): wx.grid.Grid has no GetValue(); saving a 'grid'
        # tab presumably fails here — confirm against wx version in use.
        if self.full_file_name:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
    #done 2017-9-12: handle close tab in edit_area
    def __init__(self, parent, title='pageOne', type ='grid', file_name = None):
        """Build either a rich-text editor (type 'text', loads file_name)
        or a 50x5 grid with a 'Function Name' column plus arg columns."""
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        #self.editor = wx.TextCtrl(self, style = wx.TE_MULTILINE|wx.TE_RICH2|wx.EXPAND|wx.ALL, size=(-1,-1))
        if type in ['text']:
            self.editor = wx.TextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL|wx.VSCROLL|wx.TE_RICH|wx.TE_MULTILINE&(~wx.TE_PROCESS_ENTER))
            #wx.richtext.RichTextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0|wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER|wx.WANTS_CHARS )
            # Load the file line by line into the text control.
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            # Grid mode: column 0 holds the function name (bold/black),
            # remaining columns hold its arguments (blue).
            self.editor= gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color ='black'
            arg_color = 'blue'
            for c in range(0, col):
                if c < 1 :
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range (0, row):
                    self.editor.SetCellTextColour(r,c,function_color if c <1 else arg_color)
            for r in range (0, row):
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size,wx.SWISS, wx.NORMAL, wx.BOLD ))
            self.editor.Bind( wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel )
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)
    def editor_OnMouseWheel(self,event):
        """Ctrl+wheel zoom: shrink the font on wheel-down, grow it on
        wheel-up, then re-apply the font to the whole editor."""
        min_font_size = 5
        interval_step = 2
        # Only react while Ctrl is held; otherwise let wx handle the wheel.
        if event.ControlDown():
            pass
        else:
            return
        # NOTE(review): shrink uses interval_step (2) but grow uses 1 —
        # presumably intentional asymmetry, confirm with author.
        if event.GetWheelRotation() < 0:
            if self.font_size>min_font_size:
                self.font_size-=interval_step
        else:
            self.font_size+=1
        if self.type in ['text']:
            f =self.editor.GetFont()
            f.PointSize= self.font_size
            self.editor.SetFont(f)
        else:
            # Grids have per-cell fonts; update every cell.
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range (0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
        #wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
class DasHFrame(MainFrame):#wx.Frame
ini_setting = None
#m_left_navigator =None
redir = None
edit_area=None
tabs_in_edit_area = None
src_path = None
sessions_alive=None
sequence_queue=None
history_cmd = []
history_cmd_index = -1
import_modules={'TC':'TC'}
lib_path ='./lib'
log_path = '../log'
session_path = './sessions'
suite_path = '../test_suite'
dict_test_report= None
alive =True
mail_server=None
mail_to_list=None
mail_from=None
mail_read_url= 'outlook.office365.com'
mail_password = None
mail_usre =None
case_queue =None
check_case_running_status_lock = None
case_list=None
    def __init__(self,parent=None, ini_file = './gDasH.ini'):
        """Build the main frame: read gDasH.ini, redirect stdout/stderr
        into the log pane, assemble menus/trees/sizers, populate the
        SESSION/FUNCTION/CASE navigators and start the polling threads."""
        #wx.Frame.__init__(self, None, title="DasH")
        self.case_list= []
        self.case_queue = Queue.Queue()
        self.dict_test_report={}
        self.check_case_running_status_lock = threading.Lock()
        self.tabs_in_edit_area=[]
        self.sessions_alive={}
        MainFrame.__init__(self, parent=parent)
        self.sequence_queue= Queue.Queue()
        #self.sequence_queue.put()
        # --- configuration from gDasH.ini ---
        self.ini_setting = ConfigParser.ConfigParser()
        self.ini_setting.read(ini_file)
        self.src_path = os.path.abspath(self.ini_setting.get('dash','src_path'))
        self.lib_path = os.path.abspath(self.ini_setting.get('dash','lib_path'))
        self.log_path = os.path.abspath(self.ini_setting.get('dash','log_path'))
        self.suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
        self.mail_server = self.ini_setting.get('dash', 'mail_server')
        self.mail_from =self.ini_setting.get('dash', 'mail_from')
        self.mail_to_list =self.ini_setting.get('dash', 'mail_to_list')
        self.mail_read_url =self.ini_setting.get('dash', 'mail_read_url')
        self.mail_user = self.ini_setting.get('dash','mail_user')
        self.mail_password =self.ini_setting.get('dash', 'mail_password')
        from lib.common import create_case_folder, create_dir
        # lib.common reads sys.argv for the log location; inject '-l <path>'.
        sys.argv.append('-l')
        sys.argv.append('{}'.format(self.log_path))
        self.log_path = create_case_folder(self.log_path)
        self.suite_path = create_dir(self.suite_path)
        self.lib_path = create_dir(self.lib_path)
        self.src_path = create_dir(self.src_path)
        if not os.path.exists(self.log_path):
            os.mkdir(self.log_path)
        self.add_src_path_to_python_path(self.src_path)
        # --- tee stdout/stderr into the log pane and dash.log ---
        self.redir = RedirectText(self.m_log, self.log_path)
        sys.stdout = self.redir
        sys.stderr = self.redir
        self.m_log.SetBackgroundColour('Black')
        self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font =wx.Font(9, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
        #self.m_editor.WriteText('welcome to dash world')
        self.m_log.WriteText('Welcome to DasH!\n')
        self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')
        # --- "Open" menu ---
        fileMenu = wx.Menu()
        open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
        open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
        mail_test_report = fileMenu.Append(wx.NewId(), "Mail Test Report", "Mail Test Report")
        get_case_queue = fileMenu.Append(wx.NewId(), "Get Case Queue", "Get Case Queue") #done
        clear_case_queue = fileMenu.Append(wx.NewId(), "Clear Case Queue", "Clear Case Queue")
        kill_running_case = fileMenu.Append(wx.NewId(), "Kill Running Case(s)", "Kill Running Case(s)")
        self.m_menubar_main.Append(fileMenu, "&Open")
        self.Bind(wx.EVT_MENU,self.on_mail_test_report ,mail_test_report)
        self.Bind(wx.EVT_MENU,self.get_case_queue ,get_case_queue)
        self.Bind(wx.EVT_MENU,self.on_clear_case_queue ,clear_case_queue)
        self.Bind(wx.EVT_MENU,self.on_kill_running_case ,kill_running_case)
        self.Bind(wx.EVT_CLOSE, self.on_close)
        self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
        self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
        self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
        # --- left navigator: SESSION / FUNCTION / CASE trees ---
        from wx.aui import AuiNotebook
        bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE &(~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
        self.navigator = AuiNotebook(self.m_left_navigator, style= bookStyle )
        self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.function_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.session_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.navigator.AddPage(self.session_page, 'SESSION')
        self.navigator.AddPage(self.function_page, 'FUNCTION')
        self.navigator.AddPage(self.case_suite_page, 'CASE')
        self.edit_area = AuiNotebook(self.m_file_editor, style = wx.aui.AUI_NB_DEFAULT_STYLE)
        self.edit_area.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_close_tab_in_edit_area, self.edit_area)
        if False:
            new_page = FileEditor(self.edit_area, 'a', type= type)
            self.edit_area.AddPage(new_page, 'test')
            self.tabs_in_edit_area.append(('test'))
        self.edit_area.Enable(True)
        # --- layout ---
        right_sizer = wx.BoxSizer(wx.VERTICAL)
        #right_sizer =wx.GridSizer( 3, 1, 0, 0 )
        left_sizer = wx.BoxSizer(wx.HORIZONTAL)
        left_sizer.Add(self.m_left_navigator, 1, wx.EXPAND)
        self.case_suite_page.Bind(wx.EVT_LEFT_DCLICK, self.m_case_treeOnLeftDClick)
        #self.case_suite_page.Bind(wx.EVT_MOUSEWHEEL, self.case_tree_OnMouseWheel)
        self.case_suite_page.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.m_case_treeOnTreeItemExpanding)
        self.session_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Session_tab)
        self.function_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Function_tab)
        self.function_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_function_tab)
        self.case_suite_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_case_tab)
        main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        #main_sizer = wx.GridSizer( 1, 2, 0, 0 )
        nav_sizer = wx.BoxSizer()
        nav_sizer.Add(self.navigator, 1, wx.EXPAND, 1)
        self.m_left_navigator.SetSizer(nav_sizer)
        #main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        #main_sizer.Add(left_sizer, 3, wx.EXPAND)
        main_sizer.Add(left_sizer, 2, wx.EXPAND)
        edit_sizer = wx.BoxSizer()
        edit_sizer.Add(self.edit_area, 1, wx.EXPAND, 1)
        self.m_file_editor.SetSizer(edit_sizer)
        right_sizer.Add(self.m_file_editor, 6, wx.ALL|wx.EXPAND, 1)
        right_sizer.Add(self.m_log, 3, wx.ALL|wx.EXPAND, 2)
        right_sizer.Add(self.m_command_box, 0, wx.ALL|wx.EXPAND, 3)
        main_sizer.Add(right_sizer, 8, wx.EXPAND)
        self.SetSizer(main_sizer)
        # --- populate navigators and start background polling ---
        self.build_session_tab()
        self.build_suite_tree()
        self.build_function_tab()
        ico = wx.Icon('./gui/dash.bmp', wx.BITMAP_TYPE_ICO)
        self.SetIcon(ico)
        th= threading.Thread(target=self.polling_running_cases)
        th.start()
        th = threading.Thread(target=self.polling_request_via_mail)
        th.start()
    def on_close(self, event):
        """Frame close handler: stop the polling threads, dump the
        recorded command sequence as a script, mail the final report,
        close session tabs and restore stdout/stderr."""
        self.alive =False
        time.sleep(0.01)
        self.generate_code(file_name='{}/test_script.py'.format(self.suite_path))
        self.mail_test_report("DASH TEST REPORT")
        for index in range(0,self.edit_area.GetPageCount()): #len(self.tabs_in_edit_area)):
            closing_page = self.edit_area.GetPage(index)
            if isinstance(closing_page, (SessionTab)):
                if closing_page:
                    name = closing_page.name
                    self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(name))
                    closing_page.on_close()
        # Restore the real streams before the redirect object is closed.
        self.redir.close()
        sys.stderr =self.redir.old_stderr
        sys.stdout = self.redir.old_stdout
        event.Skip()
    def generate_report(self, filename):
        """Append a tab-separated status table of all tracked cases to
        `filename` and return it as a string (cases sorted by start time;
        a case with no return code yet is shown as IP = in progress)."""
        report = '''Test Report
RESULT,\tStart_Time,\tEnd_Time,\tPID,\tDuration,\tCase_Name,\tLog\n'''
        if len(self.dict_test_report):
            with open(filename, 'a+') as f:
                f.write(report)
                for pi in sorted(self.dict_test_report, key = lambda x: self.dict_test_report[x][1]):
                    case_name, start_time, end_time, duration, return_code ,proc, log_path =self.dict_test_report[pi][:7]
                    if return_code is None:
                        result = 'IP'
                    else:
                        result = return_code # 'FAIL' if return_code else 'PASS'
                    record = '\t'.join(['{},\t'.format(x) for x in [result,start_time,end_time,pi,duration,case_name,'<{}>'.format(log_path) ]])
                    report+=record+'\n'
                    f.write(record+'\n')
        return report
    def on_close_tab_in_edit_area(self, event):
        """AUI notebook page-close handler: let the page persist itself,
        and for session tabs also tear down the global session object."""
        #self.edit_area.GetPage(self.edit_area.GetSelection()).on_close()
        closing_page = self.edit_area.GetPage(self.edit_area.GetSelection())
        closing_page.on_close()
        if isinstance(closing_page, (SessionTab)):
            ses_name = closing_page.name
            self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(ses_name))
            # Session objects are published into globals() by name so the
            # command box can reference them; remove on close.
            if globals().has_key(ses_name):
                #g = dict(globals())
                #globals()[ses_name]=None
                #del g[ses_name]
                globals()[ses_name].close_session()
                del globals()[ses_name]
def add_item_to_subfolder_in_tree(self,node):
subfolder_path_name = self.case_suite_page.GetPyData(node)['path_name']
items = get_folder_item(subfolder_path_name)
if items is None:
self.case_suite_page.SetItemText(node, self.m_case_tree.GetItemText(node) + ' Not Exists!!!')
self.case_suite_page.SetItemTextColour(node, wx.Colour(255, 0, 0))
return
for i in items:
path_name = '{}/{}'.format(subfolder_path_name,i)
base_name = os.path.basename(i)
item_info = wx.TreeItemData({'path_name':path_name})
self.case_list.append(path_name)
new_item = self.case_suite_page.InsertItem(node, node, base_name)
self.case_suite_page.SetItemData(new_item, item_info)
if os.path.isdir(path_name):
self.case_suite_page.SetItemHasChildren(new_item)
#self.m_case_tree.ItemHasChildren()
#self.m_case_tree.InsertItem(new_item,new_item,'')
    def build_suite_tree(self):
        """Create the root of the CASE tree from the configured test-suite
        path (falling back to the CWD) and load its first level."""
        suite_path = self.suite_path #os.path.abspath(self.ini_setting.get('dash','test_suite_path'))
        if not os.path.exists(suite_path):
            suite_path= os.path.abspath(os.path.curdir)
        base_name = os.path.basename(suite_path)
        root =self.case_suite_page.AddRoot(base_name)
        item_info = wx.TreeItemData({'path_name':suite_path})
        self.case_suite_page.SetItemData(root, item_info)
        self.add_item_to_subfolder_in_tree(root)
        self.case_suite_page.Expand(root)
# def OnSelChanged(self, event):
# item = event.GetItem()
# self.display.SetLabel(self.tree.GetItemText(item))
#def case_tree_OnMouseWheel(self, event):
def m_case_treeOnLeftDClick(self, event):
ht_item =self.case_suite_page.GetSelection()
#ht_item = self.HitTest(event.GetPosition())
item_name = self.case_suite_page.GetItemText(ht_item)
item_data = self.case_suite_page.GetItemData(ht_item)
if self.case_suite_page.ItemHasChildren(ht_item):
if self.case_suite_page.IsExpanded(ht_item):
self.case_suite_page.Collapse(ht_item)
else:
self.case_suite_page.ExpandAllChildren(ht_item)
else:
if item_name.lower() in ['.csv', '.xlsx','.xls']:
type = 'grid'
file_name = item_data.Data['path_name']
else:
type = 'text'
file_name = item_data.Data['path_name']
new_page = FileEditor(self.edit_area, 'a', type= type,file_name=file_name)
self.edit_area.AddPage(new_page, item_name)
index = self.edit_area.GetPageIndex(new_page)
self.edit_area.SetSelection(index)
    def m_case_treeOnTreeItemExpanding(self,event):
        """Lazy-load handler: when an empty directory node is expanded,
        fill it with the directory's contents."""
        ht_item =self.case_suite_page.GetSelection()
        try:
            item_info = self.case_suite_page.GetPyData(ht_item)
            # Only populate once: a node with children was loaded already.
            if 0== self.case_suite_page.GetChildrenCount(ht_item):
                if os.path.isdir(item_info['path_name']):
                    self.add_item_to_subfolder_in_tree(ht_item)
        except Exception as e:
            # Best-effort: a node without data (e.g. root during build)
            # is silently ignored.
            pass
def build_session_tab(self):
if self.session_page.RootItem:
self.session_page.DeleteAllItems()
session_path = os.path.abspath(self.ini_setting.get('dash','session_path'))
self.session_path= session_path
if not os.path.exists(session_path):
session_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(session_path)
sessions = {}
root =self.session_page.AddRoot(base_name)
item_info = wx.TreeItemData({'path_name':session_path})
self.session_page.SetItemData(root, item_info)
self.session_page.Expand(root)
item_list = get_folder_item(session_path)
session_files=[]
for item in item_list:
if os.path.isfile('{}/{}'.format(session_path,item)) and '{}'.format(item).lower().strip().endswith('.csv'):
session_files.append(item)
for csv_file in sorted(session_files):
try:
ses_in_bench = load_bench(os.path.abspath('{}/{}'.format(session_path, csv_file)))
for bench in ses_in_bench:
for ses in ses_in_bench[bench]:
if ses_in_bench[bench][ses].has_key('login_step') and ses_in_bench[bench][ses]['login_step'].strip() not in ['', None]:
ses_in_bench[bench][ses].update(
{'login_step': os.path.abspath('{}/{}'.format(session_path, ses_in_bench[bench][ses]['login_step'].strip()))}
)
sessions.update(ses_in_bench)
except Exception as e:
pass
root =self.session_page.GetRootItem()
for file_name in sorted(sessions.keys()):
item_name = os.path.basename(file_name)
item_info = wx.TreeItemData({'file_name':file_name})
new_bench = self.session_page.InsertItem(root, root, item_name)
self.case_suite_page.SetItemData(new_bench, item_info)
for ses in sorted(sessions[file_name]):
item_name = ses
item_info = wx.TreeItemData({'attribute':sessions[file_name][ses]})
new_item = self.session_page.InsertItem(new_bench, new_bench, item_name)
self.case_suite_page.SetItemData(new_item, item_info)
self.session_page.Expand(root)
first_child = self.session_page.GetFirstChild(root)
self.session_page.Expand(first_child[0])
    def on_LeftDClick_in_Session_tab(self, event):
        """Double-click on a session: open a SessionTab for it, picking a
        unique name (suffix _N, or prefix _ if the name collides with an
        existing global), and publish the session into globals()."""
        event.Skip()
        ses_name = self.session_page.GetItemText(self.session_page.GetSelection())
        self.session_page.GetItemText(self.session_page.GetSelection())
        session_attribute = self.session_page.GetItemData(self.session_page.GetSelection())
        if session_attribute.Data.has_key('attribute'):
            info(session_attribute.Data['attribute'])
            counter =1
            original_ses_name = ses_name
            # Make the tab name unique among open tabs.
            while ses_name in self.tabs_in_edit_area:
                ses_name= '{}_{}'.format(original_ses_name,counter)
                counter+=1
            # Avoid clobbering an unrelated global of the same name.
            if globals().has_key(ses_name):
                if not globals().has_key('_{}'.format(ses_name)):
                    info("variable '{}' is existed in global, change the name to _{}".format(ses_name, ses_name))
                    ses_name='_{}'.format(ses_name)
                    self.session_page.SetItemText(self.session_page.GetSelection(), ses_name)
                else:
                    error(("variable '{}' is existed in global, please change the name".format(ses_name)))
                    return
            new_page = SessionTab(self.edit_area, ses_name, session_attribute.Data['attribute'], self.sequence_queue, log_path=self.log_path)
            window_id = self.edit_area.AddPage(new_page, ses_name)
            index = self.edit_area.GetPageIndex(new_page)
            self.edit_area.SetSelection(index)
            self.tabs_in_edit_area.append(ses_name)
            self.sessions_alive.update({ses_name: new_page.name})
            attribute = session_attribute.Data['attribute']
            # Placeholder path: replaced by the real log_path variable when
            # the recorded command is turned into a standalone script.
            log_path='a_fake_log_path_for_auto_script'
            attribute['log_path']=log_path
            self.add_new_session_to_globals(new_page, '{}'.format(attribute))
            #globals().update({ses_name: new_page.session})
    def add_new_session_to_globals(self, new_page, args_str):
        """Register the new session tab under its name in globals() and
        record the equivalent `dut.dut(...)` call for script generation."""
        if globals().has_key(new_page.name):
            if globals()[new_page.name]==None:
                pass
            else:
                error('{} already '.format(new_page.name))
        else:
            globals().update({new_page.name: new_page})
        self.add_cmd_to_sequence_queue('{} = dut.dut(name= "{}", **{})'.format(new_page.name,new_page.name,args_str.replace("'a_fake_log_path_for_auto_script'",'log_path').replace("'not_call_open': True,", "'not_call_open': False,") ), 'dut')
        #session = dut(name, **attributes)
    def on_command_enter(self, event):
        """Run the command-box text as a `module.class.function args` or
        `module.function args` call on a worker thread, and record it in
        the history / script-generation queue."""
        info('called on_command_enter')
        cmd = self.m_command_box.GetValue()
        self.m_command_box.Clear()
        if cmd.strip()=='':
            return
        module,class_name, function,args = parse_command_line(cmd)
        #args[0]=self.sessions_alive['test_ssh'].session
        if module !='' or class_name!='' or function!='':
            instance_name, function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module,class_name,function,args, globals())
            call_function = None
            if class_name!="":
                call_function = getattr(instance_name, function_name)
                #(*new_argvs,**new_kwargs)
            else:
                call_function = instance_name#(*new_argvs,**new_kwargs)
            # Run on a thread so a long call doesn't freeze the GUI.
            th =threading.Thread(target=call_function, args=new_argvs, kwargs=new_kwargs)
            th.start()
            self.add_cmd_to_history(cmd, module, str_code)
        else:
            error('"{}" is NOT a valid call in format:\n\tmodule.class.function call or \n\tmodule.function'.format(cmd))
def add_src_path_to_python_path(self, path):
paths = path.split(';')
old_path = sys.path
for p in paths:
if p in old_path:
info('path {} already in sys.path'.format(p))
else:
abspath = os.path.abspath(p)
if os.path.exists(abspath):
sys.path.insert(0,abspath)
else:
warn('path {} is not existed, ignored to add it into sys.path'.format(p))
    def on_key_down(self, event):
        """Key-down in the command box: Tab submits the command with a
        trailing tab appended; Shift+<special key> submits with '?'."""
        #error(event.KeyCode)
        keycode = event.KeyCode
        if keycode ==wx.WXK_TAB:
            self.m_command_box.AppendText('\t')
            self.on_command_enter(event)
        # NOTE(review): wx.PAPER_ENV_INVITE is a paper-size constant, not a
        # key code — presumably a specific keycode was intended; confirm.
        elif keycode == wx.PAPER_ENV_INVITE and wx.GetKeyState(wx.WXK_SHIFT):
            self.m_command_box.AppendText('?')
            self.on_command_enter(event)
        else:
            event.Skip()
    def on_key_up(self, event):
        """Key-up in the command box: Up/Down cycle through the command
        history (ring buffer); other keys pass through."""
        keycode = event.KeyCode
        increase =False
        if keycode ==wx.WXK_UP:
            pass
        elif keycode ==wx.WXK_DOWN:
            increase =True#
        if keycode in [wx.WXK_UP, wx.WXK_DOWN]:
            self.m_command_box.Clear()
            self.history_cmd_index, new_command = get_next_in_ring_list(self.history_cmd_index,self.history_cmd,increase=increase)
            self.m_command_box.AppendText(new_command)
        if keycode in [wx.WXK_TAB]:
            pass
        else:
            event.Skip()
    def add_cmd_to_history(self, cmd, module_name, str_code):
        """Append `cmd` to the history (skipping immediate duplicates),
        reset the history cursor, and record the generated code line."""
        if self.history_cmd==[]:
            self.history_cmd.append(cmd)
        elif self.history_cmd[-1]==cmd:
            pass
        else:
            self.history_cmd.append(cmd)
        self.history_cmd_index= len(self.history_cmd)
        self.add_cmd_to_sequence_queue(str_code,module_name )
        #self.sequence_queue.put([cmd, datetime.now()])
def build_function_tab(self):
src_path = os.path.abspath(self.src_path)
if not os.path.exists(src_path):
src_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(src_path)
root =self.function_page.AddRoot(base_name)
item_info = wx.TreeItemData({'name':src_path})
self.function_page.SetItemData(root, item_info)
modules = get_folder_item(src_path)
if modules is None:
self.function_page.SetItemText(root, self.function_page.GetItemText(root) + ' Not Exists!!!')
self.function_page.SetItemTextColour(root, wx.Colour(255, 0, 0))
return
for module_file in modules:
path_name = '{}'.format(os.path.abspath(self.src_path))
module_name = os.path.basename(module_file).split('.')[0]
new_module = self.function_page.InsertItem(root, root, module_name)
file, path_name, description = imp.find_module(module_name)
lmod = imp.load_module(module_name, file, path_name,description)
for attr in sorted(dir(lmod)):
if attr.startswith('__'):
continue
attr_obj = getattr(lmod, attr)
attr_type = type(attr_obj)
if attr_type == types.FunctionType :
new_item = self.function_page.InsertItem(new_module, new_module, '{}'.format( attr))
item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
self.function_page.SetItemData(new_item, item_info)
elif attr_type== types.TypeType:
class_obj = getattr(lmod, attr)
new_class = self.function_page.InsertItem(new_module, new_module, attr)
item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
self.function_page.SetItemData(new_item, item_info)
for attr_in_class in sorted(dir(class_obj)):
if attr_in_class.startswith('__'):
continue
attr_obj = getattr(class_obj,attr_in_class)
attr_type =type(attr_obj)
if attr_type == types.MethodType :
item_info = wx.TreeItemData({'name':'{}.{}.{}'.format(module_name,attr,attr_in_class)})
new_item = self.function_page.InsertItem(new_class, new_class, attr_in_class)
self.function_page.SetItemData(new_item, item_info)
self.function_page.Expand(root)
first_child = self.function_page.GetFirstChild(root)
self.function_page.Expand(first_child[0])
    def on_LeftDClick_in_Function_tab(self,event):
        """Double-click on a function node: pre-fill the command box with
        its dotted call path so the user can append arguments and run it."""
        event.Skip()
        select_item = self.function_page.GetSelection()
        fun_name = self.function_page.GetItemData(select_item)
        text_in_tree = self.function_page.GetItemText(select_item)
        if fun_name != None and fun_name.Data.has_key('name'):
            cmd = fun_name.Data['name']
            info('click item in Functions tab: {}'.format(fun_name.Data['name']))
            # All UI mutations deferred to the GUI thread.
            wx.CallAfter(self.m_command_box.Clear)
            wx.CallAfter(self.m_command_box.AppendText, cmd+' ')
            wx.CallAfter(self.m_command_box.SetFocus)
            wx.CallAfter(self.m_command_box.SetInsertionPointEnd)
            wx.CallAfter(self.m_command_box.Refresh)
    def on_right_down_in_function_tab(self, event):
        """Right-click in the FUNCTION tree: show a 'Refresh' popup menu."""
        menu = wx.Menu()
        item = wx.MenuItem(menu, wx.NewId(), "Refresh")
        #acc = wx.AcceleratorEntry()
        #acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
        #item.SetAccel(acc)
        menu.AppendItem(item)
        self.Bind(wx.EVT_MENU, self.on_refresh_function_page,item)
        self.PopupMenu(menu,event.GetPosition())
    def on_refresh_function_page(self, event):
        """Popup handler: rebuild the FUNCTION tree from scratch."""
        self.function_page.DeleteAllItems()
        self.build_function_tab()
        info('Refresh Function tab done!')
    def add_cmd_to_sequence_queue(self, cmd, module_name):
        """Record an executed command (with its module, for the import
        header) so generate_code can replay the session as a script."""
        if self.import_modules.has_key(module_name):
            pass
        else:
            self.import_modules.update({module_name:module_name})
        self.sequence_queue.put([cmd,datetime.now() ])
    def generate_code(self, file_name ):
        """Replay the recorded command sequence as a standalone Python
        script appended to `file_name`: an import header, every recorded
        command (timestamped), and an except block that closes all dut
        sessions before exiting. Nothing is written if no command ran."""
        str_code ="""#created by DasH
if __name__ == "__main__":
    import sys, traceback
    sys.path.insert(0,r'{}')
    sys.path.insert(0,r'{}')
    import lib.common
    log_path= '../log/tmp'
    log_path= lib.common.create_case_folder()
    try:
""".format(self.src_path,self.lib_path )
        sessions =[]
        for module in self.import_modules:
            str_code+='        import {mod}\n'.format(mod=module)#\n        {mod}_instance = {mod}()
        no_operation = True
        # Drain the queue; get(block=False) raises Empty when exhausted.
        while True:
            try:
                cmd, timestamp =self.sequence_queue.get(block=False)[:2]
                str_code +='        {} #{}\n'.format(cmd, timestamp.isoformat( ' '))
                # Track session creations so the generated script can
                # close them in its error handler.
                if cmd.find('dut.dut(')!=-1:
                    sessions.append(cmd.split('=')[0].strip())
                no_operation=False
                #datetime.now().isoformat()
            except Exception as e:
                break
        close_session=''
        str_code+='''    except Exception as e:
        print(traceback.format_exc())\n'''
        for ses in sessions:
            str_code+='''        {}.close_session()\n'''.format(ses)
        str_code+='        sys.exit(-1)\n'#, sys.exit(-1)
        for ses in sessions:
            str_code+='''    {}.close_session()\n'''.format(ses)
        info(str_code)
        if not no_operation:
            with open(file_name, 'a+') as f:
                f.write(str_code)
    def on_right_down_in_case_tab(self, event):
        """Right-click in the CASE tree: show a Run Test / Kill Test menu."""
        menu = wx.Menu()
        item1 = wx.MenuItem(menu, wx.NewId(), "Run Test")
        item2 = wx.MenuItem(menu, wx.NewId(), "Kill Test")
        #acc = wx.AcceleratorEntry()
        #acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
        #item.SetAccel(acc)
        menu.AppendItem(item1)
        menu.AppendItem(item2)
        self.Bind(wx.EVT_MENU, self.on_run_script,item1)
        self.Bind(wx.EVT_MENU, self.on_kill_script,item2)
        self.PopupMenu(menu,event.GetPosition())
    def on_kill_script(self,event):
        """Kill the process attached to the selected case node (if still
        running, mark it KILL and terminate it; otherwise just record the
        PASS/FAIL result derived from its return code)."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        # 'PROCESS' is attached to the node by on_run_script.
        if item_data.has_key('PROCESS'):
            p = item_data['PROCESS']
            name= item_data['FULL_NAME']
            info('script:{}, returncode:{}'.format(name,p.returncode))
            if p.returncode is None:
                #if p.is_alive():
                info('Terminate alive process {}:{}'.format(item_name, p.pid))
                result ='KILL'
                self.update_case_status(p.pid, result)
                self.mail_test_report("DASH TEST REPORT-updating")
                p.terminate()
            else:
                result ='FAIL' if p.returncode else 'PASS'
                info('{}:{} completed with returncode {}'.format(item_name, p.pid, result))
                self.update_case_status(p.pid, result)
    def run_script(self, script_name):
        """Launch a test script (plus its arguments) in a new console via
        script_runner, register it in the report, and return (process,
        case_log_path).

        `script_name` may carry shell-style arguments; a bare name is
        resolved relative to the suite path.
        """
        old_script_name = script_name
        # shlex splits the command line, honoring double-quoted args.
        lex = shlex.shlex(script_name)
        lex.quotes = '"'
        lex.whitespace_split = True
        script_name_and_args = list(lex)
        script_args = script_name_and_args[1:]
        script_name = script_name_and_args[0]
        if script_name.find(os.path.sep)!=-1:
            pass
        else:
            script_name= '{}/{}'.format(self.suite_path,script_name)
        from lib.common import create_case_folder
        old_sys_argv = sys.argv
        sys.argv= [script_name]+script_args
        case_log_path = self.log_path #create_case_folder()
        sys.argv= old_sys_argv
        try:
            # Prefer the frozen runner when it exists next to the app.
            if os.path.exists('script_runner.exe'):
                execute = 'script_runner.exe'
                cmd = [execute,script_name ]+script_args + ['-l','{}'.format(case_log_path)]
                #p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
            else:
                cmd = [sys.executable,'./script_runner.py', script_name ]+script_args+ ['-l','{}'.format(case_log_path)]
            # Windows-only flag: each case gets its own console window.
            p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)#, stdin=pipe_input, stdout=pipe_output,stderr=pipe_output)
            self.add_new_case_to_report(p.pid, old_script_name, p, case_log_path)
        except:
            error(traceback.format_exc())
        # NOTE(review): if Popen itself raised, `p` is unbound here and this
        # return raises NameError — callers should be prepared; confirm.
        return p, case_log_path
    def on_run_script(self,event):
        """'Run Test' handler: a .txt/.csv selection is treated as a test
        suite (queued case by case); anything else runs immediately as a
        single script, after first killing any previous run of it."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        script_name = self.case_suite_page.GetItemData(hit_item).Data['path_name']
        if script_name.lower().split('.')[-1] in ['txt','csv']:#test suite file, not a single script
            self.run_a_test_suite(script_name)
        else:#a single test case
            self.on_kill_script(event)
            try:
                p, case_log_path = self.run_script('{} {}'.format(script_name, item_name))
                # Attach the process to the tree node so Kill Test works.
                self.case_suite_page.GetItemData(hit_item).Data['PROCESS']=p
                self.case_suite_page.GetItemData(hit_item).Data['FULL_NAME']= item_name
                info('start process {} :{}'.format(item_name, p.pid))
                #p.join() # this blocks until the process terminates
                time.sleep(1)
            except Exception as e :
                error(traceback.format_exc())
        #p = Process(target=run_script, args=[script_name, script_and_args])
        #p.start()
def check_case_status(self):
self.check_case_running_status_lock.acquire()
changed=False
running_case = 0
for pid in self.dict_test_report.keys():
case_name, start_time, end_time, duration, return_code ,proc, log_path= self.dict_test_report[pid]
if return_code is None:
if proc.poll() is None:
running_case+=1
debug('RUNNING', start_time, end_time, duration, return_code ,proc, log_path)
else:
changed=True
return_code = 'FAIL' if proc.returncode else 'PASS'
self.update_case_status(pid,return_code)
if running_case:
pass
elif not self.case_queue.empty():#self.case_queue.qsize():
case_name_with_args = self.case_queue.get()
p, case_log_path = self.run_script(case_name_with_args)
self.check_case_running_status_lock.release()
if changed:
#test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
self.mail_test_report('DasH Test Report-updating')
return changed
def polling_running_cases(self):
    """Background loop: poll case status every 10 seconds until shutdown."""
    while True:
        time.sleep(10)
        try:
            still_alive = self.alive
        except Exception:
            # Owner is being torn down; stop polling.
            break
        if not still_alive:
            break
        self.check_case_status()
def add_new_case_to_report(self, pid, case_name, proc, log_path):
    """Register a newly started test process in the report table.

    The entry layout is
    [case_name, start_time, end_time, duration, return_code, proc, log_path];
    end_time/return_code stay None until the process finishes.
    """
    record = [case_name, datetime.now(), None, 0, None, proc, log_path]
    # BUGFIX: the old code called list.update() (no such method) when a pid
    # was reused, raising AttributeError; simply overwrite the stale entry.
    self.dict_test_report[pid] = record
def update_case_status(self, pid, return_code=None):
    """Record the final status of the case tracked under *pid*.

    Duration is computed from the stored start time. An entry whose
    return_code is already set is left untouched, so a result is never
    overwritten. NOTE(review): end_time is kept as stored (normally None)
    rather than stamped here -- confirm whether the report needs it.
    """
    (case_name, start_time, end_time, _duration,
     previous_code, proc, log_path) = self.dict_test_report[pid]
    if previous_code is not None:
        return  # result already finalized; don't update twice
    elapsed = (datetime.now() - start_time).total_seconds()
    self.dict_test_report[pid] = [
        case_name, start_time, end_time, elapsed, return_code, proc, log_path,
    ]
def mail_test_report(self, subject="DASH TEST REPORT-updating"):
    """Generate the current test report and mail it; failures are only logged."""
    try:
        report_body = self.generate_report(
            filename='{}/dash_report.txt'.format(self.log_path))
        # Arguments: TO, SUBJECT, TEXT, SERVER, FROM.
        send_mail_smtp_without_login(
            self.mail_to_list, subject, report_body,
            self.mail_server, self.mail_from)
    except Exception:
        error(traceback.format_exc())
def on_mail_test_report(self, event):
    """Menu/button handler: mail the current test report."""
    self.mail_test_report('DasH Test Report-updating')
def on_handle_request_via_mail(self):
    """Poll the IMAP inbox and act on unread DasH command mails.

    Each unread message's subject (case-insensitive) selects an action:
    list queued/available cases, mail a report, kill running cases, clear
    the queue, or queue scripts taken from the mail body. Python 2 only
    (uses ``basestring``).
    """
    import imaplib
    url, user, password = self.mail_read_url,self.mail_user, self.mail_password
    conn = imaplib.IMAP4_SSL(url,993)
    conn.login(user,password)
    conn.select('INBOX')
    # Only messages not yet seen are treated as pending commands.
    results,data = conn.search(None,'(UNSEEN)') # #'ALL')
    msg_ids = data[0]
    msg_id_list = msg_ids.split()
    from email.parser import Parser
    def process_multipart_message(message):
        # Recursively flatten a (possibly multipart) message into plain text.
        if isinstance(message, basestring) or isinstance(message , list):
            return message
        rtn = ''
        try:
            if message.is_multipart():
                for m in message.get_payload():
                    rtn += process_multipart_message(m)
            else:
                rtn += message.get_payload()
        except Exception as e:
            pass  # best effort: skip parts that cannot be decoded
        return rtn
    MAX_UNREAD_MAIL = 50  # cap the work done per polling cycle
    # Newest messages first, at most MAX_UNREAD_MAIL per pass.
    for unread_mail_id in msg_id_list[::-1][:MAX_UNREAD_MAIL]:
        result,data = conn.fetch(unread_mail_id,"(RFC822)")
        raw_email = data[0][1]
        p = Parser()
        msg = p.parsestr(raw_email)
        #msg = process_multipart_message(msg )
        from1 = msg.get('From')
        sub = msg.get('Subject')
        sub = sub.strip().lower()
        support_list='''
###############################
mail subject below is supported:
dash-request-case-queue : request the cases in queue which to be executed
dash-request-case : request cases which are under suite_path
dash-request-report : request a test report by now
dash-request-kill-running : to kill all running test cases
dash-request-clear-queue : to clear/remove all cases which are in case queue
dash-request-run : to run script(s), each line is a script with arguments if it has
--------------------------------
***non-case-sensitive***
###############################
'''
        # Dispatch on the normalized mail subject.
        if sub in ['dash']:
            send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH Support List',support_list,self.mail_server,self.mail_from)
        elif sub in ['dash-request-case-queue']:
            case_in_queue =self.get_case_queue(None)
            send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case In Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
        elif sub in ['dash-request-case']:
            cases_string = '\n\t'.join(self.case_list)
            send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case List',cases_string+support_list,self.mail_server,self.mail_from)
        elif sub in ['dash-request-report']:
            self.mail_test_report('DasH Test Report-requested')
        elif sub in ['dash-request-kill-running']:
            killed= self.on_kill_running_case()
            send_mail_smtp_without_login(self.mail_to_list, 'DONE-[DasH]:Killed Running Case(s)',killed+support_list,self.mail_server,self.mail_from)
        elif sub in ['dash-request-clear-queue']:
            case_in_queue = self.on_clear_case_queue()
            send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Clear Case Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
        elif sub in ['dash-request-run']:
            # Only trusted senders may queue scripts for execution.
            if from1 in ['dash@calix.com', 'yu_silence@163.com',self.mail_to_list]:
                conn.uid('STORE', unread_mail_id, '-FLAGS', '\SEEN')
                #conn.uid('STORE', '-FLAGS', '(\Seen)')
                payload = msg.get_payload()
                payload = process_multipart_message(payload )
                from lib.html2text import html2text
                txt = html2text(payload)
                # Each non-comment, non-empty line of the body is a script
                # (optionally with arguments) or a suite file to queue.
                cases = txt.replace('\r\n','\n').split('\n')
                for line in cases:
                    line = line.strip()
                    if line.strip().startswith('#') or len(line)==0:
                        pass
                    else:
                        type_case, case_name, args = self.check_case_type(line)
                        if type_case in ['txt','csv']:
                            self.run_a_test_suite(line)
                        else:
                            self.case_queue.put(line)
                            info('adding case to queue: {}'.format(line))
            else:
                conn.uid('STORE', unread_mail_id, '-FLAGS', '\SEEN')
def check_case_type(self, str_line):
    """Split a command line into (extension, script_name, args).

    Double-quoted arguments are kept as single tokens; the extension is the
    lower-cased text after the script name's last '.'.
    """
    tokenizer = shlex.shlex(str_line)
    tokenizer.quotes = '"'
    tokenizer.whitespace_split = True
    tokens = list(tokenizer)
    script = tokens[0]
    extension = script.lower().split('.')[-1]
    return extension, script, tokens[1:]
def polling_request_via_mail(self):
    """Background loop: process mailed-in requests every 10 seconds."""
    while True:
        time.sleep(10)
        try:
            still_alive = self.alive
        except Exception:
            break
        if not still_alive:
            break
        try:
            self.on_handle_request_via_mail()
        except Exception:
            # Mail polling is best effort; log the failure and keep looping.
            error(traceback.format_exc())
def get_case_queue(self, item=None):
    """Return a printable summary of the cases currently waiting in the queue."""
    queued = list(self.case_queue.queue)
    if queued:
        summary = '\ntotal {} case(s) in Queue\n'.format(len(queued))
        summary += '\n'.join('{}'.format(entry) for entry in queued)
    else:
        summary = '\nNo Case in Queue'
    info('Case(s) in Queue', summary)
    return summary
def on_clear_case_queue(self, event=None):
    """Empty the pending-case queue; return the listing of what was removed."""
    removed = self.get_case_queue(None)
    self.case_queue.queue.clear()
    self.get_case_queue(None)  # log the (now empty) queue state
    return removed
def on_kill_running_case(self, event=None):
    """Terminate every still-running test process.

    Returns:
        str: one 'case_name:pid' line per process that was killed.
    """
    killed_case = ''
    # Snapshot the keys: update_case_status() rewrites entries while we scan.
    for pid in list(self.dict_test_report.keys()):
        # BUGFIX: the old code sliced the dict itself
        # (self.dict_test_report[:7], a TypeError); read the entry per key.
        (case_name, start_time, end_time, duration,
         return_code, proc, log_path) = self.dict_test_report[pid][:7]
        if return_code is None and proc.poll() is None:
            killed_case += '{}:{}\n'.format(case_name, proc.pid)
            info('Terminate alive process {}:{}'.format(case_name, proc.pid))
            # Record the KILL result before terminating the process.
            self.update_case_status(proc.pid, 'KILL')
            proc.terminate()
    info('Killed All Running cases', killed_case)
    return killed_case
def run_a_test_suite(self, csv_file_name, clear_queue=False, kill_running=False):
    """Queue every case listed in a suite (.txt/.csv) file.

    Each CSV row's first column is a case command line; '{1}', '{2}', ...
    placeholders in it are substituted with the suite's own arguments.
    Errors are logged, never raised.
    """
    try:
        case_type, suite_file_name, args = self.check_case_type(csv_file_name)
        if clear_queue:
            self.on_clear_case_queue()
        if kill_running:
            self.on_kill_running_case()
        import csv
        # Bare file names are resolved relative to the suite directory.
        if suite_file_name.find(os.path.sep) == -1:
            suite_file_name = '{}/{}'.format(self.suite_path, suite_file_name)
        # BUGFIX: this placeholder offset used to be inserted once per CSV
        # row, shifting the argument list on every row; insert it a single
        # time up front so args[1] is the first suite argument.
        args.insert(0, 0)
        with open(suite_file_name) as bench:
            for row in csv.reader(bench, delimiter=','):
                if len(row) < 1:
                    continue
                name = row[0]
                for index in range(1, len(args)):
                    # BUGFIX: '{{index}}'.format(index=index) always produced
                    # the literal '{index}' (brace escaping); build the real
                    # '{1}', '{2}', ... placeholder for each position.
                    name = name.replace('{{{}}}'.format(index), '{}'.format(args[index]))
                self.case_queue.put(name)
                info('adding case to queue: {}'.format(name))
    except Exception:
        error(traceback.format_exc())
#done: 2017-08-22, 2017-08-19 save main log window to a file
#todo: 2017-08-19 add timestamps to log message
#done: 2017-08-22, 2017-08-19 mail to someone
#todo: 2017-08-19 run a script in DasH
#todo: 2017-08-19 generate test report
#todo: 2017-08-19 publish all test cases in a web page
#todo: 2017-08-19 trigger a test remote via web page
#todo: 2017-08-19 re-run failed cases
#todo: 2017-08-19 build executable packege for DasH
#todo: 2017-08-19 a popup window to get email address/password/mail_server...
#todo: 2017-08-22 output in m_log window has a lot of empty line, need remove them
#todo: 2017-08-23 in common.call_function_in_module, should end all threads which are started in previous instance
#todo: 2017-08-23 add tips for all tree items in the left tree
# The MIT License (MIT)
#
# Copyright (c) 2014-2016 WUSTL ZPLAB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors: Zach Pincus
import itertools
from PyQt5 import Qt
from .. import shared_resources
"""
Example 1: Simple ROI drawing
roi = RectROI(rw)
# click to draw ROI in GUI
x1, y1, x2, y2 = roi.bounds
roi.remove_from_rw()
Example 2: Pre-set bounds with a specified aspect ratio (width/height):
roi = EllipseROI(rw, aspect=2, bounds=(200, 400, 600, 500))
"""
class _ROIMixin:
    # Mix-in layered under a QGraphicsRectItem/QGraphicsEllipseItem subclass:
    # adds click-drag-click drawing, selection highlighting, dragging to
    # move, and eight resize handles around the shape.
    def __init__(self, ris_widget, color=Qt.Qt.green, aspect=None, bounds=None):
        """Class for drawing a Region of Interest on a ris_widget.

        The ROI can be drawn by clicking on the upper-left of the desired region,
        then dragging. A second click sets the lower-right. Pressing escape
        before this allows selection of a new upper-left point.

        Afterward, the ROI can be clicked to highlight it, allowing movement or
        resizing. Pressing delete/backspace will remove a selected ROI and allow
        it to be re-drawn.

        The bounds property can be used to obtain the (x1, y1, x2, y2) positions
        of the corners of the ROI. If no ROI is shown, this will be None.

        After use, call remove_from_rw().

        Parameters:
            ris_widget: a ris_widget instance to draw an ROI on
            color: a Qt color for the ROI
            aspect: width/height ratio to maintain, or None
            bounds: (x1, y1, x2, y2) coordinates of corners. If None, the ROI
                can be drawn by clicking on the ris_widget.
        """
        layer_stack = ris_widget.image_scene.layer_stack_item
        super().__init__(layer_stack)
        self.aspect = aspect
        self.display_pen = Qt.QPen(color)
        self.display_pen.setWidth(2)
        self.display_pen.setCosmetic(True)  # constant on-screen width at any zoom
        self.setPen(self.display_pen)
        self.selected_pen = Qt.QPen(self.display_pen)
        self.selected_pen.setColor(Qt.Qt.red)  # red outline while selected
        self.dragging = False  # True between the first and second placement click
        self.rw = ris_widget
        ris_widget.image_view.mouse_release.connect(self._view_mouse_release)
        # Filter the layer stack's scene events to see hover moves and keys
        # while drawing.
        layer_stack.installSceneEventFilter(self)
        # Eight handles; the (x, y) fractions locate each on the rect
        # (four corners and four edge midpoints).
        self.resizers = {_ResizeHandle(self, Qt.Qt.red): coords for coords in [
            (0, 0),
            (0.5, 0),
            (1, 0),
            (1, 0.5),
            (1, 1),
            (0.5, 1),
            (0, 1),
            (0, 0.5)
        ]}
        self.bounds = bounds

    @property
    def bounds(self):
        # (x1, y1, x2, y2) of the normalized rect, or None when no ROI is shown.
        if self.isVisible():
            return self.rect().normalized().getCoords()
        else:
            return None

    @bounds.setter
    def bounds(self, bounds=None):
        if bounds is None:
            self.hide()  # no ROI: wait for the user to draw one
        else:
            x1, y1, x2, y2 = bounds
            w = x2 - x1
            if self.aspect is None:
                h = y2 - y1
            else:
                # Height follows from the width and the fixed aspect ratio.
                h = w / self.aspect
            self.setRect(x1, y1, w, h)
            self._set_selectable(True)

    def remove_from_rw(self):
        # Detach from the ris_widget: handles, mouse signal, scene, filter.
        for resizer in self.resizers:
            resizer.remove()
        self.rw.image_view.mouse_release.disconnect(self._view_mouse_release)
        scene = self.rw.image_scene
        scene.removeItem(self)
        scene.layer_stack_item.removeSceneEventFilter(self)

    def shape(self):
        # Hit-test on a stroked outline roughly 10 screen pixels wide rather
        # than the filled shape, so only clicks near the border hit the ROI.
        s = Qt.QPainterPathStroker()
        s.setWidth(10/self.scene().views()[0].zoom)
        return s.createStroke(super().shape())

    def boundingRect(self):
        # Must cover the widened shape() outline used for hit testing.
        return self.shape().boundingRect()

    def paint(self, painter, option, widget):
        # Suppress Qt's default dashed selection indicator; selection is
        # shown via the pen color instead.
        option = Qt.QStyleOptionGraphicsItem(option)
        option.state &= ~Qt.QStyle.State_Selected
        super().paint(painter, option, widget)

    def _view_mouse_release(self, pos):
        # First click starts a drag-out; second click finishes the ROI.
        if not self.isVisible():
            # no current ROI shown: start a new one
            self.dragging = True
            self.setRect(Qt.QRectF(pos, pos))
            self._set_selectable(False)
            self.show()
        elif self.dragging:
            # finish drawing the roi_rect
            self.dragging = False
            self._set_selectable(True)
            self._done_resizing()

    def _done_resizing(self):
        # Normalize so x1 < x2 and y1 < y2 regardless of drag direction.
        self.setRect(self.rect().normalized())
        if self.isSelected():
            self._locate_resizers()

    def sceneEventFilter(self, watched, event):
        # While drawing, follow hover moves; handle escape/delete keys.
        if self.dragging and event.type() == Qt.QEvent.GraphicsSceneHoverMove:
            self._resize(event.pos())
            return True  # event consumed
        elif event.type() == Qt.QEvent.KeyPress:
            key = event.key()
            if key == Qt.Qt.Key_Escape and self.dragging:
                # Abort the in-progress drawing.
                self.dragging = False
                self.hide()
                return True
            elif key in {Qt.Qt.Key_Delete, Qt.Qt.Key_Backspace} and self.isSelected():
                # Remove the selected ROI so it can be re-drawn.
                self.hide()
                return True
        return False

    def _resize(self, pos):
        # Grow the rect from its fixed top-left corner toward the cursor.
        rect = Qt.QRectF(self.rect().topLeft(), pos)
        if self.aspect is not None:
            desired_height = rect.width() / self.aspect
            rect.setHeight(desired_height)
        self.setRect(rect)
        if self.isSelected():
            self._locate_resizers()

    def itemChange(self, change, value):
        # Swap pens and show/hide the resize handles as selection toggles.
        # NOTE(review): super().itemChange() is not called and value is
        # returned unchanged -- confirm this is intentional.
        if change == Qt.QGraphicsItem.ItemSelectedHasChanged:
            if value:
                self.setPen(self.selected_pen)
                self._locate_resizers()
                for resizer in self.resizers:
                    resizer.show()
            else:
                self.setPen(self.display_pen)
                for resizer in self.resizers:
                    resizer.hide()
        return value

    def mouseMoveEvent(self, event):
        # Drag the whole ROI by the mouse movement delta.
        delta = event.pos() - event.lastPos()
        self.setRect(self.rect().translated(delta.x(), delta.y()))
        self._locate_resizers()

    def _set_selectable(self, selectable):
        self.setFlag(Qt.QGraphicsItem.ItemIsSelectable, selectable)
        self.setSelected(False)  # always start deselected

    def _locate_resizers(self):
        # Place each handle at its fractional (x, y) position on the rect.
        rect = self.rect()
        ul = rect.topLeft()
        vector = rect.bottomRight() - ul
        for resizer, coords in self.resizers.items():
            x = ul.x() + vector.x() * coords[0]
            y = ul.y() + vector.y() * coords[1]
            resizer.setPos(x, y)

    def _adjust(self, pos, resizer):
        # Resize by moving the edge(s) tied to the dragged handle; a 0.5
        # fraction means that axis is not adjusted directly.
        xc, yc = self.resizers[resizer]
        rect = self.rect()
        x1, y1, x2, y2 = rect.getCoords()
        x = [x1, x2]
        y = [y1, y2]
        if xc != 0.5:
            x[xc] = pos.x()
        if yc != 0.5:
            y[yc] = pos.y()
        if self.aspect is not None:
            if xc == 0.5: # trying to adjust height only, so servo width to maintain aspect
                desired_width = (y[1] - y[0]) * self.aspect
                x[1] = x[0] + desired_width
            else:
                desired_height = (x[1] - x[0]) / self.aspect
                if yc != 0:
                    y[1] = y[0] + desired_height
                else:
                    y[0] = y[1] - desired_height
        rect.setCoords(x[0], y[0], x[1], y[1])
        self.setRect(rect)
        self._locate_resizers()
class RectROI(_ROIMixin, Qt.QGraphicsRectItem):
    # Rectangular ROI; all drawing/resizing behavior comes from _ROIMixin.
    QGRAPHICSITEM_TYPE = shared_resources.UNIQUE_QGRAPHICSITEM_TYPE()

    def type(self):
        # Qt uses this unique id to distinguish custom graphics-item classes.
        return self.QGRAPHICSITEM_TYPE
class EllipseROI(_ROIMixin, Qt.QGraphicsEllipseItem):
    # Elliptical ROI; all drawing/resizing behavior comes from _ROIMixin.
    QGRAPHICSITEM_TYPE = shared_resources.UNIQUE_QGRAPHICSITEM_TYPE()

    def type(self):
        # Qt uses this unique id to distinguish custom graphics-item classes.
        return self.QGRAPHICSITEM_TYPE
class _ResizeHandle(Qt.QGraphicsRectItem):
    # Small square handle drawn at a corner/edge midpoint of an ROI;
    # dragging it resizes the parent ROI via its _adjust() hook.
    def __init__(self, parent, color):
        # 6x6 rect centered on the handle's position, parented to the ROI.
        # NOTE(review): a later revision of this file sets the parent after
        # construction to avoid a PyQt5 segfault on Linux -- confirm which
        # variant is current.
        super().__init__(-3, -3, 6, 6, parent)
        view = self.scene().views()[0]
        # Counter-scale so the handle keeps the same on-screen size at any zoom.
        self._zoom_changed(view.zoom)
        view.zoom_changed.connect(self._zoom_changed)
        self.hide()  # shown only while the parent ROI is selected
        self.setPen(Qt.QPen(Qt.Qt.NoPen))
        self.setBrush(Qt.QBrush(color))
        self.setFlag(Qt.QGraphicsItem.ItemIsMovable)

    def remove(self):
        # Disconnect from the view so the handle can be garbage collected.
        self.scene().views()[0].zoom_changed.disconnect(self._zoom_changed)

    def _zoom_changed(self, z):
        self.setScale(1/z)

    def mouseReleaseEvent(self, event):
        # Drag finished: let the parent ROI normalize its rect.
        self.parentItem()._done_resizing()

    def mouseMoveEvent(self, event):
        # Forward the drag position (in parent coordinates) to the ROI.
        self.parentItem()._adjust(self.mapToParent(event.pos()), self)
# deal with bizarro segfault on Linux
# The MIT License (MIT)
#
# Copyright (c) 2014-2016 WUSTL ZPLAB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors: Zach Pincus
import itertools
from PyQt5 import Qt
from .. import shared_resources
"""
Example 1: Simple ROI drawing
roi = RectROI(rw)
# click to draw ROI in GUI
x1, y1, x2, y2 = roi.bounds
roi.remove_from_rw()
Example 2: Pre-set bounds with a specified aspect ratio (width/height):
roi = EllipseROI(rw, aspect=2, bounds=(200, 400, 600, 500))
"""
class _ROIMixin:
    # Mix-in layered under a QGraphicsRectItem/QGraphicsEllipseItem subclass:
    # adds click-drag-click drawing, selection highlighting, dragging to
    # move, and eight resize handles around the shape.
    def __init__(self, ris_widget, color=Qt.Qt.green, aspect=None, bounds=None):
        """Class for drawing a Region of Interest on a ris_widget.

        The ROI can be drawn by clicking on the upper-left of the desired region,
        then dragging. A second click sets the lower-right. Pressing escape
        before this allows selection of a new upper-left point.

        Afterward, the ROI can be clicked to highlight it, allowing movement or
        resizing. Pressing delete/backspace will remove a selected ROI and allow
        it to be re-drawn.

        The bounds property can be used to obtain the (x1, y1, x2, y2) positions
        of the corners of the ROI. If no ROI is shown, this will be None.

        After use, call remove_from_rw().

        Parameters:
            ris_widget: a ris_widget instance to draw an ROI on
            color: a Qt color for the ROI
            aspect: width/height ratio to maintain, or None
            bounds: (x1, y1, x2, y2) coordinates of corners. If None, the ROI
                can be drawn by clicking on the ris_widget.
        """
        layer_stack = ris_widget.image_scene.layer_stack_item
        super().__init__(layer_stack)
        self.aspect = aspect
        self.display_pen = Qt.QPen(color)
        self.display_pen.setWidth(2)
        self.display_pen.setCosmetic(True)  # constant on-screen width at any zoom
        self.setPen(self.display_pen)
        self.selected_pen = Qt.QPen(self.display_pen)
        self.selected_pen.setColor(Qt.Qt.red)  # red outline while selected
        self.dragging = False  # True between the first and second placement click
        self.rw = ris_widget
        ris_widget.image_view.mouse_release.connect(self._view_mouse_release)
        # Filter the layer stack's scene events to see hover moves and keys
        # while drawing.
        layer_stack.installSceneEventFilter(self)
        # Eight handles; the (x, y) fractions locate each on the rect
        # (four corners and four edge midpoints).
        self.resizers = {_ResizeHandle(self, Qt.Qt.red): coords for coords in [
            (0, 0),
            (0.5, 0),
            (1, 0),
            (1, 0.5),
            (1, 1),
            (0.5, 1),
            (0, 1),
            (0, 0.5)
        ]}
        self.bounds = bounds

    @property
    def bounds(self):
        # (x1, y1, x2, y2) of the normalized rect, or None when no ROI is shown.
        if self.isVisible():
            return self.rect().normalized().getCoords()
        else:
            return None

    @bounds.setter
    def bounds(self, bounds=None):
        if bounds is None:
            self.hide()  # no ROI: wait for the user to draw one
        else:
            x1, y1, x2, y2 = bounds
            w = x2 - x1
            if self.aspect is None:
                h = y2 - y1
            else:
                # Height follows from the width and the fixed aspect ratio.
                h = w / self.aspect
            self.setRect(x1, y1, w, h)
            self._set_selectable(True)

    def remove_from_rw(self):
        # Detach from the ris_widget: handles, mouse signal, scene, filter.
        for resizer in self.resizers:
            resizer.remove()
        self.rw.image_view.mouse_release.disconnect(self._view_mouse_release)
        scene = self.rw.image_scene
        scene.removeItem(self)
        scene.layer_stack_item.removeSceneEventFilter(self)
        del self.rw  # drop the back-reference to the ris_widget

    def shape(self):
        # Hit-test on a stroked outline roughly 10 screen pixels wide rather
        # than the filled shape, so only clicks near the border hit the ROI.
        s = Qt.QPainterPathStroker()
        s.setWidth(10/self.scene().views()[0].zoom)
        return s.createStroke(super().shape())

    def boundingRect(self):
        # Must cover the widened shape() outline used for hit testing.
        return self.shape().boundingRect()

    def paint(self, painter, option, widget):
        # Suppress Qt's default dashed selection indicator; selection is
        # shown via the pen color instead.
        option = Qt.QStyleOptionGraphicsItem(option)
        option.state &= ~Qt.QStyle.State_Selected
        super().paint(painter, option, widget)

    def _view_mouse_release(self, pos):
        # First click starts a drag-out; second click finishes the ROI.
        if not self.isVisible():
            # no current ROI shown: start a new one
            self.dragging = True
            self.setRect(Qt.QRectF(pos, pos))
            self._set_selectable(False)
            self.show()
        elif self.dragging:
            # finish drawing the roi_rect
            self.dragging = False
            self._set_selectable(True)
            self._done_resizing()

    def _done_resizing(self):
        # Normalize so x1 < x2 and y1 < y2 regardless of drag direction.
        self.setRect(self.rect().normalized())
        if self.isSelected():
            self._locate_resizers()

    def sceneEventFilter(self, watched, event):
        # While drawing, follow hover moves; handle escape/delete keys.
        if self.dragging and event.type() == Qt.QEvent.GraphicsSceneHoverMove:
            self._resize(event.pos())
            return True  # event consumed
        elif event.type() == Qt.QEvent.KeyPress:
            key = event.key()
            if key == Qt.Qt.Key_Escape and self.dragging:
                # Abort the in-progress drawing.
                self.dragging = False
                self.hide()
                return True
            elif key in {Qt.Qt.Key_Delete, Qt.Qt.Key_Backspace} and self.isSelected():
                # Remove the selected ROI so it can be re-drawn.
                self.hide()
                return True
        return False

    def _resize(self, pos):
        # Grow the rect from its fixed top-left corner toward the cursor.
        rect = Qt.QRectF(self.rect().topLeft(), pos)
        if self.aspect is not None:
            desired_height = rect.width() / self.aspect
            rect.setHeight(desired_height)
        self.setRect(rect)
        if self.isSelected():
            self._locate_resizers()

    def itemChange(self, change, value):
        # Swap pens and show/hide the resize handles as selection toggles.
        # NOTE(review): super().itemChange() is not called and value is
        # returned unchanged -- confirm this is intentional.
        if change == Qt.QGraphicsItem.ItemSelectedHasChanged:
            if value:
                self.setPen(self.selected_pen)
                self._locate_resizers()
                for resizer in self.resizers:
                    resizer.show()
            else:
                self.setPen(self.display_pen)
                for resizer in self.resizers:
                    resizer.hide()
        return value

    def mouseMoveEvent(self, event):
        # Drag the whole ROI by the mouse movement delta.
        delta = event.pos() - event.lastPos()
        self.setRect(self.rect().translated(delta.x(), delta.y()))
        self._locate_resizers()

    def _set_selectable(self, selectable):
        self.setFlag(Qt.QGraphicsItem.ItemIsSelectable, selectable)
        self.setSelected(False)  # always start deselected

    def _locate_resizers(self):
        # Place each handle at its fractional (x, y) position on the rect.
        rect = self.rect()
        ul = rect.topLeft()
        vector = rect.bottomRight() - ul
        for resizer, coords in self.resizers.items():
            x = ul.x() + vector.x() * coords[0]
            y = ul.y() + vector.y() * coords[1]
            resizer.setPos(x, y)

    def _adjust(self, pos, resizer):
        # Resize by moving the edge(s) tied to the dragged handle; a 0.5
        # fraction means that axis is not adjusted directly.
        xc, yc = self.resizers[resizer]
        rect = self.rect()
        x1, y1, x2, y2 = rect.getCoords()
        x = [x1, x2]
        y = [y1, y2]
        if xc != 0.5:
            x[xc] = pos.x()
        if yc != 0.5:
            y[yc] = pos.y()
        if self.aspect is not None:
            if xc == 0.5: # trying to adjust height only, so servo width to maintain aspect
                desired_width = (y[1] - y[0]) * self.aspect
                x[1] = x[0] + desired_width
            else:
                desired_height = (x[1] - x[0]) / self.aspect
                if yc != 0:
                    y[1] = y[0] + desired_height
                else:
                    y[0] = y[1] - desired_height
        rect.setCoords(x[0], y[0], x[1], y[1])
        self.setRect(rect)
        self._locate_resizers()
class RectROI(_ROIMixin, Qt.QGraphicsRectItem):
    # Rectangular ROI; all drawing/resizing behavior comes from _ROIMixin.
    QGRAPHICSITEM_TYPE = shared_resources.UNIQUE_QGRAPHICSITEM_TYPE()

    def type(self):
        # Qt uses this unique id to distinguish custom graphics-item classes.
        return self.QGRAPHICSITEM_TYPE
class EllipseROI(_ROIMixin, Qt.QGraphicsEllipseItem):
    # Elliptical ROI; all drawing/resizing behavior comes from _ROIMixin.
    QGRAPHICSITEM_TYPE = shared_resources.UNIQUE_QGRAPHICSITEM_TYPE()

    def type(self):
        # Qt uses this unique id to distinguish custom graphics-item classes.
        return self.QGRAPHICSITEM_TYPE
class _ResizeHandle(Qt.QGraphicsRectItem):
    # Small square handle drawn at a corner/edge midpoint of an ROI;
    # dragging it resizes the parent ROI via its _adjust() hook.
    def __init__(self, parent, color):
        # 6x6 rect centered on the handle's position.
        super().__init__(-3, -3, 6, 6)
        # TODO: WTF with PyQt5 v. 5.9 on Linux, core is dumped if the parent
        # is set in the constructor above. (Only if the parent is a subclass
        # of _ROIMixin?!) But parenting later works fine.
        self.setParentItem(parent)
        view = self.scene().views()[0]
        # Counter-scale so the handle keeps the same on-screen size at any zoom.
        self._zoom_changed(view.zoom)
        view.zoom_changed.connect(self._zoom_changed)
        self.hide()  # shown only while the parent ROI is selected
        self.setPen(Qt.QPen(Qt.Qt.NoPen))
        self.setBrush(Qt.QBrush(color))
        self.setFlag(Qt.QGraphicsItem.ItemIsMovable)

    def remove(self):
        # Disconnect from the view so the handle can be garbage collected.
        self.scene().views()[0].zoom_changed.disconnect(self._zoom_changed)

    def _zoom_changed(self, z):
        self.setScale(1/z)

    def mouseReleaseEvent(self, event):
        # Drag finished: let the parent ROI normalize its rect.
        self.parentItem()._done_resizing()

    def mouseMoveEvent(self, event):
        # Forward the drag position (in parent coordinates) to the ROI.
        self.parentItem()._adjust(self.mapToParent(event.pos()), self)
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import pytz
from openerp import SUPERUSER_ID
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
    # Function-field computer: per order, the total untaxed amount, tax
    # amount and grand total, each rounded in the order's pricelist currency.
    res = {}
    cur_obj=self.pool.get('res.currency')
    for order in self.browse(cr, uid, ids, context=context):
        res[order.id] = {
            'amount_untaxed': 0.0,
            'amount_tax': 0.0,
            'amount_total': 0.0,
        }
        val = val1 = 0.0  # val: accumulated tax; val1: accumulated untaxed
        cur = order.pricelist_id.currency_id
        for line in order.order_line:
            val1 += line.price_subtotal
            # compute_all() yields the individual tax amounts for this line.
            for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
                val += c.get('amount', 0.0)
        res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
        res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
        res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
    return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
    # Inverse of _minimum_planned_date: push *value* down onto order lines.
    # Lines planned at or before the order's current minimum are moved to
    # the new date, and the order's own minimum is updated.
    if not value: return False
    if type(ids)!=type([]):
        ids=[ids]
    for po in self.browse(cr, uid, ids, context=context):
        if po.order_line:
            # Direct SQL for speed; values are passed as parameters, so
            # there is no SQL-injection risk.
            cr.execute("""update purchase_order_line set
                    date_planned=%s
                where
                    order_id=%s and
                    (date_planned=%s or date_planned<%s)""", (value,po.id,po.minimum_planned_date,value))
        cr.execute("""update purchase_order set
                minimum_planned_date=%s where id=%s""", (value, po.id))
    return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
    """Function-field computer: the earliest line date_planned per order.

    Orders without lines get False.
    """
    res = {}
    for purchase in self.browse(cr, uid, ids, context=context):
        if purchase.order_line:
            res[purchase.id] = min(line.date_planned for line in purchase.order_line)
        else:
            res[purchase.id] = False
    return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
    """Percentage of each order's untaxed amount that is already invoiced.

    Draft and cancelled invoices are ignored; orders with a zero untaxed
    amount get 0.0.
    """
    res = {}
    for purchase in self.browse(cursor, user, ids, context=context):
        invoiced_total = sum(
            invoice.amount_untaxed
            for invoice in purchase.invoice_ids
            if invoice.state not in ('draft', 'cancel'))
        if purchase.amount_untaxed:
            res[purchase.id] = invoiced_total * 100.0 / purchase.amount_untaxed
        else:
            res[purchase.id] = 0.0
    return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
    # Percentage of ordered quantity already received per order, computed
    # from the stock moves attached to the orders' pickings.
    if not ids: return {}
    res = {}
    for id in ids:
        res[id] = [0.0,0.0]  # [received qty, total expected qty]
    cr.execute('''SELECT
            p.purchase_id,sum(m.product_qty), m.state
        FROM
            stock_move m
        LEFT JOIN
            stock_picking p on (p.id=m.picking_id)
        WHERE
            p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
    for oid,nbr,state in cr.fetchall():
        if state=='cancel':
            continue  # cancelled moves count toward neither total
        if state=='done':
            res[oid][0] += nbr or 0.0  # received
            res[oid][1] += nbr or 0.0
        else:
            res[oid][1] += nbr or 0.0  # still expected
    for r in res:
        if not res[r][1]:
            res[r] = 0.0  # nothing expected at all
        else:
            res[r] = 100.0 * res[r][0] / res[r][1]
    return res
def _get_order(self, cr, uid, ids, context=None):
    """Map purchase.order.line ids to the ids of their orders (store trigger)."""
    order_ids = {}
    line_obj = self.pool.get('purchase.order.line')
    for line in line_obj.browse(cr, uid, ids, context=context):
        order_ids[line.order_id.id] = True
    return order_ids.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
    """True for orders whose invoiced rate has reached 100%."""
    res = {}
    for purchase in self.browse(cursor, user, ids, context=context):
        res[purchase.id] = purchase.invoiced_rate == 100.00
    return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ Sent'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
},
}
_columns = {
'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'warehouse_id': fields.many2one('stock.warehouse', 'Destination Warehouse'),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]} ),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency",readonly=True, required=True),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user, the status switch to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status becomes in exception.", select=True),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines', states={'approved':[('readonly',True)],'done':[('readonly',True)]}),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id', 'invoice_id', 'Invoices', help="Invoices generated for a purchase order"),
'picking_ids': fields.one2many('stock.picking.in', 'purchase_id', 'Picking List', readonly=True, help="This is the list of incoming shipments that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', help="It indicates that an invoice has been paid"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control > Based on P.O. lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Bases on incoming shipments: let you create an invoice when receptions are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums",help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'product_id': fields.related('order_line','product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company','Company',required=True,select=1, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
}
_defaults = {
'date_order': fields.date.context_today,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = "name desc"
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order') or '/'
order = super(purchase_order, self).create(cr, uid, vals, context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
wf_service = netsvc.LocalService("workflow")
for id in unlink_ids:
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
    def button_dummy(self, cr, uid, ids, context=None):
        # Intentional no-op button handler: clicking it just triggers a
        # client round-trip (e.g. to refresh computed values on the form).
        return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
def onchange_dest_address_id(self, cr, uid, ids, address_id):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {'warehouse_id': False}
supplier = address.browse(cr, uid, address_id)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id):
if not warehouse_id:
return {}
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id)
return {'value':{'location_id': warehouse.lot_input_id.id, 'dest_address_id': False}}
def onchange_partner_id(self, cr, uid, ids, partner_id):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'])
supplier = partner.browse(cr, uid, partner_id)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
    def invoice_open(self, cr, uid, ids, context=None):
        """Open the invoices linked to the selected purchase orders.

        Returns the 'account.action_invoice_tree2' window action restricted
        to the linked invoices (a single invoice opens in form view).
        Raises when no invoice exists yet.
        """
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
        # NOTE(review): `id` shadows the builtin; kept as-is (doc-only pass).
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        inv_ids = []
        for po in self.browse(cr, uid, ids, context=context):
            inv_ids+= [invoice.id for invoice in po.invoice_ids]
        if not inv_ids:
            raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
        # choose the view_mode accordingly: list for several invoices, form
        # view on the single one otherwise
        if len(inv_ids)>1:
            result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
        else:
            res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
            result['views'] = [(res and res[1] or False, 'form')]
            result['res_id'] = inv_ids and inv_ids[0] or False
        return result
def view_invoice(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing invoices of given sales order ids. It can either be a in a list or in a form view, if there is only one invoice to show.
'''
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line]})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
    def view_picking(self, cr, uid, ids, context=None):
        '''
        This function returns an action that displays existing picking
        orders (incoming shipments) of the given purchase order ids.
        '''
        mod_obj = self.pool.get('ir.model.data')
        pick_ids = []
        for po in self.browse(cr, uid, ids, context=context):
            pick_ids += [picking.id for picking in po.picking_ids]
        action_model, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree4'))
        action = self.pool.get(action_model).read(cr, uid, action_id, context=context)
        # NOTE(review): eval() on the stored action context string is unsafe
        # on untrusted data; here it is assumed to come from trusted module
        # data — confirm before reusing this pattern elsewhere.
        ctx = eval(action['context'])
        ctx.update({
            'search_default_purchase_id': ids[0]
        })
        # a single picking: open it directly in form view
        if pick_ids and len(pick_ids) == 1:
            form_view_ids = [view_id for view_id, view in action['views'] if view == 'form']
            view_id = form_view_ids and form_view_ids[0] or False
            action.update({
                'views': [],
                'view_mode': 'form',
                'view_id': view_id,
                'res_id': pick_ids[0]
            })
        action.update({
            'context': ctx,
        })
        return action
    def wkf_approve_order(self, cr, uid, ids, context=None):
        # Workflow action: mark the orders approved and stamp the approval
        # date with today's date (in the user's timezone).
        self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
        return True
    def print_confirm(self,cr,uid,ids,context=None):
        # NOTE(review): debug trace left in the workflow (Python 2 print
        # statement); returns None, which the workflow engine tolerates.
        print "Confirmed"
    def print_double(self,cr,uid,ids,context=None):
        # NOTE(review): debug trace, see print_confirm.
        print "double Approval"
    def print_router(self,cr,uid,ids,context=None):
        # NOTE(review): debug trace, see print_confirm.
        print "Routed"
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
    def print_quotation(self, cr, uid, ids, context=None):
        '''
        This function prints the request for quotation and marks it as sent,
        so that we can see more easily the next step of the workflow.
        '''
        # NOTE(review): `assert` is stripped under python -O; kept as-is
        # since this mirrors the original single-record guard.
        assert len(ids) == 1, 'This option should only be used for a single id at a time'
        wf_service = netsvc.LocalService("workflow")
        # move the order to the 'sent' state before printing
        wf_service.trg_validate(uid, 'purchase.order', ids[0], 'send_rfq', cr)
        datas = {
                 'model': 'purchase.order',
                 'ids': ids,
                 'form': self.read(cr, uid, ids[0], context=context),
        }
        return {'type': 'ir.actions.report.xml', 'report_name': 'purchase.quotation', 'datas': datas, 'nodestroy': True}
#TODO: implement messages system
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not po.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Error!'), _('Define expense account for this company: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position or False
return fiscal_obj.map_account(cr, uid, fpos, acc_id)
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
}
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
wf_service = netsvc.LocalService("workflow")
for p_id in ids:
# Deleting the existing instance of workflow for PO
wf_service.trg_delete(uid, 'purchase.order', p_id, cr)
wf_service.trg_create(uid, 'purchase.order', p_id, cr)
return True
    def action_invoice_create(self, cr, uid, ids, context=None):
        """Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
        :param ids: list of ids of purchase orders.
        :return: ID of created invoice.
        :rtype: int
        """
        if context is None:
            context = {}
        journal_obj = self.pool.get('account.journal')
        inv_obj = self.pool.get('account.invoice')
        inv_line_obj = self.pool.get('account.invoice.line')
        res = False
        uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        for order in self.browse(cr, uid, ids, context=context):
            # reset any company forced for the previous order in this loop
            context.pop('force_company', None)
            if order.company_id.id != uid_company_id:
                #if the company of the document is different than the current user company, force the company in the context
                #then re-do a browse to read the property fields for the good company.
                context['force_company'] = order.company_id.id
                order = self.browse(cr, uid, order.id, context=context)
            pay_acc_id = order.partner_id.property_account_payable.id
            journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase'), ('company_id', '=', order.company_id.id)], limit=1)
            if not journal_ids:
                raise osv.except_osv(_('Error!'),
                    _('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
            # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
            inv_lines = []
            for po_line in order.order_line:
                acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
                inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
                inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
                inv_lines.append(inv_line_id)
                # flag the PO line as invoiced and link the new invoice line
                po_line.write({'invoiced': True, 'invoice_lines': [(4, inv_line_id)]}, context=context)
            # get invoice data and create invoice
            inv_data = {
                'name': order.partner_ref or order.name,
                'reference': order.partner_ref or order.name,
                'account_id': pay_acc_id,
                'type': 'in_invoice',
                'partner_id': order.partner_id.id,
                'currency_id': order.pricelist_id.currency_id.id,
                'journal_id': len(journal_ids) and journal_ids[0] or False,
                'invoice_line': [(6, 0, inv_lines)],
                'origin': order.name,
                'fiscal_position': order.fiscal_position.id or False,
                'payment_term': order.payment_term_id.id or False,
                'company_id': order.company_id.id,
            }
            inv_id = inv_obj.create(cr, uid, inv_data, context=context)
            # compute the invoice
            inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
            # Link this new invoice to related purchase order
            order.write({'invoice_ids': [(4, inv_id)]}, context=context)
            res = inv_id
        # returns the id of the last invoice created (one per order)
        return res
    def invoice_done(self, cr, uid, ids, context=None):
        # Workflow action: leave the invoice-exception state by moving the
        # orders back to 'approved' once the invoicing issue is resolved.
        self.write(cr, uid, ids, {'state':'approved'}, context=context)
        return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
    def action_cancel(self, cr, uid, ids, context=None):
        # Cancel the orders, refusing to do so while receptions or invoices
        # are still in progress; cancellable linked documents are cancelled
        # along the way, then the purchase workflow is signalled.
        wf_service = netsvc.LocalService("workflow")
        for purchase in self.browse(cr, uid, ids, context=context):
            # check all pickings first so nothing is cancelled half-way
            for pick in purchase.picking_ids:
                if pick.state not in ('draft','cancel'):
                    raise osv.except_osv(
                        _('Unable to cancel this purchase order.'),
                        _('First cancel all receptions related to this purchase order.'))
            for pick in purchase.picking_ids:
                wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_cancel', cr)
            for inv in purchase.invoice_ids:
                if inv and inv.state not in ('cancel','draft'):
                    raise osv.except_osv(
                        _('Unable to cancel this purchase order.'),
                        _('You must first cancel all receptions related to this purchase order.'))
                if inv:
                    wf_service.trg_validate(uid, 'account.invoice', inv.id, 'invoice_cancel', cr)
        self.write(cr,uid,ids,{'state':'cancel'})
        # signal the purchase workflow for each order (name_get gives ids)
        for (id, name) in self.name_get(cr, uid, ids):
            wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
        return True
    def date_to_datetime(self, cr, uid, userdate, context=None):
        """ Convert date values expressed in user's timezone to
        server-side UTC timestamp, assuming a default arbitrary
        time of 12:00 AM - because a time is needed.

        :param str userdate: date string in the user's time zone
        :return: UTC datetime string for server-side use
        """
        # TODO: move to fields.datetime in server after 7.0
        user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            # fall back to the user's configured timezone, read as superuser
            tz_name = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
        if tz_name:
            utc = pytz.timezone('UTC')
            context_tz = pytz.timezone(tz_name)
            # anchor at noon local time so the UTC result stays on the same
            # calendar day for any timezone offset
            user_datetime = user_date + relativedelta(hours=12.0)
            local_timestamp = context_tz.localize(user_datetime, is_dst=False)
            user_datetime = local_timestamp.astimezone(utc)
            return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        # no timezone configured: keep the naive midnight timestamp
        return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
def _prepare_order_picking(self, cr, uid, order, context=None):
return {
'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),
'origin': order.name + ((order.origin and (':' + order.origin)) or ''),
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'partner_id': order.dest_address_id.id or order.partner_id.id,
'invoice_state': '2binvoiced' if order.invoice_method == 'picking' else 'none',
'type': 'in',
'partner_id': order.dest_address_id.id or order.partner_id.id,
'purchase_id': order.id,
'company_id': order.company_id.id,
'move_lines' : [],
}
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
return {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_qty': order_line.product_qty,
'product_uos_qty': order_line.product_qty,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'date_expected': self.date_to_datetime(cr, uid, order_line.date_planned, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'move_dest_id': order_line.move_dest_id.id,
'state': 'draft',
'type':'in',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': order_line.price_unit
}
    def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
        """Creates pickings and appropriate stock moves for given order lines, then
        confirms the moves, makes them available, and confirms the picking.

        If ``picking_id`` is provided, the stock moves will be added to it, otherwise
        a standard outgoing picking will be created to wrap the stock moves, as returned
        by :meth:`~._prepare_order_picking`.

        Modules that wish to customize the procurements or partition the stock moves over
        multiple stock pickings may override this method and call ``super()`` with
        different subsets of ``order_lines`` and/or preset ``picking_id`` values.

        :param browse_record order: purchase order to which the order lines belong
        :param list(browse_record) order_lines: purchase order line records for which picking
            and moves should be created.
        :param int picking_id: optional ID of a stock picking to which the created stock moves
            will be added. A new picking will be created if omitted.
        :return: list of IDs of pickings used/created for the given order lines (usually just one)
        """
        if not picking_id:
            picking_id = self.pool.get('stock.picking').create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
        todo_moves = []
        stock_move = self.pool.get('stock.move')
        wf_service = netsvc.LocalService("workflow")
        for order_line in order_lines:
            # description-only lines carry no product and need no move
            if not order_line.product_id:
                continue
            # only stockable/consumable products generate stock moves
            if order_line.product_id.type in ('product', 'consu'):
                move = stock_move.create(cr, uid, self._prepare_order_line_move(cr, uid, order, order_line, picking_id, context=context))
                if order_line.move_dest_id:
                    # re-route the chained destination move to this order's location
                    order_line.move_dest_id.write({'location_id': order.location_id.id})
                todo_moves.append(move)
        # confirm and reserve all created moves, then confirm the picking
        stock_move.action_confirm(cr, uid, todo_moves)
        stock_move.force_assign(cr, uid, todo_moves)
        wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
        return [picking_id]
def action_picking_create(self, cr, uid, ids, context=None):
picking_ids = []
for order in self.browse(cr, uid, ids):
picking_ids.extend(self._create_pickings(cr, uid, order, order.order_line, None, context=context))
# Must return one unique picking ID: the one to connect in the subflow of the purchase order.
# In case of multiple (split) pickings, we should return the ID of the critical one, i.e. the
# one that should trigger the advancement of the purchase workflow.
# By default we will consider the first one as most important, but this behavior can be overridden.
return picking_ids[0] if picking_ids else False
    def picking_done(self, cr, uid, ids, context=None):
        # Workflow action: flag the orders as fully received; the state
        # stays/returns to 'approved' (invoicing may still be pending).
        self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
        return True
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'state':'draft',
'shipped':False,
'invoiced':False,
'invoice_ids': [],
'picking_ids': [],
'partner_ref': '',
'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order'),
})
return super(purchase_order, self).copy(cr, uid, id, default, context)
    def do_merge(self, cr, uid, ids, context=None):
        """
        To merge similar type of purchase orders.
        Orders will only be merged if:
        * Purchase Orders are in draft
        * Purchase Orders belong to the same partner
        * Purchase Orders have the same stock location, same pricelist
        Lines will only be merged if:
        * Order lines are exactly the same except for the quantity and unit

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: the ID or list of IDs
        @param context: A standard dictionary

        @return: dict {new purchase order id: list of merged order ids}
        """
        #TOFIX: merged order line should be unlink
        wf_service = netsvc.LocalService("workflow")
        # Hashable key identifying "the same" record on the given fields:
        # browse records are reduced to their ids, empty relations to False,
        # x2many lists to a (6, 0, ids) tuple.
        def make_key(br, fields):
            list_key = []
            for field in fields:
                field_val = getattr(br, field)
                if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
                    if not field_val:
                        field_val = False
                if isinstance(field_val, browse_record):
                    field_val = field_val.id
                elif isinstance(field_val, browse_null):
                    field_val = False
                elif isinstance(field_val, list):
                    field_val = ((6, 0, tuple([v.id for v in field_val])),)
                list_key.append((field, field_val))
            list_key.sort()
            return tuple(list_key)
        # Compute what the new orders should contain
        # new_orders: order_key -> (values dict, [merged order ids])
        new_orders = {}
        for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
            order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id'))
            new_order = new_orders.setdefault(order_key, ({}, []))
            new_order[1].append(porder.id)
            order_infos = new_order[0]
            if not order_infos:
                # first order of the group: seed the merged values
                order_infos.update({
                    'origin': porder.origin,
                    'date_order': porder.date_order,
                    'partner_id': porder.partner_id.id,
                    'dest_address_id': porder.dest_address_id.id,
                    'warehouse_id': porder.warehouse_id.id,
                    'location_id': porder.location_id.id,
                    'pricelist_id': porder.pricelist_id.id,
                    'state': 'draft',
                    'order_line': {},
                    'notes': '%s' % (porder.notes or '',),
                    'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
                })
            else:
                # subsequent orders: keep earliest date, concatenate notes/origins
                if porder.date_order < order_infos['date_order']:
                    order_infos['date_order'] = porder.date_order
                if porder.notes:
                    order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
                if porder.origin:
                    order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
            for order_line in porder.order_line:
                line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'product_id', 'move_dest_id', 'account_analytic_id'))
                o_line = order_infos['order_line'].setdefault(line_key, {})
                if o_line:
                    # merge the line with an existing line: add quantities,
                    # converted through the UoM factors
                    o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
                else:
                    # append a new "standalone" line
                    for field in ('product_qty', 'product_uom'):
                        field_val = getattr(order_line, field)
                        if isinstance(field_val, browse_record):
                            field_val = field_val.id
                        o_line[field] = field_val
                    o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0
        allorders = []
        orders_info = {}
        # NOTE(review): iteritems/itervalues are Python 2 only (as is the
        # rest of this file).
        for order_key, (order_data, old_ids) in new_orders.iteritems():
            # skip merges with only one order
            if len(old_ids) < 2:
                allorders += (old_ids or [])
                continue
            # cleanup order line data
            for key, value in order_data['order_line'].iteritems():
                del value['uom_factor']
                value.update(dict(key))
            order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]
            # create the new order
            neworder_id = self.create(cr, uid, order_data)
            orders_info.update({neworder_id: old_ids})
            allorders.append(neworder_id)
            # make triggers pointing to the old orders point to the new order
            for old_id in old_ids:
                wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr)
                wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr)
        return orders_info
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for line in self.browse(cr, uid, ids, context=context):
taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'move_dest_id': fields.many2one('stock.move', 'Reservation Destination', ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', required=True, readonly=True,
help=' * The \'Draft\' status is set automatically when purchase order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when purchase order in confirm status. \
\n* The \'Done\' status is set automatically when purchase order is set as done. \
\n* The \'Cancelled\' status is set automatically when user cancel purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.boolean('Invoiced', readonly=True),
'partner_id': fields.related('order_id','partner_id',string='Partner',readonly=True,type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id','date_order',string='Order Date',readonly=True,type="date")
}
_defaults = {
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def copy_data(self, cr, uid, id, default=None, context=None):
    """Duplicate a purchase order line, resetting its workflow and
    invoicing state so the copy starts as a fresh draft line."""
    default = default or {}
    default.update({
        'state': 'draft',
        'move_ids': [],
        'invoiced': 0,
        'invoice_lines': [],
    })
    return super(purchase_order_line, self).copy_data(cr, uid, id, default, context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
        partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
        name=False, price_unit=False, context=None):
    """onchange handler of product_uom.

    Delegates to the full product onchange once a unit of measure is set;
    with no UoM it simply echoes the current values back.
    """
    if not uom_id:
        return {'value': {'price_unit': price_unit or 0.0,
                          'name': name or '',
                          'product_uom': uom_id or False}}
    return self.onchange_product_id(
        cr, uid, ids, pricelist_id, product_id, qty, uom_id, partner_id,
        date_order=date_order, fiscal_position_id=fiscal_position_id,
        date_planned=date_planned, name=name, price_unit=price_unit,
        context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
    """Compute the Schedule Date (``date_planned``) for PO lines of a product.

    :param browse_record | False supplier_info: product.supplierinfo used to
        determine the delivery delay (no supplier info means zero delay)
    :param str date_order_str: date of the order, formatted with
        DEFAULT_SERVER_DATE_FORMAT
    :rtype: datetime
    :return: desired Schedule Date for the PO line
    """
    delay_days = int(supplier_info.delay) if supplier_info else 0
    order_date = datetime.strptime(date_order_str, DEFAULT_SERVER_DATE_FORMAT)
    return order_date + relativedelta(days=delay_days)
def _check_product_uom_group(self, cr, uid, context=None):
    """Return True when the current user belongs to the 'Manage Units of
    Measure' group (``product.group_uom``).

    Used to decide whether UoM mismatch warnings are relevant: users outside
    the group never see UoM fields in the UI.
    """
    group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
    # any() short-circuits and returns a bool directly, replacing the
    # old `len([...]) and True or False` list-building idiom.
    return any(user.id == uid for user in group_uom.users)
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
        partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
        name=False, price_unit=False, context=None):
    """
    onchange handler of product_id.

    Recomputes the line description, unit of measure, planned date, quantity,
    unit price and taxes from the selected product and its supplier info.
    Returns the usual onchange dict with a 'value' key and optional
    'domain' / 'warning' keys.
    """
    if context is None:
        context = {}

    # Default result: echo the current values back when no product is set.
    res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
    if not product_id:
        return res

    product_product = self.pool.get('product.product')
    product_uom = self.pool.get('product.uom')
    res_partner = self.pool.get('res.partner')
    product_supplierinfo = self.pool.get('product.supplierinfo')
    product_pricelist = self.pool.get('product.pricelist')
    account_fiscal_position = self.pool.get('account.fiscal.position')
    account_tax = self.pool.get('account.tax')

    # - check for the presence of partner_id and pricelist_id
    #if not partner_id:
    #    raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
    #if not pricelist_id:
    #    raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))

    # - determine name and notes based on product in partner lang.
    context_partner = context.copy()
    if partner_id:
        lang = res_partner.browse(cr, uid, partner_id).lang
        context_partner.update( {'lang': lang, 'partner_id': partner_id} )
    product = product_product.browse(cr, uid, product_id, context=context_partner)
    # call name_get() with partner in the context to eventually match name and
    # description in the seller_ids field
    dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
    if product.description_purchase:
        name += '\n' + product.description_purchase
    res['value'].update({'name': name})

    # - set a domain on product_uom: only UoMs of the product's category
    res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}

    # - check that uom and product uom belong to the same category
    product_uom_po_id = product.uom_po_id.id
    if not uom_id:
        uom_id = product_uom_po_id

    if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
        # Only warn users who actually see UoM fields; in any case fall back
        # to the product's purchase UoM.
        if self._check_product_uom_group(cr, uid, context=context):
            res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
        uom_id = product_uom_po_id

    res['value'].update({'product_uom': uom_id})

    # - determine product_qty and date_planned based on seller info
    if not date_order:
        date_order = fields.date.context_today(self,cr,uid,context=context)

    supplierinfo = False
    for supplier in product.seller_ids:
        if partner_id and (supplier.name.id == partner_id):
            supplierinfo = supplier
            if supplierinfo.product_uom.id != uom_id:
                res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
            # Supplier minimum quantity converted into the line's UoM.
            min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
            if (qty or 0.0) < min_qty: # If the supplier quantity is greater than entered from user, set minimal.
                if qty:
                    res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
                qty = min_qty

    # Planned date = order date + supplier delivery delay.
    dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    qty = qty or 1.0
    res['value'].update({'date_planned': date_planned or dt})
    if qty:
        res['value'].update({'product_qty': qty})

    # - determine price_unit and taxes_id
    if pricelist_id:
        price = product_pricelist.price_get(cr, uid, [pricelist_id],
                product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order})[pricelist_id]
    else:
        # No pricelist: fall back to the product cost price.
        price = product.standard_price

    taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
    fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
    taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
    res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})

    return res
# Backwards-compatibility aliases kept for callers/views that still use
# the historical onchange method names.
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom

def action_confirm(self, cr, uid, ids, context=None):
    # Workflow hook: mark the given purchase order lines as confirmed.
    self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
    return True

# Old-style OpenERP model registration.
purchase_order_line()
class procurement_order(osv.osv):
    """Procurement extension: satisfy 'buy' procurements by generating
    draft purchase orders to the product's main supplier."""
    _inherit = 'procurement.order'
    _columns = {
        'purchase_id': fields.many2one('purchase.order', 'Purchase Order'),
    }

    def check_buy(self, cr, uid, ids, context=None):
        ''' return True if the supply method of the mto product is 'buy'
        '''
        # NOTE: `!=` replaces the deprecated Python 2 `<>` spelling; an unused
        # res.users browse was also dropped.
        for procurement in self.browse(cr, uid, ids, context=context):
            if procurement.product_id.supply_method != 'buy':
                return False
        return True

    def check_supplier_info(self, cr, uid, ids, context=None):
        """Check that each procurement's product has a usable main supplier.

        Writes an explanatory message on the failing procurement.
        :return: True when every procurement can be purchased, False otherwise
        :raise osv.except_osv: when the supplier is the user's own company
        """
        partner_obj = self.pool.get('res.partner')
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        for procurement in self.browse(cr, uid, ids, context=context):
            message = ''
            partner = procurement.product_id.seller_id #Taken Main Supplier of Product of Procurement.
            if not procurement.product_id.seller_ids:
                message = _('No supplier defined for this product !')
            elif not partner:
                message = _('No default supplier defined for this product')
            elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
                message = _('No address defined for the supplier')
            if message:
                if procurement.message != message:
                    # Direct SQL to avoid triggering the write() workflow side effects.
                    cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
                return False

            if user.company_id and user.company_id.partner_id:
                if partner.id == user.company_id.partner_id.id:
                    # Interpolate AFTER translation so the translated template
                    # (with its %s placeholder) can actually be looked up;
                    # the old code interpolated inside _(), defeating gettext.
                    raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!') % procurement.product_id.name)
        return True

    def action_po_assign(self, cr, uid, ids, context=None):
        """ This is action which call from workflow to assign purchase order to procurements
        @return: True
        """
        res = self.make_po(cr, uid, ids, context=context)
        res = res.values()
        return len(res) and res[0] or 0 #TO CHECK: why workflow is generated error if return not integer value

    def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
        """Create the purchase order from the procurement, using
        the provided field values, after adding the given purchase
        order line in the purchase order.

        :params procurement: the procurement object generating the purchase order
        :params dict po_vals: field values for the new purchase order (the
                              ``order_line`` field will be overwritten with one
                              single line, as passed in ``line_vals``).
        :params dict line_vals: field values of the single purchase order line that
                                the purchase order will contain.
        :return: id of the newly created purchase order
        :rtype: int
        """
        po_vals.update({'order_line': [(0,0,line_vals)]})
        return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)

    def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
        """Return the datetime value to use as Schedule Date (``date_planned``) for the
        Purchase Order Lines created to satisfy the given procurement.

        :param browse_record procurement: the procurement for which a PO will be created.
        :param browse_report company: the company to which the new PO will belong to.
        :rtype: datetime
        :return: the desired Schedule Date for the PO lines
        """
        procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
        # Deliver early enough to absorb the company purchase lead time.
        schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
        return schedule_date

    def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
        """Return the datetime value to use as Order Date (``date_order``) for the
        Purchase Order created to satisfy the given procurement.

        :param browse_record procurement: the procurement for which a PO will be created.
        :param browse_report company: the company to which the new PO will belong to.
        :param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
        :rtype: datetime
        :return: the desired Order Date for the PO
        """
        seller_delay = int(procurement.product_id.seller_delay)
        return schedule_date - relativedelta(days=seller_delay)

    def _get_warehouse(self, procurement, user_company):
        """
        Return the warehouse containing the procurment stock location (or one of it ancestors)
        If none match, returns then first warehouse of the company
        """
        # TODO refactor the domain once we implement the "parent_of" domain operator
        # NOTE This method has been copied in the `purchase_requisition` module to ensure
        # retro-compatibility. This code duplication will be deleted in next stable version.
        # Do not forget to update both version in case of modification.
        company_id = (procurement.company_id or user_company).id
        domains = [
            # 1) warehouse whose stock location is the procurement location
            #    or one of its ancestors (parent_left/right nested-set test)
            [
                '&', ('company_id', '=', company_id),
                '|', '&', ('lot_stock_id.parent_left', '<', procurement.location_id.parent_left),
                          ('lot_stock_id.parent_right', '>', procurement.location_id.parent_right),
                     ('lot_stock_id', '=', procurement.location_id.id)
            ],
            # 2) fallback: any warehouse of the company
            [('company_id', '=', company_id)]
        ]
        cr, uid = procurement._cr, procurement._uid
        context = procurement._context
        Warehouse = self.pool['stock.warehouse']
        for domain in domains:
            ids = Warehouse.search(cr, uid, domain, context=context)
            if ids:
                return ids[0]
        return False

    def make_po(self, cr, uid, ids, context=None):
        """ Make purchase order from procurement
        @return: New created Purchase Orders procurement wise
        """
        res = {}
        if context is None:
            context = {}
        company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
        partner_obj = self.pool.get('res.partner')
        uom_obj = self.pool.get('product.uom')
        pricelist_obj = self.pool.get('product.pricelist')
        prod_obj = self.pool.get('product.product')
        acc_pos_obj = self.pool.get('account.fiscal.position')
        seq_obj = self.pool.get('ir.sequence')
        for procurement in self.browse(cr, uid, ids, context=context):
            res_id = procurement.move_id.id
            partner = procurement.product_id.seller_id # Taken Main Supplier of Product of Procurement.
            seller_qty = procurement.product_id.seller_qty
            partner_id = partner.id
            address_id = partner_obj.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
            pricelist_id = partner.property_product_pricelist_purchase.id
            uom_id = procurement.product_id.uom_po_id.id
            # Quantity expressed in the product's purchase UoM, respecting the
            # supplier minimal quantity.
            qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
            if seller_qty:
                qty = max(qty, seller_qty)
            price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner_id, {'uom': uom_id})[pricelist_id]

            schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
            purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)

            # Passing partner_id to context for purchase order line integrity of Line name
            new_context = context.copy()
            new_context.update({'lang': partner.lang, 'partner_id': partner_id})

            product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
            taxes_ids = procurement.product_id.supplier_taxes_id
            # Map default supplier taxes through the supplier fiscal position.
            taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position, taxes_ids)

            name = product.partner_ref
            if product.description_purchase:
                name += '\n' + product.description_purchase
            line_vals = {
                'name': name,
                'product_qty': qty,
                'product_id': procurement.product_id.id,
                'product_uom': uom_id,
                'price_unit': price or 0.0,
                'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'move_dest_id': res_id,
                'taxes_id': [(6,0,taxes)],
            }
            name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
            po_vals = {
                'name': name,
                'origin': procurement.origin,
                'partner_id': partner_id,
                'location_id': procurement.location_id.id,
                'warehouse_id': self._get_warehouse(procurement, company),
                'pricelist_id': pricelist_id,
                'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'company_id': procurement.company_id.id,
                'fiscal_position': partner.property_account_position and partner.property_account_position.id or False,
                'payment_term_id': partner.property_supplier_payment_term.id or False,
            }
            res[procurement.id] = self.create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=new_context)
            self.write(cr, uid, [procurement.id], {'state': 'running', 'purchase_id': res[procurement.id]})
        self.message_post(cr, uid, ids, body=_("Draft Purchase Order created"), context=context)
        return res

    def _product_virtual_get(self, cr, uid, order_point):
        # Skip virtual stock recomputation while a draft/confirmed PO is
        # already covering this order point.
        procurement = order_point.procurement_id
        if procurement and procurement.state != 'exception' and procurement.purchase_id and procurement.purchase_id.state in ('draft', 'confirmed'):
            return None
        return super(procurement_order, self)._product_virtual_get(cr, uid, order_point)
class mail_mail(osv.Model):
    """Advance the purchase workflow when an RFQ email is actually sent."""
    _name = 'mail.mail'
    _inherit = 'mail.mail'

    def _postprocess_sent_message(self, cr, uid, mail, context=None):
        # An outgoing mail attached to a purchase order means the RFQ was
        # sent: fire the corresponding workflow signal on the order.
        if mail.model == 'purchase.order':
            workflow = netsvc.LocalService("workflow")
            workflow.trg_validate(uid, 'purchase.order', mail.res_id, 'send_rfq', cr)
        return super(mail_mail, self)._postprocess_sent_message(
            cr, uid, mail=mail, context=context)
class product_template(osv.Model):
    # Adds the "Can be Purchased" flag on product templates.
    _name = 'product.template'
    _inherit = 'product.template'
    _columns = {
        # Controls whether the product may be selected on purchase order lines.
        'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
    }
    _defaults = {
        # Purchasable by default.
        'purchase_ok': 1,
    }
class mail_compose_message(osv.Model):
    _inherit = 'mail.compose.message'

    def send_mail(self, cr, uid, ids, context=None):
        """When the composer is used to send an RFQ on a purchase order,
        fire the 'send_rfq' workflow signal and auto-follow the thread."""
        if context is None:
            context = {}
        order_res_id = context.get('default_res_id')
        if context.get('default_model') == 'purchase.order' and order_res_id:
            context = dict(context, mail_post_autofollow=True)
            workflow = netsvc.LocalService("workflow")
            workflow.trg_validate(uid, 'purchase.order', order_res_id, 'send_rfq', cr)
        return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
[FIX] purchase: revert revision 7677 to set a default unit of measure to 'Unit'
This allows filling in a purchase.order without showing the units of measure if no product is selected
Revision 7677 was integrated to fix lp:958897 (no change of uom when selecting a product whose uom is in the category 'Units'). This fix did not solve it properly (only for the initial value) and introduced another problem. A better fix will be done in the product onchange.
bzr revid: mat@openerp.com-20131129143522-i85e4hvf0p4h3ynn
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import pytz
from openerp import SUPERUSER_ID
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
    """Compute amount_untaxed / amount_tax / amount_total for each order."""
    res = {}
    cur_obj=self.pool.get('res.currency')
    for order in self.browse(cr, uid, ids, context=context):
        res[order.id] = {
            'amount_untaxed': 0.0,
            'amount_tax': 0.0,
            'amount_total': 0.0,
        }
        # val accumulates taxes, val1 accumulates untaxed subtotals.
        val = val1 = 0.0
        cur = order.pricelist_id.currency_id
        for line in order.order_line:
            val1 += line.price_subtotal
            # Line taxes computed by the tax engine on the line price/qty.
            for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
                val += c.get('amount', 0.0)
        # Round in the order currency before summing up the total.
        res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
        res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
        res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
    return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
cr.execute("""update purchase_order_line set
date_planned=%s
where
order_id=%s and
(date_planned=%s or date_planned<%s)""", (value,po.id,po.minimum_planned_date,value))
cr.execute("""update purchase_order set
minimum_planned_date=%s where id=%s""", (value, po.id))
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
    """Earliest scheduled date among each order's lines (False when the
    order has no lines)."""
    result = {}
    for order in self.browse(cr, uid, ids, context=context):
        if order.order_line:
            result[order.id] = min(line.date_planned for line in order.order_line)
        else:
            result[order.id] = False
    return result
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
    """Percentage of the untaxed amount already covered by non-draft,
    non-cancelled supplier invoices."""
    result = {}
    for order in self.browse(cursor, user, ids, context=context):
        invoiced_total = sum(
            invoice.amount_untaxed
            for invoice in order.invoice_ids
            if invoice.state not in ('draft', 'cancel'))
        if order.amount_untaxed:
            result[order.id] = invoiced_total * 100.0 / order.amount_untaxed
        else:
            result[order.id] = 0.0
    return result
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
    """Percentage of ordered quantities already received, per purchase order."""
    if not ids: return {}
    res = {}
    # res[order_id] = [received_qty, total_qty]
    for id in ids:
        res[id] = [0.0,0.0]
    # Aggregate the move quantities of the orders' pickings by move state.
    cr.execute('''SELECT
            p.purchase_id,sum(m.product_qty), m.state
        FROM
            stock_move m
        LEFT JOIN
            stock_picking p on (p.id=m.picking_id)
        WHERE
            p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
    for oid,nbr,state in cr.fetchall():
        # Cancelled moves count neither as received nor as expected.
        if state=='cancel':
            continue
        if state=='done':
            res[oid][0] += nbr or 0.0
            res[oid][1] += nbr or 0.0
        else:
            res[oid][1] += nbr or 0.0
    # Convert [received, total] pairs into a percentage (0.0 when nothing
    # is expected).
    for r in res:
        if not res[r][1]:
            res[r] = 0.0
        else:
            res[r] = 100.0 * res[r][0] / res[r][1]
    return res
def _get_order(self, cr, uid, ids, context=None):
    """Store trigger helper: collect the distinct purchase orders owning
    the given purchase order lines."""
    order_ids = set()
    line_obj = self.pool.get('purchase.order.line')
    for order_line in line_obj.browse(cr, uid, ids, context=context):
        order_ids.add(order_line.order_id.id)
    return list(order_ids)
def _invoiced(self, cursor, user, ids, name, arg, context=None):
    """Flag each order as fully invoiced when its invoiced rate reaches 100%."""
    result = {}
    for order in self.browse(cursor, user, ids, context=context):
        result[order.id] = (order.invoiced_rate == 100.00)
    return result
def _get_journal(self, cr, uid, context=None):
    """Default journal: the first purchase journal of the current company
    (or of the company forced through the context)."""
    if context is None:
        context = {}
    user_rec = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    company_id = context.get('company_id', user_rec.company_id.id)
    journal_ids = self.pool.get('account.journal').search(
        cr, uid,
        [('type', '=', 'purchase'), ('company_id', '=', company_id)],
        limit=1)
    return journal_ids[0] if journal_ids else False
# Lifecycle states of a purchase order / request for quotation.
STATE_SELECTION = [
    ('draft', 'Draft PO'),
    ('sent', 'RFQ Sent'),
    ('confirmed', 'Waiting Approval'),
    ('approved', 'Purchase Order'),
    ('except_picking', 'Shipping Exception'),
    ('except_invoice', 'Invoice Exception'),
    ('done', 'Done'),
    ('cancel', 'Cancelled')
]

# mail.thread tracking: push a subtype notification on these state changes.
_track = {
    'state': {
        'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
        'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
    },
}
_columns = {
    # --- identification & origin ---
    'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
    'origin': fields.char('Source Document', size=64,
        help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
    ),
    'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
        help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
    # --- dates & parties ---
    'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
    'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
    'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
        change_default=True, track_visibility='always'),
    'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
        states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
        help="Put an address if you want to deliver directly from the supplier to the customer. " \
            "Otherwise, keep empty to deliver to your own company."
    ),
    # --- logistics ---
    'warehouse_id': fields.many2one('stock.warehouse', 'Destination Warehouse'),
    'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]} ),
    # --- pricing & state ---
    'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
    'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency",readonly=True, required=True),
    'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user, the status switch to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status becomes in exception.", select=True),
    'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines', states={'approved':[('readonly',True)],'done':[('readonly',True)]}),
    'validator' : fields.many2one('res.users', 'Validated by', readonly=True),
    'notes': fields.text('Terms and Conditions'),
    # --- invoicing & reception ---
    'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id', 'invoice_id', 'Invoices', help="Invoices generated for a purchase order"),
    'picking_ids': fields.one2many('stock.picking.in', 'purchase_id', 'Picking List', readonly=True, help="This is the list of incoming shipments that have been generated for this purchase order."),
    'shipped':fields.boolean('Received', readonly=True, select=True, help="It indicates that a picking has been done"),
    'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
    'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', help="It indicates that an invoice has been paid"),
    'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
    'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
        readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
        help="Based on Purchase Order lines: place individual lines in 'Invoice Control > Based on P.O. lines' from where you can selectively create an invoice.\n" \
            "Based on generated invoice: create a draft invoice you can validate later.\n" \
            "Bases on incoming shipments: let you create an invoice when receptions are validated."
    ),
    # --- computed amounts (recomputed when lines change, see store triggers) ---
    'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
        store = {
            'purchase.order.line': (_get_order, ['date_planned'], 10),
        }
    ),
    'amount_untaxed': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Untaxed Amount',
        store={
            'purchase.order.line': (_get_order, None, 10),
        }, multi="sums", help="The amount without tax", track_visibility='always'),
    'amount_tax': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Taxes',
        store={
            'purchase.order.line': (_get_order, None, 10),
        }, multi="sums", help="The tax amount"),
    'amount_total': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Total',
        store={
            'purchase.order.line': (_get_order, None, 10),
        }, multi="sums",help="The total amount"),
    # --- accounting & misc ---
    'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
    'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
    'product_id': fields.related('order_line','product_id', type='many2one', relation='product.product', string='Product'),
    'create_uid': fields.many2one('res.users', 'Responsible'),
    'company_id': fields.many2one('res.company','Company',required=True,select=1, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}),
    'journal_id': fields.many2one('account.journal', 'Journal'),
}
# Default values for new purchase orders; the '/' name is replaced by the
# sequence number in create().
_defaults = {
    'date_order': fields.date.context_today,
    'state': 'draft',
    'name': lambda obj, cr, uid, context: '/',
    'shipped': 0,
    'invoice_method': 'order',
    'invoiced': 0,
    # Purchase pricelist of the supplier preset in the context, if any.
    'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
    'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
    'journal_id': _get_journal,
}
_sql_constraints = [
    ('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
# Model registration metadata.
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = "name desc"
def create(self, cr, uid, vals, context=None):
    """Create a purchase order, assigning the next sequence number when
    no explicit reference was provided."""
    if vals.get('name', '/') == '/':
        sequence = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order')
        vals['name'] = sequence or '/'
    return super(purchase_order, self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
    """Delete purchase orders, refusing any that is not draft or cancelled."""
    purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
    unlink_ids = []
    for s in purchase_orders:
        if s['state'] in ['draft','cancel']:
            unlink_ids.append(s['id'])
        else:
            raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))

    # automatically sending subflow.delete upon deletion
    wf_service = netsvc.LocalService("workflow")
    for id in unlink_ids:
        wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)

    return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def button_dummy(self, cr, uid, ids, context=None):
    # No-op button used by the form view to force a save/onchange round-trip.
    return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
    """Propagate the selected pricelist's currency onto the order."""
    if not pricelist_id:
        return {}
    pricelist = self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context)
    return {'value': {'currency_id': pricelist.currency_id.id}}
def onchange_dest_address_id(self, cr, uid, ids, address_id):
    """On direct delivery, clear the warehouse and point the destination
    location to the customer's stock location."""
    if not address_id:
        return {}
    partner_obj = self.pool.get('res.partner')
    values = {'warehouse_id': False}
    customer = partner_obj.browse(cr, uid, address_id)
    if customer:
        values['location_id'] = customer.property_stock_customer.id
    return {'value': values}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id):
    """Deliver to the warehouse input location and drop any direct-delivery
    customer address."""
    if not warehouse_id:
        return {}
    warehouse_rec = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id)
    return {'value': {'location_id': warehouse_rec.lot_input_id.id,
                      'dest_address_id': False}}
def onchange_partner_id(self, cr, uid, ids, partner_id):
    """Refresh pricelist, fiscal position and payment term from the
    selected supplier (cleared when no supplier is set).

    Removed a dead `address_get` call whose result was never used, and
    moved the registry lookup after the early return.
    """
    if not partner_id:
        return {'value': {
            'fiscal_position': False,
            'payment_term_id': False,
        }}
    partner = self.pool.get('res.partner')
    supplier = partner.browse(cr, uid, partner_id)
    return {'value': {
        'pricelist_id': supplier.property_product_pricelist_purchase.id,
        'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
        'payment_term_id': supplier.property_supplier_payment_term.id or False,
    }}
def invoice_open(self, cr, uid, ids, context=None):
    """Open the supplier invoices linked to the given purchase orders.

    :return: an ir.actions.act_window dict — list view when several
             invoices exist, form view when exactly one
    :raise osv.except_osv: when no invoice has been created yet
    """
    mod_obj = self.pool.get('ir.model.data')
    act_obj = self.pool.get('ir.actions.act_window')

    result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
    # Named 'action_id' to avoid shadowing the builtin id().
    action_id = result and result[1] or False
    result = act_obj.read(cr, uid, [action_id], context=context)[0]
    inv_ids = []
    for po in self.browse(cr, uid, ids, context=context):
        inv_ids += [invoice.id for invoice in po.invoice_ids]
    if not inv_ids:
        raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
    # choose the view_mode accordingly
    if len(inv_ids) > 1:
        result['domain'] = "[('id','in',[" + ','.join(map(str, inv_ids)) + "])]"
    else:
        res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
        result['views'] = [(res and res[1] or False, 'form')]
        result['res_id'] = inv_ids and inv_ids[0] or False
    return result
    def view_invoice(self, cr, uid, ids, context=None):
        '''
        This function returns an action that displays existing invoices of the
        given purchase order ids. For orders with invoice_method 'manual' and
        no invoice yet, the purchase.order.line_invoice wizard is run first to
        generate them. The action opens a single supplier invoice form.
        '''
        mod_obj = self.pool.get('ir.model.data')
        wizard_obj = self.pool.get('purchase.order.line_invoice')
        #compute the number of invoices to display
        inv_ids = []
        for po in self.browse(cr, uid, ids, context=context):
            if po.invoice_method == 'manual':
                if not po.invoice_ids:
                    # NOTE(review): mutates the caller's context and assumes it
                    # is a dict -- crashes if called with context=None.
                    context.update({'active_ids' : [line.id for line in po.order_line]})
                    wizard_obj.makeInvoices(cr, uid, [], context=context)
        # re-browse: the wizard above may have linked fresh invoices
        for po in self.browse(cr, uid, ids, context=context):
            inv_ids+= [invoice.id for invoice in po.invoice_ids]
        res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
        res_id = res and res[1] or False
        return {
            'name': _('Supplier Invoices'),
            'view_type': 'form',
            'view_mode': 'form',
            'view_id': [res_id],
            'res_model': 'account.invoice',
            'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
            'type': 'ir.actions.act_window',
            'nodestroy': True,
            'target': 'current',
            'res_id': inv_ids and inv_ids[0] or False,
        }
    def view_picking(self, cr, uid, ids, context=None):
        '''
        This function returns an action that displays existing picking orders
        of the given purchase order ids (form view when there is exactly one
        picking, list view otherwise).
        '''
        mod_obj = self.pool.get('ir.model.data')
        pick_ids = []
        for po in self.browse(cr, uid, ids, context=context):
            pick_ids += [picking.id for picking in po.picking_ids]
        action_model, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree4'))
        action = self.pool.get(action_model).read(cr, uid, action_id, context=context)
        # NOTE(review): eval of the action's context string -- the value comes
        # from the server-side action definition, not from user input, but it
        # would be safer as safe_eval; flagged for review.
        ctx = eval(action['context'])
        ctx.update({
            'search_default_purchase_id': ids[0]
        })
        if pick_ids and len(pick_ids) == 1:
            # exactly one picking: open it directly in form view
            form_view_ids = [view_id for view_id, view in action['views'] if view == 'form']
            view_id = form_view_ids and form_view_ids[0] or False
            action.update({
                'views': [],
                'view_mode': 'form',
                'view_id': view_id,
                'res_id': pick_ids[0]
            })
        action.update({
            'context': ctx,
        })
        return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def print_confirm(self,cr,uid,ids,context=None):
print "Confirmed"
def print_double(self,cr,uid,ids,context=None):
print "double Approval"
def print_router(self,cr,uid,ids,context=None):
print "Routed"
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
    def print_quotation(self, cr, uid, ids, context=None):
        '''
        This function prints the request for quotation and marks it as sent, so that we can see more easily the next step of the workflow
        '''
        assert len(ids) == 1, 'This option should only be used for a single id at a time'
        # fire the 'send_rfq' workflow signal so the order is flagged as sent
        wf_service = netsvc.LocalService("workflow")
        wf_service.trg_validate(uid, 'purchase.order', ids[0], 'send_rfq', cr)
        # data handed to the report engine: the full record read of the order
        datas = {
            'model': 'purchase.order',
            'ids': ids,
            'form': self.read(cr, uid, ids[0], context=context),
        }
        return {'type': 'ir.actions.report.xml', 'report_name': 'purchase.quotation', 'datas': datas, 'nodestroy': True}
#TODO: implement messages system
    def wkf_confirm_order(self, cr, uid, ids, context=None):
        """Workflow hook: confirm purchase orders and their draft lines.

        Raises osv.except_osv when an order has no line; records the
        confirming user in ``validator``.
        """
        todo = []
        for po in self.browse(cr, uid, ids, context=context):
            if not po.order_line:
                raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
            for line in po.order_line:
                if line.state=='draft':
                    todo.append(line.id)
        # confirm the collected draft lines in one batch
        self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
        for id in ids:
            self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
        return True
    def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
        """Pick the expense account to invoice for one purchase order line.

        Product lines use the product's expense account, falling back to the
        product category's; productless lines fall back to the default
        'property_account_expense_categ' property. The result is mapped
        through the order's fiscal position.

        :return: id of the account.account to use on the invoice line
        """
        fiscal_obj = self.pool.get('account.fiscal.position')
        property_obj = self.pool.get('ir.property')
        if po_line.product_id:
            acc_id = po_line.product_id.property_account_expense.id
            if not acc_id:
                acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
            if not acc_id:
                raise osv.except_osv(_('Error!'), _('Define expense account for this company: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
        else:
            # no product on the line: use the company-level default property
            acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
        fpos = po_line.order_id.fiscal_position or False
        return fiscal_obj.map_account(cr, uid, fpos, acc_id)
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
}
    def action_cancel_draft(self, cr, uid, ids, context=None):
        """Reset cancelled purchase orders back to draft.

        Also restarts each order's workflow instance so the normal
        confirmation flow can run again. Returns False on an empty id list.
        """
        if not len(ids):
            return False
        self.write(cr, uid, ids, {'state':'draft','shipped':0})
        wf_service = netsvc.LocalService("workflow")
        for p_id in ids:
            # Deleting the existing instance of workflow for PO
            wf_service.trg_delete(uid, 'purchase.order', p_id, cr)
            wf_service.trg_create(uid, 'purchase.order', p_id, cr)
        return True
    def action_invoice_create(self, cr, uid, ids, context=None):
        """Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.

        :param ids: list of ids of purchase orders.
        :return: ID of created invoice.
        :rtype: int

        NOTE(review): mutates the caller's context (adds/removes
        'force_company') while iterating; when several ids are passed, only
        the last created invoice id is returned.
        """
        if context is None:
            context = {}
        journal_obj = self.pool.get('account.journal')
        inv_obj = self.pool.get('account.invoice')
        inv_line_obj = self.pool.get('account.invoice.line')
        res = False
        uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        for order in self.browse(cr, uid, ids, context=context):
            context.pop('force_company', None)
            if order.company_id.id != uid_company_id:
                #if the company of the document is different than the current user company, force the company in the context
                #then re-do a browse to read the property fields for the good company.
                context['force_company'] = order.company_id.id
                order = self.browse(cr, uid, order.id, context=context)
            pay_acc_id = order.partner_id.property_account_payable.id
            journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase'), ('company_id', '=', order.company_id.id)], limit=1)
            if not journal_ids:
                raise osv.except_osv(_('Error!'),
                    _('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
            # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
            inv_lines = []
            for po_line in order.order_line:
                acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
                inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
                inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
                inv_lines.append(inv_line_id)
                # flag the PO line invoiced and link it to the new invoice line
                po_line.write({'invoiced': True, 'invoice_lines': [(4, inv_line_id)]}, context=context)
            # get invoice data and create invoice
            inv_data = {
                'name': order.partner_ref or order.name,
                'reference': order.partner_ref or order.name,
                'account_id': pay_acc_id,
                'type': 'in_invoice',
                'partner_id': order.partner_id.id,
                'currency_id': order.pricelist_id.currency_id.id,
                'journal_id': len(journal_ids) and journal_ids[0] or False,
                'invoice_line': [(6, 0, inv_lines)],
                'origin': order.name,
                'fiscal_position': order.fiscal_position.id or False,
                'payment_term': order.payment_term_id.id or False,
                'company_id': order.company_id.id,
            }
            inv_id = inv_obj.create(cr, uid, inv_data, context=context)
            # compute the invoice
            inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
            # Link this new invoice to related purchase order
            order.write({'invoice_ids': [(4, inv_id)]}, context=context)
            res = inv_id
        return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
    def action_cancel(self, cr, uid, ids, context=None):
        """Cancel purchase orders together with their pickings and invoices.

        Refuses to cancel when a reception is already being processed or an
        invoice is beyond draft; otherwise cancels related documents through
        their workflows, then fires 'purchase_cancel' on each order.
        """
        wf_service = netsvc.LocalService("workflow")
        for purchase in self.browse(cr, uid, ids, context=context):
            # first pass: refuse to cancel if any reception is in progress
            for pick in purchase.picking_ids:
                if pick.state not in ('draft','cancel'):
                    raise osv.except_osv(
                        _('Unable to cancel this purchase order.'),
                        _('First cancel all receptions related to this purchase order.'))
            for pick in purchase.picking_ids:
                wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_cancel', cr)
            for inv in purchase.invoice_ids:
                if inv and inv.state not in ('cancel','draft'):
                    raise osv.except_osv(
                        _('Unable to cancel this purchase order.'),
                        _('You must first cancel all receptions related to this purchase order.'))
                if inv:
                    wf_service.trg_validate(uid, 'account.invoice', inv.id, 'invoice_cancel', cr)
        self.write(cr,uid,ids,{'state':'cancel'})
        # name_get is only used here to iterate over ids; fire the workflow
        # cancellation signal for every order
        for (id, name) in self.name_get(cr, uid, ids):
            wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
        return True
    def date_to_datetime(self, cr, uid, userdate, context=None):
        """ Convert date values expressed in user's timezone to
        server-side UTC timestamp, assuming a default arbitrary
        time of 12:00 AM - because a time is needed.

        :param str userdate: date string in user time zone
        :return: UTC datetime string for server-side use
        """
        # TODO: move to fields.datetime in server after 7.0
        user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            # fall back to the user's configured timezone (read as superuser)
            tz_name = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
        if tz_name:
            utc = pytz.timezone('UTC')
            context_tz = pytz.timezone(tz_name)
            # anchor at noon local time, then express that instant in UTC
            user_datetime = user_date + relativedelta(hours=12.0)
            local_timestamp = context_tz.localize(user_datetime, is_dst=False)
            user_datetime = local_timestamp.astimezone(utc)
            return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        # no timezone information available: return the naive date at 00:00
        return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
def _prepare_order_picking(self, cr, uid, order, context=None):
return {
'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),
'origin': order.name + ((order.origin and (':' + order.origin)) or ''),
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'partner_id': order.dest_address_id.id or order.partner_id.id,
'invoice_state': '2binvoiced' if order.invoice_method == 'picking' else 'none',
'type': 'in',
'partner_id': order.dest_address_id.id or order.partner_id.id,
'purchase_id': order.id,
'company_id': order.company_id.id,
'move_lines' : [],
}
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
return {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_qty': order_line.product_qty,
'product_uos_qty': order_line.product_qty,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'date_expected': self.date_to_datetime(cr, uid, order_line.date_planned, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'move_dest_id': order_line.move_dest_id.id,
'state': 'draft',
'type':'in',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': order_line.price_unit
}
    def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
        """Creates pickings and appropriate stock moves for given order lines, then
        confirms the moves, makes them available, and confirms the picking.

        If ``picking_id`` is provided, the stock moves will be added to it, otherwise
        a standard outgoing picking will be created to wrap the stock moves, as returned
        by :meth:`~._prepare_order_picking`.

        Modules that wish to customize the procurements or partition the stock moves over
        multiple stock pickings may override this method and call ``super()`` with
        different subsets of ``order_lines`` and/or preset ``picking_id`` values.

        :param browse_record order: purchase order to which the order lines belong
        :param list(browse_record) order_lines: purchase order line records for which picking
                                                and moves should be created.
        :param int picking_id: optional ID of a stock picking to which the created stock moves
                               will be added. A new picking will be created if omitted.
        :return: list of IDs of pickings used/created for the given order lines (usually just one)
        """
        if not picking_id:
            picking_id = self.pool.get('stock.picking').create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
        todo_moves = []
        stock_move = self.pool.get('stock.move')
        wf_service = netsvc.LocalService("workflow")
        for order_line in order_lines:
            # lines without a product, or with a pure service product, do not
            # generate stock moves
            if not order_line.product_id:
                continue
            if order_line.product_id.type in ('product', 'consu'):
                move = stock_move.create(cr, uid, self._prepare_order_line_move(cr, uid, order, order_line, picking_id, context=context))
                if order_line.move_dest_id:
                    # redirect the chained destination move to the order's location
                    order_line.move_dest_id.write({'location_id': order.location_id.id})
                todo_moves.append(move)
        stock_move.action_confirm(cr, uid, todo_moves)
        stock_move.force_assign(cr, uid, todo_moves)
        wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
        return [picking_id]
def action_picking_create(self, cr, uid, ids, context=None):
picking_ids = []
for order in self.browse(cr, uid, ids):
picking_ids.extend(self._create_pickings(cr, uid, order, order.order_line, None, context=context))
# Must return one unique picking ID: the one to connect in the subflow of the purchase order.
# In case of multiple (split) pickings, we should return the ID of the critical one, i.e. the
# one that should trigger the advancement of the purchase workflow.
# By default we will consider the first one as most important, but this behavior can be overridden.
return picking_ids[0] if picking_ids else False
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'state':'draft',
'shipped':False,
'invoiced':False,
'invoice_ids': [],
'picking_ids': [],
'partner_ref': '',
'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order'),
})
return super(purchase_order, self).copy(cr, uid, id, default, context)
    def do_merge(self, cr, uid, ids, context=None):
        """
        To merge similar type of purchase orders.
        Orders will only be merged if:
        * Purchase Orders are in draft
        * Purchase Orders belong to the same partner
        * Purchase Orders are have same stock location, same pricelist
        Lines will only be merged if:
        * Order lines are exactly the same except for the quantity and unit

         @param self: The object pointer.
         @param cr: A database cursor
         @param uid: ID of the user currently logged in
         @param ids: the ID or list of IDs
         @param context: A standard dictionary

         @return: new purchase order id

        """
        #TOFIX: merged order line should be unlink
        wf_service = netsvc.LocalService("workflow")
        def make_key(br, fields):
            # Build a hashable, order-independent signature of the browse
            # record over the listed fields: records become ids, record lists
            # become (6,0,ids) tuples, empty values become False.
            list_key = []
            for field in fields:
                field_val = getattr(br, field)
                if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
                    if not field_val:
                        field_val = False
                if isinstance(field_val, browse_record):
                    field_val = field_val.id
                elif isinstance(field_val, browse_null):
                    field_val = False
                elif isinstance(field_val, list):
                    field_val = ((6, 0, tuple([v.id for v in field_val])),)
                list_key.append((field, field_val))
            list_key.sort()
            return tuple(list_key)
        # Compute what the new orders should contain
        new_orders = {}
        for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
            order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id'))
            new_order = new_orders.setdefault(order_key, ({}, []))
            new_order[1].append(porder.id)
            order_infos = new_order[0]
            if not order_infos:
                # first order of this group: seed the merged order's values
                order_infos.update({
                    'origin': porder.origin,
                    'date_order': porder.date_order,
                    'partner_id': porder.partner_id.id,
                    'dest_address_id': porder.dest_address_id.id,
                    'warehouse_id': porder.warehouse_id.id,
                    'location_id': porder.location_id.id,
                    'pricelist_id': porder.pricelist_id.id,
                    'state': 'draft',
                    'order_line': {},
                    'notes': '%s' % (porder.notes or '',),
                    'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
                })
            else:
                # later orders: keep the earliest date, append notes/origins
                if porder.date_order < order_infos['date_order']:
                    order_infos['date_order'] = porder.date_order
                if porder.notes:
                    order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
                if porder.origin:
                    order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
            for order_line in porder.order_line:
                line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'product_id', 'move_dest_id', 'account_analytic_id'))
                o_line = order_infos['order_line'].setdefault(line_key, {})
                if o_line:
                    # merge the line with an existing line
                    o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
                else:
                    # append a new "standalone" line
                    for field in ('product_qty', 'product_uom'):
                        field_val = getattr(order_line, field)
                        if isinstance(field_val, browse_record):
                            field_val = field_val.id
                        o_line[field] = field_val
                    o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0
        allorders = []
        orders_info = {}
        for order_key, (order_data, old_ids) in new_orders.iteritems():
            # skip merges with only one order
            if len(old_ids) < 2:
                allorders += (old_ids or [])
                continue
            # cleanup order line data
            for key, value in order_data['order_line'].iteritems():
                del value['uom_factor']
                value.update(dict(key))
            order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]
            # create the new order
            neworder_id = self.create(cr, uid, order_data)
            orders_info.update({neworder_id: old_ids})
            allorders.append(neworder_id)
            # make triggers pointing to the old orders point to the new order
            for old_id in old_ids:
                wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr)
                wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr)
        return orders_info
class purchase_order_line(osv.osv):
    """A single line of a purchase order: the ordered product, quantity,
    unit price and taxes, plus the onchange helpers used by the purchase
    order form."""
    def _amount_line(self, cr, uid, ids, prop, arg, context=None):
        """Function field: line subtotal = taxed total of the line, rounded
        in the currency of the order's pricelist."""
        res = {}
        cur_obj=self.pool.get('res.currency')
        tax_obj = self.pool.get('account.tax')
        for line in self.browse(cr, uid, ids, context=context):
            taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
            cur = line.order_id.pricelist_id.currency_id
            res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
        return res
    def _get_uom_id(self, cr, uid, context=None):
        """Default UoM: the generic 'Unit' UoM, or False if unavailable."""
        try:
            proxy = self.pool.get('ir.model.data')
            result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
            return result[1]
        except Exception, ex:
            return False
    # field definitions
    _columns = {
        'name': fields.text('Description', required=True),
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'date_planned': fields.date('Scheduled Date', required=True, select=True),
        'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
        'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
        'move_dest_id': fields.many2one('stock.move', 'Reservation Destination', ondelete='set null'),
        'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
        'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
        'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
        'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
        'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
        'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', required=True, readonly=True,
                                  help=' * The \'Draft\' status is set automatically when purchase order in draft status. \
                                       \n* The \'Confirmed\' status is set automatically as confirm when purchase order in confirm status. \
                                       \n* The \'Done\' status is set automatically when purchase order is set as done. \
                                       \n* The \'Cancelled\' status is set automatically when user cancel purchase order.'),
        'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
        'invoiced': fields.boolean('Invoiced', readonly=True),
        'partner_id': fields.related('order_id','partner_id',string='Partner',readonly=True,type="many2one", relation="res.partner", store=True),
        'date_order': fields.related('order_id','date_order',string='Order Date',readonly=True,type="date")
    }
    _defaults = {
        'product_uom' : _get_uom_id,
        'product_qty': lambda *a: 1.0,
        'state': lambda *args: 'draft',
        'invoiced': lambda *a: 0,
    }
    _table = 'purchase_order_line'
    _name = 'purchase.order.line'
    _description = 'Purchase Order Line'
    def copy_data(self, cr, uid, id, default=None, context=None):
        """Reset lifecycle fields when a line is copied: draft state, no
        moves, not invoiced, no invoice lines."""
        if not default:
            default = {}
        default.update({'state':'draft', 'move_ids':[],'invoiced':0,'invoice_lines':[]})
        return super(purchase_order_line, self).copy_data(cr, uid, id, default, context)
    def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
            partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
            name=False, price_unit=False, context=None):
        """
        onchange handler of product_uom.

        Delegates to onchange_product_id so that price, minimal quantity and
        warnings are recomputed for the newly selected unit of measure.
        """
        if not uom_id:
            return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
        return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
            partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
            name=name, price_unit=price_unit, context=context)
    def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
        """Return the datetime value to use as Schedule Date (``date_planned``) for
           PO Lines that correspond to the given product.supplierinfo,
           when ordered at `date_order_str`.

           :param browse_record | False supplier_info: product.supplierinfo, used to
               determine delivery delay (if False, default delay = 0)
           :param str date_order_str: date of order, as a string in
               DEFAULT_SERVER_DATE_FORMAT
           :rtype: datetime
           :return: desired Schedule Date for the PO line
        """
        supplier_delay = int(supplier_info.delay) if supplier_info else 0
        return datetime.strptime(date_order_str, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(days=supplier_delay)
    def _check_product_uom_group(self, cr, uid, context=None):
        """Return True when the current user belongs to the 'Manage Units of
        Measure' group (used to decide whether to show UoM warnings)."""
        group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
        res = [user for user in group_uom.users if user.id == uid]
        return len(res) and True or False
    def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
            partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
            name=False, price_unit=False, context=None):
        """
        onchange handler of product_id.

        Recomputes name, unit of measure (with category check), minimal
        quantity and schedule date from the supplier info, and price/taxes
        from the pricelist and fiscal position.
        """
        if context is None:
            context = {}
        res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
        if not product_id:
            return res
        product_product = self.pool.get('product.product')
        product_uom = self.pool.get('product.uom')
        res_partner = self.pool.get('res.partner')
        product_supplierinfo = self.pool.get('product.supplierinfo')
        product_pricelist = self.pool.get('product.pricelist')
        account_fiscal_position = self.pool.get('account.fiscal.position')
        account_tax = self.pool.get('account.tax')
        # - check for the presence of partner_id and pricelist_id
        #if not partner_id:
        #    raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
        #if not pricelist_id:
        #    raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))
        # - determine name and notes based on product in partner lang.
        context_partner = context.copy()
        if partner_id:
            lang = res_partner.browse(cr, uid, partner_id).lang
            context_partner.update( {'lang': lang, 'partner_id': partner_id} )
        product = product_product.browse(cr, uid, product_id, context=context_partner)
        #call name_get() with partner in the context to eventually match name and description in the seller_ids field
        dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
        if product.description_purchase:
            name += '\n' + product.description_purchase
        res['value'].update({'name': name})
        # - set a domain on product_uom
        res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
        # - check that uom and product uom belong to the same category
        product_uom_po_id = product.uom_po_id.id
        if not uom_id:
            uom_id = product_uom_po_id
        if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
            if self._check_product_uom_group(cr, uid, context=context):
                res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
            uom_id = product_uom_po_id
        res['value'].update({'product_uom': uom_id})
        # - determine product_qty and date_planned based on seller info
        if not date_order:
            date_order = fields.date.context_today(self,cr,uid,context=context)
        supplierinfo = False
        for supplier in product.seller_ids:
            if partner_id and (supplier.name.id == partner_id):
                supplierinfo = supplier
                if supplierinfo.product_uom.id != uom_id:
                    res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
                min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
                if (qty or 0.0) < min_qty: # If the supplier quantity is greater than entered from user, set minimal.
                    if qty:
                        res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
                    qty = min_qty
        dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        qty = qty or 1.0
        res['value'].update({'date_planned': date_planned or dt})
        if qty:
            res['value'].update({'product_qty': qty})
        # - determine price_unit and taxes_id
        if pricelist_id:
            price = product_pricelist.price_get(cr, uid, [pricelist_id],
                    product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order})[pricelist_id]
        else:
            # no pricelist: fall back to the product's cost price
            price = product.standard_price
        taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
        fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
        taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
        res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
        return res
    # legacy aliases kept for backward compatibility with older views/modules
    product_id_change = onchange_product_id
    product_uom_change = onchange_product_uom
    def action_confirm(self, cr, uid, ids, context=None):
        """Workflow helper: move the given lines to the 'confirmed' state."""
        self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
        return True
purchase_order_line()
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_id': fields.many2one('purchase.order', 'Purchase Order'),
}
def check_buy(self, cr, uid, ids, context=None):
''' return True if the supply method of the mto product is 'buy'
'''
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.product_id.supply_method <> 'buy':
return False
return True
    def check_supplier_info(self, cr, uid, ids, context=None):
        """Workflow check: verify each procurement's product can be purchased.

        Requires the product to have at least one supplier, a main supplier,
        and a delivery address for that supplier. On failure the
        procurement's ``message`` field is updated and False is returned.
        Raises a configuration error when the main supplier is the user's own
        company.
        """
        partner_obj = self.pool.get('res.partner')
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        for procurement in self.browse(cr, uid, ids, context=context):
            message = ''
            partner = procurement.product_id.seller_id #Taken Main Supplier of Product of Procurement.
            if not procurement.product_id.seller_ids:
                message = _('No supplier defined for this product !')
            elif not partner:
                message = _('No default supplier defined for this product')
            elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
                message = _('No address defined for the supplier')
            if message:
                if procurement.message != message:
                    # raw (parameterized) SQL update -- presumably to bypass
                    # the ORM write machinery; TODO confirm intent
                    cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
                return False
            if user.company_id and user.company_id.partner_id:
                if partner.id == user.company_id.partner_id.id:
                    raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!' % procurement.product_id.name))
        return True
def action_po_assign(self, cr, uid, ids, context=None):
""" This is action which call from workflow to assign purchase order to procurements
@return: True
"""
res = self.make_po(cr, uid, ids, context=context)
res = res.values()
return len(res) and res[0] or 0 #TO CHECK: why workflow is generated error if return not integer value
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
:params procurement: the procurement object generating the purchase order
:params dict po_vals: field values for the new purchase order (the
``order_line`` field will be overwritten with one
single line, as passed in ``line_vals``).
:params dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_report company: the company to which the new PO will belong to.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
    """Compute the Order Date (``date_order``) for the Purchase Order
    created to satisfy the given procurement.

    :param browse_record procurement: the procurement for which a PO will be created.
    :param browse_record company: the company to which the new PO will belong to.
    :param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
    :rtype: datetime
    :return: the desired Order Date for the PO
    """
    # Order early enough to absorb the supplier's delivery lead time.
    supplier_lead_days = int(procurement.product_id.seller_delay)
    return schedule_date - relativedelta(days=supplier_lead_days)
def _get_warehouse(self, procurement, user_company):
    """
    Return the warehouse containing the procurment stock location (or one of it ancestors)
    If none match, returns then first warehouse of the company
    """
    # TODO refactor the domain once we implement the "parent_of" domain operator
    # NOTE This method has been copied in the `purchase_requisition` module to ensure
    # retro-compatibility. This code duplication will be deleted in next stable version.
    # Do not forget to update both version in case of modification.
    company_id = (procurement.company_id or user_company).id
    # Two search domains tried in order:
    #  1. a warehouse of the company whose stock location either IS the
    #     procurement location or strictly contains it (ancestor test done
    #     with the parent_left/parent_right nested-set bounds);
    #  2. fallback: any warehouse of the company.
    domains = [
        [
            '&', ('company_id', '=', company_id),
            '|', '&', ('lot_stock_id.parent_left', '<', procurement.location_id.parent_left),
            ('lot_stock_id.parent_right', '>', procurement.location_id.parent_right),
            ('lot_stock_id', '=', procurement.location_id.id)
        ],
        [('company_id', '=', company_id)]
    ]
    cr, uid = procurement._cr, procurement._uid
    context = procurement._context
    Warehouse = self.pool['stock.warehouse']
    for domain in domains:
        ids = Warehouse.search(cr, uid, domain, context=context)
        if ids:
            # First match of the most specific domain wins.
            return ids[0]
    # No warehouse found for this company at all.
    return False
def make_po(self, cr, uid, ids, context=None):
    """ Make purchase order from procurement
    @return: New created Purchase Orders procurement wise
    """
    res = {}
    if context is None:
        context = {}
    # Company of the current user: used for schedule/order date computation
    # and as a fallback for the warehouse lookup.
    company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
    partner_obj = self.pool.get('res.partner')
    uom_obj = self.pool.get('product.uom')
    pricelist_obj = self.pool.get('product.pricelist')
    prod_obj = self.pool.get('product.product')
    acc_pos_obj = self.pool.get('account.fiscal.position')
    seq_obj = self.pool.get('ir.sequence')
    for procurement in self.browse(cr, uid, ids, context=context):
        res_id = procurement.move_id.id
        partner = procurement.product_id.seller_id # Taken Main Supplier of Product of Procurement.
        seller_qty = procurement.product_id.seller_qty
        partner_id = partner.id
        # NOTE(review): address_id is computed but never used below —
        # confirm whether it can be removed.
        address_id = partner_obj.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
        pricelist_id = partner.property_product_pricelist_purchase.id
        uom_id = procurement.product_id.uom_po_id.id
        # Convert the procured quantity into the purchase UoM and honour the
        # supplier's minimal order quantity, if any.
        qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
        if seller_qty:
            qty = max(qty,seller_qty)
        price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner_id, {'uom': uom_id})[pricelist_id]
        schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
        purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
        #Passing partner_id to context for purchase order line integrity of Line name
        new_context = context.copy()
        new_context.update({'lang': partner.lang, 'partner_id': partner_id})
        product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
        taxes_ids = procurement.product_id.supplier_taxes_id
        # Map the product's supplier taxes through the partner's fiscal position.
        taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position, taxes_ids)
        name = product.partner_ref
        if product.description_purchase:
            name += '\n'+ product.description_purchase
        line_vals = {
            'name': name,
            'product_qty': qty,
            'product_id': procurement.product_id.id,
            'product_uom': uom_id,
            'price_unit': price or 0.0,
            'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
            'move_dest_id': res_id,
            'taxes_id': [(6,0,taxes)],
        }
        # PO reference taken from the sequence, with a fallback on the
        # procurement name when the sequence returns nothing.
        name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
        po_vals = {
            'name': name,
            'origin': procurement.origin,
            'partner_id': partner_id,
            'location_id': procurement.location_id.id,
            'warehouse_id': self._get_warehouse(procurement, company),
            'pricelist_id': pricelist_id,
            'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
            'company_id': procurement.company_id.id,
            'fiscal_position': partner.property_account_position and partner.property_account_position.id or False,
            'payment_term_id': partner.property_supplier_payment_term.id or False,
        }
        res[procurement.id] = self.create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=new_context)
        # Link the procurement to its new PO and mark it as running.
        self.write(cr, uid, [procurement.id], {'state': 'running', 'purchase_id': res[procurement.id]})
    self.message_post(cr, uid, ids, body=_("Draft Purchase Order created"), context=context)
    return res
def _product_virtual_get(self, cr, uid, order_point):
    """Skip the virtual stock computation when the order point's procurement
    is already covered by a pending (draft/confirmed) purchase order."""
    proc = order_point.procurement_id
    if proc and proc.state != 'exception':
        purchase = proc.purchase_id
        if purchase and purchase.state in ('draft', 'confirmed'):
            return None
    return super(procurement_order, self)._product_virtual_get(cr, uid, order_point)
class mail_mail(osv.Model):
    """Advance the purchase workflow once an RFQ email has been sent."""
    _name = 'mail.mail'
    _inherit = 'mail.mail'

    def _postprocess_sent_message(self, cr, uid, mail, context=None):
        # When the sent email belongs to a purchase order, fire the
        # 'send_rfq' workflow signal on that order.
        if mail.model == 'purchase.order':
            workflow = netsvc.LocalService("workflow")
            workflow.trg_validate(uid, 'purchase.order', mail.res_id, 'send_rfq', cr)
        return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context)
class product_template(osv.Model):
    # Extend product.template with a flag controlling whether the product
    # may be selected on purchase order lines (enabled by default).
    _name = 'product.template'
    _inherit = 'product.template'
    _columns = {
        'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
    }
    _defaults = {
        'purchase_ok': 1,
    }
class mail_compose_message(osv.Model):
    """Advance the purchase workflow when an RFQ is sent from the composer."""
    _inherit = 'mail.compose.message'

    def send_mail(self, cr, uid, ids, context=None):
        context = context or {}
        composing_for_po = (context.get('default_model') == 'purchase.order'
                            and context.get('default_res_id'))
        if composing_for_po:
            # Auto-subscribe the recipients, then fire the 'send_rfq' signal
            # on the purchase order being composed for.
            context = dict(context, mail_post_autofollow=True)
            workflow = netsvc.LocalService("workflow")
            workflow.trg_validate(uid, 'purchase.order', context['default_res_id'], 'send_rfq', cr)
        return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes/functions related to GKE (Google Container Engine)."""
import json
import logging
import os
from perfkitbenchmarker import container_service
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import kubernetes_helper
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.gcp import gce_virtual_machine
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
FLAGS.kubernetes_anti_affinity = False
NVIDIA_DRIVER_SETUP_DAEMON_SET_SCRIPT = 'https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/k8s-1.9/nvidia-driver-installer/cos/daemonset-preloaded.yaml'
NVIDIA_UNRESTRICTED_PERMISSIONS_DAEMON_SET = 'nvidia_unrestricted_permissions_daemonset.yml'
DEFAULT_CONTAINER_VERSION = 'latest'
class GoogleContainerRegistry(container_service.BaseContainerRegistry):
  """Class for building and storing container images on GCP."""

  CLOUD = providers.GCP

  def __init__(self, registry_spec):
    super(GoogleContainerRegistry, self).__init__(registry_spec)
    if not self.project:
      self.project = util.GetDefaultProject()

  def GetFullRegistryTag(self, image):
    """Gets the full tag of the image."""
    multi_region = util.GetMultiRegionFromRegion(util.GetRegionFromZone(self.zone))
    return '{hostname}/{project}/{name}'.format(
        hostname='{region}.gcr.io'.format(region=multi_region),
        project=self.project, name=image)

  def Login(self):
    """No-op because Push() handles its own auth."""
    pass

  def Push(self, image):
    """Push a locally built image to the registry."""
    remote_tag = self.GetFullRegistryTag(image.name)
    vm_util.IssueCommand(['docker', 'tag', image.name, remote_tag])
    # vm_util.IssueCommand() is used here instead of util.GcloudCommand()
    # because gcloud flags cannot be appended to the command since they
    # are interpreted as docker args instead.
    vm_util.IssueCommand([
        FLAGS.gcloud_path, '--project', self.project,
        'docker', '--', 'push', remote_tag
    ])

  def RemoteBuild(self, image):
    """Build the image remotely."""
    remote_tag = self.GetFullRegistryTag(image.name)
    build_cmd = util.GcloudCommand(self, 'builds', 'submit',
                                   '--tag', remote_tag, image.directory)
    # 'builds submit' does not accept a --zone flag.
    del build_cmd.flags['zone']
    build_cmd.Issue()
class GkeCluster(container_service.KubernetesCluster):
  """Class representing a Google Container Engine cluster."""

  CLOUD = providers.GCP

  def _GetRequiredGkeEnv(self):
    """Returns a copy of os.environ with GKE credential settings applied."""
    env = os.environ.copy()
    if self.use_application_default_credentials:
      env['CLOUDSDK_CONTAINER_USE_APPLICATION_DEFAULT_CREDENTIALS'] = 'true'
    return env

  def __init__(self, spec):
    super(GkeCluster, self).__init__(spec)
    self.project = spec.vm_spec.project
    self.min_cpu_platform = spec.vm_spec.min_cpu_platform
    self.gce_accelerator_type_override = FLAGS.gce_accelerator_type_override
    self.cluster_version = (FLAGS.container_cluster_version or
                            DEFAULT_CONTAINER_VERSION)
    # May be set to False in _Create() when a service account is used.
    self.use_application_default_credentials = True

  def GetResourceMetadata(self):
    """Returns a dict containing metadata about the cluster.
    Returns:
      dict mapping string property key to value.
    """
    result = super(GkeCluster, self).GetResourceMetadata()
    if self.gce_accelerator_type_override:
      result['accelerator_type_override'] = self.gce_accelerator_type_override
    result['container_cluster_version'] = self.cluster_version
    return result

  def _Create(self):
    """Creates the cluster."""
    # GPU and min-cpu-platform options require the 'beta' command surface.
    if self.min_cpu_platform or self.gpu_count:
      cmd = util.GcloudCommand(
          self, 'beta', 'container', 'clusters', 'create', self.name)
    else:
      cmd = util.GcloudCommand(
          self, 'container', 'clusters', 'create', self.name)
    cmd.flags['cluster-version'] = self.cluster_version
    if FLAGS.gke_enable_alpha:
      # Alpha clusters cannot have auto-repair/auto-upgrade enabled.
      cmd.args.append('--enable-kubernetes-alpha')
      cmd.args.append('--no-enable-autorepair')
      cmd.args.append('--no-enable-autoupgrade')
    user = util.GetDefaultUser()
    if FLAGS.gcp_service_account:
      cmd.flags['service-account'] = FLAGS.gcp_service_account
    elif 'gserviceaccount.com' in user:
      # The default user already is a service account; use it directly.
      cmd.flags['service-account'] = user
      self.use_application_default_credentials = False
    else:
      cmd.flags['scopes'] = 'cloud-platform'
    if self.gpu_count:
      cmd.flags['accelerator'] = (gce_virtual_machine.
                                  GenerateAcceleratorSpecString(self.gpu_type,
                                                                self.gpu_count))
    if self.min_cpu_platform:
      cmd.flags['min-cpu-platform'] = self.min_cpu_platform
    # Enable autoscaling only when min/max differ from the nominal node count.
    if self.min_nodes != self.num_nodes or self.max_nodes != self.num_nodes:
      cmd.args.append('--enable-autoscaling')
      cmd.flags['max-nodes'] = self.max_nodes
      cmd.flags['min-nodes'] = self.min_nodes
    cmd.flags['num-nodes'] = self.num_nodes
    if self.machine_type is None:
      # No machine type given: request a custom machine shape instead.
      cmd.flags['machine-type'] = 'custom-{0}-{1}'.format(
          self.cpus, self.memory)
    else:
      cmd.flags['machine-type'] = self.machine_type
    cmd.flags['metadata'] = util.MakeFormattedDefaultTags()
    cmd.flags['labels'] = util.MakeFormattedDefaultTags()
    # This command needs a long timeout due to the many minutes it
    # can take to provision a large GPU-accelerated GKE cluster.
    _, stderr, retcode = cmd.Issue(
        timeout=900, env=self._GetRequiredGkeEnv(), raise_on_failure=False)
    if retcode != 0:
      # Log specific type of failure, if known.
      if 'ZONE_RESOURCE_POOL_EXHAUSTED' in stderr:
        logging.exception('Container resources exhausted: %s', stderr)
        raise errors.Benchmarks.InsufficientCapacityCloudFailure(
            'Container resources exhausted in zone %s: %s' %
            (self.zone, stderr))
      raise errors.Resource.CreationError(stderr)

  def _PostCreate(self):
    """Acquire cluster authentication."""
    super(GkeCluster, self)._PostCreate()
    cmd = util.GcloudCommand(
        self, 'container', 'clusters', 'get-credentials', self.name)
    env = self._GetRequiredGkeEnv()
    env['KUBECONFIG'] = FLAGS.kubeconfig
    cmd.IssueRetryable(env=env)
    self._AddTags()
    if self.gpu_count:
      # Install NVIDIA drivers and permissive device access via daemon sets.
      kubernetes_helper.CreateFromFile(NVIDIA_DRIVER_SETUP_DAEMON_SET_SCRIPT)
      kubernetes_helper.CreateFromFile(
          data.ResourcePath(NVIDIA_UNRESTRICTED_PERMISSIONS_DAEMON_SET))

  def _AddTags(self):
    """Tags all VMs in the cluster."""
    vms_in_cluster = []
    for instance_group in self._GetInstanceGroups():
      vms_in_cluster.extend(self._GetInstancesFromInstanceGroup(instance_group))
    for vm_name in vms_in_cluster:
      cmd = util.GcloudCommand(self, 'compute', 'instances', 'add-metadata',
                               vm_name)
      cmd.flags['metadata'] = util.MakeFormattedDefaultTags()
      cmd.Issue()
      cmd = util.GcloudCommand(self, 'compute', 'disks', 'add-labels', vm_name)
      cmd.flags['labels'] = util.MakeFormattedDefaultTags()
      cmd.Issue()

  def _GetInstanceGroups(self):
    """Returns the names of the cluster's node-pool instance groups."""
    cmd = util.GcloudCommand(self, 'container', 'node-pools', 'list')
    cmd.flags['cluster'] = self.name
    stdout, _, _ = cmd.Issue()
    json_output = json.loads(stdout)
    instance_groups = []
    for node_pool in json_output:
      for group_url in node_pool['instanceGroupUrls']:
        instance_groups.append(group_url.split('/')[-1])  # last url part
    return instance_groups

  def _GetInstancesFromInstanceGroup(self, instance_group_name):
    """Returns the names of the instances in the given instance group."""
    cmd = util.GcloudCommand(self, 'compute', 'instance-groups',
                             'list-instances', instance_group_name)
    stdout, _, _ = cmd.Issue()
    json_output = json.loads(stdout)
    instances = []
    for instance in json_output:
      instances.append(instance['instance'].split('/')[-1])
    return instances

  def _Delete(self):
    """Deletes the cluster."""
    cmd = util.GcloudCommand(
        self, 'container', 'clusters', 'delete', self.name)
    cmd.Issue(timeout=450, raise_on_failure=False)

  def _Exists(self):
    """Returns True if the cluster exits."""
    cmd = util.GcloudCommand(
        self, 'container', 'clusters', 'describe', self.name)
    _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)
    return retcode == 0
Implement the _IsDeleting method for GCP container clusters to avoid timeout errors during deletion.
-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=270750768
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes/functions related to GKE (Google Container Engine)."""
import json
import logging
import os
from perfkitbenchmarker import container_service
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import kubernetes_helper
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.gcp import gce_virtual_machine
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
FLAGS.kubernetes_anti_affinity = False
NVIDIA_DRIVER_SETUP_DAEMON_SET_SCRIPT = 'https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/k8s-1.9/nvidia-driver-installer/cos/daemonset-preloaded.yaml'
NVIDIA_UNRESTRICTED_PERMISSIONS_DAEMON_SET = 'nvidia_unrestricted_permissions_daemonset.yml'
DEFAULT_CONTAINER_VERSION = 'latest'
class GoogleContainerRegistry(container_service.BaseContainerRegistry):
  """Class for building and storing container images on GCP."""

  CLOUD = providers.GCP

  def __init__(self, registry_spec):
    super(GoogleContainerRegistry, self).__init__(registry_spec)
    # Fall back to the default gcloud project when none is specified.
    self.project = self.project or util.GetDefaultProject()

  def GetFullRegistryTag(self, image):
    """Gets the full tag of the image."""
    # GCR hostnames are keyed by multi-region (e.g. us.gcr.io).
    region = util.GetMultiRegionFromRegion(util.GetRegionFromZone(self.zone))
    hostname = '{region}.gcr.io'.format(region=region)
    full_tag = '{hostname}/{project}/{name}'.format(
        hostname=hostname, project=self.project, name=image)
    return full_tag

  def Login(self):
    """No-op because Push() handles its own auth."""
    pass

  def Push(self, image):
    """Push a locally built image to the registry."""
    full_tag = self.GetFullRegistryTag(image.name)
    tag_cmd = ['docker', 'tag', image.name, full_tag]
    vm_util.IssueCommand(tag_cmd)
    # vm_util.IssueCommand() is used here instead of util.GcloudCommand()
    # because gcloud flags cannot be appended to the command since they
    # are interpreted as docker args instead.
    push_cmd = [
        FLAGS.gcloud_path, '--project', self.project,
        'docker', '--', 'push', full_tag
    ]
    vm_util.IssueCommand(push_cmd)

  def RemoteBuild(self, image):
    """Build the image remotely."""
    full_tag = self.GetFullRegistryTag(image.name)
    build_cmd = util.GcloudCommand(self, 'builds', 'submit',
                                   '--tag', full_tag, image.directory)
    # 'builds submit' does not accept a --zone flag.
    del build_cmd.flags['zone']
    build_cmd.Issue()
class GkeCluster(container_service.KubernetesCluster):
  """Class representing a Google Container Engine cluster."""

  CLOUD = providers.GCP

  def _GetRequiredGkeEnv(self):
    """Returns a copy of os.environ with GKE credential settings applied."""
    env = os.environ.copy()
    if self.use_application_default_credentials:
      env['CLOUDSDK_CONTAINER_USE_APPLICATION_DEFAULT_CREDENTIALS'] = 'true'
    return env

  def __init__(self, spec):
    super(GkeCluster, self).__init__(spec)
    self.project = spec.vm_spec.project
    self.min_cpu_platform = spec.vm_spec.min_cpu_platform
    self.gce_accelerator_type_override = FLAGS.gce_accelerator_type_override
    self.cluster_version = (FLAGS.container_cluster_version or
                            DEFAULT_CONTAINER_VERSION)
    # May be set to False in _Create() when a service account is used.
    self.use_application_default_credentials = True

  def GetResourceMetadata(self):
    """Returns a dict containing metadata about the cluster.
    Returns:
      dict mapping string property key to value.
    """
    result = super(GkeCluster, self).GetResourceMetadata()
    if self.gce_accelerator_type_override:
      result['accelerator_type_override'] = self.gce_accelerator_type_override
    result['container_cluster_version'] = self.cluster_version
    return result

  def _Create(self):
    """Creates the cluster."""
    # GPU and min-cpu-platform options require the 'beta' command surface.
    if self.min_cpu_platform or self.gpu_count:
      cmd = util.GcloudCommand(
          self, 'beta', 'container', 'clusters', 'create', self.name)
    else:
      cmd = util.GcloudCommand(
          self, 'container', 'clusters', 'create', self.name)
    cmd.flags['cluster-version'] = self.cluster_version
    if FLAGS.gke_enable_alpha:
      # Alpha clusters cannot have auto-repair/auto-upgrade enabled.
      cmd.args.append('--enable-kubernetes-alpha')
      cmd.args.append('--no-enable-autorepair')
      cmd.args.append('--no-enable-autoupgrade')
    user = util.GetDefaultUser()
    if FLAGS.gcp_service_account:
      cmd.flags['service-account'] = FLAGS.gcp_service_account
    elif 'gserviceaccount.com' in user:
      # The default user already is a service account; use it directly.
      cmd.flags['service-account'] = user
      self.use_application_default_credentials = False
    else:
      cmd.flags['scopes'] = 'cloud-platform'
    if self.gpu_count:
      cmd.flags['accelerator'] = (gce_virtual_machine.
                                  GenerateAcceleratorSpecString(self.gpu_type,
                                                                self.gpu_count))
    if self.min_cpu_platform:
      cmd.flags['min-cpu-platform'] = self.min_cpu_platform
    # Enable autoscaling only when min/max differ from the nominal node count.
    if self.min_nodes != self.num_nodes or self.max_nodes != self.num_nodes:
      cmd.args.append('--enable-autoscaling')
      cmd.flags['max-nodes'] = self.max_nodes
      cmd.flags['min-nodes'] = self.min_nodes
    cmd.flags['num-nodes'] = self.num_nodes
    if self.machine_type is None:
      # No machine type given: request a custom machine shape instead.
      cmd.flags['machine-type'] = 'custom-{0}-{1}'.format(
          self.cpus, self.memory)
    else:
      cmd.flags['machine-type'] = self.machine_type
    cmd.flags['metadata'] = util.MakeFormattedDefaultTags()
    cmd.flags['labels'] = util.MakeFormattedDefaultTags()
    # This command needs a long timeout due to the many minutes it
    # can take to provision a large GPU-accelerated GKE cluster.
    _, stderr, retcode = cmd.Issue(
        timeout=900, env=self._GetRequiredGkeEnv(), raise_on_failure=False)
    if retcode != 0:
      # Log specific type of failure, if known.
      if 'ZONE_RESOURCE_POOL_EXHAUSTED' in stderr:
        logging.exception('Container resources exhausted: %s', stderr)
        raise errors.Benchmarks.InsufficientCapacityCloudFailure(
            'Container resources exhausted in zone %s: %s' %
            (self.zone, stderr))
      raise errors.Resource.CreationError(stderr)

  def _PostCreate(self):
    """Acquire cluster authentication."""
    super(GkeCluster, self)._PostCreate()
    cmd = util.GcloudCommand(
        self, 'container', 'clusters', 'get-credentials', self.name)
    env = self._GetRequiredGkeEnv()
    env['KUBECONFIG'] = FLAGS.kubeconfig
    cmd.IssueRetryable(env=env)
    self._AddTags()
    if self.gpu_count:
      # Install NVIDIA drivers and permissive device access via daemon sets.
      kubernetes_helper.CreateFromFile(NVIDIA_DRIVER_SETUP_DAEMON_SET_SCRIPT)
      kubernetes_helper.CreateFromFile(
          data.ResourcePath(NVIDIA_UNRESTRICTED_PERMISSIONS_DAEMON_SET))

  def _AddTags(self):
    """Tags all VMs in the cluster."""
    vms_in_cluster = []
    for instance_group in self._GetInstanceGroups():
      vms_in_cluster.extend(self._GetInstancesFromInstanceGroup(instance_group))
    for vm_name in vms_in_cluster:
      cmd = util.GcloudCommand(self, 'compute', 'instances', 'add-metadata',
                               vm_name)
      cmd.flags['metadata'] = util.MakeFormattedDefaultTags()
      cmd.Issue()
      cmd = util.GcloudCommand(self, 'compute', 'disks', 'add-labels', vm_name)
      cmd.flags['labels'] = util.MakeFormattedDefaultTags()
      cmd.Issue()

  def _GetInstanceGroups(self):
    """Returns the names of the cluster's node-pool instance groups."""
    cmd = util.GcloudCommand(self, 'container', 'node-pools', 'list')
    cmd.flags['cluster'] = self.name
    stdout, _, _ = cmd.Issue()
    json_output = json.loads(stdout)
    instance_groups = []
    for node_pool in json_output:
      for group_url in node_pool['instanceGroupUrls']:
        instance_groups.append(group_url.split('/')[-1])  # last url part
    return instance_groups

  def _GetInstancesFromInstanceGroup(self, instance_group_name):
    """Returns the names of the instances in the given instance group."""
    cmd = util.GcloudCommand(self, 'compute', 'instance-groups',
                             'list-instances', instance_group_name)
    stdout, _, _ = cmd.Issue()
    json_output = json.loads(stdout)
    instances = []
    for instance in json_output:
      instances.append(instance['instance'].split('/')[-1])
    return instances

  def _IsDeleting(self):
    # The cluster is still being torn down as long as 'describe' returns
    # output for it (deletion was started asynchronously in _Delete()).
    cmd = util.GcloudCommand(
        self, 'container', 'clusters', 'describe', self.name)
    stdout, _, _ = cmd.Issue(raise_on_failure=False)
    return True if stdout else False

  def _Delete(self):
    """Deletes the cluster."""
    cmd = util.GcloudCommand(
        self, 'container', 'clusters', 'delete', self.name)
    # Delete asynchronously; _IsDeleting() polls until teardown completes.
    cmd.args.append('--async')
    cmd.Issue(raise_on_failure=False)

  def _Exists(self):
    """Returns True if the cluster exits."""
    cmd = util.GcloudCommand(
        self, 'container', 'clusters', 'describe', self.name)
    _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)
    return retcode == 0
|
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT."""

    # Attention: tools parse and update these lines.
    # Pinned TFRT revision and the SHA256 of its GitHub archive tarball.
    TFRT_COMMIT = "c27b720c93f76662ab6d0e0e507d1fc66ab22119"
    TFRT_SHA256 = "a60737a17bf074e4f3d6eeb97fa1a504021e74f8e5771eac602afc9d83ab6d07"

    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
Update the TFRT dependency to revision
https://github.com/tensorflow/runtime/commit/26e15bbcedf97a7509d969d6a909a6dbedc5e04b.
PiperOrigin-RevId: 486133126
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT."""

    # Attention: tools parse and update these lines.
    # Pinned TFRT revision and the SHA256 of its GitHub archive tarball.
    TFRT_COMMIT = "26e15bbcedf97a7509d969d6a909a6dbedc5e04b"
    TFRT_SHA256 = "f1a1daeb2d5641744f8e202fffdb0585a4005ad28d1ecdc0926dcd15249b8328"

    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import platform, re, os, sys, pkgutil, shutil
import mooseutils
from TestHarness import util
from TestHarness.StatusSystem import StatusSystem
from FactorySystem.MooseObject import MooseObject
from tempfile import SpooledTemporaryFile
import subprocess
from signal import SIGTERM
class Tester(MooseObject):
"""
Base class from which all tester objects are instanced.
"""
@staticmethod
def validParams():
    """Return the InputParameters object declaring every option a Tester
    spec block may set (common options, test filters and SQA metadata)."""
    params = MooseObject.validParams()

    # Common Options
    params.addRequiredParam('type', "The type of test of Tester to create for this test.")
    params.addParam('max_time', 300, "The maximum in seconds that the test will be allowed to run.")
    params.addParam('skip', "Provide a reason this test will be skipped.")
    params.addParam('deleted', "Tests that only show up when using the '-e' option (Permanently skipped or not implemented).")
    params.addParam('heavy', False, "Set to True if this test should only be run when the '--heavy' option is used.")
    params.addParam('group', [], "A list of groups for which this test belongs.")
    params.addParam('prereq', [], "A list of prereq tests that need to run successfully before launching this test.")
    params.addParam('skip_checks', False, "Tells the TestHarness to skip additional checks (This parameter is set automatically by the TestHarness during recovery tests)")
    params.addParam('scale_refine', 0, "The number of refinements to do when scaling")
    params.addParam('success_message', 'OK', "The successful message")
    params.addParam('cli_args', [], "Additional arguments to be passed to the test.")
    params.addParam('allow_test_objects', False, "Allow the use of test objects by adding --allow-test-objects to the command line.")
    params.addParam('valgrind', 'NONE', "Set to (NONE, NORMAL, HEAVY) to determine which configurations where valgrind will run.")
    params.addParam('tags', [], "A list of strings")
    params.addParam('max_buffer_size', None, "Bytes allowed in stdout/stderr before it is subjected to being trimmed. Set to -1 to ignore output size restrictions. "
                    "If 'max_buffer_size' is not set, the default value of 'None' triggers a reasonable value (e.g. 100 kB)")
    params.addParam('parallel_scheduling', False, "Allow all tests in test spec file to run in parallel (adheres to prereq rules).")

    # Test Filters
    params.addParam('platform', ['ALL'], "A list of platforms for which this test will run on. ('ALL', 'DARWIN', 'LINUX', 'SL', 'LION', 'ML')")
    params.addParam('compiler', ['ALL'], "A list of compilers for which this test is valid on. ('ALL', 'GCC', 'INTEL', 'CLANG')")
    params.addParam('petsc_version', ['ALL'], "A list of petsc versions for which this test will run on, supports normal comparison operators ('<', '>', etc...)")
    params.addParam('petsc_version_release', ['ALL'], "A test that runs against PETSc master if FALSE ('ALL', 'TRUE', 'FALSE')")
    params.addParam('slepc_version', [], "A list of slepc versions for which this test will run on, supports normal comparison operators ('<', '>', etc...)")
    params.addParam('mesh_mode', ['ALL'], "A list of mesh modes for which this test will run ('DISTRIBUTED', 'REPLICATED')")
    params.addParam('ad_mode', ['ALL'], "A list of AD modes for which this test will run ('SPARSE', 'NONSPARSE')")
    params.addParam('method', ['ALL'], "A test that runs under certain executable configurations ('ALL', 'OPT', 'DBG', 'DEVEL', 'OPROF', 'PRO')")
    params.addParam('library_mode', ['ALL'], "A test that only runs when libraries are built under certain configurations ('ALL', 'STATIC', 'DYNAMIC')")
    params.addParam('dtk', ['ALL'], "A test that runs only if DTK is detected ('ALL', 'TRUE', 'FALSE')")
    params.addParam('unique_ids', ['ALL'], "A test that runs only if UNIQUE_IDs are enabled ('ALL', 'TRUE', 'FALSE')")
    params.addParam('recover', True, "A test that runs with '--recover' mode enabled")
    params.addParam('vtk', ['ALL'], "A test that runs only if VTK is detected ('ALL', 'TRUE', 'FALSE')")
    params.addParam('tecplot', ['ALL'], "A test that runs only if Tecplot is detected ('ALL', 'TRUE', 'FALSE')")
    params.addParam('dof_id_bytes', ['ALL'], "A test that runs only if libmesh is configured --with-dof-id-bytes = a specific number, e.g. '4', '8'")
    params.addParam('petsc_debug', ['ALL'], "{False,True} -> test only runs when PETSc is configured with --with-debugging={0,1}, otherwise test always runs.")
    params.addParam('curl', ['ALL'], "A test that runs only if CURL is detected ('ALL', 'TRUE', 'FALSE')")
    params.addParam('threading', ['ALL'], "A list of threading models ths tests runs with ('ALL', 'TBB', 'OPENMP', 'PTHREADS', 'NONE')")
    params.addParam('superlu', ['ALL'], "A test that runs only if SuperLU is available via PETSc ('ALL', 'TRUE', 'FALSE')")
    params.addParam('chaco', ['ALL'], "A test that runs only if Chaco (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
    params.addParam('parmetis', ['ALL'], "A test that runs only if Parmetis (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
    params.addParam('party', ['ALL'], "A test that runs only if Party (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
    params.addParam('ptscotch', ['ALL'], "A test that runs only if PTScotch (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
    params.addParam('slepc', ['ALL'], "A test that runs only if SLEPc is available ('ALL', 'TRUE', 'FALSE')")
    params.addParam('unique_id', ['ALL'], "A test that runs only if libmesh is configured with --enable-unique-id ('ALL', 'TRUE', 'FALSE')")
    params.addParam('cxx11', ['ALL'], "A test that runs only if CXX11 is available ('ALL', 'TRUE', 'FALSE')")
    params.addParam('asio', ['ALL'], "A test that runs only if ASIO is available ('ALL', 'TRUE', 'FALSE')")
    params.addParam('fparser_jit', ['ALL'], "A test that runs only if FParser JIT is available ('ALL', 'TRUE', 'FALSE')")
    params.addParam('libpng', ['ALL'], "A test that runs only if libpng is available ('ALL', 'TRUE', 'FALSE')")
    params.addParam('depend_files', [], "A test that only runs if all depend files exist (files listed are expected to be relative to the base directory, not the test directory")
    params.addParam('env_vars', [], "A test that only runs if all the environment variables listed exist")
    params.addParam('should_execute', True, 'Whether or not the executable needs to be run.  Use this to chain together multiple tests based off of one executeable invocation')
    params.addParam('required_submodule', [], "A list of initialized submodules for which this test requires.")
    params.addParam('required_objects', [], "A list of required objects that are in the executable.")
    params.addParam('required_applications', [], "A list of required registered applications that are in the executable.")
    params.addParam('check_input', False, "Check for correct input file syntax")
    params.addParam('display_required', False, "The test requires and active display for rendering (i.e., ImageDiff tests).")
    params.addParam('timing', True, "If True, the test will be allowed to run with the timing flag (i.e. Manually turning on performance logging).")
    params.addParam('boost', ['ALL'], "A test that runs only if BOOST is detected ('ALL', 'TRUE', 'FALSE')")
    params.addParam('python', None, "Restrict the test to s specific version of python (e.g., 3.6 or 3.7.1).")
    params.addParam('required_python_packages', None, "Test will only run if the supplied python packages exist.")
    params.addParam('requires', None, "A list of programs required for the test to operate, as tested with shutil.which.")

    # SQA
    params.addParam("requirement", None, "The SQA requirement that this test satisfies (e.g., 'The Marker system shall provide means to mark elements for refinement within a box region.')")
    params.addParam("design", [], "The list of markdown files that contain the design(s) associated with this test (e.g., '/Markers/index.md /BoxMarker.md').")
    params.addParam("issues", [], "The list of github issues associated with this test (e.g., '#1234 #4321')")
    params.addParam("detail", None, "Details of SQA requirement for use within sub-blocks.")
    params.addParam("validation", False, "Set to True to mark test as a validation problem.")
    params.addParam("verification", False, "Set to True to mark test as a verification problem.")
    params.addParam("deprecated", False, "When True the test is no longer considered part SQA process and as such does not include the need for a requirement definition.")
    params.addParam("working_directory", None, "When set, TestHarness will enter this directory before running test")

    return params
# This is what will be checked for when we look for valid testers
# (class attribute probed by the harness during Tester discovery).
IS_TESTER = True
def __init__(self, name, params):
    """
    Initialize common Tester state from the parsed test specification.

    name: the test's name (forwarded to MooseObject)
    params: the parameter object produced by validParams()
    """
    MooseObject.__init__(self, name, params)
    self.specs = params
    # Handles on the spooled stdout/stderr files created by runCommand
    self.outfile = None
    self.errfile = None
    # Combined stdout+stderr text captured after the subprocess finishes
    self.joined_out = ''
    self.exit_code = 0
    # The currently running subprocess (None when nothing is executing)
    self.process = None
    self.tags = params['tags']
    # Caveat strings displayed alongside the final test status
    self.__caveats = set([])
    # Alternate text we want to print as part of our status instead of the
    # pre-formatted status text (SYNTAX PASS instead of OK for example)
    self.__tester_message = ''
    # Cached result of checkRunnableBase (None until first queried)
    self._runnable = None

    # Set up common parameters
    self.should_execute = self.specs['should_execute']
    self.check_input = self.specs['check_input']

    if self.specs["allow_test_objects"]:
        self.specs["cli_args"].append("--allow-test-objects")

    ### Enumerate the tester statuses we want to use
    self.test_status = StatusSystem()
    self.no_status = self.test_status.no_status
    self.queued = self.test_status.queued
    self.skip = self.test_status.skip
    self.silent = self.test_status.silent
    self.success = self.test_status.success
    self.fail = self.test_status.fail
    self.diff = self.test_status.diff
    self.deleted = self.test_status.deleted

    # Status groups consulted by isFail()/isSkip()
    self.__failed_statuses = [self.fail, self.diff, self.deleted]
    self.__skipped_statuses = [self.skip, self.silent]
def getStatus(self):
    """ Return the current status object from the StatusSystem. """
    return self.test_status.getStatus()

def setStatus(self, status, message=''):
    """ Set the current status, with an optional alternate status message. """
    self.__tester_message = message
    return self.test_status.setStatus(status)

def createStatus(self):
    """ Create (and return) a fresh status object. """
    return self.test_status.createStatus()
def previousTesterStatus(self, options, previous_storage=None):
    """
    Return a (status, message, caveats) tuple for this tester as found
    in the .previous_test_results.json file (or the supplied json object).
    """
    storage = previous_storage if previous_storage else options.results_storage
    # Results are stored per test directory, then per test name
    previous = storage.get(self.getTestDir(), {}).get(self.getTestName(), None)
    if previous:
        return (self.test_status.createStatus(str(previous['STATUS'])),
                str(previous['STATUS_MESSAGE']),
                previous['CAVEATS'])
    # No previous record: a fresh status with empty message/caveats
    return (self.test_status.createStatus(), '', '')
def getStatusMessage(self):
    """ Return the alternate status message set via setStatus (may be empty). """
    return self.__tester_message
# Return a boolean based on current status
def isNoStatus(self):
    """ Current status is the initial 'no status'. """
    return self.getStatus() == self.no_status
def isSkip(self):
    """ Current status is one of the skipped statuses (skip or silent). """
    return self.getStatus() in self.__skipped_statuses
def isQueued(self):
    """ Current status is queued. """
    return self.getStatus() == self.queued
def isSilent(self):
    """ Current status is silent (skipped without printing). """
    return self.getStatus() == self.silent
def isPass(self):
    """ Current status is success. """
    return self.getStatus() == self.success
def isFail(self):
    """ Current status is one of the failed statuses (fail, diff, deleted). """
    return self.getStatus() in self.__failed_statuses
def isDiff(self):
    """ Current status is diff. """
    return self.getStatus() == self.diff
def isDeleted(self):
    """ Current status is deleted. """
    return self.getStatus() == self.deleted
def getTestName(self):
    """ return test name """
    return self.specs['test_name']

def getPrereqs(self):
    """ return list of prerequisite tests this test depends on """
    return self.specs['prereq']

def getMooseDir(self):
    """ return moose directory """
    return self.specs['moose_dir']

def getTestDir(self):
    """ return directory in which this tester is located """
    # 'working_directory' (when set) is relative to the test directory
    if self.specs['working_directory']:
        return os.path.join(self.specs['test_dir'], self.specs['working_directory'])
    return self.specs['test_dir']
def getMinReportTime(self):
    """ return minimum time elapse before reporting a 'long running' status """
    return self.specs['min_reported_time']

def getMaxTime(self):
    """ return maximum time elapse before reporting a 'timeout' status """
    return float(self.specs['max_time'])

def getRunnable(self, options):
    """ return bool and cache results, if this test can run """
    # checkRunnableBase is potentially expensive; compute once and cache
    if self._runnable is None:
        self._runnable = self.checkRunnableBase(options)
    return self._runnable
def getInputFile(self):
    """ return the input file if applicable to this Tester """
    # Base implementation: no input file; derived testers override as needed
    return None

def getOutputFiles(self):
    """ return the output files if applicable to this Tester """
    # Base implementation: no output files; derived testers override as needed
    return []

def getOutput(self):
    """ Return the contents of stdout and stderr """
    return self.joined_out

def getCheckInput(self):
    """ Return whether this test is a syntax-only (check_input) test. """
    return self.check_input
def setValgrindMode(self, mode):
    """ Increase the allotted time for tests when running with the valgrind option """
    # Valgrind slows execution considerably; scale max_time accordingly
    multipliers = {'NORMAL': 2, 'HEAVY': 6}
    factor = multipliers.get(mode)
    if factor is not None:
        self.specs['max_time'] = float(self.specs['max_time']) * factor
def checkRunnable(self, options):
    """
    Derived method to determine whether this tester should be executed.

    The base implementation simply returns True. NOTE(review): this docstring
    previously described a (boolean, 'reason') tuple return, but the base class
    and checkRunnableBase treat the return value as a plain boolean; derived
    classes should return a bool and record reasons via addCaveats/setStatus.
    """
    return True
def shouldExecute(self):
    """
    return boolean for tester allowed to execute its command
    see .getCommand for more information
    """
    return self.should_execute

def prepare(self, options):
    """
    Method which is called prior to running the test. It can be used to cleanup files
    or do other preparations before the tester is run.
    """
    # Base implementation does nothing; derived testers override as needed
    return
def getThreads(self, options):
    """ return number of threads to use for this tester """
    return 1

def getProcs(self, options):
    """ return number of processors to use for this tester """
    return 1

def getSlots(self, options):
    """ return number of slots to use for this tester """
    # A "slot" is one schedulable unit: threads x processors
    return self.getThreads(options) * self.getProcs(options)

def getCommand(self, options):
    """ return the executable command that will be executed by the tester """
    # Base implementation: no command; derived testers must override
    return ''
def runCommand(self, cmd, cwd, timer, options):
    """
    Helper method for running external (sub)processes as part of the tester's execution.
    Runs the supplied command in the supplied working directory as a subprocess. The timer
    must be the same timer passed to the run method. Results from running the subprocess
    are stored in the tester's output and exit_code fields.

    cmd:     shell command string to execute. (Previously this argument was ignored and
             self.getCommand(options) was used instead; the two are identical for the
             default run() caller, which computes cmd the same way.)
    cwd:     working directory for the subprocess (previously also ignored in favor of
             self.getTestDir()).
    timer:   object whose start()/stop() bracket the subprocess execution.
    options: TestHarness options (unused here; kept for interface compatibility).
    """
    self.process = None
    try:
        # Spooled files keep small outputs in memory and spill large ones to disk
        f = SpooledTemporaryFile(max_size=1000000) # 1M character buffer
        e = SpooledTemporaryFile(max_size=100000)  # 100K character buffer
        # On Windows, there is an issue with path translation when the command is passed in
        # as a list, so use a shell string; the new process group lets killCommand
        # terminate the whole tree.
        if platform.system() == "Windows":
            process = subprocess.Popen(cmd, stdout=f, stderr=e, close_fds=False,
                                       shell=True, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP, cwd=cwd)
        else:
            # setsid places the child in its own process group so killCommand can
            # signal the entire group
            process = subprocess.Popen(cmd, stdout=f, stderr=e, close_fds=False,
                                       shell=True, preexec_fn=os.setsid, cwd=cwd)
    except Exception:
        print("Error in launching a new task", cmd)
        raise
    self.process = process
    self.outfile = f
    self.errfile = e

    timer.start()
    process.wait()
    timer.stop()

    self.exit_code = process.poll()
    self.outfile.flush()
    self.errfile.flush()

    # store the contents of output, and close the file
    self.joined_out = util.readOutput(self.outfile, self.errfile, self)
    self.outfile.close()
    self.errfile.close()
def killCommand(self):
    """
    Kills any currently executing process started by the runCommand method.

    On Windows the whole process tree is terminated via taskkill (falling back to
    Popen.terminate if taskkill is unavailable); on POSIX the entire process group
    created by runCommand (via setsid) is sent SIGTERM.
    """
    if self.process is not None:
        try:
            if platform.system() == "Windows":
                # shutil.which replaces the deprecated distutils.spawn.find_executable
                # (distutils is deprecated per PEP 632); shutil is already imported
                # at the top of this file.
                if shutil.which("taskkill"):
                    subprocess.call(['taskkill', '/F', '/T', '/PID', str(self.process.pid)])
                else:
                    self.process.terminate()
            else:
                pgid = os.getpgid(self.process.pid)
                os.killpg(pgid, SIGTERM)
        except OSError: # Process already terminated
            pass
def run(self, timer, options):
    """
    This is a method that is the tester's main execution code. Subclasses can override this
    method with custom code relevant to their specific testing needs. By default this method
    calls runCommand. runCommand is provided as a helper for running (external) subprocesses
    as part of the tester's execution and should be the *only* way subprocesses are executed
    if needed. The run method is responsible to call the start+stop methods on timer to record
    the time taken to run the actual test. start+stop can be called multiple times.

    timer:   timer object used to record test execution time
    options: the TestHarness options
    """
    cmd = self.getCommand(options)
    cwd = self.getTestDir()
    self.runCommand(cmd, cwd, timer, options)
def processResultsCommand(self, moose_dir, options):
    """ method to return the commands (list) used for processing results """
    return []

def processResults(self, moose_dir, options, output):
    """ method to process the results of a finished tester """
    # Base implementation does nothing; derived testers override as needed
    return

def hasRedirectedOutput(self, options):
    """ return bool on tester having redirected output """
    # Output is redirected only when requested AND running on multiple processors
    return (self.specs.isValid('redirect_output') and self.specs['redirect_output'] == True and self.getProcs(options) > 1)

def getRedirectedOutputFiles(self, options):
    """ return a list of redirected output """
    # One '<name>.processor.<p>' file per processor
    return [os.path.join(self.getTestDir(), self.name() + '.processor.{}'.format(p)) for p in range(self.getProcs(options))]
def addCaveats(self, *args):
    """
    Add caveat(s) which will be displayed with the final test status.

    Accepts any mix of strings and lists of strings; falsy entries (e.g. empty
    strings, None) are ignored. Returns the accumulated set of caveats.

    Note: the var-positional parameter was previously misnamed '*kwargs'; it is
    positional-only, so the rename is invisible to callers.
    """
    for caveat in [x for x in args if x]:
        # Lists are flattened into the set; everything else is added as-is
        if isinstance(caveat, list):
            self.__caveats.update(caveat)
        else:
            self.__caveats.add(caveat)
    return self.getCaveats()
def getCaveats(self):
    """ Return caveats accumulated by this tester """
    return self.__caveats

def clearCaveats(self):
    """ Clear any caveats stored in tester """
    self.__caveats = set([])
    return self.getCaveats()
def checkRunnableBase(self, options):
    """
    Method to check for caveats that would prevent this tester from
    executing correctly (or not at all).

    DO NOT override this method. Instead, see .checkRunnable()
    """
    # Accumulates skip reasons keyed by the check that produced them; consulted
    # (minus any user-ignored caveats) at the end of this method
    reasons = {}
    checks = options._checks

    # Tag filtering: when --runtags is supplied, at least one tag must match
    tag_match = False
    for t in self.tags:
        if t in options.runtags:
            tag_match = True
            break
    if len(options.runtags) > 0 and not tag_match:
        self.setStatus(self.silent)
        return False

    # If something has already deemed this test a failure
    if self.isFail():
        return False

    # Check if we only want to run syntax tests
    if options.check_input and not self.specs['check_input']:
        self.setStatus(self.silent)
        return False

    # Check if we want to exclude syntax tests
    if options.no_check_input and self.specs['check_input']:
        self.setStatus(self.silent)
        return False

    # Are we running only tests in a specific group?
    if options.group != 'ALL' and options.group not in self.specs['group']:
        self.setStatus(self.silent)
        return False
    if options.not_group != '' and options.not_group in self.specs['group']:
        self.setStatus(self.silent)
        return False

    # Store regexp for matching tests if --re is used
    if options.reg_exp:
        match_regexp = re.compile(options.reg_exp)

    # If --re then only test matching regexp. Needs to run before other SKIP methods
    # This also needs to be in its own bucket group. We normally print skipped messages.
    # But we do not want to print tests that didn't match regex.
    if options.reg_exp and not match_regexp.search(self.specs['test_name']):
        self.setStatus(self.silent)
        return False

    # Short circuit method and run this test if we are ignoring all caveats
    if options.ignored_caveats == 'all':
        # Still, we should abide by the derived classes
        return self.checkRunnable(options)

    # Check for deleted tests
    if self.specs.isValid('deleted'):
        reasons['deleted'] = str(self.specs['deleted'])

    # Skipped by external means (example: TestHarness part2 with --check-input)
    if self.isSkip() and self.getStatusMessage():
        reasons['skip'] = self.getStatusMessage()
    # Test is skipped
    elif self.specs.type('skip') is bool and self.specs['skip']:
        # Backwards compatible (no reason)
        reasons['skip'] = 'no reason'
    # 'skip' given as a string is treated as the skip reason
    elif self.specs.type('skip') is not bool and self.specs.isValid('skip'):
        reasons['skip'] = self.specs['skip']
    # If were testing for SCALE_REFINE, then only run tests with a SCALE_REFINE set
    elif (options.scaling) and self.specs['scale_refine'] == 0:
        self.setStatus(self.silent)
        return False
    # If we're testing with valgrind, then skip tests that require parallel or threads or don't meet the valgrind setting
    elif options.valgrind_mode != '':
        tmp_reason = ''
        if self.specs['valgrind'].upper() == 'NONE':
            tmp_reason = 'Valgrind==NONE'
        elif self.specs['valgrind'].upper() == 'HEAVY' and options.valgrind_mode.upper() == 'NORMAL':
            tmp_reason = 'Valgrind==HEAVY'
        elif int(self.specs['min_threads']) > 1:
            tmp_reason = 'Valgrind requires non-threaded'
        elif self.specs["check_input"]:
            tmp_reason = 'check_input==True'
        if tmp_reason != '':
            reasons['valgrind'] = tmp_reason
    # If we're running in recover mode skip tests that have recover = false
    elif options.enable_recover and self.specs['recover'] == False:
        reasons['recover'] = 'NO RECOVER'

    # Check for PETSc versions
    (petsc_status, petsc_version) = util.checkPetscVersion(checks, self.specs)
    if not petsc_status:
        reasons['petsc_version'] = 'using PETSc ' + str(checks['petsc_version']) + ' REQ: ' + petsc_version

    # Check for SLEPc versions
    (slepc_status, slepc_version) = util.checkSlepcVersion(checks, self.specs)
    if not slepc_status and len(self.specs['slepc_version']) != 0:
        if slepc_version != None:
            reasons['slepc_version'] = 'using SLEPc ' + str(checks['slepc_version']) + ' REQ: ' + slepc_version
        elif slepc_version == None:
            reasons['slepc_version'] = 'SLEPc is not installed'

    # PETSc and SLEPc is being explicitly checked above
    local_checks = ['platform', 'compiler', 'mesh_mode', 'ad_mode', 'method', 'library_mode', 'dtk', 'unique_ids', 'vtk', 'tecplot',
                    'petsc_debug', 'curl', 'superlu', 'cxx11', 'asio', 'unique_id', 'slepc', 'petsc_version_release', 'boost', 'fparser_jit',
                    'parmetis', 'chaco', 'party', 'ptscotch', 'threading', 'libpng']

    for check in local_checks:
        test_platforms = set()
        operator_display = '!='
        inverse_set = False
        for x in self.specs[check]:
            # A leading '!' inverts the check ("run everywhere except ...")
            if x[0] == '!':
                if inverse_set:
                    reasons[check] = 'Multiple Negation Unsupported'
                inverse_set = True
                operator_display = '=='
                x = x[1:] # Strip off the !
            x_upper = x.upper()
            if x_upper in test_platforms:
                reasons[x_upper] = 'Duplicate Entry or Negative of Existing Entry'
            test_platforms.add(x.upper())

        match_found = len(test_platforms.intersection(checks[check])) > 0
        # Either we didn't find the match when we were using normal "include" logic
        # or we did find the match when we wanted to exclude it
        if inverse_set == match_found:
            reasons[check] = re.sub(r'\[|\]', '', check).upper() + operator_display + ', '.join(test_platforms)

    # Check for heavy tests
    if options.all_tests or options.heavy_tests:
        if not self.specs['heavy'] and options.heavy_tests:
            reasons['heavy'] = 'NOT HEAVY'
    elif self.specs['heavy']:
        reasons['heavy'] = 'HEAVY'

    # There should only be one entry in self.specs['dof_id_bytes']
    for x in self.specs['dof_id_bytes']:
        if x != 'ALL' and not x in checks['dof_id_bytes']:
            reasons['dof_id_bytes'] = '--with-dof-id-bytes!=' + x

    # Check to make sure depend files exist
    for file in self.specs['depend_files']:
        if not os.path.isfile(os.path.join(self.specs['base_dir'], file)):
            reasons['depend_files'] = 'DEPEND FILES'

    # We calculate the exe_objects only if we need them
    if self.specs["required_objects"] and checks["exe_objects"] is None:
        checks["exe_objects"] = util.getExeObjects(self.specs["executable"])

    # Check to see if we have the required object names
    for var in self.specs['required_objects']:
        if var not in checks["exe_objects"]:
            reasons['required_objects'] = '%s not found in executable' % var
            break

    # We extract the registered apps only if we need them
    if self.specs["required_applications"] and checks["registered_apps"] is None:
        checks["registered_apps"] = util.getExeRegisteredApps(self.specs["executable"])

    # Check to see if we have the required application names
    for var in self.specs['required_applications']:
        if var not in checks["registered_apps"]:
            reasons['required_applications'] = 'App %s not registered in executable' % var
            break

    # Check to make sure required submodules are initialized
    for var in self.specs['required_submodule']:
        if var not in checks["submodules"]:
            reasons['required_submodule'] = '%s submodule not initialized' % var

    # Check to make sure environment variable exists
    for var in self.specs['env_vars']:
        if not os.environ.get(var):
            reasons['env_vars'] = 'ENV VAR NOT SET'

    # Check for display
    if self.specs['display_required'] and not os.getenv('DISPLAY', False):
        reasons['display_required'] = 'NO DISPLAY'

    # Check python version: an int pins the major version, a float pins
    # major.minor, and a string pins an exact dotted prefix (e.g. '3.7.1')
    py_version = self.specs['python']
    if (py_version is not None):
        if isinstance(py_version, int) and (py_version != sys.version_info[0]):
            reasons['python'] = 'PYTHON != {}'.format(py_version)
        elif isinstance(py_version, float) and (py_version != float('{}.{}'.format(*sys.version_info[0:2]))):
            reasons['python'] = 'PYTHON != {}'.format(py_version)
        elif isinstance(py_version, str):
            ver = py_version.split('.')
            if any(sys.version_info[i] != int(v) for i, v in enumerate(ver)):
                reasons['python'] = 'PYTHON != {}'.format(py_version)

    # Check python packages
    py_packages = self.specs['required_python_packages']
    if py_packages is not None:
        missing = mooseutils.check_configuration(py_packages.split(), message=False)
        if missing:
            reasons['python_packages_required'] = ', '.join(['no {}'.format(p) for p in missing])

    # Check for programs
    programs = self.specs['requires']
    if (programs is not None):
        missing = []
        for prog in programs.split():
            if shutil.which(prog) is None:
                missing.append(prog)
        if missing:
            reasons['requires'] = ', '.join(['no {}'.format(p) for p in missing])

    # Verify working_directory is relative and available
    if self.specs['working_directory']:
        # Must be a relative path
        if self.specs['working_directory'][:1] == os.path.sep:
            self.setStatus(self.fail, 'ABSOLUTE PATH DETECTED')
        # Must exist beneath the test directory
        elif not os.path.exists(os.path.join(self.specs['test_dir'], self.specs['working_directory'])):
            self.setStatus(self.fail, 'WORKING DIRECTORY NOT FOUND')

    ##### The below must be performed last to register all above caveats #####
    # Remove any matching user supplied caveats from accumulated checkRunnable caveats that
    # would normally produce a skipped test.
    caveat_list = set()
    if options.ignored_caveats:
        caveat_list = set([x.lower() for x in options.ignored_caveats.split()])

    if len(set(reasons.keys()) - caveat_list) > 0:
        tmp_reason = []
        for key, value in reasons.items():
            if key.lower() not in caveat_list:
                tmp_reason.append(value)

        flat_reason = ', '.join(tmp_reason)

        # If the test is deleted we still need to treat this differently
        self.addCaveats(flat_reason)
        if 'deleted' in reasons.keys():
            if options.extra_info:
                self.setStatus(self.deleted)
            else:
                self.setStatus(self.silent)
        elif self.getStatus() == self.fail:
            return False
        else:
            self.setStatus(self.skip)
        return False

    # Check the return values of the derived classes
    self._runnable = self.checkRunnable(options)
    return self._runnable
Refactor a small bit of code.

Refs #14962
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import platform, re, os, sys, pkgutil, shutil
import mooseutils
from TestHarness import util
from TestHarness.StatusSystem import StatusSystem
from FactorySystem.MooseObject import MooseObject
from tempfile import SpooledTemporaryFile
import subprocess
from signal import SIGTERM
class Tester(MooseObject):
"""
Base class from which all tester objects are instanced.
"""
@staticmethod
def validParams():
    """
    Return the InputParameters object declaring every parameter a test
    specification may set: common options, test filters, and SQA metadata.
    """
    params = MooseObject.validParams()

    # Common Options
    params.addRequiredParam('type', "The type of test of Tester to create for this test.")
    params.addParam('max_time', 300, "The maximum time in seconds that the test will be allowed to run.")
    params.addParam('skip', "Provide a reason this test will be skipped.")
    params.addParam('deleted', "Tests that only show up when using the '-e' option (Permanently skipped or not implemented).")
    params.addParam('heavy', False, "Set to True if this test should only be run when the '--heavy' option is used.")
    params.addParam('group', [], "A list of groups for which this test belongs.")
    params.addParam('prereq', [], "A list of prereq tests that need to run successfully before launching this test.")
    params.addParam('skip_checks', False, "Tells the TestHarness to skip additional checks (This parameter is set automatically by the TestHarness during recovery tests)")
    params.addParam('scale_refine', 0, "The number of refinements to do when scaling")
    params.addParam('success_message', 'OK', "The successful message")
    params.addParam('cli_args', [], "Additional arguments to be passed to the test.")
    params.addParam('allow_test_objects', False, "Allow the use of test objects by adding --allow-test-objects to the command line.")
    params.addParam('valgrind', 'NONE', "Set to (NONE, NORMAL, HEAVY) to determine which configurations where valgrind will run.")
    params.addParam('tags', [], "A list of strings")
    params.addParam('max_buffer_size', None, "Bytes allowed in stdout/stderr before it is subjected to being trimmed. Set to -1 to ignore output size restrictions. "
                    "If 'max_buffer_size' is not set, the default value of 'None' triggers a reasonable value (e.g. 100 kB)")
    params.addParam('parallel_scheduling', False, "Allow all tests in test spec file to run in parallel (adheres to prereq rules).")

    # Test Filters
    params.addParam('platform', ['ALL'], "A list of platforms for which this test will run on. ('ALL', 'DARWIN', 'LINUX', 'SL', 'LION', 'ML')")
    params.addParam('compiler', ['ALL'], "A list of compilers for which this test is valid on. ('ALL', 'GCC', 'INTEL', 'CLANG')")
    params.addParam('petsc_version', ['ALL'], "A list of petsc versions for which this test will run on, supports normal comparison operators ('<', '>', etc...)")
    params.addParam('petsc_version_release', ['ALL'], "A test that runs against PETSc master if FALSE ('ALL', 'TRUE', 'FALSE')")
    params.addParam('slepc_version', [], "A list of slepc versions for which this test will run on, supports normal comparison operators ('<', '>', etc...)")
    params.addParam('mesh_mode', ['ALL'], "A list of mesh modes for which this test will run ('DISTRIBUTED', 'REPLICATED')")
    params.addParam('ad_mode', ['ALL'], "A list of AD modes for which this test will run ('SPARSE', 'NONSPARSE')")
    params.addParam('method', ['ALL'], "A test that runs under certain executable configurations ('ALL', 'OPT', 'DBG', 'DEVEL', 'OPROF', 'PRO')")
    params.addParam('library_mode', ['ALL'], "A test that only runs when libraries are built under certain configurations ('ALL', 'STATIC', 'DYNAMIC')")
    params.addParam('dtk', ['ALL'], "A test that runs only if DTK is detected ('ALL', 'TRUE', 'FALSE')")
    params.addParam('unique_ids', ['ALL'], "A test that runs only if UNIQUE_IDs are enabled ('ALL', 'TRUE', 'FALSE')")
    params.addParam('recover', True, "A test that runs with '--recover' mode enabled")
    params.addParam('vtk', ['ALL'], "A test that runs only if VTK is detected ('ALL', 'TRUE', 'FALSE')")
    params.addParam('tecplot', ['ALL'], "A test that runs only if Tecplot is detected ('ALL', 'TRUE', 'FALSE')")
    params.addParam('dof_id_bytes', ['ALL'], "A test that runs only if libmesh is configured --with-dof-id-bytes = a specific number, e.g. '4', '8'")
    params.addParam('petsc_debug', ['ALL'], "{False,True} -> test only runs when PETSc is configured with --with-debugging={0,1}, otherwise test always runs.")
    params.addParam('curl', ['ALL'], "A test that runs only if CURL is detected ('ALL', 'TRUE', 'FALSE')")
    # Typo fix: "ths tests runs" -> "this test runs"
    params.addParam('threading', ['ALL'], "A list of threading models this test runs with ('ALL', 'TBB', 'OPENMP', 'PTHREADS', 'NONE')")
    params.addParam('superlu', ['ALL'], "A test that runs only if SuperLU is available via PETSc ('ALL', 'TRUE', 'FALSE')")
    params.addParam('chaco', ['ALL'], "A test that runs only if Chaco (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
    params.addParam('parmetis', ['ALL'], "A test that runs only if Parmetis (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
    params.addParam('party', ['ALL'], "A test that runs only if Party (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
    params.addParam('ptscotch', ['ALL'], "A test that runs only if PTScotch (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
    params.addParam('slepc', ['ALL'], "A test that runs only if SLEPc is available ('ALL', 'TRUE', 'FALSE')")
    params.addParam('unique_id', ['ALL'], "A test that runs only if libmesh is configured with --enable-unique-id ('ALL', 'TRUE', 'FALSE')")
    params.addParam('cxx11', ['ALL'], "A test that runs only if CXX11 is available ('ALL', 'TRUE', 'FALSE')")
    params.addParam('asio', ['ALL'], "A test that runs only if ASIO is available ('ALL', 'TRUE', 'FALSE')")
    params.addParam('fparser_jit', ['ALL'], "A test that runs only if FParser JIT is available ('ALL', 'TRUE', 'FALSE')")
    params.addParam('libpng', ['ALL'], "A test that runs only if libpng is available ('ALL', 'TRUE', 'FALSE')")
    # Typo fix: added the missing closing parenthesis in the help text
    params.addParam('depend_files', [], "A test that only runs if all depend files exist (files listed are expected to be relative to the base directory, not the test directory)")
    params.addParam('env_vars', [], "A test that only runs if all the environment variables listed exist")
    # Typo fix: "executeable" -> "executable"
    params.addParam('should_execute', True, 'Whether or not the executable needs to be run. Use this to chain together multiple tests based off of one executable invocation')
    params.addParam('required_submodule', [], "A list of initialized submodules for which this test requires.")
    params.addParam('required_objects', [], "A list of required objects that are in the executable.")
    params.addParam('required_applications', [], "A list of required registered applications that are in the executable.")
    params.addParam('check_input', False, "Check for correct input file syntax")
    # Typo fix: "requires and active display" -> "requires an active display"
    params.addParam('display_required', False, "The test requires an active display for rendering (i.e., ImageDiff tests).")
    params.addParam('timing', True, "If True, the test will be allowed to run with the timing flag (i.e. Manually turning on performance logging).")
    params.addParam('boost', ['ALL'], "A test that runs only if BOOST is detected ('ALL', 'TRUE', 'FALSE')")
    # Typo fix: "to s specific version" -> "to a specific version"
    params.addParam('python', None, "Restrict the test to a specific version of python (e.g., 3.6 or 3.7.1).")
    params.addParam('required_python_packages', None, "Test will only run if the supplied python packages exist.")
    params.addParam('requires', None, "A list of programs required for the test to operate, as tested with shutil.which.")

    # SQA
    params.addParam("requirement", None, "The SQA requirement that this test satisfies (e.g., 'The Marker system shall provide means to mark elements for refinement within a box region.')")
    params.addParam("design", [], "The list of markdown files that contain the design(s) associated with this test (e.g., '/Markers/index.md /BoxMarker.md').")
    params.addParam("issues", [], "The list of github issues associated with this test (e.g., '#1234 #4321')")
    params.addParam("detail", None, "Details of SQA requirement for use within sub-blocks.")
    params.addParam("validation", False, "Set to True to mark test as a validation problem.")
    params.addParam("verification", False, "Set to True to mark test as a verification problem.")
    # Typo fix: "part SQA process" -> "part of the SQA process"
    params.addParam("deprecated", False, "When True the test is no longer considered part of the SQA process and as such does not include the need for a requirement definition.")
    params.addParam("working_directory", None, "When set, TestHarness will enter this directory before running test")

    return params
# This is what will be checked for when we look for valid testers
# (the TestHarness probes candidate classes for this marker attribute)
IS_TESTER = True
def __init__(self, name, params):
    """
    Initialize common Tester state from the parsed test specification.

    name: the test's name (forwarded to MooseObject)
    params: the parameter object produced by validParams()
    """
    MooseObject.__init__(self, name, params)
    self.specs = params
    # Handles on the spooled stdout/stderr files created by runCommand
    self.outfile = None
    self.errfile = None
    # Combined stdout+stderr text captured after the subprocess finishes
    self.joined_out = ''
    self.exit_code = 0
    # The currently running subprocess (None when nothing is executing)
    self.process = None
    self.tags = params['tags']
    # Caveat strings displayed alongside the final test status
    self.__caveats = set([])
    # Alternate text we want to print as part of our status instead of the
    # pre-formatted status text (SYNTAX PASS instead of OK for example)
    self.__tester_message = ''
    # Cached result of checkRunnableBase (None until first queried)
    self._runnable = None

    # Set up common parameters
    self.should_execute = self.specs['should_execute']
    self.check_input = self.specs['check_input']

    if self.specs["allow_test_objects"]:
        self.specs["cli_args"].append("--allow-test-objects")

    ### Enumerate the tester statuses we want to use
    self.test_status = StatusSystem()
    self.no_status = self.test_status.no_status
    self.queued = self.test_status.queued
    self.skip = self.test_status.skip
    self.silent = self.test_status.silent
    self.success = self.test_status.success
    self.fail = self.test_status.fail
    self.diff = self.test_status.diff
    self.deleted = self.test_status.deleted

    # Status groups consulted by isFail()/isSkip()
    self.__failed_statuses = [self.fail, self.diff, self.deleted]
    self.__skipped_statuses = [self.skip, self.silent]
def getStatus(self):
    """ Return the current status object from the StatusSystem. """
    return self.test_status.getStatus()

def setStatus(self, status, message=''):
    """ Set the current status, with an optional alternate status message. """
    self.__tester_message = message
    return self.test_status.setStatus(status)

def createStatus(self):
    """ Create (and return) a fresh status object. """
    return self.test_status.createStatus()

# Return a tuple (status, message, caveats) for this tester as found
# in the .previous_test_results.json file (or supplied json object)
def previousTesterStatus(self, options, previous_storage=None):
    if not previous_storage:
        previous_storage = options.results_storage
    # Results are stored per test directory, then per test name
    status_exists = previous_storage.get(self.getTestDir(), {}).get(self.getTestName(), None)
    # Default: a fresh status with empty message/caveats
    status = (self.test_status.createStatus(), '', '')
    if status_exists:
        status = (self.test_status.createStatus(str(status_exists['STATUS'])),
                  str(status_exists['STATUS_MESSAGE']),
                  status_exists['CAVEATS'])
    return (status)

def getStatusMessage(self):
    """ Return the alternate status message set via setStatus (may be empty). """
    return self.__tester_message

# Return a boolean based on current status
def isNoStatus(self):
    """ Current status is the initial 'no status'. """
    return self.getStatus() == self.no_status
def isSkip(self):
    """ Current status is one of the skipped statuses (skip or silent). """
    return self.getStatus() in self.__skipped_statuses
def isQueued(self):
    """ Current status is queued. """
    return self.getStatus() == self.queued
def isSilent(self):
    """ Current status is silent (skipped without printing). """
    return self.getStatus() == self.silent
def isPass(self):
    """ Current status is success. """
    return self.getStatus() == self.success
def isFail(self):
    """ Current status is one of the failed statuses (fail, diff, deleted). """
    return self.getStatus() in self.__failed_statuses
def isDiff(self):
    """ Current status is diff. """
    return self.getStatus() == self.diff
def isDeleted(self):
    """ Current status is deleted. """
    return self.getStatus() == self.deleted

def getTestName(self):
    """ return test name """
    return self.specs['test_name']

def getPrereqs(self):
    """ return list of prerequisite tests this test depends on """
    return self.specs['prereq']

def getMooseDir(self):
    """ return moose directory """
    return self.specs['moose_dir']

def getTestDir(self):
    """ return directory in which this tester is located """
    # 'working_directory' (when set) is relative to the test directory
    if self.specs['working_directory']:
        return os.path.join(self.specs['test_dir'], self.specs['working_directory'])
    return self.specs['test_dir']

def getMinReportTime(self):
    """ return minimum time elapse before reporting a 'long running' status """
    return self.specs['min_reported_time']

def getMaxTime(self):
    """ return maximum time elapse before reporting a 'timeout' status """
    return float(self.specs['max_time'])

def getRunnable(self, options):
    """ return bool and cache results, if this test can run """
    # checkRunnableBase is potentially expensive; compute once and cache
    if self._runnable is None:
        self._runnable = self.checkRunnableBase(options)
    return self._runnable

def getInputFile(self):
    """ return the input file if applicable to this Tester """
    # Base implementation: no input file; derived testers override as needed
    return None

def getOutputFiles(self):
    """ return the output files if applicable to this Tester """
    # Base implementation: no output files; derived testers override as needed
    return []

def getOutput(self):
    """ Return the contents of stdout and stderr """
    return self.joined_out

def getCheckInput(self):
    """ Return whether this test is a syntax-only (check_input) test. """
    return self.check_input

def setValgrindMode(self, mode):
    """ Increase the alloted time for tests when running with the valgrind option """
    # Valgrind slows execution considerably; scale max_time accordingly
    if mode == 'NORMAL':
        self.specs['max_time'] = float(self.specs['max_time']) * 2
    elif mode == 'HEAVY':
        self.specs['max_time'] = float(self.specs['max_time']) * 6

def checkRunnable(self, options):
    """
    Derived method to determine whether this tester should be executed.

    The base implementation simply returns True. NOTE(review): this docstring
    previously described a (boolean, 'reason') tuple return, but the base class
    and checkRunnableBase treat the return value as a plain boolean; derived
    classes should return a bool and record reasons via addCaveats/setStatus.
    """
    return True

def shouldExecute(self):
    """
    return boolean for tester allowed to execute its command
    see .getCommand for more information
    """
    return self.should_execute

def prepare(self, options):
    """
    Method which is called prior to running the test. It can be used to cleanup files
    or do other preparations before the tester is run.
    """
    # Base implementation does nothing; derived testers override as needed
    return

def getThreads(self, options):
    """ return number of threads to use for this tester """
    return 1

def getProcs(self, options):
    """ return number of processors to use for this tester """
    return 1

def getSlots(self, options):
    """ return number of slots to use for this tester """
    # A "slot" is one schedulable unit: threads x processors
    return self.getThreads(options) * self.getProcs(options)

def getCommand(self, options):
    """ return the executable command that will be executed by the tester """
    # Base implementation: no command; derived testers must override
    return ''
def runCommand(self, cmd, cwd, timer, options):
"""
Helper method for running external (sub)processes as part of the tester's execution. This
uses the tester's getCommand and getTestDir methods to run a subprocess. The timer must
be the same timer passed to the run method. Results from running the subprocess is stored
in the tester's output and exit_code fields.
"""
cmd = self.getCommand(options)
cwd = self.getTestDir()
self.process = None
try:
f = SpooledTemporaryFile(max_size=1000000) # 1M character buffer
e = SpooledTemporaryFile(max_size=100000) # 100K character buffer
# On Windows, there is an issue with path translation when the command is passed in
# as a list.
if platform.system() == "Windows":
process = subprocess.Popen(cmd, stdout=f, stderr=e, close_fds=False,
shell=True, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP, cwd=cwd)
else:
process = subprocess.Popen(cmd, stdout=f, stderr=e, close_fds=False,
shell=True, preexec_fn=os.setsid, cwd=cwd)
except:
print("Error in launching a new task", cmd)
raise
self.process = process
self.outfile = f
self.errfile = e
timer.start()
process.wait()
timer.stop()
self.exit_code = process.poll()
self.outfile.flush()
self.errfile.flush()
# store the contents of output, and close the file
self.joined_out = util.readOutput(self.outfile, self.errfile, self)
self.outfile.close()
self.errfile.close()
    def killCommand(self):
        """
        Kills any currently executing process started by the runCommand method.
        Best effort: errors from a process that already exited are swallowed.
        """
        if self.process is not None:
            try:
                if platform.system() == "Windows":
                    from distutils import spawn
                    # taskkill /T terminates the entire child process tree;
                    # fall back to terminate() when taskkill is unavailable.
                    if spawn.find_executable("taskkill"):
                        subprocess.call(['taskkill', '/F', '/T', '/PID', str(self.process.pid)])
                    else:
                        self.process.terminate()
                else:
                    # The child was launched with setsid (see runCommand), so
                    # signalling its process group takes down the whole tree.
                    pgid = os.getpgid(self.process.pid)
                    os.killpg(pgid, SIGTERM)
            except OSError: # Process already terminated
                pass
def run(self, timer, options):
"""
This is a method that is the tester's main execution code. Subclasses can override this
method with custom code relevant to their specific testing needs. By default this method
calls runCommand. runCommand is provided as a helper for running (external) subprocesses
as part of the tester's execution and should be the *only* way subprocesses are executed
if needed. The run method is responsible to call the start+stop methods on timer to record
the time taken to run the actual test. start+stop can be called multiple times.
"""
cmd = self.getCommand(options)
cwd = self.getTestDir()
self.runCommand(cmd, cwd, timer, options)
def processResultsCommand(self, moose_dir, options):
""" method to return the commands (list) used for processing results """
return []
def processResults(self, moose_dir, options, output):
""" method to process the results of a finished tester """
return
def hasRedirectedOutput(self, options):
""" return bool on tester having redirected output """
return (self.specs.isValid('redirect_output') and self.specs['redirect_output'] == True and self.getProcs(options) > 1)
def getRedirectedOutputFiles(self, options):
""" return a list of redirected output """
return [os.path.join(self.getTestDir(), self.name() + '.processor.{}'.format(p)) for p in range(self.getProcs(options))]
def addCaveats(self, *kwargs):
""" Add caveat(s) which will be displayed with the final test status """
for i in [x for x in kwargs if x]:
if type(i) == type([]):
self.__caveats.update(i)
else:
self.__caveats.add(i)
return self.getCaveats()
def getCaveats(self):
""" Return caveats accumalted by this tester """
return self.__caveats
def clearCaveats(self):
""" Clear any caveats stored in tester """
self.__caveats = set([])
return self.getCaveats()
    def checkRunnableBase(self, options):
        """
        Method to check for caveats that would prevent this tester from
        executing correctly (or not at all).
        Collects every applicable skip reason into ``reasons``; unless all of
        them are covered by --ignore, the test is marked skipped/silent/deleted
        and False is returned. Otherwise the decision is delegated to the
        derived class via checkRunnable().
        DO NOT override this method. Instead, see .checkRunnable()
        """
        # Accumulated skip reasons, keyed by the spec/check that produced them.
        reasons = {}
        checks = options._checks
        # --tags filtering: when any run tags were requested, at least one of
        # this tester's tags must match or the test is silently dropped.
        tag_match = False
        for t in self.tags:
            if t in options.runtags:
                tag_match = True
                break
        if len(options.runtags) > 0 and not tag_match:
            self.setStatus(self.silent)
            return False
        # If something has already deemed this test a failure
        if self.isFail():
            return False
        # Check if we only want to run syntax tests
        if options.check_input and not self.specs['check_input']:
            self.setStatus(self.silent)
            return False
        # Check if we want to exclude syntax tests
        if options.no_check_input and self.specs['check_input']:
            self.setStatus(self.silent)
            return False
        # Are we running only tests in a specific group?
        if options.group != 'ALL' and options.group not in self.specs['group']:
            self.setStatus(self.silent)
            return False
        if options.not_group != '' and options.not_group in self.specs['group']:
            self.setStatus(self.silent)
            return False
        # Store regexp for matching tests if --re is used
        if options.reg_exp:
            match_regexp = re.compile(options.reg_exp)
        # If --re then only test matching regexp. Needs to run before other SKIP methods
        # This also needs to be in its own bucket group. We normally print skipped messages.
        # But we do not want to print tests that didn't match regex.
        if options.reg_exp and not match_regexp.search(self.specs['test_name']):
            self.setStatus(self.silent)
            return False
        # Short circuit method and run this test if we are ignoring all caveats
        if options.ignored_caveats == 'all':
            # Still, we should abide by the derived classes
            return self.checkRunnable(options)
        # Check for deleted tests
        if self.specs.isValid('deleted'):
            reasons['deleted'] = str(self.specs['deleted'])
        # Skipped by external means (example: TestHarness part2 with --check-input)
        if self.isSkip() and self.getStatusMessage():
            reasons['skip'] = self.getStatusMessage()
        # Test is skipped
        elif self.specs.type('skip') is bool and self.specs['skip']:
            # Backwards compatible (no reason)
            reasons['skip'] = 'no reason'
        elif self.specs.type('skip') is not bool and self.specs.isValid('skip'):
            reasons['skip'] = self.specs['skip']
        # If were testing for SCALE_REFINE, then only run tests with a SCALE_REFINE set
        elif (options.scaling) and self.specs['scale_refine'] == 0:
            self.setStatus(self.silent)
            return False
        # If we're testing with valgrind, then skip tests that require parallel or threads or don't meet the valgrind setting
        elif options.valgrind_mode != '':
            tmp_reason = ''
            if self.specs['valgrind'].upper() == 'NONE':
                tmp_reason = 'Valgrind==NONE'
            elif self.specs['valgrind'].upper() == 'HEAVY' and options.valgrind_mode.upper() == 'NORMAL':
                tmp_reason = 'Valgrind==HEAVY'
            elif int(self.specs['min_threads']) > 1:
                tmp_reason = 'Valgrind requires non-threaded'
            elif self.specs["check_input"]:
                tmp_reason = 'check_input==True'
            if tmp_reason != '':
                reasons['valgrind'] = tmp_reason
        # If we're running in recover mode skip tests that have recover = false
        elif options.enable_recover and self.specs['recover'] == False:
            reasons['recover'] = 'NO RECOVER'
        # Check for PETSc versions
        (petsc_status, petsc_version) = util.checkPetscVersion(checks, self.specs)
        if not petsc_status:
            reasons['petsc_version'] = 'using PETSc ' + str(checks['petsc_version']) + ' REQ: ' + petsc_version
        # Check for SLEPc versions
        (slepc_status, slepc_version) = util.checkSlepcVersion(checks, self.specs)
        if not slepc_status and len(self.specs['slepc_version']) != 0:
            if slepc_version != None:
                reasons['slepc_version'] = 'using SLEPc ' + str(checks['slepc_version']) + ' REQ: ' + slepc_version
            elif slepc_version == None:
                reasons['slepc_version'] = 'SLEPc is not installed'
        # PETSc and SLEPc is being explicitly checked above
        local_checks = ['platform', 'compiler', 'mesh_mode', 'ad_mode', 'method', 'library_mode', 'dtk', 'unique_ids', 'vtk', 'tecplot',
                        'petsc_debug', 'curl', 'superlu', 'cxx11', 'asio', 'unique_id', 'slepc', 'petsc_version_release', 'boost', 'fparser_jit',
                        'parmetis', 'chaco', 'party', 'ptscotch', 'threading', 'libpng']
        for check in local_checks:
            test_platforms = set()
            operator_display = '!='
            inverse_set = False
            for x in self.specs[check]:
                # Entries prefixed with '!' invert the check: the test runs
                # only when the value is NOT among the build's capabilities.
                if x[0] == '!':
                    if inverse_set:
                        reasons[check] = 'Multiple Negation Unsupported'
                    inverse_set = True
                    operator_display = '=='
                    x = x[1:] # Strip off the !
                x_upper = x.upper()
                if x_upper in test_platforms:
                    reasons[x_upper] = 'Duplicate Entry or Negative of Existing Entry'
                test_platforms.add(x.upper())
            match_found = len(test_platforms.intersection(checks[check])) > 0
            # Either we didn't find the match when we were using normal "include" logic
            # or we did find the match when we wanted to exclude it
            if inverse_set == match_found:
                reasons[check] = re.sub(r'\[|\]', '', check).upper() + operator_display + ', '.join(test_platforms)
        # Check for heavy tests
        if options.all_tests or options.heavy_tests:
            if not self.specs['heavy'] and options.heavy_tests:
                reasons['heavy'] = 'NOT HEAVY'
        elif self.specs['heavy']:
            reasons['heavy'] = 'HEAVY'
        # There should only be one entry in self.specs['dof_id_bytes']
        for x in self.specs['dof_id_bytes']:
            if x != 'ALL' and not x in checks['dof_id_bytes']:
                reasons['dof_id_bytes'] = '--with-dof-id-bytes!=' + x
        # Check to make sure depend files exist
        for file in self.specs['depend_files']:
            if not os.path.isfile(os.path.join(self.specs['base_dir'], file)):
                reasons['depend_files'] = 'DEPEND FILES'
        # We calculate the exe_objects only if we need them
        if self.specs["required_objects"] and checks["exe_objects"] is None:
            checks["exe_objects"] = util.getExeObjects(self.specs["executable"])
        # Check to see if we have the required object names
        for var in self.specs['required_objects']:
            if var not in checks["exe_objects"]:
                reasons['required_objects'] = '%s not found in executable' % var
                break
        # We extract the registered apps only if we need them
        if self.specs["required_applications"] and checks["registered_apps"] is None:
            checks["registered_apps"] = util.getExeRegisteredApps(self.specs["executable"])
        # Check to see if we have the required application names
        for var in self.specs['required_applications']:
            if var not in checks["registered_apps"]:
                reasons['required_applications'] = 'App %s not registered in executable' % var
                break
        # Check to make sure required submodules are initialized
        for var in self.specs['required_submodule']:
            if var not in checks["submodules"]:
                reasons['required_submodule'] = '%s submodule not initialized' % var
        # Check to make sure environment variable exists
        for var in self.specs['env_vars']:
            if not os.environ.get(var):
                reasons['env_vars'] = 'ENV VAR NOT SET'
        # Check for display
        if self.specs['display_required'] and not os.getenv('DISPLAY', False):
            reasons['display_required'] = 'NO DISPLAY'
        # Check python version
        py_version = self.specs['python']
        if (py_version is not None):
            # The spec may pin the major (int), major.minor (float) or an
            # arbitrary dotted prefix (str) of the interpreter version.
            if isinstance(py_version, int) and (py_version != sys.version_info[0]):
                reasons['python'] = 'PYTHON != {}'.format(py_version)
            elif isinstance(py_version, float) and (py_version != float('{}.{}'.format(*sys.version_info[0:2]))):
                reasons['python'] = 'PYTHON != {}'.format(py_version)
            elif isinstance(py_version, str):
                ver = py_version.split('.')
                if any(sys.version_info[i] != int(v) for i, v in enumerate(ver)):
                    reasons['python'] = 'PYTHON != {}'.format(py_version)
        # Check python packages
        py_packages = self.specs['required_python_packages']
        if py_packages is not None:
            missing = mooseutils.check_configuration(py_packages.split(), message=False)
            if missing:
                reasons['python_packages_required'] = ', '.join(['no {}'.format(p) for p in missing])
        # Check for programs
        programs = self.specs['requires']
        if (programs is not None):
            missing = []
            for prog in programs.split():
                if shutil.which(prog) is None:
                    missing.append(prog)
            if missing:
                reasons['requires'] = ', '.join(['no {}'.format(p) for p in missing])
        # Verify working_directory is relative and available
        if self.specs['working_directory']:
            if self.specs['working_directory'][:1] == os.path.sep:
                self.setStatus(self.fail, 'ABSOLUTE PATH DETECTED')
            elif not os.path.exists(self.getTestDir()):
                self.setStatus(self.fail, 'WORKING DIRECTORY NOT FOUND')
        ##### The below must be performed last to register all above caveats #####
        # Remove any matching user supplied caveats from accumulated checkRunnable caveats that
        # would normally produce a skipped test.
        caveat_list = set()
        if options.ignored_caveats:
            caveat_list = set([x.lower() for x in options.ignored_caveats.split()])
        if len(set(reasons.keys()) - caveat_list) > 0:
            tmp_reason = []
            for key, value in reasons.items():
                if key.lower() not in caveat_list:
                    tmp_reason.append(value)
            flat_reason = ', '.join(tmp_reason)
            # If the test is deleted we still need to treat this differently
            self.addCaveats(flat_reason)
            if 'deleted' in reasons.keys():
                if options.extra_info:
                    self.setStatus(self.deleted)
                else:
                    self.setStatus(self.silent)
            elif self.getStatus() == self.fail:
                return False
            else:
                self.setStatus(self.skip)
            return False
        # Check the return values of the derived classes
        self._runnable = self.checkRunnable(options)
        return self._runnable
|
from __future__ import print_function
from facepy import GraphAPI
import facepy
import re
import json
from frontend import write_html
from dateutil.parser import parse
# You need to have the Access Token is stored in a plain text file ACCESS_TOKEN
# to get an access token follow this SO answer: http://stackoverflow.com/a/16054555/1780891
# Read the token from the first line of ./ACCESS_TOKEN (trailing newline stripped).
with open('./ACCESS_TOKEN', 'r') as file_handle:
    access_token = file_handle.readline().rstrip('\n')
# Module-level Graph API client shared by all scraping helpers below.
graph = GraphAPI(access_token)
def get_comments(post_id):
    """Return the first page of comments for the given post id."""
    query = post_id + '/comments'
    # scrape the first page
    print('scraping:', query)
    response = graph.get(query)
    return response['data']
def get_picture(post_id, dir="."):
    """Download the picture attached to *post_id* into directory *dir*.

    Returns the saved file name (relative to *dir*), or None when the post has
    no attached object or the picture cannot be fetched.

    NOTE: the parameter name ``dir`` shadows the builtin, but is kept for
    backward compatibility with existing keyword callers.
    """
    base_query = post_id + '?fields=object_id'
    try:
        pic_id = graph.get(base_query)['object_id']
    except KeyError:
        # Post has no attached object (e.g. a plain status update).
        return None
    try:
        pic = graph.get('{}/picture'.format(pic_id))
        f_name = "{}/{}.png".format(dir, pic_id)
        # FIX: use a context manager so the handle is closed even when the
        # write fails (the original leaked it on error).
        with open(f_name, "wb") as f_handle:
            f_handle.write(pic)
        return "{}.png".format(pic_id)
    except facepy.FacebookError:
        return None
def get_feed(page_id, pages=1):
    """Scrape up to *pages* pages of the given Facebook page's feed.

    Returns a flat list of post dicts, in the order returned by the Graph API.

    Fixes over the original:
    - later pages were append()ed (nesting a list inside the result) instead
      of extend()ed, which broke every multi-page caller;
    - a missing 'paging' key raises KeyError, which the original tried to
      catch as IndexError (and did not guard at all for the first page);
    - the_until_arg could be referenced before assignment.
    """
    base_query = page_id + '/feed?limit=10'
    the_until_arg = ''
    # scrape the first page
    print('scraping:', base_query)
    feed = graph.get(base_query)
    data = feed['data']
    total_scraped = 0
    # determine the next page
    try:
        next = feed['paging']['next']
    except KeyError:
        # Single page of results: the API omits 'paging' entirely.
        next = False
    if next is not False:
        next_search = re.search(r'.*(\&until=[0-9]+)', next, re.IGNORECASE)
        if next_search:
            the_until_arg = next_search.group(1)
    pages = pages - 1
    # scrape the rest of the pages
    while (next is not False) and pages > 0:
        the_query = base_query + the_until_arg
        print('baking:', the_query)
        try:
            feed = graph.get(the_query)
            data.extend(feed['data'])
        except facepy.exceptions.OAuthError:
            print('start again at', the_query)
            break
        # determine the next page, until there isn't one
        try:
            next = feed['paging']['next']
            next_search = re.search(r'.*(\&until=[0-9]+)', next, re.IGNORECASE)
            if next_search:
                the_until_arg = next_search.group(1)
        except KeyError:
            print('last page...')
            next = False
        total_scraped = total_scraped + 100
        print(total_scraped, 'pies in the face so far')
        pages = pages - 1
    return data
def get_aggregated_feed(pages):
    """
    Aggregate the feeds of several pages into one list.

    Input: a list of (page_name, page_id) tuples.
    Output: combined list of posts, newest first (by created_time), with each
    post annotated with its 'source' page name and a downloaded 'pic'.
    """
    combined = []
    for page_name, page_id in pages:
        posts = get_feed(page_id)
        for post in posts:
            post['source'] = page_name
            post['pic'] = get_picture(post['id'], dir='output')
        combined.extend(posts)
    combined.sort(key=lambda post: parse(post['created_time']), reverse=True)
    return combined
if __name__ == "__main__":
    # Great thanks to https://gist.github.com/abelsonlive/4212647
    news_pages = [('The Scholar\'s Avenue', 'scholarsavenue'),
                  ('Awaaz IIT Kharagpur', 'awaaziitkgp'),
                  # ('Technology Students Gymkhana', 'TSG.IITKharagpur'),
                  ('Technology IIT KGP', 'iitkgp.tech')]
    for_later = ['Cultural-IIT-Kharagpur']
    data = get_aggregated_feed(news_pages)
    # FIX: use a context manager so the JSON file is flushed and closed (the
    # original passed an anonymous open() handle that was never closed).
    with open('output/feed.json', 'w') as json_file:
        json.dump(data, json_file)
    write_html(data, 'output/feed.html')
Add TSG and output
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com>
from __future__ import print_function
from facepy import GraphAPI
import facepy
import re
import json
from frontend import write_html
from dateutil.parser import parse
# You need to have the Access Token is stored in a plain text file ACCESS_TOKEN
# to get an access token follow this SO answer: http://stackoverflow.com/a/16054555/1780891
# Read the token from the first line of ./ACCESS_TOKEN (trailing newline stripped).
with open('./ACCESS_TOKEN', 'r') as file_handle:
    access_token = file_handle.readline().rstrip('\n')
# Module-level Graph API client shared by all scraping helpers below.
graph = GraphAPI(access_token)
def get_comments(post_id):
    """Return the first page of comments for the given post id."""
    query = post_id + '/comments'
    # scrape the first page
    print('scraping:', query)
    response = graph.get(query)
    return response['data']
def get_picture(post_id, dir="."):
    """Download the picture attached to *post_id* into directory *dir*.

    Returns the saved file name (relative to *dir*), or None when the post has
    no attached object or the picture cannot be fetched.

    NOTE: the parameter name ``dir`` shadows the builtin, but is kept for
    backward compatibility with existing keyword callers.
    """
    base_query = post_id + '?fields=object_id'
    try:
        pic_id = graph.get(base_query)['object_id']
    except KeyError:
        # Post has no attached object (e.g. a plain status update).
        return None
    try:
        pic = graph.get('{}/picture'.format(pic_id))
        f_name = "{}/{}.png".format(dir, pic_id)
        # FIX: use a context manager so the handle is closed even when the
        # write fails (the original leaked it on error).
        with open(f_name, "wb") as f_handle:
            f_handle.write(pic)
        return "{}.png".format(pic_id)
    except facepy.FacebookError:
        return None
def get_feed(page_id, pages=1):
    """Scrape up to *pages* pages of the given Facebook page's feed.

    Returns a flat list of post dicts, in the order returned by the Graph API.

    Fixes over the original:
    - later pages were append()ed (nesting a list inside the result) instead
      of extend()ed, which broke every multi-page caller;
    - a missing 'paging' key raises KeyError, which the original tried to
      catch as IndexError (and did not guard at all for the first page);
    - the_until_arg could be referenced before assignment.
    """
    base_query = page_id + '/feed?limit=10'
    the_until_arg = ''
    # scrape the first page
    print('scraping:', base_query)
    feed = graph.get(base_query)
    data = feed['data']
    total_scraped = 0
    # determine the next page
    try:
        next = feed['paging']['next']
    except KeyError:
        # Single page of results: the API omits 'paging' entirely.
        next = False
    if next is not False:
        next_search = re.search(r'.*(\&until=[0-9]+)', next, re.IGNORECASE)
        if next_search:
            the_until_arg = next_search.group(1)
    pages = pages - 1
    # scrape the rest of the pages
    while (next is not False) and pages > 0:
        the_query = base_query + the_until_arg
        print('baking:', the_query)
        try:
            feed = graph.get(the_query)
            data.extend(feed['data'])
        except facepy.exceptions.OAuthError:
            print('start again at', the_query)
            break
        # determine the next page, until there isn't one
        try:
            next = feed['paging']['next']
            next_search = re.search(r'.*(\&until=[0-9]+)', next, re.IGNORECASE)
            if next_search:
                the_until_arg = next_search.group(1)
        except KeyError:
            print('last page...')
            next = False
        total_scraped = total_scraped + 100
        print(total_scraped, 'pies in the face so far')
        pages = pages - 1
    return data
def get_aggregated_feed(pages):
    """
    Aggregate the feeds of several pages into one list.

    Input: a list of (page_name, page_id) tuples.
    Output: combined list of posts, newest first (by created_time), with each
    post annotated with its 'source' page name and a downloaded 'pic'.
    """
    combined = []
    for page_name, page_id in pages:
        posts = get_feed(page_id)
        for post in posts:
            post['source'] = page_name
            post['pic'] = get_picture(post['id'], dir='output')
        combined.extend(posts)
    combined.sort(key=lambda post: parse(post['created_time']), reverse=True)
    return combined
if __name__ == "__main__":
    # Great thanks to https://gist.github.com/abelsonlive/4212647
    news_pages = [('The Scholar\'s Avenue', 'scholarsavenue'),
                  ('Awaaz IIT Kharagpur', 'awaaziitkgp'),
                  ('Technology Students Gymkhana', 'TSG.IITKharagpur'),
                  ('Technology IIT KGP', 'iitkgp.tech')]
    for_later = ['Cultural-IIT-Kharagpur']
    data = get_aggregated_feed(news_pages)
    # FIX: use a context manager so the JSON file is flushed and closed (the
    # original passed an anonymous open() handle that was never closed).
    with open('output/feed.json', 'w') as json_file:
        json.dump(data, json_file)
    write_html(data, 'output/index.html')
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.tools.float_utils import float_round
from odoo.addons import decimal_precision as dp
import logging
_logger = logging.getLogger(__name__)
class ProductTemplate(models.Model):
    """Keep net and gross selling prices synchronized on product templates.

    The gross price (``lst_price_brut``) drives the stored computes:
    ``list_price`` and ``lst_price_net`` are derived from it via the
    tax-based gross/net ratio (``brut_net_factor``).
    """
    _inherit = 'product.template'
    # list_price becomes a stored compute so it always mirrors the net price.
    list_price = fields.Float(
        compute='_compute_net_price',
        # company_dependent=True,
        store=True,
    )
    lst_price_brut = fields.Float(
        string='Gross selling price',
        digits=dp.get_precision('Product Price'),
        # company_dependent=True,
    )
    lst_price_net = fields.Float(
        string='Net selling price',
        digits=dp.get_precision('Product Price'),
        # company_dependent=True,
    )
    brut_net_factor = fields.Float(
        string='Gross/Net Ratio',
        compute='_compute_net_price',
        store=True, readonly=True,
        default=1
    )
    def _get_brut_net_factor(self, price):
        """Return the tax-included/tax-excluded ratio for *price* (1 when no tax applies)."""
        product = self.product_variant_ids
        if len(self.product_variant_ids) > 1:
            product = self.product_variant_ids[0]
        company = self.env.user.company_id
        taxes = self.taxes_id.filtered(lambda x: x.company_id == company).with_context(round=False).compute_all(price, self.currency_id, 1, product=product, partner=self.env.user.company_id.partner_id)
        brut_net_factor = 1
        if taxes['total_excluded'] > 0:
            brut_net_factor = taxes['total_included'] / taxes['total_excluded']
            _logger.debug('%s / %s = %s', taxes['total_included'], taxes['total_excluded'], brut_net_factor)
        return brut_net_factor
    @api.model
    def create(self, vals):
        """Create the template, then back-fill the companion gross/net values."""
        if 'lst_price_net' in vals:
            vals['list_price'] = float(vals.get('lst_price_net'))
        template = super(ProductTemplate, self).create(vals)
        if 'lst_price_brut' in vals:
            vals['lst_price_net'] = template._get_net_price(float(vals.get('lst_price_brut')))
        if 'list_price' in vals:
            vals.update({
                'lst_price_brut': template._get_brut_price(float(vals.get('list_price'))),
                'lst_price_net': float(vals.get('list_price'))
            })
        # FIX: only drop the key when it is actually present; the original
        # unconditional del raised KeyError for templates created without
        # followers in vals.
        if 'message_follower_ids' in vals:
            del vals['message_follower_ids']
        template.write(vals)
        return template
    @api.multi
    def write(self, vals):
        """Keep the three price fields consistent on writes.

        Only the first record's taxes are consulted (note the ``break``),
        matching the original behavior for batched writes.
        """
        if 'lst_price_net' in vals:
            vals['list_price'] = float(vals.get('lst_price_net'))
        for template in self:
            if 'lst_price_brut' in vals:
                vals['lst_price_net'] = template._get_net_price(float(vals.get('lst_price_brut')))
            if 'list_price' in vals:
                vals.update({
                    'lst_price_brut': template._get_brut_price(float(vals.get('list_price'))),
                    'lst_price_net': float(vals.get('list_price'))
                })
            break
        return super(ProductTemplate, self).write(vals)
    def _get_brut_price(self, net_price):
        """Gross price for *net_price*, rounded with one extra digit of precision."""
        self.ensure_one()
        prec = self.env['decimal.precision'].precision_get('Product Price') + 1
        brut_net_factor = self._get_brut_net_factor(net_price)
        return float_round(net_price * brut_net_factor, prec)
    def _get_net_price(self, brut_price):
        """Net price for *brut_price*, rounded with one extra digit of precision."""
        self.ensure_one()
        prec = self.env['decimal.precision'].precision_get('Product Price') + 1
        brut_net_factor = self._get_brut_net_factor(brut_price)
        return float_round(brut_price / brut_net_factor, prec)
    @api.onchange('lst_price_net')
    def _compute_brut_price(self):
        """Onchange: propagate an edited net price to list/gross prices."""
        for template in self:
            brut_net_factor = template._get_brut_net_factor(template.lst_price_net)
            prec = self.env['decimal.precision'].precision_get('Product Price') + 1
            template.update({
                'list_price': float_round(template.lst_price_net, prec),
                'brut_net_factor': brut_net_factor,
                # FIX: use the loop record rather than self so multi-record
                # invocations (e.g. convert_list_net_price) use each
                # template's own net price.
                'lst_price_brut': template._get_brut_price(template.lst_price_net)
            })
    @api.depends('lst_price_brut', 'taxes_id')
    def _compute_net_price(self):
        """
        The gross price will always have the last word, so even if you set a net price directly
        it might happen it will change to a calculated price based on gross price and the calculated ratio
        """
        for template in self:
            brut_net_factor = template._get_brut_net_factor(template.lst_price_brut)
            list_price = template._get_net_price(template.lst_price_brut)
            template.update({
                'list_price': list_price,
                'brut_net_factor': brut_net_factor,
                'lst_price_net': list_price
            })
    def _get_db_list_price(self):
        """Read list_price straight from the database, bypassing the ORM compute."""
        self.env.cr.execute("SELECT list_price FROM product_template WHERE id = %s", (self.id,))
        db_template = self.env.cr.dictfetchall()[0]
        return db_template['list_price']
    @api.multi
    def convert_list_brut_price(self):
        """Migration helper: seed lst_price_brut from the stored list_price."""
        for template in self:
            if template.lst_price_brut == 0:
                template.lst_price_brut = template._get_db_list_price()
            template._compute_net_price()
    @api.multi
    def convert_list_net_price(self):
        """Migration helper: seed lst_price_net from the stored list_price."""
        for template in self:
            if template.lst_price_net == 0:
                template.lst_price_net = template._get_db_list_price()
            template._compute_brut_price()
[FIX] account_product_gross_net: only delete existing attributes
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.tools.float_utils import float_round
from odoo.addons import decimal_precision as dp
import logging
_logger = logging.getLogger(__name__)
class ProductTemplate(models.Model):
    """Keep net and gross selling prices synchronized on product templates.

    The gross price (``lst_price_brut``) drives the stored computes:
    ``list_price`` and ``lst_price_net`` are derived from it via the
    tax-based gross/net ratio (``brut_net_factor``).
    """
    _inherit = 'product.template'
    # list_price becomes a stored compute so it always mirrors the net price.
    list_price = fields.Float(
        compute='_compute_net_price',
        # company_dependent=True,
        store=True,
    )
    lst_price_brut = fields.Float(
        string='Gross selling price',
        digits=dp.get_precision('Product Price'),
        # company_dependent=True,
    )
    lst_price_net = fields.Float(
        string='Net selling price',
        digits=dp.get_precision('Product Price'),
        # company_dependent=True,
    )
    brut_net_factor = fields.Float(
        string='Gross/Net Ratio',
        compute='_compute_net_price',
        store=True, readonly=True,
        default=1
    )
    def _get_brut_net_factor(self, price):
        """Return the tax-included/tax-excluded ratio for *price* (1 when no tax applies)."""
        product = self.product_variant_ids
        if len(self.product_variant_ids) > 1:
            product = self.product_variant_ids[0]
        company = self.env.user.company_id
        taxes = self.taxes_id.filtered(lambda x: x.company_id == company).with_context(round=False).compute_all(price, self.currency_id, 1, product=product, partner=self.env.user.company_id.partner_id)
        brut_net_factor = 1
        if taxes['total_excluded'] > 0:
            brut_net_factor = taxes['total_included'] / taxes['total_excluded']
            _logger.debug('%s / %s = %s', taxes['total_included'], taxes['total_excluded'], brut_net_factor)
        return brut_net_factor
    @api.model
    def create(self, vals):
        """Create the template, then back-fill the companion gross/net values."""
        if 'lst_price_net' in vals:
            vals['list_price'] = float(vals.get('lst_price_net'))
        template = super(ProductTemplate, self).create(vals)
        if 'lst_price_brut' in vals:
            vals['lst_price_net'] = template._get_net_price(float(vals.get('lst_price_brut')))
        if 'list_price' in vals:
            vals.update({
                'lst_price_brut': template._get_brut_price(float(vals.get('list_price'))),
                'lst_price_net': float(vals.get('list_price'))
            })
        # Drop the followers key before the second write, presumably to avoid
        # re-writing followers already attached by create() -- confirm.
        if 'message_follower_ids' in vals:
            del vals['message_follower_ids']
        template.write(vals)
        return template
    @api.multi
    def write(self, vals):
        """Keep the three price fields consistent on writes.

        Note: only the first record's taxes are consulted (the ``break``).
        """
        if 'lst_price_net' in vals:
            vals['list_price'] = float(vals.get('lst_price_net'))
        for template in self:
            if 'lst_price_brut' in vals:
                vals['lst_price_net'] = template._get_net_price(float(vals.get('lst_price_brut')))
            if 'list_price' in vals:
                vals.update({
                    'lst_price_brut': template._get_brut_price(float(vals.get('list_price'))),
                    'lst_price_net': float(vals.get('list_price'))
                })
            break
        return super(ProductTemplate, self).write(vals)
    def _get_brut_price(self, net_price):
        """Gross price for *net_price*, rounded with one extra digit of precision."""
        self.ensure_one()
        prec = self.env['decimal.precision'].precision_get('Product Price') + 1
        brut_net_factor = self._get_brut_net_factor(net_price)
        return float_round(net_price * brut_net_factor, prec)
    def _get_net_price(self, brut_price):
        """Net price for *brut_price*, rounded with one extra digit of precision."""
        self.ensure_one()
        prec = self.env['decimal.precision'].precision_get('Product Price') + 1
        brut_net_factor = self._get_brut_net_factor(brut_price)
        return float_round(brut_price / brut_net_factor, prec)
    @api.onchange('lst_price_net')
    def _compute_brut_price(self):
        # Onchange: propagate an edited net price to list/gross prices.
        for template in self:
            brut_net_factor = template._get_brut_net_factor(template.lst_price_net)
            prec = self.env['decimal.precision'].precision_get('Product Price') + 1
            template.update({
                'list_price': float_round(template.lst_price_net, prec),
                'brut_net_factor': brut_net_factor,
                # NOTE(review): self.lst_price_net here (not template.*) looks
                # wrong for multi-record invocations -- confirm intent.
                'lst_price_brut': template._get_brut_price(self.lst_price_net)
            })
    @api.depends('lst_price_brut', 'taxes_id')
    def _compute_net_price(self):
        """
        The gross price will always have the last word, so even if you set a net price directly
        it might happen it will change to a calculated price based on gross price and the calculated ratio
        """
        for template in self:
            brut_net_factor = template._get_brut_net_factor(template.lst_price_brut)
            list_price = template._get_net_price(template.lst_price_brut)
            template.update({
                'list_price': list_price,
                'brut_net_factor': brut_net_factor,
                'lst_price_net': list_price
            })
    def _get_db_list_price(self):
        """Read list_price straight from the database, bypassing the ORM compute."""
        self.env.cr.execute("SELECT list_price FROM product_template WHERE id = %s", (self.id,))
        db_template = self.env.cr.dictfetchall()[0]
        return db_template['list_price']
    @api.multi
    def convert_list_brut_price(self):
        """Migration helper: seed lst_price_brut from the stored list_price."""
        for template in self:
            if template.lst_price_brut == 0:
                template.lst_price_brut = template._get_db_list_price()
            template._compute_net_price()
    @api.multi
    def convert_list_net_price(self):
        """Migration helper: seed lst_price_net from the stored list_price."""
        for template in self:
            if template.lst_price_net == 0:
                template.lst_price_net = template._get_db_list_price()
            template._compute_brut_price()
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2014, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
file_store : file-system based data store & locks.
'''
import os
from os import path, mkdir
from os.path import dirname, exists
import errno
import tempfile
import shutil
from .base import base_store
from jug.backends.encode import encode_to, decode_from
def create_directories(dname):
    '''
    create_directories(dname)

    Create *dname* and any missing parent directories (like mkdir -p).
    A single trailing slash is tolerated; an already-existing path is a no-op.
    '''
    if dname.endswith('/'):
        dname = dname[:-1]
    if path.exists(dname):
        return
    parent, _ = path.split(dname)
    if parent:
        create_directories(parent)
    try:
        mkdir(dname)
    except OSError as e:
        # Another process may create the directory between the exists()
        # check and the mkdir(); that race is benign.
        if e.errno != errno.EEXIST:
            raise
class file_store(base_store):
def __init__(self, dname):
'''
file_store(dname)
Recursively create directories.
'''
if dname.endswith('/'): dname = dname[:-1]
self.jugdir = dname
def __repr__(self):
return 'file_store({})'.format(self.jugdir)
__str__ = __repr__
def create(self):
'''
Recursively create directories.
'''
create_directories(self.jugdir)
create_directories(self.tempdir())
    def _maybe_create(self):
        '''
        Calls self.create() the first time it is called; then becomes a no-op.
        '''
        self.create()
        # Shadow the bound method with a no-op on this *instance* so later
        # calls skip the (idempotent but wasteful) directory creation.
        self._maybe_create = (lambda : None)
def tempdir(self):
return path.join(self.jugdir, 'tempfiles')
def _getfname(self, name):
import six
if type(name) != six.text_type:
name = six.text_type(name, 'utf-8')
return path.join(self.jugdir, name[:2], name[2:])
    def dump(self, object, name):
        '''
        store.dump(object, name)

        Performs the same as
        pickle.dump(object, open(name,'w'))
        but does it in a way that is guaranteed to be atomic even over NFS:
        the value is written to a temp file inside the store and then
        rename()d into place, so readers never observe a partial write.
        '''
        # Resolve the key to its on-disk path and make sure both the target
        # directory and the shared tempdir exist.
        name = self._getfname(name)
        create_directories(dirname(name))
        self._maybe_create()
        fd, fname = tempfile.mkstemp('.jugtmp', 'jugtemp', self.tempdir())
        output = os.fdopen(fd, 'wb')
        try:
            import numpy as np
            # Fast path: raw ndarrays are stored in NumPy's native .npy
            # format (load() tries read_array first).
            if type(object) == np.ndarray:
                np.lib.format.write_array(output, object)
                output.flush()
                os.fsync(output.fileno())
                output.close()
                os.rename(fname, name)
                return
        except ImportError:
            pass
        except OSError:
            pass
        except ValueError:
            # NOTE(review): if write_array fails after partially writing, we
            # fall through and append the encoded payload to the same handle
            # -- confirm this case cannot produce a corrupt temp file.
            pass
        encode_to(object, output)
        output.flush()
        os.fsync(output.fileno())
        output.close()
        # Rename is atomic even over NFS.
        os.rename(fname, name)
def list(self):
'''
keys = store.list()
Returns a list of all the keys in the store
'''
if not exists(self.jugdir):
return []
keys = []
for d in os.listdir(self.jugdir):
if len(d) == 2:
for f in os.listdir(path.join(self.jugdir, d)):
keys.append((d+f).encode('ascii'))
return keys
def listlocks(self):
'''
keys = store.listlocks()
Returns a list of all the locks in the store
This is an unsafe function as the results may be outdated by the time
the function returns.
'''
if not exists(path.join(self.jugdir, 'locks')):
return []
keys = []
for k in os.listdir(path.join(self.jugdir, 'locks')):
keys.append(k[:-len('.lock')].encode('ascii'))
return keys
def can_load(self, name):
'''
can = store.can_load(name)
'''
fname = self._getfname(name)
return exists(fname)
    def load(self, name):
        '''
        obj = store.load(name)

        Loads the objects. Equivalent to pickle.load(), but a bit smarter at
        times.

        Parameters
        ----------
        name : str
            Key to use

        Returns
        -------
        obj : any
            The object that was saved under ``name``
        '''
        fname = self._getfname(name)
        input = open(fname, 'rb')
        try:
            import numpy as np
            # Try the NPY fast path first (mirrors the fast path in dump).
            return np.lib.format.read_array(input)
        except ValueError:
            # Not NPY data: rewind and fall back to the generic decoder.
            input.seek(0)
        except ImportError:
            # numpy is optional; nothing was read yet, no rewind needed.
            pass
        return decode_from(input)
def remove(self, name):
'''
was_removed = store.remove(name)
Remove the entry associated with name.
Returns whether any entry was actually removed.
'''
try:
fname = self._getfname(name)
os.unlink(fname)
return True
except OSError:
return False
def cleanup(self, active):
'''
nr_removed = store.cleanup(active)
Implement 'cleanup' command
Parameters
----------
active : sequence
files *not to remove*
Returns
-------
nr_removed : integer
number of removed files
'''
active = frozenset(self._getfname(t.hash()) for t in active)
removed = 0
for dir,_,fs in os.walk(self.jugdir):
for f in fs:
f = path.join(dir, f)
if f not in active:
os.unlink(f)
removed += 1
return removed
def remove_locks(self):
'''
removed = store.remove_locks()
Remove all locks
Returns
-------
removed : int
Number of locks removed
'''
lockdir = path.join(self.jugdir, 'locks')
if not exists(lockdir): return 0
removed = 0
for f in os.listdir(lockdir):
os.unlink(path.join(lockdir, f))
removed += 1
return removed
def getlock(self, name):
'''
lock = store.getlock(name)
Retrieve a lock object associated with ``name``.
Parameters
----------
name : str
Key
Returns
-------
lock : Lock object
This is a file_lock object
'''
self._maybe_create()
return file_based_lock(self.jugdir, name)
def close(self):
'''
store.close()
Has no effect on file based stores.
'''
pass
def metadata(self, t):
'''
meta = store.metadata(t)
Retrieves information on the state of the computation
Parameters
----------
t : Task
A Task object
Returns
-------
meta : dict
Dictionary describing the state of the computation
'''
from os import stat, path
from time import ctime
fname = self._getfname(t.hash())
if path.exists(fname):
st = stat(fname)
return {
'computed': True,
'completed': ctime(st.st_mtime),
}
return {
'computed': False
}
@staticmethod
def remove_store(jugdir):
'''
file_store.remove_store(jugdir)
Removes from disk all the files associated with this jugdir.
'''
shutil.rmtree(jugdir)
class file_based_lock(object):
    '''
    file_based_lock: File-system based locks

    Functions:
    ----------

    - get(): acquire the lock
    - release(): release the lock
    - is_locked(): check lock state
    '''
    def __init__(self, jugdir, name):
        # Keys may arrive as bytes (e.g. hashes). Decode them to text
        # first, otherwise '{0}.lock'.format(name) embeds a "b'...'"
        # repr in the lock filename on Python 3 and the lock no longer
        # matches its text-keyed counterpart.
        if isinstance(name, bytes):
            name = name.decode('utf-8')
        self.fullname = path.join(jugdir, 'locks', '{0}.lock'.format(name))

    def get(self):
        '''
        lock.get()

        Create a lock for name in an NFS compatible way.

        Returns
        -------
        locked : bool
            Whether the lock was created
        '''
        if exists(self.fullname): return False
        create_directories(path.dirname(self.fullname))
        try:
            import socket
            # O_EXCL makes the check-and-create atomic.
            fd = os.open(self.fullname,os.O_RDWR|os.O_CREAT|os.O_EXCL)
            F = os.fdopen(fd,'w')
            # Record who holds the lock, for debugging stale locks.
            F.write('%s on %s\n' % (os.getpid(), socket.gethostname()))
            F.close()
            return True
        except OSError:
            return False

    def release(self):
        '''
        lock.release()

        Removes lock (silently ignores a lock that is already gone).
        '''
        try:
            os.unlink(self.fullname)
        except OSError:
            pass

    def is_locked(self):
        '''
        locked = lock.is_locked()

        Returns whether a lock exists for name. Note that the answer can
        be invalid by the time this function returns. Only by trying to
        acquire the lock can you avoid race-conditions. See the get() function.
        '''
        return path.exists(self.fullname)
BUG: handle bytes keys correctly in the lock constructor (decode them to text before building the lock filename).
Closes #12.
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2014, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
file_store : file-system based data store & locks.
'''
import os
from os import path, mkdir
from os.path import dirname, exists
import errno
import tempfile
import shutil
from .base import base_store
from jug.backends.encode import encode_to, decode_from
def create_directories(dname):
    '''
    create_directories(dname)

    Recursively create directories (no-op when the path already exists;
    tolerates races where another process creates a component first).
    '''
    if dname.endswith('/'):
        dname = dname[:-1]
    if path.exists(dname):
        return
    parent, _ = path.split(dname)
    if parent:
        create_directories(parent)
    try:
        mkdir(dname)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
class file_store(base_store):
    '''File-system backed jug store.

    Each key becomes one file under ``jugdir``, sharded into
    two-character subdirectories; writes are staged in a temp file and
    renamed into place so they are atomic even over NFS.
    '''
    def __init__(self, dname):
        '''
        file_store(dname)

        Store rooted at directory ``dname`` (a trailing slash is stripped).
        '''
        if dname.endswith('/'): dname = dname[:-1]
        self.jugdir = dname

    def __repr__(self):
        return 'file_store({})'.format(self.jugdir)
    __str__ = __repr__

    def create(self):
        '''
        Recursively create directories.
        '''
        create_directories(self.jugdir)
        create_directories(self.tempdir())

    def _maybe_create(self):
        '''
        Calls self.create() the first time it is called; then becomes a no-op.
        '''
        self.create()
        # Overwrite the bound attribute so later calls are a cheap no-op.
        self._maybe_create = (lambda : None)

    def tempdir(self):
        # Staging directory for dump(); inside jugdir so os.rename stays
        # on the same filesystem (and therefore atomic).
        return path.join(self.jugdir, 'tempfiles')

    def _getfname(self, name):
        # Keys are sharded: the first two characters select a subdirectory.
        import six
        if type(name) != six.text_type:
            name = six.text_type(name, 'utf-8')
        return path.join(self.jugdir, name[:2], name[2:])

    def dump(self, object, name):
        '''
        store.dump(object, name)

        Performs the same as ``pickle.dump(object, open(name, 'w'))`` but
        does it in a way that is guaranteed to be atomic even over NFS.
        '''
        name = self._getfname(name)
        create_directories(dirname(name))
        self._maybe_create()
        fd, fname = tempfile.mkstemp('.jugtmp', 'jugtemp', self.tempdir())
        output = os.fdopen(fd, 'wb')
        try:
            import numpy as np
            # Fast path: plain ndarrays are stored in NPY format. Exact
            # type check on purpose: subclasses use the generic encoder.
            if type(object) == np.ndarray:
                np.lib.format.write_array(output, object)
                output.flush()
                os.fsync(output.fileno())
                output.close()
                os.rename(fname, name)
                return
        except ImportError:
            # numpy is optional.
            pass
        except OSError:
            pass
        except ValueError:
            pass
        encode_to(object, output)
        output.flush()
        os.fsync(output.fileno())
        output.close()
        # Rename is atomic even over NFS.
        os.rename(fname, name)

    def list(self):
        '''
        keys = store.list()

        Returns a list of all the keys in the store
        '''
        if not exists(self.jugdir):
            return []
        keys = []
        for d in os.listdir(self.jugdir):
            # Only the two-character shard directories hold keys.
            if len(d) == 2:
                for f in os.listdir(path.join(self.jugdir, d)):
                    keys.append((d+f).encode('ascii'))
        return keys

    def listlocks(self):
        '''
        keys = store.listlocks()

        Returns a list of all the locks in the store

        This is an unsafe function as the results may be outdated by the time
        the function returns.
        '''
        if not exists(path.join(self.jugdir, 'locks')):
            return []
        keys = []
        for k in os.listdir(path.join(self.jugdir, 'locks')):
            keys.append(k[:-len('.lock')].encode('ascii'))
        return keys

    def can_load(self, name):
        '''
        can = store.can_load(name)

        Return whether a value is stored under key ``name``.
        '''
        fname = self._getfname(name)
        return exists(fname)

    def load(self, name):
        '''
        obj = store.load(name)

        Loads the objects. Equivalent to pickle.load(), but a bit smarter at
        times.

        Parameters
        ----------
        name : str
            Key to use

        Returns
        -------
        obj : any
            The object that was saved under ``name``
        '''
        fname = self._getfname(name)
        input = open(fname, 'rb')
        try:
            import numpy as np
            # Try the NPY fast path first (mirrors dump()).
            return np.lib.format.read_array(input)
        except ValueError:
            # Not NPY data: rewind and fall back to the generic decoder.
            input.seek(0)
        except ImportError:
            pass
        return decode_from(input)

    def remove(self, name):
        '''
        was_removed = store.remove(name)

        Remove the entry associated with name.

        Returns whether any entry was actually removed.
        '''
        try:
            fname = self._getfname(name)
            os.unlink(fname)
            return True
        except OSError:
            # Missing file: nothing to remove.
            return False

    def cleanup(self, active):
        '''
        nr_removed = store.cleanup(active)

        Implement 'cleanup' command

        Parameters
        ----------
        active : sequence
            files *not to remove*

        Returns
        -------
        nr_removed : integer
            number of removed files
        '''
        active = frozenset(self._getfname(t.hash()) for t in active)
        removed = 0
        for dir,_,fs in os.walk(self.jugdir):
            for f in fs:
                f = path.join(dir, f)
                if f not in active:
                    os.unlink(f)
                    removed += 1
        return removed

    def remove_locks(self):
        '''
        removed = store.remove_locks()

        Remove all locks

        Returns
        -------
        removed : int
            Number of locks removed
        '''
        lockdir = path.join(self.jugdir, 'locks')
        if not exists(lockdir): return 0
        removed = 0
        for f in os.listdir(lockdir):
            os.unlink(path.join(lockdir, f))
            removed += 1
        return removed

    def getlock(self, name):
        '''
        lock = store.getlock(name)

        Retrieve a lock object associated with ``name``.

        Parameters
        ----------
        name : str
            Key

        Returns
        -------
        lock : Lock object
            This is a file_lock object
        '''
        self._maybe_create()
        return file_based_lock(self.jugdir, name)

    def close(self):
        '''
        store.close()

        Has no effect on file based stores.
        '''
        pass

    def metadata(self, t):
        '''
        meta = store.metadata(t)

        Retrieves information on the state of the computation

        Parameters
        ----------
        t : Task
            A Task object

        Returns
        -------
        meta : dict
            Dictionary describing the state of the computation
        '''
        from os import stat, path
        from time import ctime
        fname = self._getfname(t.hash())
        if path.exists(fname):
            st = stat(fname)
            return {
                'computed': True,
                'completed': ctime(st.st_mtime),
            }
        return {
            'computed': False
        }

    @staticmethod
    def remove_store(jugdir):
        '''
        file_store.remove_store(jugdir)

        Removes from disk all the files associated with this jugdir.
        '''
        shutil.rmtree(jugdir)
class file_based_lock(object):
    '''
    File-system based lock (NFS compatible).

    Functions:
    ----------

    - get(): acquire the lock
    - release(): release the lock
    - is_locked(): check lock state
    '''
    def __init__(self, jugdir, name):
        import six
        if type(name) != six.text_type:
            name = six.text_type(name, 'utf-8')
        lockfile = '{0}.lock'.format(name)
        self.fullname = path.join(jugdir, 'locks', lockfile)

    def get(self):
        '''
        locked = lock.get()

        Try to create the lock file; return whether it was created.
        O_EXCL makes the check-and-create atomic, even over NFS.
        '''
        if exists(self.fullname):
            return False
        create_directories(path.dirname(self.fullname))
        try:
            import socket
            fd = os.open(self.fullname, os.O_RDWR | os.O_CREAT | os.O_EXCL)
            F = os.fdopen(fd, 'w')
            F.write('%s on %s\n' % (os.getpid(), socket.gethostname()))
            F.close()
            return True
        except OSError:
            return False

    def release(self):
        '''
        lock.release()

        Remove the lock file, ignoring a lock that is already gone.
        '''
        try:
            os.unlink(self.fullname)
        except OSError:
            pass

    def is_locked(self):
        '''
        locked = lock.is_locked()

        Report whether the lock file currently exists. The answer may be
        stale immediately; only get() avoids the race.
        '''
        return path.exists(self.fullname)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import StringIO
import logging
import threading
from HTMLParser import HTMLParser
from odoo.addons.base_phone import fields as phone_fields
from pyPdf import PdfFileReader, PdfFileWriter
from reportlab.lib.units import mm
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.colors import white
from odoo import api, models, fields, _
from odoo.exceptions import UserError
logger = logging.getLogger(__name__)
class MLStripper(HTMLParser):
    """ Used to remove HTML tags.

    Feed HTML via ``feed()``; ``get_data()`` then returns the
    concatenated text content with every tag dropped.
    """
    def __init__(self):
        # Run the full parser initialization. The previous self.reset()
        # shortcut skipped HTMLParser.__init__ and leaves parser
        # attributes unset on newer Python versions.
        HTMLParser.__init__(self)
        self.fed = []

    def handle_data(self, d):
        # Collect every text node found between tags.
        self.fed.append(d)

    def get_data(self):
        return ''.join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed (text content only)."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
class CommunicationJob(models.Model):
    """ Communication Jobs are task that will either generate and send
    an e-mail or print a document when executed.

    It is useful to keep a history of the communication sent to partners
    and to send again (or print again) a particular communication.

    It is also useful to batch send communications without manually looking
    for which one to send by e-mail and which one to print.
    """
    _name = 'partner.communication.job'
    _description = 'Communication Job'
    _order = 'date desc,sent_date desc'
    _inherit = ['partner.communication.defaults', 'ir.needaction_mixin',
                'mail.thread']

    ##########################################################################
    #                                 FIELDS                                 #
    ##########################################################################
    # Communication configuration driving template, report and defaults.
    config_id = fields.Many2one(
        'partner.communication.config', 'Type', required=True,
        default=lambda s: s.env.ref(
            'partner_communication.default_communication'),
    )
    model = fields.Char(related='config_id.model')
    partner_id = fields.Many2one(
        'res.partner', 'Send to', required=True, ondelete='cascade')
    partner_phone = phone_fields.Phone(related='partner_id.phone')
    partner_mobile = phone_fields.Phone(related='partner_id.mobile')
    country_id = fields.Many2one(related='partner_id.country_id')
    parent_id = fields.Many2one(related='partner_id.parent_id')
    # Comma-separated ids of the records this communication is about.
    object_ids = fields.Char('Resource ids', required=True)
    date = fields.Datetime(default=fields.Datetime.now)
    sent_date = fields.Datetime(readonly=True)
    state = fields.Selection([
        ('call', _('Call partner')),
        ('pending', _('Pending')),
        ('done', _('Done')),
        ('cancel', _('Cancelled')),
    ], default='pending', readonly=True, track_visibility='onchange')
    # When set, the partner must be called before the job can be sent.
    need_call = fields.Boolean(
        readonly=True,
        states={'pending': [('readonly', False)]}
    )
    auto_send = fields.Boolean(
        help='Job is processed at creation if set to true')
    send_mode = fields.Selection('send_mode_select')
    email_template_id = fields.Many2one(
        related='config_id.email_template_id', store=True)
    email_to = fields.Char(
        help='optional e-mail address to override recipient')
    email_id = fields.Many2one('mail.mail', 'Generated e-mail', readonly=True)
    phonecall_id = fields.Many2one('crm.phonecall', 'Phonecall log',
                                   readonly=True)
    body_html = fields.Html(sanitize=False)
    pdf_page_count = fields.Integer(string='PDF size',
                                    readonly=True)
    subject = fields.Char()
    attachment_ids = fields.One2many(
        'partner.communication.attachment', 'communication_id',
        string="Attachments")
    # Convenience view over the raw ir.attachment records, kept in sync
    # with attachment_ids via the compute/inverse methods below.
    ir_attachment_ids = fields.Many2many(
        'ir.attachment', string='Attachments',
        compute='_compute_ir_attachments',
        inverse='_inverse_ir_attachments',
        domain=[('report_id', '!=', False)]
    )
def _compute_ir_attachments(self):
for job in self:
job.ir_attachment_ids = job.mapped('attachment_ids.attachment_id')
    def count_pdf_page(self):
        """Store in ``pdf_page_count`` the number of pages of the
        rendered report, for jobs that will be printed.
        """
        # Rendering reports needs a working report engine; skip entirely
        # when running inside the test suite.
        test_mode = getattr(threading.currentThread(), 'testing', False)
        if not test_mode:
            for record in self.filtered('report_id'):
                if record.send_mode == 'physical':
                    report_obj = record.env['report'].with_context(
                        lang=record.partner_id.lang,
                        must_skip_send_to_printer=True)
                    pdf_str = report_obj.get_pdf(record.ids,
                                                 record.report_id.report_name)
                    pdf = PdfFileReader(StringIO.StringIO(pdf_str))
                    record.pdf_page_count = pdf.getNumPages()
    def _inverse_ir_attachments(self):
        """Sync ``attachment_ids`` with the records set on
        ``ir_attachment_ids``: wrap new attachments, drop removed ones.

        :raises UserError: when a new attachment has no printing (report)
            configuration and the ``no_print`` context key is not set.
        """
        attach_obj = self.env['partner.communication.attachment']
        for job in self:
            for attachment in job.ir_attachment_ids:
                if attachment not in job.attachment_ids.mapped(
                        'attachment_id'):
                    # New attachment: it must carry a printing report
                    # unless the caller explicitly opted out.
                    if not attachment.report_id and not \
                            self.env.context.get('no_print'):
                        raise UserError(
                            _("Please select a printing configuration for the "
                              "attachments you add.")
                        )
                    attach_obj.create({
                        'name': attachment.name,
                        'communication_id': job.id,
                        'report_name': attachment.report_id.report_name or '',
                        'attachment_id': attachment.id
                    })
            # Remove deleted attachments
            job.attachment_ids.filtered(
                lambda a: a.attachment_id not in job.ir_attachment_ids
            ).unlink()
@api.model
def send_mode_select(self):
return [
('digital', _('By e-mail')),
('physical', _('Print report')),
('both', _('Both'))
]
##########################################################################
# ORM METHODS #
##########################################################################
    @api.model
    def create(self, vals):
        """ If a pending communication for same partner exists,
        add the object_ids to it. Otherwise, create a new communication.
        opt-out partners won't create any communication.
        """
        # Object ids accept lists, integer or string values. It should contain
        # a comma separated list of integers
        object_ids = vals.get('object_ids')
        if isinstance(object_ids, list):
            vals['object_ids'] = ','.join(map(str, object_ids))
        elif object_ids:
            vals['object_ids'] = str(object_ids)
        else:
            # Default to the partner itself as the related object.
            vals['object_ids'] = str(vals['partner_id'])

        # Merge into an existing pending job of the same type (the
        # generic default communication is never merged).
        same_job_search = [
            ('partner_id', '=', vals.get('partner_id')),
            ('config_id', '=', vals.get('config_id')),
            ('config_id', '!=',
             self.env.ref('partner_communication.default_communication').id),
            ('state', 'in', ('call', 'pending'))
        ] + self.env.context.get('same_job_search', [])
        job = self.search(same_job_search)
        if job:
            job.object_ids = job.object_ids + ',' + vals['object_ids']
            job.refresh_text()
            return job
        self._get_default_vals(vals)
        job = super(CommunicationJob, self).create(vals)

        # Determine send mode
        send_mode = job.config_id.get_inform_mode(job.partner_id)
        if 'send_mode' not in vals and 'default_send_mode' not in \
                self.env.context:
            job.send_mode = send_mode[0]
        if 'auto_send' not in vals and 'default_auto_send' not in \
                self.env.context:
            job.auto_send = send_mode[1]
        # Generate text when none was supplied (or it is effectively empty).
        if not job.body_html or not strip_tags(job.body_html):
            job.refresh_text()
        else:
            job.set_attachments()

        # Check if phonecall is needed
        if job.need_call or job.config_id.need_call:
            job.state = 'call'
        if job.body_html or job.send_mode == 'physical':
            job.count_pdf_page()
        if job.auto_send:
            job.send()
        return job
    @api.model
    def _get_default_vals(self, vals, default_vals=None):
        """
        Used at record creation to find default values given the config of the
        communication.

        :param vals: dict: record values
        :param default_vals: list of fields to copy from config to job.
        :return: config record to use in inheritances.
                 The vals dict is updated.
        """
        if default_vals is None:
            default_vals = []
        # These config fields are always mirrored onto the job.
        default_vals.extend(['report_id', 'need_call', 'omr_enable_marks',
                             'omr_should_close_envelope',
                             'omr_add_attachment_tray_1',
                             'omr_add_attachment_tray_2'])
        config = self.config_id.browse(vals['config_id'])
        # Determine user by default : take in config or employee
        if not vals.get('user_id'):
            vals['user_id'] = config.user_id.id or self.env.uid
        # Check all default_vals fields
        for default_val in default_vals:
            if default_val not in vals:
                value = getattr(config, default_val)
                if default_val.endswith('_id'):
                    # Store the raw database id, not the recordset.
                    value = value.id
                vals[default_val] = value
        return config
@api.multi
def write(self, vals):
object_ids = vals.get('object_ids')
if isinstance(object_ids, list):
vals['object_ids'] = ','.join(map(str, object_ids))
elif object_ids:
vals['object_ids'] = str(object_ids)
if vals.get('need_call'):
vals['state'] = 'call'
super(CommunicationJob, self).write(vals)
if vals.get('body_html') or vals.get('send_mode') == 'physical':
self.count_pdf_page()
return True
##########################################################################
# PUBLIC METHODS #
##########################################################################
    @api.multi
    def send(self):
        """ Executes the job.

        Jobs waiting for a phone call are skipped. E-mail jobs are sent
        immediately; print jobs are rendered and printed in one batch.
        """
        no_call = self.filtered(lambda j: not j.need_call)
        to_print = no_call.filtered(lambda j: j.send_mode == 'physical')
        for job in no_call.filtered(lambda j: j.send_mode in ('both',
                                                              'digital')):
            state = job._send_mail()
            if job.send_mode != 'both':
                job.write({
                    'state': state,
                    'sent_date': state != 'pending' and fields.Datetime.now()
                })
            else:
                # Job was sent by e-mail and must now be printed
                # NOTE(review): to_print was captured before this switch,
                # so the job is presumably printed by a later send() run;
                # confirm.
                job.send_mode = 'physical'
                job.refresh_text()
        if to_print:
            return to_print._print_report()
        return True
@api.multi
def cancel(self):
to_call = self.filtered(lambda j: j.state == 'call')
to_call.write({'state': 'pending', 'need_call': False})
(self - to_call).write({'state': 'cancel'})
return True
@api.multi
def reset(self):
self.write({
'state': 'pending',
'date_sent': False,
'email_id': False,
})
return True
    @api.multi
    def refresh_text(self, refresh_uid=False):
        """Regenerate body, subject and attachments from the template.

        :param refresh_uid: when True, also reassign the job to the
            current user.
        """
        self.mapped('attachment_ids').unlink()
        self.set_attachments()
        for job in self:
            if job.email_template_id and job.object_ids:
                # NB: this local name shadows the odoo ``fields`` module.
                fields = self.env['mail.compose.message'].with_context(
                    lang=job.partner_id.lang).get_generated_fields(
                    job.email_template_id, [job.id])
                job.write({
                    'body_html': fields['body_html'],
                    'subject': fields['subject'],
                })
            if refresh_uid:
                job.user_id = self.env.user
            # A refreshed job no longer waits for a call unless required.
            if job.state == 'call' and not job.need_call:
                job.state = 'pending'
        return True
@api.multi
def quick_refresh(self):
# Only refresh text and subject, all at once
jobs = self.filtered('email_template_id').filtered('object_ids')
langs = set(jobs.mapped('partner_id.lang'))
template = jobs.mapped('email_template_id')
if len(langs) > 1:
raise UserError(_("This is only possible for one lang at time"))
if len(template) > 1:
raise UserError(_(
"This is only possible for one template at time"))
values = self.env['mail.compose.message'].with_context(
lang=langs.pop()).get_generated_fields(template, jobs.ids)
if not isinstance(values, list):
values = [values]
for index in range(0, len(values)):
jobs[index].write({
'body_html': values[index]['body_html'],
'subject': values[index]['subject']
})
return True
    @api.onchange('config_id', 'partner_id')
    def onchange_config_id(self):
        """Propagate the config defaults onto the form when the type or
        the partner changes."""
        if self.config_id and self.partner_id:
            send_mode = self.config_id.get_inform_mode(self.partner_id)
            self.send_mode = send_mode[0]
            # set default fields
            default_vals = {'config_id': self.config_id.id}
            self._get_default_vals(default_vals)
            for key, val in default_vals.iteritems():
                if key.endswith('_id'):
                    # _get_default_vals stored raw ids; convert back to a
                    # recordset before assigning to the virtual record.
                    val = getattr(self, key).browse(val)
                setattr(self, key, val)
@api.multi
def open_related(self):
object_ids = map(int, self.object_ids.split(','))
action = {
'name': _('Related objects'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': self.config_id.model,
'context': self.with_context(group_by=False).env.context,
'target': 'current',
}
if len(object_ids) > 1:
action.update({
'view_mode': 'tree,form',
'domain': [('id', 'in', object_ids)]
})
else:
action['res_id'] = object_ids[0]
return action
    @api.multi
    def log_call(self):
        """Open the phone-call logging wizard for this communication.

        :return: window action dict opening the call wizard.
        """
        return {
            'name': _("Log your call"),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'partner.communication.call.wizard',
            # Pre-fill the wizard through context keys.
            'context': self.with_context({
                'click2dial_id': self.partner_id.id,
                'phone_number': self.partner_phone or self.partner_mobile,
                'call_name': self.config_id.name,
                'timestamp': fields.Datetime.now(),
                'communication_id': self.id,
            }).env.context,
            'target': 'new',
        }
    @api.multi
    def call(self):
        """ Call partner from tree view button. """
        self.ensure_one()
        # Dial through the base_phone click2dial integration, then open
        # the wizard so the user can log the call.
        self.env['phone.common'].with_context(
            click2dial_model=self._name, click2dial_id=self.id)\
            .click2dial(self.partner_phone or self.partner_mobile)
        return self.log_call()
@api.multi
def get_objects(self):
config = self.mapped('config_id')
config.ensure_one()
object_ids = list()
object_id_strings = self.mapped('object_ids')
for id_strings in object_id_strings:
object_ids += map(int, id_strings.split(','))
return self.env[config.model].browse(set(object_ids))
    @api.multi
    def set_attachments(self):
        """
        Generates attachments for the communication and link them to the
        communication record.

        The config may name an ``attachments_function`` on the job that
        returns a dict mapping attachment name to (report_name, data).
        """
        attachment_obj = self.env['partner.communication.attachment']
        for job in self.with_context(must_skip_send_to_printer=True):
            if job.config_id.attachments_function:
                # Resolve and call the generator in the partner's
                # language; default to an empty dict when missing.
                binaries = getattr(
                    job.with_context(lang=job.partner_id.lang),
                    job.config_id.attachments_function, lambda: dict())()
                for name, data in binaries.iteritems():
                    attachment_obj.create({
                        'name': name,
                        'communication_id': job.id,
                        'report_name': data[0],
                        'data': data[1],
                    })
@api.multi
def preview_pdf(self):
preview_model = 'partner.communication.pdf.wizard'
preview = self.env[preview_model].create({
'communication_id': self.id
})
return {
'name': _("Preview"),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': preview_model,
'res_id': preview.id,
'context': self.env.context,
'target': 'new',
}
    @api.multi
    def message_post(self, **kwargs):
        """
        If message is not from a user, it is probably the answer of the
        partner by e-mail. We post it on the partner thread instead of
        the communication thread

        :param kwargs: arguments
        :return: id of the mail_message record
        """
        message = super(CommunicationJob, self).message_post(**kwargs)
        if not message.author_id.user_ids:
            # Author has no attached user: re-home the message onto the
            # partner's own thread.
            message.write({
                'model': 'res.partner',
                'res_id': self.partner_id.id
            })
        return message.id
@api.multi
def add_omr_marks(self, pdf_data, is_latest_document):
# Documentation
# http://meteorite.unm.edu/site_media/pdf/reportlab-userguide.pdf
# https://pythonhosted.org/PyPDF2/PdfFileReader.html
# https://stackoverflow.com/a/17538003
# https://gist.github.com/kzim44/5023021
# https://www.blog.pythonlibrary.org/2013/07/16/
# pypdf-how-to-write-a-pdf-to-memory/
self.ensure_one()
# OMR Parameters
number_of_alimentation = 2
number_of_marks = 7
orm_mark_length = 7 * mm
# margin around the omr code which should stay white
horizontal_margin = 4.2 * mm
vertical_margin = 8.5 * mm
x1 = 194 * mm
x2 = x1 + orm_mark_length
y1 = 180 * mm
y_step = 4 * mm
pdf_buffer = StringIO.StringIO()
pdf_buffer.write(pdf_data)
existing_pdf = PdfFileReader(pdf_buffer)
total_pages = existing_pdf.getNumPages()
# print latest omr mark on latest pair page (recto)
latest_omr_page = (total_pages - 1) if total_pages % 2 is 0 \
else total_pages
output = PdfFileWriter()
for page_number in range(total_pages):
y_position = y1
# only print omr marks on pair pages (recto)
if page_number % 2 is 0:
is_latest_page = True if \
is_latest_document and \
page_number == (latest_omr_page - 1) else False
omr_buffer = StringIO.StringIO()
# Create a canvas to write on
p = Canvas(omr_buffer)
# line (x1, y1, x2, y2)
p.setLineWidth(0.2 * mm)
# add a white background for the omr code
p.setFillColor(white)
p.rect(
x1 - horizontal_margin,
y1 - (number_of_marks - 1) * y_step - vertical_margin,
orm_mark_length + 2 * horizontal_margin,
(number_of_marks - 1) * y_step + 2 * vertical_margin,
fill=True,
stroke=False
)
# start mark (compulsory)
p.line(x1, y_position, x2, y_position)
y_position -= y_step
# insert mark (only on latest page)
if is_latest_page:
p.line(x1, y_position, x2, y_position)
y_position -= y_step
# alimentation (2 marks)
# back 1 is the "big special" one (the lower)
# back 2 is at the middle
for alimentation_number in range(number_of_alimentation):
if is_latest_page:
if self.omr_add_attachment_tray_1 and \
alimentation_number == 0:
p.line(x1, y_position, x2, y_position)
elif self.omr_add_attachment_tray_2 and \
alimentation_number == 1:
p.line(x1, y_position, x2, y_position)
y_position -= y_step
# close envelop (if display the envelop is not closed)
if is_latest_page \
and not self.omr_should_close_envelope:
p.line(x1, y_position, x2, y_position)
y_position -= y_step
# # number of pages in binary (MSB first with sequence:
# # 00, 01, 10, 11, 00, 01, ...)
# p.line(x1, y_position, x2, y_position)
# y_position -= y_step
#
# p.line(x1, y_position, x2, y_position)
# y_position -= y_step
# parity mark (total number of marks should be pair)
if self._display_parity(is_latest_page):
p.line(x1, y_position, x2, y_position)
y_position -= y_step
# end mark (compulsory)
p.line(x1, y_position, x2, y_position)
# Close the PDF object cleanly.
p.showPage()
p.save()
# move to the beginning of the StringIO buffer
omr_buffer.seek(0)
omr_pdf = PdfFileReader(omr_buffer)
# add the omr marks to the page
page = existing_pdf.getPage(page_number)
page.mergePage(omr_pdf.getPage(0))
else:
page = existing_pdf.getPage(page_number)
output.addPage(page)
out_buffer = StringIO.StringIO()
output.write(out_buffer)
return out_buffer.getvalue()
##########################################################################
# PRIVATE METHODS #
##########################################################################
    def _send_mail(self):
        """
        Called for sending the communication by e-mail.

        :return: state of the communication depending if the e-mail was
                 successfully sent or not.
        """
        self.ensure_one()
        partner = self.partner_id
        # Send by e-mail
        email = self.email_id
        if not email:
            email_vals = {
                'recipient_ids': [(4, partner.id)],
                'communication_config_id': self.config_id.id,
                'body_html': self.body_html,
                'subject': self.subject,
                'attachment_ids': [(6, 0, self.ir_attachment_ids.ids)],
                'auto_delete': False,
                'reply_to': self.email_template_id.reply_to or
                self.user_id.email
            }
            if self.email_to:
                # Replace partner e-mail by specified address
                email_vals['email_to'] = self.email_to
                del email_vals['recipient_ids']
            if 'default_email_vals' in self.env.context:
                email_vals.update(
                    self.env.context['default_email_vals'])
            email = self.env['mail.compose.message'].with_context(
                lang=partner.lang).create_emails(
                self.email_template_id, [self.id], email_vals)
            self.email_id = email
            email.send()
            # Subscribe author to thread, so that the reply
            # notifies the author.
            self.message_subscribe(self.user_id.partner_id.ids)
        return 'done' if email.state == 'sent' else 'pending'
    def _print_report(self):
        """Render each job's report, send it to the configured printer,
        print its attachments, and mark the job done.
        """
        report_obj = self.env['report']
        for job in self:
            # Get pdf should directly send it to the printer if report
            # is correctly configured.
            to_print = report_obj.with_context(
                print_name=self.env.user.firstname[:3] + ' ' + (
                    job.subject or ''),
                must_skip_send_to_printer=True
            ).get_pdf(job.ids, job.report_id.report_name)
            # Print letter
            report = job.report_id
            behaviour = report.behaviour()[report.id]
            printer = behaviour['printer']
            if printer:
                printer.print_document(
                    report, to_print, report.report_type)
            # Print attachments
            job.attachment_ids.print_attachments()
            # Save info
            job.partner_id.message_post(
                job.body_html, job.subject)
            job.write({
                'state': 'done',
                'sent_date': fields.Datetime.now()
            })
            # Commit to avoid invalid state if process fails
            self.env.cr.commit()  # pylint: disable=invalid-commit
        return True
def _display_parity(self, is_latest_page):
# current_page_number = 0
nb_displayed_marks = 2 # always display start and stop marks
# insert mark is displayed only on latest page
if is_latest_page:
nb_displayed_marks += 1
# a mark is added if the envelope should not be closed
if not self.omr_should_close_envelope:
nb_displayed_marks += 1
# count attachment marks
if self.omr_add_attachment_tray_1:
nb_displayed_marks += 1
if self.omr_add_attachment_tray_2:
nb_displayed_marks += 1
# # page number (2) marks
# # no mark for page 00 (binary)
# # one mark for page 01 and 10 (binary)
# if current_page_number % 4 in {1, 2}:
# nb_displayed_marks += 1
# # two marks for page 11 (binary)
# elif current_page_number % 4 is 3:
# nb_displayed_marks += 2
# if the nb_displayed_marks is pair, do not display the parity
if nb_displayed_marks % 2 is 0:
return False
else:
return True
@api.model
def _needaction_domain_get(self):
"""
Used to display a count icon in the menu
:return: domain of jobs counted
"""
return [('state', 'in', ('call', 'pending'))]
CO-1885: refactor the OMR mark generation to make it more understandable.
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import StringIO
import logging
import threading
from HTMLParser import HTMLParser
from odoo.addons.base_phone import fields as phone_fields
from pyPdf import PdfFileReader, PdfFileWriter
from reportlab.lib.units import mm
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.colors import white
from odoo import api, models, fields, _
from odoo.exceptions import UserError
logger = logging.getLogger(__name__)
class MLStripper(HTMLParser):
    """ Used to remove HTML tags. """

    def __init__(self):
        # NOTE(review): relies on HTMLParser.__init__ doing nothing more
        # than calling reset() -- confirm for the Python version in use.
        self.reset()
        # text fragments collected between the HTML tags
        self.fed = []

    def handle_data(self, d):
        # called by HTMLParser for every text node; keep the raw text
        self.fed.append(d)

    def get_data(self):
        # concatenation of all text nodes, i.e. the input without tags
        return ''.join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed, keeping only the text."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
class CommunicationJob(models.Model):
    """ Communication Jobs are tasks that will either generate and send
    an e-mail or print a document when executed.

    It is useful to keep a history of the communication sent to partners
    and to send again (or print again) a particular communication.

    It is also useful to batch send communications without manually looking
    for which one to send by e-mail and which one to print.
    """
    _name = 'partner.communication.job'
    _description = 'Communication Job'
    _order = 'date desc,sent_date desc'
    _inherit = ['partner.communication.defaults', 'ir.needaction_mixin',
                'mail.thread']

    ##########################################################################
    #                                 FIELDS                                 #
    ##########################################################################
    # communication configuration; falls back on the generic default one
    config_id = fields.Many2one(
        'partner.communication.config', 'Type', required=True,
        default=lambda s: s.env.ref(
            'partner_communication.default_communication'),
    )
    model = fields.Char(related='config_id.model')
    partner_id = fields.Many2one(
        'res.partner', 'Send to', required=True, ondelete='cascade')
    partner_phone = phone_fields.Phone(related='partner_id.phone')
    partner_mobile = phone_fields.Phone(related='partner_id.mobile')
    country_id = fields.Many2one(related='partner_id.country_id')
    parent_id = fields.Many2one(related='partner_id.parent_id')
    # comma-separated list of ids of the records (of ``model``) the
    # communication is about -- normalized in create()/write()
    object_ids = fields.Char('Resource ids', required=True)
    date = fields.Datetime(default=fields.Datetime.now)
    sent_date = fields.Datetime(readonly=True)
    state = fields.Selection([
        ('call', _('Call partner')),
        ('pending', _('Pending')),
        ('done', _('Done')),
        ('cancel', _('Cancelled')),
    ], default='pending', readonly=True, track_visibility='onchange')
    # when set, the job waits for a phone call before being sent
    need_call = fields.Boolean(
        readonly=True,
        states={'pending': [('readonly', False)]}
    )
    auto_send = fields.Boolean(
        help='Job is processed at creation if set to true')
    send_mode = fields.Selection('send_mode_select')
    email_template_id = fields.Many2one(
        related='config_id.email_template_id', store=True)
    email_to = fields.Char(
        help='optional e-mail address to override recipient')
    email_id = fields.Many2one('mail.mail', 'Generated e-mail', readonly=True)
    phonecall_id = fields.Many2one('crm.phonecall', 'Phonecall log',
                                   readonly=True)
    body_html = fields.Html(sanitize=False)
    # number of pages of the rendered report (see count_pdf_page)
    pdf_page_count = fields.Integer(string='PDF size',
                                    readonly=True)
    subject = fields.Char()
    attachment_ids = fields.One2many(
        'partner.communication.attachment', 'communication_id',
        string="Attachments")
    # editable view over the underlying ir.attachment records
    ir_attachment_ids = fields.Many2many(
        'ir.attachment', string='Attachments',
        compute='_compute_ir_attachments',
        inverse='_inverse_ir_attachments',
        domain=[('report_id', '!=', False)]
    )
def _compute_ir_attachments(self):
for job in self:
job.ir_attachment_ids = job.mapped('attachment_ids.attachment_id')
    def count_pdf_page(self):
        """Store the rendered report's page count in ``pdf_page_count``
        for jobs that will be printed physically.

        Skipped in test mode, where the reporting stack is not exercised.
        """
        # ``testing`` is presumably set on the thread by the test
        # framework -- confirm against the test setup.
        test_mode = getattr(threading.currentThread(), 'testing', False)
        if not test_mode:
            for record in self.filtered('report_id'):
                if record.send_mode == 'physical':
                    # must_skip_send_to_printer: render the PDF without
                    # actually sending it to the physical printer
                    report_obj = record.env['report'].with_context(
                        lang=record.partner_id.lang,
                        must_skip_send_to_printer=True)
                    pdf_str = report_obj.get_pdf(record.ids,
                                                 record.report_id.report_name)
                    pdf = PdfFileReader(StringIO.StringIO(pdf_str))
                    record.pdf_page_count = pdf.getNumPages()
    def _inverse_ir_attachments(self):
        """Sync the editable ``ir_attachment_ids`` many2many back into the
        ``partner.communication.attachment`` lines.

        :raises UserError: when an added attachment has no printing
            configuration and printing is not disabled in context.
        """
        attach_obj = self.env['partner.communication.attachment']
        for job in self:
            for attachment in job.ir_attachment_ids:
                # only create lines for newly added attachments
                if attachment not in job.attachment_ids.mapped(
                        'attachment_id'):
                    # a printing configuration (report_id) is mandatory
                    # unless the ``no_print`` context key is set
                    if not attachment.report_id and not \
                            self.env.context.get('no_print'):
                        raise UserError(
                            _("Please select a printing configuration for the "
                              "attachments you add.")
                        )
                    attach_obj.create({
                        'name': attachment.name,
                        'communication_id': job.id,
                        'report_name': attachment.report_id.report_name or '',
                        'attachment_id': attachment.id
                    })
            # Remove deleted attachments
            job.attachment_ids.filtered(
                lambda a: a.attachment_id not in job.ir_attachment_ids
            ).unlink()
@api.model
def send_mode_select(self):
return [
('digital', _('By e-mail')),
('physical', _('Print report')),
('both', _('Both'))
]
##########################################################################
# ORM METHODS #
##########################################################################
    @api.model
    def create(self, vals):
        """ If a pending communication for same partner exists,
        add the object_ids to it. Otherwise, create a new communication.

        opt-out partners won't create any communication.

        :param vals: record values; ``object_ids`` may be a list, an
            integer or a string and is normalized to a comma-separated
            string of ids.
        :return: the (possibly pre-existing, merged) job record
        """
        # Object ids accept lists, integer or string values. It should contain
        # a comma separated list of integers
        object_ids = vals.get('object_ids')
        if isinstance(object_ids, list):
            vals['object_ids'] = ','.join(map(str, object_ids))
        elif object_ids:
            vals['object_ids'] = str(object_ids)
        else:
            # fall back on the partner itself as the communication subject
            vals['object_ids'] = str(vals['partner_id'])

        # look for an existing pending job of the same type for the same
        # partner; the generic default communication is never merged
        same_job_search = [
            ('partner_id', '=', vals.get('partner_id')),
            ('config_id', '=', vals.get('config_id')),
            ('config_id', '!=',
             self.env.ref('partner_communication.default_communication').id),
            ('state', 'in', ('call', 'pending'))
        ] + self.env.context.get('same_job_search', [])
        # NOTE(review): search() may return several jobs, in which case
        # the assignment below would fail -- confirm uniqueness is
        # guaranteed by the domain.
        job = self.search(same_job_search)
        if job:
            # merge into the existing job instead of creating a new one
            job.object_ids = job.object_ids + ',' + vals['object_ids']
            job.refresh_text()
            return job

        self._get_default_vals(vals)
        job = super(CommunicationJob, self).create(vals)

        # Determine send mode
        send_mode = job.config_id.get_inform_mode(job.partner_id)
        if 'send_mode' not in vals and 'default_send_mode' not in \
                self.env.context:
            job.send_mode = send_mode[0]
        if 'auto_send' not in vals and 'default_auto_send' not in \
                self.env.context:
            job.auto_send = send_mode[1]

        # generate body/subject from the template when no real text given
        if not job.body_html or not strip_tags(job.body_html):
            job.refresh_text()
        else:
            job.set_attachments()

        # Check if phonecall is needed
        if job.need_call or job.config_id.need_call:
            job.state = 'call'

        if job.body_html or job.send_mode == 'physical':
            job.count_pdf_page()

        if job.auto_send:
            job.send()
        return job
    @api.model
    def _get_default_vals(self, vals, default_vals=None):
        """
        Used at record creation to find default values given the config of
        the communication.

        ``default_vals`` is extended in place, which lets overriding
        modules add their own fields before calling super.

        :param vals: dict: record values
        :param default_vals: list of fields to copy from config to job.
        :return: config record to use in inheritances.
                 The vals dict is updated.
        """
        if default_vals is None:
            default_vals = []
        default_vals.extend(['report_id', 'need_call', 'omr_enable_marks',
                             'omr_should_close_envelope',
                             'omr_add_attachment_tray_1',
                             'omr_add_attachment_tray_2'])
        config = self.config_id.browse(vals['config_id'])
        # Determine user by default : take in config or employee
        if not vals.get('user_id'):
            vals['user_id'] = config.user_id.id or self.env.uid
        # Check all default_vals fields
        for default_val in default_vals:
            if default_val not in vals:
                value = getattr(config, default_val)
                if default_val.endswith('_id'):
                    # store the database id, not the recordset
                    value = value.id
                vals[default_val] = value
        return config
@api.multi
def write(self, vals):
object_ids = vals.get('object_ids')
if isinstance(object_ids, list):
vals['object_ids'] = ','.join(map(str, object_ids))
elif object_ids:
vals['object_ids'] = str(object_ids)
if vals.get('need_call'):
vals['state'] = 'call'
super(CommunicationJob, self).write(vals)
if vals.get('body_html') or vals.get('send_mode') == 'physical':
self.count_pdf_page()
return True
##########################################################################
# PUBLIC METHODS #
##########################################################################
    @api.multi
    def send(self):
        """ Executes the job: send by e-mail and/or print the document.

        Jobs waiting for a phone call (``need_call``) are skipped.

        :return: result of the print job, or True
        """
        no_call = self.filtered(lambda j: not j.need_call)
        to_print = no_call.filtered(lambda j: j.send_mode == 'physical')
        for job in no_call.filtered(lambda j: j.send_mode in ('both',
                                                              'digital')):
            state = job._send_mail()
            if job.send_mode != 'both':
                job.write({
                    'state': state,
                    # stays False while the e-mail is still pending
                    'sent_date': state != 'pending' and fields.Datetime.now()
                })
            else:
                # Job was sent by e-mail and must now be printed
                # NOTE(review): ``to_print`` was computed before this
                # switch, so 'both' jobs are only printed on a later
                # send() call -- confirm this is intended.
                job.send_mode = 'physical'
                job.refresh_text()
        if to_print:
            return to_print._print_report()
        return True
@api.multi
def cancel(self):
to_call = self.filtered(lambda j: j.state == 'call')
to_call.write({'state': 'pending', 'need_call': False})
(self - to_call).write({'state': 'cancel'})
return True
@api.multi
def reset(self):
self.write({
'state': 'pending',
'date_sent': False,
'email_id': False,
})
return True
@api.multi
def refresh_text(self, refresh_uid=False):
self.mapped('attachment_ids').unlink()
self.set_attachments()
for job in self:
if job.email_template_id and job.object_ids:
fields = self.env['mail.compose.message'].with_context(
lang=job.partner_id.lang).get_generated_fields(
job.email_template_id, [job.id])
job.write({
'body_html': fields['body_html'],
'subject': fields['subject'],
})
if refresh_uid:
job.user_id = self.env.user
if job.state == 'call' and not job.need_call:
job.state = 'pending'
return True
@api.multi
def quick_refresh(self):
# Only refresh text and subject, all at once
jobs = self.filtered('email_template_id').filtered('object_ids')
langs = set(jobs.mapped('partner_id.lang'))
template = jobs.mapped('email_template_id')
if len(langs) > 1:
raise UserError(_("This is only possible for one lang at time"))
if len(template) > 1:
raise UserError(_(
"This is only possible for one template at time"))
values = self.env['mail.compose.message'].with_context(
lang=langs.pop()).get_generated_fields(template, jobs.ids)
if not isinstance(values, list):
values = [values]
for index in range(0, len(values)):
jobs[index].write({
'body_html': values[index]['body_html'],
'subject': values[index]['subject']
})
return True
    @api.onchange('config_id', 'partner_id')
    def onchange_config_id(self):
        """Propagate config defaults to the form when type/partner change."""
        if self.config_id and self.partner_id:
            send_mode = self.config_id.get_inform_mode(self.partner_id)
            self.send_mode = send_mode[0]
            # set default fields
            default_vals = {'config_id': self.config_id.id}
            self._get_default_vals(default_vals)
            for key, val in default_vals.iteritems():
                if key.endswith('_id'):
                    # convert the stored id back into a recordset
                    val = getattr(self, key).browse(val)
                setattr(self, key, val)
@api.multi
def open_related(self):
object_ids = map(int, self.object_ids.split(','))
action = {
'name': _('Related objects'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': self.config_id.model,
'context': self.with_context(group_by=False).env.context,
'target': 'current',
}
if len(object_ids) > 1:
action.update({
'view_mode': 'tree,form',
'domain': [('id', 'in', object_ids)]
})
else:
action['res_id'] = object_ids[0]
return action
    @api.multi
    def log_call(self):
        """Open the call logging wizard for this communication.

        :return: window action for the call wizard
        """
        return {
            'name': _("Log your call"),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'partner.communication.call.wizard',
            'context': self.with_context({
                'click2dial_id': self.partner_id.id,
                # prefer the landline, fall back on the mobile number
                'phone_number': self.partner_phone or self.partner_mobile,
                'call_name': self.config_id.name,
                'timestamp': fields.Datetime.now(),
                'communication_id': self.id,
            }).env.context,
            'target': 'new',
        }
    @api.multi
    def call(self):
        """ Call partner from tree view button.

        Triggers the click2dial telephony integration, then opens the
        call logging wizard.
        """
        self.ensure_one()
        self.env['phone.common'].with_context(
            click2dial_model=self._name, click2dial_id=self.id)\
            .click2dial(self.partner_phone or self.partner_mobile)
        return self.log_call()
@api.multi
def get_objects(self):
config = self.mapped('config_id')
config.ensure_one()
object_ids = list()
object_id_strings = self.mapped('object_ids')
for id_strings in object_id_strings:
object_ids += map(int, id_strings.split(','))
return self.env[config.model].browse(set(object_ids))
    @api.multi
    def set_attachments(self):
        """
        Generates attachments for the communication and link them to the
        communication record.

        The config may define an ``attachments_function``: a method name
        called on the job, presumably returning a dict of
        ``{name: (report_name, data)}`` (inferred from the create()
        values below -- confirm against the config model).
        """
        attachment_obj = self.env['partner.communication.attachment']
        # skip the physical printer while rendering attachment reports
        for job in self.with_context(must_skip_send_to_printer=True):
            if job.config_id.attachments_function:
                binaries = getattr(
                    job.with_context(lang=job.partner_id.lang),
                    job.config_id.attachments_function, lambda: dict())()
                for name, data in binaries.iteritems():
                    attachment_obj.create({
                        'name': name,
                        'communication_id': job.id,
                        'report_name': data[0],
                        'data': data[1],
                    })
@api.multi
def preview_pdf(self):
preview_model = 'partner.communication.pdf.wizard'
preview = self.env[preview_model].create({
'communication_id': self.id
})
return {
'name': _("Preview"),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': preview_model,
'res_id': preview.id,
'context': self.env.context,
'target': 'new',
}
    @api.multi
    def message_post(self, **kwargs):
        """
        If message is not from a user, it is probably the answer of the
        partner by e-mail. We post it on the partner thread instead of
        the communication thread.

        :param kwargs: arguments forwarded to mail.thread.message_post
        :return: id of the posted mail.message (the id, not the record)
        """
        message = super(CommunicationJob, self).message_post(**kwargs)
        # an author without any linked user comes from outside the system
        if not message.author_id.user_ids:
            message.write({
                'model': 'res.partner',
                'res_id': self.partner_id.id
            })
        return message.id
@api.multi
def add_omr_marks(self, pdf_data, is_latest_document):
# Documentation
# http://meteorite.unm.edu/site_media/pdf/reportlab-userguide.pdf
# https://pythonhosted.org/PyPDF2/PdfFileReader.html
# https://stackoverflow.com/a/17538003
# https://gist.github.com/kzim44/5023021
# https://www.blog.pythonlibrary.org/2013/07/16/
# pypdf-how-to-write-a-pdf-to-memory/
self.ensure_one()
pdf_buffer = StringIO.StringIO()
pdf_buffer.write(pdf_data)
existing_pdf = PdfFileReader(pdf_buffer)
output = PdfFileWriter()
total_pages = existing_pdf.getNumPages()
# print latest omr mark on latest pair page (recto)
latest_omr_page = total_pages // 2
for page_number in range(total_pages):
page = existing_pdf.getPage(page_number)
# only print omr marks on pair pages (recto)
if page_number % 2 is 0:
is_latest_page = is_latest_document and \
page_number == latest_omr_page
marks = self._compute_marks(is_latest_page)
omr_layer = self._build_omr_layer(marks)
page.mergePage(omr_layer)
output.addPage(page)
out_buffer = StringIO.StringIO()
output.write(out_buffer)
return out_buffer.getvalue()
def _compute_marks(self, is_latest_page):
marks = [
True, # Start mark (compulsory)
is_latest_page,
is_latest_page and self.omr_add_attachment_tray_1,
is_latest_page and self.omr_add_attachment_tray_2,
is_latest_page and not self.omr_should_close_envelope
]
parity_check = sum(marks) % 2 == 0
marks.append(parity_check)
marks.append(True) # End mark (compulsory)
return marks
@staticmethod
def _build_omr_layer(marks):
# margin around the omr code which should stay white
horizontal_margin = 4.2 * mm
vertical_margin = 8.5 * mm
x1 = 194 * mm
y1 = 180 * mm
y_step = 4 * mm
number_of_marks = len(marks)
orm_mark_length = len(marks) * mm
x2 = x1 + orm_mark_length
omr_buffer = StringIO.StringIO()
omr_canvas = Canvas(omr_buffer)
omr_canvas.setLineWidth(0.2 * mm)
# add a white background for the omr code
omr_canvas.setFillColor(white)
omr_canvas.rect(
x1 - horizontal_margin,
y1 - (number_of_marks - 1) * y_step - vertical_margin,
orm_mark_length + 2 * horizontal_margin,
(number_of_marks - 1) * y_step + 2 * vertical_margin,
fill=True,
stroke=False
)
for offset, mark in enumerate(marks):
y_position = y1 - offset * y_step
if mark:
omr_canvas.line(x1, y_position, x2, y_position)
# Close the PDF object cleanly.
omr_canvas.showPage()
omr_canvas.save()
# move to the beginning of the StringIO buffer
omr_buffer.seek(0)
omr_pdf = PdfFileReader(omr_buffer)
return omr_pdf.getPage(0)
##########################################################################
# PRIVATE METHODS #
##########################################################################
    def _send_mail(self):
        """
        Called for sending the communication by e-mail.

        :return: state of the communication depending if the e-mail was
            successfully sent or not ('done' or 'pending').
        """
        self.ensure_one()
        partner = self.partner_id
        # Send by e-mail (reuse an already generated e-mail if any)
        email = self.email_id
        if not email:
            email_vals = {
                'recipient_ids': [(4, partner.id)],
                'communication_config_id': self.config_id.id,
                'body_html': self.body_html,
                'subject': self.subject,
                'attachment_ids': [(6, 0, self.ir_attachment_ids.ids)],
                # keep the mail record so the job history stays complete
                'auto_delete': False,
                'reply_to': self.email_template_id.reply_to or
                self.user_id.email
            }
            if self.email_to:
                # Replace partner e-mail by specified address
                email_vals['email_to'] = self.email_to
                del email_vals['recipient_ids']
            if 'default_email_vals' in self.env.context:
                email_vals.update(
                    self.env.context['default_email_vals'])
            email = self.env['mail.compose.message'].with_context(
                lang=partner.lang).create_emails(
                self.email_template_id, [self.id], email_vals)
            self.email_id = email
            email.send()
            # Subscribe author to thread, so that the reply
            # notifies the author.
            self.message_subscribe(self.user_id.partner_id.ids)
        return 'done' if email.state == 'sent' else 'pending'
    def _print_report(self):
        """Render each job's report, send it to the configured printer,
        then mark the job done.

        :return: True
        """
        report_obj = self.env['report']
        for job in self:
            # Get pdf should directly send it to the printer if report
            # is correctly configured.
            to_print = report_obj.with_context(
                print_name=self.env.user.firstname[:3] + ' ' + (
                    job.subject or ''),
                must_skip_send_to_printer=True
            ).get_pdf(job.ids, job.report_id.report_name)
            # Print letter
            report = job.report_id
            behaviour = report.behaviour()[report.id]
            printer = behaviour['printer']
            if printer:
                printer.print_document(
                    report, to_print, report.report_type)
            # Print attachments
            job.attachment_ids.print_attachments()
            # Save info
            job.partner_id.message_post(
                job.body_html, job.subject)
            job.write({
                'state': 'done',
                'sent_date': fields.Datetime.now()
            })
            # Commit to avoid invalid state if process fails
            self.env.cr.commit()  # pylint: disable=invalid-commit
        return True
@api.model
def _needaction_domain_get(self):
"""
Used to display a count icon in the menu
:return: domain of jobs counted
"""
return [('state', 'in', ('call', 'pending'))]
|
placements = r'''
>>> from django.contrib import admin
>>> from django.utils.datastructures import MultiValueDict
>>> from ella.core.models import Placement
>>> from ella.core.admin import PlacementInlineOptions
>>> from coretest.admin_tests.models import SampleAdminObject, EmptyAdminObject
>>> empty_data = MultiValueDict({
... "core-placement-target_ct-target_id-TOTAL_FORMS": ["2"],
... "core-placement-target_ct-target_id-INITIAL_FORMS": ["0"],
...
... "core-placement-target_ct-target_id-0-category": [""],
... "core-placement-target_ct-target_id-0-publish_from_0": [""],
... "core-placement-target_ct-target_id-0-publish_from_1": [""],
... "core-placement-target_ct-target_id-0-publish_to_0": [""],
... "core-placement-target_ct-target_id-0-publish_to_1": [""],
... "core-placement-target_ct-target_id-0-slug": [""],
... "core-placement-target_ct-target_id-0-static": [""],
... "core-placement-target_ct-target_id-0-listings": [],
...
... "core-placement-target_ct-target_id-1-category": [""],
... "core-placement-target_ct-target_id-1-publish_from_0": [""],
... "core-placement-target_ct-target_id-1-publish_from_1": [""],
... "core-placement-target_ct-target_id-1-publish_to_0": [""],
... "core-placement-target_ct-target_id-1-publish_to_1": [""],
... "core-placement-target_ct-target_id-1-slug": [""],
... "core-placement-target_ct-target_id-1-static": [""],
... "core-placement-target_ct-target_id-1-listings": [],
...})
# disable pre-filling current date to publish_from
>>> Placement._meta.get_field('publish_from').default=None
>>> sao1 = SampleAdminObject.objects.get(pk=1)
>>> pio = PlacementInlineOptions(SampleAdminObject, admin.site)
>>> pif = pio.get_formset(None)
>>> pifi = pif(data=empty_data, files={}, instance=sao1)
>>> pifi.is_valid()
True
>>> pifi.cleaned_data
[{}, {}]
# create first placement
>>> data = empty_data.copy()
>>> data.update(MultiValueDict({
... "core-placement-target_ct-target_id-0-category": ["1"],
... "core-placement-target_ct-target_id-0-publish_from_0": ["2007-07-01"],
... "core-placement-target_ct-target_id-0-publish_from_1": ["00:00:00"],
...}))
>>> pifi = pif(data=data, files={}, instance=sao1)
>>> pifi.is_valid()
True
>>> pifi.errors
[{}, {}]
>>> pifi.cleaned_data
[{'category': <Category: example.com/>, 'publish_from': datetime.datetime(2007, 7, 1, 0, 0), 'listings': [], 'publish_to': None, 'id': None, 'static': False, 'slug': u''}, {}]
>>> objs = pifi.save()
>>> placement = objs[0]
>>> placement.slug
u'sample1'
>>> sao1.get_absolute_url()
'/2007/7/1/sample-admin-objects/sample1/'
'''
__test__ = {
'placements' : placements,
}
if __name__ == '__main__':
import doctest
doctest.testmod()
Added tests for listings field in the PlacementForm
git-svn-id: 68306dcae4dfdaa1cb4d69e3361815c000c4314b@969 2d143e24-0a30-0410-89d7-a2e95868dc81
placements = r'''
>>> from django.contrib import admin
>>> from django.utils.datastructures import MultiValueDict
>>> from ella.core.models import Placement
>>> from ella.core.admin import PlacementInlineOptions
>>> from coretest.admin_tests.models import SampleAdminObject, EmptyAdminObject
>>> empty_data = MultiValueDict({
... "core-placement-target_ct-target_id-TOTAL_FORMS": ["2"],
... "core-placement-target_ct-target_id-INITIAL_FORMS": ["0"],
...
... "core-placement-target_ct-target_id-0-category": [""],
... "core-placement-target_ct-target_id-0-publish_from_0": [""],
... "core-placement-target_ct-target_id-0-publish_from_1": [""],
... "core-placement-target_ct-target_id-0-publish_to_0": [""],
... "core-placement-target_ct-target_id-0-publish_to_1": [""],
... "core-placement-target_ct-target_id-0-slug": [""],
... "core-placement-target_ct-target_id-0-static": [""],
... "core-placement-target_ct-target_id-0-listings": [],
...
... "core-placement-target_ct-target_id-1-category": [""],
... "core-placement-target_ct-target_id-1-publish_from_0": [""],
... "core-placement-target_ct-target_id-1-publish_from_1": [""],
... "core-placement-target_ct-target_id-1-publish_to_0": [""],
... "core-placement-target_ct-target_id-1-publish_to_1": [""],
... "core-placement-target_ct-target_id-1-slug": [""],
... "core-placement-target_ct-target_id-1-static": [""],
... "core-placement-target_ct-target_id-1-listings": [],
...})
# disable pre-filling current date to publish_from
>>> Placement._meta.get_field('publish_from').default=None
>>> sao1 = SampleAdminObject.objects.get(pk=1)
>>> pio = PlacementInlineOptions(SampleAdminObject, admin.site)
>>> pif = pio.get_formset(None)
>>> pifi = pif(data=empty_data, files={}, instance=sao1)
>>> pifi.is_valid()
True
>>> pifi.cleaned_data
[{}, {}]
# create first placement and one listing
>>> data = empty_data.copy()
>>> data.update(MultiValueDict({
... "core-placement-target_ct-target_id-0-category": ["1"],
... "core-placement-target_ct-target_id-0-publish_from_0": ["2007-07-01"],
... "core-placement-target_ct-target_id-0-publish_from_1": ["00:00:00"],
... "core-placement-target_ct-target_id-0-listings": ["1"],
...}))
>>> pifi = pif(data=data, files={}, instance=sao1)
>>> pifi.is_valid()
True
>>> pifi.errors
[{}, {}]
>>> pifi.cleaned_data
[{'category': <Category: example.com/>, 'publish_from': datetime.datetime(2007, 7, 1, 0, 0), 'listings': [<Category: example.com/>], 'publish_to': None, 'id': None, 'static': False, 'slug': u''}, {}]
>>> objs = pifi.save()
>>> placement = objs[0]
>>> placement.slug
u'sample1'
>>> placement.listing_set.all()
[<Listing: SampleAdminObject object listed in example.com/>]
>>> sao1.get_absolute_url()
'/2007/7/1/sample-admin-objects/sample1/'
# delete the created listing
>>> data = empty_data.copy()
>>> data.update(MultiValueDict({
... "core-placement-target_ct-target_id-TOTAL_FORMS": ["2"],
... "core-placement-target_ct-target_id-INITIAL_FORMS": ["1"],
...
... "core-placement-target_ct-target_id-0-id": [str(placement.pk)],
... "core-placement-target_ct-target_id-0-category": ["1"],
... "core-placement-target_ct-target_id-0-publish_from_0": ["2007-07-01"],
... "core-placement-target_ct-target_id-0-publish_from_1": ["00:00:00"],
... "core-placement-target_ct-target_id-0-listings": [],
...}))
>>> pifi = pif(data=data, files={}, instance=sao1)
>>> pifi.is_valid()
True
>>> objs = pifi.save()
>>> from ella.core.models import Listing
>>> Listing.objects.filter(placement=placement)
[]
'''
__test__ = {
'placements' : placements,
}
if __name__ == '__main__':
import doctest
doctest.testmod()
|
import os.path
import time
from collections import namedtuple
from evdev import UInput, UInputError, ecodes
from .exceptions import DeviceError
# prefixes accepted in button mappings (see parse_button)
BUTTON_MODIFIERS = ("+", "-")

# analog value offset from center (128) beyond which a +/- modifier
# button counts as pressed
DEFAULT_A2D_DEADZONE = 50
# (min, max, fuzz, flat) used for axes without explicit options
DEFAULT_AXIS_OPTIONS = (0, 255, 0, 5)
# NOTE(review): "SENSITIVTY" typo kept on purpose -- the name is
# referenced elsewhere in this module
DEFAULT_MOUSE_SENSITIVTY = 0.6
DEFAULT_MOUSE_DEADZONE = 5
DEFAULT_SCROLL_REPEAT_DELAY = .250  # Seconds to wait before continual scrolling
DEFAULT_SCROLL_DELAY = .035  # Seconds to wait between scroll events

# immutable description of one uinput device layout
UInputMapping = namedtuple("UInputMapping",
                           "name bustype vendor product version "
                           "axes axes_options buttons hats keys mouse "
                           "mouse_options")

# registry of named layouts, filled by create_mapping()
_mappings = {}

# Add our simulated mousewheel codes
ecodes.REL_WHEELUP = 13  # Unique value for this lib
ecodes.REL_WHEELDOWN = 14  # Ditto
def parse_button(attr):
    """Split an optional leading +/- modifier from a button attribute.

    Returns ``(name, modifier)`` where ``modifier`` is "+", "-", or
    None when the attribute has no modifier prefix.
    """
    modifier = None
    if attr[0] in BUTTON_MODIFIERS:
        modifier, attr = attr[0], attr[1:]
    return (attr, modifier)
def create_mapping(name, description, bustype=0, vendor=0, product=0,
                   version=0, axes=None, axes_options=None, buttons=None,
                   hats=None, keys=None, mouse=None, mouse_options=None):
    """Register a uinput device layout under *name* in ``_mappings``.

    String event names (e.g. "ABS_X") are resolved to their numeric
    evdev codes via ``ecodes``.

    BUGFIX: the original declared mutable dict defaults (``axes={}``
    etc.), which are shared between calls; use None sentinels instead.
    Passing an explicit dict (or nothing) behaves exactly as before.
    """
    axes = axes or {}
    axes_options = axes_options or {}
    buttons = buttons or {}
    hats = hats or {}
    keys = keys or {}
    mouse = mouse or {}
    mouse_options = mouse_options or {}

    # resolve symbolic event names to numeric evdev codes
    axes = {getattr(ecodes, k): v for k, v in axes.items()}
    axes_options = {getattr(ecodes, k): v for k, v in axes_options.items()}
    buttons = {getattr(ecodes, k): parse_button(v) for k, v in buttons.items()}
    hats = {getattr(ecodes, k): v for k, v in hats.items()}
    mouse = {getattr(ecodes, k): v for k, v in mouse.items()}
    # NOTE(review): ``keys`` is stored untranslated -- presumably
    # resolved later by the consumer; confirm.

    mapping = UInputMapping(description, bustype, vendor, product, version,
                            axes, axes_options, buttons, hats, keys, mouse,
                            mouse_options)
    _mappings[name] = mapping
# Pre-configued mappings
# Emulate it the (mostly) correct way
create_mapping(
"ds4drv", "Sony Computer Entertainment Wireless Controller (Userspace)",
# Bus type, vendor, product, version
ecodes.BUS_USB, 1356, 1476, 273,
# Axes
{
"ABS_X": "left_analog_x",
"ABS_Y": "left_analog_y",
"ABS_RX": "right_analog_x",
"ABS_RY": "right_analog_y",
"ABS_Z": "l2_analog",
"ABS_RZ": "r2_analog",
"ABS_THROTTLE": "orientation_roll",
"ABS_RUDDER": "orientation_pitch",
"ABS_WHEEL": "orientation_yaw",
"ABS_DISTANCE": "motion_z",
"ABS_TILT_X": "motion_x",
"ABS_TILT_Y": "motion_y",
},
# Axes options
{
"ABS_THROTTLE": (-16385, 16384, 0, 0),
"ABS_RUDDER": (-16385, 16384, 0, 0),
"ABS_WHEEL": (-16385, 16384, 0, 0),
"ABS_DISTANCE": (-32768, 32767, 0, 10),
"ABS_TILT_X": (-32768, 32767, 0, 10),
"ABS_TILT_Y": (-32768, 32767, 0, 10),
},
# Buttons
{
"BTN_START": "button_options",
"BTN_MODE": "button_ps",
"BTN_SELECT": "button_share",
"BTN_A": "button_cross",
"BTN_B": "button_circle",
"BTN_X": "button_square",
"BTN_Y": "button_triangle",
"BTN_TL": "button_l1",
"BTN_TR": "button_r1",
"BTN_THUMBL": "button_l3",
"BTN_THUMBR": "button_r3",
},
# Hats
{
"ABS_HAT0X": ("dpad_left", "dpad_right"),
"ABS_HAT0Y": ("dpad_up", "dpad_down")
},
)
#Emulate the way the kernel does it
create_mapping(
"ds4", "Sony Computer Entertainment Wireless Controller",
# Bus type, vendor, product, version
ecodes.BUS_USB, 1356, 1476, 273,
# Axes
{
"ABS_X": "left_analog_x",
"ABS_Y": "left_analog_y",
"ABS_Z": "right_analog_x",
"ABS_RZ": "right_analog_y",
"ABS_RX": "l2_analog",
"ABS_RY": "r2_analog",
"ABS_THROTTLE": "orientation_roll",
"ABS_RUDDER": "orientation_pitch",
"ABS_WHEEL": "orientation_yaw",
"ABS_DISTANCE": "motion_z",
"ABS_TILT_X": "motion_x",
"ABS_TILT_Y": "motion_y",
},
# Axes options
{
"ABS_THROTTLE": (-16385, 16384, 0, 0),
"ABS_RUDDER": (-16385, 16384, 0, 0),
"ABS_WHEEL": (-16385, 16384, 0, 0),
"ABS_DISTANCE": (-32768, 32767, 0, 10),
"ABS_TILT_X": (-32768, 32767, 0, 10),
"ABS_TILT_Y": (-32768, 32767, 0, 10),
},
# Buttons
{
"BTN_TR2": "button_options",
"BTN_MODE": "button_ps",
"BTN_TL2": "button_share",
"BTN_B": "button_cross",
"BTN_C": "button_circle",
"BTN_A": "button_square",
"BTN_X": "button_triangle",
"BTN_Y": "button_l1",
"BTN_Z": "button_r1",
"BTN_TL": "button_l2",
"BTN_TR": "button_r2",
"BTN_SELECT": "button_l3",
"BTN_START": "button_r3",
"BTN_THUMBL": "button_trackpad"
},
# Hats
{
"ABS_HAT0X": ("dpad_left", "dpad_right"),
"ABS_HAT0Y": ("dpad_up", "dpad_down")
}
)
#Emulate xboxdrv's button assignments.
create_mapping(
"xboxdrv", "Xbox Gamepad (userspace driver)",
# Bus type, vendor, product, version
0, 0, 0, 0,
# Axes
{
"ABS_X": "left_analog_x",
"ABS_Y": "left_analog_y",
"ABS_RX": "right_analog_x",
"ABS_RY": "right_analog_y",
"ABS_BRAKE": "l2_analog",
"ABS_GAS": "r2_analog"
},
# Axes settings
{},
#Buttons
{
"BTN_START": "button_options",
"BTN_MODE": "button_ps",
"BTN_SELECT": "button_share",
"BTN_A": "button_cross",
"BTN_B": "button_circle",
"BTN_X": "button_square",
"BTN_Y": "button_triangle",
"BTN_TL": "button_l1",
"BTN_TR": "button_r1",
"BTN_THUMBL": "button_l3",
"BTN_THUMBR": "button_r3"
},
# Hats
{
"ABS_HAT0X": ("dpad_left", "dpad_right"),
"ABS_HAT0Y": ("dpad_up", "dpad_down")
}
)
# Emulate the way the kernel does Xbox 360/Xbone controllers with xpad
create_mapping(
"xpad", "Microsoft X-Box 360 pad",
# Bus type, vendor, product, version
ecodes.BUS_USB, 1118, 654, 272,
# Axes
{
"ABS_X": "left_analog_x",
"ABS_Y": "left_analog_y",
"ABS_RX": "right_analog_x",
"ABS_RY": "right_analog_y",
"ABS_Z": "l2_analog",
"ABS_RZ": "r2_analog"
},
# Axes settings
{},
#Buttons
{
"BTN_START": "button_options",
"BTN_MODE": "button_ps",
"BTN_SELECT": "button_share",
"BTN_A": "button_cross",
"BTN_B": "button_circle",
"BTN_X": "button_square",
"BTN_Y": "button_triangle",
"BTN_TL": "button_l1",
"BTN_TR": "button_r1",
"BTN_THUMBL": "button_l3",
"BTN_THUMBR": "button_r3"
},
# Hats
{
"ABS_HAT0X": ("dpad_left", "dpad_right"),
"ABS_HAT0Y": ("dpad_up", "dpad_down")
}
)
#Emulate the way the kernel does Xbox 360 Wireless controllers with xpad
create_mapping(
"xpad_wireless", "Xbox 360 Wireless Receiver",
# Bus type, vendor, product, version
ecodes.BUS_USB, 1118, 1817, 256,
# Axes
{
"ABS_X": "left_analog_x",
"ABS_Y": "left_analog_y",
"ABS_RX": "right_analog_x",
"ABS_RY": "right_analog_y",
"ABS_Z": "l2_analog",
"ABS_RZ": "r2_analog"
},
# Axes settings
{},
#Buttons
{
"BTN_START": "button_options",
"BTN_MODE": "button_ps",
"BTN_SELECT": "button_share",
"BTN_A": "button_cross",
"BTN_B": "button_circle",
"BTN_X": "button_square",
"BTN_Y": "button_triangle",
"BTN_TL": "button_l1",
"BTN_TR": "button_r1",
"BTN_THUMBL": "button_l3",
"BTN_THUMBR": "button_r3",
"BTN_TRIGGER_HAPPY1": "dpad_left",
"BTN_TRIGGER_HAPPY2": "dpad_right",
"BTN_TRIGGER_HAPPY3": "dpad_up",
"BTN_TRIGGER_HAPPY4": "dpad_down",
},
)
#Emulate a multi-touch trackpad
create_mapping(
"mouse", "DualShock4 Mouse Emulation",
buttons={
"BTN_LEFT": "button_trackpad",
"BTN_TOOL_FINGER" : "trackpad_touch0_active",
"BTN_TOOL_DOUBLETAP" : "trackpad_touch1_active"
},
mouse={
"REL_X": "trackpad_touch0_x",
"REL_Y": "trackpad_touch0_y",
"REL_MT_X" : "trackpad_touch1_x",
"REL_MT_Y" : "trackpad_touch1_y"
},
)
class UInputDevice(object):
    def __init__(self, layout):
        # device node path, set by create_device when a joystick is made
        self.joystick_dev = None
        # NOTE(review): never assigned in the visible code -- presumably
        # set elsewhere; confirm.
        self.evdev_dev = None
        # buttons temporarily masked out of reports (forced False in emit)
        self.ignored_buttons = set()
        self.create_device(layout)
        # last value written per event code, used to skip duplicate writes
        self._write_cache = {}
        # per-direction scroll timing state (used by scroll handling,
        # not shown in this excerpt)
        self._scroll_details = {}
    def create_device(self, layout):
        """Creates a uinput device using the specified layout.

        Builds the evdev capability dict (absolute axes, keys, relative
        axes) from the layout, then creates the UInput device.
        """
        events = {ecodes.EV_ABS: [], ecodes.EV_KEY: [],
                  ecodes.EV_REL: []}

        # Joystick device
        if layout.axes or layout.buttons or layout.hats:
            self.joystick_dev = next_joystick_device()

        for name in layout.axes:
            params = layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)
            events[ecodes.EV_ABS].append((name, params))

        for name in layout.hats:
            # hats are absolute axes constrained to -1/0/1
            params = (-1, 1, 0, 0)
            events[ecodes.EV_ABS].append((name, params))

        for name in layout.buttons:
            events[ecodes.EV_KEY].append(name)

        if layout.mouse:
            self.mouse_pos = {}
            self.mouse_rel = {}
            # tunables, overridable through layout.mouse_options
            self.mouse_analog_sensitivity = float(
                layout.mouse_options.get("MOUSE_SENSITIVITY",
                                         DEFAULT_MOUSE_SENSITIVTY)
            )
            self.mouse_analog_deadzone = int(
                layout.mouse_options.get("MOUSE_DEADZONE",
                                         DEFAULT_MOUSE_DEADZONE)
            )
            self.scroll_repeat_delay = float(
                layout.mouse_options.get("MOUSE_SCROLL_REPEAT_DELAY",
                                         DEFAULT_SCROLL_REPEAT_DELAY)
            )
            self.scroll_delay = float(
                layout.mouse_options.get("MOUSE_SCROLL_DELAY",
                                         DEFAULT_SCROLL_DELAY)
            )

            for name in layout.mouse:
                if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):
                    if ecodes.REL_WHEEL not in events[ecodes.EV_REL]:
                        # This ensures that scroll wheel events can work
                        events[ecodes.EV_REL].append(ecodes.REL_WHEEL)
                else:
                    events[ecodes.EV_REL].append(name)
                    self.mouse_rel[name] = 0.0

        self.device = UInput(name=layout.name, events=events,
                             bustype=layout.bustype, vendor=layout.vendor,
                             product=layout.product, version=layout.version)
        self.layout = layout
def write_event(self, etype, code, value):
"""Writes a event to the device, if it has changed."""
last_value = self._write_cache.get(code)
if last_value != value:
self.device.write(etype, code, value)
self._write_cache[code] = value
def emit(self, report):
"""Writes axes, buttons and hats with values from the report to
the device."""
for name, attr in self.layout.axes.items():
value = getattr(report, attr)
self.write_event(ecodes.EV_ABS, name, value)
for name, attr in self.layout.buttons.items():
attr, modifier = attr
if attr in self.ignored_buttons:
value = False
else:
value = getattr(report, attr)
if modifier and "analog" in attr:
if modifier == "+":
value = value > (128 + DEFAULT_A2D_DEADZONE)
elif modifier == "-":
value = value < (128 - DEFAULT_A2D_DEADZONE)
self.write_event(ecodes.EV_KEY, name, value)
for name, attr in self.layout.hats.items():
if getattr(report, attr[0]):
value = -1
elif getattr(report, attr[1]):
value = 1
else:
value = 0
self.write_event(ecodes.EV_ABS, name, value)
self.device.syn()
def emit_reset(self):
"""Resets the device to a blank state."""
for name in self.layout.axes:
params = self.layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)
self.write_event(ecodes.EV_ABS, name, int(sum(params[:2]) / 2))
for name in self.layout.buttons:
self.write_event(ecodes.EV_KEY, name, False)
for name in self.layout.hats:
self.write_event(ecodes.EV_ABS, name, 0)
self.device.syn()
def emit_mouse(self, report):
"""Calculates relative mouse values from a report and writes them."""
for name, attr in self.layout.mouse.items():
if attr.startswith("trackpad_touch"):
active_attr = attr[:16] + "active"
if not getattr(report, active_attr):
self.mouse_pos.pop(name, None)
continue
pos = getattr(report, attr)
if name not in self.mouse_pos:
self.mouse_pos[name] = pos
sensitivity = 0.5
self.mouse_rel[name] += (pos - self.mouse_pos[name]) * sensitivity
self.mouse_pos[name] = pos
elif "analog" in attr:
pos = getattr(report, attr)
if (pos > (128 + self.mouse_analog_deadzone)
or pos < (128 - self.mouse_analog_deadzone)):
accel = (pos - 128) / 10
else:
continue
sensitivity = self.mouse_analog_sensitivity
self.mouse_rel[name] += accel * sensitivity
# Emulate mouse wheel (needs special handling)
if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):
ecode = ecodes.REL_WHEEL # The real event we need to emit
write = False
if getattr(report, attr):
self._scroll_details['direction'] = name
now = time.time()
last_write = self._scroll_details.get('last_write')
if not last_write:
# No delay for the first button press for fast feedback
write = True
self._scroll_details['count'] = 0
if name == ecodes.REL_WHEELUP:
value = 1
elif name == ecodes.REL_WHEELDOWN:
value = -1
if last_write:
# Delay at least one cycle before continual scrolling
if self._scroll_details['count'] > 1:
if now - last_write > self.scroll_delay:
write = True
elif now - last_write > self.scroll_repeat_delay:
write = True
if write:
self.device.write(ecodes.EV_REL, ecode, value)
self._scroll_details['last_write'] = now
self._scroll_details['count'] += 1
continue # No need to proceed further
else:
# Reset so you can quickly tap the button to scroll
if self._scroll_details.get('direction') == name:
self._scroll_details['last_write'] = 0
self._scroll_details['count'] = 0
rel = int(self.mouse_rel[name])
self.mouse_rel[name] = self.mouse_rel[name] - rel
self.device.write(ecodes.EV_REL, name, rel)
self.device.syn()
def create_uinput_device(mapping):
    """Creates a uinput device from a registered mapping name.

    Raises DeviceError for unknown mapping names or when the underlying
    uinput device cannot be created (e.g. no access to /dev/uinput).
    """
    if mapping not in _mappings:
        raise DeviceError("Unknown device mapping: {0}".format(mapping))

    # The registry lookup cannot raise UInputError; keep the try body
    # limited to the operation that can actually fail, and avoid
    # shadowing the mapping-name argument.
    layout = _mappings[mapping]
    try:
        device = UInputDevice(layout)
    except UInputError as err:
        raise DeviceError(err)

    return device
def parse_uinput_mapping(name, mapping):
    """Parses a dict of mapping options and registers the mapping."""
    description = "ds4drv custom mapping ({0})".format(name)
    axes, buttons, mouse, mouse_options = {}, {}, {}, {}

    # Route each option into the right bucket by its (upper-cased) prefix
    routes = (("BTN_", buttons), ("KEY_", buttons), ("ABS_", axes),
              ("REL_", mouse), ("MOUSE_", mouse_options))
    for raw_key, attr in mapping.items():
        upper_key = raw_key.upper()
        for prefix, bucket in routes:
            if upper_key.startswith(prefix):
                bucket[upper_key] = attr
                break

    create_mapping(name, description, axes=axes, buttons=buttons,
                   mouse=mouse, mouse_options=mouse_options)
def next_joystick_device():
    """Finds the next available (not yet existing) js device name."""
    candidates = ("/dev/input/js{0}".format(n) for n in range(100))
    for candidate in candidates:
        if not os.path.exists(candidate):
            return candidate
# Fixed multitouch  (NOTE(review): stray commit-message line from the dump,
# commented out so the module parses)
import os.path
import time
from collections import namedtuple
from evdev import UInput, UInputError, ecodes
from .exceptions import DeviceError
# Button report attributes may carry a leading "+"/"-" modifier that turns
# an analog trigger value into a digital press (see parse_button()).
BUTTON_MODIFIERS = ("+", "-")

# Deadzone around the 128 centre used for analog-to-digital conversion.
DEFAULT_A2D_DEADZONE = 50
# Axis options used when a layout supplies none for an axis.
DEFAULT_AXIS_OPTIONS = (0, 255, 0, 5)
DEFAULT_MOUSE_SENSITIVTY = 0.6  # NOTE: "SENSITIVTY" misspelling is part of the existing name
DEFAULT_MOUSE_DEADZONE = 5
DEFAULT_SCROLL_REPEAT_DELAY = .250  # Seconds to wait before continual scrolling
DEFAULT_SCROLL_DELAY = .035  # Seconds to wait between scroll events

# Describes a complete uinput device: identity fields plus the
# code -> report-attribute mappings for each event category.
UInputMapping = namedtuple("UInputMapping",
                           "name bustype vendor product version "
                           "axes axes_options buttons hats keys mouse "
                           "mouse_options")

# Registry of named mappings, populated by create_mapping().
_mappings = {}

# Add our simulated mousewheel codes
ecodes.REL_WHEELUP = 13  # Unique value for this lib
ecodes.REL_WHEELDOWN = 14  # Ditto
def parse_button(attr):
    """Splits an optional leading "+"/"-" modifier off a button attribute.

    Returns an (attribute_name, modifier) tuple where modifier is "+",
    "-" or None.
    """
    modifier = attr[0] if attr[0] in BUTTON_MODIFIERS else None
    name = attr[1:] if modifier else attr
    return (name, modifier)
def create_mapping(name, description, bustype=0, vendor=0, product=0,
                   version=0, axes=None, axes_options=None, buttons=None,
                   hats=None, keys=None, mouse=None, mouse_options=None):
    """Registers a UInputMapping under `name` in the _mappings registry.

    The mapping dicts are keyed by evdev code names (e.g. "ABS_X");
    they are resolved to numeric ecodes here.  Button values may carry
    a "+"/"-" modifier prefix (see parse_button()).

    Fix: the mutable `{}` default arguments are replaced with None
    sentinels.  The old shared defaults were stored directly into the
    mapping for `keys`/`mouse_options`, so every mapping created without
    them shared the same dict object.
    """
    axes = {getattr(ecodes, k): v for k, v in (axes or {}).items()}
    axes_options = {getattr(ecodes, k): v
                    for k, v in (axes_options or {}).items()}
    buttons = {getattr(ecodes, k): parse_button(v)
               for k, v in (buttons or {}).items()}
    hats = {getattr(ecodes, k): v for k, v in (hats or {}).items()}
    mouse = {getattr(ecodes, k): v for k, v in (mouse or {}).items()}
    # keys/mouse_options are stored as-is; give each mapping a fresh dict
    keys = keys if keys is not None else {}
    mouse_options = mouse_options if mouse_options is not None else {}

    mapping = UInputMapping(description, bustype, vendor, product, version,
                            axes, axes_options, buttons, hats, keys, mouse,
                            mouse_options)
    _mappings[name] = mapping
# Pre-configured mappings

# Emulate it the (mostly) correct way
create_mapping(
    "ds4drv", "Sony Computer Entertainment Wireless Controller (Userspace)",
    # Bus type, vendor, product, version
    ecodes.BUS_USB, 1356, 1476, 273,
    # Axes
    {
        "ABS_X": "left_analog_x",
        "ABS_Y": "left_analog_y",
        "ABS_RX": "right_analog_x",
        "ABS_RY": "right_analog_y",
        "ABS_Z": "l2_analog",
        "ABS_RZ": "r2_analog",
        "ABS_THROTTLE": "orientation_roll",
        "ABS_RUDDER": "orientation_pitch",
        "ABS_WHEEL": "orientation_yaw",
        "ABS_DISTANCE": "motion_z",
        "ABS_TILT_X": "motion_x",
        "ABS_TILT_Y": "motion_y",
    },
    # Axes options
    {
        "ABS_THROTTLE": (-16385, 16384, 0, 0),
        "ABS_RUDDER": (-16385, 16384, 0, 0),
        "ABS_WHEEL": (-16385, 16384, 0, 0),
        "ABS_DISTANCE": (-32768, 32767, 0, 10),
        "ABS_TILT_X": (-32768, 32767, 0, 10),
        "ABS_TILT_Y": (-32768, 32767, 0, 10),
    },
    # Buttons
    {
        "BTN_START": "button_options",
        "BTN_MODE": "button_ps",
        "BTN_SELECT": "button_share",
        "BTN_A": "button_cross",
        "BTN_B": "button_circle",
        "BTN_X": "button_square",
        "BTN_Y": "button_triangle",
        "BTN_TL": "button_l1",
        "BTN_TR": "button_r1",
        "BTN_THUMBL": "button_l3",
        "BTN_THUMBR": "button_r3",
    },
    # Hats
    {
        "ABS_HAT0X": ("dpad_left", "dpad_right"),
        "ABS_HAT0Y": ("dpad_up", "dpad_down")
    },
)
# Emulate the way the kernel does it
create_mapping(
    "ds4", "Sony Computer Entertainment Wireless Controller",
    # Bus type, vendor, product, version
    ecodes.BUS_USB, 1356, 1476, 273,
    # Axes
    {
        "ABS_X": "left_analog_x",
        "ABS_Y": "left_analog_y",
        "ABS_Z": "right_analog_x",
        "ABS_RZ": "right_analog_y",
        "ABS_RX": "l2_analog",
        "ABS_RY": "r2_analog",
        "ABS_THROTTLE": "orientation_roll",
        "ABS_RUDDER": "orientation_pitch",
        "ABS_WHEEL": "orientation_yaw",
        "ABS_DISTANCE": "motion_z",
        "ABS_TILT_X": "motion_x",
        "ABS_TILT_Y": "motion_y",
    },
    # Axes options
    {
        "ABS_THROTTLE": (-16385, 16384, 0, 0),
        "ABS_RUDDER": (-16385, 16384, 0, 0),
        "ABS_WHEEL": (-16385, 16384, 0, 0),
        "ABS_DISTANCE": (-32768, 32767, 0, 10),
        "ABS_TILT_X": (-32768, 32767, 0, 10),
        "ABS_TILT_Y": (-32768, 32767, 0, 10),
    },
    # Buttons
    {
        "BTN_TR2": "button_options",
        "BTN_MODE": "button_ps",
        "BTN_TL2": "button_share",
        "BTN_B": "button_cross",
        "BTN_C": "button_circle",
        "BTN_A": "button_square",
        "BTN_X": "button_triangle",
        "BTN_Y": "button_l1",
        "BTN_Z": "button_r1",
        "BTN_TL": "button_l2",
        "BTN_TR": "button_r2",
        "BTN_SELECT": "button_l3",
        "BTN_START": "button_r3",
        "BTN_THUMBL": "button_trackpad"
    },
    # Hats
    {
        "ABS_HAT0X": ("dpad_left", "dpad_right"),
        "ABS_HAT0Y": ("dpad_up", "dpad_down")
    }
)
# Emulate xboxdrv's button assignments.
create_mapping(
    "xboxdrv", "Xbox Gamepad (userspace driver)",
    # Bus type, vendor, product, version
    0, 0, 0, 0,
    # Axes
    {
        "ABS_X": "left_analog_x",
        "ABS_Y": "left_analog_y",
        "ABS_RX": "right_analog_x",
        "ABS_RY": "right_analog_y",
        "ABS_BRAKE": "l2_analog",
        "ABS_GAS": "r2_analog"
    },
    # Axes settings
    {},
    # Buttons
    {
        "BTN_START": "button_options",
        "BTN_MODE": "button_ps",
        "BTN_SELECT": "button_share",
        "BTN_A": "button_cross",
        "BTN_B": "button_circle",
        "BTN_X": "button_square",
        "BTN_Y": "button_triangle",
        "BTN_TL": "button_l1",
        "BTN_TR": "button_r1",
        "BTN_THUMBL": "button_l3",
        "BTN_THUMBR": "button_r3"
    },
    # Hats
    {
        "ABS_HAT0X": ("dpad_left", "dpad_right"),
        "ABS_HAT0Y": ("dpad_up", "dpad_down")
    }
)
# Emulate the way the kernel does Xbox 360/Xbone controllers with xpad
create_mapping(
    "xpad", "Microsoft X-Box 360 pad",
    # Bus type, vendor, product, version
    ecodes.BUS_USB, 1118, 654, 272,
    # Axes
    {
        "ABS_X": "left_analog_x",
        "ABS_Y": "left_analog_y",
        "ABS_RX": "right_analog_x",
        "ABS_RY": "right_analog_y",
        "ABS_Z": "l2_analog",
        "ABS_RZ": "r2_analog"
    },
    # Axes settings
    {},
    # Buttons
    {
        "BTN_START": "button_options",
        "BTN_MODE": "button_ps",
        "BTN_SELECT": "button_share",
        "BTN_A": "button_cross",
        "BTN_B": "button_circle",
        "BTN_X": "button_square",
        "BTN_Y": "button_triangle",
        "BTN_TL": "button_l1",
        "BTN_TR": "button_r1",
        "BTN_THUMBL": "button_l3",
        "BTN_THUMBR": "button_r3"
    },
    # Hats
    {
        "ABS_HAT0X": ("dpad_left", "dpad_right"),
        "ABS_HAT0Y": ("dpad_up", "dpad_down")
    }
)
# Emulate the way the kernel does Xbox 360 Wireless controllers with xpad.
# Note: this mapping exposes the dpad as buttons (BTN_TRIGGER_HAPPY*)
# instead of hats, matching the wireless receiver's kernel behaviour.
create_mapping(
    "xpad_wireless", "Xbox 360 Wireless Receiver",
    # Bus type, vendor, product, version
    ecodes.BUS_USB, 1118, 1817, 256,
    # Axes
    {
        "ABS_X": "left_analog_x",
        "ABS_Y": "left_analog_y",
        "ABS_RX": "right_analog_x",
        "ABS_RY": "right_analog_y",
        "ABS_Z": "l2_analog",
        "ABS_RZ": "r2_analog"
    },
    # Axes settings
    {},
    # Buttons
    {
        "BTN_START": "button_options",
        "BTN_MODE": "button_ps",
        "BTN_SELECT": "button_share",
        "BTN_A": "button_cross",
        "BTN_B": "button_circle",
        "BTN_X": "button_square",
        "BTN_Y": "button_triangle",
        "BTN_TL": "button_l1",
        "BTN_TR": "button_r1",
        "BTN_THUMBL": "button_l3",
        "BTN_THUMBR": "button_r3",
        "BTN_TRIGGER_HAPPY1": "dpad_left",
        "BTN_TRIGGER_HAPPY2": "dpad_right",
        "BTN_TRIGGER_HAPPY3": "dpad_up",
        "BTN_TRIGGER_HAPPY4": "dpad_down",
    },
)
# Emulate a multi-touch trackpad
create_mapping(
    "mouse", "DualShock4 Mouse Emulation",
    buttons={
        "BTN_LEFT": "button_trackpad",
        "BTN_TOOL_FINGER": "trackpad_touch0_active",
        "BTN_TOOL_DOUBLETAP": "trackpad_touch1_active"
    },
    mouse={
        "REL_X": "trackpad_touch0_x",
        "REL_Y": "trackpad_touch0_y",
        "REL_RX": "trackpad_touch1_x",
        "REL_RY": "trackpad_touch1_y"
    },
)
class UInputDevice(object):
    """A virtual uinput device fed from DS4 input reports.

    The device is described by a UInputMapping layout which maps evdev
    event codes to attribute names on incoming report objects.  Depending
    on the layout this emulates a joystick, a mouse/trackpad, or both.
    """

    def __init__(self, layout):
        self.joystick_dev = None
        self.evdev_dev = None
        # Buttons in this set always report "not pressed" (see emit()).
        self.ignored_buttons = set()
        self.create_device(layout)

        # Last value written per (event type, code); used by write_event()
        # to suppress duplicate events.
        self._write_cache = {}
        # Timing/direction state for the simulated scroll wheel.
        self._scroll_details = {}

    def create_device(self, layout):
        """Creates a uinput device using the specified layout."""
        events = {ecodes.EV_ABS: [], ecodes.EV_KEY: [],
                  ecodes.EV_REL: []}

        # Joystick device
        if layout.axes or layout.buttons or layout.hats:
            self.joystick_dev = next_joystick_device()

        for name in layout.axes:
            params = layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)
            events[ecodes.EV_ABS].append((name, params))

        for name in layout.hats:
            # Hats are discrete three-state axes: -1, 0, 1
            params = (-1, 1, 0, 0)
            events[ecodes.EV_ABS].append((name, params))

        for name in layout.buttons:
            events[ecodes.EV_KEY].append(name)

        if layout.mouse:
            self.mouse_pos = {}
            self.mouse_rel = {}
            self.mouse_analog_sensitivity = float(
                layout.mouse_options.get("MOUSE_SENSITIVITY",
                                         DEFAULT_MOUSE_SENSITIVTY)
            )
            self.mouse_analog_deadzone = int(
                layout.mouse_options.get("MOUSE_DEADZONE",
                                         DEFAULT_MOUSE_DEADZONE)
            )
            self.scroll_repeat_delay = float(
                layout.mouse_options.get("MOUSE_SCROLL_REPEAT_DELAY",
                                         DEFAULT_SCROLL_REPEAT_DELAY)
            )
            self.scroll_delay = float(
                layout.mouse_options.get("MOUSE_SCROLL_DELAY",
                                         DEFAULT_SCROLL_DELAY)
            )

            for name in layout.mouse:
                if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):
                    # The simulated wheel buttons share the real REL_WHEEL code
                    if ecodes.REL_WHEEL not in events[ecodes.EV_REL]:
                        # This ensures that scroll wheel events can work
                        events[ecodes.EV_REL].append(ecodes.REL_WHEEL)
                else:
                    events[ecodes.EV_REL].append(name)
                self.mouse_rel[name] = 0.0

        self.device = UInput(name=layout.name, events=events,
                             bustype=layout.bustype, vendor=layout.vendor,
                             product=layout.product, version=layout.version)
        self.layout = layout

    def write_event(self, etype, code, value):
        """Writes an event to the device, if it has changed.

        Fix: the duplicate-suppression cache is keyed on (etype, code)
        instead of the bare code.  EV_ABS and EV_KEY code numbers overlap
        (e.g. ABS_HAT0Y and KEY_W are both 17), so a bare-code cache could
        wrongly swallow events of one type after a same-numbered event of
        another type.
        """
        cache_key = (etype, code)
        last_value = self._write_cache.get(cache_key)
        if last_value != value:
            self.device.write(etype, code, value)
            self._write_cache[cache_key] = value

    def emit(self, report):
        """Writes axes, buttons and hats with values from the report to
        the device."""
        for name, attr in self.layout.axes.items():
            value = getattr(report, attr)
            self.write_event(ecodes.EV_ABS, name, value)

        for name, attr in self.layout.buttons.items():
            attr, modifier = attr
            if attr in self.ignored_buttons:
                value = False
            else:
                value = getattr(report, attr)

            # "+"/"-" modifiers turn an analog trigger value into a
            # digital press once it leaves the deadzone around 128.
            if modifier and "analog" in attr:
                if modifier == "+":
                    value = value > (128 + DEFAULT_A2D_DEADZONE)
                elif modifier == "-":
                    value = value < (128 - DEFAULT_A2D_DEADZONE)

            self.write_event(ecodes.EV_KEY, name, value)

        for name, attr in self.layout.hats.items():
            # attr is a (negative_button, positive_button) pair
            if getattr(report, attr[0]):
                value = -1
            elif getattr(report, attr[1]):
                value = 1
            else:
                value = 0

            self.write_event(ecodes.EV_ABS, name, value)

        self.device.syn()

    def emit_reset(self):
        """Resets the device to a blank state."""
        for name in self.layout.axes:
            params = self.layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)
            # Centre each axis between its min and max
            self.write_event(ecodes.EV_ABS, name, int(sum(params[:2]) / 2))

        for name in self.layout.buttons:
            self.write_event(ecodes.EV_KEY, name, False)

        for name in self.layout.hats:
            self.write_event(ecodes.EV_ABS, name, 0)

        self.device.syn()

    def emit_mouse(self, report):
        """Calculates relative mouse values from a report and writes them."""
        for name, attr in self.layout.mouse.items():
            if attr.startswith("trackpad_touch"):
                # Relative motion from trackpad touch positions; the
                # matching "...active" attribute gates the touch.
                active_attr = attr[:16] + "active"
                if not getattr(report, active_attr):
                    self.mouse_pos.pop(name, None)
                    continue

                pos = getattr(report, attr)
                if name not in self.mouse_pos:
                    self.mouse_pos[name] = pos

                sensitivity = 0.5
                self.mouse_rel[name] += (pos - self.mouse_pos[name]) * sensitivity
                self.mouse_pos[name] = pos
            elif "analog" in attr:
                # Relative motion from an analog stick outside its deadzone
                pos = getattr(report, attr)
                if (pos > (128 + self.mouse_analog_deadzone)
                        or pos < (128 - self.mouse_analog_deadzone)):
                    accel = (pos - 128) / 10
                else:
                    continue

                sensitivity = self.mouse_analog_sensitivity
                self.mouse_rel[name] += accel * sensitivity

            # Emulate mouse wheel (needs special handling)
            if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):
                ecode = ecodes.REL_WHEEL  # The real event we need to emit
                write = False
                if getattr(report, attr):
                    self._scroll_details['direction'] = name
                    now = time.time()
                    last_write = self._scroll_details.get('last_write')
                    if not last_write:
                        # No delay for the first button press for fast feedback
                        write = True
                        self._scroll_details['count'] = 0

                    if name == ecodes.REL_WHEELUP:
                        value = 1
                    elif name == ecodes.REL_WHEELDOWN:
                        value = -1

                    if last_write:
                        # Delay at least one cycle before continual scrolling
                        if self._scroll_details['count'] > 1:
                            if now - last_write > self.scroll_delay:
                                write = True
                        elif now - last_write > self.scroll_repeat_delay:
                            write = True

                    if write:
                        self.device.write(ecodes.EV_REL, ecode, value)
                        self._scroll_details['last_write'] = now
                        self._scroll_details['count'] += 1
                    continue  # No need to proceed further
                else:
                    # Reset so you can quickly tap the button to scroll
                    if self._scroll_details.get('direction') == name:
                        self._scroll_details['last_write'] = 0
                        self._scroll_details['count'] = 0

            # Emit only the integer part, carrying the remainder over
            rel = int(self.mouse_rel[name])
            self.mouse_rel[name] = self.mouse_rel[name] - rel
            self.device.write(ecodes.EV_REL, name, rel)

        self.device.syn()
def create_uinput_device(mapping):
    """Creates a uinput device from a registered mapping name.

    Raises DeviceError for unknown mapping names or when the underlying
    uinput device cannot be created (e.g. no access to /dev/uinput).
    """
    if mapping not in _mappings:
        raise DeviceError("Unknown device mapping: {0}".format(mapping))

    # The registry lookup cannot raise UInputError; keep the try body
    # limited to the operation that can actually fail, and avoid
    # shadowing the mapping-name argument.
    layout = _mappings[mapping]
    try:
        device = UInputDevice(layout)
    except UInputError as err:
        raise DeviceError(err)

    return device
def parse_uinput_mapping(name, mapping):
    """Parses a dict of mapping options and registers the mapping."""
    description = "ds4drv custom mapping ({0})".format(name)
    axes, buttons, mouse, mouse_options = {}, {}, {}, {}

    # Route each option into the right bucket by its (upper-cased) prefix
    routes = (("BTN_", buttons), ("KEY_", buttons), ("ABS_", axes),
              ("REL_", mouse), ("MOUSE_", mouse_options))
    for raw_key, attr in mapping.items():
        upper_key = raw_key.upper()
        for prefix, bucket in routes:
            if upper_key.startswith(prefix):
                bucket[upper_key] = attr
                break

    create_mapping(name, description, axes=axes, buttons=buttons,
                   mouse=mouse, mouse_options=mouse_options)
def next_joystick_device():
    """Finds the next available (not yet existing) js device name."""
    candidates = ("/dev/input/js{0}".format(n) for n in range(100))
    for candidate in candidates:
        if not os.path.exists(candidate):
            return candidate
# NOTE(review): removed trailing dataset-viewer residue ("Subsets and Splits"
# boilerplate) that was accidentally appended to the module and is not code.