repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
ryano144/intellij-community | python/lib/Lib/site-packages/django/db/models/manager.py | 306 | 7872 | from django.utils import copycompat as copy
from django.conf import settings
from django.db import router
from django.db.models.query import QuerySet, EmptyQuerySet, insert_query, RawQuerySet
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
def ensure_default_manager(sender, **kwargs):
    """
    Ensures that a Model subclass contains a default manager and sets the
    _default_manager attribute on the class. Also ensures that _base_manager
    points to a plain Manager instance (which could be the same as
    _default_manager if it's not a subclass of Manager).

    Connected to the class_prepared signal, so ``sender`` is the model class.
    """
    cls = sender
    # Abstract models are never queried directly; they need no managers.
    if cls._meta.abstract:
        return
    if not getattr(cls, '_default_manager', None):
        # Create the default manager, if needed.
        try:
            # A field named 'objects' would be shadowed by the auto-created
            # manager, so refuse to auto-create one in that case.
            cls._meta.get_field('objects')
            raise ValueError("Model %s must specify a custom Manager, because it has a field named 'objects'" % cls.__name__)
        except FieldDoesNotExist:
            pass
        cls.add_to_class('objects', Manager())
        cls._base_manager = cls.objects
    elif not getattr(cls, '_base_manager', None):
        default_mgr = cls._default_manager.__class__
        if (default_mgr is Manager or
                getattr(default_mgr, "use_for_related_fields", False)):
            cls._base_manager = cls._default_manager
        else:
            # Default manager isn't a plain Manager class, or a suitable
            # replacement, so we walk up the base class hierarchy until we hit
            # something appropriate.
            for base_class in default_mgr.mro()[1:]:
                if (base_class is Manager or
                        getattr(base_class, "use_for_related_fields", False)):
                    cls.add_to_class('_base_manager', base_class())
                    return
            raise AssertionError("Should never get here. Please report a bug, including your model and model manager setup.")
# Give every freshly-prepared model class its default/base managers.
signals.class_prepared.connect(ensure_default_manager)
class Manager(object):
    """
    Base table-level interface for database query operations on a model.

    Most public methods are thin proxies that delegate to a freshly created
    QuerySet (see the "PROXIES TO QUERYSET" section), so subclasses usually
    only need to override get_query_set() to customize behavior.
    """
    # Tracks each time a Manager instance is created. Used to retain order.
    creation_counter = 0
    def __init__(self):
        super(Manager, self).__init__()
        self._set_creation_counter()
        self.model = None
        self._inherited = False
        self._db = None
    def contribute_to_class(self, model, name):
        # TODO: Use weakref because of possible memory leak / circular reference.
        self.model = model
        setattr(model, name, ManagerDescriptor(self))
        # The first manager defined on a model (lowest creation_counter) wins
        # the _default_manager slot.
        if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
            model._default_manager = self
        if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
            model._meta.abstract_managers.append((self.creation_counter, name,
                    self))
        else:
            model._meta.concrete_managers.append((self.creation_counter, name,
                    self))
    def _set_creation_counter(self):
        """
        Sets the creation counter value for this instance and increments the
        class-level copy.
        """
        self.creation_counter = Manager.creation_counter
        Manager.creation_counter += 1
    def _copy_to_model(self, model):
        """
        Makes a copy of the manager and assigns it to 'model', which should be
        a child of the existing model (used when inheriting a manager from an
        abstract base class).
        """
        assert issubclass(model, self.model)
        mgr = copy.copy(self)
        mgr._set_creation_counter()
        mgr.model = model
        mgr._inherited = True
        return mgr
    def db_manager(self, using):
        # Return a copy of this manager pinned to the given database alias.
        obj = copy.copy(self)
        obj._db = using
        return obj
    @property
    def db(self):
        # Explicitly pinned database, falling back to the router's choice.
        return self._db or router.db_for_read(self.model)
    #######################
    # PROXIES TO QUERYSET #
    #######################
    def get_empty_query_set(self):
        return EmptyQuerySet(self.model, using=self._db)
    def get_query_set(self):
        """Returns a new QuerySet object. Subclasses can override this method
        to easily customize the behavior of the Manager.
        """
        return QuerySet(self.model, using=self._db)
    def none(self):
        return self.get_empty_query_set()
    def all(self):
        return self.get_query_set()
    def count(self):
        return self.get_query_set().count()
    def dates(self, *args, **kwargs):
        return self.get_query_set().dates(*args, **kwargs)
    def distinct(self, *args, **kwargs):
        return self.get_query_set().distinct(*args, **kwargs)
    def extra(self, *args, **kwargs):
        return self.get_query_set().extra(*args, **kwargs)
    def get(self, *args, **kwargs):
        return self.get_query_set().get(*args, **kwargs)
    def get_or_create(self, **kwargs):
        return self.get_query_set().get_or_create(**kwargs)
    def create(self, **kwargs):
        return self.get_query_set().create(**kwargs)
    def filter(self, *args, **kwargs):
        return self.get_query_set().filter(*args, **kwargs)
    def aggregate(self, *args, **kwargs):
        return self.get_query_set().aggregate(*args, **kwargs)
    def annotate(self, *args, **kwargs):
        return self.get_query_set().annotate(*args, **kwargs)
    def complex_filter(self, *args, **kwargs):
        return self.get_query_set().complex_filter(*args, **kwargs)
    def exclude(self, *args, **kwargs):
        return self.get_query_set().exclude(*args, **kwargs)
    def in_bulk(self, *args, **kwargs):
        return self.get_query_set().in_bulk(*args, **kwargs)
    def iterator(self, *args, **kwargs):
        return self.get_query_set().iterator(*args, **kwargs)
    def latest(self, *args, **kwargs):
        return self.get_query_set().latest(*args, **kwargs)
    def order_by(self, *args, **kwargs):
        return self.get_query_set().order_by(*args, **kwargs)
    def select_related(self, *args, **kwargs):
        return self.get_query_set().select_related(*args, **kwargs)
    def values(self, *args, **kwargs):
        return self.get_query_set().values(*args, **kwargs)
    def values_list(self, *args, **kwargs):
        return self.get_query_set().values_list(*args, **kwargs)
    def update(self, *args, **kwargs):
        return self.get_query_set().update(*args, **kwargs)
    def reverse(self, *args, **kwargs):
        return self.get_query_set().reverse(*args, **kwargs)
    def defer(self, *args, **kwargs):
        return self.get_query_set().defer(*args, **kwargs)
    def only(self, *args, **kwargs):
        return self.get_query_set().only(*args, **kwargs)
    def using(self, *args, **kwargs):
        return self.get_query_set().using(*args, **kwargs)
    def exists(self, *args, **kwargs):
        return self.get_query_set().exists(*args, **kwargs)
    def _insert(self, values, **kwargs):
        return insert_query(self.model, values, **kwargs)
    def _update(self, values, **kwargs):
        return self.get_query_set()._update(values, **kwargs)
    def raw(self, raw_query, params=None, *args, **kwargs):
        return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)
class ManagerDescriptor(object):
    """
    Descriptor that makes a manager accessible only via the model class.

    For example, Poll.objects works, but poll_obj.objects raises
    AttributeError, because manager methods operate on tables, not rows.
    """
    def __init__(self, manager):
        self.manager = manager
    def __get__(self, instance, type=None):
        # Use an identity check against None: `instance != None` would invoke
        # the model instance's custom __eq__ (which may hit the database or
        # behave unexpectedly); `is not None` is both correct and cheaper.
        if instance is not None:
            raise AttributeError("Manager isn't accessible via %s instances" % type.__name__)
        return self.manager
class EmptyManager(Manager):
    """Manager whose querysets never return any results."""
    def get_query_set(self):
        # Every proxy method on Manager goes through get_query_set(), so
        # returning the empty queryset here makes the whole manager inert.
        return self.get_empty_query_set()
| apache-2.0 |
pathway27/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/test_stream.py | 446 | 6264 | from __future__ import absolute_import, division, unicode_literals
from . import support # flake8: noqa
import unittest
import codecs
from io import BytesIO
from six.moves import http_client
from html5lib.inputstream import (BufferedStream, HTMLInputStream,
HTMLUnicodeInputStream, HTMLBinaryInputStream)
class BufferedStreamTest(unittest.TestCase):
    """Tests for BufferedStream's read/tell/seek semantics over a BytesIO."""
    def test_basic(self):
        s = b"abc"
        fp = BufferedStream(BytesIO(s))
        # Asking for more bytes than exist returns everything available.
        read = fp.read(10)
        assert read == s
    def test_read_length(self):
        fp = BufferedStream(BytesIO(b"abcdef"))
        read1 = fp.read(1)
        assert read1 == b"a"
        read2 = fp.read(2)
        assert read2 == b"bc"
        read3 = fp.read(3)
        assert read3 == b"def"
        # Reading at EOF yields the empty byte string.
        read4 = fp.read(4)
        assert read4 == b""
    def test_tell(self):
        fp = BufferedStream(BytesIO(b"abcdef"))
        read1 = fp.read(1)
        assert fp.tell() == 1
        read2 = fp.read(2)
        assert fp.tell() == 3
        read3 = fp.read(3)
        assert fp.tell() == 6
        # Reading past EOF must not advance the reported position.
        read4 = fp.read(4)
        assert fp.tell() == 6
    def test_seek(self):
        fp = BufferedStream(BytesIO(b"abcdef"))
        read1 = fp.read(1)
        assert read1 == b"a"
        fp.seek(0)
        read2 = fp.read(1)
        assert read2 == b"a"
        read3 = fp.read(2)
        assert read3 == b"bc"
        fp.seek(2)
        read4 = fp.read(2)
        assert read4 == b"cd"
        fp.seek(4)
        read5 = fp.read(2)
        assert read5 == b"ef"
    def test_seek_tell(self):
        fp = BufferedStream(BytesIO(b"abcdef"))
        read1 = fp.read(1)
        assert fp.tell() == 1
        fp.seek(0)
        read2 = fp.read(1)
        assert fp.tell() == 1
        read3 = fp.read(2)
        assert fp.tell() == 3
        fp.seek(2)
        read4 = fp.read(2)
        assert fp.tell() == 4
        fp.seek(4)
        read5 = fp.read(2)
        assert fp.tell() == 6
class HTMLUnicodeInputStreamShortChunk(HTMLUnicodeInputStream):
    # Tiny chunk size forces chunk-boundary handling to be exercised in tests.
    _defaultChunkSize = 2
class HTMLBinaryInputStreamShortChunk(HTMLBinaryInputStream):
    # Tiny chunk size forces chunk-boundary handling to be exercised in tests.
    _defaultChunkSize = 2
class HTMLInputStreamTest(unittest.TestCase):
    """Tests for HTMLInputStream: encoding detection, BOM handling,
    newline normalization and (line, column) position tracking.

    position() appears to report a 1-based line and 0-based column —
    see the assertions in test_position2.
    """
    def test_char_ascii(self):
        stream = HTMLInputStream(b"'", encoding='ascii')
        self.assertEqual(stream.charEncoding[0], 'ascii')
        self.assertEqual(stream.char(), "'")
    def test_char_utf8(self):
        stream = HTMLInputStream('\u2018'.encode('utf-8'), encoding='utf-8')
        self.assertEqual(stream.charEncoding[0], 'utf-8')
        self.assertEqual(stream.char(), '\u2018')
    def test_char_win1252(self):
        # No encoding given: windows-1252 is the expected fallback guess.
        stream = HTMLInputStream("\xa9\xf1\u2019".encode('windows-1252'))
        self.assertEqual(stream.charEncoding[0], 'windows-1252')
        self.assertEqual(stream.char(), "\xa9")
        self.assertEqual(stream.char(), "\xf1")
        self.assertEqual(stream.char(), "\u2019")
    def test_bom(self):
        # A UTF-8 BOM selects utf-8 and is consumed, not returned as a char.
        stream = HTMLInputStream(codecs.BOM_UTF8 + b"'")
        self.assertEqual(stream.charEncoding[0], 'utf-8')
        self.assertEqual(stream.char(), "'")
    def test_utf_16(self):
        stream = HTMLInputStream((' ' * 1025).encode('utf-16'))
        self.assertTrue(stream.charEncoding[0] in ['utf-16-le', 'utf-16-be'], stream.charEncoding)
        self.assertEqual(len(stream.charsUntil(' ', True)), 1025)
    def test_newlines(self):
        # \r\n and bare \r must both be normalized to \n.
        stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\r\nccc\rddddxe")
        self.assertEqual(stream.position(), (1, 0))
        self.assertEqual(stream.charsUntil('c'), "a\nbb\n")
        self.assertEqual(stream.position(), (3, 0))
        self.assertEqual(stream.charsUntil('x'), "ccc\ndddd")
        self.assertEqual(stream.position(), (4, 4))
        self.assertEqual(stream.charsUntil('e'), "x")
        self.assertEqual(stream.position(), (4, 5))
    def test_newlines2(self):
        # A \r ending exactly on a chunk boundary must still normalize.
        size = HTMLUnicodeInputStream._defaultChunkSize
        stream = HTMLInputStream("\r" * size + "\n")
        self.assertEqual(stream.charsUntil('x'), "\n" * size)
    def test_position(self):
        stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\nccc\nddde\nf\ngh")
        self.assertEqual(stream.position(), (1, 0))
        self.assertEqual(stream.charsUntil('c'), "a\nbb\n")
        self.assertEqual(stream.position(), (3, 0))
        # unget() must rewind the reported position across a newline.
        stream.unget("\n")
        self.assertEqual(stream.position(), (2, 2))
        self.assertEqual(stream.charsUntil('c'), "\n")
        self.assertEqual(stream.position(), (3, 0))
        stream.unget("\n")
        self.assertEqual(stream.position(), (2, 2))
        self.assertEqual(stream.char(), "\n")
        self.assertEqual(stream.position(), (3, 0))
        self.assertEqual(stream.charsUntil('e'), "ccc\nddd")
        self.assertEqual(stream.position(), (4, 3))
        self.assertEqual(stream.charsUntil('h'), "e\nf\ng")
        self.assertEqual(stream.position(), (6, 1))
    def test_position2(self):
        stream = HTMLUnicodeInputStreamShortChunk("abc\nd")
        self.assertEqual(stream.position(), (1, 0))
        self.assertEqual(stream.char(), "a")
        self.assertEqual(stream.position(), (1, 1))
        self.assertEqual(stream.char(), "b")
        self.assertEqual(stream.position(), (1, 2))
        self.assertEqual(stream.char(), "c")
        self.assertEqual(stream.position(), (1, 3))
        self.assertEqual(stream.char(), "\n")
        self.assertEqual(stream.position(), (2, 0))
        self.assertEqual(stream.char(), "d")
        self.assertEqual(stream.position(), (2, 1))
    def test_python_issue_20007(self):
        """
        Make sure we have a work-around for Python bug #20007
        http://bugs.python.org/issue20007
        """
        class FakeSocket(object):
            def makefile(self, _mode, _bufsize=None):
                return BytesIO(b"HTTP/1.1 200 Ok\r\n\r\nText")
        source = http_client.HTTPResponse(FakeSocket())
        source.begin()
        stream = HTMLInputStream(source)
        self.assertEqual(stream.charsUntil(" "), "Text")
def buildTestSuite():
    """Load every test case defined in this module into a single suite."""
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
def main():
    """Run this module's tests from the command line."""
    # buildTestSuite() is called for its side effects only; unittest.main()
    # re-discovers the tests itself and exits the process when done.
    buildTestSuite()
    unittest.main()
# Allow the test module to be executed directly as a script.
if __name__ == '__main__':
    main()
| mpl-2.0 |
LS80/script.libreelec.devupdater | resources/lib/milhouse.py | 5 | 3008 | #! /usr/bin/python
import re
import os
import urlparse
from bs4 import BeautifulSoup
import html2text
from . import builds, config
class MilhouseBuildLinkExtractor(builds.BuildLinkExtractor):
    """Link extractor specialised for Milhouse build archive filenames."""
    # Template regex ({dist}/{arch} are filled in elsewhere) capturing the
    # build number, the revision tag and an optional .bz2 suffix.
    BUILD_RE = (r"{dist}-{arch}-(?:\d+\.\d+-|)"
                r"Milhouse-(\d+)-(?:r|%23)(\d+[a-z]*)-g[0-9a-z]+\.tar(|\.bz2)")
class MilhouseBuildDetailsExtractor(builds.BuildDetailsExtractor):
    """Class for extracting the full build details for a Milhouse build
    from the release post on the Kodi forum.
    """
    # NOTE: this is Python 2 code (``unicode`` builtin, ``urlparse`` module).
    def get_text(self):
        soup = BeautifulSoup(self._text(), 'html5lib')
        # The post id is carried in the 'pid' query parameter of the URL.
        pid = urlparse.parse_qs(urlparse.urlparse(self.url).query)['pid'][0]
        post_div_id = "pid_{}".format(pid)
        post = soup.find('div', 'post_body', id=post_div_id)
        text_maker = html2text.HTML2Text()
        text_maker.ignore_links = True
        text_maker.ul_item_mark = '-'
        text = text_maker.handle(unicode(post))
        # Keep everything from "Build Highlights:" onwards and bold the
        # section headers using Kodi's [B]...[/B] markup.
        text = re.search(r"(Build Highlights:.*)", text, re.DOTALL).group(1)
        text = re.sub(r"(Build Highlights:)", r"[B]\1[/B]", text)
        text = re.sub(r"(Build Details:)", r"[B]\1[/B]", text)
        return text
class MilhouseBuildInfoExtractor(builds.BuildInfoExtractor):
    """Class for creating a dictionary of BuildInfo objects for Milhouse builds
    keyed on the build version."""
    URL_FMT = "http://forum.kodi.tv/showthread.php?tid={}"
    # Matches e.g. "#1234a ... (summary)": group 1 is the build number,
    # group 2 the parenthesised summary.
    R = re.compile(r"#(\d{4}[a-z]?).*?\((.+)\)")
    def _get_info(self, soup):
        # Only the first few posts of the thread list builds.
        for post in soup('div', 'post_body', limit=3):
            for ul in post('ul'):
                for li in ul('li'):
                    m = self.R.match(li.get_text())
                    if m:
                        url = li.find('a', text="Release post")['href']
                        yield (m.group(1),
                               builds.BuildInfo(m.group(2),
                                   MilhouseBuildDetailsExtractor(url)))
    def get_info(self):
        soup = BeautifulSoup(self._text(), 'html5lib')
        return dict(self._get_info(soup))
    @classmethod
    def from_thread_id(cls, thread_id):
        """Create a Milhouse build info extractor from the thread id number."""
        url = cls.URL_FMT.format(thread_id)
        return cls(url)
def milhouse_build_info_extractors():
    """Yield a build info extractor for each forum thread relevant to the
    configured architecture (Raspberry Pi threads differ from the rest)."""
    if config.arch.startswith("RPi"):
        thread_ids = (269814, 298461)
    else:
        thread_ids = (269815, 298462)
    for tid in thread_ids:
        yield MilhouseBuildInfoExtractor.from_thread_id(tid)
class MilhouseBuildsURL(builds.BuildsURL):
    """BuildsURL for the Milhouse download server, rooted at the given
    branch subdirectory (defaults to "master")."""
    def __init__(self, subdir="master"):
        self.subdir = subdir
        url = "http://milhouse.libreelec.tv/builds/"
        # config.arch looks like e.g. "RPi2.arm"; only the part before the
        # first dot names the server-side directory.
        super(MilhouseBuildsURL, self).__init__(
            url, os.path.join(subdir, config.arch.split('.')[0]),
            MilhouseBuildLinkExtractor, list(milhouse_build_info_extractors()))
    def __repr__(self):
        return "{}('{}')".format(self.__class__.__name__, self.subdir)
| gpl-3.0 |
kyuupichan/electrumx | tests/server/test_notifications.py | 1 | 1422 | import pytest
from electrumx.server.controller import Notifications
@pytest.mark.asyncio
async def test_simple_mempool():
    """A mempool refresh ahead of the daemon height must not leak its touched
    set until the block catches up; then both sets are reported together."""
    n = Notifications()
    notified = []
    async def notify(height, touched):
        notified.append((height, touched))
    await n.start(5, notify)
    mtouched = {'a', 'b'}
    btouched = {'b', 'c'}
    # Mempool is at height 6 while blocks are still at 5: nothing new is sent.
    await n.on_mempool(mtouched, 6)
    assert notified == [(5, set())]
    # Once block 6 arrives, mempool and block touches are merged.
    await n.on_block(btouched, 6)
    assert notified == [(5, set()), (6, set.union(mtouched, btouched))]
@pytest.mark.asyncio
async def test_enter_mempool_quick_blocks_2():
    """Notifications are held back while the mempool lags the block height,
    and flushed only when the mempool refresh catches up."""
    n = Notifications()
    notified = []
    async def notify(height, touched):
        notified.append((height, touched))
    await n.start(5, notify)
    # Suppose a gets in block 6 and blocks 7,8 found right after and
    # the block processer processes them together.
    await n.on_mempool({'a'}, 5)
    assert notified == [(5, set()), (5, {'a'})]
    # Mempool refreshes with daemon on block 6
    await n.on_mempool({'a'}, 6)
    assert notified == [(5, set()), (5, {'a'})]
    # Blocks 6, 7 processed together
    await n.on_block({'a', 'b'}, 7)
    assert notified == [(5, set()), (5, {'a'})]
    # Then block 8 processed
    await n.on_block({'c'}, 8)
    assert notified == [(5, set()), (5, {'a'})]
    # Now mempool refreshes
    await n.on_mempool(set(), 8)
    assert notified == [(5, set()), (5, {'a'}), (8, {'a', 'b', 'c'})]
| mit |
tensorflow/tensorflow | tensorflow/python/keras/layers/kernelized.py | 6 | 11017 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-classes-have-attributes
"""Keras layers that implement explicit (approximate) kernel feature maps."""
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
_SUPPORTED_RBF_KERNEL_TYPES = ['gaussian', 'laplacian']
@keras_export('keras.layers.experimental.RandomFourierFeatures')
class RandomFourierFeatures(base_layer.Layer):
  r"""Layer that projects its inputs into a random feature space.
  This layer implements a mapping from input space to a space with `output_dim`
  dimensions, which approximates shift-invariant kernels. A kernel function
  `K(x, y)` is shift-invariant if `K(x, y) == k(x - y)` for some function `k`.
  Many popular Radial Basis Functions (RBF), including Gaussian and
  Laplacian kernels, are shift-invariant.
  The implementation of this layer is based on the following paper:
  ["Random Features for Large-Scale Kernel Machines"](
  https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
  by Ali Rahimi and Ben Recht.
  The distribution from which the parameters of the random features map (layer)
  are sampled determines which shift-invariant kernel the layer approximates
  (see paper for more details). You can use the distribution of your
  choice. The layer supports out-of-the-box
  approximations of the following two RBF kernels:
  - Gaussian: `K(x, y) == exp(- square(x - y) / (2 * square(scale)))`
  - Laplacian: `K(x, y) = exp(-abs(x - y) / scale))`
  **Note:** Unlike what is described in the paper and unlike what is used in
  the Scikit-Learn implementation, the output of this layer does not apply
  the `sqrt(2 / D)` normalization factor.
  **Usage:** Typically, this layer is used to "kernelize" linear models by
  applying a non-linear transformation (this layer) to the input features and
  then training a linear model on top of the transformed features. Depending on
  the loss function of the linear model, the composition of this layer and the
  linear model results to models that are equivalent (up to approximation) to
  kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss),
  kernel linear regression (for squared loss), etc.
  Examples:
  A kernel multinomial logistic regression model with Gaussian kernel for MNIST:
  ```python
  model = keras.Sequential([
    keras.Input(shape=(784,)),
    RandomFourierFeatures(
        output_dim=4096,
        scale=10.,
        kernel_initializer='gaussian'),
    layers.Dense(units=10, activation='softmax'),
  ])
  model.compile(
      optimizer='adam',
      loss='categorical_crossentropy',
      metrics=['categorical_accuracy']
  )
  ```
  A quasi-SVM classifier for MNIST:
  ```python
  model = keras.Sequential([
    keras.Input(shape=(784,)),
    RandomFourierFeatures(
        output_dim=4096,
        scale=10.,
        kernel_initializer='gaussian'),
    layers.Dense(units=10),
  ])
  model.compile(
      optimizer='adam',
      loss='hinge',
      metrics=['categorical_accuracy']
  )
  ```
  To use another kernel, just replace the layer creation line with:
  ```python
  random_features_layer = RandomFourierFeatures(
      output_dim=500,
      kernel_initializer=<my_initializer>,
      scale=...,
      ...)
  ```
  Args:
    output_dim: Positive integer, the dimension of the layer's output, i.e., the
      number of random features used to approximate the kernel.
    kernel_initializer: Determines the distribution of the parameters of the
      random features map (and therefore the kernel approximated by the layer).
      It can be either a string identifier or a Keras `Initializer` instance.
      Currently only 'gaussian' and 'laplacian' are supported string
      identifiers (case insensitive). Note that the kernel matrix is not
      trainable.
    scale: For Gaussian and Laplacian kernels, this corresponds to a scaling
      factor of the corresponding kernel approximated by the layer (see concrete
      definitions above). When provided, it should be a positive float. If None,
      a default value is used: if the kernel initializer is set to "gaussian",
      `scale` defaults to `sqrt(input_dim / 2)`, otherwise, it defaults to 1.0.
      Both the approximation error of the kernel and the classification quality
      are sensitive to this parameter. If `trainable` is set to `True`, this
      parameter is learned end-to-end during training and the provided value
      serves as the initial value.
      **Note:** When features from this layer are fed to a linear model,
        by making `scale` trainable, the resulting optimization problem is
        no longer convex (even if the loss function used by the linear model
        is convex).
    trainable: Whether the scaling parameter of the layer should be trainable.
      Defaults to `False`.
    name: String, name to use for this layer.
  """
  def __init__(self,
               output_dim,
               kernel_initializer='gaussian',
               scale=None,
               trainable=False,
               name=None,
               **kwargs):
    # Validate eagerly so bad arguments fail at construction, not at build().
    if output_dim <= 0:
      raise ValueError(
          '`output_dim` should be a positive integer. Given: {}.'.format(
              output_dim))
    if isinstance(kernel_initializer, str):
      if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES:
        raise ValueError(
            'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'
            .format(kernel_initializer, _SUPPORTED_RBF_KERNEL_TYPES))
    if scale is not None and scale <= 0.0:
      raise ValueError('When provided, `scale` should be a positive float. '
                       'Given: {}.'.format(scale))
    super(RandomFourierFeatures, self).__init__(
        trainable=trainable, name=name, **kwargs)
    self.output_dim = output_dim
    self.kernel_initializer = kernel_initializer
    self.scale = scale
  def build(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape)
    # TODO(sibyl-vie3Poto): Allow higher dimension inputs. Currently the input is expected
    # to have shape [batch_size, dimension].
    if input_shape.rank != 2:
      raise ValueError(
          'The rank of the input tensor should be 2. Got {} instead.'.format(
              input_shape.ndims))
    if input_shape.dims[1].value is None:
      raise ValueError(
          'The last dimension of the inputs to `RandomFourierFeatures` '
          'should be defined. Found `None`.')
    self.input_spec = input_spec.InputSpec(
        ndim=2, axes={1: input_shape.dims[1].value})
    input_dim = input_shape.dims[1].value
    kernel_initializer = _get_random_features_initializer(
        self.kernel_initializer, shape=(input_dim, self.output_dim))
    # Random projection matrix; fixed at build time (never trained).
    self.unscaled_kernel = self.add_weight(
        name='unscaled_kernel',
        shape=(input_dim, self.output_dim),
        dtype=dtypes.float32,
        initializer=kernel_initializer,
        trainable=False)
    # Random phase offsets drawn uniformly from [0, 2*pi); also fixed.
    self.bias = self.add_weight(
        name='bias',
        shape=(self.output_dim,),
        dtype=dtypes.float32,
        initializer=init_ops.random_uniform_initializer(
            minval=0.0, maxval=2 * np.pi, dtype=dtypes.float32),
        trainable=False)
    if self.scale is None:
      self.scale = _get_default_scale(self.kernel_initializer, input_dim)
    # Only the scalar kernel scale is (optionally) trainable, and it is
    # constrained to stay non-negative.
    self.kernel_scale = self.add_weight(
        name='kernel_scale',
        shape=(1,),
        dtype=dtypes.float32,
        initializer=init_ops.constant_initializer(self.scale),
        trainable=True,
        constraint='NonNeg')
    super(RandomFourierFeatures, self).build(input_shape)
  def call(self, inputs):
    inputs = ops.convert_to_tensor_v2_with_dispatch(inputs, dtype=self.dtype)
    inputs = math_ops.cast(inputs, dtypes.float32)
    # Dividing the projection by the scale realizes the kernel bandwidth:
    # the features computed are cos(x @ W / scale + b).
    kernel = (1.0 / self.kernel_scale) * self.unscaled_kernel
    outputs = gen_math_ops.MatMul(a=inputs, b=kernel)
    outputs = nn.bias_add(outputs, self.bias)
    return gen_math_ops.cos(outputs)
  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank(2)
    if input_shape.dims[-1].value is None:
      raise ValueError(
          'The innermost dimension of input shape must be defined. Given: %s' %
          input_shape)
    return input_shape[:-1].concatenate(self.output_dim)
  def get_config(self):
    kernel_initializer = self.kernel_initializer
    if not isinstance(kernel_initializer, str):
      kernel_initializer = initializers.serialize(kernel_initializer)
    config = {
        'output_dim': self.output_dim,
        'kernel_initializer': kernel_initializer,
        'scale': self.scale,
    }
    base_config = super(RandomFourierFeatures, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
def _get_random_features_initializer(initializer, shape):
  """Returns Initializer object for random features."""
  def _get_cauchy_samples(loc, scale, shape):
    # Standard Cauchy samples via inverse-CDF transform of uniforms.
    probs = np.random.uniform(low=0., high=1., size=shape)
    return loc + scale * np.tan(np.pi * (probs - 0.5))
  random_features_initializer = initializer
  if isinstance(initializer, str):
    if initializer.lower() == 'gaussian':
      random_features_initializer = init_ops.random_normal_initializer(
          stddev=1.0)
    elif initializer.lower() == 'laplacian':
      random_features_initializer = init_ops.constant_initializer(
          _get_cauchy_samples(loc=0.0, scale=1.0, shape=shape))
    else:
      # Non-string initializers pass through unchanged; unknown strings fail.
      raise ValueError(
          'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'.format(
              random_features_initializer, _SUPPORTED_RBF_KERNEL_TYPES))
  return random_features_initializer
def _get_default_scale(initializer, input_dim):
if (isinstance(initializer, str) and
initializer.lower() == 'gaussian'):
return np.sqrt(input_dim / 2.0)
return 1.0
| apache-2.0 |
Bashar/django | django/contrib/gis/gdal/error.py | 104 | 1430 | """
This module houses the OGR & SRS Exception objects, and the
check_err() routine which checks the status code returned by
OGR methods.
"""
#### OGR & SRS Exceptions ####
class GDALException(Exception):
    "Generic exception for errors originating in the GDAL library."
    pass
class OGRException(Exception):
    "Exception raised for errors reported by OGR routines."
    pass
class SRSException(Exception):
    "Exception raised for spatial reference system (SRS) errors."
    pass
class OGRIndexError(OGRException, KeyError):
    """
    This exception is raised when an invalid index is encountered, and has
    the 'silent_variable_failure' attribute set to true. This ensures that
    django's templates proceed to use the next lookup type gracefully when
    an Exception is raised. Fixes ticket #4740.
    """
    silent_variable_failure = True
#### OGR error checking codes and routine ####
# OGR Error Codes
# Maps each non-zero OGRERR status code to the exception class to raise
# and its message (code 7 is an SRS problem, the rest are OGR problems).
OGRERR_DICT = {
    1: (OGRException, 'Not enough data.'),
    2: (OGRException, 'Not enough memory.'),
    3: (OGRException, 'Unsupported geometry type.'),
    4: (OGRException, 'Unsupported operation.'),
    5: (OGRException, 'Corrupt data.'),
    6: (OGRException, 'OGR failure.'),
    7: (SRSException, 'Unsupported SRS.'),
    8: (OGRException, 'Invalid handle.'),
}
# Status code signalling success.
OGRERR_NONE = 0
def check_err(code):
    "Checks the given OGRERR, and raises an exception where appropriate."
    if code == OGRERR_NONE:
        return
    entry = OGRERR_DICT.get(code)
    if entry is None:
        raise OGRException('Unknown error code: "%s"' % code)
    exc_class, message = entry
    raise exc_class(message)
| bsd-3-clause |
probml/pyprobml | scripts/lms_demo.py | 1 | 6196 | # SGD on linear regression aka least mean squares
# Written by Duane Rich
# Based on https://github.com/probml/pmtk3/blob/master/demos/LMSdemoSimple.m
import numpy as np
import matplotlib.pyplot as plt
from pyprobml_utils import save_fig
#from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
plt.rcParams["figure.figsize"] = (5,5) # width x height
np.random.seed(0)
#Generating synthetic data:
N = 21
wTrue = np.array([1.45, 0.92])
X = np.random.uniform(-2, 2, N)
X = np.column_stack((np.ones(N), X))
y = wTrue[0] * X[:, 0] + wTrue[1] * X[:, 1] + np.random.normal(0, .1, N)
#Plot SSE surface over parameter space.
v = np.arange(-1, 3, .1)
W0, W1 = np.meshgrid(v, v)
SS = np.array([sum((w0 * X[:, 0] + w1 * X[:, 1] - y) ** 2) for w0, w1 in zip(np.ravel(W0), np.ravel(W1))])
SS = SS.reshape(W0.shape)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(W0, W1, SS)
save_fig('lmsSSE.pdf')
plt.draw()
def LinregLossScaled(w, X, y):
    """Scaled least-squares objective for linear regression.

    Returns the mean squared residual together with its (1/N)-scaled
    gradient X'(Xw - y)/N and Hessian X'X/N, so magnitudes are comparable
    across data-set sizes.
    """
    n_rows = X.shape[0]
    residual = X.dot(w) - y
    loss = np.mean(residual * residual)
    grad = X.T.dot(residual) / n_rows
    hess = X.T.dot(X) / n_rows
    return loss, grad, hess
#Starting point from which to search for optimal parameters
w0 = np.array([-0.5, 2])
# Determine loss at optimal param values:
def funObj(w):
    # Scalar objective (loss only) for scipy.optimize; uses the global X, y.
    out,_,_ = LinregLossScaled(w, X, y)
    return out
res = minimize(funObj, w0, method='L-BFGS-B')
wOpt = res.x
fopt = funObj(wOpt)
# fopt,_ ,_ = LinregLossScaled(wTrue, X, y)
#Options for stochastic gradient descent
opts = {}
opts['batchsize'] = 1
opts['verbose'] = True
opts['storeParamTrace'] = True
opts['storeFvalTrace'] = True
opts['storeStepTrace'] = True
opts['maxUpdates'] = 30
opts['eta0'] = 0.5
opts['t0'] = 3
def batchify(X, y, batchsize):
    """Break the rows of X and entries of y into consecutive mini-batches.

    Returns two parallel lists: one of row-slices of X and one of the
    matching slices of y. The final batch may be smaller than `batchsize`
    when the number of rows is not a multiple of it.
    """
    nTrain = X.shape[0]
    batchdata = []
    batchlabels = []
    for start in range(0, nTrain, batchsize):
        # Clamp to nTrain (the original used nTrain + 1, a latent off-by-one
        # that only happened to be harmless because slicing clamps anyway).
        end = min(start + batchsize, nTrain)
        batchdata.append(X[start:end, :])
        batchlabels.append(y[start:end])
    return batchdata, batchlabels
def stochgradSimple(objFun, w0, X, y, *args, **kwargs):
    #Stochastic gradient descent.
    #Algorithm works by breaking up the data into batches. It
    #determines a gradient for each batch and moves the current
    #choice of parameters in that direction. The extent to which
    #we move in that direction is determined by our shrinking
    #stepsize over time.
    #Includes options for the batchsize, total number of sweeps over
    #the data (maxepoch), total number of batches inspected (maxUpdates),
    #whether the algo should print updates as it progresses, options
    #controlling what information we keep track of, and parameters to
    #determine how the step size shrinks over time.
    #objFun must return a (loss, gradient, hessian) triple; only the first
    #two entries are used here.
    #Returns the final weight vector and a dict of recorded traces.
    #Default options
    batchsize = kwargs['batchsize'] if 'batchsize' in kwargs else 10
    maxepoch = kwargs['maxepoch'] if 'maxepoch' in kwargs else 500
    maxUpdates = kwargs['maxUpdates'] if 'maxUpdates' in kwargs else 1000
    verbose = kwargs['verbose'] if 'verbose' in kwargs else False
    storeParamTrace = kwargs['storeParamTrace'] if 'storeParamTrace' in kwargs else False
    storeFvalTrace = kwargs['storeFvalTrace'] if 'storeFvalTrace' in kwargs else False
    storeStepTrace = kwargs['storeStepTrace'] if 'storeStepTrace' in kwargs else False
    t0 = kwargs['t0'] if 't0' in kwargs else 1
    eta0 = kwargs['eta0'] if 'eta0' in kwargs else 0.1
    # Default schedule: eta(k) = eta0 * t0 / (k + t0), a decaying step size.
    stepSizeFn = kwargs['stepSizeFn'] if 'stepSizeFn' in kwargs else lambda x: eta0*t0/(x+t0)
    #Turn the data into batches
    batchdata, batchlabels = batchify(X, y, batchsize);
    num_batches = len(batchlabels)
    if verbose:
        print('%d batches of size %d\n' %(num_batches, batchsize))
    w = w0
    trace = {}
    trace['fvalMinibatch'] = []
    trace['params'] = []
    trace['stepSize'] = []
    # Main loop:
    nupdates = 1
    for epoch in range(1, maxepoch+1):
        if verbose:
            print('epoch %d\n' % epoch)
        for b in range(num_batches):
            bdata = batchdata[b]
            blabels = batchlabels[b]
            if verbose and b % 100 == 0:
                print('epoch %d batch %d nupdates %d\n' %(epoch, b, nupdates))
            fb, g, _ = objFun(w, bdata, blabels, *args)
            eta = stepSizeFn(nupdates)
            w = w - eta*g #steepest descent
            nupdates += 1
            if storeParamTrace:
                #Storing the history of the parameters may take a lot of space
                trace['params'].append(w)
            if storeFvalTrace:
                trace['fvalMinibatch'].append(fb)
            if storeStepTrace:
                trace['stepSize'].append(eta)
            # Double break: stop both the batch loop and the epoch loop once
            # the update budget is exhausted.
            if nupdates > maxUpdates:
                break
        if nupdates > maxUpdates:
            break
    return w, trace
# Run SGD from w0 with the options configured above, recording all traces.
w, trace = stochgradSimple(LinregLossScaled, w0, X, y, **opts)
def stochgradTracePostprocess(objFun, trace, X, y, *args):
    """Re-evaluate the objective on the full data set at every parameter
    vector recorded along the optimization path, returning the loss history."""
    return [objFun(params, X, y, *args)[0] for params in trace]
# Final parameter vector found by SGD.
print(w)
whist = np.asarray(trace['params'])

# Parameter trajectory: SGD path (black) over the loss contours, with the
# least-squares solution marked by a red cross.  (W0/W1/SS are presumably a
# grid of the RSS surface built earlier in the script — confirm upstream.)
if True:
    fig, ax = plt.subplots()
    ax.set_title('black line = LMS trajectory towards LS soln (red cross)')
    CS = plt.contour(W0, W1, SS)
    plt.plot(wOpt[0], wOpt[1], 'x', color='r', ms=10, mew=5)
    plt.plot(whist[:, 0], whist[:, 1], 'ko-', lw=2)
    save_fig('lmsTraj.pdf')
    plt.draw()

# Loss values over the parameter path compared to the optimal loss (fopt).
if True:
    fvalhist = np.asarray(stochgradTracePostprocess(LinregLossScaled, trace['params'], X, y))
    fig, ax = plt.subplots()
    ax.set_title('RSS vs iteration')
    plt.plot(fvalhist,'ko-', lw=2)
    plt.axhline(fopt)
    save_fig('lmsRssHist.pdf')
    plt.draw()

# Stepsize graph if desired:
if True:
    stephist = np.asarray(trace['stepSize'])
    fig, ax = plt.subplots()
    ax.set_title('Stepsize vs iteration')
    plt.plot(stephist,'ko-', lw=2)
    save_fig('lmsStepSizeHist.pdf')
    plt.draw()
plt.show()
| mit |
robhudson/django | django/db/backends/mysql/operations.py | 45 | 8598 | from __future__ import unicode_literals
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import six, timezone
from django.utils.encoding import force_text
class DatabaseOperations(BaseDatabaseOperations):
    compiler_module = "django.db.backends.mysql.compiler"

    # MySQL stores positive fields as UNSIGNED ints, so the upper bounds are
    # those of the corresponding unsigned MySQL column types:
    # SMALLINT UNSIGNED max is 65535, INT UNSIGNED max is 4294967295.
    # (Fixed: the values were previously shifted one type up — the SMALLINT
    # range carried INT's maximum and the INT range carried BIGINT's maximum,
    # which would let out-of-range values pass Django-side validation.)
    integer_field_ranges = dict(BaseDatabaseOperations.integer_field_ranges,
        PositiveSmallIntegerField=(0, 65535),
        PositiveIntegerField=(0, 4294967295),
    )
def date_extract_sql(self, lookup_type, field_name):
    """Return SQL extracting one date component from field_name."""
    # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
    if lookup_type == 'week_day':
        # DAYOFWEEK() yields 1-7 with Sunday=1, which is the convention the
        # week_day lookup needs (WEEKDAY() would give Monday=0 instead).
        return "DAYOFWEEK(%s)" % field_name
    return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
    """Return SQL truncating a date/datetime column to lookup_type."""
    known = ['year', 'month', 'day', 'hour', 'minute', 'second']
    # Double percents survive the later parameter interpolation.
    pieces = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')
    fillers = ('0000-', '01', '-01', ' 00:', '00', ':00')
    try:
        keep = known.index(lookup_type) + 1
    except ValueError:
        # Unknown granularity: hand the column back untouched.
        return field_name
    fmt = ''.join(pieces[:keep] + fillers[keep:])
    return "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, fmt)
def _convert_field_to_tz(self, field_name, tzname):
    """Wrap field_name in CONVERT_TZ() when USE_TZ is on.

    Returns the (possibly rewritten) SQL fragment and its parameter list.
    """
    if not settings.USE_TZ:
        return field_name, []
    return "CONVERT_TZ(%s, 'UTC', %%s)" % field_name, [tzname]
def datetime_cast_date_sql(self, field_name, tzname):
    """Cast a datetime column to a date in the target timezone."""
    # Convert to the requested timezone first, then truncate to a date.
    tz_field, params = self._convert_field_to_tz(field_name, tzname)
    return "DATE(%s)" % tz_field, params
def datetime_extract_sql(self, lookup_type, field_name, tzname):
    """Extract a component from a (timezone-converted) datetime column."""
    tz_field, params = self._convert_field_to_tz(field_name, tzname)
    return self.date_extract_sql(lookup_type, tz_field), params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
    """Truncate a (possibly timezone-converted) datetime column.

    The fields/format tables were an exact copy of the ones in
    date_trunc_sql(); delegate to it instead of duplicating them so the
    two methods cannot drift apart.
    """
    field_name, params = self._convert_field_to_tz(field_name, tzname)
    return self.date_trunc_sql(lookup_type, field_name), params
def date_interval_sql(self, timedelta):
    # Render a Python timedelta as a MySQL DAY_MICROSECOND interval literal.
    # Everything is inlined into the SQL, hence the empty parameter list.
    return "INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND" % (
        timedelta.days, timedelta.seconds, timedelta.microseconds), []
def format_for_duration_arithmetic(self, sql):
    """Wrap a duration expression (microseconds) as a MySQL INTERVAL."""
    # MySQL builds without microsecond precision fall back to whole seconds.
    if not self.connection.features.supports_microsecond_precision:
        return 'INTERVAL FLOOR(%s / 1000000) SECOND' % sql
    return 'INTERVAL %s MICROSECOND' % sql
def drop_foreignkey_sql(self):
    # MySQL uses "DROP FOREIGN KEY", not the standard "DROP CONSTRAINT".
    return "DROP FOREIGN KEY"
def force_no_ordering(self):
    """
    "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
    columns. If no ordering would otherwise be applied, we don't want any
    implicit sorting going on.
    """
    # Presumably an (expression, (sql, params, is_ref)) pair as consumed by
    # the SQL compiler's ordering machinery — confirm against compiler.py.
    return [(None, ("NULL", [], False))]
def fulltext_search_sql(self, field_name):
    # MATCH ... AGAINST in boolean mode; the search term is bound via %s.
    return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def last_executed_query(self, cursor, sql, params):
    # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
    # attribute where the exact query sent to the database is saved.
    # See MySQLdb/cursors.py in the source distribution.
    # errors='replace' keeps this usable even when the raw bytes are not
    # valid in the expected encoding.
    return force_text(getattr(cursor, '_last_executed', None), errors='replace')
def no_limit_value(self):
    # Presumably used as an effectively unbounded LIMIT when only an offset
    # is wanted — confirm against BaseDatabaseOperations.no_limit_value.
    # 2**64 - 1, as recommended by the MySQL documentation
    return 18446744073709551615
def quote_name(self, name):
    """Backtick-quote an identifier, leaving already-quoted names alone."""
    already_quoted = name.startswith("`") and name.endswith("`")
    # Quoting twice would corrupt the identifier.
    return name if already_quoted else "`%s`" % name
def random_function_sql(self):
    # MySQL's random function is RAND(), not the ANSI RANDOM().
    return 'RAND()'
def sql_flush(self, style, tables, sequences, allow_cascade=False):
    """Return MySQL-specific TRUNCATE statements for every given table,
    bracketed by statements disabling and re-enabling foreign key checks
    (otherwise a referenced table cannot be truncated)."""
    if not tables:
        return []
    statements = ['SET FOREIGN_KEY_CHECKS = 0;']
    statements.extend(
        '%s %s;' % (style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_FIELD(self.quote_name(table)))
        for table in tables)
    statements.append('SET FOREIGN_KEY_CHECKS = 1;')
    statements.extend(self.sequence_reset_by_name_sql(style, sequences))
    return statements
def validate_autopk_value(self, value):
    """Reject 0 for AutoField primary keys."""
    # MySQLism: inserting 0 into an AUTO_INCREMENT column does not store 0,
    # so the value cannot round-trip. Refs #17653.
    if value != 0:
        return value
    raise ValueError('The database backend does not accept 0 as a '
                     'value for AutoField.')
def adapt_datetimefield_value(self, value):
    """Serialize a datetime for MySQL, which cannot store tz-aware values."""
    if value is None:
        return None
    # MySQL doesn't support tz-aware datetimes
    if timezone.is_aware(value):
        if settings.USE_TZ:
            # Make naive in the connection's timezone before storing.
            value = timezone.make_naive(value, self.connection.timezone)
        else:
            raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
    # Builds without fractional-second support cannot store microseconds.
    if not self.connection.features.supports_microsecond_precision:
        value = value.replace(microsecond=0)
    return six.text_type(value)
def adapt_timefield_value(self, value):
    """Serialize a time for MySQL; tz-aware times are rejected."""
    if value is None:
        return None
    # MySQL doesn't support tz-aware times
    if timezone.is_aware(value):
        raise ValueError("MySQL backend does not support timezone-aware times.")
    return six.text_type(value)
def max_name_length(self):
    # MySQL limits identifier names to 64 characters.
    return 64
def bulk_insert_sql(self, fields, placeholder_rows):
    """Build a multi-row VALUES clause from rows of placeholders."""
    # Render every row as "(a, b, ...)" and splice them together.
    rows = ("(%s)" % ", ".join(row) for row in placeholder_rows)
    return "VALUES " + ", ".join(rows)
def combine_expression(self, connector, sub_expressions):
    """MySQL has no ^ power operator in expressions; rewrite it as POW()."""
    if connector != '^':
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
    return 'POW(%s)' % ','.join(sub_expressions)
def get_db_converters(self, expression):
    """Extend the base converter chain with MySQL-specific fixups keyed
    on the output field's internal type."""
    converters = super(DatabaseOperations, self).get_db_converters(expression)
    extra = {
        'TextField': self.convert_textfield_value,
        'BooleanField': self.convert_booleanfield_value,
        'NullBooleanField': self.convert_booleanfield_value,
        'DateTimeField': self.convert_datetimefield_value,
        'UUIDField': self.convert_uuidfield_value,
    }.get(expression.output_field.get_internal_type())
    if extra is not None:
        converters.append(extra)
    return converters
def convert_textfield_value(self, value, expression, connection, context):
    """Coerce non-NULL text values to unicode; NULL passes through."""
    return value if value is None else force_text(value)
def convert_booleanfield_value(self, value, expression, connection, context):
    """Map MySQL's tinyint 0/1 to real booleans; leave other values
    (e.g. None) untouched."""
    return bool(value) if value in (0, 1) else value
def convert_datetimefield_value(self, value, expression, connection, context):
    # MySQL hands back naive datetimes; re-attach the connection timezone
    # when USE_TZ is enabled so callers receive aware objects.
    if value is not None:
        if settings.USE_TZ:
            value = timezone.make_aware(value, self.connection.timezone)
    return value
def convert_uuidfield_value(self, value, expression, connection, context):
    """Rebuild a uuid.UUID from the string the database returns."""
    return None if value is None else uuid.UUID(value)
| bsd-3-clause |
rubyu/anki | aqt/stats.py | 1 | 3060 | # Copyright: Damien Elmes <anki@ichi2.net>
# -*- coding: utf-8 -*-
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from aqt.qt import *
import os, time
from aqt.utils import saveGeom, restoreGeom, maybeHideClose, showInfo, addCloseShortcut
import aqt
# Deck Stats
######################################################################
class DeckStats(QDialog):
    """Modal dialog rendering the collection/deck statistics report in an
    embedded web view, with period/scope toggles and PNG export."""

    def __init__(self, mw):
        QDialog.__init__(self, mw, Qt.Window)
        self.mw = mw
        self.name = "deckStats"  # key used to persist window geometry
        self.period = 0          # 0=month, 1=year, 2=deck life (see button wiring below)
        self.form = aqt.forms.stats.Ui_Dialog()
        self.oldPos = None       # scroll position restored after a refresh
        self.wholeCollection = False
        self.setMinimumWidth(700)
        f = self.form
        f.setupUi(self)
        restoreGeom(self, self.name)
        b = f.buttonBox.addButton(_("Save Image"),
                                  QDialogButtonBox.ActionRole)
        b.connect(b, SIGNAL("clicked()"), self.browser)
        b.setAutoDefault(False)
        # Wire the scope and period radio buttons (old-style Qt signals).
        c = self.connect
        s = SIGNAL("clicked()")
        c(f.groups, s, lambda: self.changeScope("deck"))
        f.groups.setShortcut("g")
        c(f.all, s, lambda: self.changeScope("collection"))
        c(f.month, s, lambda: self.changePeriod(0))
        c(f.year, s, lambda: self.changePeriod(1))
        c(f.life, s, lambda: self.changePeriod(2))
        c(f.web, SIGNAL("loadFinished(bool)"), self.loadFin)
        maybeHideClose(self.form.buttonBox)
        addCloseShortcut(self)
        self.refresh()
        self.exec_()

    def reject(self):
        # Persist window geometry before the dialog closes.
        saveGeom(self, self.name)
        QDialog.reject(self)

    def browser(self):
        """Render the current report to a PNG file on the user's desktop."""
        name = time.strftime("-%Y-%m-%d@%H-%M-%S.png",
                             time.localtime(time.time()))
        name = "anki-"+_("stats")+name
        desktopPath = QDesktopServices.storageLocation(QDesktopServices.DesktopLocation)
        if not os.path.exists(desktopPath):
            os.mkdir(desktopPath)
        path = os.path.join(desktopPath, name)
        p = self.form.web.page()
        # Temporarily grow the viewport to the full content size so the whole
        # report, not just the visible part, ends up in the image.
        oldsize = p.viewportSize()
        p.setViewportSize(p.mainFrame().contentsSize())
        image = QImage(p.viewportSize(), QImage.Format_ARGB32)
        painter = QPainter(image)
        p.mainFrame().render(painter)
        painter.end()
        image.save(path, "png")
        p.setViewportSize(oldsize)
        showInfo(_("An image was saved to your desktop."))

    def changePeriod(self, n):
        # Switch reporting period and redraw.
        self.period = n
        self.refresh()

    def changeScope(self, type):
        # "collection" shows stats for everything; anything else = current deck.
        self.wholeCollection = type == "collection"
        self.refresh()

    def loadFin(self, b):
        # Restore the scroll position saved in refresh().
        self.form.web.page().mainFrame().setScrollPosition(self.oldPos)

    def refresh(self):
        """Regenerate the statistics report and reload the web view."""
        self.mw.progress.start(immediate=True)
        self.oldPos = self.form.web.page().mainFrame().scrollPosition()
        stats = self.mw.col.stats()
        stats.wholeCollection = self.wholeCollection
        self.report = stats.report(type=self.period)
        self.form.web.setHtml(self.report)
        self.mw.progress.finish()
| agpl-3.0 |
halitalptekin/isip | isip/scapy/main.py | 6 | 10787 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Main module for interactive startup.
"""
from __future__ import generators
import os,sys
import glob
import __builtin__
from error import *
import utils
def _probe_config_file(cf):
cf_path = os.path.join(os.path.expanduser("~"), cf)
try:
os.stat(cf_path)
except OSError:
return None
else:
return cf_path
def _read_config_file(cf):
    """Execute the given Scapy config file (Python 2 execfile).

    Errors are logged but never propagated, so a broken config file does
    not prevent Scapy from starting.
    """
    log_loading.debug("Loading config file [%s]" % cf)
    try:
        execfile(cf)
    except IOError,e:
        log_loading.warning("Cannot read config file [%s] [%s]" % (cf,e))
    except Exception,e:
        log_loading.exception("Error during evaluation of config file [%s]" % cf)
# Optional per-user hook files, run respectively before and after Scapy's
# own initialisation (None when the file does not exist in $HOME).
DEFAULT_PRESTART_FILE = _probe_config_file(".scapy_prestart.py")
DEFAULT_STARTUP_FILE = _probe_config_file(".scapy_startup.py")
def _usage():
    """Print command-line usage and exit (Python 2 print statement)."""
    print """Usage: scapy.py [-s sessionfile] [-c new_startup_file] [-p new_prestart_file] [-C] [-P]
-C: do not read startup file
-P: do not read pre-startup file"""
    sys.exit(0)
from config import conf
from themes import DefaultTheme
######################
## Extension system ##
######################
def _load(module):
    """Import a scapy submodule and splash its namespace into builtins so
    its names are reachable from the interactive session."""
    try:
        mod = __import__(module,globals(),locals(),".")
        __builtin__.__dict__.update(mod.__dict__)
    except Exception,e:
        log_interactive.error(e)
def load_module(name):
    """Load an extension module from scapy/modules/."""
    _load("scapy.modules."+name)
def load_layer(name):
    """Load a protocol layer from scapy/layers/."""
    _load("scapy.layers."+name)
def load_contrib(name):
    """Load a contributed module from scapy/contrib/."""
    _load("scapy.contrib."+name)
def list_contrib(name=None):
    """Print name, description and status of bundled contrib modules.

    *name* may be a glob pattern; ".py" is appended when missing so plain
    module names also work.
    """
    if name is None:
        name="*.py"
    elif "*" not in name and "?" not in name and not name.endswith(".py"):
        name += ".py"
    name = os.path.join(os.path.dirname(__file__), "contrib", name)
    for f in glob.glob(name):
        mod = os.path.basename(f)
        if mod.startswith("__"):
            # Skip __init__.py and other dunder files.
            continue
        if mod.endswith(".py"):
            mod = mod[:-3]
        desc = { "description":"-", "status":"?", "name":mod }
        for l in open(f):
            # Metadata lines look like "# scapy.contrib.description = ...".
            p = l.find("scapy.contrib.")
            if p >= 0:
                p += 14
                q = l.find("=", p)
                key = l[p:q].strip()
                value = l[q+1:].strip()
                desc[key] = value
        print "%(name)-20s: %(description)-40s status=%(status)s" % desc
##############################
## Session saving/restoring ##
##############################
def save_session(fname=None, session=None, pickleProto=-1):
    """Pickle the interactive session dict to a gzip'ed file.

    Unpicklable entries (types, classes, modules) are dropped with an error
    message; a pre-existing session file is kept as "<fname>.bak".
    """
    if fname is None:
        fname = conf.session
        if not fname:
            # No session file configured yet: allocate a temp file and
            # remember it for next time.
            conf.session = fname = utils.get_temp_file(keep=True)
            log_interactive.info("Use [%s] as session file" % fname)
    if session is None:
        session = __builtin__.__dict__["scapy_session"]
    to_be_saved = session.copy()
    if to_be_saved.has_key("__builtins__"):
        del(to_be_saved["__builtins__"])
    for k in to_be_saved.keys():
        if type(to_be_saved[k]) in [types.TypeType, types.ClassType, types.ModuleType]:
            log_interactive.error("[%s] (%s) can't be saved." % (k, type(to_be_saved[k])))
            del(to_be_saved[k])
    try:
        os.rename(fname, fname+".bak")
    except OSError:
        pass
    f=gzip.open(fname,"wb")
    cPickle.dump(to_be_saved, f, pickleProto)
    f.close()
def load_session(fname=None):
    """Replace the current interactive session with one loaded from fname
    (gzip'ed pickle, falling back to a plain pickle file)."""
    if fname is None:
        fname = conf.session
    try:
        s = cPickle.load(gzip.open(fname,"rb"))
    except IOError:
        s = cPickle.load(open(fname,"rb"))
    scapy_session = __builtin__.__dict__["scapy_session"]
    scapy_session.clear()
    scapy_session.update(s)
def update_session(fname=None):
    """Merge a saved session into the current one (unlike load_session,
    existing names are kept unless overwritten)."""
    if fname is None:
        fname = conf.session
    try:
        s = cPickle.load(gzip.open(fname,"rb"))
    except IOError:
        s = cPickle.load(open(fname,"rb"))
    scapy_session = __builtin__.__dict__["scapy_session"]
    scapy_session.update(s)
################
##### Main #####
################
def scapy_delete_temp_files():
    """Remove every temporary file registered in conf.temp_files.

    Best effort: files that are already gone or not removable are skipped,
    but only OS-level errors are swallowed — the previous bare ``except:``
    would also have hidden programming errors (and KeyboardInterrupt).
    """
    for f in conf.temp_files:
        try:
            os.unlink(f)
        except OSError:
            pass
def scapy_write_history_file(readline):
if conf.histfile:
try:
readline.write_history_file(conf.histfile)
except IOError,e:
try:
warning("Could not write history to [%s]\n\t (%s)" % (conf.histfile,e))
tmp = utils.get_temp_file(keep=True)
readline.write_history_file(tmp)
warning("Wrote history to [%s]" % tmp)
except:
warning("Cound not write history to [%s]. Discarded" % tmp)
def interact(mydict=None,argv=None,mybanner=None,loglevel=20):
    """Start the interactive Scapy shell.

    mydict   -- extra names to inject into the session namespace
    argv     -- command line (defaults to sys.argv); see _usage() for flags
    mybanner -- extra text appended to the startup banner
    loglevel -- initial logging level for conf.logLevel
    """
    global session
    import code,sys,cPickle,os,getopt,re
    from config import conf
    conf.interactive = True
    if loglevel is not None:
        conf.logLevel=loglevel

    the_banner = "Welcome to Scapy (%s)"
    if mybanner is not None:
        the_banner += "\n"
        the_banner += mybanner

    if argv is None:
        argv = sys.argv

    import atexit
    try:
        import rlcompleter,readline
    except ImportError:
        log_loading.info("Can't load Python libreadline or completer")
        READLINE=0
    else:
        READLINE=1
        # Completer aware of both the session namespace and Scapy packet
        # field names.
        class ScapyCompleter(rlcompleter.Completer):
            def global_matches(self, text):
                matches = []
                n = len(text)
                for lst in [dir(__builtin__), session.keys()]:
                    for word in lst:
                        if word[:n] == text and word != "__builtins__":
                            matches.append(word)
                return matches

            def attr_matches(self, text):
                m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
                if not m:
                    return
                expr, attr = m.group(1, 3)
                try:
                    object = eval(expr)
                except:
                    object = eval(expr, session)
                # Packets additionally complete on their field names.
                if isinstance(object, Packet) or isinstance(object, Packet_metaclass):
                    words = filter(lambda x: x[0]!="_",dir(object))
                    words += [x.name for x in object.fields_desc]
                else:
                    words = dir(object)
                    if hasattr( object,"__class__" ):
                        words = words + rlcompleter.get_class_members(object.__class__)
                matches = []
                n = len(attr)
                for word in words:
                    if word[:n] == attr and word != "__builtins__":
                        matches.append("%s.%s" % (expr, word))
                return matches
        readline.set_completer(ScapyCompleter().complete)
        readline.parse_and_bind("C-o: operate-and-get-next")
        readline.parse_and_bind("tab: complete")

    session=None
    session_name=""
    STARTUP_FILE = DEFAULT_STARTUP_FILE
    PRESTART_FILE = DEFAULT_PRESTART_FILE

    iface = None
    # Parse command-line options (see _usage()).
    try:
        opts=getopt.getopt(argv[1:], "hs:Cc:Pp:d")
        for opt, parm in opts[0]:
            if opt == "-h":
                _usage()
            elif opt == "-s":
                session_name = parm
            elif opt == "-c":
                STARTUP_FILE = parm
            elif opt == "-C":
                STARTUP_FILE = None
            elif opt == "-p":
                PRESTART_FILE = parm
            elif opt == "-P":
                PRESTART_FILE = None
            elif opt == "-d":
                conf.logLevel = max(1,conf.logLevel-10)
        if len(opts[1]) > 0:
            raise getopt.GetoptError("Too many parameters : [%s]" % " ".join(opts[1]))
    except getopt.GetoptError, msg:
        log_loading.error(msg)
        sys.exit(1)

    if PRESTART_FILE:
        _read_config_file(PRESTART_FILE)

    # Pull all of scapy.all into builtins so the shell sees every name, and
    # remember which keys to clean out on exit.
    scapy_builtins = __import__("all",globals(),locals(),".").__dict__
    __builtin__.__dict__.update(scapy_builtins)
    globkeys = scapy_builtins.keys()
    globkeys.append("scapy_session")
    scapy_builtins=None # XXX replace with "with" statement
    if mydict is not None:
        __builtin__.__dict__.update(mydict)
        globkeys += mydict.keys()

    conf.color_theme = DefaultTheme()
    if STARTUP_FILE:
        _read_config_file(STARTUP_FILE)

    # Restore a saved session if one was requested and exists.
    if session_name:
        try:
            os.stat(session_name)
        except OSError:
            log_loading.info("New session [%s]" % session_name)
        else:
            try:
                try:
                    session = cPickle.load(gzip.open(session_name,"rb"))
                except IOError:
                    session = cPickle.load(open(session_name,"rb"))
                log_loading.info("Using session [%s]" % session_name)
            except EOFError:
                log_loading.error("Error opening session [%s]" % session_name)
            except AttributeError:
                log_loading.error("Error opening session [%s]. Attribute missing" % session_name)

        if session:
            if "conf" in session:
                conf.configure(session["conf"])
                session["conf"] = conf
        else:
            conf.session = session_name
            session={"conf":conf}
    else:
        session={"conf": conf}

    __builtin__.__dict__["scapy_session"] = session

    if READLINE:
        if conf.histfile:
            try:
                readline.read_history_file(conf.histfile)
            except IOError:
                pass
        atexit.register(scapy_write_history_file,readline)

    atexit.register(scapy_delete_temp_files)

    # Prefer IPython as the shell when configured and importable.
    IPYTHON=False
    if conf.interactive_shell.lower() == "ipython":
        try:
            import IPython
            IPYTHON=True
        except ImportError, e:
            log_loading.warning("IPython not available. Using standard Python shell instead.")
            IPYTHON=False

    if IPYTHON:
        banner = the_banner % (conf.version) + " using IPython %s" % IPython.__version__
        args = ['']  # IPython command line args (will be seen as sys.argv)
        ipshell = IPython.Shell.IPShellEmbed(args, banner = banner)
        ipshell(local_ns=session)
    else:
        code.interact(banner = the_banner % (conf.version),
                      local=session, readfunc=conf.readfunc)

    if conf.session:
        save_session(conf.session, session)

    # Remove the names we injected into builtins.
    for k in globkeys:
        try:
            del(__builtin__.__dict__[k])
        except:
            pass
# Allow running this module directly to start the interactive shell.
if __name__ == "__main__":
    interact()
| mit |
chenshuo/linux-study | tools/perf/scripts/python/exported-sql-viewer.py | 199 | 159603 | #!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
# To use this script you will need to have exported data using either the
# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those
# scripts for details.
#
# Following on from the example in the export scripts, a
# call-graph can be displayed for the pt_example database like this:
#
# python tools/perf/scripts/python/exported-sql-viewer.py pt_example
#
# Note that for PostgreSQL, this script supports connecting to remote databases
# by setting hostname, port, username, password, and dbname e.g.
#
# python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
#
# The result is a GUI window with a tree representing a context-sensitive
# call-graph. Expanding a couple of levels of the tree and adjusting column
# widths to suit will display something like:
#
# Call Graph: pt_example
# Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
# v- ls
# v- 2638:2638
# v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
# |- unknown unknown 1 13198 0.1 1 0.0
# >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
# >- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
# v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
# >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
# >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
# >- __libc_csu_init ls 1 10354 0.1 10 0.0
# |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
# v- main ls 1 8182043 99.6 180254 99.9
#
# Points to note:
# The top level is a command name (comm)
# The next level is a thread (pid:tid)
# Subsequent levels are functions
# 'Count' is the number of calls
# 'Time' is the elapsed time until the function returns
# Percentages are relative to the level above
# 'Branch Count' is the total number of branches for that function and all
# functions that it calls
# There is also a "All branches" report, which displays branches and
# possibly disassembly. However, presently, the only supported disassembler is
# Intel XED, and additionally the object code must be present in perf build ID
# cache. To use Intel XED, libxed.so must be present. To build and install
# libxed.so:
# git clone https://github.com/intelxed/mbuild.git mbuild
# git clone https://github.com/intelxed/xed
# cd xed
# ./mfile.py --share
# sudo ./mfile.py --prefix=/usr/local install
# sudo ldconfig
#
# Example report:
#
# Time CPU Command PID TID Branch Type In Tx Branch
# 8107675239590 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 8107675239899 2 ls 22011 22011 hardware interrupt No 7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675241900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 7fab593ea263 e8 c8 06 00 00 callq 0x7fab593ea930
# 8107675241900 2 ls 22011 22011 call No 7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so)
# 7fab593ea930 55 pushq %rbp
# 7fab593ea931 48 89 e5 mov %rsp, %rbp
# 7fab593ea934 41 57 pushq %r15
# 7fab593ea936 41 56 pushq %r14
# 7fab593ea938 41 55 pushq %r13
# 7fab593ea93a 41 54 pushq %r12
# 7fab593ea93c 53 pushq %rbx
# 7fab593ea93d 48 89 fb mov %rdi, %rbx
# 7fab593ea940 48 83 ec 68 sub $0x68, %rsp
# 7fab593ea944 0f 31 rdtsc
# 7fab593ea946 48 c1 e2 20 shl $0x20, %rdx
# 7fab593ea94a 89 c0 mov %eax, %eax
# 7fab593ea94c 48 09 c2 or %rax, %rdx
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 8107675242232 2 ls 22011 22011 hardware interrupt No 7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675242900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so)
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
from __future__ import print_function
import sys
import argparse
import weakref
import threading
import string
try:
# Python2
import cPickle as pickle
# size of pickled integer big enough for record size
glb_nsz = 8
except ImportError:
import pickle
glb_nsz = 16
import re
import os
import random
import copy
import math
pyside_version_1 = True
if not "--pyside-version-1" in sys.argv:
try:
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtSql import *
from PySide2.QtWidgets import *
pyside_version_1 = False
except:
pass
if pyside_version_1:
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSql import *
from decimal import *
from ctypes import *
from multiprocessing import Process, Array, Value, Event
# Py2/Py3 compatibility shim: ensure xrange is always available
# (xrange is range in Python3).
try:
    xrange
except NameError:
    xrange = range
def printerr(*args, **keyword_args):
    """print() that writes to stderr."""
    print(*args, file=sys.stderr, **keyword_args)
# Data formatting helpers
def tohex(ip):
    """Format a (possibly negative) integer as unsigned 64-bit hex."""
    # Negative values are 64-bit addresses that were stored in a signed
    # column; wrap them back into the unsigned range.
    return "%x" % (ip + (1 << 64) if ip < 0 else ip)
def offstr(offset):
    """Return "+0xNN" for a non-zero offset, else the empty string."""
    return "+0x%x" % offset if offset else ""
def dsoname(name):
    """Shorten the kernel DSO name; any other name passes through."""
    return "[kernel]" if name == "[kernel.kallsyms]" else name
def findnth(s, sub, n, offs=0):
    """Return offs plus the index of the n'th occurrence of sub in s,
    or a negative value when there are fewer than n occurrences."""
    pos = s.find(sub)
    if pos < 0:
        return pos
    # Iterative walk instead of the recursive formulation: step from one
    # occurrence to the next until the n'th is reached.
    while n > 1:
        nxt = s.find(sub, pos + 1)
        if nxt < 0:
            return nxt
        pos = nxt
        n -= 1
    return offs + pos
# Percent to one decimal place
def PercentToOneDP(n, d):
    """Return n/d as a percentage string with exactly one decimal place
    (half-up rounding); "0.0" when the denominator is zero."""
    if not d:
        return "0.0"
    pct = (Decimal(100) * n) / d
    return str(pct.quantize(Decimal(".1"), rounding=ROUND_HALF_UP))
# Helper for queries that must not fail
def QueryExec(query, stmt):
    """Execute stmt on a QSqlQuery, raising on any database error."""
    ret = query.exec_(stmt)
    if not ret:
        raise Exception("Query failed: " + query.lastError().text())
# Background thread
class Thread(QThread):
    """Background thread that calls `task` repeatedly, emitting `done`
    with each result, until the task reports completion.

    `task` must return a (done, result) pair; when `param` is given it is
    passed to every call.
    """
    done = Signal(object)

    def __init__(self, task, param=None, parent=None):
        super(Thread, self).__init__(parent)
        self.task = task
        self.param = param

    def run(self):
        while True:
            if self.param is None:
                done, result = self.task()
            else:
                done, result = self.task(self.param)
            self.done.emit(result)
            if done:
                break
# Tree data model
class TreeModel(QAbstractItemModel):
    """Lazily-populated Qt tree model.

    Subclasses supply GetRoot(), columnHeader() and item objects that
    implement childCount()/hasChildren()/getData() etc.; this base class
    maps the Qt model API onto those items.
    """

    def __init__(self, glb, params, parent=None):
        super(TreeModel, self).__init__(parent)
        self.glb = glb
        self.params = params
        self.root = self.GetRoot()
        self.last_row_read = 0

    def Item(self, parent):
        # Map an index to its item; an invalid index denotes the root.
        if parent.isValid():
            return parent.internalPointer()
        else:
            return self.root

    def rowCount(self, parent):
        result = self.Item(parent).childCount()
        if result < 0:
            # Negative means the item just discovered it has no children;
            # report 0 and notify attached views.
            result = 0
            self.dataChanged.emit(parent, parent)
        return result

    def hasChildren(self, parent):
        return self.Item(parent).hasChildren()

    def headerData(self, section, orientation, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(section)
        if role != Qt.DisplayRole:
            return None
        if orientation != Qt.Horizontal:
            return None
        return self.columnHeader(section)

    def parent(self, child):
        child_item = child.internalPointer()
        if child_item is self.root:
            return QModelIndex()
        parent_item = child_item.getParentItem()
        return self.createIndex(parent_item.getRow(), 0, parent_item)

    def index(self, row, column, parent):
        child_item = self.Item(parent).getChildItem(row)
        return self.createIndex(row, column, child_item)

    def DisplayData(self, item, index):
        return item.getData(index.column())

    def FetchIfNeeded(self, row):
        # Prefetch another chunk when the view nears the end of the rows
        # read so far.
        if row > self.last_row_read:
            self.last_row_read = row
            if row + 10 >= self.root.child_count:
                self.fetcher.Fetch(glb_chunk_sz)

    def columnAlignment(self, column):
        return Qt.AlignLeft

    def columnFont(self, column):
        return None

    def data(self, index, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(index.column())
        if role == Qt.FontRole:
            return self.columnFont(index.column())
        if role != Qt.DisplayRole:
            return None
        item = index.internalPointer()
        return self.DisplayData(item, index)
# Table data model
class TableModel(QAbstractTableModel):
    """Lazily-populated Qt table model; rows are fetched in chunks via a
    subclass-provided `fetcher`."""

    def __init__(self, parent=None):
        super(TableModel, self).__init__(parent)
        self.child_count = 0
        self.child_items = []
        self.last_row_read = 0

    def Item(self, parent):
        if parent.isValid():
            return parent.internalPointer()
        else:
            return self

    def rowCount(self, parent):
        return self.child_count

    def headerData(self, section, orientation, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(section)
        if role != Qt.DisplayRole:
            return None
        if orientation != Qt.Horizontal:
            return None
        return self.columnHeader(section)

    def index(self, row, column, parent):
        return self.createIndex(row, column, self.child_items[row])

    def DisplayData(self, item, index):
        return item.getData(index.column())

    def FetchIfNeeded(self, row):
        # Prefetch another chunk when the view nears the end of the rows
        # read so far.
        if row > self.last_row_read:
            self.last_row_read = row
            if row + 10 >= self.child_count:
                self.fetcher.Fetch(glb_chunk_sz)

    def columnAlignment(self, column):
        return Qt.AlignLeft

    def columnFont(self, column):
        return None

    def data(self, index, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(index.column())
        if role == Qt.FontRole:
            return self.columnFont(index.column())
        if role != Qt.DisplayRole:
            return None
        item = index.internalPointer()
        return self.DisplayData(item, index)
# Model cache
# Cache of data models keyed by name. Weak values let a model be
# garbage-collected once no view holds it. Guard all access with
# model_cache_lock.
model_cache = weakref.WeakValueDictionary()
model_cache_lock = threading.Lock()
def LookupCreateModel(model_name, create_fn):
    """Return the cached model registered under model_name, creating it
    with create_fn() (and caching it) on a cache miss.

    Fixed: the lock is now held via a ``with`` block so it is released even
    when create_fn() raises; previously an exception left model_cache_lock
    acquired forever, deadlocking every later lookup.
    """
    with model_cache_lock:
        model = model_cache.get(model_name)
        if model is None:
            model = create_fn()
            model_cache[model_name] = model
    return model
def LookupModel(model_name):
    """Return the cached model for model_name, or None when absent.

    Uses dict.get() under a ``with`` lock instead of the previous bare
    ``except:`` (which would also have masked non-KeyError failures) and
    manual acquire/release.
    """
    with model_cache_lock:
        return model_cache.get(model_name)
# Find bar
class FindBar():
def __init__(self, parent, finder, is_reg_expr=False):
self.finder = finder
self.context = []
self.last_value = None
self.last_pattern = None
label = QLabel("Find:")
label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.textbox = QComboBox()
self.textbox.setEditable(True)
self.textbox.currentIndexChanged.connect(self.ValueChanged)
self.progress = QProgressBar()
self.progress.setRange(0, 0)
self.progress.hide()
if is_reg_expr:
self.pattern = QCheckBox("Regular Expression")
else:
self.pattern = QCheckBox("Pattern")
self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.next_button = QToolButton()
self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown))
self.next_button.released.connect(lambda: self.NextPrev(1))
self.prev_button = QToolButton()
self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp))
self.prev_button.released.connect(lambda: self.NextPrev(-1))
self.close_button = QToolButton()
self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
self.close_button.released.connect(self.Deactivate)
self.hbox = QHBoxLayout()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.addWidget(label)
self.hbox.addWidget(self.textbox)
self.hbox.addWidget(self.progress)
self.hbox.addWidget(self.pattern)
self.hbox.addWidget(self.next_button)
self.hbox.addWidget(self.prev_button)
self.hbox.addWidget(self.close_button)
self.bar = QWidget()
self.bar.setLayout(self.hbox)
self.bar.hide()
def Widget(self):
return self.bar
def Activate(self):
self.bar.show()
self.textbox.lineEdit().selectAll()
self.textbox.setFocus()
def Deactivate(self):
self.bar.hide()
def Busy(self):
self.textbox.setEnabled(False)
self.pattern.hide()
self.next_button.hide()
self.prev_button.hide()
self.progress.show()
def Idle(self):
self.textbox.setEnabled(True)
self.progress.hide()
self.pattern.show()
self.next_button.show()
self.prev_button.show()
def Find(self, direction):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
self.last_value = value
self.last_pattern = pattern
self.finder.Find(value, direction, pattern, self.context)
def ValueChanged(self):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
index = self.textbox.currentIndex()
data = self.textbox.itemData(index)
# Store the pattern in the combo box to keep it with the text value
if data == None:
self.textbox.setItemData(index, pattern)
else:
self.pattern.setChecked(data)
self.Find(0)
def NextPrev(self, direction):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
if value != self.last_value:
index = self.textbox.findText(value)
# Allow for a button press before the value has been added to the combo box
if index < 0:
index = self.textbox.count()
self.textbox.addItem(value, pattern)
self.textbox.setCurrentIndex(index)
return
else:
self.textbox.setItemData(index, pattern)
elif pattern != self.last_pattern:
# Keep the pattern recorded in the combo box up to date
index = self.textbox.currentIndex()
self.textbox.setItemData(index, pattern)
self.Find(direction)
	def NotFound(self):
		"""Pop up a message box reporting that the search text was not found."""
		QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found")
# Context-sensitive call graph data model item base
class CallGraphLevelItemBase(object):
	"""Common base for items in the call graph / call tree data models.

	Children are fetched lazily: the subclass-provided Select() runs the
	database query the first time child information is required.
	"""

	def __init__(self, glb, params, row, parent_item):
		self.glb = glb
		self.params = params
		self.row = row
		self.parent_item = parent_item
		# Lazy-query state
		self.query_done = False
		self.child_count = 0
		self.child_items = []
		# Depth in the tree: root is level 0
		self.level = parent_item.level + 1 if parent_item else 0

	def getChildItem(self, row):
		return self.child_items[row]

	def getParentItem(self):
		return self.parent_item

	def getRow(self):
		return self.row

	def childCount(self):
		if not self.query_done:
			self.Select()
			# Just queried and found nothing: report -1
			if not self.child_count:
				return -1
		return self.child_count

	def hasChildren(self):
		# Before the query runs, assume children exist so the expander shows
		return True if not self.query_done else self.child_count > 0

	def getData(self, column):
		return self.data[column]
# Context-sensitive call graph data model level 2+ item base
class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
	"""Call graph item at level two or deeper: a (comm, thread, call path).

	Children are the callee call paths of this call path, for the same
	comm and thread, fetched lazily by Select().
	"""
	def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item):
		super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
		self.comm_id = comm_id
		self.thread_id = thread_id
		self.call_path_id = call_path_id
		self.insn_cnt = insn_cnt
		self.cyc_cnt = cyc_cnt
		self.branch_count = branch_count
		self.time = time
	def Select(self):
		"""Query the callee call paths and create one child item for each."""
		self.query_done = True
		query = QSqlQuery(self.glb.db)
		# IPC columns are only selected when the database provides them
		if self.params.have_ipc:
			ipc_str = ", SUM(insn_count), SUM(cyc_count)"
		else:
			ipc_str = ""
		QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time)" + ipc_str + ", SUM(branch_count)"
					" FROM calls"
					" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
					" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
					" INNER JOIN dsos ON symbols.dso_id = dsos.id"
					" WHERE parent_call_path_id = " + str(self.call_path_id) +
					" AND comm_id = " + str(self.comm_id) +
					" AND thread_id = " + str(self.thread_id) +
					" GROUP BY call_path_id, name, short_name"
					" ORDER BY call_path_id")
		while query.next():
			# Column positions shift depending on whether IPC columns were selected
			if self.params.have_ipc:
				insn_cnt = int(query.value(5))
				cyc_cnt = int(query.value(6))
				branch_count = int(query.value(7))
			else:
				insn_cnt = 0
				cyc_cnt = 0
				branch_count = int(query.value(5))
			child_item = CallGraphLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
			self.child_items.append(child_item)
			self.child_count += 1
# Context-sensitive call graph data model level three item
class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase):
	"""Call graph item for a single call path, with display column data."""
	def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item):
		super(CallGraphLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item)
		dso = dsoname(dso)
		if self.params.have_ipc:
			# Percentage columns are relative to the parent item's totals
			insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
			cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
			br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
			ipc = CalcIPC(cyc_cnt, insn_cnt)
			self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
		else:
			self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
		self.dbid = call_path_id
# Context-sensitive call graph data model level two item
class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase):
	"""Call graph level two item: a thread (pid:tid) within a comm."""
	def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
		# call_path_id 1 is the call path root; totals start at zero and are
		# accumulated from the children in Select()
		super(CallGraphLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 1, 0, 0, 0, 0, parent_item)
		if self.params.have_ipc:
			self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
		else:
			self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
		self.dbid = thread_id
	def Select(self):
		"""Fetch the children, total them, then fill in child percentages."""
		super(CallGraphLevelTwoItem, self).Select()
		for child_item in self.child_items:
			self.time += child_item.time
			self.insn_cnt += child_item.insn_cnt
			self.cyc_cnt += child_item.cyc_cnt
			self.branch_count += child_item.branch_count
		for child_item in self.child_items:
			# Percentage columns could not be computed until the totals were known
			child_item.data[4] = PercentToOneDP(child_item.time, self.time)
			if self.params.have_ipc:
				child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
				child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
				child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
			else:
				child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Context-sensitive call graph data model level one item
class CallGraphLevelOneItem(CallGraphLevelItemBase):
	"""Call graph level one item: a comm (command name)."""
	def __init__(self, glb, params, row, comm_id, comm, parent_item):
		super(CallGraphLevelOneItem, self).__init__(glb, params, row, parent_item)
		if self.params.have_ipc:
			self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
		else:
			self.data = [comm, "", "", "", "", "", ""]
		self.dbid = comm_id
	def Select(self):
		"""Query the threads of this comm and create one child item for each."""
		self.query_done = True
		query = QSqlQuery(self.glb.db)
		QueryExec(query, "SELECT thread_id, pid, tid"
					" FROM comm_threads"
					" INNER JOIN threads ON thread_id = threads.id"
					" WHERE comm_id = " + str(self.dbid))
		while query.next():
			child_item = CallGraphLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
			self.child_items.append(child_item)
			self.child_count += 1
# Context-sensitive call graph data model root item
class CallGraphRootItem(CallGraphLevelItemBase):
	"""Call graph root item: children are all comms (queried eagerly)."""
	def __init__(self, glb, params):
		super(CallGraphRootItem, self).__init__(glb, params, 0, None)
		self.dbid = 0
		self.query_done = True
		# Newer databases flag which comms have calls; filter on it when present
		if_has_calls = ""
		if IsSelectable(glb.db, "comms", columns = "has_calls"):
			if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
		query = QSqlQuery(glb.db)
		QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
		while query.next():
			if not query.value(0):
				continue
			child_item = CallGraphLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
			self.child_items.append(child_item)
			self.child_count += 1
# Call graph model parameters
class CallGraphModelParams():
	"""Parameters shared by the call graph / call tree models.

	have_ipc is True when the database has instruction and cycle counts.
	"""
	def __init__(self, glb, parent=None):
		self.have_ipc = IsSelectable(glb.db, "calls", columns = "insn_count, cyc_count")
# Context-sensitive call graph data model base
class CallGraphModelBase(TreeModel):
	"""Base class for call graph / call tree models, providing Find support.

	Subclasses supply GetRoot(), DoFindSelect() and FindPath().
	"""

	def __init__(self, glb, parent=None):
		super(CallGraphModelBase, self).__init__(glb, CallGraphModelParams(glb), parent)

	def FindSelect(self, value, pattern, query):
		"""Execute the search SELECT; 'pattern' enables wildcard matching."""
		if pattern:
			# postgresql and sqlite pattern patching differences:
			#   postgresql LIKE is case sensitive but sqlite LIKE is not
			#   postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not
			#   postgresql supports ILIKE which is case insensitive
			#   sqlite supports GLOB (text only) which uses * and ? and is case sensitive
			if not self.glb.dbref.is_sqlite3:
				# Escape % and _ (raw strings keep the backslash literal and
				# avoid invalid-escape-sequence warnings on Python 3)
				s = value.replace("%", r"\%")
				s = s.replace("_", r"\_")
				# Translate * and ? into SQL LIKE pattern characters % and _
				# (str.replace works on Python 2 and 3 alike, unlike
				# string.maketrans which Python 3 removed)
				match = " LIKE '" + str(s).replace("*", "%").replace("?", "_") + "'"
			else:
				match = " GLOB '" + str(value) + "'"
		else:
			match = " = '" + str(value) + "'"
		self.DoFindSelect(query, match)

	def Found(self, query, found):
		"""Return the tree path for the current result row, or [] if none."""
		if found:
			return self.FindPath(query)
		return []

	def FindValue(self, value, pattern, query, last_value, last_pattern):
		"""Start (or restart) a search and return the first result's path."""
		if last_value == value and pattern == last_pattern:
			# Same search as last time: just rewind to the first result
			found = query.first()
		else:
			self.FindSelect(value, pattern, query)
			found = query.next()
		return self.Found(query, found)

	def FindNext(self, query):
		"""Advance to the next result, wrapping around to the first."""
		found = query.next()
		if not found:
			found = query.first()
		return self.Found(query, found)

	def FindPrev(self, query):
		"""Step back to the previous result, wrapping around to the last."""
		found = query.previous()
		if not found:
			found = query.last()
		return self.Found(query, found)

	def FindThread(self, c):
		"""Worker-thread entry point: run the find described by context 'c'."""
		if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern:
			ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern)
		elif c.direction > 0:
			ids = self.FindNext(c.query)
		else:
			ids = self.FindPrev(c.query)
		return (True, ids)

	def Find(self, value, direction, pattern, context, callback):
		"""Start an asynchronous find; callback(ids) is invoked when done.

		'context' is a caller-owned list holding the search state between
		calls, so repeat/next/previous searches can reuse the same query.
		"""
		class Context():
			def __init__(self, *x):
				self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x
			def Update(self, *x):
				# The previous value/pattern are kept so FindThread can detect a repeat search
				self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern)
		if context:
			context[0].Update(value, direction, pattern)
		else:
			context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None))
		# Use a thread so the UI is not blocked during the SELECT
		thread = Thread(self.FindThread, context[0])
		thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection)
		thread.start()

	def FindDone(self, thread, callback, ids):
		"""Back on the UI thread: deliver the result to the caller."""
		callback(ids)
# Context-sensitive call graph data model
class CallGraphModel(CallGraphModelBase):
	"""Tree model for the context-sensitive call graph."""
	def __init__(self, glb, parent=None):
		super(CallGraphModel, self).__init__(glb, parent)
	def GetRoot(self):
		return CallGraphRootItem(self.glb, self.params)
	def columnCount(self, parent=None):
		# Extra columns when instruction/cycle counts are available
		if self.params.have_ipc:
			return 12
		else:
			return 7
	def columnHeader(self, column):
		if self.params.have_ipc:
			headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
		else:
			headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
		return headers[column]
	def columnAlignment(self, column):
		if self.params.have_ipc:
			alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
		else:
			alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
		return alignment[column]
	def DoFindSelect(self, query, match):
		"""Select (call path, comm, thread) rows whose symbol name matches."""
		QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
						" FROM calls"
						" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
						" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
						" WHERE calls.id <> 0"
						" AND symbols.name" + match +
						" GROUP BY comm_id, thread_id, call_path_id"
						" ORDER BY comm_id, thread_id, call_path_id")
	def FindPath(self, query):
		"""Convert the current result row into a list of dbids for tree walking."""
		# Turn the query result into a list of ids that the tree view can walk
		# to open the tree at the right place.
		ids = []
		parent_id = query.value(0)
		while parent_id:
			ids.insert(0, parent_id)
			q2 = QSqlQuery(self.glb.db)
			QueryExec(q2, "SELECT parent_id"
					" FROM call_paths"
					" WHERE id = " + str(parent_id))
			if not q2.next():
				break
			parent_id = q2.value(0)
		# The call path root is not used
		if ids[0] == 1:
			del ids[0]
		# Prepend the thread and comm ids: the tree levels above call paths
		ids.insert(0, query.value(2))
		ids.insert(0, query.value(1))
		return ids
# Call tree data model level 2+ item base
class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
	"""Call tree item at level two or deeper: children are individual calls."""
	def __init__(self, glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item):
		super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
		self.comm_id = comm_id
		self.thread_id = thread_id
		self.calls_id = calls_id
		self.call_time = call_time
		self.time = time
		self.insn_cnt = insn_cnt
		self.cyc_cnt = cyc_cnt
		self.branch_count = branch_count
	def Select(self):
		"""Query the child calls of this call and create child items."""
		self.query_done = True
		# At the top level (calls_id == 0) restrict by comm and thread
		if self.calls_id == 0:
			comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
		else:
			comm_thread = ""
		if self.params.have_ipc:
			ipc_str = ", insn_count, cyc_count"
		else:
			ipc_str = ""
		query = QSqlQuery(self.glb.db)
		QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time" + ipc_str + ", branch_count"
					" FROM calls"
					" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
					" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
					" INNER JOIN dsos ON symbols.dso_id = dsos.id"
					" WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
					" ORDER BY call_time, calls.id")
		while query.next():
			# Column positions shift depending on whether IPC columns were selected
			if self.params.have_ipc:
				insn_cnt = int(query.value(5))
				cyc_cnt = int(query.value(6))
				branch_count = int(query.value(7))
			else:
				insn_cnt = 0
				cyc_cnt = 0
				branch_count = int(query.value(5))
			child_item = CallTreeLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
			self.child_items.append(child_item)
			self.child_count += 1
# Call tree data model level three item
class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
	"""Call tree item for a single call, with display column data."""
	def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item):
		super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item)
		dso = dsoname(dso)
		if self.params.have_ipc:
			# Percentage columns are relative to the parent item's totals
			insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
			cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
			br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
			ipc = CalcIPC(cyc_cnt, insn_cnt)
			self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
		else:
			self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
		self.dbid = calls_id
# Call tree data model level two item
class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
	"""Call tree level two item: a thread (pid:tid) within a comm."""
	def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
		# calls_id 0 means top-level calls; totals start at zero and are
		# accumulated from the children in Select()
		super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, 0, parent_item)
		if self.params.have_ipc:
			self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
		else:
			self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
		self.dbid = thread_id
	def Select(self):
		"""Fetch the children, total them, then fill in child percentages."""
		super(CallTreeLevelTwoItem, self).Select()
		for child_item in self.child_items:
			self.time += child_item.time
			self.insn_cnt += child_item.insn_cnt
			self.cyc_cnt += child_item.cyc_cnt
			self.branch_count += child_item.branch_count
		for child_item in self.child_items:
			# Percentage columns could not be computed until the totals were known
			child_item.data[4] = PercentToOneDP(child_item.time, self.time)
			if self.params.have_ipc:
				child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
				child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
				child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
			else:
				child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Call tree data model level one item
class CallTreeLevelOneItem(CallGraphLevelItemBase):
	"""Call tree level one item: a comm (command name)."""
	def __init__(self, glb, params, row, comm_id, comm, parent_item):
		super(CallTreeLevelOneItem, self).__init__(glb, params, row, parent_item)
		if self.params.have_ipc:
			self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
		else:
			self.data = [comm, "", "", "", "", "", ""]
		self.dbid = comm_id
	def Select(self):
		"""Query the threads of this comm and create one child item for each."""
		self.query_done = True
		query = QSqlQuery(self.glb.db)
		QueryExec(query, "SELECT thread_id, pid, tid"
					" FROM comm_threads"
					" INNER JOIN threads ON thread_id = threads.id"
					" WHERE comm_id = " + str(self.dbid))
		while query.next():
			child_item = CallTreeLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
			self.child_items.append(child_item)
			self.child_count += 1
# Call tree data model root item
class CallTreeRootItem(CallGraphLevelItemBase):
	"""Call tree root item: children are all comms (queried eagerly)."""
	def __init__(self, glb, params):
		super(CallTreeRootItem, self).__init__(glb, params, 0, None)
		self.dbid = 0
		self.query_done = True
		# Newer databases flag which comms have calls; filter on it when present
		if_has_calls = ""
		if IsSelectable(glb.db, "comms", columns = "has_calls"):
			if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
		query = QSqlQuery(glb.db)
		QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
		while query.next():
			if not query.value(0):
				continue
			child_item = CallTreeLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
			self.child_items.append(child_item)
			self.child_count += 1
# Call Tree data model
class CallTreeModel(CallGraphModelBase):
	"""Tree model for the call tree (individual calls in time order)."""
	def __init__(self, glb, parent=None):
		super(CallTreeModel, self).__init__(glb, parent)
	def GetRoot(self):
		return CallTreeRootItem(self.glb, self.params)
	def columnCount(self, parent=None):
		# Extra columns when instruction/cycle counts are available
		if self.params.have_ipc:
			return 12
		else:
			return 7
	def columnHeader(self, column):
		if self.params.have_ipc:
			headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
		else:
			headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
		return headers[column]
	def columnAlignment(self, column):
		if self.params.have_ipc:
			alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
		else:
			alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
		return alignment[column]
	def DoFindSelect(self, query, match):
		"""Select (call, comm, thread) rows whose symbol name matches."""
		QueryExec(query, "SELECT calls.id, comm_id, thread_id"
						" FROM calls"
						" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
						" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
						" WHERE calls.id <> 0"
						" AND symbols.name" + match +
						" ORDER BY comm_id, thread_id, call_time, calls.id")
	def FindPath(self, query):
		"""Convert the current result row into a list of dbids for tree walking."""
		# Turn the query result into a list of ids that the tree view can walk
		# to open the tree at the right place.
		ids = []
		parent_id = query.value(0)
		while parent_id:
			ids.insert(0, parent_id)
			q2 = QSqlQuery(self.glb.db)
			QueryExec(q2, "SELECT parent_id"
					" FROM calls"
					" WHERE id = " + str(parent_id))
			if not q2.next():
				break
			parent_id = q2.value(0)
		# Prepend the thread and comm ids: the tree levels above the calls
		ids.insert(0, query.value(2))
		ids.insert(0, query.value(1))
		return ids
# Horizontal layout
class HBoxLayout(QHBoxLayout):
	"""Horizontal layout with zero margins; accepts widgets or layouts."""

	def __init__(self, *children):
		super(HBoxLayout, self).__init__()
		box = self.layout()
		box.setContentsMargins(0, 0, 0, 0)
		for item in children:
			# Widgets and nested layouts are added through different calls
			add = box.addWidget if item.isWidgetType() else box.addLayout
			add(item)
# Vertical layout
class VBoxLayout(QVBoxLayout):
	"""Vertical layout with zero margins; accepts widgets or layouts."""

	def __init__(self, *children):
		super(VBoxLayout, self).__init__()
		box = self.layout()
		box.setContentsMargins(0, 0, 0, 0)
		for item in children:
			# Widgets and nested layouts are added through different calls
			add = box.addWidget if item.isWidgetType() else box.addLayout
			add(item)
# Vertical layout widget
class VBox():
	"""Widget wrapping a vertical layout of the given children."""
	def __init__(self, *children):
		self.vbox = QWidget()
		self.vbox.setLayout(VBoxLayout(*children))
	def Widget(self):
		"""Return the containing widget."""
		return self.vbox
# Tree window base
class TreeWindowBase(QMdiSubWindow):
	"""Base class for MDI sub-windows showing a tree view with a find bar."""
	def __init__(self, parent=None):
		super(TreeWindowBase, self).__init__(parent)
		# Subclasses set model and find_bar
		self.model = None
		self.find_bar = None
		self.view = QTreeView()
		self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
		self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
		self.context_menu = TreeContextMenu(self.view)
	def DisplayFound(self, ids):
		"""Expand and select the tree path given by dbids; return success."""
		if not len(ids):
			return False
		parent = QModelIndex()
		for dbid in ids:
			# Search the current level's rows for the matching dbid
			found = False
			n = self.model.rowCount(parent)
			for row in xrange(n):
				child = self.model.index(row, 0, parent)
				if child.internalPointer().dbid == dbid:
					found = True
					self.view.setExpanded(parent, True)
					self.view.setCurrentIndex(child)
					parent = child
					break
			if not found:
				break
		return found
	def Find(self, value, direction, pattern, context):
		"""Find bar callback: start an asynchronous model search."""
		self.view.setFocus()
		self.find_bar.Busy()
		self.model.Find(value, direction, pattern, context, self.FindDone)
	def FindDone(self, ids):
		"""Search completion callback: show the result or report not found."""
		found = True
		if not self.DisplayFound(ids):
			found = False
		self.find_bar.Idle()
		if not found:
			self.find_bar.NotFound()
# Context-sensitive call graph window
class CallGraphWindow(TreeWindowBase):
	"""MDI sub-window displaying the context-sensitive call graph."""
	def __init__(self, glb, parent=None):
		super(CallGraphWindow, self).__init__(parent)
		# Obtain the model via LookupCreateModel so it is created only once
		self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
		self.view.setModel(self.model)
		# Initial column widths for the leading columns
		for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
			self.view.setColumnWidth(c, w)
		self.find_bar = FindBar(self, self)
		self.vbox = VBox(self.view, self.find_bar.Widget())
		self.setWidget(self.vbox.Widget())
		AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
# Call tree window
class CallTreeWindow(TreeWindowBase):
	"""MDI sub-window displaying the call tree.

	If thread_at_time is given as (comm_id, thread_id, time), the tree is
	opened at the call active on that thread at that time.
	"""
	def __init__(self, glb, parent=None, thread_at_time=None):
		super(CallTreeWindow, self).__init__(parent)
		# Obtain the model via LookupCreateModel so it is created only once
		self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
		self.view.setModel(self.model)
		# Initial column widths for the leading columns
		for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
			self.view.setColumnWidth(c, w)
		self.find_bar = FindBar(self, self)
		self.vbox = VBox(self.view, self.find_bar.Widget())
		self.setWidget(self.vbox.Widget())
		AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
		if thread_at_time:
			self.DisplayThreadAtTime(*thread_at_time)
	def DisplayThreadAtTime(self, comm_id, thread_id, time):
		"""Expand the tree to the call active on the thread at 'time'."""
		# First walk down to the thread: comm level, then thread level
		parent = QModelIndex()
		for dbid in (comm_id, thread_id):
			found = False
			n = self.model.rowCount(parent)
			for row in xrange(n):
				child = self.model.index(row, 0, parent)
				if child.internalPointer().dbid == dbid:
					found = True
					self.view.setExpanded(parent, True)
					self.view.setCurrentIndex(child)
					parent = child
					break
			if not found:
				return
		# Then descend the calls, at each level following the last call
		# whose call_time is not later than 'time'
		found = False
		while True:
			n = self.model.rowCount(parent)
			if not n:
				return
			last_child = None
			for row in xrange(n):
				self.view.setExpanded(parent, True)
				child = self.model.index(row, 0, parent)
				child_call_time = child.internalPointer().call_time
				if child_call_time < time:
					last_child = child
				elif child_call_time == time:
					self.view.setCurrentIndex(child)
					return
				elif child_call_time > time:
					break
			if not last_child:
				if not found:
					# Nothing before 'time' at the first level: select the first call
					child = self.model.index(0, 0, parent)
					self.view.setExpanded(parent, True)
					self.view.setCurrentIndex(child)
				return
			found = True
			self.view.setExpanded(parent, True)
			self.view.setCurrentIndex(last_child)
			parent = last_child
# ExecComm() gets the comm_id of the command string that was set when the process exec'd i.e. the program name
def ExecComm(db, thread_id, time):
	"""Return the comm_id of the program name for 'thread_id' at 'time'.

	The program name is the comm set by the most recent exec at or before
	'time'; if there is none, fall back to the thread's first comm.
	"""
	query = QSqlQuery(db)
	QueryExec(query, "SELECT comm_threads.comm_id, comms.c_time, comms.exec_flag"
		" FROM comm_threads"
		" INNER JOIN comms ON comms.id = comm_threads.comm_id"
		" WHERE comm_threads.thread_id = " + str(thread_id) +
		" ORDER BY comms.c_time, comms.id")
	first = None
	last = None
	while query.next():
		if first is None:
			first = query.value(0)
		# Track the most recent exec'd comm not later than 'time'
		if query.value(2) and Decimal(query.value(1)) <= Decimal(time):
			last = query.value(0)
	if last is not None:
		return last
	return first
# Container for (x, y) data
class XY():
	"""Simple mutable container for an (x, y) pair."""

	def __init__(self, x=0, y=0):
		self.x, self.y = x, y

	def __str__(self):
		return "XY(%s, %s)" % (self.x, self.y)
# Container for sub-range data
class Subrange():
	"""Simple mutable container for a (lo, hi) sub-range."""

	def __init__(self, lo=0, hi=0):
		self.lo, self.hi = lo, hi

	def __str__(self):
		return "Subrange(%s, %s)" % (self.lo, self.hi)
# Graph data region base class
class GraphDataRegion(object):
	"""A graph region: lookup key plus display title and legend sort ordinal."""

	def __init__(self, key, title="", ordinal=""):
		# Independent attribute assignments; subclasses may overwrite title/ordinal
		self.ordinal = ordinal
		self.title = title
		self.key = key
# Function to sort GraphDataRegion
def GraphDataRegionOrdinal(data_region):
	"""Sort key function: order regions by their precomputed ordinal string."""
	return data_region.ordinal
# Attributes for a graph region
class GraphRegionAttribute():
	"""Display attributes (currently just the colour) for a graph region."""
	def __init__(self, colour):
		self.colour = colour
# Switch graph data region represents a task
class SwitchGraphDataRegion(GraphDataRegion):
	"""Graph region representing one task (thread / comm combination)."""
	def __init__(self, key, exec_comm_id, pid, tid, comm, thread_id, comm_id):
		super(SwitchGraphDataRegion, self).__init__(key)
		self.title = str(pid) + " / " + str(tid) + " " + comm
		# Order graph legend within exec comm by pid / tid / time
		self.ordinal = str(pid).rjust(16) + str(exec_comm_id).rjust(8) + str(tid).rjust(16)
		self.exec_comm_id = exec_comm_id
		self.pid = pid
		self.tid = tid
		self.comm = comm
		self.thread_id = thread_id
		self.comm_id = comm_id
# Graph data point
class GraphDataPoint():
	"""One graphed point, with optional alternate coordinates and regions."""

	def __init__(self, data, index, x, y, altx=None, alty=None, hregion=None, vregion=None):
		self.data = data
		self.index = index
		self.x, self.y = x, y
		# Optional alternate coordinates and horizontal/vertical regions
		self.altx, self.alty = altx, alty
		self.hregion, self.vregion = hregion, vregion
# Graph data (single graph) base class
class GraphData(object):
	"""Data for a single graph, with points stored relative to (xbase, ybase)."""

	def __init__(self, collection, xbase=Decimal(0), ybase=Decimal(0)):
		self.collection = collection
		self.points = []
		self.xbase = xbase
		self.ybase = ybase
		self.title = ""

	def AddPoint(self, x, y, altx=None, alty=None, hregion=None, vregion=None):
		# Points are held as floats relative to the base offsets
		rel_x = float(Decimal(x) - self.xbase)
		rel_y = float(Decimal(y) - self.ybase)
		self.points.append(GraphDataPoint(self, len(self.points), rel_x, rel_y, altx, alty, hregion, vregion))

	def XToData(self, x):
		# Convert a graph-relative x back to database units
		return Decimal(x) + self.xbase

	def YToData(self, y):
		# Convert a graph-relative y back to database units
		return Decimal(y) + self.ybase
# Switch graph data (for one CPU)
class SwitchGraphData(GraphData):
	"""Context-switch graph data for one CPU."""
	def __init__(self, db, collection, cpu, xbase):
		super(SwitchGraphData, self).__init__(collection, xbase)
		self.cpu = cpu
		self.title = "CPU " + str(cpu)
		self.SelectSwitches(db)
	def SelectComms(self, db, thread_id, last_comm_id, start_time, end_time):
		"""Add points for exec's by 'thread_id' between start_time and end_time."""
		query = QSqlQuery(db)
		QueryExec(query, "SELECT id, c_time"
				" FROM comms"
				" WHERE c_thread_id = " + str(thread_id) +
				"   AND exec_flag = " + self.collection.glb.dbref.TRUE +
				"   AND c_time >= " + str(start_time) +
				"   AND c_time <= " + str(end_time) +
				" ORDER BY c_time, id")
		while query.next():
			comm_id = query.value(0)
			if comm_id == last_comm_id:
				continue
			time = query.value(1)
			hregion = self.HRegion(db, thread_id, comm_id, time)
			self.AddPoint(time, 1000, None, None, hregion)
	def SelectSwitches(self, db):
		"""Scan the context switches for this CPU and build the data points."""
		last_time = None
		last_comm_id = None
		last_thread_id = None
		query = QSqlQuery(db)
		QueryExec(query, "SELECT time, thread_out_id, thread_in_id, comm_out_id, comm_in_id, flags"
				" FROM context_switches"
				" WHERE machine_id = " + str(self.collection.machine_id) +
				"   AND cpu = " + str(self.cpu) +
				" ORDER BY time, id")
		while query.next():
			flags = int(query.value(5))
			if flags & 1:
				# Schedule-out: detect and add exec's
				if last_thread_id == query.value(1) and last_comm_id is not None and last_comm_id != query.value(3):
					self.SelectComms(db, last_thread_id, last_comm_id, last_time, query.value(0))
				continue
			# Schedule-in: add data point
			if len(self.points) == 0:
				# First point starts at machine start time with the outgoing task
				start_time = self.collection.glb.StartTime(self.collection.machine_id)
				hregion = self.HRegion(db, query.value(1), query.value(3), start_time)
				self.AddPoint(start_time, 1000, None, None, hregion)
			time = query.value(0)
			comm_id = query.value(4)
			thread_id = query.value(2)
			hregion = self.HRegion(db, thread_id, comm_id, time)
			self.AddPoint(time, 1000, None, None, hregion)
			last_time = time
			last_comm_id = comm_id
			last_thread_id = thread_id
	def NewHRegion(self, db, key, thread_id, comm_id, time):
		"""Create a region for a task not seen before, looking up its details."""
		exec_comm_id = ExecComm(db, thread_id, time)
		query = QSqlQuery(db)
		QueryExec(query, "SELECT pid, tid FROM threads WHERE id = " + str(thread_id))
		if query.next():
			pid = query.value(0)
			tid = query.value(1)
		else:
			pid = -1
			tid = -1
		query = QSqlQuery(db)
		QueryExec(query, "SELECT comm FROM comms WHERE id = " + str(comm_id))
		if query.next():
			comm = query.value(0)
		else:
			comm = ""
		return SwitchGraphDataRegion(key, exec_comm_id, pid, tid, comm, thread_id, comm_id)
	def HRegion(self, db, thread_id, comm_id, time):
		"""Return the (cached) region for the given thread/comm combination."""
		key = str(thread_id) + ":" + str(comm_id)
		hregion = self.collection.LookupHRegion(key)
		if hregion is None:
			hregion = self.NewHRegion(db, key, thread_id, comm_id, time)
			self.collection.AddHRegion(key, hregion)
		return hregion
# Graph data collection (multiple related graphs) base class
class GraphDataCollection(object):
	"""Base class for a collection of related graphs and their regions."""

	def __init__(self, glb):
		self.glb = glb
		self.data = []
		self.hregions = {}
		# Display ranges; None until a subclass sets them
		self.xrangelo = None
		self.xrangehi = None
		self.yrangelo = None
		self.yrangehi = None
		self.dp = XY(0, 0)

	def AddGraphData(self, data):
		self.data.append(data)

	def LookupHRegion(self, key):
		# None when the key has not been added yet
		return self.hregions.get(key)

	def AddHRegion(self, key, hregion):
		self.hregions[key] = hregion
# Switch graph data collection (SwitchGraphData for each CPU)
class SwitchGraphDataCollection(GraphDataCollection):
	"""Context-switch graph data for one machine: a SwitchGraphData per CPU."""
	def __init__(self, glb, db, machine_id):
		super(SwitchGraphDataCollection, self).__init__(glb)
		self.machine_id = machine_id
		self.cpus = self.SelectCPUs(db)
		# X range is the machine's trace time span; Y is the fixed bar height
		self.xrangelo = glb.StartTime(machine_id)
		self.xrangehi = glb.FinishTime(machine_id)
		self.yrangelo = Decimal(0)
		self.yrangehi = Decimal(1000)
		for cpu in self.cpus:
			self.AddGraphData(SwitchGraphData(db, self, cpu, self.xrangelo))
	def SelectCPUs(self, db):
		"""Return the sorted list of CPUs with context switches on this machine."""
		cpus = []
		query = QSqlQuery(db)
		QueryExec(query, "SELECT DISTINCT cpu"
				" FROM context_switches"
				" WHERE machine_id = " + str(self.machine_id))
		while query.next():
			cpus.append(int(query.value(0)))
		return sorted(cpus)
# Switch graph data graphics item displays the graphed data
class SwitchGraphDataGraphicsItem(QGraphicsItem):
	"""Graphics item painting one CPU's switch data and handling hover/click."""
	def __init__(self, data, graph_width, graph_height, attrs, event_handler, parent=None):
		super(SwitchGraphDataGraphicsItem, self).__init__(parent)
		self.data = data
		self.graph_width = graph_width
		self.graph_height = graph_height
		self.attrs = attrs
		self.event_handler = event_handler
		self.setAcceptHoverEvents(True)
	def boundingRect(self):
		return QRectF(0, 0, self.graph_width, self.graph_height)
	def PaintPoint(self, painter, last, x):
		"""Paint the bar for point 'last', ending at 'x', clipped to the sub-range.

		Nothing is painted for the first point (last is None), the idle
		task (pid 0), or a bar that ends before the visible sub-range.
		"""
		if not(last is None or last.hregion.pid == 0 or x < self.attrs.subrange.x.lo):
			# Clip the bar to the visible sub-range
			if last.x < self.attrs.subrange.x.lo:
				x0 = self.attrs.subrange.x.lo
			else:
				x0 = last.x
			if x > self.attrs.subrange.x.hi:
				x1 = self.attrs.subrange.x.hi
			else:
				x1 = x - 1
			x0 = self.attrs.XToPixel(x0)
			x1 = self.attrs.XToPixel(x1)
			y0 = self.attrs.YToPixel(last.y)
			colour = self.attrs.region_attributes[last.hregion.key].colour
			width = x1 - x0 + 1
			if width < 2:
				# Too narrow for a rectangle: draw a one-pixel line
				painter.setPen(colour)
				painter.drawLine(x0, self.graph_height - y0, x0, self.graph_height)
			else:
				painter.fillRect(x0, self.graph_height - y0, width, self.graph_height - 1, colour)
	def paint(self, painter, option, widget):
		"""Paint all visible bars; each point's bar ends at the next point."""
		last = None
		for point in self.data.points:
			self.PaintPoint(painter, last, point.x)
			if point.x > self.attrs.subrange.x.hi:
				break
			last = point
		# Paint the final bar up to the end of the sub-range
		self.PaintPoint(painter, last, self.attrs.subrange.x.hi + 1)
	def BinarySearchPoint(self, target):
		"""Return the index of the last point with x <= target.

		Callers ensure target >= points[0].x (see XPixelToData).
		"""
		lower_pos = 0
		higher_pos = len(self.data.points)
		while True:
			pos = int((lower_pos + higher_pos) / 2)
			val = self.data.points[pos].x
			if target >= val:
				lower_pos = pos
			else:
				higher_pos = pos
			if higher_pos <= lower_pos + 1:
				return lower_pos
	def XPixelToData(self, x):
		"""Map a pixel x to (before-first-point, point index, x in data units)."""
		x = self.attrs.PixelToX(x)
		if x < self.data.points[0].x:
			x = 0
			pos = 0
			low = True
		else:
			pos = self.BinarySearchPoint(x)
			low = False
		return (low, pos, self.data.XToData(x))
	def EventToData(self, event):
		"""Map a mouse event to (time_from, time_to, hregions, hregion_times)."""
		no_data = (None,) * 4
		if len(self.data.points) < 1:
			return no_data
		x = event.pos().x()
		if x < 0:
			return no_data
		low0, pos0, time_from = self.XPixelToData(x)
		low1, pos1, time_to = self.XPixelToData(x + 1)
		hregions = set()
		hregion_times = []
		if not low1:
			# Collect every region covered by this one-pixel span
			for i in xrange(pos0, pos1 + 1):
				hregion = self.data.points[i].hregion
				hregions.add(hregion)
				if i == pos0:
					time = time_from
				else:
					time = self.data.XToData(self.data.points[i].x)
				hregion_times.append((hregion, time))
		return (time_from, time_to, hregions, hregion_times)
	def hoverMoveEvent(self, event):
		time_from, time_to, hregions, hregion_times = self.EventToData(event)
		if time_from is not None:
			self.event_handler.PointEvent(self.data.cpu, time_from, time_to, hregions)
	def hoverLeaveEvent(self, event):
		self.event_handler.NoPointEvent()
	def mousePressEvent(self, event):
		# Only the right button shows context information; pass others on
		if event.button() != Qt.RightButton:
			super(SwitchGraphDataGraphicsItem, self).mousePressEvent(event)
			return
		time_from, time_to, hregions, hregion_times = self.EventToData(event)
		if hregion_times:
			self.event_handler.RightClickEvent(self.data.cpu, hregion_times, event.screenPos())
# X-axis graphics item
class XAxisGraphicsItem(QGraphicsItem):
	"""Graphics item painting the x-axis line, tick marks and the scale."""
	def __init__(self, width, parent=None):
		super(XAxisGraphicsItem, self).__init__(parent)
		self.width = width
		self.max_mark_sz = 4
		self.height = self.max_mark_sz + 1
	def boundingRect(self):
		return QRectF(0, 0, self.width, self.height)
	def Step(self):
		"""Return the tick mark spacing (a power of 10) in data units."""
		attrs = self.parentItem().attrs
		subrange = attrs.subrange.x
		t = subrange.hi - subrange.lo
		# Aim for at least 3 pixels between marks
		s = (3.0 * t) / self.width
		n = 1.0
		while s > n:
			n = n * 10.0
		return n
	def PaintMarks(self, painter, at_y, lo, hi, step, i):
		"""Paint tick marks from lo to hi; every 5th is taller, every 10th tallest.

		'i' is the mark count at 'lo', so partial ranges keep the right rhythm.
		"""
		attrs = self.parentItem().attrs
		x = lo
		while x <= hi:
			xp = attrs.XToPixel(x)
			if i % 10:
				if i % 5:
					sz = 1
				else:
					sz = 2
			else:
				sz = self.max_mark_sz
				i = 0
			painter.drawLine(xp, at_y, xp, at_y + sz)
			x += step
			i += 1
	def paint(self, painter, option, widget):
		"""Paint the axis line and its tick marks."""
		# Using QPainter::drawLine(int x1, int y1, int x2, int y2) so x2 = width -1
		painter.drawLine(0, 0, self.width - 1, 0)
		n = self.Step()
		attrs = self.parentItem().attrs
		subrange = attrs.subrange.x
		# Start at the first mark position at or after the sub-range start
		if subrange.lo:
			x_offset = n - (subrange.lo % n)
		else:
			x_offset = 0.0
		x = subrange.lo + x_offset
		i = (x / n) % 10
		self.PaintMarks(painter, 0, x, subrange.hi, n, i)
	def ScaleDimensions(self):
		"""Return (step, lo, hi, width) of the scale; width 0 means too wide to show."""
		n = self.Step()
		attrs = self.parentItem().attrs
		lo = attrs.subrange.x.lo
		hi = (n * 10.0) + lo
		width = attrs.XToPixel(hi)
		if width > 500:
			width = 0
		return (n, lo, hi, width)
	def PaintScale(self, painter, at_x, at_y):
		"""Paint a scale (one 10-mark unit) at the given position."""
		n, lo, hi, width = self.ScaleDimensions()
		if not width:
			return
		painter.drawLine(at_x, at_y, at_x + width, at_y)
		self.PaintMarks(painter, at_y, lo, hi, n, 0)
	def ScaleWidth(self):
		n, lo, hi, width = self.ScaleDimensions()
		return width
	def ScaleHeight(self):
		return self.height
	def ScaleUnit(self):
		# The scale spans 10 tick steps
		return self.Step() * 10
# Scale graphics item base class
class ScaleGraphicsItem(QGraphicsItem):
	"""Base class for a small scale indicator drawn from an axis item.

	Subclasses override Text() to provide the label after the scale.
	"""
	def __init__(self, axis, parent=None):
		super(ScaleGraphicsItem, self).__init__(parent)
		self.axis = axis
	def boundingRect(self):
		width = self.axis.ScaleWidth()
		if not width:
			return QRectF()
		# Allow extra room (100) for the text drawn after the scale
		return QRectF(0, 0, width + 100, self.axis.ScaleHeight())
	def paint(self, painter, option, widget):
		width = self.axis.ScaleWidth()
		if not width:
			return
		self.axis.PaintScale(painter, 0, 5)
		painter.drawText(QPointF(width + 4, 10), self.Text())
	def Unit(self):
		return self.axis.ScaleUnit()
	def Text(self):
		return ""
# Switch graph scale graphics item
class SwitchScaleGraphicsItem(ScaleGraphicsItem):
	"""Scale indicator for switch graphs, labelled in s, ms, us or ns."""
	def __init__(self, axis, parent=None):
		super(SwitchScaleGraphicsItem, self).__init__(axis, parent)
	def Text(self):
		unit = self.Unit()
		# Pick the largest unit that the value reaches
		for divisor, suffix in ((1000000000, "s"), (1000000, "ms"), (1000, "us")):
			if unit >= divisor:
				return " = " + str(int(unit / divisor)) + " " + suffix
		return " = " + str(int(unit)) + " ns"
# Switch graph graphics item contains graph title, scale, x/y-axis, and the graphed data
class SwitchGraphGraphicsItem(QGraphicsItem):
	"""One CPU's graph: title, scale (first graph only), axes and graphed data."""
	def __init__(self, collection, data, attrs, event_handler, first, parent=None):
		super(SwitchGraphGraphicsItem, self).__init__(parent)
		self.collection = collection
		self.data = data
		self.attrs = attrs
		self.event_handler = event_handler
		margin = 20
		title_width = 50
		self.title_graphics = QGraphicsSimpleTextItem(data.title, self)
		self.title_graphics.setPos(margin, margin)
		graph_width = attrs.XToPixel(attrs.subrange.x.hi) + 1
		graph_height = attrs.YToPixel(attrs.subrange.y.hi) + 1
		self.graph_origin_x = margin + title_width + margin
		self.graph_origin_y = graph_height + margin
		x_axis_size = 1
		y_axis_size = 1
		self.yline = QGraphicsLineItem(0, 0, 0, graph_height, self)
		self.x_axis = XAxisGraphicsItem(graph_width, self)
		self.x_axis.setPos(self.graph_origin_x, self.graph_origin_y + 1)
		# Only the first graph displays the scale key
		if first:
			self.scale_item = SwitchScaleGraphicsItem(self.x_axis, self)
			self.scale_item.setPos(self.graph_origin_x, self.graph_origin_y + 10)
		self.yline.setPos(self.graph_origin_x - y_axis_size, self.graph_origin_y - graph_height)
		self.axis_point = QGraphicsLineItem(0, 0, 0, 0, self)
		self.axis_point.setPos(self.graph_origin_x - 1, self.graph_origin_y +1)
		self.width = self.graph_origin_x + graph_width + margin
		self.height = self.graph_origin_y + margin
		self.graph = SwitchGraphDataGraphicsItem(data, graph_width, graph_height, attrs, event_handler, self)
		self.graph.setPos(self.graph_origin_x, self.graph_origin_y - graph_height)
		# Parent may support rubber-band selection over the graph area
		if parent and 'EnableRubberBand' in dir(parent):
			parent.EnableRubberBand(self.graph_origin_x, self.graph_origin_x + graph_width - 1, self)
	def boundingRect(self):
		return QRectF(0, 0, self.width, self.height)
	def paint(self, painter, option, widget):
		pass
	def RBXToPixel(self, x):
		# Rubber-band x (item coordinates) to data x
		return self.attrs.PixelToX(x - self.graph_origin_x)
	def RBXRangeToPixel(self, x0, x1):
		return (self.RBXToPixel(x0), self.RBXToPixel(x1 + 1))
	def RBPixelToTime(self, x):
		# Clamp positions before the first point to the start of the data
		if x < self.data.points[0].x:
			return self.data.XToData(0)
		return self.data.XToData(x)
	def RBEventTimes(self, x0, x1):
		"""Convert a rubber-band pixel range to a (time_from, time_to) pair."""
		x0, x1 = self.RBXRangeToPixel(x0, x1)
		time_from = self.RBPixelToTime(x0)
		time_to = self.RBPixelToTime(x1)
		return (time_from, time_to)
	def RBEvent(self, x0, x1):
		time_from, time_to = self.RBEventTimes(x0, x1)
		self.event_handler.RangeEvent(time_from, time_to)
	def RBMoveEvent(self, x0, x1):
		# Normalize so x0 <= x1
		if x1 < x0:
			x0, x1 = x1, x0
		self.RBEvent(x0, x1)
	def RBReleaseEvent(self, x0, x1, selection_state):
		if x1 < x0:
			x0, x1 = x1, x0
		x0, x1 = self.RBXRangeToPixel(x0, x1)
		self.event_handler.SelectEvent(x0, x1, selection_state)
# Graphics item to draw a vertical bracket (used to highlight "forward" sub-range)
class VerticalBracketGraphicsItem(QGraphicsItem):
	"""Translucent vertical bracket used to highlight the "forward" sub-range."""
	def __init__(self, parent=None):
		super(VerticalBracketGraphicsItem, self).__init__(parent)
		self.width = 0
		self.height = 0
		self.hide()
	def SetSize(self, width, height):
		# +1 so the bracket fully encloses the given area
		self.width = width + 1
		self.height = height + 1
	def boundingRect(self):
		return QRectF(0, 0, self.width, self.height)
	def paint(self, painter, option, widget):
		# Translucent yellow fill with corner ticks at top and bottom edges
		colour = QColor(255, 255, 0, 32)
		painter.fillRect(0, 0, self.width, self.height, colour)
		x1 = self.width - 1
		y1 = self.height - 1
		painter.drawLine(0, 0, x1, 0)
		painter.drawLine(0, 0, 0, 3)
		painter.drawLine(x1, 0, x1, 3)
		painter.drawLine(0, y1, x1, y1)
		painter.drawLine(0, y1, 0, y1 - 3)
		painter.drawLine(x1, y1, x1, y1 - 3)
# Graphics item to contain graphs arranged vertically
class VertcalGraphSetGraphicsItem(QGraphicsItem):
	"""Graphics item that stacks child graphs vertically, with a shared
	rubber band for range selection.

	Note: the "Vertcal" spelling is kept because external code may refer
	to this class by name.
	"""
	def __init__(self, collection, attrs, event_handler, child_class, parent=None):
		super(VertcalGraphSetGraphicsItem, self).__init__(parent)
		self.collection = collection
		self.top = 10
		self.width = 0
		self.height = self.top
		self.rubber_band = None
		self.rb_enabled = False
		first = True
		# Stack one child graph per data item, tracking overall extents
		for data in collection.data:
			child = child_class(collection, data, attrs, event_handler, first, self)
			child.setPos(0, self.height + 1)
			rect = child.boundingRect()
			if rect.right() > self.width:
				self.width = rect.right()
			self.height = self.height + rect.bottom() + 1
			first = False
		self.bracket = VerticalBracketGraphicsItem(self)
	def EnableRubberBand(self, xlo, xhi, rb_event_handler):
		"""Enable rubber-band selection between pixel limits xlo and xhi."""
		if self.rb_enabled:
			return
		self.rb_enabled = True
		self.rb_in_view = False
		self.setAcceptedMouseButtons(Qt.LeftButton)
		self.rb_xlo = xlo
		self.rb_xhi = xhi
		self.rb_event_handler = rb_event_handler
		# Mouse handlers are installed only when rubber-banding is enabled
		self.mousePressEvent = self.MousePressEvent
		self.mouseMoveEvent = self.MouseMoveEvent
		self.mouseReleaseEvent = self.MouseReleaseEvent
	def boundingRect(self):
		return QRectF(0, 0, self.width, self.height)
	def paint(self, painter, option, widget):
		pass
	def RubberBandParent(self):
		# The rubber band is parented to the view's viewport widget
		scene = self.scene()
		view = scene.views()[0]
		viewport = view.viewport()
		return viewport
	def RubberBandSetGeometry(self, rect):
		# Map from item coordinates through the scene to viewport coordinates
		scene_rectf = self.mapRectToScene(QRectF(rect))
		scene = self.scene()
		view = scene.views()[0]
		poly = view.mapFromScene(scene_rectf)
		self.rubber_band.setGeometry(poly.boundingRect())
	def SetSelection(self, selection_state):
		"""Show the rubber band at selection_state, or hide it when falsy."""
		if self.rubber_band:
			if selection_state:
				self.RubberBandSetGeometry(selection_state)
				self.rubber_band.show()
			else:
				self.rubber_band.hide()
	def SetBracket(self, rect):
		"""Show the vertical bracket over rect, or hide it when rect is falsy."""
		if rect:
			x, y, width, height = rect.x(), rect.y(), rect.width(), rect.height()
			self.bracket.setPos(x, y)
			self.bracket.SetSize(width, height)
			self.bracket.show()
		else:
			self.bracket.hide()
	def RubberBandX(self, event):
		"""Return the event's x clamped to the rubber-band limits."""
		x = event.pos().toPoint().x()
		if x < self.rb_xlo:
			x = self.rb_xlo
		elif x > self.rb_xhi:
			x = self.rb_xhi
		else:
			self.rb_in_view = True
		return x
	def RubberBandRect(self, x):
		"""Return the full-height selection rectangle from the origin to x."""
		if self.rb_origin.x() <= x:
			width = x - self.rb_origin.x()
			rect = QRect(self.rb_origin, QSize(width, self.height))
		else:
			width = self.rb_origin.x() - x
			top_left = QPoint(self.rb_origin.x() - width, self.rb_origin.y())
			rect = QRect(top_left, QSize(width, self.height))
		return rect
	def MousePressEvent(self, event):
		self.rb_in_view = False
		x = self.RubberBandX(event)
		self.rb_origin = QPoint(x, self.top)
		if self.rubber_band is None:
			# Created lazily on first use
			self.rubber_band = QRubberBand(QRubberBand.Rectangle, self.RubberBandParent())
		self.RubberBandSetGeometry(QRect(self.rb_origin, QSize(0, self.height)))
		if self.rb_in_view:
			self.rubber_band.show()
			self.rb_event_handler.RBMoveEvent(x, x)
		else:
			self.rubber_band.hide()
	def MouseMoveEvent(self, event):
		x = self.RubberBandX(event)
		rect = self.RubberBandRect(x)
		self.RubberBandSetGeometry(rect)
		if self.rb_in_view:
			self.rubber_band.show()
			self.rb_event_handler.RBMoveEvent(self.rb_origin.x(), x)
	def MouseReleaseEvent(self, event):
		x = self.RubberBandX(event)
		if self.rb_in_view:
			selection_state = self.RubberBandRect(x)
		else:
			selection_state = None
		self.rb_event_handler.RBReleaseEvent(self.rb_origin.x(), x, selection_state)
# Switch graph legend data model
class SwitchGraphLegendModel(QAbstractTableModel):
	"""Table model for the legend: one row per hregion (pid, tid, comm)."""
	def __init__(self, collection, region_attributes, parent=None):
		super(SwitchGraphLegendModel, self).__init__(parent)
		self.region_attributes = region_attributes
		self.child_items = sorted(collection.hregions.values(), key=GraphDataRegionOrdinal)
		self.child_count = len(self.child_items)
		self.highlight_set = set()
		self.column_headers = ("pid", "tid", "comm")
	def rowCount(self, parent):
		return self.child_count
	def headerData(self, section, orientation, role):
		if role != Qt.DisplayRole:
			return None
		if orientation != Qt.Horizontal:
			return None
		return self.columnHeader(section)
	def index(self, row, column, parent):
		return self.createIndex(row, column, self.child_items[row])
	def columnCount(self, parent=None):
		return len(self.column_headers)
	def columnHeader(self, column):
		return self.column_headers[column]
	def data(self, index, role):
		if role == Qt.BackgroundRole:
			# Highlighted rows use the region colour as background
			child = self.child_items[index.row()]
			if child in self.highlight_set:
				return self.region_attributes[child.key].colour
			return None
		if role == Qt.ForegroundRole:
			# Highlighted rows get white text; others the region colour
			child = self.child_items[index.row()]
			if child in self.highlight_set:
				return QColor(255, 255, 255)
			return self.region_attributes[child.key].colour
		if role != Qt.DisplayRole:
			return None
		hregion = self.child_items[index.row()]
		col = index.column()
		if col == 0:
			return hregion.pid
		if col == 1:
			return hregion.tid
		if col == 2:
			return hregion.comm
		return None
	def SetHighlight(self, row, set_highlight):
		# NOTE(review): set_highlight is unused here; emitting dataChanged is
		# sufficient because data() consults self.highlight_set directly
		child = self.child_items[row]
		top_left = self.createIndex(row, 0, child)
		bottom_right = self.createIndex(row, len(self.column_headers) - 1, child)
		self.dataChanged.emit(top_left, bottom_right)
	def Highlight(self, highlight_set):
		"""Update row highlighting to match highlight_set, refreshing changes."""
		for row in xrange(self.child_count):
			child = self.child_items[row]
			if child in self.highlight_set:
				if child not in highlight_set:
					self.SetHighlight(row, False)
			elif child in highlight_set:
				self.SetHighlight(row, True)
		self.highlight_set = highlight_set
# Switch graph legend is a table
class SwitchGraphLegend(QWidget):
	"""Legend widget: a sortable, read-only table of hregions beside the graphs."""
	def __init__(self, collection, region_attributes, parent=None):
		super(SwitchGraphLegend, self).__init__(parent)
		self.data_model = SwitchGraphLegendModel(collection, region_attributes)
		self.model = QSortFilterProxyModel()
		self.model.setSourceModel(self.data_model)
		self.view = QTableView()
		self.view.setModel(self.model)
		self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
		self.view.verticalHeader().setVisible(False)
		self.view.sortByColumn(-1, Qt.AscendingOrder)
		self.view.setSortingEnabled(True)
		self.view.resizeColumnsToContents()
		self.view.resizeRowsToContents()
		self.vbox = VBoxLayout(self.view)
		self.setLayout(self.vbox)
		# Initial width: the three columns plus the vertical scroll bar
		sz1 = self.view.columnWidth(0) + self.view.columnWidth(1) + self.view.columnWidth(2) + 2
		sz1 = sz1 + self.view.verticalScrollBar().sizeHint().width()
		self.saved_size = sz1
	def resizeEvent(self, event):
		# Remember the width so the splitter can restore it on resize
		self.saved_size = self.size().width()
		super(SwitchGraphLegend, self).resizeEvent(event)
	def Highlight(self, highlight_set):
		self.data_model.Highlight(highlight_set)
		self.update()
	def changeEvent(self, event):
		if event.type() == QEvent.FontChange:
			self.view.resizeRowsToContents()
			self.view.resizeColumnsToContents()
			# Need to resize rows again after column resize
			self.view.resizeRowsToContents()
		super(SwitchGraphLegend, self).changeEvent(event)
# Random colour generation
def RGBColourTooLight(r, g, b):
	"""Return True when (r, g, b) would be hard to see on a white background.

	Decided mainly by the green component; blue is ignored.
	"""
	if g > 230:
		return True
	if g <= 160:
		return False
	# Mid-range green (160 < g <= 230): too light only when red is also strong
	return r >= 60 and not (r <= 180 and g <= 180)
def GenerateColours(x):
	"""Return a list of QColor values built from x channel levels per component.

	Channel levels are 0 plus round(255 / i) for i in 1..x-1.  Black and
	colours too light against a white background are excluded.
	"""
	levels = [0]
	for i in xrange(1, x):
		levels.append(int((255.0 / i) + 0.5))
	result = []
	for r in levels:
		for g in levels:
			for b in levels:
				# Exclude black and colours that look too light against a white background
				if (r, g, b) != (0, 0, 0) and not RGBColourTooLight(r, g, b):
					result.append(QColor(r, g, b))
	return result
def GenerateNColours(n):
	"""Return at least n distinct colours, or an empty list on failure."""
	x = 2
	# Increase the number of channel levels until enough colours result
	while x < n + 2:
		colours = GenerateColours(x)
		if len(colours) >= n:
			return colours
		x += 1
	return []
def GenerateNRandomColours(n, seed):
	"""Return n colours shuffled deterministically by seed (reproducible)."""
	colours = GenerateNColours(n)
	random.seed(seed)
	random.shuffle(colours)
	return colours
# Graph attributes, in particular the scale and subrange that change when zooming
class GraphAttributes():
	"""Graph attributes, in particular the scale and subrange, which change
	when zooming.  Conversions between data and pixel coordinates round to
	a computed number of decimal places to avoid floating point error.
	"""
	def __init__(self, scale, subrange, region_attributes, dp):
		self.scale = scale
		self.subrange = subrange
		self.region_attributes = region_attributes
		# Rounding avoids errors due to finite floating point precision
		self.dp = dp # data decimal places
		self.Update()
	def XToPixel(self, x):
		return int(round((x - self.subrange.x.lo) * self.scale.x, self.pdp.x))
	def YToPixel(self, y):
		return int(round((y - self.subrange.y.lo) * self.scale.y, self.pdp.y))
	def PixelToXRounded(self, px):
		return round((round(px, 0) / self.scale.x), self.dp.x) + self.subrange.x.lo
	def PixelToYRounded(self, py):
		return round((round(py, 0) / self.scale.y), self.dp.y) + self.subrange.y.lo
	def PixelToX(self, px):
		"""Map a pixel to data x, correcting for rounding overshoot."""
		x = self.PixelToXRounded(px)
		if self.pdp.x == 0:
			rt = self.XToPixel(x)
			if rt > px:
				return x - 1
		return x
	def PixelToY(self, py):
		"""Map a pixel to data y, correcting for rounding overshoot."""
		y = self.PixelToYRounded(py)
		if self.pdp.y == 0:
			rt = self.YToPixel(y)
			if rt > py:
				return y - 1
		return y
	def ToPDP(self, dp, scale):
		# Calculate pixel decimal places:
		# (10 ** dp) is the minimum delta in the data
		# scale it to get the minimum delta in pixels
		# log10 gives the number of decimals places negatively
		# subtract 1 to divide by 10
		# round to the lower negative number
		# change the sign to get the number of decimals positively
		x = math.log10((10 ** dp) * scale)
		if x < 0:
			x -= 1
			x = -int(math.floor(x) - 0.1)
		else:
			x = 0
		return x
	def Update(self):
		x = self.ToPDP(self.dp.x, self.scale.x)
		y = self.ToPDP(self.dp.y, self.scale.y)
		self.pdp = XY(x, y) # pixel decimal places
# Switch graph splitter which divides the CPU graphs from the legend
class SwitchGraphSplitter(QSplitter):
	"""Splitter dividing the CPU graphs (widget 0) from the legend (widget 1)."""
	def __init__(self, parent=None):
		super(SwitchGraphSplitter, self).__init__(parent)
		# NOTE(review): initialized to False, so the first_time branch in
		# resizeEvent never runs as written — presumably the legend's
		# saved_size provides the initial sizing instead; confirm before changing
		self.first_time = False
	def resizeEvent(self, ev):
		if self.first_time:
			self.first_time = False
			# Size the legend to fit its three columns plus the scroll bar
			sz1 = self.widget(1).view.columnWidth(0) + self.widget(1).view.columnWidth(1) + self.widget(1).view.columnWidth(2) + 2
			sz1 = sz1 + self.widget(1).view.verticalScrollBar().sizeHint().width()
			sz0 = self.size().width() - self.handleWidth() - sz1
			self.setSizes([sz0, sz1])
		elif not(self.widget(1).saved_size is None):
			# Preserve the legend width the user last had
			sz1 = self.widget(1).saved_size
			sz0 = self.size().width() - self.handleWidth() - sz1
			self.setSizes([sz0, sz1])
		super(SwitchGraphSplitter, self).resizeEvent(ev)
# Graph widget base class
class GraphWidget(QWidget):
	"""Base class for graph widgets; emits graph_title_changed on title updates."""
	graph_title_changed = Signal(object)
	def __init__(self, parent=None):
		super(GraphWidget, self).__init__(parent)
	def GraphTitleChanged(self, title):
		self.graph_title_changed.emit(title)
	def Title(self):
		# Subclasses override to provide the current graph title
		return ""
# Display time in s, ms, us or ns
def ToTimeStr(val):
	"""Format a nanosecond count as a string in s, ms, us or ns."""
	val = Decimal(val)
	# Pick the largest unit the value reaches, keeping full ns precision
	for divisor, places, unit in ((1000000000, "0.000000001", "s"),
				      (1000000, "0.000001", "ms"),
				      (1000, "0.001", "us")):
		if val >= divisor:
			return "{} {}".format((val / divisor).quantize(Decimal(places)), unit)
	return "{} ns".format(val.quantize(Decimal("1")))
# Switch (i.e. context switch i.e. Time Chart by CPU) graph widget which contains the CPU graphs and the legend and control buttons
class SwitchGraphWidget(GraphWidget):
	"""Time Chart by CPU widget: per-CPU graphs, legend, and navigation buttons.

	Maintains zoom history (back/forward stacks) and the current rubber-band
	selection, and acts as the event handler for the graph items.
	"""
	def __init__(self, glb, collection, parent=None):
		super(SwitchGraphWidget, self).__init__(parent)
		self.glb = glb
		self.collection = collection
		self.back_state = []
		self.forward_state = []
		self.selection_state = (None, None)
		self.fwd_rect = None
		self.start_time = self.glb.StartTime(collection.machine_id)
		i = 0
		hregions = collection.hregions.values()
		colours = GenerateNRandomColours(len(hregions), 1013)
		region_attributes = {}
		for hregion in hregions:
			if hregion.pid == 0 and hregion.tid == 0:
				# pid 0 / tid 0 (idle) is always drawn black
				region_attributes[hregion.key] = GraphRegionAttribute(QColor(0, 0, 0))
			else:
				region_attributes[hregion.key] = GraphRegionAttribute(colours[i])
				i = i + 1
		# Default to entire range
		xsubrange = Subrange(0.0, float(collection.xrangehi - collection.xrangelo) + 1.0)
		ysubrange = Subrange(0.0, float(collection.yrangehi - collection.yrangelo) + 1.0)
		subrange = XY(xsubrange, ysubrange)
		scale = self.GetScaleForRange(subrange)
		self.attrs = GraphAttributes(scale, subrange, region_attributes, collection.dp)
		self.item = VertcalGraphSetGraphicsItem(collection, self.attrs, self, SwitchGraphGraphicsItem)
		self.scene = QGraphicsScene()
		self.scene.addItem(self.item)
		self.view = QGraphicsView(self.scene)
		self.view.centerOn(0, 0)
		self.view.setAlignment(Qt.AlignLeft | Qt.AlignTop)
		self.legend = SwitchGraphLegend(collection, region_attributes)
		self.splitter = SwitchGraphSplitter()
		self.splitter.addWidget(self.view)
		self.splitter.addWidget(self.legend)
		self.point_label = QLabel("")
		self.point_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
		self.back_button = QToolButton()
		self.back_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft))
		self.back_button.setDisabled(True)
		self.back_button.released.connect(lambda: self.Back())
		self.forward_button = QToolButton()
		self.forward_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowRight))
		self.forward_button.setDisabled(True)
		self.forward_button.released.connect(lambda: self.Forward())
		self.zoom_button = QToolButton()
		self.zoom_button.setText("Zoom")
		self.zoom_button.setDisabled(True)
		self.zoom_button.released.connect(lambda: self.Zoom())
		self.hbox = HBoxLayout(self.back_button, self.forward_button, self.zoom_button, self.point_label)
		self.vbox = VBoxLayout(self.splitter, self.hbox)
		self.setLayout(self.vbox)
	def GetScaleForRangeX(self, xsubrange):
		# Default graph 1000 pixels wide
		dflt = 1000.0
		r = xsubrange.hi - xsubrange.lo
		return dflt / r
	def GetScaleForRangeY(self, ysubrange):
		# Default graph 50 pixels high
		dflt = 50.0
		r = ysubrange.hi - ysubrange.lo
		return dflt / r
	def GetScaleForRange(self, subrange):
		# Default graph 1000 pixels wide, 50 pixels high
		xscale = self.GetScaleForRangeX(subrange.x)
		yscale = self.GetScaleForRangeY(subrange.y)
		return XY(xscale, yscale)
	def PointEvent(self, cpu, time_from, time_to, hregions):
		"""Show details of the hovered point and highlight its legend rows."""
		text = "CPU: " + str(cpu)
		time_from = time_from.quantize(Decimal(1))
		rel_time_from = time_from - self.glb.StartTime(self.collection.machine_id)
		text = text + " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ")"
		self.point_label.setText(text)
		self.legend.Highlight(hregions)
	def RightClickEvent(self, cpu, hregion_times, pos):
		"""Pop up a context menu offering call trees for the clicked threads."""
		if not IsSelectable(self.glb.db, "calls", "WHERE parent_id >= 0"):
			return
		menu = QMenu(self.view)
		for hregion, time in hregion_times:
			thread_at_time = (hregion.exec_comm_id, hregion.thread_id, time)
			menu_text = "Show Call Tree for {} {}:{} at {}".format(hregion.comm, hregion.pid, hregion.tid, time)
			menu.addAction(CreateAction(menu_text, "Show Call Tree", lambda a=None, args=thread_at_time: self.RightClickSelect(args), self.view))
		menu.exec_(pos)
	def RightClickSelect(self, args):
		CallTreeWindow(self.glb, self.glb.mainwindow, thread_at_time=args)
	def NoPointEvent(self):
		# Clear the point details and legend highlighting
		self.point_label.setText("")
		self.legend.Highlight({})
	def RangeEvent(self, time_from, time_to):
		"""Show details of the rubber-band selected time range."""
		time_from = time_from.quantize(Decimal(1))
		time_to = time_to.quantize(Decimal(1))
		if time_to <= time_from:
			self.point_label.setText("")
			return
		rel_time_from = time_from - self.start_time
		rel_time_to = time_to - self.start_time
		text = " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ") to: " + str(time_to) + " (+" + ToTimeStr(rel_time_to) + ")"
		text = text + " duration: " + ToTimeStr(time_to - time_from)
		self.point_label.setText(text)
	def BackState(self):
		# The state captured by the back/forward history
		return (self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect)
	def PushBackState(self):
		state = copy.deepcopy(self.BackState())
		self.back_state.append(state)
		self.back_button.setEnabled(True)
	def PopBackState(self):
		self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.back_state.pop()
		self.attrs.Update()
		if not self.back_state:
			self.back_button.setDisabled(True)
	def PushForwardState(self):
		state = copy.deepcopy(self.BackState())
		self.forward_state.append(state)
		self.forward_button.setEnabled(True)
	def PopForwardState(self):
		self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.forward_state.pop()
		self.attrs.Update()
		if not self.forward_state:
			self.forward_button.setDisabled(True)
	def Title(self):
		"""Return a title describing the currently visible time range."""
		time_from = self.collection.xrangelo + Decimal(self.attrs.subrange.x.lo)
		time_to = self.collection.xrangelo + Decimal(self.attrs.subrange.x.hi)
		rel_time_from = time_from - self.start_time
		rel_time_to = time_to - self.start_time
		title = "+" + ToTimeStr(rel_time_from) + " to +" + ToTimeStr(rel_time_to)
		title = title + " (" + ToTimeStr(time_to - time_from) + ")"
		return title
	def Update(self):
		"""Refresh selection, bracket, buttons, title and the graph display."""
		selected_subrange, selection_state = self.selection_state
		self.item.SetSelection(selection_state)
		self.item.SetBracket(self.fwd_rect)
		self.zoom_button.setDisabled(selected_subrange is None)
		self.GraphTitleChanged(self.Title())
		self.item.update(self.item.boundingRect())
	def Back(self):
		if not self.back_state:
			return
		self.PushForwardState()
		self.PopBackState()
		self.Update()
	def Forward(self):
		if not self.forward_state:
			return
		self.PushBackState()
		self.PopForwardState()
		self.Update()
	def SelectEvent(self, x0, x1, selection_state):
		"""Record the rubber-band selection; enable Zoom when it is non-empty."""
		if selection_state is None:
			selected_subrange = None
		else:
			# Enforce a minimum selection width of one data unit
			if x1 - x0 < 1.0:
				x1 += 1.0
			selected_subrange = Subrange(x0, x1)
		self.selection_state = (selected_subrange, selection_state)
		self.zoom_button.setDisabled(selected_subrange is None)
	def Zoom(self):
		"""Zoom into the selected x sub-range, pushing the current view to Back."""
		selected_subrange, selection_state = self.selection_state
		if selected_subrange is None:
			return
		self.fwd_rect = selection_state
		self.item.SetSelection(None)
		self.PushBackState()
		self.attrs.subrange.x = selected_subrange
		# Zooming invalidates any Forward history
		self.forward_state = []
		self.forward_button.setDisabled(True)
		self.selection_state = (None, None)
		self.fwd_rect = None
		self.attrs.scale.x = self.GetScaleForRangeX(self.attrs.subrange.x)
		self.attrs.Update()
		self.Update()
# Slow initialization - perform non-GUI initialization in a separate thread and put up a modal message box while waiting
class SlowInitClass():
	"""Run init_fn on a thread while showing a modal 'Please wait' message box."""
	def __init__(self, glb, title, init_fn):
		self.init_fn = init_fn
		self.done = False
		self.result = None
		self.msg_box = QMessageBox(glb.mainwindow)
		self.msg_box.setText("Initializing " + title + ". Please wait.")
		self.msg_box.setWindowTitle("Initializing " + title)
		self.msg_box.setWindowIcon(glb.mainwindow.style().standardIcon(QStyle.SP_MessageBoxInformation))
		self.init_thread = Thread(self.ThreadFn, glb)
		self.init_thread.done.connect(lambda: self.Done(), Qt.QueuedConnection)
		self.init_thread.start()
	def Done(self):
		# Dismiss the message box when initialization completes
		self.msg_box.done(0)
	def ThreadFn(self, glb):
		"""Thread body: open a database connection and run init_fn with it."""
		# Connection name must be unique per process
		conn_name = "SlowInitClass" + str(os.getpid())
		db, dbname = glb.dbref.Open(conn_name)
		self.result = self.init_fn(db)
		self.done = True
		return (True, 0)
	def Result(self):
		"""Block (showing the message box) until done, then return the result."""
		while not self.done:
			self.msg_box.exec_()
		self.init_thread.wait()
		return self.result
def SlowInit(glb, title, init_fn):
	"""Run init_fn on a background thread, blocking the UI with a message box."""
	return SlowInitClass(glb, title, init_fn).Result()
# Time chart by CPU window
class TimeChartByCPUWindow(QMdiSubWindow):
	"""MDI sub-window hosting the Time Chart by CPU graph."""
	def __init__(self, glb, parent=None):
		super(TimeChartByCPUWindow, self).__init__(parent)
		self.glb = glb
		self.machine_id = glb.HostMachineId()
		self.collection_name = "SwitchGraphDataCollection " + str(self.machine_id)
		# Reuse a cached collection if one exists; otherwise build it slowly
		collection = LookupModel(self.collection_name)
		if collection is None:
			collection = SlowInit(glb, "Time Chart", self.Init)
		self.widget = SwitchGraphWidget(glb, collection, self)
		self.view = self.widget
		self.base_title = "Time Chart by CPU"
		self.setWindowTitle(self.base_title + self.widget.Title())
		self.widget.graph_title_changed.connect(self.GraphTitleChanged)
		self.setWidget(self.widget)
		AddSubWindow(glb.mainwindow.mdi_area, self, self.windowTitle())
	def Init(self, db):
		"""Create (or look up) the switch graph data collection using db."""
		return LookupCreateModel(self.collection_name, lambda : SwitchGraphDataCollection(self.glb, db, self.machine_id))
	def GraphTitleChanged(self, title):
		self.setWindowTitle(self.base_title + " : " + title)
# Child data item finder
class ChildDataItemFinder():
	"""Find rows among a model root's child items matching a value or regex.

	Repeated finds with the same value step forward/backward through the
	previously computed matches, wrapping around at either end.
	"""
	def __init__(self, root):
		self.root = root
		self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (None,) * 5
		self.rows = []
		self.pos = 0
	def FindSelect(self):
		"""Build self.rows: rows where any column matches self.value."""
		self.rows = []
		if self.pattern:
			# Regular expression search
			pattern = re.compile(self.value)
			for child in self.root.child_items:
				for column_data in child.data:
					if re.search(pattern, str(column_data)) is not None:
						self.rows.append(child.row)
						break
		else:
			# Plain substring search
			for child in self.root.child_items:
				for column_data in child.data:
					if self.value in str(column_data):
						self.rows.append(child.row)
						break
	def FindValue(self):
		"""Return the first matching row, or -1; re-search only if needed."""
		self.pos = 0
		if self.last_value != self.value or self.pattern != self.last_pattern:
			self.FindSelect()
		if not len(self.rows):
			return -1
		return self.rows[self.pos]
	def FindThread(self):
		"""Thread body: step through matches in self.direction, wrapping around."""
		if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern:
			row = self.FindValue()
		elif len(self.rows):
			if self.direction > 0:
				self.pos += 1
				if self.pos >= len(self.rows):
					self.pos = 0
			else:
				self.pos -= 1
				if self.pos < 0:
					self.pos = len(self.rows) - 1
			row = self.rows[self.pos]
		else:
			row = -1
		return (True, row)
	def Find(self, value, direction, pattern, context, callback):
		"""Start an asynchronous find; callback(row) is invoked when done."""
		self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (value, direction,pattern, self.value, self.pattern)
		# Use a thread so the UI is not blocked
		thread = Thread(self.FindThread)
		thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection)
		thread.start()
	def FindDone(self, thread, callback, row):
		callback(row)
# Number of database records to fetch in one go
glb_chunk_sz = 10000	# records fetched per batch by the SQL fetcher
# Background process for SQL data fetcher
class SQLFetcherProcess():
	"""Background process that runs SQL and streams pickled records to the
	parent through a shared-memory ring buffer.

	Each record is stored as a pickled length (glb_nsz bytes) followed by the
	pickled record itself.  head/tail positions and events coordinate with
	the consumer (SQLFetcher) in the parent process.
	"""
	def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep):
		# Need a unique connection name
		conn_name = "SQLFetcher" + str(os.getpid())
		self.db, dbname = dbref.Open(conn_name)
		self.sql = sql
		self.buffer = buffer
		self.head = head
		self.tail = tail
		self.fetch_count = fetch_count
		self.fetching_done = fetching_done
		self.process_target = process_target
		self.wait_event = wait_event
		self.fetched_event = fetched_event
		self.prep = prep
		self.query = QSqlQuery(self.db)
		# Keyed queries ($$last_id$$) can be re-issued indefinitely;
		# otherwise allow a single execution (the limit counts down from 2)
		self.query_limit = 0 if "$$last_id$$" in sql else 2
		self.last_id = -1
		self.fetched = 0
		self.more = True
		self.local_head = self.head.value
		self.local_tail = self.tail.value
	def Select(self):
		"""(Re-)execute the query, substituting the last seen id if keyed."""
		if self.query_limit:
			if self.query_limit == 1:
				return
			self.query_limit -= 1
		stmt = self.sql.replace("$$last_id$$", str(self.last_id))
		QueryExec(self.query, stmt)
	def Next(self):
		"""Return the next prepared record, or None when there are no more."""
		if not self.query.next():
			self.Select()
			if not self.query.next():
				return None
		self.last_id = self.query.value(0)
		return self.prep(self.query)
	def WaitForTarget(self):
		"""Sleep until asked to fetch beyond what has been fetched, or to exit.

		A negative target means shut down.
		"""
		while True:
			self.wait_event.clear()
			target = self.process_target.value
			if target > self.fetched or target < 0:
				break
			self.wait_event.wait()
		return target
	def HasSpace(self, sz):
		"""Check for sz free bytes in the ring buffer, wrapping head if needed."""
		if self.local_tail <= self.local_head:
			space = len(self.buffer) - self.local_head
			if space > sz:
				return True
			if space >= glb_nsz:
				# Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
				nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
				self.buffer[self.local_head : self.local_head + len(nd)] = nd
			self.local_head = 0
		if self.local_tail - self.local_head > sz:
			return True
		return False
	def WaitForSpace(self, sz):
		"""Block until the consumer frees at least sz bytes in the buffer."""
		if self.HasSpace(sz):
			return
		while True:
			self.wait_event.clear()
			self.local_tail = self.tail.value
			if self.HasSpace(sz):
				return
			self.wait_event.wait()
	def AddToBuffer(self, obj):
		"""Append obj (length-prefixed pickle) at the ring buffer head."""
		d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
		n = len(d)
		nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
		sz = n + glb_nsz
		self.WaitForSpace(sz)
		pos = self.local_head
		self.buffer[pos : pos + len(nd)] = nd
		self.buffer[pos + glb_nsz : pos + sz] = d
		self.local_head += sz
	def FetchBatch(self, batch_size):
		"""Fetch up to batch_size records, then publish the new head and count."""
		fetched = 0
		while batch_size > fetched:
			obj = self.Next()
			if obj is None:
				self.more = False
				break
			self.AddToBuffer(obj)
			fetched += 1
		if fetched:
			self.fetched += fetched
			with self.fetch_count.get_lock():
				self.fetch_count.value += fetched
			self.head.value = self.local_head
			self.fetched_event.set()
	def Run(self):
		"""Main loop: fetch batches until there is no more data or told to exit."""
		while self.more:
			target = self.WaitForTarget()
			if target < 0:
				break
			batch_size = min(glb_chunk_sz, target - self.fetched)
			self.FetchBatch(batch_size)
		self.fetching_done.value = True
		self.fetched_event.set()
def SQLFetcherFn(*x):
	"""Entry point for the SQL fetcher background process."""
	SQLFetcherProcess(*x).Run()
# SQL data fetcher
class SQLFetcher(QObject):
done = Signal(object)
def __init__(self, glb, sql, prep, process_data, parent=None):
super(SQLFetcher, self).__init__(parent)
self.process_data = process_data
self.more = True
self.target = 0
self.last_target = 0
self.fetched = 0
self.buffer_size = 16 * 1024 * 1024
self.buffer = Array(c_char, self.buffer_size, lock=False)
self.head = Value(c_longlong)
self.tail = Value(c_longlong)
self.local_tail = 0
self.fetch_count = Value(c_longlong)
self.fetching_done = Value(c_bool)
self.last_count = 0
self.process_target = Value(c_longlong)
self.wait_event = Event()
self.fetched_event = Event()
glb.AddInstanceToShutdownOnExit(self)
self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep))
self.process.start()
self.thread = Thread(self.Thread)
self.thread.done.connect(self.ProcessData, Qt.QueuedConnection)
self.thread.start()
def Shutdown(self):
# Tell the thread and process to exit
self.process_target.value = -1
self.wait_event.set()
self.more = False
self.fetching_done.value = True
self.fetched_event.set()
def Thread(self):
if not self.more:
return True, 0
while True:
self.fetched_event.clear()
fetch_count = self.fetch_count.value
if fetch_count != self.last_count:
break
if self.fetching_done.value:
self.more = False
return True, 0
self.fetched_event.wait()
count = fetch_count - self.last_count
self.last_count = fetch_count
self.fetched += count
return False, count
def Fetch(self, nr):
if not self.more:
# -1 inidcates there are no more
return -1
result = self.fetched
extra = result + nr - self.target
if extra > 0:
self.target += extra
# process_target < 0 indicates shutting down
if self.process_target.value >= 0:
self.process_target.value = self.target
self.wait_event.set()
return result
def RemoveFromBuffer(self):
pos = self.local_tail
if len(self.buffer) - pos < glb_nsz:
pos = 0
n = pickle.loads(self.buffer[pos : pos + glb_nsz])
if n == 0:
pos = 0
n = pickle.loads(self.buffer[0 : glb_nsz])
pos += glb_nsz
obj = pickle.loads(self.buffer[pos : pos + n])
self.local_tail = pos + n
return obj
	def ProcessData(self, count):
		"""Consume 'count' records from the shared buffer and pass each to
		the process_data callback, then release the buffer space."""
		for i in xrange(count):
			obj = self.RemoveFromBuffer()
			self.process_data(obj)
		# Publish the new tail so the fetch process may reuse the space
		self.tail.value = self.local_tail
		self.wait_event.set()
		self.done.emit(count)
# Fetch more records bar
class FetchMoreRecordsBar():
	"""Horizontal bar letting the user fetch more records into a model that
	supports incremental fetching (FetchMoreRecords / HasMoreRecords).

	Shows a spin box for how many chunks (of glb_chunk_sz records) to fetch,
	a Go! button, and a progress bar while fetching.  Once the model reports
	that no more records exist, the controls are replaced by an
	"All records fetched" label.
	"""
	def __init__(self, model, parent):
		self.model = model
		self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:")
		self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
		self.fetch_count = QSpinBox()
		self.fetch_count.setRange(1, 1000000)
		self.fetch_count.setValue(10)
		self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
		self.fetch = QPushButton("Go!")
		self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
		self.fetch.released.connect(self.FetchMoreRecords)
		self.progress = QProgressBar()
		self.progress.setRange(0, 100)
		self.progress.hide()
		self.done_label = QLabel("All records fetched")
		self.done_label.hide()
		self.spacer = QLabel("")
		self.close_button = QToolButton()
		self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
		self.close_button.released.connect(self.Deactivate)
		self.hbox = QHBoxLayout()
		self.hbox.setContentsMargins(0, 0, 0, 0)
		self.hbox.addWidget(self.label)
		self.hbox.addWidget(self.fetch_count)
		self.hbox.addWidget(self.fetch)
		self.hbox.addWidget(self.spacer)
		self.hbox.addWidget(self.progress)
		self.hbox.addWidget(self.done_label)
		self.hbox.addWidget(self.close_button)
		self.bar = QWidget()
		self.bar.setLayout(self.hbox)
		self.bar.show()
		self.in_progress = False
		self.model.progress.connect(self.Progress)
		self.done = False
		if not model.HasMoreRecords():
			self.Done()
	def Widget(self):
		# The widget to embed in the parent window's layout
		return self.bar
	def Activate(self):
		self.bar.show()
		self.fetch.setFocus()
	def Deactivate(self):
		self.bar.hide()
	def Enable(self, enable):
		self.fetch.setEnabled(enable)
		self.fetch_count.setEnabled(enable)
	def Busy(self):
		# Swap the Go! button for the progress bar while a fetch is running
		self.Enable(False)
		self.fetch.hide()
		self.spacer.hide()
		self.progress.show()
	def Idle(self):
		self.in_progress = False
		self.Enable(True)
		self.progress.hide()
		self.fetch.show()
		self.spacer.show()
	def Target(self):
		# Total number of records the user asked for
		return self.fetch_count.value() * glb_chunk_sz
	def Done(self):
		# No more records: hide the fetch controls permanently
		self.done = True
		self.Idle()
		self.label.hide()
		self.fetch_count.hide()
		self.fetch.hide()
		self.spacer.hide()
		self.done_label.show()
	def Progress(self, count):
		# Connected to the model's 'progress' signal
		if self.in_progress:
			if count:
				percent = ((count - self.start) * 100) / self.Target()
				if percent >= 100:
					self.Idle()
				else:
					self.progress.setValue(percent)
		if not count:
			# Count value of zero means no more records
			self.Done()
	def FetchMoreRecords(self):
		if self.done:
			return
		self.progress.setValue(0)
		self.Busy()
		self.in_progress = True
		# Remember the starting row count so Progress() can compute percent
		self.start = self.model.FetchMoreRecords(self.Target())
# Branch data model level two item
class BranchLevelTwoItem():
	"""Leaf (level two) item of the branch data model.

	Represents one disassembled instruction: a row whose cells are all
	empty except the single column 'col' holding the instruction text.
	"""
	def __init__(self, row, col, text, parent_item):
		self.row = row
		self.parent_item = parent_item
		# Empty cells up to 'col', then the text
		self.data = [""] * col + [text]
		self.level = 2
	def getParentItem(self):
		return self.parent_item
	def getRow(self):
		return self.row
	def childCount(self):
		# A leaf never has children
		return 0
	def hasChildren(self):
		return False
	def getData(self, column):
		return self.data[column]
# Branch data model level one item
class BranchLevelOneItem():
	"""Level one item of the branch data model: one branch sample row.

	Child items (level two) are created lazily by Select(), which
	disassembles the code between this sample's branch target and the next
	sample on the same CPU, when a disassembler is available.
	"""
	def __init__(self, glb, row, data, parent_item):
		self.glb = glb
		self.row = row
		self.parent_item = parent_item
		self.child_count = 0
		self.child_items = []
		# data[0] is the database id; the remaining values are the columns
		self.data = data[1:]
		self.dbid = data[0]
		self.level = 1
		self.query_done = False
		# Disassembly text goes in the last ('Branch') column
		self.br_col = len(self.data) - 1
	def getChildItem(self, row):
		return self.child_items[row]
	def getParentItem(self):
		return self.parent_item
	def getRow(self):
		return self.row
	def Select(self):
		"""Populate child items by disassembling from this sample's branch
		target up to (and including) the next sample on the same CPU."""
		self.query_done = True
		if not self.glb.have_disassembler:
			return
		query = QSqlQuery(self.glb.db)
		QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip"
				" FROM samples"
				" INNER JOIN dsos ON samples.to_dso_id = dsos.id"
				" INNER JOIN symbols ON samples.to_symbol_id = symbols.id"
				" WHERE samples.id = " + str(self.dbid))
		if not query.next():
			return
		cpu = query.value(0)
		dso = query.value(1)
		sym = query.value(2)
		if dso == 0 or sym == 0:
			# Unknown dso or symbol: nothing to disassemble
			return
		off = query.value(3)
		short_name = query.value(4)
		long_name = query.value(5)
		build_id = query.value(6)
		sym_start = query.value(7)
		ip = query.value(8)
		# Find the next sample on the same CPU to bound the disassembly
		QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start"
				" FROM samples"
				" INNER JOIN symbols ON samples.symbol_id = symbols.id"
				" WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) +
				" ORDER BY samples.id"
				" LIMIT 1")
		if not query.next():
			return
		if query.value(0) != dso:
			# Cannot disassemble from one dso to another
			return
		bsym = query.value(1)
		boff = query.value(2)
		bsym_start = query.value(3)
		if bsym == 0:
			return
		# Total number of bytes between the two sample addresses
		tot = bsym_start + boff + 1 - sym_start - off
		if tot <= 0 or tot > 16384:
			# Sanity limit on how much to disassemble
			return
		inst = self.glb.disassembler.Instruction()
		f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id)
		if not f:
			return
		mode = 0 if Is64Bit(f) else 1
		self.glb.disassembler.SetMode(inst, mode)
		# Read the instruction bytes (with some slack) from the dso file
		buf_sz = tot + 16
		buf = create_string_buffer(tot + 16)
		f.seek(sym_start + off)
		buf.value = f.read(buf_sz)
		buf_ptr = addressof(buf)
		i = 0
		while tot > 0:
			cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip)
			if cnt:
				byte_str = tohex(ip).rjust(16)
				for k in xrange(cnt):
					byte_str += " %02x" % ord(buf[i])
					i += 1
				# Pad the byte column out to a fixed 16-byte width
				while k < 15:
					byte_str += "   "
					k += 1
				self.child_items.append(BranchLevelTwoItem(0, self.br_col, byte_str + " " + text, self))
				self.child_count += 1
			else:
				# Disassembly failed: stop here
				return
			buf_ptr += cnt
			tot -= cnt
			buf_sz -= cnt
			ip += cnt
	def childCount(self):
		"""Return the child count, running the query on first call.

		Returns -1 when the query has just run and produced no children.
		"""
		if not self.query_done:
			self.Select()
			if not self.child_count:
				return -1
		return self.child_count
	def hasChildren(self):
		if not self.query_done:
			# Assume children exist until the query has actually been done
			return True
		return self.child_count > 0
	def getData(self, column):
		return self.data[column]
# Branch data model root item
class BranchRootItem():
	"""Invisible root of the branch data model tree."""
	def __init__(self):
		self.child_count = 0
		self.child_items = []
		self.level = 0
	def getChildItem(self, row):
		return self.child_items[row]
	def getParentItem(self):
		# The root has no parent
		return None
	def getRow(self):
		return 0
	def childCount(self):
		return self.child_count
	def hasChildren(self):
		return self.child_count != 0
	def getData(self, column):
		# The root row displays nothing
		return ""
# Calculate instructions per cycle
def CalcIPC(cyc_cnt, insn_cnt):
	"""Return instructions-per-cycle as a string rounded to 2 decimal
	places, or "0" if either count is zero/missing."""
	if not (cyc_cnt and insn_cnt):
		return "0"
	ratio = Decimal(float(insn_cnt) / cyc_cnt)
	return str(ratio.quantize(Decimal(".01"), rounding=ROUND_HALF_UP))
# Branch data preparation
def BranchDataPrepBr(query, data):
	"""Append the formatted 'Branch' column:
	from-address symbol+offset (dso) -> to-address symbol+offset (dso)."""
	branch_from = (tohex(query.value(8)).rjust(16) + " " + query.value(9) +
			offstr(query.value(10)) + " (" + dsoname(query.value(11)) + ")")
	branch_to = (tohex(query.value(12)) + " " + query.value(13) +
			offstr(query.value(14)) + " (" + dsoname(query.value(15)) + ")")
	data.append(branch_from + " -> " + branch_to)
def BranchDataPrepIPC(query, data):
	"""Append the instruction count, cycle count and IPC columns."""
	insn_cnt = query.value(16)
	cyc_cnt = query.value(17)
	data.extend([insn_cnt, cyc_cnt, CalcIPC(cyc_cnt, insn_cnt)])
def BranchDataPrep(query):
	"""Build one branch model row (no IPC columns) from a query result."""
	data = [query.value(i) for i in xrange(0, 8)]
	BranchDataPrepBr(query, data)
	return data
def BranchDataPrepWA(query):
	"""Like BranchDataPrep, but stringifies the time column.

	Works around pyside v1 under python3 failing to handle very large
	integers (i.e. time).
	"""
	data = [query.value(0)]
	# Convert time to a right-justified string
	data.append("{:>19}".format(query.value(1)))
	data += [query.value(i) for i in xrange(2, 8)]
	BranchDataPrepBr(query, data)
	return data
def BranchDataWithIPCPrep(query):
	"""Build one branch model row including the IPC columns."""
	data = [query.value(i) for i in xrange(0, 8)]
	BranchDataPrepIPC(query, data)
	BranchDataPrepBr(query, data)
	return data
def BranchDataWithIPCPrepWA(query):
	"""Like BranchDataWithIPCPrep, but stringifies the time column.

	Works around pyside v1 under python3 failing to handle very large
	integers (i.e. time).
	"""
	data = [query.value(0)]
	# Convert time to a right-justified string
	data.append("{:>19}".format(query.value(1)))
	data += [query.value(i) for i in xrange(2, 8)]
	BranchDataPrepIPC(query, data)
	BranchDataPrepBr(query, data)
	return data
# Branch data model
class BranchModel(TreeModel):
	"""Tree model of branch events for one event type.

	Rows are fetched incrementally (glb_chunk_sz at a time) by a
	SQLFetcher running in a separate process; Update() (connected to the
	fetcher's 'done' signal) splices newly arrived rows into the model.
	"""
	progress = Signal(object)
	def __init__(self, glb, event_id, where_clause, parent=None):
		super(BranchModel, self).__init__(glb, None, parent)
		self.event_id = event_id
		self.more = True
		self.populated = 0
		# IPC columns exist only if the database has insn_count / cyc_count
		self.have_ipc = IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count")
		if self.have_ipc:
			select_ipc = ", insn_count, cyc_count"
			prep_fn = BranchDataWithIPCPrep
			prep_wa_fn = BranchDataWithIPCPrepWA
		else:
			select_ipc = ""
			prep_fn = BranchDataPrep
			prep_wa_fn = BranchDataPrepWA
		sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
			" CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
			" ip, symbols.name, sym_offset, dsos.short_name,"
			" to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
			+ select_ipc +
			" FROM samples"
			" INNER JOIN comms ON comm_id = comms.id"
			" INNER JOIN threads ON thread_id = threads.id"
			" INNER JOIN branch_types ON branch_type = branch_types.id"
			" INNER JOIN symbols ON symbol_id = symbols.id"
			" INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id"
			" INNER JOIN dsos ON samples.dso_id = dsos.id"
			" INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id"
			" WHERE samples.id > $$last_id$$" + where_clause +
			" AND evsel_id = " + str(self.event_id) +
			" ORDER BY samples.id"
			" LIMIT " + str(glb_chunk_sz))
		# Fix: the workaround prep function (which stringifies the time
		# column) must be selected precisely when running pyside version 1
		# with python 3, which cannot marshal very large integers - the
		# selection was previously inverted (cf. SQLAutoTableModel)
		if pyside_version_1 and sys.version_info[0] == 3:
			prep = prep_wa_fn
		else:
			prep = prep_fn
		self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
		self.fetcher.done.connect(self.Update)
		self.fetcher.Fetch(glb_chunk_sz)
	def GetRoot(self):
		return BranchRootItem()
	def columnCount(self, parent=None):
		if self.have_ipc:
			return 11
		else:
			return 8
	def columnHeader(self, column):
		if self.have_ipc:
			return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Insn Cnt", "Cyc Cnt", "IPC", "Branch")[column]
		else:
			return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
	def columnFont(self, column):
		# The 'Branch' column contains disassembly, so use a fixed-width font
		if self.have_ipc:
			br_col = 10
		else:
			br_col = 7
		if column != br_col:
			return None
		return QFont("Monospace")
	def DisplayData(self, item, index):
		if item.level == 1:
			# Fetch more data when the view nears the end of what is loaded
			self.FetchIfNeeded(item.row)
		return item.getData(index.column())
	def AddSample(self, data):
		# Called by the fetcher per record; rows are announced to the view
		# later, in Update()
		child = BranchLevelOneItem(self.glb, self.populated, data, self.root)
		self.root.child_items.append(child)
		self.populated += 1
	def Update(self, fetched):
		"""Splice newly fetched rows into the model and report progress.

		A fetched count of zero means there are no more records.
		"""
		if not fetched:
			self.more = False
			self.progress.emit(0)
		child_count = self.root.child_count
		count = self.populated - child_count
		if count > 0:
			parent = QModelIndex()
			self.beginInsertRows(parent, child_count, child_count + count - 1)
			self.insertRows(child_count, count, parent)
			self.root.child_count += count
			self.endInsertRows()
			self.progress.emit(self.root.child_count)
	def FetchMoreRecords(self, count):
		"""Request more records; returns the current row count."""
		current = self.root.child_count
		if self.more:
			self.fetcher.Fetch(count)
		else:
			self.progress.emit(0)
		return current
	def HasMoreRecords(self):
		return self.more
# Report Variables
class ReportVars():
	"""Bag of user-selected report parameters: display name, SQL WHERE
	clause and LIMIT clause."""
	def __init__(self, name = "", where_clause = "", limit = ""):
		self.name = name
		self.where_clause = where_clause
		self.limit = limit
	def UniqueId(self):
		# Reports with identical constraints can share one model
		return "%s;%s" % (self.where_clause, self.limit)
# Branch window
class BranchWindow(QMdiSubWindow):
	"""MDI sub-window displaying branch events for one event type in a
	tree view, with find and fetch-more bars below."""
	def __init__(self, glb, event_id, report_vars, parent=None):
		super(BranchWindow, self).__init__(parent)
		# Windows with identical constraints share one model
		model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
		self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
		self.view = QTreeView()
		self.view.setUniformRowHeights(True)
		self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
		self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
		self.view.setModel(self.model)
		self.ResizeColumnsToContents()
		self.context_menu = TreeContextMenu(self.view)
		self.find_bar = FindBar(self, self, True)
		self.finder = ChildDataItemFinder(self.model.root)
		self.fetch_bar = FetchMoreRecordsBar(self.model, self)
		self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
		self.setWidget(self.vbox.Widget())
		AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
	def ResizeColumnToContents(self, column, n):
		"""Set the column width from the first n rows plus the header."""
		# Using the view's resizeColumnToContents() here is extremely slow
		# so implement a crude alternative
		mm = "MM" if column else "MMMM"
		font = self.view.font()
		metrics = QFontMetrics(font)
		# Locals renamed from 'max'/'len' to avoid shadowing builtins
		max_width = 0
		for row in xrange(n):
			val = self.model.root.child_items[row].data[column]
			width = metrics.width(str(val) + mm)
			max_width = max(max_width, width)
		val = self.model.columnHeader(column)
		width = metrics.width(str(val) + mm)
		max_width = max(max_width, width)
		self.view.setColumnWidth(column, max_width)
	def ResizeColumnsToContents(self):
		"""Size all columns, deferring until some data has arrived."""
		n = min(self.model.root.child_count, 100)
		if n < 1:
			# No data yet, so connect a signal to notify when there is
			self.model.rowsInserted.connect(self.UpdateColumnWidths)
			return
		columns = self.model.columnCount()
		for i in xrange(columns):
			self.ResizeColumnToContents(i, n)
	def UpdateColumnWidths(self, *x):
		# This only needs to be done once, so disconnect the signal now
		self.model.rowsInserted.disconnect(self.UpdateColumnWidths)
		self.ResizeColumnsToContents()
	def Find(self, value, direction, pattern, context):
		"""Start an asynchronous find in the model (FindBar callback)."""
		self.view.setFocus()
		self.find_bar.Busy()
		self.finder.Find(value, direction, pattern, context, self.FindDone)
	def FindDone(self, row):
		"""Completion callback for Find(): select the hit or report none."""
		self.find_bar.Idle()
		if row >= 0:
			self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
		else:
			self.find_bar.NotFound()
# Line edit data item
class LineEditDataItem(object):
	"""Base class for a labelled QLineEdit field in a report dialog.

	Subclasses override DoValidate() to parse/convert the text; the result
	is stored in self.value and self.error records any failure message.
	"""
	def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
		self.glb = glb
		self.label = label
		self.placeholder_text = placeholder_text
		self.parent = parent
		self.id = id
		self.value = default
		self.widget = QLineEdit(default)
		self.widget.editingFinished.connect(self.Validate)
		self.widget.textChanged.connect(self.Invalidate)
		self.red = False
		self.error = ""
		self.validated = True
		if placeholder_text:
			self.widget.setPlaceholderText(placeholder_text)
	def TurnTextRed(self):
		# Indicate an invalid value by turning the text red
		if not self.red:
			palette = QPalette()
			palette.setColor(QPalette.Text,Qt.red)
			self.widget.setPalette(palette)
			self.red = True
	def TurnTextNormal(self):
		if self.red:
			palette = QPalette()
			self.widget.setPalette(palette)
			self.red = False
	def InvalidValue(self, value):
		"""Record and display an 'invalid value' error for this field."""
		self.value = ""
		self.TurnTextRed()
		self.error = self.label + " invalid value '" + value + "'"
		self.parent.ShowMessage(self.error)
	def Invalidate(self):
		# Text changed, so re-validation is needed
		self.validated = False
	def DoValidate(self, input_string):
		# Default: accept any non-empty text as-is (subclasses override)
		self.value = input_string.strip()
	def Validate(self):
		"""Validate the current widget text, updating value/error state."""
		self.validated = True
		self.error = ""
		self.TurnTextNormal()
		self.parent.ClearMessage()
		input_string = self.widget.text()
		if not len(input_string.strip()):
			self.value = ""
			return
		self.DoValidate(input_string)
	def IsValid(self):
		"""Validate if needed, show any error, and return True if valid."""
		if not self.validated:
			self.Validate()
		if len(self.error):
			self.parent.ShowMessage(self.error)
			return False
		return True
	def IsNumber(self, value):
		"""Return True only if value is the canonical decimal string form
		of an integer (so e.g. '05' and '1.5' are rejected)."""
		try:
			x = int(value)
		except (ValueError, TypeError):
			# Narrowed from a bare except: int() raises only these here
			x = 0
		return str(x) == value
# Non-negative integer ranges dialog data item
class NonNegativeIntegerRangesDataItem(LineEditDataItem):
	"""Field accepting comma-separated non-negative integers and ranges
	(e.g. "0,5-6"), converted into a SQL condition on column_name."""
	def __init__(self, glb, label, placeholder_text, column_name, parent):
		super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
		self.column_name = column_name
	def DoValidate(self, input_string):
		singles = []
		pairs = []
		for token in [x.strip() for x in input_string.split(",")]:
			if "-" in token:
				bounds = token.split("-")
				if len(bounds) != 2 or not self.IsNumber(bounds[0]) or not self.IsNumber(bounds[1]):
					return self.InvalidValue(token)
				pairs.append(bounds)
			else:
				if not self.IsNumber(token):
					return self.InvalidValue(token)
				singles.append(token)
		# Each pair becomes a BETWEEN-style condition; singles become IN (...)
		conditions = [("(" + self.column_name + " >= " + lo + " AND " + self.column_name + " <= " + hi + ")") for lo, hi in pairs]
		if singles:
			conditions.append(self.column_name + " IN (" + ",".join(singles) + ")")
		self.value = " OR ".join(conditions)
# Positive integer dialog data item
class PositiveIntegerDataItem(LineEditDataItem):
	"""Field accepting a single strictly-positive integer."""
	def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
		super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)
	def DoValidate(self, input_string):
		text = input_string.strip()
		if not self.IsNumber(text):
			return self.InvalidValue(input_string)
		number = int(text)
		if number <= 0:
			# Zero and negatives are rejected
			return self.InvalidValue(input_string)
		self.value = str(number)
# Dialog data item converted and validated using a SQL table
class SQLTableDataItem(LineEditDataItem):
	"""Field whose comma-separated values are looked up in a SQL table and
	converted to an id-based SQL condition.

	NOTE(review): values are interpolated directly into the SQL statement;
	acceptable for this local analysis tool, but not injection-safe.
	"""
	def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
		super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
		self.table_name = table_name
		self.match_column = match_column
		self.column_name1 = column_name1
		self.column_name2 = column_name2
	def ValueToIds(self, value):
		"""Return the ids of rows whose match_column equals value."""
		query = QSqlQuery(self.glb.db)
		stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
		if not query.exec_(stmt):
			return []
		matches = []
		while query.next():
			matches.append(str(query.value(0)))
		return matches
	def DoValidate(self, input_string):
		all_ids = []
		for token in [x.strip() for x in input_string.split(",")]:
			matches = self.ValueToIds(token)
			if not matches:
				# Unknown value: reject the whole input
				return self.InvalidValue(token)
			all_ids.extend(matches)
		id_list = ",".join(all_ids)
		self.value = self.column_name1 + " IN (" + id_list + ")"
		if self.column_name2:
			self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + id_list + ") )"
# Sample time ranges dialog data item converted and validated using 'samples' SQL table
class SampleTimeRangesDataItem(LineEditDataItem):
	"""Field accepting comma-separated time ranges, converted into a SQL
	condition on sample ids.

	Times may be absolute, or relative (ms/us/ns suffix) to the trace
	start (non-negative values) or end (negative values).  Range bounds
	are mapped to sample ids by binary search on the samples table.
	"""
	def __init__(self, glb, label, placeholder_text, column_name, parent):
		self.column_name = column_name
		# Trace extent, used to clamp ranges and resolve relative times
		self.last_id = 0
		self.first_time = 0
		self.last_time = 2 ** 64
		query = QSqlQuery(glb.db)
		QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
		if query.next():
			self.last_id = int(query.value(0))
		self.first_time = int(glb.HostStartTime())
		self.last_time = int(glb.HostFinishTime())
		if placeholder_text:
			placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
		super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
	def IdBetween(self, query, lower_id, higher_id, order):
		"""Find a sample id strictly between two ids; returns (found, id)."""
		QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
		if query.next():
			return True, int(query.value(0))
		else:
			return False, 0
	def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
		"""Binary search for the sample id bounding target_time.

		get_floor selects whether the returned id bounds the target time
		from below or above.  Gaps in the id sequence are handled by
		probing with IdBetween().
		"""
		query = QSqlQuery(self.glb.db)
		while True:
			next_id = int((lower_id + higher_id) / 2)
			QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
			if not query.next():
				# No sample with this id: probe for a nearby existing id
				ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
				if not ok:
					ok, dbid = self.IdBetween(query, next_id, higher_id, "")
					if not ok:
						return str(higher_id)
				next_id = dbid
				QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
			next_time = int(query.value(0))
			if get_floor:
				if target_time > next_time:
					lower_id = next_id
				else:
					higher_id = next_id
				if higher_id <= lower_id + 1:
					return str(higher_id)
			else:
				if target_time >= next_time:
					lower_id = next_id
				else:
					higher_id = next_id
				if higher_id <= lower_id + 1:
					return str(lower_id)
	def ConvertRelativeTime(self, val):
		"""Convert an 'N ms'/'us'/'ns' relative time to an absolute time.

		Non-negative values are relative to the trace start, negative to
		the trace end.  Unrecognized input is returned unchanged.
		"""
		mult = 1
		suffix = val[-2:]
		if suffix == "ms":
			mult = 1000000
		elif suffix == "us":
			mult = 1000
		elif suffix == "ns":
			mult = 1
		else:
			return val
		val = val[:-2].strip()
		if not self.IsNumber(val):
			return val
		val = int(val) * mult
		if val >= 0:
			val += self.first_time
		else:
			val += self.last_time
		return str(val)
	def ConvertTimeRange(self, vrange):
		"""Convert a [begin, end] pair of time strings into sample id
		bounds, in place.  Returns False if the range is invalid or lies
		entirely outside the trace."""
		if vrange[0] == "":
			vrange[0] = str(self.first_time)
		if vrange[1] == "":
			vrange[1] = str(self.last_time)
		vrange[0] = self.ConvertRelativeTime(vrange[0])
		vrange[1] = self.ConvertRelativeTime(vrange[1])
		if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
			return False
		# Clamp to the trace extent
		beg_range = max(int(vrange[0]), self.first_time)
		end_range = min(int(vrange[1]), self.last_time)
		if beg_range > self.last_time or end_range < self.first_time:
			return False
		vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
		vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
		return True
	def AddTimeRange(self, value, ranges):
		"""Parse one 'a-b' token (where '-' may also begin a negative
		relative time) and append the converted range to ranges."""
		n = value.count("-")
		if n == 1:
			pass
		elif n == 2:
			if value.split("-")[1].strip() == "":
				# Form 'a--b': the second '-' starts a negative time
				n = 1
		elif n == 3:
			n = 2
		else:
			return False
		# Split at the n-th '-', which is the actual range separator
		pos = findnth(value, "-", n)
		vrange = [value[:pos].strip() ,value[pos+1:].strip()]
		if self.ConvertTimeRange(vrange):
			ranges.append(vrange)
			return True
		return False
	def DoValidate(self, input_string):
		ranges = []
		for value in [x.strip() for x in input_string.split(",")]:
			if not self.AddTimeRange(value, ranges):
				return self.InvalidValue(value)
		ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
		self.value = " OR ".join(ranges)
# Report Dialog Base
class ReportDialogBase(QDialog):
	"""Base class for report-creation dialogs.

	Builds a grid of data-item fields plus Ok/Cancel buttons.  On Ok,
	every data item is validated and self.report_vars (name, WHERE
	clause, LIMIT) is assembled for the caller.
	"""
	def __init__(self, glb, title, items, partial, parent=None):
		super(ReportDialogBase, self).__init__(parent)
		self.glb = glb
		self.report_vars = ReportVars()
		self.setWindowTitle(title)
		self.setMinimumWidth(600)
		# items is a sequence of factories: item(glb, parent_dialog)
		self.data_items = [x(glb, self) for x in items]
		# partial: the WHERE clause will be ANDed into an existing one
		self.partial = partial
		self.grid = QGridLayout()
		for row in xrange(len(self.data_items)):
			self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
			self.grid.addWidget(self.data_items[row].widget, row, 1)
		self.status = QLabel()
		self.ok_button = QPushButton("Ok", self)
		self.ok_button.setDefault(True)
		self.ok_button.released.connect(self.Ok)
		self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
		self.cancel_button = QPushButton("Cancel", self)
		self.cancel_button.released.connect(self.reject)
		self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
		self.hbox = QHBoxLayout()
		#self.hbox.addStretch()
		self.hbox.addWidget(self.status)
		self.hbox.addWidget(self.ok_button)
		self.hbox.addWidget(self.cancel_button)
		self.vbox = QVBoxLayout()
		self.vbox.addLayout(self.grid)
		self.vbox.addLayout(self.hbox)
		self.setLayout(self.vbox)
	def Ok(self):
		"""Validate all data items and accept the dialog if they pass."""
		# Local renamed from 'vars' to avoid shadowing the builtin
		rvars = self.report_vars
		for d in self.data_items:
			if d.id == "REPORTNAME":
				rvars.name = d.value
		if not rvars.name:
			self.ShowMessage("Report name is required")
			return
		for d in self.data_items:
			if not d.IsValid():
				return
		# The first data item is the report name, so skip it here
		for d in self.data_items[1:]:
			if d.id == "LIMIT":
				rvars.limit = d.value
			elif len(d.value):
				if len(rvars.where_clause):
					rvars.where_clause += " AND "
				rvars.where_clause += d.value
		if len(rvars.where_clause):
			if self.partial:
				rvars.where_clause = " AND ( " + rvars.where_clause + " ) "
			else:
				rvars.where_clause = " WHERE " + rvars.where_clause + " "
		self.accept()
	def ShowMessage(self, msg):
		# Show an error message in red in the status area
		self.status.setText("<font color=#FF0000>" + msg)
	def ClearMessage(self):
		self.status.setText("")
# Selected branch report creation dialog
class SelectedBranchDialog(ReportDialogBase):
	"""Dialog for creating a branch report filtered by time ranges, CPUs,
	commands, PIDs, TIDs, DSOs, symbols, or a raw SQL WHERE clause."""
	def __init__(self, glb, parent=None):
		title = "Selected Branches"
		# Each entry is a factory producing one dialog field (see
		# ReportDialogBase); the first must be the report name
		items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
			 lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
			 lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
			 lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
			 lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
			 lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
			 lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
			 lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
			 lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
		super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
# Event list
def GetEventList(db):
	"""Return the names of the selected events, ordered by id."""
	names = []
	query = QSqlQuery(db)
	QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id")
	while query.next():
		names.append(query.value(0))
	return names
# Is a table selectable
def IsSelectable(db, table, sql = "", columns = "*"):
	"""Return True if a trial SELECT on 'table' succeeds.

	Used to detect optional tables/columns in older databases, so a
	failing query simply means "not selectable".
	"""
	query = QSqlQuery(db)
	try:
		QueryExec(query, "SELECT " + columns + " FROM " + table + " " + sql + " LIMIT 1")
	except Exception:
		# Narrowed from a bare except so KeyboardInterrupt/SystemExit are
		# not swallowed; any query failure still reports False
		return False
	return True
# SQL table data model item
class SQLTableItem():
	"""One row of an SQL table model: a row index plus its column values."""
	def __init__(self, row, data):
		self.row = row
		self.data = data
	def getData(self, column):
		return self.data[column]
# SQL table data model
class SQLTableModel(TableModel):
	"""Table model populated incrementally from an SQL query.

	Rows are fetched glb_chunk_sz at a time by a SQLFetcher running in a
	separate process; Update() (connected to the fetcher's 'done' signal)
	splices newly arrived rows into the model.
	"""
	progress = Signal(object)
	def __init__(self, glb, sql, column_headers, parent=None):
		super(SQLTableModel, self).__init__(parent)
		self.glb = glb
		self.more = True
		self.populated = 0
		self.column_headers = column_headers
		# Bind the column count into the prep lambda's default argument
		self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
		self.fetcher.done.connect(self.Update)
		self.fetcher.Fetch(glb_chunk_sz)
	def DisplayData(self, item, index):
		# Fetch more data when the view nears the end of what is loaded
		self.FetchIfNeeded(item.row)
		return item.getData(index.column())
	def AddSample(self, data):
		# Called by the fetcher per record; rows are announced to the view
		# later, in Update()
		child = SQLTableItem(self.populated, data)
		self.child_items.append(child)
		self.populated += 1
	def Update(self, fetched):
		"""Splice newly fetched rows into the model and report progress.

		A fetched count of zero means there are no more records.
		"""
		if not fetched:
			self.more = False
			self.progress.emit(0)
		child_count = self.child_count
		count = self.populated - child_count
		if count > 0:
			parent = QModelIndex()
			self.beginInsertRows(parent, child_count, child_count + count - 1)
			self.insertRows(child_count, count, parent)
			self.child_count += count
			self.endInsertRows()
			self.progress.emit(self.child_count)
	def FetchMoreRecords(self, count):
		"""Request more records; returns the current row count."""
		current = self.child_count
		if self.more:
			self.fetcher.Fetch(count)
		else:
			self.progress.emit(0)
		return current
	def HasMoreRecords(self):
		return self.more
	def columnCount(self, parent=None):
		return len(self.column_headers)
	def columnHeader(self, column):
		return self.column_headers[column]
	def SQLTableDataPrep(self, query, count):
		# Default row preparation: take the first 'count' column values
		data = []
		for i in xrange(count):
			data.append(query.value(i))
		return data
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
	"""SQLTableModel whose query and column headers are derived
	automatically from a table (or view) name."""
	def __init__(self, glb, table_name, parent=None):
		sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz)
		if table_name == "comm_threads_view":
			# For now, comm_threads_view has no id column
			sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
		column_headers = []
		query = QSqlQuery(glb.db)
		if glb.dbref.is_sqlite3:
			# SQLite: column names come from PRAGMA table_info
			QueryExec(query, "PRAGMA table_info(" + table_name + ")")
			while query.next():
				column_headers.append(query.value(1))
			if table_name == "sqlite_master":
				sql = "SELECT * FROM " + table_name
		else:
			# PostgreSQL: column names come from information_schema
			if table_name[:19] == "information_schema.":
				sql = "SELECT * FROM " + table_name
				select_table_name = table_name[19:]
				schema = "information_schema"
			else:
				select_table_name = table_name
				schema = "public"
			QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
			while query.next():
				column_headers.append(query.value(0))
		if pyside_version_1 and sys.version_info[0] == 3:
			# pyside v1 under python3 cannot marshal very large integers,
			# so use prep functions that stringify the time column
			if table_name == "samples_view":
				self.SQLTableDataPrep = self.samples_view_DataPrep
			if table_name == "samples":
				self.SQLTableDataPrep = self.samples_DataPrep
		super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
	def samples_view_DataPrep(self, query, count):
		data = []
		data.append(query.value(0))
		# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
		data.append("{:>19}".format(query.value(1)))
		for i in xrange(2, count):
			data.append(query.value(i))
		return data
	def samples_DataPrep(self, query, count):
		data = []
		for i in xrange(9):
			data.append(query.value(i))
		# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
		data.append("{:>19}".format(query.value(9)))
		for i in xrange(10, count):
			data.append(query.value(i))
		return data
# Base class for custom ResizeColumnsToContents
class ResizeColumnsToContentsBase(QObject):
	"""Mixin providing a fast ResizeColumnsToContents for table views.

	Expects the subclass to provide self.view (the view) and
	self.data_model (with child_items, child_count, columnCount and
	columnHeader).
	"""
	def __init__(self, parent=None):
		super(ResizeColumnsToContentsBase, self).__init__(parent)
	def ResizeColumnToContents(self, column, n):
		"""Set the column width from the first n rows plus the header."""
		# Using the view's resizeColumnToContents() here is extremely slow
		# so implement a crude alternative
		font = self.view.font()
		metrics = QFontMetrics(font)
		# Locals renamed from 'max'/'len' to avoid shadowing builtins
		max_width = 0
		for row in xrange(n):
			val = self.data_model.child_items[row].data[column]
			width = metrics.width(str(val) + "MM")
			max_width = max(max_width, width)
		val = self.data_model.columnHeader(column)
		width = metrics.width(str(val) + "MM")
		max_width = max(max_width, width)
		self.view.setColumnWidth(column, max_width)
	def ResizeColumnsToContents(self):
		"""Size all columns, deferring until some data has arrived."""
		n = min(self.data_model.child_count, 100)
		if n < 1:
			# No data yet, so connect a signal to notify when there is
			self.data_model.rowsInserted.connect(self.UpdateColumnWidths)
			return
		columns = self.data_model.columnCount()
		for i in xrange(columns):
			self.ResizeColumnToContents(i, n)
	def UpdateColumnWidths(self, *x):
		# This only needs to be done once, so disconnect the signal now
		self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
		self.ResizeColumnsToContents()
# Convert value to CSV
def ToCSValue(val):
	"""Escape and quote a value for CSV output (RFC 4180 style):
	double any quotes, then wrap in quotes if a comma or quote occurs."""
	escaped = val.replace('"', '""') if '"' in val else val
	if "," in escaped or '"' in escaped:
		return '"' + escaped + '"'
	return escaped
# Key to sort table model indexes by row / column, assuming fewer than 1000 columns
glb_max_cols = 1000
def RowColumnKey(a):
	"""Sort key ordering model indexes row-major, assuming fewer than
	glb_max_cols columns."""
	return a.column() + a.row() * glb_max_cols
# Copy selected table cells to clipboard
def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
	"""Copy the selected cells of a table view to the clipboard.

	as_csv selects CSV output (otherwise space-padded columns); with_hdr
	prepends the column headers (ignored for a single cell).
	"""
	indexes = sorted(view.selectedIndexes(), key=RowColumnKey)
	idx_cnt = len(indexes)
	if not idx_cnt:
		return
	if idx_cnt == 1:
		with_hdr=False
	min_row = indexes[0].row()
	max_row = indexes[0].row()
	min_col = indexes[0].column()
	max_col = indexes[0].column()
	for i in indexes:
		min_row = min(min_row, i.row())
		max_row = max(max_row, i.row())
		min_col = min(min_col, i.column())
		max_col = max(max_col, i.column())
	# Fix off-by-one: a column index equal to glb_max_cols would make
	# RowColumnKey collide with the next row's first column
	if max_col >= glb_max_cols:
		raise RuntimeError("glb_max_cols is too low")
	# Widest cell per column, for space-padded output
	max_width = [0] * (1 + max_col - min_col)
	for i in indexes:
		c = i.column() - min_col
		max_width[c] = max(max_width[c], len(str(i.data())))
	text = ""
	pad = ""
	sep = ""
	if with_hdr:
		model = indexes[0].model()
		for col in range(min_col, max_col + 1):
			val = model.headerData(col, Qt.Horizontal)
			if as_csv:
				text += sep + ToCSValue(val)
				sep = ","
			else:
				c = col - min_col
				max_width[c] = max(max_width[c], len(val))
				width = max_width[c]
				align = model.headerData(col, Qt.Horizontal, Qt.TextAlignmentRole)
				if align & Qt.AlignRight:
					val = val.rjust(width)
				text += pad + sep + val
				# Defer padding so trailing spaces are not emitted
				pad = " " * (width - len(val))
				sep = " "
		text += "\n"
		pad = ""
		sep = ""
	last_row = min_row
	for i in indexes:
		if i.row() > last_row:
			last_row = i.row()
			text += "\n"
			pad = ""
			sep = ""
		if as_csv:
			text += sep + ToCSValue(str(i.data()))
			sep = ","
		else:
			width = max_width[i.column() - min_col]
			if i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
				val = str(i.data()).rjust(width)
			else:
				val = str(i.data())
			text += pad + sep + val
			pad = " " * (width - len(val))
			sep = " "
	QApplication.clipboard().setText(text)
def CopyTreeCellsToClipboard(view, as_csv=False, with_hdr=False):
	"""Copy the selected cells of a tree view to the clipboard.

	Rows are emitted top-down starting from the top-most selected index.
	In plain-text mode, column 0 is indented per tree level and prefixed
	with an expanded / not-expanded / leaf marker, mirroring the on-screen
	appearance.

	view     - the tree view whose selection is copied
	as_csv   - if True produce CSV, otherwise space-padded columns
	with_hdr - if True prepend a header row
	"""
	indexes = view.selectedIndexes()
	if not len(indexes):
		return
	selection = view.selectionModel()
	# Find the top-most selected index: the one whose index above it is
	# not itself selected
	first = None
	for i in indexes:
		above = view.indexAbove(i)
		if not selection.isSelected(above):
			first = i
			break
	if first is None:
		raise RuntimeError("CopyTreeCellsToClipboard internal error")
	model = first.model()
	row_cnt = 0
	col_cnt = model.columnCount(first)
	max_width = [0] * col_cnt
	indent_sz = 2
	indent_str = " " * indent_sz
	expanded_mark_sz = 2
	if sys.version_info[0] == 3:
		# U+25BC (down triangle) and U+25B6 (right triangle), plus a space
		expanded_mark = "\u25BC "
		not_expanded_mark = "\u25B6 "
	else:
		# Python 2: build the same triangles from their UTF-8 byte sequences
		expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xBC) + " ", "utf-8")
		not_expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xB6) + " ", "utf-8")
	leaf_mark = " "
	if not as_csv:
		# First pass: measure column widths, walking from 'first' down the
		# visible rows for as long as they remain selected
		pos = first
		while True:
			row_cnt += 1
			row = pos.row()
			for c in range(col_cnt):
				i = pos.sibling(row, c)
				if c:
					n = len(str(i.data()))
				else:
					# Column 0 also carries the indent and the marker
					n = len(str(i.data()).strip())
					n += (i.internalPointer().level - 1) * indent_sz
					n += expanded_mark_sz
				max_width[c] = max(max_width[c], n)
			pos = view.indexBelow(pos)
			if not selection.isSelected(pos):
				break
	text = ""
	pad = ""
	sep = ""
	if with_hdr:
		for c in range(col_cnt):
			val = model.headerData(c, Qt.Horizontal, Qt.DisplayRole).strip()
			if as_csv:
				text += sep + ToCSValue(val)
				sep = ","
			else:
				max_width[c] = max(max_width[c], len(val))
				width = max_width[c]
				align = model.headerData(c, Qt.Horizontal, Qt.TextAlignmentRole)
				if align & Qt.AlignRight:
					val = val.rjust(width)
				# Padding is emitted lazily, just before the next value
				text += pad + sep + val
				pad = " " * (width - len(val))
				sep = " "
		text += "\n"
		pad = ""
		sep = ""
	# Second pass: emit the cell values
	pos = first
	while True:
		row = pos.row()
		for c in range(col_cnt):
			i = pos.sibling(row, c)
			val = str(i.data())
			if not c:
				# Reproduce the tree decoration for column 0
				if model.hasChildren(i):
					if view.isExpanded(i):
						mark = expanded_mark
					else:
						mark = not_expanded_mark
				else:
					mark = leaf_mark
				val = indent_str * (i.internalPointer().level - 1) + mark + val.strip()
			if as_csv:
				text += sep + ToCSValue(val)
				sep = ","
			else:
				width = max_width[c]
				if c and i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
					val = val.rjust(width)
				text += pad + sep + val
				pad = " " * (width - len(val))
				sep = " "
		pos = view.indexBelow(pos)
		if not selection.isSelected(pos):
			break
		text = text.rstrip() + "\n"
		pad = ""
		sep = ""
	QApplication.clipboard().setText(text)
def CopyCellsToClipboard(view, as_csv=False, with_hdr=False):
	"""Dispatch to the copy routine attached to *view* (table or tree variant)."""
	copy_fn = view.CopyCellsToClipboard
	copy_fn(view, as_csv, with_hdr)
def CopyCellsToClipboardHdr(view):
	"""Copy the selection as padded plain text, including column headers."""
	CopyCellsToClipboard(view, as_csv=False, with_hdr=True)
def CopyCellsToClipboardCSV(view):
	"""Copy the selection as CSV, including column headers."""
	CopyCellsToClipboard(view, as_csv=True, with_hdr=True)
# Context menu
class ContextMenu(object):
	"""Attach a right-click context menu with copy actions to a view."""
	def __init__(self, view):
		self.view = view
		self.view.setContextMenuPolicy(Qt.CustomContextMenu)
		self.view.customContextMenuRequested.connect(self.ShowContextMenu)
	def ShowContextMenu(self, pos):
		# Build the menu afresh each time so AddActions() can reflect state
		context_menu = QMenu(self.view)
		self.AddActions(context_menu)
		context_menu.exec_(self.view.mapToGlobal(pos))
	def AddCopy(self, menu):
		# Standard copy actions shared by all views
		menu.addAction(CreateAction("&Copy selection", "Copy to clipboard", lambda: CopyCellsToClipboardHdr(self.view), self.view))
		menu.addAction(CreateAction("Copy selection as CS&V", "Copy to clipboard as CSV", lambda: CopyCellsToClipboardCSV(self.view), self.view))
	def AddActions(self, menu):
		# Subclasses may override to prepend extra actions
		self.AddCopy(menu)
class TreeContextMenu(ContextMenu):
	"""Context menu for tree views: adds a copy-this-cell action first."""
	def __init__(self, view):
		super(TreeContextMenu, self).__init__(view)
	def AddActions(self, menu):
		# Offer a direct copy of the current cell's (stripped) text
		cell_text = str(self.view.currentIndex().data()).strip()
		if cell_text:
			menu.addAction(CreateAction('Copy "' + cell_text + '"', "Copy to clipboard", lambda: QApplication.clipboard().setText(cell_text), self.view))
		self.AddCopy(menu)
# Table window
class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
	"""MDI sub-window showing one database table in a sortable table view,
	with find and fetch-more-records bars."""
	def __init__(self, glb, table_name, parent=None):
		super(TableWindow, self).__init__(parent)
		# Look up or create the model for this table (LookupCreateModel
		# keys by name — presumably shared between windows; confirm there)
		self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name))
		# The proxy model provides sorting on top of the SQL model
		self.model = QSortFilterProxyModel()
		self.model.setSourceModel(self.data_model)
		self.view = QTableView()
		self.view.setModel(self.model)
		self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
		self.view.verticalHeader().setVisible(False)
		# Start with no sort column (-1) i.e. database order
		self.view.sortByColumn(-1, Qt.AscendingOrder)
		self.view.setSortingEnabled(True)
		self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
		# Used by the Edit menu / context menu copy actions
		self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
		self.ResizeColumnsToContents()
		self.context_menu = ContextMenu(self.view)
		self.find_bar = FindBar(self, self, True)
		self.finder = ChildDataItemFinder(self.data_model)
		self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
		self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
		self.setWidget(self.vbox.Widget())
		AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table")
	def Find(self, value, direction, pattern, context):
		# Callback from the find bar; FindDone() receives the result row
		self.view.setFocus()
		self.find_bar.Busy()
		self.finder.Find(value, direction, pattern, context, self.FindDone)
	def FindDone(self, row):
		self.find_bar.Idle()
		if row >= 0:
			# Map the source-model row through the (possibly sorted) proxy
			self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
		else:
			self.find_bar.NotFound()
# Table list
def GetTableList(glb):
	"""Return the names of all tables and views in the database, followed
	by the database engine's own meta-data tables, for the Tables menu."""
	if glb.dbref.is_sqlite3:
		sql = "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name"
		meta_tables = ["sqlite_master"]
	else:
		sql = "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name"
		meta_tables = ["information_schema.tables", "information_schema.views", "information_schema.columns"]
	query = QSqlQuery(glb.db)
	QueryExec(query, sql)
	tables = []
	while query.next():
		tables.append(query.value(0))
	tables.extend(meta_tables)
	return tables
# Top Calls data model
class TopCallsModel(SQLTableModel):
	"""Table model for the "Top calls by elapsed time" report.

	Builds a single SQL query over the calls table, ordered by elapsed
	time descending, filtered by the report's WHERE clause and optional
	record limit.
	"""
	def __init__(self, glb, report_vars, parent=None):
		# PostgreSQL gets a "::text" cast appended to the CASE literals;
		# SQLite needs none, so 'text' stays empty there
		text = ""
		if not glb.dbref.is_sqlite3:
			text = "::text"
		limit = ""
		if len(report_vars.limit):
			limit = " LIMIT " + report_vars.limit
		sql = ("SELECT comm, pid, tid, name,"
			" CASE"
			" WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
			" ELSE short_name"
			" END AS dso,"
			" call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
			" CASE"
			" WHEN (calls.flags = 1) THEN 'no call'" + text +
			" WHEN (calls.flags = 2) THEN 'no return'" + text +
			" WHEN (calls.flags = 3) THEN 'no call/return'" + text +
			" ELSE ''" + text +
			" END AS flags"
			" FROM calls"
			" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
			" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
			" INNER JOIN dsos ON symbols.dso_id = dsos.id"
			" INNER JOIN comms ON calls.comm_id = comms.id"
			" INNER JOIN threads ON calls.thread_id = threads.id" +
			report_vars.where_clause +
			" ORDER BY elapsed_time DESC" +
			limit
			)
		column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
		# Right-align the numeric columns (Elapsed Time, Branch Count)
		self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
		super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
	def columnAlignment(self, column):
		"""Return the Qt alignment flag for the given column."""
		return self.alignment[column]
# Top Calls report creation dialog
class TopCallsDialog(ReportDialogBase):
	"""Dialog collecting the selection criteria for a "Top calls by elapsed
	time" report.

	Each entry in 'items' is a factory taking (glb, parent) and returning a
	data item widget; per the help text, the criteria are AND'ed together.
	"""
	def __init__(self, glb, parent=None):
		title = "Top Calls by Elapsed Time"
		items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
			lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
			lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
			lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
			lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
			lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
			lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
			lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
		super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
# Top Calls window
class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
	"""MDI sub-window displaying the "Top calls by elapsed time" report."""
	def __init__(self, glb, report_vars, parent=None):
		super(TopCallsWindow, self).__init__(parent)
		# Look up or create a model keyed by the report criteria
		self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
		# No proxy model: the SQL query already orders the rows
		self.model = self.data_model
		self.view = QTableView()
		self.view.setModel(self.model)
		self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
		self.view.verticalHeader().setVisible(False)
		self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
		# Used by the Edit menu / context menu copy actions
		self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
		self.context_menu = ContextMenu(self.view)
		self.ResizeColumnsToContents()
		self.find_bar = FindBar(self, self, True)
		self.finder = ChildDataItemFinder(self.model)
		self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
		self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
		self.setWidget(self.vbox.Widget())
		AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
	def Find(self, value, direction, pattern, context):
		# Callback from the find bar; FindDone() receives the result row
		self.view.setFocus()
		self.find_bar.Busy()
		self.finder.Find(value, direction, pattern, context, self.FindDone)
	def FindDone(self, row):
		self.find_bar.Idle()
		if row >= 0:
			self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
		else:
			self.find_bar.NotFound()
# Action Definition
def CreateAction(label, tip, callback, parent=None, shortcut=None):
	"""Create a QAction.

	label    - menu text (may contain '&' accelerator markers)
	tip      - status bar tip
	callback - slot connected to the action's 'triggered' signal
	parent   - owning QObject
	shortcut - optional shortcut(s) passed to setShortcuts()
	"""
	action = QAction(label, parent)
	# 'is not None' rather than '!= None': identity test per PEP 8, and it
	# cannot be confused by a shortcut type overriding __eq__
	if shortcut is not None:
		action.setShortcuts(shortcut)
	action.setStatusTip(tip)
	action.triggered.connect(callback)
	return action
# Typical application actions
def CreateExitAction(app, parent=None):
	"""Create the File->Quit action: closes all application windows."""
	return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit)
# Typical MDI actions
def CreateCloseActiveWindowAction(mdi_area):
	"""Create an action that closes the active MDI sub-window."""
	return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area)
def CreateCloseAllWindowsAction(mdi_area):
	"""Create an action that closes all MDI sub-windows."""
	return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area)
def CreateTileWindowsAction(mdi_area):
	"""Create an action that tiles the MDI sub-windows."""
	return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area)
def CreateCascadeWindowsAction(mdi_area):
	"""Create an action that cascades the MDI sub-windows."""
	return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area)
def CreateNextWindowAction(mdi_area):
	"""Create an action that moves focus to the next MDI sub-window."""
	return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild)
def CreatePreviousWindowAction(mdi_area):
	"""Create an action that moves focus to the previous MDI sub-window."""
	return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild)
# Typical MDI window menu
class WindowMenu():
	"""The "Windows" menu: standard MDI window-management actions plus one
	numbered, checkable entry per open sub-window.

	The menu content is rebuilt every time it is about to be shown.
	"""
	def __init__(self, mdi_area, menu):
		self.mdi_area = mdi_area
		self.window_menu = menu.addMenu("&Windows")
		self.close_active_window = CreateCloseActiveWindowAction(mdi_area)
		self.close_all_windows = CreateCloseAllWindowsAction(mdi_area)
		self.tile_windows = CreateTileWindowsAction(mdi_area)
		self.cascade_windows = CreateCascadeWindowsAction(mdi_area)
		self.next_window = CreateNextWindowAction(mdi_area)
		self.previous_window = CreatePreviousWindowAction(mdi_area)
		self.window_menu.aboutToShow.connect(self.Update)
	def Update(self):
		# Rebuild the menu to reflect the current set of sub-windows
		self.window_menu.clear()
		sub_window_count = len(self.mdi_area.subWindowList())
		have_sub_windows = sub_window_count != 0
		# The management actions are meaningless with no windows open
		self.close_active_window.setEnabled(have_sub_windows)
		self.close_all_windows.setEnabled(have_sub_windows)
		self.tile_windows.setEnabled(have_sub_windows)
		self.cascade_windows.setEnabled(have_sub_windows)
		self.next_window.setEnabled(have_sub_windows)
		self.previous_window.setEnabled(have_sub_windows)
		self.window_menu.addAction(self.close_active_window)
		self.window_menu.addAction(self.close_all_windows)
		self.window_menu.addSeparator()
		self.window_menu.addAction(self.tile_windows)
		self.window_menu.addAction(self.cascade_windows)
		self.window_menu.addSeparator()
		self.window_menu.addAction(self.next_window)
		self.window_menu.addAction(self.previous_window)
		if sub_window_count == 0:
			return
		self.window_menu.addSeparator()
		nr = 1
		for sub_window in self.mdi_area.subWindowList():
			label = str(nr) + " " + sub_window.name
			if nr < 10:
				# Single-digit entries get a keyboard accelerator
				label = "&" + label
			action = self.window_menu.addAction(label)
			action.setCheckable(True)
			action.setChecked(sub_window == self.mdi_area.activeSubWindow())
			# x=nr binds the current value at definition time; a bare
			# closure over nr would see the final loop value in every entry
			action.triggered.connect(lambda a=None,x=nr: self.setActiveSubWindow(x))
			self.window_menu.addAction(action)
			nr += 1
	def setActiveSubWindow(self, nr):
		# nr is 1-based, matching the menu labels
		self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
# Help text
glb_help_text = """
<h1>Contents</h1>
<style>
p.c1 {
text-indent: 40px;
}
p.c2 {
text-indent: 80px;
}
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
<p class=c2><a href=#allbranches>1.3 All branches</a></p>
<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#charts>2. Charts</a></p>
<p class=c2><a href=#timechartbycpu>2.1 Time chart by CPU</a></p>
<p class=c1><a href=#tables>3. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
The result is a GUI window with a tree representing a context-sensitive
call-graph. Expanding a couple of levels of the tree and adjusting column
widths to suit will display something like:
<pre>
Call Graph: pt_example
Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
v- ls
v- 2638:2638
v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
|- unknown unknown 1 13198 0.1 1 0.0
>- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
>- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
>- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
>- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
>- __libc_csu_init ls 1 10354 0.1 10 0.0
|- _setjmp libc-2.19.so 1 0 0.0 4 0.0
v- main ls 1 8182043 99.6 180254 99.9
</pre>
<h3>Points to note:</h3>
<ul>
<li>The top level is a command name (comm)</li>
<li>The next level is a thread (pid:tid)</li>
<li>Subsequent levels are functions</li>
<li>'Count' is the number of calls</li>
<li>'Time' is the elapsed time until the function returns</li>
<li>Percentages are relative to the level above</li>
<li>'Branch Count' is the total number of branches for that function and all functions that it calls
</ul>
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
<h2 id=calltree>1.2 Call Tree</h2>
The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
Also the 'Count' column, which would be always 1, is replaced by the 'Call Time'.
<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
Open a branch to display disassembly. This only works if:
<ol>
<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
One exception is kcore where the DSO long name is used (refer dsos_view on the Tables menu),
or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
</ol>
<h4 id=xed>Intel XED Setup</h4>
To use Intel XED, libxed.so must be present. To build and install libxed.so:
<pre>
git clone https://github.com/intelxed/mbuild.git mbuild
git clone https://github.com/intelxed/xed
cd xed
./mfile.py --share
sudo ./mfile.py --prefix=/usr/local install
sudo ldconfig
</pre>
<h3>Instructions per Cycle (IPC)</h3>
If available, IPC information is displayed in columns 'insn_cnt', 'cyc_cnt' and 'IPC'.
<p><b>Intel PT note:</b> The information applies to the blocks of code ending with, and including, that branch.
Due to the granularity of timing information, the number of cycles for some code blocks will not be known.
In that case, 'insn_cnt', 'cyc_cnt' and 'IPC' are zero, but when 'IPC' is displayed it covers the period
since the previous displayed 'IPC'.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of trace. Examples:
<pre>
81073085947329-81073085958238 From 81073085947329 to 81073085958238
100us-200us From 100us to 200us
10ms- From 10ms to the end
-100ns The first 100ns
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
<h1 id=charts>2. Charts</h1>
<h2 id=timechartbycpu>2.1 Time chart by CPU</h2>
This chart displays context switch information when that data is available. Refer to context_switches_view on the Tables menu.
<h3>Features</h3>
<ol>
<li>Mouse over to highlight the task and show the time</li>
<li>Drag the mouse to select a region and zoom by pushing the Zoom button</li>
<li>Go back and forward by pressing the arrow buttons</li>
<li>If call information is available, right-click to show a call tree opened to that task and time.
Note, the call tree may take some time to appear, and there may not be call information for the task or time selected.
</li>
</ol>
<h3>Important</h3>
The graph can be misleading in the following respects:
<ol>
<li>The graph shows the first task on each CPU as running from the beginning of the time range.
Because tracing might start on different CPUs at different times, that is not necessarily the case.
Refer to context_switches_view on the Tables menu to understand what data the graph is based upon.</li>
<li>Similarly, the last task on each CPU can be showing running longer than it really was.
Again, refer to context_switches_view on the Tables menu to understand what data the graph is based upon.</li>
<li>When the mouse is over a task, the highlighted task might not be visible on the legend without scrolling if the legend does not fit fully in the window</li>
</ol>
<h1 id=tables>3. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
but that can be slow for large tables.
<p>There are also tables of database meta-information.
For SQLite3 databases, the sqlite_master table is included.
For PostgreSQL databases, information_schema.tables/views/columns are included.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
will go to the next/previous result in id order, instead of display order.
"""
# Help window
class HelpWindow(QMdiSubWindow):
	"""MDI sub-window displaying the built-in help text (glb_help_text)."""
	def __init__(self, glb, parent=None):
		super(HelpWindow, self).__init__(parent)
		self.text = QTextBrowser()
		self.text.setHtml(glb_help_text)
		self.text.setReadOnly(True)
		self.text.setOpenExternalLinks(True)
		self.setWidget(self.text)
		AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
# Main window that only displays the help text
class HelpOnlyWindow(QMainWindow):
	"""Stand-alone top-level window that displays only the help text."""
	def __init__(self, parent=None):
		super(HelpOnlyWindow, self).__init__(parent)
		self.setMinimumSize(200, 100)
		self.resize(800, 600)
		self.setWindowTitle("Exported SQL Viewer Help")
		self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
		self.text = QTextBrowser()
		self.text.setHtml(glb_help_text)
		self.text.setReadOnly(True)
		self.text.setOpenExternalLinks(True)
		self.setCentralWidget(self.text)
# PostqreSQL server version
def PostqreSQLServerVersion(db):
	"""Return the PostgreSQL server version, or "Unknown".

	Extracts the version number from VERSION() output of the form
	"PostgreSQL <version> on <platform> ...", otherwise returns the whole
	version string.
	"""
	query = QSqlQuery(db)
	QueryExec(query, "SELECT VERSION()")
	if not query.next():
		return "Unknown"
	version_str = query.value(0)
	words = version_str.strip().split(" ")
	if words[0] == "PostgreSQL" and words[2] == "on":
		return words[1]
	return version_str
# SQLite version
def SQLiteVersion(db):
	"""Return the SQLite library version string, or "Unknown"."""
	query = QSqlQuery(db)
	QueryExec(query, "SELECT sqlite_version()")
	return query.value(0) if query.next() else "Unknown"
# About dialog
class AboutDialog(QDialog):
	"""The Help->About dialog: shows Python, PySide, Qt and database
	library versions."""
	def __init__(self, glb, parent=None):
		super(AboutDialog, self).__init__(parent)
		self.setWindowTitle("About Exported SQL Viewer")
		self.setMinimumWidth(300)
		pyside_version = "1" if pyside_version_1 else "2"
		text = "<pre>"
		text += "Python version: " + sys.version.split(" ")[0] + "\n"
		text += "PySide version: " + pyside_version + "\n"
		text += "Qt version: " + qVersion() + "\n"
		if glb.dbref.is_sqlite3:
			text += "SQLite version: " + SQLiteVersion(glb.db) + "\n"
		else:
			text += "PostqreSQL version: " + PostqreSQLServerVersion(glb.db) + "\n"
		text += "</pre>"
		self.text = QTextBrowser()
		self.text.setHtml(text)
		self.text.setReadOnly(True)
		self.text.setOpenExternalLinks(True)
		self.vbox = QVBoxLayout()
		self.vbox.addWidget(self.text)
		self.setLayout(self.vbox)
# Font resize
def ResizeFont(widget, diff):
	"""Adjust the point size of *widget*'s font by *diff* points."""
	font = widget.font()
	font.setPointSize(font.pointSize() + diff)
	widget.setFont(font)
def ShrinkFont(widget):
	"""Decrease the widget's font size by one point."""
	ResizeFont(widget, -1)
def EnlargeFont(widget):
	"""Increase the widget's font size by one point."""
	ResizeFont(widget, 1)
# Unique name for sub-windows
def NumberedWindowName(name, nr):
	"""Return *name*, suffixed with " <nr>" when nr is greater than one."""
	if nr <= 1:
		return name
	return name + " <" + str(nr) + ">"
def UniqueSubWindowName(mdi_area, name):
	"""Return the first numbered variant of *name* not already used as the
	title of a sub-window in *mdi_area*."""
	nr = 1
	while True:
		candidate = NumberedWindowName(name, nr)
		in_use = any(w.name == candidate for w in mdi_area.subWindowList())
		if not in_use:
			return candidate
		nr += 1
# Add a sub-window
def AddSubWindow(mdi_area, sub_window, name):
	"""Register *sub_window* in *mdi_area* under a unique numbered title,
	give it a default size and icon, and show it.

	The unique title is also stored as sub_window.name, which WindowMenu
	and UniqueSubWindowName rely on.
	"""
	unique_name = UniqueSubWindowName(mdi_area, name)
	sub_window.setMinimumSize(200, 100)
	sub_window.resize(800, 600)
	sub_window.setWindowTitle(unique_name)
	# Delete the widget when its window is closed (Qt.WA_DeleteOnClose)
	sub_window.setAttribute(Qt.WA_DeleteOnClose)
	sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon))
	sub_window.name = unique_name
	mdi_area.addSubWindow(sub_window)
	sub_window.show()
# Main window
class MainWindow(QMainWindow):
	"""Top-level application window.

	Hosts an MDI area and builds the menu bar (File, Edit, Reports, Charts
	when context-switch data exists, Tables, Windows, Help).  The menu
	actions create the various report / chart / table sub-windows.
	"""
	def __init__(self, glb, parent=None):
		super(MainWindow, self).__init__(parent)
		self.glb = glb
		self.setWindowTitle("Exported SQL Viewer: " + glb.dbname)
		self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon))
		self.setMinimumSize(200, 100)
		self.mdi_area = QMdiArea()
		self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
		self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
		self.setCentralWidget(self.mdi_area)
		menu = self.menuBar()
		file_menu = menu.addMenu("&File")
		file_menu.addAction(CreateExitAction(glb.app, self))
		edit_menu = menu.addMenu("&Edit")
		edit_menu.addAction(CreateAction("&Copy", "Copy to clipboard", self.CopyToClipboard, self, QKeySequence.Copy))
		edit_menu.addAction(CreateAction("Copy as CS&V", "Copy to clipboard as CSV", self.CopyToClipboardCSV, self))
		edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find))
		edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)]))
		edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")]))
		edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
		# Report entries are only offered if the underlying data exists
		reports_menu = menu.addMenu("&Reports")
		if IsSelectable(glb.db, "calls"):
			reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
		if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
			reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
		self.EventMenu(GetEventList(glb.db), reports_menu)
		if IsSelectable(glb.db, "calls"):
			reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
		if IsSelectable(glb.db, "context_switches"):
			charts_menu = menu.addMenu("&Charts")
			charts_menu.addAction(CreateAction("&Time chart by CPU", "Create a new window displaying time charts by CPU", self.TimeChartByCPU, self))
		self.TableMenu(GetTableList(glb), menu)
		self.window_menu = WindowMenu(self.mdi_area, menu)
		help_menu = menu.addMenu("&Help")
		help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
		help_menu.addAction(CreateAction("&About Exported SQL Viewer", "About this application", self.About, self))
	def Try(self, fn):
		"""Apply *fn* to the active sub-window's view, silently ignoring
		sub-windows that do not support the operation."""
		win = self.mdi_area.activeSubWindow()
		if win:
			try:
				fn(win.view)
			except Exception:
				# Best-effort dispatch: not every sub-window type
				# supports every Edit action
				pass
	def CopyToClipboard(self):
		self.Try(CopyCellsToClipboardHdr)
	def CopyToClipboardCSV(self):
		self.Try(CopyCellsToClipboardCSV)
	def Find(self):
		# Activate the active sub-window's find bar, if it has one
		win = self.mdi_area.activeSubWindow()
		if win:
			try:
				win.find_bar.Activate()
			except Exception:
				pass
	def FetchMoreRecords(self):
		# Activate the active sub-window's fetch bar, if it has one
		win = self.mdi_area.activeSubWindow()
		if win:
			try:
				win.fetch_bar.Activate()
			except Exception:
				pass
	def ShrinkFont(self):
		self.Try(ShrinkFont)
	def EnlargeFont(self):
		self.Try(EnlargeFont)
	def EventMenu(self, events, reports_menu):
		"""Add branch-report entries to the Reports menu: one "All" and one
		"Selected" entry per 'branches' event, suffixed with the event's
		database id when more than one branches event exists."""
		branches_events = 0
		for event in events:
			event = event.split(":")[0]
			if event == "branches":
				branches_events += 1
		dbid = 0
		for event in events:
			dbid += 1
			event = event.split(":")[0]
			if event == "branches":
				# str(dbid) is required: dbid is an int, and concatenating
				# it directly to a str raises TypeError
				label = "All branches" if branches_events == 1 else "All branches " + "(id=" + str(dbid) + ")"
				reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewBranchView(x), self))
				label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + str(dbid) + ")"
				reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewSelectedBranchView(x), self))
	def TimeChartByCPU(self):
		TimeChartByCPUWindow(self.glb, self)
	def TableMenu(self, tables, menu):
		# One entry per table; t=table binds the current loop value
		table_menu = menu.addMenu("&Tables")
		for table in tables:
			table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None,t=table: self.NewTableView(t), self))
	def NewCallGraph(self):
		CallGraphWindow(self.glb, self)
	def NewCallTree(self):
		CallTreeWindow(self.glb, self)
	def NewTopCalls(self):
		dialog = TopCallsDialog(self.glb, self)
		ret = dialog.exec_()
		if ret:
			TopCallsWindow(self.glb, dialog.report_vars, self)
	def NewBranchView(self, event_id):
		BranchWindow(self.glb, event_id, ReportVars(), self)
	def NewSelectedBranchView(self, event_id):
		dialog = SelectedBranchDialog(self.glb, self)
		ret = dialog.exec_()
		if ret:
			BranchWindow(self.glb, event_id, dialog.report_vars, self)
	def NewTableView(self, table_name):
		TableWindow(self.glb, table_name, self)
	def Help(self):
		HelpWindow(self.glb, self)
	def About(self):
		dialog = AboutDialog(self.glb, self)
		dialog.exec_()
# XED Disassembler
class xed_state_t(Structure):
	"""ctypes mirror of libxed's xed_state_t: the machine mode and address
	width passed to xed_operand_values_set_mode() (see LibXED.SetMode)."""
	_fields_ = [
		("mode", c_int),
		("width", c_int)
	]
class XEDInstruction():
	"""Per-instruction workspace for libxed: a decoded-instruction buffer,
	a mode/width state structure, and a text output buffer, together with
	their addresses for passing through ctypes."""
	def __init__(self, libxed):
		# Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
		xedd_t = c_byte * 512
		self.xedd = xedd_t()
		self.xedp = addressof(self.xedd)
		libxed.xed_decoded_inst_zero(self.xedp)
		self.state = xed_state_t()
		self.statep = addressof(self.state)
		# Buffer for disassembled instruction text
		self.buffer = create_string_buffer(256)
		self.bufferp = addressof(self.buffer)
class LibXED():
	"""ctypes binding to the Intel XED disassembler (libxed.so).

	Construction raises (from CDLL) if the library cannot be loaded,
	which the caller uses to decide whether disassembly is available.
	"""
	def __init__(self):
		try:
			self.libxed = CDLL("libxed.so")
		except:
			self.libxed = None
		if not self.libxed:
			# Fall back to the default install prefix of a local XED build
			self.libxed = CDLL("/usr/local/lib/libxed.so")
		# Declare prototypes for the libxed entry points used below
		self.xed_tables_init = self.libxed.xed_tables_init
		self.xed_tables_init.restype = None
		self.xed_tables_init.argtypes = []
		self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
		self.xed_decoded_inst_zero.restype = None
		self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
		self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
		self.xed_operand_values_set_mode.restype = None
		self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
		self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
		self.xed_decoded_inst_zero_keep_mode.restype = None
		self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
		self.xed_decode = self.libxed.xed_decode
		self.xed_decode.restype = c_int
		self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
		self.xed_format_context = self.libxed.xed_format_context
		self.xed_format_context.restype = c_uint
		self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
		# One-time initialization of XED's decode tables
		self.xed_tables_init()
	def Instruction(self):
		"""Return a new XEDInstruction workspace for DisassembleOne()."""
		return XEDInstruction(self)
	def SetMode(self, inst, mode):
		"""Set 32-bit (mode truthy) or 64-bit (mode falsy) decode mode on *inst*."""
		if mode:
			inst.state.mode = 4 # 32-bit
			inst.state.width = 4 # 4 bytes
		else:
			inst.state.mode = 1 # 64-bit
			inst.state.width = 8 # 8 bytes
		self.xed_operand_values_set_mode(inst.xedp, inst.statep)
	def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
		"""Disassemble one instruction from *bytes_ptr* (up to *bytes_cnt*
		bytes), formatting addresses relative to instruction pointer *ip*.
		Returns (length, text), or (0, "") on decode/format failure."""
		self.xed_decoded_inst_zero_keep_mode(inst.xedp)
		err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
		if err:
			return 0, ""
		# Use AT&T mode (2), alternative is Intel (3)
		ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
		if not ok:
			return 0, ""
		if sys.version_info[0] == 2:
			result = inst.buffer.value
		else:
			result = inst.buffer.value.decode()
		# Return instruction length and the disassembled instruction text
		# For now, assume the length is in byte 166
		return inst.xedd[166], result
def TryOpen(file_name):
	"""Open *file_name* for binary reading, returning None on failure.

	'except Exception' rather than a bare 'except:' so that
	KeyboardInterrupt / SystemExit are not swallowed.
	"""
	try:
		return open(file_name, "rb")
	except Exception:
		return None
def Is64Bit(f):
	"""Determine whether the object code in open binary file *f* is 64-bit.

	Only ELF is understood: returns True for ELFCLASS64, False for
	ELFCLASS32.  For anything unrecognized, falls back to the bitness of
	the current process, sizeof(c_void_p), which is truthy.  The file
	position is preserved.
	"""
	result = sizeof(c_void_p)
	# ELF support only
	pos = f.tell()
	f.seek(0)
	header = f.read(7)
	f.seek(pos)
	# Too short to be an ELF header: keep the fallback result
	if len(header) < 7:
		return result
	magic = header[0:4]
	if sys.version_info[0] == 2:
		eclass = ord(header[4])
		encoding = ord(header[5])
		version = ord(header[6])
	else:
		eclass = header[4]
		encoding = header[5]
		version = header[6]
	# b"\x7fELF" compares correctly on both python2 (str) and python3
	# (bytes); the previous chr(127) + "ELF" comparison was always False
	# on python3 because bytes never compare equal to str
	if magic == b"\x7fELF" and 0 < eclass < 3 and 0 < encoding < 3 and version == 1:
		result = eclass == 2
	return result
# Global data
class Glb():
	"""Application-wide context shared by the viewer UI.

	Holds the database handles, symbol-file search paths, the optional
	libxed disassembler, and cached lookups of the host machine id and
	its sample/switch/call time range.
	"""
	def __init__(self, dbref, db, dbname):
		self.dbref = dbref
		self.db = db
		self.dbname = dbname
		self.home_dir = os.path.expanduser("~")
		# Directory where perf caches debug files indexed by build id.
		self.buildid_dir = os.getenv("PERF_BUILDID_DIR")
		if self.buildid_dir:
			self.buildid_dir += "/.build-id/"
		else:
			self.buildid_dir = self.home_dir + "/.debug/.build-id/"
		self.app = None
		self.mainwindow = None
		self.instances_to_shutdown_on_exit = weakref.WeakSet()
		# Best-effort: the viewer works without a disassembler, so any
		# failure to load libxed just disables that feature.
		try:
			self.disassembler = LibXED()
			self.have_disassembler = True
		except:
			self.have_disassembler = False
		# Cached lazily by the Host*() accessors below; 0 means "unknown".
		self.host_machine_id = 0
		self.host_start_time = 0
		self.host_finish_time = 0
	def FileFromBuildId(self, build_id):
		"""Open the cached ELF for *build_id* from the build-id cache, or None."""
		file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf"
		return TryOpen(file_name)
	def FileFromNamesAndBuildId(self, short_name, long_name, build_id):
		"""Locate a binary by kcore override, path, then build id; None if absent."""
		# Assume current machine i.e. no support for virtualization
		if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore":
			file_name = os.getenv("PERF_KCORE")
			f = TryOpen(file_name) if file_name else None
			if f:
				return f
			# For now, no special handling if long_name is /proc/kcore
		f = TryOpen(long_name)
		if f:
			return f
		f = self.FileFromBuildId(build_id)
		if f:
			return f
		return None
	def AddInstanceToShutdownOnExit(self, instance):
		"""Register *instance* (weakly) for Shutdown() at application exit."""
		self.instances_to_shutdown_on_exit.add(instance)
	# Shutdown any background processes or threads
	def ShutdownInstances(self):
		for x in self.instances_to_shutdown_on_exit:
			# Best-effort: one failing instance must not block the rest.
			try:
				x.Shutdown()
			except:
				pass
	def GetHostMachineId(self):
		"""Query and cache the machine id of the host (pid == -1) entry."""
		query = QSqlQuery(self.db)
		QueryExec(query, "SELECT id FROM machines WHERE pid = -1")
		if query.next():
			self.host_machine_id = query.value(0)
		else:
			self.host_machine_id = 0
		return self.host_machine_id
	def HostMachineId(self):
		"""Return the cached host machine id, querying it on first use."""
		if self.host_machine_id:
			return self.host_machine_id
		return self.GetHostMachineId()
	def SelectValue(self, sql):
		"""Run *sql* and return its first value as Decimal, or None.

		Query errors and empty results both map to None.
		"""
		query = QSqlQuery(self.db)
		try:
			QueryExec(query, sql)
		except:
			return None
		if query.next():
			return Decimal(query.value(0))
		return None
	# The six Min/Max helpers below return the earliest/latest non-zero
	# timestamp recorded for a machine in each table, or None if empty.
	def SwitchesMinTime(self, machine_id):
		return self.SelectValue("SELECT time"
					" FROM context_switches"
					" WHERE time != 0 AND machine_id = " + str(machine_id) +
					" ORDER BY id LIMIT 1")
	def SwitchesMaxTime(self, machine_id):
		return self.SelectValue("SELECT time"
					" FROM context_switches"
					" WHERE time != 0 AND machine_id = " + str(machine_id) +
					" ORDER BY id DESC LIMIT 1")
	def SamplesMinTime(self, machine_id):
		return self.SelectValue("SELECT time"
					" FROM samples"
					" WHERE time != 0 AND machine_id = " + str(machine_id) +
					" ORDER BY id LIMIT 1")
	def SamplesMaxTime(self, machine_id):
		return self.SelectValue("SELECT time"
					" FROM samples"
					" WHERE time != 0 AND machine_id = " + str(machine_id) +
					" ORDER BY id DESC LIMIT 1")
	def CallsMinTime(self, machine_id):
		return self.SelectValue("SELECT calls.call_time"
					" FROM calls"
					" INNER JOIN threads ON threads.thread_id = calls.thread_id"
					" WHERE calls.call_time != 0 AND threads.machine_id = " + str(machine_id) +
					" ORDER BY calls.id LIMIT 1")
	def CallsMaxTime(self, machine_id):
		return self.SelectValue("SELECT calls.return_time"
					" FROM calls"
					" INNER JOIN threads ON threads.thread_id = calls.thread_id"
					" WHERE calls.return_time != 0 AND threads.machine_id = " + str(machine_id) +
					" ORDER BY calls.return_time DESC LIMIT 1")
	def GetStartTime(self, machine_id):
		"""Earliest timestamp across switches, samples and calls (None if none)."""
		t0 = self.SwitchesMinTime(machine_id)
		t1 = self.SamplesMinTime(machine_id)
		t2 = self.CallsMinTime(machine_id)
		if t0 is None or (not(t1 is None) and t1 < t0):
			t0 = t1
		if t0 is None or (not(t2 is None) and t2 < t0):
			t0 = t2
		return t0
	def GetFinishTime(self, machine_id):
		"""Latest timestamp across switches, samples and calls (None if none)."""
		t0 = self.SwitchesMaxTime(machine_id)
		t1 = self.SamplesMaxTime(machine_id)
		t2 = self.CallsMaxTime(machine_id)
		if t0 is None or (not(t1 is None) and t1 > t0):
			t0 = t1
		if t0 is None or (not(t2 is None) and t2 > t0):
			t0 = t2
		return t0
	def HostStartTime(self):
		"""Cached start time of the host machine."""
		if self.host_start_time:
			return self.host_start_time
		self.host_start_time = self.GetStartTime(self.HostMachineId())
		return self.host_start_time
	def HostFinishTime(self):
		"""Cached finish time of the host machine."""
		if self.host_finish_time:
			return self.host_finish_time
		self.host_finish_time = self.GetFinishTime(self.HostMachineId())
		return self.host_finish_time
	def StartTime(self, machine_id):
		"""Start time for *machine_id*, using the host cache when applicable."""
		if machine_id == self.HostMachineId():
			return self.HostStartTime()
		return self.GetStartTime(machine_id)
	def FinishTime(self, machine_id):
		"""Finish time for *machine_id*, using the host cache when applicable."""
		if machine_id == self.HostMachineId():
			return self.HostFinishTime()
		return self.GetFinishTime(machine_id)
# Database reference
class DBRef():
	"""Describes how to open the exported database (SQLite or PostgreSQL)
	and papers over SQL dialect differences (boolean literals)."""
	def __init__(self, is_sqlite3, dbname):
		self.is_sqlite3 = is_sqlite3
		self.dbname = dbname
		# Dialect-appropriate boolean literals for use in SQL strings.
		self.TRUE = "TRUE"
		self.FALSE = "FALSE"
		# SQLite prior to version 3.23 does not support TRUE and FALSE
		if self.is_sqlite3:
			self.TRUE = "1"
			self.FALSE = "0"
	def Open(self, connection_name):
		"""Open the database under Qt connection *connection_name*.

		For PostgreSQL, dbname may be a libpq-style string of
		"key=value" options (hostname, port, username, password,
		dbname); a bare token is treated as the database name.
		Returns (db, dbname); raises Exception if the open fails.
		"""
		dbname = self.dbname
		if self.is_sqlite3:
			db = QSqlDatabase.addDatabase("QSQLITE", connection_name)
		else:
			db = QSqlDatabase.addDatabase("QPSQL", connection_name)
			opts = dbname.split()
			for opt in opts:
				if "=" in opt:
					opt = opt.split("=")
					if opt[0] == "hostname":
						db.setHostName(opt[1])
					elif opt[0] == "port":
						db.setPort(int(opt[1]))
					elif opt[0] == "username":
						db.setUserName(opt[1])
					elif opt[0] == "password":
						db.setPassword(opt[1])
					elif opt[0] == "dbname":
						dbname = opt[1]
				else:
					dbname = opt
		db.setDatabaseName(dbname)
		if not db.open():
			raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
		return db, dbname
# Main
def Main():
	"""Command-line entry point.

	Parses arguments, sniffs whether the database is SQLite or
	PostgreSQL, opens it and runs the Qt main loop.  Exits the process
	with the Qt event loop's return code.
	"""
	usage_str = "exported-sql-viewer.py [--pyside-version-1] <database name>\n" \
		    "   or: exported-sql-viewer.py --help-only"
	ap = argparse.ArgumentParser(usage = usage_str, add_help = False)
	ap.add_argument("--pyside-version-1", action='store_true')
	ap.add_argument("dbname", nargs="?")
	ap.add_argument("--help-only", action='store_true')
	args = ap.parse_args()
	if args.help_only:
		app = QApplication(sys.argv)
		mainwindow = HelpOnlyWindow()
		mainwindow.show()
		err = app.exec_()
		sys.exit(err)
	dbname = args.dbname
	if dbname is None:
		ap.print_usage()
		print("Too few arguments")
		sys.exit(1)
	# Sniff the file header to distinguish SQLite from PostgreSQL.
	is_sqlite3 = False
	try:
		# 'with' guarantees the probe handle is closed even if read()
		# raises; only environment errors are swallowed (the bare
		# except previously used here also hid KeyboardInterrupt and
		# real programming errors).
		with open(dbname, "rb") as f:
			if f.read(15) == b'SQLite format 3':
				is_sqlite3 = True
	except (IOError, OSError):
		pass
	dbref = DBRef(is_sqlite3, dbname)
	db, dbname = dbref.Open("main")
	glb = Glb(dbref, db, dbname)
	app = QApplication(sys.argv)
	glb.app = app
	mainwindow = MainWindow(glb)
	glb.mainwindow = mainwindow
	mainwindow.show()
	err = app.exec_()
	glb.ShutdownInstances()
	db.close()
	sys.exit(err)
if __name__ == "__main__":
Main()
| gpl-2.0 |
leorochael/odoo | addons/account_check_writing/__openerp__.py | 313 | 1808 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Check Writing',
'version': '1.1',
'author': 'OpenERP SA, NovaPoint Group',
'category': 'Generic Modules/Accounting',
'description': """
Module for the Check Writing and Check Printing.
================================================
""",
'website': 'https://www.odoo.com/page/accounting',
'depends' : ['account_voucher'],
'data': [
'wizard/account_check_batch_printing_view.xml',
'account_view.xml',
'account_voucher_view.xml',
'account_check_writing_data.xml',
'data/report_paperformat.xml',
'views/report_check.xml',
'account_check_writing_report.xml',
],
'demo': ['account_demo.xml'],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
markkerzner/nn_kove | hadoop/src/contrib/hod/testing/testHodCleanup.py | 182 | 4063 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re, threading, time
myDirectory = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myDirectory)
sys.path.append(rootDirectory)
from testing.lib import BaseTestSuite
from hodlib.HodRing.hodRing import MRSystemDirectoryManager, createMRSystemDirectoryManager
from hodlib.Common.threads import simpleCommand
excludes = []
# duplicating temporarily until HADOOP-2848 is committed.
class MyMockLogger:
  """Minimal logger stand-in that records each message's severity.

  Tests use it to assert that a particular message was logged at a
  particular level without configuring real logging.
  """
  def __init__(self):
    # Maps message text -> severity string ('info', 'critical' or 'warn').
    self.__logLines = {}
  def info(self, message):
    self.__logLines[message] = 'info'
  def critical(self, message):
    self.__logLines[message] = 'critical'
  def warn(self, message):
    self.__logLines[message] = 'warn'
  def debug(self, message):
    # don't track debug lines.
    pass
  # verify a certain message has been logged at the defined level of severity.
  def hasMessage(self, message, level):
    # 'in' replaces dict.has_key(), which was removed in Python 3;
    # behaviour is identical on Python 2.
    if message not in self.__logLines:
      return False
    return self.__logLines[message] == level
class test_MRSystemDirectoryManager(unittest.TestCase):
  """Unit tests for MRSystemDirectoryManager cleanup-argument handling."""

  def setUp(self):
    self.log = MyMockLogger()

  def testCleanupArgsString(self):
    mgr = MRSystemDirectoryManager(1234, '/user/hod/mapredsystem/hoduser.123.abc.com', \
        'def.com:5678', '/usr/bin/hadoop', self.log)
    cleanup_args = mgr.toCleanupArgs()
    # NOTE(review): assertTrue with two string arguments treats the second
    # as a failure message, so this assertion always passes; it was
    # probably meant to be an equality check - confirm before tightening.
    self.assertTrue(" --jt-pid 1234 --mr-sys-dir /user/hod/mapredsystem/hoduser.123.abc.com --fs-name def.com:5678 --hadoop-path /usr/bin/hadoop ", cleanup_args)

  def testCreateMRSysDirInvalidParams(self):
    # No manager should be created if any required key is missing; this
    # mirrors the situation for non-jobtracker daemons.
    required_keys = [ 'jt-pid', 'mr-sys-dir', 'fs-name', 'hadoop-path' ]
    params = { 'jt-pid' : 1234,
               'mr-sys-dir' : '/user/hod/mapredsystem/hoduser.def.com',
               'fs-name' : 'ghi.com:1234',
               'hadoop-path' : '/usr/bin/hadoop'
             }
    for key in required_keys:
      saved = params[key]
      params[key] = None
      self.assertEquals(createMRSystemDirectoryManager(params, self.log), None)
      params[key] = saved

  def testUnresponsiveJobTracker(self):
    # Simulate an unresponsive job tracker with a command that outlives the
    # manager's retries, then check that the right warning was produced.
    sc = simpleCommand("sleep", "sleep 300")
    sc.start()
    pid = sc.getPid()
    while pid is None:
      pid = sc.getPid()
    mgr = MRSystemDirectoryManager(pid, '/user/yhemanth/mapredsystem/hoduser.123.abc.com', \
        'def.com:5678', '/usr/bin/hadoop', self.log, retries=3)
    mgr.removeMRSystemDirectory()
    self.log.hasMessage("Job Tracker did not exit even after a minute. Not going to try and cleanup the system directory", 'warn')
    sc.kill()
    sc.wait()
    sc.join()
class HodCleanupTestSuite(BaseTestSuite):
  """Suite wrapper wiring this module's test cases into BaseTestSuite."""

  def __init__(self):
    # Suite setup: register this module's tests, honouring the excludes list.
    BaseTestSuite.__init__(self, __name__, excludes)

  def cleanUp(self):
    # Suite teardown: nothing to release for these tests.
    pass
def RunHodCleanupTests():
  """Instantiate the cleanup suite, run it, tidy up and return the result."""
  cleanup_suite = HodCleanupTestSuite()
  result = cleanup_suite.runTests()
  cleanup_suite.cleanUp()
  return result
# Script entry point: run the suite when executed directly.
if __name__ == "__main__":
  RunHodCleanupTests()
| apache-2.0 |
diegocortassa/TACTIC | src/context/client/tactic-api-python-4.0.api04/Lib/opcode.py | 8 | 5434 |
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
           "haslocal", "hascompare", "hasfree", "opname", "opmap",
           "HAVE_ARGUMENT", "EXTENDED_ARG"]
# Operands of COMPARE_OP, indexed by the instruction's argument.
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
        'is not', 'exception match', 'BAD')
# Property lists: each holds the opcodes whose argument has that meaning;
# they are populated by the def_op/name_op/jrel_op/jabs_op helpers below.
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
opmap = {}
opname = [''] * 256
# Pre-fill with placeholders so unknown opcodes print as '<n>'.
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
    """Register opcode *op* under *name* in both lookup directions."""
    opmap[name] = op
    opname[op] = name

def name_op(name, op):
    """Register an opcode whose argument indexes the name list."""
    def_op(name, op)
    hasname.append(op)

def jrel_op(name, op):
    """Register an opcode whose argument is a relative jump offset."""
    def_op(name, op)
    hasjrel.append(op)

def jabs_op(name, op):
    """Register an opcode whose argument is an absolute jump target."""
    def_op(name, op)
    hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
# NOTE: this table matches the CPython 2.x bytecode set (e.g. the
# py2-only UNARY_CONVERT, SLICE and PRINT_* opcodes appear below).
def_op('STOP_CODE', 0)
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('ROT_FOUR', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_CONVERT', 13)
def_op('UNARY_INVERT', 15)
def_op('LIST_APPEND', 18)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_DIVIDE', 21)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
# SLICE+n: n encodes which of the start/stop operands are present.
def_op('SLICE+0', 30)
def_op('SLICE+1', 31)
def_op('SLICE+2', 32)
def_op('SLICE+3', 33)
def_op('STORE_SLICE+0', 40)
def_op('STORE_SLICE+1', 41)
def_op('STORE_SLICE+2', 42)
def_op('STORE_SLICE+3', 43)
def_op('DELETE_SLICE+0', 50)
def_op('DELETE_SLICE+1', 51)
def_op('DELETE_SLICE+2', 52)
def_op('DELETE_SLICE+3', 53)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_DIVIDE', 58)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('PRINT_ITEM', 71)
def_op('PRINT_NEWLINE', 72)
def_op('PRINT_ITEM_TO', 73)
def_op('PRINT_NEWLINE_TO', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('LOAD_LOCALS', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('EXEC_STMT', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('BUILD_CLASS', 89)
HAVE_ARGUMENT = 90              # Opcodes from here have an argument:
name_op('STORE_NAME', 90)       # Index in name list
name_op('DELETE_NAME', 91)      # ""
def_op('UNPACK_SEQUENCE', 92)   # Number of tuple items
jrel_op('FOR_ITER', 93)
name_op('STORE_ATTR', 95)       # Index in name list
name_op('DELETE_ATTR', 96)      # ""
name_op('STORE_GLOBAL', 97)     # ""
name_op('DELETE_GLOBAL', 98)    # ""
def_op('DUP_TOPX', 99)          # number of items to duplicate
def_op('LOAD_CONST', 100)       # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101)       # Index in name list
def_op('BUILD_TUPLE', 102)      # Number of tuple items
def_op('BUILD_LIST', 103)       # Number of list items
def_op('BUILD_MAP', 104)        # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 105)       # Index in name list
def_op('COMPARE_OP', 106)       # Comparison operator
hascompare.append(106)
name_op('IMPORT_NAME', 107)     # Index in name list
name_op('IMPORT_FROM', 108)     # Index in name list
jrel_op('JUMP_FORWARD', 110)    # Number of bytes to skip
jrel_op('JUMP_IF_FALSE', 111)   # ""
jrel_op('JUMP_IF_TRUE', 112)    # ""
jabs_op('JUMP_ABSOLUTE', 113)   # Target byte offset from beginning of code
name_op('LOAD_GLOBAL', 116)     # Index in name list
jabs_op('CONTINUE_LOOP', 119)   # Target address
jrel_op('SETUP_LOOP', 120)      # Distance to target address
jrel_op('SETUP_EXCEPT', 121)    # ""
jrel_op('SETUP_FINALLY', 122)   # ""
def_op('LOAD_FAST', 124)        # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125)       # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126)      # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130)    # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131)    # #args + (#kwargs << 8)
def_op('MAKE_FUNCTION', 132)    # Number of args with default values
def_op('BUILD_SLICE', 133)      # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('CALL_FUNCTION_VAR', 140)     # #args + (#kwargs << 8)
def_op('CALL_FUNCTION_KW', 141)      # #args + (#kwargs << 8)
def_op('CALL_FUNCTION_VAR_KW', 142)  # #args + (#kwargs << 8)
def_op('EXTENDED_ARG', 143)
EXTENDED_ARG = 143
# The helpers are implementation details of building the table.
del def_op, name_op, jrel_op, jabs_op
| epl-1.0 |
Stavitsky/nova | nova/db/sqlalchemy/utils.py | 19 | 5548 | # Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as oslodbutils
from oslo_log import log as logging
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy import Table
from sqlalchemy.types import NullType
from nova.db.sqlalchemy import api as db
from nova import exception
from nova.i18n import _, _LE
LOG = logging.getLogger(__name__)
class DeleteFromSelect(UpdateBase):
    """Executable DELETE whose target rows are produced by a SELECT.

    The attributes are consumed by the SQL compiler hook registered
    with @compiles for this class elsewhere in this module.
    """
    def __init__(self, table, select, column):
        # table: table to delete from; select: query yielding key values;
        # column: the column matched against the SELECT's result set.
        self.table = table
        self.select = select
        self.column = column
# NOTE(guochbo): some versions of MySQL don't yet support subqueries with
# 'LIMIT & IN/ALL/ANY/SOME'. We need to work around this with a nested select.
@compiles(DeleteFromSelect)
def visit_delete_from_select(element, compiler, **kw):
    """Render DeleteFromSelect as DELETE ... WHERE col IN (nested SELECT)."""
    return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.column),
        element.column.name,
        compiler.process(element.select))
def check_shadow_table(migrate_engine, table_name):
    """Check that ``table_name`` and its shadow table have the same columns.

    :param migrate_engine: engine the table metadata is bound to
    :param table_name: name of the primary (non-shadow) table
    :returns: True when the tables match
    :raises exception.NovaException: on a missing, extra or
        differently-typed column
    """
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table(table_name, meta, autoload=True)
    shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
                         autoload=True)
    columns = {c.name: c for c in table.columns}
    shadow_columns = {c.name: c for c in shadow_table.columns}
    # dict.items() (rather than the Python-2-only iteritems()) behaves
    # identically here and keeps the code Python 3 compatible.
    for name, column in columns.items():
        if name not in shadow_columns:
            raise exception.NovaException(
                _("Missing column %(table)s.%(column)s in shadow table")
                % {'column': name, 'table': shadow_table.name})
        shadow_column = shadow_columns[name]
        if not isinstance(shadow_column.type, type(column.type)):
            raise exception.NovaException(
                _("Different types in %(table)s.%(column)s and shadow table: "
                  "%(c_type)s %(shadow_c_type)s")
                % {'column': name, 'table': table.name,
                   'c_type': column.type,
                   'shadow_c_type': shadow_column.type})
    for name, column in shadow_columns.items():
        if name not in columns:
            raise exception.NovaException(
                _("Extra column %(table)s.%(column)s in shadow table")
                % {'column': name, 'table': shadow_table.name})
    return True
def create_shadow_table(migrate_engine, table_name=None, table=None,
                        **col_name_col_instance):
    """Create the shadow table for table ``table_name`` or instance ``table``.

    :param table_name: Autoload the table with this name and create its
        shadow table.
    :param table: Autoloaded table; just create the corresponding shadow
        table.
    :param col_name_col_instance: pairs of column_name=column_instance,
        where column_instance is a Column.  Required only for columns
        whose types sqlite cannot reflect (for example BigInteger).
    :returns: The created shadow_table object.
    """
    meta = MetaData(bind=migrate_engine)
    # Exactly one of table_name / table must be supplied.
    if table_name is None and table is None:
        raise exception.NovaException(_("Specify `table_name` or `table` "
                                        "param"))
    if not (table_name is None or table is None):
        raise exception.NovaException(_("Specify only one param `table_name` "
                                        "`table`"))
    if table is None:
        table = Table(table_name, meta, autoload=True)
    columns = []
    for column in table.columns:
        if isinstance(column.type, NullType):
            # Reflection produced NullType; substitute the caller-supplied
            # column definition for this name.
            new_column = oslodbutils._get_not_supported_column(
                col_name_col_instance, column.name)
            columns.append(new_column)
        else:
            columns.append(column.copy())
    shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
    shadow_table = Table(shadow_table_name, meta, *columns,
                         mysql_engine='InnoDB')
    try:
        shadow_table.create()
        return shadow_table
    except (db_exc.DBError, OperationalError):
        # NOTE(ekudryashova): At the moment there is a case in oslo.db code,
        # which raises unwrapped OperationalError, so we should catch it until
        # oslo.db would wraps all such exceptions
        LOG.info(repr(shadow_table))
        LOG.exception(_LE('Exception while creating table.'))
        raise exception.ShadowTableExists(name=shadow_table_name)
    except Exception:
        # NOTE(review): this branch logs but neither re-raises nor returns,
        # so unexpected errors yield an implicit None - presumably
        # deliberate best-effort, but confirm callers tolerate None here.
        LOG.info(repr(shadow_table))
        LOG.exception(_LE('Exception while creating table.'))
| apache-2.0 |
eceglov/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/MSVSProject.py | 137 | 7491 | #!/usr/bin/python2.4
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import common
import xml.dom
import xml.dom.minidom
import MSVSNew
#------------------------------------------------------------------------------
class Tool(object):
  """A Visual Studio tool: a <Tool> XML element plus its attributes."""

  def __init__(self, name, attrs=None):
    """Initializes the tool.

    Args:
      name: Tool name.
      attrs: Dict of tool attributes; may be None.
    """
    self.name = name
    self.attrs = attrs if attrs else {}

  def CreateElement(self, doc):
    """Builds and returns the xml.dom.Element representing this tool.

    Args:
      doc: xml.dom.Document object to use for node creation.
    """
    element = doc.createElement('Tool')
    element.setAttribute('Name', self.name)
    for attr_name, attr_value in self.attrs.items():
      element.setAttribute(attr_name, attr_value)
    return element
class Filter(object):
  """A Visual Studio filter, i.e. a virtual project folder."""

  def __init__(self, name, contents=None):
    """Initializes the folder.

    Args:
      name: Filter (folder) name.
      contents: Iterable of filenames and/or nested Filter objects.
    """
    self.name = name
    # Copy, so later caller-side mutation of *contents* is not reflected.
    self.contents = [] if not contents else list(contents)
#------------------------------------------------------------------------------
class Writer(object):
  """Visual Studio XML project (.vcproj) writer.

  Typical usage: Create(), then AddConfig/AddFiles/AddFileConfig as
  needed, and finally Write().
  """
  def __init__(self, project_path, version):
    """Initializes the project.

    Args:
      project_path: Path to the project file.
      version: Format version to emit.
    """
    self.project_path = project_path
    # The DOM document is created lazily by Create().
    self.doc = None
    self.version = version
  def Create(self, name, guid=None, platforms=None):
    """Creates the project document.

    Args:
      name: Name of the project.
      guid: GUID to use for project, if not None.
      platforms: List of platform names; defaults to ['Win32'].
    """
    self.name = name
    # A stable GUID is derived from the project path when none is given.
    self.guid = guid or MSVSNew.MakeGuid(self.project_path)
    # Default to Win32 for platforms.
    if not platforms:
      platforms = ['Win32']
    # Create XML doc
    xml_impl = xml.dom.getDOMImplementation()
    self.doc = xml_impl.createDocument(None, 'VisualStudioProject', None)
    # Add attributes to root element
    self.n_root = self.doc.documentElement
    self.n_root.setAttribute('ProjectType', 'Visual C++')
    self.n_root.setAttribute('Version', self.version.ProjectVersion())
    self.n_root.setAttribute('Name', self.name)
    self.n_root.setAttribute('ProjectGUID', self.guid)
    self.n_root.setAttribute('RootNamespace', self.name)
    self.n_root.setAttribute('Keyword', 'Win32Proj')
    # Add platform list
    n_platform = self.doc.createElement('Platforms')
    self.n_root.appendChild(n_platform)
    for platform in platforms:
      n = self.doc.createElement('Platform')
      n.setAttribute('Name', platform)
      n_platform.appendChild(n)
    # Add tool files section
    self.n_tool_files = self.doc.createElement('ToolFiles')
    self.n_root.appendChild(self.n_tool_files)
    # Add configurations section
    self.n_configs = self.doc.createElement('Configurations')
    self.n_root.appendChild(self.n_configs)
    # Add empty References section
    self.n_root.appendChild(self.doc.createElement('References'))
    # Add files section
    self.n_files = self.doc.createElement('Files')
    self.n_root.appendChild(self.n_files)
    # Keep a dict keyed on filename to speed up access.
    self.n_files_dict = dict()
    # Add empty Globals section
    self.n_root.appendChild(self.doc.createElement('Globals'))
  def AddToolFile(self, path):
    """Adds a tool file to the project.

    Args:
      path: Relative path from project to tool file.
    """
    n_tool = self.doc.createElement('ToolFile')
    n_tool.setAttribute('RelativePath', path)
    self.n_tool_files.appendChild(n_tool)
  def _AddConfigToNode(self, parent, config_type, config_name, attrs=None,
                       tools=None):
    """Adds a configuration to the parent node.

    Args:
      parent: Destination node.
      config_type: Type of configuration node.
      config_name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    """
    # Handle defaults
    if not attrs:
      attrs = {}
    if not tools:
      tools = []
    # Add configuration node and its attributes
    n_config = self.doc.createElement(config_type)
    n_config.setAttribute('Name', config_name)
    for k, v in attrs.items():
      n_config.setAttribute(k, v)
    parent.appendChild(n_config)
    # Add tool nodes and their attributes
    if tools:
      for t in tools:
        # Bare strings become attribute-less Tool elements.
        if isinstance(t, Tool):
          n_config.appendChild(t.CreateElement(self.doc))
        else:
          n_config.appendChild(Tool(t).CreateElement(self.doc))
  def AddConfig(self, name, attrs=None, tools=None):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    """
    self._AddConfigToNode(self.n_configs, 'Configuration', name, attrs, tools)
  def _AddFilesToNode(self, parent, files):
    """Adds files and/or filters to the parent node.

    Args:
      parent: Destination node
      files: A list of Filter objects and/or relative paths to files.

    Will call itself recursively, if the files list contains Filter objects.
    """
    for f in files:
      if isinstance(f, Filter):
        node = self.doc.createElement('Filter')
        node.setAttribute('Name', f.name)
        self._AddFilesToNode(node, f.contents)
      else:
        node = self.doc.createElement('File')
        node.setAttribute('RelativePath', f)
        # Remember the node so AddFileConfig can find it by path later.
        self.n_files_dict[f] = node
      parent.appendChild(node)
  def AddFiles(self, files):
    """Adds files to the project.

    Args:
      files: A list of Filter objects and/or relative paths to files.

    This makes a copy of the file/filter tree at the time of this call.  If you
    later add files to a Filter object which was passed into a previous call
    to AddFiles(), it will not be reflected in this project.
    """
    self._AddFilesToNode(self.n_files, files)
    # TODO(rspangler) This also doesn't handle adding files to an existing
    # filter.  That is, it doesn't merge the trees.
  def AddFileConfig(self, path, config, attrs=None, tools=None):
    """Adds a configuration to a file.

    Args:
      path: Relative path to the file.
      config: Name of configuration to add.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.

    Raises:
      ValueError: Relative path does not match any file added via AddFiles().
    """
    # Find the file node with the right relative path
    parent = self.n_files_dict.get(path)
    if not parent:
      raise ValueError('AddFileConfig: file "%s" not in project.' % path)
    # Add the config to the file node
    self._AddConfigToNode(parent, 'FileConfiguration', config, attrs, tools)
  def Write(self, writer=common.WriteOnDiff):
    """Writes the project file.

    Args:
      writer: Factory taking the output path and returning a file-like
        object (default writes only when the content changed).
    """
    f = writer(self.project_path)
    # Visual Studio expects Windows-1252 encoding and CRLF line endings.
    self.doc.writexml(f, encoding='Windows-1252', addindent='  ', newl='\r\n')
    f.close()
#------------------------------------------------------------------------------
| bsd-3-clause |
uw-it-cte/uw-restclients | restclients/test/canvas/roles.py | 4 | 1692 | from django.test import TestCase
from django.conf import settings
from restclients.canvas.roles import Roles
from restclients.exceptions import DataFailureException
class CanvasTestRoles(TestCase):
    """Exercises the Canvas Roles client against canned (File DAO) data."""

    def _file_dao(self):
        # Route REST calls to the on-disk mock responses.
        return self.settings(
            RESTCLIENTS_CANVAS_DAO_CLASS='restclients.dao_implementation.canvas.File')

    def test_roles(self):
        with self._file_dao():
            roles = Roles().get_roles_in_account(12345)
            self.assertEquals(len(roles), 15, "Failed to follow Link header")
            course_access = roles[10]
            self.assertEquals(course_access.base_role_type, "AccountMembership")
            self.assertEquals(course_access.label, "Course Access")
            self.assertEquals(
                course_access.permissions.get('read_course_list').get('enabled'),
                True)

    def test_course_roles(self):
        with self._file_dao():
            roles = Roles().get_effective_course_roles_in_account(12345)
            self.assertEquals(len(roles), 5, "Course roles only")
            teacher = roles[0]
            self.assertEquals(teacher.base_role_type, "TeacherEnrollment")
            self.assertEquals(teacher.label, "Teacher")

    def test_role(self):
        with self._file_dao():
            role = Roles().get_role(12345, 999)
            self.assertEquals(role.role_id, 999)
            self.assertEquals(role.label, "Course Access")
            self.assertEquals(
                role.permissions.get('read_course_list').get('enabled'), True)
| apache-2.0 |
jbarriosc/ACSUFRO | LGPL/CommonSoftware/acspy/test/acspyTestAutoload.py | 4 | 1315 | #!/usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2002
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# @(#) $Id: acspyTestAutoload.py,v 1.1 2004/09/02 22:50:33 dfugate Exp $
###############################################################################
'''
Tests autoloading CDB capability of container.

Importing this module has a deliberate side effect (the print below), so
the container log shows the script was auto-loaded.
'''
# Marker value computed at import time.
joe = 7*9
# Python 2 print statement (this script targets the Python 2 ACS container).
print "In acspyTestAutoLoad.py", joe
| lgpl-2.1 |
rmehta/erpnext | erpnext/patches/v5_0/replace_renamed_fields_in_custom_scripts_and_print_formats.py | 113 | 2651 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import re
def execute():
	"""Patch entry point: rewrite renamed fieldnames inside custom
	scripts and non-standard print formats."""
	# NOTE: sequence is important
	renamed_fields = get_all_renamed_fields()
	for dt, script_field, ref_dt_field in (("Custom Script", "script", "dt"), ("Print Format", "html", "doc_type")):
		# Only fetch documents mentioning at least one old fieldname;
		# underscores are escaped for SQL LIKE.
		cond1 = " or ".join("""{0} like "%%{1}%%" """.format(script_field, d[0].replace("_", "\\_")) for d in renamed_fields)
		# Standard print formats ship with the app and must not be rewritten.
		cond2 = " and standard = 'No'" if dt == "Print Format" else ""
		for name, script, ref_dt in frappe.db.sql("select name, {0} as script, {1} as ref_dt from `tab{2}` where ({3}) {4}".format(script_field, ref_dt_field, dt, cond1, cond2)):
			update_script(dt, name, ref_dt, script_field, script, renamed_fields)
def get_all_renamed_fields():
    """Return a tuple of (old_fieldname, new_fieldname) pairs.

    Combines the hard-coded parent-doctype renames below with the
    child-table renames declared in rename_table_fieldnames.rename_map.
    Order matters to the caller (see NOTE in execute()).
    """
    from erpnext.patches.v5_0.rename_table_fieldnames import rename_map
    renamed_fields = (
        ("base_amount", "base_net_amount"),
        ("net_total", "base_net_total"),
        ("net_total_export", "total"),
        ("net_total_import", "total"),
        ("other_charges_total", "base_total_taxes_and_charges"),
        ("other_charges_total_export", "total_taxes_and_charges"),
        ("other_charges_added", "base_taxes_and_charges_added"),
        ("other_charges_added_import", "taxes_and_charges_added"),
        ("other_charges_deducted", "base_taxes_and_charges_deducted"),
        ("other_charges_deducted_import", "taxes_and_charges_deducted"),
        ("total_tax", "base_total_taxes_and_charges"),
        ("grand_total", "base_grand_total"),
        ("grand_total_export", "grand_total"),
        ("grand_total_import", "grand_total"),
        ("rounded_total", "base_rounded_total"),
        ("rounded_total_export", "rounded_total"),
        ("rounded_total_import", "rounded_total"),
        ("in_words", "base_in_words"),
        ("in_words_export", "in_words"),
        ("in_words_import", "in_words"),
        ("tax_amount", "base_tax_amount"),
        ("tax_amount_after_discount_amount", "base_tax_amount_after_discount_amount"),
    )
    # Append the child-table renames from the rename map.
    for fields in rename_map.values():
        renamed_fields += tuple(fields)
    return renamed_fields
def update_script(dt, name, ref_dt, script_field, script, renamed_fields):
    """Rewrite old fieldnames inside one script/html document and save it.

    Whole-word occurrences of each old fieldname are replaced with the new
    one; the ambiguous "entries" fieldname is handled separately because its
    replacement depends on the referenced doctype.
    """
    for old_name, new_name in renamed_fields:
        if old_name == "entries":
            continue  # doctype-specific; handled below
        script = re.sub(r"\b%s\b" % old_name, new_name, script)
    replacement_for_entries = {
        "Journal Entry": "accounts",
        "Bank Reconciliation": "journal_entries",
        "Sales Invoice": "items",
        "Purchase Invoice": "items",
    }.get(ref_dt)
    if replacement_for_entries:
        script = re.sub(r"\bentries\b", replacement_for_entries, script)
    frappe.db.set_value(dt, name, script_field, script)
akatsoulas/snippets-service | snippets/saml/settings.py | 2 | 1586 | import os
import saml2
from decouple import config
# Django / djangosaml2 settings for SAML SSO, driven by environment
# variables via python-decouple's config().
DEBUG = config('DEBUG', cast=bool)
SAML_DIR = os.path.dirname(__file__)
LOGIN_URL = '/saml2/login/'
LOGIN_REDIRECT_URL = '/admin/'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
AUTHENTICATION_BACKENDS = (
    'djangosaml2.backends.Saml2Backend',
)
SAML_SSO_URL = config('SAML_SSO_URL')
SAML_ENTITY_ID = config('SAML_ENTITY_ID')
# NOTE(review): 'SP' is passed positionally where the other calls use
# default= -- presumably it is the default value; confirm against the
# decouple.config signature.
SAML_SP_NAME = config('SAML_SP_NAME', 'SP')
SAML_REMOTE_METADATA = os.path.join(
    SAML_DIR, config('SAML_REMOTE_METADATA', default='remote_metadata.xml'))
# NOTE(review): the setting name is SAML_CREATE_UNKNOWN_USER but the
# environment variable read is 'SAML_CREATE_USER' -- verify this mismatch
# is intentional.
SAML_CREATE_UNKNOWN_USER = config('SAML_CREATE_USER', default=False, cast=bool)
# Map incoming SAML assertion attributes to Django User fields.
SAML_ATTRIBUTE_MAPPING = {
    'uid': ('username', ),
    'email': ('email', ),
    'firstName': ('first_name', ),
    'lastName': ('last_name', ),
}
# pysaml2 configuration dict, consumed by djangosaml2.
SAML_CONFIG = {
    'debug': DEBUG,
    'xmlsec_binary': '/usr/bin/xmlsec1',
    'attribute_map_dir': os.path.join(SAML_DIR, 'attribute-maps'),
    'entityid': SAML_ENTITY_ID,
    'valid_for': 24,  # how long is our metadata valid (hours)
    'service': {
        'sp': {
            # Allow Okta to initiate the login.
            'allow_unsolicited': 'true',
            'name': SAML_SP_NAME,
            'endpoints': {
                'assertion_consumer_service': [
                    (SAML_SSO_URL, saml2.BINDING_HTTP_POST),
                ],
            },
            'required_attributes': ['uid'],
            'idp': {
                # Configured by remote_metadata
            },
        }
    },
    # where the remote metadata is stored
    'metadata': {
        'local': [SAML_REMOTE_METADATA],
    },
}
| mpl-2.0 |
ahmadassaf/zulip | zerver/tests/test_bugdown.py | 1 | 29960 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from django.conf import settings
from django.test import TestCase
from zerver.lib import bugdown
from zerver.lib.actions import (
check_add_realm_emoji,
do_remove_realm_emoji,
do_set_alert_words,
get_realm,
)
from zerver.lib.camo import get_camo_url
from zerver.lib.request import (
JsonableError,
)
from zerver.lib.test_helpers import (
ZulipTestCase,
)
from zerver.models import (
get_client,
get_user_profile_by_email,
Message,
RealmFilter,
Recipient,
)
import mock
import os
import ujson
import six
class FencedBlockPreprocessorTest(TestCase):
    """Unit tests for bugdown's fenced-code preprocessor: ~~~ quote blocks,
    ``` code blocks, serial blocks, and quote/code nesting.
    """
    def test_simple_quoting(self):
        processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
        markdown = [
            '~~~ quote',
            'hi',
            'bye',
            '',
            ''
        ]
        expected = [
            '',
            '> hi',
            '> bye',
            '',
            '',
            ''
        ]
        lines = processor.run(markdown)
        self.assertEqual(lines, expected)

    def test_serial_quoting(self):
        # Two quote fences in a row; the second fence is left unclosed.
        processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
        markdown = [
            '~~~ quote',
            'hi',
            '~~~',
            '',
            '~~~ quote',
            'bye',
            '',
            ''
        ]
        expected = [
            '',
            '> hi',
            '',
            '',
            '',
            '> bye',
            '',
            '',
            ''
        ]
        lines = processor.run(markdown)
        self.assertEqual(lines, expected)

    def test_serial_code(self):
        processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
        # Simulate code formatting.
        processor.format_code = lambda lang, code: lang + ':' + code
        processor.placeholder = lambda s: '**' + s.strip('\n') + '**'
        markdown = [
            '``` .py',
            'hello()',
            '```',
            '',
            '``` .py',
            'goodbye()',
            '```',
            '',
            ''
        ]
        expected = [
            '',
            '**py:hello()**',
            '',
            '',
            '',
            '**py:goodbye()**',
            '',
            '',
            ''
        ]
        lines = processor.run(markdown)
        self.assertEqual(lines, expected)

    def test_nested_code(self):
        # A code fence inside a quote fence renders as quoted code.
        processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
        # Simulate code formatting.
        processor.format_code = lambda lang, code: lang + ':' + code
        processor.placeholder = lambda s: '**' + s.strip('\n') + '**'
        markdown = [
            '~~~ quote',
            'hi',
            '``` .py',
            'hello()',
            '```',
            '',
            ''
        ]
        expected = [
            '',
            '> hi',
            '',
            '> **py:hello()**',
            '',
            '',
            ''
        ]
        lines = processor.run(markdown)
        self.assertEqual(lines, expected)
def bugdown_convert(text):
    # Shared helper: render `text` through bugdown for the zulip.com realm.
    return bugdown.convert(text, "zulip.com")
class BugdownTest(TestCase):
    """End-to-end tests of bugdown markdown rendering: fixture-driven cases,
    inline previews (YouTube/Dropbox/Twitter), emoji, realm filters, alert
    words, @-mentions, subscribe buttons, and the zephyr_mirror config.
    Expected values are exact rendered-HTML literals.
    """
    def common_bugdown_test(self, text, expected):
        converted = bugdown_convert(text)
        self.assertEqual(converted, expected)

    def load_bugdown_tests(self):
        # Fixtures live next to the test suite in fixtures/bugdown-data.json.
        test_fixtures = {}
        data_file = open(os.path.join(os.path.dirname(__file__), '../fixtures/bugdown-data.json'), 'r')
        data = ujson.loads('\n'.join(data_file.readlines()))
        for test in data['regular_tests']:
            test_fixtures[test['name']] = test
        return test_fixtures, data['linkify_tests']

    def test_bugdown_fixtures(self):
        format_tests, linkify_tests = self.load_bugdown_tests()
        self.maxDiff = None
        for name, test in six.iteritems(format_tests):
            converted = bugdown_convert(test['input'])
            print("Running Bugdown test %s" % (name,))
            self.assertEqual(converted, test['expected_output'])

        def replaced(payload, url, phrase=''):
            # Build the expected <a> tag for a linkified URL; bare domains
            # get an http:// prefix, emails become mailto: without target.
            target = " target=\"_blank\""
            if url[:4] == 'http':
                href = url
            elif '@' in url:
                href = 'mailto:' + url
                target = ""
            else:
                href = 'http://' + url
            return payload % ("<a href=\"%s\"%s title=\"%s\">%s</a>" % (href, target, href, url),)

        print("Running Bugdown Linkify tests")
        self.maxDiff = None
        for inline_url, reference, url in linkify_tests:
            try:
                match = replaced(reference, url, phrase=inline_url)
            except TypeError:
                match = reference
            converted = bugdown_convert(inline_url)
            self.assertEqual(match, converted)

    def test_inline_youtube(self):
        msg = 'Check out the debate: http://www.youtube.com/watch?v=hx1mjT73xYE'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>Check out the debate: <a href="http://www.youtube.com/watch?v=hx1mjT73xYE" target="_blank" title="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="message_inline_image"><a href="http://www.youtube.com/watch?v=hx1mjT73xYE" target="_blank" title="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg"></a></div>')

    def test_inline_dropbox(self):
        # Open Graph lookups are mocked so no network access happens.
        msg = 'Look at how hilarious our old office was: https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG'
        image_info = {'image': 'https://photos-4.dropbox.com/t/2/AABIre1oReJgPYuc_53iv0IHq1vUzRaDg2rrCfTpiWMccQ/12/129/jpeg/1024x1024/2/_/0/4/IMG_0923.JPG/CIEBIAEgAiAHKAIoBw/ymdijjcg67hv2ta/AABz2uuED1ox3vpWWvMpBxu6a/IMG_0923.JPG', 'desc': 'Shared with Dropbox', 'title': 'IMG_0923.JPG'}
        with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
            converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>Look at how hilarious our old office was: <a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG" target="_blank" title="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG">https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG" target="_blank" title="IMG_0923.JPG"><img src="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG?dl=1"></a></div>')
        msg = 'Look at my hilarious drawing folder: https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl='
        image_info = {'image': 'https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png', 'desc': 'Shared with Dropbox', 'title': 'Saves'}
        with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
            converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>Look at my hilarious drawing folder: <a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=" target="_blank" title="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=">https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=</a></p>\n<div class="message_inline_ref"><a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=" target="_blank" title="Saves"><img src="https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png"></a><div><div class="message_inline_image_title">Saves</div><desc class="message_inline_image_desc"></desc></div></div>')

    def test_inline_dropbox_preview(self):
        # Test photo album previews
        msg = 'https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5'
        image_info = {'image': 'https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0', 'desc': 'Shared with Dropbox', 'title': '1 photo'}
        with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
            converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5" target="_blank" title="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5">https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5" target="_blank" title="1 photo"><img src="https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0"></a></div>')

    def test_inline_dropbox_negative(self):
        # Make sure we're not overzealous in our conversion:
        msg = 'Look at the new dropbox logo: https://www.dropbox.com/static/images/home_logo.png'
        with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=None):
            converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>Look at the new dropbox logo: <a href="https://www.dropbox.com/static/images/home_logo.png" target="_blank" title="https://www.dropbox.com/static/images/home_logo.png">https://www.dropbox.com/static/images/home_logo.png</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/static/images/home_logo.png" target="_blank" title="https://www.dropbox.com/static/images/home_logo.png"><img src="https://www.dropbox.com/static/images/home_logo.png"></a></div>')

    def test_inline_dropbox_bad(self):
        # Don't fail on bad dropbox links
        msg = "https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM"
        with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=None):
            converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p><a href="https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM" target="_blank" title="https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM">https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM</a></p>')

    def test_twitter_id_extraction(self):
        # Tweet IDs are extracted from several URL shapes (hashbang,
        # /status/, /statuses/, trailing slash, /photo/ suffix).
        self.assertEqual(bugdown.get_tweet_id('http://twitter.com/#!/VizzQuotes/status/409030735191097344'), '409030735191097344')
        self.assertEqual(bugdown.get_tweet_id('http://twitter.com/VizzQuotes/status/409030735191097344'), '409030735191097344')
        self.assertEqual(bugdown.get_tweet_id('http://twitter.com/VizzQuotes/statuses/409030735191097344'), '409030735191097344')
        self.assertEqual(bugdown.get_tweet_id('https://twitter.com/wdaher/status/1017581858'), '1017581858')
        self.assertEqual(bugdown.get_tweet_id('https://twitter.com/wdaher/status/1017581858/'), '1017581858')
        self.assertEqual(bugdown.get_tweet_id('https://twitter.com/windyoona/status/410766290349879296/photo/1'), '410766290349879296')
        self.assertEqual(bugdown.get_tweet_id('https://twitter.com/windyoona/status/410766290349879296/'), '410766290349879296')

    def test_inline_interesting_links(self):
        def make_link(url):
            return '<a href="%s" target="_blank" title="%s">%s</a>' % (url, url, url)

        normal_tweet_html = ('<a href="https://twitter.com/twitter" target="_blank"'
                             ' title="https://twitter.com/twitter">@twitter</a> '
                             'meets @seepicturely at #tcdisrupt cc.'
                             '<a href="https://twitter.com/boscomonkey" target="_blank"'
                             ' title="https://twitter.com/boscomonkey">@boscomonkey</a> '
                             '<a href="https://twitter.com/episod" target="_blank"'
                             ' title="https://twitter.com/episod">@episod</a> '
                             '<a href="http://t.co/6J2EgYM" target="_blank"'
                             ' title="http://t.co/6J2EgYM">http://instagram.com/p/MuW67/</a>')

        mention_in_link_tweet_html = """<a href="http://t.co/@foo" target="_blank" title="http://t.co/@foo">http://foo.com</a>"""

        media_tweet_html = ('<a href="http://t.co/xo7pAhK6n3" target="_blank" title="http://t.co/xo7pAhK6n3">'
                            'http://twitter.com/NEVNBoston/status/421654515616849920/photo/1</a>')

        def make_inline_twitter_preview(url, tweet_html, image_html=''):
            ## As of right now, all previews are mocked to be the exact same tweet
            return ('<div class="inline-preview-twitter">'
                    '<div class="twitter-tweet">'
                    '<a href="%s" target="_blank">'
                    '<img class="twitter-avatar"'
                    ' src="https://si0.twimg.com/profile_images/1380912173/Screen_shot_2011-06-03_at_7.35.36_PM_normal.png">'
                    '</a>'
                    '<p>%s</p>'
                    '<span>- Eoin McMillan (@imeoin)</span>'
                    '%s'
                    '</div>'
                    '</div>') % (url, tweet_html, image_html)

        msg = 'http://www.twitter.com'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s</p>' % make_link('http://www.twitter.com'))

        msg = 'http://www.twitter.com/wdaher/'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s</p>' % make_link('http://www.twitter.com/wdaher/'))

        msg = 'http://www.twitter.com/wdaher/status/3'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s</p>' % make_link('http://www.twitter.com/wdaher/status/3'))

        # id too long
        msg = 'http://www.twitter.com/wdaher/status/2879779692873154569'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s</p>' % make_link('http://www.twitter.com/wdaher/status/2879779692873154569'))

        # id too large (i.e. tweet doesn't exist)
        msg = 'http://www.twitter.com/wdaher/status/999999999999999999'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s</p>' % make_link('http://www.twitter.com/wdaher/status/999999999999999999'))

        msg = 'http://www.twitter.com/wdaher/status/287977969287315456'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s</p>\n%s' % (
            make_link('http://www.twitter.com/wdaher/status/287977969287315456'),
            make_inline_twitter_preview('http://www.twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))

        msg = 'https://www.twitter.com/wdaher/status/287977969287315456'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s</p>\n%s' % (
            make_link('https://www.twitter.com/wdaher/status/287977969287315456'),
            make_inline_twitter_preview('https://www.twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))

        msg = 'http://twitter.com/wdaher/status/287977969287315456'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s</p>\n%s' % (
            make_link('http://twitter.com/wdaher/status/287977969287315456'),
            make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))

        # A max of 3 will be converted
        msg = ('http://twitter.com/wdaher/status/287977969287315456 '
               'http://twitter.com/wdaher/status/287977969287315457 '
               'http://twitter.com/wdaher/status/287977969287315457 '
               'http://twitter.com/wdaher/status/287977969287315457')
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s %s %s %s</p>\n%s%s%s' % (
            make_link('http://twitter.com/wdaher/status/287977969287315456'),
            make_link('http://twitter.com/wdaher/status/287977969287315457'),
            make_link('http://twitter.com/wdaher/status/287977969287315457'),
            make_link('http://twitter.com/wdaher/status/287977969287315457'),
            make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315456', normal_tweet_html),
            make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315457', normal_tweet_html),
            make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315457', normal_tweet_html)))

        # Tweet has a mention in a URL, only the URL is linked
        msg = 'http://twitter.com/wdaher/status/287977969287315458'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s</p>\n%s' % (
            make_link('http://twitter.com/wdaher/status/287977969287315458'),
            make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315458', mention_in_link_tweet_html)))

        # Tweet with an image
        msg = 'http://twitter.com/wdaher/status/287977969287315459'
        converted = bugdown_convert(msg)
        self.assertEqual(converted, '<p>%s</p>\n%s' % (
            make_link('http://twitter.com/wdaher/status/287977969287315459'),
            make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315459',
                                        media_tweet_html,
                                        ('<div class="twitter-image">'
                                         '<a href="http://t.co/xo7pAhK6n3" target="_blank" title="http://t.co/xo7pAhK6n3">'
                                         '<img src="https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg:small">'
                                         '</a>'
                                         '</div>'))))

    def test_fetch_tweet_data_settings_validation(self):
        # Without Twitter API credentials, tweet fetching returns None.
        with self.settings(TEST_SUITE=False, TWITTER_CONSUMER_KEY=None):
            self.assertIs(None, bugdown.fetch_tweet_data('287977969287315459'))

    def test_realm_emoji(self):
        def emoji_img(name, url):
            return '<img alt="%s" class="emoji" src="%s" title="%s">' % (name, get_camo_url(url), name)

        zulip_realm = get_realm('zulip.com')
        url = "https://zulip.com/test_realm_emoji.png"
        check_add_realm_emoji(zulip_realm, "test", url)
        # Needs to mock an actual message because that's how bugdown obtains the realm
        msg = Message(sender=get_user_profile_by_email("hamlet@zulip.com"))
        converted = bugdown.convert(":test:", "zulip.com", msg)
        self.assertEqual(converted, '<p>%s</p>' %(emoji_img(':test:', url)))

        # Once the emoji is removed, :test: renders as plain text.
        do_remove_realm_emoji(zulip_realm, 'test')
        converted = bugdown.convert(":test:", "zulip.com", msg)
        self.assertEqual(converted, '<p>:test:</p>')

    def test_unicode_emoji(self):
        msg = u'\u2615'  # ☕
        converted = bugdown_convert(msg)
        self.assertEqual(converted, u'<p><img alt="\u2615" class="emoji" src="/static/third/gemoji/images/emoji/unicode/2615.png" title="\u2615"></p>')

        msg = u'\u2615\u2615'  # ☕☕
        converted = bugdown_convert(msg)
        self.assertEqual(converted, u'<p><img alt="\u2615" class="emoji" src="/static/third/gemoji/images/emoji/unicode/2615.png" title="\u2615"><img alt="\u2615" class="emoji" src="/static/third/gemoji/images/emoji/unicode/2615.png" title="\u2615"></p>')

    def test_realm_patterns(self):
        realm = get_realm('zulip.com')
        url_format_string = r"https://trac.zulip.net/ticket/%(id)s"
        realm_filter = RealmFilter(realm=realm,
                                   pattern=r"#(?P<id>[0-9]{2,8})",
                                   url_format_string=url_format_string)
        realm_filter.save()
        self.assertEqual(
            str(realm_filter),
            '<RealmFilter(zulip.com): #(?P<id>[0-9]{2,8})'
            ' https://trac.zulip.net/ticket/%(id)s>')

        msg = Message(sender=get_user_profile_by_email("othello@zulip.com"),
                      subject="#444")
        content = "We should fix #224 and #115, but not issue#124 or #1124z or [trac #15](https://trac.zulip.net/ticket/16) today."
        converted = bugdown.convert(content, realm_domain='zulip.com', message=msg)
        converted_subject = bugdown.subject_links(realm.domain.lower(), msg.subject)
        self.assertEqual(converted, '<p>We should fix <a href="https://trac.zulip.net/ticket/224" target="_blank" title="https://trac.zulip.net/ticket/224">#224</a> and <a href="https://trac.zulip.net/ticket/115" target="_blank" title="https://trac.zulip.net/ticket/115">#115</a>, but not issue#124 or #1124z or <a href="https://trac.zulip.net/ticket/16" target="_blank" title="https://trac.zulip.net/ticket/16">trac #15</a> today.</p>')
        self.assertEqual(converted_subject, [u'https://trac.zulip.net/ticket/444'])

    def test_realm_patterns_negative(self):
        realm = get_realm('zulip.com')
        RealmFilter(realm=realm, pattern=r"#(?P<id>[0-9]{2,8})",
                    url_format_string=r"https://trac.zulip.net/ticket/%(id)s").save()
        boring_msg = Message(sender=get_user_profile_by_email("othello@zulip.com"),
                             subject=u"no match here")
        converted_boring_subject = bugdown.subject_links(realm.domain.lower(), boring_msg.subject)
        self.assertEqual(converted_boring_subject, [])

    def test_alert_words(self):
        user_profile = get_user_profile_by_email("othello@zulip.com")
        do_set_alert_words(user_profile, ["ALERTWORD", "scaryword"])
        msg = Message(sender=user_profile, sending_client=get_client("test"))

        content = "We have an ALERTWORD day today!"
        self.assertEqual(msg.render_markdown(content), "<p>We have an ALERTWORD day today!</p>")
        self.assertEqual(msg.user_ids_with_alert_words, set([user_profile.id]))

        msg = Message(sender=user_profile, sending_client=get_client("test"))
        content = "We have a NOTHINGWORD day today!"
        self.assertEqual(msg.render_markdown(content), "<p>We have a NOTHINGWORD day today!</p>")
        self.assertEqual(msg.user_ids_with_alert_words, set())

    def test_mention_wildcard(self):
        user_profile = get_user_profile_by_email("othello@zulip.com")
        msg = Message(sender=user_profile, sending_client=get_client("test"))

        content = "@all test"
        self.assertEqual(msg.render_markdown(content),
                         '<p><span class="user-mention" data-user-email="*">@all</span> test</p>')
        self.assertTrue(msg.mentions_wildcard)

    def test_mention_everyone(self):
        user_profile = get_user_profile_by_email("othello@zulip.com")
        msg = Message(sender=user_profile, sending_client=get_client("test"))

        content = "@everyone test"
        self.assertEqual(msg.render_markdown(content),
                         '<p><span class="user-mention" data-user-email="*">@everyone</span> test</p>')
        self.assertTrue(msg.mentions_wildcard)

    def test_mention_single(self):
        sender_user_profile = get_user_profile_by_email("othello@zulip.com")
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))

        content = "@**King Hamlet**"
        self.assertEqual(msg.render_markdown(content),
                         '<p><span class="user-mention" data-user-email="hamlet@zulip.com">@King Hamlet</span></p>')
        self.assertEqual(msg.mentions_user_ids, set([user_profile.id]))

    def test_mention_shortname(self):
        sender_user_profile = get_user_profile_by_email("othello@zulip.com")
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))

        content = "@**hamlet**"
        self.assertEqual(msg.render_markdown(content),
                         '<p><span class="user-mention" data-user-email="hamlet@zulip.com">@King Hamlet</span></p>')
        self.assertEqual(msg.mentions_user_ids, set([user_profile.id]))

    def test_mention_multiple(self):
        sender_user_profile = get_user_profile_by_email("othello@zulip.com")
        hamlet = get_user_profile_by_email("hamlet@zulip.com")
        cordelia = get_user_profile_by_email("cordelia@zulip.com")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))

        content = "@**King Hamlet** and @**cordelia**, check this out"
        self.assertEqual(msg.render_markdown(content),
                         '<p>'
                         '<span class="user-mention" '
                         'data-user-email="hamlet@zulip.com">@King Hamlet</span> and '
                         '<span class="user-mention" '
                         'data-user-email="cordelia@zulip.com">@Cordelia Lear</span>, '
                         'check this out</p>')
        self.assertEqual(msg.mentions_user_ids, set([hamlet.id, cordelia.id]))

    def test_mention_invalid(self):
        sender_user_profile = get_user_profile_by_email("othello@zulip.com")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))

        content = "Hey @**Nonexistent User**"
        self.assertEqual(msg.render_markdown(content),
                         '<p>Hey @<strong>Nonexistent User</strong></p>')
        self.assertEqual(msg.mentions_user_ids, set())

    def test_stream_subscribe_button_simple(self):
        msg = '!_stream_subscribe_button(simple)'
        converted = bugdown_convert(msg)
        self.assertEqual(
            converted,
            '<p>'
            '<span class="inline-subscribe" data-stream-name="simple">'
            '<button class="inline-subscribe-button btn">Subscribe to simple</button>'
            '<span class="inline-subscribe-error"></span>'
            '</span>'
            '</p>'
        )

    def test_stream_subscribe_button_in_name(self):
        # A stream name containing an escaped closing paren.
        msg = '!_stream_subscribe_button(simple (not\\))'
        converted = bugdown_convert(msg)
        self.assertEqual(
            converted,
            '<p>'
            '<span class="inline-subscribe" data-stream-name="simple (not)">'
            '<button class="inline-subscribe-button btn">Subscribe to simple (not)</button>'
            '<span class="inline-subscribe-error"></span>'
            '</span>'
            '</p>'
        )

    def test_stream_subscribe_button_after_name(self):
        msg = '!_stream_subscribe_button(simple) (not)'
        converted = bugdown_convert(msg)
        self.assertEqual(
            converted,
            '<p>'
            '<span class="inline-subscribe" data-stream-name="simple">'
            '<button class="inline-subscribe-button btn">Subscribe to simple</button>'
            '<span class="inline-subscribe-error"></span>'
            '</span>'
            ' (not)</p>'
        )

    def test_stream_subscribe_button_slash(self):
        # A stream name containing a literal backslash.
        msg = '!_stream_subscribe_button(simple\\\\)'
        converted = bugdown_convert(msg)
        self.assertEqual(
            converted,
            '<p>'
            '<span class="inline-subscribe" data-stream-name="simple\\">'
            '<button class="inline-subscribe-button btn">Subscribe to simple\\</button>'
            '<span class="inline-subscribe-error"></span>'
            '</span>'
            '</p>'
        )

    def test_in_app_modal_link(self):
        msg = '!modal_link(#settings, Settings page)'
        converted = bugdown_convert(msg)
        self.assertEqual(
            converted,
            '<p>'
            '<a data-toggle="modal" href="#settings" title="#settings">Settings page</a>'
            '</p>'
        )

    def test_mit_rendering(self):
        """Test the markdown configs for the MIT Zephyr mirroring system;
        verifies almost all inline patterns are disabled, but
        inline_interesting_links is still enabled"""
        msg = "**test**"
        converted = bugdown.convert(msg, "zephyr_mirror")
        self.assertEqual(
            converted,
            "<p>**test**</p>",
        )
        msg = "* test"
        converted = bugdown.convert(msg, "zephyr_mirror")
        self.assertEqual(
            converted,
            "<p>* test</p>",
        )
        msg = "https://lists.debian.org/debian-ctte/2014/02/msg00173.html"
        converted = bugdown.convert(msg, "zephyr_mirror")
        self.assertEqual(
            converted,
            '<p><a href="https://lists.debian.org/debian-ctte/2014/02/msg00173.html" target="_blank" title="https://lists.debian.org/debian-ctte/2014/02/msg00173.html">https://lists.debian.org/debian-ctte/2014/02/msg00173.html</a></p>',
        )
class BugdownApiTests(ZulipTestCase):
    """Tests the /api/v1/messages/render endpoint, which renders raw
    message content to HTML on behalf of an authenticated API user.
    """
    def test_render_message_api(self):
        # type: () -> None
        content = 'That is a **bold** statement'
        result = self.client_get(
            '/api/v1/messages/render',
            dict(content=content),
            **self.api_auth('othello@zulip.com')
        )
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        self.assertEqual(data['rendered'],
                         u'<p>That is a <strong>bold</strong> statement</p>')
class BugdownErrorTests(ZulipTestCase):
    """Tests error propagation when markdown rendering fails, both at the
    bugdown.convert level and at message-send time.
    """
    def test_bugdown_error_handling(self):
        # type: () -> None
        with self.simulated_markdown_failure():
            with self.assertRaises(bugdown.BugdownRenderingException):
                bugdown.convert('', 'zulip.com')

    def test_send_message_errors(self):
        # type: () -> None
        message = 'whatever'
        with self.simulated_markdown_failure():
            # We don't use assertRaisesRegexp because it seems to not
            # handle i18n properly here on some systems.
            with self.assertRaises(JsonableError):
                self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, message)
| apache-2.0 |
jobiols/server-tools | base_module_doc_rst/__openerp__.py | 9 | 1744 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo/OpenERP addon manifest for the OCA module that generates
    # RST technical guides and relationship graphs for installed modules.
    "name": "Generate Docs of Modules",
    "version": "8.0.1.0.0",
    "category": "Tools",
    "summary": "Modules Technical Guides in RST and Relationship Graphs",
    "website": "https://odoo-community.org/",
    "author": "OpenERP SA,Odoo Community Association (OCA)",
    "contributors": [
        "OpenERP SA <http://www.odoo.com>",
        "Matjaž Mozetič <m.mozetic@matmoz.si>",
    ],
    "license": "AGPL-3",
    "depends": ["base"],
    # pydot is required at runtime to draw the relationship graphs.
    "external_dependencies": {
        'python': [
            'pydot',
        ],
    },
    "data": [
        "base_module_doc_rst_view.xml",
        "wizard/generate_relation_graph_view.xml",
        "wizard/tech_guide_rst_view.xml",
        "module_report.xml",
    ],
    "demo": [],
    "installable": True,
}
| agpl-3.0 |
feroda/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/rpc.py | 381 | 5849 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import socket
import xmlrpclib
#import tiny_socket
import re
class RPCGateway(object):
    """Abstract base for RPC transports (XML-RPC / NET-RPC).

    Stores the connection triple and defines the transport interface;
    concrete subclasses implement listdb/login/execute.
    """
    def __init__(self, host, port, protocol):
        self.protocol = protocol
        self.host = host
        self.port = port

    def get_url(self):
        """Return the base URL, e.g. 'http://host:port/'."""
        return "{0}://{1}:{2}/".format(self.protocol, self.host, self.port)

    def listdb(self):
        """List available databases. Implemented by subclasses."""
        pass

    def login(self, db, user, password):
        """Authenticate against `db`. Implemented by subclasses."""
        pass

    def execute(self, obj, method, *args):
        """Invoke `method` on server object `obj`. Implemented by subclasses."""
        pass
class RPCSession(object):
    """Parses a connection URL and dispatches calls through the matching
    gateway (XMLRPCGateway for http/https, NETRPCGateway for socket://).

    NOTE: this module is Python 2 only (`except Exception, e` syntax,
    `basestring`).
    """
    def __init__(self, url):
        m = re.match('^(http[s]?://|socket://)([\w.\-]+):(\d{1,5})$', url or '')
        # FIXME: m.group() is called before the `if not m` check below, so a
        # non-matching URL raises AttributeError here instead of reaching
        # the guard.
        host = m.group(2)
        port = m.group(3)
        protocol = m.group(1)
        # FIXME: `return -1` from __init__ is invalid in Python; it raises
        # TypeError at instantiation rather than signalling failure.
        if not m:
            return -1
        if protocol == 'http://' or protocol == 'https://':
            # Strip the trailing '://' so the gateway stores just 'http(s)'.
            self.gateway = XMLRPCGateway(host, port, protocol[:-3])
        elif protocol == 'socket://':
            self.gateway = NETRPCGateway(host, port)

    def listdb(self):
        return self.gateway.listdb()

    def login(self, db, user, password):
        # Returns the uid (> 0) on success, -1 on failure.
        if password is None:
            return -1
        uid = self.gateway.login(db, user or '', password or '')
        if uid <= 0:
            return -1
        self.uid = uid
        self.db = db
        self.password = password
        self.open = True
        return uid

    def execute(self, obj, method, *args):
        # NOTE: on failure the traceback is formatted into `info` but never
        # used/logged, and the method implicitly returns None.
        try:
            result = self.gateway.execute(obj, method, *args)
            return self.__convert(result)
        except Exception,e:
            import traceback,sys
            info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))

    def __convert(self, result):
        # Recursively normalize RPC results: strings to unicode, containers
        # converted element-by-element, everything else passed through.
        if isinstance(result, basestring):
            # try to convert into unicode string
            # NOTE(review): `ustr` is not defined or imported in this module
            # -- presumably provided by a sibling module; as written this
            # branch would raise NameError and fall into the except below.
            try:
                return ustr(result)
            except Exception, e:
                return result
        elif isinstance(result, list):
            return [self.__convert(val) for val in result]
        elif isinstance(result, tuple):
            return tuple([self.__convert(val) for val in result])
        elif isinstance(result, dict):
            newres = {}
            for key, val in result.items():
                newres[key] = self.__convert(val)
            return newres
        else:
            return result
class XMLRPCGateway(RPCGateway):
    """XML-RPC implementation of RPCGateway.
    """

    def __init__(self, host, port, protocol='http'):
        super(XMLRPCGateway, self).__init__(host, port, protocol)
        # Bug fix: keep the endpoint URL on the instance rather than in a
        # module-level global.  The old `global rpc_url` meant every newly
        # constructed gateway silently redirected the calls of all
        # previously created instances.
        self.rpc_url = self.get_url() + 'xmlrpc/'

    def listdb(self):
        """Return the server's database list, or -1 on any error."""
        sock = xmlrpclib.ServerProxy(self.rpc_url + 'db')
        try:
            return sock.list()
        except Exception:
            return -1

    def login(self, db, user, password):
        """Return the uid for the credentials, or -1 on any error."""
        sock = xmlrpclib.ServerProxy(self.rpc_url + 'common')
        try:
            res = sock.login(db, user, password)
        except Exception:
            # Best-effort: the original only formatted an unused traceback
            # string here before returning the error sentinel.
            return -1
        return res

    def execute(self, sDatabase, UID, sPassword, obj, method, *args):
        """Call ``method`` on model ``obj`` with explicit credentials.

        Note the signature intentionally differs from the base class: the
        XML-RPC 'object' endpoint requires db/uid/password per call.
        """
        sock = xmlrpclib.ServerProxy(self.rpc_url + 'object')
        return sock.execute(sDatabase, UID, sPassword, obj, method, *args)
class NETRPCGateway(RPCGateway):
    """Net-RPC (raw socket) implementation of RPCGateway."""

    def __init__(self, host, port):
        super(NETRPCGateway, self).__init__(host, port, 'socket')

    def listdb(self):
        """Return the server's database list, or -1 on any error."""
        conn = mysocket()
        try:
            conn.connect(self.host, self.port)
            conn.mysend(('db', 'list'))
            databases = conn.myreceive()
            conn.disconnect()
            return databases
        except Exception:
            return -1

    def login(self, db, user, password):
        """Return the uid for the credentials, or -1 on any error."""
        conn = mysocket()
        try:
            conn.connect(self.host, self.port)
            conn.mysend(('common', 'login', db, user, password))
            uid = conn.myreceive()
            conn.disconnect()
        except Exception:
            return -1
        return uid

    def execute(self, obj, method, *args):
        """Run ``method`` on ``obj``; best-effort, returns None on error."""
        conn = mysocket()
        try:
            conn.connect(self.host, self.port)
            payload = ('object', 'execute', obj, method) + args
            conn.mysend(payload)
            response = conn.myreceive()
            conn.disconnect()
            return response
        except Exception:
            # Swallow errors like the original (it only built an unused
            # traceback string) and fall through to None.
            return None
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
masterpowers/angular-laravel | node_modules/laravel-elixir/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/input.py | 457 | 112827 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import copy
import gyp.common
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
# Module-level tables consulted while loading and merging .gyp files.

# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']

# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']

# base_path_sections is a list of sections defined by GYP that contain
# pathnames.  The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Extended with generator-provided path sections; see the merged-list note on
# base_path_sections above.
path_sections = []

# Merge-operator suffix characters that may trail a section name.  Kept for
# compatibility even though IsPathSection strips them via str.rstrip below.
is_path_section_charset = set('=+?!')
is_path_section_match_re = re.compile('_(dir|file|path)s?$')


def IsPathSection(section):
  """Return a truthy value when |section| is treated as containing paths."""
  # Trailing merge markers ('=+?!') apply to the underlying section name, so
  # peel them off before testing.  '/' is deliberately not stripped because
  # a regular-expression-valued key can never itself be a path.
  stripped = section.rstrip('=+?!')
  return stripped in path_sections or is_path_section_match_re.search(stripped)
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations.  It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',
  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Filled in elsewhere with the merge of the base list above and any
# generator-provided keys (see the comment on base_non_configuration_keys).
non_configuration_keys = []

# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]

# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False

# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly.  Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file.  Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers.  It
  is used for recursion.

  The returned list will not contain any duplicate entries.  Each build file
  in the list will be relative to the current directory.
  """
  if included is None:
    # Idiom fix: identity comparison with None ('== None' invokes __eq__
    # unnecessarily).  The None sentinel avoids a shared mutable default.
    included = []

  if build_file_path in included:
    # Already visited; also terminates include cycles.
    return included

  included.append(build_file_path)

  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)

  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.  Note that this is slower than eval()
  is, but validates the structure via the compiler AST first.
  """
  # Parse with the (Python 2) compiler module and insist the file is exactly
  # one expression statement; CheckNode then walks the AST.
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  module_children = ast.getChildren()
  assert module_children[0] is None
  assert isinstance(module_children[1], Stmt)
  stmt_children = module_children[1].getChildren()
  assert isinstance(stmt_children[0], Discard)
  discard_children = stmt_children[0].getChildren()
  assert len(discard_children) == 1
  return CheckNode(discard_children[0], [])
def CheckNode(node, keypath):
  """Recursively convert a restricted compiler AST node into plain data.

  Only Dict, List and Const nodes are permitted.  |keypath| is the list of
  keys/indices leading to |node|, used in error messages.  Raises GypError
  on a repeated dictionary key and TypeError on any other node type.
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    # Renamed from 'dict' to stop shadowing the builtin of the same name.
    result = {}
    # Children alternate key/value, hence the stride of 2.
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in result:
        raise GypError("Key '" + key + "' repeated at level " +
              repr(len(keypath) + 1) + " with key path '" +
              '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      result[key] = CheckNode(c[n + 1], kp)
    return result
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    # Portable raise form; the old 'raise TypeError, msg' is py2-only syntax.
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
                    "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
                     is_target, check):
  """Read, evaluate and cache a single .gyp/.gypi file.

  Returns the file's dict (cached in |data| keyed by path) with any
  'includes' merged in.  |includes| lists extra include files forced on a
  target build file; |check| selects the strict CheckedEval parser over a
  restricted eval().  Raises GypError if the file is missing or does not
  evaluate to a dict.
  """
  if build_file_path in data:
    return data[build_file_path]

  if os.path.exists(build_file_path):
    # Fix: close the handle deterministically; the original leaked the file
    # object returned by a bare open().read().
    build_file = open(build_file_path)
    try:
      build_file_contents = build_file.read()
    finally:
      build_file.close()
  else:
    raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))

  build_file_data = None
  try:
    if check:
      build_file_data = CheckedEval(build_file_contents)
    else:
      # eval of build files is by design; __builtins__ is emptied to limit
      # what expressions inside the file can do.
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError as e:
    e.filename = build_file_path
    raise
  except Exception as e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise

  if not isinstance(build_file_data, dict):
    raise GypError("%s does not evaluate to a dictionary." % build_file_path)

  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}

  # Scan for includes and merge them in.
  if ('skip_includes' not in build_file_data or
      not build_file_data['skip_includes']):
    try:
      if is_target:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, variables, includes, check)
      else:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, variables, None, check)
    except Exception as e:
      gyp.common.ExceptionAppend(e,
                                 'while reading includes of ' +
                                 build_file_path)
      raise

  return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  variables, includes, check):
  """Merge every 'includes' file into |subdict|, recursing into sub-dicts.

  |includes| optionally forces extra include paths on this dict; paths named
  in subdict['includes'] are resolved relative to |subdict_path|.  Each
  include loaded is also recorded in aux_data[subdict_path]['included'].
  """
  includes_list = []
  # Idiom fix: identity comparison with None instead of '!= None'.
  if includes is not None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path),
                                        include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']

  # Merge in the included files.
  for include in includes_list:
    # Idiom fix: 'not in' instead of 'not x in'.
    if 'included' not in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)

    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)

    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, variables, None,
                                False, check),
               subdict_path, include)

  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if v.__class__ == dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, variables,
                                    None, check)
    elif v.__class__ == list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, variables,
                                    check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data,
                                  variables, check):
  """Walk |sublist| and process include directives in any dict found.

  Lists cannot themselves carry an 'includes' key, so this only dispatches:
  dicts go back through LoadBuildFileIncludesIntoDict, nested lists recurse.
  """
  for entry in sublist:
    # Exact-type checks (not isinstance) are intentional here.
    if entry.__class__ == dict:
      LoadBuildFileIncludesIntoDict(entry, sublist_path, data, aux_data,
                                    variables, None, check)
    elif entry.__class__ == list:
      LoadBuildFileIncludesIntoList(entry, sublist_path, data, aux_data,
                                    variables, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  """Expand each entry in data['targets'] into one target per toolset.

  A target whose 'toolsets' list has N entries becomes N targets, each with
  a scalar 'toolset' key; the 'toolsets' list is consumed.  When the
  module-level multiple_toolsets flag is False everything collapses to the
  single 'target' toolset.  Also recurses into the dicts of 'conditions'
  entries, since those may define targets too.
  """
  if 'targets' in data:
    target_list = data['targets']
    new_target_list = []
    for target in target_list:
      # If this target already has an explicit 'toolset', and no 'toolsets'
      # list, don't modify it further.
      if 'toolset' in target and 'toolsets' not in target:
        new_target_list.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      if 'toolsets' in target:
        del target['toolsets']
      if len(toolsets) > 0:
        # Optimization: only do copies if more than one toolset is specified.
        # The original dict is reused for the first toolset; deep copies
        # serve the remaining ones.
        for build in toolsets[1:]:
          new_target = copy.deepcopy(target)
          new_target['toolset'] = build
          new_target_list.append(new_target)
        target['toolset'] = toolsets[0]
        new_target_list.append(target)
    data['targets'] = new_target_list
  if 'conditions' in data:
    for condition in data['conditions']:
      if isinstance(condition, list):
        # condition[0] is the expression; the rest are true/false dicts.
        for condition_dict in condition[1:]:
          ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name.  It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Load a target-bearing build file into |data| and process it.

  Returns False if the file was already loaded.  When load_dependencies is
  true, recursively loads each dependency's build file and returns None;
  otherwise returns (build_file_path, dependencies) so the caller (e.g. the
  parallel loader) can schedule the dependency loads itself.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')
  if build_file_path in data['target_build_files']:
    # Already loaded.
    return False
  data['target_build_files'].add(build_file_path)
  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)
  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, variables,
                                     includes, True, check)
  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth
  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')
  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)
  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)
  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)
  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)
  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)
    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults.  Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = copy.deepcopy(build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1
    # No longer needed.
    del build_file_data['target_defaults']
  # Look for dependencies.  This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.
  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        # Normalize each dependency to its qualified build-file path.
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
            e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, data,
                            aux_data, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

     This wrapper is used when LoadTargetBuildFile is executed in
     a worker process.  It returns only the NEW keys added to data/aux_data
     (plus the dependency list) so the result can be pickled back to the
     parent, or None on any failure.
  """
  try:
    # Let the parent handle Ctrl-C; workers ignore SIGINT.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value
    # Save the keys so we can return data that changed.
    data_keys = set(data)
    aux_data_keys = set(aux_data)
    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, data,
                                 aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      # File was already loaded (LoadTargetBuildFile returned False).
      return result
    (build_file_path, dependencies) = result
    # Collect only the entries added by this load.
    data_out = {}
    for key in data:
      if key == 'target_build_files':
        continue
      if key not in data_keys:
        data_out[key] = data[key]
    aux_data_out = {}
    for key in aux_data:
      if key not in aux_data_keys:
        aux_data_out[key] = aux_data[key]
    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            data_out,
            aux_data_out,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  """Raised when something goes wrong while loading build files in parallel."""
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.

  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  All mutable fields are guarded by self.condition.
  """

  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The "aux_data" dict that was passed to LoadTargetBuildFileParallel
    self.aux_data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate if there was an error in a child process.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.

    Invoked by the pool for each finished worker call; merges the worker's
    new data/aux_data entries and newly discovered dependencies under the
    condition lock, then notifies the main loop.  A falsy |result| marks a
    worker failure.
    """
    self.condition.acquire()
    if not result:
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, data0, aux_data0, dependencies0) = result
    self.data['target_build_files'].add(build_file_path0)
    for key in data0:
      self.data[key] = data0[key]
    for key in aux_data0:
      self.aux_data[key] = aux_data0[key]
    for new_dependency in dependencies0:
      # Only queue dependencies that haven't been scheduled before.
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, aux_data,
                                 variables, includes, depth, check,
                                 generator_input_info):
  """Load |build_files| and all their dependencies using a process pool.

  Workers run CallLoadTargetBuildFile; results are merged into |data| and
  |aux_data| by ParallelState.LoadTargetBuildFileCallback.  Exits the
  process (sys.exit(1)) if any worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data
  parallel_state.aux_data = aux_data
  try:
    parallel_state.condition.acquire()
    # Loop until every scheduled file is both dispatched and completed.
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing to dispatch; wait for a worker callback to add more.
        parallel_state.condition.wait()
        continue
      dependency = parallel_state.dependencies.pop()
      parallel_state.pending += 1
      data_in = {}
      data_in['target_build_files'] = data['target_build_files']
      aux_data_in = {}
      # Module globals the worker must replicate to behave identically.
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}
      if not parallel_state.pool:
        # NOTE(review): pool size is hard-coded to 8 rather than derived
        # from cpu_count() -- confirm whether that is intentional.
        parallel_state.pool = multiprocessing.Pool(8)
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  data_in, aux_data_in,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e
  # NOTE(review): this release is not in a finally block, so a non-
  # KeyboardInterrupt exception above would leave the condition held.
  parallel_state.condition.release()
  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None
  if parallel_state.error:
    sys.exit(1)
# Opening brackets, and a map from each closing bracket to its opener.  Used
# to locate the span that balances the first opening bracket in a string.
# For "<(foo <(bar)) blah" the answer is (1, 13): the whole string except
# the leading "<" and the trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}


def FindEnclosingBracketGroup(input_str):
  """Return (start, end) of the first balanced bracket group, else (-1, -1)."""
  open_stack = []
  group_start = -1
  for pos, ch in enumerate(input_str):
    if ch in LBRACKETS:
      if group_start == -1:
        group_start = pos
      open_stack.append(ch)
    elif ch in BRACKETS:
      # A closer with no opener, or a mismatched pair, means no valid group.
      if not open_stack or open_stack.pop() != BRACKETS[ch]:
        return (-1, -1)
      if not open_stack:
        # The first opener just closed: the group is complete.
        return (group_start, pos + 1)
  return (-1, -1)
canonical_int_re = re.compile('(0|-?[1-9][0-9]*)$')


def IsStrCanonicalInt(string):
  """Return a truthy value iff |string| is a canonical integer literal.

  Canonical means str(int(string)) == string: no leading zeros, no '+'
  sign, and no '-0'.  Non-string inputs are never canonical.
  """
  if not isinstance(string, str):
    return False
  return canonical_int_re.match(string)
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
# Named groups: 'replace' is the full reference, 'type' the operator
# characters, 'command_string' an optional interpreter name, 'is_array' a
# '[' marker for list arguments, and 'content' the variable name or command.
early_variable_re = re.compile(
    '(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    '(?P<command_string>[-a-zA-Z0-9_.]+)?'
    '\((?P<is_array>\s*\[?)'
    '(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    '(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    '(?P<command_string>[-a-zA-Z0-9_.]+)?'
    '\((?P<is_array>\s*\[?)'
    '(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    '(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    '(?P<command_string>[-a-zA-Z0-9_.]+)?'
    '\((?P<is_array>\s*\[?)'
    '(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Adapt a shell command for the host platform.

  On Windows, a leading 'cat ' is rewritten to 'type '; everywhere else the
  command is returned unchanged.  |cmd| may be a string or an argv list (in
  which case only the first element is rewritten).
  """
  if sys.platform != 'win32':
    return cmd
  if type(cmd) == list:
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Expansion phases (see ExpandVariables and ProcessConditionsInDict):
# "early" handles '<'-style references and 'conditions'; "late" handles
# '>'-style references and 'target_conditions'; "latelate" handles only
# '^'-style references.
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
  """Expand GYP variable/command references in |input| for the given |phase|.

  |input| (note: the name shadows the builtin; kept for interface
  compatibility) is stringified before matching.  |variables| maps variable
  names to values; |build_file| anchors relative paths and error messages.
  Returns the expanded value; strings that are canonical integers are
  converted to ints, and '@'-style references expand to lists.
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]

    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)

    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()

    # expand_to_list is true if an @ variant is used.  In that case,
    # the expansion should result in a list.  Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context.  Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement

    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string.  Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None

    # Support <|(listfile.txt ...) which generates a file
    # containing items from a gyp list, generated at gyp time.
    # This works around actions/rules which have more inputs than will
    # fit on the command line.
    if file_list:
      if type(contents) == list:
        contents_list = contents
      else:
        contents_list = contents.split(' ')
      replacement = contents_list[0]
      if os.path.isabs(replacement):
        raise GypError('| cannot handle absolute paths, got "%s"' % replacement)

      if not generator_filelist_paths:
        path = os.path.join(build_file_dir, replacement)
      else:
        if os.path.isabs(build_file_dir):
          toplevel = generator_filelist_paths['toplevel']
          rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
        else:
          rel_build_file_dir = build_file_dir
        qualified_out_dir = generator_filelist_paths['qualified_out_dir']
        path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
        gyp.common.EnsureDirExists(path)

      replacement = gyp.common.RelativePath(path, build_file_dir)
      f = gyp.common.WriteOnDiff(path)
      for i in contents_list[1:]:
        f.write('%s\n' % i)
      f.close()

    elif run_command:
      use_shell = True
      if match['is_array']:
        # A '[' marker means contents is a Python list literal: evaluate it
        # and hand the argv list straight to Popen without a shell.
        contents = eval(contents)
        use_shell = False

      # Check for a cached value to avoid executing commands, or generating
      # file lists more than once.
      # TODO(http://code.google.com/p/gyp/issues/detail?id=112): It is
      # possible that the command being invoked depends on the current
      # directory. For that case the syntax needs to be extended so that the
      # directory is also used in cache_key (it becomes a tuple).
      # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
      # someone could author a set of GYP files where each time the command
      # is invoked it produces different output by design. When the need
      # arises, the syntax should be extended to support no caching off a
      # command's output so it is run every time.
      cache_key = str(contents)
      cached_value = cached_command_results.get(cache_key, None)
      if cached_value is None:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Executing command '%s' in directory '%s'",
                        contents, build_file_dir)

        replacement = ''

        if command_string == 'pymod_do_main':
          # <!pymod_do_main(modulename param eters) loads |modulename| as a
          # python module and then calls that module's DoMain() function,
          # passing ["param", "eters"] as a single list argument.  For modules
          # that don't load quickly, this can be faster than
          # <!(python modulename param eters). Do this in |build_file_dir|.
          oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
          if build_file_dir:  # build_file_dir may be None (see above).
            os.chdir(build_file_dir)
          try:

            parsed_contents = shlex.split(contents)
            try:
              py_module = __import__(parsed_contents[0])
            except ImportError as e:
              raise GypError("Error importing pymod_do_main"
                             "module (%s): %s" % (parsed_contents[0], e))
            replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
          finally:
            os.chdir(oldwd)
          assert replacement != None
        elif command_string:
          raise GypError("Unknown command string '%s' in '%s'." %
                         (command_string, contents))
        else:
          # Fix up command with platform specific workarounds.
          contents = FixupPlatformCommand(contents)
          p = subprocess.Popen(contents, shell=use_shell,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               stdin=subprocess.PIPE,
                               cwd=build_file_dir)

          p_stdout, p_stderr = p.communicate('')

          if p.wait() != 0 or p_stderr:
            sys.stderr.write(p_stderr)
            # Simulate check_call behavior, since check_call only exists
            # in python 2.5 and later.
            raise GypError("Call to '%s' returned exit status %d." %
                           (contents, p.returncode))
          replacement = p_stdout.rstrip()

        cached_command_results[cache_key] = replacement
      else:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Had cache value for command '%s' in directory '%s'",
                        contents,build_file_dir)
        replacement = cached_value

    else:
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

    if isinstance(replacement, list):
      for item in replacement:
        if (not contents[-1] == '/' and
            not isinstance(item, str) and not isinstance(item, int)):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it.  Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif not isinstance(replacement, str) and \
         not isinstance(replacement, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context.  It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement.  See
      # above.
      if isinstance(replacement, list):
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if isinstance(replacement, list):
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct.  This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  # Look for more matches now that we've replaced some, to deal with
  # expanding local variables (variables defined in the same
  # variables block as this one).
  gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
  if isinstance(output, list):
    if output and isinstance(output[0], list):
      # Leave output alone if it's a list of lists.
      # We don't want such lists to be stringified.
      pass
    else:
      new_output = []
      for item in output:
        new_output.append(
            ExpandVariables(item, phase, variables, build_file))
      output = new_output
  else:
    output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into integers.
  if isinstance(output, list):
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
if not isinstance(condition, list):
raise GypError(conditions_key + ' must be a list')
if len(condition) != 2 and len(condition) != 3:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be length 2 or 3, not ' + str(len(condition)))
[cond_expr, true_dict] = condition[0:2]
false_dict = None
if len(condition) == 3:
false_dict = condition[2]
# Do expansions on the condition itself. Since the conditon can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if not isinstance(cond_expr_expanded, str) and \
not isinstance(cond_expr_expanded, int):
raise ValueError, \
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__
try:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
if eval(ast_code, {'__builtins__': None}, variables):
merge_dict = true_dict
else:
merge_dict = false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
if merge_dict != None:
# Expand variables and nested conditinals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Promotes plain values of the_dict into "automatic" variables.

  Every key of the_dict whose value is a str, int, or list is entered into
  |variables| under the key's name with a "_" character prepended.
  """
  for name, contents in the_dict.iteritems():
    if isinstance(contents, (str, int, list)):
      variables['_' + name] = contents
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  # Any keys in the_dict's "variables" dict, if it has one, becomes a
  # variable.  The variable name is the key name in the "variables" dict.
  # Variables that end with the % character are set only if they are unset in
  # the variables dict.  the_dict_key is the name of the key that accesses
  # the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  # (it could be a list or it could be parentless because it is a root dict),
  # the_dict_key will be None.
  for key, value in the_dict.get('variables', {}).iteritems():
    # Only str, int, and list values can become variables; anything else is
    # silently skipped.
    if not isinstance(value, str) and not isinstance(value, int) and \
       not isinstance(value, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # BUG FIX: compare string values with ==, not identity with "is".
      # The identity test only worked by accident of CPython string interning.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.

  Args:
    the_dict: dict to process in place.
    phase: one of the PHASE_* constants, selecting which conditions section
        ('conditions' or 'target_conditions') applies.
    variables_in: variable bindings visible to this scope; copied, never
        mutated.
    build_file: path of the build file the_dict came from, for expansions and
        error messages.
    the_dict_key: name of the key that accesses the_dict in its parent dict,
        or None if the parent is not a dict.
  """

  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Expand string values in place.  Only str and int results are legal in
  # string (dict-value) context.
  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and isinstance(value, str):
      expanded = ExpandVariables(value, phase, variables, build_file)
      if not isinstance(expanded, str) and not isinstance(expanded, int):
        raise ValueError, \
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within a
  # "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or isinstance(value, str):
      continue
    if isinstance(value, dict):
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif isinstance(value, list):
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif not isinstance(value, int):
      raise TypeError, 'Unknown type ' + value.__class__.__name__ + \
                       ' for ' + key
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if isinstance(item, dict):
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif isinstance(item, list):
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif isinstance(item, str):
expanded = ExpandVariables(item, phase, variables, build_file)
if isinstance(expanded, str) or isinstance(expanded, int):
the_list[index] = expanded
elif isinstance(expanded, list):
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError, \
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
index
elif not isinstance(item, int):
raise TypeError, 'Unknown type ' + item.__class__.__name__ + \
' at index ' + index
index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """
  targets = {}
  for build_file in data['target_build_files']:
    for target in data[build_file].get('targets', []):
      # Qualify the target's name with its build file path and toolset so
      # that it is unique across the whole load.
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      if qualified_name in targets:
        raise GypError('Duplicate target definitions for ' + qualified_name)
      targets[qualified_name] = target

  return targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """
  # Each dependency-bearing section can also appear with a '!' (exclusion)
  # or '/' (regex) list-modifier suffix; qualify those variants too.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      # Rewrite each entry in place; the list object is shared with
      # target_dict, so mutation updates the target.
      for index in xrange(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies" also
        # appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        # A wildcard may appear in the target position, the toolset position,
        # or both.
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # int() accepts '0'/'1' strings as well as bools for the
          # suppress_wildcard flag.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert each expanded dependency just past the insertion point and
          # advance the index so subsequent matches land after it.
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  seen = set()
  result = []
  for element in l:
    if element not in seen:
      seen.add(element)
      result.append(element)
  return result
def RemoveDuplicateDependencies(targets):
  """Ensures no dependency appears more than once in any target's dependency
  lists."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      deps = target_dict.get(dependency_key, [])
      if deps:
        target_dict[dependency_key] = Unify(deps)
def Filter(l, item):
  """Removes item from l."""
  # The dict caches the first occurrence of each element so that repeated
  # equal elements resolve to the same object, matching the original
  # setdefault-based behavior.
  cache = {}
  result = []
  for element in l:
    if element != item:
      result.append(cache.setdefault(element, element))
  return result
def RemoveSelfDependencies(targets):
  """Strips a target's dependency on itself when that target sets the
  prune_self_dependency variable."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if not dependencies:
        continue
      for dep in dependencies:
        if dep != target_name:
          continue
        # Only prune when the target opts in via its variables dict.
        if targets[dep].get('variables', {}).get('prune_self_dependency', 0):
          target_dict[dependency_key] = Filter(dependencies, target_name)
class DependencyGraphNode(object):
  """A node in the target (or build-file) dependency graph.

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  class CircularException(GypError):
    pass

  def __init__(self, ref):
    self.ref = ref
    self.dependencies = []
    self.dependents = []

  def __repr__(self):
    return '<DependencyGraphNode: %r>' % self.ref

  def FlattenToList(self):
    """Topologically sorts the graph rooted at this node.

    Returns a list of refs; every ref appears after all of its dependencies
    and before all of its dependents.  Nodes trapped in a cycle are never
    reached and are simply absent from the result.
    """
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes.  Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = []

    # in_degree_zeros is the set of DependencyGraphNodes that have no
    # dependencies not in flat_list.  Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list.  Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.append(node.ref)

      # Look at dependents of the node just added to flat_list.  Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        for node_dependent_dependency in node_dependent.dependencies:
          if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list.
            # There will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list.  Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)

    return flat_list

  def FindCycles(self, path=None):
    """
    Returns a list of cycles in the graph, where each cycle is its own list.
    """
    if path is None:
      path = [self]

    results = []
    for node in self.dependents:
      if node in path:
        # Found a back edge: copy the path from this node back to itself to
        # record the cycle.
        cycle = [node]
        for part in path:
          cycle.append(part)
          if part == node:
            break
        results.append(tuple(cycle))
      else:
        results.extend(node.FindCycles([node] + path))

    # Deduplicate identical cycles discovered along different paths.
    return list(set(results))

  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies."""
    # IDIOM FIX: compare against None with "is", not "==", throughout.
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)

    return dependencies

  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.

    This method does not operate on self.  Rather, it operates on the list
    of dependencies in the |dependencies| argument.  For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list.  As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple levels
    of dependencies.

    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies.  DirectAndImportedDependencies is intended to be the
    public entry point.
    """
    if dependencies is None:
      dependencies = []

    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present.  Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them.  This is done to more closely match
      # the depth-first method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1

    return dependencies

  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies and all indirect
    dependencies that a dependency has advertised settings should be exported
    through the dependency for.
    """
    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)

  def DeepDependencies(self, dependencies=None):
    """Returns a list of all of a target's dependencies, recursively."""
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)
        dependency.DeepDependencies(dependencies)

    return dependencies

  def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                                dependencies=None, initial=True):
    """Returns a list of dependency targets that are linked into this target.

    This function has a split personality, depending on the setting of
    |initial|.  Outside callers should always leave |initial| at its default
    setting.

    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being built.

    If |include_shared_libraries| is False, the resulting dependencies will not
    include shared_library targets that are linked into this target.
    """
    if dependencies is None:
      dependencies = []

    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies

    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.

    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")

    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])

    target_type = targets[self.ref]['type']

    is_linkable = target_type in linkable_types

    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies

    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      if self.ref not in dependencies:
        dependencies.append(self.ref)
      return dependencies

    # Executables and loadable modules are already fully and finally linked.
    # Nothing else can be a link dependency of them, there can only be
    # dependencies in the sense that a dependent target might run an
    # executable or load the loadable_module.
    if not initial and target_type in ('executable', 'loadable_module'):
      return dependencies

    # Shared libraries are already fully linked.  They should only be included
    # in |dependencies| when adjusting static library dependencies (in order to
    # link against the shared_library's import lib), but should not be included
    # in |dependencies| when propagating link_settings.
    # The |include_shared_libraries| flag controls which of these two cases we
    # are handling.
    if (not initial and target_type == 'shared_library' and
        not include_shared_libraries):
      return dependencies

    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.append(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this target linkable.  Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency._LinkDependenciesInternal(targets,
                                               include_shared_libraries,
                                               dependencies, False)

    return dependencies

  def DependenciesForLinkSettings(self, targets):
    """
    Returns a list of dependency targets whose link_settings should be merged
    into this target.
    """

    # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
    # link_settings are propagated.  So for now, we will allow it, unless the
    # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
    # False.  Once chrome is fixed, we can remove this flag.
    include_shared_libraries = \
        targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
    return self._LinkDependenciesInternal(targets, include_shared_libraries)

  def DependenciesToLinkAgainst(self, targets):
    """
    Returns a list of dependency targets that are linked into this target.
    """
    return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
  """Builds the target dependency graph and a topologically sorted flat list.

  Returns [dependency_nodes, flat_list] where dependency_nodes maps each
  target name to its DependencyGraphNode and flat_list orders every target
  after all of its dependencies.  Raises
  DependencyGraphNode.CircularException if a dependency cycle is detected.
  """
  # Create a DependencyGraphNode for each target.  Put it into a dict for easy
  # access.
  dependency_nodes = {}
  for target in targets:
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links.  Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    # CLEANUP: removed an unused gyp.common.BuildFile(target) computation that
    # was performed for every target but never read.
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).  If you need to figure out what's wrong, look for elements of
  # targets that are not in flat_list.
  if len(flat_list) != len(targets):
    raise DependencyGraphNode.CircularException(
        'Some targets not reachable, cycle in dependency graph detected: ' +
        ' '.join(set(flat_list) ^ set(targets)))

  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
bad_files = []
for file in dependency_nodes.iterkeys():
if not file in flat_list:
bad_files.append(file)
common_path_prefix = os.path.commonprefix(dependency_nodes)
cycles = []
for cycle in root_node.FindCycles():
simplified_paths = []
for node in cycle:
assert(node.ref.startswith(common_path_prefix))
simplified_paths.append(node.ref[len(common_path_prefix):])
cycles.append('Cycle: %s' % ' -> '.join(simplified_paths))
raise DependencyGraphNode.CircularException, \
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles)
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges dependency-exported settings into each target in flat_list.

  |key| selects which settings flavor to propagate and must be one of
  all_dependent_settings, direct_dependent_settings, or link_settings.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    node = dependency_nodes[target]

    # Pick the dependency set appropriate for this settings flavor.
    if key == 'all_dependent_settings':
      dependencies = node.DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = node.DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = node.DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        continue
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, gyp.common.BuildFile(dependency))
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Rewrites each target's 'dependencies' list in place around static libs.

  Static libraries shed their non-hard static-library dependencies; linkable
  targets gain entries for everything they must actually link against.
  """
  # Recompute target "dependencies" properties. For each static library
  # target, remove "dependencies" entries referring to other static libraries,
  # unless the dependency has the "hard_dependency" attribute set. For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      # Preserve the pre-adjustment list so generators can still see it.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output. Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies. If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target. Add them to the dependencies list if they're not already
      # present.

      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to dependencies.
      # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
      # Note: flat_list is already sorted in the order from dependencies to
      # dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
# Matches items MakePathRelative must return unmodified: a value whose first
# character (optionally behind a leading quote) is one of - / $ < > ^.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
  """Rebases the relative path |item| from fro_file's dir to to_file's dir.

  If item is a relative path, it's relative to the build file dict that it's
  coming from; the result is relative to the build file dict it's going into.

  Exception: any |item| whose first character (or second, when the value is
  quoted with " or ') is one of the following is returned unmodified:
    /  already absolute (shortcut optimization; such paths would come back
       absolute anyway)
    $  build environment variables
    -  build environment flags (such as -lapr-1 in a "libraries" section)
    <, >, ^  gyp's own variable and command expansions (see ExpandVariables)
  """
  if to_file == fro_file or exception_re.match(item):
    return item

  # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
  # temporary measure. This should really be addressed by keeping all paths
  # in POSIX until actual project generation.
  stem = gyp.common.RelativePath(os.path.dirname(fro_file),
                                 os.path.dirname(to_file))
  rebased = os.path.normpath(os.path.join(stem, item)).replace('\\', '/')
  # normpath strips any trailing slash; restore it so "directory-ness" of the
  # original value survives the rebase.
  if item[-1] == '/':
    rebased += '/'
  return rebased
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges list |fro| into list |to| in place.

  to_file and fro_file identify the build files each list came from, so that
  path-valued items (is_paths=True) can be rebased with MakePathRelative.
  append selects appending (default) versus prepending of |fro|'s items.
  Singleton items (anything but strings starting with '-') appear at most
  once in the result.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None. Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__

  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l

  prepend_index = 0

  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if isinstance(item, str) or isinstance(item, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item

      if not isinstance(item, str) or not item.startswith('-'):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif isinstance(item, dict):
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif isinstance(item, list):
      # Recurse, making a copy of the list. If the list contains any
      # descendant dicts, path fixing will occur. Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|. append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError, \
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend. This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)

      # Don't just insert everything at index 0. That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges dict |fro| into dict |to| in place, applying gyp merge policies.

  String/int values overwrite; dict values merge recursively; list values
  merge according to the '=', '+', or '?' suffix on the from-key (replace,
  prepend, or set-if-absent; no suffix means append). to_file and fro_file
  are the build files each dict came from, used to rebase path values.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics. Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if isinstance(v, str) or isinstance(v, int):
        if not (isinstance(to[k], str) or isinstance(to[k], int)):
          bad_merge = True
      elif v.__class__ != to[k].__class__:
        bad_merge = True

      if bad_merge:
        raise TypeError, \
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k
    if isinstance(v, str) or isinstance(v, int):
      # Overwrite the existing value, if any. Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif isinstance(v, dict):
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif isinstance(v, list):
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...  applied when receiving the from-list:
      #                           =  replace
      #                           +  prepend
      #                           ?  set, only if to-list does not yet exist
      #                      (none)  append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example. Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        if not isinstance(to[list_base], list):
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError, \
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')'
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError, \
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Merges |configuration| and everything it inherits from into
  new_configuration_dict.

  visited is the list of configurations already merged along the current
  inheritance chain; it guards against inheritance cycles.
  """
  if configuration in visited:
    # Already merged somewhere up the chain; stop so cycles can't recurse
    # forever.
    return

  configuration_dict = target_dict['configurations'][configuration]

  # Merge every ancestor first, so this configuration's own settings win over
  # inherited ones.
  for parent in configuration_dict.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, visited + [configuration])

  # Merge this configuration itself into the accumulated result.
  MergeDicts(new_configuration_dict, configuration_dict,
             build_file, build_file)

  # 'abstract' marks inherit-only configurations; it must not leak into the
  # concrete result.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Expands target_dict['configurations'] in place so that each concrete
  configuration holds a full copy of the target's configurable settings,
  then strips those settings (and abstract configurations) from the target.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /). Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']

  build_file = gyp.common.BuildFile(target)

  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # Alphabetically-first concrete configuration becomes the default.
    concrete = [i for i in target_dict['configurations'].iterkeys()
                if not target_dict['configurations'][i].get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]

  # .keys() snapshots the key list (Python 2) so entries can be replaced
  # inside the loop.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = copy.deepcopy(target_dict)

    # Take out the bits that don't belong in a "configurations" section.
    # Since configuration setup is done before conditional, exclude, and rules
    # processing, be careful with handling of the suffix characters used in
    # those phases.
    delete_keys = []
    for key in new_configuration_dict:
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if key_base in non_configuration_keys:
        delete_keys.append(key)
    for key in delete_keys:
      del new_configuration_dict[key]

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    # Put the new result back into the target dict as a configuration.
    target_dict['configurations'][configuration] = new_configuration_dict

  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]

  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!". Every item in such a list is removed from the associated
  main list, which in this example, would be "sources". Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list. Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc. The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list. Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!"). Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter. Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".

  Arguments:
    name: name of the_dict, used only in error messages.
    the_dict: the dict to filter; modified in place.
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists. Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|. This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue

    if not isinstance(value, list):
      raise ValueError, name + ' key ' + key + ' must be list, not ' + \
          value.__class__.__name__

    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list. Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue

    if not isinstance(the_dict[list_key], list):
      value = the_dict[list_key]
      raise ValueError, name + ' key ' + list_key + \
          ' must be list, not ' + \
          value.__class__.__name__ + ' when applying ' + \
          {'!': 'exclusion', '/': 'regex'}[operation]

    if not list_key in lists:
      lists.append(list_key)

  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]

  for list_key in lists:
    the_list = the_dict[list_key]

    # Initialize the list_actions list, which is parallel to the_list. Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied. Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1. Includes and
    # excludes override previous actions. All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))

    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0

      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]

    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)

        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError, 'Unrecognized action ' + action + ' in ' + name + \
              ' key ' + regex_key

        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value

      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]

    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded"). The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)

    excluded_list = []

    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift. That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude). Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]

    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list

  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if isinstance(value, dict):
      ProcessListFiltersInDict(key, value)
    elif isinstance(value, list):
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Recursively applies list filters to every dict found inside the_list.

  Plain (non-dict, non-list) entries are left untouched; |name| is passed
  through for error messages.
  """
  for entry in the_list:
    if isinstance(entry, dict):
      ProcessListFiltersInDict(name, entry)
    elif isinstance(entry, list):
      ProcessListFiltersInList(name, entry)
def ValidateTargetType(target, target_dict):
  """Ensures the 'type' field on the target is one of the known types.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises an exception on error.
  """
  valid_types = ('executable', 'loadable_module',
                 'static_library', 'shared_library',
                 'none')
  target_type = target_dict.get('type', None)
  if target_type not in valid_types:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, target_type, '/'.join(valid_types)))
  # standalone_static_library is meaningful only on static_library targets.
  if (target_dict.get('standalone_static_library', 0) and
      target_type != 'static_library'):
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             target_type))
def ValidateSourcesInTarget(target, target_dict, build_file):
  """Rejects static/shared library targets whose compiled sources share a
  basename, which some build systems (e.g. MSVC08) cannot handle."""
  # TODO: Check if MSVC allows this for loadable_module targets.
  if target_dict.get('type', None) not in ('static_library', 'shared_library'):
    return

  compiled_exts = ('.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S')
  basenames = {}
  for source in target_dict.get('sources', []):
    name, ext = os.path.splitext(source)
    if ext not in compiled_exts:
      # Headers, resources, etc. never produce object files, so they can't
      # collide at compile time.
      continue
    basenames.setdefault(os.path.basename(name), []).append(source)

  error = ''
  for basename, files in basenames.items():
    if len(files) > 1:
      error += ' %s: %s\n' % (basename, ' '.join(files))

  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'Some build systems, e.g. MSVC08, '
          'cannot handle that.')
    raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """
  # Track every 'rule_name' and normalized 'extension' seen so far, so
  # duplicates across rules can be reported.
  rule_names = {}
  rule_extensions = {}
  for rule in target_dict.get('rules', []):
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rule_names[rule_name] = rule

    # Extensions are compared without their leading dot.
    rule_extension = rule['extension']
    if rule_extension.startswith('.'):
      rule_extension = rule_extension[1:]
    if rule_extension in rule_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      rule_extensions[rule_extension]['rule_name'],
                      rule_name))
    rule_extensions[rule_extension] = rule

    # rule_sources is an output of this function; it must not arrive as input.
    if 'rule_sources' in rule:
      raise GypError(
          'rule_sources must not exist in input, target %s rule %s' %
          (target, rule_name))

    # Collect every source (from 'sources' plus any generator-requested extra
    # keys) whose extension matches this rule.
    rule_sources = []
    for source_key in ['sources'] + list(extra_sources_for_rules):
      for source in target_dict.get(source_key, []):
        (source_root, source_extension) = os.path.splitext(source)
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == rule_extension:
          rule_sources.append(source)

    if rule_sources:
      rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Validates the optional 'run_as' section of a target.

  'run_as' must be a dict with a list-valued 'action'; 'working_directory'
  (string) and 'environment' (dict) are optional. Raises GypError on any
  violation; targets without 'run_as' are accepted as-is.
  """
  run_as = target_dict.get('run_as')
  if not run_as:
    return
  target_name = target_dict.get('target_name')
  if not isinstance(run_as, dict):
    raise GypError("The 'run_as' in target %s from file %s should be a "
                   "dictionary." %
                   (target_name, build_file))
  action = run_as.get('action')
  if not action:
    raise GypError("The 'run_as' in target %s from file %s must have an "
                   "'action' section." %
                   (target_name, build_file))
  if not isinstance(action, list):
    raise GypError("The 'action' for 'run_as' in target %s from file %s "
                   "must be a list." %
                   (target_name, build_file))
  working_directory = run_as.get('working_directory')
  if working_directory and not isinstance(working_directory, str):
    raise GypError("The 'working_directory' for 'run_as' in target %s "
                   "in file %s should be a string." %
                   (target_name, build_file))
  environment = run_as.get('environment')
  if environment and not isinstance(environment, dict):
    raise GypError("The 'environment' for 'run_as' in target %s "
                   "in file %s should be a dictionary." %
                   (target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the inputs to the actions in a target.

  Each action must carry an 'action_name', an 'inputs' list (which may be
  empty but must be present), and, when an 'action' command is given, a
  non-empty first command element. Raises GypError otherwise.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    if not action.get('action_name'):
      raise GypError("Anonymous action in target %s.  "
                     "An action must have an 'action_name' field." %
                     target_name)
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    action_command = action.get('action')
    if action_command and not action_command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.

  Both values and keys are converted; the conversion is done in place.
  """
  # Iterate over a snapshot of the items: integer keys are deleted and
  # reinserted under their string form below, and mutating a dict while
  # iterating a live view over it is an error on Python 3 (Python 2's
  # items() happened to copy, which is what the original relied on).
  # Using items rather than iterating keys also means reinserted keys are
  # never revisited.
  for k, v in list(the_dict.items()):
    if isinstance(v, int):
      v = str(v)
      the_dict[k] = v
    elif isinstance(v, dict):
      TurnIntIntoStrInDict(v)
    elif isinstance(v, list):
      TurnIntIntoStrInList(v)

    if isinstance(k, int):
      # Re-key under the string form; keep the (possibly converted) value.
      the_dict[str(k)] = v
      del the_dict[k]
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.

  The conversion is done in place.
  """
  # enumerate replaces the manual xrange(len(...)) index loop (xrange is also
  # Python-2-only); assigning back through the index keeps the mutation in
  # place, and the list's length never changes during iteration.
  for index, item in enumerate(the_list):
    if isinstance(item, int):
      the_list[index] = str(item)
    elif isinstance(item, dict):
      TurnIntIntoStrInDict(item)
    elif isinstance(item, list):
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|.

  Arguments:
    targets: dict of all targets, keyed by qualified name.
    flat_list: dependency-ordered list of qualified target names.
    dependency_nodes: dict of DependencyGraphNode, keyed by qualified name.
    root_targets: target names (possibly unqualified) to keep.
    data: per-build-file data dict; its 'targets' lists are pruned in place.

  Returns a (wanted_targets, wanted_flat_list) tuple mirroring targets and
  flat_list restricted to the kept targets.
  """
  qualified_root_targets = []
  for target in root_targets:
    target = target.strip()
    qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
    if not qualified_targets:
      raise GypError("Could not find target %s" % target)
    qualified_root_targets.extend(qualified_targets)

  # Keep each root target plus everything it transitively depends on.
  wanted_targets = {}
  for target in qualified_root_targets:
    wanted_targets[target] = targets[target]
    for dependency in dependency_nodes[target].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]

  wanted_flat_list = [t for t in flat_list if t in wanted_targets]

  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if not 'targets' in data[build_file]:
      continue
    new_targets = []
    for target in data[build_file]['targets']:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      if qualified_name in wanted_targets:
        new_targets.append(target)
    data[build_file]['targets'] = new_targets

  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.
  """
  # Maps 'subdirectory:target_name' to the .gyp file that first claimed it.
  used = {}
  for target in targets:
    # Split 'path/to/file.gyp:target_name' into its gyp path and target name,
    # then the gyp path into its directory and file name.
    path, name = target.rsplit(':', 1)
    subdir, gyp_file = os.path.split(path)
    # Report the current directory as '.' so the error messages make more
    # sense.
    subdir = subdir or '.'

    key = subdir + ':' + name
    if key in used:
      # Complain if this target is already used.
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp_file, used[key]))
    used[key] = gyp_file
def SetGeneratorGlobals(generator_input_info):
  """Seeds this module's generator-dependent globals from
  generator_input_info so later processing (path fixing, configuration
  handling, toolsets) follows the active generator's conventions.
  """
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
  global path_sections
  path_sections = base_path_sections[:]
  path_sections.extend(generator_input_info['path_sections'])

  global non_configuration_keys
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])

  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']

  global generator_filelist_paths
  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, parallel, root_targets):
  """Load every build file reachable from |build_files| and fully process them.

  Returns ``[flat_list, targets, data]``: the dependency-ordered list of
  qualified target names, the dict mapping qualified names to target dicts,
  and the raw per-build-file |data| dict.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']

  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  aux_data = {}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, aux_data,
                                 variables, includes, depth, check,
                                 generator_input_info)
  else:
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise

  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)

  # Fully qualify all dependency links.
  QualifyDependencies(targets)

  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)

  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)

  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]

  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)

  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)

  [dependency_nodes, flat_list] = BuildDependencyList(targets)

  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)

  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)

  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)

    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]

  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])

  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)

  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)

  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)

  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)

  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    # TODO(thakis): Get vpx_scale/arm/scalesystemdependent.c to be renamed to
    #               scalesystemdependent_arm_additions.c or similar.
    if 'arm' not in variables.get('target_arch', ''):
      ValidateSourcesInTarget(target, target_dict, build_file)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)

  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)

  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
| mit |
RedHatInsights/insights-core | insights/parsers/tests/test_abrt_ccpp.py | 1 | 3026 | import doctest
import pytest
from insights.parsers import abrt_ccpp
from insights.parsers.abrt_ccpp import AbrtCCppConf
from insights.tests import context_wrap
from insights.parsers import SkipException
# Representative /etc/abrt/plugins/CCpp.conf payload exercised by the tests
# below; the lines are configuration-file content, not Python comments.
ABRT_CONF_CONTENT = """
# Configuration file for CCpp hook
# CCpp hook writes its template to the "/proc/sys/kernel/core_pattern" file
# and stores the original template in the "/var/run/abrt/saved_core_pattern"
# file. If you want CCpp hook to create a core dump file named according to
# the original template as well, set 'MakeCompatCore' to 'yes'.
# If the original template string starts with "|", the string "core" is used
# instead of the template.
# For more information about naming core dump files see 'man 5 core'.
MakeCompatCore = yes
# The option allows you to set limit for the core file size in MiB.
#
# This value is compared to value of the MaxCrashReportSize configuration
# option from (/etc/abrt.conf) and the lower value is used as the limit.
#
# If MaxCoreFileSize is 0 then the value of MaxCrashReportSize is the limit.
# If MaxCrashReportSize is 0 then the value of MaxCoreFileSize is the limit.
# If both values are 0 then the core file size is unlimited.
MaxCoreFileSize = 0
# Do you want a copy of crashed binary be saved?
# (useful, for example, when _deleted binary_ segfaults)
SaveBinaryImage = no
# When this option is set to 'yes', core backtrace is generated
# from the memory image of the crashing process. Only the crash
# thread is present in the backtrace.
CreateCoreBacktrace = yes
# Save full coredump? If set to 'no', coredump won't be saved
# and you won't be able to report the crash to Bugzilla. Only
# useful with CreateCoreBacktrace set to 'yes'. Please
# note that if this option is set to 'no' and MakeCompatCore
# is set to 'yes', the core is still written to the current
# directory.
SaveFullCore = yes
# Used for debugging the hook
#VerboseLog = 2
# Specify where you want to store debuginfos (default: /var/cache/abrt-di)
#
DebuginfoLocation = /var/cache/abrt-di
# ABRT will ignore crashes in executables whose absolute path matches one of
# specified patterns.
#
#IgnoredPaths =
# ABRT will process only crashes of either allowed users or users who are
# members of allowed group. If no allowed users nor allowed group are specified
# ABRT will process crashes of all users.
#
#AllowedUsers =
#AllowedGroups =
""".strip()

# Empty configuration input used to exercise the SkipException path.
ABRT_CONF_CONTENT_NO = """
""".strip()
def test_empty_content():
    """Parsing an empty CCpp.conf must raise SkipException."""
    empty_ctx = context_wrap(ABRT_CONF_CONTENT_NO)
    with pytest.raises(SkipException):
        AbrtCCppConf(empty_ctx)
def test_abrt_class():
    """Spot-check a handful of parsed key/value pairs (case-insensitively)."""
    conf = AbrtCCppConf(context_wrap(ABRT_CONF_CONTENT))
    expected = {
        'CreateCoreBacktrace': 'yes',
        'DebuginfoLocation': '/var/cache/abrt-di',
        'Debuginfo': '',
    }
    for key, value in expected.items():
        assert conf.get(key, '').lower() == value
def test_docs():
    """The module's doctest examples must all pass."""
    globs = {
        'abrt_conf': AbrtCCppConf(context_wrap(ABRT_CONF_CONTENT))
    }
    failures = doctest.testmod(abrt_ccpp, globs=globs).failed
    assert failures == 0
| apache-2.0 |
azureplus/chromium_depot_tools | roll_dep_svn.py | 26 | 15130 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Rolls a git-svn dependency.
It takes the path to a dep and a git commit hash or svn revision, and updates
the parent repo's DEPS file with the corresponding git commit hash.
Sample invocation:
[chromium/src]$ roll-dep-svn third_party/WebKit 12345
After the script completes, the DEPS file will be dirty with the new revision.
The user can then:
$ git add DEPS
$ git commit
"""
import ast
import optparse
import os
import re
import sys
from itertools import izip
from subprocess import check_output, Popen, PIPE
from textwrap import dedent
SHA1_RE = re.compile('^[a-fA-F0-9]{40}$')
GIT_SVN_ID_RE = re.compile('^git-svn-id: .*@([0-9]+) .*$')
ROLL_DESCRIPTION_STR = (
'''Roll %(dep_path)s %(before_rev)s:%(after_rev)s%(svn_range)s
Summary of changes available at:
%(revlog_url)s
''')
def shorten_dep_path(dep):
  """Shorten the given dep path if necessary."""
  # Drop leading path components (replacing them with '.../') until the
  # remainder fits in 31 characters.
  while len(dep) > 31:
    _, _, tail = dep.lstrip('./').partition('/')
    dep = '.../' + tail
  return dep
def posix_path(path):
  """Convert a possibly-Windows path to a posix-style path."""
  # Strip any drive letter, then normalize separators to '/'.
  _, tail = os.path.splitdrive(path)
  return tail.replace(os.sep, '/')


def platform_path(path):
  """Convert a path to the native path format of the host OS."""
  return path.replace('/', os.sep)
def find_gclient_root():
  """Find the directory containing the .gclient file."""
  cwd = posix_path(os.getcwd())
  result = ''
  # Walk upward at most as many levels as the cwd has path components.
  for _ in xrange(len(cwd.split('/'))):
    if os.path.exists(os.path.join(result, '.gclient')):
      return result
    result = os.path.join(result, os.pardir)
  assert False, 'Could not find root of your gclient checkout.'


def get_solution(gclient_root, dep_path):
  """Find the solution in .gclient containing the dep being rolled."""
  dep_path = os.path.relpath(dep_path, gclient_root)
  cwd = os.getcwd().rstrip(os.sep) + os.sep
  gclient_root = os.path.realpath(gclient_root)
  gclient_path = os.path.join(gclient_root, '.gclient')
  gclient_locals = {}
  # .gclient is a Python fragment that defines a 'solutions' list.
  execfile(gclient_path, {}, gclient_locals)
  for soln in gclient_locals['solutions']:
    soln_relpath = platform_path(soln['name'].rstrip('/')) + os.sep
    # A solution matches when the dep lives under it, or the current
    # working directory is inside it.
    if (dep_path.startswith(soln_relpath) or
        cwd.startswith(os.path.join(gclient_root, soln_relpath))):
      return soln
  assert False, 'Could not determine the parent project for %s' % dep_path
def is_git_hash(revision):
  """Determines if a given revision is a git hash."""
  # SHA1_RE matches exactly 40 hex characters.
  return SHA1_RE.match(revision)


def verify_git_revision(dep_path, revision):
  """Verify that a git revision exists in a repository.

  Returns the full hash on success, or None when git fails or prints
  something other than a full hash.
  """
  p = Popen(['git', 'rev-list', '-n', '1', revision],
            cwd=dep_path, stdout=PIPE, stderr=PIPE)
  result = p.communicate()[0].strip()
  if p.returncode != 0 or not is_git_hash(result):
    result = None
  return result


def get_svn_revision(dep_path, git_revision):
  """Given a git revision, return the corresponding svn revision.

  Returns None when the commit message carries no git-svn-id line.
  """
  p = Popen(['git', 'log', '-n', '1', '--pretty=format:%B', git_revision],
            stdout=PIPE, cwd=dep_path)
  (log, _) = p.communicate()
  assert p.returncode == 0, 'git log %s failed.' % git_revision
  # git-svn-id lines live near the bottom of the message; scan from the end
  # so the first match found is the authoritative one.
  for line in reversed(log.splitlines()):
    m = GIT_SVN_ID_RE.match(line.strip())
    if m:
      return m.group(1)
  return None
def convert_svn_revision(dep_path, revision):
  """Find the git revision corresponding to an svn revision.

  Scans 'git log' (newest first) for git-svn-id annotations and returns the
  git hash whose svn revision equals |revision|.  Raises RuntimeError with a
  diagnostic when no commit matches.
  """
  err_msg = 'Unknown error'
  revision = int(revision)
  latest_svn_rev = None
  with open(os.devnull, 'w') as devnull:
    # Prefer local history (HEAD), then fall back to the upstream branch.
    for ref in ('HEAD', 'origin/master'):
      try:
        log_p = Popen(['git', 'log', ref],
                      cwd=dep_path, stdout=PIPE, stderr=devnull)
        grep_p = Popen(['grep', '-e', '^commit ', '-e', '^ *git-svn-id: '],
                       stdin=log_p.stdout, stdout=PIPE, stderr=devnull)
        git_rev = None
        prev_svn_rev = None
        for line in grep_p.stdout:
          if line.startswith('commit '):
            # Remember the hash; the matching git-svn-id follows it.
            git_rev = line.split()[1]
            continue
          try:
            svn_rev = int(line.split()[1].partition('@')[2])
          except (IndexError, ValueError):
            print >> sys.stderr, (
                'WARNING: Could not parse svn revision out of "%s"' % line)
            continue
          if not latest_svn_rev or int(svn_rev) > int(latest_svn_rev):
            latest_svn_rev = svn_rev
          if svn_rev == revision:
            return git_rev
          if svn_rev > revision:
            prev_svn_rev = svn_rev
            continue
          # Log is newest-first, so passing below |revision| means it is
          # absent from this ref's history.
          if prev_svn_rev:
            err_msg = 'git history skips from revision %d to revision %d.' % (
                svn_rev, prev_svn_rev)
          else:
            err_msg = (
                'latest available revision is %d; you may need to '
                '"git fetch origin" to get the latest commits.' %
                latest_svn_rev)
      finally:
        # Always reap the pipeline, even on early return.
        log_p.terminate()
        grep_p.terminate()
  raise RuntimeError('No match for revision %d; %s' % (revision, err_msg))
def get_git_revision(dep_path, revision):
  """Convert the revision argument passed to the script to a git revision.

  Returns (git_revision, svn_revision); svn_revision is None when no svn
  counterpart is known.
  """
  svn_revision = None
  if revision.startswith('r'):
    # 'r12345' is unambiguously an svn revision.
    git_revision = convert_svn_revision(dep_path, revision[1:])
    svn_revision = revision[1:]
  elif re.search('[a-fA-F]', revision):
    # Hex letters can only appear in a git hash.
    git_revision = verify_git_revision(dep_path, revision)
    if not git_revision:
      raise RuntimeError('Please \'git fetch origin\' in %s' % dep_path)
    svn_revision = get_svn_revision(dep_path, git_revision)
  elif len(revision) > 6:
    # Too long for a typical svn revision: try git first, then svn.
    git_revision = verify_git_revision(dep_path, revision)
    if git_revision:
      svn_revision = get_svn_revision(dep_path, git_revision)
    else:
      git_revision = convert_svn_revision(dep_path, revision)
      svn_revision = revision
  else:
    # Short and all digits: ambiguous; prefer the svn interpretation.
    try:
      git_revision = convert_svn_revision(dep_path, revision)
      svn_revision = revision
    except RuntimeError:
      git_revision = verify_git_revision(dep_path, revision)
      if not git_revision:
        raise
      svn_revision = get_svn_revision(dep_path, git_revision)
  return git_revision, svn_revision
def ast_err_msg(node):
  """Return a diagnostic message for an AST |node| the DEPS parser cannot handle."""
  # Fixed typo in the message: 'Undexpected' -> 'Unexpected'.
  return 'ERROR: Unexpected DEPS file AST structure at line %d column %d' % (
      node.lineno, node.col_offset)
def find_deps_section(deps_ast, section):
  """Find a top-level section of the DEPS file in the AST.

  Returns the value node of the first ``section = ...`` assignment, or
  None when the section is not present.
  """
  for stmt in deps_ast.body:
    if (stmt.__class__ is ast.Assign and
        stmt.targets[0].__class__ is ast.Name and
        stmt.targets[0].id == section):
      return stmt.value
  return None
def find_dict_index(dict_node, key):
  """Given a key, find the index of the corresponding dict entry.

  Returns None when |key| is absent; asserts when it appears twice.
  """
  assert dict_node.__class__ is ast.Dict, ast_err_msg(dict_node)
  found = None
  for idx, key_node in enumerate(dict_node.keys):
    if key_node.__class__ is ast.Str and key_node.s == key:
      assert found is None, (
          'Found redundant dict entries for key "%s"' % key)
      found = idx
  return found
def update_node(deps_lines, deps_ast, node, git_revision):
  """Update an AST node with the new git revision.

  Dispatches on the node type: plain strings, string concatenations
  (BinOp) and Var() calls are supported.  Returns the index of the line
  in |deps_lines| that was modified.
  """
  if node.__class__ is ast.Str:
    return update_string(deps_lines, node, git_revision)
  elif node.__class__ is ast.BinOp:
    return update_binop(deps_lines, deps_ast, node, git_revision)
  elif node.__class__ is ast.Call:
    return update_call(deps_lines, deps_ast, node, git_revision)
  else:
    # Anything else is a DEPS structure this script does not understand.
    assert False, ast_err_msg(node)
def update_string(deps_lines, string_node, git_revision):
  """Update a string node in the AST with the new git revision.

  Handles both bare revision strings and 'url@revision' strings, replacing
  only the revision portion inside the affected line of |deps_lines|.
  Returns the index of the modified line.
  """
  line_idx = string_node.lineno - 1
  line = deps_lines[line_idx]
  search_from = string_node.col_offset - 1
  prefix, at_sign, old_rev = string_node.s.partition('@')
  if at_sign:
    # 'url@rev': the revision starts right after the '@'.
    head = prefix + at_sign
    start_idx = line.find(head, search_from) + len(head)
  else:
    # Bare revision string: replace the whole literal content.
    old_rev = prefix
    start_idx = line.find(prefix, search_from)
  tail_idx = start_idx + len(old_rev)
  deps_lines[line_idx] = line[:start_idx] + git_revision + line[tail_idx:]
  return line_idx
def update_binop(deps_lines, deps_ast, binop_node, git_revision):
  """Update a binary operation node in the AST with the new git revision."""
  # Since the revision part is always last, assume that it's the right-hand
  # operand that needs to be updated.
  return update_node(deps_lines, deps_ast, binop_node.right, git_revision)


def update_call(deps_lines, deps_ast, call_node, git_revision):
  """Update a function call node in the AST with the new git revision."""
  # The only call we know how to handle is Var()
  assert call_node.func.id == 'Var', ast_err_msg(call_node)
  assert call_node.args and call_node.args[0].__class__ is ast.Str, (
      ast_err_msg(call_node))
  return update_var(deps_lines, deps_ast, call_node.args[0].s, git_revision)


def update_var(deps_lines, deps_ast, var_name, git_revision):
  """Update an entry in the vars section of the DEPS file with the new
  git revision."""
  vars_node = find_deps_section(deps_ast, 'vars')
  assert vars_node, 'Could not find "vars" section of DEPS file.'
  var_idx = find_dict_index(vars_node, var_name)
  assert var_idx is not None, (
      'Could not find definition of "%s" var in DEPS file.' % var_name)
  # Rewrite the value assigned to the var, whatever node form it takes.
  val_node = vars_node.values[var_idx]
  return update_node(deps_lines, deps_ast, val_node, git_revision)
def short_rev(rev, dep_path):
  """Abbreviate |rev| via 'git rev-parse --short' run in |dep_path|."""
  return check_output(['git', 'rev-parse', '--short', rev],
                      cwd=dep_path).rstrip()


def generate_commit_message(deps_section, dep_path, dep_name, new_rev):
  """Build the roll commit message, including a revision-log URL."""
  (url, _, old_rev) = deps_section[dep_name].partition('@')
  if url.endswith('.git'):
    url = url[:-4]
  old_rev_short = short_rev(old_rev, dep_path)
  new_rev_short = short_rev(new_rev, dep_path)
  url += '/+log/%s..%s' % (old_rev_short, new_rev_short)
  try:
    old_svn_rev = get_svn_revision(dep_path, old_rev)
    new_svn_rev = get_svn_revision(dep_path, new_rev)
  except Exception:
    # Ignore failures that might arise from the repo not being checked out.
    old_svn_rev = new_svn_rev = None
  svn_range_str = ''
  if old_svn_rev and new_svn_rev:
    svn_range_str = ' (svn %s:%s)' % (old_svn_rev, new_svn_rev)
  return dedent(ROLL_DESCRIPTION_STR % {
      'dep_path': shorten_dep_path(dep_name),
      'before_rev': old_rev_short,
      'after_rev': new_rev_short,
      'svn_range': svn_range_str,
      'revlog_url': url,
  })
def update_deps_entry(deps_lines, deps_ast, value_node, new_rev, comment):
  """Rewrite one DEPS value in place, replacing any trailing '#' comment."""
  line_idx = update_node(deps_lines, deps_ast, value_node, new_rev)
  (content, _, _) = deps_lines[line_idx].partition('#')
  if comment:
    deps_lines[line_idx] = '%s # %s' % (content.rstrip(), comment)
  else:
    deps_lines[line_idx] = content.rstrip()


def update_deps(deps_file, dep_path, dep_name, new_rev, comment):
  """Update the DEPS file with the new git revision.

  Rewrites the entry for |dep_name| in both 'deps' and 'deps_os', writes
  the file back and drops a commit message into .git/MERGE_MSG.  Returns
  0 on success, 1 when no matching entry was found.
  """
  commit_msg = ''
  with open(deps_file) as fh:
    deps_content = fh.read()
  deps_locals = {}
  def _Var(key):
    return deps_locals['vars'][key]
  deps_locals['Var'] = _Var
  # Evaluate the DEPS file to get the deps/deps_os dicts with Var() expanded.
  exec deps_content in {}, deps_locals
  deps_lines = deps_content.splitlines()
  deps_ast = ast.parse(deps_content, deps_file)
  deps_node = find_deps_section(deps_ast, 'deps')
  assert deps_node, 'Could not find "deps" section of DEPS file'
  dep_idx = find_dict_index(deps_node, dep_name)
  if dep_idx is not None:
    value_node = deps_node.values[dep_idx]
    update_deps_entry(deps_lines, deps_ast, value_node, new_rev, comment)
    commit_msg = generate_commit_message(deps_locals['deps'], dep_path,
                                         dep_name, new_rev)
  deps_os_node = find_deps_section(deps_ast, 'deps_os')
  if deps_os_node:
    for (os_name, os_node) in izip(deps_os_node.keys, deps_os_node.values):
      dep_idx = find_dict_index(os_node, dep_name)
      if dep_idx is not None:
        value_node = os_node.values[dep_idx]
        if value_node.__class__ is ast.Name and value_node.id == 'None':
          # 'dep: None' entries deliberately exclude the dep on this OS.
          pass
        else:
          update_deps_entry(deps_lines, deps_ast, value_node, new_rev, comment)
          commit_msg = generate_commit_message(
              deps_locals['deps_os'][os_name.s], dep_path, dep_name, new_rev)
  if not commit_msg:
    print 'Could not find an entry in %s to update.' % deps_file
    return 1
  print 'Pinning %s' % dep_name
  print 'to revision %s' % new_rev
  print 'in %s' % deps_file
  with open(deps_file, 'w') as fh:
    for line in deps_lines:
      print >> fh, line
  # Stage the generated commit message for the user's 'git commit'.
  deps_file_dir = os.path.normpath(os.path.dirname(deps_file))
  deps_file_root = Popen(
      ['git', 'rev-parse', '--show-toplevel'],
      cwd=deps_file_dir, stdout=PIPE).communicate()[0].strip()
  with open(os.path.join(deps_file_root, '.git', 'MERGE_MSG'), 'w') as fh:
    fh.write(commit_msg)
  return 0
def main(argv):
  """Command-line entry point; returns a process exit code."""
  usage = 'Usage: roll-dep-svn [options] <dep path> <rev> [ <DEPS file> ]'
  parser = optparse.OptionParser(usage=usage, description=__doc__)
  parser.add_option('--no-verify-revision',
                    help='Don\'t verify the revision passed in. This '
                         'also skips adding an svn revision comment '
                         'for git dependencies and requires the passed '
                         'revision to be a git hash.',
                    default=False, action='store_true')
  options, args = parser.parse_args(argv)
  if len(args) not in (2, 3):
    parser.error('Expected either 2 or 3 positional parameters.')
  arg_dep_path, revision = args[:2]
  gclient_root = find_gclient_root()
  dep_path = platform_path(arg_dep_path)
  # Accept the dep path either relative to cwd or to the gclient root.
  if not os.path.exists(dep_path):
    dep_path = os.path.join(gclient_root, dep_path)
  if not options.no_verify_revision:
    # Only require the path to exist if the revision should be verified. A path
    # to e.g. os deps might not be checked out.
    if not os.path.isdir(dep_path):
      print >> sys.stderr, 'No such directory: %s' % arg_dep_path
      return 1
  if len(args) > 2:
    deps_file = args[2]
  else:
    # Default to the DEPS file of the solution that owns the dep.
    soln = get_solution(gclient_root, dep_path)
    soln_path = os.path.relpath(os.path.join(gclient_root, soln['name']))
    deps_file = os.path.join(soln_path, 'DEPS')
  dep_name = posix_path(os.path.relpath(dep_path, gclient_root))
  if options.no_verify_revision:
    if not is_git_hash(revision):
      print >> sys.stderr, (
          'The passed revision %s must be a git hash when skipping revision '
          'verification.' % revision)
      return 1
    git_rev = revision
    comment = None
  else:
    git_rev, svn_rev = get_git_revision(dep_path, revision)
    comment = ('from svn revision %s' % svn_rev) if svn_rev else None
    if not git_rev:
      print >> sys.stderr, 'Could not find git revision matching %s.' % revision
      return 1
  return update_deps(deps_file, dep_path, dep_name, git_rev, comment)
if __name__ == '__main__':
  # Exit non-zero on Ctrl-C so callers can tell the roll was aborted.
  try:
    sys.exit(main(sys.argv[1:]))
  except KeyboardInterrupt:
    sys.stderr.write('interrupted\n')
    sys.exit(1)
facebookexperimental/binutils | gdb/testsuite/gdb.python/py-pp-maint.py | 32 | 2491 | # Copyright (C) 2010-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This file is part of the GDB testsuite. It tests python pretty
# printers.
import re
import gdb.types
import gdb.printing
def lookup_function_lookup_test(val):
    """Return a printer for 'function_lookup_test' values, else None."""

    class PrintFunctionLookup(object):
        """Pretty-printer showing the x and y members of the struct."""

        def __init__(self, val):
            self.val = val

        def to_string(self):
            x = str(self.val["x"])
            y = str(self.val["y"])
            return "x=<" + x + "> y=<" + y + ">"

    # get_basic_type strips typedefs/qualifiers; tag may be None for
    # non-struct types.
    typename = gdb.types.get_basic_type(val.type).tag
    if typename == "function_lookup_test":
        return PrintFunctionLookup(val)
    return None
class pp_s(object):
    """Pretty-printer for 'struct s'; sanity-checks that b points at a."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        a = self.val["a"]
        b = self.val["b"]
        # The test program guarantees b == &a; complain loudly otherwise.
        if a.address != b:
            raise Exception("&a(%s) != b(%s)" % (str(a.address), str(b)))
        return "a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">"
class pp_ss(object):
    """Pretty-printer for 'struct ss' showing its a and b members."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        return "a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">"
def build_pretty_printer():
    """Assemble the 'pp-test' regexp-based printer collection."""
    printer = gdb.printing.RegexpCollectionPrettyPrinter("pp-test")
    # Registration order matters for lookup priority; keep struct forms first.
    printer.add_printer('struct s', '^struct s$', pp_s)
    printer.add_printer('s', '^s$', pp_s)
    # Use a lambda this time to exercise doing things this way.
    printer.add_printer('struct ss', '^struct ss$', lambda val: pp_ss(val))
    printer.add_printer('ss', '^ss$', lambda val: pp_ss(val))
    flag_printer = gdb.printing.FlagEnumerationPrinter('enum flag_enum')
    printer.add_printer('enum flag_enum', '^flag_enum$', flag_printer)
    return printer
# Register the function-based lookup first, then the regexp collection.
gdb.printing.register_pretty_printer(gdb, lookup_function_lookup_test)
my_pretty_printer = build_pretty_printer()
gdb.printing.register_pretty_printer(gdb, my_pretty_printer)
| gpl-2.0 |
zerothi/sids | sisl/supercell.py | 1 | 37042 | """ Define a supercell
This class is the basis of many different objects.
"""
import math
import warnings
from numbers import Integral
import numpy as np
from numpy import dot
from ._internal import set_module
from . import _plot as plt
from . import _array as _a
from .utils.mathematics import fnorm
from .shape.prism4 import Cuboid
from .quaternion import Quaternion
from ._math_small import cross3, dot3
from ._supercell import cell_invert, cell_reciprocal
__all__ = ['SuperCell', 'SuperCellChild']
@set_module("sisl")
class SuperCell:
r""" A cell class to retain lattice vectors and a supercell structure
The supercell structure is comprising the *primary* unit-cell and neighbouring
unit-cells. The number of supercells is given by the attribute `nsc` which
is a vector with 3 elements, one per lattice vector. It describes *how many*
times the primary unit-cell is extended along the i'th lattice vector.
For ``nsc[i] == 3`` the supercell is made up of 3 unit-cells. One *behind*, the
primary unit-cell and one *after*.
Parameters
----------
cell : array_like
the lattice parameters of the unit cell (the actual cell
is returned from `tocell`.
nsc : array_like of int
number of supercells along each latticevector
origo : (3,) of float
the origo of the supercell.
Attributes
----------
cell : (3, 3) of float
the lattice vectors (``cell[i, :]`` is the i'th vector)
"""
# We limit the scope of this SuperCell object.
__slots__ = ('cell', '_origo', 'volume', 'nsc', 'n_s', '_sc_off', '_isc_off')
    def __init__(self, cell, nsc=None, origo=None):
        """Initialize from cell (parameters or vectors), supercell counts and origo."""
        if nsc is None:
            nsc = [1, 1, 1]

        # If the length of cell is 6 it must be cell-parameters, not
        # actual cell coordinates
        self.cell = self.tocell(cell)

        if origo is None:
            self._origo = _a.zerosd(3)
        else:
            self._origo = _a.arrayd(origo)
            if self._origo.size != 3:
                raise ValueError("Origo *must* be 3 numbers.")

        # Set the volume
        self._update_vol()

        # Default to the primary cell only; set_nsc installs the real value.
        self.nsc = _a.onesi(3)
        # Set the super-cell
        self.set_nsc(nsc=nsc)
    @property
    def length(self):
        """ Length of each lattice vector """
        return fnorm(self.cell)

    @property
    def origo(self):
        """ Origo for the cell """
        return self._origo

    @origo.setter
    def origo(self, origo):
        """ Set origo """
        # In-place assignment keeps the array object (and dtype) intact.
        self._origo[:] = origo

    def area(self, ax0, ax1):
        """ Calculate the area spanned by the two axis `ax0` and `ax1` """
        # |a x b| is the area of the parallelogram spanned by a and b.
        return (cross3(self.cell[ax0, :], self.cell[ax1, :]) ** 2).sum() ** 0.5
    def toCuboid(self, orthogonal=False):
        """ A cuboid with vectors as this unit-cell and center with respect to its origo

        Parameters
        ----------
        orthogonal : bool, optional
           if true the cuboid has orthogonal sides such that the entire cell is contained
        """
        if not orthogonal:
            return Cuboid(self.cell.copy(), self.center() + self.origo)

        def find_min_max(cmin, cmax, new):
            # Grow the per-axis bounds to include the corner point `new`.
            for i in range(3):
                cmin[i] = min(cmin[i], new[i])
                cmax[i] = max(cmax[i], new[i])
        # Start from the three lattice vectors, then fold in the remaining
        # cell corners (pairwise sums and the full sum of vectors).
        cmin = self.cell.min(0)
        cmax = self.cell.max(0)
        find_min_max(cmin, cmax, self.cell[[0, 1], :].sum(0))
        find_min_max(cmin, cmax, self.cell[[0, 2], :].sum(0))
        find_min_max(cmin, cmax, self.cell[[1, 2], :].sum(0))
        find_min_max(cmin, cmax, self.cell.sum(0))
        return Cuboid(cmax - cmin, self.center() + self.origo)
    def parameters(self, rad=False):
        r""" Cell parameters of this cell in 3 lengths and 3 angles

        Notes
        -----
        Since we return the length and angles between vectors it may not be possible to
        recreate the same cell. Only in the case where the first lattice vector *only*
        has a Cartesian :math:`x` component will this be the case

        Parameters
        ----------
        rad : bool, optional
           whether the angles are returned in radians (otherwise in degree)

        Returns
        -------
        float
            length of first lattice vector
        float
            length of second lattice vector
        float
            length of third lattice vector
        float
            angle between b and c vectors
        float
            angle between a and c vectors
        float
            angle between a and b vectors
        """
        # Conversion factor: radians -> requested angular unit.
        if rad:
            f = 1.
        else:
            f = 180 / np.pi

        # Calculate length of each lattice vector
        cell = self.cell.copy()
        abc = fnorm(cell)

        from math import acos
        # Normalize to unit vectors so dot products become cosines of angles.
        cell = cell / abc.reshape(-1, 1)
        alpha = acos(dot3(cell[1, :], cell[2, :])) * f
        beta = acos(dot3(cell[0, :], cell[2, :])) * f
        gamma = acos(dot3(cell[0, :], cell[1, :])) * f

        return abc[0], abc[1], abc[2], alpha, beta, gamma
    def _update_vol(self):
        # Volume is |a . (b x c)| of the three lattice vectors.
        self.volume = abs(dot3(self.cell[0, :], cross3(self.cell[1, :], self.cell[2, :])))

    def _fill(self, non_filled, dtype=None):
        """ Return a zero filled array of length 3 """

        if len(non_filled) == 3:
            return non_filled

        # Fill in zeros
        # This will purposefully raise an exception
        # if the dimensions of the periodic ones
        # are not consistent.
        if dtype is None:
            try:
                dtype = non_filled.dtype
            except Exception:
                dtype = np.dtype(non_filled[0].__class__)
            if dtype == np.dtype(int):
                # Never go higher than int32 for default
                # guesses on integer lists.
                dtype = np.int32
        f = np.zeros(3, dtype)
        i = 0
        # Only periodic directions (nsc > 1) consume entries of non_filled.
        if self.nsc[0] > 1:
            f[0] = non_filled[i]
            i += 1
        if self.nsc[1] > 1:
            f[1] = non_filled[i]
            i += 1
        if self.nsc[2] > 1:
            f[2] = non_filled[i]
        return f

    def _fill_sc(self, supercell_index):
        """ Return a filled supercell index by filling in zeros where needed """
        return self._fill(supercell_index, dtype=np.int32)
    def set_nsc(self, nsc=None, a=None, b=None, c=None):
        """ Sets the number of supercells in the 3 different cell directions

        Parameters
        ----------
        nsc : list of int, optional
           number of supercells in each direction
        a : integer, optional
           number of supercells in the first unit-cell vector direction
        b : integer, optional
           number of supercells in the second unit-cell vector direction
        c : integer, optional
           number of supercells in the third unit-cell vector direction
        """
        if not nsc is None:
            for i in range(3):
                if not nsc[i] is None:
                    self.nsc[i] = nsc[i]
        if a:
            self.nsc[0] = a
        if b:
            self.nsc[1] = b
        if c:
            self.nsc[2] = c
        # Correct for misplaced number of unit-cells
        for i in range(3):
            if self.nsc[i] == 0:
                self.nsc[i] = 1
        if np.sum(self.nsc % 2) != 3:
            raise ValueError(
                "Supercells has to be of un-even size. The primary cell counts " +
                "one, all others count 2")

        # We might use this very often, hence we store it
        self.n_s = _a.prodi(self.nsc)
        self._sc_off = _a.zerosi([self.n_s, 3])
        self._isc_off = _a.zerosi(self.nsc)

        n = self.nsc
        # We define the following ones like this:

        def ret_range(val):
            # Symmetric range, e.g. nsc == 3 -> (-1, 0, 1).
            i = val // 2
            return range(-i, i+1)
        x = ret_range(n[0])
        y = ret_range(n[1])
        z = ret_range(n[2])
        i = 0
        for iz in z:
            for iy in y:
                for ix in x:
                    # Keep index 0 reserved for the primary (0, 0, 0) cell.
                    if ix == 0 and iy == 0 and iz == 0:
                        continue
                    # Increment index
                    i += 1
                    # The offsets for the supercells in the
                    # sparsity pattern
                    self._sc_off[i, 0] = ix
                    self._sc_off[i, 1] = iy
                    self._sc_off[i, 2] = iz

        self._update_isc_off()
    def _update_isc_off(self):
        """ Internal routine for updating the supercell indices """
        # Negative offsets index the _isc_off array from the end, so the
        # lookup works directly with signed supercell offsets.
        for i in range(self.n_s):
            d = self.sc_off[i, :]
            self._isc_off[d[0], d[1], d[2]] = i

    @property
    def sc_off(self):
        """ Integer supercell offsets """
        return self._sc_off

    @sc_off.setter
    def sc_off(self, sc_off):
        """ Set the supercell offset """
        self._sc_off[:, :] = _a.arrayi(sc_off, order='C')
        # Keep the reverse lookup table in sync with the new offsets.
        self._update_isc_off()

    @property
    def isc_off(self):
        """ Internal indexed supercell ``[ia, ib, ic] == i`` """
        return self._isc_off

    def __iter__(self):
        """ Iterate the supercells and the indices of the supercells """
        yield from enumerate(self.sc_off)
def copy(self, cell=None, origo=None):
""" A deepcopy of the object
Parameters
----------
cell : array_like
the new cell parameters
origo : array_like
the new origo
"""
if origo is None:
origo = self.origo.copy()
if cell is None:
copy = self.__class__(np.copy(self.cell), nsc=np.copy(self.nsc), origo=origo)
else:
copy = self.__class__(np.copy(cell), nsc=np.copy(self.nsc), origo=origo)
# Ensure that the correct super-cell information gets carried through
if not np.allclose(copy.sc_off, self.sc_off):
copy.sc_off = self.sc_off
return copy
def fit(self, xyz, axis=None, tol=0.05):
""" Fit the supercell to `xyz` such that the unit-cell becomes periodic in the specified directions
The fitted supercell tries to determine the unit-cell parameters by solving a set of linear equations
corresponding to the current supercell vectors.
>>> numpy.linalg.solve(self.cell.T, xyz.T)
It is important to know that this routine will *only* work if at least some of the atoms are
integer offsets of the lattice vectors. I.e. the resulting fit will depend on the translation
of the coordinates.
Parameters
----------
xyz : array_like ``shape(*, 3)``
the coordinates that we will wish to encompass and analyze.
axis : None or array_like
if ``None`` equivalent to ``[0, 1, 2]``, else only the cell-vectors
along the provided axis will be used
tol : float
tolerance (in Angstrom) of the positions. I.e. we neglect coordinates
which are not within the radius of this magnitude
"""
# In case the passed coordinates are from a Geometry
from .geometry import Geometry
if isinstance(xyz, Geometry):
xyz = xyz.xyz[:, :]
cell = np.copy(self.cell[:, :])
# Get fractional coordinates to get the divisions in the current cell
x = dot(xyz, self.icell.T)
# Now we should figure out the correct repetitions
# by rounding to integer positions of the cell vectors
ix = np.rint(x)
# Figure out the displacements from integers
# Then reduce search space by removing those coordinates
# that are more than the tolerance.
dist = np.sqrt((dot(cell.T, (x - ix).T) ** 2).sum(0))
idx = (dist <= tol).nonzero()[0]
if len(idx) == 0:
raise ValueError('Could not fit the cell parameters to the coordinates '
'due to insufficient accuracy (try increase the tolerance)')
# Reduce problem to allowed values below the tolerance
x = x[idx, :]
ix = ix[idx, :]
# Reduce to total repetitions
ireps = np.amax(ix, axis=0) - np.amin(ix, axis=0) + 1
# Only repeat the axis requested
if isinstance(axis, Integral):
axis = [axis]
# Reduce the non-set axis
if not axis is None:
for ax in [0, 1, 2]:
if ax not in axis:
ireps[ax] = 1
# Enlarge the cell vectors
cell[0, :] *= ireps[0]
cell[1, :] *= ireps[1]
cell[2, :] *= ireps[2]
return self.copy(cell)
def swapaxes(self, a, b):
""" Swap axis `a` and `b` in a new `SuperCell`
If ``swapaxes(0,1)`` it returns the 0 in the 1 values.
"""
# Create index vector
idx = _a.arrayi([0, 1, 2])
idx[b] = a
idx[a] = b
# There _can_ be errors when sc_off isn't created by sisl
return self.__class__(np.copy(self.cell[idx, :], order='C'),
nsc=self.nsc[idx],
origo=np.copy(self.origo[idx], order='C'))
def plane(self, ax1, ax2, origo=True):
""" Query point and plane-normal for the plane spanning `ax1` and `ax2`
Parameters
----------
ax1 : int
the first axis vector
ax2 : int
the second axis vector
origo : bool, optional
whether the plane intersects the origo or the opposite corner of the
unit-cell.
Returns
-------
normal_V : numpy.ndarray
planes normal vector (pointing outwards with regards to the cell)
p : numpy.ndarray
a point on the plane
Examples
--------
All 6 faces of the supercell can be retrieved like this:
>>> sc = SuperCell(4)
>>> n1, p1 = sc.plane(0, 1, True)
>>> n2, p2 = sc.plane(0, 1, False)
>>> n3, p3 = sc.plane(0, 2, True)
>>> n4, p4 = sc.plane(0, 2, False)
>>> n5, p5 = sc.plane(1, 2, True)
>>> n6, p6 = sc.plane(1, 2, False)
However, for performance critical calculations it may be advantageous to
do this:
>>> sc = SuperCell(4)
>>> uc = sc.cell.sum(0)
>>> n1, p1 = sc.plane(0, 1)
>>> n2 = -n1
>>> p2 = p1 + uc
>>> n3, p3 = sc.plane(0, 2)
>>> n4 = -n3
>>> p4 = p3 + uc
>>> n5, p5 = sc.plane(1, 2)
>>> n6 = -n5
>>> p6 = p5 + uc
Secondly, the variables ``p1``, ``p3`` and ``p5`` are always ``[0, 0, 0]`` and
``p2``, ``p4`` and ``p6`` are always ``uc``.
Hence this may be used to further reduce certain computations.
"""
cell = self.cell
n = cross3(cell[ax1, :], cell[ax2, :])
# Normalize
n /= dot3(n, n) ** 0.5
# Now we need to figure out if the normal vector
# is pointing outwards
# Take the cell center
up = cell.sum(0)
# Calculate the distance from the plane to the center of the cell
# If d is positive then the normal vector is pointing towards
# the center, so rotate 180
if dot3(n, up / 2) > 0.:
n *= -1
if origo:
return n, _a.zerosd([3])
# We have to reverse the normal vector
return -n, up
def __mul__(self, m):
""" Implement easy repeat function
Parameters
----------
m : int or array_like of length 3
a single integer may be regarded as [m, m, m].
A list will expand the unit-cell along the equivalent lattice vector.
Returns
-------
SuperCell
enlarged supercell
"""
# Simple form
if isinstance(m, Integral):
return self.tile(m, 0).tile(m, 1).tile(m, 2)
sc = self.copy()
for i, r in enumerate(m):
sc = sc.tile(r, i)
return sc
@property
def icell(self):
""" Returns the reciprocal (inverse) cell for the `SuperCell`.
Note: The returned vectors are still in ``[0, :]`` format
and not as returned by an inverse LAPACK algorithm.
"""
return cell_invert(self.cell)
@property
def rcell(self):
""" Returns the reciprocal cell for the `SuperCell` with ``2*np.pi``
Note: The returned vectors are still in [0, :] format
and not as returned by an inverse LAPACK algorithm.
"""
return cell_reciprocal(self.cell)
def cell_length(self, length):
""" Calculate cell vectors such that they each have length `length`
Parameters
----------
length : float or array_like
length for cell vectors, if an array it corresponds to the individual
vectors and it must have length 3
Returns
-------
numpy.ndarray
cell-vectors with prescribed length
"""
length = _a.asarrayd(length)
if length.size == 1:
length = np.tile(length, 3)
if length.size != 3:
raise ValueError(self.__class__.__name__ + '.cell_length length parameter should be a single '
'float, or an array of 3 values.')
return self.cell * (length.ravel() / self.length).reshape(3, 1)
def rotate(self, angle, v, only='abc', rad=False):
""" Rotates the supercell, in-place by the angle around the vector
One can control which cell vectors are rotated by designating them
individually with ``only='[abc]'``.
Parameters
----------
angle : float
the angle of which the geometry should be rotated
v : array_like
the vector around the rotation is going to happen
``v = [1,0,0]`` will rotate in the ``yz`` plane
rad : bool, optional
Whether the angle is in radians (True) or in degrees (False)
only : ('abc'), str, optional
only rotate the designated cell vectors.
"""
# flatten => copy
vn = _a.asarrayd(v).flatten()
vn /= fnorm(vn)
q = Quaternion(angle, vn, rad=rad)
q /= q.norm() # normalize the quaternion
cell = np.copy(self.cell)
if 'a' in only:
cell[0, :] = q.rotate(self.cell[0, :])
if 'b' in only:
cell[1, :] = q.rotate(self.cell[1, :])
if 'c' in only:
cell[2, :] = q.rotate(self.cell[2, :])
return self.copy(cell)
def offset(self, isc=None):
""" Returns the supercell offset of the supercell index """
if isc is None:
return _a.arrayd([0, 0, 0])
return dot(isc, self.cell)
def add(self, other):
""" Add two supercell lattice vectors to each other
Parameters
----------
other : SuperCell, array_like
the lattice vectors of the other supercell to add
"""
if not isinstance(other, SuperCell):
other = SuperCell(other)
cell = self.cell + other.cell
origo = self.origo + other.origo
nsc = np.where(self.nsc > other.nsc, self.nsc, other.nsc)
return self.__class__(cell, nsc=nsc, origo=origo)
def __add__(self, other):
return self.add(other)
__radd__ = __add__
def add_vacuum(self, vacuum, axis):
""" Add vacuum along the `axis` lattice vector
Parameters
----------
vacuum : float
amount of vacuum added, in Ang
axis : int
the lattice vector to add vacuum along
"""
cell = np.copy(self.cell)
d = cell[axis, :].copy()
# normalize to get direction vector
cell[axis, :] += d * (vacuum / fnorm(d))
return self.copy(cell)
def sc_index(self, sc_off):
""" Returns the integer index in the sc_off list that corresponds to `sc_off`
Returns the index for the supercell in the global offset.
Parameters
----------
sc_off : (3,) or list of (3,)
super cell specification. For each axis having value ``None`` all supercells
along that axis is returned.
"""
def _assert(m, v):
if np.any(np.abs(v) > m):
raise ValueError("Requesting a non-existing supercell index")
hsc = self.nsc // 2
if len(sc_off) == 0:
return _a.arrayi([[]])
elif isinstance(sc_off[0], np.ndarray):
_assert(hsc[0], sc_off[:, 0])
_assert(hsc[1], sc_off[:, 1])
_assert(hsc[2], sc_off[:, 2])
return self._isc_off[sc_off[:, 0], sc_off[:, 1], sc_off[:, 2]]
elif isinstance(sc_off[0], (tuple, list)):
# We are dealing with a list of lists
sc_off = np.asarray(sc_off)
_assert(hsc[0], sc_off[:, 0])
_assert(hsc[1], sc_off[:, 1])
_assert(hsc[2], sc_off[:, 2])
return self._isc_off[sc_off[:, 0], sc_off[:, 1], sc_off[:, 2]]
# Fall back to the other routines
sc_off = self._fill_sc(sc_off)
if sc_off[0] is not None and sc_off[1] is not None and sc_off[2] is not None:
_assert(hsc[0], sc_off[0])
_assert(hsc[1], sc_off[1])
_assert(hsc[2], sc_off[2])
return self._isc_off[sc_off[0], sc_off[1], sc_off[2]]
# We build it because there are 'none'
if sc_off[0] is None:
idx = _a.arangei(self.n_s)
else:
idx = (self.sc_off[:, 0] == sc_off[0]).nonzero()[0]
if not sc_off[1] is None:
idx = idx[(self.sc_off[idx, 1] == sc_off[1]).nonzero()[0]]
if not sc_off[2] is None:
idx = idx[(self.sc_off[idx, 2] == sc_off[2]).nonzero()[0]]
return idx
def scale(self, scale):
""" Scale lattice vectors
Does not scale `origo`.
Parameters
----------
scale : ``float``
the scale factor for the new lattice vectors
"""
return self.copy(self.cell * scale)
def tile(self, reps, axis):
""" Extend the unit-cell `reps` times along the `axis` lattice vector
Notes
-----
This is *exactly* equivalent to the `repeat` routine.
Parameters
----------
reps : int
number of times the unit-cell is repeated along the specified lattice vector
axis : int
the lattice vector along which the repetition is performed
"""
cell = np.copy(self.cell)
nsc = np.copy(self.nsc)
origo = np.copy(self.origo)
cell[axis, :] *= reps
# Only reduce the size if it is larger than 5
if nsc[axis] > 3 and reps > 1:
# This is number of connections for the primary cell
h_nsc = nsc[axis] // 2
# The new number of supercells will then be
nsc[axis] = max(1, int(math.ceil(h_nsc / reps))) * 2 + 1
return self.__class__(cell, nsc=nsc, origo=origo)
def repeat(self, reps, axis):
""" Extend the unit-cell `reps` times along the `axis` lattice vector
Notes
-----
This is *exactly* equivalent to the `tile` routine.
Parameters
----------
reps : int
number of times the unit-cell is repeated along the specified lattice vector
axis : int
the lattice vector along which the repetition is performed
"""
return self.tile(reps, axis)
def cut(self, seps, axis):
""" Cuts the cell into several different sections. """
cell = np.copy(self.cell)
cell[axis, :] /= seps
return self.copy(cell)
def append(self, other, axis):
""" Appends other `SuperCell` to this grid along axis """
cell = np.copy(self.cell)
cell[axis, :] += other.cell[axis, :]
return self.copy(cell)
def prepend(self, other, axis):
""" Prepends other `SuperCell` to this grid along axis
For a `SuperCell` object this is equivalent to `append`.
"""
return self.append(other, axis)
def move(self, v):
""" Appends additional space to the object """
# check which cell vector resembles v the most,
# use that
cell = np.copy(self.cell)
p = np.empty([3], np.float64)
cl = fnorm(cell)
for i in range(3):
p[i] = abs(np.sum(cell[i, :] * v)) / cl[i]
cell[np.argmax(p), :] += v
return self.copy(cell)
translate = move
def center(self, axis=None):
""" Returns center of the `SuperCell`, possibly with respect to an axis """
if axis is None:
return self.cell.sum(0) * 0.5
return self.cell[axis, :] * 0.5
@classmethod
def tocell(cls, *args):
r""" Returns a 3x3 unit-cell dependent on the input
1 argument
a unit-cell along Cartesian coordinates with side-length
equal to the argument.
3 arguments
the diagonal components of a Cartesian unit-cell
6 arguments
the cell parameters give by :math:`a`, :math:`b`, :math:`c`,
:math:`\alpha`, :math:`\beta` and :math:`\gamma` (angles
in degrees).
9 arguments
a 3x3 unit-cell.
Parameters
----------
*args : float
May be either, 1, 3, 6 or 9 elements.
Note that the arguments will be put into an array and flattened
before checking the number of arguments.
Examples
--------
>>> cell_1_1_1 = SuperCell.tocell(1.)
>>> cell_1_2_3 = SuperCell.tocell(1., 2., 3.)
>>> cell_1_2_3 = SuperCell.tocell([1., 2., 3.]) # same as above
"""
# Convert into true array (flattened)
args = _a.asarrayd(args).ravel()
nargs = len(args)
# A square-box
if nargs == 1:
return np.diag([args[0]] * 3)
# Diagonal components
if nargs == 3:
return np.diag(args)
# Cell parameters
if nargs == 6:
cell = _a.zerosd([3, 3])
a = args[0]
b = args[1]
c = args[2]
alpha = args[3]
beta = args[4]
gamma = args[5]
from math import sqrt, cos, sin, pi
pi180 = pi / 180.
cell[0, 0] = a
g = gamma * pi180
cg = cos(g)
sg = sin(g)
cell[1, 0] = b * cg
cell[1, 1] = b * sg
b = beta * pi180
cb = cos(b)
sb = sin(b)
cell[2, 0] = c * cb
a = alpha * pi180
d = (cos(a) - cb * cg) / sg
cell[2, 1] = c * d
cell[2, 2] = c * sqrt(sb ** 2 - d ** 2)
return cell
# A complete cell
if nargs == 9:
return args.copy().reshape(3, 3)
raise ValueError(
"Creating a unit-cell has to have 1, 3 or 6 arguments, please correct.")
def is_orthogonal(self):
""" Returns true if the cell vectors are orthogonal """
# Convert to unit-vector cell
cell = np.copy(self.cell)
cl = fnorm(cell)
cell[0, :] = cell[0, :] / cl[0]
cell[1, :] = cell[1, :] / cl[1]
cell[2, :] = cell[2, :] / cl[2]
i_s = dot3(cell[0, :], cell[1, :]) < 0.001
i_s = dot3(cell[0, :], cell[2, :]) < 0.001 and i_s
i_s = dot3(cell[1, :], cell[2, :]) < 0.001 and i_s
return i_s
def parallel(self, other, axis=(0, 1, 2)):
""" Returns true if the cell vectors are parallel to `other`
Parameters
----------
other : SuperCell
the other object to check whether the axis are parallel
axis : int or array_like
only check the specified axis (default to all)
"""
axis = _a.asarrayi(axis).ravel()
# Convert to unit-vector cell
for i in axis:
a = self.cell[i, :] / fnorm(self.cell[i, :])
b = other.cell[i, :] / fnorm(other.cell[i, :])
if abs(dot3(a, b) - 1) > 0.001:
return False
return True
def angle(self, i, j, rad=False):
""" The angle between two of the cell vectors
Parameters
----------
i : int
the first cell vector
j : int
the second cell vector
rad : bool, optional
whether the returned value is in radians
"""
n = fnorm(self.cell[[i, j], :])
ang = math.acos(dot3(self.cell[i, :], self.cell[j, :]) / (n[0] * n[1]))
if rad:
return ang
return math.degrees(ang)
@staticmethod
def read(sile, *args, **kwargs):
""" Reads the supercell from the `Sile` using ``Sile.read_supercell``
Parameters
----------
sile : Sile, str or pathlib.Path
a `Sile` object which will be used to read the supercell
if it is a string it will create a new sile using `sisl.io.get_sile`.
"""
# This only works because, they *must*
# have been imported previously
from sisl.io import get_sile, BaseSile
if isinstance(sile, BaseSile):
return sile.read_supercell(*args, **kwargs)
else:
with get_sile(sile) as fh:
return fh.read_supercell(*args, **kwargs)
def equal(self, other, tol=1e-4):
""" Check whether two supercell are equivalent
Parameters
----------
tol : float, optional
tolerance value for the cell vectors and origo
"""
if not isinstance(other, (SuperCell, SuperCellChild)):
return False
for tol in [1e-2, 1e-3, 1e-4]:
same = np.allclose(self.cell, other.cell, atol=tol)
same = same and np.allclose(self.nsc, other.nsc)
same = same and np.allclose(self.origo, other.origo, atol=tol)
return same
def __str__(self):
""" Returns a string representation of the object """
# Create format for lattice vectors
s = ',\n '.join(['ABC'[i] + '=[{:.3f}, {:.3f}, {:.3f}]'.format(*self.cell[i]) for i in (0, 1, 2)])
return self.__class__.__name__ + ('{{nsc: [{:} {:} {:}],\n ' + s + ',\n}}').format(*self.nsc)
def __repr__(self):
a, b, c, alpha, beta, gamma = map(lambda r: round(r, 3), self.parameters())
return f"<{self.__module__}.{self.__class__.__name__} a={a}, b={b}, c={c}, α={alpha}, β={beta}, γ={gamma}, nsc={self.nsc}>"
def __eq__(self, other):
""" Equality check """
return self.equal(other)
def __ne__(self, b):
""" In-equality check """
return not (self == b)
# Create pickling routines
def __getstate__(self):
""" Returns the state of this object """
return {'cell': self.cell, 'nsc': self.nsc, 'sc_off': self.sc_off, 'origo': self.origo}
def __setstate__(self, d):
""" Re-create the state of this object """
self.__init__(d['cell'], d['nsc'], d['origo'])
self.sc_off = d['sc_off']
def __plot__(self, axis=None, axes=False, *args, **kwargs):
""" Plot the supercell in a specified ``matplotlib.Axes`` object.
Parameters
----------
axis : array_like, optional
only plot a subset of the axis, defaults to all axis
axes : bool or matplotlib.Axes, optional
the figure axes to plot in (if ``matplotlib.Axes`` object).
If ``True`` it will create a new figure to plot in.
If ``False`` it will try and grap the current figure and the current axes.
"""
# Default dictionary for passing to newly created figures
d = dict()
# Try and default the color and alpha
if 'color' not in kwargs and len(args) == 0:
kwargs['color'] = 'k'
if 'alpha' not in kwargs:
kwargs['alpha'] = 0.5
if axis is None:
axis = [0, 1, 2]
# Ensure we have a new 3D Axes3D
if len(axis) == 3:
d['projection'] = '3d'
axes = plt.get_axes(axes, **d)
# Create vector objects
o = self.origo
v = []
for a in axis:
v.append(np.vstack((o[axis], o[axis] + self.cell[a, axis])))
v = np.array(v)
if axes.__class__.__name__.startswith('Axes3D'):
# We should plot in 3D plots
for vv in v:
axes.plot(vv[:, 0], vv[:, 1], vv[:, 2], *args, **kwargs)
v0, v1 = v[0], v[1] - o
axes.plot(v0[1, 0] + v1[:, 0], v0[1, 1] + v1[:, 1], v0[1, 2] + v1[:, 2], *args, **kwargs)
axes.set_zlabel('Ang')
else:
for vv in v:
axes.plot(vv[:, 0], vv[:, 1], *args, **kwargs)
v0, v1 = v[0], v[1] - o[axis]
axes.plot(v0[1, 0] + v1[:, 0], v0[1, 1] + v1[:, 1], *args, **kwargs)
axes.plot(v1[1, 0] + v0[:, 0], v1[1, 1] + v0[:, 1], *args, **kwargs)
axes.set_xlabel('Ang')
axes.set_ylabel('Ang')
return axes
class SuperCellChild:
""" Class to be inherited by using the ``self.sc`` as a `SuperCell` object
Initialize by a `SuperCell` object and get access to several different
routines directly related to the `SuperCell` class.
"""
def set_nsc(self, *args, **kwargs):
""" Set the number of super-cells in the `SuperCell` object
See `set_nsc` for allowed parameters.
See Also
--------
SuperCell.set_nsc : the underlying called method
"""
self.sc.set_nsc(*args, **kwargs)
def set_supercell(self, sc):
""" Overwrites the local supercell """
if sc is None:
# Default supercell is a simple
# 1x1x1 unit-cell
self.sc = SuperCell([1., 1., 1.])
elif isinstance(sc, SuperCell):
self.sc = sc
elif isinstance(sc, SuperCellChild):
self.sc = sc.sc
else:
# The supercell is given as a cell
self.sc = SuperCell(sc)
# Loop over attributes in this class
# if it inherits SuperCellChild, we call
# set_sc on that too.
# Sadly, getattr fails for @property methods
# which forces us to use try ... except
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for a in dir(self):
try:
if isinstance(getattr(self, a), SuperCellChild):
getattr(self, a).set_supercell(self.sc)
except:
pass
set_sc = set_supercell
@property
def volume(self):
""" Returns the inherent `SuperCell` objects `vol` """
return self.sc.volume
def area(self, ax0, ax1):
""" Calculate the area spanned by the two axis `ax0` and `ax1` """
return (cross3(self.sc.cell[ax0, :], self.sc.cell[ax1, :]) ** 2).sum() ** 0.5
@property
def cell(self):
""" Returns the inherent `SuperCell` objects `cell` """
return self.sc.cell
@property
def icell(self):
""" Returns the inherent `SuperCell` objects `icell` """
return self.sc.icell
@property
def rcell(self):
""" Returns the inherent `SuperCell` objects `rcell` """
return self.sc.rcell
@property
def origo(self):
""" Returns the inherent `SuperCell` objects `origo` """
return self.sc.origo
@property
def n_s(self):
""" Returns the inherent `SuperCell` objects `n_s` """
return self.sc.n_s
@property
def nsc(self):
""" Returns the inherent `SuperCell` objects `nsc` """
return self.sc.nsc
@property
def sc_off(self):
""" Returns the inherent `SuperCell` objects `sc_off` """
return self.sc.sc_off
@property
def isc_off(self):
""" Returns the inherent `SuperCell` objects `isc_off` """
return self.sc.isc_off
def add_vacuum(self, vacuum, axis):
""" Add vacuum along the `axis` lattice vector
Parameters
----------
vacuum : float
amount of vacuum added, in Ang
axis : int
the lattice vector to add vacuum along
"""
copy = self.copy()
copy.set_supercell(self.sc.add_vacuum(vacuum, axis))
return copy
def _fill(self, non_filled, dtype=None):
return self.sc._fill(non_filled, dtype)
def _fill_sc(self, supercell_index):
return self.sc._fill_sc(supercell_index)
def sc_index(self, *args, **kwargs):
""" Call local `SuperCell` object `sc_index` function """
return self.sc.sc_index(*args, **kwargs)
def is_orthogonal(self):
""" Return true if all cell vectors are linearly independent"""
return self.sc.is_orthogonal()
| lgpl-3.0 |
nhenezi/kuma | vendor/lib/python/south/migration/migrators.py | 22 | 12108 | from __future__ import print_function
from copy import copy, deepcopy
import datetime
import inspect
import sys
import traceback
from django.core.management import call_command
from django.core.management.commands import loaddata
from django.db import models
import south.db
from south import exceptions
from south.db import DEFAULT_DB_ALIAS
from south.models import MigrationHistory
from south.signals import ran_migration
from south.utils.py3 import StringIO
class Migrator(object):
def __init__(self, verbosity=0, interactive=False):
self.verbosity = int(verbosity)
self.interactive = bool(interactive)
@staticmethod
def title(target):
raise NotImplementedError()
def print_title(self, target):
if self.verbosity:
print(self.title(target))
@staticmethod
def status(target):
raise NotImplementedError()
def print_status(self, migration):
status = self.status(migration)
if self.verbosity and status:
print(status)
@staticmethod
def orm(migration):
raise NotImplementedError()
def backwards(self, migration):
return self._wrap_direction(migration.backwards(), migration.prev_orm())
def direction(self, migration):
raise NotImplementedError()
@staticmethod
def _wrap_direction(direction, orm):
args = inspect.getargspec(direction)
if len(args[0]) == 1:
# Old migration, no ORM should be passed in
return direction
return (lambda: direction(orm))
@staticmethod
def record(migration, database):
raise NotImplementedError()
def run_migration_error(self, migration, extra_info=''):
return (
' ! Error found during real run of migration! Aborting.\n'
'\n'
' ! Since you have a database that does not support running\n'
' ! schema-altering statements in transactions, we have had \n'
' ! to leave it in an interim state between migrations.\n'
'%s\n'
' ! The South developers regret this has happened, and would\n'
' ! like to gently persuade you to consider a slightly\n'
' ! easier-to-deal-with DBMS (one that supports DDL transactions)\n'
' ! NOTE: The error which caused the migration to fail is further up.'
) % extra_info
def run_migration(self, migration, database):
migration_function = self.direction(migration)
south.db.db.start_transaction()
try:
migration_function()
south.db.db.execute_deferred_sql()
if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
# record us as having done this in the same transaction,
# since we're not in a dry run
self.record(migration, database)
except:
south.db.db.rollback_transaction()
if not south.db.db.has_ddl_transactions:
print(self.run_migration_error(migration))
print("Error in migration: %s" % migration)
raise
else:
try:
south.db.db.commit_transaction()
except:
print("Error during commit in migration: %s" % migration)
raise
def run(self, migration, database):
# Get the correct ORM.
south.db.db.current_orm = self.orm(migration)
# If we're not already in a dry run, and the database doesn't support
# running DDL inside a transaction, *cough*MySQL*cough* then do a dry
# run first.
if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
if not south.db.db.has_ddl_transactions:
dry_run = DryRunMigrator(migrator=self, ignore_fail=False)
dry_run.run_migration(migration, database)
return self.run_migration(migration, database)
def send_ran_migration(self, migration):
ran_migration.send(None,
app=migration.app_label(),
migration=migration,
method=self.__class__.__name__.lower())
def migrate(self, migration, database):
"""
Runs the specified migration forwards/backwards, in order.
"""
app = migration.migrations._migrations
migration_name = migration.name()
self.print_status(migration)
result = self.run(migration, database)
self.send_ran_migration(migration)
return result
def migrate_many(self, target, migrations, database):
raise NotImplementedError()
class MigratorWrapper(object):
def __init__(self, migrator, *args, **kwargs):
self._migrator = copy(migrator)
attributes = dict([(k, getattr(self, k))
for k in self.__class__.__dict__
if not k.startswith('__')])
self._migrator.__dict__.update(attributes)
self._migrator.__dict__['_wrapper'] = self
def __getattr__(self, name):
return getattr(self._migrator, name)
class DryRunMigrator(MigratorWrapper):
def __init__(self, ignore_fail=True, *args, **kwargs):
super(DryRunMigrator, self).__init__(*args, **kwargs)
self._ignore_fail = ignore_fail
def _run_migration(self, migration):
if migration.no_dry_run():
if self.verbosity:
print(" - Migration '%s' is marked for no-dry-run." % migration)
return
south.db.db.dry_run = True
# preserve the constraint cache as it can be mutated by the dry run
constraint_cache = deepcopy(south.db.db._constraint_cache)
if self._ignore_fail:
south.db.db.debug, old_debug = False, south.db.db.debug
pending_creates = south.db.db.get_pending_creates()
south.db.db.start_transaction()
migration_function = self.direction(migration)
try:
try:
migration_function()
south.db.db.execute_deferred_sql()
except:
raise exceptions.FailedDryRun(migration, sys.exc_info())
finally:
south.db.db.rollback_transactions_dry_run()
if self._ignore_fail:
south.db.db.debug = old_debug
south.db.db.clear_run_data(pending_creates)
south.db.db.dry_run = False
# restore the preserved constraint cache from before dry run was
# executed
south.db.db._constraint_cache = constraint_cache
def run_migration(self, migration, database):
try:
self._run_migration(migration)
except exceptions.FailedDryRun:
if self._ignore_fail:
return False
raise
def send_ran_migration(self, *args, **kwargs):
pass
class FakeMigrator(MigratorWrapper):
def run(self, migration, database):
# Don't actually run, just record as if ran
self.record(migration, database)
if self.verbosity:
print(' (faked)')
def send_ran_migration(self, *args, **kwargs):
pass
class LoadInitialDataMigrator(MigratorWrapper):
def load_initial_data(self, target, db='default'):
if target is None or target != target.migrations[-1]:
return
# Load initial data, if we ended up at target
if self.verbosity:
print(" - Loading initial data for %s." % target.app_label())
# Override Django's get_apps call temporarily to only load from the
# current app
old_get_apps = models.get_apps
new_get_apps = lambda: [models.get_app(target.app_label())]
models.get_apps = new_get_apps
loaddata.get_apps = new_get_apps
try:
call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=db)
finally:
models.get_apps = old_get_apps
loaddata.get_apps = old_get_apps
def migrate_many(self, target, migrations, database):
migrator = self._migrator
result = migrator.__class__.migrate_many(migrator, target, migrations, database)
if result:
self.load_initial_data(target, db=database)
return True
class Forwards(Migrator):
"""
Runs the specified migration forwards, in order.
"""
torun = 'forwards'
@staticmethod
def title(target):
if target is not None:
return " - Migrating forwards to %s." % target.name()
else:
assert False, "You cannot migrate forwards to zero."
@staticmethod
def status(migration):
return ' > %s' % migration
@staticmethod
def orm(migration):
return migration.orm()
def forwards(self, migration):
return self._wrap_direction(migration.forwards(), migration.orm())
direction = forwards
@staticmethod
def record(migration, database):
# Record us as having done this
record = MigrationHistory.for_migration(migration, database)
try:
from django.utils.timezone import now
record.applied = now()
except ImportError:
record.applied = datetime.datetime.utcnow()
if database != DEFAULT_DB_ALIAS:
record.save(using=database)
else:
# Django 1.1 and below always go down this branch.
record.save()
def format_backwards(self, migration):
if migration.no_dry_run():
return " (migration cannot be dry-run; cannot discover commands)"
old_debug, old_dry_run = south.db.db.debug, south.db.db.dry_run
south.db.db.debug = south.db.db.dry_run = True
stdout = sys.stdout
sys.stdout = StringIO()
try:
try:
self.backwards(migration)()
return sys.stdout.getvalue()
except:
raise
finally:
south.db.db.debug, south.db.db.dry_run = old_debug, old_dry_run
sys.stdout = stdout
def run_migration_error(self, migration, extra_info=''):
extra_info = ('\n'
'! You *might* be able to recover with:'
'%s'
'%s' %
(self.format_backwards(migration), extra_info))
return super(Forwards, self).run_migration_error(migration, extra_info)
def migrate_many(self, target, migrations, database):
try:
for migration in migrations:
result = self.migrate(migration, database)
if result is False: # The migrations errored, but nicely.
return False
finally:
# Call any pending post_syncdb signals
south.db.db.send_pending_create_signals(verbosity=self.verbosity,
interactive=self.interactive)
return True
class Backwards(Migrator):
"""
Runs the specified migration backwards, in order.
"""
torun = 'backwards'
@staticmethod
def title(target):
if target is None:
return " - Migrating backwards to zero state."
else:
return " - Migrating backwards to just after %s." % target.name()
@staticmethod
def status(migration):
return ' < %s' % migration
@staticmethod
def orm(migration):
return migration.prev_orm()
direction = Migrator.backwards
@staticmethod
def record(migration, database):
# Record us as having not done this
record = MigrationHistory.for_migration(migration, database)
if record.id is not None:
if database != DEFAULT_DB_ALIAS:
record.delete(using=database)
else:
# Django 1.1 always goes down here
record.delete()
def migrate_many(self, target, migrations, database):
for migration in migrations:
self.migrate(migration, database)
return True
| mpl-2.0 |
michaelhowden/eden | modules/templates/RGIMS/config.py | 6 | 9764 | # -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Template settings for RGIMS:
Relief Goods and Inventory Management System
http://eden.sahanafoundation.org/wiki/Deployments/Philippines/RGIMS
"""
T = current.T
settings.base.system_name = "Relief Goods Inventory & Monitoring System"
settings.base.system_name_short = "RGIMS"
# Pre-Populate
settings.base.prepopulate = ("RGIMS", "default/users")
# Theme
settings.base.theme = "RGIMS"
# Uncomment to Hide the language toolbar
settings.L10n.display_toolbar = False
# Default timezone for users
settings.L10n.utc_offset = "+0800"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Finance settings
settings.fin.currencies = {
"USD" : T("United States Dollars"),
"EUR" : T("Euros"),
"PHP" : T("Philippine Pesos")
}
settings.fin.currency_default = "PHP"
# Security Policy
settings.security.policy = 6 # Warehouse-specific restrictions
settings.security.map = True
def rgims_realm_entity(table, row):
    """
    Assign a Realm Entity to records
    """
    if table._tablename not in ("inv_recv", "inv_send"):
        # Normal lookup
        return 0

    # For these tables we need to assign the site_id's realm,
    # not the organisation_id's
    db = current.db
    stable = db.org_site
    site_row = db(stable.site_id == row.site_id).select(
        stable.realm_entity,
        limitby=(0, 1),
    ).first()
    if site_row:
        return site_row.realm_entity

    # Normal lookup
    return 0
settings.auth.realm_entity = rgims_realm_entity
# Enable this for a UN-style deployment
settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
settings.ui.camp = True
# Requests
settings.req.use_commit = False
settings.req.req_form_name = "Request Issue Form"
settings.req.req_shortname = "RIS"
# Restrict the type of requests that can be made, valid values in the
# list are ["Stock", "People", "Other"]. If this is commented out then
# all types will be valid.
settings.req.req_type = ["Stock"]
# Inventory Management
settings.inv.send_form_name = "Tally Out Sheet"
settings.inv.send_short_name = "TOS"
settings.inv.send_ref_field_name = "Tally Out Number"
settings.inv.recv_form_name = "Acknowledgement Receipt for Donations Received Form"
settings.inv.recv_shortname = "ARDR"
settings.inv.recv_type = {
#0: T("-"),
#1: T("Other Warehouse"),
32: T("Donation"),
33: T("Foreign Donation"),
34: T("Local Purchases"),
35: T("Confiscated Goods from Bureau Of Customs")
}
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 10
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
module_type = 10,
)),
#("cms", Storage(
# name_nice = T("Content Management"),
# #description = "Content Management System",
# restricted = True,
# module_type = 10,
# )),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
restricted = True,
module_type = 1
)),
#("proc", Storage(
# name_nice = T("Procurement"),
# #description = "Ordering & Purchasing of Goods & Services",
# restricted = True,
# module_type = 10
# )),
("asset", Storage(
name_nice = T("Assets"),
#description = "Recording and Assigning Assets",
restricted = True,
module_type = 10,
)),
# Vehicle depends on Assets
#("vehicle", Storage(
# name_nice = T("Vehicles"),
# #description = "Manage Vehicles",
# restricted = True,
# module_type = 10,
# )),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = 2,
)),
#("project", Storage(
# name_nice = T("Projects"),
# #description = "Tracking of Projects, Activities and Tasks",
# restricted = True,
# module_type = 10
# )),
#("survey", Storage(
# name_nice = T("Surveys"),
# #description = "Create, enter, and manage surveys.",
# restricted = True,
# module_type = 10,
# )),
#("cr", Storage(
# name_nice = T("Shelters"),
# #description = "Tracks the location, capacity and breakdown of victims in Shelters",
# restricted = True,
# module_type = 10
# )),
#("hms", Storage(
# name_nice = T("Hospitals"),
# #description = "Helps to monitor status of hospitals",
# restricted = True,
# module_type = 10
# )),
#("irs", Storage(
# name_nice = T("Incidents"),
# #description = "Incident Reporting System",
# restricted = False,
# module_type = 10
# )),
])
# END =========================================================================
| mit |
DuCorey/bokeh | bokeh/themes/theme.py | 8 | 6112 | ''' Provide a ``Theme`` class for specifying new default values for Bokeh
:class:`~bokeh.model.Model` properties.
'''
from __future__ import absolute_import, print_function
import yaml
from ..core.has_props import HasProps
# Whenever we cache that there's nothing themed for a class, we use this
# same dict instance, so we don't have a zillion empty dicts in our caches.
_empty_dict = dict()


# Note: in DirectoryHandler and in general we assume this is an
# immutable object, because we share it among sessions and we
# don't monitor it for changes. If you make this mutable by adding
# any kind of setter, you could have to refactor some other code.
class Theme(object):
    ''' Provide new default values for Bokeh models.

    Bokeh Model properties all have some built-in default value. If a property
    has not been explicitly set (e.g. ``m.foo = 10``) then accessing the
    property returns the default value. The ``Theme`` class allows collections
    of custom default values to be easily applied to Bokeh documents.

    A ``Theme`` can be constructed either from a YAML file or from a JSON
    dict (but not both). The data should have a top level ``attrs`` key
    whose value maps model class names to dicts of property defaults, e.g.:

    .. code-block:: yaml

        attrs:
            Figure:
                background_fill_color: '#2F2F2F'
            Grid:
                grid_line_dash: [6, 4]
                grid_line_alpha: .3
            Title:
                text_color: "white"

    Optional top-level keys ``line_defaults``, ``fill_defaults`` and
    ``text_defaults`` apply to all glyphs having the matching properties.

    Args:
        filename (str, optional) : path to a YAML theme file
        json (str, optional) : a JSON dictionary specifying theme values

    Raises:
        ValueError
            If neither or both of ``filename`` and ``json`` are supplied,
            or if the ``attrs`` data is malformed.

    '''

    def __init__(self, filename=None, json=None):
        if (filename is not None) and (json is not None):
            raise ValueError("Theme should be constructed from a file or from json not both")

        if filename is not None:
            # Use a context manager so the handle is closed even if parsing
            # raises. safe_load is used instead of the deprecated bare
            # yaml.load(): it refuses arbitrary Python object tags, which a
            # theme file never needs and which are unsafe on untrusted input.
            with open(filename) as f:
                json = yaml.safe_load(f)
                # empty docs result in None rather than {}, fix it.
                if json is None:
                    json = {}

        if json is None:
            raise ValueError("Theme requires json or a filename to construct")

        self._json = json

        if 'attrs' not in self._json:
            self._json['attrs'] = {}

        if not isinstance(self._json['attrs'], dict):
            raise ValueError("theme problem: attrs field should be a dictionary of class names, not %r" % (self._json['attrs']))

        for key, value in self._json['attrs'].items():
            if not isinstance(value, dict):
                raise ValueError("theme problem: attrs.%s should be a dictionary of properties, not %r" % (key, value))

        self._line_defaults = self._json.get('line_defaults', _empty_dict)
        self._fill_defaults = self._json.get('fill_defaults', _empty_dict)
        self._text_defaults = self._json.get('text_defaults', _empty_dict)

        # mapping from class name to the full set of properties
        # (including those merged in from base classes) for that class.
        self._by_class_cache = {}

    def _add_glyph_defaults(self, cls, props):
        # Merge the line/fill/text default groups into *props* for any
        # Glyph subclass that actually has the corresponding properties.
        from ..models.glyphs import Glyph
        if issubclass(cls, Glyph):
            if hasattr(cls, "line_alpha"):
                props.update(self._line_defaults)
            if hasattr(cls, "fill_alpha"):
                props.update(self._fill_defaults)
            if hasattr(cls, "text_alpha"):
                props.update(self._text_defaults)

    def _for_class(self, cls):
        # Return (and cache) the merged property defaults for *cls*,
        # including those inherited from its HasProps base classes.
        if cls.__name__ not in self._by_class_cache:
            attrs = self._json['attrs']
            combined = {}
            # we go in reverse order so that subclass props override base class
            for base in cls.__mro__[-2::-1]:
                if not issubclass(base, HasProps):
                    continue
                self._add_glyph_defaults(base, combined)
                combined.update(attrs.get(base.__name__, _empty_dict))
            if len(combined) == 0:
                # Share the canonical empty dict (see note on _empty_dict).
                combined = _empty_dict
            self._by_class_cache[cls.__name__] = combined
        return self._by_class_cache[cls.__name__]

    def apply_to_model(self, model):
        ''' Apply this theme to a model.

        .. warning::
            Typically, don't call this method directly. Instead, set the theme
            on the :class:`~bokeh.document.Document` the model is a part of.

        '''
        model.apply_theme(self._for_class(model.__class__))
# a little paranoia because it would be Bad(tm) to mess
# this up... would be nicer if python had a way to freeze
# the dict.
if len(_empty_dict) > 0:
raise RuntimeError("Somebody put stuff in _empty_dict")
| bsd-3-clause |
kartikluke/chammakbot | vendored/urllib3/util/request.py | 205 | 3705 | from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b, integer_types
from ..exceptions import UnrewindableBodyError
ACCEPT_ENCODING = 'gzip,deflate'
_FAILEDTELL = object()
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
def set_file_position(body, pos):
    """
    Seek *body* to *pos* when one is provided; otherwise attempt to
    record the body's current position for a possible later rewind.

    Returns the (possibly newly recorded) position, ``None`` when the
    body has no ``tell()``, or the ``_FAILEDTELL`` sentinel when
    ``tell()`` exists but raised.
    """
    if pos is not None:
        rewind_body(body, pos)
        return pos

    tell = getattr(body, 'tell', None)
    if tell is None:
        return None
    try:
        return tell()
    except (IOError, OSError):
        # This differentiates from None, allowing us to catch
        # a failed `tell()` later when trying to rewind the body.
        return _FAILEDTELL
def rewind_body(body, body_pos):
    """
    Attempt to seek *body* back to *body_pos*.

    Primarily used for request redirects and retries.

    :param body:
        File-like object that supports seek.

    :param body_pos:
        Integer position to seek to, or the ``_FAILEDTELL`` sentinel.
    """
    seek = getattr(body, 'seek', None)
    if seek is not None and isinstance(body_pos, integer_types):
        try:
            seek(body_pos)
        except (IOError, OSError):
            raise UnrewindableBodyError("An error occurred when rewinding request "
                                        "body for redirect/retry.")
    elif body_pos is _FAILEDTELL:
        raise UnrewindableBodyError("Unable to record file position for rewinding "
                                    "request body during a redirect/retry.")
    else:
        raise ValueError("body_pos must be of type integer, "
                         "instead it was %s." % type(body_pos))
| apache-2.0 |
ronzohan/namebench | nb_third_party/dns/flags.py | 250 | 2687 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Message Flags."""
# Standard DNS flags
QR = 0x8000
AA = 0x0400
TC = 0x0200
RD = 0x0100
RA = 0x0080
AD = 0x0020
CD = 0x0010
# EDNS flags
DO = 0x8000
_by_text = {
'QR' : QR,
'AA' : AA,
'TC' : TC,
'RD' : RD,
'RA' : RA,
'AD' : AD,
'CD' : CD
}
_edns_by_text = {
'DO' : DO
}
# We construct the inverse mappings programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mappings not to be true inverses.
_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
_edns_by_value = dict([(y, x) for x, y in _edns_by_text.iteritems()])
def _order_flags(table):
order = list(table.iteritems())
order.sort()
order.reverse()
return order
_flags_order = _order_flags(_by_value)
_edns_flags_order = _order_flags(_edns_by_value)
def _from_text(text, table):
flags = 0
tokens = text.split()
for t in tokens:
flags = flags | table[t.upper()]
return flags
def _to_text(flags, table, order):
text_flags = []
for k, v in order:
if flags & k != 0:
text_flags.append(v)
return ' '.join(text_flags)
def from_text(text):
    """Convert a space-separated list of flag text values into a flags
    value.  Names are case-insensitive; unknown names raise KeyError.

    @rtype: int"""
    return _from_text(text, _by_text)
def to_text(flags):
    """Convert a flags value into a space-separated list of flag text
    values, ordered from highest bit value to lowest.

    @rtype: string"""
    return _to_text(flags, _by_value, _flags_order)
def edns_from_text(text):
    """Convert a space-separated list of EDNS flag text values into a EDNS
    flags value.  Names are case-insensitive; unknown names raise KeyError.

    @rtype: int"""
    return _from_text(text, _edns_by_text)
def edns_to_text(flags):
    """Convert an EDNS flags value into a space-separated list of EDNS flag
    text values, ordered from highest bit value to lowest.

    @rtype: string"""
    return _to_text(flags, _edns_by_value, _edns_flags_order)
| apache-2.0 |
telefonicaid/fiware-paas | automatization_scripts/tools/productinstance.py | 6 | 1587 | # -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
__author__ = 'henar'
from xml.etree.ElementTree import Element, SubElement, tostring
class Attribute:
    """A simple key/value pair describing one product attribute."""

    def __init__(self, att_name, att_value):
        # Stored under the generic names consumers of this class expect.
        self.key = att_name
        self.value = att_value
class ProductInstance:
    """A product release deployed on a host, plus its runtime attributes."""

    def __init__(self, hostname, status, ip, product_release):
        self.hostname = hostname
        self.status = status
        self.ip = ip
        self.product_release = product_release
        # Bug fix: 'attributes' was never initialized, so the first call
        # to add_attribute() raised AttributeError.  Start empty here.
        self.attributes = []

    def add_attribute(self, attribute):
        """Append one attribute to this instance's attribute list."""
        self.attributes.append(attribute)

    def to_string(self):
        """Print and return a tab-separated summary of this instance."""
        var = (self.hostname + "\t" + self.status + '\t' + self.ip + '\t'
               + self.product_release.name + '\t' + self.product_release.version)
        # print() with one argument is valid on both Python 2 and 3; the
        # original 'print var' statement is a SyntaxError on Python 3.
        print(var)
        # Returning the summary keeps the original behaviour (printing)
        # while making the method usable without capturing stdout.
        return var
##
## get_images - Obtiene la lista de imagenes --- Detalle images/detail
##
| apache-2.0 |
camptocamp/odoo | addons/mrp_repair/wizard/__init__.py | 445 | 1096 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cancel_repair
import make_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tensorflow/tensorflow | tensorflow/python/keras/layers/preprocessing/category_encoding.py | 2 | 10919 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras CategoryEncoding preprocessing layer."""
# pylint: disable=g-classes-have-attributes
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bincount_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
INT = "int"
ONE_HOT = "one_hot"
MULTI_HOT = "multi_hot"
COUNT = "count"
@keras_export("keras.layers.experimental.preprocessing.CategoryEncoding")
class CategoryEncoding(base_layer.Layer):
"""Category encoding layer.
This layer provides options for condensing data into a categorical encoding
when the total number of tokens are known in advance. It accepts integer
values as inputs and outputs a dense representation (one sample = 1-index
tensor of float values representing data about the sample's tokens) of those
inputs. For integer inputs where the total number of tokens is not known, see
`tf.keras.layers.experimental.preprocessing.IntegerLookup`.
Examples:
**One-hot encoding data**
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
... num_tokens=4, output_mode="one_hot")
>>> layer([3, 2, 0, 1])
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[0., 0., 0., 1.],
[0., 0., 1., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]], dtype=float32)>
**Multi-hot encoding data**
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
... num_tokens=4, output_mode="multi_hot")
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[1., 1., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 1.]], dtype=float32)>
**Using weighted inputs in `"count"` mode**
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
... num_tokens=4, output_mode="count")
>>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)
<tf.Tensor: shape=(4, 4), dtype=float64, numpy=
array([[0.1, 0.2, 0. , 0. ],
[0.2, 0. , 0. , 0. ],
[0. , 0.2, 0.3, 0. ],
[0. , 0.2, 0. , 0.4]])>
Args:
num_tokens: The total number of tokens the layer should support. All inputs
to the layer must integers in the range 0 <= value < num_tokens or an
error will be thrown.
output_mode: Specification for the output of the layer.
Defaults to `"multi_hot"`. Values can be `"one_hot"`, `"multi_hot"` or
`"count"`, configuring the layer as follows:
- `"one_hot"`: Encodes each individual element in the input into an
array of `num_tokens` size, containing a 1 at the element index. If
the last dimension is size 1, will encode on that dimension. If the
last dimension is not size 1, will append a new dimension for the
encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single array
of `num_tokens` size, containing a 1 for each vocabulary term present
in the sample. Treats the last dimension as the sample dimension, if
input shape is (..., sample_length), output shape will be
(..., num_tokens).
- `"count"`: As `"multi_hot"`, but the int array contains a count of the
number of times the token at that index appeared in the sample.
sparse: Boolean. If true, returns a `SparseTensor` instead of a dense
`Tensor`. Defaults to `False`.
Call arguments:
inputs: A 2D tensor `(samples, timesteps)`.
count_weights: A 2D tensor in the same shape as `inputs` indicating the
weight for each sample value when summing up in `count` mode. Not used in
`"multi_hot"` mode.
"""
def __init__(self,
num_tokens=None,
output_mode=MULTI_HOT,
sparse=False,
**kwargs):
# max_tokens is an old name for the num_tokens arg we continue to support
# because of usage.
if "max_tokens" in kwargs:
logging.warning(
"max_tokens is deprecated, please use num_tokens instead.")
num_tokens = kwargs["max_tokens"]
del kwargs["max_tokens"]
super(CategoryEncoding, self).__init__(**kwargs)
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = MULTI_HOT
# 'output_mode' must be one of (COUNT, ONE_HOT, MULTI_HOT)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(COUNT, ONE_HOT, MULTI_HOT),
layer_name="CategoryEncoding",
arg_name="output_mode")
if num_tokens is None:
raise ValueError("num_tokens must be set to use this layer. If the "
"number of tokens is not known beforehand, use the "
"IntegerLookup layer instead.")
if num_tokens < 1:
raise ValueError("num_tokens must be >= 1.")
self.num_tokens = num_tokens
self.output_mode = output_mode
self.sparse = sparse
def compute_output_shape(self, input_shape):
  """Return the output shape: the input shape with its last dimension
  replaced by ``num_tokens`` (or, for one-hot encoding of a last
  dimension that is not size 1, with a new ``num_tokens`` axis appended).
  """
  if not input_shape:
    # Scalar input encodes to a single vector of size num_tokens.
    return tensor_shape.TensorShape([self.num_tokens])
  if self.output_mode == ONE_HOT and input_shape[-1] != 1:
    # One-hot on a non-size-1 last axis appends a new encoding axis.
    return tensor_shape.TensorShape(input_shape + [self.num_tokens])
  else:
    return tensor_shape.TensorShape(input_shape[:-1] + [self.num_tokens])
def compute_output_signature(self, input_spec):
  """Return the output TypeSpec (sparse or dense per ``self.sparse``),
  with int64 dtype and the shape from ``compute_output_shape``."""
  output_shape = self.compute_output_shape(input_spec.shape.as_list())
  if self.sparse:
    return sparse_tensor.SparseTensorSpec(
        shape=output_shape, dtype=dtypes.int64)
  else:
    return tensor_spec.TensorSpec(shape=output_shape, dtype=dtypes.int64)
def get_config(self):
  """Return the layer's configuration (constructor kwargs merged with the
  base layer config) for serialization."""
  config = {
      "num_tokens": self.num_tokens,
      "output_mode": self.output_mode,
      "sparse": self.sparse,
  }
  base_config = super(CategoryEncoding, self).get_config()
  # This layer's keys are appended after the base config, so they win on
  # any key collision when the dict is rebuilt.
  return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, count_weights=None):
if isinstance(inputs, (list, np.ndarray)):
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
def expand_dims(inputs, axis):
if tf_utils.is_sparse(inputs):
return sparse_ops.sparse_expand_dims(inputs, axis)
else:
return array_ops.expand_dims(inputs, axis)
original_shape = inputs.shape
# In all cases, we should uprank scalar input to a single sample.
if inputs.shape.rank == 0:
inputs = expand_dims(inputs, -1)
# One hot will unprank only if the final output dimension is not already 1.
if self.output_mode == ONE_HOT:
if inputs.shape[-1] != 1:
inputs = expand_dims(inputs, -1)
# TODO(b/190445202): remove output rank restriction.
if inputs.shape.rank > 2:
raise ValueError(
"Received input shape {}, which would result in output rank {}. "
"Currently only outputs up to rank 2 are supported.".format(
original_shape, inputs.shape.rank))
if count_weights is not None and self.output_mode != COUNT:
raise ValueError(
"`count_weights` is not used when `output_mode` is not `'count'`. "
"Received `count_weights={}`.".format(count_weights))
out_depth = self.num_tokens
binary_output = self.output_mode in (MULTI_HOT, ONE_HOT)
if isinstance(inputs, sparse_tensor.SparseTensor):
max_value = math_ops.reduce_max(inputs.values)
min_value = math_ops.reduce_min(inputs.values)
else:
max_value = math_ops.reduce_max(inputs)
min_value = math_ops.reduce_min(inputs)
condition = math_ops.logical_and(
math_ops.greater(
math_ops.cast(out_depth, max_value.dtype), max_value),
math_ops.greater_equal(
min_value, math_ops.cast(0, min_value.dtype)))
assertion = control_flow_ops.Assert(condition, [
"Input values must be in the range 0 <= values < num_tokens"
" with num_tokens={}".format(out_depth)
])
with ops.control_dependencies([assertion]):
if self.sparse:
return sparse_bincount(inputs, out_depth, binary_output,
count_weights)
else:
return dense_bincount(inputs, out_depth, binary_output,
count_weights)
def sparse_bincount(inputs, out_depth, binary_output, count_weights=None):
  """Apply binary or count encoding to an input and return a sparse tensor.

  Args:
    inputs: integer tensor of token indices; rank 1 (one sample) or
      rank 2 (batch of samples).
    out_depth: size of the encoding axis (total number of tokens).
    binary_output: if True, emit 0/1 indicators rather than counts.
    count_weights: optional weights summed per index in count mode.

  Returns:
    A `SparseTensor` of shape `(out_depth,)` for rank-1 input, or
    `(batch_size, out_depth)` for rank-2 input.
  """
  result = bincount_ops.sparse_bincount(
      inputs,
      weights=count_weights,
      minlength=out_depth,
      maxlength=out_depth,
      axis=-1,
      binary_output=binary_output)
  if inputs.shape.rank == 1:
    output_shape = (out_depth,)
  else:
    # NOTE(review): values are cast to the Keras float dtype only on this
    # batched path -- confirm the rank-1 path intentionally keeps the
    # bincount dtype.
    result = math_ops.cast(result, backend.floatx())
    batch_size = array_ops.shape(result)[0]
    output_shape = (batch_size, out_depth)
  # Rebuild the SparseTensor so its dense_shape carries the static
  # out_depth dimension computed above.
  result = sparse_tensor.SparseTensor(
      indices=result.indices,
      values=result.values,
      dense_shape=output_shape)
  return result
def dense_bincount(inputs, out_depth, binary_output, count_weights=None):
  """Apply binary or count encoding to an input.

  Args:
    inputs: integer tensor of token indices; rank 1 (one sample) or
      rank 2 (batch of samples).
    out_depth: size of the encoding axis (total number of tokens).
    binary_output: if True, emit 0/1 indicators rather than counts.
    count_weights: optional weights summed per index in count mode.

  Returns:
    A dense tensor of the Keras float dtype, of shape `(out_depth,)` for
    rank-1 input or `(batch_size, out_depth)` for rank-2 input.
  """
  result = bincount_ops.bincount(
      inputs,
      weights=count_weights,
      minlength=out_depth,
      maxlength=out_depth,
      dtype=backend.floatx(),
      axis=-1,
      binary_output=binary_output)
  if inputs.shape.rank == 1:
    result.set_shape(tensor_shape.TensorShape((out_depth,)))
  else:
    # Static batch size may be None; set_shape only refines known dims.
    batch_size = inputs.shape.as_list()[0]
    result.set_shape(tensor_shape.TensorShape((batch_size, out_depth)))
  return result
| apache-2.0 |
cible/djangotribune | setup.py | 1 | 1046 | from setuptools import setup, find_packages
setup(
name='djangotribune',
version=__import__('djangotribune').__version__,
description=__import__('djangotribune').__doc__,
long_description=open('README.rst').read(),
author='David Thenon',
author_email='sveetch@gmail.com',
url='http://pypi.python.org/pypi/djangotribune',
license='MIT',
packages=find_packages(),
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Communications :: Chat',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'texttable==0.8.4',
'crispy-forms-foundation>=0.2.3.1',
'pytz',
],
include_package_data=True,
zip_safe=False
)
| mit |
meduz/scikit-learn | sklearn/manifold/tests/test_mds.py | 99 | 1873 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.manifold import mds
from sklearn.utils.testing import assert_raises
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
Tsjerk/Martinize | martinize/cli.py | 1 | 10450 | # MARTINIZE
# A simple, versatile tool for coarse-graining molecular systems
# Copyright (C) 2017 Tsjerk A. Wassenaar and contributors
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import absolute_import
import sys
import os
import logging
import inspect
import simopt
from simopt import MULTI, MA
from . import core
from .converters import atom, atoms, Link
from .ForceFields.forcefield import FORCE_FIELD_COLLECTION, JSONForceField
# Option list
# Option list
#
# NOTE(review): '-v'/'verbose' used to be declared twice -- once as a str
# option taking an argument ("Verbosity level") and once as a bool flag.
# Everything downstream (see update_options, which does
# `options["verbose"] and logging.DEBUG or logging.INFO`) treats 'verbose'
# as a boolean flag, so only the flag declaration is kept here.
OPTIONS = simopt.Options([
    # level  opt        attribute         type   num  default         flags  description
    """
    Input/output related options
    """,
    (0, "-f",        "input",          str,   1, None,          0,     "Input GRO or PDB file"),
    (0, "-o",        "outtop",         str,   1, "martini.top", 0,     "Output topology (TOP)"),
    (0, "-x",        "outstruc",       str,   1, None,          0,     "Output coarse grained structure (PDB)"),
    (0, "-n",        "index",          str,   1, None,          0,     "Output index file with CG (and multiscale) beads."),
    (1, "-nmap",     "mapping",        str,   1, None,          0,     "Output index file containing per bead mapping."),
    (0, "-v",        "verbose",        bool,  0, False,         0,     "Verbose. Be loud and noisy."),
    (1, "-ss",       "secstruc",       str,   1, None,          0,     "Secondary structure (File or string)"),
    (1, "-ssc",      "sscutoff",       float, 1, 0.5,           0,     "Cutoff fraction for ss in case of ambiguity (default: 0.5)."),
    (0, "-dssp",     "dsspexe",        str,   1, None,          0,     "DSSP executable for determining structure"),
    # ("-pymol", "pymolexe", str, 1, None, "PyMOL executable for determining structure"),
    (0, "-collagen", "collagen",       bool,  0, False,         0,     "Use collagen parameters"),
    (1, "-his",      "sethischarge",   bool,  0, False,         0,     "Interactively set the charge of each His-residue."),
    (0, "-nt",       "neutraltermini", bool,  0, False,         0,     "Set neutral termini (charged is default)"),
    (1, "-cb",       "chargedbreaks",  bool,  0, False,         0,     "Set charges at chain breaks (neutral is default)"),
    (0, "-cys",      "cystines",       str,   1, None,          MULTI, "Disulphide bond (+)"),
    (1, "-link",     "links",          Link,  1, None,          MULTI, "Link (+)"),
    (1, "-merge",    "merges",         str,   1, None,          MULTI, "Merge chains: e.g. -merge A,B,C (+)"),
    (0, "-name",     "name",           str,   1, None,          0,     "Moleculetype name"),
    (1, "-p",        "posres",         str,   1, 'None',        0,     "Output position restraints (None/All/Backbone) (default: None)"),
    (1, "-pf",       "posrefc",        float, 1, 1000,          0,     "Position restraints force constant (default: 1000 kJ/mol/nm^2)"),
    (1, "-ed",       "extdih",         bool,  0, False,         0,     "Use dihedrals for extended regions rather than elastic bonds)"),
    (1, "-sep",      "separate",       bool,  0, False,         0,     "Write separate topologies for identical chains."),
    (0, "-ff",       "forcefield",     str,   1, 'martini22',   0,     "Which forcefield to use"),
    # Elastic network parameters: Fij = Fc exp( -a (rij - lo)**p )
    (1, "-elastic",  "elastic",        bool,  0, False,         0,     "Write elastic bonds"),
    (1, "-ef",       "elastic_fc",     float, 1, 500,           0,     "Elastic bond force constant Fc"),
    (1, "-el",       "ellowerbound",   float, 1, 0,             0,     "Elastic bond lower cutoff: F = Fc if rij < lo"),
    (1, "-eu",       "elupperbound",   float, 1, 0.90,          0,     "Elastic bond upper cutoff: F = 0 if rij > up"),
    (1, "-ea",       "eldecay",        float, 1, 0,             0,     "Elastic bond decay factor a"),
    (1, "-ep",       "elpower",        float, 1, 1,             0,     "Elastic bond decay power p"),
    (1, "-em",       "elminforce",     float, 1, 0,             0,     "Remove elastic bonds with force constant lower than this"),
    (1, "-eb",       "elbeads",        str,   1, 'BB',          0,     "Comma separated list of bead names for elastic bonds"),
    # ("-hetatm", "hetatm", bool, 0, False, "Include HETATM records from PDB file (Use with care!)"),
    (1, "-multi",    "multi",          str,   1, None,          MULTI, "Chain to be set up for multiscaling (+)"),
])
class MartinizeException(Exception):
    """Raised for fatal martinize errors (e.g. an unloadable force field).

    Derives from Exception rather than BaseException so that generic
    ``except Exception`` handlers in calling code can catch it;
    BaseException is reserved for interpreter-level exits such as
    KeyboardInterrupt and SystemExit.
    """
    pass
def update_options(options):
    """Post-process the raw parsed command-line options in place.

    Resolves the force field, normalizes list-valued options, translates the
    cystine specifications into Link records, and configures logging.
    Raises MartinizeException when the requested force field cannot be loaded.
    Returns the same (mutated) options object.
    """
    options["Version"] = ""
    # Resolve the force field: either a registered name or a JSON file path.
    if options['forcefield'].lower() in FORCE_FIELD_COLLECTION:
        options['ForceField'] = FORCE_FIELD_COLLECTION[options['forcefield'].lower()]()
    elif os.path.isfile(options['forcefield']):
        options['ForceField'] = JSONForceField(options['forcefield'])
    else:
        message = "Forcefield '{}' can not be loaded.".format(options['forcefield'])
        logging.error(message)
        raise MartinizeException(message)

    # Process the raw options from the command line
    # Boolean options are set to more intuitive variables
    options['RetainHETATM'] = False  # options['-hetatm']
    options['MixedChains'] = False  # options['-mixed']
    options['elbeads'] = options['elbeads'].split(',')
    options['posres'] = [i.lower() for i in options['posres'].split(",")]
    if "backbone" in options['posres']:
        options['posres'].append("BB")
    if "none" in options['posres']:
        options['posres'] = []
    if options['ForceField'].ElasticNetwork:
        # Some forcefields, like elnedyn, always use an elatic network.
        # This is set in the forcefield file, with the parameter ElasticNetwork.
        options['elastic'] = True

    # Merges, links and cystines
    # "all" merges every chain; otherwise each -merge value is a comma list.
    options['merges'] = "all" in options['merges'] and ["all"] or [i.split(",") for i in options['merges']]

    # Cystines
    # This should be done for all special bonds listed in the _special_ dictionary
    CystineCheckBonds = False  # By default, do not detect cystine bridges
    CystineMaxDist2 = (10*0.22)**2  # Maximum distance (A) for detection of SS bonds
    for i in options['cystines']:
        if i.lower() == "auto":
            CystineCheckBonds = True
        elif i.replace(".", "").isdigit():
            # A numeric value: enable detection with a custom cutoff (nm).
            CystineCheckBonds = True
            CystineMaxDist2 = (10*float(i))**2
        else:
            # This item should be a pair of cysteines
            cysA, cysB = [atom(j) for j in i.split(",")]
            # Internally we handle the residue number shifted by ord(' ')<<20.
            # We have to add this to the cys-residue numbers given here as well.
            constant = 32 << 20
            options.links.append(Link(a=("SG", "CYS", cysA[2]+constant, cysA[3]),
                                      b=("SG", "CYS", cysB[2]+constant, cysB[3]),
                                      length=-1, fc=-1))

    # Now we have done everything to it, we can add Link/cystine related stuff to options
    # 'multi' is not stored anywhere else, so that we also add
    options['CystineCheckBonds'] = CystineCheckBonds
    options['CystineMaxDist2'] = CystineMaxDist2

    ## LOGGING ##
    # Set the log level and communicate which options are set and what is happening
    # If 'Verbose' is set, change the logger level
    logLevel = options["verbose"] and logging.DEBUG or logging.INFO
    logging.basicConfig(format='%(levelname)-7s %(message)s', level=logLevel)

    #logging.info('MARTINIZE, script version %s'%__version__)
    logging.info('If you use this script please cite:')
    logging.info('de Jong et al., J. Chem. Theory Comput., 2013, DOI:10.1021/ct300646g')

    logging.info("Chain termini will%s be charged"%(options['neutraltermini'] and " not" or ""))
    logging.info("Residues at chain brakes will%s be charged"%((not options['chargedbreaks']) and " not" or ""))
    if 'ForceField' in options:
        logging.info("The %s forcefield will be used."%(options['ForceField'].name))
    else:
        # Unreachable in practice: the resolution above either sets
        # 'ForceField' or raises; kept for safety.
        logging.error("Forcefield '%s' has not been implemented."%(options['forcefield']))
        sys.exit()

    if options['extdih']:
        logging.info('Dihedrals will be used for extended regions. (Elastic bonds may be more stable)')
    else:
        logging.info('Local elastic bonds will be used for extended regions.')

    if options['posres']:
        logging.info("Position restraints will be generated.")
        logging.warning("Position restraints are only enabled if -DPOSRES is set in the MDP file")

    if options['MixedChains']:
        logging.warning("So far no parameters for mixed chains are available. This might crash the program!")

    if options['RetainHETATM']:
        logging.warning("I don't know how to handle HETATMs. This will probably crash the program.")

    return options
def main(argv):
    """Parse *argv*, run the coarse-graining pipeline, and return an exit code.

    Exit codes: 0 success/help, 1 usage error, 2 pipeline failure,
    3 missing mandatory option, 4 OS error during the run, 5 option
    post-processing failure.
    """
    ## TEMPORARY ---
    # Exception to be defined in martinize
    ## <---

    ## OPTIONS
    # Parse options
    try:
        options = OPTIONS.parse(argv[1:])
        # Keep the raw arguments around for provenance/reporting.
        options["Arguments"] = argv[1:]
        update_options(options)
    except simopt.SimoptHelp:
        print(OPTIONS.help(argv[1:]))
        return 0
    except simopt.MissingMandatoryError as e:
        print(e)
        return 3
    except simopt.Usage as e:
        print(e)
        return 1
    except MartinizeException as e:
        print(e)
        return 5

    ## WORK
    try:
        system = core.main(options)
    except MartinizeException as e:
        print(e)
        return 2
    except OSError:
        return 4

    ## OUTPUT
    # Build atom list
    # Build topology
    # Build index

    return 0
def cli():
    """Console entry point: run main() on sys.argv and exit with its code."""
    sys.exit(main(sys.argv))
| gpl-2.0 |
stefanweller/ansible-modules-extras | files/blockinfile.py | 4 | 9774 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: blockinfile
author:
- 'YAEGASHI Takeshi (@yaegashi)'
extends_documentation_fragment:
- files
- validate
short_description: Insert/update/remove a text block
surrounded by marker lines.
version_added: '2.0'
description:
- This module will insert/update/remove a block of multi-line text
surrounded by customizable marker lines.
notes:
- This module supports check mode.
- When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
options:
dest:
aliases: [ name, destfile ]
required: true
description:
- The file to modify.
state:
required: false
choices: [ present, absent ]
default: present
description:
- Whether the block should be there or not.
marker:
required: false
default: '# {mark} ANSIBLE MANAGED BLOCK'
description:
- The marker line template.
"{mark}" will be replaced with "BEGIN" or "END".
block:
aliases: [ content ]
required: false
default: ''
description:
- The text to insert inside the marker lines.
If it's missing or an empty string,
the block will be removed as if C(state) were specified to C(absent).
insertafter:
required: false
default: EOF
description:
- If specified, the block will be inserted after the last match of
specified regular expression. A special value is available; C(EOF) for
inserting the block at the end of the file. If specified regular
expression has no matches, C(EOF) will be used instead.
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
default: None
description:
- If specified, the block will be inserted before the last match of
specified regular expression. A special value is available; C(BOF) for
inserting the block at the beginning of the file. If specified regular
expression has no matches, the block will be inserted at the end of the
file.
choices: [ 'BOF', '*regex*' ]
create:
required: false
default: 'no'
choices: [ 'yes', 'no' ]
description:
- Create a new file if it doesn't exist.
backup:
required: false
default: 'no'
choices: [ 'yes', 'no' ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
follow:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- 'This flag indicates that filesystem links, if they exist, should be followed.'
version_added: "2.1"
"""
EXAMPLES = r"""
- name: insert/update "Match User" configuration block in /etc/ssh/sshd_config
blockinfile:
dest: /etc/ssh/sshd_config
block: |
Match User ansible-agent
PasswordAuthentication no
- name: insert/update eth0 configuration stanza in /etc/network/interfaces
(it might be better to copy files into /etc/network/interfaces.d/)
blockinfile:
dest: /etc/network/interfaces
block: |
iface eth0 inet static
address 192.168.0.1
netmask 255.255.255.0
- name: insert/update HTML surrounded by custom markers after <body> line
blockinfile:
dest: /var/www/html/index.html
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
insertafter: "<body>"
content: |
<h1>Welcome to {{ansible_hostname}}</h1>
<p>Last updated on {{ansible_date_time.iso8601}}</p>
- name: remove HTML as well as surrounding markers
blockinfile:
dest: /var/www/html/index.html
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
content: ""
- name: insert/update "Match User" configuration block in /etc/ssh/sshd_config
blockinfile:
dest: /etc/hosts
block: |
{{item.name}} {{item.ip}}
marker: "# {mark} ANSIBLE MANAGED BLOCK {{item.name}}"
with_items:
- { name: host1, ip: 10.10.1.10 }
- { name: host2, ip: 10.10.1.11 }
- { name: host3, ip: 10.10.1.12 }
"""
import re
import os
import tempfile
def write_changes(module, contents, dest):
    """Write *contents* to a temp file, optionally validate it, then install it at *dest*."""
    fd, temp_path = tempfile.mkstemp()
    handle = os.fdopen(fd, 'wb')
    handle.write(contents)
    handle.close()

    command = module.params.get('validate', None)
    if not command:
        # No validation requested: install the new file straight away.
        module.atomic_move(temp_path, dest)
        return

    if "%s" not in command:
        module.fail_json(msg="validate must contain %%s: %s" % (command))
    (rc, out, err) = module.run_command(command % temp_path)
    if rc != 0:
        module.fail_json(msg='failed to validate: '
                         'rc:%s error:%s' % (rc, err))
    if rc == 0:
        # Validation passed: atomically replace the destination.
        module.atomic_move(temp_path, dest)
def check_file_attrs(module, changed, message):
    """Apply file attribute settings; fold the outcome into (message, changed)."""
    attr_args = module.load_file_common_arguments(module.params)
    attrs_changed = module.set_file_attributes_if_different(attr_args, False)
    if attrs_changed:
        suffix = "ownership, perms or SE linux context changed"
        # Join with " and " only when an earlier change was already reported.
        message = (message + " and " + suffix) if changed else (message + suffix)
        changed = True
    return message, changed
def main():
    """Ansible blockinfile module entry point.

    Inserts, updates, or removes a block of text delimited by BEGIN/END
    marker lines in the destination file, honoring insertafter/insertbefore
    placement, optional backup, validation, and check mode.
    """
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(required=True, aliases=['name', 'destfile'], type='path'),
            state=dict(default='present', choices=['absent', 'present']),
            marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),
            block=dict(default='', type='str', aliases=['content']),
            insertafter=dict(default=None),
            insertbefore=dict(default=None),
            create=dict(default=False, type='bool'),
            backup=dict(default=False, type='bool'),
            validate=dict(default=None, type='str'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        add_file_common_args=True,
        supports_check_mode=True
    )

    params = module.params
    dest = params['dest']
    if module.boolean(params.get('follow', None)):
        # Resolve symlinks so we edit the link target, not the link itself.
        dest = os.path.realpath(dest)

    if os.path.isdir(dest):
        module.fail_json(rc=256,
                         msg='Destination %s is a directory !' % dest)
    if not os.path.exists(dest):
        if not module.boolean(params['create']):
            module.fail_json(rc=257,
                             msg='Destination %s does not exist !' % dest)
        original = None
        lines = []
    else:
        f = open(dest, 'rb')
        original = f.read()
        f.close()
        lines = original.splitlines()

    insertbefore = params['insertbefore']
    insertafter = params['insertafter']
    block = params['block']
    marker = params['marker']
    present = params['state'] == 'present'

    # Default placement is end-of-file.
    if insertbefore is None and insertafter is None:
        insertafter = 'EOF'

    # Compile the placement regex unless a special EOF/BOF keyword was given.
    if insertafter not in (None, 'EOF'):
        insertre = re.compile(insertafter)
    elif insertbefore not in (None, 'BOF'):
        insertre = re.compile(insertbefore)
    else:
        insertre = None

    marker0 = re.sub(r'{mark}', 'BEGIN', marker)
    marker1 = re.sub(r'{mark}', 'END', marker)
    if present and block:
        # Escape sequences like '\n' need to be handled in Ansible 1.x
        # NOTE(review): re.sub('', block, '') substitutes *block* into an
        # empty string, which just returns block unchanged; the argument
        # order looks scrambled -- verify against upstream Ansible.
        if module.ansible_version.startswith('1.'):
            block = re.sub('', block, '')
        blocklines = [marker0] + block.splitlines() + [marker1]
    else:
        blocklines = []

    # Locate an existing managed block: n0 = BEGIN line, n1 = END line.
    n0 = n1 = None
    for i, line in enumerate(lines):
        if line.startswith(marker0):
            n0 = i
        if line.startswith(marker1):
            n1 = i

    if None in (n0, n1):
        # No complete existing block: compute the insertion point n0.
        n0 = None
        if insertre is not None:
            # Insert relative to the *last* match of the placement regex.
            for i, line in enumerate(lines):
                if insertre.search(line):
                    n0 = i
            if n0 is None:
                n0 = len(lines)
            elif insertafter is not None:
                n0 += 1
        elif insertbefore is not None:
            n0 = 0  # insertbefore=BOF
        else:
            n0 = len(lines)  # insertafter=EOF
    elif n0 < n1:
        # Remove the existing block (markers included); reinsert at n0.
        lines[n0:n1+1] = []
    else:
        # Markers found in reverse order; remove the inverted span.
        lines[n1:n0+1] = []
        n0 = n1

    lines[n0:n0] = blocklines

    if lines:
        result = '\n'.join(lines)
        # Preserve the original trailing newline, if any.
        if original and original.endswith('\n'):
            result += '\n'
    else:
        result = ''

    if original == result:
        msg = ''
        changed = False
    elif original is None:
        msg = 'File created'
        changed = True
    elif not blocklines:
        msg = 'Block removed'
        changed = True
    else:
        msg = 'Block inserted'
        changed = True

    if changed and not module.check_mode:
        if module.boolean(params['backup']) and os.path.exists(dest):
            module.backup_local(dest)
        write_changes(module, result, dest)

    msg, changed = check_file_attrs(module, changed, msg)
    module.exit_json(changed=changed, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.splitter import *
if __name__ == '__main__':
main()
| gpl-3.0 |
danakj/chromium | third_party/pycoverage/coverage/bytecode.py | 209 | 2036 | """Bytecode manipulation for coverage.py"""
import opcode, types
from coverage.backward import byte_to_int
class ByteCode(object):
    """A single decoded bytecode instruction.

    Fields (all initialized to -1, meaning "not filled in yet"):
      offset      -- position of this bytecode in the code object
      op          -- opcode number, as defined in the `opcode` module
      arg         -- small-integer argument; meaning depends on the opcode
      next_offset -- offset of the following bytecode
      jump_to     -- target offset for jump instructions
    """

    def __init__(self):
        self.offset = self.op = self.arg = -1
        self.next_offset = self.jump_to = -1
class ByteCodes(object):
    """Iterator over byte codes in `code`.

    Returns `ByteCode` objects.  Arguments are read as two little-endian
    bytes following the opcode, which matches the bytecode layout of
    CPython versions before 3.6 (wordcode changed this later).
    """
    # pylint: disable=R0924
    def __init__(self, code):
        # Raw code string/bytes (co_code) to decode.
        self.code = code

    def __getitem__(self, i):
        # byte_to_int normalizes str (py2) vs bytes (py3) indexing.
        return byte_to_int(self.code[i])

    def __iter__(self):
        offset = 0
        while offset < len(self.code):
            bc = ByteCode()
            bc.op = self[offset]
            bc.offset = offset

            next_offset = offset+1
            if bc.op >= opcode.HAVE_ARGUMENT:
                # Two-byte little-endian argument follows the opcode.
                bc.arg = self[offset+1] + 256*self[offset+2]
                next_offset += 2

            # Resolve jump targets: relative jumps are measured from the
            # next instruction, absolute jumps use the argument directly.
            label = -1
            if bc.op in opcode.hasjrel:
                label = next_offset + bc.arg
            elif bc.op in opcode.hasjabs:
                label = bc.arg
            bc.jump_to = label

            bc.next_offset = offset = next_offset
            yield bc
class CodeObjects(object):
    """Iterate over `code` and, transitively, every code object nested in it."""

    def __init__(self, code):
        # Pending code objects, consumed LIFO.
        self.stack = [code]

    def __iter__(self):
        pending = self.stack
        while pending:
            current = pending.pop()
            # Queue nested code objects (function/class bodies live in
            # co_consts) for later, then yield the current one.
            pending.extend(const for const in current.co_consts
                           if isinstance(const, types.CodeType))
            yield current
| bsd-3-clause |
FokkeZB/titanium_mobile | support/android/bindings.py | 35 | 2457 | # Functions for reading the generated binding JSON data
import os, sys
import zipfile
android_dir = os.path.dirname(sys._getframe(0).f_code.co_filename)
common_dir = os.path.abspath(os.path.join(android_dir, "..", "common"))
sys.path.append(common_dir)
try:
import simplejson as json
except ImportError, e:
import json
# Directory holding the per-module jar files shipped with the Android support.
android_modules_dir = os.path.abspath(os.path.join(android_dir, 'modules'))
modules_json = os.path.join(android_dir, 'modules.json')

# Mapping of jar file name -> list of module names, loaded from modules.json.
# Stays None when the file is absent; callers must handle that.
module_jars = None
if os.path.exists(modules_json):
    module_jars = json.loads(open(modules_json, 'r').read())
def get_module_bindings(jar):
    """Load the bindings JSON embedded in an open module jar (ZipFile).

    Returns the parsed bindings dict, or None when the jar contains no
    org/appcelerator/titanium/bindings/*.json entry.
    """
    entry = next(
        (name for name in jar.namelist()
         if name.endswith('.json')
         and name.startswith('org/appcelerator/titanium/bindings/')),
        None)
    if entry is None:
        return None
    return json.loads(jar.read(entry))
def get_all_module_names():
    """Return a flat list of every module name declared across all jars."""
    return [module_name
            for jar_name in module_jars
            for module_name in module_jars[jar_name]]
def find_module_jar(module):
    """Return the path of the jar providing *module* (lower-case name), or None.

    The core "titanium.jar" lives in the android support directory itself;
    every other jar lives in the modules directory.
    """
    for jar_name, module_names in module_jars.items():
        for candidate in module_names:
            if candidate.lower() != module:
                continue
            base = android_dir if jar_name == "titanium.jar" else android_modules_dir
            return os.path.join(base, jar_name)
    return None
def get_all_module_bindings(dir=None):
    """Collect binding metadata from every *.jar in *dir*.

    Returns (modules, external_child_modules): modules maps a module class
    name to its metadata (with 'fullAPIName' added); external_child_modules
    maps parent module classes that live in another jar to the child module
    names that must be injected at boot time.

    Defect fixed: the *dir* argument was previously accepted but ignored --
    the loop always scanned android_modules_dir.  It now scans *dir*
    (defaulting to android_modules_dir as before).
    """
    if dir is None:
        dir = android_modules_dir
    modules = {}
    external_child_modules = {}
    for jar in os.listdir(dir):
        if not jar.endswith('.jar'):
            continue
        module_path = os.path.join(dir, jar)
        module_jar = zipfile.ZipFile(module_path)
        module_bindings = get_module_bindings(module_jar)
        module_jar.close()
        if module_bindings is None:
            continue
        for module_class in module_bindings['modules'].keys():
            if module_class not in module_bindings['proxies']:
                # parent module is external, so the reference needs to be
                # injected at boot time
                if module_class not in external_child_modules:
                    external_child_modules[module_class] = []
                external_child_modules[module_class].extend(
                    module_bindings['modules'][module_class]['childModules'])
                continue
            full_api_name = module_bindings['proxies'][module_class]['proxyAttrs']['fullAPIName']
            modules[module_class] = module_bindings['modules'][module_class]
            modules[module_class]['fullAPIName'] = full_api_name
    return (modules, external_child_modules)
| apache-2.0 |
aagm/gfw-api | gfw/common.py | 3 | 4896 | # Global Forest Watch API
# Copyright (C) 2013 World Resource Institute
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""This module supports common functions."""
import json
import re
import logging
import webapp2
from google.appengine.api import memcache
from hashlib import md5
from appengine_config import runtime_config
class CORSRequestHandler(webapp2.RequestHandler):
    """Base webapp2 handler that adds CORS headers and JSON/caching helpers."""

    def options(self):
        """Reply to CORS preflight requests."""
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers['Access-Control-Allow-Headers'] = \
            'Origin, X-Requested-With, Content-Type, Accept'
        self.response.headers['Access-Control-Allow-Methods'] = 'POST, GET'

    def write(self, data):
        """Send *data* as a JSON response with permissive CORS headers."""
        self.response.headers.add_header("Access-Control-Allow-Origin", "*")
        self.response.headers.add_header(
            'Access-Control-Allow-Headers',
            'Origin, X-Requested-With, Content-Type, Accept')
        self.response.headers.add_header('charset', 'utf-8')
        self.response.headers["Content-Type"] = "application/json"
        self.response.out.write(str(data))

    def write_error(self, status, data):
        """Send *data* as a JSON error response with the given HTTP status."""
        self.response.headers.add_header("Access-Control-Allow-Origin", "*")
        self.response.headers.add_header(
            'Access-Control-Allow-Headers',
            'Origin, X-Requested-With, Content-Type, Accept')
        self.response.headers.add_header('charset', 'utf-8')
        self.response.headers["Content-Type"] = "application/json"
        self.response.set_status(status, message=str(data))
        self.response.out.write(str(data))

    @classmethod
    def get_or_execute(cls, args, target, rid):
        """Return the memcached result for *rid*, or run target.execute(args).

        Passing 'bust' in args forces a cache refresh.  Returns the
        (action, data) pair produced by target.execute.
        """
        if 'bust' in args:
            memcache.delete(rid)
            result = target.execute(args)
        else:
            result = memcache.get(rid)
            if not result:
                result = target.execute(args)
                try:
                    memcache.set(key=rid, value=result)
                except Exception as e:
                    # Caching is best-effort; never fail the request for it.
                    logging.exception(e)
        action, data = result
        return action, data

    def args(self, only=[]):
        """Return request arguments (query string or JSON body) as a dict.

        Defect fixed: the *only* whitelist previously had no effect because
        both branches of the filter assigned the same value; keys outside
        *only* are now dropped whenever a non-empty whitelist is given.
        """
        raw = {}
        if not self.request.arguments():
            if self.request.body:
                raw = json.loads(self.request.body)
        else:
            args = self.request.arguments()
            vals = map(self.request.get, args)
            raw = dict(zip(args, vals))
        result = {}
        for key, val in raw.iteritems():
            if not only or key in only:
                result[key] = val
        return result

    def complete(self, action, data):
        """Dispatch on the (action, data) pair produced by get_or_execute."""
        if action == 'respond':
            self.write(json.dumps(data, sort_keys=True))
        elif action == 'redirect':
            self.redirect(data)
        elif action == 'error':
            self.write_error(400, data.get('message') or data)
        else:
            self.write_error(400, 'Unknown action %s' % action)

    def get_id(self, params):
        """Cache key: request path + md5 of the whitespace-stripped params JSON."""
        whitespace = re.compile(r'\s+')
        params = re.sub(whitespace, '', json.dumps(params, sort_keys=True))
        return '/'.join([self.request.path.lower(), md5(params).hexdigest()])
#
# SHARED CONSTANTS/TEMPLATES
#
APP_VERSION = runtime_config.get('APP_VERSION')
APP_BASE_URL = runtime_config.get('APP_BASE_URL')
IS_DEV = runtime_config.get('IS_DEV')
CONTENT_TYPES = {
'shp': 'application/octet-stream',
'kml': 'application/vnd.google-earth.kmz',
'svg': 'image/svg+xml',
'csv': 'application/csv',
'geojson': 'application/json',
'json': 'application/json'
}
GCS_URL_TMPL = 'http://storage.googleapis.com/gfw-apis-analysis%s.%s'
#
# Helper Methods
#
def get_params_hash(params):
    """Return a stable md5 hex digest for a params dict.

    sort_keys=True makes logically equal dicts hash identically.  The
    explicit UTF-8 encode is required on Python 3 (hashlib needs bytes)
    and is harmless on Python 2, since json.dumps emits ASCII by default.
    """
    return md5(json.dumps(params, sort_keys=True).encode('utf-8')).hexdigest()
def get_cartodb_format(gfw_media_type):
    """Return the CartoDB format token for a GFW custom media type.

    Two dot-separated tokens (e.g. "application/vnd.gfw") default to
    'json'; otherwise the third token, stripped of any '+suffix', names
    the format (e.g. "application/vnd.gfw.csv+xml" -> 'csv').
    """
    parts = gfw_media_type.split('.')
    if len(parts) != 2:
        return parts[2].partition('+')[0]
    return 'json'
abartlet/samba | third_party/waf/wafadmin/Utils.py | 20 | 19073 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"""
Utilities, the stable ones are the following:
* h_file: compute a unique value for a file (hash), it uses
the module fnv if it is installed (see waf/utils/fnv & http://code.google.com/p/waf/wiki/FAQ)
else, md5 (see the python docs)
For large projects (projects with more than 15000 files) or slow hard disks and filesystems (HFS)
it is possible to use a hashing based on the path and the size (may give broken cache results)
The method h_file MUST raise an OSError if the file is a folder
import stat
def h_file(filename):
st = os.lstat(filename)
if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('not a file')
m = Utils.md5()
m.update(str(st.st_mtime))
m.update(str(st.st_size))
m.update(filename)
return m.digest()
To replace the function in your project, use something like this:
import Utils
Utils.h_file = h_file
* h_list
* h_fun
* get_term_cols
* ordered_dict
"""
import os, sys, imp, string, errno, traceback, inspect, re, shutil, datetime, gc
# In python 3.0 we can get rid of all this
try: from UserDict import UserDict
except ImportError: from collections import UserDict
if sys.hexversion >= 0x2060000 or os.name == 'java':
import subprocess as pproc
else:
import pproc
import Logs
from Constants import *
try:
    from collections import deque
except ImportError:
    # Pre-2.4 fallback: emulate the single deque operation Waf needs.
    class deque(list):
        """Minimal deque substitute supporting popleft() only (O(n))."""
        def popleft(self):
            return self.pop(0)

is_win32 = sys.platform == 'win32'

try:
    # defaultdict in python 2.5
    from collections import defaultdict as DefaultDict
except ImportError:
    class DefaultDict(dict):
        """Pure-python substitute for collections.defaultdict (pre-2.5)."""
        def __init__(self, default_factory):
            super(DefaultDict, self).__init__()
            self.default_factory = default_factory
        def __getitem__(self, key):
            try:
                return super(DefaultDict, self).__getitem__(key)
            except KeyError:
                # Materialize and remember the default on first access.
                value = self.default_factory()
                self[key] = value
                return value
class WafError(Exception):
    """Base error for Waf; records the call stack at construction time."""

    def __init__(self, *args):
        self.args = args
        try:
            # Keep the stack so error reporters can show where we came from.
            self.stack = traceback.extract_stack()
        except:
            pass
        Exception.__init__(self, *args)

    def __str__(self):
        # A single truthy argument prints bare; anything else (including a
        # single falsy argument) prints the whole args tuple.
        if len(self.args) == 1 and self.args[0]:
            return str(self.args[0])
        return str(self.args)
class WscriptError(WafError):
    """Error raised from a wscript file; locates the offending file/line."""

    def __init__(self, message, wscript_file=None):
        if wscript_file:
            # Caller told us which wscript failed; no line information.
            self.wscript_file = wscript_file
            self.wscript_line = None
        else:
            try:
                (self.wscript_file, self.wscript_line) = self.locate_error()
            except:
                (self.wscript_file, self.wscript_line) = (None, None)

        # Build a "file:line:" prefix from whatever location info we have.
        msg_file_line = ''
        if self.wscript_file:
            msg_file_line = "%s:" % self.wscript_file
            if self.wscript_line:
                msg_file_line += "%s:" % self.wscript_line
        err_message = "%s error: %s" % (msg_file_line, message)
        WafError.__init__(self, err_message)

    def locate_error(self):
        """Walk the stack from the innermost frame out, looking for a wscript.

        Returns (filename, lineno), or (None, None) when no frame belongs
        to a wscript/wscript_build file.
        """
        stack = traceback.extract_stack()
        stack.reverse()
        for frame in stack:
            file_name = os.path.basename(frame[0])
            is_wscript = (file_name == WSCRIPT_FILE or file_name == WSCRIPT_BUILD_FILE)
            if is_wscript:
                return (frame[0], frame[1])
        return (None, None)
indicator = is_win32 and '\x1b[A\x1b[K%s%s%s\r' or '\x1b[K%s%s%s\r'
try:
    # Prefer the optional 'fnv' extension for fast file hashing; it also
    # changes the nil signature constant used by the rest of Waf.
    from fnv import new as md5
    import Constants
    Constants.SIG_NIL = 'signofnv'

    def h_file(filename):
        """Hash a file with fnv; raises OSError when filename is not a file."""
        m = md5()
        try:
            m.hfile(filename)
            x = m.digest()
            if x is None: raise OSError("not a file")
            return x
        except SystemError:
            raise OSError("not a file" + filename)

except ImportError:
    try:
        try:
            from hashlib import md5
        except ImportError:
            # Very old interpreters only have the deprecated md5 module.
            from md5 import md5

        def h_file(filename):
            """Hash a file's contents with md5, reading in 100000-byte chunks."""
            f = open(filename, 'rb')
            m = md5()
            # Reuse 'filename' as the chunk buffer; the loop stops on an
            # empty read (falsy), i.e. at end of file.
            while (filename):
                filename = f.read(100000)
                m.update(filename)
            f.close()
            return m.digest()
    except ImportError:
        # portability fixes may be added elsewhere (although, md5 should be everywhere by now)
        md5 = None
def readf(fname, m='r', encoding='ISO8859-1'):
    """Read and return the contents of *fname* (backported from waf 1.8).

    On Python 3, text-mode reads are performed in binary and decoded
    manually with *encoding* (or the platform default when encoding is
    falsy), so behaviour matches the Python 2 code path.
    """
    py3_text = sys.hexversion > 0x3000000 and 'b' not in m
    mode = m + 'b' if py3_text else m
    f = open(fname, mode)
    try:
        txt = f.read()
    finally:
        f.close()
    if py3_text:
        txt = txt.decode(encoding) if encoding else txt.decode()
    return txt
def writef(fname, data, m='w', encoding='ISO8859-1'):
    """Write *data* to *fname* (backported from waf 1.8).

    On Python 3, text-mode writes encode the data with *encoding* and
    write in binary, mirroring readf().
    """
    if sys.hexversion > 0x3000000 and 'b' not in m:
        data = data.encode(encoding)
        m += 'b'
    handle = open(fname, m)
    try:
        handle.write(data)
    finally:
        handle.close()
class ordered_dict(UserDict):
    """Dictionary that remembers key insertion order in `allkeys`."""

    def __init__(self, dict=None):
        # Insertion-ordered key list, maintained by the dunder overrides
        # below (which also run for any initial 'dict' contents).
        self.allkeys = []
        UserDict.__init__(self, dict)

    def __setitem__(self, key, value):
        if key not in self.allkeys:
            self.allkeys.append(key)
        UserDict.__setitem__(self, key, value)

    def __delitem__(self, key):
        self.allkeys.remove(key)
        UserDict.__delitem__(self, key)
def exec_command(s, **kw):
    """Run command *s* (argument list, or shell string) and return its exit code.

    A 'log' keyword redirects both stdout and stderr to that stream.
    Returns -1 when the process cannot be started at all (OSError).
    """
    if 'log' in kw:
        stream = kw.pop('log')
        kw['stdout'] = kw['stderr'] = stream
    kw['shell'] = isinstance(s, str)
    try:
        return pproc.Popen(s, **kw).wait()
    except OSError:
        return -1
if is_win32:
    def exec_command(s, **kw):
        """Windows variant of exec_command.

        For very long command lines the process window is suppressed and,
        unless a stdout target was given, output is captured via pipes and
        forwarded to the Logs module.
        """
        if 'log' in kw:
            kw['stdout'] = kw['stderr'] = kw['log']
            del(kw['log'])
        kw['shell'] = isinstance(s, str)

        if len(s) > 2000:
            # Hide the console window spawned for long command lines.
            startupinfo = pproc.STARTUPINFO()
            startupinfo.dwFlags |= pproc.STARTF_USESHOWWINDOW
            kw['startupinfo'] = startupinfo

        try:
            if 'stdout' not in kw:
                # Capture both streams and relay them through the logger.
                kw['stdout'] = pproc.PIPE
                kw['stderr'] = pproc.PIPE
                kw['universal_newlines'] = True
                proc = pproc.Popen(s,**kw)
                (stdout, stderr) = proc.communicate()
                Logs.info(stdout)
                if stderr:
                    Logs.error(stderr)
                return proc.returncode
            else:
                proc = pproc.Popen(s,**kw)
                return proc.wait()
        except OSError:
            return -1
listdir = os.listdir
if is_win32:
    def listdir_win32(s):
        """os.listdir wrapper that copes with bare drive names ('x:')
        and raises a proper ENOENT OSError for missing directories."""
        if re.match('^[A-Za-z]:$', s):
            # os.path.isdir fails if s contains only the drive name... (x:)
            s += os.sep
        if not os.path.isdir(s):
            e = OSError()
            e.errno = errno.ENOENT
            raise e
        return os.listdir(s)
    listdir = listdir_win32
def waf_version(mini = 0x010000, maxi = 0x100000):
    "Halts if the waf version is wrong"
    # mini/maxi may be integers (hex-encoded versions) or dotted strings
    # such as '1.5.3', which are converted by replacing '.' with '0' and
    # parsing the result as hexadecimal.
    ver = HEXVERSION
    try: min_val = mini + 0
    except TypeError: min_val = int(mini.replace('.', '0'), 16)
    if min_val > ver:
        Logs.error("waf version should be at least %s (%s found)" % (mini, ver))
        sys.exit(1)
    try: max_val = maxi + 0
    except TypeError: max_val = int(maxi.replace('.', '0'), 16)
    if max_val < ver:
        Logs.error("waf version should be at most %s (%s found)" % (maxi, ver))
        sys.exit(1)
def python_24_guard():
    """Raise ImportError unless running on a Python 2.4-2.x interpreter."""
    if not (0x20400f0 <= sys.hexversion < 0x3000000):
        raise ImportError("Waf requires Python >= 2.3 but the raw source requires Python 2.4, 2.5 or 2.6")
def ex_stack():
    """Format the exception currently being handled.

    Returns the full traceback text when the log verbosity is above 1,
    otherwise just the exception value as a string.
    """
    exc_type, exc_value, tb = sys.exc_info()
    if Logs.verbose > 1:
        exc_lines = traceback.format_exception(exc_type, exc_value, tb)
        return ''.join(exc_lines)
    return str(exc_value)
def to_list(sth):
    """Split strings on whitespace; pass any other value through unchanged."""
    return sth.split() if isinstance(sth, str) else sth
# Cache of wscript modules already executed, keyed by absolute file path.
g_loaded_modules = {}
"index modules by absolute path"

g_module = None
"the main module is special"

def load_module(file_path, name=WSCRIPT_FILE):
    "this function requires an absolute path"
    # Serve from the cache when this wscript was already loaded.
    try:
        return g_loaded_modules[file_path]
    except KeyError:
        pass

    module = imp.new_module(name)
    try:
        code = readf(file_path, m='rU')
    except (IOError, OSError):
        raise WscriptError('Could not read the file %r' % file_path)

    # Keep the raw source around so signatures can include it.
    module.waf_hash_val = code

    # Make imports relative to the wscript's directory work while it runs.
    dt = os.path.dirname(file_path)
    sys.path.insert(0, dt)
    try:
        exec(compile(code, file_path, 'exec'), module.__dict__)
    except Exception:
        exc_type, exc_value, tb = sys.exc_info()
        raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), file_path)
    sys.path.remove(dt)

    g_loaded_modules[file_path] = module
    return module
def set_main_module(file_path):
    """Load the top-level wscript as the main module and fill in defaults.

    Sets the global g_module, records the wscript path on it, and supplies
    default APPNAME/VERSION values when the wscript does not define them.

    Defect fixed: the attribute probes used bare ``except:`` clauses, which
    would also swallow KeyboardInterrupt/SystemExit; plain hasattr checks
    express the intent directly.
    """
    global g_module
    g_module = load_module(file_path, 'wscript_main')
    g_module.root_path = file_path

    # Provide defaults for project metadata the wscript may omit.
    if not hasattr(g_module, 'APPNAME'):
        g_module.APPNAME = 'noname'
    if not hasattr(g_module, 'VERSION'):
        g_module.VERSION = '1.0'

    # note: to register the module globally, use the following:
    # sys.modules['wscript_main'] = g_module
def to_hashtable(s):
    """Parse env-file contents ("KEY=VALUE" per line) into a dict.

    Splits each line on the FIRST '=' only, so values that themselves
    contain '=' (e.g. -DFOO=1 in a flags variable) are preserved; the old
    unbounded split silently truncated them.
    """
    tbl = {}
    lst = s.split('\n')
    for line in lst:
        if not line: continue
        mems = line.split('=', 1)
        tbl[mems[0]] = mems[1]
    return tbl
def get_term_cols():
    """Return the console width in characters.

    Fixed-width fallback; when running on a tty with fcntl/termios
    available, this function is replaced below by an ioctl-based probe.
    """
    return 80
# Try to replace the fixed-width get_term_cols above with a real probe.
try:
    import struct, fcntl, termios
except ImportError:
    # Non-POSIX platform (e.g. native Windows): keep the 80-column fallback.
    pass
else:
    if Logs.got_tty:
        def myfun():
            # TIOCGWINSZ yields (rows, cols, xpixel, ypixel); only the
            # column count is needed.
            dummy_lines, cols = struct.unpack("HHHH", \
                fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, \
                struct.pack("HHHH", 0, 0, 0, 0)))[:2]
            return cols
        # we actually try the function once to see if it is suitable
        try:
            myfun()
        except:
            pass
        else:
            get_term_cols = myfun
# Spinner state for the console progress indicator.
rot_idx = 0
rot_chr = ['\\', '|', '/', '-']
"the rotation character in the progress bar"
def split_path(path):
    """Split a POSIX path on '/' (replaced on cygwin/win32 further below)."""
    return path.split('/')
def split_path_cygwin(path):
    """Split a cygwin path on '/', keeping the '//host' head of UNC paths."""
    parts = path.split('/')
    if not path.startswith('//'):
        return parts
    unc = parts[2:]
    unc[0] = '/' + unc[0]
    return unc
re_sp = re.compile('[/\\\\]')  # either kind of path separator
def split_path_win32(path):
    r"""Split a win32 path on / or \, keeping the '\\host' head of UNC paths."""
    parts = re_sp.split(path)
    if not path.startswith('\\\\'):
        return parts
    unc = parts[2:]
    unc[0] = '\\' + unc[0]
    return unc
# Pick the platform-appropriate splitter; the POSIX default defined above
# is replaced on cygwin and on native win32.
if sys.platform == 'cygwin':
    split_path = split_path_cygwin
elif is_win32:
    split_path = split_path_win32
def copy_attrs(orig, dest, names, only_if_set=False):
    """Copy the attributes listed in *names* (string or list) from orig to dest.

    Missing attributes default to an empty tuple.  With only_if_set, falsy
    values are not copied at all.
    """
    name_list = names.split() if isinstance(names, str) else names
    for attr in name_list:
        value = getattr(orig, attr, ())
        if value or not only_if_set:
            setattr(dest, attr, value)
def def_attrs(cls, **kw):
    '''
    set default attributes on a class.
    @param cls [any class]: the class to update the given attributes in.
    @param kw [dictionary]: dictionary of attributes names and values.
    if the given class hasn't one (or more) of these attributes, add the attribute with its value to the class.
    '''
    # dict.items() exists on both Python 2 and 3; iteritems() was removed
    # in Python 3, so the original call broke under py3.
    for k, v in kw.items():
        if not hasattr(cls, k):
            setattr(cls, k, v)
def quote_define_name(path):
    """Turn an arbitrary path into an uppercase C preprocessor define name."""
    return re.sub("[^a-zA-Z0-9]", "_", path).upper()
def quote_whitespace(path):
    """Wrap *path* in double quotes when it contains embedded whitespace."""
    if path.strip().find(' ') > 0:
        quoted = '"%s"' % path
    else:
        quoted = path
    # collapse doubled quotes produced when the input was already quoted
    return quoted.replace('""', '"')
def trimquotes(s):
    """Strip trailing whitespace and one pair of enclosing single quotes."""
    if not s:
        return ''
    s = s.rstrip()
    if s[0] == s[-1] == "'":
        return s[1:-1]
    return s
def h_list(lst):
    """Hash an arbitrary object via the md5 digest of its str() representation.

    NOTE(review): `md5` comes from the module header (not visible here), and
    update() is fed a str directly — valid on Python 2 where str is bytes;
    would need an explicit encode on Python 3.
    """
    m = md5()
    m.update(str(lst))
    return m.digest()
def h_fun(fun):
    """Return a hashable signature for a function: its source code.

    The result is cached on the function object as ``code`` when the object
    allows attribute assignment; "nocode" is used when the source cannot be
    retrieved.
    """
    try:
        return fun.code
    except AttributeError:
        pass
    try:
        h = inspect.getsource(fun)
    except IOError:
        h = "nocode"
    try:
        fun.code = h
    except AttributeError:
        pass
    return h
def pprint(col, str, label='', sep='\n'):
    """Print a colored message to stderr.

    col is a color name resolved through Logs.colors; `str` (note: shadows
    the builtin, kept for API compatibility) is the colored text and label
    an uncolored suffix.
    """
    sys.stderr.write("%s%s%s %s%s" % (Logs.colors(col), str, Logs.colors.NORMAL, label, sep))
def check_dir(path):
    """If a folder doesn't exists, create it (including parents).

    Python 2 `except OSError, e` syntax — do not modernize, the codebase
    targets 2.4+.  The isdir re-check after a failed makedirs tolerates a
    concurrent creation by another process.
    """
    if not os.path.isdir(path):
        try:
            os.makedirs(path)
        except OSError, e:
            if not os.path.isdir(path):
                raise WafError("Cannot create the folder '%s' (error: %s)" % (path, e))
def cmd_output(cmd, **kw):
    """Run a command and return its standard output (Python 2 syntax).

    Extra keyword arguments:
      silent: capture stderr and return '' instead of raising on a
              non-zero exit status
      e:      shorthand for the subprocess 'env' argument
    Raises ValueError when the command cannot be started, or (unless
    silent) exits with a non-zero status.
    """
    silent = False
    if 'silent' in kw:
        silent = kw['silent']
        del(kw['silent'])
    if 'e' in kw:
        tmp = kw['e']
        del(kw['e'])
        kw['env'] = tmp
    # a plain string is a full command line and needs the shell;
    # an argument list does not
    kw['shell'] = isinstance(cmd, str)
    kw['stdout'] = pproc.PIPE
    if silent:
        kw['stderr'] = pproc.PIPE
    try:
        p = pproc.Popen(cmd, **kw)
        output = p.communicate()[0]
    except OSError, e:
        raise ValueError(str(e))
    if p.returncode:
        if not silent:
            msg = "command execution failed: %s -> %r" % (cmd, str(output))
            raise ValueError(msg)
        output = ''
    return output
reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
def subst_vars(expr, params):
    r"""Expand ${VAR} references in *expr*, e.g. ${PREFIX}/bin -> /usr/local/bin.

    params may be a plain mapping or an Environment object providing
    get_flat() (tried first, since environment values may be lists).
    A doubled backslash collapses to one, and '$$' yields a literal '$'.
    """
    def expand(match):
        backslash, dollar, name = match.groups()
        if backslash:
            return '\\'
        if dollar:
            return '$'
        try:
            return params.get_flat(name)
        except AttributeError:
            return params[name]
    return reg_subst.sub(expand, expr)
def unversioned_sys_platform_to_binary_format(unversioned_sys_platform):
    """Infer the executable format ('elf', 'mac-o' or 'pe') from an
    unversioned platform name."""
    if unversioned_sys_platform == 'darwin':
        return 'mac-o'
    if unversioned_sys_platform in ('win32', 'cygwin', 'uwin', 'msys'):
        return 'pe'
    # TODO everything else ('linux', 'freebsd', 'netbsd', 'openbsd',
    # 'sunos', 'gnu', ...) is assumed to be elf, which is not true for all
    # remaining operating systems.
    return 'elf'
def unversioned_sys_platform():
    """returns an unversioned name from sys.platform.
    sys.plaform is not very well defined and depends directly on the python source tree.
    The version appended to the names is unreliable as it's taken from the build environment at the time python was built,
    i.e., it's possible to get freebsd7 on a freebsd8 system.
    So we remove the version from the name, except for special cases where the os has a stupid name like os2 or win32.
    Some possible values of sys.platform are, amongst others:
    aix3 aix4 atheos beos5 darwin freebsd2 freebsd3 freebsd4 freebsd5 freebsd6 freebsd7
    generic gnu0 irix5 irix6 linux2 mac netbsd1 next3 os2emx riscos sunos5 unixware7
    Investigating the python source tree may reveal more values.
    """
    s = sys.platform
    if s == 'java':
        # The real OS is hidden under the JVM.
        from java.lang import System
        s = System.getProperty('os.name')
        # see http://lopica.sourceforge.net/os.html for a list of possible values
        if s == 'Mac OS X':
            return 'darwin'
        elif s.startswith('Windows '):
            return 'win32'
        elif s == 'OS/2':
            return 'os2'
        elif s == 'HP-UX':
            return 'hpux'
        elif s in ('SunOS', 'Solaris'):
            return 'sunos'
        else: s = s.lower()
    # 'win32' and '*os2' (but not 'sunos2') keep their digit-looking suffix
    if s == 'win32' or s.endswith('os2') and s != 'sunos2': return s
    # otherwise strip the trailing version digits, e.g. 'linux2' -> 'linux'
    return re.split('\d+$', s)[0]
#@deprecated('use unversioned_sys_platform instead')
def detect_platform():
    """Return an unversioned-ish platform name (legacy helper).

    Unlike unversioned_sys_platform, version suffixes are only removed for
    the platforms listed below (not for freebsd, netbsd, amongst others).
    """
    plat = sys.platform
    # known POSIX systems: match by substring, since sys.platform may
    # carry a version suffix such as 'linux2'
    for known in ('cygwin', 'linux', 'irix', 'sunos', 'hpux', 'aix', 'darwin', 'gnu'):
        if known in plat:
            return known
    # unknown POSIX flavours fall back to the generic os.name
    if os.name in ('posix', 'java', 'os2'):
        return os.name
    return plat
def load_tool(tool, tooldir=None):
    '''
    load_tool: import a Python module, optionally using several directories.
    @param tool [string]: name of tool to import.
    @param tooldir [list]: directories to look for the tool.
    @return: the loaded module.
    Warning: this function is not thread-safe: plays with sys.path,
    so must run in sequence.
    '''
    if not tooldir:
        return __import__(tool)
    assert isinstance(tooldir, list)
    # make the extra directories importable only for the duration of the import
    sys.path = tooldir + sys.path
    try:
        return __import__(tool)
    finally:
        for entry in tooldir:
            sys.path.remove(entry)
def nada(*k, **kw):
    """Accept any arguments and do nothing; used as a default callback."""
    return None
def diff_path(top, subdir):
    """Return *subdir* relative to *top* (both absolute paths); '' if equal depth."""
    top_parts = os.path.normpath(top).replace('\\', '/').split('/')
    sub_parts = os.path.normpath(subdir).replace('\\', '/').split('/')
    extra = len(sub_parts) - len(top_parts)
    if extra == 0:
        return ''
    # keep only the trailing components that are not part of `top`
    return os.path.join(*sub_parts[-extra:])
class Context(object):
    """A base class for commands to be executed from Waf scripts"""
    # `curdir` is a lazily-initialized property: it defaults to the
    # process working directory the first time it is read.
    def set_curdir(self, dir):
        self.curdir_ = dir
    def get_curdir(self):
        try:
            return self.curdir_
        except AttributeError:
            self.curdir_ = os.getcwd()
            return self.get_curdir()
    curdir = property(get_curdir, set_curdir)
    def recurse(self, dirs, name=''):
        """The function for calling scripts from folders, it tries to call wscript + function_name
        and if that file does not exist, it will call the method 'function_name' from a file named wscript
        the dirs can be a list of folders or a string containing space-separated folder paths
        """
        # default to the name of the calling function (e.g. 'configure')
        if not name:
            name = inspect.stack()[1][3]
        if isinstance(dirs, str):
            dirs = to_list(dirs)
        for x in dirs:
            if os.path.isabs(x):
                nexdir = x
            else:
                nexdir = os.path.join(self.curdir, x)
            base = os.path.join(nexdir, WSCRIPT_FILE)
            file_path = base + '_' + name
            try:
                # first candidate: a dedicated file such as 'wscript_build'
                txt = readf(file_path, m='rU')
            except (OSError, IOError):
                # fall back to the function `name` defined inside 'wscript'
                try:
                    module = load_module(base)
                except OSError:
                    raise WscriptError('No such script %s' % base)
                try:
                    f = module.__dict__[name]
                except KeyError:
                    raise WscriptError('No function %s defined in %s' % (name, base))
                if getattr(self.__class__, 'pre_recurse', None):
                    self.pre_recurse(f, base, nexdir)
                # curdir is swapped for the duration of the call only
                old = self.curdir
                self.curdir = nexdir
                try:
                    f(self)
                finally:
                    self.curdir = old
                if getattr(self.__class__, 'post_recurse', None):
                    self.post_recurse(module, base, nexdir)
            else:
                # the 'wscript_<name>' file exists: execute its code directly,
                # exposing this context as 'ctx'
                dc = {'ctx': self}
                if getattr(self.__class__, 'pre_recurse', None):
                    dc = self.pre_recurse(txt, file_path, nexdir)
                old = self.curdir
                self.curdir = nexdir
                try:
                    try:
                        exec(compile(txt, file_path, 'exec'), dc)
                    except Exception:
                        exc_type, exc_value, tb = sys.exc_info()
                        raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), base)
                finally:
                    self.curdir = old
                if getattr(self.__class__, 'post_recurse', None):
                    self.post_recurse(txt, file_path, nexdir)
if is_win32:
    # Wrap shutil.copy2 on win32 so the stat copy is explicitly attempted
    # after the data copy.
    old = shutil.copy2
    def copy2(src, dst):
        """Copy the file data, then copy the stat info of *src* onto *dst*."""
        old(src, dst)
        # was copystat(src, src), a self-copy no-op: the destination must
        # receive the source's timestamps/permissions
        shutil.copystat(src, dst)
    setattr(shutil, 'copy2', copy2)
def zip_folder(dir, zip_file_name, prefix):
    """
    Archive the contents of *dir* into *zip_file_name*.

    prefix represents the app to add in the archive: every stored member
    name starts with it.
    NOTE(review): when prefix is non-empty the archive names contain a
    doubled separator after the prefix (prefix ends with os.sep and
    another os.sep is appended) - confirm whether consumers rely on this.
    """
    import zipfile
    zip = zipfile.ZipFile(zip_file_name, 'w', compression=zipfile.ZIP_DEFLATED)
    base = os.path.abspath(dir)
    if prefix:
        if prefix[-1] != os.sep:
            prefix += os.sep
    n = len(base)
    for root, dirs, files in os.walk(base):
        for f in files:
            archive_name = prefix + root[n:] + os.sep + f
            # third positional argument is the per-member compress_type
            zip.write(root + os.sep + f, archive_name, zipfile.ZIP_DEFLATED)
    zip.close()
def get_elapsed_time(start):
    """Format the time elapsed since *start* (a datetime) as [Dd][Hh][Mm]S.MSs."""
    delta = datetime.datetime.now() - start
    # cast to int necessary for python 3.0
    days = int(delta.days)
    remainder = delta.seconds
    hours = int(remainder / 3600)
    remainder -= hours * 3600
    minutes = int(remainder / 60)
    seconds = remainder - minutes * 60 + delta.microseconds / 1000000.0
    # larger units are only printed when a larger-or-equal unit is non-zero
    parts = ''
    if days:
        parts += '%dd' % days
    if days or hours:
        parts += '%dh' % hours
    if days or hours or minutes:
        parts += '%dm' % minutes
    return '%s%.3fs' % (parts, seconds)
# Jython's gc may not implement disable(); if toggling raises
# NotImplementedError, alias disable to the working enable so later
# gc.disable()/gc.enable() pairs become harmless no-ops.
if os.name == 'java':
    # For Jython (they should really fix the inconsistency)
    try:
        gc.disable()
        gc.enable()
    except NotImplementedError:
        gc.disable = gc.enable
def run_once(fun):
    """
    decorator, make a single-argument function cache its results, use like this:
    @run_once
    def foo(k):
        return 345*2343
    """
    memo = {}
    def wrap(k):
        if k in memo:
            return memo[k]
        result = fun(k)
        memo[k] = result
        return result
    # expose the cache so callers can inspect or clear it
    wrap.__cache__ = memo
    return wrap
| gpl-3.0 |
alex-ta/Fontinator | FeatureEngineering/trainer.py | 1 | 2444 | import os
from PIL import Image, ImageFont, ImageDraw
import numpy as np
import cv2
import pickle
import extractFeatures
import classifier
# Static Config
FONT_SIZE = 24
PADDING_TOP_BOTTOM = 8
TRAIN_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789ÄÖÜäöü!"()[]?ß.,+-'
# Get list of all fonts for training
local_path = os.path.dirname(os.path.realpath(__file__))
fonts_path = os.path.join(local_path, '..', 'DataGenerator', 'fonts')
fonts = os.listdir(fonts_path)
fonts.sort(key=lambda s: s.lower())
cleaned_fonts = []
for font in fonts:
# Skip everything which is not a Font File (TTF)
if font.upper().endswith('.TTF'):
cleaned_fonts.append(font)
fonts = cleaned_fonts
# Save the font, char, label assignment
with open('labels.pickle', 'wb') as file_handle:
pickle.dump(TRAIN_CHARS, file_handle)
pickle.dump(fonts, file_handle)
train_classifier_features = []
train_classifier_labels = []
# Iterate for all available fonts
font_label = 0
for font in fonts:
print(font)
# Load Fontfile
font_path = os.path.join(fonts_path, font)
img_size = FONT_SIZE + 2 * PADDING_TOP_BOTTOM
font_file = ImageFont.truetype(font_path, FONT_SIZE)
font_char_features = {}
# Iterate over all Characters
for char_label, char in enumerate(TRAIN_CHARS):
# Draw the Character in the specified Font
image = Image.new("RGB", (img_size, img_size), (255, 255, 255))
draw = ImageDraw.Draw(image)
text = char
draw.text((10, 3), text, (0, 0, 0), font=font_file)
# Convert to Grayscale image
image = np.array(image)
image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# Extract features
char_features = extractFeatures.get_feature_vector(image)
# Append the FeatureVector and the Label to the training data
train_classifier_features.append( char_features )
train_classifier_labels.append( char_label + font_label * len(TRAIN_CHARS) )
font_char_features[char] = char_features
font_label += 1
# Feature normalization
norm_min = np.min( np.array(train_classifier_features), axis=0 )
norm_range = np.max( np.array(train_classifier_features), axis=0 ) - norm_min
# Train Classifier
cla = classifier.Classifier()
cla.setNormalization(norm_min, norm_range)
cla.trainClassifier(train_classifier_features, train_classifier_labels)
cla.saveTrainedClassifier('./classie.pickle')
| apache-2.0 |
lucienimmink/scanner.py | mutagen/_tools/mid3iconv.py | 1 | 5238 | # -*- coding: utf-8 -*-
# Copyright 2006 Emfox Zhou <EmfoxZhou@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
ID3iconv is a Java based ID3 encoding convertor, here's the Python version.
"""
import sys
import locale
import mutagen
import mutagen.id3
from mutagen._senf import argv, print_, fsnative
from ._util import SignalHandler, OptionParser
# mid3iconv's own version tuple, reported through the --version banner.
VERSION = (0, 3)
# Signal handler used via _sig.block() to defer interruption around
# per-file tag updates (see update()).
_sig = SignalHandler()
def getpreferredencoding():
    """Return the locale's preferred encoding, falling back to "utf-8"."""
    encoding = locale.getpreferredencoding()
    return encoding if encoding else "utf-8"
def isascii(string):
    """Checks whether a unicode string is non-empty and contains only ASCII
    characters.
    """
    if not string:
        return False
    try:
        string.encode('ascii')
        return True
    except UnicodeEncodeError:
        return False
class ID3OptionParser(OptionParser):
    """OptionParser preconfigured with the mid3iconv version banner and usage."""
    def __init__(self):
        mutagen_version = ".".join(map(str, mutagen.version))
        my_version = ".".join(map(str, VERSION))
        version = "mid3iconv %s\nUses Mutagen %s" % (
            my_version, mutagen_version)
        return OptionParser.__init__(
            self, version=version,
            usage="%prog [OPTION] [FILE]...",
            description=("Mutagen-based replacement the id3iconv utility, "
                         "which converts ID3 tags from legacy encodings "
                         "to Unicode and stores them using the ID3v2 format."))
    def format_help(self, *args, **kwargs):
        # append a warning, since the tool modifies files in place
        text = OptionParser.format_help(self, *args, **kwargs)
        return text + "\nFiles are updated in-place, so use --dry-run first.\n"
def update(options, filenames):
    """Re-encode the ID3 tags of each file in *filenames* in place.

    Text frames are decoded from the legacy encoding given by
    options.encoding (default: the locale's preferred encoding) and saved
    back as Unicode.
    """
    encoding = options.encoding or getpreferredencoding()
    verbose = options.verbose
    noupdate = options.noupdate
    force_v1 = options.force_v1
    remove_v1 = options.remove_v1
    def conv(uni):
        # mutagen exposes legacy-encoded text as latin-1; undo that and
        # decode again with the user-specified source encoding
        return uni.encode('iso-8859-1').decode(encoding)
    for filename in filenames:
        # block signals so a Ctrl-C cannot interrupt a half-written file
        with _sig.block():
            if verbose != "quiet":
                print_(u"Updating", filename)
            if has_id3v1(filename) and not noupdate and force_v1:
                mutagen.id3.delete(filename, False, True)
            try:
                id3 = mutagen.id3.ID3(filename)
            except mutagen.id3.ID3NoHeaderError:
                if verbose != "quiet":
                    print_(u"No ID3 header found; skipping...")
                continue
            except Exception as err:
                print_(str(err), file=sys.stderr)
                continue
            # only text frames (T***) and comments (COMM) carry re-encodable text
            for tag in filter(lambda t: t.startswith(("T", "COMM")), id3):
                frame = id3[tag]
                if isinstance(frame, mutagen.id3.TimeStampTextFrame):
                    # non-unicode fields
                    continue
                try:
                    text = frame.text
                except AttributeError:
                    continue
                try:
                    text = [conv(x) for x in frame.text]
                except (UnicodeError, LookupError):
                    # not decodable with the requested encoding: leave as-is
                    continue
                else:
                    frame.text = text
                # pure-ASCII text is stored as encoding 3 (UTF-8 in ID3v2.4),
                # anything else as encoding 1 (UTF-16)
                if not text or min(map(isascii, text)):
                    frame.encoding = 3
                else:
                    frame.encoding = 1
            if verbose == "debug":
                print_(id3.pprint())
            if not noupdate:
                if remove_v1:
                    id3.save(filename, v1=False)
                else:
                    id3.save(filename)
def has_id3v1(filename):
    """Return True when *filename* carries an ID3v1 tag, i.e. the bytes
    b"TAG" exactly 128 bytes before the end of the file."""
    try:
        with open(filename, 'rb') as fileobj:
            fileobj.seek(-128, 2)
            marker = fileobj.read(3)
    except IOError:
        # missing file or a file shorter than 128 bytes: no tag
        return False
    return marker == b"TAG"
def main(argv):
    """Command-line driver: build the option parser, translate the legacy
    -v1/-removev1 spellings, and run update() on the given files."""
    parser = ID3OptionParser()
    parser.add_option(
        "-e", "--encoding", metavar="ENCODING", action="store",
        type="string", dest="encoding",
        help=("Specify original tag encoding (default is %s)" % (
            getpreferredencoding())))
    parser.add_option(
        "-p", "--dry-run", action="store_true", dest="noupdate",
        help="Do not actually modify files")
    parser.add_option(
        "--force-v1", action="store_true", dest="force_v1",
        help="Use an ID3v1 tag even if an ID3v2 tag is present")
    parser.add_option(
        "--remove-v1", action="store_true", dest="remove_v1",
        help="Remove v1 tag after processing the files")
    parser.add_option(
        "-q", "--quiet", action="store_const", dest="verbose",
        const="quiet", help="Only output errors")
    parser.add_option(
        "-d", "--debug", action="store_const", dest="verbose",
        const="debug", help="Output updated tags")
    # accept the original id3iconv spellings of the long options
    for i, arg in enumerate(argv):
        if arg == "-v1":
            argv[i] = fsnative(u"--force-v1")
        elif arg == "-removev1":
            argv[i] = fsnative(u"--remove-v1")
    (options, args) = parser.parse_args(argv[1:])
    if args:
        update(options, args)
    else:
        # no files given: show the help text instead of doing nothing
        parser.print_help()
def entry_point():
    """Console-script entry: install the signal handler, then run main()."""
    _sig.init()
    return main(argv)
| mit |
bezhermoso/home | lib/ansible/runner/lookup_plugins/pipe.py | 12 | 1517 | # (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from ansible import utils, errors
class LookupModule(object):
    """Lookup plugin running each term as a shell command and returning its
    stripped stdout (Python 2 code: uses `basestring`)."""
    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir
    def run(self, terms, inject=None, **kwargs):
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        # a single string is treated as one command
        if isinstance(terms, basestring):
            terms = [ terms ]
        ret = []
        for term in terms:
            # shell=True: each term is a full command line, run from basedir
            p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            (stdout, stderr) = p.communicate()
            if p.returncode == 0:
                ret.append(stdout.decode("utf-8").rstrip())
            else:
                raise errors.AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
        return ret
| gpl-3.0 |
valrus/mingus3 | mingus/containers/__init__.py | 4 | 1292 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
================================================================================
mingus - Music theory Python package, containers package
Copyright (C) 2008-2009, Bart Spaans
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
================================================================================
================================================================================
"""
from .Note import Note
from .NoteContainer import NoteContainer
from .Bar import Bar
from .Track import Track
from .Composition import Composition
from .Suite import Suite
from .Instrument import Instrument, Piano, Guitar, MidiInstrument
| gpl-3.0 |
barnsnake351/nova | nova/virt/libvirt/volume/nfs.py | 10 | 4068 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _LE, _LW
from nova import paths
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import fs
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the NFS volume is mounted on the'
' compute node'),
cfg.StrOpt('nfs_mount_options',
help='Mount options passed to the NFS client. See section '
'of the nfs man page for details'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtNFSVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
    """Class implements libvirt part of volume driver for NFS."""
    def _get_mount_point_base(self):
        # configurable base directory under which each export is mounted
        return CONF.libvirt.nfs_mount_point_base
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtNFSVolumeDriver,
                     self).get_config(connection_info, disk_info)
        # the guest sees the volume as a plain file on the mounted export
        conf.source_type = 'file'
        conf.source_path = connection_info['data']['device_path']
        conf.driver_format = connection_info['data'].get('format', 'raw')
        return conf
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        # mount the export first, then record the backing file path used
        # later by get_config()
        self._ensure_mounted(connection_info)
        connection_info['data']['device_path'] = \
            self._get_device_path(connection_info)
    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        mount_path = self._get_mount_path(connection_info)
        try:
            utils.execute('umount', mount_path, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            export = connection_info['data']['export']
            # a busy mount point only means other instances still use the
            # share; anything else is logged as an unexpected failure
            if ('device is busy' in exc.message or
                'target is busy' in exc.message):
                LOG.debug("The NFS share %s is still in use.", export)
            else:
                LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
    def _ensure_mounted(self, connection_info):
        """@type connection_info: dict

        Mount the export when not already mounted; return the mount path.
        """
        nfs_export = connection_info['data']['export']
        mount_path = self._get_mount_path(connection_info)
        if not libvirt_utils.is_mounted(mount_path, nfs_export):
            options = connection_info['data'].get('options')
            self._mount_nfs(mount_path, nfs_export, options, ensure=True)
        return mount_path
    def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
        """Mount nfs export to mount path."""
        utils.execute('mkdir', '-p', mount_path)
        # Construct the NFS mount command.
        nfs_cmd = ['mount', '-t', 'nfs']
        if CONF.libvirt.nfs_mount_options is not None:
            nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
        if options:
            nfs_cmd.extend(options.split(' '))
        nfs_cmd.extend([nfs_share, mount_path])
        try:
            utils.execute(*nfs_cmd, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # with ensure=True an already-mounted share is not an error
            if ensure and 'already mounted' in exc.message:
                LOG.warn(_LW("%s is already mounted"), nfs_share)
            else:
                raise
| apache-2.0 |
freakboy3742/django | django/core/handlers/asgi.py | 24 | 11170 | import logging
import sys
import tempfile
import traceback
from asgiref.sync import sync_to_async
from django.conf import settings
from django.core import signals
from django.core.exceptions import RequestAborted, RequestDataTooBig
from django.core.handlers import base
from django.http import (
FileResponse, HttpRequest, HttpResponse, HttpResponseBadRequest,
HttpResponseServerError, QueryDict, parse_cookie,
)
from django.urls import set_script_prefix
from django.utils.functional import cached_property
logger = logging.getLogger('django.request')
class ASGIRequest(HttpRequest):
"""
Custom request subclass that decodes from an ASGI-standard request dict
and wraps request body handling.
"""
# Number of seconds until a Request gives up on trying to read a request
# body and aborts.
body_receive_timeout = 60
def __init__(self, scope, body_file):
self.scope = scope
self._post_parse_error = False
self._read_started = False
self.resolver_match = None
self.script_name = self.scope.get('root_path', '')
if self.script_name and scope['path'].startswith(self.script_name):
# TODO: Better is-prefix checking, slash handling?
self.path_info = scope['path'][len(self.script_name):]
else:
self.path_info = scope['path']
# The Django path is different from ASGI scope path args, it should
# combine with script name.
if self.script_name:
self.path = '%s/%s' % (
self.script_name.rstrip('/'),
self.path_info.replace('/', '', 1),
)
else:
self.path = scope['path']
# HTTP basics.
self.method = self.scope['method'].upper()
# Ensure query string is encoded correctly.
query_string = self.scope.get('query_string', '')
if isinstance(query_string, bytes):
query_string = query_string.decode()
self.META = {
'REQUEST_METHOD': self.method,
'QUERY_STRING': query_string,
'SCRIPT_NAME': self.script_name,
'PATH_INFO': self.path_info,
# WSGI-expecting code will need these for a while
'wsgi.multithread': True,
'wsgi.multiprocess': True,
}
if self.scope.get('client'):
self.META['REMOTE_ADDR'] = self.scope['client'][0]
self.META['REMOTE_HOST'] = self.META['REMOTE_ADDR']
self.META['REMOTE_PORT'] = self.scope['client'][1]
if self.scope.get('server'):
self.META['SERVER_NAME'] = self.scope['server'][0]
self.META['SERVER_PORT'] = str(self.scope['server'][1])
else:
self.META['SERVER_NAME'] = 'unknown'
self.META['SERVER_PORT'] = '0'
# Headers go into META.
for name, value in self.scope.get('headers', []):
name = name.decode('latin1')
if name == 'content-length':
corrected_name = 'CONTENT_LENGTH'
elif name == 'content-type':
corrected_name = 'CONTENT_TYPE'
else:
corrected_name = 'HTTP_%s' % name.upper().replace('-', '_')
# HTTP/2 say only ASCII chars are allowed in headers, but decode
# latin1 just in case.
value = value.decode('latin1')
if corrected_name in self.META:
value = self.META[corrected_name] + ',' + value
self.META[corrected_name] = value
# Pull out request encoding, if provided.
self._set_content_type_params(self.META)
# Directly assign the body file to be our stream.
self._stream = body_file
# Other bits.
self.resolver_match = None
@cached_property
def GET(self):
return QueryDict(self.META['QUERY_STRING'])
def _get_scheme(self):
return self.scope.get('scheme') or super()._get_scheme()
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
@cached_property
def COOKIES(self):
return parse_cookie(self.META.get('HTTP_COOKIE', ''))
class ASGIHandler(base.BaseHandler):
"""Handler for ASGI requests."""
request_class = ASGIRequest
# Size to chunk response bodies into for multiple response messages.
chunk_size = 2 ** 16
def __init__(self):
super().__init__()
self.load_middleware(is_async=True)
async def __call__(self, scope, receive, send):
"""
Async entrypoint - parses the request and hands off to get_response.
"""
# Serve only HTTP connections.
# FIXME: Allow to override this.
if scope['type'] != 'http':
raise ValueError(
'Django can only handle ASGI/HTTP connections, not %s.'
% scope['type']
)
# Receive the HTTP request body as a stream object.
try:
body_file = await self.read_body(receive)
except RequestAborted:
return
# Request is complete and can be served.
set_script_prefix(self.get_script_prefix(scope))
await sync_to_async(signals.request_started.send, thread_sensitive=True)(sender=self.__class__, scope=scope)
# Get the request and check for basic issues.
request, error_response = self.create_request(scope, body_file)
if request is None:
await self.send_response(error_response, send)
return
# Get the response, using the async mode of BaseHandler.
response = await self.get_response_async(request)
response._handler_class = self.__class__
# Increase chunk size on file responses (ASGI servers handles low-level
# chunking).
if isinstance(response, FileResponse):
response.block_size = self.chunk_size
# Send the response.
await self.send_response(response, send)
async def read_body(self, receive):
"""Reads a HTTP body from an ASGI connection."""
# Use the tempfile that auto rolls-over to a disk file as it fills up.
body_file = tempfile.SpooledTemporaryFile(max_size=settings.FILE_UPLOAD_MAX_MEMORY_SIZE, mode='w+b')
while True:
message = await receive()
if message['type'] == 'http.disconnect':
# Early client disconnect.
raise RequestAborted()
# Add a body chunk from the message, if provided.
if 'body' in message:
body_file.write(message['body'])
# Quit out if that's the end.
if not message.get('more_body', False):
break
body_file.seek(0)
return body_file
def create_request(self, scope, body_file):
"""
Create the Request object and returns either (request, None) or
(None, response) if there is an error response.
"""
try:
return self.request_class(scope, body_file), None
except UnicodeDecodeError:
logger.warning(
'Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={'status_code': 400},
)
return None, HttpResponseBadRequest()
except RequestDataTooBig:
return None, HttpResponse('413 Payload too large', status=413)
def handle_uncaught_exception(self, request, resolver, exc_info):
"""Last-chance handler for exceptions."""
# There's no WSGI server to catch the exception further up
# if this fails, so translate it into a plain text response.
try:
return super().handle_uncaught_exception(request, resolver, exc_info)
except Exception:
return HttpResponseServerError(
traceback.format_exc() if settings.DEBUG else 'Internal Server Error',
content_type='text/plain',
)
async def send_response(self, response, send):
"""Encode and send a response out over ASGI."""
# Collect cookies into headers. Have to preserve header case as there
# are some non-RFC compliant clients that require e.g. Content-Type.
response_headers = []
for header, value in response.items():
if isinstance(header, str):
header = header.encode('ascii')
if isinstance(value, str):
value = value.encode('latin1')
response_headers.append((bytes(header), bytes(value)))
for c in response.cookies.values():
response_headers.append(
(b'Set-Cookie', c.output(header='').encode('ascii').strip())
)
# Initial response message.
await send({
'type': 'http.response.start',
'status': response.status_code,
'headers': response_headers,
})
# Streaming responses need to be pinned to their iterator.
if response.streaming:
# Access `__iter__` and not `streaming_content` directly in case
# it has been overridden in a subclass.
for part in response:
for chunk, _ in self.chunk_bytes(part):
await send({
'type': 'http.response.body',
'body': chunk,
# Ignore "more" as there may be more parts; instead,
# use an empty final closing message with False.
'more_body': True,
})
# Final closing message.
await send({'type': 'http.response.body'})
# Other responses just need chunking.
else:
# Yield chunks of response.
for chunk, last in self.chunk_bytes(response.content):
await send({
'type': 'http.response.body',
'body': chunk,
'more_body': not last,
})
await sync_to_async(response.close, thread_sensitive=True)()
@classmethod
def chunk_bytes(cls, data):
"""
Chunks some data up so it can be sent in reasonable size messages.
Yields (chunk, last_chunk) tuples.
"""
position = 0
if not data:
yield data, True
return
while position < len(data):
yield (
data[position:position + cls.chunk_size],
(position + cls.chunk_size) >= len(data),
)
position += cls.chunk_size
def get_script_prefix(self, scope):
"""
Return the script prefix to use from either the scope or a setting.
"""
if settings.FORCE_SCRIPT_NAME:
return settings.FORCE_SCRIPT_NAME
return scope.get('root_path', '') or ''
| bsd-3-clause |
jasonmccampbell/scipy-refactor | scipy/weave/ext_tools.py | 11 | 17883 | import os
import sys
import re
import catalog
import build_tools
import converters
import base_spec
class ext_function_from_specs(object):
    """Generates the C++ wrapper source for one Python-callable function.

    The function is described by a name, a block of user-supplied C++ code,
    and a list of argument specifications (``base_spec.arg_spec_list``) that
    know how to declare, convert, and clean up each argument. The various
    ``*_code`` methods each emit one section of the generated source; the
    emitted strings are whitespace-sensitive C++ text, so they are built
    with explicit ``\\n`` markers.
    """
    def __init__(self,name,code_block,arg_specs):
        self.name = name
        # Normalize whatever sequence we were given into an arg_spec_list.
        self.arg_specs = base_spec.arg_spec_list(arg_specs)
        self.code_block = code_block
        self.compiler = ''
        # NOTE(review): base_info is imported later in this module; that is
        # fine at runtime because the module-level import has executed by
        # the time any instance is constructed.
        self.customize = base_info.custom_info()
    def header_code(self):
        # No per-function header code by default; subclasses may override.
        pass
    def function_declaration_code(self):
        """Return the C declaration/opening brace of the wrapper function."""
        code = 'static PyObject* %s(PyObject*self, PyObject* args,' \
               ' PyObject* kywds)\n{\n'
        return code % self.name
    def template_declaration_code(self):
        """Return a templated variant of the wrapper declaration."""
        code = 'template<class T>\n' \
               'static PyObject* %s(PyObject*self, PyObject* args,' \
               ' PyObject* kywds)\n{\n'
        return code % self.name
    #def cpp_function_declaration_code(self):
    #    pass
    #def cpp_function_call_code(self):
    #s    pass
    def parse_tuple_code(self):
        """ Create code block for PyArg_ParseTuple. Variable declarations
            for all PyObjects are done also.
            This code got a lot uglier when I added local_dict...
        """
        # return_val holds the result; exception_occurred lets cleanup code
        # distinguish "no result" from "error"; py_local_dict receives the
        # optional trailing local_dict keyword argument.
        declare_return = 'py::object return_val;\n' \
                         'int exception_occurred = 0;\n' \
                         'PyObject *py_local_dict = NULL;\n'
        # kwlist is the NULL-terminated keyword-name array for
        # PyArg_ParseTupleAndKeywords; "local_dict" is always the last entry.
        arg_string_list = self.arg_specs.variable_as_strings() + ['"local_dict"']
        arg_strings = ','.join(arg_string_list)
        if arg_strings: arg_strings += ','
        declare_kwlist = 'static const char *kwlist[] = {%s NULL};\n' % \
                         arg_strings
        py_objects = ', '.join(self.arg_specs.py_pointers())
        init_flags = ', '.join(self.arg_specs.init_flags())
        init_flags_init = '= '.join(self.arg_specs.init_flags())
        py_vars = ' = '.join(self.arg_specs.py_variables())
        if py_objects:
            # One PyObject* and one init flag per argument; everything is
            # NULL/0 until the argument is successfully converted.
            declare_py_objects = 'PyObject ' + py_objects +';\n'
            declare_py_objects += 'int '+ init_flags + ';\n'
            init_values = py_vars + ' = NULL;\n'
            init_values += init_flags_init + ' = 0;\n\n'
        else:
            declare_py_objects = ''
            init_values = ''
        #Each variable is in charge of its own cleanup now.
        #cnt = len(arg_list)
        #declare_cleanup = "blitz::TinyVector<PyObject*,%d> clean_up(0);\n" % cnt
        ref_string = ', '.join(self.arg_specs.py_references())
        if ref_string:
            ref_string += ', &py_local_dict'
        else:
            ref_string = '&py_local_dict'
        # "O" per argument, then "|O" for the optional local_dict.
        format = "O"* len(self.arg_specs) + "|O" + ':' + self.name
        parse_tuple = 'if(!PyArg_ParseTupleAndKeywords(args,' \
                      'kywds,"%s",const_cast<char**>(kwlist),%s))\n' % \
                      (format,ref_string)
        parse_tuple += ' return NULL;\n'
        return declare_return + declare_kwlist + declare_py_objects \
               + init_values + parse_tuple
    def arg_declaration_code(self):
        """Emit per-argument C++ declarations plus init-flag assignments."""
        arg_strings = []
        for arg in self.arg_specs:
            arg_strings.append(arg.declaration_code())
            arg_strings.append(arg.init_flag() +" = 1;\n")
        code = "".join(arg_strings)
        return code
    def arg_cleanup_code(self):
        """Emit cleanup code, guarded so only initialized args are cleaned."""
        arg_strings = []
        have_cleanup = filter(lambda x:x.cleanup_code(),self.arg_specs)
        for arg in have_cleanup:
            code = "if(%s)\n" % arg.init_flag()
            code += "{\n"
            code += indent(arg.cleanup_code(),4)
            code += "}\n"
            arg_strings.append(code)
        code = "".join(arg_strings)
        return code
    def arg_local_dict_code(self):
        """Emit code that copies converted args into the local_dict."""
        arg_strings = []
        for arg in self.arg_specs:
            arg_strings.append(arg.local_dict_code())
        code = "".join(arg_strings)
        return code
    def function_code(self):
        """Assemble the full wrapper: parse, try/catch, cleanup, return."""
        decl_code = indent(self.arg_declaration_code(),4)
        cleanup_code = indent(self.arg_cleanup_code(),4)
        function_code = indent(self.code_block,4)
        local_dict_code = indent(self.arg_local_dict_code(),4)
        # Only populate local_dict if the caller actually passed one.
        dict_code = "if(py_local_dict) \n" \
                    "{ \n" \
                    " py::dict local_dict = py::dict(py_local_dict); \n" + \
                    local_dict_code + \
                    "} \n"
        try_code = "try \n" \
                   "{ \n" + \
                   decl_code + \
                   " /*<function call here>*/ \n" + \
                   function_code + \
                   indent(dict_code,4) + \
                   "\n} \n"
        # Any C++ exception is swallowed; exception_occurred records it so
        # the epilogue does not overwrite return_val with Py_None.
        catch_code = "catch(...) \n" \
                     "{ \n" + \
                     " return_val = py::object(); \n" \
                     " exception_occurred = 1; \n" \
                     "} \n"
        return_code = " /*cleanup code*/ \n" + \
                      cleanup_code + \
                      ' if(!(PyObject*)return_val && !exception_occurred)\n' \
                      ' {\n \n' \
                      ' return_val = Py_None; \n' \
                      ' }\n \n' \
                      ' return return_val.disown(); \n' \
                      '} \n'
        all_code = self.function_declaration_code() + \
                   indent(self.parse_tuple_code(),4) + \
                   indent(try_code,4) + \
                   indent(catch_code,4) + \
                   return_code
        return all_code
    def python_function_definition_code(self):
        """Emit this function's PyMethodDef table entry."""
        args = (self.name, self.name)
        function_decls = '{"%s",(PyCFunction)%s , METH_VARARGS|' \
                         'METH_KEYWORDS},\n' % args
        return function_decls
    def set_compiler(self,compiler):
        # Propagate the compiler choice to every argument spec.
        self.compiler = compiler
        for arg in self.arg_specs:
            arg.set_compiler(compiler)
class ext_function(ext_function_from_specs):
    """Convenience subclass that infers argument specs from live objects.

    Each name in *args* is looked up in the caller's namespaces (or the
    dicts provided) and run through *type_converters* to build the C++
    argument specifications.
    """
    def __init__(self,name,code_block, args, local_dict=None, global_dict=None,
                 auto_downcast=1, type_converters=None):
        # f_back is the frame of whoever called ext_function(...); do not
        # refactor this lookup into a helper, or the frame depth changes
        # and the wrong namespaces are captured.
        call_frame = sys._getframe().f_back
        if local_dict is None:
            local_dict = call_frame.f_locals
        if global_dict is None:
            global_dict = call_frame.f_globals
        if type_converters is None:
            type_converters = converters.default
        arg_specs = assign_variable_types(args,local_dict, global_dict,
                                          auto_downcast, type_converters)
        ext_function_from_specs.__init__(self,name,code_block,arg_specs)
import base_info
class ext_module(object):
    """Represents an entire generated extension module.

    Collects ``ext_function`` objects, assembles the complete C++ source
    (headers, support code, function bodies, method table, init function),
    and knows how to write and compile it via ``build_tools``.
    """
    def __init__(self,name,compiler=''):
        standard_info = converters.standard_info
        self.name = name
        self.functions = []
        self.compiler = compiler
        self.customize = base_info.custom_info()
        self._build_information = base_info.info_list(standard_info)
    def add_function(self,func):
        """Add an ext_function to be emitted into this module."""
        self.functions.append(func)
    def module_code(self):
        """Return the complete C++ source text for the module."""
        # NOTE(review): the guard macros are misspelled (__CPLUSPLUS__ /
        # __CPLUSCPLUS__ vs the real __cplusplus), so the extern "C" block
        # is never active. Left as-is to keep generated output unchanged.
        code = '\n'.join([
            """\
#ifdef __CPLUSPLUS__
extern "C" {
#endif
""",
            self.warning_code(),
            self.header_code(),
            self.support_code(),
            self.function_code(),
            self.python_function_definition_code(),
            self.module_init_code(),
            """\
#ifdef __CPLUSCPLUS__
}
#endif
"""
        ])
        return code
    def arg_specs(self):
        """Return the concatenated arg specs of all registered functions."""
        all_arg_specs = base_spec.arg_spec_list()
        for func in self.functions:
            all_arg_specs += func.arg_specs
        return all_arg_specs
    def build_information(self):
        """Collect build info from the module, its args, and its functions."""
        info = self._build_information + [self.customize] + \
               self.arg_specs().build_information()
        for func in self.functions:
            info.append(func.customize)
        #redundant, but easiest place to make sure compiler is set
        for i in info:
            i.set_compiler(self.compiler)
        return info
    def get_headers(self):
        """Return the header list, with Python.h first and blitz second."""
        all_headers = self.build_information().headers()
        # blitz/array.h always needs to go before most other headers, so we
        # hack that here, but we need to ensure that Python.h is the very
        # first header included. As indicated in
        # http://docs.python.org/api/includes.html
        # "Warning: Since Python may define some pre-processor definitions which
        # affect the standard headers on some systems, you must include Python.h
        # before any standard headers are included. "
        # Since blitz/array.h pulls in system headers, we must massage this
        # list a bit so that the order is Python.h, blitz/array.h, ...
        if '"blitz/array.h"' in all_headers:
            all_headers.remove('"blitz/array.h"')
            # Insert blitz AFTER Python.h, which must remain the first header
            all_headers.insert(1,'"blitz/array.h"')
        return all_headers
    def warning_code(self):
        """Emit #pragma warning lines (MSVC only; disabled under GCC)."""
        all_warnings = self.build_information().warnings()
        w=map(lambda x: "#pragma warning(%s)\n" % x,all_warnings)
        return '#ifndef __GNUC__\n' + ''.join(w) + '\n#endif'
    def header_code(self):
        """Emit the #include block."""
        h = self.get_headers()
        h= map(lambda x: '#include ' + x + '\n',h)
        return ''.join(h) + '\n'
    def support_code(self):
        """Emit shared support code contributed by the build info."""
        code = self.build_information().support_code()
        return ''.join(code) + '\n'
    def function_code(self):
        """Emit the wrapper bodies for every registered function."""
        all_function_code = ""
        for func in self.functions:
            all_function_code += func.function_code()
        return ''.join(all_function_code) + '\n'
    def python_function_definition_code(self):
        """Emit the PyMethodDef table for the module."""
        all_definition_code = ""
        for func in self.functions:
            all_definition_code += func.python_function_definition_code()
        all_definition_code = indent(''.join(all_definition_code),4)
        code = 'static PyMethodDef compiled_methods[] = \n' \
               '{\n' \
               '%s' \
               ' {NULL, NULL} /* Sentinel */\n' \
               '};\n'
        return code % (all_definition_code)
    def module_init_code(self):
        """Emit the Python-2-style init<name>() module entry point."""
        init_code_list = self.build_information().module_init_code()
        init_code = indent(''.join(init_code_list),4)
        code = 'PyMODINIT_FUNC init%s(void)\n' \
               '{\n' \
               '%s' \
               ' (void) Py_InitModule("%s", compiled_methods);\n' \
               '}\n' % (self.name,init_code,self.name)
        return code
    def generate_file(self,file_name="",location='.'):
        """Write the module source to disk; returns the file path."""
        code = self.module_code()
        if not file_name:
            file_name = self.name + '.cpp'
        name = generate_file_name(file_name,location)
        #return name
        return generate_module(code,name)
    def set_compiler(self,compiler):
        # This is not used anymore -- I think we should ditch it.
        #for i in self.arg_specs()
        #    i.set_compiler(compiler)
        for i in self.build_information():
            i.set_compiler(compiler)
        for i in self.functions:
            i.set_compiler(compiler)
        self.compiler = compiler
    def build_kw_and_file(self,location,kw):
        """Merge build info into distutils keywords; write the source file."""
        # NOTE(review): arg_specs is computed but never used here.
        arg_specs = self.arg_specs()
        info = self.build_information()
        _source_files = info.sources()
        # remove duplicates
        source_files = {}
        for i in _source_files:
            source_files[i] = None
        # NOTE(review): dict.keys() returns a list on Python 2, which this
        # module targets (see cStringIO use elsewhere in the file).
        source_files = source_files.keys()
        # add internally specified macros, includes, etc. to the key words
        # values of the same names so that distutils will use them.
        kw['define_macros'] = kw.get('define_macros',[]) + \
                              info.define_macros()
        kw['include_dirs'] = kw.get('include_dirs',[]) + info.include_dirs()
        kw['libraries'] = kw.get('libraries',[]) + info.libraries()
        kw['library_dirs'] = kw.get('library_dirs',[]) + info.library_dirs()
        kw['extra_compile_args'] = kw.get('extra_compile_args',[]) + \
                                   info.extra_compile_args()
        kw['extra_link_args'] = kw.get('extra_link_args',[]) + \
                                info.extra_link_args()
        kw['sources'] = kw.get('sources',[]) + source_files
        file = self.generate_file(location=location)
        return kw,file
    def setup_extension(self,location='.',**kw):
        """Return a distutils Extension describing this module."""
        kw,file = self.build_kw_and_file(location,kw)
        return build_tools.create_extension(file, **kw)
    def compile(self,location='.',compiler=None, verbose = 0, **kw):
        """Generate the source file and compile it into an extension.

        Raises SystemError if the build fails.
        """
        if compiler is not None:
            self.compiler = compiler
        # !! removed -- we don't have any compiler dependent code
        # currently in spec or info classes
        # hmm. Is there a cleaner way to do this? Seems like
        # choosing the compiler spagettis around a little.
        #compiler = build_tools.choose_compiler(self.compiler)
        #self.set_compiler(compiler)
        kw,file = self.build_kw_and_file(location,kw)
        # This is needed so that files build correctly even when different
        # versions of Python are running around.
        # Imported at beginning of file now to help with test paths.
        # import catalog
        #temp = catalog.default_temp_dir()
        # for speed, build in the machines temp directory
        temp = catalog.intermediate_dir()
        success = build_tools.build_extension(file, temp_dir = temp,
                                              compiler_name = compiler,
                                              verbose = verbose, **kw)
        if not success:
            raise SystemError('Compilation failed')
def generate_file_name(module_name,module_location):
    """Return the absolute path of *module_name* inside *module_location*."""
    return os.path.abspath(os.path.join(module_location, module_name))
def generate_module(module_string, module_file):
    """ generate the source code file. Only overwrite
        the existing file if the actual source has changed.

        Returns *module_file*. Using ``with`` blocks guarantees the file
        handles are closed even if read() or write() raises (the original
        code leaked the handle on error).
    """
    file_changed = 1
    if os.path.exists(module_file):
        with open(module_file, 'r') as f:
            old_string = f.read()
        if old_string == module_string:
            # Source is unchanged -- skip the write so the file's mtime is
            # preserved and downstream builds are not invalidated.
            file_changed = 0
    if file_changed:
        with open(module_file, 'w') as f:
            f.write(module_string)
    return module_file
def assign_variable_types(variables, local_dict=None, global_dict=None,
                          auto_downcast=1,
                          type_converters=None):
    """Build a C++ type spec for each name in *variables*.

    Each variable is looked up in *local_dict* (which shadows
    *global_dict*) and matched against the factories in *type_converters*.
    Raises TypeError listing every variable that is missing or
    unconvertible. When *auto_downcast* is true, scalar specs are
    downcast to match single-precision arrays (see ``downcast``).

    The original signature used mutable default arguments (``{}``) and
    evaluated ``converters.default`` at import time; both are replaced by
    None sentinels resolved at call time (backward compatible).
    """
    if local_dict is None:
        local_dict = {}
    if global_dict is None:
        global_dict = {}
    if type_converters is None:
        type_converters = converters.default
    incoming_vars = {}
    incoming_vars.update(global_dict)
    incoming_vars.update(local_dict)
    variable_specs = []
    errors = {}
    for var in variables:
        try:
            example_type = incoming_vars[var]
            # look through possible type specs to find which one
            # should be used to for example_type
            spec = None
            for factory in type_converters:
                if factory.type_match(example_type):
                    spec = factory.type_spec(var, example_type)
                    break
            if not spec:
                # should really define our own type.
                raise IndexError
            else:
                variable_specs.append(spec)
        except KeyError:
            errors[var] = ("The type and dimensionality specifications" +
                           "for variable '" + var + "' are missing.")
        except IndexError:
            errors[var] = ("Unable to convert variable '" + var +
                           "' to a C++ type.")
    if errors:
        raise TypeError(format_error_msg(errors))
    if auto_downcast:
        variable_specs = downcast(variable_specs)
    return variable_specs
def downcast(var_specs):
    """ Cast python scalars down to most common type of
        arrays used.

        Right now, focus on complex and float types. Ignore int types.
        Require all arrays to have same type before forcing downcasts.

        Note: var_specs are currently altered in place (horrors...!)
    """
    numeric_types = []
    # grab all the numeric types associated with a variables.
    for var in var_specs:
        if hasattr(var, 'numeric_type'):
            numeric_types.append(var.numeric_type)
    # if arrays are present, but none of them are double precision,
    # make all numeric types float or complex(float)
    if (('f' in numeric_types or 'F' in numeric_types) and
            not ('d' in numeric_types or 'D' in numeric_types)):
        for var in var_specs:
            numeric_type = getattr(var, 'numeric_type', None)
            # Array specs carry single-char typecodes ('f', 'F', ...) while
            # scalar specs hold real Python types. The old code passed the
            # typecode strings straight to issubclass(), which raises
            # TypeError whenever this branch is taken; only scalars are
            # meant to be downcast, so skip non-type entries.
            if not isinstance(numeric_type, type):
                continue
            if issubclass(numeric_type, complex):
                var.numeric_type = 'F'
            elif issubclass(numeric_type, float):
                var.numeric_type = 'f'
    return var_specs
def indent(st, spaces):
    """Prefix every line of *st* with *spaces* spaces.

    Trailing spaces at the very end of the result are stripped (the ``$``
    anchor is used without re.MULTILINE, so only the final line is
    trimmed -- this keeps a trailing-newline input from gaining a padded
    blank last line).
    """
    pad = ' ' * spaces
    padded = pad + ('\n' + pad).join(st.split('\n'))
    return re.sub(r' +$', r'', padded)
def format_error_msg(errors):
    """Pretty-print the ``{variable: message}`` error dict as one string.

    Uses pprint.pformat instead of the Python-2-only cStringIO dance; the
    trailing newline matches what pprint.pprint wrote to the old buffer.
    """
    import pprint
    return pprint.pformat(errors) + '\n'
| bsd-3-clause |
eXcomm/tornado | tornado/locale.py | 26 | 20297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Translation methods for generating localized strings.
To load a locale and generate a translated string::
user_locale = tornado.locale.get("es_LA")
    print(user_locale.translate("Sign out"))
`tornado.locale.get()` returns the closest matching locale, not necessarily the
specific locale you requested. You can support pluralization with
additional arguments to `~Locale.translate()`, e.g.::
people = [...]
message = user_locale.translate(
"%(list)s is online", "%(list)s are online", len(people))
    print(message % {"list": user_locale.list(people)})
The first string is chosen if ``len(people) == 1``, otherwise the second
string is chosen.
Applications should call one of `load_translations` (which uses a simple
CSV format) or `load_gettext_translations` (which uses the ``.mo`` format
supported by `gettext` and related tools). If neither method is called,
the `Locale.translate` method will simply return the original string.
"""
from __future__ import absolute_import, division, print_function, with_statement
import codecs
import csv
import datetime
from io import BytesIO
import numbers
import os
import re
from tornado import escape
from tornado.log import gen_log
from tornado.util import u
from tornado._locale_data import LOCALE_NAMES
_default_locale = "en_US"
_translations = {}
_supported_locales = frozenset([_default_locale])
_use_gettext = False
CONTEXT_SEPARATOR = "\x04"
def get(*locale_codes):
    """Returns the closest match for the given locale codes.

    We iterate over all given locale codes in order. If we have a tight
    or a loose match for the code (e.g., "en" for "en_US"), we return
    the locale. Otherwise we move to the next code in the list.

    By default we return ``en_US`` if no translations are found for any of
    the specified locales. You can change the default locale with
    `set_default_locale()`.
    """
    # Thin module-level convenience wrapper around the classmethod.
    return Locale.get_closest(*locale_codes)
def set_default_locale(code):
    """Sets the default locale.

    The default locale is the language all strings in the system are
    assumed to be written in; translation files map from it to a
    destination locale, so no translation file is needed for the default
    locale itself.
    """
    global _default_locale
    global _supported_locales
    _default_locale = code
    supported = list(_translations.keys())
    supported.append(code)
    _supported_locales = frozenset(supported)
def load_translations(directory, encoding=None):
    """Loads translations from CSV files in a directory.

    Translations are strings with optional Python-style named placeholders
    (e.g., ``My name is %(name)s``) and their associated translations.

    The directory should have translation files of the form ``LOCALE.csv``,
    e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
    translation, and an optional plural indicator. Plural indicators should
    be one of "plural" or "singular". A given string can have both singular
    and plural forms. For example ``%(name)s liked this`` may have a
    different verb conjugation depending on whether %(name)s is one
    name or a list of names. There should be two rows in the CSV file for
    that string, one with plural indicator "singular", and one "plural".
    For strings with no verbs that would change on translation, simply
    use "unknown" or the empty string (or don't include the column at all).

    The file is read using the `csv` module in the default "excel" dialect.
    In this format there should not be spaces after the commas.

    If no ``encoding`` parameter is given, the encoding will be
    detected automatically (among UTF-8 and UTF-16) if the file
    contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM
    is present.

    Example translation ``es_LA.csv``::

        "I love you","Te amo"
        "%(name)s liked this","A %(name)s les gustó esto","plural"
        "%(name)s liked this","A %(name)s le gustó esto","singular"

    .. versionchanged:: 4.3
       Added ``encoding`` parameter. Added support for BOM-based encoding
       detection, UTF-16, and UTF-8-with-BOM.
    """
    global _translations
    global _supported_locales
    _translations = {}
    for path in os.listdir(directory):
        if not path.endswith(".csv"):
            continue
        locale, extension = path.split(".")
        if not re.match("[a-z]+(_[A-Z]+)?$", locale):
            gen_log.error("Unrecognized locale %r (path: %s)", locale,
                          os.path.join(directory, path))
            continue
        full_path = os.path.join(directory, path)
        # Resolve the encoding per file: an explicit ``encoding`` argument
        # wins; otherwise autodetect from this file's BOM. Using a separate
        # local fixes a bug where the first file's detected encoding was
        # assigned back to ``encoding`` and leaked into every later file.
        if encoding is None:
            # Try to autodetect encoding based on the BOM.
            with open(full_path, 'rb') as f:
                data = f.read(len(codecs.BOM_UTF16_LE))
            if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
                file_encoding = 'utf-16'
            else:
                # utf-8-sig is "utf-8 with optional BOM". It's discouraged
                # in most cases but is common with CSV files because Excel
                # cannot read utf-8 files without a BOM.
                file_encoding = 'utf-8-sig'
        else:
            file_encoding = encoding
        try:
            # python 3: csv.reader requires a file open in text mode.
            # Force utf8 to avoid dependence on $LANG environment variable.
            f = open(full_path, "r", encoding=file_encoding)
        except TypeError:
            # python 2: csv can only handle byte strings (in ascii-compatible
            # encodings), which we decode below. Transcode everything into
            # utf8 before passing it to csv.reader.
            f = BytesIO()
            with codecs.open(full_path, "r", encoding=file_encoding) as infile:
                f.write(escape.utf8(infile.read()))
            f.seek(0)
        _translations[locale] = {}
        for i, row in enumerate(csv.reader(f)):
            if not row or len(row) < 2:
                continue
            row = [escape.to_unicode(c).strip() for c in row]
            english, translation = row[:2]
            if len(row) > 2:
                plural = row[2] or "unknown"
            else:
                plural = "unknown"
            if plural not in ("plural", "singular", "unknown"):
                gen_log.error("Unrecognized plural indicator %r in %s line %d",
                              plural, path, i + 1)
                continue
            _translations[locale].setdefault(plural, {})[english] = translation
        f.close()
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def load_gettext_translations(directory, domain):
    """Loads translations from `gettext`'s locale tree

    Locale tree is similar to system's ``/usr/share/locale``, like::

        {directory}/{lang}/LC_MESSAGES/{domain}.mo

    Three steps are required to have you app translated:

    1. Generate POT translation file::

        xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc

    2. Merge against existing POT file::

        msgmerge old.po mydomain.po > new.po

    3. Compile::

        msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
    """
    import gettext
    global _translations
    global _supported_locales
    global _use_gettext
    _translations = {}
    for lang in os.listdir(directory):
        # Hidden entries (.svn etc.) and plain files are not locale dirs.
        if lang.startswith('.'):
            continue
        if os.path.isfile(os.path.join(directory, lang)):
            continue
        mo_path = os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo")
        try:
            # stat() first so a missing .mo is reported like any other error.
            os.stat(mo_path)
            _translations[lang] = gettext.translation(domain, directory,
                                                      languages=[lang])
        except Exception as e:
            gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
            continue
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
    _use_gettext = True
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def get_supported_locales():
    """Returns a frozenset of all the supported locale codes."""
    return _supported_locales
class Locale(object):
    """Object representing a locale.

    After calling one of `load_translations` or `load_gettext_translations`,
    call `get` or `get_closest` to get a Locale object.
    """
    @classmethod
    def get_closest(cls, *locale_codes):
        """Returns the closest match for the given locale code."""
        for code in locale_codes:
            if not code:
                continue
            # Normalize "en-us" -> "en_US".
            code = code.replace("-", "_")
            parts = code.split("_")
            if len(parts) > 2:
                continue
            elif len(parts) == 2:
                code = parts[0].lower() + "_" + parts[1].upper()
            # Exact (normalized) match first, then language-only fallback.
            if code in _supported_locales:
                return cls.get(code)
            if parts[0].lower() in _supported_locales:
                return cls.get(parts[0].lower())
        return cls.get(_default_locale)
    @classmethod
    def get(cls, code):
        """Returns the Locale for the given locale code.

        If it is not supported, we raise an exception.
        """
        if not hasattr(cls, "_cache"):
            cls._cache = {}
        if code not in cls._cache:
            assert code in _supported_locales
            translations = _translations.get(code, None)
            if translations is None:
                locale = CSVLocale(code, {})
            elif _use_gettext:
                locale = GettextLocale(code, translations)
            else:
                locale = CSVLocale(code, translations)
            cls._cache[code] = locale
        return cls._cache[code]
    def __init__(self, code, translations):
        self.code = code
        self.name = LOCALE_NAMES.get(code, {}).get("name", u("Unknown"))
        # Right-to-left scripts: Farsi, Arabic, Hebrew.
        self.rtl = False
        for prefix in ["fa", "ar", "he"]:
            if self.code.startswith(prefix):
                self.rtl = True
                break
        self.translations = translations
        # Initialize strings for date formatting
        _ = self.translate
        self._months = [
            _("January"), _("February"), _("March"), _("April"),
            _("May"), _("June"), _("July"), _("August"),
            _("September"), _("October"), _("November"), _("December")]
        self._weekdays = [
            _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
            _("Friday"), _("Saturday"), _("Sunday")]
    def translate(self, message, plural_message=None, count=None):
        """Returns the translation for the given message for this locale.

        If ``plural_message`` is given, you must also provide
        ``count``. We return ``plural_message`` when ``count != 1``,
        and we return the singular form for the given message when
        ``count == 1``.
        """
        raise NotImplementedError()
    def pgettext(self, context, message, plural_message=None, count=None):
        """Returns the translation of *message* in the given *context*.

        Implemented by subclasses; see `GettextLocale.pgettext`.
        """
        raise NotImplementedError()
    def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
                    full_format=False):
        """Formats the given date (which should be GMT).

        By default, we return a relative time (e.g., "2 minutes ago"). You
        can return an absolute date string with ``relative=False``.

        You can force a full format date ("July 10, 1980") with
        ``full_format=True``.

        This method is primarily intended for dates in the past.
        For dates in the future, we fall back to full format.
        """
        if isinstance(date, numbers.Real):
            date = datetime.datetime.utcfromtimestamp(date)
        now = datetime.datetime.utcnow()
        if date > now:
            if relative and (date - now).seconds < 60:
                # Due to clock skew, things are sometimes slightly
                # in the future. Round timestamps in the immediate
                # future down to now in relative mode.
                date = now
            else:
                # Otherwise, future dates always use the full format.
                full_format = True
        local_date = date - datetime.timedelta(minutes=gmt_offset)
        local_now = now - datetime.timedelta(minutes=gmt_offset)
        local_yesterday = local_now - datetime.timedelta(hours=24)
        difference = now - date
        # NOTE: .seconds is only the sub-day remainder; that is fine here
        # because it is only consulted in the days == 0 branch below.
        seconds = difference.seconds
        days = difference.days
        _ = self.translate
        format = None
        if not full_format:
            if relative and days == 0:
                if seconds < 50:
                    return _("1 second ago", "%(seconds)d seconds ago",
                             seconds) % {"seconds": seconds}
                if seconds < 50 * 60:
                    minutes = round(seconds / 60.0)
                    return _("1 minute ago", "%(minutes)d minutes ago",
                             minutes) % {"minutes": minutes}
                hours = round(seconds / (60.0 * 60))
                return _("1 hour ago", "%(hours)d hours ago",
                         hours) % {"hours": hours}
            if days == 0:
                format = _("%(time)s")
            elif days == 1 and local_date.day == local_yesterday.day and \
                    relative:
                format = _("yesterday") if shorter else \
                    _("yesterday at %(time)s")
            elif days < 5:
                format = _("%(weekday)s") if shorter else \
                    _("%(weekday)s at %(time)s")
            elif days < 334:  # 11mo, since confusing for same month last year
                format = _("%(month_name)s %(day)s") if shorter else \
                    _("%(month_name)s %(day)s at %(time)s")
        if format is None:
            format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
                _("%(month_name)s %(day)s, %(year)s at %(time)s")
        # Locales other than en/en_US/zh_CN get a 24-hour clock.
        tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
        if tfhour_clock:
            str_time = "%d:%02d" % (local_date.hour, local_date.minute)
        elif self.code == "zh_CN":
            # Chinese uses an AM/PM prefix character before the time.
            str_time = "%s%d:%02d" % (
                (u('\u4e0a\u5348'), u('\u4e0b\u5348'))[local_date.hour >= 12],
                local_date.hour % 12 or 12, local_date.minute)
        else:
            str_time = "%d:%02d %s" % (
                local_date.hour % 12 or 12, local_date.minute,
                ("am", "pm")[local_date.hour >= 12])
        return format % {
            "month_name": self._months[local_date.month - 1],
            "weekday": self._weekdays[local_date.weekday()],
            "day": str(local_date.day),
            "year": str(local_date.year),
            "time": str_time
        }
    def format_day(self, date, gmt_offset=0, dow=True):
        """Formats the given date as a day of week.

        Example: "Monday, January 22". You can remove the day of week with
        ``dow=False``.
        """
        local_date = date - datetime.timedelta(minutes=gmt_offset)
        _ = self.translate
        if dow:
            return _("%(weekday)s, %(month_name)s %(day)s") % {
                "month_name": self._months[local_date.month - 1],
                "weekday": self._weekdays[local_date.weekday()],
                "day": str(local_date.day),
            }
        else:
            return _("%(month_name)s %(day)s") % {
                "month_name": self._months[local_date.month - 1],
                "day": str(local_date.day),
            }
    def list(self, parts):
        """Returns a comma-separated list for the given list of parts.

        The format is, e.g., "A, B and C", "A and B" or just "A" for lists
        of size 1.
        """
        _ = self.translate
        if len(parts) == 0:
            return ""
        if len(parts) == 1:
            return parts[0]
        # Farsi joins items with "و" rather than a comma.
        comma = u(' \u0648 ') if self.code.startswith("fa") else u(", ")
        return _("%(commas)s and %(last)s") % {
            "commas": comma.join(parts[:-1]),
            "last": parts[len(parts) - 1],
        }
    def friendly_number(self, value):
        """Returns a comma-separated number for the given integer."""
        if self.code not in ("en", "en_US"):
            return str(value)
        value = str(value)
        parts = []
        # Group digits in threes from the right: "1234567" -> "1,234,567".
        while value:
            parts.append(value[-3:])
            value = value[:-3]
        return ",".join(reversed(parts))
class CSVLocale(Locale):
    """Locale implementation using tornado's CSV translation format."""
    def translate(self, message, plural_message=None, count=None):
        """Look *message* up in the CSV tables, honoring plural forms."""
        if plural_message is None:
            table = self.translations.get("unknown", {})
        else:
            assert count is not None
            if count == 1:
                table = self.translations.get("singular", {})
            else:
                # Non-singular counts translate the plural form instead.
                message = plural_message
                table = self.translations.get("plural", {})
        # Fall back to the untranslated message when no entry exists.
        return table.get(message, message)
    def pgettext(self, context, message, plural_message=None, count=None):
        """Contexts are not supported by the CSV format; ignore *context*."""
        if self.translations:
            gen_log.warning('pgettext is not supported by CSVLocale')
        return self.translate(message, plural_message, count)
class GettextLocale(Locale):
    """Locale implementation backed by the standard `gettext` module."""
    def __init__(self, code, translations):
        try:
            # Python 2 catalogs expose unicode-returning variants.
            self.ngettext = translations.ungettext
            self.gettext = translations.ugettext
        except AttributeError:
            # Python 3 dropped the "u" variants.
            self.ngettext = translations.ngettext
            self.gettext = translations.gettext
        # Bind self.gettext *before* running the base constructor: Locale's
        # __init__ immediately calls self.translate, which needs it.
        super(GettextLocale, self).__init__(code, translations)
    def translate(self, message, plural_message=None, count=None):
        """Translate *message*; with a plural form, dispatch on *count*."""
        if plural_message is None:
            return self.gettext(message)
        assert count is not None
        return self.ngettext(message, plural_message, count)
    def pgettext(self, context, message, plural_message=None, count=None):
        """Allows to set context for translation, accepts plural forms.

        Usage example::

            pgettext("law", "right")
            pgettext("good", "right")

        Plural message example::

            pgettext("organization", "club", "clubs", len(clubs))
            pgettext("stick", "club", "clubs", len(clubs))

        To generate POT file with context, add following options to step 1
        of `load_gettext_translations` sequence::

            xgettext [basic options] --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3

        .. versionadded:: 4.2
        """
        # gettext convention: "context<EOT>message" keys; if the separator
        # survives in the result, no contextual translation was found.
        prefixed = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
        if plural_message is not None:
            assert count is not None
            prefixed_plural = "%s%s%s" % (context, CONTEXT_SEPARATOR,
                                          plural_message)
            result = self.ngettext(prefixed, prefixed_plural, count)
            if CONTEXT_SEPARATOR in result:
                # Translation not found
                result = self.ngettext(message, plural_message, count)
        else:
            result = self.gettext(prefixed)
            if CONTEXT_SEPARATOR in result:
                # Translation not found
                result = message
        return result
| apache-2.0 |
thjashin/tensorflow | tensorflow/python/kernel_tests/extract_image_patches_op_test.py | 111 | 3672 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ExtractImagePatches op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ExtractImagePatches(test.TestCase):
  """Functional tests for ExtractImagePatches op."""

  def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
    """Tests input-output pairs for the ExtractImagePatches op.

    Args:
      image: Input tensor with shape: [batch, in_rows, in_cols, depth].
      ksizes: Patch size specified as: [ksize_rows, ksize_cols].
      strides: Output strides, specified as [stride_rows, stride_cols].
      rates: Atrous rates, specified as [rate_rows, rate_cols].
      padding: Padding type.
      patches: Expected output.
    """
    # The op takes rank-4 attrs of the form [1, rows, cols, 1]; the batch
    # and depth entries are always fixed at 1.
    ksizes = [1] + ksizes + [1]
    strides = [1] + strides + [1]
    rates = [1] + rates + [1]
    with self.test_session(use_gpu=True):
      out_tensor = array_ops.extract_image_patches(
          constant_op.constant(image),
          ksizes=ksizes,
          strides=strides,
          rates=rates,
          padding=padding,
          name="im2col")
      self.assertAllClose(patches, out_tensor.eval())

  def testKsize1x1Stride1x1Rate1x1(self):
    """Verifies that for 1x1 kernel the output equals the input."""
    # [2, 3, 4, 5]
    image = np.reshape(range(120), [2, 3, 4, 5])
    # [2, 3, 4, 5]
    patches = np.reshape(range(120), [2, 3, 4, 5])
    for padding in ["VALID", "SAME"]:
      self._VerifyValues(
          image,
          ksizes=[1, 1],
          strides=[1, 1],
          rates=[1, 1],
          padding=padding,
          patches=patches)

  def testKsize1x1Stride2x3Rate1x1(self):
    """Test for 1x1 kernel and strides: output is a strided slice."""
    # [2, 4, 5, 3]
    image = np.reshape(range(120), [2, 4, 5, 3])
    # [2, 2, 2, 3]
    patches = image[:, ::2, ::3, :]
    for padding in ["VALID", "SAME"]:
      self._VerifyValues(
          image,
          ksizes=[1, 1],
          strides=[2, 3],
          rates=[1, 1],
          padding=padding,
          patches=patches)

  def testKsize2x2Stride1x1Rate1x1Valid(self):
    """Test for 2x2 kernel with VALID padding."""
    # [1, 2, 2, 1]
    image = [[[[1], [2]], [[3], [4]]]]
    # [1, 1, 1, 4]
    patches = [[[[1, 2, 3, 4]]]]
    self._VerifyValues(
        image,
        ksizes=[2, 2],
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        patches=patches)

  def testKsize2x2Stride1x1Rate1x1Same(self):
    """Test for 2x2 kernel with SAME padding (zero-filled at edges)."""
    # [1, 2, 2, 1]
    image = [[[[1], [2]], [[3], [4]]]]
    # [1, 2, 2, 4]
    patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]]
    self._VerifyValues(
        image,
        ksizes=[2, 2],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        patches=patches)
if __name__ == "__main__":
test.main()
| apache-2.0 |
Techlightenment/suds | suds/sax/enc.py | 204 | 2720 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides XML I{special character} encoder classes.
"""
import re
class Encoder:
    """
    An XML special character encoder/decoder.
    @cvar encodings: A mapping of special characters encoding.
    @type encodings: [(str,str)]
    @cvar decodings: A mapping of special characters decoding.
    @type decodings: [(str,str)]
    @cvar special: A list of special characters
    @type special: [char]
    """

    # Regex/replacement pairs applied by encode().  The '&' pattern carries a
    # negative lookahead so text already containing one of the five predefined
    # XML entities is not double-encoded.
    # (Restored: the entity replacement strings had been corrupted into the
    # bare characters, which made encode/decode no-ops.)
    encodings = \
        (( '&(?!(amp|lt|gt|quot|apos);)', '&amp;' ),( '<', '&lt;' ),( '>', '&gt;' ),( '"', '&quot;' ),("'", '&apos;' ))
    # Literal entity/text pairs applied by decode().  '&amp;' must stay last:
    # decoding it earlier would allow a later pass to rewrite e.g. '&amp;lt;'
    # all the way down to '<'.
    decodings = \
        (( '&lt;', '<' ),( '&gt;', '>' ),( '&quot;', '"' ),( '&apos;', "'" ),( '&amp;', '&' ))
    # Characters whose presence means a string needs encoding.
    special = \
        ('&', '<', '>', '"', "'")

    def needsEncoding(self, s):
        """
        Get whether string I{s} contains special characters.
        @param s: A string to check.
        @type s: str
        @return: True if needs encoding.
        @rtype: boolean
        """
        # NOTE: basestring - this module targets Python 2.
        if isinstance(s, basestring):
            for c in self.special:
                if c in s:
                    return True
        return False

    def encode(self, s):
        """
        Encode special characters found in string I{s}.
        @param s: A string to encode.
        @type s: str
        @return: The encoded string.
        @rtype: str
        """
        if isinstance(s, basestring) and self.needsEncoding(s):
            for x in self.encodings:
                s = re.sub(x[0], x[1], s)
        return s

    def decode(self, s):
        """
        Decode special characters encodings found in string I{s}.
        @param s: A string to decode.
        @type s: str
        @return: The decoded string.
        @rtype: str
        """
        # Cheap guard: every encoded entity starts with '&'.
        if isinstance(s, basestring) and '&' in s:
            for x in self.decodings:
                s = s.replace(x[0], x[1])
        return s
| lgpl-3.0 |
stonegithubs/odoo | addons/project_timesheet/__openerp__.py | 260 | 2151 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo (OpenERP) addon manifest for "Bill Time on Tasks".
{
    'name': 'Bill Time on Tasks',
    'version': '1.0',
    'category': 'Project Management',
    'description': """
Synchronization of project task work entries with timesheet entries.
====================================================================
This module lets you transfer the entries under tasks defined for Project
Management to the Timesheet line entries for particular date and particular user
with the effect of creating, editing and deleting either ways.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/project-management',
    'depends': ['resource', 'project', 'hr_timesheet_sheet', 'hr_timesheet_invoice', 'account_analytic_analysis', 'procurement'],
    # Data files are loaded in order on install/update; security first.
    'data': [
        'security/ir.model.access.csv',
        'security/project_timesheet_security.xml',
        'report/task_report_view.xml',
        'project_timesheet_view.xml',
    ],
    'demo': ['project_timesheet_demo.xml'],
    'test': [
        'test/worktask_entry_to_timesheetline_entry.yml',
        'test/work_timesheet.yml',
    ],
    'installable': True,
    # auto_install: installed automatically once all 'depends' are installed.
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
catkin/catkin_tools | tests/system/workspace_factory.py | 1 | 5447 | import os
import shutil
from ..utils import temporary_directory
class workspace_factory(temporary_directory):
    # Context manager: creates a temporary directory and yields a
    # WorkspaceFactory rooted at it.  Cleanup is delegated to the
    # temporary_directory base class on exit (presumably removing the
    # directory -- behavior lives in ..utils.temporary_directory).

    def __init__(self, source_space='src', prefix=''):
        super(workspace_factory, self).__init__(prefix=prefix)
        self.source_space = source_space

    def __enter__(self):
        self.temporary_directory = super(workspace_factory, self).__enter__()
        self.workspace_factory = WorkspaceFactory(self.temporary_directory, self.source_space)
        return self.workspace_factory

    def __exit__(self, exc_type, exc_value, traceback):
        super(workspace_factory, self).__exit__(exc_type, exc_value, traceback)
class WorkspaceFactory(object):
    """Generates a workspace source space populated with test packages.

    Packages are registered with :meth:`create_package` (generated from
    templates) or copied from disk with :meth:`add_package`; :meth:`build`
    writes everything out under ``<workspace>/<source_space>``.
    """

    def __init__(self, workspace, source_space='src'):
        """
        :param workspace: path of the workspace root to generate
        :param source_space: name of the source directory inside the workspace
        """
        self.workspace = workspace
        self.source_space = os.path.join(self.workspace, source_space)
        self.packages = {}

    class Package(object):
        """In-memory description of one package to generate (catkin or cmake)."""

        PACKAGE_XML_TEMPLATE = """\
<?xml version="1.0"?>
<package>
<name>{name}</name>
<version>0.0.0</version>
<description>
Description for {name}
</description>
<maintainer email="person@email.com">Firstname Lastname</maintainer>
<license>MIT</license>
{depends_xml}
{export_xml}
</package>
"""

        PACKAGE_XML_EXPORT_TEMPLATE = """
<export>
<build_type>{build_type}</build_type>
</export>"""

        CATKIN_CMAKELISTS_TEMPLATE = """
cmake_minimum_required(VERSION 2.8.12)
project({name})
find_package(catkin REQUIRED COMPONENTS {catkin_components})
catkin_package()"""

        CMAKE_CMAKELISTS_TEMPLATE = """
cmake_minimum_required(VERSION 2.8.12)
project({name})
{find_packages}
add_custom_target(install)"""

        CMAKE_CMAKELISTS_FIND_PACKAGE_TEMPLATE = """
find_package({name})"""

        def __init__(self, name, build_type, depends, build_depends, run_depends, test_depends):
            self.name = name
            self.build_type = build_type
            # `depends` is shorthand contributing to both build and run deps.
            self.build_depends = (build_depends or []) + (depends or [])
            self.run_depends = (run_depends or []) + (depends or [])
            self.test_depends = (test_depends or [])

        def get_package_xml(self):
            """Render this package's package.xml contents."""
            # One dependency tag per line.
            depends_xml = '\n'.join(
                [' <buildtool_depend>{0}</buildtool_depend>'.format(self.build_type)] +
                [' <build_depend>{0}</build_depend>'.format(x) for x in self.build_depends] +
                [' <run_depend>{0}</run_depend>'.format(x) for x in self.run_depends] +
                [' <test_depend>{0}</test_depend>'.format(x) for x in self.test_depends]
            )
            # catkin is the implicit default build type; other build types
            # must be declared in an <export> section.
            if self.build_type == 'catkin':
                export_xml = ''
            else:
                export_xml = self.PACKAGE_XML_EXPORT_TEMPLATE.format(build_type=self.build_type)
            return self.PACKAGE_XML_TEMPLATE.format(
                name=self.name,
                depends_xml=depends_xml,
                export_xml=export_xml)

        def get_cmakelists_txt(self):
            """Render this package's CMakeLists.txt.

            Returns None for build types other than 'catkin' and 'cmake'.
            """
            if self.build_type == 'catkin':
                return self.CATKIN_CMAKELISTS_TEMPLATE.format(
                    name=self.name,
                    catkin_components=' '.join(self.build_depends))
            if self.build_type == 'cmake':
                find_packages = '\n'.join([
                    self.CMAKE_CMAKELISTS_FIND_PACKAGE_TEMPLATE.format(name=name)
                    for name in self.build_depends])
                return self.CMAKE_CMAKELISTS_TEMPLATE.format(
                    name=self.name,
                    find_packages=find_packages)
            return None

    def add_package(self, pkg_name, package_path):
        """Copy a static package into the workspace as ``pkg_name``.

        Fix: copy into ``<source_space>/<pkg_name>``.  The previous code
        copied onto the source space directory itself, which ignored
        ``pkg_name`` and raised once the source space already existed.
        """
        shutil.copytree(package_path, os.path.join(self.source_space, pkg_name))

    def create_package(
        self,
        pkg_name,
        build_type='cmake',
        depends=None,
        build_depends=None,
        run_depends=None,
        test_depends=None
    ):
        """Add a package to be generated in this workspace."""
        self.packages[pkg_name] = self.Package(pkg_name, build_type, depends, build_depends, run_depends, test_depends)

    def build(self):
        """Generate workspace paths and packages."""
        cwd = os.getcwd()
        if not os.path.isdir(self.workspace):
            if os.path.exists(self.workspace):
                raise RuntimeError("Cannot build workspace in '{0}' because it is a file".format(self.workspace))
            os.makedirs(self.workspace)
        if os.path.exists(self.source_space):
            print("WARNING: source space given to WorkspaceFactory exists, clearing before build()'ing")
            self.clear()
        os.makedirs(self.source_space)
        try:
            os.chdir(self.source_space)
            for name, pkg in self.packages.items():
                pkg_dir = os.path.join(self.source_space, name)
                os.makedirs(pkg_dir)
                pkg_xml_path = os.path.join(pkg_dir, 'package.xml')
                with open(pkg_xml_path, 'w') as f:
                    f.write(pkg.get_package_xml())
                cmakelists_txt_path = os.path.join(pkg_dir, 'CMakeLists.txt')
                with open(cmakelists_txt_path, 'w') as f:
                    f.write(pkg.get_cmakelists_txt())
        finally:
            # Always restore the caller's working directory.
            os.chdir(cwd)

    def clear(self):
        """Remove the whole workspace directory, if present."""
        if os.path.exists(self.workspace):
            shutil.rmtree(self.workspace)
| apache-2.0 |
mandeepdhami/neutron | neutron/db/migration/alembic_migrations/versions/16cdf118d31d_extra_dhcp_options_ipv6_support.py | 25 | 1634 | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""extra_dhcp_options IPv6 support
Revision ID: 16cdf118d31d
Revises: 14be42f3d0a5
Create Date: 2014-10-23 17:04:19.796731
"""
# revision identifiers, used by Alembic.
revision = '16cdf118d31d'
down_revision = '14be42f3d0a5'

from alembic import op
import sqlalchemy as sa

from neutron.db import migration

# The unique constraint is widened to include ip_version so the same option
# name can exist once for IPv4 and once for IPv6 on a given port.
CONSTRAINT_NAME_OLD = 'uidx_portid_optname'
CONSTRAINT_NAME_NEW = 'uniq_extradhcpopts0portid0optname0ipversion'
TABLE_NAME = 'extradhcpopts'


def upgrade():
    # Foreign keys on the table are removed for the duration of the drop and
    # restored afterwards by this helper (presumably because some backends
    # refuse to drop a constraint referenced by FKs -- see
    # neutron.db.migration.remove_fks_from_table).
    with migration.remove_fks_from_table(TABLE_NAME):
        op.drop_constraint(
            name=CONSTRAINT_NAME_OLD,
            table_name=TABLE_NAME,
            type_='unique'
        )
    # server_default='4' keeps the NOT NULL column valid for existing rows;
    # the explicit UPDATE then normalizes any pre-existing data.
    op.add_column('extradhcpopts', sa.Column('ip_version', sa.Integer(),
                                             server_default='4', nullable=False))
    op.execute("UPDATE extradhcpopts SET ip_version = 4")
    op.create_unique_constraint(
        name=CONSTRAINT_NAME_NEW,
        source='extradhcpopts',
        local_cols=['port_id', 'opt_name', 'ip_version']
    )
| apache-2.0 |
leonsas/django-push-notifications | push_notifications/models.py | 1 | 3559 | from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .fields import HexIntegerField, UUIDField
# Compatibility with custom user models, while keeping backwards-compatibility with <1.5
AUTH_USER_MODEL = getattr(settings, "AUTH_USER_MODEL", "auth.User")


class Device(models.Model):
    # Abstract base for per-provider device records (GCMDevice, APNSDevice):
    # holds the fields common to all push providers.
    name = models.CharField(max_length=255, verbose_name=_("Name"), blank=True, null=True)
    active = models.BooleanField(verbose_name=_("Is active"), default=True,
                                 help_text=_("Inactive devices will not be sent notifications"))
    user = models.ForeignKey(AUTH_USER_MODEL, blank=True, null=True)
    date_created = models.DateTimeField(verbose_name=_("Creation date"), auto_now_add=True, null=True)

    class Meta:
        abstract = True

    def __unicode__(self):
        # Best available label: explicit name, then device_id (defined on the
        # concrete subclasses), then "<Class> for <user>" as a last resort.
        return self.name or \
            str(self.device_id or "") or \
            "%s for %s" % (self.__class__.__name__, self.user or "unknown user")
class GCMDeviceManager(models.Manager):
    # Hands out GCMDeviceQuerySet so bulk send_message() is available on
    # GCMDevice querysets.
    def get_queryset(self):
        return GCMDeviceQuerySet(self.model)
    get_query_set = get_queryset  # Django < 1.6 compatibility
class GCMDeviceQuerySet(models.query.QuerySet):
    def send_message(self, message, **kwargs):
        """Bulk-send ``message`` via GCM to every device in this queryset.

        ``extra`` (dict) in kwargs supplies additional payload keys; the
        remaining kwargs are forwarded to gcm_send_bulk_message.  Does
        nothing on an empty queryset.
        """
        if not self:
            return None
        from .gcm import gcm_send_bulk_message
        payload = kwargs.pop("extra", {})
        if message is not None:
            payload["message"] = message
        registration_ids = list(self.values_list("registration_id", flat=True))
        return gcm_send_bulk_message(
            registration_ids=registration_ids, data=payload, **kwargs)
class GCMDevice(Device):
    # device_id cannot be a reliable primary key as fragmentation between different devices
    # can make it turn out to be null and such:
    # http://android-developers.blogspot.co.uk/2011/03/identifying-app-installations.html
    device_id = HexIntegerField(verbose_name=_("Device ID"), blank=True, null=True, db_index=True,
                                help_text=_("ANDROID_ID / TelephonyManager.getDeviceId() (always as hex)"))
    # GCM registration tokens have no fixed length, hence a TextField.
    registration_id = models.TextField(verbose_name=_("Registration ID"))

    objects = GCMDeviceManager()

    class Meta:
        verbose_name = _("GCM device")

    def send_message(self, message, **kwargs):
        """Send ``message`` to this single device via GCM.  ``extra`` in
        kwargs adds payload keys; remaining kwargs go to gcm_send_message."""
        from .gcm import gcm_send_message
        data = kwargs.pop("extra", {})
        if message is not None:
            data["message"] = message
        return gcm_send_message(registration_id=self.registration_id, data=data, **kwargs)
class APNSDeviceManager(models.Manager):
    # APNS counterpart of GCMDeviceManager: exposes bulk send_message().
    def get_queryset(self):
        return APNSDeviceQuerySet(self.model)
    get_query_set = get_queryset  # Django < 1.6 compatibility
class APNSDeviceQuerySet(models.query.QuerySet):
    def send_message(self, message, **kwargs):
        """Bulk-send ``message`` (the APNS alert) to every device in this
        queryset; kwargs are forwarded to apns_send_bulk_message.  Does
        nothing on an empty queryset.
        """
        if not self:
            return None
        from .apns import apns_send_bulk_message
        registration_ids = list(self.values_list("registration_id", flat=True))
        return apns_send_bulk_message(
            registration_ids=registration_ids, alert=message, **kwargs)
class APNSDevice(Device):
    device_id = UUIDField(verbose_name=_("Device ID"), blank=True, null=True, db_index=True,
                          help_text="UDID / UIDevice.identifierForVendor()")
    # APNS push token; unique per device, at most 64 characters.
    registration_id = models.CharField(verbose_name=_("Registration ID"), max_length=64, unique=True)

    objects = APNSDeviceManager()

    class Meta:
        verbose_name = _("APNS device")

    def send_message(self, message, **kwargs):
        """Send ``message`` (the APNS alert) to this single device; kwargs
        are forwarded to apns_send_message."""
        from .apns import apns_send_message
        return apns_send_message(registration_id=self.registration_id, alert=message, **kwargs)
# This is an APNS-only function right now, but maybe GCM will implement it
# in the future. But the definition of 'expired' may not be the same. Whatevs
def get_expired_tokens():
    """Return the device ids that APNS reports as inactive (delegates to
    apns_fetch_inactive_ids)."""
    from .apns import apns_fetch_inactive_ids
    return apns_fetch_inactive_ids()
| mit |
togaurav1981/Hello-World | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/filters/__init__.py | 93 | 11491 | # -*- coding: utf-8 -*-
"""
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
get_choice_opt, ClassNotFound, OptionError
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
    """
    Lookup a filter by name. Return None if not found.
    """
    # Built-in filters take precedence over plugin-provided ones.
    cls = FILTERS.get(filtername)
    if cls is not None:
        return cls
    for plugin_name, plugin_cls in find_plugin_filters():
        if plugin_name == filtername:
            return plugin_cls
    return None
def get_filter_by_name(filtername, **options):
    """
    Return an instantiated filter. Options are passed to the filter
    initializer if wanted. Raise a ClassNotFound if not found.
    """
    cls = find_filter_class(filtername)
    if cls is None:
        raise ClassNotFound('filter %r not found' % filtername)
    return cls(**options)
def get_all_filters():
    """
    Return a generator of all filter names, built-in ones first and then
    plugin-provided ones.
    """
    for builtin_name in FILTERS:
        yield builtin_name
    for plugin_name, _cls in find_plugin_filters():
        yield plugin_name
def _replace_special(ttype, value, regex, specialttype,
                     replacefunc=lambda x: x):
    """Split *value* on *regex* matches: yield ``(ttype, text)`` for the
    unmatched stretches and ``(specialttype, replacefunc(match))`` for each
    match.  Empty unmatched stretches are never yielded."""
    pos = 0
    for m in regex.finditer(value):
        if m.start() > pos:
            yield ttype, value[pos:m.start()]
        yield specialttype, replacefunc(m.group())
        pos = m.end()
    tail = value[pos:]
    if tail:
        yield ttype, tail
class CodeTagFilter(Filter):
    """
    Highlight special code tags in comments and docstrings.

    Options accepted:

    `codetags` : list of strings
       A list of strings that are flagged as code tags.  The default is to
       highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)
        tags = get_list_opt(options, 'codetags',
                            ['XXX', 'TODO', 'BUG', 'NOTE'])
        # \b boundaries avoid matching inside longer words; empty tags are
        # skipped so the alternation cannot match the empty string.
        self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
            re.escape(tag) for tag in tags if tag
        ]))

    def filter(self, lexer, stream):
        regex = self.tag_re
        for ttype, value in stream:
            # Precedence note: this reads as
            # (String.Doc) or (Comment and not Comment.Preproc).
            if ttype in String.Doc or \
               ttype in Comment and \
               ttype not in Comment.Preproc:
                # Re-emit the token split around tag matches, with the tags
                # retyped as Comment.Special.
                for sttype, svalue in _replace_special(ttype, value, regex,
                                                       Comment.Special):
                    yield sttype, svalue
            else:
                yield ttype, value
class KeywordCaseFilter(Filter):
    """
    Convert keywords to lowercase or uppercase or capitalize them, which
    means first letter uppercase, rest lowercase.

    This can be useful e.g. if you highlight Pascal code and want to adapt the
    code to your styleguide.

    Options accepted:

    `case` : string
       The casing to convert keywords to. Must be one of ``'lower'``,
       ``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        chosen = get_choice_opt(options, 'case',
                                ['lower', 'upper', 'capitalize'], 'lower')
        # Bind the matching str method once; filter() just applies it.
        self.convert = getattr(str, chosen)

    def filter(self, lexer, stream):
        convert = self.convert
        for ttype, value in stream:
            if ttype in Keyword:
                value = convert(value)
            yield ttype, value
class NameHighlightFilter(Filter):
    """
    Highlight a normal Name (and Name.*) token with a different token type.

    Example::

        filter = NameHighlightFilter(
            names=['foo', 'bar', 'baz'],
            tokentype=Name.Function,
        )

    This would highlight the names "foo", "bar" and "baz"
    as functions. `Name.Function` is the default token type.

    Options accepted:

    `names` : list of strings
      A list of names that should be given the different token type.
      There is no default.
    `tokentype` : TokenType or string
      A token type or a string containing a token type name that is
      used for highlighting the strings in `names`. The default is
      `Name.Function`.
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)
        # Stored as a set for O(1) membership tests in filter().
        self.names = set(get_list_opt(options, 'names', []))
        tokentype = options.get('tokentype')
        if tokentype:
            # Accept a token type object or its dotted string name.
            self.tokentype = string_to_tokentype(tokentype)
        else:
            self.tokentype = Name.Function

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype in Name and value in self.names:
                yield self.tokentype, value
            else:
                yield ttype, value
class ErrorToken(Exception):
    """Default exception raised by `RaiseOnErrorTokenFilter` when the lexer
    produces an `Error` token."""
    pass
class RaiseOnErrorTokenFilter(Filter):
    """
    Raise an exception when the lexer generates an error token.

    Options accepted:

    `excclass` : Exception class
       The exception class to raise.
       The default is `pygments.filters.ErrorToken`.

    *New in Pygments 0.8.*
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.exception = options.get('excclass', ErrorToken)
        try:
            # issubclass() will raise TypeError if first argument is not a class
            if not issubclass(self.exception, Exception):
                raise TypeError
        except TypeError:
            # Either path above lands here: reject non-class and
            # non-Exception values with a uniform error.
            raise OptionError('excclass option is not an exception class')

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype is Error:
                # The offending token text becomes the exception argument.
                raise self.exception(value)
            yield ttype, value
class VisibleWhitespaceFilter(Filter):
    """
    Convert tabs, newlines and/or spaces to visible characters.

    Options accepted:

    `spaces` : string or bool
      If this is a one-character string, spaces will be replaces by this string.
      If it is another true value, spaces will be replaced by ``·`` (unicode
      MIDDLE DOT). If it is a false value, spaces will not be replaced. The
      default is ``False``.
    `tabs` : string or bool
      The same as for `spaces`, but the default replacement character is ``»``
      (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
      is ``False``. Note: this will not work if the `tabsize` option for the
      lexer is nonzero, as tabs will already have been expanded then.
    `tabsize` : int
      If tabs are to be replaced by this filter (see the `tabs` option), this
      is the total number of characters that a tab should be expanded to.
      The default is ``8``.
    `newlines` : string or bool
      The same as for `spaces`, but the default replacement character is ``¶``
      (unicode PILCROW SIGN). The default value is ``False``.
    `wstokentype` : bool
      If true, give whitespace the special `Whitespace` token type. This allows
      styling the visible whitespace differently (e.g. greyed out), but it can
      disrupt background colors. The default is ``True``.

    *New in Pygments 0.8.*
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)
        for name, default in list({'spaces': '·', 'tabs': '»', 'newlines': '¶'}.items()):
            opt = options.get(name, False)
            # A one-character string is used verbatim; any other truthy value
            # selects the default marker; falsy disables replacement ('').
            if isinstance(opt, str) and len(opt) == 1:
                setattr(self, name, opt)
            else:
                setattr(self, name, (opt and default or ''))
        tabsize = get_int_opt(options, 'tabsize', 8)
        if self.tabs:
            # Pad the tab marker so a replaced tab still occupies `tabsize`
            # columns.
            self.tabs += ' '*(tabsize-1)
        if self.newlines:
            # Keep the real newline after the marker so lines still break.
            self.newlines += '\n'
        self.wstt = get_bool_opt(options, 'wstokentype', True)

    def filter(self, lexer, stream):
        if self.wstt:
            # Whitespace gets its own token type: split every token around
            # whitespace runs via _replace_special.  Unconfigured kinds map
            # to themselves so they pass through unchanged.
            spaces = self.spaces or ' '
            tabs = self.tabs or '\t'
            newlines = self.newlines or '\n'
            regex = re.compile(r'\s')
            def replacefunc(wschar):
                if wschar == ' ':
                    return spaces
                elif wschar == '\t':
                    return tabs
                elif wschar == '\n':
                    return newlines
                return wschar
            for ttype, value in stream:
                for sttype, svalue in _replace_special(ttype, value, regex,
                                                       Whitespace, replacefunc):
                    yield sttype, svalue
        else:
            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
            # simpler processing: plain in-token substitution, token types
            # are left untouched.
            for ttype, value in stream:
                if spaces:
                    value = value.replace(' ', spaces)
                if tabs:
                    value = value.replace('\t', tabs)
                if newlines:
                    value = value.replace('\n', newlines)
                yield ttype, value
class GobbleFilter(Filter):
    """
    Gobbles source code lines (eats initial characters).

    This filter drops the first ``n`` characters off every line of code. This
    may be useful when the source code fed to the lexer is indented by a fixed
    amount of space that isn't desired in the output.

    Options accepted:

    `n` : int
       The number of characters to gobble.

    *New in Pygments 1.2.*
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.n = get_int_opt(options, 'n', 0)

    def gobble(self, value, left):
        # Consume up to `left` characters from `value`; return the remainder
        # of the string and how much of the budget is still unconsumed.
        if left < len(value):
            return value[left:], 0
        return '', left - len(value)

    def filter(self, lexer, stream):
        n = self.n
        remaining = n  # budget carried across tokens within one source line
        for ttype, value in stream:
            pieces = value.split('\n')
            # First piece continues the current line; each later piece starts
            # a fresh line with a full budget of n.
            pieces[0], remaining = self.gobble(pieces[0], remaining)
            for idx in range(1, len(pieces)):
                pieces[idx], remaining = self.gobble(pieces[idx], n)
            rejoined = '\n'.join(pieces)
            if rejoined:
                yield ttype, rejoined
class TokenMergeFilter(Filter):
    """
    Merges consecutive tokens with the same token type in the output stream of a
    lexer.

    *New in Pygments 1.2.*
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        # Accumulate runs of identically-typed tokens and flush each run as
        # one token when the type changes (and once more at end of stream).
        pending_type = None
        pending_value = None
        for ttype, value in stream:
            if ttype is pending_type:
                pending_value += value
                continue
            if pending_type is not None:
                yield pending_type, pending_value
            pending_type, pending_value = ttype, value
        if pending_type is not None:
            yield pending_type, pending_value
# Registry of the built-in filters, keyed by the public name accepted by
# find_filter_class() / get_filter_by_name().
FILTERS = {
    'codetagify': CodeTagFilter,
    'keywordcase': KeywordCaseFilter,
    'highlight': NameHighlightFilter,
    'raiseonerror': RaiseOnErrorTokenFilter,
    'whitespace': VisibleWhitespaceFilter,
    'gobble': GobbleFilter,
    'tokenmerge': TokenMergeFilter,
}
| mit |
zozo123/buildbot | master/buildbot/util/pathmatch.py | 1 | 2714 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
_ident_re = re.compile('^[a-zA-Z_-][.a-zA-Z0-9_-]*$')


def ident(x):
    """Return *x* unchanged when it matches the identifier pattern above;
    raise TypeError otherwise (path-element type-function convention)."""
    if not _ident_re.match(x):
        raise TypeError
    return x
class Matcher(object):
    """Maps path tuples, possibly containing typed placeholder elements like
    ':name', 'n:count' or 'i:ident', to values.  Indexing with a concrete
    path returns the matching value plus the captured keyword arguments.
    """

    def __init__(self):
        self._patterns = {}
        # Set when patterns change; cleared once _compile() rebuilds the
        # by-length index.  (Fix: previously the flag was never cleared, so
        # every __getitem__ recompiled the index.)
        self._dirty = True

    def __setitem__(self, path, value):
        assert path not in self._patterns, "duplicate path %s" % (path,)
        self._patterns[path] = value
        self._dirty = True

    def __repr__(self):
        return '<Matcher %r>' % (self._patterns,)

    # A placeholder element is '<flag>:<name>' with an optional one-char type
    # flag: 'n' converts the path element to int, 'i' requires an identifier.
    path_elt_re = re.compile('^(.?):([a-z0-9_.]+)$')
    type_fns = dict(n=int, i=ident)

    def __getitem__(self, path):
        if self._dirty:
            self._compile()
        # Only patterns with the same number of elements can match.
        patterns = self._by_length.get(len(path), {})
        for pattern in patterns:
            kwargs = {}
            for pattern_elt, path_elt in zip(pattern, path):
                mo = self.path_elt_re.match(pattern_elt)
                if mo:
                    type_flag, arg_name = mo.groups()
                    if type_flag:
                        try:
                            type_fn = self.type_fns[type_flag]
                        except Exception:
                            # Re-check via assert purely to produce a
                            # readable message for an unknown flag.
                            assert type_flag in self.type_fns, \
                                "no such type flag %s" % type_flag
                        try:
                            path_elt = type_fn(path_elt)
                        except Exception:
                            # Conversion failed: this pattern cannot match.
                            break
                    kwargs[arg_name] = path_elt
                else:
                    if pattern_elt != path_elt:
                        break  # literal element mismatch
            else:
                # inner loop completed without break: complete match
                return patterns[pattern], kwargs
        else:
            raise KeyError('No match for %r' % (path,))

    def iterPatterns(self):
        # NOTE: dict.iteritems() - this module targets Python 2.
        return self._patterns.iteritems()

    def _compile(self):
        # Bucket patterns by element count for the fast path in __getitem__.
        self._by_length = {}
        for k, v in self.iterPatterns():
            l = len(k)
            self._by_length.setdefault(l, {})[k] = v
        self._dirty = False
| gpl-3.0 |
x111ong/django | django/db/models/signals.py | 399 | 2734 | from django.apps import apps
from django.dispatch import Signal
from django.utils import six
class_prepared = Signal(providing_args=["class"])


class ModelSignal(Signal):
    """
    Signal subclass that allows the sender to be lazily specified as a string
    of the `app_label.ModelName` form.
    """
    def __init__(self, *args, **kwargs):
        super(ModelSignal, self).__init__(*args, **kwargs)
        # (app_label, ModelName) -> [(receiver, weak, dispatch_uid), ...]
        # for connections requested before the model class was loaded.
        self.unresolved_references = {}
        class_prepared.connect(self._resolve_references)

    def _resolve_references(self, sender, **kwargs):
        # Fired for every prepared model class: attach any receivers that
        # were parked for it by name in connect().
        opts = sender._meta
        reference = (opts.app_label, opts.object_name)
        try:
            receivers = self.unresolved_references.pop(reference)
        except KeyError:
            pass
        else:
            for receiver, weak, dispatch_uid in receivers:
                super(ModelSignal, self).connect(
                    receiver, sender=sender, weak=weak, dispatch_uid=dispatch_uid
                )

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        if isinstance(sender, six.string_types):
            try:
                app_label, model_name = sender.split('.')
            except ValueError:
                raise ValueError(
                    "Specified sender must either be a model or a "
                    "model name of the 'app_label.ModelName' form."
                )
            try:
                sender = apps.get_registered_model(app_label, model_name)
            except LookupError:
                # Model not loaded yet: park the connection until its
                # class_prepared signal fires (see _resolve_references).
                ref = (app_label, model_name)
                refs = self.unresolved_references.setdefault(ref, [])
                refs.append((receiver, weak, dispatch_uid))
                return
        super(ModelSignal, self).connect(
            receiver, sender=sender, weak=weak, dispatch_uid=dispatch_uid
        )
# Model lifecycle signals.  Each accepts a lazy string sender of the form
# "app_label.ModelName" (resolved by ModelSignal.connect).
pre_init = ModelSignal(providing_args=["instance", "args", "kwargs"], use_caching=True)
post_init = ModelSignal(providing_args=["instance"], use_caching=True)
pre_save = ModelSignal(providing_args=["instance", "raw", "using", "update_fields"],
                       use_caching=True)
post_save = ModelSignal(providing_args=["instance", "raw", "created", "using", "update_fields"], use_caching=True)
pre_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True)
post_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True)
m2m_changed = ModelSignal(
    providing_args=["action", "instance", "reverse", "model", "pk_set", "using"],
    use_caching=True,
)
# Migration signals are plain Signals (no lazy model-name sender support).
pre_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using"])
post_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using"])
| bsd-3-clause |
jusdng/odoo | addons/hr_recruitment/__openerp__.py | 260 | 2780 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo (OpenERP) addon manifest for "Recruitment Process".
{
    'name': 'Recruitment Process',
    'version': '1.0',
    'category': 'Human Resources',
    # Position in the module list UI.
    'sequence': 25,
    'summary': 'Jobs, Recruitment, Applications, Job Interviews, Surveys',
    'description': """
Manage job positions and the recruitment process
================================================
This application allows you to easily keep track of jobs, vacancies, applications, interviews...
It is integrated with the mail gateway to automatically fetch email sent to <jobs@yourcompany.com> in the list of applications. It's also integrated with the document management system to store and search in the CV base and find the candidate that you are looking for. Similarly, it is integrated with the survey module to allow you to define interviews for different jobs.
You can define the different phases of interviews and easily rate the applicant from the kanban view.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/recruitment',
    'depends': [
        'decimal_precision',
        'hr',
        'survey',
        'calendar',
        'fetchmail',
        'web_kanban_gauge',
    ],
    # Data files are loaded in order on install/update.
    'data': [
        'wizard/hr_recruitment_create_partner_job_view.xml',
        'hr_recruitment_view.xml',
        'hr_recruitment_menu.xml',
        'security/hr_recruitment_security.xml',
        'security/ir.model.access.csv',
        'report/hr_recruitment_report_view.xml',
        'hr_recruitment_installer_view.xml',
        'res_config_view.xml',
        'survey_data_recruitment.xml',
        'hr_recruitment_data.xml',
        'views/hr_recruitment.xml',
    ],
    'demo': ['hr_recruitment_demo.xml'],
    'test': ['test/recruitment_process.yml'],
    'installable': True,
    'auto_install': False,
    # Shown as a standalone App in the Odoo apps list.
    'application': True,
}
| agpl-3.0 |
lucidbard/NewsBlur | vendor/typogrify/smartypants.py | 37 | 29160 | #!/usr/bin/python
r"""
==============
smartypants.py
==============
----------------------------
SmartyPants ported to Python
----------------------------
Ported by `Chad Miller`_
Copyright (c) 2004, 2007 Chad Miller
original `SmartyPants`_ by `John Gruber`_
Copyright (c) 2003 John Gruber
Synopsis
========
A smart-quotes plugin for Pyblosxom_.
The priginal "SmartyPants" is a free web publishing plug-in for Movable Type,
Blosxom, and BBEdit that easily translates plain ASCII punctuation characters
into "smart" typographic punctuation HTML entities.
This software, *smartypants.py*, endeavours to be a functional port of
SmartyPants to Python, for use with Pyblosxom_.
Description
===========
SmartyPants can perform the following transformations:
- Straight quotes ( " and ' ) into "curly" quote HTML entities
- Backticks-style quotes (\`\`like this'') into "curly" quote HTML entities
- Dashes (``--`` and ``---``) into en- and em-dash entities
- Three consecutive dots (``...`` or ``. . .``) into an ellipsis entity
This means you can write, edit, and save your posts using plain old
ASCII straight quotes, plain dashes, and plain dots, but your published
posts (and final HTML output) will appear with smart quotes, em-dashes,
and proper ellipses.
SmartyPants does not modify characters within ``<pre>``, ``<code>``, ``<kbd>``,
``<math>`` or ``<script>`` tag blocks. Typically, these tags are used to
display text where smart quotes and other "smart punctuation" would not be
appropriate, such as source code or example markup.
Backslash Escapes
=================
If you need to use literal straight quotes (or plain hyphens and
periods), SmartyPants accepts the following backslash escape sequences
to force non-smart punctuation. It does so by transforming the escape
sequence into a decimal-encoded HTML entity:
(FIXME: table here.)
.. comment It sucks that there's a disconnect between the visual layout and table markup when special characters are involved.
.. comment ====== ===== =========
.. comment Escape Value Character
.. comment ====== ===== =========
.. comment \\\\\\\\ \ \\\\
.. comment \\\\" " "
.. comment \\\\' ' '
.. comment \\\\. . .
.. comment \\\\- - \-
.. comment \\\\` ` \`
.. comment ====== ===== =========
This is useful, for example, when you want to use straight quotes as
foot and inch marks: 6'2" tall; a 17" iMac.
Options
=======
For Pyblosxom users, the ``smartypants_attributes`` attribute is where you
specify configuration options.
Numeric values are the easiest way to configure SmartyPants' behavior:
"0"
Suppress all transformations. (Do nothing.)
"1"
Performs default SmartyPants transformations: quotes (including
\`\`backticks'' -style), em-dashes, and ellipses. "``--``" (dash dash)
is used to signify an em-dash; there is no support for en-dashes.
"2"
Same as smarty_pants="1", except that it uses the old-school typewriter
shorthand for dashes: "``--``" (dash dash) for en-dashes, "``---``"
(dash dash dash)
for em-dashes.
"3"
Same as smarty_pants="2", but inverts the shorthand for dashes:
"``--``" (dash dash) for em-dashes, and "``---``" (dash dash dash) for
en-dashes.
"-1"
Stupefy mode. Reverses the SmartyPants transformation process, turning
the HTML entities produced by SmartyPants into their ASCII equivalents.
E.g. "“" is turned into a simple double-quote ("), "—" is
turned into two dashes, etc.
The following single-character attribute values can be combined to toggle
individual transformations from within the smarty_pants attribute. For
example, to educate normal quotes and em-dashes, but not ellipses or
\`\`backticks'' -style quotes:
``py['smartypants_attributes'] = "1"``
"q"
Educates normal quote characters: (") and (').
"b"
Educates \`\`backticks'' -style double quotes.
"B"
Educates \`\`backticks'' -style double quotes and \`single' quotes.
"d"
Educates em-dashes.
"D"
Educates em-dashes and en-dashes, using old-school typewriter shorthand:
(dash dash) for en-dashes, (dash dash dash) for em-dashes.
"i"
Educates em-dashes and en-dashes, using inverted old-school typewriter
shorthand: (dash dash) for em-dashes, (dash dash dash) for en-dashes.
"e"
Educates ellipses.
"w"
Translates any instance of ``"`` into a normal double-quote character.
This should be of no interest to most people, but of particular interest
to anyone who writes their posts using Dreamweaver, as Dreamweaver
inexplicably uses this entity to represent a literal double-quote
character. SmartyPants only educates normal quotes, not entities (because
ordinarily, entities are used for the explicit purpose of representing the
specific character they represent). The "w" option must be used in
conjunction with one (or both) of the other quote options ("q" or "b").
Thus, if you wish to apply all SmartyPants transformations (quotes, en-
and em-dashes, and ellipses) and also translate ``"`` entities into
regular quotes so SmartyPants can educate them, you should pass the
following to the smarty_pants attribute:
The ``smartypants_forbidden_flavours`` list contains pyblosxom flavours for
which no Smarty Pants rendering will occur.
Caveats
=======
Why You Might Not Want to Use Smart Quotes in Your Weblog
---------------------------------------------------------
For one thing, you might not care.
Most normal, mentally stable individuals do not take notice of proper
typographic punctuation. Many design and typography nerds, however, break
out in a nasty rash when they encounter, say, a restaurant sign that uses
a straight apostrophe to spell "Joe's".
If you're the sort of person who just doesn't care, you might well want to
continue not caring. Using straight quotes -- and sticking to the 7-bit
ASCII character set in general -- is certainly a simpler way to live.
Even if you *do* care about accurate typography, you still might want to
think twice before educating the quote characters in your weblog. One side
effect of publishing curly quote HTML entities is that it makes your
weblog a bit harder for others to quote from using copy-and-paste. What
happens is that when someone copies text from your blog, the copied text
contains the 8-bit curly quote characters (as well as the 8-bit characters
for em-dashes and ellipses, if you use these options). These characters
are not standard across different text encoding methods, which is why they
need to be encoded as HTML entities.
People copying text from your weblog, however, may not notice that you're
using curly quotes, and they'll go ahead and paste the unencoded 8-bit
characters copied from their browser into an email message or their own
weblog. When pasted as raw "smart quotes", these characters are likely to
get mangled beyond recognition.
That said, my own opinion is that any decent text editor or email client
makes it easy to stupefy smart quote characters into their 7-bit
equivalents, and I don't consider it my problem if you're using an
indecent text editor or email client.
Algorithmic Shortcomings
------------------------
One situation in which quotes will get curled the wrong way is when
apostrophes are used at the start of leading contractions. For example:
``'Twas the night before Christmas.``
In the case above, SmartyPants will turn the apostrophe into an opening
single-quote, when in fact it should be a closing one. I don't think
this problem can be solved in the general case -- every word processor
I've tried gets this wrong as well. In such cases, it's best to use the
proper HTML entity for closing single-quotes (``’``) by hand.
Bugs
====
To file bug reports or feature requests (other than topics listed in the
Caveats section above) please send email to: mailto:smartypantspy@chad.org
If the bug involves quotes being curled the wrong way, please send example
text to illustrate.
To Do list
----------
- Provide a function for use within templates to quote anything at all.
Version History
===============
1.5_1.6: Fri, 27 Jul 2007 07:06:40 -0400
- Fixed bug where blocks of precious unalterable text was instead
interpreted. Thanks to Le Roux and Dirk van Oosterbosch.
1.5_1.5: Sat, 13 Aug 2005 15:50:24 -0400
- Fix bogus magical quotation when there is no hint that the
user wants it, e.g., in "21st century". Thanks to Nathan Hamblen.
- Be smarter about quotes before terminating numbers in an en-dash'ed
range.
1.5_1.4: Thu, 10 Feb 2005 20:24:36 -0500
- Fix a date-processing bug, as reported by jacob childress.
- Begin a test-suite for ensuring correct output.
- Removed import of "string", since I didn't really need it.
(This was my first ever Python program. Sue me!)
1.5_1.3: Wed, 15 Sep 2004 18:25:58 -0400
- Abort processing if the flavour is in forbidden-list. Default of
[ "rss" ] (Idea of Wolfgang SCHNERRING.)
- Remove stray virgules from en-dashes. Patch by Wolfgang SCHNERRING.
1.5_1.2: Mon, 24 May 2004 08:14:54 -0400
- Some single quotes weren't replaced properly. Diff-tesuji played
by Benjamin GEIGER.
1.5_1.1: Sun, 14 Mar 2004 14:38:28 -0500
- Support upcoming pyblosxom 0.9 plugin verification feature.
1.5_1.0: Tue, 09 Mar 2004 08:08:35 -0500
- Initial release
Version Information
-------------------
Version numbers will track the SmartyPants_ version numbers, with the addition
of an underscore and the smartypants.py version on the end.
New versions will be available at `http://wiki.chad.org/SmartyPantsPy`_
.. _http://wiki.chad.org/SmartyPantsPy: http://wiki.chad.org/SmartyPantsPy
Authors
=======
`John Gruber`_ did all of the hard work of writing this software in Perl for
`Movable Type`_ and almost all of this useful documentation. `Chad Miller`_
ported it to Python to use with Pyblosxom_.
Additional Credits
==================
Portions of the SmartyPants original work are based on Brad Choate's nifty
MTRegex plug-in. `Brad Choate`_ also contributed a few bits of source code to
this plug-in. Brad Choate is a fine hacker indeed.
`Jeremy Hedley`_ and `Charles Wiltgen`_ deserve mention for exemplary beta
testing of the original SmartyPants.
`Rael Dornfest`_ ported SmartyPants to Blosxom.
.. _Brad Choate: http://bradchoate.com/
.. _Jeremy Hedley: http://antipixel.com/
.. _Charles Wiltgen: http://playbacktime.com/
.. _Rael Dornfest: http://raelity.org/
Copyright and License
=====================
SmartyPants_ license::
Copyright (c) 2003 John Gruber
(http://daringfireball.net/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license::
smartypants.py is a derivative work of SmartyPants.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
.. _John Gruber: http://daringfireball.net/
.. _Chad Miller: http://web.chad.org/
.. _Pyblosxom: http://roughingit.subtlehints.net/pyblosxom
.. _SmartyPants: http://daringfireball.net/projects/smartypants/
.. _Movable Type: http://www.movabletype.org/
"""
# Default transformation set: "1" means "perform all default SmartyPants
# transformations" (quotes, backticks, em-dashes, ellipses).
default_smartypants_attr = "1"
import re
# Opening/closing tags whose contents must never be educated.
# Group 1 is "/" for a closing tag (empty otherwise); group 2 is the tag name.
tags_to_skip_regex = re.compile(r"<(/)?(pre|code|kbd|script|math)[^>]*>", re.I)
def verify_installation(request):
    """Pyblosxom plugin hook: report that this plugin is functional.

    The *request* argument is ignored; returning the truthy value 1 tells
    pyblosxom's plugin verification that the plugin is installed correctly.
    """
    return 1
def cb_story(args):
    """Pyblosxom story callback: educate the entry body and title in place.

    Reads per-entry configuration (``smartypants_forbidden_flavours`` and
    ``smartypants_attributes``) from ``args["entry"]``, then rewrites the
    entry's "body" and "title" through smartyPants().
    """
    global default_smartypants_attr

    # Flavours for which no educating should happen at all.
    try:
        banned = args["entry"]["smartypants_forbidden_flavours"]
    except KeyError:
        banned = ["rss"]

    # Which transformations to apply; fall back to the module default.
    try:
        opts = args["entry"]["smartypants_attributes"]
    except KeyError:
        opts = default_smartypants_attr
    if opts is None:
        opts = default_smartypants_attr

    body_text = args["entry"].getData()

    try:
        if args["request"]["flavour"] in banned:
            return
    except KeyError:
        # No flavour available -- sniff the stream instead.
        if "<" in args["entry"]["body"][0:15]:
            return  # abort if it looks like escaped HTML.  FIXME
    # FIXME: make these configurable, perhaps?
    args["entry"]["body"] = smartyPants(body_text, opts)
    args["entry"]["title"] = smartyPants(args["entry"]["title"], opts)
### internal functions below here
def smartyPants(text, attr=default_smartypants_attr):
    """Apply the SmartyPants transformations selected by *attr* to *text*.

    Parameters:
      text -- string of text/HTML to educate.
      attr -- option string: "0" do nothing, "1" all default transforms,
              "2"/"3" old-school / inverted dash shorthand, "-1" stupefy
              mode, or any combination of the single-character flags
              q/b/B/d/D/i/e/w documented in the module docstring.

    Returns the transformed string.  Text inside <pre>, <code>, <kbd>,
    <script> and <math> tag blocks is left untouched.
    """
    # Should we translate quote entities into normal double quotes ("w")?
    # BUG FIX: this flag was initialised to False, but every test below
    # compares against the string "0"; since `False != "0"` is True, the
    # conversion branch ran even when "w" was never requested.  Initialise
    # it to "0" like the other flags.
    convert_quot = "0"

    # Stack of currently-open "skip" tags (pre/code/kbd/script/math).
    skipped_tag_stack = []

    # Per-transformation switches, parsed from `attr` below.
    do_dashes = "0"
    do_backticks = "0"
    do_quotes = "0"
    do_ellipses = "0"
    do_stupefy = "0"

    if attr == "0":
        # Do nothing.
        return text
    elif attr == "1":
        # Default: quotes, backticks, em-dashes, ellipses.
        do_quotes = "1"
        do_backticks = "1"
        do_dashes = "1"
        do_ellipses = "1"
    elif attr == "2":
        # Do everything, turn all options on, use old school dash shorthand.
        do_quotes = "1"
        do_backticks = "1"
        do_dashes = "2"
        do_ellipses = "1"
    elif attr == "3":
        # Do everything, turn all options on, use inverted old school
        # dash shorthand.
        do_quotes = "1"
        do_backticks = "1"
        do_dashes = "3"
        do_ellipses = "1"
    elif attr == "-1":
        # Special "stupefy" mode: reverse previous SmartyPants output.
        do_stupefy = "1"
    else:
        for c in attr:
            if c == "q": do_quotes = "1"
            elif c == "b": do_backticks = "1"
            elif c == "B": do_backticks = "2"
            elif c == "d": do_dashes = "1"
            elif c == "D": do_dashes = "2"
            elif c == "i": do_dashes = "3"
            elif c == "e": do_ellipses = "1"
            elif c == "w": convert_quot = "1"
            else:
                pass  # ignore unknown option

    tokens = _tokenize(text)
    result = []
    in_pre = False

    # This is a cheat, used to get some context for one-character tokens
    # that consist of just a quote char: remember the last character of
    # the previous text token, to curl single-character quotes correctly.
    prev_token_last_char = ""

    for cur_token in tokens:
        if cur_token[0] == "tag":
            # Don't mess with quotes inside some tags.  This does not
            # handle self <closing/> tags!
            result.append(cur_token[1])
            skip_match = tags_to_skip_regex.match(cur_token[1])
            if skip_match is not None:
                if not skip_match.group(1):
                    # Opening skip-tag: start (or continue) skipping.
                    skipped_tag_stack.append(skip_match.group(2).lower())
                    in_pre = True
                else:
                    if len(skipped_tag_stack) > 0:
                        if skip_match.group(2).lower() == skipped_tag_stack[-1]:
                            skipped_tag_stack.pop()
                        else:
                            # This close doesn't match the open.  This
                            # isn't XHTML.  We should barf here.
                            pass
                    if len(skipped_tag_stack) == 0:
                        in_pre = False
        else:
            t = cur_token[1]
            # Remember last char of this token before processing.
            last_char = t[-1:]
            if not in_pre:
                t = processEscapes(t)

                if convert_quot != "0":
                    # NOTE(review): upstream substitutes the `&quot;`
                    # entity here; in this copy the entity text appears to
                    # have been HTML-decoded, leaving an identity
                    # substitution -- confirm against upstream source.
                    t = re.sub('"', '"', t)

                if do_dashes != "0":
                    if do_dashes == "1":
                        t = educateDashes(t)
                    if do_dashes == "2":
                        t = educateDashesOldSchool(t)
                    if do_dashes == "3":
                        t = educateDashesOldSchoolInverted(t)

                if do_ellipses != "0":
                    t = educateEllipses(t)

                # Note: backticks need to be processed before quotes.
                if do_backticks != "0":
                    t = educateBackticks(t)
                    if do_backticks == "2":
                        t = educateSingleBackticks(t)

                if do_quotes != "0":
                    if t == "'":
                        # Special case: single-character ' token.
                        if re.match(r"\S", prev_token_last_char):
                            t = "’"
                        else:
                            t = "‘"
                    elif t == '"':
                        # Special case: single-character " token.
                        if re.match(r"\S", prev_token_last_char):
                            t = "”"
                        else:
                            t = "“"
                    else:
                        # Normal case:
                        t = educateQuotes(t)

                if do_stupefy == "1":
                    t = stupefyEntities(t)

            prev_token_last_char = last_char
            result.append(t)

    return "".join(result)
def educateQuotes(str):
    """Return the string with straight quotes "educated" into curly ones.

    Example input:  "Isn't this fun?"
    Example output: “Isn’t this fun?”
    """
    punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""

    # Special case if the very first character is a quote followed by
    # punctuation at a non-word-break: close the quote by brute force.
    str = re.sub(r"""^'(?=%s\\B)""" % (punct_class,), r"""’""", str)
    str = re.sub(r"""^"(?=%s\\B)""" % (punct_class,), r"""”""", str)

    # Special case for double sets of quotes, e.g.:
    #   <p>He said, "'Quoted' words in a larger quote."</p>
    str = re.sub(r""""'(?=\w)""", """“‘""", str)
    str = re.sub(r"""'"(?=\w)""", """‘“""", str)

    # Special case for decade abbreviations (the '80s):
    str = re.sub(r"""\b'(?=\d{2}s)""", r"""’""", str)

    close_class = r"""[^\ \t\r\n\[\{\(\-]"""
    dec_dashes = r"""–|—"""

    # Get most opening single quotes:
    open_single = re.compile(r"""
            (
                \s           |  # a whitespace char, or
                \xa0         |  # a non-breaking space (literal NBSP upstream), or
                --           |  # dashes, or
                &[mn]dash;   |  # named dash entities
                %s           |  # or decimal entities
                &\#x201[34];    # or hex
            )
            '                   # the quote
            (?=\w)              # followed by a word character
            """ % (dec_dashes,), re.VERBOSE)
    str = open_single.sub(r"""\1‘""", str)

    # Closing single quotes not followed by whitespace, a possessive "s",
    # or a digit:
    close_single = re.compile(r"""
            (%s)
            '
            (?!\s | s\b | \d)
            """ % (close_class,), re.VERBOSE)
    str = close_single.sub(r"""\1’""", str)

    # Closing single quotes followed by whitespace or a possessive "s":
    close_single_trailing = re.compile(r"""
            (%s)
            '
            (\s | s\b)
            """ % (close_class,), re.VERBOSE)
    str = close_single_trailing.sub(r"""\1’\2""", str)

    # Any remaining single quotes should be opening ones:
    str = re.sub(r"""'""", r"""‘""", str)

    # Get most opening double quotes:
    open_double = re.compile(r"""
            (
                \s           |  # a whitespace char, or
                \xa0         |  # a non-breaking space (literal NBSP upstream), or
                --           |  # dashes, or
                &[mn]dash;   |  # named dash entities
                %s           |  # or decimal entities
                &\#x201[34];    # or hex
            )
            "                   # the quote
            (?=\w)              # followed by a word character
            """ % (dec_dashes,), re.VERBOSE)
    str = open_double.sub(r"""\1“""", str)

    # Double closing quotes followed by whitespace:
    close_double_before_ws = re.compile(r"""
            #(%s)? # character that indicates the quote should be closing
            "
            (?=\s)
            """ % (close_class,), re.VERBOSE)
    str = close_double_before_ws.sub(r"""”""", str)

    # Double closing quotes preceded by a "closing context" character:
    close_double = re.compile(r"""
            (%s)   # character that indicates the quote should be closing
            "
            """ % (close_class,), re.VERBOSE)
    str = close_double.sub(r"""\1”""", str)

    # Any remaining quotes should be opening ones.
    str = re.sub(r'"', r"""“""", str)

    return str
def educateBackticks(str):
    """Translate ``backticks''-style double quotes into curly quotes.

    Example input:  ``Isn't this fun?''
    Example output: “Isn't this fun?”
    """
    # Both patterns are literal, so plain replacement is equivalent.
    return str.replace("``", "“").replace("''", "”")
def educateSingleBackticks(str):
    """Translate `backticks'-style single quotes into curly quotes.

    Example input:  `Isn't this fun?'
    Example output: ‘Isn’t this fun?’
    """
    # Both patterns are literal, so plain replacement is equivalent.
    return str.replace("`", "‘").replace("'", "’")
def educateDashes(str):
    """Translate dash shorthand into dash characters.

    "---" becomes an en dash and "--" an em dash; the triple-dash must be
    consumed first so it is not eaten by the double-dash rule.
    """
    out = str.replace("---", "–")   # en (yes, backwards)
    out = out.replace("--", "—")    # em (yes, backwards)
    return out
def educateDashesOldSchool(str):
    """Translate old-school typewriter dash shorthand.

    "---" becomes an em dash and "--" an en dash; the triple-dash must be
    consumed first so it is not eaten by the double-dash rule.
    """
    out = str.replace("---", "—")   # em (yes, backwards)
    out = out.replace("--", "–")    # en (yes, backwards)
    return out
def educateDashesOldSchoolInverted(str):
    """Translate inverted old-school dash shorthand.

    Intended mapping: "--" for em dashes, "---" for en dashes.  Rationale
    (per the original author): it is compatible with pre-1.1 SmartyPants
    entries where "--" meant an em dash, and em dashes are more common, so
    the shorter shorthand gets the more common dash.  (Thanks to Aaron
    Swartz for the idea.)

    NOTE(review): in this copy the substitutions are character-for-character
    identical to educateDashes() -- the HTML entities appear to have been
    decoded; confirm against the upstream source.
    """
    out = str.replace("---", "–")   # em
    out = out.replace("--", "—")    # en
    return out
def educateEllipses(str):
    """Translate "..." and ". . ." into a single ellipsis character.

    Example input:  Huh...?
    Example output: Huh…?
    """
    # Both patterns are literal, so plain replacement is equivalent.
    out = str.replace("...", "…")
    return out.replace(". . .", "…")
def stupefyEntities(str):
    """Translate SmartyPants "smart" characters back to plain ASCII.

    Example input:  “Hello — world.”
    Example output: "Hello -- world."
    """
    # Each substitution is literal, applied in the same order as before.
    for fancy, plain in (
        ("–", "-"),    # en-dash
        ("—", "--"),   # em-dash
        ("‘", "'"),    # open single quote
        ("’", "'"),    # close single quote
        ("“", '"'),    # open double quote
        ("”", '"'),    # close double quote
        ("…", "..."),  # ellipsis
    ):
        str = str.replace(fancy, plain)
    return str
def processEscapes(str):
r"""
Parameter: String.
Returns: The string, with after processing the following backslash
escape sequences. This is useful if you want to force a "dumb"
quote or other character to appear.
Escape Value
------ -----
\\ \
\" "
\' '
\. .
\- -
\` `
"""
# NOTE(review): upstream smartypants.py replaces each escape sequence
# with a decimal-encoded HTML entity (e.g. \\ -> &#92;, \" -> &#34;).
# In this copy the entity text appears to have been HTML-decoded into
# raw characters, which leaves the first two replacement literals
# malformed (r"""\""" and r""""""") -- confirm against the upstream
# source before relying on this function.
str = re.sub(r"""\\\\""", r"""\""", str)
str = re.sub(r'''\\"''', r""""""", str)
str = re.sub(r"""\\'""", r"""'""", str)
str = re.sub(r"""\\\.""", r""".""", str)
str = re.sub(r"""\\-""", r"""-""", str)
str = re.sub(r"""\\`""", r"""`""", str)
return str
def _tokenize(str):
"""
Parameter: String containing HTML markup.
Returns: Reference to an array of the tokens comprising the input
string. Each token is either a tag (possibly with nested,
tags contained therein, such as <a href="<MTFoo>">, or a
run of text between tags. Each element of the array is a
two-element array; the first is either 'tag' or 'text';
the second is the actual value.
Based on the _tokenize() subroutine from Brad Choate's MTRegex plugin.
<http://www.bradchoate.com/past/mtregex.php>
"""
pos = 0
length = len(str)
tokens = []
depth = 6
nested_tags = "|".join(['(?:<(?:[^<>]',] * depth) + (')*>)' * depth)
#match = r"""(?: <! ( -- .*? -- \s* )+ > ) | # comments
# (?: <\? .*? \?> ) | # directives
# %s # nested tags """ % (nested_tags,)
tag_soup = re.compile(r"""([^<]*)(<[^>]*>)""")
token_match = tag_soup.search(str)
previous_end = 0
while token_match is not None:
if token_match.group(1):
tokens.append(['text', token_match.group(1)])
tokens.append(['tag', token_match.group(2)])
previous_end = token_match.end()
token_match = tag_soup.search(str, token_match.end())
if previous_end < len(str):
tokens.append(['text', str[previous_end:]])
return tokens
# Self-test / demo harness: renders the module docstring to HTML via
# docutils, then runs the unit tests below with unittest.
# NOTE(review): `print docstring_html` is a Python 2 print statement;
# this file predates Python 3 support.
if __name__ == "__main__":
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_string
docstring_html = publish_string(__doc__, writer_name='html')
print docstring_html
# Unit test output goes out stderr. No worries.
import unittest
sp = smartyPants
class TestSmartypantsAllAttributes(unittest.TestCase):
# the default attribute is "1", which means "all".
def test_dates(self):
self.assertEqual(sp("1440-80's"), "1440-80’s")
self.assertEqual(sp("1440-'80s"), "1440-‘80s")
self.assertEqual(sp("1440---'80s"), "1440–‘80s")
self.assertEqual(sp("1960s"), "1960s") # no effect.
self.assertEqual(sp("1960's"), "1960’s")
self.assertEqual(sp("one two '60s"), "one two ‘60s")
self.assertEqual(sp("'60s"), "‘60s")
def test_skip_tags(self):
self.assertEqual(
sp("""<script type="text/javascript">\n<!--\nvar href = "http://www.google.com";\nvar linktext = "google";\ndocument.write('<a href="' + href + '">' + linktext + "</a>");\n//-->\n</script>"""),
"""<script type="text/javascript">\n<!--\nvar href = "http://www.google.com";\nvar linktext = "google";\ndocument.write('<a href="' + href + '">' + linktext + "</a>");\n//-->\n</script>""")
self.assertEqual(
sp("""<p>He said "Let's write some code." This code here <code>if True:\n\tprint "Okay"</code> is python code.</p>"""),
"""<p>He said “Let’s write some code.” This code here <code>if True:\n\tprint "Okay"</code> is python code.</p>""")
def test_ordinal_numbers(self):
self.assertEqual(sp("21st century"), "21st century") # no effect.
self.assertEqual(sp("3rd"), "3rd") # no effect.
def test_educated_quotes(self):
self.assertEqual(sp('''"Isn't this fun?"'''), '''“Isn’t this fun?”''')
unittest.main()
# Module metadata.
__author__ = "Chad Miller <smartypantspy@chad.org>"
__version__ = "1.5_1.6: Fri, 27 Jul 2007 07:06:40 -0400"
__url__ = "http://wiki.chad.org/SmartyPantsPy"
__description__ = "Smart-quotes, smart-ellipses, and smart-dashes for weblog entries in pyblosxom"
| mit |
libscie/liberator | liberator/lib/python3.6/site-packages/pip/baseparser.py | 339 | 10465 | """Base option parser setup"""
from __future__ import absolute_import
import sys
import optparse
import os
import re
import textwrap
from distutils.util import strtobool
from pip._vendor.six import string_types
from pip._vendor.six.moves import configparser
from pip.locations import (
legacy_config_file, config_basename, running_under_virtualenv,
site_config_files
)
from pip.utils import appdirs, get_terminal_size
# Environment variables starting with PIP_ are read as configuration;
# the prefix is stripped and the remainder lower-cased (see get_environ_vars).
_environ_prefix_re = re.compile(r"^PIP_", re.I)
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
    """A prettier/less verbose help formatter for optparse."""

    def __init__(self, *args, **kwargs):
        # Help position must be aligned with __init__.parseopts.description.
        kwargs['max_help_position'] = 30
        kwargs['indent_increment'] = 1
        kwargs['width'] = get_terminal_size()[0] - 2
        optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)

    def format_option_strings(self, option):
        return self._format_option_strings(option, ' <%s>', ', ')

    def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
        """Return a comma-separated list of option strings and metavars.

        :param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
        :param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
        :param optsep: separator
        """
        pieces = []
        if option._short_opts:
            pieces.append(option._short_opts[0])
        if option._long_opts:
            pieces.append(option._long_opts[0])
        if len(pieces) > 1:
            pieces.insert(1, optsep)

        if option.takes_value():
            metavar = option.metavar or option.dest.lower()
            pieces.append(mvarfmt % metavar.lower())

        return ''.join(pieces)

    def format_heading(self, heading):
        # The implicit "Options" heading is suppressed entirely.
        return '' if heading == 'Options' else heading + ':\n'

    def format_usage(self, usage):
        """Ensure there is only one newline between usage and the first
        heading if there is no description."""
        return '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")

    def format_description(self, description):
        # Leave full control over description to us.
        if not description:
            return ''
        label = 'Commands' if hasattr(self.parser, 'main') else 'Description'
        # Some doc strings have initial newlines and/or trailing
        # whitespace; normalize both, then dedent and reindent.
        description = description.lstrip('\n').rstrip()
        description = self.indent_lines(textwrap.dedent(description), " ")
        return '%s:\n%s\n' % (label, description)

    def format_epilog(self, epilog):
        # Leave full control over epilog to us.
        return epilog if epilog else ''

    def indent_lines(self, text, indent):
        return "\n".join(indent + line for line in text.split('\n'))
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
    """Custom help formatter for use in ConfigOptionParser.

    Refreshes the parser's defaults before optparse expands "%default"
    markers, so values coming from config files / environment variables
    show up correctly in the help listing.
    """

    def expand_default(self, option):
        parser = self.parser
        if parser is not None:
            parser._update_defaults(parser.defaults)
        return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):
    """OptionParser with positional group insertion and a flattened
    view of all options."""

    def insert_option_group(self, idx, *args, **kwargs):
        """Insert an OptionGroup at a given position."""
        # add_option_group() always appends; move the new group to `idx`.
        group = self.add_option_group(*args, **kwargs)
        self.option_groups.pop()
        self.option_groups.insert(idx, group)
        return group

    @property
    def option_list_all(self):
        """Get a list of all options, including those in option groups."""
        everything = list(self.option_list)
        for group in self.option_groups:
            everything.extend(group.option_list)
        return everything
class ConfigOptionParser(CustomOptionParser):
"""Custom option parser which updates its defaults by checking the
configuration files and environmental variables"""
isolated = False
def __init__(self, *args, **kwargs):
self.config = configparser.RawConfigParser()
self.name = kwargs.pop('name')
self.isolated = kwargs.pop("isolated", False)
self.files = self.get_config_files()
if self.files:
self.config.read(self.files)
assert self.name
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
# the files returned by this method will be parsed in order with the
# first files listed being overridden by later files in standard
# ConfigParser fashion
config_file = os.environ.get('PIP_CONFIG_FILE', False)
if config_file == os.devnull:
return []
# at the base we have any site-wide configuration
files = list(site_config_files)
# per-user configuration next
if not self.isolated:
if config_file and os.path.exists(config_file):
files.append(config_file)
else:
# This is the legacy config file, we consider it to be a lower
# priority than the new file location.
files.append(legacy_config_file)
# This is the new config file, we consider it to be a higher
# priority than the legacy file.
files.append(
os.path.join(
appdirs.user_config_dir("pip"),
config_basename,
)
)
# finally virtualenv configuration first trumping others
if running_under_virtualenv():
venv_config_file = os.path.join(
sys.prefix,
config_basename,
)
if os.path.exists(venv_config_file):
files.append(venv_config_file)
return files
def check_default(self, option, key, val):
try:
return option.check_value(key, val)
except optparse.OptionValueError as exc:
print("An error occurred during configuration: %s" % exc)
sys.exit(3)
def _update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
for section in ('global', self.name):
config.update(
self.normalize_keys(self.get_config_section(section))
)
# 2. environmental variables
if not self.isolated:
config.update(self.normalize_keys(self.get_environ_vars()))
# Accumulate complex default state.
self.values = optparse.Values(self.defaults)
late_eval = set()
# Then set the options with those values
for key, val in config.items():
# ignore empty values
if not val:
continue
option = self.get_option(key)
# Ignore options not present in this parser. E.g. non-globals put
# in [global] by users that want them to apply to all applicable
# commands.
if option is None:
continue
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
elif option.action == 'append':
val = val.split()
val = [self.check_default(option, key, v) for v in val]
elif option.action == 'callback':
late_eval.add(option.dest)
opt_str = option.get_opt_string()
val = option.convert_value(opt_str, val)
# From take_action
args = option.callback_args or ()
kwargs = option.callback_kwargs or {}
option.callback(option, opt_str, val, self, *args, **kwargs)
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
for key in late_eval:
defaults[key] = getattr(self.values, key)
self.values = None
return defaults
def normalize_keys(self, items):
    """Return a config dictionary whose keys are spelled like long
    command-line options ("--like-this"), regardless of whether they
    came from environment variables or from config files."""
    result = {}
    for raw_key, value in items:
        key = raw_key.replace('_', '-')
        if not key.startswith('--'):
            # only prefer long opts
            key = '--%s' % key
        result[key] = value
    return result
def get_config_section(self, name):
    """Return the (key, value) pairs of configuration section *name*,
    or an empty list when no such section exists."""
    if not self.config.has_section(name):
        return []
    return self.config.items(name)
def get_environ_vars(self):
    """Returns a generator with all environmental vars with prefix PIP_"""
    # _environ_prefix_re is a module-level regex (not visible in this
    # chunk) matching the variable-name prefix; the prefix is stripped
    # and the remainder lower-cased so the keys line up with the option
    # names used in config files.
    for key, val in os.environ.items():
        if _environ_prefix_re.search(key):
            yield (_environ_prefix_re.sub("", key).lower(), val)
def get_default_values(self):
    """Overriding to make updating the defaults after instantiation of
    the option parser possible, _update_defaults() does the dirty work."""
    if not self.process_default_values:
        # Old, pre-Optik 1.5 behaviour.
        return optparse.Values(self.defaults)

    # Work on a copy so config/environment overrides never mutate
    # self.defaults itself.
    defaults = self._update_defaults(self.defaults.copy())  # ours
    for option in self._get_all_options():
        default = defaults.get(option.dest)
        if isinstance(default, string_types):
            # String defaults still need to pass through the option's
            # type checker.  NOTE(review): string_types is a module-level
            # py2/py3 compatibility alias, not visible in this chunk.
            opt_str = option.get_opt_string()
            defaults[option.dest] = option.check_value(opt_str, default)
    return optparse.Values(defaults)
def error(self, msg):
    """Report a usage error: print usage to stderr and exit with status 2."""
    self.print_usage(sys.stderr)
    self.exit(2, "%s\n" % msg)
| cc0-1.0 |
sns-chops/multiphonon | tests/backward/sqe2dos_TestCase2.py | 1 | 1138 | #!/usr/bin/env python
#
interactive = False
import sys, os
datadir = os.path.join(os.path.dirname(__file__), "../data")
sys.path.insert(0, datadir)
here = os.path.dirname(__file__)
import unittest, warnings
import numpy as np, histogram.hdf as hh, histogram as H
from multiphonon.backward import sqe2dos
from dos import loadDOS
class TestCase(unittest.TestCase):
    """Regression test for multiphonon.backward.sqe2dos on the XYZ2
    dataset ("V exp" per the test docstring)."""

    def test2a(self):
        "sqe2dos: V exp"
        # Input S(Q,E) histogram and an initial DOS guess, loaded from
        # the shared test data directory.
        iqehist = hh.load(os.path.join(datadir, 'XYZ2-iqe-Ei_20.h5'))
        initdos = hh.load(os.path.join(datadir, 'XYZ2-initdos-Ei_80.h5'))
        iterdos = sqe2dos.sqe2dos(
            iqehist, T=300,
            Ecutoff=17.1, elastic_E_cutoff=(-3., 1.1), M=79.452,
            C_ms=.05, Ei=20., workdir='work-XYZ2',
            initdos = initdos)
        # sqe2dos returns an iterator; draining it runs the full
        # iterative computation and writes results under work-XYZ2/.
        list(iterdos)
        if interactive:
            # Only reachable when the file runs as a script, which sets
            # interactive=True and imports pylab at module level.
            dos = hh.load('work-XYZ2/final-dos.h5')
            pylab.errorbar(dos.E, dos.I, dos.E2**.5, label='final')
            pylab.legend()
            pylab.show()
        return

    pass  # end of TestCase
if __name__ == "__main__":
    # Script mode: enable the plotting branch of the tests above and
    # make pylab available to them before running the suite.
    interactive = True
    import pylab
    unittest.main()
| mit |
maxive/erp | addons/auth_oauth/models/res_users.py | 1 | 4942 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import requests
from odoo import api, fields, models
from odoo.exceptions import AccessDenied, UserError
from odoo.addons.auth_signup.models.res_users import SignupError
from odoo.addons import base
base.models.res_users.USER_PRIVATE_FIELDS.append('oauth_access_token')
class ResUsers(models.Model):
    """Extend res.users with OAuth-based sign-in / sign-up support."""

    _inherit = 'res.users'

    # OAuth identity of the user: which provider authenticated them, the
    # provider-side user id, and the last validated access token.
    oauth_provider_id = fields.Many2one('auth.oauth.provider', string='OAuth Provider')
    oauth_uid = fields.Char(string='OAuth User ID', help="Oauth Provider user_id", copy=False)
    oauth_access_token = fields.Char(string='OAuth Access Token', readonly=True, copy=False)

    _sql_constraints = [
        ('uniq_users_oauth_provider_oauth_uid', 'unique(oauth_provider_id, oauth_uid)', 'OAuth UID must be unique per provider'),
    ]

    @api.model
    def _auth_oauth_rpc(self, endpoint, access_token):
        """GET *endpoint* with the access token as a query parameter and
        return the decoded JSON response."""
        return requests.get(endpoint, params={'access_token': access_token}).json()

    @api.model
    def _auth_oauth_validate(self, provider, access_token):
        """ return the validation data corresponding to the access token """
        oauth_provider = self.env['auth.oauth.provider'].browse(provider)
        # Ask the provider's validation endpoint whether the token is valid.
        validation = self._auth_oauth_rpc(oauth_provider.validation_endpoint, access_token)
        if validation.get("error"):
            raise Exception(validation['error'])
        # Optionally enrich the validation payload with extra user data
        # from the provider's data endpoint.
        if oauth_provider.data_endpoint:
            data = self._auth_oauth_rpc(oauth_provider.data_endpoint, access_token)
            validation.update(data)
        return validation

    @api.model
    def _generate_signup_values(self, provider, validation, params):
        """Build the res.users values used to sign up a new OAuth user.

        Falls back to a synthetic login when the provider did not return
        an email address.
        """
        oauth_uid = validation['user_id']
        email = validation.get('email', 'provider_%s_user_%s' % (provider, oauth_uid))
        name = validation.get('name', email)
        return {
            'name': name,
            'login': email,
            'email': email,
            'oauth_provider_id': provider,
            'oauth_uid': oauth_uid,
            'oauth_access_token': params['access_token'],
            'active': True,
        }

    @api.model
    def _auth_oauth_signin(self, provider, validation, params):
        """ retrieve and sign in the user corresponding to provider and validated access token
            :param provider: oauth provider id (int)
            :param validation: result of validation of access token (dict)
            :param params: oauth parameters (dict)
            :return: user login (str)
            :raise: AccessDenied if signin failed

            This method can be overridden to add alternative signin methods.
        """
        oauth_uid = validation['user_id']
        try:
            oauth_user = self.search([("oauth_uid", "=", oauth_uid), ('oauth_provider_id', '=', provider)])
            if not oauth_user:
                raise AccessDenied()
            assert len(oauth_user) == 1
            # Remember the latest token: it doubles as an API credential
            # (see check_credentials below).
            oauth_user.write({'oauth_access_token': params['access_token']})
            return oauth_user.login
        except AccessDenied as access_denied_exception:
            # Unknown user: attempt signup, unless the caller explicitly
            # disabled user creation via context.
            if self.env.context.get('no_user_creation'):
                return None
            state = json.loads(params['state'])
            # optional signup token carried in the OAuth 'state' payload
            token = state.get('t')
            values = self._generate_signup_values(provider, validation, params)
            try:
                _, login, _ = self.signup(values, token)
                return login
            except (SignupError, UserError):
                # Signup failed: surface the original access denial.
                raise access_denied_exception

    @api.model
    def auth_oauth(self, provider, params):
        """Validate the token in *params* and return the session
        credentials ``(dbname, login, access_token)``.

        :raise: AccessDenied when validation or sign-in fails.
        """
        # Advice by Google (to avoid Confused Deputy Problem)
        # if validation.audience != OUR_CLIENT_ID:
        #     abort()
        # else:
        #     continue with the process
        access_token = params.get('access_token')
        validation = self._auth_oauth_validate(provider, access_token)
        # required check
        if not validation.get('user_id'):
            # Workaround: facebook does not send 'user_id' in Open Graph Api
            if validation.get('id'):
                validation['user_id'] = validation['id']
            else:
                raise AccessDenied()
        # retrieve and sign in user
        login = self._auth_oauth_signin(provider, validation, params)
        if not login:
            raise AccessDenied()
        # return user credentials
        return (self.env.cr.dbname, login, access_token)

    @api.model
    def check_credentials(self, password):
        """Accept the stored OAuth access token as an alternative password.

        Tries the standard credential check first; if that denies access,
        the call is accepted when *password* matches the current user's
        stored oauth_access_token, otherwise the denial is re-raised.
        """
        try:
            return super(ResUsers, self).check_credentials(password)
        except AccessDenied:
            res = self.sudo().search([('id', '=', self.env.uid), ('oauth_access_token', '=', password)])
            if not res:
                raise

    def _get_session_token_fields(self):
        # Include the OAuth token in the session-token fingerprint so
        # existing sessions are invalidated when the token changes.
        return super(ResUsers, self)._get_session_token_fields() | {'oauth_access_token'}
| agpl-3.0 |
faux123/samsung_GS2 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
# wxPython is a hard requirement for this GUI; fail early with a clear
# message when it is missing.  (Python 2 raise syntax: this file targets
# Python 2 only.)
try:
    import wx
except ImportError:
    raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window drawing the scheduler trace as scrollable,
    zoomable rows of rectangles (one row per rectangle index reported
    by the sched_tracer)."""
    Y_OFFSET = 100           # vertical margin above the first row, in px
    RECT_HEIGHT = 100        # drawn height of one trace row
    RECT_SPACE = 50          # vertical gap between rows
    EVENT_MARKING_WIDTH = 5  # height of the event-marker strip on a row

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        # Register ourselves so the tracer can call back (e.g. for drawing).
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area; paint/key/mouse handlers are bound on
        # both the panel and its container so events reach us either way
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        # summary StaticText widget, created lazily by update_summary()
        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        # time units -> pixels at the current zoom (1000 time units per
        # unscaled pixel)
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # pixels -> time units at the current zoom (inverse of us_to_px)
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current viewport origin, converted from scroll units to pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Timestamp offset of the viewport's left edge.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        # Draw one rectangle on row *nr* covering timestamps
        # [start, end]; top_color, when given, paints a thin
        # event-marker strip along the rectangle's top edge.
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the main rectangle so it sits below the marker strip.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r, g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Delegate the actual drawing of [start, end] (absolute
        # timestamps) to the tracer, which calls paint_rectangle_zone().
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only the currently visible time window.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel back to a row index; returns -1 when the pixel
        # falls in the top margin, past the last row, or in the gap
        # between rows.  (Python 2: '/' is integer division here.)
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        # Replace the summary text widget below the drawing area.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        # Absolute timestamp under the cursor.
        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Recompute the virtual width for the new zoom factor and keep
        # timestamp *x* pinned at the left edge of the viewport.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # '+'/'-' zoom; arrow keys scroll by one scroll unit.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
Demolisty24/AlexaFood-Backend | venv/Lib/site-packages/requests/packages/chardet/escprober.py | 2936 | 3187 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
    """Detects charsets that use escape sequences (HZ-GB-2312 and the
    ISO-2022 family) by running the input through one coding state
    machine per candidate encoding."""

    def __init__(self):
        CharSetProber.__init__(self)
        self._mCodingSM = [
            CodingStateMachine(HZSMModel),
            CodingStateMachine(ISO2022CNSMModel),
            CodingStateMachine(ISO2022JPSMModel),
            CodingStateMachine(ISO2022KRSMModel)
        ]
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        for codingSM in self._mCodingSM:
            if not codingSM:
                continue
            codingSM.active = True
            codingSM.reset()
        # Number of state machines still in the running.
        self._mActiveSM = len(self._mCodingSM)
        self._mDetectedCharset = None

    def get_charset_name(self):
        return self._mDetectedCharset

    def get_confidence(self):
        # A completed escape-sequence match is essentially unambiguous.
        if self._mDetectedCharset:
            return 0.99
        else:
            return 0.00

    def feed(self, aBuf):
        for c in aBuf:
            # PY3K: aBuf is a byte array, so c is an int, not a byte
            for codingSM in self._mCodingSM:
                if not codingSM:
                    continue
                if not codingSM.active:
                    continue
                codingState = codingSM.next_state(wrap_ord(c))
                if codingState == constants.eError:
                    # This machine can no longer match; deactivate it.
                    codingSM.active = False
                    self._mActiveSM -= 1
                    if self._mActiveSM <= 0:
                        # All candidates ruled out.
                        self._mState = constants.eNotMe
                        return self.get_state()
                elif codingState == constants.eItsMe:
                    self._mState = constants.eFoundIt
                    self._mDetectedCharset = codingSM.get_coding_state_machine()  # nopep8
                    return self.get_state()
        return self.get_state()
| mit |
2014c2g3/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/xml/sax/__init__.py | 637 | 3505 | """Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from .xmlreader import InputSource
from .handler import ContentHandler, ErrorHandler
from ._exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=ErrorHandler()):
    """Parse *source* with a freshly created parser, dispatching content
    events to *handler* and errors to *errorHandler*.

    NOTE(review): the default errorHandler instance is created once at
    import time and shared between calls; ErrorHandler is stateless, so
    this is harmless, but do not add state to it.
    """
    parser = make_parser()
    parser.setContentHandler(handler)
    parser.setErrorHandler(errorHandler)
    parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
    """Parse an in-memory document (a bytes object) with a new parser."""
    from io import BytesIO

    # Explicitly-passed None still gets a working error handler.
    if errorHandler is None:
        errorHandler = ErrorHandler()
    parser = make_parser()
    parser.setContentHandler(handler)
    parser.setErrorHandler(errorHandler)
    inpsrc = InputSource()
    inpsrc.setByteStream(BytesIO(string))
    parser.parse(inpsrc)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.expatreader"]

# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
    import xml.sax.expatreader

import os, sys
# NOTE(review): the upstream CPython env-var override is disabled in
# this (Brython) copy of the module.
#if "PY_SAX_PARSER" in os.environ:
#    default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os

# Jython only: the default parser list may be overridden via the registry.
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
    default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
    """Creates and returns a SAX parser.

    Creates the first parser it is able to instantiate of the ones
    given in the list created by doing parser_list +
    default_parser_list.  The lists must contain the names of Python
    modules containing both a SAX parser and a create_parser function."""
    # NOTE(review): the mutable default argument is never mutated here,
    # so the shared-default pitfall does not apply.
    for parser_name in parser_list + default_parser_list:
        try:
            return _create_parser(parser_name)
        except ImportError as e:
            import sys
            if parser_name in sys.modules:
                # The parser module was found, but importing it
                # failed unexpectedly, pass this exception through
                raise
        except SAXReaderNotAvailable:
            # The parser module detected that it won't work properly,
            # so try the next one
            pass

    raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser

if sys.platform[ : 4] == "java":
    def _create_parser(parser_name):
        # Jython: import the driver through org.python.core's machinery.
        from org.python.core import imp
        drv_module = imp.importName(parser_name, 0, globals())
        return drv_module.create_parser()

else:
    def _create_parser(parser_name):
        # CPython: dynamic import of the named driver module.
        drv_module = __import__(parser_name,{},{},['create_parser'])
        return drv_module.create_parser()

# 'sys' was only needed for the platform checks above.
del sys
| gpl-3.0 |
YilunZhou/Klampt | Python/klampt/src/robotsim.py | 2 | 184716 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""
Klamp't Core Python bindings
"""
# SWIG boilerplate: locate and load the compiled _robotsim extension
# module that lives next to this file.  (Generated code; do not hand-edit.)
from sys import version_info
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_robotsim', [dirname(__file__)])
        except ImportError:
            # Fall back to a regular import (e.g. installed on sys.path).
            import _robotsim
            return _robotsim
        if fp is not None:
            try:
                _mod = imp.load_module('_robotsim', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _robotsim = swig_import_helper()
    del swig_import_helper
else:
    import _robotsim
del version_info
# SWIG-generated attribute dispatch helpers shared by every proxy class
# below.  (Generated code; do not hand-edit.)
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.

def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Route attribute writes through the class's __swig_setmethods__
    # table; 'this'/'thisown' address the underlying C++ object.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)

def _swig_setattr(self,class_type,name,value):
    return _swig_setattr_nondynamic(self,class_type,name,value,0)

def _swig_getattr(self,class_type,name):
    # Route attribute reads through the class's __swig_getmethods__ table.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)

def _swig_repr(self):
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)

try:
    _object = object
    _newclass = 1
except AttributeError:
    # Ancient Pythons without new-style classes.
    class _object : pass
    _newclass = 0
class SwigPyIterator(_object):
    """SWIG proxy for the C++ iterator type.  Abstract: the constructor
    raises; instances come from the container classes' iterator().
    (Generated code; do not hand-edit.)"""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _robotsim.delete_SwigPyIterator
    __del__ = lambda self : None;
    def value(self): return _robotsim.SwigPyIterator_value(self)
    def incr(self, n=1): return _robotsim.SwigPyIterator_incr(self, n)
    def decr(self, n=1): return _robotsim.SwigPyIterator_decr(self, n)
    def distance(self, *args): return _robotsim.SwigPyIterator_distance(self, *args)
    def equal(self, *args): return _robotsim.SwigPyIterator_equal(self, *args)
    def copy(self): return _robotsim.SwigPyIterator_copy(self)
    def next(self): return _robotsim.SwigPyIterator_next(self)
    def __next__(self): return _robotsim.SwigPyIterator___next__(self)
    def previous(self): return _robotsim.SwigPyIterator_previous(self)
    def advance(self, *args): return _robotsim.SwigPyIterator_advance(self, *args)
    def __eq__(self, *args): return _robotsim.SwigPyIterator___eq__(self, *args)
    def __ne__(self, *args): return _robotsim.SwigPyIterator___ne__(self, *args)
    def __iadd__(self, *args): return _robotsim.SwigPyIterator___iadd__(self, *args)
    def __isub__(self, *args): return _robotsim.SwigPyIterator___isub__(self, *args)
    def __add__(self, *args): return _robotsim.SwigPyIterator___add__(self, *args)
    def __sub__(self, *args): return _robotsim.SwigPyIterator___sub__(self, *args)
    def __iter__(self): return self
SwigPyIterator_swigregister = _robotsim.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class doubleArray(_object):
    """SWIG proxy for a C array of double.  (Generated code; do not
    hand-edit.)"""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, doubleArray, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, doubleArray, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _robotsim.new_doubleArray(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_doubleArray
    __del__ = lambda self : None;
    def __getitem__(self, *args): return _robotsim.doubleArray___getitem__(self, *args)
    def __setitem__(self, *args): return _robotsim.doubleArray___setitem__(self, *args)
    def cast(self): return _robotsim.doubleArray_cast(self)
    __swig_getmethods__["frompointer"] = lambda x: _robotsim.doubleArray_frompointer
    if _newclass:frompointer = staticmethod(_robotsim.doubleArray_frompointer)
doubleArray_swigregister = _robotsim.doubleArray_swigregister
doubleArray_swigregister(doubleArray)

def doubleArray_frompointer(*args):
  return _robotsim.doubleArray_frompointer(*args)
doubleArray_frompointer = _robotsim.doubleArray_frompointer
class floatArray(_object):
    """SWIG proxy for a C array of float.  (Generated code; do not
    hand-edit.)"""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, floatArray, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, floatArray, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _robotsim.new_floatArray(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_floatArray
    __del__ = lambda self : None;
    def __getitem__(self, *args): return _robotsim.floatArray___getitem__(self, *args)
    def __setitem__(self, *args): return _robotsim.floatArray___setitem__(self, *args)
    def cast(self): return _robotsim.floatArray_cast(self)
    __swig_getmethods__["frompointer"] = lambda x: _robotsim.floatArray_frompointer
    if _newclass:frompointer = staticmethod(_robotsim.floatArray_frompointer)
floatArray_swigregister = _robotsim.floatArray_swigregister
floatArray_swigregister(floatArray)

def floatArray_frompointer(*args):
  return _robotsim.floatArray_frompointer(*args)
floatArray_frompointer = _robotsim.floatArray_frompointer
class intArray(_object):
    """SWIG proxy for a C array of int.  (Generated code; do not
    hand-edit.)"""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, intArray, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, intArray, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _robotsim.new_intArray(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_intArray
    __del__ = lambda self : None;
    def __getitem__(self, *args): return _robotsim.intArray___getitem__(self, *args)
    def __setitem__(self, *args): return _robotsim.intArray___setitem__(self, *args)
    def cast(self): return _robotsim.intArray_cast(self)
    __swig_getmethods__["frompointer"] = lambda x: _robotsim.intArray_frompointer
    if _newclass:frompointer = staticmethod(_robotsim.intArray_frompointer)
intArray_swigregister = _robotsim.intArray_swigregister
intArray_swigregister(intArray)

def intArray_frompointer(*args):
  return _robotsim.intArray_frompointer(*args)
intArray_frompointer = _robotsim.intArray_frompointer
class stringVector(_object):
    """SWIG proxy for std::vector<std::string>.  (Generated code; do not
    hand-edit.)"""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, stringVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, stringVector, name)
    __repr__ = _swig_repr
    def iterator(self): return _robotsim.stringVector_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _robotsim.stringVector___nonzero__(self)
    def __bool__(self): return _robotsim.stringVector___bool__(self)
    def __len__(self): return _robotsim.stringVector___len__(self)
    def pop(self): return _robotsim.stringVector_pop(self)
    def __getslice__(self, *args): return _robotsim.stringVector___getslice__(self, *args)
    def __setslice__(self, *args): return _robotsim.stringVector___setslice__(self, *args)
    def __delslice__(self, *args): return _robotsim.stringVector___delslice__(self, *args)
    def __delitem__(self, *args): return _robotsim.stringVector___delitem__(self, *args)
    def __getitem__(self, *args): return _robotsim.stringVector___getitem__(self, *args)
    def __setitem__(self, *args): return _robotsim.stringVector___setitem__(self, *args)
    def append(self, *args): return _robotsim.stringVector_append(self, *args)
    def empty(self): return _robotsim.stringVector_empty(self)
    def size(self): return _robotsim.stringVector_size(self)
    def clear(self): return _robotsim.stringVector_clear(self)
    def swap(self, *args): return _robotsim.stringVector_swap(self, *args)
    def get_allocator(self): return _robotsim.stringVector_get_allocator(self)
    def begin(self): return _robotsim.stringVector_begin(self)
    def end(self): return _robotsim.stringVector_end(self)
    def rbegin(self): return _robotsim.stringVector_rbegin(self)
    def rend(self): return _robotsim.stringVector_rend(self)
    def pop_back(self): return _robotsim.stringVector_pop_back(self)
    def erase(self, *args): return _robotsim.stringVector_erase(self, *args)
    def __init__(self, *args):
        this = _robotsim.new_stringVector(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _robotsim.stringVector_push_back(self, *args)
    def front(self): return _robotsim.stringVector_front(self)
    def back(self): return _robotsim.stringVector_back(self)
    def assign(self, *args): return _robotsim.stringVector_assign(self, *args)
    def resize(self, *args): return _robotsim.stringVector_resize(self, *args)
    def insert(self, *args): return _robotsim.stringVector_insert(self, *args)
    def reserve(self, *args): return _robotsim.stringVector_reserve(self, *args)
    def capacity(self): return _robotsim.stringVector_capacity(self)
    __swig_destroy__ = _robotsim.delete_stringVector
    __del__ = lambda self : None;
stringVector_swigregister = _robotsim.stringVector_swigregister
stringVector_swigregister(stringVector)
class doubleVector(_object):
    """SWIG proxy for std::vector<double>.  (Generated code; do not
    hand-edit.)"""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, doubleVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, doubleVector, name)
    __repr__ = _swig_repr
    def iterator(self): return _robotsim.doubleVector_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _robotsim.doubleVector___nonzero__(self)
    def __bool__(self): return _robotsim.doubleVector___bool__(self)
    def __len__(self): return _robotsim.doubleVector___len__(self)
    def pop(self): return _robotsim.doubleVector_pop(self)
    def __getslice__(self, *args): return _robotsim.doubleVector___getslice__(self, *args)
    def __setslice__(self, *args): return _robotsim.doubleVector___setslice__(self, *args)
    def __delslice__(self, *args): return _robotsim.doubleVector___delslice__(self, *args)
    def __delitem__(self, *args): return _robotsim.doubleVector___delitem__(self, *args)
    def __getitem__(self, *args): return _robotsim.doubleVector___getitem__(self, *args)
    def __setitem__(self, *args): return _robotsim.doubleVector___setitem__(self, *args)
    def append(self, *args): return _robotsim.doubleVector_append(self, *args)
    def empty(self): return _robotsim.doubleVector_empty(self)
    def size(self): return _robotsim.doubleVector_size(self)
    def clear(self): return _robotsim.doubleVector_clear(self)
    def swap(self, *args): return _robotsim.doubleVector_swap(self, *args)
    def get_allocator(self): return _robotsim.doubleVector_get_allocator(self)
    def begin(self): return _robotsim.doubleVector_begin(self)
    def end(self): return _robotsim.doubleVector_end(self)
    def rbegin(self): return _robotsim.doubleVector_rbegin(self)
    def rend(self): return _robotsim.doubleVector_rend(self)
    def pop_back(self): return _robotsim.doubleVector_pop_back(self)
    def erase(self, *args): return _robotsim.doubleVector_erase(self, *args)
    def __init__(self, *args):
        this = _robotsim.new_doubleVector(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _robotsim.doubleVector_push_back(self, *args)
    def front(self): return _robotsim.doubleVector_front(self)
    def back(self): return _robotsim.doubleVector_back(self)
    def assign(self, *args): return _robotsim.doubleVector_assign(self, *args)
    def resize(self, *args): return _robotsim.doubleVector_resize(self, *args)
    def insert(self, *args): return _robotsim.doubleVector_insert(self, *args)
    def reserve(self, *args): return _robotsim.doubleVector_reserve(self, *args)
    def capacity(self): return _robotsim.doubleVector_capacity(self)
    __swig_destroy__ = _robotsim.delete_doubleVector
    __del__ = lambda self : None;
doubleVector_swigregister = _robotsim.doubleVector_swigregister
doubleVector_swigregister(doubleVector)
# SWIG-generated proxy for std::vector<float>. Every method delegates to a
# flat C function in the _robotsim extension module; attribute reads/writes
# are routed through the __swig_getmethods__/__swig_setmethods__ tables via
# _swig_getattr/_swig_setattr.
class floatVector(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, floatVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, floatVector, name)
    __repr__ = _swig_repr
    def iterator(self): return _robotsim.floatVector_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _robotsim.floatVector___nonzero__(self)
    def __bool__(self): return _robotsim.floatVector___bool__(self)
    def __len__(self): return _robotsim.floatVector___len__(self)
    def pop(self): return _robotsim.floatVector_pop(self)
    def __getslice__(self, *args): return _robotsim.floatVector___getslice__(self, *args)
    def __setslice__(self, *args): return _robotsim.floatVector___setslice__(self, *args)
    def __delslice__(self, *args): return _robotsim.floatVector___delslice__(self, *args)
    def __delitem__(self, *args): return _robotsim.floatVector___delitem__(self, *args)
    def __getitem__(self, *args): return _robotsim.floatVector___getitem__(self, *args)
    def __setitem__(self, *args): return _robotsim.floatVector___setitem__(self, *args)
    def append(self, *args): return _robotsim.floatVector_append(self, *args)
    def empty(self): return _robotsim.floatVector_empty(self)
    def size(self): return _robotsim.floatVector_size(self)
    def clear(self): return _robotsim.floatVector_clear(self)
    def swap(self, *args): return _robotsim.floatVector_swap(self, *args)
    def get_allocator(self): return _robotsim.floatVector_get_allocator(self)
    def begin(self): return _robotsim.floatVector_begin(self)
    def end(self): return _robotsim.floatVector_end(self)
    def rbegin(self): return _robotsim.floatVector_rbegin(self)
    def rend(self): return _robotsim.floatVector_rend(self)
    def pop_back(self): return _robotsim.floatVector_pop_back(self)
    def erase(self, *args): return _robotsim.floatVector_erase(self, *args)
    def __init__(self, *args):
        this = _robotsim.new_floatVector(*args)
        # Standard SWIG ownership idiom: append the new C++ pointer to an
        # existing 'this' holder, or set it directly on first assignment.
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _robotsim.floatVector_push_back(self, *args)
    def front(self): return _robotsim.floatVector_front(self)
    def back(self): return _robotsim.floatVector_back(self)
    def assign(self, *args): return _robotsim.floatVector_assign(self, *args)
    def resize(self, *args): return _robotsim.floatVector_resize(self, *args)
    def insert(self, *args): return _robotsim.floatVector_insert(self, *args)
    def reserve(self, *args): return _robotsim.floatVector_reserve(self, *args)
    def capacity(self): return _robotsim.floatVector_capacity(self)
    # Destruction of the underlying C++ object is handled by the SWIG
    # runtime via __swig_destroy__; the Python-level __del__ is a no-op.
    __swig_destroy__ = _robotsim.delete_floatVector
    __del__ = lambda self : None;
# Register the floatVector proxy class with the SWIG runtime.
floatVector_swigregister = _robotsim.floatVector_swigregister
floatVector_swigregister(floatVector)
# SWIG-generated proxy for std::vector<int>. Structurally identical to
# floatVector; all methods delegate to flat C functions in _robotsim.
class intVector(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, intVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, intVector, name)
    __repr__ = _swig_repr
    def iterator(self): return _robotsim.intVector_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _robotsim.intVector___nonzero__(self)
    def __bool__(self): return _robotsim.intVector___bool__(self)
    def __len__(self): return _robotsim.intVector___len__(self)
    def pop(self): return _robotsim.intVector_pop(self)
    def __getslice__(self, *args): return _robotsim.intVector___getslice__(self, *args)
    def __setslice__(self, *args): return _robotsim.intVector___setslice__(self, *args)
    def __delslice__(self, *args): return _robotsim.intVector___delslice__(self, *args)
    def __delitem__(self, *args): return _robotsim.intVector___delitem__(self, *args)
    def __getitem__(self, *args): return _robotsim.intVector___getitem__(self, *args)
    def __setitem__(self, *args): return _robotsim.intVector___setitem__(self, *args)
    def append(self, *args): return _robotsim.intVector_append(self, *args)
    def empty(self): return _robotsim.intVector_empty(self)
    def size(self): return _robotsim.intVector_size(self)
    def clear(self): return _robotsim.intVector_clear(self)
    def swap(self, *args): return _robotsim.intVector_swap(self, *args)
    def get_allocator(self): return _robotsim.intVector_get_allocator(self)
    def begin(self): return _robotsim.intVector_begin(self)
    def end(self): return _robotsim.intVector_end(self)
    def rbegin(self): return _robotsim.intVector_rbegin(self)
    def rend(self): return _robotsim.intVector_rend(self)
    def pop_back(self): return _robotsim.intVector_pop_back(self)
    def erase(self, *args): return _robotsim.intVector_erase(self, *args)
    def __init__(self, *args):
        this = _robotsim.new_intVector(*args)
        # SWIG ownership idiom: append to an existing 'this', else set it.
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _robotsim.intVector_push_back(self, *args)
    def front(self): return _robotsim.intVector_front(self)
    def back(self): return _robotsim.intVector_back(self)
    def assign(self, *args): return _robotsim.intVector_assign(self, *args)
    def resize(self, *args): return _robotsim.intVector_resize(self, *args)
    def insert(self, *args): return _robotsim.intVector_insert(self, *args)
    def reserve(self, *args): return _robotsim.intVector_reserve(self, *args)
    def capacity(self): return _robotsim.intVector_capacity(self)
    __swig_destroy__ = _robotsim.delete_intVector
    __del__ = lambda self : None;
# Register the intVector proxy class with the SWIG runtime.
intVector_swigregister = _robotsim.intVector_swigregister
intVector_swigregister(intVector)
# SWIG-generated proxy for a vector-of-vectors of doubles (a "matrix" as a
# nested std::vector). Same delegation pattern as the vector proxies above.
class doubleMatrix(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, doubleMatrix, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, doubleMatrix, name)
    __repr__ = _swig_repr
    def iterator(self): return _robotsim.doubleMatrix_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _robotsim.doubleMatrix___nonzero__(self)
    def __bool__(self): return _robotsim.doubleMatrix___bool__(self)
    def __len__(self): return _robotsim.doubleMatrix___len__(self)
    def pop(self): return _robotsim.doubleMatrix_pop(self)
    def __getslice__(self, *args): return _robotsim.doubleMatrix___getslice__(self, *args)
    def __setslice__(self, *args): return _robotsim.doubleMatrix___setslice__(self, *args)
    def __delslice__(self, *args): return _robotsim.doubleMatrix___delslice__(self, *args)
    def __delitem__(self, *args): return _robotsim.doubleMatrix___delitem__(self, *args)
    def __getitem__(self, *args): return _robotsim.doubleMatrix___getitem__(self, *args)
    def __setitem__(self, *args): return _robotsim.doubleMatrix___setitem__(self, *args)
    def append(self, *args): return _robotsim.doubleMatrix_append(self, *args)
    def empty(self): return _robotsim.doubleMatrix_empty(self)
    def size(self): return _robotsim.doubleMatrix_size(self)
    def clear(self): return _robotsim.doubleMatrix_clear(self)
    def swap(self, *args): return _robotsim.doubleMatrix_swap(self, *args)
    def get_allocator(self): return _robotsim.doubleMatrix_get_allocator(self)
    def begin(self): return _robotsim.doubleMatrix_begin(self)
    def end(self): return _robotsim.doubleMatrix_end(self)
    def rbegin(self): return _robotsim.doubleMatrix_rbegin(self)
    def rend(self): return _robotsim.doubleMatrix_rend(self)
    def pop_back(self): return _robotsim.doubleMatrix_pop_back(self)
    def erase(self, *args): return _robotsim.doubleMatrix_erase(self, *args)
    def __init__(self, *args):
        this = _robotsim.new_doubleMatrix(*args)
        # SWIG ownership idiom: append to an existing 'this', else set it.
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _robotsim.doubleMatrix_push_back(self, *args)
    def front(self): return _robotsim.doubleMatrix_front(self)
    def back(self): return _robotsim.doubleMatrix_back(self)
    def assign(self, *args): return _robotsim.doubleMatrix_assign(self, *args)
    def resize(self, *args): return _robotsim.doubleMatrix_resize(self, *args)
    def insert(self, *args): return _robotsim.doubleMatrix_insert(self, *args)
    def reserve(self, *args): return _robotsim.doubleMatrix_reserve(self, *args)
    def capacity(self): return _robotsim.doubleMatrix_capacity(self)
    __swig_destroy__ = _robotsim.delete_doubleMatrix
    __del__ = lambda self : None;
# Register the doubleMatrix proxy class with the SWIG runtime.
doubleMatrix_swigregister = _robotsim.doubleMatrix_swigregister
doubleMatrix_swigregister(doubleMatrix)
# SWIG-generated proxy for the C++ TriangleMesh class (geometry.h). The
# 'indices' and 'vertices' attributes are exposed through the SWIG
# get/set-method tables (and as properties on new-style classes).
class TriangleMesh(_object):
    """
    A 3D indexed triangle mesh class.
    Attributes: vertices: a list of vertices, given as a flattened
    coordinate list [x1, y1, z1, x2, y2, ...]
    indices: a list of triangle vertices given as indices into the
    vertices list, i.e., [a1,b1,c2, a2,b2,c2, ...]
    Note: because the bindings are generated by SWIG, you can access the
    indices / vertices members via some automatically generated accessors
    / modifiers. In particular len(), append(), and indexing via [] are
    useful. Some other methods like resize() are also provided. However,
    you CANNOT set these items via assignment.
    Examples:
    m = TriangleMesh() m.vertices.append(0) m.vertices.append(0)
    m.vertices.append(0) print len(m.vertices) #prints 3 m.vertices =
    [0,0,0] #this is an error m.vertices += [1,2,3] #this is also an error
    C++ includes: geometry.h
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, TriangleMesh, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, TriangleMesh, name)
    __repr__ = _swig_repr
    def translate(self, *args):
        """
        translate(TriangleMesh self, double const [3] t)
        Translates all the vertices by v=v+t.
        """
        return _robotsim.TriangleMesh_translate(self, *args)
    def transform(self, *args):
        """
        transform(TriangleMesh self, double const [9] R, double const [3] t)
        Transforms all the vertices by the rigid transform v=R*v+t.
        """
        return _robotsim.TriangleMesh_transform(self, *args)
    # Attribute plumbing for the C++ data members 'indices' and 'vertices'.
    __swig_setmethods__["indices"] = _robotsim.TriangleMesh_indices_set
    __swig_getmethods__["indices"] = _robotsim.TriangleMesh_indices_get
    if _newclass:indices = _swig_property(_robotsim.TriangleMesh_indices_get, _robotsim.TriangleMesh_indices_set)
    __swig_setmethods__["vertices"] = _robotsim.TriangleMesh_vertices_set
    __swig_getmethods__["vertices"] = _robotsim.TriangleMesh_vertices_get
    if _newclass:vertices = _swig_property(_robotsim.TriangleMesh_vertices_get, _robotsim.TriangleMesh_vertices_set)
    def __init__(self):
        """
        __init__(TriangleMesh self) -> TriangleMesh
        A 3D indexed triangle mesh class.
        Attributes: vertices: a list of vertices, given as a flattened
        coordinate list [x1, y1, z1, x2, y2, ...]
        indices: a list of triangle vertices given as indices into the
        vertices list, i.e., [a1,b1,c2, a2,b2,c2, ...]
        Note: because the bindings are generated by SWIG, you can access the
        indices / vertices members via some automatically generated accessors
        / modifiers. In particular len(), append(), and indexing via [] are
        useful. Some other methods like resize() are also provided. However,
        you CANNOT set these items via assignment.
        Examples:
        m = TriangleMesh() m.vertices.append(0) m.vertices.append(0)
        m.vertices.append(0) print len(m.vertices) #prints 3 m.vertices =
        [0,0,0] #this is an error m.vertices += [1,2,3] #this is also an error
        C++ includes: geometry.h
        """
        this = _robotsim.new_TriangleMesh()
        # SWIG ownership idiom: append to an existing 'this', else set it.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_TriangleMesh
    __del__ = lambda self : None;
# Register the TriangleMesh proxy class with the SWIG runtime.
TriangleMesh_swigregister = _robotsim.TriangleMesh_swigregister
TriangleMesh_swigregister(TriangleMesh)
# SWIG-generated proxy for the C++ PointCloud class (geometry.h). Exposes
# vertices, per-point properties, property names, and string settings via
# the SWIG attribute tables; all methods delegate to _robotsim.
class PointCloud(_object):
    """
    A 3D point cloud class.
    Attributes: vertices: a list of vertices, given as a list [x1, y1, z1,
    x2, y2, ... zn]
    properties: a list of vertex properties, given as a list [p11, p21,
    ..., pk1, p12, p22, ..., pk2, ... , pn1, pn2, ..., pn2] where each
    vertex has k properties. The name of each property is given by the
    propertyNames member.
    Note: because the bindings are generated by SWIG, you can access the
    vertices/properties/propertyName members via some automatically
    generated accessors / modifiers. In particular len(), append(), and
    indexing via [] are useful. Some other methods like resize() are also
    provided. However, you CANNOT set these items via assignment.
    Examples:
    pc = PointCloud() pc.propertyNames.append('rgb') pc.vertices.append(0)
    pc.vertices.append(0) pc.vertices.append(0) pc.properties.append(0)
    print len(pc.vertices) #prints 3 print pc.numPoints() #prints 1
    pc.addPoint([1,2,3]) print pc.numPoints() #prints 2 print
    len(pc.properties.size()) #prints 2: 1 property category x 2 points
    print pc.getProperty(1,0) #prints 0; this is the default value added
    when addPoint is called
    C++ includes: geometry.h
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, PointCloud, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, PointCloud, name)
    __repr__ = _swig_repr
    def numPoints(self):
        """
        numPoints(PointCloud self) -> int
        Returns the number of points.
        """
        return _robotsim.PointCloud_numPoints(self)
    def numProperties(self):
        """
        numProperties(PointCloud self) -> int
        Returns the number of properties.
        """
        return _robotsim.PointCloud_numProperties(self)
    def setPoints(self, *args):
        """
        setPoints(PointCloud self, int num, doubleVector plist)
        Sets all the points to the given list (a 3n-list)
        """
        return _robotsim.PointCloud_setPoints(self, *args)
    def addPoint(self, *args):
        """
        addPoint(PointCloud self, double const [3] p) -> int
        Adds a point. Sets all its properties to 0. Returns the index.
        """
        return _robotsim.PointCloud_addPoint(self, *args)
    def setPoint(self, *args):
        """
        setPoint(PointCloud self, int index, double const [3] p)
        Sets the position of the point at the given index to p.
        """
        return _robotsim.PointCloud_setPoint(self, *args)
    def getPoint(self, *args):
        """
        getPoint(PointCloud self, int index)
        Retrieves the position of the point at the given index.
        """
        return _robotsim.PointCloud_getPoint(self, *args)
    def addProperty(self, *args):
        """
        addProperty(PointCloud self, std::string const & pname)
        addProperty(PointCloud self, std::string const & pname, doubleVector properties)
        Adds a new property with name pname, and sets values for this property
        to the given list (a n-list)
        """
        return _robotsim.PointCloud_addProperty(self, *args)
    def setProperties(self, *args):
        """
        setProperties(PointCloud self, doubleVector properties)
        setProperties(PointCloud self, int pindex, doubleVector properties)
        Sets property pindex of all points to the given list (a n-list)
        """
        return _robotsim.PointCloud_setProperties(self, *args)
    def setProperty(self, *args):
        """
        setProperty(PointCloud self, int index, int pindex, double value)
        setProperty(PointCloud self, int index, std::string const & pname, double value)
        Sets the property named pname of point index to the given value.
        """
        return _robotsim.PointCloud_setProperty(self, *args)
    def getProperty(self, *args):
        """
        getProperty(PointCloud self, int index, int pindex) -> double
        getProperty(PointCloud self, int index, std::string const & pname) -> double
        Gets the property named pname of point index.
        """
        return _robotsim.PointCloud_getProperty(self, *args)
    def translate(self, *args):
        """
        translate(PointCloud self, double const [3] t)
        Translates all the points by v=v+t.
        """
        return _robotsim.PointCloud_translate(self, *args)
    def transform(self, *args):
        """
        transform(PointCloud self, double const [9] R, double const [3] t)
        Transforms all the points by the rigid transform v=R*v+t.
        """
        return _robotsim.PointCloud_transform(self, *args)
    def join(self, *args):
        """
        join(PointCloud self, PointCloud pc)
        Adds the given point cloud to this one. They must share the same
        properties or else an exception is raised.
        """
        return _robotsim.PointCloud_join(self, *args)
    def setSetting(self, *args):
        """
        setSetting(PointCloud self, std::string const & key, std::string const & value)
        Sets the given setting.
        """
        return _robotsim.PointCloud_setSetting(self, *args)
    def getSetting(self, *args):
        """
        getSetting(PointCloud self, std::string const & key) -> std::string
        Retrieves the given setting.
        """
        return _robotsim.PointCloud_getSetting(self, *args)
    # Attribute plumbing for the C++ data members.
    __swig_setmethods__["vertices"] = _robotsim.PointCloud_vertices_set
    __swig_getmethods__["vertices"] = _robotsim.PointCloud_vertices_get
    if _newclass:vertices = _swig_property(_robotsim.PointCloud_vertices_get, _robotsim.PointCloud_vertices_set)
    __swig_setmethods__["propertyNames"] = _robotsim.PointCloud_propertyNames_set
    __swig_getmethods__["propertyNames"] = _robotsim.PointCloud_propertyNames_get
    if _newclass:propertyNames = _swig_property(_robotsim.PointCloud_propertyNames_get, _robotsim.PointCloud_propertyNames_set)
    __swig_setmethods__["properties"] = _robotsim.PointCloud_properties_set
    __swig_getmethods__["properties"] = _robotsim.PointCloud_properties_get
    if _newclass:properties = _swig_property(_robotsim.PointCloud_properties_get, _robotsim.PointCloud_properties_set)
    __swig_setmethods__["settings"] = _robotsim.PointCloud_settings_set
    __swig_getmethods__["settings"] = _robotsim.PointCloud_settings_get
    if _newclass:settings = _swig_property(_robotsim.PointCloud_settings_get, _robotsim.PointCloud_settings_set)
    def __init__(self):
        """
        __init__(PointCloud self) -> PointCloud
        A 3D point cloud class.
        Attributes: vertices: a list of vertices, given as a list [x1, y1, z1,
        x2, y2, ... zn]
        properties: a list of vertex properties, given as a list [p11, p21,
        ..., pk1, p12, p22, ..., pk2, ... , pn1, pn2, ..., pn2] where each
        vertex has k properties. The name of each property is given by the
        propertyNames member.
        Note: because the bindings are generated by SWIG, you can access the
        vertices/properties/propertyName members via some automatically
        generated accessors / modifiers. In particular len(), append(), and
        indexing via [] are useful. Some other methods like resize() are also
        provided. However, you CANNOT set these items via assignment.
        Examples:
        pc = PointCloud() pc.propertyNames.append('rgb') pc.vertices.append(0)
        pc.vertices.append(0) pc.vertices.append(0) pc.properties.append(0)
        print len(pc.vertices) #prints 3 print pc.numPoints() #prints 1
        pc.addPoint([1,2,3]) print pc.numPoints() #prints 2 print
        len(pc.properties.size()) #prints 2: 1 property category x 2 points
        print pc.getProperty(1,0) #prints 0; this is the default value added
        when addPoint is called
        C++ includes: geometry.h
        """
        this = _robotsim.new_PointCloud()
        # SWIG ownership idiom: append to an existing 'this', else set it.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_PointCloud
    __del__ = lambda self : None;
# Register the PointCloud proxy class with the SWIG runtime.
PointCloud_swigregister = _robotsim.PointCloud_swigregister
PointCloud_swigregister(PointCloud)
# SWIG-generated proxy for the C++ GeometricPrimitive class (geometry.h).
# The 'type' and 'properties' attributes are exposed via the SWIG tables.
class GeometricPrimitive(_object):
    """
    A geometric primitive. So far only points, spheres, segments, and
    AABBs can be constructed manually in the Python API.
    C++ includes: geometry.h
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, GeometricPrimitive, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, GeometricPrimitive, name)
    __repr__ = _swig_repr
    def setPoint(self, *args):
        """setPoint(GeometricPrimitive self, double const [3] pt)"""
        return _robotsim.GeometricPrimitive_setPoint(self, *args)
    def setSphere(self, *args):
        """setSphere(GeometricPrimitive self, double const [3] c, double r)"""
        return _robotsim.GeometricPrimitive_setSphere(self, *args)
    def setSegment(self, *args):
        """setSegment(GeometricPrimitive self, double const [3] a, double const [3] b)"""
        return _robotsim.GeometricPrimitive_setSegment(self, *args)
    def setAABB(self, *args):
        """setAABB(GeometricPrimitive self, double const [3] bmin, double const [3] bmax)"""
        return _robotsim.GeometricPrimitive_setAABB(self, *args)
    def loadString(self, *args):
        """loadString(GeometricPrimitive self, char const * str) -> bool"""
        return _robotsim.GeometricPrimitive_loadString(self, *args)
    def saveString(self):
        """saveString(GeometricPrimitive self) -> std::string"""
        return _robotsim.GeometricPrimitive_saveString(self)
    # Attribute plumbing for the C++ data members 'type' and 'properties'.
    __swig_setmethods__["type"] = _robotsim.GeometricPrimitive_type_set
    __swig_getmethods__["type"] = _robotsim.GeometricPrimitive_type_get
    if _newclass:type = _swig_property(_robotsim.GeometricPrimitive_type_get, _robotsim.GeometricPrimitive_type_set)
    __swig_setmethods__["properties"] = _robotsim.GeometricPrimitive_properties_set
    __swig_getmethods__["properties"] = _robotsim.GeometricPrimitive_properties_get
    if _newclass:properties = _swig_property(_robotsim.GeometricPrimitive_properties_get, _robotsim.GeometricPrimitive_properties_set)
    def __init__(self):
        """
        __init__(GeometricPrimitive self) -> GeometricPrimitive
        A geometric primitive. So far only points, spheres, segments, and
        AABBs can be constructed manually in the Python API.
        C++ includes: geometry.h
        """
        this = _robotsim.new_GeometricPrimitive()
        # SWIG ownership idiom: append to an existing 'this', else set it.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_GeometricPrimitive
    __del__ = lambda self : None;
# Register the GeometricPrimitive proxy class with the SWIG runtime.
GeometricPrimitive_swigregister = _robotsim.GeometricPrimitive_swigregister
GeometricPrimitive_swigregister(GeometricPrimitive)
# SWIG-generated proxy for the C++ Geometry3D class (geometry.h): a uniform
# container over TriangleMesh / PointCloud / GeometricPrimitive / Group
# geometries, with transform and proximity-query methods. All methods
# delegate to flat C functions in _robotsim.
class Geometry3D(_object):
    """
    A three-D geometry. Can either be a reference to a world item's
    geometry, in which case modifiers change the world item's geometry, or
    it can be a standalone geometry.
    There are four currently supported types of geometry: primitives (
    GeometricPrimitive)
    triangle meshes ( TriangleMesh)
    point clouds ( PointCloud)
    groups (Group) This class acts as a uniform container of all of these
    types.
    Each geometry stores a "current" transform, which is automatically
    updated for world items' geometries. The proximity queries are
    performed with respect to the transformed geometries (note the
    underlying geometry is not changed, which could be computationally
    expensive. The query is performed, however, as though they were).
    If you want to set a world item's geometry to be equal to a standalone
    geometry, use the set(rhs) function rather than the assignment (=)
    operator.
    Modifiers include any setX() functions, translate(), and transform().
    Proximity queries include collides(), withinDistance(), distance(),
    closestPoint(), and rayCast().
    Each object also has a "collision margin" which may virtually fatten
    the object, as far as proximity queries are concerned. This is useful
    for setting collision avoidance margins in motion planning. By default
    it is zero. (Note that this is NOT the same thing as simulation body
    collision padding!)
    C++ includes: geometry.h
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Geometry3D, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Geometry3D, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(Geometry3D self) -> Geometry3D
        __init__(Geometry3D self, Geometry3D arg2) -> Geometry3D
        __init__(Geometry3D self, GeometricPrimitive arg2) -> Geometry3D
        __init__(Geometry3D self, TriangleMesh arg2) -> Geometry3D
        __init__(Geometry3D self, PointCloud arg2) -> Geometry3D
        """
        this = _robotsim.new_Geometry3D(*args)
        # SWIG ownership idiom: append to an existing 'this', else set it.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_Geometry3D
    __del__ = lambda self : None;
    def clone(self):
        """
        clone(Geometry3D self) -> Geometry3D
        Creates a standalone geometry from this geometry.
        """
        return _robotsim.Geometry3D_clone(self)
    def set(self, *args):
        """
        set(Geometry3D self, Geometry3D arg2)
        Copies the geometry of the argument into this geometry.
        """
        return _robotsim.Geometry3D_set(self, *args)
    def isStandalone(self):
        """
        isStandalone(Geometry3D self) -> bool
        Returns true if this is a standalone geometry.
        """
        return _robotsim.Geometry3D_isStandalone(self)
    def free(self):
        """
        free(Geometry3D self)
        Frees the data associated with this geometry, if standalone.
        """
        return _robotsim.Geometry3D_free(self)
    def type(self):
        """
        type(Geometry3D self) -> std::string
        Returns the type of geometry: TriangleMesh, PointCloud, or
        GeometricPrimitive.
        """
        return _robotsim.Geometry3D_type(self)
    def empty(self):
        """
        empty(Geometry3D self) -> bool
        Returns true if this has no contents (not the same as
        numElements()==0)
        """
        return _robotsim.Geometry3D_empty(self)
    def getTriangleMesh(self):
        """
        getTriangleMesh(Geometry3D self) -> TriangleMesh
        Returns a TriangleMesh if this geometry is of type TriangleMesh.
        """
        return _robotsim.Geometry3D_getTriangleMesh(self)
    def getPointCloud(self):
        """
        getPointCloud(Geometry3D self) -> PointCloud
        Returns a PointCloud if this geometry is of type PointCloud.
        """
        return _robotsim.Geometry3D_getPointCloud(self)
    def getGeometricPrimitive(self):
        """
        getGeometricPrimitive(Geometry3D self) -> GeometricPrimitive
        Returns a GeometricPrimitive if this geometry is of type
        GeometricPrimitive.
        """
        return _robotsim.Geometry3D_getGeometricPrimitive(self)
    def setTriangleMesh(self, *args):
        """
        setTriangleMesh(Geometry3D self, TriangleMesh arg2)
        Sets this Geometry3D to a TriangleMesh.
        """
        return _robotsim.Geometry3D_setTriangleMesh(self, *args)
    def setPointCloud(self, *args):
        """
        setPointCloud(Geometry3D self, PointCloud arg2)
        Sets this Geometry3D to a PointCloud.
        """
        return _robotsim.Geometry3D_setPointCloud(self, *args)
    def setGeometricPrimitive(self, *args):
        """
        setGeometricPrimitive(Geometry3D self, GeometricPrimitive arg2)
        Sets this Geometry3D to a GeometricPrimitive.
        """
        return _robotsim.Geometry3D_setGeometricPrimitive(self, *args)
    def setGroup(self):
        """
        setGroup(Geometry3D self)
        Sets this Geometry3D to a group geometry. To add sub-geometries,
        repeatedly call setElement()
        """
        return _robotsim.Geometry3D_setGroup(self)
    def getElement(self, *args):
        """
        getElement(Geometry3D self, int element) -> Geometry3D
        Returns an element of the Geometry3D if it is a group. Raises an error
        if this is of any other type.
        """
        return _robotsim.Geometry3D_getElement(self, *args)
    def setElement(self, *args):
        """
        setElement(Geometry3D self, int element, Geometry3D data)
        Sets an element of the Geometry3D if it is a group. Raises an error if
        this is of any other type.
        """
        return _robotsim.Geometry3D_setElement(self, *args)
    def numElements(self):
        """
        numElements(Geometry3D self) -> int
        Returns the number of sub-elements in this geometry.
        """
        return _robotsim.Geometry3D_numElements(self)
    def loadFile(self, *args):
        """
        loadFile(Geometry3D self, char const * fn) -> bool
        Loads from file. Standard mesh types, PCD files, and .geom files are
        supported.
        """
        return _robotsim.Geometry3D_loadFile(self, *args)
    def saveFile(self, *args):
        """
        saveFile(Geometry3D self, char const * fn) -> bool
        Saves to file. Standard mesh types, PCD files, and .geom files are
        supported.
        """
        return _robotsim.Geometry3D_saveFile(self, *args)
    def attachToStream(self, *args):
        """
        attachToStream(Geometry3D self, char const * protocol, char const * name, char const * type="") -> bool
        attachToStream(Geometry3D self, char const * protocol, char const * name) -> bool
        Attaches this geometry to a given stream.
        Currently only "ros" protocol is supported. For "ros" protocol,
        name is the ROS topic to attach to. type indicates the datatype that
        the stream source should have, and this will return false if that type
        is not obeyed. Currently only the "PointCloud" or default empty
        ("") types are supported.
        Note: you will need to call Appearance.refresh(True) to get the
        appearance to update.
        """
        return _robotsim.Geometry3D_attachToStream(self, *args)
    def detachFromStream(self, *args):
        """
        detachFromStream(Geometry3D self, char const * protocol, char const * name) -> bool
        Detaches this geometry from a given stream. This must be called before
        deleting a piece of geometry.
        """
        return _robotsim.Geometry3D_detachFromStream(self, *args)
    def setCurrentTransform(self, *args):
        """
        setCurrentTransform(Geometry3D self, double const [9] R, double const [3] t)
        Sets the current transformation (not modifying the underlying data)
        """
        return _robotsim.Geometry3D_setCurrentTransform(self, *args)
    def getCurrentTransform(self):
        """
        getCurrentTransform(Geometry3D self)
        Gets the current transformation.
        """
        return _robotsim.Geometry3D_getCurrentTransform(self)
    def translate(self, *args):
        """
        translate(Geometry3D self, double const [3] t)
        Translates the geometry data. Permanently modifies the data and resets
        any collision data structures.
        """
        return _robotsim.Geometry3D_translate(self, *args)
    def scale(self, *args):
        """
        scale(Geometry3D self, double s)
        scale(Geometry3D self, double sx, double sy, double sz)
        Scales the geometry data with different factors on each axis.
        Permanently modifies the data and resets any collision data
        structures.
        """
        return _robotsim.Geometry3D_scale(self, *args)
    def rotate(self, *args):
        """
        rotate(Geometry3D self, double const [9] R)
        Rotates the geometry data. Permanently modifies the data and resets
        any collision data structures.
        """
        return _robotsim.Geometry3D_rotate(self, *args)
    def transform(self, *args):
        """
        transform(Geometry3D self, double const [9] R, double const [3] t)
        Translates/rotates/scales the geometry data. Permanently modifies the
        data and resets any collision data structures.
        """
        return _robotsim.Geometry3D_transform(self, *args)
    def setCollisionMargin(self, *args):
        """
        setCollisionMargin(Geometry3D self, double margin)
        Sets a padding around the base geometry which affects the results of
        proximity queries.
        """
        return _robotsim.Geometry3D_setCollisionMargin(self, *args)
    def getCollisionMargin(self):
        """
        getCollisionMargin(Geometry3D self) -> double
        Returns the padding around the base geometry. Default 0.
        """
        return _robotsim.Geometry3D_getCollisionMargin(self)
    def getBB(self):
        """
        getBB(Geometry3D self)
        Returns the axis-aligned bounding box of the object.
        """
        return _robotsim.Geometry3D_getBB(self)
    def collides(self, *args):
        """
        collides(Geometry3D self, Geometry3D other) -> bool
        Returns true if this geometry collides with the other.
        """
        return _robotsim.Geometry3D_collides(self, *args)
    def withinDistance(self, *args):
        """
        withinDistance(Geometry3D self, Geometry3D other, double tol) -> bool
        Returns true if this geometry is within distance tol to other.
        """
        return _robotsim.Geometry3D_withinDistance(self, *args)
    def distance(self, *args):
        """
        distance(Geometry3D self, Geometry3D other, double relErr=0, double absErr=0) -> double
        distance(Geometry3D self, Geometry3D other, double relErr=0) -> double
        distance(Geometry3D self, Geometry3D other) -> double
        Returns the distance from this geometry to the other.
        """
        return _robotsim.Geometry3D_distance(self, *args)
    def closestPoint(self, *args):
        """
        closestPoint(Geometry3D self, double const [3] pt) -> bool
        Returns (success,cp) giving the closest point to the input point.
        success is false if that operation is not supported with the given
        geometry type. cp are given in world coordinates.
        """
        return _robotsim.Geometry3D_closestPoint(self, *args)
    def rayCast(self, *args):
        """
        rayCast(Geometry3D self, double const [3] s, double const [3] d) -> bool
        Returns (hit,pt) where hit is true if the ray starting at s and
        pointing in direction d hits the geometry (given in world
        coordinates); pt is the hit point, in world coordinates.
        """
        return _robotsim.Geometry3D_rayCast(self, *args)
    # Attribute plumbing for the C++ data members 'world', 'id', 'geomPtr'.
    __swig_setmethods__["world"] = _robotsim.Geometry3D_world_set
    __swig_getmethods__["world"] = _robotsim.Geometry3D_world_get
    if _newclass:world = _swig_property(_robotsim.Geometry3D_world_get, _robotsim.Geometry3D_world_set)
    __swig_setmethods__["id"] = _robotsim.Geometry3D_id_set
    __swig_getmethods__["id"] = _robotsim.Geometry3D_id_get
    if _newclass:id = _swig_property(_robotsim.Geometry3D_id_get, _robotsim.Geometry3D_id_set)
    __swig_setmethods__["geomPtr"] = _robotsim.Geometry3D_geomPtr_set
    __swig_getmethods__["geomPtr"] = _robotsim.Geometry3D_geomPtr_get
    if _newclass:geomPtr = _swig_property(_robotsim.Geometry3D_geomPtr_get, _robotsim.Geometry3D_geomPtr_set)
# Register the Geometry3D proxy class with the SWIG runtime.
Geometry3D_swigregister = _robotsim.Geometry3D_swigregister
Geometry3D_swigregister(Geometry3D)
# SWIG-generated proxy for the C++ Appearance class. Exposes the drawing
# primitive constants (ALL/VERTICES/EDGES/FACES), draw/color/texture
# setters, and GL drawing entry points; all methods delegate to _robotsim.
class Appearance(_object):
    """Proxy of C++ Appearance class"""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Appearance, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Appearance, name)
    __repr__ = _swig_repr
    # Primitive-selection constants mirrored from the C++ enum.
    ALL = _robotsim.Appearance_ALL
    VERTICES = _robotsim.Appearance_VERTICES
    EDGES = _robotsim.Appearance_EDGES
    FACES = _robotsim.Appearance_FACES
    def __init__(self, *args):
        """
        __init__(Appearance self) -> Appearance
        __init__(Appearance self, Appearance app) -> Appearance
        """
        this = _robotsim.new_Appearance(*args)
        # SWIG ownership idiom: append to an existing 'this', else set it.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_Appearance
    __del__ = lambda self : None;
    def refresh(self, deep=True):
        """
        refresh(Appearance self, bool deep=True)
        refresh(Appearance self)
        """
        return _robotsim.Appearance_refresh(self, deep)
    def clone(self):
        """clone(Appearance self) -> Appearance"""
        return _robotsim.Appearance_clone(self)
    def set(self, *args):
        """set(Appearance self, Appearance arg2)"""
        return _robotsim.Appearance_set(self, *args)
    def isStandalone(self):
        """isStandalone(Appearance self) -> bool"""
        return _robotsim.Appearance_isStandalone(self)
    def free(self):
        """free(Appearance self)"""
        return _robotsim.Appearance_free(self)
    def setDraw(self, *args):
        """
        setDraw(Appearance self, bool draw)
        setDraw(Appearance self, int primitive, bool draw)
        """
        return _robotsim.Appearance_setDraw(self, *args)
    def getDraw(self, *args):
        """
        getDraw(Appearance self) -> bool
        getDraw(Appearance self, int primitive) -> bool
        """
        return _robotsim.Appearance_getDraw(self, *args)
    def setColor(self, *args):
        """
        setColor(Appearance self, float r, float g, float b, float a=1)
        setColor(Appearance self, float r, float g, float b)
        setColor(Appearance self, int primitive, float r, float g, float b, float a)
        """
        return _robotsim.Appearance_setColor(self, *args)
    def getColor(self, *args):
        """
        getColor(Appearance self)
        getColor(Appearance self, int primitive)
        """
        return _robotsim.Appearance_getColor(self, *args)
    def setColors(self, *args):
        """
        setColors(Appearance self, int primitive, floatVector colors, bool alpha=False)
        setColors(Appearance self, int primitive, floatVector colors)
        """
        return _robotsim.Appearance_setColors(self, *args)
    def setTexture1D(self, *args):
        """setTexture1D(Appearance self, int w, char const * format, std::vector< unsigned char,std::allocator< unsigned char > > const & bytes)"""
        return _robotsim.Appearance_setTexture1D(self, *args)
    def setTexture2D(self, *args):
        """setTexture2D(Appearance self, int w, int h, char const * format, std::vector< unsigned char,std::allocator< unsigned char > > const & bytes)"""
        return _robotsim.Appearance_setTexture2D(self, *args)
    def setTexcoords(self, *args):
        """setTexcoords(Appearance self, doubleVector uvs)"""
        return _robotsim.Appearance_setTexcoords(self, *args)
    def setPointSize(self, *args):
        """setPointSize(Appearance self, float size)"""
        return _robotsim.Appearance_setPointSize(self, *args)
    def drawGL(self, *args):
        """
        drawGL(Appearance self)
        drawGL(Appearance self, Geometry3D geom)
        """
        return _robotsim.Appearance_drawGL(self, *args)
    def drawWorldGL(self, *args):
        """drawWorldGL(Appearance self, Geometry3D geom)"""
        return _robotsim.Appearance_drawWorldGL(self, *args)
    # Attribute plumbing for the C++ data members 'world', 'id', 'appearancePtr'.
    __swig_setmethods__["world"] = _robotsim.Appearance_world_set
    __swig_getmethods__["world"] = _robotsim.Appearance_world_get
    if _newclass:world = _swig_property(_robotsim.Appearance_world_get, _robotsim.Appearance_world_set)
    __swig_setmethods__["id"] = _robotsim.Appearance_id_set
    __swig_getmethods__["id"] = _robotsim.Appearance_id_get
    if _newclass:id = _swig_property(_robotsim.Appearance_id_get, _robotsim.Appearance_id_set)
    __swig_setmethods__["appearancePtr"] = _robotsim.Appearance_appearancePtr_set
    __swig_getmethods__["appearancePtr"] = _robotsim.Appearance_appearancePtr_get
    if _newclass:appearancePtr = _swig_property(_robotsim.Appearance_appearancePtr_get, _robotsim.Appearance_appearancePtr_set)
Appearance_swigregister = _robotsim.Appearance_swigregister
Appearance_swigregister(Appearance)
class Viewport(_object):
    """Proxy of C++ Viewport class"""
    # SWIG attribute dispatch tables: __setattr__/__getattr__ forward attribute
    # access to the C-extension getter/setter functions registered below.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Viewport, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Viewport, name)
    __repr__ = _swig_repr
    def fromJson(self, *args):
        """fromJson(Viewport self, std::string const & str) -> bool"""
        return _robotsim.Viewport_fromJson(self, *args)
    def toJson(self):
        """toJson(Viewport self) -> std::string"""
        return _robotsim.Viewport_toJson(self)
    def setModelviewMatrix(self, *args):
        """setModelviewMatrix(Viewport self, double const [16] M)"""
        return _robotsim.Viewport_setModelviewMatrix(self, *args)
    def setRigidTransform(self, *args):
        """setRigidTransform(Viewport self, double const [9] R, double const [3] t)"""
        return _robotsim.Viewport_setRigidTransform(self, *args)
    def getRigidTransform(self):
        """getRigidTransform(Viewport self)"""
        return _robotsim.Viewport_getRigidTransform(self)
    # C++ data members exposed through the SWIG dispatch tables (and as
    # properties when _newclass is true).
    __swig_setmethods__["perspective"] = _robotsim.Viewport_perspective_set
    __swig_getmethods__["perspective"] = _robotsim.Viewport_perspective_get
    if _newclass:perspective = _swig_property(_robotsim.Viewport_perspective_get, _robotsim.Viewport_perspective_set)
    __swig_setmethods__["scale"] = _robotsim.Viewport_scale_set
    __swig_getmethods__["scale"] = _robotsim.Viewport_scale_get
    if _newclass:scale = _swig_property(_robotsim.Viewport_scale_get, _robotsim.Viewport_scale_set)
    __swig_setmethods__["x"] = _robotsim.Viewport_x_set
    __swig_getmethods__["x"] = _robotsim.Viewport_x_get
    if _newclass:x = _swig_property(_robotsim.Viewport_x_get, _robotsim.Viewport_x_set)
    __swig_setmethods__["y"] = _robotsim.Viewport_y_set
    __swig_getmethods__["y"] = _robotsim.Viewport_y_get
    if _newclass:y = _swig_property(_robotsim.Viewport_y_get, _robotsim.Viewport_y_set)
    __swig_setmethods__["w"] = _robotsim.Viewport_w_set
    __swig_getmethods__["w"] = _robotsim.Viewport_w_get
    if _newclass:w = _swig_property(_robotsim.Viewport_w_get, _robotsim.Viewport_w_set)
    __swig_setmethods__["h"] = _robotsim.Viewport_h_set
    __swig_getmethods__["h"] = _robotsim.Viewport_h_get
    if _newclass:h = _swig_property(_robotsim.Viewport_h_get, _robotsim.Viewport_h_set)
    __swig_setmethods__["n"] = _robotsim.Viewport_n_set
    __swig_getmethods__["n"] = _robotsim.Viewport_n_get
    if _newclass:n = _swig_property(_robotsim.Viewport_n_get, _robotsim.Viewport_n_set)
    __swig_setmethods__["f"] = _robotsim.Viewport_f_set
    __swig_getmethods__["f"] = _robotsim.Viewport_f_get
    if _newclass:f = _swig_property(_robotsim.Viewport_f_get, _robotsim.Viewport_f_set)
    __swig_setmethods__["xform"] = _robotsim.Viewport_xform_set
    __swig_getmethods__["xform"] = _robotsim.Viewport_xform_get
    if _newclass:xform = _swig_property(_robotsim.Viewport_xform_get, _robotsim.Viewport_xform_set)
    def __init__(self):
        """__init__(Viewport self) -> Viewport"""
        this = _robotsim.new_Viewport()
        # Standard SWIG ownership idiom: append to an existing 'this' pointer
        # list if present, otherwise store the new C++ pointer directly.
        # NOTE(review): the bare except is SWIG-generated; it effectively
        # catches the AttributeError raised when self.this does not exist yet.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_Viewport
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime so Viewport objects returned
# from the C++ layer are wrapped with this class.
Viewport_swigregister = _robotsim.Viewport_swigregister
Viewport_swigregister(Viewport)
class Widget(_object):
    """Proxy of C++ Widget class"""
    # SWIG attribute dispatch tables; subclasses (WidgetSet, PointPoser, ...)
    # copy these dicts to inherit the registered accessors.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Widget, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Widget, name)
    __repr__ = _swig_repr
    def __init__(self):
        """__init__(Widget self) -> Widget"""
        this = _robotsim.new_Widget()
        # Standard SWIG 'this'-pointer ownership idiom.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_Widget
    __del__ = lambda self : None;
    def hover(self, *args):
        """hover(Widget self, int x, int y, Viewport viewport) -> bool"""
        return _robotsim.Widget_hover(self, *args)
    def beginDrag(self, *args):
        """beginDrag(Widget self, int x, int y, Viewport viewport) -> bool"""
        return _robotsim.Widget_beginDrag(self, *args)
    def drag(self, *args):
        """drag(Widget self, int dx, int dy, Viewport viewport)"""
        return _robotsim.Widget_drag(self, *args)
    def endDrag(self):
        """endDrag(Widget self)"""
        return _robotsim.Widget_endDrag(self)
    def keypress(self, *args):
        """keypress(Widget self, char c)"""
        return _robotsim.Widget_keypress(self, *args)
    def drawGL(self, *args):
        """drawGL(Widget self, Viewport viewport)"""
        return _robotsim.Widget_drawGL(self, *args)
    def idle(self):
        """idle(Widget self)"""
        return _robotsim.Widget_idle(self)
    def wantsRedraw(self):
        """wantsRedraw(Widget self) -> bool"""
        return _robotsim.Widget_wantsRedraw(self)
    def hasHighlight(self):
        """hasHighlight(Widget self) -> bool"""
        return _robotsim.Widget_hasHighlight(self)
    def hasFocus(self):
        """hasFocus(Widget self) -> bool"""
        return _robotsim.Widget_hasFocus(self)
    # C++ data member 'index' exposed through the dispatch tables.
    __swig_setmethods__["index"] = _robotsim.Widget_index_set
    __swig_getmethods__["index"] = _robotsim.Widget_index_get
    if _newclass:index = _swig_property(_robotsim.Widget_index_get, _robotsim.Widget_index_set)
# Register the proxy class with the SWIG runtime.
Widget_swigregister = _robotsim.Widget_swigregister
Widget_swigregister(Widget)
class WidgetSet(Widget):
    """Proxy of C++ WidgetSet class"""
    # Merge the parent Widget's SWIG dispatch tables into this subclass.
    __swig_setmethods__ = {}
    for _s in [Widget]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, WidgetSet, name, value)
    __swig_getmethods__ = {}
    for _s in [Widget]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, WidgetSet, name)
    __repr__ = _swig_repr
    def __init__(self):
        """__init__(WidgetSet self) -> WidgetSet"""
        this = _robotsim.new_WidgetSet()
        # Standard SWIG 'this'-pointer ownership idiom.
        try: self.this.append(this)
        except: self.this = this
    def add(self, *args):
        """add(WidgetSet self, Widget subwidget)"""
        return _robotsim.WidgetSet_add(self, *args)
    def remove(self, *args):
        """remove(WidgetSet self, Widget subwidget)"""
        return _robotsim.WidgetSet_remove(self, *args)
    def enable(self, *args):
        """enable(WidgetSet self, Widget subwidget, bool enabled)"""
        return _robotsim.WidgetSet_enable(self, *args)
    __swig_destroy__ = _robotsim.delete_WidgetSet
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
WidgetSet_swigregister = _robotsim.WidgetSet_swigregister
WidgetSet_swigregister(WidgetSet)
class PointPoser(Widget):
    """Proxy of C++ PointPoser class"""
    # Merge the parent Widget's SWIG dispatch tables into this subclass.
    __swig_setmethods__ = {}
    for _s in [Widget]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, PointPoser, name, value)
    __swig_getmethods__ = {}
    for _s in [Widget]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, PointPoser, name)
    __repr__ = _swig_repr
    def __init__(self):
        """__init__(PointPoser self) -> PointPoser"""
        this = _robotsim.new_PointPoser()
        # Standard SWIG 'this'-pointer ownership idiom.
        try: self.this.append(this)
        except: self.this = this
    def set(self, *args):
        """set(PointPoser self, double const [3] t)"""
        return _robotsim.PointPoser_set(self, *args)
    def get(self):
        """get(PointPoser self)"""
        return _robotsim.PointPoser_get(self)
    def setAxes(self, *args):
        """
        setAxes(PointPoser self, double const [9] R)
        Sets the reference axes (by default aligned to x,y,z)
        """
        return _robotsim.PointPoser_setAxes(self, *args)
    __swig_destroy__ = _robotsim.delete_PointPoser
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
PointPoser_swigregister = _robotsim.PointPoser_swigregister
PointPoser_swigregister(PointPoser)
class TransformPoser(Widget):
    """Proxy of C++ TransformPoser class"""
    # Merge the parent Widget's SWIG dispatch tables into this subclass.
    __swig_setmethods__ = {}
    for _s in [Widget]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, TransformPoser, name, value)
    __swig_getmethods__ = {}
    for _s in [Widget]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, TransformPoser, name)
    __repr__ = _swig_repr
    def __init__(self):
        """__init__(TransformPoser self) -> TransformPoser"""
        this = _robotsim.new_TransformPoser()
        # Standard SWIG 'this'-pointer ownership idiom.
        try: self.this.append(this)
        except: self.this = this
    def set(self, *args):
        """set(TransformPoser self, double const [9] R, double const [3] t)"""
        return _robotsim.TransformPoser_set(self, *args)
    def get(self):
        """get(TransformPoser self)"""
        return _robotsim.TransformPoser_get(self)
    def enableTranslation(self, *args):
        """enableTranslation(TransformPoser self, bool arg2)"""
        return _robotsim.TransformPoser_enableTranslation(self, *args)
    def enableRotation(self, *args):
        """enableRotation(TransformPoser self, bool arg2)"""
        return _robotsim.TransformPoser_enableRotation(self, *args)
    __swig_destroy__ = _robotsim.delete_TransformPoser
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
TransformPoser_swigregister = _robotsim.TransformPoser_swigregister
TransformPoser_swigregister(TransformPoser)
class ObjectPoser(Widget):
    """Proxy of C++ ObjectPoser class"""
    # Merge the parent Widget's SWIG dispatch tables into this subclass.
    __swig_setmethods__ = {}
    for _s in [Widget]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, ObjectPoser, name, value)
    __swig_getmethods__ = {}
    for _s in [Widget]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, ObjectPoser, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """__init__(ObjectPoser self, RigidObjectModel object) -> ObjectPoser"""
        this = _robotsim.new_ObjectPoser(*args)
        # Standard SWIG 'this'-pointer ownership idiom.
        try: self.this.append(this)
        except: self.this = this
    def set(self, *args):
        """set(ObjectPoser self, double const [9] R, double const [3] t)"""
        return _robotsim.ObjectPoser_set(self, *args)
    def get(self):
        """get(ObjectPoser self)"""
        return _robotsim.ObjectPoser_get(self)
    __swig_destroy__ = _robotsim.delete_ObjectPoser
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
ObjectPoser_swigregister = _robotsim.ObjectPoser_swigregister
ObjectPoser_swigregister(ObjectPoser)
class RobotPoser(Widget):
    """Proxy of C++ RobotPoser class"""
    # Merge the parent Widget's SWIG dispatch tables into this subclass.
    __swig_setmethods__ = {}
    for _s in [Widget]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, RobotPoser, name, value)
    __swig_getmethods__ = {}
    for _s in [Widget]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, RobotPoser, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """__init__(RobotPoser self, RobotModel robot) -> RobotPoser"""
        this = _robotsim.new_RobotPoser(*args)
        # Standard SWIG 'this'-pointer ownership idiom.
        try: self.this.append(this)
        except: self.this = this
    def setActiveDofs(self, *args):
        """setActiveDofs(RobotPoser self, intVector dofs)"""
        return _robotsim.RobotPoser_setActiveDofs(self, *args)
    def set(self, *args):
        """set(RobotPoser self, doubleVector q)"""
        return _robotsim.RobotPoser_set(self, *args)
    def get(self):
        """get(RobotPoser self)"""
        return _robotsim.RobotPoser_get(self)
    def getConditioned(self, *args):
        """getConditioned(RobotPoser self, doubleVector qref)"""
        return _robotsim.RobotPoser_getConditioned(self, *args)
    __swig_destroy__ = _robotsim.delete_RobotPoser
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
RobotPoser_swigregister = _robotsim.RobotPoser_swigregister
RobotPoser_swigregister(RobotPoser)
class Mass(_object):
    """
    Stores mass information for a rigid body or robot link. Note: you
    should use the set/get functions rather than changing the members
    directly due to strangeness in SWIG's handling of vectors.
    C++ includes: robotmodel.h
    """
    # SWIG attribute dispatch tables.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Mass, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Mass, name)
    __repr__ = _swig_repr
    def setMass(self, *args):
        """setMass(Mass self, double _mass)"""
        return _robotsim.Mass_setMass(self, *args)
    def getMass(self):
        """getMass(Mass self) -> double"""
        return _robotsim.Mass_getMass(self)
    def setCom(self, *args):
        """setCom(Mass self, doubleVector _com)"""
        return _robotsim.Mass_setCom(self, *args)
    def getCom(self):
        """getCom(Mass self)"""
        return _robotsim.Mass_getCom(self)
    def setInertia(self, *args):
        """setInertia(Mass self, doubleVector _inertia)"""
        return _robotsim.Mass_setInertia(self, *args)
    def getInertia(self):
        """getInertia(Mass self)"""
        return _robotsim.Mass_getInertia(self)
    # Raw C++ data members; prefer the set/get methods above (see class doc).
    __swig_setmethods__["mass"] = _robotsim.Mass_mass_set
    __swig_getmethods__["mass"] = _robotsim.Mass_mass_get
    if _newclass:mass = _swig_property(_robotsim.Mass_mass_get, _robotsim.Mass_mass_set)
    __swig_setmethods__["com"] = _robotsim.Mass_com_set
    __swig_getmethods__["com"] = _robotsim.Mass_com_get
    if _newclass:com = _swig_property(_robotsim.Mass_com_get, _robotsim.Mass_com_set)
    __swig_setmethods__["inertia"] = _robotsim.Mass_inertia_set
    __swig_getmethods__["inertia"] = _robotsim.Mass_inertia_get
    if _newclass:inertia = _swig_property(_robotsim.Mass_inertia_get, _robotsim.Mass_inertia_set)
    def __init__(self):
        """
        __init__(Mass self) -> Mass
        Stores mass information for a rigid body or robot link. Note: you
        should use the set/get functions rather than changing the members
        directly due to strangeness in SWIG's handling of vectors.
        C++ includes: robotmodel.h
        """
        this = _robotsim.new_Mass()
        # Standard SWIG 'this'-pointer ownership idiom.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_Mass
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
Mass_swigregister = _robotsim.Mass_swigregister
Mass_swigregister(Mass)
class ContactParameters(_object):
    """
    Stores contact parameters for an entity. Currently only used for
    simulation, but could be used for contact mechanics in the future.
    C++ includes: robotmodel.h
    """
    # SWIG attribute dispatch tables.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ContactParameters, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ContactParameters, name)
    __repr__ = _swig_repr
    # Contact coefficients exposed from the C++ struct (friction, restitution,
    # stiffness, damping).
    __swig_setmethods__["kFriction"] = _robotsim.ContactParameters_kFriction_set
    __swig_getmethods__["kFriction"] = _robotsim.ContactParameters_kFriction_get
    if _newclass:kFriction = _swig_property(_robotsim.ContactParameters_kFriction_get, _robotsim.ContactParameters_kFriction_set)
    __swig_setmethods__["kRestitution"] = _robotsim.ContactParameters_kRestitution_set
    __swig_getmethods__["kRestitution"] = _robotsim.ContactParameters_kRestitution_get
    if _newclass:kRestitution = _swig_property(_robotsim.ContactParameters_kRestitution_get, _robotsim.ContactParameters_kRestitution_set)
    __swig_setmethods__["kStiffness"] = _robotsim.ContactParameters_kStiffness_set
    __swig_getmethods__["kStiffness"] = _robotsim.ContactParameters_kStiffness_get
    if _newclass:kStiffness = _swig_property(_robotsim.ContactParameters_kStiffness_get, _robotsim.ContactParameters_kStiffness_set)
    __swig_setmethods__["kDamping"] = _robotsim.ContactParameters_kDamping_set
    __swig_getmethods__["kDamping"] = _robotsim.ContactParameters_kDamping_get
    if _newclass:kDamping = _swig_property(_robotsim.ContactParameters_kDamping_get, _robotsim.ContactParameters_kDamping_set)
    def __init__(self):
        """
        __init__(ContactParameters self) -> ContactParameters
        Stores contact parameters for an entity. Currently only used for
        simulation, but could be used for contact mechanics in the future.
        C++ includes: robotmodel.h
        """
        this = _robotsim.new_ContactParameters()
        # Standard SWIG 'this'-pointer ownership idiom.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_ContactParameters
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
ContactParameters_swigregister = _robotsim.ContactParameters_swigregister
ContactParameters_swigregister(ContactParameters)
class RobotModelLink(_object):
    """
    A reference to a link of a RobotModel.
    The link stores many mostly-constant items (id, name, parent,
    geometry, appearance, mass, joint axes). The exception is the link's
    current transform, which is affected by the RobotModel's current
    configuration, i.e., the last RobotModel.setConfig(q) call. The
    various Jacobians of points on the link, accessed by getJacobianX, are
    configuration dependent.
    These are not created by hand, but instead accessed using
    RobotModel.link([index or name])
    C++ includes: robotmodel.h
    """
    # SWIG attribute dispatch tables.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, RobotModelLink, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, RobotModelLink, name)
    __repr__ = _swig_repr
    def __init__(self):
        """__init__(RobotModelLink self) -> RobotModelLink"""
        this = _robotsim.new_RobotModelLink()
        # Standard SWIG 'this'-pointer ownership idiom.
        try: self.this.append(this)
        except: self.this = this
    def getID(self):
        """
        getID(RobotModelLink self) -> int
        Returns the ID of the robot link in its world (Note: not the same as
        getIndex())
        """
        return _robotsim.RobotModelLink_getID(self)
    def getName(self):
        """
        getName(RobotModelLink self) -> char const *
        Returns the name of the robot link.
        """
        return _robotsim.RobotModelLink_getName(self)
    def setName(self, *args):
        """
        setName(RobotModelLink self, char const * name)
        Sets the name of the robot link.
        """
        return _robotsim.RobotModelLink_setName(self, *args)
    def robot(self):
        """
        robot(RobotModelLink self) -> RobotModel
        Returns a reference to the link's robot.
        """
        return _robotsim.RobotModelLink_robot(self)
    def getIndex(self):
        """
        getIndex(RobotModelLink self) -> int
        Returns the index of the link (on its robot).
        """
        return _robotsim.RobotModelLink_getIndex(self)
    def getParent(self):
        """
        getParent(RobotModelLink self) -> int
        Returns the index of the link's parent (on its robot).
        """
        return _robotsim.RobotModelLink_getParent(self)
    def parent(self):
        """
        parent(RobotModelLink self) -> RobotModelLink
        Returns a reference to the link's parent, or a NULL link if it has no
        parent.
        """
        return _robotsim.RobotModelLink_parent(self)
    def setParent(self, *args):
        """
        setParent(RobotModelLink self, int p)
        setParent(RobotModelLink self, RobotModelLink l)
        Sets the link's parent (must be on the same robot).
        """
        return _robotsim.RobotModelLink_setParent(self, *args)
    def geometry(self):
        """
        geometry(RobotModelLink self) -> Geometry3D
        Returns a reference to the link's geometry.
        """
        return _robotsim.RobotModelLink_geometry(self)
    def appearance(self):
        """
        appearance(RobotModelLink self) -> Appearance
        Returns a reference to the link's appearance.
        """
        return _robotsim.RobotModelLink_appearance(self)
    def getMass(self):
        """
        getMass(RobotModelLink self) -> Mass
        Retrieves the inertial properties of the link. (Note that the Mass is
        given with origin at the link frame, not about the COM.)
        """
        return _robotsim.RobotModelLink_getMass(self)
    def setMass(self, *args):
        """
        setMass(RobotModelLink self, Mass mass)
        Sets the inertial properties of the link. (Note that the Mass is given
        with origin at the link frame, not about the COM.)
        """
        return _robotsim.RobotModelLink_setMass(self, *args)
    def getParentTransform(self):
        """
        getParentTransform(RobotModelLink self)
        Gets transformation (R,t) to the parent link.
        """
        return _robotsim.RobotModelLink_getParentTransform(self)
    def setParentTransform(self, *args):
        """
        setParentTransform(RobotModelLink self, double const [9] R, double const [3] t)
        Sets transformation (R,t) to the parent link.
        """
        return _robotsim.RobotModelLink_setParentTransform(self, *args)
    def getAxis(self):
        """
        getAxis(RobotModelLink self)
        Gets the local rotational / translational axis.
        """
        return _robotsim.RobotModelLink_getAxis(self)
    def setAxis(self, *args):
        """
        setAxis(RobotModelLink self, double const [3] axis)
        Sets the local rotational / translational axis.
        """
        return _robotsim.RobotModelLink_setAxis(self, *args)
    def getWorldPosition(self, *args):
        """
        getWorldPosition(RobotModelLink self, double const [3] plocal)
        Converts point from local to world coordinates.
        """
        return _robotsim.RobotModelLink_getWorldPosition(self, *args)
    def getWorldDirection(self, *args):
        """
        getWorldDirection(RobotModelLink self, double const [3] vlocal)
        Converts direction from local to world coordinates.
        """
        return _robotsim.RobotModelLink_getWorldDirection(self, *args)
    def getLocalPosition(self, *args):
        """
        getLocalPosition(RobotModelLink self, double const [3] pworld)
        Converts point from world to local coordinates.
        """
        return _robotsim.RobotModelLink_getLocalPosition(self, *args)
    def getLocalDirection(self, *args):
        """
        getLocalDirection(RobotModelLink self, double const [3] vworld)
        Converts direction from world to local coordinates.
        """
        return _robotsim.RobotModelLink_getLocalDirection(self, *args)
    def getTransform(self):
        """
        getTransform(RobotModelLink self)
        Gets transformation (R,t) to the world frame.
        """
        return _robotsim.RobotModelLink_getTransform(self)
    def setTransform(self, *args):
        """
        setTransform(RobotModelLink self, double const [9] R, double const [3] t)
        Sets transformation (R,t) to the world frame. Note: this does NOT
        perform inverse kinematics. The transform is overwritten when the
        robot's setConfig() method is called.
        """
        return _robotsim.RobotModelLink_setTransform(self, *args)
    def getJacobian(self, *args):
        """
        getJacobian(RobotModelLink self, double const [3] p)
        Returns the total jacobian of the local point p (row-major matrix)
        (orientation jacobian is stacked on position jacobian)
        """
        return _robotsim.RobotModelLink_getJacobian(self, *args)
    def getPositionJacobian(self, *args):
        """
        getPositionJacobian(RobotModelLink self, double const [3] p)
        Returns the jacobian of the local point p (row-major matrix)
        """
        return _robotsim.RobotModelLink_getPositionJacobian(self, *args)
    def getOrientationJacobian(self):
        """
        getOrientationJacobian(RobotModelLink self)
        Returns the orientation jacobian of the link (row-major matrix)
        """
        return _robotsim.RobotModelLink_getOrientationJacobian(self)
    def getVelocity(self):
        """
        getVelocity(RobotModelLink self)
        Returns the velocity of the origin given the robot's current velocity.
        """
        return _robotsim.RobotModelLink_getVelocity(self)
    def getAngularVelocity(self):
        """
        getAngularVelocity(RobotModelLink self)
        Returns the angular velocity given the robot's current velocity.
        """
        return _robotsim.RobotModelLink_getAngularVelocity(self)
    def getPointVelocity(self, *args):
        """
        getPointVelocity(RobotModelLink self, double const [3] plocal)
        Returns the world velocity of the point given the robot's current
        velocity.
        """
        return _robotsim.RobotModelLink_getPointVelocity(self, *args)
    def drawLocalGL(self, keepAppearance=True):
        """
        drawLocalGL(RobotModelLink self, bool keepAppearance=True)
        drawLocalGL(RobotModelLink self)
        Draws the link's geometry in its local frame. If keepAppearance=true,
        the current Appearance is honored. Otherwise, just the geometry is
        drawn.
        """
        return _robotsim.RobotModelLink_drawLocalGL(self, keepAppearance)
    def drawWorldGL(self, keepAppearance=True):
        """
        drawWorldGL(RobotModelLink self, bool keepAppearance=True)
        drawWorldGL(RobotModelLink self)
        Draws the link's geometry in the world frame. If keepAppearance=true,
        the current Appearance is honored. Otherwise, just the geometry is
        drawn.
        """
        return _robotsim.RobotModelLink_drawWorldGL(self, keepAppearance)
    # Bookkeeping members identifying this link within its world/robot,
    # exposed from the C++ struct through the SWIG dispatch tables.
    __swig_setmethods__["world"] = _robotsim.RobotModelLink_world_set
    __swig_getmethods__["world"] = _robotsim.RobotModelLink_world_get
    if _newclass:world = _swig_property(_robotsim.RobotModelLink_world_get, _robotsim.RobotModelLink_world_set)
    __swig_setmethods__["robotIndex"] = _robotsim.RobotModelLink_robotIndex_set
    __swig_getmethods__["robotIndex"] = _robotsim.RobotModelLink_robotIndex_get
    if _newclass:robotIndex = _swig_property(_robotsim.RobotModelLink_robotIndex_get, _robotsim.RobotModelLink_robotIndex_set)
    __swig_setmethods__["robotPtr"] = _robotsim.RobotModelLink_robotPtr_set
    __swig_getmethods__["robotPtr"] = _robotsim.RobotModelLink_robotPtr_get
    if _newclass:robotPtr = _swig_property(_robotsim.RobotModelLink_robotPtr_get, _robotsim.RobotModelLink_robotPtr_set)
    __swig_setmethods__["index"] = _robotsim.RobotModelLink_index_set
    __swig_getmethods__["index"] = _robotsim.RobotModelLink_index_get
    if _newclass:index = _swig_property(_robotsim.RobotModelLink_index_get, _robotsim.RobotModelLink_index_set)
    __swig_destroy__ = _robotsim.delete_RobotModelLink
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
RobotModelLink_swigregister = _robotsim.RobotModelLink_swigregister
RobotModelLink_swigregister(RobotModelLink)
class RobotModelDriver(_object):
    """
    A reference to a driver of a RobotModel.
    A driver corresponds to one of the robot's actuators and its
    transmission.
    C++ includes: robotmodel.h
    """
    # SWIG attribute dispatch tables.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, RobotModelDriver, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, RobotModelDriver, name)
    __repr__ = _swig_repr
    def __init__(self):
        """__init__(RobotModelDriver self) -> RobotModelDriver"""
        this = _robotsim.new_RobotModelDriver()
        # Standard SWIG 'this'-pointer ownership idiom.
        try: self.this.append(this)
        except: self.this = this
    def getName(self):
        """getName(RobotModelDriver self) -> char const *"""
        return _robotsim.RobotModelDriver_getName(self)
    def robot(self):
        """
        robot(RobotModelDriver self) -> RobotModel
        Returns a reference to the driver's robot.
        """
        return _robotsim.RobotModelDriver_robot(self)
    def getType(self):
        """
        getType(RobotModelDriver self) -> char const *
        Currently can be "normal", "affine", "rotation",
        "translation", or "custom".
        """
        return _robotsim.RobotModelDriver_getType(self)
    def getAffectedLink(self):
        """
        getAffectedLink(RobotModelDriver self) -> int
        Returns the single affected link for "normal" links.
        """
        return _robotsim.RobotModelDriver_getAffectedLink(self)
    def getAffectedLinks(self, *args):
        """
        getAffectedLinks(RobotModelDriver self, intVector links)
        Returns the driver's affected links.
        """
        return _robotsim.RobotModelDriver_getAffectedLinks(self, *args)
    def getAffineCoeffs(self, *args):
        """
        getAffineCoeffs(RobotModelDriver self, doubleVector scale, doubleVector offset)
        For "affine" links, returns the scale and offset of the driver value
        mapped to the world.
        """
        return _robotsim.RobotModelDriver_getAffineCoeffs(self, *args)
    def setValue(self, *args):
        """
        setValue(RobotModelDriver self, double val)
        Sets the robot's config to correspond to the given driver value.
        """
        return _robotsim.RobotModelDriver_setValue(self, *args)
    def getValue(self):
        """
        getValue(RobotModelDriver self) -> double
        Gets the current driver value from the robot's config.
        """
        return _robotsim.RobotModelDriver_getValue(self)
    def setVelocity(self, *args):
        """
        setVelocity(RobotModelDriver self, double val)
        Sets the robot's velocity to correspond to the given driver velocity
        value.
        """
        return _robotsim.RobotModelDriver_setVelocity(self, *args)
    def getVelocity(self):
        """
        getVelocity(RobotModelDriver self) -> double
        Gets the current driver velocity value from the robot's velocity.
        """
        return _robotsim.RobotModelDriver_getVelocity(self)
    # Bookkeeping members identifying this driver within its world/robot.
    __swig_setmethods__["world"] = _robotsim.RobotModelDriver_world_set
    __swig_getmethods__["world"] = _robotsim.RobotModelDriver_world_get
    if _newclass:world = _swig_property(_robotsim.RobotModelDriver_world_get, _robotsim.RobotModelDriver_world_set)
    __swig_setmethods__["robotIndex"] = _robotsim.RobotModelDriver_robotIndex_set
    __swig_getmethods__["robotIndex"] = _robotsim.RobotModelDriver_robotIndex_get
    if _newclass:robotIndex = _swig_property(_robotsim.RobotModelDriver_robotIndex_get, _robotsim.RobotModelDriver_robotIndex_set)
    __swig_setmethods__["robotPtr"] = _robotsim.RobotModelDriver_robotPtr_set
    __swig_getmethods__["robotPtr"] = _robotsim.RobotModelDriver_robotPtr_get
    if _newclass:robotPtr = _swig_property(_robotsim.RobotModelDriver_robotPtr_get, _robotsim.RobotModelDriver_robotPtr_set)
    __swig_setmethods__["index"] = _robotsim.RobotModelDriver_index_set
    __swig_getmethods__["index"] = _robotsim.RobotModelDriver_index_get
    if _newclass:index = _swig_property(_robotsim.RobotModelDriver_index_get, _robotsim.RobotModelDriver_index_set)
    __swig_destroy__ = _robotsim.delete_RobotModelDriver
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
RobotModelDriver_swigregister = _robotsim.RobotModelDriver_swigregister
RobotModelDriver_swigregister(RobotModelDriver)
class RobotModel(_object):
"""
A model of a dynamic and kinematic robot.
Stores both constant information, like the reference placement of the
links, joint limits, velocity limits, etc, as well as a current
configuration and current velocity which are state-dependent. Several
functions depend on the robot's current configuration and/or velocity.
To update that, use the setConfig() and setVelocity() functions.
setConfig() also update's the robot's link transforms via forward
kinematics. You may also use setDOFPosition and setDOFVelocity for
individual changes, but this is more expensive because each call
updates all of the affected the link transforms.
It is important to understand that changing the configuration of the
model doesn't actually send a command to the physical / simulated
robot. Moreover, the model does not automatically get updated when the
physical / simulated robot moves. In essence, the model maintains
temporary storage for performing kinematics, dynamics, and planning
computations, as well as for visualization.
The state of the robot is retrieved using getConfig/getVelocity calls,
and is set using setConfig/setVelocity. Because many routines change
the robot's configuration, like IK and motion planning, a common
design pattern is to save/restore the configuration as follows: q =
robot.getConfig()
do some stuff that may touch the robot's configuration...
robot.setConfig(q)
The model maintains configuration/velocity/acceleration/torque bounds.
However, these are not enforced by the model, so you can happily set
configurations outside must rather be enforced by the planner /
simulator.
C++ includes: robotmodel.h
"""
    # SWIG attribute plumbing: per-class get/set tables consulted by
    # _swig_setattr/_swig_getattr to route attribute access into the C extension.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, RobotModel, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, RobotModel, name)
    __repr__ = _swig_repr
    def __init__(self):
        """__init__(RobotModel self) -> RobotModel"""
        this = _robotsim.new_RobotModel()
        # SWIG idiom: the proxy may or may not already hold a 'this' pointer
        # list; append to it if present, otherwise set it.
        try: self.this.append(this)
        except: self.this = this
    def getID(self):
        """
        getID(RobotModel self) -> int
        Returns the ID of the robot in its world (Note: not the same as the
        robot index)
        """
        return _robotsim.RobotModel_getID(self)
    def getName(self):
        """getName(RobotModel self) -> char const *"""
        return _robotsim.RobotModel_getName(self)
    def setName(self, *args):
        """setName(RobotModel self, char const * name)"""
        return _robotsim.RobotModel_setName(self, *args)
    def numLinks(self):
        """
        numLinks(RobotModel self) -> int
        Returns the number of links = number of DOF's.
        """
        return _robotsim.RobotModel_numLinks(self)
    def link(self, *args):
        """
        link(RobotModel self, int index) -> RobotModelLink
        link(RobotModel self, char const * name) -> RobotModelLink
        Returns a reference to the named link.
        """
        return _robotsim.RobotModel_link(self, *args)
    def numDrivers(self):
        """
        numDrivers(RobotModel self) -> int
        Returns the number of drivers.
        """
        return _robotsim.RobotModel_numDrivers(self)
    def driver(self, *args):
        """
        driver(RobotModel self, int index) -> RobotModelDriver
        driver(RobotModel self, char const * name) -> RobotModelDriver
        Returns a reference to the named driver.
        """
        return _robotsim.RobotModel_driver(self, *args)
    def getConfig(self):
        """
        getConfig(RobotModel self)
        Retrieves the current configuration of the robot model.
        """
        return _robotsim.RobotModel_getConfig(self)
    def getVelocity(self):
        """
        getVelocity(RobotModel self)
        Retrieves the current velocity of the robot model.
        """
        return _robotsim.RobotModel_getVelocity(self)
    def setConfig(self, *args):
        """
        setConfig(RobotModel self, doubleVector q)
        Sets the current configuration of the robot. Input q is a vector of
        length numLinks(). This also updates forward kinematics of all links.
        Again, it is important to realize that the RobotModel is not the same
        as a simulated robot, and this will not change the simulation world.
        Many functions such as IK and motion planning use the RobotModel
        configuration as a temporary variable, so if you need to keep the
        configuration through a robot-modifying function call, you should call
        q = robot.getConfig() before the call, and then robot.setConfig(q)
        after it.
        """
        return _robotsim.RobotModel_setConfig(self, *args)
    def setVelocity(self, *args):
        """
        setVelocity(RobotModel self, doubleVector dq)
        Sets the current velocity of the robot model. Like the configuration,
        this is also essentially a temporary variable.
        """
        return _robotsim.RobotModel_setVelocity(self, *args)
    def getJointLimits(self):
        """
        getJointLimits(RobotModel self)
        Retrieves a pair (qmin,qmax) of min/max joint limit vectors.
        """
        return _robotsim.RobotModel_getJointLimits(self)
    def setJointLimits(self, *args):
        """
        setJointLimits(RobotModel self, doubleVector qmin, doubleVector qmax)
        Sets the min/max joint limit vectors (must have length numLinks())
        """
        return _robotsim.RobotModel_setJointLimits(self, *args)
    def getVelocityLimits(self):
        """
        getVelocityLimits(RobotModel self)
        Retrieve the velocity limit vector vmax, the constraint is |dq[i]| <=
        vmax[i].
        """
        return _robotsim.RobotModel_getVelocityLimits(self)
    def setVelocityLimits(self, *args):
        """
        setVelocityLimits(RobotModel self, doubleVector vmax)
        Sets the velocity limit vector vmax, the constraint is |dq[i]| <=
        vmax[i].
        """
        return _robotsim.RobotModel_setVelocityLimits(self, *args)
    def getAccelerationLimits(self):
        """
        getAccelerationLimits(RobotModel self)
        Retrieve the acceleration limit vector amax, the constraint is
        |ddq[i]| <= amax[i].
        """
        return _robotsim.RobotModel_getAccelerationLimits(self)
    def setAccelerationLimits(self, *args):
        """
        setAccelerationLimits(RobotModel self, doubleVector amax)
        Sets the acceleration limit vector amax, the constraint is |ddq[i]| <=
        amax[i].
        """
        return _robotsim.RobotModel_setAccelerationLimits(self, *args)
    def getTorqueLimits(self):
        """
        getTorqueLimits(RobotModel self)
        Retrieve the torque limit vector tmax, the constraint is |torque[i]|
        <= tmax[i].
        """
        return _robotsim.RobotModel_getTorqueLimits(self)
    def setTorqueLimits(self, *args):
        """
        setTorqueLimits(RobotModel self, doubleVector tmax)
        Sets the torque limit vector tmax, the constraint is |torque[i]| <=
        tmax[i].
        """
        return _robotsim.RobotModel_setTorqueLimits(self, *args)
    def setDOFPosition(self, *args):
        """
        setDOFPosition(RobotModel self, int i, double qi)
        setDOFPosition(RobotModel self, char const * name, double qi)
        Sets a single DOF's position (by name). Note: if you are setting
        several joints at once, use setConfig because this function computes
        forward kinematics every time.
        """
        return _robotsim.RobotModel_setDOFPosition(self, *args)
    def getDOFPosition(self, *args):
        """
        getDOFPosition(RobotModel self, int i) -> double
        getDOFPosition(RobotModel self, char const * name) -> double
        Returns a single DOF's position (by name)
        """
        return _robotsim.RobotModel_getDOFPosition(self, *args)
    def getCom(self):
        """
        getCom(RobotModel self)
        Returns the 3D center of mass at the current config.
        """
        return _robotsim.RobotModel_getCom(self)
    def getComJacobian(self):
        """
        getComJacobian(RobotModel self)
        Returns the 3xn Jacobian matrix of the current center of mass.
        """
        return _robotsim.RobotModel_getComJacobian(self)
    def getMassMatrix(self):
        """
        getMassMatrix(RobotModel self)
        Returns the nxn mass matrix B(q)
        """
        return _robotsim.RobotModel_getMassMatrix(self)
    def getMassMatrixInv(self):
        """
        getMassMatrixInv(RobotModel self)
        Returns the inverse of the nxn mass matrix B(q)^-1 (faster than
        inverting result of getMassMatrix)
        """
        return _robotsim.RobotModel_getMassMatrixInv(self)
    def getCoriolisForceMatrix(self):
        """
        getCoriolisForceMatrix(RobotModel self)
        Returns the Coriolis force matrix C(q,dq) for current config and
        velocity.
        """
        return _robotsim.RobotModel_getCoriolisForceMatrix(self)
    def getCoriolisForces(self):
        """
        getCoriolisForces(RobotModel self)
        Returns the Coriolis forces C(q,dq)*dq for current config and velocity
        (faster than computing matrix and doing product). ("Forces" is
        somewhat of a misnomer; the result is a joint torque vector)
        """
        return _robotsim.RobotModel_getCoriolisForces(self)
    def getGravityForces(self, *args):
        """
        getGravityForces(RobotModel self, double const [3] g)
        Returns the generalized gravity vector G(q) for the given workspace
        gravity vector g (usually (0,0,-9.8)). ("Forces" is somewhat of a
        misnomer; the result is a joint torque vector)
        """
        return _robotsim.RobotModel_getGravityForces(self, *args)
    def torquesFromAccel(self, *args):
        """
        torquesFromAccel(RobotModel self, doubleVector ddq)
        Computes the inverse dynamics (using Recursive Newton Euler solver).
        Note: does not include gravity term G(q)
        """
        return _robotsim.RobotModel_torquesFromAccel(self, *args)
    def accelFromTorques(self, *args):
        """
        accelFromTorques(RobotModel self, doubleVector t)
        Computes the forward dynamics (using Recursive Newton Euler solver)
        Note: does not include gravity term G(q)
        """
        return _robotsim.RobotModel_accelFromTorques(self, *args)
    def interpolate(self, *args):
        """
        interpolate(RobotModel self, doubleVector a, doubleVector b, double u)
        Interpolates smoothly between two configurations, properly taking into
        account nonstandard joints.
        """
        return _robotsim.RobotModel_interpolate(self, *args)
    def distance(self, *args):
        """
        distance(RobotModel self, doubleVector a, doubleVector b) -> double
        Computes a distance between two configurations, properly taking into
        account nonstandard joints.
        """
        return _robotsim.RobotModel_distance(self, *args)
    def interpolateDeriv(self, *args):
        """
        interpolateDeriv(RobotModel self, doubleVector a, doubleVector b)
        Returns the configuration derivative at a as you interpolate toward b
        at unit speed.
        """
        return _robotsim.RobotModel_interpolateDeriv(self, *args)
    def randomizeConfig(self, unboundedScale=1.0):
        """
        randomizeConfig(RobotModel self, double unboundedScale=1.0)
        randomizeConfig(RobotModel self)
        Samples a random configuration and updates the robot's pose. Properly
        handles non-normal joints and handles DOFs with infinite bounds using
        a centered Laplacian distribution with the given scaling term. (Note
        that the python random seeding does not affect the result.)
        """
        return _robotsim.RobotModel_randomizeConfig(self, unboundedScale)
    def selfCollisionEnabled(self, *args):
        """
        selfCollisionEnabled(RobotModel self, int link1, int link2) -> bool
        Queries whether self collisions between two links is enabled.
        """
        return _robotsim.RobotModel_selfCollisionEnabled(self, *args)
    def enableSelfCollision(self, *args):
        """
        enableSelfCollision(RobotModel self, int link1, int link2, bool value)
        Enables/disables self collisions between two links (depending on
        value)
        """
        return _robotsim.RobotModel_enableSelfCollision(self, *args)
    def selfCollides(self):
        """
        selfCollides(RobotModel self) -> bool
        Returns true if the robot is in self collision (faster than manual
        testing)
        """
        return _robotsim.RobotModel_selfCollides(self)
    def drawGL(self, keepAppearance=True):
        """
        drawGL(RobotModel self, bool keepAppearance=True)
        drawGL(RobotModel self)
        Draws the robot geometry. If keepAppearance=true, the current
        appearance is honored. Otherwise, only the raw geometry is drawn.
        PERFORMANCE WARNING: if keepAppearance is false, then this does not
        properly reuse OpenGL display lists. A better approach to changing the
        robot's appearances is to set the link Appearance's directly.
        """
        return _robotsim.RobotModel_drawGL(self, keepAppearance)
    # Wrapped C++ data members exposed as Python properties on new-style classes.
    __swig_setmethods__["world"] = _robotsim.RobotModel_world_set
    __swig_getmethods__["world"] = _robotsim.RobotModel_world_get
    if _newclass:world = _swig_property(_robotsim.RobotModel_world_get, _robotsim.RobotModel_world_set)
    __swig_setmethods__["index"] = _robotsim.RobotModel_index_set
    __swig_getmethods__["index"] = _robotsim.RobotModel_index_get
    if _newclass:index = _swig_property(_robotsim.RobotModel_index_get, _robotsim.RobotModel_index_set)
    __swig_setmethods__["robot"] = _robotsim.RobotModel_robot_set
    __swig_getmethods__["robot"] = _robotsim.RobotModel_robot_get
    if _newclass:robot = _swig_property(_robotsim.RobotModel_robot_get, _robotsim.RobotModel_robot_set)
    __swig_destroy__ = _robotsim.delete_RobotModel
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime so C++-side objects map to it.
RobotModel_swigregister = _robotsim.RobotModel_swigregister
RobotModel_swigregister(RobotModel)
class RigidObjectModel(_object):
    """
    A rigid movable object.
    A rigid object has a name, geometry, appearance, mass, surface
    properties, and current transform / velocity.
    State is retrieved/set using get/setTransform, and get/setVelocity
    C++ includes: robotmodel.h
    """
    # SWIG attribute plumbing used by _swig_setattr/_swig_getattr.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, RigidObjectModel, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, RigidObjectModel, name)
    __repr__ = _swig_repr
    def __init__(self):
        """__init__(RigidObjectModel self) -> RigidObjectModel"""
        this = _robotsim.new_RigidObjectModel()
        # SWIG idiom: append to an existing 'this' pointer list, else set it.
        try: self.this.append(this)
        except: self.this = this
    def getID(self):
        """getID(RigidObjectModel self) -> int"""
        return _robotsim.RigidObjectModel_getID(self)
    def getName(self):
        """getName(RigidObjectModel self) -> char const *"""
        return _robotsim.RigidObjectModel_getName(self)
    def setName(self, *args):
        """setName(RigidObjectModel self, char const * name)"""
        return _robotsim.RigidObjectModel_setName(self, *args)
    def geometry(self):
        """geometry(RigidObjectModel self) -> Geometry3D"""
        return _robotsim.RigidObjectModel_geometry(self)
    def appearance(self):
        """appearance(RigidObjectModel self) -> Appearance"""
        return _robotsim.RigidObjectModel_appearance(self)
    def getMass(self):
        """getMass(RigidObjectModel self) -> Mass"""
        return _robotsim.RigidObjectModel_getMass(self)
    def setMass(self, *args):
        """setMass(RigidObjectModel self, Mass mass)"""
        return _robotsim.RigidObjectModel_setMass(self, *args)
    def getContactParameters(self):
        """getContactParameters(RigidObjectModel self) -> ContactParameters"""
        return _robotsim.RigidObjectModel_getContactParameters(self)
    def setContactParameters(self, *args):
        """setContactParameters(RigidObjectModel self, ContactParameters params)"""
        return _robotsim.RigidObjectModel_setContactParameters(self, *args)
    def getTransform(self):
        """
        getTransform(RigidObjectModel self)
        Retrieves the rotation / translation of the rigid object (R,t)
        """
        return _robotsim.RigidObjectModel_getTransform(self)
    def setTransform(self, *args):
        """
        setTransform(RigidObjectModel self, double const [9] R, double const [3] t)
        Sets the rotation / translation (R,t) of the rigid object.
        """
        return _robotsim.RigidObjectModel_setTransform(self, *args)
    def getVelocity(self):
        """
        getVelocity(RigidObjectModel self)
        Retrieves the (angular velocity, velocity) of the rigid object.
        """
        return _robotsim.RigidObjectModel_getVelocity(self)
    def setVelocity(self, *args):
        """
        setVelocity(RigidObjectModel self, double const [3] angularVelocity, double const [3] velocity)
        Sets the (angular velocity, velocity) of the rigid object.
        """
        return _robotsim.RigidObjectModel_setVelocity(self, *args)
    def drawGL(self, keepAppearance=True):
        """
        drawGL(RigidObjectModel self, bool keepAppearance=True)
        drawGL(RigidObjectModel self)
        Draws the object's geometry. If keepAppearance=true, the current
        appearance is honored. Otherwise, only the raw geometry is drawn.
        PERFORMANCE WARNING: if keepAppearance is false, then this does not
        properly reuse OpenGL display lists. A better approach to changing the
        object's appearance is to set its Appearance directly.
        """
        return _robotsim.RigidObjectModel_drawGL(self, keepAppearance)
    # Wrapped C++ data members exposed as Python properties on new-style classes.
    __swig_setmethods__["world"] = _robotsim.RigidObjectModel_world_set
    __swig_getmethods__["world"] = _robotsim.RigidObjectModel_world_get
    if _newclass:world = _swig_property(_robotsim.RigidObjectModel_world_get, _robotsim.RigidObjectModel_world_set)
    __swig_setmethods__["index"] = _robotsim.RigidObjectModel_index_set
    __swig_getmethods__["index"] = _robotsim.RigidObjectModel_index_get
    if _newclass:index = _swig_property(_robotsim.RigidObjectModel_index_get, _robotsim.RigidObjectModel_index_set)
    __swig_setmethods__["object"] = _robotsim.RigidObjectModel_object_set
    __swig_getmethods__["object"] = _robotsim.RigidObjectModel_object_get
    if _newclass:object = _swig_property(_robotsim.RigidObjectModel_object_get, _robotsim.RigidObjectModel_object_set)
    __swig_destroy__ = _robotsim.delete_RigidObjectModel
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
RigidObjectModel_swigregister = _robotsim.RigidObjectModel_swigregister
RigidObjectModel_swigregister(RigidObjectModel)
class TerrainModel(_object):
    """
    Static environment geometry.
    C++ includes: robotmodel.h
    """
    # SWIG attribute plumbing used by _swig_setattr/_swig_getattr.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, TerrainModel, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, TerrainModel, name)
    __repr__ = _swig_repr
    def __init__(self):
        """__init__(TerrainModel self) -> TerrainModel"""
        this = _robotsim.new_TerrainModel()
        # SWIG idiom: append to an existing 'this' pointer list, else set it.
        try: self.this.append(this)
        except: self.this = this
    def getID(self):
        """getID(TerrainModel self) -> int"""
        return _robotsim.TerrainModel_getID(self)
    def getName(self):
        """getName(TerrainModel self) -> char const *"""
        return _robotsim.TerrainModel_getName(self)
    def setName(self, *args):
        """setName(TerrainModel self, char const * name)"""
        return _robotsim.TerrainModel_setName(self, *args)
    def geometry(self):
        """geometry(TerrainModel self) -> Geometry3D"""
        return _robotsim.TerrainModel_geometry(self)
    def appearance(self):
        """appearance(TerrainModel self) -> Appearance"""
        return _robotsim.TerrainModel_appearance(self)
    def setFriction(self, *args):
        """setFriction(TerrainModel self, double friction)"""
        return _robotsim.TerrainModel_setFriction(self, *args)
    def drawGL(self, keepAppearance=True):
        """
        drawGL(TerrainModel self, bool keepAppearance=True)
        drawGL(TerrainModel self)
        Draws the object's geometry. If keepAppearance=true, the current
        appearance is honored. Otherwise, only the raw geometry is drawn.
        PERFORMANCE WARNING: if keepAppearance is false, then this does not
        properly reuse OpenGL display lists. A better approach to changing the
        object's appearance is to set its Appearance directly.
        """
        return _robotsim.TerrainModel_drawGL(self, keepAppearance)
    # Wrapped C++ data members exposed as Python properties on new-style classes.
    __swig_setmethods__["world"] = _robotsim.TerrainModel_world_set
    __swig_getmethods__["world"] = _robotsim.TerrainModel_world_get
    if _newclass:world = _swig_property(_robotsim.TerrainModel_world_get, _robotsim.TerrainModel_world_set)
    __swig_setmethods__["index"] = _robotsim.TerrainModel_index_set
    __swig_getmethods__["index"] = _robotsim.TerrainModel_index_get
    if _newclass:index = _swig_property(_robotsim.TerrainModel_index_get, _robotsim.TerrainModel_index_set)
    __swig_setmethods__["terrain"] = _robotsim.TerrainModel_terrain_set
    __swig_getmethods__["terrain"] = _robotsim.TerrainModel_terrain_get
    if _newclass:terrain = _swig_property(_robotsim.TerrainModel_terrain_get, _robotsim.TerrainModel_terrain_set)
    __swig_destroy__ = _robotsim.delete_TerrainModel
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
TerrainModel_swigregister = _robotsim.TerrainModel_swigregister
TerrainModel_swigregister(TerrainModel)
class WorldModel(_object):
    """
    The main world class, containing robots, rigid objects, and static
    environment geometry.
    Note that this is just a model and can be changed at will -- in fact
    planners and simulators will make use of a model to "display"
    computed states.
    Every robot/robot link/terrain/rigid object is given a unique ID in
    the world. This is potentially a source of confusion because some
    functions take IDs and some take indices. Only the WorldModel and
    Simulator classes use IDs when the argument has 'id' as a suffix,
    e.g., geometry(), appearance(), Simulator.inContact(). All other
    functions use indices, e.g. robot(0), terrain(0), etc.
    To get an object's ID, you can see the value returned by loadElement
    and/or object.getID().
    To save/restore the state of the model, you must manually maintain
    copies of the states of whichever objects you wish to save/restore.
    C++ includes: robotmodel.h
    """
    # SWIG attribute plumbing used by _swig_setattr/_swig_getattr.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, WorldModel, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, WorldModel, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(WorldModel self) -> WorldModel
        __init__(WorldModel self, void * ptrRobotWorld) -> WorldModel
        __init__(WorldModel self, WorldModel w) -> WorldModel
        """
        this = _robotsim.new_WorldModel(*args)
        # SWIG idiom: append to an existing 'this' pointer list, else set it.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_WorldModel
    __del__ = lambda self : None;
    def copy(self):
        """
        copy(WorldModel self) -> WorldModel
        Creates a copy of the world model. Note that geometries and
        appearances are shared...
        """
        return _robotsim.WorldModel_copy(self)
    def readFile(self, *args):
        """
        readFile(WorldModel self, char const * fn) -> bool
        Reads from a world XML file.
        """
        return _robotsim.WorldModel_readFile(self, *args)
    def loadFile(self, *args):
        """
        loadFile(WorldModel self, char const * fn) -> bool
        Alias of readFile.
        """
        return _robotsim.WorldModel_loadFile(self, *args)
    def saveFile(self, *args):
        """
        saveFile(WorldModel self, char const * fn, char const * elementDir=None) -> bool
        saveFile(WorldModel self, char const * fn) -> bool
        Saves to a world XML file. If elementDir is provided, then robots,
        terrains, etc. will be saved there. Otherwise they will be saved to a
        folder with the same base name as fn (without the trailing .xml)
        """
        return _robotsim.WorldModel_saveFile(self, *args)
    def numRobots(self):
        """numRobots(WorldModel self) -> int"""
        return _robotsim.WorldModel_numRobots(self)
    def numRobotLinks(self, *args):
        """numRobotLinks(WorldModel self, int robot) -> int"""
        return _robotsim.WorldModel_numRobotLinks(self, *args)
    def numRigidObjects(self):
        """numRigidObjects(WorldModel self) -> int"""
        return _robotsim.WorldModel_numRigidObjects(self)
    def numTerrains(self):
        """numTerrains(WorldModel self) -> int"""
        return _robotsim.WorldModel_numTerrains(self)
    def numIDs(self):
        """numIDs(WorldModel self) -> int"""
        return _robotsim.WorldModel_numIDs(self)
    def robot(self, *args):
        """
        robot(WorldModel self, int index) -> RobotModel
        robot(WorldModel self, char const * name) -> RobotModel
        """
        return _robotsim.WorldModel_robot(self, *args)
    def robotLink(self, *args):
        """
        robotLink(WorldModel self, int robot, int index) -> RobotModelLink
        robotLink(WorldModel self, char const * robot, char const * name) -> RobotModelLink
        """
        return _robotsim.WorldModel_robotLink(self, *args)
    def rigidObject(self, *args):
        """
        rigidObject(WorldModel self, int index) -> RigidObjectModel
        rigidObject(WorldModel self, char const * name) -> RigidObjectModel
        """
        return _robotsim.WorldModel_rigidObject(self, *args)
    def terrain(self, *args):
        """
        terrain(WorldModel self, int index) -> TerrainModel
        terrain(WorldModel self, char const * name) -> TerrainModel
        """
        return _robotsim.WorldModel_terrain(self, *args)
    def makeRobot(self, *args):
        """
        makeRobot(WorldModel self, char const * name) -> RobotModel
        Creates a new empty robot. (Not terribly useful now since you can't
        resize the number of links yet)
        """
        return _robotsim.WorldModel_makeRobot(self, *args)
    def makeRigidObject(self, *args):
        """
        makeRigidObject(WorldModel self, char const * name) -> RigidObjectModel
        Creates a new empty rigid object.
        """
        return _robotsim.WorldModel_makeRigidObject(self, *args)
    def makeTerrain(self, *args):
        """
        makeTerrain(WorldModel self, char const * name) -> TerrainModel
        Creates a new empty terrain.
        """
        return _robotsim.WorldModel_makeTerrain(self, *args)
    def loadRobot(self, *args):
        """
        loadRobot(WorldModel self, char const * fn) -> RobotModel
        Loads a robot from a .rob or .urdf file. An empty robot is returned if
        loading fails.
        """
        return _robotsim.WorldModel_loadRobot(self, *args)
    def loadRigidObject(self, *args):
        """
        loadRigidObject(WorldModel self, char const * fn) -> RigidObjectModel
        Loads a rigid object from a .obj or a mesh file. An empty rigid object
        is returned if loading fails.
        """
        return _robotsim.WorldModel_loadRigidObject(self, *args)
    def loadTerrain(self, *args):
        """
        loadTerrain(WorldModel self, char const * fn) -> TerrainModel
        Loads a terrain from a mesh file. An empty terrain is returned if
        loading fails.
        """
        return _robotsim.WorldModel_loadTerrain(self, *args)
    def loadElement(self, *args):
        """
        loadElement(WorldModel self, char const * fn) -> int
        Loads some element from a file, automatically detecting its type.
        Meshes are interpreted as terrains. The ID is returned, or -1 if
        loading failed.
        """
        return _robotsim.WorldModel_loadElement(self, *args)
    def add(self, *args):
        """
        add(WorldModel self, char const * name, RobotModel robot) -> RobotModel
        add(WorldModel self, char const * name, RigidObjectModel obj) -> RigidObjectModel
        add(WorldModel self, char const * name, TerrainModel terrain) -> TerrainModel
        Adds a copy of the given element to this world, either from this
        WorldModel or another.
        """
        return _robotsim.WorldModel_add(self, *args)
    def remove(self, *args):
        """
        remove(WorldModel self, RobotModel robot)
        remove(WorldModel self, RigidObjectModel object)
        remove(WorldModel self, TerrainModel terrain)
        Removes an element. It must be in this world or an exception is raised.
        IMPORTANT: all other references to world elements will be invalidated.
        """
        return _robotsim.WorldModel_remove(self, *args)
    def getName(self, *args):
        """
        getName(WorldModel self, int id) -> std::string
        Retrieves a name for a given element ID.
        """
        return _robotsim.WorldModel_getName(self, *args)
    def geometry(self, *args):
        """
        geometry(WorldModel self, int id) -> Geometry3D
        Retrieves a geometry for a given element ID.
        """
        return _robotsim.WorldModel_geometry(self, *args)
    def appearance(self, *args):
        """
        appearance(WorldModel self, int id) -> Appearance
        Retrieves an appearance for a given element ID.
        """
        return _robotsim.WorldModel_appearance(self, *args)
    def drawGL(self):
        """
        drawGL(WorldModel self)
        Draws the entire world using OpenGL.
        """
        return _robotsim.WorldModel_drawGL(self)
    def enableGeometryLoading(self, *args):
        """
        enableGeometryLoading(WorldModel self, bool enabled)
        If geometry loading is set to false, then only the kinematics are
        loaded from disk, and no geometry / visualization / collision
        detection structures will be loaded. Useful for quick scripts that
        just use kinematics / dynamics of a robot.
        """
        return _robotsim.WorldModel_enableGeometryLoading(self, *args)
    def enableInitCollisions(self, *args):
        """
        enableInitCollisions(WorldModel self, bool enabled)
        If collision detection is set to true, then collision acceleration
        data structures will be automatically initialized, with debugging
        information. Useful for scripts that do planning and for which
        collision initialization may take a long time. Note that even when
        this flag is off, the collision acceleration data structures will
        indeed be initialized whenever geometry collision, distance, or ray-
        casting routines are called.
        """
        return _robotsim.WorldModel_enableInitCollisions(self, *args)
    # Wrapped C++ data member exposed as a Python property on new-style classes.
    __swig_setmethods__["index"] = _robotsim.WorldModel_index_set
    __swig_getmethods__["index"] = _robotsim.WorldModel_index_get
    if _newclass:index = _swig_property(_robotsim.WorldModel_index_get, _robotsim.WorldModel_index_set)
# Register the proxy class with the SWIG runtime.
WorldModel_swigregister = _robotsim.WorldModel_swigregister
WorldModel_swigregister(WorldModel)
class IKObjective(_object):
    """
    A class defining an inverse kinematic target. Either a link on a robot
    can take on a fixed position/orientation in the world frame, or a
    relative position/orientation to another frame.
    Currently only fixed-point constraints and fixed-transform constraints
    are implemented in the Python API.
    C++ includes: robotik.h
    """
    # SWIG attribute plumbing used by _swig_setattr/_swig_getattr.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, IKObjective, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, IKObjective, name)
    __repr__ = _swig_repr
    def __init__(self):
        """__init__(IKObjective self) -> IKObjective"""
        this = _robotsim.new_IKObjective()
        # SWIG idiom: append to an existing 'this' pointer list, else set it.
        try: self.this.append(this)
        except: self.this = this
    def link(self):
        """
        link(IKObjective self) -> int
        The index of the robot link that is constrained.
        """
        return _robotsim.IKObjective_link(self)
    def destLink(self):
        """
        destLink(IKObjective self) -> int
        The index of the destination link, or -1 if fixed to the world.
        """
        return _robotsim.IKObjective_destLink(self)
    def numPosDims(self):
        """
        numPosDims(IKObjective self) -> int
        Returns the number of position dimensions constrained (0-3)
        """
        return _robotsim.IKObjective_numPosDims(self)
    def numRotDims(self):
        """
        numRotDims(IKObjective self) -> int
        Returns the number of rotation dimensions constrained (0-3)
        """
        return _robotsim.IKObjective_numRotDims(self)
    def setFixedPoint(self, *args):
        """
        setFixedPoint(IKObjective self, int link, double const [3] plocal, double const [3] pworld)
        Sets a fixed-point constraint.
        """
        return _robotsim.IKObjective_setFixedPoint(self, *args)
    def setFixedPoints(self, *args):
        """
        setFixedPoints(IKObjective self, int link, PyObject * plocals, PyObject * pworlds)
        Sets a multiple fixed-point constraint.
        """
        return _robotsim.IKObjective_setFixedPoints(self, *args)
    def setFixedTransform(self, *args):
        """
        setFixedTransform(IKObjective self, int link, double const [9] R, double const [3] t)
        Sets a fixed-transform constraint (R,t)
        """
        return _robotsim.IKObjective_setFixedTransform(self, *args)
    def setRelativePoint(self, *args):
        """
        setRelativePoint(IKObjective self, int link1, int link2, double const [3] p1, double const [3] p2)
        Sets a fixed-point constraint relative to link2.
        """
        return _robotsim.IKObjective_setRelativePoint(self, *args)
    def setRelativePoints(self, *args):
        """
        setRelativePoints(IKObjective self, int link1, int link2, PyObject * p1s, PyObject * p2s)
        Sets a multiple fixed-point constraint relative to link2.
        """
        return _robotsim.IKObjective_setRelativePoints(self, *args)
    def setRelativeTransform(self, *args):
        """
        setRelativeTransform(IKObjective self, int link, int linkTgt, double const [9] R, double const [3] t)
        Sets a fixed-transform constraint (R,t) relative to linkTgt.
        """
        return _robotsim.IKObjective_setRelativeTransform(self, *args)
    def setLinks(self, *args):
        """
        setLinks(IKObjective self, int link, int link2=-1)
        setLinks(IKObjective self, int link)
        Manual construction.
        """
        return _robotsim.IKObjective_setLinks(self, *args)
    def setFreePosition(self):
        """
        setFreePosition(IKObjective self)
        Manual: Sets a free position constraint.
        """
        return _robotsim.IKObjective_setFreePosition(self)
    def setFixedPosConstraint(self, *args):
        """
        setFixedPosConstraint(IKObjective self, double const [3] tlocal, double const [3] tworld)
        Manual: Sets a fixed position constraint.
        """
        return _robotsim.IKObjective_setFixedPosConstraint(self, *args)
    def setPlanarPosConstraint(self, *args):
        """
        setPlanarPosConstraint(IKObjective self, double const [3] tlocal, double const [3] nworld, double oworld)
        Manual: Sets a planar position constraint nworld^T T(link)*tlocal +
        oworld = 0.
        """
        return _robotsim.IKObjective_setPlanarPosConstraint(self, *args)
    def setLinearPosConstraint(self, *args):
        """
        setLinearPosConstraint(IKObjective self, double const [3] tlocal, double const [3] sworld, double const [3] dworld)
        Manual: Sets a linear position constraint T(link)*tlocal = sworld +
        u*dworld for some real value u.
        """
        return _robotsim.IKObjective_setLinearPosConstraint(self, *args)
    def setFreeRotConstraint(self):
        """
        setFreeRotConstraint(IKObjective self)
        Manual: Sets a free rotation constraint.
        """
        return _robotsim.IKObjective_setFreeRotConstraint(self)
    def setFixedRotConstraint(self, *args):
        """
        setFixedRotConstraint(IKObjective self, double const [9] R)
        Manual: Sets a fixed rotation constraint.
        """
        return _robotsim.IKObjective_setFixedRotConstraint(self, *args)
    def setAxialRotConstraint(self, *args):
        """
        setAxialRotConstraint(IKObjective self, double const [3] alocal, double const [3] aworld)
        Manual: Sets an axial rotation constraint.
        """
        return _robotsim.IKObjective_setAxialRotConstraint(self, *args)
    def getPosition(self):
        """
        getPosition(IKObjective self)
        Returns the local and global position of the position constraint.
        """
        return _robotsim.IKObjective_getPosition(self)
    def getPositionDirection(self):
        """
        getPositionDirection(IKObjective self)
        For linear and planar constraints, returns the direction.
        """
        return _robotsim.IKObjective_getPositionDirection(self)
    def getRotation(self):
        """
        getRotation(IKObjective self)
        For fixed rotation constraints, returns the orientation.
        """
        return _robotsim.IKObjective_getRotation(self)
    def getRotationAxis(self):
        """
        getRotationAxis(IKObjective self)
        For axis rotation constraints, returns the local and global axes.
        """
        return _robotsim.IKObjective_getRotationAxis(self)
    def getTransform(self):
        """
        getTransform(IKObjective self)
        For fixed-transform constraints, returns the transform (R,t)
        """
        return _robotsim.IKObjective_getTransform(self)
    def transform(self, *args):
        """
        transform(IKObjective self, double const [9] R, double const [3] t)
        Transforms the target position/rotation of this IK constraint by
        transform (R,t)
        """
        return _robotsim.IKObjective_transform(self, *args)
    def transformLocal(self, *args):
        """
        transformLocal(IKObjective self, double const [9] R, double const [3] t)
        Transforms the local position/rotation of this IK constraint by
        transform (R,t)
        """
        return _robotsim.IKObjective_transformLocal(self, *args)
    def matchDestination(self, *args):
        """
        matchDestination(IKObjective self, double const [9] R, double const [3] t)
        Sets the destination coordinates of this constraint to fit the given
        target transform. In other words, if (R,t) is the current link
        transform, this sets the destination position / orientation so that
        this objective has zero error. The current position/rotation
        constraint types are kept.
        """
        return _robotsim.IKObjective_matchDestination(self, *args)
    def loadString(self, *args):
        """
        loadString(IKObjective self, char const * str) -> bool
        Loads the objective from a Klamp't-native formatted string. For a more
        readable but verbose format, try the JSON IO routines
        loader.toJson/fromJson()
        """
        return _robotsim.IKObjective_loadString(self, *args)
    def saveString(self):
        """
        saveString(IKObjective self) -> std::string
        Saves the objective to a Klamp't-native formatted string. For a more
        readable but verbose format, try the JSON IO routines
        loader.toJson/fromJson()
        """
        return _robotsim.IKObjective_saveString(self)
    # Wrapped C++ data member exposed as a Python property on new-style classes.
    __swig_setmethods__["goal"] = _robotsim.IKObjective_goal_set
    __swig_getmethods__["goal"] = _robotsim.IKObjective_goal_get
    if _newclass:goal = _swig_property(_robotsim.IKObjective_goal_get, _robotsim.IKObjective_goal_set)
    __swig_destroy__ = _robotsim.delete_IKObjective
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
IKObjective_swigregister = _robotsim.IKObjective_swigregister
IKObjective_swigregister(IKObjective)
class IKSolver(_object):
    """
    An inverse kinematics solver based on the Newton-Raphson technique.

    Typical calling pattern is::

        s = IKSolver(robot)
        s.add(objective1)
        s.add(objective2)
        s.setMaxIters(100)
        s.setTolerance(1e-4)
        res = s.solve()
        if res:
            print "IK solution:", robot.getConfig(), "found in", s.lastSolveIters(), "iterations, residual", s.getResidual()
        else:
            print "IK failed:", robot.getConfig(), "found in", s.lastSolveIters(), "iterations, residual", s.getResidual()

    sampleInitial() is a convenience routine. More initial configurations
    can be sampled in case the prior configs lead to local minima.

    C++ includes: robotik.h
    """
    # NOTE: SWIG-generated proxy class; manual edits are lost when the
    # bindings are regenerated.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, IKSolver, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, IKSolver, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        """
        __init__(IKSolver self, RobotModel robot) -> IKSolver
        __init__(IKSolver self, IKSolver solver) -> IKSolver
        """
        this = _robotsim.new_IKSolver(*args)
        # SWIG ownership bookkeeping: append to the existing 'this' list if
        # one is present, otherwise install it.
        try:
            self.this.append(this)
        except Exception:
            self.this = this

    def add(self, *args):
        """
        add(IKSolver self, IKObjective objective)

        Adds a new simultaneous objective.
        """
        return _robotsim.IKSolver_add(self, *args)

    def set(self, *args):
        """
        set(IKSolver self, int i, IKObjective objective)

        Assigns an existing objective added by add.
        """
        return _robotsim.IKSolver_set(self, *args)

    def clear(self):
        """
        clear(IKSolver self)

        Clears objectives.
        """
        return _robotsim.IKSolver_clear(self)

    def setMaxIters(self, *args):
        """
        setMaxIters(IKSolver self, int iters)

        Sets the max # of iterations (default 100)
        """
        return _robotsim.IKSolver_setMaxIters(self, *args)

    def getMaxIters(self):
        """
        getMaxIters(IKSolver self) -> int

        Gets the max # of iterations.
        """
        return _robotsim.IKSolver_getMaxIters(self)

    def setTolerance(self, *args):
        """
        setTolerance(IKSolver self, double res)

        Sets the constraint solve tolerance (default 1e-3)
        """
        return _robotsim.IKSolver_setTolerance(self, *args)

    def getTolerance(self):
        """
        getTolerance(IKSolver self) -> double

        Gets the constraint solve tolerance.
        """
        return _robotsim.IKSolver_getTolerance(self)

    def setActiveDofs(self, *args):
        """
        setActiveDofs(IKSolver self, intVector active)

        Sets the active degrees of freedom.
        """
        return _robotsim.IKSolver_setActiveDofs(self, *args)

    def getActiveDofs(self):
        """
        getActiveDofs(IKSolver self)

        Gets the active degrees of freedom.
        """
        return _robotsim.IKSolver_getActiveDofs(self)

    def setJointLimits(self, *args):
        """
        setJointLimits(IKSolver self, doubleVector qmin, doubleVector qmax)

        Sets limits on the robot's configuration. If empty, this turns off
        joint limits.
        """
        return _robotsim.IKSolver_setJointLimits(self, *args)

    def getJointLimits(self):
        """
        getJointLimits(IKSolver self)

        Gets the limits on the robot's configuration (by default this is the
        robot's joint limits).
        """
        return _robotsim.IKSolver_getJointLimits(self)

    def setBiasConfig(self, *args):
        """
        setBiasConfig(IKSolver self, doubleVector biasConfig)

        Biases the solver to approach a given configuration. Setting an empty
        vector clears the bias term.
        """
        return _robotsim.IKSolver_setBiasConfig(self, *args)

    def getBiasConfig(self):
        """
        getBiasConfig(IKSolver self)

        Gets the solver's bias configuration.
        """
        return _robotsim.IKSolver_getBiasConfig(self)

    def isSolved(self):
        """
        isSolved(IKSolver self) -> bool

        Returns true if the current configuration residual is less than tol.
        """
        return _robotsim.IKSolver_isSolved(self)

    def getResidual(self):
        """
        getResidual(IKSolver self)

        Returns a vector describing the error of the objective at the current
        configuration.
        """
        return _robotsim.IKSolver_getResidual(self)

    def getJacobian(self):
        """
        getJacobian(IKSolver self)

        Returns a matrix describing the instantaneous derivative of the
        objective with respect to the active Dofs.
        """
        return _robotsim.IKSolver_getJacobian(self)

    def solve(self, *args):
        """
        solve(IKSolver self) -> bool
        solve(IKSolver self, int iters, double tol) -> PyObject *

        Old-style: will be deprecated. Specify # of iterations and tolerance.
        Tries to find a configuration that satisfies all simultaneous
        objectives up to the desired tolerance. Returns (res,iterations) where
        res is true if x converged.
        """
        return _robotsim.IKSolver_solve(self, *args)

    def lastSolveIters(self):
        """
        lastSolveIters(IKSolver self) -> int

        Returns the number of Newton-Raphson iterations used in the last
        solve() call.
        """
        return _robotsim.IKSolver_lastSolveIters(self)

    def sampleInitial(self):
        """
        sampleInitial(IKSolver self)

        Samples an initial random configuration.
        """
        return _robotsim.IKSolver_sampleInitial(self)

    # SWIG attribute wiring for the solver's C++ data members.
    __swig_setmethods__["robot"] = _robotsim.IKSolver_robot_set
    __swig_getmethods__["robot"] = _robotsim.IKSolver_robot_get
    if _newclass:
        robot = _swig_property(_robotsim.IKSolver_robot_get, _robotsim.IKSolver_robot_set)
    __swig_setmethods__["objectives"] = _robotsim.IKSolver_objectives_set
    __swig_getmethods__["objectives"] = _robotsim.IKSolver_objectives_get
    if _newclass:
        objectives = _swig_property(_robotsim.IKSolver_objectives_get, _robotsim.IKSolver_objectives_set)
    __swig_setmethods__["tol"] = _robotsim.IKSolver_tol_set
    __swig_getmethods__["tol"] = _robotsim.IKSolver_tol_get
    if _newclass:
        tol = _swig_property(_robotsim.IKSolver_tol_get, _robotsim.IKSolver_tol_set)
    __swig_setmethods__["maxIters"] = _robotsim.IKSolver_maxIters_set
    __swig_getmethods__["maxIters"] = _robotsim.IKSolver_maxIters_get
    if _newclass:
        maxIters = _swig_property(_robotsim.IKSolver_maxIters_get, _robotsim.IKSolver_maxIters_set)
    __swig_setmethods__["activeDofs"] = _robotsim.IKSolver_activeDofs_set
    __swig_getmethods__["activeDofs"] = _robotsim.IKSolver_activeDofs_get
    if _newclass:
        activeDofs = _swig_property(_robotsim.IKSolver_activeDofs_get, _robotsim.IKSolver_activeDofs_set)
    __swig_setmethods__["useJointLimits"] = _robotsim.IKSolver_useJointLimits_set
    __swig_getmethods__["useJointLimits"] = _robotsim.IKSolver_useJointLimits_get
    if _newclass:
        useJointLimits = _swig_property(_robotsim.IKSolver_useJointLimits_get, _robotsim.IKSolver_useJointLimits_set)
    __swig_setmethods__["qmin"] = _robotsim.IKSolver_qmin_set
    __swig_getmethods__["qmin"] = _robotsim.IKSolver_qmin_get
    if _newclass:
        qmin = _swig_property(_robotsim.IKSolver_qmin_get, _robotsim.IKSolver_qmin_set)
    __swig_setmethods__["qmax"] = _robotsim.IKSolver_qmax_set
    __swig_getmethods__["qmax"] = _robotsim.IKSolver_qmax_get
    if _newclass:
        qmax = _swig_property(_robotsim.IKSolver_qmax_get, _robotsim.IKSolver_qmax_set)
    __swig_setmethods__["biasConfig"] = _robotsim.IKSolver_biasConfig_set
    __swig_getmethods__["biasConfig"] = _robotsim.IKSolver_biasConfig_get
    if _newclass:
        biasConfig = _swig_property(_robotsim.IKSolver_biasConfig_get, _robotsim.IKSolver_biasConfig_set)
    __swig_setmethods__["lastIters"] = _robotsim.IKSolver_lastIters_set
    __swig_getmethods__["lastIters"] = _robotsim.IKSolver_lastIters_get
    if _newclass:
        lastIters = _swig_property(_robotsim.IKSolver_lastIters_get, _robotsim.IKSolver_lastIters_set)
    __swig_destroy__ = _robotsim.delete_IKSolver
    __del__ = lambda self: None
IKSolver_swigregister = _robotsim.IKSolver_swigregister
IKSolver_swigregister(IKSolver)
class GeneralizedIKObjective(_object):
    """
    An inverse kinematics target for matching points between two robots
    and/or objects.

    The objects are chosen upon construction, so the following are valid:
    GeneralizedIKObjective(a) is an objective for object a to be
    constrained relative to the environment.
    GeneralizedIKObjective(a,b) is an objective for object a to be
    constrained relative to b. Here a and b can be links on any robot or
    rigid objects.

    Once constructed, call setPoint, setPoints, or setTransform to specify
    the nature of the constraint.

    C++ includes: robotik.h
    """
    # NOTE: SWIG-generated proxy class; manual edits are lost when the
    # bindings are regenerated.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, GeneralizedIKObjective, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, GeneralizedIKObjective, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        """
        __init__(GeneralizedIKObjective self, GeneralizedIKObjective obj) -> GeneralizedIKObjective
        __init__(GeneralizedIKObjective self, RobotModelLink link) -> GeneralizedIKObjective
        __init__(GeneralizedIKObjective self, RigidObjectModel obj) -> GeneralizedIKObjective
        __init__(GeneralizedIKObjective self, RobotModelLink link, RobotModelLink link2) -> GeneralizedIKObjective
        __init__(GeneralizedIKObjective self, RobotModelLink link, RigidObjectModel obj2) -> GeneralizedIKObjective
        __init__(GeneralizedIKObjective self, RigidObjectModel obj, RobotModelLink link2) -> GeneralizedIKObjective
        __init__(GeneralizedIKObjective self, RigidObjectModel obj, RigidObjectModel obj2) -> GeneralizedIKObjective
        """
        this = _robotsim.new_GeneralizedIKObjective(*args)
        # SWIG ownership bookkeeping: append to the existing 'this' list if
        # one is present, otherwise install it.
        try:
            self.this.append(this)
        except Exception:
            self.this = this

    def setPoint(self, *args):
        """setPoint(GeneralizedIKObjective self, double const [3] p1, double const [3] p2)"""
        return _robotsim.GeneralizedIKObjective_setPoint(self, *args)

    def setPoints(self, *args):
        """setPoints(GeneralizedIKObjective self, PyObject * p1s, PyObject * p2s)"""
        return _robotsim.GeneralizedIKObjective_setPoints(self, *args)

    def setTransform(self, *args):
        """setTransform(GeneralizedIKObjective self, double const [9] R, double const [3] t)"""
        return _robotsim.GeneralizedIKObjective_setTransform(self, *args)

    # SWIG attribute wiring for the objective's C++ data members.
    __swig_setmethods__["link1"] = _robotsim.GeneralizedIKObjective_link1_set
    __swig_getmethods__["link1"] = _robotsim.GeneralizedIKObjective_link1_get
    if _newclass:
        link1 = _swig_property(_robotsim.GeneralizedIKObjective_link1_get, _robotsim.GeneralizedIKObjective_link1_set)
    __swig_setmethods__["link2"] = _robotsim.GeneralizedIKObjective_link2_set
    __swig_getmethods__["link2"] = _robotsim.GeneralizedIKObjective_link2_get
    if _newclass:
        link2 = _swig_property(_robotsim.GeneralizedIKObjective_link2_get, _robotsim.GeneralizedIKObjective_link2_set)
    __swig_setmethods__["obj1"] = _robotsim.GeneralizedIKObjective_obj1_set
    __swig_getmethods__["obj1"] = _robotsim.GeneralizedIKObjective_obj1_get
    if _newclass:
        obj1 = _swig_property(_robotsim.GeneralizedIKObjective_obj1_get, _robotsim.GeneralizedIKObjective_obj1_set)
    __swig_setmethods__["obj2"] = _robotsim.GeneralizedIKObjective_obj2_set
    __swig_getmethods__["obj2"] = _robotsim.GeneralizedIKObjective_obj2_get
    if _newclass:
        obj2 = _swig_property(_robotsim.GeneralizedIKObjective_obj2_get, _robotsim.GeneralizedIKObjective_obj2_set)
    __swig_setmethods__["isObj1"] = _robotsim.GeneralizedIKObjective_isObj1_set
    __swig_getmethods__["isObj1"] = _robotsim.GeneralizedIKObjective_isObj1_get
    if _newclass:
        isObj1 = _swig_property(_robotsim.GeneralizedIKObjective_isObj1_get, _robotsim.GeneralizedIKObjective_isObj1_set)
    __swig_setmethods__["isObj2"] = _robotsim.GeneralizedIKObjective_isObj2_set
    __swig_getmethods__["isObj2"] = _robotsim.GeneralizedIKObjective_isObj2_get
    if _newclass:
        isObj2 = _swig_property(_robotsim.GeneralizedIKObjective_isObj2_get, _robotsim.GeneralizedIKObjective_isObj2_set)
    __swig_setmethods__["goal"] = _robotsim.GeneralizedIKObjective_goal_set
    __swig_getmethods__["goal"] = _robotsim.GeneralizedIKObjective_goal_get
    if _newclass:
        goal = _swig_property(_robotsim.GeneralizedIKObjective_goal_get, _robotsim.GeneralizedIKObjective_goal_set)
    __swig_destroy__ = _robotsim.delete_GeneralizedIKObjective
    __del__ = lambda self: None
GeneralizedIKObjective_swigregister = _robotsim.GeneralizedIKObjective_swigregister
GeneralizedIKObjective_swigregister(GeneralizedIKObjective)
class GeneralizedIKSolver(_object):
    """
    An inverse kinematics solver between multiple robots and/or objects.
    NOT IMPLEMENTED YET.

    C++ includes: robotik.h
    """
    # NOTE: SWIG-generated proxy class; manual edits are lost when the
    # bindings are regenerated.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, GeneralizedIKSolver, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, GeneralizedIKSolver, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        """__init__(GeneralizedIKSolver self, WorldModel world) -> GeneralizedIKSolver"""
        this = _robotsim.new_GeneralizedIKSolver(*args)
        # SWIG ownership bookkeeping: append to the existing 'this' list if
        # one is present, otherwise install it.
        try:
            self.this.append(this)
        except Exception:
            self.this = this

    def add(self, *args):
        """
        add(GeneralizedIKSolver self, GeneralizedIKObjective objective)

        Adds a new simultaneous objective.
        """
        return _robotsim.GeneralizedIKSolver_add(self, *args)

    def setMaxIters(self, *args):
        """
        setMaxIters(GeneralizedIKSolver self, int iters)

        Sets the max # of iterations (default 100)
        """
        return _robotsim.GeneralizedIKSolver_setMaxIters(self, *args)

    def setTolerance(self, *args):
        """
        setTolerance(GeneralizedIKSolver self, double res)

        Sets the constraint solve tolerance (default 1e-3)
        """
        return _robotsim.GeneralizedIKSolver_setTolerance(self, *args)

    def getResidual(self):
        """
        getResidual(GeneralizedIKSolver self)

        Returns a vector describing the error of the objective.
        """
        return _robotsim.GeneralizedIKSolver_getResidual(self)

    def getJacobian(self):
        """
        getJacobian(GeneralizedIKSolver self)

        Returns a matrix describing the instantaneous derivative of the
        objective with respect to the active parameters.
        """
        return _robotsim.GeneralizedIKSolver_getJacobian(self)

    def solve(self):
        """
        solve(GeneralizedIKSolver self) -> PyObject *

        Tries to find a configuration that satisfies all simultaneous
        objectives up to the desired tolerance. Returns (res,iters) where res
        indicates whether x converged.
        """
        return _robotsim.GeneralizedIKSolver_solve(self)

    def sampleInitial(self):
        """
        sampleInitial(GeneralizedIKSolver self)

        Samples an initial random configuration.
        """
        return _robotsim.GeneralizedIKSolver_sampleInitial(self)

    # SWIG attribute wiring for the solver's C++ data members.
    __swig_setmethods__["world"] = _robotsim.GeneralizedIKSolver_world_set
    __swig_getmethods__["world"] = _robotsim.GeneralizedIKSolver_world_get
    if _newclass:
        world = _swig_property(_robotsim.GeneralizedIKSolver_world_get, _robotsim.GeneralizedIKSolver_world_set)
    __swig_setmethods__["objectives"] = _robotsim.GeneralizedIKSolver_objectives_set
    __swig_getmethods__["objectives"] = _robotsim.GeneralizedIKSolver_objectives_get
    if _newclass:
        objectives = _swig_property(_robotsim.GeneralizedIKSolver_objectives_get, _robotsim.GeneralizedIKSolver_objectives_set)
    __swig_setmethods__["tol"] = _robotsim.GeneralizedIKSolver_tol_set
    __swig_getmethods__["tol"] = _robotsim.GeneralizedIKSolver_tol_get
    if _newclass:
        tol = _swig_property(_robotsim.GeneralizedIKSolver_tol_get, _robotsim.GeneralizedIKSolver_tol_set)
    __swig_setmethods__["maxIters"] = _robotsim.GeneralizedIKSolver_maxIters_set
    __swig_getmethods__["maxIters"] = _robotsim.GeneralizedIKSolver_maxIters_get
    if _newclass:
        maxIters = _swig_property(_robotsim.GeneralizedIKSolver_maxIters_get, _robotsim.GeneralizedIKSolver_maxIters_set)
    __swig_setmethods__["useJointLimits"] = _robotsim.GeneralizedIKSolver_useJointLimits_set
    __swig_getmethods__["useJointLimits"] = _robotsim.GeneralizedIKSolver_useJointLimits_get
    if _newclass:
        useJointLimits = _swig_property(_robotsim.GeneralizedIKSolver_useJointLimits_get, _robotsim.GeneralizedIKSolver_useJointLimits_set)
    __swig_destroy__ = _robotsim.delete_GeneralizedIKSolver
    __del__ = lambda self: None
GeneralizedIKSolver_swigregister = _robotsim.GeneralizedIKSolver_swigregister
GeneralizedIKSolver_swigregister(GeneralizedIKSolver)
def SampleTransform(*args):
    """
    SampleTransform(IKObjective obj)
    SampleTransform(GeneralizedIKObjective obj)

    Samples a transform that satisfies the given IK objective.
    """
    return _robotsim.SampleTransform(*args)
class SimRobotSensor(_object):
    """
    A sensor on a simulated robot. Retrieve this from the controller, and
    use getMeasurements to get the currently simulated measurement vector.

    Sensors are automatically updated through the sim.simulate call, and
    getMeasurements() retrieves the previously updated values. As a
    result, you may get garbage measurements before the first sim.simulate
    call is made.

    There is also a new mode for doing kinematic simulation, which is
    supported (i.e., makes sensible measurements) for some types of
    sensors when just a robot / world model is given. This is similar to
    Simulation.fakeSimulate but the entire controller structure is
    bypassed. You can randomly set the robot's position, call
    kinematicReset(), and then call kinematicSimulate(). Subsequent calls
    assume the robot is being driven along a trajectory until the next
    kinematicReset() is called. LaserSensor, CameraSensor, TiltSensor,
    AccelerometerSensor, GyroSensor, JointPositionSensor,
    JointVelocitySensor support kinematic simulation mode. FilteredSensor
    and TimeDelayedSensor also work. The force-related sensors
    (ContactSensor and ForceTorqueSensor) return 0's in kinematic
    simulation.

    To use get/setSetting, you will need to know the sensor attribute
    names and types as described in Klampt/Control/*Sensor.h (same as in
    the world or sensor XML file).

    C++ includes: robotsim.h
    """
    # NOTE: SWIG-generated proxy class; manual edits are lost when the
    # bindings are regenerated.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimRobotSensor, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SimRobotSensor, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        """__init__(SimRobotSensor self, Robot * robot, SensorBase * sensor) -> SimRobotSensor"""
        this = _robotsim.new_SimRobotSensor(*args)
        # SWIG ownership bookkeeping: append to the existing 'this' list if
        # one is present, otherwise install it.
        try:
            self.this.append(this)
        except Exception:
            self.this = this

    def name(self):
        """
        name(SimRobotSensor self) -> std::string

        Returns the name of the sensor.
        """
        return _robotsim.SimRobotSensor_name(self)

    def type(self):
        """
        type(SimRobotSensor self) -> std::string

        Returns the type of the sensor.
        """
        return _robotsim.SimRobotSensor_type(self)

    def measurementNames(self):
        """
        measurementNames(SimRobotSensor self) -> stringVector

        Returns a list of names for the measurements (one per measurement).
        """
        return _robotsim.SimRobotSensor_measurementNames(self)

    def getMeasurements(self):
        """
        getMeasurements(SimRobotSensor self)

        Returns a list of measurements from the previous simulation (or
        kinematicSimulate) timestep.
        """
        return _robotsim.SimRobotSensor_getMeasurements(self)

    def getSetting(self, *args):
        """
        getSetting(SimRobotSensor self, std::string const & name) -> std::string

        Returns the value of the named setting (you will need to manually
        parse this)
        """
        return _robotsim.SimRobotSensor_getSetting(self, *args)

    def setSetting(self, *args):
        """
        setSetting(SimRobotSensor self, std::string const & name, std::string const & val)

        Sets the value of the named setting (you will need to manually cast an
        int/float/etc to a str)
        """
        return _robotsim.SimRobotSensor_setSetting(self, *args)

    def drawGL(self, *args):
        """
        drawGL(SimRobotSensor self)
        drawGL(SimRobotSensor self, doubleVector measurements)

        Draws a sensor indicator and its measurements using OpenGL.
        """
        return _robotsim.SimRobotSensor_drawGL(self, *args)

    def kinematicSimulate(self, *args):
        """
        kinematicSimulate(SimRobotSensor self, WorldModel world, double dt)

        Simulates / advances the kinematic simulation.
        """
        return _robotsim.SimRobotSensor_kinematicSimulate(self, *args)

    def kinematicReset(self):
        """
        kinematicReset(SimRobotSensor self)

        Resets a kinematic simulation so that a new initial condition can be
        set.
        """
        return _robotsim.SimRobotSensor_kinematicReset(self)

    # SWIG attribute wiring for the sensor's C++ data members.
    __swig_setmethods__["robot"] = _robotsim.SimRobotSensor_robot_set
    __swig_getmethods__["robot"] = _robotsim.SimRobotSensor_robot_get
    if _newclass:
        robot = _swig_property(_robotsim.SimRobotSensor_robot_get, _robotsim.SimRobotSensor_robot_set)
    __swig_setmethods__["sensor"] = _robotsim.SimRobotSensor_sensor_set
    __swig_getmethods__["sensor"] = _robotsim.SimRobotSensor_sensor_get
    if _newclass:
        sensor = _swig_property(_robotsim.SimRobotSensor_sensor_get, _robotsim.SimRobotSensor_sensor_set)
    __swig_destroy__ = _robotsim.delete_SimRobotSensor
    __del__ = lambda self: None
SimRobotSensor_swigregister = _robotsim.SimRobotSensor_swigregister
SimRobotSensor_swigregister(SimRobotSensor)
class SimRobotController(_object):
    """
    A controller for a simulated robot.

    By default a SimRobotController has three possible modes: Motion queue
    + PID mode: the controller has an internal trajectory queue that may
    be added to and modified. This queue supports piecewise linear
    interpolation, cubic interpolation, and time-optimal move-to commands.
    PID mode: the user controls the motor's PID setpoints directly.
    Torque control: the user controls the motor torques directly.

    The "standard" way of using this is in move-to mode which accepts a
    milestone (setMilestone) or list of milestones (repeated calls to
    addMilestone) and interpolates dynamically from the current
    configuration/velocity. To handle disturbances, a PID loop is run
    automatically at the controller's specified rate.

    To get finer-grained control over the motion queue's timing, you may
    use the setLinear/setCubic/addLinear/addCubic functions. In these
    functions it is up to the user to respect velocity, acceleration, and
    torque limits.

    Whether in motion queue or PID mode, the constants of the PID loop are
    initially set in the robot file. You can programmatically tune these
    via the setPIDGains function.

    Arbitrary trajectories can be tracked by using setVelocity over short
    time steps. Force controllers can be implemented using setTorque,
    again using short time steps.

    If setVelocity, setTorque, or setPID command are called, the motion
    queue behavior will be completely overridden. To reset back to motion
    queue control, the function setManualMode(False) must be called.

    C++ includes: robotsim.h
    """
    # NOTE: SWIG-generated proxy class; manual edits are lost when the
    # bindings are regenerated.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimRobotController, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SimRobotController, name)
    __repr__ = _swig_repr

    def __init__(self):
        """__init__(SimRobotController self) -> SimRobotController"""
        this = _robotsim.new_SimRobotController()
        # SWIG ownership bookkeeping: append to the existing 'this' list if
        # one is present, otherwise install it.
        try:
            self.this.append(this)
        except Exception:
            self.this = this
    __swig_destroy__ = _robotsim.delete_SimRobotController
    __del__ = lambda self: None

    def model(self):
        """
        model(SimRobotController self) -> RobotModel

        Retrieves the robot model associated with this controller.
        """
        return _robotsim.SimRobotController_model(self)

    def setRate(self, *args):
        """
        setRate(SimRobotController self, double dt)

        Sets the current feedback control rate.
        """
        return _robotsim.SimRobotController_setRate(self, *args)

    def getCommandedConfig(self):
        """
        getCommandedConfig(SimRobotController self)

        Returns the current commanded configuration.
        """
        return _robotsim.SimRobotController_getCommandedConfig(self)

    def getCommandedVelocity(self):
        """
        getCommandedVelocity(SimRobotController self)

        Returns the current commanded velocity.
        """
        return _robotsim.SimRobotController_getCommandedVelocity(self)

    def getSensedConfig(self):
        """
        getSensedConfig(SimRobotController self)

        Returns the current "sensed" configuration from the simulator.
        """
        return _robotsim.SimRobotController_getSensedConfig(self)

    def getSensedVelocity(self):
        """
        getSensedVelocity(SimRobotController self)

        Returns the current "sensed" velocity from the simulator.
        """
        return _robotsim.SimRobotController_getSensedVelocity(self)

    def sensor(self, *args):
        """
        sensor(SimRobotController self, int index) -> SimRobotSensor
        sensor(SimRobotController self, char const * name) -> SimRobotSensor

        Returns a sensor by name. If unavailable, a null sensor is returned.
        """
        return _robotsim.SimRobotController_sensor(self, *args)

    def commands(self):
        """
        commands(SimRobotController self) -> stringVector

        Gets a command list.
        """
        return _robotsim.SimRobotController_commands(self)

    def sendCommand(self, *args):
        """
        sendCommand(SimRobotController self, std::string const & name, std::string const & args) -> bool

        Sends a command to the controller.
        """
        return _robotsim.SimRobotController_sendCommand(self, *args)

    def getSetting(self, *args):
        """
        getSetting(SimRobotController self, std::string const & name) -> std::string

        Gets a setting of the controller.
        """
        return _robotsim.SimRobotController_getSetting(self, *args)

    def setSetting(self, *args):
        """
        setSetting(SimRobotController self, std::string const & name, std::string const & val) -> bool

        Sets a setting of the controller.
        """
        return _robotsim.SimRobotController_setSetting(self, *args)

    def setMilestone(self, *args):
        """
        setMilestone(SimRobotController self, doubleVector q)
        setMilestone(SimRobotController self, doubleVector q, doubleVector dq)

        Uses a dynamic interpolant to get from the current state to the
        desired milestone (with optional ending velocity). This interpolant is
        time-optimal with respect to the velocity and acceleration bounds.
        """
        return _robotsim.SimRobotController_setMilestone(self, *args)

    def addMilestone(self, *args):
        """
        addMilestone(SimRobotController self, doubleVector q)
        addMilestone(SimRobotController self, doubleVector q, doubleVector dq)

        Same as setMilestone, but appends an interpolant onto an internal
        motion queue starting at the current queued end state.
        """
        return _robotsim.SimRobotController_addMilestone(self, *args)

    def addMilestoneLinear(self, *args):
        """
        addMilestoneLinear(SimRobotController self, doubleVector q)

        Same as addMilestone, but enforces that the motion should move along a
        straight-line joint-space path.
        """
        return _robotsim.SimRobotController_addMilestoneLinear(self, *args)

    def setLinear(self, *args):
        """
        setLinear(SimRobotController self, doubleVector q, double dt)

        Uses linear interpolation to get from the current configuration to the
        desired configuration after time dt.
        """
        return _robotsim.SimRobotController_setLinear(self, *args)

    def setCubic(self, *args):
        """
        setCubic(SimRobotController self, doubleVector q, doubleVector v, double dt)

        Uses cubic (Hermite) interpolation to get from the current
        configuration/velocity to the desired configuration/velocity after
        time dt.
        """
        return _robotsim.SimRobotController_setCubic(self, *args)

    def addLinear(self, *args):
        """
        addLinear(SimRobotController self, doubleVector q, double dt)

        Same as setLinear but appends an interpolant onto the motion queue.
        """
        return _robotsim.SimRobotController_addLinear(self, *args)

    def addCubic(self, *args):
        """
        addCubic(SimRobotController self, doubleVector q, doubleVector v, double dt)

        Same as setCubic but appends an interpolant onto the motion queue.
        """
        return _robotsim.SimRobotController_addCubic(self, *args)

    def remainingTime(self):
        """
        remainingTime(SimRobotController self) -> double

        Returns the remaining duration of the motion queue.
        """
        return _robotsim.SimRobotController_remainingTime(self)

    def setVelocity(self, *args):
        """
        setVelocity(SimRobotController self, doubleVector dq, double dt)

        Sets a rate controller from the current commanded config to move at
        rate dq for time dt.
        """
        return _robotsim.SimRobotController_setVelocity(self, *args)

    def setTorque(self, *args):
        """
        setTorque(SimRobotController self, doubleVector t)

        Sets a torque command controller.
        """
        return _robotsim.SimRobotController_setTorque(self, *args)

    def setPIDCommand(self, *args):
        """
        setPIDCommand(SimRobotController self, doubleVector qdes, doubleVector dqdes)
        setPIDCommand(SimRobotController self, doubleVector qdes, doubleVector dqdes, doubleVector tfeedforward)

        Sets a PID command controller. If tfeedforward is used, it is the
        feedforward torque vector.
        """
        return _robotsim.SimRobotController_setPIDCommand(self, *args)

    def setManualMode(self, *args):
        """
        setManualMode(SimRobotController self, bool enabled)

        Turns on/off manual mode, if either the setTorque or setPID command
        were previously set.
        """
        return _robotsim.SimRobotController_setManualMode(self, *args)

    def getControlType(self):
        """
        getControlType(SimRobotController self) -> std::string

        Returns the control type for the active controller.

        Valid values are: unknown, off, torque, PID, locked_velocity
        """
        return _robotsim.SimRobotController_getControlType(self)

    def setPIDGains(self, *args):
        """
        setPIDGains(SimRobotController self, doubleVector kP, doubleVector kI, doubleVector kD)

        Sets the PID gains.
        """
        return _robotsim.SimRobotController_setPIDGains(self, *args)

    def getPIDGains(self):
        """
        getPIDGains(SimRobotController self)

        Gets the PID gains for the PID controller.
        """
        return _robotsim.SimRobotController_getPIDGains(self)

    # SWIG attribute wiring for the controller's C++ data members.
    __swig_setmethods__["index"] = _robotsim.SimRobotController_index_set
    __swig_getmethods__["index"] = _robotsim.SimRobotController_index_get
    if _newclass:
        index = _swig_property(_robotsim.SimRobotController_index_get, _robotsim.SimRobotController_index_set)
    __swig_setmethods__["sim"] = _robotsim.SimRobotController_sim_set
    __swig_getmethods__["sim"] = _robotsim.SimRobotController_sim_get
    if _newclass:
        sim = _swig_property(_robotsim.SimRobotController_sim_get, _robotsim.SimRobotController_sim_set)
    __swig_setmethods__["controller"] = _robotsim.SimRobotController_controller_set
    __swig_getmethods__["controller"] = _robotsim.SimRobotController_controller_get
    if _newclass:
        controller = _swig_property(_robotsim.SimRobotController_controller_get, _robotsim.SimRobotController_controller_set)
SimRobotController_swigregister = _robotsim.SimRobotController_swigregister
SimRobotController_swigregister(SimRobotController)
class SimBody(_object):
"""
A reference to a rigid body inside a Simulator (either a
RigidObjectModel, TerrainModel, or a link of a RobotModel).
Can use this class to directly apply forces to or control positions /
velocities of objects in the simulation. However, note that the
changes are only applied in the current simulation substep, not the
duration provided to Simulation.simulate(). If you need fine-grained
control, make sure to call simulate() with time steps equal to the
value provided to Simulation.setSimStep() (this is 0.001s by default).
Important: the transform of the object is centered at the object's
center of mass rather than the reference frame given in the
RobotModelLink or RigidObjectModel.
C++ includes: robotsim.h
"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SimBody, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SimBody, name)
__repr__ = _swig_repr
def getID(self):
"""
getID(SimBody self) -> int
Returns the object ID that this body associated with.
"""
return _robotsim.SimBody_getID(self)
def enable(self, enabled=True):
"""
enable(SimBody self, bool enabled=True)
enable(SimBody self)
Sets the simulation of this body on/off.
"""
return _robotsim.SimBody_enable(self, enabled)
def isEnabled(self):
"""
isEnabled(SimBody self) -> bool
Returns true if this body is being simulated.
"""
return _robotsim.SimBody_isEnabled(self)
def enableDynamics(self, enabled=True):
"""
enableDynamics(SimBody self, bool enabled=True)
enableDynamics(SimBody self)
Sets the dynamic simulation of the body on/off. If false, velocities
will simply be integrated forward, and forces will not affect velocity
i.e., it will be pure kinematic simulation.
"""
return _robotsim.SimBody_enableDynamics(self, enabled)
def isDynamicsEnabled(self):
"""isDynamicsEnabled(SimBody self) -> bool"""
return _robotsim.SimBody_isDynamicsEnabled(self)
def applyWrench(self, *args):
"""
applyWrench(SimBody self, double const [3] f, double const [3] t)
Applies a force and torque about the COM over the duration of the next
Simulator.simulate(t) call.
"""
return _robotsim.SimBody_applyWrench(self, *args)
def applyForceAtPoint(self, *args):
"""
applyForceAtPoint(SimBody self, double const [3] f, double const [3] pworld)
Applies a force at a given point (in world coordinates) over the
duration of the next Simulator.simulate(t) call.
"""
return _robotsim.SimBody_applyForceAtPoint(self, *args)
def applyForceAtLocalPoint(self, *args):
"""
applyForceAtLocalPoint(SimBody self, double const [3] f, double const [3] plocal)
Applies a force at a given point (in local center-of-mass-centered
coordinates) over the duration of the next Simulator.simulate(t) call.
"""
return _robotsim.SimBody_applyForceAtLocalPoint(self, *args)
def setTransform(self, *args):
"""
setTransform(SimBody self, double const [9] R, double const [3] t)
Sets the body's transformation at the current simulation time step (in
center-of-mass centered coordinates).
"""
return _robotsim.SimBody_setTransform(self, *args)
def getTransform(self):
"""
getTransform(SimBody self)
Gets the body's transformation at the current simulation time step (in
center-of-mass centered coordinates).
"""
return _robotsim.SimBody_getTransform(self)
def setObjectTransform(self, *args):
    """
    setObjectTransform(SimBody self, double const [9] R, double const [3] t)

    Sets the body's transformation at the current simulation time step (in
    object-native coordinates)
    """
    return _robotsim.SimBody_setObjectTransform(self, *args)
def getObjectTransform(self):
    """
    getObjectTransform(SimBody self)

    Gets the body's transformation at the current simulation time step (in
    object-native coordinates).
    """
    return _robotsim.SimBody_getObjectTransform(self)
def setVelocity(self, *args):
    """
    setVelocity(SimBody self, double const [3] w, double const [3] v)

    Sets the angular velocity and translational velocity at the current
    simulation time step.
    """
    return _robotsim.SimBody_setVelocity(self, *args)
def getVelocity(self):
    """
    getVelocity(SimBody self)

    Returns the angular velocity and translational velocity.
    """
    return _robotsim.SimBody_getVelocity(self)
def setCollisionPadding(self, *args):
    """
    setCollisionPadding(SimBody self, double padding)

    Sets the collision padding (useful for thin objects). Default is
    0.0025.
    """
    return _robotsim.SimBody_setCollisionPadding(self, *args)
def getCollisionPadding(self):
    """
    getCollisionPadding(SimBody self) -> double

    Returns the current collision padding (see setCollisionPadding).
    """
    return _robotsim.SimBody_getCollisionPadding(self)
def setCollisionPreshrink(self, shrinkVisualization=False):
    """
    setCollisionPreshrink(SimBody self, bool shrinkVisualization=False)
    setCollisionPreshrink(SimBody self)

    If set, preshrinks the geometry so that the padded geometry better
    matches the original mesh. If shrinkVisualization=true, the underlying
    mesh is also shrunk (helps debug)
    """
    return _robotsim.SimBody_setCollisionPreshrink(self, shrinkVisualization)
def getSurface(self):
    """
    getSurface(SimBody self) -> ContactParameters

    Gets (a copy of) the surface properties.
    """
    return _robotsim.SimBody_getSurface(self)
def setSurface(self, *args):
    """
    setSurface(SimBody self, ContactParameters params)

    Sets the surface properties.
    """
    return _robotsim.SimBody_setSurface(self, *args)
# SWIG-generated attribute plumbing: expose the wrapped C++ members
# (sim, objectID, geometry, body) as Python attributes/properties.
__swig_setmethods__["sim"] = _robotsim.SimBody_sim_set
__swig_getmethods__["sim"] = _robotsim.SimBody_sim_get
if _newclass:sim = _swig_property(_robotsim.SimBody_sim_get, _robotsim.SimBody_sim_set)
__swig_setmethods__["objectID"] = _robotsim.SimBody_objectID_set
__swig_getmethods__["objectID"] = _robotsim.SimBody_objectID_get
if _newclass:objectID = _swig_property(_robotsim.SimBody_objectID_get, _robotsim.SimBody_objectID_set)
__swig_setmethods__["geometry"] = _robotsim.SimBody_geometry_set
__swig_getmethods__["geometry"] = _robotsim.SimBody_geometry_get
if _newclass:geometry = _swig_property(_robotsim.SimBody_geometry_get, _robotsim.SimBody_geometry_set)
__swig_setmethods__["body"] = _robotsim.SimBody_body_set
__swig_getmethods__["body"] = _robotsim.SimBody_body_get
if _newclass:body = _swig_property(_robotsim.SimBody_body_get, _robotsim.SimBody_body_set)
def __init__(self):
    """
    __init__(SimBody self) -> SimBody

    A reference to a rigid body inside a Simulator (either a
    RigidObjectModel, TerrainModel, or a link of a RobotModel).

    Can use this class to directly apply forces to or control positions /
    velocities of objects in the simulation. However, note that the
    changes are only applied in the current simulation substep, not the
    duration provided to Simulation.simulate(). If you need fine-grained
    control, make sure to call simulate() with time steps equal to the
    value provided to Simulation.setSimStep() (this is 0.001s by default).

    Important: the transform of the object is centered at the object's
    center of mass rather than the reference frame given in the
    RobotModelLink or RigidObjectModel.

    C++ includes: robotsim.h
    """
    this = _robotsim.new_SimBody()
    # SWIG ownership bookkeeping for the underlying C++ object.
    try: self.this.append(this)
    except: self.this = this
# SWIG destructor hookup: the C++ object is deleted by the runtime,
# so the Python __del__ is a no-op.
__swig_destroy__ = _robotsim.delete_SimBody
__del__ = lambda self : None;
# Register the SimBody proxy class with the SWIG runtime.
SimBody_swigregister = _robotsim.SimBody_swigregister
SimBody_swigregister(SimBody)
class Simulator(_object):
    """
    A dynamics simulator for a WorldModel.

    C++ includes: robotsim.h
    """
    # SWIG boilerplate: route attribute access through the generated
    # getter/setter tables.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Simulator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Simulator, name)
    __repr__ = _swig_repr
    # Simulator status codes returned by getStatus().
    STATUS_NORMAL = _robotsim.Simulator_STATUS_NORMAL
    STATUS_ADAPTIVE_TIME_STEPPING = _robotsim.Simulator_STATUS_ADAPTIVE_TIME_STEPPING
    STATUS_CONTACT_UNRELIABLE = _robotsim.Simulator_STATUS_CONTACT_UNRELIABLE
    STATUS_UNSTABLE = _robotsim.Simulator_STATUS_UNSTABLE
    STATUS_ERROR = _robotsim.Simulator_STATUS_ERROR
    def __init__(self, *args):
        """
        __init__(Simulator self, WorldModel model) -> Simulator

        Constructs the simulator from a WorldModel. If the WorldModel was
        loaded from an XML file, then the simulation setup is loaded from it.
        """
        this = _robotsim.new_Simulator(*args)
        # SWIG ownership bookkeeping for the underlying C++ object.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _robotsim.delete_Simulator
    __del__ = lambda self : None;
    def reset(self):
        """
        reset(Simulator self)

        Resets to the initial state (same as setState(initialState))
        """
        return _robotsim.Simulator_reset(self)
    def getStatus(self):
        """
        getStatus(Simulator self) -> int

        Returns an indicator code for the simulator status. The return result
        is one of the STATUS_X flags. (Technically, this returns the worst
        status over the last simulate() call)
        """
        return _robotsim.Simulator_getStatus(self)
    def getStatusString(self, *args):
        """
        getStatusString(Simulator self, int s=-1) -> std::string
        getStatusString(Simulator self) -> std::string

        Returns a string indicating the simulator's status. If s is provided
        and >= 0, this function maps the indicator code s to a string.
        """
        return _robotsim.Simulator_getStatusString(self, *args)
    def getState(self):
        """
        getState(Simulator self) -> std::string

        Returns a Base64 string representing the binary data for the current
        simulation state, including controller parameters, etc.
        """
        return _robotsim.Simulator_getState(self)
    def setState(self, *args):
        """
        setState(Simulator self, std::string const & str)

        Sets the current simulation state from a Base64 string returned by a
        prior getState call.
        """
        return _robotsim.Simulator_setState(self, *args)
    def simulate(self, *args):
        """
        simulate(Simulator self, double t)

        Advances the simulation by time t, and updates the world model from
        the simulation state.
        """
        return _robotsim.Simulator_simulate(self, *args)
    def fakeSimulate(self, *args):
        """
        fakeSimulate(Simulator self, double t)

        Advances a faked simulation by time t, and updates the world model
        from the faked simulation state.
        """
        return _robotsim.Simulator_fakeSimulate(self, *args)
    def getTime(self):
        """
        getTime(Simulator self) -> double

        Returns the simulation time.
        """
        return _robotsim.Simulator_getTime(self)
    def updateWorld(self):
        """
        updateWorld(Simulator self)

        Updates the world model from the current simulation state. This only
        needs to be called if you change the world model and want to revert
        back to the simulation state.
        """
        return _robotsim.Simulator_updateWorld(self)
    def getActualConfig(self, *args):
        """
        getActualConfig(Simulator self, int robot)

        Returns the current actual configuration of the robot from the
        simulator.
        """
        return _robotsim.Simulator_getActualConfig(self, *args)
    def getActualVelocity(self, *args):
        """
        getActualVelocity(Simulator self, int robot)

        Returns the current actual velocity of the robot from the simulator.
        """
        return _robotsim.Simulator_getActualVelocity(self, *args)
    def getActualTorques(self, *args):
        """
        getActualTorques(Simulator self, int robot)

        Returns the current actual torques on the robot's drivers from the
        simulator.
        """
        return _robotsim.Simulator_getActualTorques(self, *args)
    def enableContactFeedback(self, *args):
        """
        enableContactFeedback(Simulator self, int obj1, int obj2)

        Call this to enable contact feedback between the two objects
        (arguments are indexes returned by object.getID()). Contact feedback
        has a small overhead so you may want to do this selectively. This must
        be called before using inContact, getContacts, getContactForces,
        contactForce, contactTorque, hadContact, hadSeparation,
        hadPenetration, and meanContactForce.
        """
        return _robotsim.Simulator_enableContactFeedback(self, *args)
    def enableContactFeedbackAll(self):
        """
        enableContactFeedbackAll(Simulator self)

        Call this to enable contact feedback between all pairs of objects.
        Contact feedback has a small overhead so you may want to do this
        selectively.
        """
        return _robotsim.Simulator_enableContactFeedbackAll(self)
    def inContact(self, *args):
        """
        inContact(Simulator self, int aid, int bid) -> bool

        Returns true if the objects (indexes returned by object.getID()) are
        in contact on the current time step. You can set bid=-1 to tell if
        object a is in contact with any object.
        """
        return _robotsim.Simulator_inContact(self, *args)
    def getContacts(self, *args):
        """
        getContacts(Simulator self, int aid, int bid)

        Returns the list of contacts (x,n,kFriction) at the last time step.
        Normals point into object a. The contact point (x,n,kFriction) is
        represented as a 7-element vector.
        """
        return _robotsim.Simulator_getContacts(self, *args)
    def getContactForces(self, *args):
        """
        getContactForces(Simulator self, int aid, int bid)

        Returns the list of contact forces on object a at the last time step.
        """
        return _robotsim.Simulator_getContactForces(self, *args)
    def contactForce(self, *args):
        """
        contactForce(Simulator self, int aid, int bid)

        Returns the contact force on object a at the last time step. You can
        set bid to -1 to get the overall contact force on object a.
        """
        return _robotsim.Simulator_contactForce(self, *args)
    def contactTorque(self, *args):
        """
        contactTorque(Simulator self, int aid, int bid)

        Returns the contact force on object a (about a's origin) at the last
        time step. You can set bid to -1 to get the overall contact force on
        object a.
        """
        return _robotsim.Simulator_contactTorque(self, *args)
    def hadContact(self, *args):
        """
        hadContact(Simulator self, int aid, int bid) -> bool

        Returns true if the objects had contact over the last simulate() call.
        You can set bid to -1 to determine if object a had contact with any
        other object.
        """
        return _robotsim.Simulator_hadContact(self, *args)
    def hadSeparation(self, *args):
        """
        hadSeparation(Simulator self, int aid, int bid) -> bool

        Returns true if the objects had ever separated during the last
        simulate() call. You can set bid to -1 to determine if object a had no
        contact with any other object.
        """
        return _robotsim.Simulator_hadSeparation(self, *args)
    def hadPenetration(self, *args):
        """
        hadPenetration(Simulator self, int aid, int bid) -> bool

        Returns true if the objects interpenetrated during the last simulate()
        call. If so, the simulation may lead to very inaccurate results or
        artifacts. You can set bid to -1 to determine if object a penetrated
        any object, or you can set aid=bid=-1 to determine whether any object
        is penetrating any other (indicating that the simulation will not be
        functioning properly in general).
        """
        return _robotsim.Simulator_hadPenetration(self, *args)
    def meanContactForce(self, *args):
        """
        meanContactForce(Simulator self, int aid, int bid)

        Returns the average contact force on object a over the last simulate()
        call.
        """
        return _robotsim.Simulator_meanContactForce(self, *args)
    def controller(self, *args):
        """
        controller(Simulator self, int robot) -> SimRobotController
        controller(Simulator self, RobotModel robot) -> SimRobotController
        """
        return _robotsim.Simulator_controller(self, *args)
    def body(self, *args):
        """
        body(Simulator self, RobotModelLink link) -> SimBody
        body(Simulator self, RigidObjectModel object) -> SimBody
        body(Simulator self, TerrainModel terrain) -> SimBody

        Returns the SimBody corresponding to the given link, object, or
        terrain.
        """
        return _robotsim.Simulator_body(self, *args)
    def getJointForces(self, *args):
        """
        getJointForces(Simulator self, RobotModelLink link)

        Returns the joint force and torque local to the link, as would be read
        by a force-torque sensor mounted at the given link's origin. The 6
        entries are (fx,fy,fz,mx,my,mz)
        """
        return _robotsim.Simulator_getJointForces(self, *args)
    def setGravity(self, *args):
        """
        setGravity(Simulator self, double const [3] g)

        Sets the overall gravity vector.
        """
        return _robotsim.Simulator_setGravity(self, *args)
    def setSimStep(self, *args):
        """
        setSimStep(Simulator self, double dt)

        Sets the internal simulation substep. Values < 0.01 are recommended.
        """
        return _robotsim.Simulator_setSimStep(self, *args)
    def getSetting(self, *args):
        """
        getSetting(Simulator self, std::string const & name) -> std::string

        Retrieves some simulation setting. Valid names are gravity, simStep,
        boundaryLayerCollisions, rigidObjectCollisions, robotSelfCollisions,
        robotRobotCollisions, adaptiveTimeStepping, minimumAdaptiveTimeStep,
        maxContacts, clusterNormalScale, errorReductionParameter,
        dampedLeastSquaresParameter, instabilityConstantEnergyThreshold,
        instabilityLinearEnergyThreshold, instabilityMaxEnergyThreshold, and
        instabilityPostCorrectionEnergy. See Klampt/Simulation/ODESimulator.h
        for detailed descriptions of these parameters.
        """
        return _robotsim.Simulator_getSetting(self, *args)
    def setSetting(self, *args):
        """
        setSetting(Simulator self, std::string const & name, std::string const & value)

        Sets some simulation setting. Raises an exception if the name is
        unknown or the value is of improper format.
        """
        return _robotsim.Simulator_setSetting(self, *args)
    # SWIG-generated attribute plumbing for the wrapped C++ members
    # (index, world, sim, initialState).
    __swig_setmethods__["index"] = _robotsim.Simulator_index_set
    __swig_getmethods__["index"] = _robotsim.Simulator_index_get
    if _newclass:index = _swig_property(_robotsim.Simulator_index_get, _robotsim.Simulator_index_set)
    __swig_setmethods__["world"] = _robotsim.Simulator_world_set
    __swig_getmethods__["world"] = _robotsim.Simulator_world_get
    if _newclass:world = _swig_property(_robotsim.Simulator_world_get, _robotsim.Simulator_world_set)
    __swig_setmethods__["sim"] = _robotsim.Simulator_sim_set
    __swig_getmethods__["sim"] = _robotsim.Simulator_sim_get
    if _newclass:sim = _swig_property(_robotsim.Simulator_sim_get, _robotsim.Simulator_sim_set)
    __swig_setmethods__["initialState"] = _robotsim.Simulator_initialState_set
    __swig_getmethods__["initialState"] = _robotsim.Simulator_initialState_get
    if _newclass:initialState = _swig_property(_robotsim.Simulator_initialState_get, _robotsim.Simulator_initialState_set)
# Register the Simulator proxy class with the SWIG runtime.
Simulator_swigregister = _robotsim.Simulator_swigregister
Simulator_swigregister(Simulator)
def setRandomSeed(*args):
    """
    setRandomSeed(int seed)

    Sets the random seed used by the configuration sampler.
    """
    return _robotsim.setRandomSeed(*args)
def destroy():
    """
    destroy()

    Destroys internal data structures.
    """
    return _robotsim.destroy()
def setFrictionConeApproximationEdges(*args):
    """
    setFrictionConeApproximationEdges(int numEdges)

    NOTE(review): presumably sets the number of edges used in the
    polyhedral approximation of friction cones -- confirm against
    robotsim.h.
    """
    return _robotsim.setFrictionConeApproximationEdges(*args)
def forceClosure(*args):
    """
    forceClosure(doubleMatrix contacts) -> bool
    forceClosure(doubleMatrix contactPositions, doubleMatrix frictionCones) -> bool
    """
    return _robotsim.forceClosure(*args)
def forceClosure2D(*args):
    """
    forceClosure2D(doubleMatrix contacts) -> bool
    forceClosure2D(doubleMatrix contactPositions, doubleMatrix frictionCones) -> bool
    """
    return _robotsim.forceClosure2D(*args)
def comEquilibrium(*args):
    """
    comEquilibrium(doubleMatrix contacts, doubleVector fext, PyObject * com) -> PyObject
    comEquilibrium(doubleMatrix contactPositions, doubleMatrix frictionCones, doubleVector fext, PyObject * com) -> PyObject *
    """
    return _robotsim.comEquilibrium(*args)
def comEquilibrium2D(*args):
    """
    comEquilibrium2D(doubleMatrix contacts, doubleVector fext, PyObject * com) -> PyObject
    comEquilibrium2D(doubleMatrix contactPositions, doubleMatrix frictionCones, doubleVector fext, PyObject * com) -> PyObject *
    """
    return _robotsim.comEquilibrium2D(*args)
def supportPolygon(*args):
    """
    supportPolygon(doubleMatrix contacts) -> PyObject
    supportPolygon(doubleMatrix contactPositions, doubleMatrix frictionCones) -> PyObject *

    A fancy version of the normal supportPolygon test. contactPositions is
    a list of 3-lists giving the contact point positions. The i'th element
    in the list frictionCones has length (k*4), and gives the contact
    force constraints (ax,ay,az,b) where ax*fx+ay*fy+az*fz <= b limits the
    contact force (fx,fy,fz) at the i'th contact. Each of the k 4-tuples
    is laid out sequentially per-contact.

    The return value is a list of 3-tuples giving the sorted plane
    boundaries of the polygon. The format of a plane is (nx,ny,ofs) where
    (nx,ny) are the outward facing normals, and ofs is the offset from 0.
    In other words to test stability of a com [x,y], you can test whether
    dot([nx,ny],[x,y]) <= ofs for all planes.
    """
    return _robotsim.supportPolygon(*args)
def supportPolygon2D(*args):
    """
    supportPolygon2D(doubleMatrix contacts) -> PyObject
    supportPolygon2D(doubleMatrix contacts, doubleMatrix frictionCones) -> PyObject *
    """
    return _robotsim.supportPolygon2D(*args)
def equilibriumTorques(*args):
    """
    equilibriumTorques(RobotModel robot, doubleMatrix contacts, intVector links, doubleVector fext, double norm=0) -> PyObject
    equilibriumTorques(RobotModel robot, doubleMatrix contacts, intVector links, doubleVector fext) -> PyObject
    equilibriumTorques(RobotModel robot, doubleMatrix contacts, intVector links, doubleVector fext, doubleVector internalTorques,
        double norm=0) -> PyObject
    equilibriumTorques(RobotModel robot, doubleMatrix contacts, intVector links, doubleVector fext, doubleVector internalTorques) -> PyObject *
    """
    return _robotsim.equilibriumTorques(*args)
# This file is compatible with both classic and new-style classes.
| bsd-3-clause |
cryptickp/heat | heat/tests/db/test_migrations.py | 2 | 27420 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly both upgrading and downgrading, and that no data loss occurs
if possible.
"""
import datetime
import os
import uuid
from migrate.versioning import repository
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils
from oslo_serialization import jsonutils
import six
import sqlalchemy
import testtools
from heat.db.sqlalchemy import migrate_repo
from heat.db.sqlalchemy import migration
from heat.db.sqlalchemy import models
from heat.tests import common
class HeatMigrationsCheckers(test_migrations.WalkVersionsMixin,
                             common.FakeLogMixin):
    """Test sqlalchemy-migrate migrations."""

    # Walk straight through the versions (no up/down/up "snake walk")
    # and do not exercise downgrades.
    snake_walk = False
    downgrade = False
@property
def INIT_VERSION(self):
    # Initial migration version of the heat repository, as declared by
    # heat.db.sqlalchemy.migration.
    return migration.INIT_VERSION
@property
def REPOSITORY(self):
    # sqlalchemy-migrate repository rooted at the package directory of
    # heat.db.sqlalchemy.migrate_repo.
    migrate_file = migrate_repo.__file__
    return repository.Repository(
        os.path.abspath(os.path.dirname(migrate_file))
    )
@property
def migration_api(self):
    # Import lazily via __import__ so the versioning_api symbol is
    # resolved at access time rather than module import time.
    temp = __import__('oslo_db.sqlalchemy.migration', globals(),
                      locals(), ['versioning_api'], 0)
    return temp.versioning_api
@property
def migrate_engine(self):
    # self.engine is expected to be provided by the database test base
    # class this mixin is combined with.
    return self.engine
@testtools.skipIf(six.PY3, "skip until a new release of oslo.db is cut")
def test_walk_versions(self):
    """Walk every migration version, applying the _pre_upgrade_XXX /
    _check_XXX hooks defined below along the way."""
    self.walk_versions(self.snake_walk, self.downgrade)
def assertColumnExists(self, engine, table, column):
    """Assert that `table` has a column named `column`."""
    t = utils.get_table(engine, table)
    self.assertIn(column, t.c)
def assertColumnType(self, engine, table, column, sqltype):
    """Assert that `table`.`column` is of SQLAlchemy type `sqltype`."""
    t = utils.get_table(engine, table)
    col = getattr(t.c, column)
    self.assertIsInstance(col.type, sqltype)
def assertColumnNotExists(self, engine, table, column):
    """Assert that `table` has no column named `column`."""
    t = utils.get_table(engine, table)
    self.assertNotIn(column, t.c)
def assertColumnIsNullable(self, engine, table, column):
    """Assert that `table`.`column` allows NULL values."""
    t = utils.get_table(engine, table)
    col = getattr(t.c, column)
    self.assertTrue(col.nullable)
def assertColumnIsNotNullable(self, engine, table, column_name):
    """Assert that `table`.`column_name` is declared NOT NULL."""
    table = utils.get_table(engine, table)
    column = getattr(table.c, column_name)
    self.assertFalse(column.nullable)
def assertIndexExists(self, engine, table, index):
    """Assert that `table` has an index named `index`."""
    t = utils.get_table(engine, table)
    index_names = [idx.name for idx in t.indexes]
    self.assertIn(index, index_names)
def assertIndexMembers(self, engine, table, index, members):
    """Assert that index `index` on `table` covers exactly `members`
    (column order is ignored)."""
    # Existence is checked first, so index_columns below is always set.
    self.assertIndexExists(engine, table, index)
    t = utils.get_table(engine, table)
    index_columns = None
    for idx in t.indexes:
        if idx.name == index:
            index_columns = six.iterkeys(idx.columns)
            break
    self.assertEqual(sorted(members), sorted(index_columns))
def _pre_upgrade_031(self, engine):
    """Seed raw_template, user_creds and stack rows before migration 031
    runs; the inserted stack rows are returned for _check_031."""
    raw_template = utils.get_table(engine, 'raw_template')
    templ = []
    for i in range(300, 303, 1):
        t = dict(id=i, template='{}', files='{}')
        engine.execute(raw_template.insert(), [t])
        templ.append(t)
    user_creds = utils.get_table(engine, 'user_creds')
    user = [dict(id=4, username='angus', password='notthis',
                 tenant='mine', auth_url='bla',
                 tenant_id=str(uuid.uuid4()),
                 trust_id='',
                 trustor_user_id='')]
    engine.execute(user_creds.insert(), user)
    stack = utils.get_table(engine, 'stack')
    stack_ids = [('967aaefb-152e-405d-b13a-35d4c816390c', 0),
                 ('9e9deba9-a303-4f29-84d3-c8165647c47e', 1),
                 ('9a4bd1ec-8b21-46cd-964a-f66cb1cfa2f9', 2)]
    data = [dict(id=ll_id, name='fruity',
                 raw_template_id=templ[templ_id]['id'],
                 user_creds_id=user[0]['id'],
                 username='angus', disable_rollback=True)
            for ll_id, templ_id in stack_ids]
    engine.execute(stack.insert(), data)
    return data
def _check_031(self, engine, data):
    """Migration 031 created the stack_lock table; verify its columns."""
    # Same four existence checks as before, expressed as a loop.
    for column in ('stack_id', 'engine_id', 'created_at', 'updated_at'):
        self.assertColumnExists(engine, 'stack_lock', column)
def _check_034(self, engine, data):
    """Migration 034 added a 'files' column to raw_template."""
    self.assertColumnExists(engine, 'raw_template', 'files')
def _pre_upgrade_035(self, engine):
    """Seed two event rows (against a stack created by the version 031
    fixtures) before migration 035 runs; returned for _check_035."""
    # The stacks id are for the 33 version migration
    event_table = utils.get_table(engine, 'event')
    data = [{
        'id': '22222222-152e-405d-b13a-35d4c816390c',
        'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c',
        'resource_action': 'Test',
        'resource_status': 'TEST IN PROGRESS',
        'resource_name': 'Testing Resource',
        'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9',
        'resource_status_reason': '',
        'resource_type': '',
        'resource_properties': None,
        'created_at': datetime.datetime.now()},
        {'id': '11111111-152e-405d-b13a-35d4c816390c',
         'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c',
         'resource_action': 'Test',
         'resource_status': 'TEST COMPLETE',
         'resource_name': 'Testing Resource',
         'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9',
         'resource_status_reason': '',
         'resource_type': '',
         'resource_properties': None,
         'created_at': datetime.datetime.now() +
         datetime.timedelta(days=5)}]
    engine.execute(event_table.insert(), data)
    return data
def _check_035(self, engine, data):
    """Migration 035 turned event.id into an autoincrement integer and
    moved the old uuid id into a new 'uuid' column."""
    self.assertColumnExists(engine, 'event', 'id')
    self.assertColumnExists(engine, 'event', 'uuid')

    # Pre-existing events got sequential integer ids and kept their
    # original uuid.
    event_table = utils.get_table(engine, 'event')
    events_in_db = list(event_table.select().execute())
    last_id = 0
    for index, event in enumerate(data):
        last_id = index + 1
        self.assertEqual(last_id, events_in_db[index].id)
        self.assertEqual(event['id'], events_in_db[index].uuid)

    # Check that the autoincremental id is ok
    data = [{
        'uuid': '33333333-152e-405d-b13a-35d4c816390c',
        'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c',
        'resource_action': 'Test',
        'resource_status': 'TEST COMPLEATE AGAIN',
        'resource_name': 'Testing Resource',
        'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9',
        'resource_status_reason': '',
        'resource_type': '',
        'resource_properties': None,
        'created_at': datetime.datetime.now()}]
    result = engine.execute(event_table.insert(), data)
    self.assertEqual(last_id + 1, result.inserted_primary_key[0])
def _check_036(self, engine, data):
    """Migration 036 added stack.stack_user_project_id."""
    self.assertColumnExists(engine, 'stack', 'stack_user_project_id')
def _pre_upgrade_037(self, engine):
    """Insert a HOT template whose parameter uses the legacy 'Type' key;
    migration 037 should lowercase it. Returned for _check_037."""
    raw_template = utils.get_table(engine, 'raw_template')
    templ = '''{"heat_template_version": "2013-05-23",
    "parameters": {
    "key_name": {
    "Type": "string"
    }
    }
    }'''
    data = [dict(id=4, template=templ, files='{}')]
    engine.execute(raw_template.insert(), data)
    return data[0]
def _check_037(self, engine, data):
    """Migration 037 rewrote HOT parameter 'Type' keys to lowercase
    'type', preserving the value."""
    raw_template = utils.get_table(engine, 'raw_template')
    templs = list(raw_template.select().
                  where(raw_template.c.id == str(data['id'])).
                  execute())
    template = jsonutils.loads(templs[0].template)
    data_template = jsonutils.loads(data['template'])
    self.assertNotIn('Type', template['parameters']['key_name'])
    self.assertIn('type', template['parameters']['key_name'])
    self.assertEqual(template['parameters']['key_name']['type'],
                     data_template['parameters']['key_name']['Type'])
def _check_038(self, engine, data):
    """Migration 038 dropped software_config.io."""
    self.assertColumnNotExists(engine, 'software_config', 'io')
def _check_039(self, engine, data):
    """Migration 039 made stack.user_creds_id nullable."""
    self.assertColumnIsNullable(engine, 'stack', 'user_creds_id')
def _check_040(self, engine, data):
    """Migration 040 dropped software_deployment.signal_id."""
    self.assertColumnNotExists(engine, 'software_deployment', 'signal_id')
def _pre_upgrade_041(self, engine):
    """Insert a HOT template using legacy 'Type'/'Value' keys in
    resources/outputs; migration 041 should lowercase them. Returned
    for _check_041."""
    raw_template = utils.get_table(engine, 'raw_template')
    templ = '''{"heat_template_version": "2013-05-23",
    "resources": {
    "my_instance": {
    "Type": "OS::Nova::Server"
    }
    },
    "outputs": {
    "instance_ip": {
    "Value": { "get_attr": "[my_instance, first_address]" }
    }
    }
    }'''
    data = [dict(id=7, template=templ, files='{}')]
    engine.execute(raw_template.insert(), data)
    return data[0]
def _check_041(self, engine, data):
    """Migration 041 rewrote resource 'Type' and output 'Value' keys to
    lowercase in stored HOT templates."""
    raw_template = utils.get_table(engine, 'raw_template')
    templs = list(raw_template.select().
                  where(raw_template.c.id == str(data['id'])).
                  execute())
    template = jsonutils.loads(templs[0].template)
    self.assertIn('type', template['resources']['my_instance'])
    self.assertNotIn('Type', template['resources']['my_instance'])
    self.assertIn('value', template['outputs']['instance_ip'])
    self.assertNotIn('Value', template['outputs']['instance_ip'])
def _pre_upgrade_043(self, engine):
    """Insert a CFN-style template with the old HeatTemplateFormatVersion
    "2012-12-11"; migration 043 should bump it. Returned for
    _check_043."""
    raw_template = utils.get_table(engine, 'raw_template')
    templ = '''{"HeatTemplateFormatVersion" : "2012-12-11",
    "Parameters" : {
    "foo" : { "Type" : "String", "NoEcho": "True" },
    "bar" : { "Type" : "String", "NoEcho": "True", "Default": "abc" },
    "blarg" : { "Type" : "String", "Default": "quux" }
    }
    }'''
    data = [dict(id=8, template=templ, files='{}')]
    engine.execute(raw_template.insert(), data)
    return data[0]
def _check_043(self, engine, data):
    """Migration 043 bumped HeatTemplateFormatVersion from 2012-12-11 to
    2012-12-12."""
    raw_template = utils.get_table(engine, 'raw_template')
    templ = list(raw_template.select().
                 where(raw_template.c.id == data['id']).execute())
    template = jsonutils.loads(templ[0].template)
    self.assertEqual(template['HeatTemplateFormatVersion'], '2012-12-12')
def _pre_upgrade_045(self, engine):
    """Seed stacks including one backup stack ('s1*', named with the
    trailing '*' convention and owned by 's1'); migration 045 should
    flag it via the new 'backup' column. Returned for _check_045."""
    raw_template = utils.get_table(engine, 'raw_template')
    templ = []
    for i in range(200, 203, 1):
        t = dict(id=i, template='{}', files='{}')
        engine.execute(raw_template.insert(), [t])
        templ.append(t)
    user_creds = utils.get_table(engine, 'user_creds')
    user = [dict(id=6, username='steve', password='notthis',
                 tenant='mine', auth_url='bla',
                 tenant_id=str(uuid.uuid4()),
                 trust_id='',
                 trustor_user_id='')]
    engine.execute(user_creds.insert(), user)
    stack = utils.get_table(engine, 'stack')
    stack_ids = [('s1', '967aaefb-152e-505d-b13a-35d4c816390c', 0),
                 ('s2', '9e9deba9-a303-5f29-84d3-c8165647c47e', 1),
                 ('s1*', '9a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9', 2)]
    data = [dict(id=ll_id, name=name,
                 raw_template_id=templ[templ_id]['id'],
                 user_creds_id=user[0]['id'],
                 username='steve', disable_rollback=True)
            for name, ll_id, templ_id in stack_ids]
    data[2]['owner_id'] = '967aaefb-152e-505d-b13a-35d4c816390c'
    engine.execute(stack.insert(), data)
    return data
def _check_045(self, engine, data):
    """Migration 045 added stack.backup and set it for stacks whose name
    ends with '*' (the backup-stack naming convention)."""
    self.assertColumnExists(engine, 'stack', 'backup')
    stack_table = utils.get_table(engine, 'stack')
    stacks_in_db = list(stack_table.select().execute())
    stack_names_in_db = [s.name for s in stacks_in_db]
    # Assert the expected stacks are still there
    for stack in data:
        self.assertIn(stack['name'], stack_names_in_db)
    # And that the backup flag is set as expected
    for stack in stacks_in_db:
        if stack.name.endswith('*'):
            self.assertTrue(stack.backup)
        else:
            self.assertFalse(stack.backup)
def _check_046(self, engine, data):
    """Migration 046 added resource.properties_data."""
    self.assertColumnExists(engine, 'resource', 'properties_data')
def _pre_upgrade_047(self, engine):
    """Seed a nested-stack hierarchy before migration 047 so _check_047
    can verify the computed nested_depth values."""
    raw_template = utils.get_table(engine, 'raw_template')
    templ = []
    for i in range(100, 105, 1):
        t = dict(id=i, template='{}', files='{}')
        engine.execute(raw_template.insert(), [t])
        templ.append(t)
    user_creds = utils.get_table(engine, 'user_creds')
    user = [dict(id=7, username='steve', password='notthis',
                 tenant='mine', auth_url='bla',
                 tenant_id=str(uuid.uuid4()),
                 trust_id='',
                 trustor_user_id='')]
    engine.execute(user_creds.insert(), user)
    stack = utils.get_table(engine, 'stack')
    stack_ids = [
        ('s9', '167aaefb-152e-505d-b13a-35d4c816390c', 0),
        ('n1', '1e9deba9-a303-5f29-84d3-c8165647c47e', 1),
        ('n2', '1e9deba9-a304-5f29-84d3-c8165647c47e', 2),
        ('n3', '1e9deba9-a305-5f29-84d3-c8165647c47e', 3),
        ('s9*', '1a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9', 4)]
    data = [dict(id=ll_id, name=name,
                 raw_template_id=templ[tmpl_id]['id'],
                 user_creds_id=user[0]['id'],
                 owner_id=None,
                 backup=False,
                 username='steve', disable_rollback=True)
            for name, ll_id, tmpl_id in stack_ids]
    # Make a nested tree s9 -> n1 -> n2 -> n3, plus an s9* backup
    # stack owned by s9.
    data[1]['owner_id'] = '167aaefb-152e-505d-b13a-35d4c816390c'
    data[2]['owner_id'] = '1e9deba9-a303-5f29-84d3-c8165647c47e'
    data[3]['owner_id'] = '1e9deba9-a304-5f29-84d3-c8165647c47e'
    data[4]['owner_id'] = '167aaefb-152e-505d-b13a-35d4c816390c'
    data[4]['backup'] = True
    engine.execute(stack.insert(), data)
    return data
def _check_047(self, engine, data):
    """Migration 047 added stack.nested_depth, computed from the
    owner_id chain (backup stacks stay at depth 0)."""
    self.assertColumnExists(engine, 'stack', 'nested_depth')
    stack_table = utils.get_table(engine, 'stack')
    stacks_in_db = list(stack_table.select().execute())
    stack_ids_in_db = [s.id for s in stacks_in_db]

    # Assert the expected stacks are still there
    for stack in data:
        self.assertIn(stack['id'], stack_ids_in_db)

    # And that the depth is set as expected
    def n_depth(sid):
        # Look up a stack row by id and return its nested_depth.
        s = [s for s in stacks_in_db if s.id == sid][0]
        return s.nested_depth

    self.assertEqual(0, n_depth('167aaefb-152e-505d-b13a-35d4c816390c'))
    self.assertEqual(1, n_depth('1e9deba9-a303-5f29-84d3-c8165647c47e'))
    self.assertEqual(2, n_depth('1e9deba9-a304-5f29-84d3-c8165647c47e'))
    self.assertEqual(3, n_depth('1e9deba9-a305-5f29-84d3-c8165647c47e'))
    self.assertEqual(0, n_depth('1a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9'))
def _check_049(self, engine, data):
    """Migration 049 added user_creds.region_name."""
    self.assertColumnExists(engine, 'user_creds', 'region_name')
def _check_051(self, engine, data):
    """Migration 051 created the service table; verify each column
    exists with the expected nullability."""
    column_list = [('id', False),
                   ('host', False),
                   ('topic', False),
                   ('binary', False),
                   ('hostname', False),
                   ('engine_id', False),
                   ('report_interval', False),
                   ('updated_at', True),
                   ('created_at', True),
                   ('deleted_at', True)]
    for name, nullable in column_list:
        self.assertColumnExists(engine, 'service', name)
        if nullable:
            self.assertColumnIsNullable(engine, 'service', name)
        else:
            self.assertColumnIsNotNullable(engine, 'service', name)
def _check_052(self, engine, data):
    """Migration 052 added stack.convergence."""
    self.assertColumnExists(engine, 'stack', 'convergence')
def _check_055(self, engine, data):
    """Migration 055 added convergence bookkeeping columns to stack."""
    # Same three existence checks as before, expressed as a loop.
    for column in ('prev_raw_template_id', 'current_traversal',
                   'current_deps'):
        self.assertColumnExists(engine, 'stack', column)
def _pre_upgrade_056(self, engine):
raw_template = utils.get_table(engine, 'raw_template')
templ = []
for i in range(900, 903, 1):
t = dict(id=i, template='{}', files='{}')
engine.execute(raw_template.insert(), [t])
templ.append(t)
user_creds = utils.get_table(engine, 'user_creds')
user = [dict(id=uid, username='test_user', password='password',
tenant='test_project', auth_url='bla',
tenant_id=str(uuid.uuid4()),
trust_id='',
trustor_user_id='') for uid in range(900, 903)]
engine.execute(user_creds.insert(), user)
stack = utils.get_table(engine, 'stack')
stack_ids = [('967aaefa-152e-405d-b13a-35d4c816390c', 0),
('9e9debab-a303-4f29-84d3-c8165647c47e', 1),
('9a4bd1e9-8b21-46cd-964a-f66cb1cfa2f9', 2)]
data = [dict(id=ll_id, name=ll_id,
raw_template_id=templ[templ_id]['id'],
user_creds_id=user[templ_id]['id'],
username='test_user',
disable_rollback=True,
parameters='test_params',
created_at=datetime.datetime.utcnow(),
deleted_at=None)
for ll_id, templ_id in stack_ids]
data[-1]['deleted_at'] = datetime.datetime.utcnow()
engine.execute(stack.insert(), data)
return data
def _check_056(self, engine, data):
self.assertColumnNotExists(engine, 'stack', 'parameters')
self.assertColumnExists(engine, 'raw_template', 'environment')
self.assertColumnExists(engine, 'raw_template', 'predecessor')
# Get the parameters in stack table
stack_parameters = {}
for stack in data:
templ_id = stack['raw_template_id']
stack_parameters[templ_id] = (stack['parameters'],
stack.get('deleted_at'))
# validate whether its moved to raw_template
raw_template_table = utils.get_table(engine, 'raw_template')
raw_templates = raw_template_table.select().execute()
for raw_template in raw_templates:
if raw_template.id in stack_parameters:
stack_param, deleted_at = stack_parameters[raw_template.id]
tmpl_env = raw_template.environment
if engine.name == 'sqlite' and deleted_at is None:
stack_param = '"%s"' % stack_param
if deleted_at is None:
self.assertEqual(stack_param,
tmpl_env,
'parameters migration from stack to '
'raw_template failed')
else:
self.assertIsNone(tmpl_env,
'parameters migration did not skip '
'deleted stack')
def _pre_upgrade_057(self, engine):
# template
raw_template = utils.get_table(engine, 'raw_template')
templ = [dict(id=11, template='{}', files='{}')]
engine.execute(raw_template.insert(), templ)
# credentials
user_creds = utils.get_table(engine, 'user_creds')
user = [dict(id=11, username='steve', password='notthis',
tenant='mine', auth_url='bla',
tenant_id=str(uuid.uuid4()),
trust_id='',
trustor_user_id='')]
engine.execute(user_creds.insert(), user)
# stack
stack = utils.get_table(engine, 'stack')
stack_data = [dict(id='867aaefb-152e-505d-b13a-35d4c816390c',
name='s1',
raw_template_id=templ[0]['id'],
user_creds_id=user[0]['id'],
username='steve', disable_rollback=True)]
engine.execute(stack.insert(), stack_data)
# resource
resource = utils.get_table(engine, 'resource')
res_data = [dict(id='167aaefb-152e-505d-b13a-35d4c816390c',
name='res-4',
stack_id=stack_data[0]['id'],
user_creds_id=user[0]['id']),
dict(id='177aaefb-152e-505d-b13a-35d4c816390c',
name='res-5',
stack_id=stack_data[0]['id'],
user_creds_id=user[0]['id'])]
engine.execute(resource.insert(), res_data)
# resource_data
resource_data = utils.get_table(engine, 'resource_data')
rd_data = [dict(key='fruit',
value='blueberries',
reduct=False,
resource_id=res_data[0]['id']),
dict(key='fruit',
value='apples',
reduct=False,
resource_id=res_data[1]['id'])]
engine.execute(resource_data.insert(), rd_data)
return {'resource': res_data, 'resource_data': rd_data}
def _check_057(self, engine, data):
def uuid_in_res_data(res_uuid):
for rd in data['resource']:
if rd['id'] == res_uuid:
return True
return False
def rd_matches_old_data(key, value, res_uuid):
for rd in data['resource_data']:
if (rd['resource_id'] == res_uuid and rd['key'] == key
and rd['value'] == value):
return True
return False
self.assertColumnIsNotNullable(engine, 'resource', 'id')
res_table = utils.get_table(engine, 'resource')
res_in_db = list(res_table.select().execute())
# confirm the resource.id is an int and the uuid field has been
# copied from the old id.
for r in res_in_db:
self.assertIsInstance(r.id, six.integer_types)
self.assertTrue(uuid_in_res_data(r.uuid))
# confirm that the new resource_id points to the correct resource.
rd_table = utils.get_table(engine, 'resource_data')
rd_in_db = list(rd_table.select().execute())
for rd in rd_in_db:
for r in res_in_db:
if rd.resource_id == r.id:
self.assertTrue(rd_matches_old_data(rd.key, rd.value,
r.uuid))
def _check_058(self, engine, data):
self.assertColumnExists(engine, 'resource', 'engine_id')
self.assertColumnExists(engine, 'resource', 'atomic_key')
def _check_059(self, engine, data):
column_list = [('entity_id', False),
('traversal_id', False),
('is_update', False),
('atomic_key', False),
('stack_id', False),
('input_data', True),
('updated_at', True),
('created_at', True)]
for column in column_list:
self.assertColumnExists(engine, 'sync_point', column[0])
if not column[1]:
self.assertColumnIsNotNullable(engine, 'sync_point',
column[0])
else:
self.assertColumnIsNullable(engine, 'sync_point', column[0])
def _check_060(self, engine, data):
column_list = ['needed_by', 'requires', 'replaces', 'replaced_by',
'current_template_id']
for column in column_list:
self.assertColumnExists(engine, 'resource', column)
def _check_061(self, engine, data):
for tab_name in ['stack', 'resource', 'software_deployment']:
self.assertColumnType(engine, tab_name, 'status_reason',
sqlalchemy.Text)
def _check_062(self, engine, data):
self.assertColumnExists(engine, 'stack', 'parent_resource_name')
def _check_063(self, engine, data):
self.assertColumnExists(engine, 'resource',
'properties_data_encrypted')
def _check_064(self, engine, data):
self.assertColumnNotExists(engine, 'raw_template',
'predecessor')
class TestHeatMigrationsMySQL(HeatMigrationsCheckers,
test_base.MySQLOpportunisticTestCase):
pass
class TestHeatMigrationsPostgreSQL(HeatMigrationsCheckers,
test_base.PostgreSQLOpportunisticTestCase):
pass
class TestHeatMigrationsSQLite(HeatMigrationsCheckers,
test_base.DbTestCase):
pass
class ModelsMigrationSyncMixin(object):
def get_metadata(self):
return models.BASE.metadata
def get_engine(self):
return self.engine
def db_sync(self, engine):
migration.db_sync(engine=engine)
def include_object(self, object_, name, type_, reflected, compare_to):
if name in ['migrate_version'] and type_ == 'table':
return False
return True
class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.MySQLOpportunisticTestCase):
pass
class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.PostgreSQLOpportunisticTestCase):
pass
class ModelsMigrationsSyncSQLite(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.DbTestCase):
pass
| apache-2.0 |
gphat/dd-agent | tests/core/test_watchdog.py | 2 | 5692 | # stdlib
from contextlib import contextmanager
from random import random, randrange
import os
import subprocess
import sys
import time
import unittest
import urllib as url
# 3p
from mock import patch
from nose.plugins.attrib import attr
# project
# needed because of the subprocess calls
sys.path.append(os.getcwd())
from ddagent import Application
from util import Watchdog
class WatchdogKill(Exception):
"""
The watchdog attempted to kill the process.
"""
pass
@attr(requires='core_integration')
class TestWatchdog(unittest.TestCase):
"""
Test watchdog in various conditions
"""
JITTER_FACTOR = 2
@contextmanager
def set_time(self, time):
"""
Helper, a context manager to set the current time value.
"""
# Set the current time within `util` module
mock_time = patch("util.time.time")
mock_time.start().return_value = time
# Yield
yield
# Unset the time mock
mock_time.stop()
@patch.object(Watchdog, 'self_destruct', side_effect=WatchdogKill)
def test_watchdog_frenesy_detection(self, mock_restarted):
"""
Watchdog restarts the process on suspicious high activity.
"""
# Limit the restart timeframe for test purpose
Watchdog._RESTART_TIMEFRAME = 1
# Create a watchdog with a low activity tolerancy
process_watchdog = Watchdog(10, max_resets=3)
ping_watchdog = process_watchdog.reset
with self.set_time(1):
# Can be reset 3 times within the watchdog timeframe
for x in xrange(0, 3):
ping_watchdog()
# On the 4th attempt, the watchdog detects a suspicously high activity
self.assertRaises(WatchdogKill, ping_watchdog)
with self.set_time(3):
# Gets back to normal when the activity timeframe expires.
ping_watchdog()
def test_watchdog(self):
"""
Verify that watchdog kills ourselves even when spinning
Verify that watchdog kills ourselves when hanging
"""
start = time.time()
try:
subprocess.check_call(["python", __file__, "busy"], stderr=subprocess.STDOUT)
raise Exception("Should have died with an error")
except subprocess.CalledProcessError:
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
# Start pseudo web server
subprocess.Popen(["nc", "-l", "31834"])
start = time.time()
try:
subprocess.check_call(["python", __file__, "net"])
raise Exception("Should have died with an error")
except subprocess.CalledProcessError:
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
# Normal loop, should run 5 times
start = time.time()
try:
subprocess.check_call(["python", __file__, "normal"])
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
except subprocess.CalledProcessError:
self.fail("Watchdog killed normal process after %s seconds" % int(time.time() - start))
# Fast tornado, not killed
start = time.time()
p = subprocess.Popen(["python", __file__, "fast"])
p.wait()
duration = int(time.time() - start)
# should die as soon as flush_trs has been called
self.assertTrue(duration < self.JITTER_FACTOR * 10)
# Slow tornado, killed by the Watchdog
start = time.time()
p = subprocess.Popen(["python", __file__, "slow"])
p.wait()
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 4)
class MockTxManager(object):
def flush(self):
"Pretend to flush for a long time"
time.sleep(5)
sys.exit(0)
class MemoryHogTxManager(object):
def __init__(self, watchdog):
self._watchdog = watchdog
def flush(self):
rand_data = []
while True:
rand_data.append('%030x' % randrange(256**15))
self._watchdog.reset()
class PseudoAgent(object):
"""Same logic as the agent, simplified"""
def busy_run(self):
w = Watchdog(5)
w.reset()
while True:
random()
def hanging_net(self):
w = Watchdog(5)
w.reset()
x = url.urlopen("http://localhost:31834")
print "ERROR Net call returned", x
return True
def normal_run(self):
w = Watchdog(2)
w.reset()
for i in range(5):
time.sleep(1)
w.reset()
def slow_tornado(self):
a = Application(12345, {"bind_host": "localhost"})
a._watchdog = Watchdog(4)
a._tr_manager = MockTxManager()
a.run()
def fast_tornado(self):
a = Application(12345, {"bind_host": "localhost"})
a._watchdog = Watchdog(6)
a._tr_manager = MockTxManager()
a.run()
if __name__ == "__main__":
if sys.argv[1] == "busy":
a = PseudoAgent()
a.busy_run()
elif sys.argv[1] == "net":
a = PseudoAgent()
a.hanging_net()
elif sys.argv[1] == "normal":
a = PseudoAgent()
a.normal_run()
elif sys.argv[1] == "slow":
a = PseudoAgent()
a.slow_tornado()
elif sys.argv[1] == "fast":
a = PseudoAgent()
a.fast_tornado()
elif sys.argv[1] == "test":
t = TestWatchdog()
t.runTest()
elif sys.argv[1] == "memory":
a = PseudoAgent()
a.use_lots_of_memory()
| bsd-3-clause |
maestrano/odoo | addons/l10n_be_coda/wizard/__init__.py | 439 | 1098 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_coda_import
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
simonpanay/flatsearch | ad/urls.py | 1 | 1562 | from django.conf.urls import url
from .views import UserCriteriaCreateView
from .views import UserCriteriaDeleteView
from .views import UserCriteriaListView
from .views import UserCriteriaUpdateView
from .views import FlatAdDetailView
from .views import FlatAdListView
from .views import review
from .views import interesting
from .views import notinteresting
from .views import unreview
from .views import update_ads
from .views import AddressCreateView
from .views import AddressUpdateView
urlpatterns = [
url(r'^$', FlatAdListView.as_view(), name='ad-list'),
url(r'^update$', update_ads, name='ad-list-update'),
url(r'^(?P<pk>\d+)/$', FlatAdDetailView.as_view(), name='ad-detail'),
url(r'^(?P<pk>\d+)/address$', AddressCreateView.as_view(), name='ad-create-address'),
url(r'^updateaddress/(?P<pk>\d+)$', AddressUpdateView.as_view(), name='ad-update-address'),
url(r'^(?P<pk>\d+)/review$', review, name='ad-review'),
url(r'^(?P<pk>\d+)/unreview$', unreview, name='ad-unreview'),
url(r'^(?P<pk>\d+)/interesting$', interesting, name='ad-interesting'),
url(r'^(?P<pk>\d+)/notinteresting$', notinteresting, name='ad-notinteresting'),
url(r'^criterias$', UserCriteriaListView.as_view(), name='user-criteria-list'),
url(r'^criterias/add$', UserCriteriaCreateView.as_view(), name='user-criteria-create'),
url(r'^criterias/(?P<pk>\d+)/update$', UserCriteriaUpdateView.as_view(), name='user-criteria-update'),
url(r'^criterias/(?P<pk>\d+)/delete$', UserCriteriaDeleteView.as_view(), name='user-criteria-delete'),
]
| agpl-3.0 |
liorvh/raspberry_pwn | src/pentest/metagoofil/hachoir_parser/video/flv.py | 95 | 4763 | """
FLV video parser.
Documentation:
- FLV File format: http://osflash.org/flv
- libavformat from ffmpeg project
- flashticle: Python project to read Flash (SWF and FLV with AMF metadata)
http://undefined.org/python/#flashticle
Author: Victor Stinner
Creation date: 4 november 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
UInt8, UInt24, UInt32, NullBits, NullBytes,
Bit, Bits, String, RawBytes, Enum)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_parser.audio.mpeg_audio import Frame
from hachoir_parser.video.amf import AMFObject
from hachoir_core.tools import createDict
SAMPLING_RATE = {
0: ( 5512, "5.5 kHz"),
1: (11025, "11 kHz"),
2: (22050, "22.1 kHz"),
3: (44100, "44.1 kHz"),
}
SAMPLING_RATE_VALUE = createDict(SAMPLING_RATE, 0)
SAMPLING_RATE_TEXT = createDict(SAMPLING_RATE, 1)
AUDIO_CODEC_MP3 = 2
AUDIO_CODEC_NAME = {
0: u"Uncompressed",
1: u"ADPCM",
2: u"MP3",
5: u"Nellymoser 8kHz mono",
6: u"Nellymoser",
}
VIDEO_CODEC_NAME = {
2: u"Sorensen H.263",
3: u"Screen video",
4: u"On2 VP6",
}
FRAME_TYPE = {
1: u"keyframe",
2: u"inter frame",
3: u"disposable inter frame",
}
class Header(FieldSet):
def createFields(self):
yield String(self, "signature", 3, "FLV format signature", charset="ASCII")
yield UInt8(self, "version")
yield NullBits(self, "reserved[]", 5)
yield Bit(self, "type_flags_audio")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "type_flags_video")
yield UInt32(self, "data_offset")
def parseAudio(parent, size):
yield Enum(Bits(parent, "codec", 4, "Audio codec"), AUDIO_CODEC_NAME)
yield Enum(Bits(parent, "sampling_rate", 2, "Sampling rate"), SAMPLING_RATE_TEXT)
yield Bit(parent, "is_16bit", "16-bit or 8-bit per sample")
yield Bit(parent, "is_stereo", "Stereo or mono channel")
size -= 1
if 0 < size:
if parent["codec"].value == AUDIO_CODEC_MP3 :
yield Frame(parent, "music_data", size=size*8)
else:
yield RawBytes(parent, "music_data", size)
def parseVideo(parent, size):
yield Enum(Bits(parent, "frame_type", 4, "Frame type"), FRAME_TYPE)
yield Enum(Bits(parent, "codec", 4, "Video codec"), VIDEO_CODEC_NAME)
if 1 < size:
yield RawBytes(parent, "data", size-1)
def parseAMF(parent, size):
while parent.current_size < parent.size:
yield AMFObject(parent, "entry[]")
class Chunk(FieldSet):
tag_info = {
8: ("audio[]", parseAudio, ""),
9: ("video[]", parseVideo, ""),
18: ("metadata", parseAMF, ""),
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (11 + self["size"].value) * 8
tag = self["tag"].value
if tag in self.tag_info:
self._name, self.parser, self._description = self.tag_info[tag]
else:
self.parser = None
def createFields(self):
yield UInt8(self, "tag")
yield UInt24(self, "size", "Content size")
yield UInt24(self, "timestamp", "Timestamp in millisecond")
yield NullBytes(self, "reserved", 4)
size = self["size"].value
if size:
if self.parser:
for field in self.parser(self, size):
yield field
else:
yield RawBytes(self, "content", size)
def getSampleRate(self):
try:
return SAMPLING_RATE_VALUE[self["sampling_rate"].value]
except LookupError:
return None
class FlvFile(Parser):
PARSER_TAGS = {
"id": "flv",
"category": "video",
"file_ext": ("flv",),
"mime": (u"video/x-flv",),
"min_size": 9*4,
"magic": (
# Signature, version=1, flags=5 (video+audio), header size=9
("FLV\1\x05\0\0\0\x09", 0),
# Signature, version=1, flags=5 (video), header size=9
("FLV\1\x01\0\0\0\x09", 0),
),
"description": u"Macromedia Flash video"
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, 3) != "FLV":
return "Wrong file signature"
if self["header/data_offset"].value != 9:
return "Unknown data offset in main header"
return True
def createFields(self):
yield Header(self, "header")
yield UInt32(self, "prev_size[]", "Size of previous chunk")
while not self.eof:
yield Chunk(self, "chunk[]")
yield UInt32(self, "prev_size[]", "Size of previous chunk")
def createDescription(self):
return u"Macromedia Flash video version %s" % self["header/version"].value
| gpl-3.0 |
jarshwah/django | django/http/cookie.py | 119 | 2895 | from __future__ import unicode_literals
import sys
from django.utils import six
from django.utils.encoding import force_str
from django.utils.six.moves import http_cookies
# http://bugs.python.org/issue2193 is fixed in Python 3.3+.
_cookie_allows_colon_in_names = six.PY3
# Cookie pickling bug is fixed in Python 2.7.9 and Python 3.4.3+
# http://bugs.python.org/issue22775
cookie_pickles_properly = (
(sys.version_info[:2] == (2, 7) and sys.version_info >= (2, 7, 9)) or
sys.version_info >= (3, 4, 3)
)
if _cookie_allows_colon_in_names and cookie_pickles_properly:
SimpleCookie = http_cookies.SimpleCookie
else:
Morsel = http_cookies.Morsel
class SimpleCookie(http_cookies.SimpleCookie):
if not cookie_pickles_properly:
def __setitem__(self, key, value):
# Apply the fix from http://bugs.python.org/issue22775 where
# it's not fixed in Python itself
if isinstance(value, Morsel):
# allow assignment of constructed Morsels (e.g. for pickling)
dict.__setitem__(self, key, value)
else:
super(SimpleCookie, self).__setitem__(key, value)
if not _cookie_allows_colon_in_names:
def load(self, rawdata):
self.bad_cookies = set()
if isinstance(rawdata, six.text_type):
rawdata = force_str(rawdata)
super(SimpleCookie, self).load(rawdata)
for key in self.bad_cookies:
del self[key]
# override private __set() method:
# (needed for using our Morsel, and for laxness with CookieError
def _BaseCookie__set(self, key, real_value, coded_value):
key = force_str(key)
try:
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
except http_cookies.CookieError:
if not hasattr(self, 'bad_cookies'):
self.bad_cookies = set()
self.bad_cookies.add(key)
dict.__setitem__(self, key, http_cookies.Morsel())
def parse_cookie(cookie):
"""
Return a dictionary parsed from a `Cookie:` header string.
"""
cookiedict = {}
if six.PY2:
cookie = force_str(cookie)
for chunk in cookie.split(str(';')):
if str('=') in chunk:
key, val = chunk.split(str('='), 1)
else:
# Assume an empty name per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
key, val = str(''), chunk
key, val = key.strip(), val.strip()
if key or val:
# unquote using Python's algorithm.
cookiedict[key] = http_cookies._unquote(val)
return cookiedict
| bsd-3-clause |
shubhdev/edx-platform | lms/djangoapps/class_dashboard/tests/test_dashboard_data.py | 27 | 13246 | """
Tests for class dashboard (Metrics tab in instructor dashboard)
"""
import json
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from mock import patch
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.tests.factories import StudentModuleFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory, AdminFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from class_dashboard.dashboard_data import (
get_problem_grade_distribution, get_sequential_open_distrib,
get_problem_set_grade_distrib, get_d3_problem_grade_distrib,
get_d3_sequential_open_distrib, get_d3_section_grade_distrib,
get_section_display_name, get_array_section_has_problem,
get_students_opened_subsection, get_students_problem_grades,
)
from class_dashboard.views import has_instructor_access_for_class
USER_COUNT = 11
@attr('shard_1')
class TestGetProblemGradeDistribution(ModuleStoreTestCase):
"""
Tests related to class_dashboard/dashboard_data.py
"""
def setUp(self):
super(TestGetProblemGradeDistribution, self).setUp()
self.request_factory = RequestFactory()
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password='test')
self.attempts = 3
self.course = CourseFactory.create(
display_name=u"test course omega \u03a9",
)
section = ItemFactory.create(
parent_location=self.course.location,
category="chapter",
display_name=u"test factory section omega \u03a9",
)
self.sub_section = ItemFactory.create(
parent_location=section.location,
category="sequential",
display_name=u"test subsection omega \u03a9",
)
unit = ItemFactory.create(
parent_location=self.sub_section.location,
category="vertical",
metadata={'graded': True, 'format': 'Homework'},
display_name=u"test unit omega \u03a9",
)
self.users = [UserFactory.create(username="metric" + str(__)) for __ in xrange(USER_COUNT)]
for user in self.users:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
for i in xrange(USER_COUNT - 1):
category = "problem"
self.item = ItemFactory.create(
parent_location=unit.location,
category=category,
data=StringResponseXMLFactory().build_xml(answer='foo'),
metadata={'rerandomize': 'always'},
display_name=u"test problem omega \u03a9 " + str(i)
)
for j, user in enumerate(self.users):
StudentModuleFactory.create(
grade=1 if i < j else 0,
max_grade=1 if i < j else 0.5,
student=user,
course_id=self.course.id,
module_state_key=self.item.location,
state=json.dumps({'attempts': self.attempts}),
)
for j, user in enumerate(self.users):
StudentModuleFactory.create(
course_id=self.course.id,
module_type='sequential',
module_state_key=self.item.location,
)
def test_get_problem_grade_distribution(self):
prob_grade_distrib, total_student_count = get_problem_grade_distribution(self.course.id)
for problem in prob_grade_distrib:
max_grade = prob_grade_distrib[problem]['max_grade']
self.assertEquals(1, max_grade)
for val in total_student_count.values():
self.assertEquals(USER_COUNT, val)
def test_get_sequential_open_distibution(self):
sequential_open_distrib = get_sequential_open_distrib(self.course.id)
for problem in sequential_open_distrib:
num_students = sequential_open_distrib[problem]
self.assertEquals(USER_COUNT, num_students)
def test_get_problemset_grade_distrib(self):
prob_grade_distrib, __ = get_problem_grade_distribution(self.course.id)
probset_grade_distrib = get_problem_set_grade_distrib(self.course.id, prob_grade_distrib)
for problem in probset_grade_distrib:
max_grade = probset_grade_distrib[problem]['max_grade']
self.assertEquals(1, max_grade)
grade_distrib = probset_grade_distrib[problem]['grade_distrib']
sum_attempts = 0
for item in grade_distrib:
sum_attempts += item[1]
self.assertEquals(USER_COUNT, sum_attempts)
def test_get_d3_problem_grade_distrib(self):
d3_data = get_d3_problem_grade_distrib(self.course.id)
for data in d3_data:
for stack_data in data['data']:
sum_values = 0
for problem in stack_data['stackData']:
sum_values += problem['value']
self.assertEquals(USER_COUNT, sum_values)
def test_get_d3_sequential_open_distrib(self):
d3_data = get_d3_sequential_open_distrib(self.course.id)
for data in d3_data:
for stack_data in data['data']:
for problem in stack_data['stackData']:
value = problem['value']
self.assertEquals(0, value)
def test_get_d3_section_grade_distrib(self):
d3_data = get_d3_section_grade_distrib(self.course.id, 0)
for stack_data in d3_data:
sum_values = 0
for problem in stack_data['stackData']:
sum_values += problem['value']
self.assertEquals(USER_COUNT, sum_values)
def test_get_students_problem_grades(self):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
response_content = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
self.assertEquals(USER_COUNT, len(response_content))
self.assertEquals(False, response_max_exceeded)
for item in response_content:
if item['grade'] == 0:
self.assertEquals(0, item['percent'])
else:
self.assertEquals(100, item['percent'])
def test_get_students_problem_grades_max(self):
with patch('class_dashboard.dashboard_data.MAX_SCREEN_LIST_LENGTH', 2):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
# Only 2 students in the list and response_max_exceeded is True
self.assertEquals(2, len(response_results))
self.assertEquals(True, response_max_exceeded)
def test_get_students_problem_grades_csv(self):
tooltip = 'P1.2.1 Q1 - 3382 Students (100%: 1/1 questions)'
attributes = '?module_id=' + self.item.location.to_deprecated_string() + '&tooltip=' + tooltip + '&csv=true'
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
# Check header and a row for each student in csv response
self.assertContains(response, '"Name","Username","Grade","Percent"')
self.assertContains(response, '"metric0","0.0","0.0"')
self.assertContains(response, '"metric1","0.0","0.0"')
self.assertContains(response, '"metric2","0.0","0.0"')
self.assertContains(response, '"metric3","0.0","0.0"')
self.assertContains(response, '"metric4","0.0","0.0"')
self.assertContains(response, '"metric5","0.0","0.0"')
self.assertContains(response, '"metric6","0.0","0.0"')
self.assertContains(response, '"metric7","0.0","0.0"')
self.assertContains(response, '"metric8","0.0","0.0"')
self.assertContains(response, '"metric9","0.0","0.0"')
self.assertContains(response, '"metric10","1.0","100.0"')
def test_get_students_opened_subsection(self):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
self.assertEquals(USER_COUNT, len(response_results))
self.assertEquals(False, response_max_exceeded)
def test_get_students_opened_subsection_max(self):
with patch('class_dashboard.dashboard_data.MAX_SCREEN_LIST_LENGTH', 2):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
# Only 2 students in the list and response_max_exceeded is True
self.assertEquals(2, len(response_results))
self.assertEquals(True, response_max_exceeded)
def test_get_students_opened_subsection_csv(self):
tooltip = '4162 students opened Subsection 5: Relational Algebra Exercises'
attributes = '?module_id=' + self.item.location.to_deprecated_string() + '&tooltip=' + tooltip + '&csv=true'
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
self.assertContains(response, '"Name","Username"')
# Check response contains 1 line for each user +1 for the header
self.assertEquals(USER_COUNT + 1, len(response.content.splitlines()))
def test_post_metrics_data_subsections_csv(self):
url = reverse('post_metrics_data_csv')
sections = json.dumps(["Introduction"])
tooltips = json.dumps([[{"subsection_name": "Pre-Course Survey", "subsection_num": 1, "type": "subsection", "num_students": 18963}]])
course_id = self.course.id
data_type = 'subsection'
data = json.dumps({'sections': sections,
'tooltips': tooltips,
'course_id': course_id.to_deprecated_string(),
'data_type': data_type,
})
response = self.client.post(url, {'data': data})
# Check response contains 1 line for header, 1 line for Section and 1 line for Subsection
self.assertEquals(3, len(response.content.splitlines()))
def test_post_metrics_data_problems_csv(self):
url = reverse('post_metrics_data_csv')
sections = json.dumps(["Introduction"])
tooltips = json.dumps([[[
{'student_count_percent': 0,
'problem_name': 'Q1',
'grade': 0,
'percent': 0,
'label': 'P1.2.1',
'max_grade': 1,
'count_grade': 26,
'type': u'problem'},
{'student_count_percent': 99,
'problem_name': 'Q1',
'grade': 1,
'percent': 100,
'label': 'P1.2.1',
'max_grade': 1,
'count_grade': 4763,
'type': 'problem'},
]]])
course_id = self.course.id
data_type = 'problem'
data = json.dumps({'sections': sections,
'tooltips': tooltips,
'course_id': course_id.to_deprecated_string(),
'data_type': data_type,
})
response = self.client.post(url, {'data': data})
# Check response contains 1 line for header, 1 line for Sections and 2 lines for problems
self.assertEquals(4, len(response.content.splitlines()))
def test_get_section_display_name(self):
section_display_name = get_section_display_name(self.course.id)
self.assertMultiLineEqual(section_display_name[0], u"test factory section omega \u03a9")
def test_get_array_section_has_problem(self):
b_section_has_problem = get_array_section_has_problem(self.course.id)
self.assertEquals(b_section_has_problem[0], True)
def test_has_instructor_access_for_class(self):
"""
Test for instructor access
"""
ret_val = has_instructor_access_for_class(self.instructor, self.course.id)
self.assertEquals(ret_val, True)
| agpl-3.0 |
FractalBobz/code-for-blog | 2009/pygame_creeps_game/example_code/pathfinder_visualize.py | 12 | 7094 | import sys
sys.path.append('..')
import time
import pygame
from pygame import Rect, Color
from pathfinder import PathFinder
from gridmap import GridMap
class Visualizer(object):
    """Interactive pathfinding demo widget.

    Draws a grid map (blocked cells, start/goal markers, computed path)
    inside the *field* rectangle of *screen*, and reacts to mouse clicks
    and the F5 key.

    NOTE(review): Python 2 code -- relies on integer '/' division for
    grid arithmetic and on tuple parameters in nested functions (removed
    in Python 3).
    """
    def __init__(self, screen, field, message_func):
        # screen: pygame display surface to draw on.
        # field: pygame Rect delimiting the grid area.
        # message_func(msg1, msg2): callback used to display status text.
        self.screen = screen
        self.field = field
        self.message_func = message_func
        self.grid_size = 15
        self.field_color = Color('black')
        self.grid_color = Color('gray')
        self.start_pos_color = Color('red')
        self.goal_pos_color = Color('green')
        self.path_color = Color('violet')
        self.blocked_color = Color('gray')
        self._init_map()
    def draw(self):
        # Redraw everything (grid, blocked squares, start/goal, path),
        # then push the current status messages through the callback.
        self._draw_grid(self.field)
        self._draw_map(self.field,
            self.blocked_list, self.start_pos,
            self.goal_pos, self.path)
        self.message_func(self.msg1, self.msg2)
    def user_event(self, event):
        # F5 recomputes the path; any mouse click changes the map and
        # invalidates the current path until the user recomputes.
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_F5:
                self._recompute_path()
        elif event.type == pygame.MOUSEBUTTONDOWN:
            self.path_valid = False
            self.msg1 = 'Please recompute path (F5)'
            self.msg2 = ''
            self._handle_mouse_click(event)
    ########################## PRIVATE ##########################
    def _init_map(self):
        # Hard-coded demo layout: start, goal, and a wall of blocked cells.
        self.start_pos = 0, 0
        self.goal_pos = 3, 8
        # Integer division (Python 2): number of whole cells that fit.
        nrows = self.field.height / self.grid_size
        ncols = self.field.width / self.grid_size
        self.map = GridMap(nrows, ncols)
        for b in [ (1, 1), (1, 2), (0, 3), (1, 3), (2, 3),
                (2, 4), (2, 5), (2, 6)]:
            self.map.set_blocked(b)
        self._recompute_path()
    def _handle_mouse_click(self, event):
        if not self.field.collidepoint(event.pos):
            return
        # Translate pixel coordinates into (row, col) grid coordinates.
        ncol = (event.pos[0] - self.field.left) / self.grid_size
        nrow = (event.pos[1] - self.field.top) / self.grid_size
        coord = (nrow, ncol)
        if event.button == 1:
            # Left button: toggle wall at the clicked cell.
            self.map.set_blocked(coord, not self.map.blocked[coord])
        elif event.button == 2:
            # Middle button: move the start marker.
            self.start_pos = coord
        elif event.button == 3:
            # Right button: move the goal marker.
            self.goal_pos = coord
    def _recompute_path(self):
        self.blocked_list = self.map.blocked
        # The grid map supplies successors, step cost g, and (reusing
        # move_cost) the heuristic h for the path finder.
        pf = PathFinder(self.map.successors, self.map.move_cost,
                self.map.move_cost)
        t = time.clock()
        self.path = list(pf.compute_path(self.start_pos, self.goal_pos))
        dt = time.clock() - t
        if self.path == []:
            self.msg1 = "No path found"
        else:
            self.msg1 = "Found path (length %d)" % len(self.path)
        self.msg2 = "Elapsed: %s seconds" % dt
        self.path_valid = True
    def _draw_grid(self, field):
        """ Draw a grid on the given surface.
        """
        self.screen.fill(self.field_color, field)
        nrows = field.height / self.grid_size
        ncols = field.width / self.grid_size
        # Horizontal lines.
        for y in range(nrows + 1):
            pygame.draw.line(
                self.screen,
                self.grid_color,
                (field.left, field.top + y * self.grid_size - 1),
                (field.right - 1, field.top + y * self.grid_size - 1))
        # Vertical lines.
        for x in range(ncols + 1):
            pygame.draw.line(
                self.screen,
                self.grid_color,
                (field.left + x * self.grid_size - 1, field.top),
                (field.left + x * self.grid_size - 1, field.bottom - 1))
    def _draw_map(self, field, blocked, start, goal, path):
        # NOTE: tuple parameters in the nested defs below are Python-2-only
        # syntax (PEP 3113 removed them in Python 3).
        def _fill_square((nrow, ncol), color):
            # Fill one whole grid cell (minus the 1px grid line).
            left = field.left + ncol * self.grid_size
            top = field.top + nrow * self.grid_size
            width = self.grid_size - 1
            self.screen.fill(color, Rect(left, top, width, width))
        def _fill_spot((nrow, ncol), color):
            # Draw a small circle centered in the cell.
            pos_x = field.left + ncol * self.grid_size + self.grid_size / 2
            pos_y = field.top + nrow * self.grid_size + self.grid_size / 2
            radius = self.grid_size / 4
            pygame.draw.circle(self.screen,
                color, (pos_x, pos_y), radius)
        for bl in blocked:
            _fill_square(bl, self.blocked_color)
        if self.path_valid:
            for path_square in path:
                _fill_spot(path_square, self.path_color)
        # Draw start/goal last so they stay visible on top of the path.
        _fill_spot(start, self.start_pos_color)
        _fill_spot(goal, self.goal_pos_color)
def draw_messages(screen, rect, message1, message2):
    """Render two status lines inside a rimmed box at *rect* on *screen*."""
    draw_rimmed_box(screen, rect, (50, 20, 0), 4, Color('white'))
    font = pygame.font.SysFont('arial', 18)
    white = Color('white')
    first_line = font.render(message1, True, white)
    second_line = font.render(message2, True, white)
    screen.blit(first_line, rect.move(10, 0))
    # Stack the second line directly under the first.
    screen.blit(second_line, rect.move(10, first_line.get_height()))
def draw_rimmed_box(screen, box_rect, box_color,
                    rim_width=0,
                    rim_color=Color('black')):
    """Draw box_rect filled with box_color on screen.

    When rim_width > 0, a rim_color rectangle extending rim_width pixels
    beyond every edge of box_rect is drawn first, underneath the box.
    """
    if rim_width:
        # inflate() grows the rect symmetrically around its center:
        # left/top shift by -rim_width, width/height grow by 2*rim_width.
        rim_rect = box_rect.inflate(rim_width * 2, rim_width * 2)
        pygame.draw.rect(screen, rim_color, rim_rect)
    pygame.draw.rect(screen, box_color, box_rect)
def draw_title(screen, rect):
    """Draw the instructions panel: a rimmed box with one help line per row."""
    draw_rimmed_box(screen, rect, (40, 10, 60), 4, Color('gray'))
    help_lines = (
        'Left click to toggle wall',
        'Middle click to set start (red)',
        'Right click to set goal (green)',
        'F5 to recompute the path',
    )
    font = pygame.font.SysFont('arial', 16)
    for row, text in enumerate(help_lines):
        surface = font.render(text, True, Color('white'))
        screen.blit(surface, rect.move(10, row * surface.get_height()))
def run_game():
    """Create the pygame window and run the visualizer's event loop forever."""
    SCREEN_WIDTH, SCREEN_HEIGHT = 350, 550
    FIELD_RECT = Rect(25, 130, 300, 300)
    MESSAGES_RECT = Rect(25, 450, 300, 50)
    TITLE_RECT = Rect(25, 10, 300, 90)
    pygame.init()
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
    clock = pygame.time.Clock()
    def show_messages(msg1, msg2):
        # Callback handed to the visualizer for status display.
        draw_messages(screen, MESSAGES_RECT, msg1, msg2)
    viz = Visualizer(screen, FIELD_RECT, show_messages)
    while True:
        # Cap the frame rate at 30 FPS.
        clock.tick(30)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit_game()
            else:
                viz.user_event(event)
        draw_title(screen, TITLE_RECT)
        viz.draw()
        pygame.display.flip()
def exit_game():
    """Terminate the program by raising SystemExit (exit status None)."""
    raise SystemExit
run_game()
| unlicense |
lancezlin/pylearn2 | pylearn2/utils/tests/test_general.py | 45 | 1096 | """
Tests for pylearn2.utils.general functions.
"""
from pylearn2.utils import contains_nan, contains_inf, isfinite
import numpy as np
def test_contains_nan():
    """Check that contains_nan flags an array iff it holds an `np.nan`."""
    values = np.random.random(100)
    assert not contains_nan(values)
    values[0] = np.nan
    assert contains_nan(values)
def test_contains_inf():
    """Check that contains_inf flags infinities but does not flag NaN."""
    values = np.random.random(100)
    assert not contains_inf(values)
    values[0] = np.nan
    # NaN is not an infinity.
    assert not contains_inf(values)
    for bad in (np.inf, -np.inf):
        values[1] = bad
        assert contains_inf(values)
def test_isfinite():
    """Check that isfinite rejects arrays containing NaN or +/-inf."""
    values = np.random.random(100)
    assert isfinite(values)
    for bad in (np.nan, np.inf, -np.inf):
        values[0] = bad
        assert not isfinite(values)
| bsd-3-clause |
lthurlow/Boolean-Constrained-Routing | networkx-1.8.1/networkx/algorithms/tests/test_distance_regular.py | 87 | 1729 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestDistanceRegular:
    """Tests for distance-regularity predicates and intersection arrays."""

    def test_is_distance_regular(self):
        # Every graph in this list is known to be distance regular.
        regular_graphs = [
            nx.icosahedral_graph(),
            nx.petersen_graph(),
            nx.cubical_graph(),
            nx.complete_bipartite_graph(3, 3),
            nx.tetrahedral_graph(),
            nx.dodecahedral_graph(),
            nx.pappus_graph(),
            nx.heawood_graph(),
            nx.cycle_graph(3),
        ]
        for G in regular_graphs:
            assert_true(nx.is_distance_regular(G))
        # A path on more than three nodes is not distance regular.
        assert_false(nx.is_distance_regular(nx.path_graph(4)))

    def test_not_connected(self):
        # A disconnected graph is never distance regular.
        G = nx.cycle_graph(4)
        G.add_cycle([5, 6, 7])
        assert_false(nx.is_distance_regular(G))

    def test_global_parameters(self):
        # (cycle length, expected (c_j, a_j, b_j) triples).
        cases = [
            (5, [(0, 0, 2), (1, 0, 1), (1, 1, 0)]),
            (3, [(0, 0, 2), (1, 1, 0)]),
        ]
        for n, expected in cases:
            b, c = nx.intersection_array(nx.cycle_graph(n))
            assert_equal(list(nx.global_parameters(b, c)), expected)

    def test_intersection_array(self):
        # (graph, expected b array, expected c array).
        cases = [
            (nx.cycle_graph(5), [2, 1], [1, 1]),
            (nx.dodecahedral_graph(), [3, 2, 1, 1, 1], [1, 1, 1, 2, 3]),
            (nx.icosahedral_graph(), [5, 2, 1], [1, 2, 5]),
        ]
        for graph, expected_b, expected_c in cases:
            b, c = nx.intersection_array(graph)
            assert_equal(b, expected_b)
            assert_equal(c, expected_c)
| mit |
nvoron23/socialite | jython/Lib/trace.py | 90 | 29112 | #!/usr/bin/env python
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:zooko@zooko.com
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], trace=0,
count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
import linecache
import os
import re
import sys
import threading
import token
import tokenize
import types
import gc
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
def usage(outfile):
    """Write the command-line help text for this script to *outfile*."""
    outfile.write("""Usage: %s [OPTIONS] <file> [ARGS]

Meta-options:
--help                Display this help then exit.
--version             Output version information then exit.

Otherwise, exactly one of the following three options must be given:
-t, --trace           Print each line to sys.stdout before it is executed.
-c, --count           Count the number of times each line is executed
                      and write the counts to <module>.cover for each
                      module executed, in the module's directory.
                      See also `--coverdir', `--file', `--no-report' below.
-l, --listfuncs       Keep track of which functions are executed at least
                      once and write the results to sys.stdout after the
                      program exits.
-T, --trackcalls      Keep track of caller/called pairs and write the
                      results to sys.stdout after the program exits.
-r, --report          Generate a report from a counts file; do not execute
                      any code.  `--file' must specify the results file to
                      read, which must have been created in a previous run
                      with `--count --file=FILE'.

Modifiers:
-f, --file=<file>     File to accumulate counts over several runs.
-R, --no-report       Do not generate the coverage report files.
                      Useful if you want to accumulate over several runs.
-C, --coverdir=<dir>  Directory where the report files.  The coverage
                      report for <package>.<module> is written to file
                      <dir>/<package>/<module>.cover.
-m, --missing         Annotate executable lines that were not executed
                      with '>>>>>> '.
-s, --summary         Write a brief summary on stdout for each file.
                      (Can only be used with --count or --report.)

Filters, may be repeated multiple times:
--ignore-module=<mod> Ignore the given module and its submodules
                      (if it is a package).
--ignore-dir=<dir>    Ignore files in the given directory (multiple
                      directories can be joined by os.pathsep).
""" % sys.argv[0])
# Marker comment: lines containing it are never flagged as un-executed.
PRAGMA_NOCOVER = "#pragma NO COVER"

# Simple rx to find lines with no code (blank or comment-only).
rx_blank = re.compile(r'^\s*(#.*)?$')
class Ignore:
    """Decide which (filename, modulename) pairs tracing should skip.

    A module is ignored when its name (or a parent package's name) is in
    *modules*, when its source file lives under one of *dirs*, or when it
    has no source file at all (built-in modules).  Verdicts are memoized
    in self._ignore, keyed by module name.
    """
    def __init__(self, modules = None, dirs = None):
        self._mods = modules or []
        # Normalize once so the startswith() test in names() compares
        # like with like.  A list comprehension (not map()) keeps this a
        # real list: under Python 3, map() returns a one-shot iterator
        # that would be silently empty on the second call to names().
        self._dirs = [os.path.normpath(d) for d in dirs or []]
        # "<string>" (exec'd code) is always ignored.
        self._ignore = { '<string>': 1 }

    def names(self, filename, modulename):
        """Return 1 if (filename, modulename) should be ignored, else 0."""
        # `in` instead of the Python-2-only dict.has_key().
        if modulename in self._ignore:
            return self._ignore[modulename]
        # haven't seen this one before, so see if the module name is
        # on the ignore list.  Need to take some care since ignoring
        # "cmp" musn't mean ignoring "cmpcache" but ignoring
        # "Spam" must also mean ignoring "Spam.Eggs".
        for mod in self._mods:
            if mod == modulename:  # Identical names, so ignore
                self._ignore[modulename] = 1
                return 1
            # check if the module is a proper submodule of something on
            # the ignore list
            n = len(mod)
            # (will not overflow since if the first n characters are the
            # same and the name has not already occurred, then the size
            # of "name" is greater than that of "mod")
            if mod == modulename[:n] and modulename[n] == '.':
                self._ignore[modulename] = 1
                return 1
        # Now check that __file__ isn't in one of the directories
        if filename is None:
            # must be a built-in, so we must ignore
            self._ignore[modulename] = 1
            return 1
        # Ignore a file when it contains one of the ignorable paths
        for d in self._dirs:
            # The '+ os.sep' is to ensure that d is a parent directory,
            # as compared to cases like:
            #  d = "/usr/local"
            #  filename = "/usr/local.py"
            # or
            #  d = "/usr/local.py"
            #  filename = "/usr/local.py"
            if filename.startswith(d + os.sep):
                self._ignore[modulename] = 1
                return 1
        # Tried the different ways, so we don't ignore this module
        self._ignore[modulename] = 0
        return 0
def modname(path):
    """Return a plausible module name for the path.

    The module name is the file's base name with its final extension
    stripped, e.g. '/a/b/mod.py' -> 'mod'.
    """
    root, _ext = os.path.splitext(os.path.basename(path))
    return root
def fullmodname(path):
    """Return a plausible module name for the path."""

    # If the file 'path' is part of a package, then the filename isn't
    # enough to uniquely identify it.  Try to do the right thing by
    # looking in sys.path for the longest matching prefix.  We'll
    # assume that the rest is the package name.

    comparepath = os.path.normcase(path)
    longest = ""
    for dir in sys.path:
        dir = os.path.normcase(dir)
        # NOTE(review): if comparepath equals dir exactly,
        # comparepath[len(dir)] raises IndexError -- presumably never
        # happens for a real source file path; confirm if hardening.
        if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
            if len(dir) > len(longest):
                longest = dir

    if longest:
        base = path[len(longest) + 1:]
    else:
        base = path
    # Turn the remaining relative path into a dotted module name and
    # strip the final extension.
    base = base.replace(os.sep, ".")
    if os.altsep:
        base = base.replace(os.altsep, ".")
    filename, ext = os.path.splitext(base)
    return filename
class CoverageResults:
    """Holds and merges coverage data (line counts, called functions,
    caller/callee pairs) and writes the .cover report files.

    NOTE(review): Python 2 code (print statements, `except X, err`).
    """
    def __init__(self, counts=None, calledfuncs=None, infile=None,
                 callers=None, outfile=None):
        # All three dicts are copied so the caller's objects are not
        # mutated by update()/write_results().
        self.counts = counts
        if self.counts is None:
            self.counts = {}
        self.counter = self.counts.copy() # map (filename, lineno) to count
        self.calledfuncs = calledfuncs
        if self.calledfuncs is None:
            self.calledfuncs = {}
        self.calledfuncs = self.calledfuncs.copy()
        self.callers = callers
        if self.callers is None:
            self.callers = {}
        self.callers = self.callers.copy()
        self.infile = infile
        self.outfile = outfile
        if self.infile:
            # Try to merge existing counts file.
            try:
                counts, calledfuncs, callers = \
                        pickle.load(open(self.infile, 'rb'))
                self.update(self.__class__(counts, calledfuncs, callers))
            except (IOError, EOFError, ValueError), err:
                # Best-effort merge: a missing/corrupt counts file is
                # reported but not fatal.
                print >> sys.stderr, ("Skipping counts file %r: %s"
                                      % (self.infile, err))

    def update(self, other):
        """Merge in the data from another CoverageResults"""
        counts = self.counts
        calledfuncs = self.calledfuncs
        callers = self.callers
        other_counts = other.counts
        other_calledfuncs = other.calledfuncs
        other_callers = other.callers

        # Line counts add up; called-function and caller sets are unioned.
        for key in other_counts.keys():
            counts[key] = counts.get(key, 0) + other_counts[key]

        for key in other_calledfuncs.keys():
            calledfuncs[key] = 1

        for key in other_callers.keys():
            callers[key] = 1

    def write_results(self, show_missing=True, summary=False, coverdir=None):
        """Print call/caller listings and write one .cover file per module.

        @param coverdir directory for the .cover files; when None each
               file is written next to its source module.
        """
        if self.calledfuncs:
            print
            print "functions called:"
            calls = self.calledfuncs.keys()
            calls.sort()
            for filename, modulename, funcname in calls:
                print ("filename: %s, modulename: %s, funcname: %s"
                       % (filename, modulename, funcname))

        if self.callers:
            print
            print "calling relationships:"
            calls = self.callers.keys()
            calls.sort()
            lastfile = lastcfile = ""
            # Group output by calling file, then by called file.
            for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) in calls:
                if pfile != lastfile:
                    print
                    print "***", pfile, "***"
                    lastfile = pfile
                    lastcfile = ""
                if cfile != pfile and lastcfile != cfile:
                    print "  -->", cfile
                    lastcfile = cfile
                print "    %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)

        # turn the counts data ("(filename, lineno) = count") into something
        # accessible on a per-file basis
        per_file = {}
        for filename, lineno in self.counts.keys():
            lines_hit = per_file[filename] = per_file.get(filename, {})
            lines_hit[lineno] = self.counts[(filename, lineno)]

        # accumulate summary info, if needed
        sums = {}

        for filename, count in per_file.iteritems():
            # skip some "files" we don't care about...
            if filename == "<string>":
                continue
            if filename.startswith("<doctest "):
                continue

            # Report against the .py source, not the compiled file.
            if filename.endswith((".pyc", ".pyo")):
                filename = filename[:-1]

            if coverdir is None:
                dir = os.path.dirname(os.path.abspath(filename))
                modulename = modname(filename)
            else:
                dir = coverdir
                if not os.path.exists(dir):
                    os.makedirs(dir)
                modulename = fullmodname(filename)

            # If desired, get a list of the line numbers which represent
            # executable content (returned as a dict for better lookup speed)
            if show_missing:
                lnotab = find_executable_linenos(filename)
            else:
                lnotab = {}

            source = linecache.getlines(filename)
            coverpath = os.path.join(dir, modulename + ".cover")
            n_hits, n_lines = self.write_results_file(coverpath, source,
                                                      lnotab, count)

            if summary and n_lines:
                percent = int(100 * n_hits / n_lines)
                sums[modulename] = n_lines, percent, modulename, filename

        if summary and sums:
            mods = sums.keys()
            mods.sort()
            print "lines   cov%   module   (path)"
            for m in mods:
                n_lines, percent, modulename, filename = sums[m]
                print "%5d   %3d%%   %s   (%s)" % sums[m]

        if self.outfile:
            # try and store counts and module info into self.outfile
            try:
                pickle.dump((self.counts, self.calledfuncs, self.callers),
                            open(self.outfile, 'wb'), 1)
            except IOError, err:
                print >> sys.stderr, "Can't save counts files because %s" % err

    def write_results_file(self, path, lines, lnotab, lines_hit):
        """Return a coverage results file in path."""

        try:
            outfile = open(path, "w")
        except IOError, err:
            print >> sys.stderr, ("trace: Could not open %r for writing: %s"
                                  "- skipping" % (path, err))
            return 0, 0

        n_lines = 0
        n_hits = 0
        for i, line in enumerate(lines):
            lineno = i + 1
            # do the blank/comment match to try to mark more lines
            # (help the reader find stuff that hasn't been covered)
            if lineno in lines_hit:
                # Executed line: prefix with its hit count.
                outfile.write("%5d: " % lines_hit[lineno])
                n_hits += 1
                n_lines += 1
            elif rx_blank.match(line):
                outfile.write("       ")
            else:
                # lines preceded by no marks weren't hit
                # Highlight them if so indicated, unless the line contains
                # #pragma: NO COVER
                if lineno in lnotab and not PRAGMA_NOCOVER in lines[i]:
                    outfile.write(">>>>>> ")
                    n_lines += 1
                else:
                    outfile.write("       ")
            outfile.write(lines[i].expandtabs(8))
        outfile.close()

        return n_hits, n_lines
def find_lines_from_code(code, strs):
    """Return dict where keys are lines in the line number table."""
    linenos = {}

    # Decode co_lnotab: every second byte is a line-number increment.
    # (Python 2 layout, where co_lnotab is a str and needs ord(); this
    # predates the 3.6+ line-table formats.)
    line_increments = [ord(c) for c in code.co_lnotab[1::2]]
    table_length = len(line_increments)  # NOTE(review): unused
    docstring = False                    # NOTE(review): unused

    lineno = code.co_firstlineno
    for li in line_increments:
        lineno += li
        # Skip lines that hold only (doc)strings -- see find_strings().
        if lineno not in strs:
            linenos[lineno] = 1

    return linenos
def find_lines(code, strs):
    """Return lineno dict for all code objects reachable from code."""
    # Line numbers recorded directly in this scope's line table.
    linenos = find_lines_from_code(code, strs)
    # Code objects for nested scopes (functions, classes, lambdas) live
    # in co_consts; recurse into each one and merge its lines.
    for const in code.co_consts:
        if isinstance(const, types.CodeType):
            linenos.update(find_lines(const, strs))
    return linenos
def find_strings(filename):
    """Return a dict of possible docstring positions.

    The dict maps each line number that contains only a string (or part
    of a triple-quoted string) to 1.
    """
    candidates = {}
    # A STRING token whose predecessor is INDENT is a docstring
    # candidate.  Seeding with INDENT makes the very first token match
    # too, so a module docstring on line 1 is caught.
    previous = token.INDENT
    f = open(filename)
    for tok_type, _text, start, end, _line in tokenize.generate_tokens(f.readline):
        if tok_type == token.STRING and previous == token.INDENT:
            first_line, _scol = start
            last_line, _ecol = end
            for lineno in range(first_line, last_line + 1):
                candidates[lineno] = 1
        previous = tok_type
    f.close()
    return candidates
def find_executable_linenos(filename):
    """Return dict where keys are line numbers in the line number table."""
    try:
        # "rU" = universal-newlines mode (Python 2 idiom; removed in 3.11).
        prog = open(filename, "rU").read()
    except IOError, err:
        # Best effort: unreadable source just means no annotations.
        print >> sys.stderr, ("Not printing coverage data for %r: %s"
                              % (filename, err))
        return {}
    # Compile to recover the line table, then subtract docstring-only lines.
    code = compile(prog, filename, "exec")
    strs = find_strings(filename)
    return find_lines(code, strs)
class Trace:
    """Install sys/threading trace hooks and record, depending on the
    constructor flags: per-line execution counts, a line-by-line trace,
    the set of called functions, or caller/callee relationships.

    NOTE(review): Python 2 code (`exec ... in`, print statements).
    """
    def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
                 ignoremods=(), ignoredirs=(), infile=None, outfile=None):
        """
        @param count true iff it should count number of times each
                     line is executed
        @param trace true iff it should print out each line that is
                     being counted
        @param countfuncs true iff it should just output a list of
                     (filename, modulename, funcname,) for functions
                     that were called at least once;  This overrides
                     `count' and `trace'
        @param ignoremods a list of the names of modules to ignore
        @param ignoredirs a list of the names of directories to ignore
                     all of the (recursive) contents of
        @param infile file from which to read stored counts to be
                     added into the results
        @param outfile file in which to write the results
        """
        self.infile = infile
        self.outfile = outfile
        self.ignore = Ignore(ignoremods, ignoredirs)
        self.counts = {}   # keys are (filename, linenumber)
        self.blabbed = {} # for debugging
        self.pathtobasename = {} # for memoizing os.path.basename
        self.donothing = 0
        self.trace = trace
        self._calledfuncs = {}
        self._callers = {}
        self._caller_cache = {}
        # Select the global/local hook pair for the requested mode;
        # countcallers and countfuncs need no per-line (local) hook.
        if countcallers:
            self.globaltrace = self.globaltrace_trackcallers
        elif countfuncs:
            self.globaltrace = self.globaltrace_countfuncs
        elif trace and count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace_and_count
        elif trace:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace
        elif count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_count
        else:
            # Ahem -- do nothing?  Okay.
            self.donothing = 1

    def run(self, cmd):
        """Execute cmd in __main__'s namespace with tracing enabled."""
        import __main__
        dict = __main__.__dict__
        if not self.donothing:
            sys.settrace(self.globaltrace)
            threading.settrace(self.globaltrace)
        try:
            exec cmd in dict, dict
        finally:
            # Always uninstall the hooks, even if cmd raised.
            if not self.donothing:
                sys.settrace(None)
                threading.settrace(None)

    def runctx(self, cmd, globals=None, locals=None):
        """Like run(), but with caller-supplied namespaces."""
        if globals is None: globals = {}
        if locals is None: locals = {}
        if not self.donothing:
            sys.settrace(self.globaltrace)
            threading.settrace(self.globaltrace)
        try:
            exec cmd in globals, locals
        finally:
            if not self.donothing:
                sys.settrace(None)
                threading.settrace(None)

    def runfunc(self, func, *args, **kw):
        """Call func(*args, **kw) under tracing and return its result."""
        result = None
        if not self.donothing:
            sys.settrace(self.globaltrace)
        try:
            result = func(*args, **kw)
        finally:
            if not self.donothing:
                sys.settrace(None)
        return result

    def file_module_function_of(self, frame):
        # Map a frame to (filename, modulename, funcname); for methods,
        # try to recover "Class.method" by walking gc referrers from the
        # code object to its function, its class dict, and the class.
        code = frame.f_code
        filename = code.co_filename
        if filename:
            modulename = modname(filename)
        else:
            modulename = None

        funcname = code.co_name
        clsname = None
        if code in self._caller_cache:
            if self._caller_cache[code] is not None:
                clsname = self._caller_cache[code]
        else:
            self._caller_cache[code] = None
            ## use of gc.get_referrers() was suggested by Michael Hudson
            # all functions which refer to this code object
            funcs = [f for f in gc.get_referrers(code)
                         if hasattr(f, "func_doc")]
            # require len(func) == 1 to avoid ambiguity caused by calls to
            # new.function(): "In the face of ambiguity, refuse the
            # temptation to guess."
            if len(funcs) == 1:
                dicts = [d for d in gc.get_referrers(funcs[0])
                             if isinstance(d, dict)]
                if len(dicts) == 1:
                    classes = [c for c in gc.get_referrers(dicts[0])
                                   if hasattr(c, "__bases__")]
                    if len(classes) == 1:
                        # ditto for new.classobj()
                        clsname = str(classes[0])
                        # cache the result - assumption is that new.* is
                        # not called later to disturb this relationship
                        # _caller_cache could be flushed if functions in
                        # the new module get called.
                        self._caller_cache[code] = clsname
        if clsname is not None:
            # final hack - module name shows up in str(cls), but we've already
            # computed module name, so remove it
            clsname = clsname.split(".")[1:]
            clsname = ".".join(clsname)
            funcname = "%s.%s" % (clsname, funcname)

        return filename, modulename, funcname

    def globaltrace_trackcallers(self, frame, why, arg):
        """Handler for call events.

        Adds information about who called who to the self._callers dict.
        """
        if why == 'call':
            # XXX Should do a better job of identifying methods
            this_func = self.file_module_function_of(frame)
            parent_func = self.file_module_function_of(frame.f_back)
            self._callers[(parent_func, this_func)] = 1

    def globaltrace_countfuncs(self, frame, why, arg):
        """Handler for call events.

        Adds (filename, modulename, funcname) to the self._calledfuncs dict.
        """
        if why == 'call':
            this_func = self.file_module_function_of(frame)
            self._calledfuncs[this_func] = 1

    def globaltrace_lt(self, frame, why, arg):
        """Handler for call events.

        If the code block being entered is to be ignored, returns `None',
        else returns self.localtrace.
        """
        if why == 'call':
            code = frame.f_code
            filename = frame.f_globals.get('__file__', None)
            if filename:
                # XXX modname() doesn't work right for packages, so
                # the ignore support won't work right for packages
                modulename = modname(filename)
                if modulename is not None:
                    ignore_it = self.ignore.names(filename, modulename)
                    if not ignore_it:
                        if self.trace:
                            print (" --- modulename: %s, funcname: %s"
                                   % (modulename, code.co_name))
                        return self.localtrace
            else:
                return None

    def localtrace_trace_and_count(self, frame, why, arg):
        # Per-line hook: count the line AND echo it to stdout.
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1
            bname = os.path.basename(filename)
            # Trailing comma: the fetched source line already ends in '\n'.
            print "%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)),
        return self.localtrace

    def localtrace_trace(self, frame, why, arg):
        # Per-line hook: echo each executed line to stdout.
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            bname = os.path.basename(filename)
            print "%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)),
        return self.localtrace

    def localtrace_count(self, frame, why, arg):
        # Per-line hook: only bump the (filename, lineno) counter.
        if why == "line":
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1
        return self.localtrace

    def results(self):
        """Snapshot the collected data as a CoverageResults object."""
        return CoverageResults(self.counts, infile=self.infile,
                               outfile=self.outfile,
                               calledfuncs=self._calledfuncs,
                               callers=self._callers)
def _err_exit(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.exit(1)
def main(argv=None):
    """Command-line entry point: parse options, then either run the target
    script under a Trace or just regenerate a report from a counts file.
    """
    import getopt

    if argv is None:
        argv = sys.argv
    try:
        opts, prog_argv = getopt.getopt(argv[1:], "tcrRf:d:msC:lT",
                                        ["help", "version", "trace", "count",
                                         "report", "no-report", "summary",
                                         "file=", "missing",
                                         "ignore-module=", "ignore-dir=",
                                         "coverdir=", "listfuncs",
                                         "trackcalls"])

    except getopt.error, msg:
        sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
        sys.stderr.write("Try `%s --help' for more information\n"
                         % sys.argv[0])
        sys.exit(1)

    trace = 0
    count = 0
    report = 0
    no_report = 0
    counts_file = None
    missing = 0
    ignore_modules = []
    ignore_dirs = []
    coverdir = None
    summary = 0
    listfuncs = False
    countcallers = False

    for opt, val in opts:
        if opt == "--help":
            usage(sys.stdout)
            sys.exit(0)

        if opt == "--version":
            sys.stdout.write("trace 2.0\n")
            sys.exit(0)

        if opt == "-T" or opt == "--trackcalls":
            countcallers = True
            continue

        if opt == "-l" or opt == "--listfuncs":
            listfuncs = True
            continue

        if opt == "-t" or opt == "--trace":
            trace = 1
            continue

        if opt == "-c" or opt == "--count":
            count = 1
            continue

        if opt == "-r" or opt == "--report":
            report = 1
            continue

        if opt == "-R" or opt == "--no-report":
            no_report = 1
            continue

        if opt == "-f" or opt == "--file":
            counts_file = val
            continue

        if opt == "-m" or opt == "--missing":
            missing = 1
            continue

        if opt == "-C" or opt == "--coverdir":
            coverdir = val
            continue

        if opt == "-s" or opt == "--summary":
            summary = 1
            continue

        if opt == "--ignore-module":
            ignore_modules.append(val)
            continue

        if opt == "--ignore-dir":
            # Each value may hold several directories joined by os.pathsep,
            # with $prefix/$exec_prefix standing for the stdlib locations.
            for s in val.split(os.pathsep):
                s = os.path.expandvars(s)
                # should I also call expanduser? (after all, could use $HOME)
                s = s.replace("$prefix",
                              os.path.join(sys.prefix, "lib",
                                           "python" + sys.version[:3]))
                s = s.replace("$exec_prefix",
                              os.path.join(sys.exec_prefix, "lib",
                                           "python" + sys.version[:3]))
                s = os.path.normpath(s)
                ignore_dirs.append(s)
            continue

        assert 0, "Should never get here"

    # Option sanity checks: the modes are mutually constrained.
    if listfuncs and (count or trace):
        _err_exit("cannot specify both --listfuncs and (--trace or --count)")

    if not (count or trace or report or listfuncs or countcallers):
        _err_exit("must specify one of --trace, --count, --report, "
                  "--listfuncs, or --trackcalls")

    if report and no_report:
        _err_exit("cannot specify both --report and --no-report")

    if report and not counts_file:
        _err_exit("--report requires a --file")

    if no_report and len(prog_argv) == 0:
        _err_exit("missing name of file to run")

    # everything is ready
    if report:
        # Report-only mode: read the counts file and write .cover files.
        results = CoverageResults(infile=counts_file, outfile=counts_file)
        results.write_results(missing, summary=summary, coverdir=coverdir)
    else:
        # Run prog_argv[0] under the tracer as if it were invoked directly.
        sys.argv = prog_argv
        progname = prog_argv[0]
        sys.path[0] = os.path.split(progname)[0]

        t = Trace(count, trace, countfuncs=listfuncs,
                  countcallers=countcallers, ignoremods=ignore_modules,
                  ignoredirs=ignore_dirs, infile=counts_file,
                  outfile=counts_file)
        try:
            t.run('execfile(%r)' % (progname,))
        except IOError, err:
            _err_exit("Cannot run file %r because: %s" % (sys.argv[0], err))
        except SystemExit:
            pass

        results = t.results()

        if not no_report:
            results.write_results(missing, summary=summary, coverdir=coverdir)
# Script entry point: only run main() when executed directly.
if __name__=='__main__':
    main()
| apache-2.0 |
acshan/odoo | openerp/addons/base/module/wizard/base_module_upgrade.py | 294 | 5164 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from openerp.tools.translate import _
class base_module_upgrade(osv.osv_memory):
    """ Module Upgrade

    Transient wizard that applies (or cancels) all pending module state
    changes: 'to install', 'to upgrade' and 'to remove'.
    """
    _name = "base.module.upgrade"
    _description = "Module Upgrade"

    _columns = {
        # Read-only text listing the pending modules, one per line.
        'module_info': fields.text('Modules to Update',readonly=True),
    }

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        # When no module is pending, replace the wizard's form view with
        # an "Upgrade Completed" screen instead of the module list.
        res = super(base_module_upgrade, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
        if view_type != 'form':
            return res
        context = {} if context is None else context
        record_id = context and context.get('active_id', False) or False
        active_model = context.get('active_model')
        if (not record_id) or (not active_model):
            return res
        ids = self.get_module_list(cr, uid, context=context)
        if not ids:
            res['arch'] = '''<form string="Upgrade Completed" version="7.0">
                                <separator string="Upgrade Completed" colspan="4"/>
                                <footer>
                                    <button name="config" string="Start Configuration" type="object" class="oe_highlight"/> or
                                    <button special="cancel" string="Close" class="oe_link"/>
                                </footer>
                             </form>'''
        return res

    def get_module_list(self, cr, uid, context=None):
        """Return the ids of modules whose state requires an apply step."""
        mod_obj = self.pool.get('ir.module.module')
        ids = mod_obj.search(cr, uid, [
            ('state', 'in', ['to upgrade', 'to remove', 'to install'])])
        return ids

    def default_get(self, cr, uid, fields, context=None):
        # Pre-fill module_info with one "name : state" line per pending module.
        mod_obj = self.pool.get('ir.module.module')
        ids = self.get_module_list(cr, uid, context=context)
        res = mod_obj.read(cr, uid, ids, ['name','state'], context)
        return {'module_info': '\n'.join(map(lambda x: x['name']+' : '+x['state'], res))}

    def upgrade_module_cancel(self, cr, uid, ids, context=None):
        # Revert pending states: 'to upgrade'/'to remove' -> 'installed',
        # 'to install' -> 'uninstalled'.
        mod_obj = self.pool.get('ir.module.module')
        to_installed_ids = mod_obj.search(cr, uid, [
            ('state', 'in', ['to upgrade', 'to remove'])])
        if to_installed_ids:
            mod_obj.write(cr, uid, to_installed_ids, {'state': 'installed'}, context=context)

        to_uninstalled_ids = mod_obj.search(cr, uid, [
            ('state', '=', 'to install')])
        if to_uninstalled_ids:
            mod_obj.write(cr, uid, to_uninstalled_ids, {'state': 'uninstalled'}, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def upgrade_module(self, cr, uid, ids, context=None):
        ir_module = self.pool.get('ir.module.module')

        # install/upgrade: double-check preconditions
        ids = ir_module.search(cr, uid, [('state', 'in', ['to upgrade', 'to install'])])
        if ids:
            # Dependencies of the pending modules that are neither
            # installed nor scheduled block the upgrade: report them.
            cr.execute("""SELECT d.name FROM ir_module_module m
                          JOIN ir_module_module_dependency d ON (m.id = d.module_id)
                          LEFT JOIN ir_module_module m2 ON (d.name = m2.name)
                          WHERE m.id in %s and (m2.state IS NULL or m2.state IN %s)""",
                       (tuple(ids), ('uninstalled',)))
            unmet_packages = [x[0] for x in cr.fetchall()]
            if unmet_packages:
                raise osv.except_osv(_('Unmet Dependency!'),
                                     _('Following modules are not installed or unknown: %s') % ('\n\n' + '\n'.join(unmet_packages)))

            ir_module.download(cr, uid, ids, context=context)
        cr.commit() # save before re-creating cursor below
        # Rebuild the registry; this is where the actual install/upgrade/
        # removal of modules takes place.
        openerp.api.Environment.reset()
        openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)
        return {'type': 'ir.actions.act_window_close'}

    def config(self, cr, uid, ids, context=None):
        # Chain into the next pending configuration wizard, if any.
        return self.pool.get('res.config').next(cr, uid, [], context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
2ndQuadrant/ansible | test/units/modules/network/f5/test_bigip_device_auth_ldap.py | 37 | 4296 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_device_auth_ldap import ApiParameters
from library.modules.bigip_device_auth_ldap import ModuleParameters
from library.modules.bigip_device_auth_ldap import ModuleManager
from library.modules.bigip_device_auth_ldap import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_device_auth_ldap import ApiParameters
from ansible.modules.network.f5.bigip_device_auth_ldap import ModuleParameters
from ansible.modules.network.f5.bigip_device_auth_ldap import ModuleManager
from ansible.modules.network.f5.bigip_device_auth_ldap import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Load a fixture file by name, decoding it as JSON when possible.

    Results are memoized in ``fixture_data`` keyed by full path, so each
    fixture is read from disk at most once per test run. Files that are
    not valid JSON are returned as their raw text.
    """
    full_path = os.path.join(fixture_path, name)
    if full_path in fixture_data:
        return fixture_data[full_path]
    with open(full_path) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not valid JSON -- hand back the raw text unchanged.
        pass
    fixture_data[full_path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Unit tests for the bigip_device_auth_ldap parameter adapters."""

    def test_module_parameters(self):
        # NOTE(review): despite the test name, this builds ApiParameters, and
        # the assertions expect module-style inputs to be normalized to API
        # form (False -> 'no', True -> 'yes', bare cert names -> '/Common/...').
        # Confirm this is intentional and not meant to use ModuleParameters.
        args = dict(
            servers=['10.10.10.10', '10.10.10.11'],
            port=389,
            remote_directory_tree='foo',
            scope='base',
            bind_dn='bar',
            bind_password='secret',
            user_template='alice',
            check_member_attr=False,
            ssl='no',
            ssl_ca_cert='default.crt',
            ssl_client_key='default.key',
            ssl_client_cert='default1.crt',
            ssl_check_peer=True,
            login_ldap_attr='bob',
            fallback_to_local=True,
            update_password='on_create',
        )

        p = ApiParameters(params=args)
        assert p.port == 389
        assert p.servers == ['10.10.10.10', '10.10.10.11']
        assert p.remote_directory_tree == 'foo'
        assert p.scope == 'base'
        assert p.bind_dn == 'bar'
        assert p.bind_password == 'secret'
        assert p.user_template == 'alice'
        # Booleans surface as 'yes'/'no' strings on the parameters object.
        assert p.check_member_attr == 'no'
        assert p.ssl == 'no'
        # Certificate/key names gain the '/Common/' partition prefix.
        assert p.ssl_ca_cert == '/Common/default.crt'
        assert p.ssl_client_key == '/Common/default.key'
        assert p.ssl_client_cert == '/Common/default1.crt'
        assert p.ssl_check_peer == 'yes'
        assert p.login_ldap_attr == 'bob'
        assert p.fallback_to_local == 'yes'
        assert p.update_password == 'on_create'
class TestManager(unittest.TestCase):
    """Behavioral test of ModuleManager.exec_module with device I/O mocked."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        # Simulate the task arguments Ansible would pass to the module.
        set_module_args(dict(
            servers=['10.10.10.10', '10.10.10.11'],
            update_password='on_create',
            state='present',
            provider=dict(
                password='admin',
                server='localhost',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods to force specific logic in the module to happen.
        # No real device is contacted: the existence check and both write
        # paths are mocked out.
        mm = ModuleManager(module=module)
        mm.exists = Mock(return_value=False)
        mm.create_on_device = Mock(return_value=True)
        mm.update_auth_source_on_device = Mock(return_value=True)

        results = mm.exec_module()

        # Creating a previously-absent resource must report a change.
        assert results['changed'] is True
| gpl-3.0 |
xHeliotrope/injustice_dropper | env/lib/python3.4/site-packages/twilio/rest/pricing.py | 11 | 1061 | from twilio.rest.base import TwilioClient
from twilio.rest.resources import UNSET_TIMEOUT
from twilio.rest.resources.pricing import (
PhoneNumbers,
Voice,
)
class TwilioPricingClient(TwilioClient):
    """Client for the Twilio Pricing REST API.

    :param str account: Account SID from `your dashboard
        <https://twilio.com/user/account>`_
    :param str token: Auth Token from `your dashboard
        <https://twilio.com/user_account>`_
    :param str base: Root URL of the Pricing API.
    :param str version: API version segment appended to ``base``.
    :param float timeout: The socket connect and read timeout for requests
        to Twilio.
    """

    def __init__(self, account=None, token=None,
                 base="https://pricing.twilio.com", version="v1",
                 timeout=UNSET_TIMEOUT):
        super(TwilioPricingClient, self).__init__(account, token, base,
                                                  version, timeout)

        versioned_base = "{}/{}".format(base, version)
        # Resource proxies share this client's credentials and timeout.
        self.voice = Voice(versioned_base, self.auth, self.timeout)
        self.phone_numbers = PhoneNumbers(versioned_base, self.auth,
                                          self.timeout)
| mit |
D=16  # number of dimensions for representations
N=100 # number of neurons per dimension

import nef.nef_theano as nef
import nps
import nef.convolution
import hrr
import math
import random

# semantic pointers do not work well with small numbers of dimensions.
# To keep this example model small enough to be easily run, we have lowered
# the number of dimensions (D) to 16 and chosen a random number seed for
# which this model works well.
seed=17

net=nef.Network('Question Answering with Control',seed=seed)

# Make a simple node to generate interesting input for the network
random.seed(seed)
# Vocabulary of random semantic pointers; max_similarity keeps the
# generated vectors nearly orthogonal to each other.
vocab=hrr.Vocabulary(D,max_similarity=0.1)
class Input(nef.SimpleNode):
    """Scripted input node: two "statement" pointers, then alternating
    "question" pointers, emitted on origin ``x`` as a function of time."""
    def __init__(self,name):
        # self.zero is assigned before SimpleNode.__init__ on purpose:
        # presumably the base constructor probes origin_x() (at t=0) to size
        # the origin, and at that point only self.zero exists -- TODO confirm
        # against nef.SimpleNode before reordering anything here.
        self.zero=[0]*D
        nef.SimpleNode.__init__(self,name)
        # Pre-encoded semantic pointer vectors used by origin_x below.
        self.RED_CIRCLE=vocab.parse('STATEMENT+RED*CIRCLE').v
        self.BLUE_SQUARE=vocab.parse('STATEMENT+BLUE*SQUARE').v
        self.RED=vocab.parse('QUESTION+RED').v
        self.SQUARE=vocab.parse('QUESTION+SQUARE').v
    def origin_x(self):
        # Timeline: two statements early on, then after t=0.5 a repeating
        # 0.6s cycle alternating the RED and SQUARE questions; zero otherwise.
        if 0.1<self.t<0.3:
            return self.RED_CIRCLE
        elif 0.35<self.t<0.5:
            return self.BLUE_SQUARE
        elif self.t>0.5 and 0.2<(self.t-0.5)%0.6<0.4:
            return self.RED
        elif self.t>0.5 and 0.4<(self.t-0.5)%0.6<0.6:
            return self.SQUARE
        else:
            return self.zero
# Add the input to the network
inv=Input('inv')
net.add(inv)

prods=nps.ProductionSet() #This is an older way of implementing an SPA
                          #(see SPA routing examples), using the nps
                          #code directly
# Two production rules: statements are routed into working memory;
# questions trigger deconvolution of memory by the visual input.
prods.add(dict(visual='STATEMENT'),dict(visual_to_wm=True))
prods.add(dict(visual='QUESTION'),dict(wm_deconv_visual_to_motor=True))

subdim=4
# NOTE: N/subdim relies on integer division (this example targets the
# Python-2/Jython nengo scripting environment).
model=nps.NPS(net,prods,D,direct_convolution=False,direct_buffer=['visual'],
              neurons_buffer=N/subdim,subdimensions=subdim)
model.add_buffer_feedback(wm=1,pstc=0.4)  # recurrent feedback keeps wm stable

net.connect(inv.getOrigin('x'),'buffer_visual')

# Rename objects for display purposes
net.network.getNode('prod').name='thalamus'
net.network.getNode('buffer_visual').name='visual'
net.network.getNode('buffer_wm').name='memory'
net.network.getNode('buffer_motor').name='motor'
net.network.getNode('channel_visual_to_wm').name='channel'
net.network.getNode('wm_deconv_visual_to_motor').name='*'
net.network.getNode('gate_visual_wm').name='gate1'
net.network.getNode('gate_wm_visual_motor').name='gate2'

net.add_to_nengo()
| mit |
gnowledge/ncert_nroer | gstudio/urls/dashboard.py | 3 | 3522 | # Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Url for Gstudio User Dashboard"""
from django.conf.urls.defaults import url
from django.conf.urls.defaults import patterns
urlpatterns = patterns('gstudio.views.userdashboard',
url(r'^$', 'userdashboard',
name='gstudio_userdashboard'),
)
| agpl-3.0 |
timduru/platform-external-chromium_org | tools/telemetry/telemetry/page/page_measurement.py | 23 | 5804 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from telemetry.page import block_page_measurement_results
from telemetry.page import buildbot_page_measurement_results
from telemetry.page import csv_page_measurement_results
from telemetry.page import html_page_measurement_results
from telemetry.page import page_measurement_results
from telemetry.page import page_test
class MeasurementFailure(page_test.Failure):
  """Raised from MeasurePage to report an undesired but designed-for
  problem -- a failure the measurement anticipates, as opposed to an
  unexpected crash."""
  pass
class PageMeasurement(page_test.PageTest):
  """Glue code for running a measurement across a set of pages.

  To use this, subclass from the measurement and override MeasurePage. For
  example:

     class BodyChildElementMeasurement(PageMeasurement):
        def MeasurePage(self, page, tab, results):
           body_child_count = tab.EvaluateJavaScript(
               'document.body.children.length')
           results.Add('body_children', 'count', body_child_count)

     if __name__ == '__main__':
         page_measurement.Main(BodyChildElementMeasurement())

  To add test-specific options:

     class BodyChildElementMeasurement(PageMeasurement):
        def AddCommandLineOptions(parser):
           parser.add_option('--element', action='store', default='body')

        def MeasurePage(self, page, tab, results):
           body_child_count = tab.EvaluateJavaScript(
              'document.querySelector('%s').children.length')
           results.Add('children', 'count', child_count)
  """
  def __init__(self,
               action_name_to_run='',
               needs_browser_restart_after_each_run=False,
               discard_first_result=False,
               clear_cache_before_each_run=False):
    # '_RunTest' names the method the page_test machinery invokes per page.
    super(PageMeasurement, self).__init__(
        '_RunTest',
        action_name_to_run,
        needs_browser_restart_after_each_run,
        discard_first_result,
        clear_cache_before_each_run)

  def _RunTest(self, page, tab, results):
    # Bracket the subclass measurement so results are attributed to this page.
    results.WillMeasurePage(page)
    self.MeasurePage(page, tab, results)
    results.DidMeasurePage()

  def AddOutputOptions(self, parser):
    """Add the output-related command line options shared by measurements."""
    super(PageMeasurement, self).AddOutputOptions(parser)
    parser.add_option('-o', '--output',
                      dest='output_file',
                      help='Redirects output to a file. Defaults to stdout.')
    parser.add_option('--output-trace-tag',
                      default='',
                      help='Append a tag to the key of each result trace.')
    parser.add_option('--reset-html-results', action='store_true',
                      help='Delete all stored runs in HTML output')

  @property
  def output_format_choices(self):
    # Valid values for --output-format; the first entry is the default.
    return ['html', 'buildbot', 'block', 'csv', 'none']

  def PrepareResults(self, options):
    """Build the results object matching options.output_format.

    When --output is given, the target file is created if missing and
    opened read/write; otherwise results go to stdout.
    """
    if hasattr(options, 'output_file') and options.output_file:
      output_file = os.path.expanduser(options.output_file)
      open(output_file, 'a').close() # Create file if it doesn't exist.
      output_stream = open(output_file, 'r+')
    else:
      output_stream = sys.stdout
    if not hasattr(options, 'output_format'):
      options.output_format = self.output_format_choices[0]
    if not hasattr(options, 'output_trace_tag'):
      options.output_trace_tag = ''

    if options.output_format == 'csv':
      return csv_page_measurement_results.CsvPageMeasurementResults(
        output_stream,
        self.results_are_the_same_on_every_page)
    elif options.output_format == 'block':
      return block_page_measurement_results.BlockPageMeasurementResults(
        output_stream)
    elif options.output_format == 'buildbot':
      return buildbot_page_measurement_results.BuildbotPageMeasurementResults(
        trace_tag=options.output_trace_tag)
    elif options.output_format == 'html':
      return html_page_measurement_results.HtmlPageMeasurementResults(
        output_stream, self.__class__.__name__, options.reset_html_results,
        options.browser_type, trace_tag=options.output_trace_tag)
    elif options.output_format == 'none':
      return page_measurement_results.PageMeasurementResults(
        trace_tag=options.output_trace_tag)
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (options.output_format,
                         ', '.join(self.output_format_choices)))

  @property
  def results_are_the_same_on_every_page(self):
    """By default, measurements are assumed to output the same values for every
    page. This allows incremental output, for example in CSV. If, however, the
    measurement discovers what values it can report as it goes, and those values
    may vary from page to page, you need to override this function and return
    False. Output will not appear in this mode until the entire pageset has
    run."""
    return True

  def MeasurePage(self, page, tab, results):
    """Override to actually measure the page's performance.

    page is a page_set.Page
    tab is an instance of telemetry.core.Tab

    Should call results.Add(name, units, value) for each result, or raise an
    exception on failure. The name and units of each Add() call must be
    the same across all iterations. The name 'url' must not be used.

    Prefer field names that are in accordance with python variable style. E.g.
    field_name.

    Put together:

       def MeasurePage(self, page, tab, results):
          res = tab.EvaluateJavaScript('2+2')
          if res != 4:
             raise Exception('Oh, wow.')
          results.Add('two_plus_two', 'count', res)
    """
    raise NotImplementedError()
| bsd-3-clause |
sephii/django | tests/inspectdb/tests.py | 36 | 12973 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from unittest import skipUnless
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils.six import PY3, StringIO
from .models import ColumnTypes
class InspectDBTestCase(TestCase):
    """End-to-end tests for the ``inspectdb`` management command.

    Each test runs the command against the tables created for this test
    app (restricted via ``table_name_filter``) and asserts on the
    generated model source captured from stdout.
    """

    def test_stealth_table_name_filter_option(self):
        out = StringIO()
        # Lets limit the introspection to tables created for models of this
        # application
        call_command('inspectdb',
                     table_name_filter=lambda tn: tn.startswith('inspectdb_'),
                     stdout=out)
        error_message = "inspectdb has examined a table that should have been filtered out."
        # contrib.contenttypes is one of the apps always installed when running
        # the Django test suite, check that one of its tables hasn't been
        # inspected
        self.assertNotIn("class DjangoContentType(models.Model):", out.getvalue(), msg=error_message)

    def make_field_type_asserter(self):
        """Call inspectdb and return a function to validate a field type in its output"""
        out = StringIO()
        call_command('inspectdb',
                     table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
                     stdout=out)
        output = out.getvalue()

        def assertFieldType(name, definition):
            # Pull the generated "name = models.XField(...)" line out of the
            # command output and compare it verbatim.
            out_def = re.search(r'^\s*%s = (models.*)$' % name, output, re.MULTILINE).groups()[0]
            self.assertEqual(definition, out_def)

        return assertFieldType

    def test_field_types(self):
        """Test introspection of various Django field types"""
        assertFieldType = self.make_field_type_asserter()

        # Inspecting Oracle DB doesn't produce correct results (#19884):
        # - it gets max_length wrong: it returns a number of bytes.
        # - it reports fields as blank=True when they aren't.
        if (connection.features.can_introspect_max_length and
                not connection.features.interprets_empty_strings_as_nulls):
            assertFieldType('char_field', "models.CharField(max_length=10)")
            assertFieldType('null_char_field', "models.CharField(max_length=10, blank=True, null=True)")
        assertFieldType('comma_separated_int_field', "models.CharField(max_length=99)")
        assertFieldType('date_field', "models.DateField()")
        assertFieldType('date_time_field', "models.DateTimeField()")
        if (connection.features.can_introspect_max_length and
                not connection.features.interprets_empty_strings_as_nulls):
            assertFieldType('email_field', "models.CharField(max_length=254)")
            assertFieldType('file_field', "models.CharField(max_length=100)")
            assertFieldType('file_path_field', "models.CharField(max_length=100)")
        if connection.features.can_introspect_ip_address_field:
            assertFieldType('ip_address_field', "models.GenericIPAddressField()")
            assertFieldType('gen_ip_adress_field', "models.GenericIPAddressField()")
        elif (connection.features.can_introspect_max_length and
                not connection.features.interprets_empty_strings_as_nulls):
            # Backends without a native IP type fall back to CharField.
            assertFieldType('ip_address_field', "models.CharField(max_length=15)")
            assertFieldType('gen_ip_adress_field', "models.CharField(max_length=39)")
        if (connection.features.can_introspect_max_length and
                not connection.features.interprets_empty_strings_as_nulls):
            assertFieldType('slug_field', "models.CharField(max_length=50)")
        if not connection.features.interprets_empty_strings_as_nulls:
            assertFieldType('text_field', "models.TextField()")
        if connection.features.can_introspect_time_field:
            assertFieldType('time_field', "models.TimeField()")
        if (connection.features.can_introspect_max_length and
                not connection.features.interprets_empty_strings_as_nulls):
            assertFieldType('url_field', "models.CharField(max_length=200)")

    def test_number_field_types(self):
        """Test introspection of numeric Django field types"""
        assertFieldType = self.make_field_type_asserter()

        if not connection.features.can_introspect_autofield:
            assertFieldType('id', "models.IntegerField(primary_key=True) # AutoField?")
        if connection.features.can_introspect_big_integer_field:
            assertFieldType('big_int_field', "models.BigIntegerField()")
        else:
            assertFieldType('big_int_field', "models.IntegerField()")

        bool_field = ColumnTypes._meta.get_field('bool_field')
        bool_field_type = connection.features.introspected_boolean_field_type(bool_field)
        assertFieldType('bool_field', "models.{}()".format(bool_field_type))
        null_bool_field = ColumnTypes._meta.get_field('null_bool_field')
        null_bool_field_type = connection.features.introspected_boolean_field_type(null_bool_field)
        if 'BooleanField' in null_bool_field_type:
            assertFieldType('null_bool_field', "models.{}()".format(null_bool_field_type))
        else:
            if connection.features.can_introspect_null:
                assertFieldType('null_bool_field', "models.{}(blank=True, null=True)".format(null_bool_field_type))
            else:
                assertFieldType('null_bool_field', "models.{}()".format(null_bool_field_type))

        if connection.features.can_introspect_decimal_field:
            assertFieldType('decimal_field', "models.DecimalField(max_digits=6, decimal_places=1)")
        else: # Guessed arguments on SQLite, see #5014
            assertFieldType('decimal_field', "models.DecimalField(max_digits=10, decimal_places=5) "
                            "# max_digits and decimal_places have been guessed, "
                            "as this database handles decimal fields as float")

        assertFieldType('float_field', "models.FloatField()")
        assertFieldType('int_field', "models.IntegerField()")
        if connection.features.can_introspect_positive_integer_field:
            assertFieldType('pos_int_field', "models.PositiveIntegerField()")
        else:
            assertFieldType('pos_int_field', "models.IntegerField()")
        if connection.features.can_introspect_positive_integer_field:
            if connection.features.can_introspect_small_integer_field:
                assertFieldType('pos_small_int_field', "models.PositiveSmallIntegerField()")
            else:
                assertFieldType('pos_small_int_field', "models.PositiveIntegerField()")
        else:
            if connection.features.can_introspect_small_integer_field:
                assertFieldType('pos_small_int_field', "models.SmallIntegerField()")
            else:
                assertFieldType('pos_small_int_field', "models.IntegerField()")
        if connection.features.can_introspect_small_integer_field:
            assertFieldType('small_int_field', "models.SmallIntegerField()")
        else:
            assertFieldType('small_int_field', "models.IntegerField()")

    @skipUnlessDBFeature('can_introspect_foreign_keys')
    def test_attribute_name_not_python_keyword(self):
        out = StringIO()
        # Lets limit the introspection to tables created for models of this
        # application
        call_command('inspectdb',
                     table_name_filter=lambda tn: tn.startswith('inspectdb_'),
                     stdout=out)
        output = out.getvalue()
        error_message = "inspectdb generated an attribute name which is a python keyword"
        # Recursive foreign keys should be set to 'self'
        self.assertIn("parent = models.ForeignKey('self')", output)
        self.assertNotIn("from = models.ForeignKey(InspectdbPeople)", output, msg=error_message)
        # As InspectdbPeople model is defined after InspectdbMessage, it should be quoted
        self.assertIn("from_field = models.ForeignKey('InspectdbPeople', db_column='from_id')",
                      output)
        self.assertIn("people_pk = models.ForeignKey(InspectdbPeople, primary_key=True)",
                      output)
        self.assertIn("people_unique = models.ForeignKey(InspectdbPeople, unique=True)",
                      output)

    def test_digits_column_name_introspection(self):
        """Introspection of column names consist/start with digits (#16536/#17676)"""
        out = StringIO()
        # Lets limit the introspection to tables created for models of this
        # application
        call_command('inspectdb',
                     table_name_filter=lambda tn: tn.startswith('inspectdb_'),
                     stdout=out)
        output = out.getvalue()
        error_message = "inspectdb generated a model field name which is a number"
        self.assertNotIn(" 123 = models.CharField", output, msg=error_message)
        self.assertIn("number_123 = models.CharField", output)

        error_message = "inspectdb generated a model field name which starts with a digit"
        self.assertNotIn(" 4extra = models.CharField", output, msg=error_message)
        self.assertIn("number_4extra = models.CharField", output)

        self.assertNotIn(" 45extra = models.CharField", output, msg=error_message)
        self.assertIn("number_45extra = models.CharField", output)

    def test_special_column_name_introspection(self):
        """
        Introspection of column names containing special characters,
        unsuitable for Python identifiers
        """
        out = StringIO()
        call_command('inspectdb',
                     table_name_filter=lambda tn: tn.startswith('inspectdb_'),
                     stdout=out)
        output = out.getvalue()
        # Some backends report column names uppercased; normalize expectations.
        base_name = 'Field' if not connection.features.uppercases_column_names else 'field'
        self.assertIn("field = models.IntegerField()", output)
        self.assertIn("field_field = models.IntegerField(db_column='%s_')" % base_name, output)
        self.assertIn("field_field_0 = models.IntegerField(db_column='%s__')" % base_name, output)
        self.assertIn("field_field_1 = models.IntegerField(db_column='__field')", output)
        self.assertIn("prc_x = models.IntegerField(db_column='prc(%) x')", output)
        if PY3:
            # Python 3 allows non-ASCII identifiers
            self.assertIn("tamaño = models.IntegerField()", output)
        else:
            self.assertIn("tama_o = models.IntegerField(db_column='tama\\xf1o')", output)

    def test_table_name_introspection(self):
        """
        Introspection of table names containing special characters,
        unsuitable for Python identifiers
        """
        out = StringIO()
        call_command('inspectdb',
                     table_name_filter=lambda tn: tn.startswith('inspectdb_'),
                     stdout=out)
        output = out.getvalue()
        self.assertIn("class InspectdbSpecialTableName(models.Model):", output)

    def test_managed_models(self):
        """Test that by default the command generates models with `Meta.managed = False` (#14305)"""
        out = StringIO()
        call_command('inspectdb',
                     table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
                     stdout=out)
        output = out.getvalue()
        self.longMessage = False
        self.assertIn(" managed = False", output, msg='inspectdb should generate unmanaged models.')

    def test_unique_together_meta(self):
        out = StringIO()
        call_command('inspectdb',
                     table_name_filter=lambda tn: tn.startswith('inspectdb_uniquetogether'),
                     stdout=out)
        output = out.getvalue()
        self.assertIn(" unique_together = (('field1', 'field2'),)", output, msg='inspectdb should generate unique_together.')

    @skipUnless(connection.vendor == 'sqlite',
                "Only patched sqlite's DatabaseIntrospection.data_types_reverse for this test")
    def test_custom_fields(self):
        """
        Introspection of columns with a custom field (#21090)
        """
        out = StringIO()
        orig_data_types_reverse = connection.introspection.data_types_reverse
        try:
            # Map DB types to a custom field path to check it round-trips
            # into the generated model source.
            connection.introspection.data_types_reverse = {
                'text': 'myfields.TextField',
                'bigint': 'BigIntegerField',
            }
            call_command('inspectdb',
                         table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
                         stdout=out)
            output = out.getvalue()
            self.assertIn("text_field = myfields.TextField()", output)
            self.assertIn("big_int_field = models.BigIntegerField()", output)
        finally:
            # Always restore the patched mapping for subsequent tests.
            connection.introspection.data_types_reverse = orig_data_types_reverse
| bsd-3-clause |
dunkhong/grr | grr/core/grr_response_core/lib/time_utils_test.py | 2 | 1228 | #!/usr/bin/env python
"""Tests for time-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from absl.testing import absltest
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import time_utils
from grr.test_lib import test_lib
class TimeUtilsTest(absltest.TestCase):
    """Tests for time_utils.TimeRange construction and membership."""

    def testInvalidTimeRange(self):
        # A range whose start lies after its end must be rejected.
        with self.assertRaisesWithLiteralMatch(ValueError,
                                               "Invalid time-range: 2000 > 1000."):
            time_utils.TimeRange(
                rdfvalue.RDFDatetime(2000), rdfvalue.RDFDatetime(1000))

    def testIncludesTimeRange(self):
        # Both endpoints of the range are inclusive.
        time_range = time_utils.TimeRange(
            rdfvalue.RDFDatetime(1000), rdfvalue.RDFDatetime(2000))
        self.assertFalse(time_range.Includes(rdfvalue.RDFDatetime(500)))
        self.assertTrue(time_range.Includes(rdfvalue.RDFDatetime(1000)))
        self.assertTrue(time_range.Includes(rdfvalue.RDFDatetime(1500)))
        self.assertTrue(time_range.Includes(rdfvalue.RDFDatetime(2000)))
        self.assertFalse(time_range.Includes(rdfvalue.RDFDatetime(2500)))
def main(argv):
    """Entry point: delegate to the GRR test runner."""
    test_lib.main(argv)


if __name__ == "__main__":
    app.run(main)
| apache-2.0 |
40223142/2015cad0623 | static/Brython3.1.3-20150514-095342/Lib/abc.py | 765 | 8057 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
    """A decorator indicating abstract methods.

    Requires that the metaclass is ABCMeta or derived from it.  A class
    whose metaclass derives from ABCMeta cannot be instantiated until all
    of its abstract methods have been overridden; the abstract
    implementations remain callable through the normal ``super()``
    mechanisms.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    # ABCMeta collects attributes carrying this flag into __abstractmethods__.
    setattr(funcobj, '__isabstractmethod__', True)
    return funcobj
class abstractclassmethod(classmethod):
    """A decorator indicating abstract classmethods.

    Deprecated: use ``classmethod`` together with ``abstractmethod``
    instead.  Kept for backward compatibility.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractclassmethod
            def my_abstract_classmethod(cls, ...):
                ...
    """

    __isabstractmethod__ = True

    def __init__(self, callable):
        # Flag the wrapped callable itself, then defer to classmethod.
        callable.__isabstractmethod__ = True
        super().__init__(callable)
class abstractstaticmethod(staticmethod):
    """A decorator indicating abstract staticmethods.

    Deprecated: use ``staticmethod`` together with ``abstractmethod``
    instead.  Kept for backward compatibility.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractstaticmethod
            def my_abstract_staticmethod(...):
                ...
    """

    __isabstractmethod__ = True

    def __init__(self, callable):
        # Flag the wrapped callable itself, then defer to staticmethod.
        callable.__isabstractmethod__ = True
        super().__init__(callable)
class abstractproperty(property):
    """A decorator indicating abstract properties.

    Requires an ABCMeta-derived metaclass; a class with an unoverridden
    abstract property cannot be instantiated.  The abstract property can
    still be reached via the normal ``super()`` mechanisms.

    Deprecated: use ``property`` together with ``abstractmethod`` instead.

    Usage (read-only):

        class C(metaclass=ABCMeta):
            @abstractproperty
            def my_abstract_property(self):
                ...

    A read-write abstract property uses the long-form declaration:

        class C(metaclass=ABCMeta):
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """

    __isabstractmethod__ = True
class ABCMeta(type):
    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC. An ABC can be subclassed
    directly, and then acts as a mix-in class. You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).
    """

    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything. It forces the
    # negative cache to be cleared before its next use.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        cls = super().__new__(mcls, name, bases, namespace)
        # Compute set of abstract method names: anything flagged abstract
        # in this namespace, plus inherited abstracts that have not been
        # overridden by a concrete implementation.
        abstracts = {name
                     for name, value in namespace.items()
                     if getattr(value, "__isabstractmethod__", False)}
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry and caches.  WeakSets, so registering
        # or caching a class does not keep it alive.
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC.

        Returns the subclass, to allow usage as a class decorator.
        """
        if not isinstance(subclass, type):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return subclass  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache
        return subclass

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
        print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
        # All of the bookkeeping attributes share the "_abc_" prefix.
        for name in sorted(cls.__dict__.keys()):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                print("%s: %r" % (name, value), file=file)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking
        subclass = instance.__class__
        if subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        if subtype is subclass:
            # The negative cache is only valid while its version matches the
            # global invalidation counter (no registrations since it filled).
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subclass in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subclass)
        # __class__ and type() disagree (possible with proxy-like objects);
        # accept the instance if either of the two passes the subclass check.
        return any(cls.__subclasscheck__(c) for c in {subclass, subtype})

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
| gpl-3.0 |
usersource/tasks | tasks_phonegap/Tasks/plugins/io.usersource.anno/tools/copytool3/oauth2client/crypt.py | 221 | 10233 | #!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import logging
import time
from anyjson import simplejson
# Timing constants (seconds) used when validating token timestamps.
CLOCK_SKEW_SECS = 300  # 5 minutes in seconds
AUTH_TOKEN_LIFETIME_SECS = 300  # 5 minutes in seconds
MAX_TOKEN_LIFETIME_SECS = 86400  # 1 day in seconds

# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class AppIdentityError(Exception):
  """Raised when a JWT fails a validation check (parsing, signature,
  timestamps or audience)."""
  pass
try:
  from OpenSSL import crypto


  class OpenSSLVerifier(object):
    """Verifies the signature on a message."""

    def __init__(self, pubkey):
      """Constructor.

      Args:
        pubkey, OpenSSL.crypto.PKey, The public key to verify with.
      """
      self._pubkey = pubkey

    def verify(self, message, signature):
      """Verifies a message against a signature.

      Args:
        message: string, The message to verify.
        signature: string, The signature on the message.

      Returns:
        True if message was signed by the private key associated with the public
        key that this object was constructed with.
      """
      try:
        crypto.verify(self._pubkey, signature, message, 'sha256')
        return True
      except:
        # NOTE(review): this bare except also swallows programming errors,
        # not just OpenSSL verification failures -- consider narrowing.
        return False

    @staticmethod
    def from_string(key_pem, is_x509_cert):
      """Construct a Verified instance from a string.

      Args:
        key_pem: string, public key in PEM format.
        is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
          expected to be an RSA key in PEM format.

      Returns:
        Verifier instance.

      Raises:
        OpenSSL.crypto.Error if the key_pem can't be parsed.
      """
      if is_x509_cert:
        pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
      else:
        pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
      return OpenSSLVerifier(pubkey)


  class OpenSSLSigner(object):
    """Signs messages with a private key."""

    def __init__(self, pkey):
      """Constructor.

      Args:
        pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.
      """
      self._key = pkey

    def sign(self, message):
      """Signs a message.

      Args:
        message: string, Message to be signed.

      Returns:
        string, The signature of the message for the given key.
      """
      return crypto.sign(self._key, message, 'sha256')

    @staticmethod
    def from_string(key, password='notasecret'):
      """Construct a Signer instance from a string.

      Args:
        key: string, private key in PKCS12 or PEM format.
        password: string, password for the private key file.

      Returns:
        Signer instance.

      Raises:
        OpenSSL.crypto.Error if the key can't be parsed.
      """
      # PEM keys are self-describing text; anything else is assumed PKCS12.
      if key.startswith('-----BEGIN '):
        pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
      else:
        pkey = crypto.load_pkcs12(key, password).get_privatekey()
      return OpenSSLSigner(pkey)

except ImportError:
  # PyOpenSSL is optional; the module-level selection further below falls
  # back to PyCrypto when it is not installed.
  OpenSSLVerifier = None
  OpenSSLSigner = None
try:
  from Crypto.PublicKey import RSA
  from Crypto.Hash import SHA256
  from Crypto.Signature import PKCS1_v1_5


  class PyCryptoVerifier(object):
    """Verifies the signature on a message."""

    def __init__(self, pubkey):
      """Constructor.

      Args:
        pubkey, OpenSSL.crypto.PKey (or equiv), The public key to verify with.
      """
      self._pubkey = pubkey

    def verify(self, message, signature):
      """Verifies a message against a signature.

      Args:
        message: string, The message to verify.
        signature: string, The signature on the message.

      Returns:
        True if message was signed by the private key associated with the public
        key that this object was constructed with.
      """
      try:
        return PKCS1_v1_5.new(self._pubkey).verify(
            SHA256.new(message), signature)
      except:
        # NOTE(review): bare except also hides programming errors, not only
        # verification failures -- consider narrowing.
        return False

    @staticmethod
    def from_string(key_pem, is_x509_cert):
      """Construct a Verified instance from a string.

      Args:
        key_pem: string, public key in PEM format.
        is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
          expected to be an RSA key in PEM format.

      Returns:
        Verifier instance.

      Raises:
        NotImplementedError if is_x509_cert is true.
      """
      if is_x509_cert:
        raise NotImplementedError(
            'X509 certs are not supported by the PyCrypto library. '
            'Try using PyOpenSSL if native code is an option.')
      else:
        pubkey = RSA.importKey(key_pem)
        return PyCryptoVerifier(pubkey)


  class PyCryptoSigner(object):
    """Signs messages with a private key."""

    def __init__(self, pkey):
      """Constructor.

      Args:
        pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.
      """
      self._key = pkey

    def sign(self, message):
      """Signs a message.

      Args:
        message: string, Message to be signed.

      Returns:
        string, The signature of the message for the given key.
      """
      return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))

    @staticmethod
    def from_string(key, password='notasecret'):
      """Construct a Signer instance from a string.

      Args:
        key: string, private key in PEM format.
        password: string, password for private key file. Unused for PEM files.

      Returns:
        Signer instance.

      Raises:
        NotImplementedError if they key isn't in PEM format.
      """
      if key.startswith('-----BEGIN '):
        pkey = RSA.importKey(key)
      else:
        raise NotImplementedError(
            'PKCS12 format is not supported by the PyCrpto library. '
            'Try converting to a "PEM" '
            '(openssl pkcs12 -in xxxxx.p12 -nodes -nocerts > privatekey.pem) '
            'or using PyOpenSSL if native code is an option.')
      return PyCryptoSigner(pkey)

except ImportError:
  # PyCrypto is optional as well; if both libraries are missing, the
  # selection code below raises ImportError at module import time.
  PyCryptoVerifier = None
  PyCryptoSigner = None
# Pick the concrete Signer/Verifier implementations: prefer PyOpenSSL,
# fall back to PyCrypto, and fail the module import if neither is present.
if OpenSSLSigner:
  Signer = OpenSSLSigner
  Verifier = OpenSSLVerifier
elif PyCryptoSigner:
  Signer = PyCryptoSigner
  Verifier = PyCryptoVerifier
else:
  raise ImportError('No encryption library found. Please install either '
                    'PyOpenSSL, or PyCrypto 2.6 or later')
def _urlsafe_b64encode(raw_bytes):
  """URL-safe base64 encode raw_bytes, with trailing '=' padding removed."""
  encoded = base64.urlsafe_b64encode(raw_bytes)
  return encoded.rstrip('=')
def _urlsafe_b64decode(b64string):
  """Decode an unpadded URL-safe base64 string."""
  # Guard against unicode strings, which base64 can't handle.
  b64string = b64string.encode('ascii')
  # Re-add the padding stripped by _urlsafe_b64encode before decoding.
  padding = '=' * (4 - len(b64string) % 4)
  return base64.urlsafe_b64decode(b64string + padding)
def _json_encode(data):
  """Serialize data to compact JSON (no whitespace after separators)."""
  return simplejson.dumps(data, separators=(',', ':'))
def make_signed_jwt(signer, payload):
  """Make a signed JWT.

  See http://self-issued.info/docs/draft-jones-json-web-token.html.

  Args:
    signer: crypt.Signer, Cryptographic signer.
    payload: dict, Dictionary of data to convert to JSON and then sign.

  Returns:
    string, The JWT for the payload.
  """
  # Fixed header: all tokens produced here are RS256-signed JWTs.
  encoded_header = _urlsafe_b64encode(
      _json_encode({'typ': 'JWT', 'alg': 'RS256'}))
  encoded_payload = _urlsafe_b64encode(_json_encode(payload))
  # The signature covers "header.payload".
  signing_input = encoded_header + '.' + encoded_payload
  signature = signer.sign(signing_input)
  segments = [encoded_header, encoded_payload, _urlsafe_b64encode(signature)]
  logger.debug(str(segments))
  return '.'.join(segments)
def verify_signed_jwt_with_certs(jwt, certs, audience):
  """Verify a JWT against public certs.

  See http://self-issued.info/docs/draft-jones-json-web-token.html.

  Args:
    jwt: string, A JWT.
    certs: dict, Dictionary where values of public keys in PEM format.
    audience: string, The audience, 'aud', that this JWT should contain. If
      None then the JWT's 'aud' parameter is not verified.

  Returns:
    dict, The deserialized JSON payload in the JWT.

  Raises:
    AppIdentityError if any checks are failed.
  """
  # A JWT is "header.payload.signature".
  segments = jwt.split('.')

  if (len(segments) != 3):
    raise AppIdentityError(
        'Wrong number of segments in token: %s' % jwt)
  signed = '%s.%s' % (segments[0], segments[1])

  signature = _urlsafe_b64decode(segments[2])

  # Parse token.
  json_body = _urlsafe_b64decode(segments[1])
  try:
    parsed = simplejson.loads(json_body)
  except:
    # NOTE(review): bare except -- any failure here, not just JSON errors,
    # is reported as an unparseable token.
    raise AppIdentityError('Can\'t parse token: %s' % json_body)

  # Check signature.  The token does not identify the signing cert, so try
  # each supplied cert until one verifies.
  verified = False
  for (keyname, pem) in certs.items():
    verifier = Verifier.from_string(pem, True)
    if (verifier.verify(signed, signature)):
      verified = True
      break
  if not verified:
    raise AppIdentityError('Invalid token signature: %s' % jwt)

  # Check creation timestamp, allowing CLOCK_SKEW_SECS of slack.
  iat = parsed.get('iat')
  if iat is None:
    raise AppIdentityError('No iat field in token: %s' % json_body)
  earliest = iat - CLOCK_SKEW_SECS

  # Check expiration timestamp.
  now = long(time.time())
  exp = parsed.get('exp')
  if exp is None:
    raise AppIdentityError('No exp field in token: %s' % json_body)
  if exp >= now + MAX_TOKEN_LIFETIME_SECS:
    raise AppIdentityError(
        'exp field too far in future: %s' % json_body)
  latest = exp + CLOCK_SKEW_SECS

  if now < earliest:
    raise AppIdentityError('Token used too early, %d < %d: %s' %
                           (now, earliest, json_body))
  if now > latest:
    raise AppIdentityError('Token used too late, %d > %d: %s' %
                           (now, latest, json_body))

  # Check audience.
  if audience is not None:
    aud = parsed.get('aud')
    if aud is None:
      raise AppIdentityError('No aud field in token: %s' % json_body)
    if aud != audience:
      raise AppIdentityError('Wrong recipient, %s != %s: %s' %
                             (aud, audience, json_body))

  return parsed
| mpl-2.0 |
dfdx2/django | tests/template_tests/syntax_tests/i18n/test_get_language_info.py | 55 | 1551 | from django.test import SimpleTestCase
from django.utils import translation
from ...utils import setup
class I18nGetLanguageInfoTagTests(SimpleTestCase):
    """Tests for the ``{% get_language_info %}`` i18n template tag."""

    libraries = {
        'custom': 'template_tests.templatetags.custom',
        'i18n': 'django.templatetags.i18n',
    }

    # retrieving language information
    @setup({'i18n28_2': '{% load i18n %}'
            '{% get_language_info for "de" as l %}'
            '{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}'})
    def test_i18n28_2(self):
        # Language code given as a string literal.
        output = self.engine.render_to_string('i18n28_2')
        self.assertEqual(output, 'de: German/Deutsch bidi=False')

    @setup({'i18n29': '{% load i18n %}'
            '{% get_language_info for LANGUAGE_CODE as l %}'
            '{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}'})
    def test_i18n29(self):
        # Language code resolved from a template context variable.
        output = self.engine.render_to_string('i18n29', {'LANGUAGE_CODE': 'fi'})
        self.assertEqual(output, 'fi: Finnish/suomi bidi=False')

    # Test whitespace in filter arguments
    @setup({'i18n38': '{% load i18n custom %}'
            '{% get_language_info for "de"|noop:"x y" as l %}'
            '{{ l.code }}: {{ l.name }}/{{ l.name_local }}/'
            '{{ l.name_translated }} bidi={{ l.bidi }}'})
    def test_i18n38(self):
        # name_translated should reflect the active translation override.
        with translation.override('cs'):
            output = self.engine.render_to_string('i18n38')
        self.assertEqual(output, 'de: German/Deutsch/německy bidi=False')
| bsd-3-clause |
ondoheer/flask-admin | flask_admin/model/ajax.py | 53 | 1076 | DEFAULT_PAGE_SIZE = 10
class AjaxModelLoader(object):
    """Base class for AJAX-related model loaders.

    Override the three abstract methods below to implement custom
    loading behavior.
    """

    def __init__(self, name, options):
        """Store the loader configuration.

        :param name:
            Field name
        :param options:
            Loader options
        """
        self.name = name
        self.options = options

    def format(self, model):
        """Return an ``(id, name)`` tuple for *model*.  Must be overridden."""
        raise NotImplementedError()

    def get_one(self, pk):
        """Return the model matching the primary key.  Must be overridden.

        :param pk:
            Primary key value
        """
        raise NotImplementedError()

    def get_list(self, query, offset=0, limit=DEFAULT_PAGE_SIZE):
        """Return models that match ``query``.  Must be overridden.

        :param query:
            Query string
        :param offset:
            Offset into the result set
        :param limit:
            Maximum number of results (defaults to DEFAULT_PAGE_SIZE)
        """
        raise NotImplementedError()
| bsd-3-clause |
NickelMedia/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/user.py | 128 | 6864 | # Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import getpass
import logging
import os
import platform
import re
import shlex
import subprocess
import sys
import webbrowser
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.platforminfo import PlatformInfo
_log = logging.getLogger(__name__)
try:
import readline
except ImportError:
if sys.platform != "win32":
# There is no readline module for win32, not much to do except cry.
_log.warn("Unable to import readline.")
class User(object):
    """Console user-interaction helpers: prompts, numbered-list selection,
    editor launching and output paging."""

    DEFAULT_NO = 'n'
    DEFAULT_YES = 'y'

    def __init__(self, platforminfo=None):
        # We cannot get the PlatformInfo object from a SystemHost because
        # User is part of SystemHost itself.
        self._platforminfo = platforminfo or PlatformInfo(sys, platform, Executive())

    # FIXME: These are @classmethods because bugzilla.py doesn't have a Tool object (thus no User instance).
    @classmethod
    def prompt(cls, message, repeat=1, raw_input=raw_input):
        # Ask up to `repeat` times; return the first non-empty response,
        # or None if every attempt came back empty.
        response = None
        while (repeat and not response):
            repeat -= 1
            response = raw_input(message)
        return response

    @classmethod
    def prompt_password(cls, message, repeat=1):
        # Same as prompt(), but input is read without echoing (getpass).
        return cls.prompt(message, repeat=repeat, raw_input=getpass.getpass)

    @classmethod
    def prompt_with_multiple_lists(cls, list_title, subtitles, lists, can_choose_multiple=False, raw_input=raw_input):
        # Print several subtitled lists with one continuous 1-based
        # numbering, then let the user pick from the combined list.
        item_index = 0
        cumulated_list = []
        print list_title
        for i in range(len(subtitles)):
            print "\n" + subtitles[i]
            for item in lists[i]:
                item_index += 1
                print "%2d. %s" % (item_index, item)
            cumulated_list += lists[i]
        return cls._wait_on_list_response(cumulated_list, can_choose_multiple, raw_input)

    @classmethod
    def _wait_on_list_response(cls, list_items, can_choose_multiple, raw_input):
        # Loop until the user supplies a parseable selection.
        while True:
            if can_choose_multiple:
                response = cls.prompt("Enter one or more numbers (comma-separated) or ranges (e.g. 3-7), or \"all\": ", raw_input=raw_input)
                if not response.strip() or response == "all":
                    return list_items
                try:
                    # Accept single numbers and M-N ranges; entries are
                    # 1-based on screen, so convert to 0-based indices.
                    indices = []
                    for value in re.split("\s*,\s*", response):
                        parts = value.split('-')
                        if len(parts) == 2:
                            indices += range(int(parts[0]) - 1, int(parts[1]))
                        else:
                            indices.append(int(value) - 1)
                except ValueError, err:
                    continue
                return [list_items[i] for i in indices]
            else:
                try:
                    result = int(cls.prompt("Enter a number: ", raw_input=raw_input)) - 1
                except ValueError, err:
                    continue
                return list_items[result]

    @classmethod
    def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):
        # Single-list variant of prompt_with_multiple_lists().
        print list_title
        i = 0
        for item in list_items:
            i += 1
            print "%2d. %s" % (i, item)
        return cls._wait_on_list_response(list_items, can_choose_multiple, raw_input)

    def edit(self, files):
        # $EDITOR may contain arguments (e.g. "xed --wait"), so split it.
        editor = os.environ.get("EDITOR") or "vi"
        args = shlex.split(editor)
        # Note: Not thread safe: http://bugs.python.org/issue2320
        subprocess.call(args + files)

    def _warn_if_application_is_xcode(self, edit_application):
        if "Xcode" in edit_application:
            print "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\"."

    def edit_changelog(self, files):
        edit_application = os.environ.get("CHANGE_LOG_EDIT_APPLICATION")
        if edit_application and self._platforminfo.is_mac():
            # On Mac we support editing ChangeLogs using an application.
            args = shlex.split(edit_application)
            print "Using editor in the CHANGE_LOG_EDIT_APPLICATION environment variable."
            print "Please quit the editor application when done editing."
            self._warn_if_application_is_xcode(edit_application)
            subprocess.call(["open", "-W", "-n", "-a"] + args + files)
            return
        self.edit(files)

    def page(self, message):
        pager = os.environ.get("PAGER") or "less"
        try:
            # Note: Not thread safe: http://bugs.python.org/issue2320
            child_process = subprocess.Popen([pager], stdin=subprocess.PIPE)
            child_process.communicate(input=message)
        except IOError, e:
            # Best effort: silently drop the output if the pager can't start.
            pass

    def confirm(self, message=None, default=DEFAULT_YES, raw_input=raw_input):
        # Yes/no question; an empty response selects `default`.
        if not message:
            message = "Continue?"
        choice = {'y': 'Y/n', 'n': 'y/N'}[default]
        response = raw_input("%s [%s]: " % (message, choice))
        if not response:
            response = default
        return response.lower() == 'y'

    def can_open_url(self):
        # webbrowser.get() raises webbrowser.Error when no browser is usable.
        try:
            webbrowser.get()
            return True
        except webbrowser.Error, e:
            return False

    def open_url(self, url):
        if not self.can_open_url():
            _log.warn("Failed to open %s" % url)
        webbrowser.open(url)
| bsd-3-clause |
bike-md/bike_md | diag_app/urls.py | 1 | 1061 | from django.conf.urls import url
from . import views
import diag_app
from django.views.generic import TemplateView
# URL routes for the diagnostics app.  The extra parentheses around the
# regex literals in the original were no-ops and have been dropped.
urlpatterns = [
    url(r'^$', views.main_page, name='main'),
    url(r'^problem_list/([A-Za-z0-9]+)/([0-9]+)', views.problem_list, name='problem_list'),
    url(r'^model_detail/([0-9]+)', views.model_detail, name='model_detail'),
    url(r'^problem_detail/([0-9]+)', views.problem_detail, name='problem_detail'),
    url(r'^profile/$', views.profile, name='profile'),
    url(r'^create_account/$', views.create_account, name='create_account'),
    url(r'^about/$', views.about_us, name='about'),
    url(r'^notifications/$', views.notifications, name='notifications'),

    ### developemnt urls ###
    url(r'^problems/', TemplateView.as_view(template_name="build_templates/problem_listing.html")),
    url(r'^model_details/([0-9]+)', TemplateView.as_view(template_name='build_templates/bike_detail.html')),
    url(r'^problem_details/([0-9]+)', TemplateView.as_view(template_name='build_templates/problem_detail.html')),
]
| gpl-3.0 |
thekingofkings/focusread | libs/click/termui.py | 19 | 23207 | import os
import sys
import struct
import inspect
import itertools
from ._compat import raw_input, text_type, string_types, \
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
from .utils import echo
from .exceptions import Abort, UsageError
from .types import convert_type, Choice, Path
from .globals import resolve_color_default
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input

# Mapping of symbolic color names to their ANSI escape codes.
_ansi_colors = {
    'black': 30,
    'red': 31,
    'green': 32,
    'yellow': 33,
    'blue': 34,
    'magenta': 35,
    'cyan': 36,
    'white': 37,
    'reset': 39,
    'bright_black': 90,
    'bright_red': 91,
    'bright_green': 92,
    'bright_yellow': 93,
    'bright_blue': 94,
    'bright_magenta': 95,
    'bright_cyan': 96,
    'bright_white': 97,
}

# Escape sequence that resets all ANSI styling.
_ansi_reset_all = '\033[0m'
def hidden_prompt_func(prompt):
    """Prompt for input without echoing it back (used for passwords)."""
    from getpass import getpass
    return getpass(prompt)
def _build_prompt(text, suffix, show_default=False, default=None, show_choices=True, type=None):
    """Assemble the final prompt string.

    Appends the choice list when *type* is a Choice and *show_choices* is
    set, then the default value when *show_default* is set, and finally
    the suffix.
    """
    parts = [text]
    if type is not None and show_choices and isinstance(type, Choice):
        parts.append(' (' + ", ".join(map(str, type.choices)) + ')')
    prompt = ''.join(parts)
    if default is not None and show_default:
        prompt = '%s [%s]' % (prompt, default)
    return prompt + suffix
def prompt(text, default=None, hide_input=False, confirmation_prompt=False,
           type=None, value_proc=None, prompt_suffix=': ', show_default=True,
           err=False, show_choices=True):
    """Prompts a user for input.  This is a convenience function that can
    be used to prompt a user for input later.

    If the user aborts the input by sending a interrupt signal, this
    function will catch it and raise a :exc:`Abort` exception.

    .. versionadded:: 7.0
       Added the show_choices parameter.

    .. versionadded:: 6.0
       Added unicode support for cmd.exe on Windows.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the text to show for the prompt.
    :param default: the default value to use if no input happens.  If this
                    is not given it will prompt until it's aborted.
    :param hide_input: if this is set to true then the input value will
                       be hidden.
    :param confirmation_prompt: asks for confirmation for the value.
    :param type: the type to use to check the value against.
    :param value_proc: if this parameter is provided it's a function that
                       is invoked instead of the type conversion to
                       convert a value.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    :param show_choices: Show or hide choices if the passed type is a Choice.
                         For example if type is a Choice of either day or week,
                         show_choices is true and text is "Group by" then the
                         prompt will be "Group by (day, week): ".
    """
    result = None

    def prompt_func(text):
        # Hidden input goes through getpass, visible input through raw_input.
        f = hide_input and hidden_prompt_func or visible_prompt_func
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(text, nl=False, err=err)
            return f('')
        except (KeyboardInterrupt, EOFError):
            # getpass doesn't print a newline if the user aborts input with ^C.
            # Allegedly this behavior is inherited from getpass(3).
            # A doc bug has been filed at https://bugs.python.org/issue24711
            if hide_input:
                echo(None, err=err)
            raise Abort()

    if value_proc is None:
        value_proc = convert_type(type, default)

    prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type)

    while 1:
        while 1:
            value = prompt_func(prompt)
            if value:
                break
            elif default is not None:
                if isinstance(value_proc, Path):
                    # validate Path default value (exists, dir_okay etc.) by
                    # falling through to the value_proc conversion below
                    # instead of returning the default unchecked.
                    value = default
                    break
                return default
        try:
            result = value_proc(value)
        except UsageError as e:
            echo('Error: %s' % e.message, err=err)
            continue
        if not confirmation_prompt:
            return result
        # Ask a second time and only accept when both entries match.
        while 1:
            value2 = prompt_func('Repeat for confirmation: ')
            if value2:
                break
        if value == value2:
            return result
        echo('Error: the two entered values do not match', err=err)
def confirm(text, default=False, abort=False, prompt_suffix=': ',
            show_default=True, err=False):
    """Prompts for confirmation (yes/no question).

    If the user aborts the input by sending a interrupt signal this
    function will catch it and raise a :exc:`Abort` exception.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the question to ask.
    :param default: the default for the prompt.
    :param abort: if this is set to `True` a negative answer aborts the
                  exception by raising :exc:`Abort`.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    # The default answer is capitalized in the rendered choices (Y/n or y/N).
    prompt = _build_prompt(text, prompt_suffix, show_default,
                           default and 'Y/n' or 'y/N')
    while 1:
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(prompt, nl=False, err=err)
            value = visible_prompt_func('').lower().strip()
        except (KeyboardInterrupt, EOFError):
            raise Abort()
        if value in ('y', 'yes'):
            rv = True
        elif value in ('n', 'no'):
            rv = False
        elif value == '':
            # Empty input picks the default answer.
            rv = default
        else:
            echo('Error: invalid input', err=err)
            continue
        break
    if abort and not rv:
        raise Abort()
    return rv
def get_terminal_size():
    """Returns the current size of the terminal as tuple in the form
    ``(width, height)`` in columns and rows.
    """
    # If shutil has get_terminal_size() (Python 3.3 and later) use that
    if sys.version_info >= (3, 3):
        import shutil
        shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None)
        if shutil_get_terminal_size:
            sz = shutil_get_terminal_size()
            return sz.columns, sz.lines

    # We provide a sensible default for get_winterm_size() when being invoked
    # inside a subprocess. Without this, it would not provide a useful input.
    if get_winterm_size is not None:
        size = get_winterm_size()
        if size == (0, 0):
            return (79, 24)
        else:
            return size

    def ioctl_gwinsz(fd):
        # Ask the tty driver for the window size via TIOCGWINSZ;
        # returns (rows, cols) or None on failure.
        try:
            import fcntl
            import termios
            cr = struct.unpack(
                'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except Exception:
            return
        return cr

    # Try stdin, stdout and stderr in turn; any of them may be a tty.
    cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
    if not cr:
        # None of the standard fds worked; fall back to the
        # controlling terminal.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                cr = ioctl_gwinsz(fd)
            finally:
                os.close(fd)
        except Exception:
            pass
    if not cr or not cr[0] or not cr[1]:
        # Last resort: environment variables or hard-coded defaults.
        cr = (os.environ.get('LINES', 25),
              os.environ.get('COLUMNS', DEFAULT_COLUMNS))
    return int(cr[1]), int(cr[0])
def echo_via_pager(text_or_generator, color=None):
    """This function takes a text and shows it via an environment specific
    pager on stdout.

    .. versionchanged:: 3.0
       Added the `color` flag.

    :param text_or_generator: the text to page, or alternatively, a
                              generator emitting the text to page.
    :param color: controls if the pager supports ANSI colors or not.  The
                  default is autodetection.
    """
    color = resolve_color_default(color)

    if inspect.isgeneratorfunction(text_or_generator):
        # A generator *function* was passed: call it to obtain the generator.
        i = text_or_generator()
    elif isinstance(text_or_generator, string_types):
        # Plain string: page it as a single chunk.
        i = [text_or_generator]
    else:
        i = iter(text_or_generator)

    # convert every element of i to a text type if necessary
    text_generator = (el if isinstance(el, string_types) else text_type(el)
                      for el in i)

    from ._termui_impl import pager
    # The chained "\n" ensures the paged output ends with a newline.
    return pager(itertools.chain(text_generator, "\n"), color)
def progressbar(iterable=None, length=None, label=None, show_eta=True,
                show_percent=None, show_pos=False,
                item_show_func=None, fill_char='#', empty_char='-',
                bar_template='%(label)s [%(bar)s] %(info)s',
                info_sep=' ', width=36, file=None, color=None):
    """This function creates an iterable context manager that can be used
    to iterate over something while showing a progress bar.  It will
    either iterate over the `iterable` or `length` items (that are counted
    up).  While iteration happens, this function will print a rendered
    progress bar to the given `file` (defaults to stdout) and will attempt
    to calculate remaining time and more.  By default, this progress bar
    will not be rendered if the file is not a terminal.

    The context manager creates the progress bar.  When the context
    manager is entered the progress bar is already displayed.  With every
    iteration over the progress bar, the iterable passed to the bar is
    advanced and the bar is updated.  When the context manager exits,
    a newline is printed and the progress bar is finalized on screen.

    No printing must happen or the progress bar will be unintentionally
    destroyed.

    Example usage::

        with progressbar(items) as bar:
            for item in bar:
                do_something_with(item)

    Alternatively, if no iterable is specified, one can manually update the
    progress bar through the `update()` method instead of directly
    iterating over the progress bar.  The update method accepts the number
    of steps to increment the bar with::

        with progressbar(length=chunks.total_bytes) as bar:
            for chunk in chunks:
                process_chunk(chunk)
                bar.update(chunks.bytes)

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `color` parameter.  Added a `update` method to the
       progressbar object.

    :param iterable: an iterable to iterate over.  If not provided the length
                     is required.
    :param length: the number of items to iterate over.  By default the
                   progressbar will attempt to ask the iterator about its
                   length, which might or might not work.  If an iterable is
                   also provided this parameter can be used to override the
                   length.  If an iterable is not provided the progress bar
                   will iterate over a range of that length.
    :param label: the label to show next to the progress bar.
    :param show_eta: enables or disables the estimated time display.  This is
                     automatically disabled if the length cannot be
                     determined.
    :param show_percent: enables or disables the percentage display.  The
                         default is `True` if the iterable has a length or
                         `False` if not.
    :param show_pos: enables or disables the absolute position display.  The
                     default is `False`.
    :param item_show_func: a function called with the current item which
                           can return a string to show the current item
                           next to the progress bar.  Note that the current
                           item can be `None`!
    :param fill_char: the character to use to show the filled part of the
                      progress bar.
    :param empty_char: the character to use to show the non-filled part of
                       the progress bar.
    :param bar_template: the format string to use as template for the bar.
                         The parameters in it are ``label`` for the label,
                         ``bar`` for the progress bar and ``info`` for the
                         info section.
    :param info_sep: the separator between multiple info items (eta etc.)
    :param width: the width of the progress bar in characters, 0 means full
                  terminal width
    :param file: the file to write to.  If this is not a terminal then
                 only the label is printed.
    :param color: controls if the terminal supports ANSI colors or not.  The
                  default is autodetection.  This is only needed if ANSI
                  codes are included anywhere in the progress bar output
                  which is not the case by default.
    """
    from ._termui_impl import ProgressBar
    color = resolve_color_default(color)
    # All rendering/iteration logic lives in the ProgressBar implementation.
    return ProgressBar(iterable=iterable, length=length, show_eta=show_eta,
                       show_percent=show_percent, show_pos=show_pos,
                       item_show_func=item_show_func, fill_char=fill_char,
                       empty_char=empty_char, bar_template=bar_template,
                       info_sep=info_sep, file=file, label=label,
                       width=width, color=color)
def clear():
    """Clear the visible terminal screen and move the cursor to the
    top-left corner.  This is a no-op when stdout is not attached to a
    terminal.

    .. versionadded:: 2.0
    """
    if isatty(sys.stdout):
        # Plain Windows consoles (without colorama) do not understand ANSI
        # escape sequences, so shell out to ``cls`` there; everywhere else
        # emit the "erase display" + "cursor home" escape codes directly.
        if WIN:
            os.system('cls')
        else:
            sys.stdout.write('\033[2J\033[1;1H')
def style(text, fg=None, bg=None, bold=None, dim=None, underline=None,
          blink=None, reverse=None, reset=True):
    """Return *text* wrapped in ANSI style escape codes.

    By default the result is self contained: a reset-all code is appended
    at the end so the styling does not carry over into subsequent output.
    Pass ``reset=False`` to compose styles manually.

    Examples::

        click.echo(click.style('Hello World!', fg='green'))
        click.echo(click.style('ATTENTION!', blink=True))
        click.echo(click.style('Some things', reverse=True, fg='cyan'))

    Supported color names: ``black``, ``red``, ``green``, ``yellow``,
    ``blue``, ``magenta``, ``cyan``, ``white``, ``reset`` (reset the color
    code only) and the ``bright_`` variants (``bright_black`` ...
    ``bright_white``).  Exact shades depend on the terminal.

    .. versionadded:: 2.0

    .. versionadded:: 7.0
       Added support for bright colors.

    :param text: the string to style with ansi codes.
    :param fg: if provided this will become the foreground color.
    :param bg: if provided this will become the background color.
    :param bold: enable or disable bold mode (``None`` leaves the
                 terminal's current setting untouched).
    :param dim: enable or disable dim mode.  This is badly supported.
    :param underline: enable or disable underline.
    :param blink: enable or disable blinking.
    :param reverse: enable or disable inverse rendering (foreground
                    becomes background and the other way round).
    :param reset: append a reset-all code at the end (default ``True``).
    """
    codes = []

    def _append_color(name, offset):
        # Unknown color names raise TypeError, matching the historic
        # contract of this function.
        try:
            codes.append('\033[%dm' % (_ansi_colors[name] + offset))
        except KeyError:
            raise TypeError('Unknown color %r' % name)

    if fg:
        _append_color(fg, 0)
    if bg:
        # Background codes sit 10 above the matching foreground codes.
        _append_color(bg, 10)

    # Tri-state toggles: None means "leave alone", True emits the "on"
    # code and False the corresponding "off" code.
    for flag, on_code, off_code in ((bold, 1, 22),
                                    (dim, 2, 22),
                                    (underline, 4, 24),
                                    (blink, 5, 25),
                                    (reverse, 7, 27)):
        if flag is not None:
            codes.append('\033[%dm' % (on_code if flag else off_code))

    codes.append(text)
    if reset:
        codes.append(_ansi_reset_all)
    return ''.join(codes)
def unstyle(text):
    """Removes ANSI styling information from a string.  Usually it's not
    necessary to use this function as Click's echo function will
    automatically remove styling if necessary.

    .. versionadded:: 2.0

    :param text: the text to remove style information from.
    """
    # Delegates to the internal strip_ansi helper, which strips all ANSI
    # escape sequences (colors and other styles) from the string.
    return strip_ansi(text)
def secho(message=None, file=None, nl=True, err=False, color=None, **styles):
    """Convenience wrapper combining :func:`echo` and :func:`style` into
    one call, so the following two lines are equivalent::

        click.secho('Hello World!', fg='green')
        click.echo(click.style('Hello World!', fg='green'))

    All keyword arguments are forwarded to whichever of the two underlying
    functions they belong to.

    .. versionadded:: 2.0
    """
    styled = message if message is None else style(message, **styles)
    return echo(styled, file=file, nl=nl, err=err, color=color)
def edit(text=None, editor=None, env=None, require_save=True,
         extension='.txt', filename=None):
    r"""Edit the given text (or an existing file) in an editor.

    The editor is auto-detected unless one is given explicitly (it should
    be the full path to the executable, but the regular operating system
    search path is used for finding it).  If the editor is closed without
    changes, `None` is returned.  In case a file is edited directly via
    *filename*, the return value is always `None` and `require_save` and
    `extension` are ignored.

    If the editor cannot be opened a :exc:`UsageError` is raised.

    Note for Windows: to simplify cross-platform usage, the newlines are
    automatically converted from POSIX to Windows and vice versa.  As
    such, the message here will have ``\n`` as newline markers.

    :param text: the text to edit.
    :param editor: optionally the editor to use.  Defaults to automatic
                   detection.
    :param env: environment variables to forward to the editor.
    :param require_save: if this is true, then not saving in the editor
                         will make the return value become `None`.
    :param extension: the extension to tell the editor about.  This
                      defaults to `.txt` but changing this might change
                      syntax highlighting.
    :param filename: if provided it will edit this file instead of the
                     provided text contents.  It will not use a temporary
                     file as an indirection in that case.
    """
    from ._termui_impl import Editor

    impl = Editor(editor=editor, env=env, require_save=require_save,
                  extension=extension)
    if filename is not None:
        impl.edit_file(filename)
        return None
    return impl.edit(text)
def launch(url, wait=False, locate=False):
    """This function launches the given URL (or filename) in the default
    viewer application for this file type.  If this is an executable, it
    might launch the executable in a new session.  The return value is
    the exit code of the launched application.  Usually, ``0`` indicates
    success.

    Examples::

        click.launch('https://click.palletsprojects.com/')
        click.launch('/my/downloaded/file', locate=True)

    .. versionadded:: 2.0

    :param url: URL or filename of the thing to launch.
    :param wait: waits for the program to stop.
    :param locate: if this is set to `True` then instead of launching the
                   application associated with the URL it will attempt to
                   launch a file manager with the file located.  This
                   might have weird effects if the URL does not point to
                   the filesystem.
    """
    # Lazily import the platform-specific launcher so that importing this
    # module stays cheap.
    from ._termui_impl import open_url
    return open_url(url, wait=wait, locate=locate)
# If this is provided, getchar() calls into this instead.  This is used
# for unittesting purposes (tests install a fake reader here).
_getchar = None


def getchar(echo=False):
    """Fetches a single character from the terminal and returns it.  This
    will always return a unicode character and under certain rare
    circumstances this might return more than one character.  The
    situations which more than one character is returned is when for
    whatever reason multiple characters end up in the terminal buffer or
    standard input was not actually a terminal.

    Note that this will always read from the terminal, even if something
    is piped into the standard input.

    Note for Windows: in rare cases when typing non-ASCII characters, this
    function might wait for a second character and then return both at once.
    This is because certain Unicode characters look like special-key markers.

    .. versionadded:: 2.0

    :param echo: if set to `True`, the character read will also show up on
                 the terminal.  The default is to not show it.
    """
    f = _getchar
    if f is None:
        # No test hook installed: fall back to the real platform-specific
        # implementation.
        from ._termui_impl import getchar as f
    return f(echo)
def raw_terminal():
    # Return a context manager that puts the terminal into raw
    # (unbuffered, no-echo) mode; imported lazily from the
    # platform-specific implementation module.
    from ._termui_impl import raw_terminal as f
    return f()
def pause(info='Press any key to continue ...', err=False):
    """Stop execution and wait for the user to press any key, similar to
    the Windows batch ``pause`` command.  Does nothing unless both stdin
    and stdout are attached to a terminal.

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param info: the info string to print before pausing; pass an empty
                 value to suppress all output.
    :param err: if set the message goes to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    interactive = isatty(sys.stdin) and isatty(sys.stdout)
    if not interactive:
        return
    try:
        if info:
            echo(info, nl=False, err=err)
        try:
            getchar()
        except (KeyboardInterrupt, EOFError):
            # An interrupted read counts the same as a key press.
            pass
    finally:
        # Terminate the prompt line that was printed without a newline.
        if info:
            echo(err=err)
| mit |
jpadilla/django-extensions | django_extensions/management/commands/set_fake_passwords.py | 1 | 1785 | # coding=utf-8
"""
set_fake_passwords.py
Reset all user passwords to a common value. Useful for testing in a
development environment. As such, this command is only available when
setting.DEBUG is True.
"""
from django.conf import settings
from django.core.management.base import CommandError
from django_extensions.management.utils import signalcommand
from django_extensions.compat import CompatibilityBaseCommand as BaseCommand
DEFAULT_FAKE_PASSWORD = 'password'
class Command(BaseCommand):
    """DEBUG-only management command that resets every user's password to
    one shared value, either prompted for interactively (``--prompt``) or
    taken from ``--password`` (default: ``DEFAULT_FAKE_PASSWORD``)."""

    help = 'DEBUG only: sets all user passwords to a common value ("%s" by default)' % (DEFAULT_FAKE_PASSWORD, )
    requires_model_validation = False

    def add_arguments(self, parser):
        """Register the --prompt / --password command line options."""
        parser.add_argument(
            '--prompt', dest='prompt_passwd', default=False,
            action='store_true',
            help='Prompts for the new password to apply to all users')
        parser.add_argument(
            '--password', dest='default_passwd', default=DEFAULT_FAKE_PASSWORD,
            help='Use this as default password.')

    @signalcommand
    def handle(self, *args, **options):
        """Hash the chosen password once and bulk-update all users.

        Raises CommandError outside DEBUG mode or on an empty prompted
        password.
        """
        if not settings.DEBUG:
            raise CommandError('Only available in debug mode')

        from django_extensions.compat import get_user_model

        if options.get('prompt_passwd', False):
            from getpass import getpass
            passwd = getpass('Password: ')
            if not passwd:
                raise CommandError('You must enter a valid password')
        else:
            passwd = options.get('default_passwd', DEFAULT_FAKE_PASSWORD)

        User = get_user_model()
        # Hash the password once on a throwaway instance, then push the
        # same hash to every row with a single UPDATE query.
        user = User()
        user.set_password(passwd)
        count = User.objects.all().update(password=user.password)

        # Fix: write to the command's output stream instead of print() so
        # the message is captured/redirected correctly by call_command().
        self.stdout.write('Reset %d passwords' % count)
| mit |
SDSG-Invenio/invenio | invenio/legacy/weblinkback/api.py | 13 | 14163 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebLinkback - Handling Linkbacks"""
from invenio.config import CFG_SITE_URL, \
CFG_SITE_RECORD, \
CFG_SITE_ADMIN_EMAIL, \
CFG_SITE_LANG
from invenio.legacy.weblinkback.config import CFG_WEBLINKBACK_TYPE, \
CFG_WEBLINKBACK_SUBSCRIPTION_DEFAULT_ARGUMENT_NAME, \
CFG_WEBLINKBACK_STATUS, \
CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME, \
CFG_WEBLINKBACK_LIST_TYPE, \
CFG_WEBLINKBACK_TRACKBACK_SUBSCRIPTION_ERROR_MESSAGE, \
CFG_WEBLINKBACK_PAGE_TITLE_STATUS, \
CFG_WEBLINKBACK_BROKEN_COUNT, \
CFG_WEBLINKBACK_LATEST_FACTOR, \
CFG_WEBLINKBACK_MAX_LINKBACKS_IN_EMAIL
from invenio.legacy.weblinkback.db_layer import create_linkback, \
get_url_list, \
get_all_linkbacks, \
get_approved_latest_added_linkbacks, \
approve_linkback, \
get_urls_and_titles, \
update_url_title, \
set_url_broken, \
increment_broken_count, \
remove_linkback
from invenio.legacy.search_engine import check_user_can_view_record, \
guess_primary_collection_of_a_record
from invenio.modules.access.engine import acc_authorize_action, \
acc_get_authorized_emails
from invenio.legacy.webuser import collect_user_info
from invenio.ext.email import send_email
from invenio.utils.url import get_title_of_page
def check_user_can_view_linkbacks(user_info, recid):
    """
    Check if the user is authorized to view linkbacks for a given recid.
    Returns the same type as acc_authorize_action
    """
    # First make sure the user may view the record itself; if not,
    # propagate that authorization failure unchanged.
    (auth_code, auth_msg) = check_user_can_view_record(user_info, recid)
    if auth_code:
        return (auth_code, auth_msg)
    # check if user can view the linkbacks of the record's primary collection
    record_primary_collection = guess_primary_collection_of_a_record(recid)
    return acc_authorize_action(user_info, 'viewlinkbacks', authorized_if_no_roles=True, collection=record_primary_collection)
def generate_redirect_url(recid, ln=CFG_SITE_LANG, action=None):
    """
    Get redirect URL for an action.

    @param recid: record id the linkbacks belong to
    @param ln: interface language code
    @param action: the action, must be defined in weblinkback_webinterface.py
    @return "CFG_SITE_URL/CFG_SITE_RECORD/recid/linkbacks/action?ln=%s" if action is not None,
        otherwise "CFG_SITE_URL/CFG_SITE_RECORD/recid/linkbacks?ln=%s"
    """
    result = "%s/%s/%s/linkbacks" % (CFG_SITE_URL, CFG_SITE_RECORD, recid)
    # Fix: compare against None by identity (PEP 8), not equality.
    if action is not None:
        return result + "/%s?ln=%s" % (action, ln)
    return result + "?ln=%s" % ln
def split_in_days(linkbacks):
    """
    Group consecutive linkbacks by calendar day.

    @param linkbacks: a list of this format: [(linkback_id,
                                              origin_url,
                                              recid,
                                              additional_properties,
                                              type,
                                              status,
                                              insert_time)]
        in ascending or descending order by insert_time, where insert_time
        exposes year/month/day attributes (date or datetime)
    @return a list of lists of linkbacks, one inner list per run of
        consecutive same-day entries; an empty input yields []
    """
    from itertools import groupby

    def day_of(linkback):
        # insert_time is the 7th element of the linkback tuple
        ts = linkback[6]
        return (ts.year, ts.month, ts.day)

    # groupby collapses consecutive entries sharing the same day, which
    # matches the original hand-rolled grouping loop exactly.
    return [list(group) for _key, group in groupby(linkbacks, key=day_of)]
def create_trackback(recid, url, title, excerpt, blog_name, blog_id, source, user_info):
    """
    Create a trackback for a record.

    Only optional arguments that were actually supplied (i.e. that differ
    from the subscription default placeholder) are stored as additional
    properties of the linkback.

    @param recid: record id the trackback points to
    @return: result of create_linkback()
    """
    optional_arguments = (('title', title),
                          ('excerpt', excerpt),
                          ('blog_name', blog_name),
                          ('id', blog_id),
                          ('source', source))
    argument_copy = {}
    for key, value in optional_arguments:
        if value != CFG_WEBLINKBACK_SUBSCRIPTION_DEFAULT_ARGUMENT_NAME:
            argument_copy[key] = value

    # An empty string marks "no additional properties".
    additional_properties = argument_copy if argument_copy else ""
    return create_linkback(url, recid, additional_properties, CFG_WEBLINKBACK_TYPE['TRACKBACK'], user_info)
def send_pending_linkbacks_notification(linkback_type):
    """
    Send notification emails to all linkback moderators for all pending linkbacks

    @param linkback_type: of CFG_WEBLINKBACK_LIST_TYPE
    """
    # NOTE(review): the query below always fetches TRACKBACK linkbacks
    # regardless of the linkback_type parameter, which is only used in the
    # e-mail wording -- confirm whether that is intentional.
    pending_linkbacks = get_all_linkbacks(linkback_type=CFG_WEBLINKBACK_TYPE['TRACKBACK'], status=CFG_WEBLINKBACK_STATUS['PENDING'])

    if pending_linkbacks:
        pending_count = len(pending_linkbacks)
        cutoff_text = ''
        # Cap the number of requests listed in a single e-mail.
        if pending_count > CFG_WEBLINKBACK_MAX_LINKBACKS_IN_EMAIL:
            cutoff_text = ' (Printing only the first %s requests)' % CFG_WEBLINKBACK_MAX_LINKBACKS_IN_EMAIL

        content = """There are %(count)s new %(linkback_type)s requests which you should approve or reject%(cutoff)s:
""" % {'count': pending_count,
       'linkback_type': linkback_type,
       'cutoff': cutoff_text}
        for pending_linkback in pending_linkbacks[0:CFG_WEBLINKBACK_MAX_LINKBACKS_IN_EMAIL]:
            content += """
For %(recordURL)s from %(origin_url)s.
""" % {'recordURL': generate_redirect_url(pending_linkback[2]),
       'origin_url': pending_linkback[1]}

        # One e-mail per moderator authorized for 'moderatelinkbacks'.
        for email in acc_get_authorized_emails('moderatelinkbacks'):
            send_email(CFG_SITE_ADMIN_EMAIL, email, 'Pending ' + linkback_type + ' requests', content)
def infix_exists_for_url_in_list(url, list_type):
    """
    Check if an infix of a url exists in a list.

    @param url: URL to test
    @param list_type: of CFG_WEBLINKBACK_LIST_TYPE
    @return True if any list entry occurs as a substring of url,
        False otherwise
    """
    return any(entry in url for entry in get_url_list(list_type))
def get_latest_linkbacks_to_accessible_records(rg, linkbacks, user_info):
    """Filter linkbacks down to those whose record the user may view,
    stopping as soon as ``rg`` of them have been collected.

    @param rg: maximum number of linkbacks to return
    @param linkbacks: candidate linkback tuples (recid at index 2)
    @param user_info: user the record-view authorization is checked for
    """
    accessible = []
    for entry in linkbacks:
        (auth_code, _auth_msg) = check_user_can_view_record(user_info, entry[2])
        if not auth_code:
            accessible.append(entry)
            if len(accessible) == rg:
                break
    return accessible
def perform_request_display_record_linbacks(req, recid, show_admin, weblinkback_templates, ln): # pylint: disable=W0613
    """
    Display linkbacks of a record.

    @param recid: record id
    @param show_admin: True, False --> show admin parts to approve/reject linkbacks pending requests
    @param weblinkback_templates: template object reference
    @return: HTML string with the record's linkbacks section
    """
    # NOTE(review): function name contains a typo ("linbacks"); it is kept
    # as-is for backward compatibility with existing callers.
    out = weblinkback_templates.tmpl_linkbacks_general(recid=recid,
                                                       ln=ln)
    if show_admin:
        # Moderators additionally see the pending requests, newest first.
        pending_linkbacks = get_all_linkbacks(recid, CFG_WEBLINKBACK_STATUS['PENDING'], CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME['DESC'])
        out += weblinkback_templates.tmpl_linkbacks_admin(pending_linkbacks=pending_linkbacks,
                                                          recid=recid,
                                                          ln=ln)
    approved_linkbacks = get_all_linkbacks(recid, CFG_WEBLINKBACK_STATUS['APPROVED'], CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME['DESC'])
    out += weblinkback_templates.tmpl_linkbacks(approved_linkbacks=approved_linkbacks,
                                                ln=ln)
    return out
def perform_request_display_approved_latest_added_linkbacks_to_accessible_records(rg, ln, user_info, weblinkback_templates):
    """
    Display approved latest added linkbacks to accessible records.

    @param rg: count of linkbacks to display
    @param weblinkback_templates: template object reference
    @return: HTML string with the latest linkbacks grouped by day
    """
    # Over-fetch by CFG_WEBLINKBACK_LATEST_FACTOR because some linkbacks
    # may point to records the user is not allowed to view and will be
    # filtered out below.
    latest_linkbacks = get_approved_latest_added_linkbacks(rg * CFG_WEBLINKBACK_LATEST_FACTOR)
    latest_linkbacks = get_latest_linkbacks_to_accessible_records(rg, latest_linkbacks, user_info)
    latest_linkbacks_in_days = split_in_days(latest_linkbacks)
    out = weblinkback_templates.tmpl_get_latest_linkbacks_top(rg, ln)
    out += '<br>'
    out += weblinkback_templates.tmpl_get_latest_linkbacks(latest_linkbacks_in_days, ln)
    return out
def perform_sendtrackback(recid, url, title, excerpt, blog_name, blog_id, source, current_user):
    """
    Handle an incoming trackback request for a record.

    @param recid: record id the trackback refers to
    @return: (xml_response, http_status); status 400 for a faulty or
        refused request, 200 when the trackback was accepted (either
        auto-approved via the whitelist or left pending)
    """
    # assume unsuccessful request
    status = 400
    xml_response = '<response>'
    xml_error_response = """<error>1</error>
                            <message>%s</message>
                         """
    blacklist_match = infix_exists_for_url_in_list(url, CFG_WEBLINKBACK_LIST_TYPE['BLACKLIST'])
    whitelist_match = infix_exists_for_url_in_list(url, CFG_WEBLINKBACK_LIST_TYPE['WHITELIST'])

    # faulty request, url argument not set
    if url in (CFG_WEBLINKBACK_SUBSCRIPTION_DEFAULT_ARGUMENT_NAME, None, ''):
        xml_response += xml_error_response % CFG_WEBLINKBACK_TRACKBACK_SUBSCRIPTION_ERROR_MESSAGE['BAD_ARGUMENT']
    # request refused: whitelist match has precedence over blacklist match
    elif blacklist_match and not whitelist_match:
        xml_response += xml_error_response % CFG_WEBLINKBACK_TRACKBACK_SUBSCRIPTION_ERROR_MESSAGE['BLACKLIST']
    # request accepted: will be either approved automatically or pending
    else:
        status = 200
        linkback_id = create_trackback(recid, url, title, excerpt, blog_name, blog_id, source, current_user)

        # approve request automatically from url in whitelist
        if whitelist_match:
            approve_linkback(linkback_id, current_user)

    xml_response += '</response>'
    return xml_response, status
def perform_sendtrackback_disabled():
    """Return the (xml_response, 404) pair sent to clients when the
    trackback facility is switched off."""
    status = 404
    xml_response = """<response>
                      <error>1</error>
                      <message>Trackback facility disabled</message>
                  </response>"""
    return xml_response, status
def update_linkbacks(mode):
    """
    Update titles of pages that link to the instance.

    @param mode: 1 update page titles of new linkbacks
                 2 update page titles of old linkbacks
                 3 update manually set page titles
                 4 detect and disable broken linkbacks
    """
    if mode in (1, 2, 3):
        # Dispatch table instead of an if/elif chain; behavior unchanged.
        status_for_mode = {
            1: CFG_WEBLINKBACK_PAGE_TITLE_STATUS['NEW'],
            2: CFG_WEBLINKBACK_PAGE_TITLE_STATUS['OLD'],
            3: CFG_WEBLINKBACK_PAGE_TITLE_STATUS['MANUALLY_SET'],
        }
        urls_and_titles = get_urls_and_titles(status_for_mode[mode])

        for (url, title, manual_set, broken_count) in urls_and_titles:  # pylint: disable=W0612
            new_title = get_title_of_page(url)
            # Only accept valid titles (fix: identity comparison with None)
            if new_title is not None:
                update_url_title(url, new_title)

    elif mode == 4:
        urls_and_titles = get_urls_and_titles()
        for (url, title, manual_set, broken_count) in urls_and_titles:  # pylint: disable=W0612
            new_title = get_title_of_page(url)
            # A page whose title cannot be fetched counts as broken once
            # more; after CFG_WEBLINKBACK_BROKEN_COUNT failures in total the
            # URL is flagged as permanently broken.
            if new_title is None:
                increment_broken_count(url)
                if broken_count + 1 == CFG_WEBLINKBACK_BROKEN_COUNT:
                    set_url_broken(url)
def delete_linkbacks_on_blacklist():
    """
    Delete all pending, rejected and broken linkbacks whose URL matches an
    entry of the blacklist.
    """
    doomed_statuses = (CFG_WEBLINKBACK_STATUS['PENDING'],
                       CFG_WEBLINKBACK_STATUS['REJECTED'],
                       CFG_WEBLINKBACK_STATUS['BROKEN'])
    for status in doomed_statuses:
        for linkback in get_all_linkbacks(status=status):
            # linkback[0] is the linkback id, linkback[1] its origin URL.
            if infix_exists_for_url_in_list(linkback[1], CFG_WEBLINKBACK_LIST_TYPE['BLACKLIST']):
                remove_linkback(linkback[0])
| gpl-2.0 |
lab305itep/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window that draws the scheduler trace as rows of colored
    rectangles on a zoomable, scrollable timeline (Python 2 / wxPython)."""
    Y_OFFSET = 100             # pixels above the first row
    RECT_HEIGHT = 100          # height of one row
    RECT_SPACE = 50            # vertical gap between rows
    EVENT_MARKING_WIDTH = 5    # height of the event-marker band at a row's top

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        """Build the frame, register it with the tracer and lay out the
        scrollable drawing area."""
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5                 # horizontal zoom factor (see us_to_px)
        self.scroll_scale = 20          # pixels per scroll unit
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        # Bind the container too, so input is handled wherever focus lands.
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None    # lazily created summary wx.StaticText

        self.Show(True)

    def us_to_px(self, val):
        """Convert a time delta to pixels at the current zoom.
        NOTE(review): divides by 10**3 first, so `val` appears to be in
        nanoseconds despite the name -- confirm against the tracer.  Also
        relies on Python 2 integer division semantics."""
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        """Inverse of us_to_px: convert pixels back to a time delta."""
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        """Return the scroll origin in pixels as (x, y)."""
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        """Return the time offset corresponding to the scroll origin."""
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one rectangle on row `nr` spanning timestamps [start, end].
        If top_color is given, a thin marker band is drawn along the
        rectangle's top edge first."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the main rectangle so it starts below the marker band.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        """Ask the tracer to redraw everything inside [start, end]."""
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        """EVT_PAINT handler: repaint the currently visible time window."""
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        """Map a y pixel coordinate to a row index, or -1 when it falls
        outside every row (e.g. in the inter-row spacing)."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        """Replace the summary text shown below the timeline."""
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        """EVT_LEFT_DOWN handler: forward (row, timestamp) to the tracer."""
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        """Recompute the virtual canvas width from the trace interval."""
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        """Re-apply scrollbars after a zoom change, keeping time `x` at
        the left edge of the view."""
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        """Double the zoom factor around the current view start."""
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        """Halve the zoom factor around the current view start."""
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        """EVT_KEY_DOWN handler: '+'/'-' zoom, arrow keys scroll by one
        scroll unit."""
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
bladealslayer/iobserver | lib/iobserver.py | 1 | 22415 | ############################################################################
# iObserver #
# v0.1 #
# #
# Copyright (C) 2007 by Boyan Tabakov #
# blade.alslayer@gmail.com #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
from pyinotify import *
from pyinotify import Event as pyinotify_Event
from configobj import ConfigObj, ConfigObjError
from threading import Thread, Lock, Event
from glob import glob
from types import ModuleType
from time import time
import imp
import plugins
import sys
import os.path
import copy
# Exception classes
# Note that no exceptions are thrown out of our main class
# because we are running in a separate thread and thus the
# exception will be left uncaught.
# Instead the user is able to check if we died with some error.
# Of course, just like every rule, this has one exception :)
# Exception is raised if the error situation occures before
# the thread is started: e.g. config error.
class iError(Exception):
    """ General error.

    Root of the iObserver exception hierarchy.  When an observer instance
    is supplied, it is notified immediately, because errors raised in
    worker threads would otherwise go unseen by the user.
    """
    def __init__(self, observer, msg):
        super(iError, self).__init__(msg)
        if observer:
            observer._notify_error(self)
class iPublicError(iError):
    """ Exceptions that generate an error report to the user. """
    pass
class iPrivateError(iError):
    """ Exceptions for internal use only; never surfaced to the user. """
#class iConfigError(iPublicError):
#""" Configuration error """
#pass
class iWatchError(iPublicError):
    """ Error while watching a target (bad config, missing path, ...). """
    pass
class iObserverError(iPublicError):
    """ General observer error. """
    pass
class iPluginError(iPrivateError):
    """ Parent class for exceptions originating
    from within plugins. Plugins should derive
    exceptions from this one.
    These are caught by iWatch and are not passed to user.
    An iProcessEventError is raised instead. """
    def __init__(self, *args):
        # Plugins may raise with just a message; then no observer is
        # attached (None), so the error is not pushed to the user.
        if len(args) == 1:
            iPrivateError.__init__(self, None, args[0])
        else:
            iPrivateError.__init__(self, *args)
# So it begins...
class iPlugin(object):
    """ Base class for plugins.  Plugin instances are created per watch
    and receive the owning watch, the shared cache and their own
    configuration section. """
    def __init__(self, watch, cache, config):
        # Watch-local plugin configuration, the cache shared by all
        # watches and the iWatch instance that owns this plugin.
        self._config = config
        self._cache = cache
        self._watch = watch

    def process_event(self, event):
        """ This is the method that is called to handle an event.
        Subclasses override this; the default implementation ignores
        the event. """
        pass
class iProcessEvent(ProcessEvent):
    """ Our universal pyinotify event handler: forwards every event,
    regardless of type, to the owning iWatch. """
    def __init__(self, watch):
        # iWatch instance all events are delegated to.
        self._watch = watch

    def process_default(self, event):
        """ Do nothing, just pass event to the iWatch instance to handle. """
        self._watch.process_event(event)
class iCache(object):
    """ A stash shared by all watches.  Passed to plugins
    to store data in, because plugin objects are not persistent.

    Entries pushed without ``persistent=True`` expire once they are older
    than ``max_age`` seconds; expiry is amortised by purging only every
    ``expire_after_count`` pushes.  All public methods are thread safe.
    """
    def __init__(self, max_age, expire_after_count):
        self._cache = {}
        self._max_age = max_age
        self._expire_counter = 0
        self._expire_after_count = expire_after_count
        self._lock = Lock()

    def push(self, key, value, persistent=False):
        """ Store value under key.  Persistent entries never expire. """
        # Fix: use the lock as a context manager so it is released even if
        # _expire() or the dict operations raise.
        with self._lock:
            self._expire_counter += 1
            if self._expire_counter > self._expire_after_count:
                # Every N pushes, purge any old entries
                self._expire()
                self._expire_counter = 0
            # A time stamp of 0 marks the entry as persistent.
            time_stamp = 0 if persistent else time()
            self._cache[key] = (value, time_stamp)

    def pop(self, key):
        """ Remove and return the value stored under key, or None if the
        key is absent (or already expired). """
        with self._lock:
            (result, time_stamp) = self._cache.pop(key, (None, None))
        return result

    def get(self, key):
        """ Return the value stored under key without removing it, or
        None if the key is absent. """
        with self._lock:
            (result, time_stamp) = self._cache.get(key, (None, None))
        return result

    def _expire(self):
        # Caller must already hold self._lock.
        # Fix: iterate over a snapshot, because entries are removed while
        # walking (dict.items() would raise RuntimeError on Python 3).
        current_time = time()
        for (key, (value, time_stamp)) in list(self._cache.items()):
            if time_stamp and current_time - time_stamp > self._max_age:
                self._cache.pop(key)
class iWatch(object):
""" Represents a single watched directory.
Watch the directory in a separate thread
using the desired plugins. """
    def __init__(self, observer, available_plugins, config):
        """ Set up watch state and validate the configuration.

        @param observer: owning iObserver instance (provides the cache)
        @param available_plugins: mapping of plugin name -> plugin module
        @param config: one-key dict mapping the watched path to its
            configuration section
        """
        self._observer = observer
        self._cache = observer._cache
        self._lock = Lock()
        # Worker thread; started later via start().
        self._thread = Thread(target=self.run)
        # Events used to signal the worker thread from the outside.
        self._terminate_event = Event()
        self._error_event = Event()
        self._config_changed_event = Event()
        self._config = None
        self._path = None
        # NOTE(review): attribute name is misspelled ('_avilable_...');
        # _configure() below sets the correctly spelled one.
        self._avilable_plugins = None
        self._watch_manager = None
        self._notifier = None
        self._watches = None
        self._configure(available_plugins, config)
    def get_path(self):
        """ Return the watched filesystem path. """
        return self._path
    def is_alive(self):
        """ Return True while the watch worker thread is running. """
        return self._thread.isAlive()
    def _configure(self, available_plugins, config):
        """ Record path, config and plugin table, validating that every
        required plugin is available.

        Does not raise: on error it sets the error event and instantiates
        an iWatchError, which notifies the observer as a side effect. """
        self._available_plugins = available_plugins.copy()
        # Deep-copy so later mutations by the caller do not affect us.
        temp = copy.deepcopy(config)
        # The config dict has exactly one key: the watched path.
        # (Python 2: keys() returns an indexable list.)
        self._path = temp.keys()[0]
        self._config = temp[self._path]
        # Check plugins
        # Redundant check
        if not self._config.has_key('plugins'):
            self._error_event.set()
            iWatchError(self._observer, "Watch %s: Missing 'plugins' section in configuration." % self._path)
            return
        plugins = self._config['plugins']
        # ConfigObj yields a bare string when only one plugin is listed;
        # normalise to a list.
        if not isinstance(plugins, list):
            plugins = [plugins]
        for plugin in plugins:
            if not self._available_plugins.has_key(plugin):
                self._error_event.set()
                iWatchError(self._observer, "Required plugin '%s' is missing." % plugin)
def update_config(self, available_plugins, config):
""" Calles from iObserver whenever a change of plugins or config
is detected. """
# Not running in the main thread of the instance!
self._lock.acquire()
self._new_available_plugins = available_plugins
self._new_config = config
self._lock.release()
self._config_changed_event.set()
def _reconfigure(self):
""" Called in the main watch thread so that no locking
of the configuration when reading is required. """
self._lock.acquire()
self._configure(self._new_available_plugins, self._new_config)
self._lock.release()
def start(self):
if not self._terminate_event.isSet() and not self._error_event.isSet():
try:
self._thread.start()
except:
self._error_event.set()
iWatchError(self._observer, "Could not start watch thread.")
def run(self):
""" Our thread's main executable """
# Send a custom WATCH_INIT event.
# Plugins may use this to do any "one time" initializations.
process_event = iProcessEvent(self)
process_event.process_default(pyinotify_Event(
{
'event_name': 'WATCH_INIT',
'path': self._path
}
))
self._watch_manager = WatchManager()
self._notifier = Notifier(self._watch_manager, iProcessEvent(self))
try:
self._watches = self._watch_manager.add_watch(self._path, EventsCodes.ALL_EVENTS, rec=True, auto_add=True)
for watch in self._watches.keys():
if self._watches[watch] == -1:
# Error: path is missing?
iWatchError(self._observer, "Error watching %s. Maybe file or directory don't exist?" % watch)
return
# Rock'n'Roll baby!
while True:
self._notifier.process_events()
if self._notifier.check_events(timeout=1000):
self._notifier.read_events()
# Check if our config should be updated
if self._config_changed_event.isSet():
self._config_changed_event.clear()
self._reconfigure()
# Notify plugins that a configuration might be changed.
# They should act accordingly...
process_event.process_default(pyinotify_Event(
{
'event_name': 'WATCH_RECONFIG',
'path': self._path
}
))
# Check if we have to terminate:
if self._error_event.isSet():
self._terminate_event.set()
if self._terminate_event.isSet():
self._terminate_event.clear()
self._notifier.stop()
break
# Send a custom final event: WATCH_DEAD
# A plugin may use this to cleanup anything
# left behind in the cache.
process_event.process_default(pyinotify_Event(
{
'event_name': 'WATCH_DEAD',
'path': self._path
}
))
except NotifierError, data:
self._notifier.stop()
iWatchError(self._observer, "Error while watching %s: %s" % (self._path, data))
except ProcessEventError, data:
self._notifier.stop()
iWatchError(self._observer, "Error processing event while watching %s: %s" % (self._path, data))
except:
self._notifier.stop()
iWatchError(self._observer, "Unknown error while watching %s." % self._path)
def process_event(self, event):
""" iProcessEvent calls this to handle an event.
I could have used iWatch as an event handler directly given
to the notifier, but this way it is a lil' more flexible.
A side effect is that a watch could be (possibly) used as a plugin -
plugin for plugin management:) """
# Our job is to call each of the plugins' process_event methods.
# Plugins are instantiated each time, so that a reloaded plugin
# could be updated.
# If we are stopped (or rather "stopping", ignore any events:
# We need to clear the terminate_event in order our
# final WATCH_DEAD event to pass. This is done in the run method.
if self._error_event.isSet() or self._terminate_event.isSet():
return
# Some special handling:
# If the watched directory is moved - stop watching it,
# because paths are no longer valid:
if event.event_name == 'IN_MOVE_SELF' and event.path == self._path+'-invalided-path':
self.stop()
# Also if watched item gets deleted, the internal inotify watch will
# be stopped, but our thread will still be running... so stop it
if event.event_name == 'IN_DELETE_SELF' and event.path == self._path:
self.stop()
plugins = self._config['plugins']
if not isinstance(plugins, list):
# In case we have a single plugin, it is a string
# and not a list...
plugins = [plugins]
plugins = set(plugins)
for plugin_name in plugins:
if self._available_plugins.has_key(plugin_name):
plugin_config = dict([(key, self._config[key]) for key in self._config.keys() if key.startswith(plugin_name + '_')])
plugin_class = None
plugin = None
if isinstance(self._available_plugins[plugin_name], ModuleType):
plugin_class = self._available_plugins[plugin_name].__getattribute__(plugin_name.title())
plugin = plugin_class(self, self._cache, plugin_config)
else:
plugin = self._available_plugins[plugin_name]
# Process event
try:
plugin.process_event(event)
except iPluginError, data:
iWatchError(self._observer, "Watch: %s: Plugin '%s' reported error: %s" % (self._path, plugin_name, data))
else:
iWatchError(self._observer, "Watch: %s: Required plugin '%s' is missing." % (self._path, plugin_name))
def stop(self):
self._terminate_event.set()
class iPollWatch(iWatch):
""" A watch using polling """
# Used only to watch our config file
# because vi causes some trouble otherwise.
# No recursion implemented!
def run(self):
if not os.path.isfile(self._path):
iWatchError(self._observer, "Missing target or target is not a regular file!")
else:
last_mtime = os.stat(self._path).st_mtime
try:
while True:
mtime = os.stat(self._path).st_mtime
if mtime > last_mtime:
# File was modified
last_mtime = mtime
process_event = iProcessEvent(self)
event = pyinotify_Event(
{
'event_name': 'IN_MODIFY',
'path': self._path,
'name': None,
}
)
process_event.process_default(event)
self._terminate_event.wait(1)
if self._terminate_event.isSet():
break
except:
iWatchError(self._observer, "Could not stat target!")
class iObserver(iPlugin):
""" The main class. Runs in a separate thread. """
def __init__(self, config=None):
self._thread = Thread(target=self.run)
self._config = None
self._config_path = None
self._error = None
self._config_changed_event = Event()
self._plugins_changed_event = Event()
self._terminate_event = Event()
self._error_event = Event()
self._configure(config)
self._plugins = None
self._watches = None
self._config_watch = None
self._plugins_watch = None
self._cache = iCache(max_age=10, expire_after_count=100)
self._load_plugins()
def _notify_error(self, error):
# An exception rose somewhere in our threads.
# Maybe terminate or examine error?
if isinstance(error, iPublicError):
self._error = error
self._error_event.set()
def is_alive(self):
""" Check if we are in error state and dead/dying """
return self._thread.isAlive()
def error(self):
""" Return the error message that killed us """
if self._error_event.isSet():
return self._error.__str__()
else:
return None
def _validate_config(self):
""" TODO: Sanity checks of the final config """
allowed_globals = "watch_plugins,watch_config".split(',')
for (key, val) in self._config['global'].iteritems():
if not key in allowed_globals:
if not self._thread.isAlive():
raise iObserverError(self, "Illegal option '%s' in configuration." % key)
else:
iObserverError(self, "Illegal option '%s' in configuration." % key)
elif not isinstance(val, str) and not isinstance(val, int):
if not self._thread.isAlive():
raise iObserverError(self, "Illegal option value '%s' for '%s' in configuration." % (val, key))
else:
iObserverError(self, "Illegal option value '%s' for '%s' in configuration." % (val, key))
def _merge_config(self, config):
""" For now, merge new the global section
with the defaul one and add the watches one.
Maybe do some deep copying later if
more complex config is desired? """
if config.has_key('global'):
self._config['global'].update(config['global'])
if config.has_key('watches'):
self._config['watches'] = dict([ (os.path.realpath(watch), val) for (watch, val) in config['watches'].items()])
def _read_config_file(self, file):
""" Read... well... the config file:) """
config = None
try:
config = ConfigObj(file, file_error=True)
except IOError, data:
raise iObserverError(self, "Error reading configuration file: %s" % data)
except ConfigObjError, data:
raise iObserverError(self, "Error parsing confiuration file: %s" % data)
return config.dict()
def _is_true(self, value):
""" Check if value looks like something True.
Used because configobj does not make value conversions."""
if isinstance(value, bool) or isinstance(value, int):
return value
elif isinstance(value, str):
if value == '1' or value.lower() == 'yes' or value.lower() == 'true':
return True
return False
def _configure(self, config):
""" Set default config values and read any config
from a dict or a file"""
self._config = {
'global':{
'watch_config': False,
'watch_plugins': False,
},
'watches':{
},
}
if config and isinstance(config, dict):
self._merge_config(config)
self._config_path = None
elif config:
self._config_path = os.path.realpath(config)
try:
from_file = self._read_config_file(config)
self._merge_config(from_file)
except iObserverError, data:
if not self._thread.isAlive():
raise
self._validate_config()
def _obey_global_option(self, option):
""" Do what needs to be done for each
global option """
if option == 'watch_config':
if self._is_true(self._config['global'][option]):
# Turn on
if self._config_path:
self._config_watch = iPollWatch(
observer=self,
available_plugins = {'config_watch': self},
config = {
self._config_path: {
'plugins': 'config_watch'
}
}
)
self._config_watch.start()
else:
# Turn off
if self._config_watch and self._config_watch.is_alive():
print "Stopping config watch."
self._config_watch.stop()
self._config_watch = None
elif option == 'watch_plugins':
if self._is_true(self._config['global']['watch_plugins']):
# Turn on
self._plugins_watch = iWatch(
observer=self,
available_plugins = {'plugins_watch': self},
config = {
plugins.__path__[0]: {
'plugins': 'plugins_watch'
}
}
)
self._plugins_watch.start()
else:
# Turn off
if self._plugins_watch and self._plugins_watch.is_alive():
self._plugins_watch.stop()
self._plugins_watch = None
else:
iObserverError(self, "_obey_global_option called with incorrect option '%s'" % option)
def _update_config(self):
""" Update the config when a change is detected. """
old_config = copy.deepcopy(self._config)
self._configure(self._config_path)
# See what's changed and what needs to be done:
for (option, value) in old_config['global'].iteritems():
if self._is_true(value) != self._is_true(self._config['global'][option]):
self._obey_global_option(option)
# Stop watches that were removed from config file
# and update the ones that are still running
for watch in self._watches.keys():
if not watch in self._config['watches'].keys():
self._watches[watch].stop()
self._watches.pop(watch)
else:
self._watches[watch].update_config(
available_plugins=self._plugins,
config={watch: self._config['watches'][watch]}
)
# Start any new watches
for watch in self._config['watches'].keys():
if not watch in self._watches.keys():
self._watches[watch] = iWatch(
observer=self,
available_plugins=self._plugins,
config={watch: self._config['watches'][watch]}
)
self._watches[watch].start()
def _load_plugins(self):
# Get a list of all plugins... (ignoring any names starting with _)
# TODO use a more readable expression like:
# [d for d in ... if ...]
#self._plugins = dict(map(lambda x: (os.path.basename(x)[:-3], None), glob(plugins.__path__[0] + '/[!_]*.py')))
self._plugins = dict([ (os.path.basename(x)[:-3], None)
for x in glob(plugins.__path__[0] + '/[!_]*.py') ])
# and load any that are not yet loaded.
# Don't attempt to reload existing ones even if they might have changed,
# because I don't know what the consequences be:)
try:
for plugin in self._plugins.keys():
if not sys.modules.has_key('iobserver.plugins.'+plugin):
# Load new plugin
imp.acquire_lock()
found = imp.find_module(plugin, plugins.__path__)
self._plugins[plugin] = imp.load_module('iobserver.plugins.'+plugin, found[0], found[1], found[2])
imp.release_lock()
else:
# Reload plugin
reload(sys.modules['iobserver.plugins.'+plugin])
except Exception, data:
if imp.lock_held(): imp.release_lock()
if self._thread.isAlive():
iObserverError(self, "Could not load plugin(s): %s" % data)
else:
raise iObserverError(None, "Could not load plugin(s): %s" % data)
def process_event(self, event):
""" Having this method makes us a valid plugin:)
We use us as a plugin to handle both configuration
and plugin changes. """
# Not running in the main thread of the instance
if not event.event_name in "IN_CREATE,IN_DELETE,IN_DELETE_SELF,IN_MODIFY,IN_MOVE_SELF,IN_MOVED_FROM,IN_MOVED_TO".split(','):
return
if event.path == self._config_path:
# Event is about the configuration file
self._config_changed_event.set()
else:
# Event is about plugins directory
# NOTE: This is now handled by the watch itself!
## If we are moved/deleted, stop watching...
#if event.event_name == 'IN_MOVE_SELF' or event.event_name == 'IN_DELETE_SELF':
#watch.stop()
#return
if event.name.endswith('.pyc'):
# Ignore changes in the compiled modules, as they occur when
# a changed module is loaded and will cause unneccessery reload.
return
if event.name.startswith('.'):
# Ignore hidden files
return
self._plugins_changed_event.set()
def start(self):
""" Start our new thread. """
try:
self._thread.start()
except:
iObserverError(self, "Could not start observer thread.")
def run(self):
""" The target passed to our thread to execute. """
self._watches = {}
# Set up our special config/plugin watches
self._obey_global_option('watch_config')
self._obey_global_option('watch_plugins')
# Set up all other configured watches
for watch in self._config['watches'].keys():
self._watches[watch] = iWatch(
observer=self,
available_plugins=self._plugins,
config={watch: self._config['watches'][watch]}
)
# Fire in the hole!
for watch in self._watches.values():
watch.start()
# This thread now waits for various events:
# - terminate event
# - error
# - configuration changed event
while True:
self._terminate_event.wait(1)
if self._terminate_event.isSet():
# Exiting
break
if self._error_event.isSet():
break
if self._plugins_changed_event.isSet():
self._plugins_changed_event.clear()
self._load_plugins()
if self._config_changed_event.isSet():
self._config_changed_event.clear()
self._update_config()
for watch in self._watches.values():
if watch.is_alive:
watch.stop()
if self._config_watch: self._config_watch.stop()
if self._plugins_watch: self._plugins_watch.stop()
def stop(self):
""" Called from application thread. """
self._terminate_event.set() | gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.