# repo: kenshay/ImageScripter
# path: ProgramData/SystemFiles/Python/Lib/site-packages/tornado/test/queues_test.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
from datetime import timedelta
from random import random
from tornado import gen, queues
from tornado.gen import TimeoutError
from tornado.testing import gen_test, AsyncTestCase
from tornado.test.util import unittest, skipBefore35, exec_test
class QueueBasicTest(AsyncTestCase):
def test_repr_and_str(self):
q = queues.Queue(maxsize=1)
self.assertIn(hex(id(q)), repr(q))
self.assertNotIn(hex(id(q)), str(q))
q.get()
for q_str in repr(q), str(q):
self.assertTrue(q_str.startswith('<Queue'))
self.assertIn('maxsize=1', q_str)
self.assertIn('getters[1]', q_str)
self.assertNotIn('putters', q_str)
self.assertNotIn('tasks', q_str)
q.put(None)
q.put(None)
# Now the queue is full, this putter blocks.
q.put(None)
for q_str in repr(q), str(q):
self.assertNotIn('getters', q_str)
self.assertIn('putters[1]', q_str)
self.assertIn('tasks=2', q_str)
def test_order(self):
q = queues.Queue()
for i in [1, 3, 2]:
q.put_nowait(i)
items = [q.get_nowait() for _ in range(3)]
self.assertEqual([1, 3, 2], items)
@gen_test
def test_maxsize(self):
self.assertRaises(TypeError, queues.Queue, maxsize=None)
self.assertRaises(ValueError, queues.Queue, maxsize=-1)
q = queues.Queue(maxsize=2)
self.assertTrue(q.empty())
self.assertFalse(q.full())
self.assertEqual(2, q.maxsize)
self.assertTrue(q.put(0).done())
self.assertTrue(q.put(1).done())
self.assertFalse(q.empty())
self.assertTrue(q.full())
put2 = q.put(2)
self.assertFalse(put2.done())
self.assertEqual(0, (yield q.get())) # Make room.
self.assertTrue(put2.done())
self.assertFalse(q.empty())
self.assertTrue(q.full())
class QueueGetTest(AsyncTestCase):
@gen_test
def test_blocking_get(self):
q = queues.Queue()
q.put_nowait(0)
self.assertEqual(0, (yield q.get()))
def test_nonblocking_get(self):
q = queues.Queue()
q.put_nowait(0)
self.assertEqual(0, q.get_nowait())
def test_nonblocking_get_exception(self):
q = queues.Queue()
self.assertRaises(queues.QueueEmpty, q.get_nowait)
@gen_test
def test_get_with_putters(self):
q = queues.Queue(1)
q.put_nowait(0)
put = q.put(1)
self.assertEqual(0, (yield q.get()))
self.assertIsNone((yield put))
@gen_test
def test_blocking_get_wait(self):
q = queues.Queue()
q.put(0)
self.io_loop.call_later(0.01, q.put, 1)
self.io_loop.call_later(0.02, q.put, 2)
self.assertEqual(0, (yield q.get(timeout=timedelta(seconds=1))))
self.assertEqual(1, (yield q.get(timeout=timedelta(seconds=1))))
@gen_test
def test_get_timeout(self):
q = queues.Queue()
get_timeout = q.get(timeout=timedelta(seconds=0.01))
get = q.get()
with self.assertRaises(TimeoutError):
yield get_timeout
q.put_nowait(0)
self.assertEqual(0, (yield get))
@gen_test
def test_get_timeout_preempted(self):
q = queues.Queue()
get = q.get(timeout=timedelta(seconds=0.01))
q.put(0)
yield gen.sleep(0.02)
self.assertEqual(0, (yield get))
@gen_test
def test_get_clears_timed_out_putters(self):
q = queues.Queue(1)
# First putter succeeds, remainder block.
putters = [q.put(i, timedelta(seconds=0.01)) for i in range(10)]
put = q.put(10)
self.assertEqual(10, len(q._putters))
yield gen.sleep(0.02)
self.assertEqual(10, len(q._putters))
self.assertFalse(put.done()) # Final waiter is still active.
q.put(11)
self.assertEqual(0, (yield q.get())) # get() clears the waiters.
self.assertEqual(1, len(q._putters))
for putter in putters[1:]:
self.assertRaises(TimeoutError, putter.result)
@gen_test
def test_get_clears_timed_out_getters(self):
q = queues.Queue()
getters = [q.get(timedelta(seconds=0.01)) for _ in range(10)]
get = q.get()
self.assertEqual(11, len(q._getters))
yield gen.sleep(0.02)
self.assertEqual(11, len(q._getters))
self.assertFalse(get.done()) # Final waiter is still active.
q.get() # get() clears the waiters.
self.assertEqual(2, len(q._getters))
for getter in getters:
self.assertRaises(TimeoutError, getter.result)
@skipBefore35
@gen_test
def test_async_for(self):
q = queues.Queue()
for i in range(5):
q.put(i)
namespace = exec_test(globals(), locals(), """
async def f():
results = []
async for i in q:
results.append(i)
if i == 4:
return results
""")
results = yield namespace['f']()
self.assertEqual(results, list(range(5)))
class QueuePutTest(AsyncTestCase):
@gen_test
def test_blocking_put(self):
q = queues.Queue()
q.put(0)
self.assertEqual(0, q.get_nowait())
def test_nonblocking_put_exception(self):
q = queues.Queue(1)
q.put(0)
self.assertRaises(queues.QueueFull, q.put_nowait, 1)
@gen_test
def test_put_with_getters(self):
q = queues.Queue()
get0 = q.get()
get1 = q.get()
yield q.put(0)
self.assertEqual(0, (yield get0))
yield q.put(1)
self.assertEqual(1, (yield get1))
@gen_test
def test_nonblocking_put_with_getters(self):
q = queues.Queue()
get0 = q.get()
get1 = q.get()
q.put_nowait(0)
# put_nowait does *not* immediately unblock getters.
yield gen.moment
self.assertEqual(0, (yield get0))
q.put_nowait(1)
yield gen.moment
self.assertEqual(1, (yield get1))
@gen_test
def test_blocking_put_wait(self):
q = queues.Queue(1)
q.put_nowait(0)
self.io_loop.call_later(0.01, q.get)
self.io_loop.call_later(0.02, q.get)
futures = [q.put(0), q.put(1)]
self.assertFalse(any(f.done() for f in futures))
yield futures
@gen_test
def test_put_timeout(self):
q = queues.Queue(1)
q.put_nowait(0) # Now it's full.
put_timeout = q.put(1, timeout=timedelta(seconds=0.01))
put = q.put(2)
with self.assertRaises(TimeoutError):
yield put_timeout
self.assertEqual(0, q.get_nowait())
# 1 was never put in the queue.
self.assertEqual(2, (yield q.get()))
# Final get() unblocked this putter.
yield put
@gen_test
def test_put_timeout_preempted(self):
q = queues.Queue(1)
q.put_nowait(0)
put = q.put(1, timeout=timedelta(seconds=0.01))
q.get()
yield gen.sleep(0.02)
yield put # No TimeoutError.
@gen_test
def test_put_clears_timed_out_putters(self):
q = queues.Queue(1)
# First putter succeeds, remainder block.
putters = [q.put(i, timedelta(seconds=0.01)) for i in range(10)]
put = q.put(10)
self.assertEqual(10, len(q._putters))
yield gen.sleep(0.02)
self.assertEqual(10, len(q._putters))
self.assertFalse(put.done()) # Final waiter is still active.
q.put(11) # put() clears the waiters.
self.assertEqual(2, len(q._putters))
for putter in putters[1:]:
self.assertRaises(TimeoutError, putter.result)
@gen_test
def test_put_clears_timed_out_getters(self):
q = queues.Queue()
getters = [q.get(timedelta(seconds=0.01)) for _ in range(10)]
get = q.get()
q.get()
self.assertEqual(12, len(q._getters))
yield gen.sleep(0.02)
self.assertEqual(12, len(q._getters))
self.assertFalse(get.done()) # Final waiters still active.
q.put(0) # put() clears the waiters.
self.assertEqual(1, len(q._getters))
self.assertEqual(0, (yield get))
for getter in getters:
self.assertRaises(TimeoutError, getter.result)
@gen_test
def test_float_maxsize(self):
# Non-int maxsize must round down: http://bugs.python.org/issue21723
q = queues.Queue(maxsize=1.3)
self.assertTrue(q.empty())
self.assertFalse(q.full())
q.put_nowait(0)
q.put_nowait(1)
self.assertFalse(q.empty())
self.assertTrue(q.full())
self.assertRaises(queues.QueueFull, q.put_nowait, 2)
self.assertEqual(0, q.get_nowait())
self.assertFalse(q.empty())
self.assertFalse(q.full())
yield q.put(2)
put = q.put(3)
self.assertFalse(put.done())
self.assertEqual(1, (yield q.get()))
yield put
self.assertTrue(q.full())
class QueueJoinTest(AsyncTestCase):
queue_class = queues.Queue
def test_task_done_underflow(self):
q = self.queue_class()
self.assertRaises(ValueError, q.task_done)
@gen_test
def test_task_done(self):
q = self.queue_class()
for i in range(100):
q.put_nowait(i)
self.accumulator = 0
@gen.coroutine
def worker():
while True:
item = yield q.get()
self.accumulator += item
q.task_done()
yield gen.sleep(random() * 0.01)
# Two coroutines share work.
worker()
worker()
yield q.join()
self.assertEqual(sum(range(100)), self.accumulator)
@gen_test
def test_task_done_delay(self):
# Verify it is task_done(), not get(), that unblocks join().
q = self.queue_class()
q.put_nowait(0)
join = q.join()
self.assertFalse(join.done())
yield q.get()
self.assertFalse(join.done())
yield gen.moment
self.assertFalse(join.done())
q.task_done()
self.assertTrue(join.done())
@gen_test
def test_join_empty_queue(self):
q = self.queue_class()
yield q.join()
yield q.join()
@gen_test
def test_join_timeout(self):
q = self.queue_class()
q.put(0)
with self.assertRaises(TimeoutError):
yield q.join(timeout=timedelta(seconds=0.01))
class PriorityQueueJoinTest(QueueJoinTest):
queue_class = queues.PriorityQueue
@gen_test
def test_order(self):
q = self.queue_class(maxsize=2)
q.put_nowait((1, 'a'))
q.put_nowait((0, 'b'))
self.assertTrue(q.full())
q.put((3, 'c'))
q.put((2, 'd'))
self.assertEqual((0, 'b'), q.get_nowait())
self.assertEqual((1, 'a'), (yield q.get()))
self.assertEqual((2, 'd'), q.get_nowait())
self.assertEqual((3, 'c'), (yield q.get()))
self.assertTrue(q.empty())
class LifoQueueJoinTest(QueueJoinTest):
queue_class = queues.LifoQueue
@gen_test
def test_order(self):
q = self.queue_class(maxsize=2)
q.put_nowait(1)
q.put_nowait(0)
self.assertTrue(q.full())
q.put(3)
q.put(2)
self.assertEqual(3, q.get_nowait())
self.assertEqual(2, (yield q.get()))
self.assertEqual(0, q.get_nowait())
self.assertEqual(1, (yield q.get()))
self.assertTrue(q.empty())
class ProducerConsumerTest(AsyncTestCase):
@gen_test
def test_producer_consumer(self):
q = queues.Queue(maxsize=3)
history = []
# We don't yield between get() and task_done(), so get() must wait for
# the next tick. Otherwise we'd immediately call task_done and unblock
# join() before q.put() resumes, and we'd only process the first four
# items.
@gen.coroutine
def consumer():
while True:
history.append((yield q.get()))
q.task_done()
@gen.coroutine
def producer():
for item in range(10):
yield q.put(item)
consumer()
yield producer()
yield q.join()
self.assertEqual(list(range(10)), history)
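# A standalone sketch of the producer/consumer pattern exercised above
# (illustrative only: `demo` and its body are not part of the test suite,
# and tornado.ioloop.IOLoop is assumed to be importable here).
def demo():
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def main():
        q = queues.Queue(maxsize=2)

        @gen.coroutine
        def consumer():
            while True:
                item = yield q.get()
                try:
                    print('consumed %r' % (item,))
                finally:
                    q.task_done()

        consumer()  # Start the consumer; do not wait for it to finish.
        for i in range(5):
            yield q.put(i)  # put() blocks while the queue is full.
        yield q.join()  # Block until every item is marked done.

    IOLoop.current().run_sync(main)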
if __name__ == '__main__':
unittest.main()
# license: gpl-3.0
# repo: luto/django-dashing
# path: dashing/utils.py
from django.conf.urls import url
from .views import Dashboard
class Router(object):
def __init__(self):
self.registry = []
def register(self, widget, basename, **parameters):
""" Register a widget, URL basename and any optional URL parameters.
Parameters are passed as keyword arguments, i.e.
>>> router.register(MyWidget, 'mywidget', my_parameter="[A-Z0-9]+")
This would be the equivalent of manually adding the following
to urlpatterns:
>>> url(r"^widgets/mywidget/(P<my_parameter>[A-Z0-9]+)/?",
MyWidget.as_view(), "widget_mywidget")
"""
self.registry.append((widget, basename, parameters))
def get_urls(self):
urlpatterns = [
url(r'^$', Dashboard.as_view(), name='dashboard'),
]
for widget, basename, parameters in self.registry:
urlpatterns += [
url(r'/'.join((
r'^widgets/{}'.format(basename),
r'/'.join((r'(?P<{}>{})'.format(parameter, regex)
for parameter, regex in parameters.items())),
)),
widget.as_view(),
name='widget_{}'.format(basename)),
]
return urlpatterns
@property
def urls(self):
return self.get_urls()
router = Router()
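# A minimal usage sketch (illustrative, not part of this module): `MyWidget`
# stands in for a user-defined widget class. Register widgets on the
# module-level router above, then include its URLs from a project urls.py:
#     from django.conf.urls import include, url
#     from dashing.utils import router
#     router.register(MyWidget, 'mywidget')
#     urlpatterns = [url(r'^dashboard/', include(router.urls))]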
# license: bsd-3-clause
# repo: kailIII/geraldo
# path: site/newsite/django_1_0/django/utils/_decimal.py
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module is currently Py2.3 compatible and should be kept that way
# unless a major compelling advantage arises. IOW, 2.3 compatibility is
# strongly preferred, but not guaranteed.
# Also, this module should be kept in sync with the latest updates of
# the IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is a Py2.3 implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
www2.hursley.ibm.com/decimal/decarith.html
and IEEE standard 854-1987:
www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of the module is to support arithmetic using familiar
"schoolhouse" rules and to avoid the some of tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of the expected Decimal("0.00") returned by decimal floating point).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal("0")
>>> Decimal("1")
Decimal("1")
>>> Decimal("-.0123")
Decimal("-0.0123")
>>> Decimal(123456)
Decimal("123456")
>>> Decimal("123.45e12345678901234567890")
Decimal("1.2345E+12345678901234567892")
>>> Decimal("1.33") + Decimal("1.27")
Decimal("2.60")
>>> Decimal("12.34") + Decimal("3.87") - Decimal("18.41")
Decimal("-2.20")
>>> dig = Decimal(1)
>>> print dig / Decimal(3)
0.333333333
>>> getcontext().prec = 18
>>> print dig / Decimal(3)
0.333333333333333333
>>> print dig.sqrt()
1
>>> print Decimal(3).sqrt()
1.73205080756887729
>>> print Decimal(3) ** 123
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print inf
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print neginf
-Infinity
>>> print neginf + inf
NaN
>>> print neginf * inf
-Infinity
>>> print dig / 0
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print dig / 0
Traceback (most recent call last):
...
...
...
DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal("NaN")
>>> c.traps[InvalidOperation] = 1
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> print c.divide(Decimal(0), Decimal(0))
Traceback (most recent call last):
...
...
...
InvalidOperation: 0 / 0
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print c.divide(Decimal(0), Decimal(0))
NaN
>>> print c.flags[InvalidOperation]
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN',
# Functions for manipulating contexts
'setcontext', 'getcontext'
]
import copy as _copy
#Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
#Rounding decision (not part of the public API)
NEVER_ROUND = 'NEVER_ROUND' # Round in division (non-divmod), sqrt ONLY
ALWAYS_ROUND = 'ALWAYS_ROUND' # Every operation rounds at end.
#Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
If an exception derives from another exception besides this (such as
Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
called if the others are present. This isn't actually used for
anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x), x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
"""
def handle(self, context, *args):
if args:
if args[0] == 1: #sNaN, must drop 's' but keep diagnostics
return Decimal( (args[1]._sign, args[1]._int, 'n') )
return NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if an string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return (0, (0,), 'n') #Passed to something which uses a tuple.
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, double = None, *args):
if double is not None:
return (Infsign[sign],)*2
return Infsign[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return (NaN, NaN)
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, tup=None, *args):
if tup is not None:
return (NaN, NaN) #for 0 %0, 0 // 0
return NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
pass
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
pass
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
or operation (or sequence of operations) yielded a subnormal result.
"""
pass
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return Infsign[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return Infsign[sign]
return Decimal((sign, (9,)*context.prec,
context.Emax-context.prec+1))
if sign == 1:
if context.rounding == ROUND_FLOOR:
return Infsign[sign]
return Decimal( (sign, (9,)*context.prec,
context.Emax-context.prec+1))
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
##### Context Functions #######################################
# The getcontext() and setcontext() function manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.currentThread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
import sys
class MockThreading:
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del sys, MockThreading
try:
threading.local
except AttributeError:
#To fix reloading, force it to create a new context
#Old contexts have different exceptions in their dicts, making problems.
if hasattr(threading.currentThread(), '__decimal_context__'):
del threading.currentThread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.currentThread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.currentThread().__decimal_context__
except AttributeError:
context = Context()
threading.currentThread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
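# For illustration, typical use of the accessors defined above (a sketch,
# not executed here):
#     ctx = getcontext()          # this thread's current context
#     ctx.prec = 6                # precision for subsequent operations
#     Decimal(2) / Decimal(3)     # Decimal("0.666667")
#     setcontext(ExtendedContext) # swap in one of the predefined contexts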
##### Decimal class ###########################################
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
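    # For example, Decimal("-3.14") is stored with _sign=1, _int=(3, 1, 4)
    # and _exp=-2, i.e. (-1)**1 * 314 * 10**-2; as_tuple() returns exactly
    # this triple.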
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal("3.14")
>>> Decimal((0, (3, 1, 4), -2)) # tuple input (sign, digit_tuple, exponent)
Decimal("3.14")
>>> Decimal(314) # int or long
Decimal("314")
>>> Decimal(Decimal(314)) # another decimal instance
Decimal("314")
"""
self = object.__new__(cls)
self._is_special = False
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = tuple(map(int, str(value.int)))
self._exp = int(value.exp)
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an integer
if isinstance(value, (int,long)):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = tuple(map(int, str(abs(value))))
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError, 'Invalid arguments'
if value[0] not in (0,1):
raise ValueError, 'Invalid sign'
for digit in value[1]:
if not isinstance(digit, (int,long)) or digit < 0:
raise ValueError, "The second value in the tuple must be composed of non negative integer elements."
self._sign = value[0]
self._int = tuple(value[1])
if value[2] in ('F','n','N'):
self._exp = value[2]
self._is_special = True
else:
self._exp = int(value[2])
return self
if isinstance(value, float):
raise TypeError("Cannot convert float to Decimal. " +
"First convert the float to a string")
# Other argument types may require the context during interpretation
if context is None:
context = getcontext()
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, basestring):
if _isinfinity(value):
self._exp = 'F'
self._int = (0,)
self._is_special = True
if _isinfinity(value) == 1:
self._sign = 0
else:
self._sign = 1
return self
if _isnan(value):
sig, sign, diag = _isnan(value)
self._is_special = True
if len(diag) > context.prec: #Diagnostic info too long
self._sign, self._int, self._exp = \
context._raise_error(ConversionSyntax)
return self
if sig == 1:
self._exp = 'n' #qNaN
else: #sig == 2
self._exp = 'N' #sNaN
self._sign = sign
self._int = tuple(map(int, diag)) #Diagnostic info
return self
try:
self._sign, self._int, self._exp = _string2exact(value)
except ValueError:
self._is_special = True
self._sign, self._int, self._exp = context._raise_error(ConversionSyntax)
return self
raise TypeError("Cannot convert %r to Decimal" % value)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other = None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
1, self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
1, other)
if self_is_nan:
return self
return other
return 0
def __nonzero__(self):
"""Is the number non-zero?
0 if self == 0
1 if self != 0
"""
if self._is_special:
return 1
return sum(self._int) != 0
def __cmp__(self, other, context=None):
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return 1 # Comparison involving NaNs always reports self > other
# INF = INF
return cmp(self._isinfinity(), other._isinfinity())
if not self and not other:
return 0 #If both 0, sign comparison isn't certain.
#If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted and \
self._int + (0,)*(self._exp - other._exp) == \
other._int + (0,)*(other._exp - self._exp):
return 0 #equal, except in precision. ([0]*(-x) = [])
elif self_adjusted > other_adjusted and self._int[0] != 0:
return (-1)**self._sign
elif self_adjusted < other_adjusted and other._int[0] != 0:
return -((-1)**self._sign)
# Need to round, so make sure we have a valid context
if context is None:
context = getcontext()
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_UP) #round away from 0
flags = context._ignore_all_flags()
res = self.__sub__(other, context=context)
context._regard_flags(*flags)
context.rounding = rounding
if not res:
return 0
elif res._sign:
return -1
return 1
def __eq__(self, other):
if not isinstance(other, (Decimal, int, long)):
return NotImplemented
return self.__cmp__(other) == 0
def __ne__(self, other):
if not isinstance(other, (Decimal, int, long)):
return NotImplemented
return self.__cmp__(other) != 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
#compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self.__cmp__(other, context))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# Decimal integers must hash the same as the ints
# Non-integer decimals are normalized and hashed as strings
# Normalization assures that hash(100E-1) == hash(10)
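# For example: hash(Decimal("100E-1")) == hash(10) via the integer branch
# below, and hash(Decimal("1.50")) == hash(Decimal("1.5")) because both
# normalize to the string "1.5".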
if self._is_special:
if self._isnan():
raise TypeError('Cannot hash a NaN value.')
return hash(str(self))
i = int(self)
if self == Decimal(i):
return hash(i)
assert self.__nonzero__() # '-0' handled by integer case
return hash(str(self.normalize()))
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return (self._sign, self._int, self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return 'Decimal("%s")' % str(self)
def __str__(self, eng = 0, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
if self._is_special:
if self._isnan():
minus = '-'*self._sign
if self._int == (0,):
info = ''
else:
info = ''.join(map(str, self._int))
if self._isnan() == 2:
return minus + 'sNaN' + info
return minus + 'NaN' + info
if self._isinfinity():
minus = '-'*self._sign
return minus + 'Infinity'
if context is None:
context = getcontext()
tmp = map(str, self._int)
numdigits = len(self._int)
leftdigits = self._exp + numdigits
if eng and not self: #self = 0eX wants 0[.0[0]]eY, not [[0]0]0eY
if self._exp < 0 and self._exp >= -6: #short, no need for e/E
s = '-'*self._sign + '0.' + '0'*(abs(self._exp))
return s
#exp is closest mult. of 3 >= self._exp
exp = ((self._exp - 1)// 3 + 1) * 3
if exp != self._exp:
s = '0.'+'0'*(exp - self._exp)
else:
s = '0'
if exp != 0:
if context.capitals:
s += 'E'
else:
s += 'e'
if exp > 0:
s += '+' #0.0e+3, not 0.0e3
s += str(exp)
s = '-'*self._sign + s
return s
if eng:
dotplace = (leftdigits-1)%3+1
adjexp = leftdigits -1 - (leftdigits-1)%3
else:
adjexp = leftdigits-1
dotplace = 1
if self._exp == 0:
pass
elif self._exp < 0 and adjexp >= 0:
tmp.insert(leftdigits, '.')
elif self._exp < 0 and adjexp >= -6:
tmp[0:0] = ['0'] * int(-leftdigits)
tmp.insert(0, '0.')
else:
if numdigits > dotplace:
tmp.insert(dotplace, '.')
elif numdigits < dotplace:
tmp.extend(['0']*(dotplace-numdigits))
if adjexp:
if not context.capitals:
tmp.append('e')
else:
tmp.append('E')
if adjexp > 0:
tmp.append('+')
tmp.append(str(adjexp))
if eng:
while tmp[0:1] == ['0']:
tmp[0:1] = []
if len(tmp) == 0 or tmp[0] == '.' or tmp[0].lower() == 'e':
tmp[0:0] = ['0']
if self._sign:
tmp.insert(0, '-')
return ''.join(tmp)
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
Same rules for when in exponential and when as a value as in __str__.
"""
return self.__str__(eng=1, context=context)
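    # For example (a sketch): Decimal("123E+1").to_eng_string() == "1.23E+3"
    # and Decimal("0.00012").to_eng_string() == "120E-6".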
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if not self:
# -Decimal('0') is Decimal('0'), not Decimal('-0')
sign = 0
elif self._sign:
sign = 0
else:
sign = 1
if context is None:
context = getcontext()
if context._rounding_decision == ALWAYS_ROUND:
return Decimal((sign, self._int, self._exp))._fix(context)
return Decimal( (sign, self._int, self._exp))
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if more than precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
sign = self._sign
if not self:
# + (-0) = 0
sign = 0
if context is None:
context = getcontext()
if context._rounding_decision == ALWAYS_ROUND:
ans = self._fix(context)
else:
ans = Decimal(self)
ans._sign = sign
return ans
def __abs__(self, round=1, context=None):
"""Returns the absolute value of self.
If the second argument is 0, do not round.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if not round:
if context is None:
context = getcontext()
context = context._shallow_copy()
context._set_rounding_decision(NEVER_ROUND)
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) cause InvalidOperation errors.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
#If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) #Can't both be infinity here
shouldround = context._rounding_decision == ALWAYS_ROUND
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
#If the answer is 0, the sign should be negative, in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
return Decimal( (sign, (0,), exp))
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, watchexp=0, context=context)
if shouldround:
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, watchexp=0, context=context)
if shouldround:
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, shouldround, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
if exp < context.Etiny():
exp = context.Etiny()
context._raise_error(Clamped)
return Decimal((negativezero, (0,), exp))
if op1.int < op2.int:
op1, op2 = op2, op1
#OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
#So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
#Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
if shouldround:
ans = ans._fix(context)
return ans
__radd__ = __add__
def __sub__(self, other, context=None):
"""Return self + (-other)"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# -Decimal(0) = Decimal(0), which we don't want since
# (-0 - 0 = -0 + (-0) = -0, but -0 + 0 = 0.)
# so we change the sign directly to a copy
tmp = Decimal(other)
tmp._sign = 1-tmp._sign
return self.__add__(tmp, context=context)
def __rsub__(self, other, context=None):
"""Return other + (-self)"""
other = _convert_other(other)
if other is NotImplemented:
return other
tmp = Decimal(self)
tmp._sign = 1 - tmp._sign
return other.__add__(tmp, context=context)
def _increment(self, round=1, context=None):
"""Special case of add, adding 1eExponent
Since it is common (in rounding, for example), this adds
(sign) * 1E(self._exp) to the number more efficiently than __add__.
For example:
Decimal('5.624e10')._increment() == Decimal('5.625e10')
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self) # Must be infinite, and incrementing makes no difference
L = list(self._int)
L[-1] += 1
spot = len(L)-1
while L[spot] == 10:
L[spot] = 0
if spot == 0:
L[0:0] = [1]
break
L[spot-1] += 1
spot -= 1
ans = Decimal((self._sign, L, self._exp))
if context is None:
context = getcontext()
if round and context._rounding_decision == ALWAYS_ROUND:
ans = ans._fix(context)
return ans
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return Infsign[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return Infsign[resultsign]
resultexp = self._exp + other._exp
shouldround = context._rounding_decision == ALWAYS_ROUND
# Special case for multiplying by zero
if not self or not other:
ans = Decimal((resultsign, (0,), resultexp))
if shouldround:
#Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == (1,):
ans = Decimal((resultsign, other._int, resultexp))
if shouldround:
ans = ans._fix(context)
return ans
if other._int == (1,):
ans = Decimal((resultsign, self._int, resultexp))
if shouldround:
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = Decimal( (resultsign, map(int, str(op1.int * op2.int)), resultexp))
if shouldround:
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __div__(self, other, context=None):
"""Return self / other."""
return self._divide(other, context=context)
__truediv__ = __div__
def _divide(self, other, divmod = 0, context=None):
"""Return a / b, to context.prec precision.
divmod:
0 => true division
1 => (a //b, a%b)
2 => a //b
3 => a%b
Actually, if divmod is 2 or 3 a tuple is returned, but errors for
computing the other value are not raised.
"""
other = _convert_other(other)
if other is NotImplemented:
if divmod in (0, 1):
return NotImplemented
return (NotImplemented, NotImplemented)
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
if divmod:
return (ans, ans)
return ans
if self._isinfinity() and other._isinfinity():
if divmod:
return (context._raise_error(InvalidOperation,
'(+-)INF // (+-)INF'),
context._raise_error(InvalidOperation,
'(+-)INF % (+-)INF'))
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
if divmod == 1:
return (Infsign[sign],
context._raise_error(InvalidOperation, 'INF % x'))
elif divmod == 2:
return (Infsign[sign], NaN)
elif divmod == 3:
return (Infsign[sign],
context._raise_error(InvalidOperation, 'INF % x'))
return Infsign[sign]
if other._isinfinity():
if divmod:
return (Decimal((sign, (0,), 0)), Decimal(self))
context._raise_error(Clamped, 'Division by infinity')
return Decimal((sign, (0,), context.Etiny()))
# Special cases for zeroes
if not self and not other:
if divmod:
return context._raise_error(DivisionUndefined, '0 / 0', 1)
return context._raise_error(DivisionUndefined, '0 / 0')
if not self:
if divmod:
otherside = Decimal(self)
otherside._exp = min(self._exp, other._exp)
return (Decimal((sign, (0,), 0)), otherside)
exp = self._exp - other._exp
if exp < context.Etiny():
exp = context.Etiny()
context._raise_error(Clamped, '0e-x / y')
if exp > context.Emax:
exp = context.Emax
context._raise_error(Clamped, '0e+x / y')
return Decimal( (sign, (0,), exp) )
if not other:
if divmod:
return context._raise_error(DivisionByZero, 'divmod(x,0)',
sign, 1)
return context._raise_error(DivisionByZero, 'x / 0', sign)
#OK, so neither = 0, INF or NaN
shouldround = context._rounding_decision == ALWAYS_ROUND
#If we're dividing into ints, and self < other, stop.
#self.__abs__(0) does not round.
if divmod and (self.__abs__(0, context) < other.__abs__(0, context)):
if divmod == 1 or divmod == 3:
exp = min(self._exp, other._exp)
ans2 = self._rescale(exp, context=context, watchexp=0)
if shouldround:
ans2 = ans2._fix(context)
return (Decimal( (sign, (0,), 0) ),
ans2)
elif divmod == 2:
#Don't round the mod part, if we don't need it.
return (Decimal( (sign, (0,), 0) ), Decimal(self))
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2, adjust = _adjust_coefficients(op1, op2)
res = _WorkRep( (sign, 0, (op1.exp - op2.exp)) )
if divmod and res.exp > context.prec + 1:
return context._raise_error(DivisionImpossible)
prec_limit = 10 ** context.prec
while 1:
while op2.int <= op1.int:
res.int += 1
op1.int -= op2.int
if res.exp == 0 and divmod:
if res.int >= prec_limit and shouldround:
return context._raise_error(DivisionImpossible)
otherside = Decimal(op1)
frozen = context._ignore_all_flags()
exp = min(self._exp, other._exp)
otherside = otherside._rescale(exp, context=context, watchexp=0)
context._regard_flags(*frozen)
if shouldround:
otherside = otherside._fix(context)
return (Decimal(res), otherside)
if op1.int == 0 and adjust >= 0 and not divmod:
break
if res.int >= prec_limit and shouldround:
if divmod:
return context._raise_error(DivisionImpossible)
shouldround=1
# Really, the answer is a bit higher, so adding a one to
# the end will make sure the rounding is right.
if op1.int != 0:
res.int *= 10
res.int += 1
res.exp -= 1
break
res.int *= 10
res.exp -= 1
adjust += 1
op1.int *= 10
op1.exp -= 1
if res.exp == 0 and divmod and op2.int > op1.int:
#Solves an error in precision. Same as a previous block.
if res.int >= prec_limit and shouldround:
return context._raise_error(DivisionImpossible)
otherside = Decimal(op1)
frozen = context._ignore_all_flags()
exp = min(self._exp, other._exp)
otherside = otherside._rescale(exp, context=context)
context._regard_flags(*frozen)
return (Decimal(res), otherside)
ans = Decimal(res)
if shouldround:
ans = ans._fix(context)
return ans
def __rdiv__(self, other, context=None):
"""Swaps self/other and returns __div__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__div__(self, context=context)
__rtruediv__ = __rdiv__
def __divmod__(self, other, context=None):
"""
(self // other, self % other)
"""
return self._divide(other, 1, context)
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self and not other:
return context._raise_error(InvalidOperation, 'x % 0')
return self._divide(other, 3, context)[1]
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
Remainder nearest to 0: abs(remainder_near(other)) <= abs(other)/2
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self and not other:
return context._raise_error(InvalidOperation, 'x % 0')
if context is None:
context = getcontext()
# If DivisionImpossible causes an error, do not leave Rounded/Inexact
# ignored in the calling function.
context = context._shallow_copy()
flags = context._ignore_flags(Rounded, Inexact)
#keep DivisionImpossible flags
(side, r) = self.__divmod__(other, context=context)
if r._isnan():
context._regard_flags(*flags)
return r
context = context._shallow_copy()
rounding = context._set_rounding_decision(NEVER_ROUND)
if other._sign:
comparison = other.__div__(Decimal(-2), context=context)
else:
comparison = other.__div__(Decimal(2), context=context)
context._set_rounding_decision(rounding)
context._regard_flags(*flags)
s1, s2 = r._sign, comparison._sign
r._sign, comparison._sign = 0, 0
if r < comparison:
r._sign, comparison._sign = s1, s2
#Get flags now
self.__divmod__(other, context=context)
return r._fix(context)
r._sign, comparison._sign = s1, s2
rounding = context._set_rounding_decision(NEVER_ROUND)
(side, r) = self.__divmod__(other, context=context)
context._set_rounding_decision(rounding)
if r._isnan():
return r
decrease = not side._iseven()
rounding = context._set_rounding_decision(NEVER_ROUND)
side = side.__abs__(context=context)
context._set_rounding_decision(rounding)
s1, s2 = r._sign, comparison._sign
r._sign, comparison._sign = 0, 0
if r > comparison or decrease and r == comparison:
r._sign, comparison._sign = s1, s2
context.prec += 1
if len(side.__add__(Decimal(1), context=context)._int) >= context.prec:
context.prec -= 1
return context._raise_error(DivisionImpossible)[1]
context.prec -= 1
if self._sign == other._sign:
r = r.__sub__(other, context=context)
else:
r = r.__add__(other, context=context)
else:
r._sign, comparison._sign = s1, s2
return r._fix(context)
def __floordiv__(self, other, context=None):
"""self // other"""
return self._divide(other, 2, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
return float(str(self))
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
context = getcontext()
return context._raise_error(InvalidContext)
elif self._isinfinity():
raise OverflowError, "Cannot convert infinity to long"
if self._exp >= 0:
s = ''.join(map(str, self._int)) + '0'*self._exp
else:
s = ''.join(map(str, self._int))[:self._exp]
if s == '':
s = '0'
sign = '-'*self._sign
return int(sign + s)
def __long__(self):
"""Converts to a long.
Equivalent to long(int(self))
"""
return long(self.__int__())
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
return self
if context is None:
context = getcontext()
prec = context.prec
ans = self._fixexponents(context)
if len(ans._int) > prec:
ans = ans._round(prec, context=context)
ans = ans._fixexponents(context)
return ans
def _fixexponents(self, context):
"""Fix the exponents and return a copy with the exponent in bounds.
Only call if known to not be a special value.
"""
folddown = context._clamp
Emin = context.Emin
ans = self
ans_adjusted = ans.adjusted()
if ans_adjusted < Emin:
Etiny = context.Etiny()
if ans._exp < Etiny:
if not ans:
ans = Decimal(self)
ans._exp = Etiny
context._raise_error(Clamped)
return ans
ans = ans._rescale(Etiny, context=context)
#It isn't zero, and exp < Emin => subnormal
context._raise_error(Subnormal)
if context.flags[Inexact]:
context._raise_error(Underflow)
else:
if ans:
#Only raise subnormal if non-zero.
context._raise_error(Subnormal)
else:
Etop = context.Etop()
if folddown and ans._exp > Etop:
context._raise_error(Clamped)
ans = ans._rescale(Etop, context=context)
else:
Emax = context.Emax
if ans_adjusted > Emax:
if not ans:
ans = Decimal(self)
ans._exp = Emax
context._raise_error(Clamped)
return ans
context._raise_error(Inexact)
context._raise_error(Rounded)
return context._raise_error(Overflow, 'above Emax', ans._sign)
return ans
def _round(self, prec=None, rounding=None, context=None):
"""Returns a rounded version of self.
You can specify the precision or rounding method. Otherwise, the
context determines it.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity():
return Decimal(self)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if prec is None:
prec = context.prec
if not self:
if prec <= 0:
dig = (0,)
exp = len(self._int) - prec + self._exp
else:
dig = (0,) * prec
exp = len(self._int) + self._exp - prec
ans = Decimal((self._sign, dig, exp))
context._raise_error(Rounded)
return ans
if prec == 0:
temp = Decimal(self)
temp._int = (0,)+temp._int
prec = 1
elif prec < 0:
exp = self._exp + len(self._int) - prec - 1
temp = Decimal( (self._sign, (0, 1), exp))
prec = 1
else:
temp = Decimal(self)
numdigits = len(temp._int)
if prec == numdigits:
return temp
# See if we need to extend precision
expdiff = prec - numdigits
if expdiff > 0:
tmp = list(temp._int)
tmp.extend([0] * expdiff)
ans = Decimal( (temp._sign, tmp, temp._exp - expdiff))
return ans
#OK, but maybe all the lost digits are 0.
lostdigits = self._int[expdiff:]
if lostdigits == (0,) * len(lostdigits):
ans = Decimal( (temp._sign, temp._int[:prec], temp._exp - expdiff))
#Rounded, but not Inexact
context._raise_error(Rounded)
return ans
# Okay, let's round and lose data
this_function = getattr(temp, self._pick_rounding_function[rounding])
#Now we've got the rounding function
if prec != context.prec:
context = context._shallow_copy()
context.prec = prec
ans = this_function(prec, expdiff, context)
context._raise_error(Rounded)
context._raise_error(Inexact, 'Changed in rounding')
return ans
_pick_rounding_function = {}
def _round_down(self, prec, expdiff, context):
"""Also known as round-towards-0, truncate."""
return Decimal( (self._sign, self._int[:prec], self._exp - expdiff) )
def _round_half_up(self, prec, expdiff, context, tmp = None):
"""Rounds 5 up (away from 0)"""
if tmp is None:
tmp = Decimal( (self._sign,self._int[:prec], self._exp - expdiff))
if self._int[prec] >= 5:
tmp = tmp._increment(round=0, context=context)
if len(tmp._int) > prec:
return Decimal( (tmp._sign, tmp._int[:-1], tmp._exp + 1))
return tmp
def _round_half_even(self, prec, expdiff, context):
"""Round 5 to even, rest to nearest."""
tmp = Decimal( (self._sign, self._int[:prec], self._exp - expdiff))
half = (self._int[prec] == 5)
if half:
for digit in self._int[prec+1:]:
if digit != 0:
half = 0
break
if half:
if self._int[prec-1] & 1 == 0:
return tmp
return self._round_half_up(prec, expdiff, context, tmp)
def _round_half_down(self, prec, expdiff, context):
"""Round 5 down"""
tmp = Decimal( (self._sign, self._int[:prec], self._exp - expdiff))
half = (self._int[prec] == 5)
if half:
for digit in self._int[prec+1:]:
if digit != 0:
half = 0
break
if half:
return tmp
return self._round_half_up(prec, expdiff, context, tmp)
def _round_up(self, prec, expdiff, context):
"""Rounds away from 0."""
tmp = Decimal( (self._sign, self._int[:prec], self._exp - expdiff) )
for digit in self._int[prec:]:
if digit != 0:
tmp = tmp._increment(round=1, context=context)
if len(tmp._int) > prec:
return Decimal( (tmp._sign, tmp._int[:-1], tmp._exp + 1))
else:
return tmp
return tmp
def _round_ceiling(self, prec, expdiff, context):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec, expdiff, context)
else:
return self._round_up(prec, expdiff, context)
def _round_floor(self, prec, expdiff, context):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec, expdiff, context)
else:
return self._round_up(prec, expdiff, context)
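    # A sketch of how the rounding modes above differ when Decimal("25") is
    # rounded to one significant digit (an exact halfway case):
    #   ROUND_DOWN, ROUND_HALF_DOWN, ROUND_HALF_EVEN -> 2E+1
    #   ROUND_UP, ROUND_HALF_UP                      -> 3E+1
    #   ROUND_CEILING -> 3E+1 (-25 gives -2E+1, rounding toward +Infinity)
    #   ROUND_FLOOR   -> 2E+1 (-25 gives -3E+1, rounding toward -Infinity)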
def __pow__(self, n, modulo = None, context=None):
"""Return self ** n (mod modulo)
If modulo is None (default), don't take it mod modulo.
"""
n = _convert_other(n)
if n is NotImplemented:
return n
if context is None:
context = getcontext()
if self._is_special or n._is_special or n.adjusted() > 8:
#Because the spot << doesn't work with really big exponents
if n._isinfinity() or n.adjusted() > 8:
return context._raise_error(InvalidOperation, 'x ** INF')
ans = self._check_nans(n, context)
if ans:
return ans
if not n._isinteger():
return context._raise_error(InvalidOperation, 'x ** (non-integer)')
if not self and not n:
return context._raise_error(InvalidOperation, '0 ** 0')
if not n:
return Decimal(1)
if self == Decimal(1):
return Decimal(1)
sign = self._sign and not n._iseven()
n = int(n)
if self._isinfinity():
if modulo:
return context._raise_error(InvalidOperation, 'INF % x')
if n > 0:
return Infsign[sign]
return Decimal( (sign, (0,), 0) )
#with ludicrously large exponent, just raise an overflow and return inf.
if not modulo and n > 0 and (self._exp + len(self._int) - 1) * n > context.Emax \
and self:
tmp = Decimal('inf')
tmp._sign = sign
context._raise_error(Rounded)
context._raise_error(Inexact)
context._raise_error(Overflow, 'Big power', sign)
return tmp
elength = len(str(abs(n)))
firstprec = context.prec
if not modulo and firstprec + elength + 1 > DefaultContext.Emax:
return context._raise_error(Overflow, 'Too much precision.', sign)
mul = Decimal(self)
val = Decimal(1)
context = context._shallow_copy()
context.prec = firstprec + elength + 1
if n < 0:
#n is a long now, not Decimal instance
n = -n
mul = Decimal(1).__div__(mul, context=context)
spot = 1
while spot <= n:
spot <<= 1
spot >>= 1
#Spot is now the highest power of 2 less than or equal to n
while spot:
val = val.__mul__(val, context=context)
if val._isinfinity():
val = Infsign[sign]
break
if spot & n:
val = val.__mul__(mul, context=context)
if modulo is not None:
val = val.__mod__(modulo, context=context)
spot >>= 1
context.prec = firstprec
if context._rounding_decision == ALWAYS_ROUND:
return val._fix(context)
return val
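# A minimal, self-contained sketch (illustrative; the name is made up and
# nothing in the class uses it) of the square-and-multiply loop __pow__
# runs above, written for plain Python ints:
def _int_pow_sketch(base, n):
    """Compute base ** n (n >= 0) by binary exponentiation."""
    spot = 1
    while spot <= n:
        spot <<= 1
    spot >>= 1                    # highest power of 2 <= n (0 if n == 0)
    val = 1
    while spot:
        val = val * val           # square once per bit position
        if spot & n:
            val = val * base      # multiply in where the bit of n is set
        spot >>= 1
    return val
# e.g. _int_pow_sketch(3, 13) == 3 ** 13 == 1594323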
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return Decimal( (dup._sign, (0,), 0) )
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == 0:
exp += 1
end -= 1
return Decimal( (dup._sign, dup._int[:end], exp) )
def quantize(self, exp, rounding=None, context=None, watchexp=1):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return self #if both are inf, it is OK
if context is None:
context = getcontext()
return context._raise_error(InvalidOperation,
'quantize with one INF')
return self._rescale(exp._exp, rounding, context, watchexp)
def same_quantum(self, other):
"""Test whether self and other have the same exponent.
same as self._exp == other._exp, except NaN == sNaN
"""
if self._is_special or other._is_special:
if self._isnan() or other._isnan():
return self._isnan() and other._isnan() and True
if self._isinfinity() or other._isinfinity():
return self._isinfinity() and other._isinfinity() and True
return self._exp == other._exp
def _rescale(self, exp, rounding=None, context=None, watchexp=1):
"""Rescales so that the exponent is exp.
exp = exp to scale to (an integer)
rounding = rounding version
watchexp: if set (default) an error is returned if exp is greater
than Emax or less than Etiny.
"""
if context is None:
context = getcontext()
if self._is_special:
if self._isinfinity():
return context._raise_error(InvalidOperation, 'rescale with an INF')
ans = self._check_nans(context=context)
if ans:
return ans
if watchexp and (context.Emax < exp or context.Etiny() > exp):
return context._raise_error(InvalidOperation, 'rescale(a, INF)')
if not self:
ans = Decimal(self)
ans._int = (0,)
ans._exp = exp
return ans
diff = self._exp - exp
digits = len(self._int) + diff
if watchexp and digits > context.prec:
return context._raise_error(InvalidOperation, 'Rescale > prec')
tmp = Decimal(self)
tmp._int = (0,) + tmp._int
digits += 1
if digits < 0:
tmp._exp = -digits + tmp._exp
tmp._int = (0,1)
digits = 1
tmp = tmp._round(digits, rounding, context=context)
if tmp._int[0] == 0 and len(tmp._int) > 1:
tmp._int = tmp._int[1:]
tmp._exp = exp
tmp_adjusted = tmp.adjusted()
if tmp and tmp_adjusted < context.Emin:
context._raise_error(Subnormal)
elif tmp and tmp_adjusted > context.Emax:
return context._raise_error(InvalidOperation, 'rescale(a, INF)')
return tmp
def to_integral(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._exp >= 0:
return self
if context is None:
context = getcontext()
flags = context._ignore_flags(Rounded, Inexact)
ans = self._rescale(0, rounding, context=context)
context._regard_flags(flags)
return ans
def sqrt(self, context=None):
"""Return the square root of self.
Uses a converging algorithm (Xn+1 = 0.5*(Xn + self / Xn))
Should quadratically approach the right answer.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
#exponent = self._exp / 2, using round_down.
#if self._exp < 0:
# exp = (self._exp+1) // 2
#else:
exp = (self._exp) // 2
if self._sign == 1:
#sqrt(-0) = -0
return Decimal( (1, (0,), exp))
else:
return Decimal( (0, (0,), exp))
if context is None:
context = getcontext()
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
tmp = Decimal(self)
expadd = tmp._exp // 2
if tmp._exp & 1:
tmp._int += (0,)
tmp._exp = 0
else:
tmp._exp = 0
context = context._shallow_copy()
flags = context._ignore_all_flags()
firstprec = context.prec
context.prec = 3
if tmp.adjusted() & 1 == 0:
ans = Decimal( (0, (8,1,9), tmp.adjusted() - 2) )
ans = ans.__add__(tmp.__mul__(Decimal((0, (2,5,9), -2)),
context=context), context=context)
ans._exp -= 1 + tmp.adjusted() // 2
else:
ans = Decimal( (0, (2,5,9), tmp._exp + len(tmp._int)- 3) )
ans = ans.__add__(tmp.__mul__(Decimal((0, (8,1,9), -3)),
context=context), context=context)
ans._exp -= 1 + tmp.adjusted() // 2
#ans is now a linear approximation.
Emax, Emin = context.Emax, context.Emin
context.Emax, context.Emin = DefaultContext.Emax, DefaultContext.Emin
half = Decimal('0.5')
maxp = firstprec + 2
rounding = context._set_rounding(ROUND_HALF_EVEN)
while 1:
context.prec = min(2*context.prec - 2, maxp)
ans = half.__mul__(ans.__add__(tmp.__div__(ans, context=context),
context=context), context=context)
if context.prec == maxp:
break
#round to the answer's precision-- the only error can be 1 ulp.
context.prec = firstprec
prevexp = ans.adjusted()
ans = ans._round(context=context)
#Now, check if the other last digits are better.
context.prec = firstprec + 1
# In case we rounded up another digit and we should actually go lower.
if prevexp != ans.adjusted():
ans._int += (0,)
ans._exp -= 1
lower = ans.__sub__(Decimal((0, (5,), ans._exp-1)), context=context)
context._set_rounding(ROUND_UP)
if lower.__mul__(lower, context=context) > (tmp):
ans = ans.__sub__(Decimal((0, (1,), ans._exp)), context=context)
else:
upper = ans.__add__(Decimal((0, (5,), ans._exp-1)),context=context)
context._set_rounding(ROUND_DOWN)
if upper.__mul__(upper, context=context) < tmp:
ans = ans.__add__(Decimal((0, (1,), ans._exp)),context=context)
ans._exp += expadd
context.prec = firstprec
context.rounding = rounding
ans = ans._fix(context)
rounding = context._set_rounding_decision(NEVER_ROUND)
if not ans.__mul__(ans, context=context) == self:
# Only rounded/inexact if here.
context._regard_flags(flags)
context._raise_error(Rounded)
context._raise_error(Inexact)
else:
#Exact answer, so let's set the exponent right.
#if self._exp < 0:
# exp = (self._exp +1)// 2
#else:
exp = self._exp // 2
context.prec += ans._exp - exp
ans = ans._rescale(exp, context=context)
context.prec = firstprec
context._regard_flags(flags)
context.Emax, context.Emin = Emax, Emin
return ans._fix(context)
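# Minimal float sketch (illustrative; assumes x >= 0 and no context, flag,
# or exactness handling) of the Newton iteration sqrt() converges with
# above, x_next = 0.5 * (x + self / x):
def _sqrt_sketch(x, iterations=30):
    if x == 0:
        return 0.0
    guess = x
    for _ in range(iterations):
        guess = 0.5 * (guess + x / guess)  # roughly doubles correct digits per step
    return guess
# e.g. _sqrt_sketch(2.0) -> 1.4142135623730951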
def max(self, other, context=None):
"""Returns the larger value.
like max(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
# if one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn != 2:
return self
if sn == 1 and on != 2:
return other
return self._check_nans(other, context)
ans = self
c = self.__cmp__(other)
if c == 0:
# if both operands are finite and equal in numerical value
# then an ordering is applied:
#
# if the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# if the signs are the same then the exponent is used to select
# the result.
if self._sign != other._sign:
if self._sign:
ans = other
elif self._exp < other._exp and not self._sign:
ans = other
elif self._exp > other._exp and self._sign:
ans = other
elif c == -1:
ans = other
if context is None:
context = getcontext()
if context._rounding_decision == ALWAYS_ROUND:
return ans._fix(context)
return ans
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
# if one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn != 2:
return self
if sn == 1 and on != 2:
return other
return self._check_nans(other, context)
ans = self
c = self.__cmp__(other)
if c == 0:
# if both operands are finite and equal in numerical value
# then an ordering is applied:
#
# if the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# if the signs are the same then the exponent is used to select
# the result.
if self._sign != other._sign:
if other._sign:
ans = other
elif self._exp > other._exp and not self._sign:
ans = other
elif self._exp < other._exp and self._sign:
ans = other
elif c == 1:
ans = other
if context is None:
context = getcontext()
if context._rounding_decision == ALWAYS_ROUND:
return ans._fix(context)
return ans
def _isinteger(self):
"""Returns whether self is an integer"""
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == (0,)*len(rest)
def _iseven(self):
"""Returns 1 if self is even. Assumes self is an integer."""
if self._exp > 0:
return 1
return self._int[-1+self._exp] & 1 == 0
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
#If NaN or Infinity, self._exp is string
except TypeError:
return 0
# support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) == Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) == Decimal:
return self # My components are also immutable
return self.__class__(str(self))
##### Context class ###########################################
# get rounding method function:
rounding_functions = [name for name in Decimal.__dict__.keys() if name.startswith('_round_')]
for name in rounding_functions:
#name is like _round_half_even, goes to the global ROUND_HALF_EVEN value.
globalname = name[1:].upper()
val = globals()[globalname]
Decimal._pick_rounding_function[val] = name
del name, val, globalname, rounding_functions
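# After the loop above, _pick_rounding_function maps each public rounding
# constant to the name of its helper method, e.g. (illustrative):
#   Decimal._pick_rounding_function[ROUND_HALF_EVEN] == '_round_half_even'
#   Decimal._pick_rounding_function[ROUND_DOWN] == '_round_down'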
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type. (how you round)
_rounding_decision - ALWAYS_ROUND, NEVER_ROUND -- do you round?
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is incremented.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
_clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None,
traps=None, flags=None,
_rounding_decision=None,
Emin=None, Emax=None,
capitals=None, _clamp=0,
_ignored_flags=None):
if flags is None:
flags = []
if _ignored_flags is None:
_ignored_flags = []
if not isinstance(flags, dict):
flags = dict([(s,s in flags) for s in _signals])
del s
if traps is not None and not isinstance(traps, dict):
traps = dict([(s,s in traps) for s in _signals])
del s
for name, val in locals().items():
if val is None:
setattr(self, name, _copy.copy(getattr(DefaultContext, name)))
else:
setattr(self, name, val)
del self.self
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d' % vars(self))
s.append('flags=[' + ', '.join([f.__name__ for f, v in self.flags.items() if v]) + ']')
s.append('traps=[' + ', '.join([t.__name__ for t, v in self.traps.items() if v]) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.traps, self.flags,
self._rounding_decision, self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.traps.copy(), self.flags.copy(),
self._rounding_decision, self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it increments the flag, then, if the corresponding
trap_enabler is set, it raises the exception. Otherwise, it returns
the default value after incrementing the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
#Don't touch the flag
return error().handle(self, *args)
self.flags[error] += 1
if not self.traps[error]:
#The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
#self._ignored_flags = []
raise error, explanation
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
def __hash__(self):
"""A Context cannot be hashed."""
# We inherit object.__hash__, so we must deny this explicitly
raise TypeError, "Cannot hash a Context."
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding_decision(self, type):
"""Sets the rounding decision.
Sets the rounding decision, and returns the current (previous)
rounding decision. Often used like:
context = context._shallow_copy()
# That so you don't change the calling context
# if an error occurs in the middle (say DivisionImpossible is raised).
rounding = context._set_rounding_decision(NEVER_ROUND)
instance = instance / Decimal(2)
context._set_rounding_decision(rounding)
This will make it not round for that operation.
"""
rounding = self._rounding_decision
self._rounding_decision = type
return rounding
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding= type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context."""
d = Decimal(num, context=self)
return d._fix(self)
#Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal("2.1")
>>> ExtendedContext.abs(Decimal('-100'))
Decimal("100")
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal("101.5")
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal("101.5")
"""
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal("19.00")
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal("1.02E+4")
"""
return a.__add__(b, context=self)
def _apply(self, a):
return str(a._fix(self))
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal("-1")
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal("0")
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal("0")
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal("1")
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal("1")
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal("-1")
"""
return a.compare(b, context=self)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal("0.333333333")
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal("0.666666667")
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal("2.5")
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal("0.1")
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal("1")
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal("4.00")
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal("1.20")
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal("10")
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal("1000")
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal("1.20E+6")
"""
return a.__div__(b, context=self)
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal("0")
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal("3")
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal("3")
"""
return a.__floordiv__(b, context=self)
def divmod(self, a, b):
return a.__divmod__(b, context=self)
def max(self, a,b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal("3")
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal("3")
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal("1")
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal("7")
"""
return a.max(b, context=self)
def min(self, a,b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal("2")
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal("-10")
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal("1.0")
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal("7")
"""
return a.min(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal("-1.3")
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal("1.3")
"""
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together ('long multiplication'),
resulting in a number which may be as long as the sum of the lengths
of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal("3.60")
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal("21")
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal("0.72")
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal("-0.0")
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal("4.28135971E+11")
"""
return a.__mul__(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal("2.1")
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal("-2")
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal("1.2")
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal("-1.2E+2")
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal("1.2E+2")
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal("0")
"""
return a.normalize(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal("1.3")
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal("-1.3")
"""
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
The right-hand operand must be a whole number whose integer part (after
any exponent has been applied) has no more than 9 digits and whose
fractional part (if any) is all zeros before any rounding. The operand
may be positive, negative, or zero; if negative, the absolute value of
the power is used, and the left-hand operand is inverted (divided into
1) before use.
If the increased precision needed for the intermediate calculations
exceeds the capabilities of the implementation then an Invalid operation
condition is raised.
If, when raising to a negative power, an underflow occurs during the
division into 1, the operation is not halted at that point but
continues.
>>> ExtendedContext.power(Decimal('2'), Decimal('3'))
Decimal("8")
>>> ExtendedContext.power(Decimal('2'), Decimal('-3'))
Decimal("0.125")
>>> ExtendedContext.power(Decimal('1.7'), Decimal('8'))
Decimal("69.7575744")
>>> ExtendedContext.power(Decimal('Infinity'), Decimal('-2'))
Decimal("0")
>>> ExtendedContext.power(Decimal('Infinity'), Decimal('-1'))
Decimal("0")
>>> ExtendedContext.power(Decimal('Infinity'), Decimal('0'))
Decimal("1")
>>> ExtendedContext.power(Decimal('Infinity'), Decimal('1'))
Decimal("Infinity")
>>> ExtendedContext.power(Decimal('Infinity'), Decimal('2'))
Decimal("Infinity")
>>> ExtendedContext.power(Decimal('-Infinity'), Decimal('-2'))
Decimal("0")
>>> ExtendedContext.power(Decimal('-Infinity'), Decimal('-1'))
Decimal("-0")
>>> ExtendedContext.power(Decimal('-Infinity'), Decimal('0'))
Decimal("1")
>>> ExtendedContext.power(Decimal('-Infinity'), Decimal('1'))
Decimal("-Infinity")
>>> ExtendedContext.power(Decimal('-Infinity'), Decimal('2'))
Decimal("Infinity")
>>> ExtendedContext.power(Decimal('0'), Decimal('0'))
Decimal("NaN")
"""
return a.__pow__(b, modulo, context=self)
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded) and having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is an
error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal("2.170")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal("2.17")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal("2.2")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal("2")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal("0E+1")
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal("-Infinity")
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal("NaN")
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal("-0")
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal("-0E+5")
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal("NaN")
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal("NaN")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal("217.0")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal("217")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal("2.2E+2")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal("2E+2")
"""
return a.quantize(b, context=self)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded to
precision digits if necessary. The sign of the result, if non-zero, is
the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal("2.1")
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal("1")
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal("-1")
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal("0.2")
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal("0.1")
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal("1.0")
"""
return a.__mod__(b, context=self)
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal("-0.9")
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal("-2")
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal("1")
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal("-1")
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal("0.2")
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal("0.1")
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal("-0.3")
"""
return a.remainder_near(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
"""
return a.same_quantum(b)
def sqrt(self, a):
"""Returns the square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal("0")
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal("-0")
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal("0.624499800")
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal("10")
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal("1")
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal("1.0")
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal("1.0")
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal("2.64575131")
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal("3.16227766")
>>> ExtendedContext.prec
9
"""
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal("0.23")
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal("0.00")
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal("-0.77")
"""
return a.__sub__(b, context=self)
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
return a.__str__(context=self)
def to_integral(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral(Decimal('2.1'))
Decimal("2")
>>> ExtendedContext.to_integral(Decimal('100'))
Decimal("100")
>>> ExtendedContext.to_integral(Decimal('100.0'))
Decimal("100")
>>> ExtendedContext.to_integral(Decimal('101.5'))
Decimal("102")
>>> ExtendedContext.to_integral(Decimal('-101.5'))
Decimal("-102")
>>> ExtendedContext.to_integral(Decimal('10E+5'))
Decimal("1.0E+6")
>>> ExtendedContext.to_integral(Decimal('7.89E+77'))
Decimal("7.89E+77")
>>> ExtendedContext.to_integral(Decimal('-Inf'))
Decimal("-Infinity")
"""
return a.to_integral(context=self)
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int or long
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
cum = 0
for digit in value._int:
cum = cum * 10 + digit
self.int = cum
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, shouldround = 0, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
# Yes, the exponent is a long, but the difference between exponents
# must be an int-- otherwise you'd get a big memory problem.
numdigits = int(op1.exp - op2.exp)
if numdigits < 0:
numdigits = -numdigits
tmp = op2
other = op1
else:
tmp = op1
other = op2
if shouldround and numdigits > prec + 1:
# Big difference in exponents - check the adjusted exponents
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
if numdigits > (other_len + prec + 1 - tmp_len):
# If the difference in adjusted exps is > prec+1, we know
# other is insignificant, so might as well put a 1 after the precision.
# (since this is only for addition.) Also stops use of massive longs.
extend = prec + 2 - tmp_len
if extend <= 0:
extend = 1
tmp.int *= 10 ** extend
tmp.exp -= extend
other.int = 1
other.exp = tmp.exp
return op1, op2
tmp.int *= 10 ** numdigits
tmp.exp -= numdigits
return op1, op2
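# Worked example for _normalize (illustrative values): with op1 = 1.23E3
# held as (sign=0, int=123, exp=1) and op2 = 4.5E-1 as (0, 45, -1),
# numdigits = 2, so op1 becomes (0, 12300, -1); both operands then share
# exp == -1 and can be added digit-wise.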
def _adjust_coefficients(op1, op2):
"""Adjust op1, op2 so that op2.int * 10 > op1.int >= op2.int.
Returns the adjusted op1, op2 as well as the change in op1.exp-op2.exp.
Used on _WorkRep instances during division.
"""
adjust = 0
#If op1 is smaller, make it larger
while op2.int > op1.int:
op1.int *= 10
op1.exp -= 1
adjust += 1
#If op2 is too small, make it larger
while op1.int >= (10 * op2.int):
op2.int *= 10
op2.exp -= 1
adjust -= 1
return op1, op2, adjust
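# Worked example for _adjust_coefficients (illustrative): op1.int = 3 and
# op2.int = 7. The first loop scales op1 up to 30 (adjust = 1); the second
# loop leaves op2 alone since 30 < 70. The postcondition from the docstring
# then holds: op2.int * 10 > op1.int >= op2.int, i.e. 70 > 30 >= 7.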
##### Helper Functions ########################################
def _convert_other(other):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
"""
if isinstance(other, Decimal):
return other
if isinstance(other, (int, long)):
return Decimal(other)
return NotImplemented
_infinity_map = {
'inf' : 1,
'infinity' : 1,
'+inf' : 1,
'+infinity' : 1,
'-inf' : -1,
'-infinity' : -1
}
def _isinfinity(num):
"""Determines whether a string or float is infinity.
-1 for negative infinity; 0 for finite; +1 for positive infinity
"""
num = str(num).lower()
return _infinity_map.get(num, 0)
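# e.g. (illustrative): _isinfinity("-Infinity") == -1, _isinfinity("inf") == 1,
# and _isinfinity("12.5") == 0, since "12.5" is not a key of _infinity_map.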
def _isnan(num):
"""Determines whether a string or float is NaN
(1, sign, diagnostic info as string) => NaN
(2, sign, diagnostic info as string) => sNaN
0 => not a NaN
"""
num = str(num).lower()
if not num:
return 0
#get the sign, get rid of trailing [+-]
sign = 0
if num[0] == '+':
num = num[1:]
elif num[0] == '-': #elif avoids '+-nan'
num = num[1:]
sign = 1
if num.startswith('nan'):
if len(num) > 3 and not num[3:].isdigit(): #diagnostic info
return 0
return (1, sign, num[3:].lstrip('0'))
if num.startswith('snan'):
if len(num) > 4 and not num[4:].isdigit():
return 0
return (2, sign, num[4:].lstrip('0'))
return 0
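# e.g. (illustrative): _isnan("NaN") == (1, 0, ''), _isnan("-nan") == (1, 1, ''),
# _isnan("sNaN123") == (2, 0, '123'), and anything non-NaN returns 0.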
##### Setup Specific Contexts ################################
# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
DefaultContext = Context(
prec=28, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
_rounding_decision=ALWAYS_ROUND,
Emax=999999999,
Emin=-999999999,
capitals=1
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### Useful Constants (internal use only) ####################
#Reusable defaults
Inf = Decimal('Inf')
negInf = Decimal('-Inf')
#Infsign[sign] is infinity w/ that sign
Infsign = (Inf, negInf)
NaN = Decimal('NaN')
##### crud for parsing strings #################################
import re
# There's an optional sign at the start, and an optional exponent
# at the end. The exponent has an optional sign and at least one
# digit. In between, must have either at least one digit followed
# by an optional fraction, or a decimal point followed by at least
# one digit. Yuck.
_parser = re.compile(r"""
# \s*
(?P<sign>[-+])?
(
(?P<int>\d+) (\. (?P<frac>\d*))?
|
\. (?P<onlyfrac>\d+)
)
([eE](?P<exp>[-+]? \d+))?
# \s*
$
""", re.VERBOSE).match #Uncomment the \s* to allow leading or trailing spaces.
del re
# return sign, n, p s.t. float string value == -1**sign * n * 10**p exactly
def _string2exact(s):
m = _parser(s)
if m is None:
raise ValueError("invalid literal for Decimal: %r" % s)
if m.group('sign') == "-":
sign = 1
else:
sign = 0
exp = m.group('exp')
if exp is None:
exp = 0
else:
exp = int(exp)
intpart = m.group('int')
if intpart is None:
intpart = ""
fracpart = m.group('onlyfrac')
else:
fracpart = m.group('frac')
if fracpart is None:
fracpart = ""
exp -= len(fracpart)
mantissa = intpart + fracpart
tmp = map(int, mantissa)
backup = tmp
while tmp and tmp[0] == 0:
del tmp[0]
# It's a zero
if not tmp:
if backup:
return (sign, tuple(backup), exp)
return (sign, (0,), exp)
mantissa = tuple(tmp)
return (sign, mantissa, exp)
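# Worked examples for _string2exact (illustrative):
#   _string2exact("-1.23E+2") == (1, (1, 2, 3), 0)  # exp 2 minus 2 fraction digits
#   _string2exact("0.05") == (0, (5,), -2)          # leading zeros stripped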
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
| lgpl-3.0 |
Dugy/wesnoth-names | data/tools/unit_tree/html_output.py | 4 | 44552 | #encoding: utf8
import os, gettext, time, copy, sys, re
import traceback
import unit_tree.helpers as helpers
import wesnoth.wmlparser3 as wmlparser3
pics_location = "../../pics"
html_header = '''
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link rel="stylesheet" href=\"%(path)sstyle.css\" type=\"text/css\"/>
<script type="text/javascript" src="%(path)s/menu.js"></script>
<title>%(title)s</title>
</head>
<body><div>'''.strip()
top_bar = '''
<div class="header">
<a href="http://www.wesnoth.org">
<img src="%(path)swesnoth-logo.jpg" alt="Wesnoth logo"/>
</a>
</div>
<div class="topnav">
<a href="%(path)sindex.html">Wesnoth Units database</a>
</div>'''.strip()
html_footer = '''
<div id="footer">
<p>%(generation_note)s</p>
<p><a href="http://wiki.wesnoth.org/Site_Map">Site map</a></p>
<p><a href="http://www.wesnoth.org/wiki/Wesnoth:Copyrights">Copyright</a> © 2003–2016 The Battle for Wesnoth</p>
<p>Supported by <a href="http://www.jexiste.fr/">Jexiste</a></p>
</div>
</div>
</body></html>
'''.strip()
all_written_html_files = []
error_only_once = {}
def error_message(message):
if message in error_only_once: return
error_only_once[message] = 1
write_error(message)
helpers.error_message = error_message
def reset_errors():
    global error_only_once  # without this, the assignment would only bind a local
    error_only_once = {}
class MyFile:
"""
Thin file wrapper that always writes UTF-8 encoded bytes (a holdover
from the Python 2 port, where encodings were fiddly).
"""
def __init__(self, filename, mode):
self.filename = filename
self.f = open(filename, mode + "b")
def write(self, x):
self.f.write(x.encode("utf8"))
def close(self):
self.f.close()
class Translation:
def __init__(self, localedir, langcode):
self.catalog = {}
self.localedir = localedir
self.langcode = langcode
class Dummy:
def gettext(self, x):
if not x: return ""
caret = x.find("^")
if caret < 0: return x
return x[caret + 1:]
self.dummy = Dummy()
def translate(self, string, textdomain):
if textdomain not in self.catalog:
try:
self.catalog[textdomain] = gettext.translation(
textdomain, self.localedir, [self.langcode])
self.catalog[textdomain].add_fallback(self.dummy)
except IOError:
self.catalog[textdomain] = self.dummy
except AttributeError:
self.catalog[textdomain] = self.dummy
except IndexError:
# not sure why, but this happens within the
# gettext.translation call sometimes
self.catalog[textdomain] = self.dummy
r = self.catalog[textdomain].gettext(string)
return r
class GroupByRace:
def __init__(self, wesnoth, campaign):
self.wesnoth = wesnoth
self.campaign = campaign
def unitfilter(self, unit):
if not self.campaign: return True
return unit.campaigns and self.campaign == unit.campaigns[0]
def groups(self, unit):
return [T(unit.race, "plural_name")]
def group_name(self, group):
if not group: return "None"
return group
class GroupByNothing:
def __init__(self):
pass
def unitfilter(self, unit):
return True
def groups(self, unit):
return ["units"]
def group_name(self, group):
return "units"
class GroupByFaction:
def __init__(self, wesnoth, era):
self.wesnoth = wesnoth
self.era = era
def unitfilter(self, unit):
return self.era in unit.eras
def groups(self, unit):
return [x for x in unit.factions if x[0] == self.era]
def group_name(self, group):
era = self.wesnoth.era_lookup[group[0]]
if group[1]:
faction = era.faction_lookup[group[1]]
name = T(faction, "name")
name = name[name.rfind("=") + 1:]
else:
name = "factionless"
return name
global_htmlout = None
def T(tag, att):
if not tag: return "none"
return tag.get_text_val(att, translation = global_htmlout.translate)
class HTMLOutput:
def __init__(self, isocode, output, addon, campaign, is_era, wesnoth, verbose = False):
global global_htmlout
self.output = output
self.addon = addon
self.campaign = campaign
self.is_era = is_era
self.verbose = verbose
self.target = "index.html"
self.wesnoth = wesnoth
self.forest = None
self.translation = Translation(options.transdir, isocode)
self.isocode = isocode
global_htmlout = self
def translate(self, string, domain):
return self.translation.translate(string, domain)
def analyze_units(self, grouper, add_parents):
"""
This takes all units belonging to a campaign, then groups them either
by race or faction, and creates an advancements tree out of it.
"""
# Build an advancement tree forest of all units.
forest = self.forest = helpers.UnitForest()
units_added = {}
for uid, u in list(self.wesnoth.unit_lookup.items()):
if u.hidden: continue
if grouper.unitfilter(u):
forest.add_node(helpers.UnitNode(u))
units_added[uid] = u
#print(" %d/%d units" % (len(units_added), len(self.wesnoth.unit_lookup)))
# Always add any child units, even if they have been filtered out..
while units_added:
new_units_added = {}
for uid, u in list(units_added.items()):
for auid in u.advance:
if not auid in units_added:
try:
au = self.wesnoth.unit_lookup[auid]
except KeyError:
error_message(
"Warning: Unit %s not found as advancement of %s\n" %
(auid, repr(uid)))
continue
forest.add_node(helpers.UnitNode(au))
new_units_added[auid] = au
units_added = new_units_added
if add_parents:
# Also add parent units
added = True
while added:
added = False
for uid, u in list(self.wesnoth.unit_lookup.items()):
if uid in forest.lookup: continue
for auid in u.advance:
if auid in forest.lookup:
forest.add_node(helpers.UnitNode(u))
added = True
break
forest.update()
# Partition trees by race/faction of first unit.
groups = {}
breadth = 0
for tree in list(forest.trees.values()):
u = tree.unit
ugroups = grouper.groups(u)
for group in ugroups:
groups[group] = groups.get(group, []) + [tree]
breadth += tree.breadth
thelist = list(groups.keys())
thelist.sort(key = lambda x: grouper.group_name(x))
rows_count = breadth + len(thelist)
# Create empty grid.
rows = []
for j in range(rows_count):
column = []
for i in range(6):
column.append((1, 1, None))
rows.append(column)
# Sort advancement trees by name of first unit and place into the grid.
def by_name(t):
x = T(t.unit, "name")
if x is None: return ""
return x
def grid_place(nodes, x):
nodes.sort(key = by_name)
for node in nodes:
level = node.unit.level
if level < 0: level = 0
if level > 5: level = 5
rows[x][level] = (1, node.breadth, node)
for i in range(1, node.breadth):
rows[x + i][level] = (0, 0, node)
grid_place(node.children, x)
x += node.breadth
return x
x = 0
for group in thelist:
node = helpers.GroupNode(group)
node.name = grouper.group_name(group)
rows[x][0] = (6, 1, node)
for i in range(1, 6):
rows[x][i] = (0, 0, None)
nodes = groups[group]
x += 1
x = grid_place(nodes, x)
self.unitgrid = rows
return len(forest.lookup)
def write_navbar(self, report_type):
def write(x): self.output.write(x)
all_written_html_files.append((self.isocode, self.output.filename))
languages = self.wesnoth.languages_found
langlist = list(languages.keys())
langlist.sort()
write(top_bar % {"path" : "../../"})
write("""
<div class="navbar">
""")
write("<ul class=\"navbar\">")
def abbrev(name):
abbrev = name[0]
word_separators = [" ", "_", "+", "(", ")"]
for i in range(1, len(name)):
if name[i] in ["+", "(", ")"] or name[i - 1] in word_separators and name[i] not in word_separators:
abbrev += name[i]
return abbrev
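# e.g. (illustrative): abbrev("Dark Adept") == "DA"; the parentheses are
# kept as well, so abbrev("Knalgan Alliance (default)") == "KA(d)".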
def add_menu(id, name, class2=""):
write("""<li class="popuptrigger"
onclick="toggle_menu(this, '""" + id + """', 2)"
onmouseover="toggle_menu(this, '""" + id + """', 1)"
onmouseout="toggle_menu(this, '""" + id + """', 0)">""")
write('<a class="' + class2 + '">' + name + "</a>")
write('<div class="popupmenu" id="' + id + '">')
write("<div>" + name + "</div>")
# We may not have all the required info yet so defer writing the
# campaigns/eras navigation.
# Campaigns
x = self.translate("addon_type^Campaign", "wesnoth")
add_menu("campaigns_menu", x)
write("PLACE CAMPAIGNS HERE\n")
write("</div></li>\n")
# Eras
x = self.translate("Era", "wesnoth")
add_menu("eras_menu", x)
write("PLACE ERAS HERE\n")
write("</div></li>\n")
# Races / Factions
target = self.target
if self.campaign == "units":
target = "mainline.html"
if not self.is_era:
x = self.translate("Race", "wesnoth-lib")
add_menu("races_menu", x)
write("<a href=\"mainline.html\">%s</a><br/>\n" % (
self.translate("all", "wesnoth-editor")))
r = {}, {}
for u in list(self.wesnoth.unit_lookup.values()):
race = u.race
racename = T(race, "plural_name")
m = 1
if u:
m = 0
rname = race.get_text_val("id") if race else "none"
if not rname:
rname = "none"
r[m][racename] = rname
racenames = sorted(r[0].items())
if list(r[1].items()):
racenames += [("-", "-")] + sorted(r[1].items())
for racename, rid in racenames:
if racename == "-":
write(" -<br/>")
else:
write(" <a href=\"%s#%s\">%s</a><br/>" % (
target, racename, racename))
write("</div></li>\n")
else:
x = self.translate("Factions", "wesnoth-help")
add_menu("races_menu", x)
for row in self.unitgrid:
for column in range(6):
hspan, vspan, un = row[column]
if not un: continue
if isinstance(un, helpers.GroupNode):
html = "../%s/%s.html" % (
self.isocode, self.campaign)
write(" <a href=\"%s#%s\">%s</a><br/>" % (
html, un.name, un.name))
write("</div></li>\n")
# Add entries for the races also to the navbar itself.
if not self.is_era:
class Entry: pass
races = {}
for uid, u in list(self.wesnoth.unit_lookup.items()):
if self.campaign != "units":
if self.campaign not in u.campaigns: continue
if u.race:
racename = T(u.race, "plural_name")
else:
racename = "none"
runits = races.get(racename, [])
runits.append(uid)
races[racename] = runits
racelist = sorted(races.keys())
got_menu = False
menuid = 0
for r in racelist:
if not r: continue
if got_menu: write("</div></li>\n")
add_menu("units_menu" + str(menuid), r, "unitmenu")
menuid += 1
got_menu = True
c = self.campaign
if c == "units": c = "mainline"
write("<a href=\"%s#%s\">%s</a><br/>" % (
target, r, r))
for uid in races[r]:
un = self.wesnoth.unit_lookup[uid]
if un.hidden: continue
if "mainline" in un.campaigns: addon = "mainline"
else: addon = self.addon
link = "../../%s/%s/%s.html" % (addon, self.isocode, uid)
name = self.wesnoth.get_unit_value(un,
"name", translation=self.translation.translate)
if not name:
error_message("Warning: Unit uid=" + uid + " has no name.\n")
name = uid
write("<a href=\"" + link + "\">" + name + "</a><br />")
if got_menu: write("</div></li>\n")
# Languages
x = self.translate("Language", "wesnoth")
add_menu("languages_menu", x)
col = 0
maxcol = len(langlist) - 1
write("<table>")
write("<tr>")
for lang in langlist:
col += 1
write("<td>")
labb = lang
#underscore = labb.find("_")
#if underscore > 0: labb = labb[:underscore]
if self.addon == "mainline":
write(" <a title=\"%s\" href=\"../%s/%s\">%s</a><br/>\n" % (
languages[lang], lang, self.target,
labb))
else:
write(" <a title=\"%s\" href=\"../%s/%s\">%s</a><br/>\n" % (
languages[lang], lang, "mainline.html",
labb))
write("</td>")
if col % 5 == 0:
if col < maxcol: write("</tr><tr>")
write("</tr>")
write("</table>")
write("</div></li>\n")
write("<li><div> </div></li>")
write("<li><div> </div></li>")
write('<li><a class="unitmenu" href="../../overview.html">Overview</a></li>')
write("</ul>\n")
write("</div>\n")
def pic(self, u, x, recursion = 0):
if recursion >= 4:
error_message(
"Warning: Cannot find image for unit %s(%s).\n" % (
u.get_text_val("id"), x.name.decode("utf8")))
return None, None
image = self.wesnoth.get_unit_value(x, "image")
portrait = x.get_all(tag="portrait")
if not portrait:
bu = self.wesnoth.get_base_unit(u)
if bu:
portrait = bu.get_all(tag="portrait")
if portrait:
portrait = portrait[0].get_text_val("image")
if not image:
if x.name == b"female":
baseunit = self.wesnoth.get_base_unit(u)
if baseunit:
female = baseunit.get_all(tag="female")
return self.pic(u, female[0], recursion = recursion + 1)
else:
return self.pic(u, u, recursion = recursion + 1)
error_message(
"Warning: Missing image for unit %s(%s).\n" % (
u.get_text_val("id"), x.name.decode("utf8")))
return None, None
icpic = image_collector.add_image_check(self.addon, image)
if not icpic.ipath:
error_message("Warning: No picture %s for unit %s.\n" %
(image, u.get_text_val("id")))
picname = icpic.id_name
image = os.path.join(pics_location, picname)
if portrait:
picname = image_collector.add_image(self.addon, portrait,
no_tc=True)
portrait = os.path.join(pics_location, picname)
return image, portrait
def get_abilities(self, u):
anames = []
already = {}
for abilities in u.get_all(tag="abilities"):
try: c = abilities.get_all()
except AttributeError: c = []
for ability in c:
try:
id = ability.get_text_val("id")
except AttributeError as e:
error_message("Error: Ignoring ability " + ability.debug())
continue
if id in already: continue
already[id] = True
name = T(ability, "name")
if not name: name = id
if not name: name = ability.name.decode("utf8")
anames.append(name)
return anames
def get_recursive_attacks(self, this_unit):
def copy_attributes(copy_from, copy_to):
for c in copy_from.data:
if isinstance(c, wmlparser3.AttributeNode):
copy_to.data.append(c)
# Use attacks of base_units as base, if we have one.
base_unit = self.wesnoth.get_base_unit(this_unit)
attacks = []
if base_unit:
attacks = copy.deepcopy(self.get_recursive_attacks(base_unit))
base_attacks_count = len(attacks)
for i, attack in enumerate(this_unit.get_all(tag="attack")):
# Attack merging is order based.
if i < base_attacks_count:
copy_attributes(attack, attacks[i])
else:
attacks.append(attack)
return attacks
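# Illustrative: if the base unit defines two [attack] tags (say sword, then
# bow) and this unit defines one, its attributes are merged into the sword
# entry (index 0); tags beyond the base unit's count are appended as new attacks.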
def write_units(self):
def write(x): self.output.write(x)
def _(x, c="wesnoth"): return self.translate(x, c)
rows = self.unitgrid
write("<table class=\"units\">\n")
write("<colgroup>")
for i in range(6):
write("<col class=\"col%d\" />" % i)
write("</colgroup>")
pic = image_collector.add_image("general",
"../../../images/misc/leader-crown.png", no_tc=True)
crownimage = os.path.join(pics_location, pic)
ms = None
for row in range(len(rows)):
write("<tr>\n")
for column in range(6):
hspan, vspan, un = rows[row][column]
if vspan:
attributes = ""
if hspan == 1 and vspan == 1:
pass
elif hspan == 1:
attributes += " rowspan=\"%d\"" % vspan
elif vspan == 1:
attributes += " colspan=\"%d\"" % hspan
if un and isinstance(un, helpers.GroupNode):
# Find the current multiplayer side so we can show the
# little crowns..
ms = None
if self.is_era:
try:
eid, fid = un.data
era = self.wesnoth.era_lookup[eid]
if fid:
ms = era.faction_lookup[fid]
except TypeError:
pass
racename = un.name
attributes += " class=\"raceheader\""
write("<td%s>" % attributes)
write("<a name=\"%s\">%s</a>" % (racename, racename))
write("</td>\n")
elif un:
u = un.unit
attributes += " class=\"unitcell\""
write("<td%s>" % attributes)
uid = u.get_text_val("id")
def uval(name):
return self.wesnoth.get_unit_value(u, name,
translation=self.translation.translate)
name = uval("name")
cost = uval("cost")
hp = uval("hitpoints")
mp = uval("movement")
xp = uval("experience")
level = uval("level")
crown = ""
if ms:
if un.id in ms.units:
crown = " ♟"
if un.id in ms.is_leader:
crown = " ♚"
uaddon = "mainline"
if "mainline" not in u.campaigns: uaddon = self.addon
link = "../../%s/%s/%s.html" % (uaddon, self.isocode, uid)
write("<div class=\"i\"><a href=\"%s\" title=\"id=%s\">%s</a>" % (
link, uid, "i"))
write("</div>")
write("<div class=\"l\">L%s%s</div>" % (level, crown))
write("<a href=\"%s\">%s</a><br/>" % (link, name))
write('<div class="pic">')
image, portrait = self.pic(u, u)
write('<a href=\"%s\">' % link)
if crown == " ♚":
write('<div style="background: url(%s)">' % image)
write('<img src="%s" alt="(image)" />' % crownimage)
write("</div>")
else:
write('<img src="%s" alt="(image)" />' % image)
write('</a>\n</div>\n')
write("<div class=\"attributes\">")
write("%s%s<br />" % (_("Cost: ", "wesnoth-help"), cost))
write("%s%s<br />" % (_("HP: "), hp))
write("%s%s<br />" % (_("MP: "), mp))
write("%s%s<br />" % (_("XP: "), xp))
# Write info about abilities.
anames = self.get_abilities(u)
if anames:
write("\n<div style=\"clear:both\">")
write(", ".join(anames))
write("</div>")
# Write info about attacks.
write("\n<div style=\"clear:both\">")
attacks = self.get_recursive_attacks(u)
for attack in attacks:
n = T(attack, "number")
x = T(attack, "damage")
x = "%s - %s" % (x, n)
write("%s " % x)
r = T(attack, "range")
t = T(attack, "type")
write("%s (%s)" % (_(r), _(t)))
s = []
specials = attack.get_all(tag="specials")
if specials:
for special in specials[0].get_all(tag=""):
sname = T(special, "name")
if sname:
s.append(sname)
s = ", ".join(s)
if s: write(" (%s)" % s)
write("<br />")
write("</div>")
write("</div>")
write("</td>\n")
else:
write("<td class=\"empty\"></td>")
write("</tr>\n")
write("</table>\n")
def write_units_tree(self, grouper, title, add_parents):
self.output.write(html_header % {"path": "../../",
"title": title})
n = self.analyze_units(grouper, add_parents)
self.write_navbar("units_tree")
self.output.write("<div class=\"main\">")
self.output.write("<h1>%s</h1>" % title)
self.write_units()
self.output.write('<div id="clear" style="clear:both;"></div>')
self.output.write("</div>")
self.output.write(html_footer % {
"generation_note": "generated on " + time.ctime()})
return n
def write_unit_report(self, output, unit):
def write(x): self.output.write(x)
def _(x, c="wesnoth"): return self.translate(x, c)
def find_attr(what, key):
if unit.movetype:
mtx = unit.movetype.get_all(tag=what)
mty = None
if mtx:
mty = mtx[0].get_text_val(key)
x = unit.get_all(tag=what)
y = None
if x:
y = x[0].get_text_val(key,
translation=self.translation.translate)
if y:
return True, y
if unit.movetype and mty != None:
return False, mty
return False, "-"
def uval(name):
return self.wesnoth.get_unit_value(unit, name,
translation=self.translation.translate)
# Write unit name, picture and description.
uid = unit.get_text_val("id")
uname = uval("name")
display_name = uname
self.output = output
write(html_header % {"path": "../../",
"title": display_name})
self.write_navbar("unit_report")
self.output.write("<div class=\"main\">")
female = unit.get_all(tag="female")
if female:
fname = T(female[0], "name")
if fname and fname != uname:
display_name += "<br/>" + fname
write('<div class="unit-columns">')
write('<div class="unit-column-left">')
write("<h1>%s</h1>\n" % display_name)
write('<div class="pic">')
if female:
mimage, portrait = self.pic(unit, unit)
fimage, fportrait = self.pic(unit, female[0])
if not fimage: fimage = mimage
if not fportrait: fportrait = portrait
write('<img src="%s" alt="(image)" />\n' % mimage)
write('<img src="%s" alt="(image)" />\n' % fimage)
image = mimage
else:
image, portrait = self.pic(unit, unit)
write('<img src="%s" alt="(image)" />\n' % image)
write('</div>\n')
description = uval("description")
# TODO: what is unit_description?
if not description: description = uval("unit_description")
if not description: description = "-"
write("<p>%s</p>\n" % re.sub("\n", "\n<br />", description))
# Base info.
hp = uval("hitpoints")
mp = uval("movement")
xp = uval("experience")
vision = uval("vision")
jamming = uval("jamming")
level = uval("level")
alignment = uval("alignment")
write("<h2>Information</h2>\n")
write("<table class=\"unitinfo\">\n")
write("<tr>\n")
write("<td>%s" % _("Advances from: ", "wesnoth-help"))
write("</td><td>\n")
for pid in self.forest.get_parents(uid):
punit = self.wesnoth.unit_lookup[pid]
if "mainline" in unit.campaigns and "mainline" not in punit.campaigns:
continue
if "mainline" in unit.campaigns: addon = "mainline"
else: addon = self.addon
link = "../../%s/%s/%s.html" % (addon, self.isocode, pid)
name = self.wesnoth.get_unit_value(punit, "name",
translation=self.translation.translate)
write("\n<a href=\"%s\">%s</a>" % (link, name))
write("</td>\n")
write("</tr><tr>\n")
write("<td>%s" % _("Advances to: ", "wesnoth-help"))
write("</td><td>\n")
for cid in self.forest.get_children(uid):
try:
cunit = self.wesnoth.unit_lookup[cid]
if "mainline" in cunit.campaigns: addon = "mainline"
else: addon = self.addon
link = "../../%s/%s/%s.html" % (addon, self.isocode, cid)
if "mainline" in unit.campaigns and "mainline" not in cunit.campaigns:
continue
name = self.wesnoth.get_unit_value(cunit, "name",
translation=self.translation.translate)
except KeyError:
error_message("Warning: Unit %s not found.\n" % cid)
name = cid
if "mainline" in unit.campaigns: continue
link = self.target
write("\n<a href=\"%s\">%s</a>" % (link, name))
write("</td>\n")
write("</tr>\n")
for val, text in [
("cost", _("Cost: ", "wesnoth-help")),
("hitpoints", _("HP: ")),
("movement", _("Movement", "wesnoth-help") + ": "),
("vision", _("Vision", "wesnoth-help") + ": "),
("jamming", _("Jamming", "wesnoth-help") + ":"),
("experience", _("XP: ")),
("level", _("Level") + ": "),
("alignment", _("Alignment: ")),
("id", "ID")]:
x = uval(val)
if not x and val in ("jamming", "vision"): continue
if val == "alignment": x = _(x)
write("<tr>\n")
write("<td>%s</td>" % text)
write("<td class=\"val\">%s</td>" % x)
write("</tr>\n")
# Write info about abilities.
anames = self.get_abilities(unit)
write("<tr>\n")
write("<td>%s</td>" % _("Abilities: ", "wesnoth-help"))
write("<td class=\"val\">" + (", ".join(anames)) + "</td>")
write("</tr>\n")
write("</table>\n")
# Write info about attacks.
write("<h2>" + _("unit help^Attacks", "wesnoth-help") + " <small>(damage - count)</small></h2> \n")
write("<table class=\"unitinfo attacks\">\n")
write('<colgroup><col class="col0" /><col class="col1" /><col class="col2" /><col class="col3" /><col class="col4" /></colgroup>')
attacks = self.get_recursive_attacks(unit)
for attack in attacks:
write("<tr>")
aid = attack.get_text_val("name")
aname = T(attack, "description")
icon = attack.get_text_val("icon")
if not icon:
icon = "attacks/%s.png" % aid
image_add = image_collector.add_image_check(self.addon,
icon, no_tc = True)
if not image_add.ipath:
error_message("Error: No attack icon '%s' found for '%s'.\n" % (
icon, uid))
icon = os.path.join(pics_location, "unit$elves-wood$shaman.png")
else:
icon = os.path.join(pics_location, image_add.id_name)
write("<td><img src=\"%s\" alt=\"(image)\"/></td>" % icon)
write("<td><b>%s</b>" % aname)
r = T(attack, "range")
write("<br/>%s</td>" % _(r))
n = attack.get_text_val("number")
x = attack.get_text_val("damage")
x = "%s - %s" % (x, n)
write("<td><i>%s</i>" % x)
t = T(attack, "type")
write("<br/>%s</td>" % _(t))
s = []
specials = attack.get_all(tag="specials")
if specials:
for special in specials[0].get_all(tag=""):
sname = T(special, "name")
if sname:
s.append(sname)
else:
error_message(
"Warning: Weapon special %s has no name for %s.\n" % (
special.name.decode("utf8"), uid))
s = "<br/>".join(s)
write("<td>%s</td>" % s)
write("</tr>")
write("</table>\n")
# Write info about resistances.
resistances = [
("blade", "attacks/sword-human.png"),
("pierce", "attacks/spear.png"),
("impact", "attacks/club.png"),
("fire", "attacks/fireball.png"),
("cold", "attacks/iceball.png"),
("arcane", "attacks/faerie-fire.png")]
write("<h2>%s</h2>\n" % _("Resistances: ").strip(" :"))
write("<table class=\"unitinfo resistances\">\n")
write('<colgroup><col class="col0" /><col class="col1" /><col class="col2" /><col class="col3" /><col class="col4" /><col class="col5" /><col class="col6" /><col class="col7" /></colgroup>')
write("<tr>\n")
write("</tr>\n")
row = 0
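        # Lay the resistances out two per table row: a <tr> opens on even
        # entries; odd entries get a spacer cell and close the row.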
for rid, ricon in resistances:
special, r = find_attr("resistance", rid)
if r == "-": r = 100
try: r = "<i>%d%%</i>" % (100 - int(r))
except ValueError:
error_message("Warning: Invalid resistance %s for %s.\n" % (
r, uid))
rcell = "td"
if special: rcell += ' class="special"'
if row % 2 == 0: write("<tr>\n")
else: write("<td></td>")
picname = image_collector.add_image(self.addon, ricon,
no_tc = True)
icon = os.path.join(pics_location, picname)
write("<td><img src=\"%s\" alt=\"(icon)\" /></td>\n" % (icon, ))
write("<th>%s</th><td class=\"num\">%s</td>\n" % (_(rid), r))
if row % 2 == 1: write("</tr>\n")
row += 1
write("</table>\n")
# end left column
write('</div>')
write('<div class="unit-column-right">')
for si in range(2):
if si and not female: break
if si:
sportrait = fportrait
simage = fimage
else:
simage = image
sportrait = portrait
style = "background-image: url(%s);" % simage
write('<div class="portrait">')
write('<div style="%s"> </div>' % style)
if portrait:
write('<img src="%s" alt="(portrait)" />\n' % sportrait)
write('</div>')
# Write info about movement costs and terrain defense.
write("<h2>" + _("Terrain", "wesnoth-help") + "</h2>\n")
write("<table class=\"unitinfo terrain\">\n")
write('<colgroup><col class="col0" /><col class="col1" /><col class="col2" /><col class="col3" /><col class="col4" /></colgroup>')
write("<tr><th colspan=\"2\"></th><th colspan=\"2\">%s</th></tr>\n" % (
_("Movement Cost", "wesnoth-help")))
write("<tr><th colspan=\"2\">%s</th><th></th><th class=\"numheader\">%s</th></tr>\n" % (
_("Terrain", "wesnoth-help"), _("Defense", "wesnoth-help")))
terrains = self.wesnoth.terrain_lookup
terrainlist = []
already = {}
for tstring, t in list(terrains.items()):
tid = t.get_text_val("id")
if tid in ["off_map", "off_map2", "fog", "shroud", "impassable",
"void", "rails"]: continue
if t.get_all(att="aliasof"): continue
if tid in already: continue
already[tid] = 1
name = T(t, "name")
ticon = t.get_text_val("symbol_image")
if not ticon:
ticon = t.get_text_val("icon_image")
# Use nice images for known mainline terrain types
if tid == "fungus": ticon = "forest/mushrooms-tile"
elif tid == "cave": ticon = "cave/floor6"
elif tid == "sand": ticon = "sand/beach"
elif tid == "reef": ticon = "water/reef-tropical-tile"
elif tid == "hills": ticon = "hills/regular"
elif tid == "swamp_water": ticon = "swamp/water-tile"
elif tid == "shallow_water": ticon = "water/coast-tile"
elif tid == "castle": ticon = "castle/castle-tile"
elif tid == "mountains": ticon = "mountains/snow-tile"
elif tid == "deep_water": ticon = "water/ocean-tile"
elif tid == "flat": ticon = "grass/green-symbol"
elif tid == "forest": ticon = "forest/pine-tile"
elif tid == "frozen": ticon = "frozen/ice"
elif tid == "village": ticon = "village/human-tile"
elif tid == "impassable": ticon = "void/void"
elif tid == "unwalkable": ticon = "unwalkable/lava"
elif tid == "rails": ticon = "misc/rails-ne-sw"
if ticon:
terrainlist.append((name, tid, ticon))
else:
error_message("Terrain " + tid + " has no symbol_image\n")
terrainlist.sort()
for tname, tid, ticon in terrainlist:
not_from_race, c = find_attr("movement_costs", tid)
ccell = "td"
if c == "99": ccell += ' class="grayed"'
dcell = "td"
not_from_race, d = find_attr("defense", tid)
if d == "-": d = 100
try:
d = int(d)
# negative defense has something to do with best defense if
# there's multiple terrain types
if d < 0: d = -d
d = "%d%%" % (100 - d)
except ValueError:
error_message("Warning: Invalid defense %s for %s.\n" % (
d, uid))
write("<tr>\n")
picname = image_collector.add_image(self.addon,
"terrain/" + ticon + ".png", no_tc=True)
icon = os.path.join(pics_location, picname)
write("<td><img src=\"%s\" alt=\"(icon)\" /></td>\n" % (icon, ))
write("<td>%s</td><%s><i>%s</i></td><%s class=\"num\"><i>%s</i></td>\n" % (
tname, ccell, c, dcell, d))
write("</tr>\n")
write("</table>\n")
write('</div>') # right column
write('</div>') # columns parent
self.output.write('<div id="clear" style="clear:both;"></div>')
write('</div>') # main
self.output.write(html_footer % {
"generation_note": "generated on " + time.ctime()})
def generate_campaign_report(addon, isocode, campaign, wesnoth):
if campaign:
cid = campaign.get_text_val("id")
else:
cid = "mainline"
if not cid: cid = addon + "_" + campaign.get_text_val("define")
print(("campaign " + addon + " " + cid + " " + isocode))
path = os.path.join(options.output, addon, isocode)
if not os.path.isdir(path): os.mkdir(path)
output = MyFile(os.path.join(path, "%s.html" % cid), "w")
html = HTMLOutput(isocode, output, addon, cid, False, wesnoth)
html.target = "%s.html" % cid
grouper = GroupByRace(wesnoth, cid)
if campaign:
title = campaign.get_text_val("name", translation = html.translate)
else:
title = html.translate("Units", "wesnoth-help")
if not title:
title = cid
n = html.write_units_tree(grouper, title, True)
output.close()
return n
def generate_era_report(addon, isocode, era, wesnoth):
eid = era.get_text_val("id")
print(("era " + addon + " " + eid + " " + isocode))
path = os.path.join(options.output, addon, isocode)
if not os.path.isdir(path): os.mkdir(path)
output = MyFile(os.path.join(path, "%s.html" % eid), "w")
html = HTMLOutput(isocode, output, addon, eid, True, wesnoth)
html.target = "%s.html" % eid
grouper = GroupByFaction(wesnoth, eid)
ename = era.get_text_val("name", translation = html.translate)
n = html.write_units_tree(grouper, ename, False)
output.close()
return n
def generate_single_unit_reports(addon, isocode, wesnoth):
path = os.path.join(options.output, addon, isocode)
if not os.path.isdir(path): os.mkdir(path)
html = HTMLOutput(isocode, None, addon, "units", False, wesnoth)
grouper = GroupByNothing()
html.analyze_units(grouper, True)
for uid, unit in list(wesnoth.unit_lookup.items()):
if unit.hidden: continue
if "mainline" in unit.campaigns and addon != "mainline": continue
try:
htmlname = "%s.html" % uid
filename = os.path.join(path, htmlname)
# We probably can come up with something better.
if os.path.exists(filename):
age = time.time() - os.path.getmtime(filename)
# was modified in the last 12 hours - we should be ok
if age < 3600 * 12: continue
except (UnicodeDecodeError, UnicodeEncodeError) as e:
traceback.print_exc()
error_message("Unicode problem: " + repr(path) + " + " + repr(uid) + "\n")
error_message(str(e) + "\n")
continue
output = MyFile(filename, "w")
html.target = "%s.html" % uid
html.write_unit_report(output, unit)
output.close()
def html_postprocess_file(filename, isocode, batchlist):
print(("postprocessing " + repr(filename)))
chtml = ""
ehtml = ""
cids = [[], []]
for addon in batchlist:
for campaign in addon.get("campaigns", []):
if campaign["units"] == "?": continue
if campaign["units"] <= 0: continue
if addon["name"] == "mainline": lang = isocode
else: lang = "en_US"
c = addon["name"], campaign["id"], campaign["translations"].get(
lang, campaign["name"]), lang
if addon["name"] == "mainline":
cids[0].append(c)
else:
cids[1].append(c)
for i in range(2):
campaigns = cids[i]
campaigns.sort(key = lambda x: "A" if x[1] == "mainline" else "B" + x[2])
for campaign in campaigns:
addon, cname, campname, lang = campaign
chtml += " <a title=\"%s\" href=\"../../%s/%s/%s.html\">%s</a><br/>\n" % (
campname, addon, lang, cname, campname)
if i == 0 and cids[1]:
chtml += "-<br/>\n"
eids = [[], []]
for addon in batchlist:
for era in addon.get("eras", []):
if era["units"] == "?": continue
if era["units"] <= 0: continue
if addon["name"] == "mainline": lang = isocode
else: lang = "en_US"
e = addon["name"], era["id"], era["translations"].get(
lang, era["name"]), lang
if addon["name"] == "mainline":
eids[0].append(e)
else:
eids[1].append(e)
for i in range(2):
eras = eids[i]
eras.sort(key = lambda x: x[2])
for era in eras:
addon, eid, eraname, lang = era
ehtml += " <a title=\"%s\" href=\"../../%s/%s/%s.html\">%s</a><br/>" % (
eraname, addon, lang, eid, eraname)
if i == 0 and eids[1]:
ehtml += "-<br/>\n"
f = open(filename, "r+b")
html = f.read().decode("utf8")
html = html.replace("PLACE CAMPAIGNS HERE\n", chtml)
html = html.replace("PLACE ERAS HERE\n", ehtml)
f.seek(0)
    f.write(html.encode("utf8"))
    f.truncate()  # drop stale trailing bytes if the new content is shorter
f.close()
def html_postprocess_all(batchlist):
for isocode, filename in all_written_html_files:
html_postprocess_file(filename, isocode, batchlist)
def write_index(out_path):
output = MyFile(os.path.join(out_path, "index.html"), "w")
output.write("""
<html><head>
<meta http-equiv="refresh" content="0;url=mainline/en_US/mainline.html">
</head>
<body>
<a href="mainline/en_US/mainline.html">Redirecting to Wesnoth units database...</a>
</body>
</html>
""")
| gpl-2.0 |
atosatto/ansible | lib/ansible/modules/database/proxysql/proxysql_query_rules.py | 58 | 23125 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: proxysql_query_rules
version_added: "2.3"
author: "Ben Mildren (@bmildren)"
short_description: Modifies query rules using the proxysql admin interface.
description:
- The M(proxysql_query_rules) module modifies query rules using the
proxysql admin interface.
options:
rule_id:
description:
- The unique id of the rule. Rules are processed in rule_id order.
active:
description:
      - A rule with I(active) set to C(False) will be tracked in the database,
        but will never be loaded into the in-memory data structures.
username:
description:
- Filtering criteria matching username. If I(username) is non-NULL, a
query will match only if the connection is made with the correct
username.
schemaname:
description:
- Filtering criteria matching schemaname. If I(schemaname) is non-NULL, a
query will match only if the connection uses schemaname as its default
schema.
flagIN:
description:
- Used in combination with I(flagOUT) and I(apply) to create chains of
rules.
client_addr:
description:
- Match traffic from a specific source.
proxy_addr:
description:
- Match incoming traffic on a specific local IP.
proxy_port:
description:
- Match incoming traffic on a specific local port.
digest:
description:
- Match queries with a specific digest, as returned by
stats_mysql_query_digest.digest.
match_digest:
description:
- Regular expression that matches the query digest. The dialect of
regular expressions used is that of re2 - https://github.com/google/re2
match_pattern:
description:
- Regular expression that matches the query text. The dialect of regular
expressions used is that of re2 - https://github.com/google/re2
negate_match_pattern:
description:
- If I(negate_match_pattern) is set to C(True), only queries not matching
the query text will be considered as a match. This acts as a NOT
operator in front of the regular expression matching against
match_pattern.
flagOUT:
description:
      - Used in combination with I(flagIN) and I(apply) to create chains of rules.
When set, I(flagOUT) signifies the I(flagIN) to be used in the next
chain of rules.
replace_pattern:
description:
- This is the pattern with which to replace the matched pattern. Note
that this is optional, and when omitted, the query processor will only
cache, route, or set other parameters without rewriting.
destination_hostgroup:
description:
- Route matched queries to this hostgroup. This happens unless there is a
started transaction and the logged in user has
I(transaction_persistent) set to C(True) (see M(proxysql_mysql_users)).
cache_ttl:
description:
- The number of milliseconds for which to cache the result of the query.
Note in ProxySQL 1.1 I(cache_ttl) was in seconds.
timeout:
description:
- The maximum timeout in milliseconds with which the matched or rewritten
        query should be executed. If a query runs for longer than the specified
threshold, the query is automatically killed. If timeout is not
specified, the global variable mysql-default_query_timeout applies.
retries:
description:
- The maximum number of times a query needs to be re-executed in case of
detected failure during the execution of the query. If retries is not
specified, the global variable mysql-query_retries_on_failure applies.
delay:
description:
- Number of milliseconds to delay the execution of the query. This is
        essentially a throttling and QoS mechanism, and provides a way to give
        priority to some queries over others. This value is added to the
mysql-default_query_delay global variable that applies to all queries.
mirror_flagOUT:
description:
      - Enables query mirroring. If set, I(mirror_flagOUT) can be used to
        evaluate the mirrored query against the specified chain of rules.
mirror_hostgroup:
description:
      - Enables query mirroring. If set, I(mirror_hostgroup) can be used to
mirror queries to the same or different hostgroup.
error_msg:
description:
- Query will be blocked, and the specified error_msg will be returned to
the client.
log:
description:
- Query will be logged.
apply:
description:
- Used in combination with I(flagIN) and I(flagOUT) to create chains of
        rules. Setting I(apply) to C(True) signifies the last rule to be applied.
comment:
description:
- Free form text field, usable for a descriptive comment of the query
rule.
state:
description:
- When C(present) - adds the rule, when C(absent) - removes the rule.
choices: [ "present", "absent" ]
default: present
force_delete:
description:
      - By default we avoid deleting more than one rule in a single batch;
        however, if you need this behaviour and you're not concerned about the
        rules deleted, you can set I(force_delete) to C(True).
default: False
save_to_disk:
description:
- Save mysql host config to sqlite db on disk to persist the
configuration.
default: True
load_to_runtime:
description:
- Dynamically load mysql host config to runtime memory.
default: True
login_user:
description:
- The username used to authenticate to ProxySQL admin interface.
default: None
login_password:
description:
- The password used to authenticate to ProxySQL admin interface.
default: None
login_host:
description:
- The host used to connect to ProxySQL admin interface.
default: '127.0.0.1'
login_port:
description:
- The port used to connect to ProxySQL admin interface.
default: 6032
config_file:
description:
- Specify a config file from which login_user and login_password are to
be read.
default: ''
'''
EXAMPLES = '''
---
# This example adds a rule to redirect queries from a specific user to another
# hostgroup; it saves the mysql query rule config to disk, but avoids loading
# the mysql query rules config to runtime (this might be because several
# rules are being added and the user wants to push the config to runtime in a
# single batch using the M(proxysql_manage_config) module). It uses supplied
# credentials to connect to the proxysql admin interface.
- proxysql_query_rules:
login_user: admin
login_password: admin
username: 'guest_ro'
destination_hostgroup: 1
active: 1
retries: 3
state: present
load_to_runtime: False
# This example removes all rules that use the username 'guest_ro', saves the
# mysql query rule config to disk, and dynamically loads the mysql query rule
# config to runtime. It uses credentials in a supplied config file to connect
# to the proxysql admin interface.
- proxysql_query_rules:
config_file: '~/proxysql.cnf'
username: 'guest_ro'
state: absent
force_delete: true
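# The following is an illustrative sketch (the rule ids, pattern and
# hostgroup are made up) of chaining rules with flagIN/flagOUT/apply: the
# first rule tags matching SELECT queries with flagOUT=1 and, because apply
# is false, evaluation continues with rules whose flagIN is 1.
- proxysql_query_rules:
    login_user: admin
    login_password: admin
    rule_id: 10
    active: 1
    match_pattern: "^SELECT"
    flagIN: 0
    flagOUT: 1
    apply: false
    state: present
- proxysql_query_rules:
    login_user: admin
    login_password: admin
    rule_id: 20
    active: 1
    flagIN: 1
    destination_hostgroup: 2
    apply: true
    state: present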
'''
RETURN = '''
stdout:
    description: The mysql query rule modified or removed from proxysql
returned: On create/update will return the newly modified rule, in all
other cases will return a list of rules that match the supplied
criteria.
type: dict
"sample": {
"changed": true,
"msg": "Added rule to mysql_query_rules",
"rules": [
{
"active": "0",
"apply": "0",
"cache_ttl": null,
"client_addr": null,
"comment": null,
"delay": null,
"destination_hostgroup": 1,
"digest": null,
"error_msg": null,
"flagIN": "0",
"flagOUT": null,
"log": null,
"match_digest": null,
"match_pattern": null,
"mirror_flagOUT": null,
"mirror_hostgroup": null,
"negate_match_pattern": "0",
"proxy_addr": null,
"proxy_port": null,
"reconnect": null,
"replace_pattern": null,
"retries": null,
"rule_id": "1",
"schemaname": null,
"timeout": null,
"username": "guest_ro"
}
],
"state": "present"
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.mysql import mysql_connect
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import iteritems
try:
import MySQLdb
import MySQLdb.cursors
except ImportError:
MYSQLDB_FOUND = False
else:
MYSQLDB_FOUND = True
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if not MYSQLDB_FOUND:
module.fail_json(
msg="the python mysqldb module is required"
)
def save_config_to_disk(cursor):
cursor.execute("SAVE MYSQL QUERY RULES TO DISK")
return True
def load_config_to_runtime(cursor):
cursor.execute("LOAD MYSQL QUERY RULES TO RUNTIME")
return True
class ProxyQueryRule(object):
def __init__(self, module):
self.state = module.params["state"]
self.force_delete = module.params["force_delete"]
self.save_to_disk = module.params["save_to_disk"]
self.load_to_runtime = module.params["load_to_runtime"]
config_data_keys = ["rule_id",
"active",
"username",
"schemaname",
"flagIN",
"client_addr",
"proxy_addr",
"proxy_port",
"digest",
"match_digest",
"match_pattern",
"negate_match_pattern",
"flagOUT",
"replace_pattern",
"destination_hostgroup",
"cache_ttl",
"timeout",
"retries",
"delay",
"mirror_flagOUT",
"mirror_hostgroup",
"error_msg",
"log",
"apply",
"comment"]
self.config_data = dict((k, module.params[k])
for k in config_data_keys)
def check_rule_pk_exists(self, cursor):
query_string = \
"""SELECT count(*) AS `rule_count`
FROM mysql_query_rules
WHERE rule_id = %s"""
query_data = \
[self.config_data["rule_id"]]
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['rule_count']) > 0)
def check_rule_cfg_exists(self, cursor):
query_string = \
"""SELECT count(*) AS `rule_count`
FROM mysql_query_rules"""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\n WHERE " + col + " = %s"
else:
query_string += "\n AND " + col + " = %s"
if cols > 0:
cursor.execute(query_string, query_data)
else:
cursor.execute(query_string)
check_count = cursor.fetchone()
return int(check_count['rule_count'])
def get_rule_config(self, cursor, created_rule_id=None):
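        # With created_rule_id, fetch that single rule; otherwise build a
        # WHERE clause from every non-NULL config field and return all
        # matching rules.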
query_string = \
"""SELECT *
FROM mysql_query_rules"""
if created_rule_id:
query_data = [created_rule_id, ]
query_string += "\nWHERE rule_id = %s"
cursor.execute(query_string, query_data)
rule = cursor.fetchone()
else:
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\n WHERE " + col + " = %s"
else:
query_string += "\n AND " + col + " = %s"
if cols > 0:
cursor.execute(query_string, query_data)
else:
cursor.execute(query_string)
rule = cursor.fetchall()
return rule
def create_rule_config(self, cursor):
query_string = \
"""INSERT INTO mysql_query_rules ("""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
query_string += "\n" + col + ","
query_string = query_string[:-1]
query_string += \
(")\n" +
"VALUES (" +
"%s ," * cols)
query_string = query_string[:-2]
query_string += ")"
cursor.execute(query_string, query_data)
new_rule_id = cursor.lastrowid
return True, new_rule_id
def update_rule_config(self, cursor):
query_string = """UPDATE mysql_query_rules"""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None and col != "rule_id":
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\nSET " + col + "= %s,"
else:
query_string += "\n " + col + " = %s,"
query_string = query_string[:-1]
query_string += "\nWHERE rule_id = %s"
query_data.append(self.config_data["rule_id"])
cursor.execute(query_string, query_data)
return True
def delete_rule_config(self, cursor):
query_string = \
"""DELETE FROM mysql_query_rules"""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\n WHERE " + col + " = %s"
else:
query_string += "\n AND " + col + " = %s"
if cols > 0:
cursor.execute(query_string, query_data)
else:
cursor.execute(query_string)
check_count = cursor.rowcount
return True, int(check_count)
def manage_config(self, cursor, state):
if state:
if self.save_to_disk:
save_config_to_disk(cursor)
if self.load_to_runtime:
load_config_to_runtime(cursor)
def create_rule(self, check_mode, result, cursor):
if not check_mode:
result['changed'], new_rule_id = \
self.create_rule_config(cursor)
result['msg'] = "Added rule to mysql_query_rules"
self.manage_config(cursor,
result['changed'])
result['rules'] = \
self.get_rule_config(cursor, new_rule_id)
else:
result['changed'] = True
result['msg'] = ("Rule would have been added to" +
" mysql_query_rules, however" +
" check_mode is enabled.")
def update_rule(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.update_rule_config(cursor)
result['msg'] = "Updated rule in mysql_query_rules"
self.manage_config(cursor,
result['changed'])
result['rules'] = \
self.get_rule_config(cursor)
else:
result['changed'] = True
result['msg'] = ("Rule would have been updated in" +
" mysql_query_rules, however" +
" check_mode is enabled.")
def delete_rule(self, check_mode, result, cursor):
if not check_mode:
result['rules'] = \
self.get_rule_config(cursor)
result['changed'], result['rows_affected'] = \
self.delete_rule_config(cursor)
result['msg'] = "Deleted rule from mysql_query_rules"
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Rule would have been deleted from" +
" mysql_query_rules, however" +
" check_mode is enabled.")
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default="127.0.0.1"),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default="", type='path'),
rule_id=dict(type='int'),
active=dict(type='bool'),
username=dict(type='str'),
schemaname=dict(type='str'),
flagIN=dict(type='int'),
client_addr=dict(type='str'),
proxy_addr=dict(type='str'),
proxy_port=dict(type='int'),
digest=dict(type='str'),
match_digest=dict(type='str'),
match_pattern=dict(type='str'),
negate_match_pattern=dict(type='bool'),
flagOUT=dict(type='int'),
replace_pattern=dict(type='str'),
destination_hostgroup=dict(type='int'),
cache_ttl=dict(type='int'),
timeout=dict(type='int'),
retries=dict(type='int'),
delay=dict(type='int'),
mirror_flagOUT=dict(type='int'),
mirror_hostgroup=dict(type='int'),
error_msg=dict(type='str'),
log=dict(type='bool'),
apply=dict(type='bool'),
comment=dict(type='str'),
state=dict(default='present', choices=['present',
'absent']),
force_delete=dict(default=False, type='bool'),
save_to_disk=dict(default=True, type='bool'),
load_to_runtime=dict(default=True, type='bool')
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
cursor = None
try:
cursor = mysql_connect(module,
login_user,
login_password,
config_file,
cursor_class=MySQLdb.cursors.DictCursor)
except MySQLdb.Error:
e = get_exception()
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % e
)
proxysql_query_rule = ProxyQueryRule(module)
result = {}
result['state'] = proxysql_query_rule.state
if proxysql_query_rule.state == "present":
try:
if not proxysql_query_rule.check_rule_cfg_exists(cursor):
if proxysql_query_rule.config_data["rule_id"] and \
proxysql_query_rule.check_rule_pk_exists(cursor):
proxysql_query_rule.update_rule(module.check_mode,
result,
cursor)
else:
proxysql_query_rule.create_rule(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The rule already exists in" +
" mysql_query_rules and doesn't need to be" +
" updated.")
result['rules'] = \
proxysql_query_rule.get_rule_config(cursor)
except MySQLdb.Error:
e = get_exception()
module.fail_json(
msg="unable to modify rule.. %s" % e
)
elif proxysql_query_rule.state == "absent":
try:
existing_rules = proxysql_query_rule.check_rule_cfg_exists(cursor)
if existing_rules > 0:
if existing_rules == 1 or \
proxysql_query_rule.force_delete:
proxysql_query_rule.delete_rule(module.check_mode,
result,
cursor)
else:
module.fail_json(
msg=("Operation would delete multiple rules" +
" use force_delete to override this")
)
else:
result['changed'] = False
result['msg'] = ("The rule is already absent from the" +
" mysql_query_rules memory configuration")
except MySQLdb.Error:
e = get_exception()
module.fail_json(
msg="unable to remove rule.. %s" % e
)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
hybrid-storage-dev/cinder-client-fs-111t-hybrid-cherry | v2/quota_classes.py | 7 | 1439 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import base
class QuotaClassSet(base.Resource):
@property
def id(self):
"""Needed by base.Resource to self-refresh and be indexed."""
return self.class_name
def update(self, *args, **kwargs):
self.manager.update(self.class_name, *args, **kwargs)
class QuotaClassSetManager(base.Manager):
resource_class = QuotaClassSet
def get(self, class_name):
return self._get("/os-quota-class-sets/%s" % (class_name),
"quota_class_set")
def update(self, class_name, **updates):
body = {'quota_class_set': {'class_name': class_name}}
for update in updates:
body['quota_class_set'][update] = updates[update]
self._update('/os-quota-class-sets/%s' % (class_name), body)
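# Illustrative usage sketch (hypothetical; assumes an authenticated cinder
# `client` object -- this manager is normally reached as
# client.quota_classes):
#
#   manager = QuotaClassSetManager(client)
#   quota_class = manager.get('default')  # GET /os-quota-class-sets/default
#   quota_class.update(volumes=20, gigabytes=2000)  # update its limits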
| apache-2.0 |
jmschrei/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
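    """Reference Passive-Aggressive implementation (Crammer et al., 2006).
    PA-I uses step = min(C, loss / ||x||^2); PA-II uses
    step = loss / (||x||^2 + 1 / (2 * C)).
    """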
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
kivhift/qmk | src/commands/help.py | 1 | 1442 | #
# Copyright (c) 2009-2012 Joshua Hughes <kivhift@gmail.com>
#
import atexit
import os
import tempfile
import urllib
import webbrowser
import qmk
class HelpCommand(qmk.Command):
'''
View help for all available commands. A new tab will be opened in the
default web browser that contains the help for all of the commands that are
registered.
'''
def __init__(self):
self._name = 'help'
self._help = self.__doc__
h, self.__filename = tempfile.mkstemp(suffix = '.html',
prefix = 'qmkhelp')
os.close(h)
atexit.register(os.remove, self.__filename)
def action(self, arg):
# For now, ignore help requests for specific commands.
# if arg is not None: pass
f = file(self.__filename, 'wb')
f.write('<html><head><title>QMK Help</title></head><body>')
f.write('<h1>QMK Command Help</h1>')
cm = qmk.CommandManager()
f.write('<table border="1"><tr><th>Name</th><th>Help</th></tr>')
for name in cm.commandNames():
cmd = cm.command(name)
ht = cmd.help
f.write('<tr><td><pre>%s</pre></td><td><pre>%s</pre></td></tr>' % (
name, ht.encode('ascii', 'xmlcharrefreplace')))
f.write('</table></body></html>\n')
f.close()
webbrowser.open_new_tab('file:%s' % urllib.pathname2url(
f.name))
def commands(): return [ HelpCommand() ]
| mit |
ljgabc/lfs | usr/lib/python2.7/encodings/cp1255.py | 593 | 12722 | """ Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1255',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\u20aa' # 0xA4 -> NEW SHEQEL SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xd7' # 0xAA -> MULTIPLICATION SIGN
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xf7' # 0xBA -> DIVISION SIGN
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\u05b0' # 0xC0 -> HEBREW POINT SHEVA
u'\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL
u'\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH
u'\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS
u'\u05b4' # 0xC4 -> HEBREW POINT HIRIQ
u'\u05b5' # 0xC5 -> HEBREW POINT TSERE
u'\u05b6' # 0xC6 -> HEBREW POINT SEGOL
u'\u05b7' # 0xC7 -> HEBREW POINT PATAH
u'\u05b8' # 0xC8 -> HEBREW POINT QAMATS
u'\u05b9' # 0xC9 -> HEBREW POINT HOLAM
u'\ufffe' # 0xCA -> UNDEFINED
u'\u05bb' # 0xCB -> HEBREW POINT QUBUTS
u'\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ
u'\u05bd' # 0xCD -> HEBREW POINT METEG
u'\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF
u'\u05bf' # 0xCF -> HEBREW POINT RAFE
u'\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ
u'\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT
u'\u05c2' # 0xD2 -> HEBREW POINT SIN DOT
u'\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ
u'\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
u'\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
u'\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
u'\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH
u'\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM
u'\ufffe' # 0xD9 -> UNDEFINED
u'\ufffe' # 0xDA -> UNDEFINED
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\ufffe' # 0xDF -> UNDEFINED
u'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
u'\u05d1' # 0xE1 -> HEBREW LETTER BET
u'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
u'\u05d3' # 0xE3 -> HEBREW LETTER DALET
u'\u05d4' # 0xE4 -> HEBREW LETTER HE
u'\u05d5' # 0xE5 -> HEBREW LETTER VAV
u'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0xE7 -> HEBREW LETTER HET
u'\u05d8' # 0xE8 -> HEBREW LETTER TET
u'\u05d9' # 0xE9 -> HEBREW LETTER YOD
u'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
u'\u05db' # 0xEB -> HEBREW LETTER KAF
u'\u05dc' # 0xEC -> HEBREW LETTER LAMED
u'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
u'\u05de' # 0xEE -> HEBREW LETTER MEM
u'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0xF0 -> HEBREW LETTER NUN
u'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
u'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0xF4 -> HEBREW LETTER PE
u'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
u'\u05e7' # 0xF7 -> HEBREW LETTER QOF
u'\u05e8' # 0xF8 -> HEBREW LETTER RESH
u'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
u'\u05ea' # 0xFA -> HEBREW LETTER TAV
u'\ufffe' # 0xFB -> UNDEFINED
u'\ufffe' # 0xFC -> UNDEFINED
u'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
u'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
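# Illustrative round-trip sketch (not part of the generated codec; normally
# the codec is used via the registered 'cp1255' name, e.g.
# u'\u05d0'.encode('cp1255')):
if __name__ == '__main__':
    codec = getregentry()
    # Encode HEBREW LETTER ALEF + BET to their cp1255 byte values ...
    encoded, _length = codec.encode(u'\u05d0\u05d1')
    assert encoded == '\xe0\xe1'
    # ... and decode the bytes back to the original unicode text.
    decoded, _length = codec.decode('\xe0\xe1')
    assert decoded == u'\u05d0\u05d1'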
| gpl-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/jupyter_client/tests/test_connect.py | 6 | 4395 | """Tests for kernel connection utilities"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
import nose.tools as nt
from traitlets.config import Config
from jupyter_core.application import JupyterApp
from ipython_genutils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
from ipython_genutils.py3compat import str_to_bytes
from jupyter_client import connect, KernelClient
from jupyter_client.consoleapp import JupyterConsoleApp
from jupyter_client.session import Session
class DummyConsoleApp(JupyterApp, JupyterConsoleApp):
def initialize(self, argv=[]):
JupyterApp.initialize(self, argv=argv)
self.init_connection_file()
sample_info = dict(ip='1.2.3.4', transport='ipc',
shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5,
key=b'abc123', signature_scheme='hmac-md5', kernel_name='python'
)
sample_info_kn = dict(ip='1.2.3.4', transport='ipc',
shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5,
key=b'abc123', signature_scheme='hmac-md5', kernel_name='test'
)
def test_write_connection_file():
with TemporaryDirectory() as d:
cf = os.path.join(d, 'kernel.json')
connect.write_connection_file(cf, **sample_info)
nt.assert_true(os.path.exists(cf))
with open(cf, 'r') as f:
info = json.load(f)
info['key'] = str_to_bytes(info['key'])
nt.assert_equal(info, sample_info)
def test_load_connection_file_session():
"""test load_connection_file() after """
session = Session()
app = DummyConsoleApp(session=Session())
app.initialize(argv=[])
session = app.session
with TemporaryDirectory() as d:
cf = os.path.join(d, 'kernel.json')
connect.write_connection_file(cf, **sample_info)
app.connection_file = cf
app.load_connection_file()
nt.assert_equal(session.key, sample_info['key'])
nt.assert_equal(session.signature_scheme, sample_info['signature_scheme'])
def test_load_connection_file_session_with_kn():
"""test load_connection_file() after """
session = Session()
app = DummyConsoleApp(session=Session())
app.initialize(argv=[])
session = app.session
with TemporaryDirectory() as d:
cf = os.path.join(d, 'kernel.json')
connect.write_connection_file(cf, **sample_info_kn)
app.connection_file = cf
app.load_connection_file()
nt.assert_equal(session.key, sample_info_kn['key'])
nt.assert_equal(session.signature_scheme, sample_info_kn['signature_scheme'])
def test_app_load_connection_file():
"""test `ipython console --existing` loads a connection file"""
with TemporaryDirectory() as d:
cf = os.path.join(d, 'kernel.json')
connect.write_connection_file(cf, **sample_info)
app = DummyConsoleApp(connection_file=cf)
app.initialize(argv=[])
for attr, expected in sample_info.items():
if attr in ('key', 'signature_scheme'):
continue
value = getattr(app, attr)
nt.assert_equal(value, expected, "app.%s = %s != %s" % (attr, value, expected))
def test_load_connection_info():
client = KernelClient()
info = {
'control_port': 53702,
'hb_port': 53705,
'iopub_port': 53703,
'ip': '0.0.0.0',
'key': 'secret',
'shell_port': 53700,
'signature_scheme': 'hmac-sha256',
'stdin_port': 53701,
'transport': 'tcp',
}
client.load_connection_info(info)
assert client.control_port == info['control_port']
assert client.session.key.decode('ascii') == info['key']
assert client.ip == info['ip']
def test_find_connection_file():
cfg = Config()
with TemporaryDirectory() as d:
cfg.ProfileDir.location = d
cf = 'kernel.json'
app = DummyConsoleApp(config=cfg, connection_file=cf)
app.initialize()
security_dir = app.runtime_dir
profile_cf = os.path.join(security_dir, cf)
with open(profile_cf, 'w') as f:
f.write("{}")
for query in (
'kernel.json',
'kern*',
'*ernel*',
'k*',
):
nt.assert_equal(connect.find_connection_file(query, path=security_dir), profile_cf)
JupyterApp._instance = None
| gpl-3.0 |
TeamExodus/legacy_kernel_htc_flounder | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
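#
# For example, a hypothetical three-entry config holding the le32 values
# 0, 1 and 0x100 could be generated with:
#
#   python -c "import struct,sys; sys.stdout.write(struct.pack('<3I',0,1,0x100))" \
#       > cxacru-cf.bin
#
# and this script would then print: 0=0 1=1 2=256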
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
dyrock/trafficserver | tests/gold_tests/headers/forwarded.test.py | 3 | 12009 | '''
Test the Forwarded header and related configuration.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
Test.Summary = '''
Test FORWARDED header.
'''
Test.SkipUnless(
Condition.HasCurlFeature('http2'),
Condition.HasCurlFeature('IPv6'),
)
Test.ContinueOnFail = True
testName = "FORWARDED"
server = Test.MakeOriginServer("server", options={'--load': os.path.join(Test.TestDirectory, 'forwarded-observer.py')})
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.no-oride.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-none.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-for.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-ip.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-unknown.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-server-name.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-uuid.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-proto.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-host.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-compact.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-std.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-full.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
# Set up to check the output after the tests have run.
#
forwarded_log_id = Test.Disk.File("forwarded.log")
forwarded_log_id.Content = "forwarded.gold"
def baselineTsSetup(ts, sslPort):
ts.addSSLfile("../remap/ssl/server.pem")
ts.addSSLfile("../remap/ssl/server.key")
ts.Variables.ssl_port = sslPort
ts.Disk.records_config.update({
# 'proxy.config.diags.debug.enabled': 1,
'proxy.config.url_remap.pristine_host_hdr': 1, # Retain Host header in original incoming client request.
'proxy.config.http.cache.http': 0, # Make sure each request is forwarded to the origin server.
'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name.
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.http.server_ports': (
'ipv4:{0} ipv4:{1}:proto=http2;http:ssl ipv6:{0} ipv6:{1}:proto=http2;http:ssl'
.format(ts.Variables.port, ts.Variables.ssl_port))
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map http://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts = Test.MakeATSProcess("ts", select_ports=False)
baselineTsSetup(ts, 4443)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-none.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=none'
)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-for.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=for'
)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-by-ip.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=ip'
)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-by-unknown.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=unknown'
)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-by-server-name.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=serverName'
)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-by-uuid.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=uuid'
)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-proto.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=proto'
)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-host.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=host'
)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-connection-compact.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=compact'
)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-connection-std.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=std'
)
ts.Disk.remap_config.AddLine(
'map http://www.forwarded-connection-full.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=full'
)
# Basic HTTP 1.1 -- No Forwarded by default
tr = Test.AddTestRun()
# Wait for the micro server
tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port))
# Delay on readiness of our ssl ports
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
#
tr.Processes.Default.Command = (
'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts.Variables.port)
)
tr.Processes.Default.ReturnCode = 0
def TestHttp1_1(host):
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://{}'.format(ts.Variables.port, host)
)
tr.Processes.Default.ReturnCode = 0
# Basic HTTP 1.1 -- No Forwarded -- explicit configuration.
#
TestHttp1_1('www.forwarded-none.com')
# Test enabling of each forwarded parameter singly.
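# Illustrative only (not asserted here; the gold file holds the real
# expectations): with several parameters enabled, an RFC 7239 header has the
# shape "Forwarded: for=192.0.2.60;proto=http;by=203.0.113.43".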
TestHttp1_1('www.forwarded-for.com')
# Note: forwarded-observer.py counts on the "by" tests being done in the order below.
TestHttp1_1('www.forwarded-by-ip.com')
TestHttp1_1('www.forwarded-by-unknown.com')
TestHttp1_1('www.forwarded-by-server-name.com')
TestHttp1_1('www.forwarded-by-uuid.com')
TestHttp1_1('www.forwarded-proto.com')
TestHttp1_1('www.forwarded-host.com')
TestHttp1_1('www.forwarded-connection-compact.com')
TestHttp1_1('www.forwarded-connection-std.com')
TestHttp1_1('www.forwarded-connection-full.com')
ts2 = Test.MakeATSProcess("ts2", command="traffic_manager", select_ports=False)
ts2.Variables.port += 1
baselineTsSetup(ts2, 4444)
ts2.Disk.records_config.update({
'proxy.config.url_remap.pristine_host_hdr': 1, # Retain Host header in original incoming client request.
'proxy.config.http.insert_forwarded': 'by=uuid'})
ts2.Disk.remap_config.AddLine(
'map https://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
# Forwarded header with UUID of 2nd ATS.
tr = Test.AddTestRun()
# Delay on readiness of our ssl ports
tr.Processes.Default.StartBefore(Test.Processes.ts2, ready=When.PortOpen(ts2.Variables.ssl_port))
#
tr.Processes.Default.Command = (
'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)
)
tr.Processes.Default.ReturnCode = 0
# Call traffic_ctl to set insert_forwarded
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
'traffic_ctl --debug config set proxy.config.http.insert_forwarded' +
' "for|by=ip|by=unknown|by=servername|by=uuid|proto|host|connection=compact|connection=std|connection=full"'
)
tr.Processes.Default.ForceUseShell = False
tr.Processes.Default.Env = ts2.Env
tr.Processes.Default.ReturnCode = 0
# HTTP 1.1
tr = Test.AddTestRun()
# Delay to give traffic_ctl config change time to take effect.
tr.DelayStart = 15
tr.Processes.Default.Command = (
'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)
)
tr.Processes.Default.ReturnCode = 0
# HTTP 1.0
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
'curl --verbose --ipv4 --http1.0 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)
)
tr.Processes.Default.ReturnCode = 0
# HTTP 1.0 -- Forwarded headers already present
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
"curl --verbose -H 'forwarded:for=0.6.6.6' -H 'forwarded:for=_argh' --ipv4 --http1.0" +
" --proxy localhost:{} http://www.no-oride.com".format(ts2.Variables.port)
)
tr.Processes.Default.ReturnCode = 0
# HTTP 2
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
'curl --verbose --ipv4 --http2 --insecure --header "Host: www.no-oride.com"' +
' https://localhost:{}'.format(ts2.Variables.ssl_port)
)
tr.Processes.Default.ReturnCode = 0
# TLS
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
'curl --verbose --ipv4 --http1.1 --insecure --header "Host: www.no-oride.com" https://localhost:{}'
.format(ts2.Variables.ssl_port)
)
tr.Processes.Default.ReturnCode = 0
# IPv6
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
'curl --verbose --ipv6 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)
)
tr.Processes.Default.ReturnCode = 0
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
'curl --verbose --ipv6 --http1.1 --insecure --header "Host: www.no-oride.com" https://localhost:{}'.format(
ts2.Variables.ssl_port)
)
tr.Processes.Default.ReturnCode = 0
| apache-2.0 |
xclusive36/android_kernel_lge_fx3 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
demisto/content | Packs/WindowsForensics/Scripts/RegistryParse/RegistryParse_test.py | 1 | 1112 | import json
import RegistryParse as reg_parse
def util_load_json(path):
with open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_get_sub_keys():
key = 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList'
folder_output_key = 'Sid'
mock_reg = util_load_json('./test_data/mock_reg_users.json')
expected = util_load_json('./test_data/mock_reg_users_result.json')
actual = reg_parse.get_sub_keys(mock_reg, key, folder_output_key)
for actual_items in actual:
for actual_item in actual_items:
assert actual_item in expected[0] or actual_item in expected[1]
def test_parse_reg_values():
expected = 'C:\\Windows\\ServiceProfiles\\LocalService'
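# Note (illustrative, not from the original test): "hex(2)" is the .reg
# export marker for a REG_EXPAND_SZ value, whose bytes are little-endian
# UTF-16 with a trailing NUL; parse_reg_value is expected to decode that.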
hex_value = 'hex(2):43,00,3a,00,5c,00,57,00,69,00,6e,00,64,00,6f,00,77,\
00,73,00,5c,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,50,00,72,00,6f,00,\
66,00,69,00,6c,00,65,00,73,00,5c,00,4c,00,6f,00,63,00,61,00,6c,00,53,00,65,\
00,72,00,76,00,69,00,63,00,65,00,00,00'
actual = reg_parse.parse_reg_value(hex_value)
assert actual == expected
| mit |
okomestudio/moto | tests/test_s3/test_s3_storageclass.py | 3 | 3415 | from __future__ import unicode_literals
import boto
import boto3
from boto.exception import S3CreateError, S3ResponseError
from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule
import sure # noqa
from botocore.exceptions import ClientError
from datetime import datetime
from nose.tools import assert_raises
from moto import mock_s3_deprecated, mock_s3
@mock_s3
def test_s3_storage_class_standard():
s3 = boto3.client("s3")
s3.create_bucket(Bucket="Bucket")
# add an object to the bucket with standard storage
s3.put_object(Bucket="Bucket", Key="my_key", Body="my_value")
list_of_objects = s3.list_objects(Bucket="Bucket")
list_of_objects['Contents'][0]["StorageClass"].should.equal("STANDARD")
@mock_s3
def test_s3_storage_class_infrequent_access():
s3 = boto3.client("s3")
s3.create_bucket(Bucket="Bucket")
# add an object to the bucket with infrequent access (STANDARD_IA) storage
s3.put_object(Bucket="Bucket", Key="my_key_infrequent", Body="my_value_infrequent", StorageClass="STANDARD_IA")
D = s3.list_objects(Bucket="Bucket")
D['Contents'][0]["StorageClass"].should.equal("STANDARD_IA")
@mock_s3
def test_s3_storage_class_copy():
s3 = boto3.client("s3")
s3.create_bucket(Bucket="Bucket")
s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD")
s3.create_bucket(Bucket="Bucket2")
# the second object is written without an explicit storage class, so it defaults to STANDARD
s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2")
s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="ONEZONE_IA")
list_of_copied_objects = s3.list_objects(Bucket="Bucket2")
# checks that the copy was assigned the new storage class
list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("ONEZONE_IA")
@mock_s3
def test_s3_invalid_copied_storage_class():
s3 = boto3.client("s3")
s3.create_bucket(Bucket="Bucket")
s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD")
s3.create_bucket(Bucket="Bucket2")
s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2", StorageClass="REDUCED_REDUNDANCY")
# Try to copy an object with an invalid storage class
with assert_raises(ClientError) as err:
s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="STANDARD2")
e = err.exception
e.response["Error"]["Code"].should.equal("InvalidStorageClass")
e.response["Error"]["Message"].should.equal("The storage class you specified is not valid")
@mock_s3
def test_s3_invalid_storage_class():
s3 = boto3.client("s3")
s3.create_bucket(Bucket="Bucket")
# Try to add an object with an invalid storage class
with assert_raises(ClientError) as err:
s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARDD")
e = err.exception
e.response["Error"]["Code"].should.equal("InvalidStorageClass")
e.response["Error"]["Message"].should.equal("The storage class you specified is not valid")
@mock_s3
def test_s3_default_storage_class():
s3 = boto3.client("s3")
s3.create_bucket(Bucket="Bucket")
s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body")
list_of_objects = s3.list_objects(Bucket="Bucket")
# tests that the default storage class is still STANDARD
list_of_objects["Contents"][0]["StorageClass"].should.equal("STANDARD")
| apache-2.0 |
dhermes/gcloud-python | monitoring/noxfile.py | 2 | 4094 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core"))
@nox.session(python="3.7")
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", "black", *LOCAL_DEPS)
session.run(
"black",
"--check",
"google",
"tests",
"docs",
)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
"""
session.install("black")
session.run(
"black",
"google",
"tests",
"docs",
)
@nox.session(python="3.7")
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("mock", "pytest", "pytest-cov")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=97",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=["2.7", "3.5", "3.6", "3.7"])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=["2.7", "3.7"])
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", "../test_utils/")
session.install("-e", ".")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python="3.7")
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=97")
session.run("coverage", "erase")
| apache-2.0 |
aioue/ansible | lib/ansible/plugins/callback/timer.py | 168 | 1125 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from datetime import datetime
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
This callback module tells you how long your plays ran for.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'timer'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
self.start_time = datetime.now()
def days_hours_minutes_seconds(self, runtime):
minutes = (runtime.seconds // 60) % 60
r_seconds = runtime.seconds % 60  # seconds left over after whole minutes (the original subtraction miscounted past one hour)
return runtime.days, runtime.seconds // 3600, minutes, r_seconds
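# Illustrative (not part of the original plugin): a runtime of 1 day,
# 2 hours, 3 minutes and 4 seconds (runtime.days == 1,
# runtime.seconds == 7384) comes back as (1, 2, 3, 4).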
def playbook_on_stats(self, stats):
self.v2_playbook_on_stats(stats)
def v2_playbook_on_stats(self, stats):
end_time = datetime.now()
runtime = end_time - self.start_time
self._display.display("Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(runtime)))
| gpl-3.0 |
brunolimawd/talk-nodebots-roll-out | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 1284 | 100329 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
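# Illustrative example (not part of upstream gyp): NinjaWriter.ExpandSpecial
# below turns '$!PRODUCT_DIR/gen/foo.h' into 'gen/foo.h' when the cwd is
# already the product dir, or into 'out/Debug/gen/foo.h' when product_dir
# is 'out/Debug'.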
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
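# For example (illustrative): Define('FOO=1', 'linux') yields '-DFOO=1';
# values with spaces or quotes come back shell-quoted by QuoteShellArgument.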
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
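# e.g. (illustrative) AddArch('obj/foo.o', 'arm64') returns 'obj/foo.arm64.o'.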
class Target(object):
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here. In this case, we also need to save the compile_deps for the target,
# so that the target that directly depends on the .objs can also depend
# on those.
self.component_objs = None
self.compile_deps = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
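#
# Illustrative example (assumed layout, not from upstream): for a target
# 'targ' defined in foo/bar.gyp and built into out/Debug,
# GypPathToNinja('baz/quux.cc') yields '../../foo/baz/quux.cc' and
# GypPathToUniqueOutput('baz/quux.o') yields 'obj/foo/baz/targ.quux.o'.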
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.hash_for_rules = hash_for_rules
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
assert not os.path.isabs(path_dir), (
"'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
assert not order_only
return None
if len(targets) > 1 or order_only:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
self.ninja.newline()
return targets[0]
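# Illustrative (not upstream): a single input path is returned as-is, while
# two or more paths (or an order_only dependency) are funneled through one
# '<name>.stamp' edge that dependents can wait on.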
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
self.ninja.variable('cc_host', '$cl_' + arch)
self.ninja.variable('cxx_host', '$cl_' + arch)
self.ninja.variable('asm', '$ml_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
elif self.flavor == 'mac' and len(self.archs) > 1:
link_deps = collections.defaultdict(list)
compile_deps = self.target.actions_stamp or actions_depends
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
self.target.compile_deps = compile_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
compile_deps)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
xcassets = self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetToolchainEnv()
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'], self.hash_for_rules)
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
depfile = action.get('depfile', None)
if depfile:
depfile = self.ExpandSpecial(depfile, self.base_to_build)
pool = 'console' if int(action.get('ninja_use_console', 0)) else None
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env, pool,
depfile=depfile)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetToolchainEnv()
all_outputs = []
for rule in rules:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env, pool)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if '${%s}' % var in argument:
needed_variables.add(var)
def cygwin_munge(path):
# pylint: disable=cell-var-from-loop
if is_cygwin:
return path.replace('\\', '/')
return path
inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
# If there are n source files matching the rule, and m additional rule
# inputs, then adding 'inputs' to each build edge written below will
# write m * n inputs. Collapsing reduces this to m + n.
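# Illustrative arithmetic (not upstream): 10 sources with 3 extra rule
# inputs would record 3 * 10 = 30 inputs uncollapsed, but only
# 10 + 3 = 13 with the collapsed stamp.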
sources = rule.get('rule_sources', [])
num_inputs = len(inputs)
if prebuild:
num_inputs += 1
if num_inputs > 2 and len(sources) > 2:
inputs = [self.WriteCollapsedDependencies(
rule['rule_name'], inputs, order_only=prebuild)]
prebuild = []
# For each source file, write an edge that generates all the outputs.
for source in sources:
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
outputs = [self.GypPathToNinja(o, env) for o in outputs]
if self.flavor == 'win':
# WriteNewNinjaRule uses unique_name for creating an rsp file on win.
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetToolchainEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
xcassets = []
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
if os.path.splitext(output)[-1] != '.xcassets':
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource'), \
('binary', isBinary)])
bundle_depends.append(output)
else:
xcassets.append(res)
return xcassets
def WriteMacXCassets(self, xcassets, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources' .xcassets files.
This adds an invocation of 'actool' via the 'mac_tool.py' helper script.
It assumes that the asset catalogs define at least one imageset and
thus an Assets.car file will be generated in the application resources
directory. If this is not the case, then the build will probably be redone
on each invocation of ninja."""
if not xcassets:
return
extra_arguments = {}
settings_to_arg = {
'XCASSETS_APP_ICON': 'app-icon',
'XCASSETS_LAUNCH_IMAGE': 'launch-image',
}
settings = self.xcode_settings.xcode_settings[self.config_name]
for settings_key, arg_name in settings_to_arg.iteritems():
value = settings.get(settings_key)
if value:
extra_arguments[arg_name] = value
partial_info_plist = None
if extra_arguments:
partial_info_plist = self.GypPathToUniqueOutput(
'assetcatalog_generated_info.plist')
extra_arguments['output-partial-info-plist'] = partial_info_plist
outputs = []
outputs.append(
os.path.join(
self.xcode_settings.GetBundleResourceFolder(),
'Assets.car'))
if partial_info_plist:
outputs.append(partial_info_plist)
keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
extra_env = self.xcode_settings.GetPerTargetSettings()
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
bundle_depends.extend(self.ninja.build(
outputs, 'compile_xcassets', xcassets,
variables=[('env', env), ('keys', keys)]))
return partial_info_plist
def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
if partial_info_plist:
intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
info_plist = self.ninja.build(
intermediate_plist, 'merge_infoplist',
[partial_info_plist, info_plist])
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys),
('binary', isBinary)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
asmflags = self.msvs_settings.GetAsmflags(config_name)
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See comment at cc_command for why there's two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
elif self.toolset == 'host':
cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CFLAGS_host', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'asmflags',
map(self.ExpandSpecial, asmflags))
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetToolchainEnv()
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
if self.flavor == 'win':
midl_include_dirs = config.get('midl_include_dirs', [])
midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
midl_include_dirs, config_name)
self.WriteVariableList(ninja_file, 'midl_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in midl_include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
arflags = config.get('arflags', [])
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
self.WriteVariableList(ninja_file, 'arflags',
map(self.ExpandSpecial, arflags))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
build_output = output
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
# TODO(yyanagisawa): more work needed to fix:
# https://code.google.com/p/gyp/issues/detail?id=411
if (spec['type'] in ('shared_library', 'loadable_module') and
not self.is_mac_bundle):
extra_bindings.append(('lib', output))
self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
variables=extra_bindings)
else:
self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
order_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
if target.compile_deps:
order_deps.add(target.compile_deps)
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
map(self.ExpandSpecial, ldflags))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor != 'win':
link_file_list = output
if self.is_mac_bundle:
# 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
# 'Dependency Framework.framework.rsp'
link_file_list = self.xcode_settings.GetWrapperName()
if arch:
link_file_list += '.' + arch
link_file_list += '.rsp'
# If an rspfile contains spaces, ninja surrounds the filename with
# quotes around it and then passes it to open(), creating a file with
# quotes in its name (and when looking for the rsp file, the name
# makes it through bash which strips the quotes) :-/
link_file_list = link_file_list.replace(' ', '_')
extra_bindings.append(
('link_file_list',
gyp.common.EncodePOSIXShellArgument(link_file_list)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if ('/NOENTRY' not in ldflags and
not self.msvs_settings.GetNoImportLibrary(config_name)):
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
order_only=list(order_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be nonzero if any postbuild fails. Run all postbuilds in a
    # subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
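  # For illustration (a sketch only; real postbuild commands come from the
  # gyp file), a non-command-start result looks roughly like:
  #   $ && (export FOO=foo; (cd path/to/gyp && postbuild1 && postbuild2);
  #   G=$$?; ((exit $$G) || rm -rf out/Thing.app) && exit $$G)
  # where '$ ' is ninja's escape for a literal space.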
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar;'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
depfile=None):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
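    # Illustrative example: 'Copying ${source} to $(OutDir)' becomes
    # 'Copying ${source} to _(OutDir)'; only the magic rule variables survive.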
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, depfile=depfile,
restat=True, pool=pool,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
exts = gyp.MSVSUtil.TARGET_TYPE_EXT
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
if pool_size:
return pool_size
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
# VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
# on a 64 GB machine.
mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB
hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
if os.path.exists("/proc/meminfo"):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
          # Allow 8 GiB per link on Linux because Gold is quite memory hungry.
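          # MemTotal is reported in kB, so 8 * (2 ** 20) kB comes to 8 GiB.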
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
  # The rules are:
  # - The priority, from lowest to highest, is gcc/g++, 'make_global_settings'
  #   in gyp, then the environment variables.
  # - If there is no 'make_global_settings' for CC.host/CXX.host and no
  #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be
  #   set to cc/cxx.
if flavor == 'win':
ar = 'lib.exe'
# cc and cxx must be set to the correct architecture by overriding with one
# of cl_x86 or cl_x64 below.
cc = 'UNSET'
cxx = 'UNSET'
ld = 'link.exe'
ld_host = '$ld'
else:
ar = 'ar'
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
ar_host = 'ar'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
clang_cl = None
nm = 'nm'
nm_host = 'nm'
readelf = 'readelf'
readelf_host = 'readelf'
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_root, value)
if key == 'AR.host':
ar_host = os.path.join(build_to_root, value)
if key == 'CC':
cc = os.path.join(build_to_root, value)
if cc.endswith('clang-cl'):
clang_cl = cc
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key == 'NM':
nm = os.path.join(build_to_root, value)
if key == 'NM.host':
nm_host = os.path.join(build_to_root, value)
if key == 'READELF':
readelf = os.path.join(build_to_root, value)
if key == 'READELF.host':
readelf_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
configs = [target_dicts[qualified_target]['configurations'][config_name]
for qualified_target in target_list]
shared_system_includes = None
if not generator_flags.get('ninja_use_custom_environment_files', 0):
shared_system_includes = \
gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
configs, generator_flags)
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, shared_system_includes, OpenOutput)
for arch, path in cl_paths.iteritems():
if clang_cl:
# If we have selected clang-cl, use that instead.
path = clang_cl
command = CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, 'win'))
if clang_cl:
# Use clang-cl to cross-compile for x86 or x86_64.
command += (' -m32' if arch == 'x86' else ' -m64')
master_ninja.variable('cl_' + arch, command)
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', ar)
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('ml_x86', 'ml.exe')
master_ninja.variable('ml_x64', 'ml64.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
if flavor != 'mac':
    # Mac does not use readelf/nm for .TOC generation, so avoid polluting
    # the master ninja with extra unused variables.
master_ninja.variable(
'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
master_ninja.variable(
'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
master_ninja.variable('readelf_host',
GetEnvironFallback(['READELF_host'], readelf_host))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
    # The environment variable could be used in 'make_global_settings', like
    # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)']; transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but rarely errors out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$midl_includes $idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $out',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $arflags $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $arflags $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
  # The resulting string leaves an uninterpolated %(suffix)s which
  # is used in the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ $readelf -d $lib | grep SONAME ; '
'$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
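    # Illustratively (a sketch; ninja expands the $-variables at build time),
    # the solink command below runs roughly:
    #   if [ ! -e $lib -o ! -e $lib.TOC ]; then
    #     $ld -shared $ldflags -o $lib -Wl,-soname=$soname @$link_file_list &&
    #     { $readelf -d $lib | grep SONAME; $nm -gD -f p $lib | cut -f1-2 -d' '; } > $lib.TOC
    #   else relink to $lib.tmp and replace $lib.TOC only if the TOC changed
    #   fi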
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content=
'-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
master_ninja.rule(
'solipo',
description='SOLIPO $out, POSTBUILDS',
command=(
'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
'%(extract_toc)s > $lib.TOC'
% { 'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; '
'else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then '
'mv $lib.tmp $lib.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '@$link_file_list$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
master_ninja.rule(
'merge_infoplist',
description='MERGE INFOPLISTS $in',
command='$env ./gyp-mac-tool merge-info-plist $out $in')
master_ninja.rule(
'compile_xcassets',
description='COMPILE XCASSETS $in',
command='$env ./gyp-mac-tool compile-xcassets $keys $in')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
  # Short names of targets that were skipped because they didn't contain
  # anything interesting.
  # NOTE: there may be overlap between this and non_empty_target_names.
empty_target_names = set()
# Set of non-empty short target names.
  # NOTE: there may be overlap between this and empty_target_names.
non_empty_target_names = set()
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
# If build_file is a symlink, we must not follow it because there's a chance
# it could point to a path above toplevel_dir, and we cannot correctly deal
# with that case at the moment.
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
False)
qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
toolset)
hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create files for ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
non_empty_target_names.add(name)
else:
empty_target_names.add(name)
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
  # Write phony targets for any empty targets that weren't written yet. As
  # short names are not necessarily unique, only do this for short names
  # that haven't already been output for another target.
empty_target_names = empty_target_names - non_empty_target_names
if empty_target_names:
master_ninja.newline()
master_ninja.comment('Empty targets (output for completeness).')
for name in sorted(empty_target_names):
master_ninja.build(name, 'phony')
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| mit |
mdraeger/gmapcatcher | gmapcatcher/gps/fake.py | 4 | 24400 | # This file is Copyright (c) 2010 by the GPSD project
# BSD terms apply: see the file COPYING in the distribution root for details.
"""
gpsfake.py -- classes for creating a controlled test environment around gpsd.
The gpsfake(1) regression tester shipped with gpsd is a trivial wrapper
around this code. For a more interesting usage example, see the
valgrind-audit script shipped with the gpsd code.
To use this code, start by instantiating a TestSession class. Use the
prefix argument if you want to run the daemon under some kind of run-time
monitor like valgrind or gdb. Here are some particularly useful possibilities:
valgrind --tool=memcheck --gen-suppressions=yes --leak-check=yes
Run under Valgrind, checking for malloc errors and memory leaks.
xterm -e gdb -tui --args
Run under gdb, controlled from a new xterm.
You can use the options argument to pass in daemon options; normally you will
use this to set the debug-logging level.
On initialization, the test object spawns an instance of gpsd with no
devices or clients attached, connected to a control socket.
TestSession has methods to attach and detach fake GPSes. The
TestSession class simulates GPS devices for you with objects composed
from a pty and a class instance that cycles sentences into the master side
from some specified logfile; gpsd reads the slave side. A fake GPS is
identified by the string naming its slave device.
TestSession also has methods to start and end client sessions. Daemon
responses to a client are fed to a hook function which, by default,
discards them. You can change the hook to sys.stdout.write() to dump
responses to standard output (this is what the gpsfake executable
does) or do something more exotic. A client session is identified by a
small integer that counts the number of client session starts.
There are a couple of convenience methods. TestSession.wait() does nothing,
allowing a specified number of seconds to elapse. TestSession.send()
ships commands to an open client session.
TestSession does not currently capture the daemon's log output. It is
run with -N, so the output will go to stderr (along with, for example,
Valgrind notifications).
Each FakeGPS instance tries to packetize the data from the logfile it
is initialized with. It uses the same packet-getter as the daemon.
The TestSession code maintains a run queue of FakeGPS and gps.gps (client-
session) objects. It repeatedly cycles through the run queue. For each
client session object in the queue, it tries to read data from gpsd. For
each fake GPS, it sends one line of stored data. When a fake-GPS's
go predicate becomes false, the fake GPS is removed from the run queue.
There are two ways to use this code. The more deterministic is
non-threaded mode: set up your client sessions and fake GPS devices,
then call the run() method. The run() method will terminate when
there are no more objects in the run queue. Note, you must have
created at least one fake client or fake GPS before calling run(),
otherwise it will terminate immediately.
To allow for adding and removing clients while the test is running,
run in threaded mode by calling the start() method. This simply calls
the run method in a subthread, with locking of critical regions.
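A minimal non-threaded session might look like this (the logfile path and
client command are placeholders, and the method names are as described
above; consult the TestSession class for the exact signatures):
    session = TestSession(options="-D 2")
    session.gps_add("test/daemon/sample.log")
    session.client_add("w\n")
    session.run()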
"""
import os, time, signal, pty, termios # fcntl, array, struct
import exceptions, threading, socket
import gps
import packet as sniffer
# The two magic numbers below have to be derived from observation. If
# they're too high you'll slow the tests down a lot. If they're too low
# you'll get random spurious regression failures that usually look
# like lines missing from the end of the test output relative to the
# check file. These numbers might have to be adjusted upward on faster
# machines. The need for them may be symptomatic of race conditions
# in the pty layer or elsewhere.
# Define a per-line delay on writes so we won't spam the buffers in
# the pty layer or gpsd itself. Removing this entirely was tried but
# caused failures under NetBSD. Values smaller than the system timer
# tick don't make any difference here.
WRITE_PAD = 0.001
# We delay briefly after a GPS source is exhausted before removing it.
# This should give its subscribers time to get gpsd's response before
# we call the cleanup code. Note that using fractional seconds in
# CLOSE_DELAY may have no effect; Python time.time() returns a float
# value, but it is not guaranteed by Python that the C implementation
# underneath will return with precision finer than 1 second. (Linux
# and *BSD return full precision.) Dropping this to 0.1 has been
# tried but caused failures.
CLOSE_DELAY = 0.2
class TestLoadError(exceptions.Exception):
def __init__(self, msg):
self.msg = msg
class TestLoad:
"Digest a logfile into a list of sentences we can cycle through."
def __init__(self, logfp, predump=False):
self.sentences = [] # This is the interesting part
if type(logfp) == type(""):
logfp = open(logfp, "r")
self.name = logfp.name
self.logfp = logfp
self.predump = predump
self.logfile = logfp.name
self.type = None
self.sourcetype = "pty"
self.serial = None
# Grab the packets
getter = sniffer.new()
#gps.packet.register_report(reporter)
type_latch = None
while True:
(plen, ptype, packet, counter) = getter.get(logfp.fileno())
if plen <= 0:
break
elif ptype == sniffer.COMMENT_PACKET:
# Some comments are magic
if "Serial:" in packet:
# Change serial parameters
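                    # e.g. a log comment of the form "# Serial: 4800 8N1"
                    # requests 4800 baud, 8 databits, no parity, 1 stopbit.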
packet = packet[1:].strip()
try:
(xx, baud, params) = packet.split()
baud = int(baud)
if params[0] in ('7', '8'):
databits = int(params[0])
else:
raise ValueError
if params[1] in ('N', 'O', 'E'):
parity = params[1]
else:
raise ValueError
if params[2] in ('1', '2'):
stopbits = int(params[2])
else:
raise ValueError
except (ValueError, IndexError):
raise TestLoadError("bad serial-parameter spec in %s"%\
logfp.name)
self.serial = (baud, databits, parity, stopbits)
elif "UDP" in packet:
self.sourcetype = "UDP"
else:
if type_latch is None:
type_latch = ptype
if self.predump:
print repr(packet)
if not packet:
raise TestLoadError("zero-length packet from %s"%\
logfp.name)
self.sentences.append(packet)
# Look at the first packet to grok the GPS type
self.textual = (type_latch == sniffer.NMEA_PACKET)
if self.textual:
self.legend = "gpsfake: line %d: "
else:
self.legend = "gpsfake: packet %d"
class PacketError(exceptions.Exception):
def __init__(self, msg):
self.msg = msg
class FakeGPS:
def __init__(self, testload, progress=None):
self.testload = testload
self.progress = progress
self.go_predicate = lambda: True
self.readers = 0
self.index = 0
self.progress("gpsfake: %s provides %d sentences\n" % (self.testload.name, len(self.testload.sentences)))
def write(self, line):
"Throw an error if this superclass is ever instantiated."
raise ValueError, line
def feed(self):
"Feed a line from the contents of the GPS log to the daemon."
line = self.testload.sentences[self.index % len(self.testload.sentences)]
if "%Delay:" in line:
# Delay specified number of seconds
delay = line.split()[1]
time.sleep(int(delay))
# self.write has to be set by the derived class
self.write(line)
if self.progress:
self.progress("gpsfake: %s feeds %d=%s\n" % (self.testload.name, len(line), repr(line)))
time.sleep(WRITE_PAD)
self.index += 1
class FakePTY(FakeGPS):
"A FakePTY is a pty with a test log ready to be cycled to it."
def __init__(self, testload,
speed=4800, databits=8, parity='N', stopbits=1,
progress=None):
FakeGPS.__init__(self, testload, progress)
        # Allow Serial: header to be overridden by an explicit speed.
if self.testload.serial:
(speed, databits, parity, stopbits) = self.testload.serial
self.speed = speed
baudrates = {
0: termios.B0,
50: termios.B50,
75: termios.B75,
110: termios.B110,
134: termios.B134,
150: termios.B150,
200: termios.B200,
300: termios.B300,
600: termios.B600,
1200: termios.B1200,
1800: termios.B1800,
2400: termios.B2400,
4800: termios.B4800,
9600: termios.B9600,
19200: termios.B19200,
38400: termios.B38400,
57600: termios.B57600,
115200: termios.B115200,
230400: termios.B230400,
}
speed = baudrates[speed] # Throw an error if the speed isn't legal
(self.fd, self.slave_fd) = pty.openpty()
self.byname = os.ttyname(self.slave_fd)
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self.slave_fd)
cc[termios.VMIN] = 1
cflag &= ~(termios.PARENB | termios.PARODD | termios.CRTSCTS)
cflag |= termios.CREAD | termios.CLOCAL
iflag = oflag = lflag = 0
iflag &=~ (termios.PARMRK | termios.INPCK)
cflag &=~ (termios.CSIZE | termios.CSTOPB | termios.PARENB | termios.PARODD)
if databits == 7:
cflag |= termios.CS7
else:
cflag |= termios.CS8
if stopbits == 2:
cflag |= termios.CSTOPB
if parity == 'E':
iflag |= termios.INPCK
cflag |= termios.PARENB
elif parity == 'O':
iflag |= termios.INPCK
cflag |= termios.PARENB | termios.PARODD
ispeed = ospeed = speed
termios.tcsetattr(self.slave_fd, termios.TCSANOW,
[iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
def read(self):
"Discard control strings written by gpsd."
# A tcflush implementation works on Linux but fails on OpenBSD 4.
termios.tcflush(self.fd, termios.TCIFLUSH)
# Alas, the FIONREAD version also works on Linux and fails on OpenBSD.
#try:
# buf = array.array('i', [0])
# fcntl.ioctl(self.master_fd, termios.FIONREAD, buf, True)
# n = struct.unpack('i', buf)[0]
# os.read(self.master_fd, n)
#except IOError:
# pass
def write(self, line):
os.write(self.fd, line)
def drain(self):
"Wait for the associated device to drain (e.g. before closing)."
termios.tcdrain(self.fd)
class FakeUDP(FakeGPS):
"A UDP broadcaster with a test log ready to be cycled to it."
def __init__(self, testload,
ipaddr, port,
progress=None):
FakeGPS.__init__(self, testload, progress)
self.ipaddr = ipaddr
self.port = port
self.byname = "udp://" + ipaddr + ":" + port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def read(self):
"Discard control strings written by gpsd."
pass
def write(self, line):
self.sock.sendto(line, (self.ipaddr, int(self.port)))
def drain(self):
"Wait for the associated device to drain (e.g. before closing)."
pass # shutdown() fails on UDP
class DaemonError(exceptions.Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class DaemonInstance:
"Control a gpsd instance."
def __init__(self, control_socket=None):
self.sockfile = None
self.pid = None
self.tmpdir = os.environ.get('TMPDIR', '/tmp')
if control_socket:
self.control_socket = control_socket
else:
self.control_socket = "%s/gpsfake-%d.sock" % (self.tmpdir, os.getpid())
self.pidfile = "%s/gpsfake-%d.pid" % (self.tmpdir, os.getpid())
def spawn(self, options, port, background=False, prefix=""):
"Spawn a daemon instance."
self.spawncmd = None
# Look for gpsd in GPSD_HOME env variable
if os.environ.get('GPSD_HOME'):
for path in os.environ['GPSD_HOME'].split(':'):
_spawncmd = "%s/gpsd" % path
if os.path.isfile(_spawncmd) and os.access(_spawncmd, os.X_OK):
self.spawncmd = _spawncmd
break
# if we could not find it yet try PATH env variable for it
        if not self.spawncmd:
            if '/usr/sbin' not in os.environ['PATH']:
                os.environ['PATH'] = os.environ['PATH'] + ":/usr/sbin"
for path in os.environ['PATH'].split(':'):
_spawncmd = "%s/gpsd" % path
if os.path.isfile(_spawncmd) and os.access(_spawncmd, os.X_OK):
self.spawncmd = _spawncmd
break
if not self.spawncmd:
raise DaemonError("Cannot execute gpsd: executable not found. Set GPSD_HOME env variable")
# The -b option to suppress hanging on probe returns is needed to cope
# with OpenBSD (and possibly other non-Linux systems) that don't support
# anything we can use to implement the FakeGPS.read() method
self.spawncmd += " -b -N -S %s -F %s -P %s %s" % (port, self.control_socket, self.pidfile, options)
if prefix:
self.spawncmd = prefix + " " + self.spawncmd.strip()
if background:
self.spawncmd += " &"
status = os.system(self.spawncmd)
if os.WIFSIGNALED(status) or os.WEXITSTATUS(status):
raise DaemonError("daemon exited with status %d" % status)
def wait_pid(self):
"Wait for the daemon, get its PID and a control-socket connection."
while True:
try:
fp = open(self.pidfile)
except IOError:
time.sleep(0.1)
continue
try:
fp.seek(0)
pidstr = fp.read()
self.pid = int(pidstr)
except ValueError:
time.sleep(0.5)
continue # Avoid race condition -- PID not yet written
fp.close()
break
def __get_control_socket(self):
# Now we know it's running, get a connection to the control socket.
if not os.path.exists(self.control_socket):
return None
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
self.sock.connect(self.control_socket)
except socket.error:
if self.sock:
self.sock.close()
self.sock = None
return self.sock
def is_alive(self):
"Is the daemon still alive?"
try:
os.kill(self.pid, 0)
return True
except OSError:
return False
def add_device(self, path):
"Add a device to the daemon's internal search list."
if self.__get_control_socket():
self.sock.sendall("+%s\r\n\x00" % path)
self.sock.recv(12)
self.sock.close()
def remove_device(self, path):
"Remove a device from the daemon's internal search list."
if self.__get_control_socket():
self.sock.sendall("-%s\r\n\x00" % path)
self.sock.recv(12)
self.sock.close()
def kill(self):
"Kill the daemon instance."
if self.pid:
try:
os.kill(self.pid, signal.SIGTERM)
# Raises an OSError for ESRCH when we've killed it.
while True:
os.kill(self.pid, signal.SIGTERM)
time.sleep(0.01)
except OSError:
pass
self.pid = None
class TestSessionError(exceptions.Exception):
def __init__(self, msg):
self.msg = msg
class TestSession:
"Manage a session including a daemon with fake GPSes and clients."
def __init__(self, prefix=None, port=None, options=None, verbose=0, predump=False, udp=False):
"Initialize the test session by launching the daemon."
self.prefix = prefix
self.port = port
self.options = options
self.verbose = verbose
self.predump = predump
self.udp = udp
self.daemon = DaemonInstance()
self.fakegpslist = {}
self.client_id = 0
self.readers = 0
self.writers = 0
self.runqueue = []
self.index = 0
if port:
self.port = port
else:
self.port = gps.GPSD_PORT
self.progress = lambda x: None
self.reporter = lambda x: None
self.default_predicate = None
self.fd_set = []
self.threadlock = None
def spawn(self):
for sig in (signal.SIGQUIT, signal.SIGINT, signal.SIGTERM):
signal.signal(sig, lambda unused, dummy: self.cleanup())
self.daemon.spawn(background=True, prefix=self.prefix, port=self.port, options=self.options)
self.daemon.wait_pid()
def set_predicate(self, pred):
"Set a default go predicate for the session."
self.default_predicate = pred
def gps_add(self, logfile, speed=19200, pred=None):
"Add a simulated GPS being fed by the specified logfile."
self.progress("gpsfake: gps_add(%s, %d)\n" % (logfile, speed))
if logfile not in self.fakegpslist:
testload = TestLoad(logfile, predump=self.predump)
if testload.sourcetype == "UDP" or self.udp:
newgps = FakeUDP(testload, ipaddr="127.0.0.1", port="5000",
progress=self.progress)
else:
newgps = FakePTY(testload, speed=speed,
progress=self.progress)
if pred:
newgps.go_predicate = pred
elif self.default_predicate:
newgps.go_predicate = self.default_predicate
self.fakegpslist[newgps.byname] = newgps
self.append(newgps)
newgps.exhausted = 0
self.daemon.add_device(newgps.byname)
return newgps.byname
def gps_remove(self, name):
"Remove a simulated GPS from the daemon's search list."
self.progress("gpsfake: gps_remove(%s)\n" % name)
self.fakegpslist[name].drain()
self.remove(self.fakegpslist[name])
self.daemon.remove_device(name)
del self.fakegpslist[name]
def client_add(self, commands):
"Initiate a client session and force connection to a fake GPS."
self.progress("gpsfake: client_add()\n")
newclient = gps.gps(port=self.port, verbose=self.verbose)
self.append(newclient)
newclient.id = self.client_id + 1
self.client_id += 1
self.progress("gpsfake: client %d has %s\n" % (self.client_id,newclient.device))
if commands:
self.initialize(newclient, commands)
return self.client_id
def client_remove(self, cid):
"Terminate a client session."
self.progress("gpsfake: client_remove(%d)\n" % cid)
for obj in self.runqueue:
if isinstance(obj, gps.gps) and obj.id == cid:
self.remove(obj)
return True
else:
return False
def wait(self, seconds):
"Wait, doing nothing."
self.progress("gpsfake: wait(%d)\n" % seconds)
time.sleep(seconds)
def gather(self, seconds):
"Wait, doing nothing but watching for sentences."
self.progress("gpsfake: gather(%d)\n" % seconds)
#mark = time.time()
time.sleep(seconds)
def cleanup(self):
"We're done, kill the daemon."
self.progress("gpsfake: cleanup()\n")
if self.daemon:
self.daemon.kill()
self.daemon = None
def run(self):
"Run the tests."
try:
self.progress("gpsfake: test loop begins\n")
while self.daemon:
# We have to read anything that gpsd might have tried
# to send to the GPS here -- under OpenBSD the
# TIOCDRAIN will hang, otherwise.
for device in self.runqueue:
if isinstance(device, FakeGPS):
device.read()
had_output = False
chosen = self.choose()
if isinstance(chosen, FakeGPS):
if chosen.exhausted and (time.time() - chosen.exhausted > CLOSE_DELAY):
self.gps_remove(chosen.byname)
self.progress("gpsfake: GPS %s removed\n" % chosen.byname)
elif not chosen.go_predicate(chosen.index, chosen):
if chosen.exhausted == 0:
chosen.exhausted = time.time()
self.progress("gpsfake: GPS %s ran out of input\n" % chosen.byname)
else:
chosen.feed()
elif isinstance(chosen, gps.gps):
if chosen.enqueued:
chosen.send(chosen.enqueued)
chosen.enqueued = ""
while chosen.waiting():
chosen.read()
if chosen.valid & gps.PACKET_SET:
self.reporter(chosen.response)
had_output = True
else:
raise TestSessionError("test object of unknown type")
if not self.writers and not had_output:
self.progress("gpsfake: no writers and no output\n")
break
self.progress("gpsfake: test loop ends\n")
finally:
self.cleanup()
# All knowledge about locks and threading is below this line,
# except for the bare fact that self.threadlock is set to None
# in the class init method.
def append(self, obj):
"Add a producer or consumer to the object list."
if self.threadlock:
self.threadlock.acquire()
self.runqueue.append(obj)
if isinstance(obj, FakeGPS):
self.writers += 1
elif isinstance(obj, gps.gps):
self.readers += 1
if self.threadlock:
self.threadlock.release()
def remove(self, obj):
"Remove a producer or consumer from the object list."
if self.threadlock:
self.threadlock.acquire()
self.runqueue.remove(obj)
if isinstance(obj, FakeGPS):
self.writers -= 1
elif isinstance(obj, gps.gps):
self.readers -= 1
self.index = min(len(self.runqueue)-1, self.index)
if self.threadlock:
self.threadlock.release()
def choose(self):
"Atomically get the next object scheduled to do something."
if self.threadlock:
self.threadlock.acquire()
chosen = self.index
self.index += 1
self.index %= len(self.runqueue)
if self.threadlock:
self.threadlock.release()
return self.runqueue[chosen]
def initialize(self, client, commands):
"Arrange for client to ship specified commands when it goes active."
client.enqueued = ""
if not self.threadlock:
client.send(commands)
else:
client.enqueued = commands
    def start(self):
        self.threadlock = threading.Lock()
        self.thread = threading.Thread(target=self.run)
        self.thread.start()
# End
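# Illustrative driver (added sketch, not part of the original module; the log
# path is hypothetical): replaying one log through a fake GPS.
#
#   session = TestSession(options="-D 2")
#   session.spawn()
#   session.gps_add("test/daemon/sample.log")
#   session.gather(3)
#   session.cleanup()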
| gpl-2.0 |
hohe/scikit-rf | skrf/tlineFunctions.py | 4 | 13802 |
'''
.. module:: skrf.tlineFunctions
===============================================
tlineFunctions (:mod:`skrf.tlineFunctions`)
===============================================
This module provides functions related to transmission line theory.
Impedance and Reflection Coefficient
--------------------------------------
These functions relate basic transmission line quantities such as
characteristic impedance, input impedance, reflection coefficient, etc.
Each function has two names. One is a long-winded but readable name and
the other is a short-hand variable-like names. Below is a table relating
these two names with each other as well as common mathematical symbols.
==================== ====================== ================================
Symbol Variable Name Long Name
==================== ====================== ================================
:math:`Z_l` z_l load_impedance
:math:`Z_{in}` z_in input_impedance
:math:`\Gamma_0` Gamma_0 reflection_coefficient
:math:`\Gamma_{in}` Gamma_in reflection_coefficient_at_theta
:math:`\\theta` theta electrical_length
==================== ====================== ================================
There may be a bit of confusion about the difference between the load
impedance and the input impedance. This is because the load impedance **is**
the input impedance at the load. An illustration may provide some
useful reference.
Below is a (bad) illustration of a section of uniform transmission line
of characteristic impedance :math:`Z_0`, and electrical length
:math:`\\theta`. The line is terminated on the right with some
load impedance, :math:`Z_l`. The input impedance :math:`Z_{in}` and
input reflection coefficient :math:`\\Gamma_{in}` are
looking in towards the load from the distance :math:`\\theta` from the
load.
.. math::
Z_0, \\theta
\\text{o===============o=}[Z_l]
\\to\\qquad\\qquad\\qquad\\quad\\qquad \\qquad \\to \\qquad \\quad
Z_{in},\\Gamma_{in}\\qquad\\qquad\\qquad\\qquad\\quad Z_l,\\Gamma_0 \\qquad
So, to clarify the confusion,
.. math::
Z_{in}= Z_{l},\\qquad\\qquad
\\Gamma_{in}=\\Gamma_l \\text{ at } \\theta=0
Short names
+++++++++++++
.. autosummary::
:toctree: generated/
theta
zl_2_Gamma0
Gamma0_2_zl
zl_2_zin
zl_2_Gamma_in
Gamma0_2_Gamma_in
Gamma0_2_zin
Long-names
++++++++++++++
.. autosummary::
:toctree: generated/
distance_2_electrical_length
electrical_length_2_distance
reflection_coefficient_at_theta
reflection_coefficient_2_input_impedance
reflection_coefficient_2_input_impedance_at_theta
input_impedance_at_theta
load_impedance_2_reflection_coefficient
load_impedance_2_reflection_coefficient_at_theta
Distributed Circuit and Wave Quantities
----------------------------------------
.. autosummary::
:toctree: generated/
distributed_circuit_2_propagation_impedance
propagation_impedance_2_distributed_circuit
Transmission Line Physics
---------------------------------
.. autosummary::
:toctree: generated/
skin_depth
surface_resistivity
'''
import numpy as npy
from numpy import (pi, sqrt, exp, array, tan, sin, cos, inf, log, real, imag,
                   interp, linspace, shape, zeros, reshape)
from scipy.constants import mu_0
from . import mathFunctions as mf
INF = 1e99
ONE = 1.0 + 1/1e14
def skin_depth(f,rho, mu_r):
'''
the skin depth for a material.
see www.microwaves101.com for more info.
Parameters
----------
f : number or array-like
frequency, in Hz
rho : number of array-like
bulk resistivity of material, in ohm*m
mu_r : number or array-like
relative permeability of material
Returns
----------
skin depth : number or array-like
the skin depth, in m
References
--------------
.. [1] http://en.wikipedia.org/wiki/Skin_effect
'''
return sqrt(rho/(pi*f*mu_r*mu_0))
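# illustrative check (added sketch, not part of the upstream module): the skin
# depth of copper (rho ~ 1.68e-8 ohm*m, mu_r ~ 1) at 1 GHz is about 2.06 um.
if __name__ == '__main__':
    print(skin_depth(f=1e9, rho=1.68e-8, mu_r=1))  # ~2.06e-06 m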
def surface_resistivity(f,rho,mu_r):
'''
surface resistivity.
see www.microwaves101.com for more info.
Parameters
----------
f : number or array-like
frequency, in Hz
rho : number or array-like
bulk resistivity of material, in ohm*m
mu_r : number or array-like
relative permeability of material
Returns
----------
    surface_resistivity : number or array-like
        surface resistivity, in ohms/square
'''
return rho/skin_depth(rho=rho,f = f, mu_r=mu_r)
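# illustrative check (added sketch, not part of the upstream module): for the
# same copper parameters at 1 GHz this gives roughly 8.1 milliohms/square.
if __name__ == '__main__':
    print(surface_resistivity(f=1e9, rho=1.68e-8, mu_r=1))  # ~8.1e-03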
def distributed_circuit_2_propagation_impedance( distributed_admittance,\
distributed_impedance):
'''
    Converts distributed circuit values to wave quantities.
This converts complex distributed impedance and admittance to
propagation constant and characteristic impedance. The relation is
.. math::
Z_0 = \\sqrt{ \\frac{Z^{'}}{Y^{'}}}
\\quad\\quad
\\gamma = \\sqrt{ Z^{'} Y^{'}}
Parameters
------------
distributed_admittance : number, array-like
distributed admittance
distributed_impedance : number, array-like
distributed impedance
Returns
----------
    propagation_constant : number, array-like
        complex propagation constant
    characteristic_impedance : number, array-like
        characteristic impedance
See Also
----------
propagation_impedance_2_distributed_circuit : opposite conversion
'''
propagation_constant = \
sqrt(distributed_impedance*distributed_admittance)
characteristic_impedance = \
sqrt(distributed_impedance/distributed_admittance)
return (propagation_constant, characteristic_impedance)
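# illustrative check (added sketch, not part of the upstream module): a
# lossless line with L' = 250 nH/m and C' = 100 pF/m should give Z0 = 50 ohm
# and a purely imaginary gamma, beta = w*sqrt(L'C') ~ 31.4 rad/m at 1 GHz.
if __name__ == '__main__':
    w = 2 * pi * 1e9
    gamma_, z0_ = distributed_circuit_2_propagation_impedance(
        1j * w * 100e-12, 1j * w * 250e-9)
    print(gamma_)  # ~31.4j rad/m
    print(z0_)     # ~(50+0j) ohm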
def propagation_impedance_2_distributed_circuit(propagation_constant, \
characteristic_impedance):
'''
Converts wave quantities to distributed circuit values.
Converts complex propagation constant and characteristic impedance
to distributed impedance and admittance. The relation is,
.. math::
Z^{'} = \\gamma Z_0 \\quad\\quad
Y^{'} = \\frac{\\gamma}{Z_0}
Parameters
------------
    propagation_constant : number, array-like
        complex propagation constant
    characteristic_impedance : number, array-like
        characteristic impedance
Returns
----------
distributed_admittance : number, array-like
distributed admittance
distributed_impedance : number, array-like
distributed impedance
See Also
----------
distributed_circuit_2_propagation_impedance : opposite conversion
'''
distributed_admittance = propagation_constant/characteristic_impedance
distributed_impedance = propagation_constant*characteristic_impedance
return (distributed_admittance,distributed_impedance)
def electrical_length(gamma, f , d, deg=False):
'''
Calculates the electrical length of a section of transmission line.
.. math::
\\theta = \\gamma(f) \\cdot d
Parameters
----------
gamma : function
propagation constant function, which takes frequency in hz as a
sole argument. see Notes.
    d : number or array-like
length of line, in meters
f : number or array-like
frequency at which to calculate
deg : Boolean
return in degrees or not.
Returns
----------
theta : number or array-like
electrical length in radians or degrees, depending on value of
deg.
See Also
-----------
electrical_length_2_distance : opposite conversion
Notes
------
the convention has been chosen that forward propagation is
represented by the positive imaginary part of the value returned by
the gamma function
'''
# typecast to a 1D array
f = array(f, dtype=float).reshape(-1)
d = array(d, dtype=float).reshape(-1)
if deg == False:
return gamma(f)*d
elif deg == True:
return mf.radian_2_degree(gamma(f)*d )
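# illustrative check (added sketch, not part of the upstream module): one
# metre of an air line, gamma = j*2*pi*f/c, is ~1200.8 degrees long at 1 GHz;
# the result is imaginary because gamma here is pure phase.
if __name__ == '__main__':
    gamma_f = lambda f: 1j * 2 * pi * f / 299792458.0
    print(electrical_length(gamma_f, 1e9, 1.0, deg=True))  # ~[0+1200.8j]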
def electrical_length_2_distance(theta, gamma, f0,deg=True):
'''
Convert electrical length to a physical distance.
.. math::
d = \\frac{\\theta}{\\gamma(f_0)}
Parameters
----------
theta : number or array-like
        electrical length. units depend on `deg` option
gamma : function
propagation constant function, which takes frequency in hz as a
sole argument. see Notes
f0 : number or array-like
frequency at which to calculate
deg : Boolean
return in degrees or not.
Returns
----------
    d : number or array-like
        physical distance, in meters
Notes
------
the convention has been chosen that forward propagation is
represented by the positive imaginary part of the value returned by
the gamma function
See Also
---------
distance_2_electrical_length: opposite conversion
'''
if deg == True:
theta = mf.degree_2_radian(theta)
return theta/imag(gamma(f0))
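# illustrative check (added sketch, not part of the upstream module): 90
# degrees of the same air line at 1 GHz is a quarter of the ~0.3 m wavelength.
if __name__ == '__main__':
    gamma_f = lambda f: 1j * 2 * pi * f / 299792458.0
    print(electrical_length_2_distance(90, gamma_f, 1e9))  # ~0.0750 m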
def load_impedance_2_reflection_coefficient(z0, zl):
'''
Returns the reflection coefficient for a given load impedance, and
characteristic impedance.
For a transmission line of characteristic impedance :math:`Z_0`
terminated with load impedance :math:`Z_l`, the complex reflection
coefficient is given by,
.. math::
\\Gamma = \\frac {Z_l - Z_0}{Z_l + Z_0}
Parameters
----------
z0 : number or array-like
characteristic impedance
zl : number or array-like
load impedance (aka input impedance)
Returns
--------
gamma : number or array-like
reflection coefficient
See Also
----------
Gamma0_2_zl : reflection coefficient to load impedance
Notes
------
inputs are typecasted to 1D complex array
'''
# typecast to a complex 1D array. this makes everything easier
z0 = array(z0, dtype=complex).reshape(-1)
zl = array(zl, dtype=complex).reshape(-1)
# handle singularity by numerically representing inf as big number
zl[(zl==npy.inf)] = INF
return ((zl -z0 )/(zl+z0))
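# illustrative check (added sketch, not part of the upstream module): matched,
# short and open loads on a 50 ohm line give Gamma = 0, -1 and +1.
if __name__ == '__main__':
    print(load_impedance_2_reflection_coefficient(50, 50))       # [ 0.+0.j]
    print(load_impedance_2_reflection_coefficient(50, 0))        # [-1.+0.j]
    print(load_impedance_2_reflection_coefficient(50, npy.inf))  # [ 1.+0.j]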
def reflection_coefficient_2_input_impedance(z0,Gamma):
'''
calculates the input impedance given a reflection coefficient and
characteristic impedance
.. math::
        Z_{in} = Z_0 \\frac {1 + \\Gamma}{1-\\Gamma}
Parameters
----------
Gamma : number or array-like
complex reflection coefficient
z0 : number or array-like
characteristic impedance
Returns
--------
zin : number or array-like
input impedance
'''
# typecast to a complex 1D array. this makes everything easier
Gamma = array(Gamma, dtype=complex).reshape(-1)
z0 = array(z0, dtype=complex).reshape(-1)
    # handle the Gamma == 1 singularity (infinite impedance) by nudging it
    # just off 1
Gamma[(Gamma == 1)] = ONE
return z0*((1.0+Gamma )/(1.0-Gamma))
def reflection_coefficient_at_theta(Gamma0,theta):
'''
reflection coefficient at a given electrical length.
.. math::
\\Gamma_{in} = \\Gamma_0 e^{-2j\\theta}
Parameters
----------
Gamma0 : number or array-like
reflection coefficient at theta=0
theta : number or array-like
electrical length, (may be complex)
Returns
----------
Gamma_in : number or array-like
input reflection coefficient
'''
Gamma0 = array(Gamma0, dtype=complex).reshape(-1)
theta = array(theta, dtype=complex).reshape(-1)
return Gamma0 * exp(-2j* theta)
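# illustrative check (added sketch, not part of the upstream module): a short
# (Gamma0 = -1) seen through a quarter wavelength (theta = pi/2) looks open.
if __name__ == '__main__':
    print(reflection_coefficient_at_theta(-1, pi / 2))  # ~[ 1.+0.j]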
def input_impedance_at_theta(z0,zl, theta):
'''
input impedance of load impedance zl at a given electrical length,
given characteristic impedance z0.
Parameters
----------
    z0 : number or array-like
        characteristic impedance
    zl : number or array-like
        load impedance
    theta : number or array-like
        electrical length of the line, (may be complex)

    Returns
    ---------
    zin : number or array-like
        input impedance at electrical length theta
    '''
Gamma0 = load_impedance_2_reflection_coefficient(z0=z0,zl=zl)
Gamma_in = reflection_coefficient_at_theta(Gamma0=Gamma0, theta=theta)
return reflection_coefficient_2_input_impedance(z0=z0, Gamma=Gamma_in)
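# illustrative check (added sketch, not part of the upstream module): the
# classic quarter-wave transformer, zin = z0**2 / zl at theta = pi/2.
if __name__ == '__main__':
    print(input_impedance_at_theta(z0=50, zl=100, theta=pi / 2))  # ~[ 25.+0.j]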
def load_impedance_2_reflection_coefficient_at_theta(z0, zl, theta):
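    '''
    reflection coefficient of a load impedance zl, seen through a line of
    characteristic impedance z0 at electrical length theta.
    '''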
Gamma0 = load_impedance_2_reflection_coefficient(z0=z0,zl=zl)
Gamma_in = reflection_coefficient_at_theta(Gamma0=Gamma0, theta=theta)
return Gamma_in
def reflection_coefficient_2_input_impedance_at_theta(z0, Gamma0, theta):
'''
calculates the input impedance at electrical length theta, given a
reflection coefficient and characteristic impedance of the medium
    Parameters
    ----------
    z0 : number or array-like
        characteristic impedance
    Gamma0 : number or array-like
        reflection coefficient at theta=0
    theta : number or array-like
        electrical length of the line, (may be complex)

    Returns
    ----------
    zin : number or array-like
        input impedance at electrical length theta
    '''
Gamma_in = reflection_coefficient_at_theta(Gamma0=Gamma0, theta=theta)
zin = reflection_coefficient_2_input_impedance(z0=z0,Gamma=Gamma_in)
return zin
# short hand convenience.
# admittedly these follow no logical naming scheme, but they closely
# correspond to common symbolic conventions, and are convenient
theta = electrical_length
distance_2_electrical_length = electrical_length
zl_2_Gamma0 = load_impedance_2_reflection_coefficient
Gamma0_2_zl = reflection_coefficient_2_input_impedance
zl_2_zin = input_impedance_at_theta
zl_2_Gamma_in = load_impedance_2_reflection_coefficient_at_theta
Gamma0_2_Gamma_in = reflection_coefficient_at_theta
Gamma0_2_zin = reflection_coefficient_2_input_impedance_at_theta
| bsd-3-clause |
yashLadha/coala | tests/coalaTest.py | 5 | 7484 | import os
import re
import sys
import unittest
import unittest.mock
from pkg_resources import VersionConflict
from coalib.coala_main import run_coala
from coalib.output.printers.LogPrinter import LogPrinter
from coalib import assert_supported_version, coala
from pyprint.ConsolePrinter import ConsolePrinter
from coala_utils.ContextManagers import prepare_file
from coalib.output.Logging import configure_logging
from tests.TestUtilities import execute_coala, bear_test_module
class coalaTest(unittest.TestCase):
def setUp(self):
self.old_argv = sys.argv
def tearDown(self):
sys.argv = self.old_argv
def test_coala(self):
with bear_test_module(), \
prepare_file(['#fixme'], None) as (lines, filename):
retval, stdout, stderr = execute_coala(
coala.main,
'coala', '-c', os.devnull,
'-f', re.escape(filename),
'-b', 'LineCountTestBear')
self.assertIn('This file has 1 lines.',
stdout,
                          'The output should report the count as 1 line')
self.assertIn('During execution of coala', stderr)
self.assertNotEqual(retval, 0,
                            'coala must return nonzero when errors occurred')
@unittest.mock.patch('sys.version_info', tuple((2, 7, 11)))
def test_python_version_27(self):
        with self.assertRaises(SystemExit) as cm:
            assert_supported_version()
        self.assertEqual(cm.exception.code, 4)
@unittest.mock.patch('sys.version_info', tuple((3, 3, 6)))
def test_python_version_33(self):
        with self.assertRaises(SystemExit) as cm:
            assert_supported_version()
        self.assertEqual(cm.exception.code, 4)
def test_python_version_34(self):
assert_supported_version()
def test_did_nothing(self):
retval, stdout, stderr = execute_coala(coala.main, 'coala', '-I',
'-S', 'cli.enabled=false')
self.assertEqual(retval, 2)
self.assertIn('Did you forget to give the `--files`', stderr)
self.assertFalse(stdout)
retval, stdout, stderr = execute_coala(coala.main, 'coala', '-I',
'-b', 'JavaTestBear', '-f',
'*.java',
'-S', 'cli.enabled=false')
self.assertEqual(retval, 2)
self.assertIn('Nothing to do.', stderr)
self.assertFalse(stdout)
def test_show_all_bears(self):
with bear_test_module():
retval, stdout, stderr = execute_coala(
coala.main, 'coala', '-B', '-I')
self.assertEqual(retval, 0)
# 7 bears plus 1 line holding the closing colour escape sequence.
self.assertEqual(len(stdout.strip().splitlines()), 8)
self.assertFalse(stderr)
def test_show_language_bears(self):
with bear_test_module():
retval, stdout, stderr = execute_coala(
coala.main, 'coala', '-B', '-l', 'java', '-I')
self.assertEqual(retval, 0)
# 2 bears plus 1 line holding the closing colour escape sequence.
self.assertEqual(len(stdout.splitlines()), 3)
self.assertFalse(stderr)
def test_show_capabilities_with_supported_language(self):
with bear_test_module():
retval, stdout, stderr = execute_coala(
coala.main, 'coala', '-p', 'R', '-I')
self.assertEqual(retval, 0)
self.assertEqual(len(stdout.splitlines()), 2)
self.assertFalse(stderr)
@unittest.mock.patch('coalib.parsing.DefaultArgParser.get_all_bears_names')
@unittest.mock.patch('coalib.collecting.Collectors.icollect_bears')
def test_version_conflict_in_collecting_bears(self, import_fn, _):
with bear_test_module():
import_fn.side_effect = VersionConflict('msg1', 'msg2')
retval, stdout, stderr = execute_coala(coala.main, 'coala', '-B')
self.assertEqual(retval, 13)
self.assertIn(('There is a conflict in the version of a '
'dependency you have installed'), stderr)
self.assertIn('pip install "msg2"', stderr)
self.assertFalse(stdout)
self.assertNotEqual(retval, 0,
                                'coala must return nonzero when errors occurred')
@unittest.mock.patch('coalib.collecting.Collectors._import_bears')
def test_unimportable_bear(self, import_fn):
with bear_test_module():
import_fn.side_effect = SyntaxError
retval, stdout, stderr = execute_coala(coala.main, 'coala', '-B')
self.assertEqual(retval, 0)
self.assertIn('Unable to collect bears from', stderr)
self.assertIn('No bears to show.', stdout)
import_fn.side_effect = VersionConflict('msg1', 'msg2')
retval, stdout, stderr = execute_coala(coala.main, 'coala', '-B')
# Note that bear version conflicts don't give exitcode=13,
# they just give a warning with traceback in log_level debug.
self.assertEqual(retval, 0)
self.assertRegex(stderr,
'Unable to collect bears from .* because there '
'is a conflict with the version of a dependency '
'you have installed')
self.assertIn('pip install "msg2"', stderr)
self.assertIn('No bears to show.', stdout)
def test_run_coala_no_autoapply(self):
with bear_test_module(), \
prepare_file(['#fixme '], None) as (lines, filename):
self.assertEqual(
1,
len(run_coala(
console_printer=ConsolePrinter(),
log_printer=LogPrinter(),
arg_list=(
'-c', os.devnull,
'-f', re.escape(filename),
'-b', 'SpaceConsistencyTestBear',
'--apply-patches',
'-S', 'use_spaces=yeah'
),
autoapply=False
)[0]['cli'])
)
self.assertEqual(
0,
len(run_coala(
console_printer=ConsolePrinter(),
log_printer=LogPrinter(),
arg_list=(
'-c', os.devnull,
'-f', re.escape(filename),
'-b', 'SpaceConsistencyTestBear',
'--apply-patches',
'-S', 'use_spaces=yeah'
)
)[0]['cli'])
)
def test_logged_error_causes_non_zero_exitcode(self):
configure_logging()
with bear_test_module(), \
prepare_file(['#fixme '], None) as (lines, filename):
_, exitcode, _ = run_coala(
console_printer=ConsolePrinter(),
log_printer=LogPrinter(),
arg_list=(
'-c', os.devnull,
'-f', re.escape(filename),
'-b', 'ErrorTestBear'
),
autoapply=False
)
assert exitcode == 1
| agpl-3.0 |
cainmatt/django | django/middleware/common.py | 145 | 7274 | import logging
import re
from django import http
from django.conf import settings
from django.core import urlresolvers
from django.core.exceptions import PermissionDenied
from django.core.mail import mail_managers
from django.utils.cache import get_conditional_response, set_response_etag
from django.utils.encoding import force_text
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
This behavior can be customized by subclassing CommonMiddleware and
overriding the response_redirect_class attribute.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
response_redirect_class = http.HttpResponsePermanentRedirect
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
raise PermissionDenied('Forbidden user agent')
# Check for a redirect based on settings.PREPEND_WWW
host = request.get_host()
if settings.PREPEND_WWW and host and not host.startswith('www.'):
host = 'www.' + host
# Check if we also need to append a slash so we can do it all
# with a single redirect.
if self.should_redirect_with_slash(request):
path = self.get_full_path_with_slash(request)
else:
path = request.get_full_path()
return self.response_redirect_class('%s://%s%s' % (request.scheme, host, path))
def should_redirect_with_slash(self, request):
"""
Return True if settings.APPEND_SLASH is True and appending a slash to
the request path turns an invalid path into a valid one.
"""
if settings.APPEND_SLASH and not request.get_full_path().endswith('/'):
urlconf = getattr(request, 'urlconf', None)
return (
not urlresolvers.is_valid_path(request.path_info, urlconf)
and urlresolvers.is_valid_path('%s/' % request.path_info, urlconf)
)
return False
def get_full_path_with_slash(self, request):
"""
Return the full path of the request with a trailing slash appended.
Raise a RuntimeError if settings.DEBUG is True and request.method is
GET, PUT, or PATCH.
"""
new_path = request.get_full_path(force_append_slash=True)
if settings.DEBUG and request.method in ('POST', 'PUT', 'PATCH'):
raise RuntimeError(
"You called this URL via %(method)s, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining %(method)s data. "
"Change your form to point to %(url)s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django settings." % {
'method': request.method,
'url': request.get_host() + new_path,
}
)
return new_path
def process_response(self, request, response):
"""
Calculate the ETag, if needed.
When the status code of the response is 404, it may redirect to a path
with an appended slash if should_redirect_with_slash() returns True.
"""
# If the given URL is "Not Found", then check if we should redirect to
# a path with a slash appended.
if response.status_code == 404:
if self.should_redirect_with_slash(request):
return self.response_redirect_class(self.get_full_path_with_slash(request))
if settings.USE_ETAGS:
if not response.has_header('ETag'):
set_response_etag(response)
if response.has_header('ETag'):
return get_conditional_response(
request,
etag=response['ETag'],
response=response,
)
return response
class BrokenLinkEmailsMiddleware(object):
def process_response(self, request, response):
"""
Send broken link emails for relevant 404 NOT FOUND responses.
"""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')
if not self.is_ignorable_request(request, path, domain, referer):
ua = force_text(request.META.get('HTTP_USER_AGENT', '<none>'), errors='replace')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
        Return True if the referring URL is on the same domain as the
        current request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Return True if the given request *shouldn't* notify the site managers
according to project settings or in three specific situations:
- If the referer is empty.
- If a '?' in referer is identified as a search engine source.
- If the referer is equal to the current URL (assumed to be a
malicious bot).
"""
full_url = "%s://%s/%s" % (request.scheme, domain, uri.lstrip('/'))
if (not referer or
(not self.is_internal_request(domain, referer) and '?' in referer) or
(referer == uri or referer == full_url)):
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
| bsd-3-clause |
akash1808/nova_test_latest | nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py | 35 | 7367 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from oslo_utils import units
from oslo_vmware.objects import datastore as ds_obj
from nova import test
from nova.virt.vmwareapi import ds_util
ResultSet = collections.namedtuple('ResultSet', ['objects'])
ObjectContent = collections.namedtuple('ObjectContent', ['obj', 'propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('ManagedObjectReference', ['value'])
class VMwareDSUtilDatastoreSelectionTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareDSUtilDatastoreSelectionTestCase, self).setUp()
self.data = [
['VMFS', 'os-some-name', True, 'normal', 987654321, 12346789],
['NFS', 'another-name', True, 'normal', 9876543210, 123467890],
['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900],
['VMFS', 'some-name-good', False, 'normal', 987654321, 12346789],
['VMFS', 'new-name', True, 'inMaintenance', 987654321, 12346789]
]
def build_result_set(self, mock_data, name_list=None):
# datastores will have a moref_id of ds-000 and
# so on based on their index in the mock_data list
if name_list is None:
name_list = self.propset_name_list
objects = []
for id, row in enumerate(mock_data):
obj = ObjectContent(
obj=MoRef(value="ds-%03d" % id),
propSet=[])
for index, value in enumerate(row):
obj.propSet.append(
DynamicProperty(name=name_list[index], val=row[index]))
objects.append(obj)
return ResultSet(objects=objects)
@property
def propset_name_list(self):
return ['summary.type', 'summary.name', 'summary.accessible',
'summary.maintenanceMode', 'summary.capacity',
'summary.freeSpace']
def test_filter_datastores_simple(self):
datastores = self.build_result_set(self.data)
best_match = ds_obj.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
rec = ds_util._select_datastore(None, datastores, best_match)
self.assertIsNotNone(rec.ref, "could not find datastore!")
self.assertEqual('ds-001', rec.ref.value,
"didn't find the right datastore!")
self.assertEqual(123467890, rec.freespace,
"did not obtain correct freespace!")
def test_filter_datastores_empty(self):
data = []
datastores = self.build_result_set(data)
best_match = ds_obj.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
rec = ds_util._select_datastore(None, datastores, best_match)
self.assertEqual(rec, best_match)
def test_filter_datastores_no_match(self):
datastores = self.build_result_set(self.data)
datastore_regex = re.compile('no_match.*')
best_match = ds_obj.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
rec = ds_util._select_datastore(None, datastores,
best_match,
datastore_regex)
self.assertEqual(rec, best_match, "did not match datastore properly")
def test_filter_datastores_specific_match(self):
data = [
['VMFS', 'os-some-name', True, 'normal', 987654321, 1234678],
['NFS', 'another-name', True, 'normal', 9876543210, 123467890],
['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900],
['VMFS', 'some-name-good', True, 'normal', 987654321, 12346789],
['VMFS', 'some-other-good', False, 'normal', 987654321000,
12346789000],
['VMFS', 'new-name', True, 'inMaintenance', 987654321000,
12346789000]
]
# only the DS some-name-good is accessible and matches the regex
datastores = self.build_result_set(data)
datastore_regex = re.compile('.*-good$')
best_match = ds_obj.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
rec = ds_util._select_datastore(None, datastores,
best_match,
datastore_regex)
self.assertIsNotNone(rec, "could not find datastore!")
self.assertEqual('ds-003', rec.ref.value,
"didn't find the right datastore!")
self.assertNotEqual('ds-004', rec.ref.value,
"accepted an unreachable datastore!")
self.assertEqual('some-name-good', rec.name)
self.assertEqual(12346789, rec.freespace,
"did not obtain correct freespace!")
self.assertEqual(987654321, rec.capacity,
"did not obtain correct capacity!")
def test_filter_datastores_missing_props(self):
data = [
['VMFS', 'os-some-name', 987654321, 1234678],
['NFS', 'another-name', 9876543210, 123467890],
]
# no matches are expected when 'summary.accessible' is missing
prop_names = ['summary.type', 'summary.name',
'summary.capacity', 'summary.freeSpace']
datastores = self.build_result_set(data, prop_names)
best_match = ds_obj.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
rec = ds_util._select_datastore(None, datastores, best_match)
self.assertEqual(rec, best_match, "no matches were expected")
def test_filter_datastores_best_match(self):
data = [
['VMFS', 'spam-good', True, 20 * units.Gi, 10 * units.Gi],
['NFS', 'eggs-good', True, 40 * units.Gi, 15 * units.Gi],
['NFS41', 'nfs41-is-good', True, 35 * units.Gi, 12 * units.Gi],
['BAD', 'some-name-bad', True, 30 * units.Gi, 20 * units.Gi],
['VMFS', 'some-name-good', True, 50 * units.Gi, 5 * units.Gi],
['VMFS', 'some-other-good', True, 10 * units.Gi, 10 * units.Gi],
]
datastores = self.build_result_set(data)
datastore_regex = re.compile('.*-good$')
# the current best match is better than all candidates
best_match = ds_obj.Datastore(ref='ds-100', name='best-ds-good',
capacity=20 * units.Gi, freespace=19 * units.Gi)
rec = ds_util._select_datastore(None,
datastores,
best_match,
datastore_regex)
self.assertEqual(rec, best_match, "did not match datastore properly")
| apache-2.0 |
KDE/twine2 | kdelibs.py | 1 | 37607 | # -*- coding: utf-8 -*-
# Copyright 2009-2010 Simon Edwards <simon@simonzone.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import re
import toolkit
import kbindinggenerator.qtkdemacros as qtkdemacros
import os.path
import kbindinggenerator.sipsymboldata as sipsymboldata
outputBaseDirectory = "/home/sbe/devel/git/kde/kdebindings/pykde4"
cmakelistBaseDirectory = "/home/sbe/devel/git/kde/kdelibs"
cmakelistPimlibsBaseDirectory = "/home/sbe/devel/git/kde/kdepimlibs"
cmakelistPhononBaseDirectory = "/home/sbe/devel/git/phonon"
kdelibsBuildDirectory = "/home/sbe/devel/git_build/kde/kdelibs"
kdepimlibsBuildDirectory = "/home/sbe/devel/git_build/kde/kdepimlibs"
cmakelistGitBaseDirectory = "/home/sbe/devel/git"
polkitqtBaseDirectory = "/home/sbe/devel/git/polkit-qt"
sipImportDir = "/home/sbe/devel/kdesvninstall/share/sip/PyQt4"
###########################################################################
kdecore = toolkit.ModuleGenerator(
module="PyKDE4.kdecore",
outputDirectory=os.path.join(outputBaseDirectory, "sip/kdecore"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/kdecore"),
mainDocs=os.path.join(cmakelistBaseDirectory,"kdecore/Mainpage.dox"),
# .h file extraction
cmakelists=os.path.join(cmakelistBaseDirectory,"kdecore/CMakeLists.txt"),
ignoreHeaders="""conversion_check.h kallocator.h kdebug.h kcodecs.h kgenericfactory.h ksortablelist.h ktrader.h ktypelist.h kmulticastsocket.h kmulticastsocketdevice.h kdecore_export.h kde_file.h ksocks.h kde_file.h ksharedptr.h klauncher_iface.h k3bufferedsocket.h k3clientsocketbase.h k3datagramsocket.h k3httpproxysocketdevice.h k3iobuffer.h k3processcontroller.h k3process.h k3procio.h k3resolver.h k3reverseresolver.h k3serversocket.h k3socketaddress.h k3socketbase.h k3socketdevice.h k3socks.h k3sockssocketdevice.h k3streamsocket.h qtest_kde.h kdefakes.h kdeversion.h kauth.h ktypelistutils.h ktypetraits.h karchive.h kar.h ktar.h kzip.h kshareddatacache.h kmountpoint.h kdirwatch.h karchive_export.h""".split(" "),
noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KDECORE_EXPORT","KDE_EXPORT","KIO_EXPORT","KDE_DEPRECATED", "KDECORE_EXPORT_DEPRECATED", "KARCHIVE_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtNetwork/QtNetworkmod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KDECORE_EXPORT","KDE_EXPORT","KIO_EXPORT","KDECORE_EXPORT_DEPRECATED","KARCHIVE_EXPORT"],
ignoreBases=[],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="*",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="pParent",
annotations="TransferThis")
]
)
###########################################################################
kdeui = toolkit.ModuleGenerator(
module="PyKDE4.kdeui",
outputDirectory=os.path.join(outputBaseDirectory,"sip/kdeui"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/kdeui"),
mainDocs=os.path.join(cmakelistBaseDirectory,"kdeui/Mainpage.dox"),
# .h file extraction
cmakelists=[
os.path.join(cmakelistBaseDirectory,"kdeui/CMakeLists.txt")
#os.path.join(cmakelistBaseDirectory,"kdeui/dialogs/CMakeLists.txt"),
#os.path.join(cmakelistBaseDirectory,"kdeui/util/CMakeLists.txt"),
#os.path.join(cmakelistBaseDirectory,"kdeui/widgets/CMakeLists.txt")
],
ignoreHeaders="""kxerrorhandler.h k3iconview.h k3iconviewsearchline.h k3listview.h k3listviewlineedit.h k3listviewsearchline.h netwm_p.h k3mimesourcefactory.h kdeui_export.h fixx11h.h kglobalshortcutinfo_p.h kkeyserver_mac.h kkeyserver_win.h kimagecache.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KDEUI_EXPORT","KDE_EXPORT","KDE_DEPRECATED","KDEUI_EXPORT_DEPRECATED"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtXml/QtXmlmod.sip","QtSvg/QtSvgmod.sip","kdecore/kdecoremod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KDEUI_EXPORT","KDE_EXPORT","KDEUI_EXPORT_DEPRECATED"],
ignoreBases=["Q3GridView"],
noCTSCC=["KWindowSystem","NETRootInfo","NETWinInfo"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer"),
toolkit.PySlotRule(className="KDialogButtonBox",arg1Name="receiver",arg2Name="slot"),
toolkit.PySlotRule(namespaceName="KStandardAction",arg1Name="recvr",arg2Name="slot")
]
)
###########################################################################
kio = toolkit.ModuleGenerator(
module="PyKDE4.kio",
outputDirectory=os.path.join(outputBaseDirectory,"sip/kio"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/kio"),
mainDocs=os.path.join(cmakelistBaseDirectory,"kio/Mainpage.dox"),
# .h file extraction
cmakelists=[
os.path.join(cmakelistBaseDirectory,"kio/CMakeLists.txt"),
os.path.join(cmakelistBaseDirectory,"kfile/CMakeLists.txt")
],
headers=[os.path.join(cmakelistBaseDirectory,"kdecore/io/karchive.h"),
os.path.join(cmakelistBaseDirectory,"kdecore/io/kar.h"),
os.path.join(cmakelistBaseDirectory,"kdecore/io/ktar.h"),
os.path.join(cmakelistBaseDirectory,"kdecore/io/kzip.h")],
ignoreHeaders="""http_slave_defaults.h ioslave_defaults.h kmimetyperesolver.h k3mimetyperesolver.h kfiledetailview.h kfileiconview.h kfiletreeview.h kfiletreeviewitem.h ksslpemcallback.h kpropsdialog.h kio_export.h kdirnotify.h k3filedetailview.h k3fileiconview.h k3filetreeview.h k3filetreeviewitem.h k3mimetyperesolver.h kfiletreebranch.h kfile_export.h kurlbar.h kdebug.h kdebugdbusiface_p.h kdirwatch_p.h klimitediodevice_p.h kprocess_p.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1,"Q_OS_UNIX": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KDECORE_EXPORT","KDECORE_EXPORT_DEPRECATED","KIO_EXPORT",
"KFILE_EXPORT","KIO_EXPORT_DEPRECATED","KDE_NO_EXPORT","KDE_EXPORT","KDE_DEPRECATED",
"KDEUI_EXPORT_DEPRECATED","KIO_CONNECTION_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtXml/QtXmlmod.sip","kdecore/kdecoremod.sip","kdeui/kdeuimod.sip","solid/solidmod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KDECORE_EXPORT","KDECORE_EXPORT_DEPRECATED","KIO_EXPORT","KFILE_EXPORT","KDE_EXPORT","KDEUI_EXPORT_DEPRECATED",
"KIO_CONNECTION_EXPORT","KIO_EXPORT_DEPRECATED"],
#ignoreBases=["Q3GridView"],
noCTSCC=["KonqBookmarkContextMenu","KImportedBookmarkMenu","KBookmark","KBookmarkGroup"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
kutils = toolkit.ModuleGenerator(
module="PyKDE4.kutils",
outputDirectory=os.path.join(outputBaseDirectory,"sip/kutils"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/kutils"),
mainDocs=os.path.join(cmakelistBaseDirectory,"kutils/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"kutils/CMakeLists.txt")],
ignoreHeaders="""kcmodulecontainer.h kutils_export.h kcmutils_export.h kemoticons_export.h kidletime_export.h kprintutils_export.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KUTILS_EXPORT","KDE_EXPORT","KDE_DEPRECATED","KCMUTILS_EXPORT","KEMOTICONS_EXPORT","KIDLETIME_EXPORT","KPRINTUTILS_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtXml/QtXmlmod.sip","kdecore/kdecoremod.sip","kdeui/kdeuimod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KUTILS_EXPORT","KDE_EXPORT","KCMUTILS_EXPORT","KEMOTICONS_EXPORT","KIDLETIME_EXPORT","KPRINTUTILS_EXPORT"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
solid = toolkit.ModuleGenerator(
module="PyKDE4.solid",
outputDirectory=os.path.join(outputBaseDirectory,"sip/solid"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/solid"),
mainDocs=os.path.join(cmakelistBaseDirectory,"solid/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"solid/solid/CMakeLists.txt")],
ignoreHeaders="""solid_export.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["SOLID_EXPORT","KDE_EXPORT","KDE_DEPRECATED"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","kdecore/kdecoremod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["SOLID_EXPORT","KDE_EXPORT"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
kparts = toolkit.ModuleGenerator(
module="PyKDE4.kparts",
outputDirectory=os.path.join(outputBaseDirectory,"sip/kparts"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/kparts"),
mainDocs=os.path.join(cmakelistBaseDirectory,"kparts/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"kparts/CMakeLists.txt")],
ignoreHeaders="""componentfactory.h genericfactory.h kparts_export.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KPARTS_EXPORT","KDE_EXPORT","KDE_DEPRECATED"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtXml/QtXmlmod.sip","kdecore/kdecoremod.sip","kdeui/kdeuimod.sip","kio/kiomod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KPARTS_EXPORT","KDE_EXPORT"],
noCTSCC=["GenericFactoryBase"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
plasma = toolkit.ModuleGenerator(
module="PyKDE4.plasma",
outputDirectory=os.path.join(outputBaseDirectory,"sip/plasma"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/plasma"),
mainDocs=os.path.join(cmakelistBaseDirectory,"plasma/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"plasma/CMakeLists.txt")],
ignoreHeaders="""plasma_export.h credentials.h """.split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1, "QT_VERSION": 0x040600},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["PLASMA_EXPORT","PLASMA_EXPORT_DEPRECATED","KDE_EXPORT",
"KDE_DEPRECATED","Q_INVOKABLE"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=[
"QtCore/QtCoremod.sip",
"QtGui/QtGuimod.sip",
"QtNetwork/QtNetworkmod.sip",
"QtSvg/QtSvgmod.sip",
"QtWebKit/QtWebKitmod.sip",
"QtXml/QtXmlmod.sip",
"QtDeclarative/QtDeclarativemod.sip",
"QtScript/QtScriptmod.sip",
"kdecore/kdecoremod.sip",
"kdeui/kdeuimod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["PLASMA_EXPORT","PLASMA_EXPORT_DEPRECATED","KDE_EXPORT"],
#noCTSCC=["GenericFactoryBase"],
ignoreBases=["QSharedData","KShared","QList<KUrl>"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*","QGraphicsWidget*"],
parameterNameMatch=["parent","pParent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*","QGraphicsWidget*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
khtml = toolkit.ModuleGenerator(
module="PyKDE4.khtml",
outputDirectory=os.path.join(outputBaseDirectory,"sip/khtml"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/khtml"),
mainDocs=os.path.join(cmakelistBaseDirectory,"khtml/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"khtml/CMakeLists.txt"),
#os.path.join(cmakelistBaseDirectory,"khtml/dom/CMakeLists.txt")
],
ignoreHeaders="""khtmldefaults.h dom_core.h dom_html.h khtml_events.h khtml_export.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KHTML_EXPORT","KDE_EXPORT","KDE_DEPRECATED","Q_INVOKABLE"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=[
"QtCore/QtCoremod.sip",
"QtGui/QtGuimod.sip",
"QtXml/QtXmlmod.sip",
"kdecore/kdecoremod.sip",
"kdeui/kdeuimod.sip",
"kio/kiomod.sip",
"kutils/kutilsmod.sip",
"kparts/kpartsmod.sip",],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KHTML_EXPORT","KDE_EXPORT"],
noCTSCC=["CSSRule","CSSCharsetRule","CSSFontFaceRule","CSSImportRule","CSSMediaRule","CSSPageRule",
"CSSStyleRule","CSSUnknownRule","CSSStyleSheet","CSSPrimitiveValue","CSSValueList","CSSNamespaceRule"],
ignoreBases=["khtml::KHTMLWidget"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
def KNewStuffMapper(mod,headerName):
print("KNewStuffMapper: "+headerName)
filename = os.path.basename(headerName)
if filename.endswith(".h"):
sipName = filename[:-2]+".sip"
if "knewstuff3" in headerName:
return "knewstuff3_"+sipName
else:
return sipName
return filename
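# e.g. (illustrative): ".../knewstuff3/downloaddialog.h" maps to
# "knewstuff3_downloaddialog.sip", while a plain ".../foo.h" maps to "foo.sip".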
def KNewStuffCppHeaderMapper(mod,filename):
if "knewstuff3" in filename:
return "knewstuff3/" + os.path.basename(filename)
else:
return os.path.basename(filename)
knewstuff = toolkit.ModuleGenerator(
module="PyKDE4.knewstuff",
outputDirectory=os.path.join(outputBaseDirectory,"sip/knewstuff"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/knewstuff"),
mainDocs=os.path.join(cmakelistBaseDirectory,"knewstuff/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"knewstuff/CMakeLists.txt"),
os.path.join(cmakelistBaseDirectory,"knewstuff/knewstuff2/CMakeLists.txt"),
os.path.join(cmakelistBaseDirectory,"knewstuff/knewstuff3/CMakeLists.txt")],
ignoreHeaders="""knewstuff_export.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KNEWSTUFF_EXPORT","KNEWSTUFF_EXPORT_DEPRECATED","KDE_EXPORT","KDE_DEPRECATED","Q_INVOKABLE"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=[
"QtCore/QtCoremod.sip",
"QtGui/QtGuimod.sip",
"QtXml/QtXmlmod.sip",
"kdecore/kdecoremod.sip",
"kdeui/kdeuimod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KNEWSTUFF_EXPORT","KNEWSTUFF_EXPORT_DEPRECATED","KDE_EXPORT"],
#noCTSCC=[],
#ignoreBases=["khtml::KHTMLWidget"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
],
filenameMappingFunction=KNewStuffMapper,
cppHeaderMappingFunction=KNewStuffCppHeaderMapper
)
###########################################################################
dnssd = toolkit.ModuleGenerator(
module="PyKDE4.dnssd",
outputDirectory=os.path.join(outputBaseDirectory,"sip/dnssd"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/dnssd"),
mainDocs=os.path.join(cmakelistBaseDirectory,"dnssd/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"dnssd/CMakeLists.txt")],
ignoreHeaders="""dnssd_export.h settings.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KDNSSD_EXPORT","KDE_EXPORT","KDE_DEPRECATED","Q_INVOKABLE"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=[
"QtCore/QtCoremod.sip",
"QtGui/QtGuimod.sip",
"kdecore/kdecoremod.sip",
"kdeui/kdeuimod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KDNSSD_EXPORT","KDE_EXPORT"],
#noCTSCC=[],
#ignoreBases=["khtml::KHTMLWidget"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
nepomuk = toolkit.ModuleGenerator(
module="PyKDE4.nepomuk",
outputDirectory=os.path.join(outputBaseDirectory,"sip/nepomuk"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/nepomuk"),
mainDocs=os.path.join(cmakelistBaseDirectory,"nepomuk/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"nepomuk/CMakeLists.txt"),
os.path.join(cmakelistBaseDirectory,"nepomuk/query/CMakeLists.txt")],
headers = [os.path.join(kdelibsBuildDirectory,"nepomuk",x)
for x in "ncal.h nco.h ndo.h nfo.h nie.h nmm.h nuao.h pimo.h tmo.h".split(" ")],
ignoreHeaders="""nepomuk_export.h ontologyloader.h desktopontologyloader.h fileontologyloader.h ontologymanager.h nepomukontologyloader.h nepomukquery_export.h kmetadatatagwidget.h ncal.h nco.h ndo.h nexif.h nfo.h nie.h nmm.h nmo.h nuao.h pimo.h tmo.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["NEPOMUK_EXPORT","KDE_EXPORT","KDE_DEPRECATED","Q_INVOKABLE","NEPOMUKQUERY_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=[
"QtCore/QtCoremod.sip",
"kdecore/kdecoremod.sip",
"soprano/sopranomod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["NEPOMUK_EXPORT","KDE_EXPORT","NEPOMUKQUERY_EXPORT"],
noCTSCC=["Term","GroupTerm","AndTerm","OrTerm","LiteralTerm","ResourceTerm","SimpleTerm","ComparisonTerm","ResourceTypeTerm","NegationTerm","OptionalTerm","FileQuery"],
#ignoreBases=["khtml::KHTMLWidget"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
soprano = toolkit.ModuleGenerator(
module="PyKDE4.soprano",
outputDirectory=os.path.join(outputBaseDirectory,"sip/soprano"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/soprano"),
mainDocs=os.path.join(cmakelistGitBaseDirectory,"soprano/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistGitBaseDirectory,"soprano/CMakeLists.txt"),
os.path.join(cmakelistGitBaseDirectory,"soprano/soprano/CMakeLists.txt"),
os.path.join(cmakelistGitBaseDirectory,"soprano/server/CMakeLists.txt"),
#os.path.join(cmakelistGitBaseDirectory,"soprano/server/sparql/CMakeLists.txt"),
os.path.join(cmakelistGitBaseDirectory,"soprano/server/dbus/CMakeLists.txt")],
ignoreHeaders="""soprano_export.h sopranomacros.h soprano.h vocabulary.h iterator.h version.h iteratorbackend.h""".split(" "),
#noUpdateSip=["iterator.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1, "USING_SOPRANO_NRLMODEL_UNSTABLE_API":1, "QT_VERSION": 0x040700},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["SOPRANO_EXPORT","SOPRANO_CLIENT_EXPORT","SOPRANO_SERVER_EXPORT",
"USING_SOPRANO_NRLMODEL_UNSTABLE_API","KDE_EXPORT","KDE_DEPRECATED","Q_INVOKABLE",
"SOPRANO_DEPRECATED"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtNetwork/QtNetworkmod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["SOPRANO_EXPORT","SOPRANO_CLIENT_EXPORT","SOPRANO_SERVER_EXPORT","KDE_EXPORT"],
#noCTSCC=[],
ignoreBases=["IteratorBackend<BindingSet>","Iterator<Node>","Iterator<BindingSet>","Iterator<Statement>"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
akonadi = toolkit.ModuleGenerator(
module="PyKDE4.akonadi",
outputDirectory=os.path.join(outputBaseDirectory,"sip/akonadi"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/akonadi"),
mainDocs=os.path.join(cmakelistPimlibsBaseDirectory,"akonadi/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistPimlibsBaseDirectory,"akonadi/CMakeLists.txt"),
os.path.join(cmakelistPimlibsBaseDirectory,"akonadi/kmime/CMakeLists.txt"),
os.path.join(cmakelistPimlibsBaseDirectory,"akonadi/kabc/CMakeLists.txt")],
ignoreHeaders="""akonadi_export.h akonadi-kmime_export.h akonadi-kabc_export.h itempayloadinternals_p.h collectionpathresolver_p.h qtest_akonadi.h exception.h contactparts.h cachepolicypage.h resourcebasesettings.h dbusconnectionpool.h """.split(" "),
#addressee.h kabc_export.h
headers=[os.path.join(kdepimlibsBuildDirectory,"akonadi/resourcebasesettings.h")],
# headers=[
# os.path.join(kdepimlibsBuildDirectory, "addressee.h")],
#resourcebase.h agentbase.h
#noUpdateSip=["iterator.sip"],
ignoreBases=["QDBusContext"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros( \
[(re.compile(r'Latin1\( "ISO-8859-1" \)'),r'Latin1'),
(re.compile(r'kmime_mk_trivial_ctor\(\s*(\w+)\s*\)'),r'public: explicit \1( Content *parent = 0 ); \1( Content *parent, const QByteArray &s ); \1( Content *parent, const QString &s, const QByteArray &charset ); ~\1();'),
(re.compile(r'kmime_mk_dptr_ctor\(\s*(\w+)\s*\)'), r'protected: explicit \1( \1::Private *d, KMime::Content *parent = 0 );'),
(re.compile(r'kmime_mk_trivial_ctor_with_name\(\s*(\w+)\s*\)'),r'public: explicit \1( Content *parent = 0 ); \1( Content *parent, const QByteArray &s ); \1( Content *parent, const QString &s, const QByteArray &charset ); ~\1();const char *type() const; static const char *staticType();'),
]),
#[(re.compile(r'AKONADI_COLLECTION_PROPERTIES_PAGE_FACTORY\s*\(\s*(\S+)\s*,\s*(\w+)\s*\)'),r'']),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(["AKONADI_DECLARE_PRIVATE"]),
bareMacros=qtkdemacros.QtBareMacros(["AKONADI_EXPORT","AKONADI_EXPORT_DEPRECATED","KDE_EXPORT",
"KDE_DEPRECATED","Q_INVOKABLE","KABC_EXPORT","KABC_EXPORT_DEPRECATED","AKONADI_KABC_EXPORT","AKONADI_KMIME_EXPORT","AKONADI_KMIME_EXPORT_DEPRECATED","KMIME_EXPORT","KMIME_EXPORT_DEPRECATED"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","kdeui/kdeuimod.sip","kdecore/kdecoremod.sip","kio/kiomod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["AKONADI_EXPORT","AKONADI_KABC_EXPORT","AKONADI_KMIME_EXPORT","KDE_EXPORT","AKONADI_EXPORT_DEPRECATED","AKONADI_KMIME_EXPORT_DEPRECATED","KABC_EXPORT","KABC_EXPORT_DEPRECATED","KMIME_EXPORT","KMIME_EXPORT_DEPRECATED"],
noCTSCC=["Collection","Entity","Item"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
polkitqt = toolkit.ModuleGenerator(
module="PyKDE4.polkitqt",
outputDirectory=os.path.join(outputBaseDirectory,"sip/polkitqt"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/polkitqt"),
mainDocs=os.path.join(polkitqtBaseDirectory,"Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(polkitqtBaseDirectory,"CMakeLists.txt")],
ignoreHeaders="""export.h polkitqtversion.h""".split(" "),
#resourcebase.h agentbase.h
#noUpdateSip=["iterator.sip"],
#ignoreBases=["QDBusContext"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["POLKIT_QT_EXPORT","POLKITQT1_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["POLKIT_QT_EXPORT","KDE_EXPORT"],
#noCTSCC=[],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
phonon = toolkit.ModuleGenerator(
module="PyKDE4.phonon",
outputDirectory=os.path.join(outputBaseDirectory,"sip/phonon"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/phonon"),
mainDocs=os.path.join(cmakelistPhononBaseDirectory,"Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistPhononBaseDirectory,"phonon/CMakeLists.txt")],
ignoreHeaders="""phonondefs.h phonon_export.h export.h kaudiodevicelist_export.h phononnamespace.h addoninterface.h volumefaderinterface.h backendinterface.h effectinterface.h mediaobjectinterface.h platformplugin.h audiodataoutputinterface.h audiooutputinterface.h""".split(" "),
noUpdateSip=["phononnamespace.sip"],
ignoreBases=["QSharedData"],
#ignoreBases=["AbstractAudioOutput", "Phonon::AbstractAudioOutput", "QSharedData", "AbstractVideoOutput",
# "Phonon::AbstractVideoOutput"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1, "QT_VERSION": "0x040400", "_MSC_VER": 0},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["PHONON_EXPORT","PHONONEXPERIMENTAL_EXPORT", "PHONON_DEPRECATED",
"PHONON_EXPORT_DEPRECATED", "KAUDIODEVICELIST_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtXml/QtXmlmod.sip","solid/solidmod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["PHONON_EXPORT", "KDE_EXPORT", "PHONONEXPERIMENTAL_EXPORT", "KAUDIODEVICELIST_EXPORT", "PHONON_DEPRECATED", "PHONON_EXPORT_DEPRECATED"],
#noCTSCC=[],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
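###########################################################################
# Possible refactor (sketch, not part of the original script): nearly every
# module above repeats the same TransferThis/Transfer annotation rules; a
# helper like this could build them once, assuming toolkit.AnnotationRule
# carries no per-module state.
def _standardAnnotationRules(parameterTypes=("QWidget*", "QObject*")):
    return [
        toolkit.AnnotationRule(
            methodTypeMatch="ctor",
            parameterTypeMatch=list(parameterTypes),
            parameterNameMatch=["parent"],
            annotations="TransferThis"),
        toolkit.AnnotationRule(
            methodTypeMatch="function",
            parameterTypeMatch=list(parameterTypes),
            parameterNameMatch="parent",
            annotations="Transfer")
    ]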
###########################################################################
def updateSIP():
kdecore.run()
plasma.run()
kdeui.run()
kio.run()
kutils.run()
solid.run()
kparts.run()
khtml.run()
knewstuff.run()
dnssd.run()
nepomuk.run()
soprano.run()
akonadi.run()
polkitqt.run()
phonon.run()
def updateDocs():
classNames = []
nsNames = []
def UpdateClassNamespaceList(moduleName,sipScopes):
nsNames.append( (moduleName,'global', 'global') )
def ExtractClassNamespace(scope):
for item in scope:
if isinstance(item,sipsymboldata.SymbolData.SipClass):
classNames.append( (moduleName, item.fqPythonName(), item.fqPythonName()) )
ExtractClassNamespace(item)
elif isinstance(item,sipsymboldata.SymbolData.Namespace):
nsTuple = (moduleName,item.fqPythonName(),item.fqPythonName())
if nsTuple not in nsNames:
nsNames.append( nsTuple )
ExtractClassNamespace(item)
for scope in sipScopes:
ExtractClassNamespace(scope)
UpdateClassNamespaceList('kdecore',kdecore.docs())
UpdateClassNamespaceList('plasma',plasma.docs())
UpdateClassNamespaceList('kdeui',kdeui.docs())
UpdateClassNamespaceList('kio',kio.docs())
UpdateClassNamespaceList('kutils',kutils.docs())
UpdateClassNamespaceList('solid',solid.docs())
UpdateClassNamespaceList('kparts',kparts.docs())
UpdateClassNamespaceList('khtml',khtml.docs())
UpdateClassNamespaceList('knewstuff',knewstuff.docs())
UpdateClassNamespaceList('dnssd',dnssd.docs())
UpdateClassNamespaceList('nepomuk',nepomuk.docs())
UpdateClassNamespaceList('soprano',soprano.docs())
UpdateClassNamespaceList('akonadi',akonadi.docs())
UpdateClassNamespaceList('polkitqt',polkitqt.docs())
UpdateClassNamespaceList('phonon',phonon.docs())
print("Writing all classes index:")
toolkit.ModuleGenerator.WriteAllClasses(os.path.join(outputBaseDirectory,"docs/html"),nsNames,classNames)
print("Done")
def main():
updateSIP()
updateDocs()
if __name__=="__main__":
main()
| lgpl-3.0 |
josiah-wolf-oberholtzer/supriya | tests/nonrealtime/test_nonrealtime_Session_zero_duration.py | 1 | 2475 | import pytest
import supriya.assets.synthdefs
import supriya.nonrealtime
import supriya.synthdefs
import supriya.ugens
def test_manual_with_gate():
session = supriya.nonrealtime.Session(0, 2)
with session.at(0):
group = session.add_group(duration=4)
for i in range(4):
with session.at(i):
group.add_synth(duration=0)
d_recv_commands = pytest.helpers.build_d_recv_commands(
[supriya.assets.synthdefs.default]
)
assert session.to_lists(duration=5) == [
[
0.0,
[
*d_recv_commands,
["/g_new", 1000, 0, 0],
["/s_new", "da0982184cc8fa54cf9d288a0fe1f6ca", 1001, 0, 1000],
["/n_set", 1001, "gate", 0],
],
],
[
1.0,
[
["/s_new", "da0982184cc8fa54cf9d288a0fe1f6ca", 1002, 0, 1000],
["/n_set", 1002, "gate", 0],
],
],
[
2.0,
[
["/s_new", "da0982184cc8fa54cf9d288a0fe1f6ca", 1003, 0, 1000],
["/n_set", 1003, "gate", 0],
],
],
[
3.0,
[
["/s_new", "da0982184cc8fa54cf9d288a0fe1f6ca", 1004, 0, 1000],
["/n_set", 1004, "gate", 0],
],
],
[4.0, [["/n_free", 1000]]],
[5.0, [[0]]],
]
def test_manual_without_gate():
with supriya.synthdefs.SynthDefBuilder() as builder:
source = supriya.ugens.DC.ar(1)
supriya.ugens.Out.ar(bus=0, source=source)
source_synthdef = builder.build()
session = supriya.nonrealtime.Session(0, 1)
with session.at(0):
group = session.add_group(duration=4)
for i in range(4):
with session.at(i):
group.add_synth(duration=0, synthdef=source_synthdef)
assert session.to_lists(duration=10) == [
[
0.0,
[
["/d_recv", bytearray(source_synthdef.compile())],
["/g_new", 1000, 0, 0],
["/s_new", "7839f99c38c2ac4326388a013cdd643c", 1001, 0, 1000],
],
],
[1.0, [["/s_new", "7839f99c38c2ac4326388a013cdd643c", 1002, 0, 1000]]],
[2.0, [["/s_new", "7839f99c38c2ac4326388a013cdd643c", 1003, 0, 1000]]],
[3.0, [["/s_new", "7839f99c38c2ac4326388a013cdd643c", 1004, 0, 1000]]],
[4.0, [["/n_free", 1000]]],
[10.0, [[0]]],
]
| mit |
JGiola/swift | utils/gyb_syntax_support/TypeNodes.py | 10 | 8308 | from .Child import Child
from .Node import Node # noqa: I201
TYPE_NODES = [
# simple-type-identifier -> identifier generic-argument-clause?
Node('SimpleTypeIdentifier', kind='Type',
children=[
Child('Name', kind='Token', classification='TypeIdentifier',
token_choices=[
'IdentifierToken',
'CapitalSelfToken',
'AnyToken',
]),
Child('GenericArgumentClause', kind='GenericArgumentClause',
is_optional=True),
]),
# member-type-identifier -> type '.' identifier generic-argument-clause?
Node('MemberTypeIdentifier', kind='Type',
children=[
Child('BaseType', kind='Type'),
Child('Period', kind='Token',
token_choices=[
'PeriodToken',
'PrefixPeriodToken',
]),
Child('Name', kind='Token', classification='TypeIdentifier',
token_choices=[
'IdentifierToken',
'CapitalSelfToken',
'AnyToken',
]),
Child('GenericArgumentClause', kind='GenericArgumentClause',
is_optional=True),
]),
# class-restriction-type -> 'class'
Node('ClassRestrictionType', kind='Type',
children=[
Child('ClassKeyword', kind='ClassToken'),
]),
# array-type -> '[' type ']'
Node('ArrayType', kind='Type',
children=[
Child('LeftSquareBracket', kind='LeftSquareBracketToken'),
Child('ElementType', kind='Type'),
Child('RightSquareBracket', kind='RightSquareBracketToken'),
]),
# dictionary-type -> '[' type ':' type ']'
Node('DictionaryType', kind='Type',
children=[
Child('LeftSquareBracket', kind='LeftSquareBracketToken'),
Child('KeyType', kind='Type'),
Child('Colon', kind='ColonToken'),
Child('ValueType', kind='Type'),
Child('RightSquareBracket', kind='RightSquareBracketToken'),
]),
# metatype-type -> type '.' 'Type'
# | type '.' 'Protocol
Node('MetatypeType', kind='Type',
children=[
Child('BaseType', kind='Type'),
Child('Period', kind='PeriodToken'),
Child('TypeOrProtocol', kind='IdentifierToken',
text_choices=[
'Type',
'Protocol',
]),
]),
# optional-type -> type '?'
Node('OptionalType', kind='Type',
children=[
Child('WrappedType', kind='Type'),
Child('QuestionMark', kind='PostfixQuestionMarkToken'),
]),
    # some-type -> 'some' type
Node('SomeType', kind='Type',
children=[
Child('SomeSpecifier', kind='IdentifierToken',
classification='Keyword',
text_choices=['some']),
Child('BaseType', kind='Type'),
]),
# implicitly-unwrapped-optional-type -> type '!'
Node('ImplicitlyUnwrappedOptionalType', kind='Type',
children=[
Child('WrappedType', kind='Type'),
Child('ExclamationMark', kind='ExclamationMarkToken'),
]),
# composition-type-element -> type '&'
Node('CompositionTypeElement', kind='Syntax',
children=[
Child('Type', kind='Type'),
Child('Ampersand', kind='Token',
text_choices=['&'],
is_optional=True),
]),
    # composition-type-element-list -> composition-type-element
    #                                  composition-type-element-list?
Node('CompositionTypeElementList', kind='SyntaxCollection',
element='CompositionTypeElement'),
# composition-type -> composition-type-element-list
Node('CompositionType', kind='Type',
children=[
Child('Elements', kind='CompositionTypeElementList',
collection_element_name='Element'),
]),
# tuple-type-element -> identifier? ':'? type-annotation ','?
Node('TupleTypeElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('InOut', kind='InoutToken',
is_optional=True),
Child('Name', kind='Token',
is_optional=True,
token_choices=[
'IdentifierToken',
'WildcardToken'
]),
Child('SecondName', kind='Token',
is_optional=True,
token_choices=[
'IdentifierToken',
'WildcardToken'
]),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Type', kind='Type'),
Child('Ellipsis', kind='EllipsisToken',
is_optional=True),
Child('Initializer', kind='InitializerClause',
is_optional=True),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# tuple-type-element-list -> tuple-type-element tuple-type-element-list?
Node('TupleTypeElementList', kind='SyntaxCollection',
element='TupleTypeElement'),
# tuple-type -> '(' tuple-type-element-list ')'
Node('TupleType', kind='Type',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Elements', kind='TupleTypeElementList',
collection_element_name='Element'),
Child('RightParen', kind='RightParenToken'),
]),
# throwing-specifier -> 'throws' | 'rethrows'
# function-type -> attribute-list '(' function-type-argument-list ')'
# async? throwing-specifier? '->'? type?
Node('FunctionType', kind='Type',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Arguments', kind='TupleTypeElementList',
collection_element_name='Argument'),
Child('RightParen', kind='RightParenToken'),
Child('AsyncKeyword', kind='IdentifierToken',
classification='Keyword',
text_choices=['async'], is_optional=True),
Child('ThrowsOrRethrowsKeyword', kind='Token',
is_optional=True,
token_choices=[
'ThrowsToken',
'RethrowsToken',
'ThrowToken',
]),
Child('Arrow', kind='ArrowToken'),
Child('ReturnType', kind='Type'),
]),
# attributed-type -> type-specifier? attribute-list? type
    # type-specifier -> 'inout' | '__shared' | '__owned'
Node('AttributedType', kind='Type',
children=[
Child('Specifier', kind='Token',
text_choices=['inout', '__shared', '__owned'],
is_optional=True),
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('BaseType', kind='Type'),
]),
# generic-argument-list -> generic-argument generic-argument-list?
Node('GenericArgumentList', kind='SyntaxCollection',
element='GenericArgument'),
# A generic argument.
# Dictionary<Int, String>
# ^~~~ ^~~~~~
Node('GenericArgument', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('ArgumentType', kind='Type'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# generic-argument-clause -> '<' generic-argument-list '>'
Node('GenericArgumentClause', kind='Syntax',
children=[
Child('LeftAngleBracket', kind='LeftAngleToken'),
Child('Arguments', kind='GenericArgumentList',
collection_element_name='Argument'),
Child('RightAngleBracket', kind='RightAngleToken'),
]),
]
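# Minimal sketch (not part of the original file): how this table is typically
# consumed; gyb templates iterate over TYPE_NODES and read each node's
# fields. The attribute names used below (syntax_kind, children, name) are
# assumed to match this support package's Node and Child definitions.
if __name__ == '__main__':
    for node in TYPE_NODES:
        print(node.syntax_kind, '->', [child.name for child in node.children])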
| apache-2.0 |
frouty/odoo_oph | addons/mail/res_users.py | 36 | 10508 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
class res_users(osv.Model):
""" Update of res.users class
- add a preference about sending emails about notifications
- make a new user follow itself
- add a welcome message
"""
_name = 'res.users'
_inherit = ['res.users']
_inherits = {'mail.alias': 'alias_id'}
_columns = {
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Email address internally associated with this user. Incoming "\
"emails will appear in the user's notifications."),
}
_defaults = {
'alias_domain': False, # always hide alias during creation
}
def __init__(self, pool, cr):
""" Override of __init__ to add access rights on notification_email_send
and alias fields. Access rights are disabled by default, but allowed
on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.
"""
init_res = super(res_users, self).__init__(pool, cr)
# duplicate list to avoid modifying the original reference
self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
self.SELF_WRITEABLE_FIELDS.append('notification_email_send')
# duplicate list to avoid modifying the original reference
self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)
self.SELF_READABLE_FIELDS.extend(['notification_email_send', 'alias_domain', 'alias_name'])
return init_res
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, partner following themselves """
# create aliases for all users and avoid constraint errors
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(res_users, self)._auto_init,
self._columns['alias_id'], 'login', alias_force_key='id', context=context)
def create(self, cr, uid, data, context=None):
# create default alias same as the login
if not data.get('login', False):
raise osv.except_osv(_('Invalid Action!'), _('You may not create a user. To create new users, you should use the "Settings > Users" menu.'))
mail_alias = self.pool.get('mail.alias')
alias_id = mail_alias.create_unique_alias(cr, uid, {'alias_name': data['login']}, model_name=self._name, context=context)
data['alias_id'] = alias_id
data.pop('alias_name', None) # prevent errors during copy()
# create user
user_id = super(res_users, self).create(cr, uid, data, context=context)
user = self.browse(cr, uid, user_id, context=context)
# alias
mail_alias.write(cr, SUPERUSER_ID, [alias_id], {"alias_force_thread_id": user_id}, context)
# create a welcome message
self._create_welcome_message(cr, uid, user, context=context)
return user_id
def _create_welcome_message(self, cr, uid, user, context=None):
if not self.has_group(cr, uid, 'base.group_user'):
return False
company_name = user.company_id.name if user.company_id else ''
body = _('%s has joined the %s network.') % (user.name, company_name)
# TODO change SUPERUSER_ID into user.id but catch errors
return self.pool.get('res.partner').message_post(cr, SUPERUSER_ID, [user.partner_id.id],
body=body, context=context)
def write(self, cr, uid, ids, vals, context=None):
# User alias is sync'ed with login
if vals.get('login'):
vals['alias_name'] = vals['login']
return super(res_users, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
# Cascade-delete mail aliases as well, as they should not exist without the user.
alias_pool = self.pool.get('mail.alias')
alias_ids = [user.alias_id.id for user in self.browse(cr, uid, ids, context=context) if user.alias_id]
res = super(res_users, self).unlink(cr, uid, ids, context=context)
alias_pool.unlink(cr, uid, alias_ids, context=context)
return res
def _message_post_get_pid(self, cr, uid, thread_id, context=None):
assert thread_id, "res.users does not support posting global messages"
if context and 'thread_model' in context:
context['thread_model'] = 'res.partner'
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
return self.browse(cr, SUPERUSER_ID, thread_id).partner_id.id
def message_post(self, cr, uid, thread_id, context=None, **kwargs):
""" Redirect the posting of message on res.users to the related partner.
This is done because when giving the context of Chatter on the
various mailboxes, we do not have access to the current partner_id. """
partner_id = self._message_post_get_pid(cr, uid, thread_id, context=context)
return self.pool.get('res.partner').message_post(cr, uid, partner_id, context=context, **kwargs)
def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
for id in ids:
partner_id = self.browse(cr, SUPERUSER_ID, id).partner_id.id
self.pool.get('res.partner').message_update(cr, uid, [partner_id], msg_dict, update_vals=update_vals, context=context)
return True
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
for id in ids:
partner_id = self.browse(cr, SUPERUSER_ID, id).partner_id.id
self.pool.get('res.partner').message_subscribe(cr, uid, [partner_id], partner_ids, subtype_ids=subtype_ids, context=context)
return True
def message_get_partner_info_from_emails(self, cr, uid, emails, link_mail=False, context=None):
return self.pool.get('res.partner').message_get_partner_info_from_emails(cr, uid, emails, link_mail=link_mail, context=context)
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
partner_ids = []
for id in ids:
partner_ids.append(self.browse(cr, SUPERUSER_ID, id).partner_id.id)
return self.pool.get('res.partner').message_get_suggested_recipients(cr, uid, partner_ids, context=context)
#------------------------------------------------------
# Compatibility methods: do not use
# TDE TODO: remove me in 8.0
#------------------------------------------------------
def message_post_user_api(self, cr, uid, thread_id, context=None, **kwargs):
""" Redirect the posting of message on res.users to the related partner.
This is done because when giving the context of Chatter on the
various mailboxes, we do not have access to the current partner_id. """
partner_id = self._message_post_get_pid(cr, uid, thread_id, context=context)
return self.pool.get('res.partner').message_post_user_api(cr, uid, partner_id, context=context, **kwargs)
def message_create_partners_from_emails(self, cr, uid, emails, context=None):
return self.pool.get('res.partner').message_create_partners_from_emails(cr, uid, emails, context=context)
class res_users_mail_group(osv.Model):
""" Update of res.users class
- if adding groups to an user, check mail.groups linked to this user
group, and the user. This is done by overriding the write method.
"""
_name = 'res.users'
_inherit = ['res.users']
    # FP Note: to improve, post processing may be better?
def write(self, cr, uid, ids, vals, context=None):
write_res = super(res_users_mail_group, self).write(cr, uid, ids, vals, context=context)
if vals.get('groups_id'):
# form: {'group_ids': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'group_ids': [(6, 0, [ids]}
user_group_ids = [command[1] for command in vals['groups_id'] if command[0] == 4]
user_group_ids += [id for command in vals['groups_id'] if command[0] == 6 for id in command[2]]
mail_group_obj = self.pool.get('mail.group')
mail_group_ids = mail_group_obj.search(cr, uid, [('group_ids', 'in', user_group_ids)], context=context)
mail_group_obj.message_subscribe_users(cr, uid, mail_group_ids, ids, context=context)
return write_res
class res_groups_mail_group(osv.Model):
""" Update of res.groups class
- if adding users from a group, check mail.groups linked to this user
group and subscribe them. This is done by overriding the write method.
"""
_name = 'res.groups'
_inherit = 'res.groups'
    # FP Note: to improve, post processing, after the super may be better
def write(self, cr, uid, ids, vals, context=None):
write_res = super(res_groups_mail_group, self).write(cr, uid, ids, vals, context=context)
if vals.get('users'):
# form: {'group_ids': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'group_ids': [(6, 0, [ids]}
user_ids = [command[1] for command in vals['users'] if command[0] == 4]
user_ids += [id for command in vals['users'] if command[0] == 6 for id in command[2]]
mail_group_obj = self.pool.get('mail.group')
mail_group_ids = mail_group_obj.search(cr, uid, [('group_ids', 'in', ids)], context=context)
mail_group_obj.message_subscribe_users(cr, uid, mail_group_ids, user_ids, context=context)
return write_res
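# For reference (sketch, not part of the original module): the relational
# command tuples decoded by the two write() overrides above:
#   (3, id) -> remove id from the relation (the linked record is kept)
#   (4, id) -> add an existing id to the relation
#   (6, 0, [ids]) -> replace the whole relation with ids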
| agpl-3.0 |
pombredanne/hitch | hitch/commandline.py | 1 | 10374 | """High level command line interface to hitch."""
from subprocess import call, PIPE, STDOUT, CalledProcessError, Popen
from hitch.click import command, group, argument, option
from os import path, makedirs, listdir, kill, remove
from sys import stderr, exit, modules, argv
from functools import partial
from hitch import hitchdir
import shutil
import signal
import copy
def check_output(command, stdout=PIPE, stderr=PIPE):
"""Re-implemented subprocess.check_output since it is not available < python 2.7."""
return Popen(command, stdout=stdout, stderr=stderr).communicate()[0]
@group()
def cli():
pass
@command()
@option(
'-p', '--python', default=None,
help="""Create hitch virtualenv using specific python version"""
""" (e.g. /usr/bin/python3). Defaults to using python3 on the system path."""
)
@option(
'-v', '--virtualenv', default=None,
help="""Create hitch virtualenv using specific virtualenv"""
""" (e.g. /usr/bin/virtualenv). Defaults to using virtualenv on the system path."""
)
def init(python, virtualenv):
"""Initialize hitch in this directory."""
if virtualenv is None:
if call(["which", "virtualenv"], stdout=PIPE, stderr=PIPE):
stderr.write("You must have virtualenv installed to use hitch.\n")
stderr.flush()
exit(1)
virtualenv = check_output(["which", "virtualenv"]).decode('utf8').replace("\n", "")
else:
if path.exists(virtualenv):
if python is None:
python = path.join(path.dirname(virtualenv), "python")
    else:
        stderr.write("{} not found.\n".format(virtualenv))
        exit(1)
if python is None:
if call(["which", "python3"], stdout=PIPE, stderr=PIPE):
stderr.write(
"To use Hitch, you must have python 3 installed on your system "
"and available. If your python3 is not on the system path with "
"the name python3, specify its exact location using --python.\n"
)
stderr.flush()
exit(1)
python3 = check_output(["which", "python3"]).decode('utf8').replace("\n", "")
else:
if path.exists(python):
python3 = python
else:
stderr.write("{} not found.\n".format(python))
exit(1)
str_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8').replace('\n', '')
tuple_version = tuple([int(v) for v in str_version.replace('Python ', '').split('.')])
if tuple_version < (3, 3):
stderr.write(
"The hitch environment must have python >=3.3 installed to be built.\n Your "
"app can run with earlier versions of python, but the testing environment can't.\n"
)
exit(1)
if hitchdir.hitch_exists():
stderr.write("Hitch has already been initialized in this directory or a directory above it.\n")
stderr.write("If you wish to re-initialize hitch in this directory, run 'hitch clean' in the")
stderr.write("directory containing the .hitch directory and run hitch init here again.\n")
stderr.flush()
exit(1)
makedirs(".hitch")
pip = path.abspath(path.join(".hitch", "virtualenv", "bin", "pip"))
call([virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3])
call([pip, "install", "-U", "pip"])
if path.exists("hitchreqs.txt"):
call([pip, "install", "-r", "hitchreqs.txt"])
else:
call([pip, "install", "hitchtest"])
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
def update_requirements():
"""Check hitchreqs.txt match what's installed via pip freeze. If not, update."""
pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip")
hitchreqs_filename = path.join(hitchdir.get_hitch_directory_or_fail(), "..", "hitchreqs.txt")
pip_freeze = check_output([pip, "freeze"]).decode('utf8').split('\n')
hitchreqs_handle = ""
with open(hitchreqs_filename, "r") as hitchreqs_handle:
hitchreqs = hitchreqs_handle.read().split('\n')
if not sorted(pip_freeze) == sorted(hitchreqs):
call([pip, "install", "-r", "hitchreqs.txt"])
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
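# Sketch (not part of the original module): the freeze/requirements check
# above as a pure helper, assuming pip's plain "package==version" line format.
def _requirements_in_sync(pip_freeze_text, hitchreqs_text):
    return sorted(pip_freeze_text.split('\n')) == sorted(hitchreqs_text.split('\n'))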
def get_pip():
"""Get the file path to the hitch pip."""
return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip")
@command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="Run a hitch package command.")
@argument('arguments', nargs=-1)
def runpackage(arguments):
# Generic method to run any installed app in the virtualenv whose name starts with hitch*
update_requirements()
binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{}".format(argv[1]))
command = [binfile, ] + argv[2:]
# When receiving an exit signal, just forward it to process child.
def forward_signal_to_child(pid, signum, frame):
kill(pid, signum)
process = Popen(command)
signal.signal(signal.SIGINT, partial(forward_signal_to_child, process.pid))
signal.signal(signal.SIGTERM, partial(forward_signal_to_child, process.pid))
signal.signal(signal.SIGHUP, partial(forward_signal_to_child, process.pid))
signal.signal(signal.SIGQUIT, partial(forward_signal_to_child, process.pid))
return_code = process.wait()
exit(return_code)
@command()
@argument('package', required=True)
def uninstall(package):
"""Uninstall hitch package."""
pip = get_pip()
call([pip, "uninstall", package] )
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
@command()
@argument('package', required=True)
def install(package):
"""Install hitch package."""
pip = get_pip()
call([pip, "install", package, "-U", ])
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
@command()
def upgrade():
"""Upgrade all installed hitch packages."""
pip = get_pip()
package_list = [
p for p in check_output([pip, "freeze"]).decode('utf8').split('\n')
if p != "" and "==" in p
]
version_fixed_package_list = [p.split("==")[0] for p in package_list]
for package in version_fixed_package_list:
call([pip, "install", package, "-U", ])
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
@command()
def freeze():
"""List installed hitch packages."""
pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip")
call([pip, "freeze", ])
@command()
def clean():
"""Remove the hitch directory entirely."""
if hitchdir.hitch_exists():
hitch_directory = hitchdir.get_hitch_directory_or_fail()
shutil.rmtree(hitch_directory)
else:
stderr.write("No hitch directory found. Doing nothing.\n")
stderr.flush()
@command()
@option(
'-p', '--packages', default=None, help=(
"Specify precise packages to remove - "
"e.g. postgresql, postgresql-9.3.9, python, python2.6.8"
)
)
def cleanpkg(packages):
"""Remove installed packages from the .hitchpkg directory."""
hitchpkg = path.join(path.expanduser("~"), ".hitchpkg")
if path.exists(hitchpkg):
if packages is None:
shutil.rmtree(hitchpkg)
else:
            for file_or_dir in listdir(hitchpkg):
                if file_or_dir.startswith(packages):
                    full_path = path.join(hitchpkg, file_or_dir)
                    if path.isdir(full_path):
                        shutil.rmtree(full_path)
                    else:
                        remove(full_path)
def run():
"""Run hitch bootstrap CLI"""
def stop_everything(sig, frame):
"""Exit hitch."""
exit(1)
signal.signal(signal.SIGINT, stop_everything)
signal.signal(signal.SIGTERM, stop_everything)
signal.signal(signal.SIGHUP, stop_everything)
signal.signal(signal.SIGQUIT, stop_everything)
if hitchdir.hitch_exists():
if not path.exists(path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin")):
stderr.write("Hitch was initialized in this directory (or one above it), but something.\n")
stderr.write("was corrupted. Try running 'hitch clean' and then run 'hitch init' again.")
stderr.flush()
exit(1)
# Get packages from bin folder that are hitch related
python_bin = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "python")
packages = [
package.replace("hitch", "") for package in listdir(
path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin")
)
if package.startswith("hitch") and package != "hitch"
]
# Add packages that start with hitch* to the list of commands available
for package in packages:
cmd = copy.deepcopy(runpackage)
cmd.name = package
try:
description = check_output([
python_bin, '-c',
'import sys;sys.stdout.write(__import__("hitch{}").commandline.cli.help)'.format(
package
)
]).decode('utf8')
except CalledProcessError:
description = ""
cmd.help = description
cmd.short_help = description
cli.add_command(cmd)
cli.add_command(install)
cli.add_command(uninstall)
cli.add_command(upgrade)
cli.add_command(clean)
cli.add_command(freeze)
cli.add_command(init)
cli.help = "Hitch test runner for:\n\n {0}.".format(hitchdir.get_hitch_directory())
else:
cli.add_command(init)
cli.add_command(clean)
cli.help = "Hitch bootstrapper - '.hitch' directory not detected here."
cli()
if __name__ == '__main__':
run()
| agpl-3.0 |
JavaRabbit/CS496_capstone | blog/introduction_to_data_models_in_cloud_datastore/blog.py | 9 | 3743 | # Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
from google.cloud import datastore
def path_to_key(datastore, path):
"""
Translates a file system path to a datastore key. The basename becomes the
key name and the extension becomes the kind.
Examples:
/file.ext -> key(ext, file)
/parent.ext/file.ext -> key(ext, parent, ext, file)
"""
key_parts = []
path_parts = path.strip(u'/').split(u'/')
for n, x in enumerate(path_parts):
name, ext = x.rsplit('.', 1)
key_parts.extend([ext, name])
return datastore.key(*key_parts)
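# Worked example (sketch, not part of the original sample): the path
# '/tonystark.user/2015-01-01.post' yields
# datastore.key('user', 'tonystark', 'post', '2015-01-01'), so each post is
# a child entity of its author and lives in the same entity group.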
def create_user(ds, username, profile):
key = path_to_key(ds, '{0}.user'.format(username))
entity = datastore.Entity(key)
entity.update(profile)
ds.put(entity)
def create_post(ds, username, post_content):
now = datetime.datetime.utcnow()
key = path_to_key(ds, '{0}.user/{1}.post'.format(username, now))
entity = datastore.Entity(key)
entity.update({
'created': now,
'created_by': username,
'content': post_content
})
ds.put(entity)
def repost(ds, username, original):
now = datetime.datetime.utcnow()
new_key = path_to_key(ds, '{0}.user/{1}.post'.format(username, now))
new = datastore.Entity(new_key)
new.update(original)
ds.put(new)
def list_posts_by_user(ds, username):
user_key = path_to_key(ds, '{0}.user'.format(username))
return ds.query(kind='post', ancestor=user_key).fetch()
def list_all_posts(ds):
return ds.query(kind='post').fetch()
def main(project_id):
ds = datastore.Client(project_id)
print("Creating users...")
create_user(ds, 'tonystark',
{'name': 'Tony Stark', 'location': 'Stark Island'})
create_user(ds, 'peterparker',
{'name': 'Peter Parker', 'location': 'New York City'})
print("Creating posts...")
for n in range(1, 10):
create_post(ds, 'tonystark', "Tony's post #{0}".format(n))
create_post(ds, 'peterparker', "Peter's post #{0}".format(n))
print("Re-posting tony's post as peter...")
tonysposts = list_posts_by_user(ds, 'tonystark')
for post in tonysposts:
original_post = post
break
repost(ds, 'peterparker', original_post)
print('Posts by tonystark:')
for post in list_posts_by_user(ds, 'tonystark'):
print("> {0} on {1}".format(post['content'], post['created']))
print('Posts by peterparker:')
for post in list_posts_by_user(ds, 'peterparker'):
print("> {0} on {1}".format(post['content'], post['created']))
print('Posts by everyone:')
for post in list_all_posts(ds):
print("> {0} on {1}".format(post['content'], post['created']))
print('Cleaning up...')
ds.delete_multi([
path_to_key(ds, 'tonystark.user'),
path_to_key(ds, 'peterparker.user')
])
ds.delete_multi([
x.key for x in list_all_posts(ds)])
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Demonstrates wiki data model.')
parser.add_argument('project_id', help='Your cloud project ID.')
args = parser.parse_args()
main(args.project_id)
| apache-2.0 |
liorvh/infernal-twin | build/reportlab/src/reportlab/graphics/widgets/markers.py | 34 | 8442 | #Copyright ReportLab Europe Ltd. 2000-2013
#see license.txt for license details
__version__=''' $Id$ '''
__doc__="""This modules defines a collection of markers used in charts.
"""
from reportlab.graphics.shapes import Rect, Line, Circle, Polygon, Drawing, Group
from reportlab.graphics.widgets.signsandsymbols import SmileyFace
from reportlab.graphics.widgetbase import Widget
from reportlab.lib.validators import isNumber, isColorOrNone, OneOf, Validator
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.lib.colors import black
from reportlab.lib.utils import isFunction, isClass
from reportlab.graphics.widgets.flags import Flag
from math import sin, cos, pi
_toradians = pi/180.0
class Marker(Widget):
'''A polymorphic class of markers'''
_attrMap = AttrMap(BASE=Widget,
kind = AttrMapValue(
OneOf(None, 'Square', 'Diamond', 'Circle', 'Cross', 'Triangle', 'StarSix',
'Pentagon', 'Hexagon', 'Heptagon', 'Octagon', 'StarFive',
'FilledSquare', 'FilledCircle', 'FilledDiamond', 'FilledCross',
'FilledTriangle','FilledStarSix', 'FilledPentagon', 'FilledHexagon',
'FilledHeptagon', 'FilledOctagon', 'FilledStarFive',
'Smiley','ArrowHead', 'FilledArrowHead'),
desc='marker type name'),
size = AttrMapValue(isNumber,desc='marker size'),
x = AttrMapValue(isNumber,desc='marker x coordinate'),
y = AttrMapValue(isNumber,desc='marker y coordinate'),
dx = AttrMapValue(isNumber,desc='marker x coordinate adjustment'),
dy = AttrMapValue(isNumber,desc='marker y coordinate adjustment'),
angle = AttrMapValue(isNumber,desc='marker rotation'),
fillColor = AttrMapValue(isColorOrNone, desc='marker fill colour'),
strokeColor = AttrMapValue(isColorOrNone, desc='marker stroke colour'),
strokeWidth = AttrMapValue(isNumber, desc='marker stroke width'),
arrowBarbDx = AttrMapValue(isNumber, desc='arrow only the delta x for the barbs'),
arrowHeight = AttrMapValue(isNumber, desc='arrow only height'),
)
def __init__(self,*args,**kw):
self.setProperties(kw)
self._setKeywords(
kind = None,
strokeColor = black,
strokeWidth = 0.1,
fillColor = None,
size = 5,
x = 0,
y = 0,
dx = 0,
dy = 0,
angle = 0,
arrowBarbDx = -1.25,
arrowHeight = 1.875,
)
def clone(self,**kwds):
n = self.__class__(**self.__dict__)
if kwds: n.__dict__.update(kwds)
return n
def _Smiley(self):
x, y = self.x+self.dx, self.y+self.dy
d = self.size/2.0
s = SmileyFace()
s.fillColor = self.fillColor
s.strokeWidth = self.strokeWidth
s.strokeColor = self.strokeColor
s.x = x-d
s.y = y-d
s.size = d*2
return s
def _Square(self):
x, y = self.x+self.dx, self.y+self.dy
d = self.size/2.0
s = Rect(x-d,y-d,2*d,2*d,fillColor=self.fillColor,strokeColor=self.strokeColor,strokeWidth=self.strokeWidth)
return s
def _Diamond(self):
d = self.size/2.0
return self._doPolygon((-d,0,0,d,d,0,0,-d))
def _Circle(self):
x, y = self.x+self.dx, self.y+self.dy
s = Circle(x,y,self.size/2.0,fillColor=self.fillColor,strokeColor=self.strokeColor,strokeWidth=self.strokeWidth)
return s
def _Cross(self):
x, y = self.x+self.dx, self.y+self.dy
s = float(self.size)
h, s = s/2, s/6
return self._doPolygon((-s,-h,-s,-s,-h,-s,-h,s,-s,s,-s,h,s,h,s,s,h,s,h,-s,s,-s,s,-h))
def _Triangle(self):
x, y = self.x+self.dx, self.y+self.dy
r = float(self.size)/2
c = 30*_toradians
s = sin(30*_toradians)*r
c = cos(c)*r
return self._doPolygon((0,r,-c,-s,c,-s))
def _StarSix(self):
r = float(self.size)/2
c = 30*_toradians
s = sin(c)*r
c = cos(c)*r
z = s/2
g = c/2
return self._doPolygon((0,r,-z,s,-c,s,-s,0,-c,-s,-z,-s,0,-r,z,-s,c,-s,s,0,c,s,z,s))
def _StarFive(self):
R = float(self.size)/2
r = R*sin(18*_toradians)/cos(36*_toradians)
P = []
angle = 90
for i in range(5):
for radius in R, r:
theta = angle*_toradians
P.append(radius*cos(theta))
P.append(radius*sin(theta))
angle = angle + 36
return self._doPolygon(P)
def _Pentagon(self):
return self._doNgon(5)
def _Hexagon(self):
return self._doNgon(6)
def _Heptagon(self):
return self._doNgon(7)
def _Octagon(self):
return self._doNgon(8)
def _ArrowHead(self):
s = self.size
h = self.arrowHeight
b = self.arrowBarbDx
return self._doPolygon((0,0,b,-h,s,0,b,h))
def _doPolygon(self,P):
x, y = self.x+self.dx, self.y+self.dy
if x or y: P = list(map(lambda i,P=P,A=[x,y]: P[i] + A[i&1], list(range(len(P)))))
return Polygon(P, strokeWidth =self.strokeWidth, strokeColor=self.strokeColor, fillColor=self.fillColor)
def _doFill(self):
old = self.fillColor
if old is None:
self.fillColor = self.strokeColor
r = (self.kind and getattr(self,'_'+self.kind[6:]) or Group)()
self.fillColor = old
return r
def _doNgon(self,n):
P = []
size = float(self.size)/2
for i in range(n):
r = (2.*i/n+0.5)*pi
P.append(size*cos(r))
P.append(size*sin(r))
return self._doPolygon(P)
_FilledCircle = _doFill
_FilledSquare = _doFill
_FilledDiamond = _doFill
_FilledCross = _doFill
_FilledTriangle = _doFill
_FilledStarSix = _doFill
_FilledPentagon = _doFill
_FilledHexagon = _doFill
_FilledHeptagon = _doFill
_FilledOctagon = _doFill
_FilledStarFive = _doFill
_FilledArrowHead = _doFill
def draw(self):
if self.kind:
m = getattr(self,'_'+self.kind)
if self.angle:
_x, _dx, _y, _dy = self.x, self.dx, self.y, self.dy
self.x, self.dx, self.y, self.dy = 0,0,0,0
try:
m = m()
finally:
self.x, self.dx, self.y, self.dy = _x, _dx, _y, _dy
if not isinstance(m,Group):
_m, m = m, Group()
m.add(_m)
if self.angle: m.rotate(self.angle)
x, y = _x+_dx, _y+_dy
if x or y: m.shift(x,y)
else:
m = m()
else:
m = Group()
return m
def uSymbol2Symbol(uSymbol,x,y,color):
if isFunction(uSymbol):
symbol = uSymbol(x, y, 5, color)
elif isClass(uSymbol) and issubclass(uSymbol,Widget):
size = 10.
symbol = uSymbol()
symbol.x = x - (size/2)
symbol.y = y - (size/2)
try:
symbol.size = size
symbol.color = color
except:
pass
elif isinstance(uSymbol,Marker) or isinstance(uSymbol,Flag):
symbol = uSymbol.clone()
if isinstance(uSymbol,Marker): symbol.fillColor = symbol.fillColor or color
symbol.x, symbol.y = x, y
else:
symbol = None
return symbol
class _isSymbol(Validator):
def test(self,x):
return hasattr(x,'__call__') or isinstance(x,Marker) or isinstance(x,Flag) or (isinstance(x,type) and issubclass(x,Widget))
isSymbol = _isSymbol()
def makeMarker(name,**kw):
if Marker._attrMap['kind'].validate(name):
m = Marker(**kw)
m.kind = name
elif name[-5:]=='_Flag' and Flag._attrMap['kind'].validate(name[:-5]):
m = Flag(**kw)
m.kind = name[:-5]
m.size = 10
else:
raise ValueError("Invalid marker name %s" % name)
return m
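def _exampleMarkers():
    # Illustrative sketch (not part of the original module): a row of named
    # markers on one drawing; makeMarker resolves each name via Marker.kind.
    d = Drawing(140, 40)
    for i, kind in enumerate(('Circle', 'FilledSquare', 'FilledStarFive')):
        d.add(makeMarker(kind, x=25+i*45, y=20, size=10))
    return d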
if __name__=='__main__':
D = Drawing()
D.add(Marker())
D.save(fnRoot='Marker',formats=['pdf'], outDir='/tmp')
| gpl-3.0 |
christoph-buente/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/config.py | 126 | 6136 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrapper objects for WebKit-specific utility routines."""
# FIXME: This file needs to be unified with common/config/ports.py .
import logging
from webkitpy.common import webkit_finder
_log = logging.getLogger(__name__)
#
# FIXME: This is used to record if we've already hit the filesystem to look
# for a default configuration. We cache this to speed up the unit tests,
# but this can be reset with clear_cached_configuration(). This should be
# replaced with us consistently using MockConfigs() for tests that don't
# hit the filesystem at all and provide a reliable value.
#
_have_determined_configuration = False
_configuration = "Release"
def clear_cached_configuration():
global _have_determined_configuration, _configuration
_have_determined_configuration = False
_configuration = "Release"
class Config(object):
_FLAGS_FROM_CONFIGURATIONS = {
"Debug": "--debug",
"Release": "--release",
}
def __init__(self, executive, filesystem, port_implementation=None):
self._executive = executive
self._filesystem = filesystem
self._webkit_finder = webkit_finder.WebKitFinder(self._filesystem)
self._default_configuration = None
self._build_directories = {}
self._port_implementation = port_implementation
def build_directory(self, configuration):
"""Returns the path to the build directory for the configuration."""
if configuration:
flags = ["--configuration", self.flag_for_configuration(configuration)]
else:
configuration = ""
flags = []
if self._port_implementation:
flags.append('--' + self._port_implementation)
if not self._build_directories.get(configuration):
args = ["perl", self._webkit_finder.path_to_script("webkit-build-directory")] + flags
output = self._executive.run_command(args, cwd=self._webkit_finder.webkit_base(), return_stderr=False).rstrip()
parts = output.split("\n")
self._build_directories[configuration] = parts[0]
if len(parts) == 2:
default_configuration = parts[1][len(parts[0]):]
if default_configuration.startswith("/"):
default_configuration = default_configuration[1:]
self._build_directories[default_configuration] = parts[1]
return self._build_directories[configuration]
def flag_for_configuration(self, configuration):
return self._FLAGS_FROM_CONFIGURATIONS[configuration]
def default_configuration(self):
"""Returns the default configuration for the user.
Returns the value set by 'set-webkit-configuration', or "Release"
if that has not been set. This mirrors the logic in webkitdirs.pm."""
if not self._default_configuration:
self._default_configuration = self._determine_configuration()
if not self._default_configuration:
self._default_configuration = 'Release'
if self._default_configuration not in self._FLAGS_FROM_CONFIGURATIONS:
_log.warn("Configuration \"%s\" is not a recognized value.\n" % self._default_configuration)
_log.warn("Scripts may fail. See 'set-webkit-configuration --help'.")
return self._default_configuration
def _determine_configuration(self):
# This mirrors the logic in webkitdirs.pm:determineConfiguration().
#
# FIXME: See the comment at the top of the file regarding unit tests
# and our use of global mutable static variables.
# FIXME: We should just @memoize this method and then this will only
# be read once per object lifetime (which should be sufficiently fast).
global _have_determined_configuration, _configuration
if not _have_determined_configuration:
contents = self._read_configuration()
if not contents:
contents = "Release"
if contents == "Deployment":
contents = "Release"
if contents == "Development":
contents = "Debug"
_configuration = contents
_have_determined_configuration = True
return _configuration
def _read_configuration(self):
try:
configuration_path = self._filesystem.join(self.build_directory(None), "Configuration")
if not self._filesystem.exists(configuration_path):
return None
except:
return None
return self._filesystem.read_text_file(configuration_path).rstrip()
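# Usage sketch (not part of the original module): reading the default build
# configuration. Host is assumed to be webkitpy.common.host.Host, which
# provides the executive and filesystem wrappers this class expects.
#   from webkitpy.common.host import Host
#   host = Host()
#   config = Config(host.executive, host.filesystem)
#   print(config.default_configuration())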
| bsd-3-clause |
locaweb/simplenet | src/simplenet/common/event.py | 1 | 4587 | # Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Thiago Morello, Locaweb.
# @author: Willian Molinari, Locaweb.
# @author: Juliano Martinez, Locaweb.
import socket
from kombu import BrokerConnection, Exchange, Queue
from simplenet.common.config import config, get_logger
logger = get_logger()
class EventManager(object):
def __init__(self):
self.url = config.get("event", "broker")
def raise_fanout_event(self, exchange, event_type, params, **kwargs):
logger.debug("Raising event %s with params: %s" % (event_type, params))
with BrokerConnection(self.url) as conn:
conn.ensure_connection()
media_exchange = Exchange(
"dhcp:fanout:%s" % exchange,
type="fanout",
durable=True)
if 'route' in kwargs:
routing_key = kwargs['route']
else:
queue = Queue(
event_type,
exchange=media_exchange,
routing_key=event_type
)
if params['action'] in ('new', 'rebuild_queues'):
queue(conn.channel()).declare()
return
elif params['action'] == 'remove':
try:
queue(conn.channel()).unbind()
except AttributeError:
queue(conn.channel()).unbind_from(exchange=media_exchange, routing_key=event_type)
return
else:
routing_key = event_type
with conn.Producer(exchange=media_exchange, serializer="json",
routing_key=routing_key) as producer:
logger.debug("Publishing %s" % params)
producer.publish(params)
def raise_event(self, event_type, params, **kwargs):
logger.debug("Raising event %s with params: %s" % (event_type, params))
with BrokerConnection(self.url) as conn:
conn.ensure_connection()
media_exchange = Exchange(
"simplenet",
type="direct",
durable=True)
if 'route' in kwargs:
routing_key = kwargs['route']
else:
queue = Queue(
event_type,
exchange=media_exchange,
routing_key=event_type
)
queue(conn.channel()).declare()
routing_key = event_type
with conn.Producer(exchange=media_exchange, serializer="json",
routing_key=routing_key) as producer:
logger.debug("Publishing %s" % params)
producer.publish(params)
def listen_event(self, queue_name, callback):
with BrokerConnection(self.url) as conn:
conn.ensure_connection()
media_exchange = Exchange(
"simplenet",
type="direct",
durable=True
)
queue = Queue(
queue_name,
exchange=media_exchange,
routing_key=queue_name
)
logger.info("Listening for data...")
with conn.Consumer([queue], callbacks=[callback]) as consumer:
while True:
conn.drain_events()
def bind_queue(self, queue_name, routing_key):
with BrokerConnection(self.url) as conn:
conn.ensure_connection()
media_exchange = Exchange(
"simplenet",
type="direct",
durable=True
)
queue = Queue(
queue_name,
exchange=media_exchange,
routing_key=routing_key
)
queue(conn.channel()).declare()
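# --- Editor-added illustrative sketch (not part of the original module).
# Assumes config.get("event", "broker") yields a kombu broker URL such as
# "amqp://guest:guest@localhost:5672//"; the event name is hypothetical.
def _example_raise_and_listen():
    manager = EventManager()
    # Publish to the direct "simplenet" exchange, declaring the queue on
    # the fly so the message is not dropped.
    manager.raise_event("vlan:create", {"id": "vlan01", "action": "new"})
    def on_message(body, message):
        # kombu delivers the decoded payload plus the raw message object.
        logger.info("received %s" % body)
        message.ack()
    manager.listen_event("vlan:create", on_message)  # blocks, draining events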
| mit |
jeffrey4l/nova | nova/tests/unit/test_safeutils.py | 55 | 4340 | # Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import safe_utils
from nova import test
class GetCallArgsTestCase(test.NoDBTestCase):
def _test_func(self, instance, red=None, blue=None):
pass
def test_all_kwargs(self):
args = ()
kwargs = {'instance': {'uuid': 1}, 'red': 3, 'blue': 4}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
# implicit self counts as an arg
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertEqual(4, callargs['blue'])
def test_all_args(self):
args = ({'uuid': 1}, 3, 4)
kwargs = {}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
# implicit self counts as an arg
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertEqual(4, callargs['blue'])
def test_mixed_args(self):
args = ({'uuid': 1}, 3)
kwargs = {'blue': 4}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
# implicit self counts as an arg
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertEqual(4, callargs['blue'])
def test_partial_kwargs(self):
args = ()
kwargs = {'instance': {'uuid': 1}, 'red': 3}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
# implicit self counts as an arg
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertIsNone(callargs['blue'])
def test_partial_args(self):
args = ({'uuid': 1}, 3)
kwargs = {}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
# implicit self counts as an arg
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertIsNone(callargs['blue'])
def test_partial_mixed_args(self):
args = (3,)
kwargs = {'instance': {'uuid': 1}}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertIsNone(callargs['blue'])
def test_no_named_args(self):
def _fake(*args, **kwargs):
pass
# This is not captured by getcallargs
args = (3,)
kwargs = {'instance': {'uuid': 1}}
callargs = safe_utils.getcallargs(_fake, *args, **kwargs)
self.assertEqual(1, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
| apache-2.0 |
tuaris/bitcoin | contrib/testgen/base58.py | 2139 | 2818 | '''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
h3 = checksum(result[:-4])
if result[-4:] == h3:
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/bitcoin/python-base58.git)
assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
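    # Editor-added round-trip check for the checksummed helpers (the payload
    # below is an arbitrary example, not a real address hash):
    _payload = 'hello'.encode('ascii')
    assert b58decode_chk(b58encode_chk(_payload)) == _payload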
print("Tests passed")
| mit |
EmreAtes/spack | var/spack/repos/builtin/packages/py-mg-rast-tools/package.py | 3 | 2387 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyMgRastTools(PythonPackage):
"""Repository of scripts and libraries for using the MG-RAST API and
MG-RAST data."""
homepage = "https://github.com/MG-RAST/MG-RAST-Tools"
version('2018.04.17', git='https://github.com/MG-RAST/MG-RAST-Tools.git', commit='a40c6e6539ad0bc1c08e1b03dfc0a9759755a326')
depends_on('perl', type=('build', 'run'))
depends_on('py-setuptools@28.0:', type='build')
depends_on('py-prettytable@0.7:', type=('build', 'run'))
depends_on('py-poster@0.8.1:', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
depends_on('py-requests-toolbelt@0.8:', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('r-matr', type=('build', 'run'))
depends_on('shocklibs@0.1.30:')
depends_on('perl-list-moreutils', type=('build', 'run'))
depends_on('perl-exporter-tiny', type=('build', 'run'))
depends_on('perl-libwww-perl', type=('build', 'run'))
depends_on('perl-http-message', type=('build', 'run'))
depends_on('perl-json', type=('build', 'run'))
| lgpl-2.1 |
georgewhewell/CouchPotatoServer | libs/tornado/escape.py | 5 | 14094 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""
from __future__ import absolute_import, division, print_function, with_statement
import re
import sys
from tornado.util import bytes_type, unicode_type, basestring_type, u
try:
from urllib.parse import parse_qs as _parse_qs # py3
except ImportError:
from urlparse import parse_qs as _parse_qs # Python 2.6+
try:
import htmlentitydefs # py2
except ImportError:
import html.entities as htmlentitydefs # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
import json
try:
unichr
except NameError:
unichr = chr
_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
_XHTML_ESCAPE_DICT = {'&': '&', '<': '<', '>': '>', '"': '"',
'\'': '''}
def xhtml_escape(value):
"""Escapes a string so it is valid within HTML or XML."""
return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
to_basestring(value))
def xhtml_unescape(value):
"""Un-escapes an XML-escaped string."""
return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
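# Editor-added illustrative sketch (not part of the original module): the two
# helpers above invert each other for the five escaped characters.
def _example_xhtml_roundtrip():
    escaped = xhtml_escape('a < b & "c"')  # 'a &lt; b &amp; &quot;c&quot;'
    return xhtml_unescape(escaped)         # back to 'a < b & "c"'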
# The fact that json_encode wraps json.dumps is an implementation detail.
# Please see https://github.com/facebook/tornado/pull/706
# before sending a pull request that adds **kwargs to this function.
def json_encode(value):
"""JSON-encodes the given Python object."""
# JSON permits but does not require forward slashes to be escaped.
# This is useful when json data is emitted in a <script> tag
# in HTML, as it prevents </script> tags from prematurely terminating
# the javascript. Some json libraries do this escaping by default,
# although python's standard library does not, so we do it here.
# http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
return json.dumps(value).replace("</", "<\\/")
def json_decode(value):
"""Returns Python objects for the given JSON string."""
return json.loads(to_basestring(value))
def squeeze(value):
"""Replace all sequences of whitespace chars with a single space."""
return re.sub(r"[\x00-\x20]+", " ", value).strip()
def url_escape(value, plus=True):
"""Returns a URL-encoded version of the given value.
If ``plus`` is true (the default), spaces will be represented
as "+" instead of "%20". This is appropriate for query strings
but not for the path component of a URL. Note that this default
is the reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
quote = urllib_parse.quote_plus if plus else urllib_parse.quote
return quote(utf8(value))
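# Editor-added illustrative sketch (not part of the original module):
def _example_url_escape():
    assert url_escape("a b") == "a+b"                # query-string style (default)
    assert url_escape("a b", plus=False) == "a%20b"  # path-component style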
# python 3 changed things around enough that we need two separate
# implementations of url_unescape. We also need our own implementation
# of parse_qs since python 3's version insists on decoding everything.
if sys.version_info[0] < 3:
def url_unescape(value, encoding='utf-8', plus=True):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote)
if encoding is None:
return unquote(utf8(value))
else:
return unicode_type(unquote(utf8(value)), encoding)
parse_qs_bytes = _parse_qs
else:
def url_unescape(value, encoding='utf-8', plus=True):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
if encoding is None:
if plus:
# unquote_to_bytes doesn't have a _plus variant
value = to_basestring(value).replace('+', ' ')
return urllib_parse.unquote_to_bytes(value)
else:
unquote = (urllib_parse.unquote_plus if plus
else urllib_parse.unquote)
return unquote(to_basestring(value), encoding=encoding)
def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
"""Parses a query string like urlparse.parse_qs, but returns the
values as byte strings.
Keys still become type str (interpreted as latin1 in python3!)
because it's too painful to keep them as byte strings in
python3 and in practice they're nearly always ascii anyway.
"""
# This is gross, but python3 doesn't give us another way.
# Latin1 is the universal donor of character encodings.
result = _parse_qs(qs, keep_blank_values, strict_parsing,
encoding='latin1', errors='strict')
encoded = {}
for k, v in result.items():
encoded[k] = [i.encode('latin1') for i in v]
return encoded
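# Editor-added illustrative sketch (not part of the original module): on both
# Python versions the values stay as undecoded byte strings.
def _example_parse_qs_bytes():
    parsed = parse_qs_bytes("name=%C3%A9")
    return parsed["name"]  # [b'\xc3\xa9'] -- utf8 bytes left for the caller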
_UTF8_TYPES = (bytes_type, type(None))
def utf8(value):
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
if not isinstance(value, unicode_type):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.encode("utf-8")
_TO_UNICODE_TYPES = (unicode_type, type(None))
def to_unicode(value):
"""Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, _TO_UNICODE_TYPES):
return value
if not isinstance(value, bytes_type):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.decode("utf-8")
# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
native_str = to_unicode
else:
native_str = utf8
_BASESTRING_TYPES = (basestring_type, type(None))
def to_basestring(value):
"""Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use either and should return the type
the user supplied. In python3, the two types are not interchangeable,
so this method is needed to convert byte strings to unicode.
"""
if isinstance(value, _BASESTRING_TYPES):
return value
if not isinstance(value, bytes_type):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.decode("utf-8")
def recursive_unicode(obj):
"""Walks a simple data structure, converting byte strings to unicode.
Supports lists, tuples, and dictionaries.
"""
if isinstance(obj, dict):
return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items())
elif isinstance(obj, list):
return list(recursive_unicode(i) for i in obj)
elif isinstance(obj, tuple):
return tuple(recursive_unicode(i) for i in obj)
elif isinstance(obj, bytes_type):
return to_unicode(obj)
else:
return obj
# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)"""))
def linkify(text, shorten=False, extra_params="",
require_protocol=False, permitted_protocols=["http", "https"]):
"""Converts plain text into HTML with links.
For example: ``linkify("Hello http://tornadoweb.org!")`` would return
``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
Parameters:
* ``shorten``: Long urls will be shortened for display.
* ``extra_params``: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
* ``require_protocol``: Only linkify urls which include a protocol. If
this is False, urls such as www.facebook.com will also be linkified.
* ``permitted_protocols``: List (or set) of protocols which should be
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
"mailto"])``. It is very unsafe to include protocols such as
``javascript``.
"""
if extra_params and not callable(extra_params):
extra_params = " " + extra_params.strip()
def make_link(m):
url = m.group(1)
proto = m.group(2)
if require_protocol and not proto:
return url  # no protocol, no linkify
if proto and proto not in permitted_protocols:
return url # bad protocol, no linkify
href = m.group(1)
if not proto:
href = "http://" + href # no proto specified, use http
if callable(extra_params):
params = " " + extra_params(href).strip()
else:
params = extra_params
# clip long urls. max_len is just an approximation
max_len = 30
if shorten and len(url) > max_len:
before_clip = url
if proto:
proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
else:
proto_len = 0
parts = url[proto_len:].split("/")
if len(parts) > 1:
# Grab the whole host part plus the first bit of the path
# The path is usually not that interesting once shortened
# (no more slug, etc), so it really just provides a little
# extra indication of shortening.
url = url[:proto_len] + parts[0] + "/" + \
parts[1][:8].split('?')[0].split('.')[0]
if len(url) > max_len * 1.5: # still too long
url = url[:max_len]
if url != before_clip:
amp = url.rfind('&')
# avoid splitting html char entities
if amp > max_len - 5:
url = url[:amp]
url += "..."
if len(url) >= len(before_clip):
url = before_clip
else:
# full url is visible on mouse-over (for those who don't
# have a status bar, such as Safari by default)
params += ' title="%s"' % href
return u('<a href="%s"%s>%s</a>') % (href, params, url)
# First HTML-escape so that our strings are all safe.
# The regex is modified to avoid character entities other than &amp; so
# that we won't pick up ", etc.
text = _unicode(xhtml_escape(text))
return _URL_RE.sub(make_link, text)
def _convert_entity(m):
if m.group(1) == "#":
try:
return unichr(int(m.group(2)))
except ValueError:
return "&#%s;" % m.group(2)
try:
return _HTML_UNICODE_MAP[m.group(2)]
except KeyError:
return "&%s;" % m.group(2)
def _build_unicode_map():
unicode_map = {}
for name, value in htmlentitydefs.name2codepoint.items():
unicode_map[name] = unichr(value)
return unicode_map
_HTML_UNICODE_MAP = _build_unicode_map()
| gpl-3.0 |
wlerin/streamlink | src/streamlink/stream/flvconcat.py | 6 | 10722 | from __future__ import division
import logging
from collections import namedtuple
from io import IOBase
from itertools import chain, islice
from threading import Thread
from ..buffers import RingBuffer
from ..packages.flashmedia import FLVError
from ..packages.flashmedia.tag import (AudioData, AACAudioData, VideoData,
AVCVideoData, VideoCommandFrame,
Header, ScriptData, Tag)
from ..packages.flashmedia.tag import (AAC_PACKET_TYPE_SEQUENCE_HEADER,
AVC_PACKET_TYPE_SEQUENCE_HEADER,
AUDIO_CODEC_ID_AAC,
VIDEO_CODEC_ID_AVC,
TAG_TYPE_AUDIO,
TAG_TYPE_VIDEO)
__all__ = ["extract_flv_header_tags", "FLVTagConcat", "FLVTagConcatIO"]
log = logging.getLogger(__name__)
FLVHeaderTags = namedtuple("FLVHeaderTags", "metadata aac vc")
def iter_flv_tags(fd=None, buf=None, strict=False, skip_header=False):
if not (fd or buf):
return
offset = 0
if not skip_header:
if fd:
Header.deserialize(fd)
elif buf:
header, offset = Header.deserialize_from(buf, offset)
while fd or (buf and offset < len(buf)):
try:
if fd:
tag = Tag.deserialize(fd, strict=strict)
elif buf:
tag, offset = Tag.deserialize_from(buf, offset, strict=strict)
except (IOError, FLVError) as err:
if "Insufficient tag header" in str(err):
break
raise IOError(err)
yield tag
def extract_flv_header_tags(stream):
fd = stream.open()
metadata = aac_header = avc_header = None
for tag_index, tag in enumerate(iter_flv_tags(fd)):
if isinstance(tag.data, ScriptData) and tag.data.name == "onMetaData":
metadata = tag
elif (isinstance(tag.data, VideoData) and
isinstance(tag.data.data, AVCVideoData)):
if tag.data.data.type == AVC_PACKET_TYPE_SEQUENCE_HEADER:
avc_header = tag
elif (isinstance(tag.data, AudioData) and
isinstance(tag.data.data, AACAudioData)):
if tag.data.data.type == AAC_PACKET_TYPE_SEQUENCE_HEADER:
aac_header = tag
if aac_header and avc_header and metadata:
break
# Give up after 10 tags
if tag_index == 9:
break
return FLVHeaderTags(metadata, aac_header, avc_header)
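# Editor-added illustrative sketch (not part of the original module): header
# tags extracted from one stream are typically re-injected via the tags
# argument so every joined segment shares a single metadata/AAC/AVC header.
# The stream and segment file objects here are hypothetical.
def _example_concat_with_headers(header_stream, segment_fds):
    header = extract_flv_header_tags(header_stream)
    concater = FLVTagConcat(tags=[header.metadata, header.aac, header.vc])
    for fd in segment_fds:
        for chunk in concater.iter_chunks(fd, skip_header=True):
            yield chunk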
class FLVTagConcat(object):
def __init__(self, duration=None, tags=[], has_video=True, has_audio=True,
flatten_timestamps=False, sync_headers=False):
self.duration = duration
self.flatten_timestamps = flatten_timestamps
self.has_audio = has_audio
self.has_video = has_video
self.sync_headers = sync_headers
self.tags = tags
if not (has_audio and has_video):
self.sync_headers = False
self.audio_header_written = False
self.flv_header_written = False
self.video_header_written = False
self.timestamps_add = {}
self.timestamps_orig = {}
self.timestamps_sub = {}
@property
def headers_written(self):
return self.audio_header_written and self.video_header_written
def verify_tag(self, tag):
if tag.filter:
raise IOError("Tag has filter flag set, probably encrypted")
# Only AAC and AVC has detectable headers
if isinstance(tag.data, AudioData) and tag.data.codec != AUDIO_CODEC_ID_AAC:
self.audio_header_written = True
if isinstance(tag.data, VideoData) and tag.data.codec != VIDEO_CODEC_ID_AVC:
self.video_header_written = True
# Make sure there is no timestamp gap between audio and video when syncing
if self.sync_headers and self.timestamps_sub and not self.headers_written:
self.timestamps_sub = {}
if isinstance(tag.data, AudioData):
if isinstance(tag.data.data, AACAudioData):
if tag.data.data.type == AAC_PACKET_TYPE_SEQUENCE_HEADER:
if self.audio_header_written:
return
self.audio_header_written = True
else:
if self.sync_headers and not self.headers_written:
return
if not self.audio_header_written:
return
else:
if self.sync_headers and not self.headers_written:
return
elif isinstance(tag.data, VideoData):
if isinstance(tag.data.data, AVCVideoData):
if tag.data.data.type == AVC_PACKET_TYPE_SEQUENCE_HEADER:
if self.video_header_written:
return
self.video_header_written = True
else:
if self.sync_headers and not self.headers_written:
return
if not self.video_header_written:
return
elif isinstance(tag.data.data, VideoCommandFrame):
return
else:
if self.sync_headers and not self.headers_written:
return
elif isinstance(tag.data, ScriptData):
if tag.data.name == "onMetaData":
if self.duration:
tag.data.value["duration"] = self.duration
elif "duration" in tag.data.value:
del tag.data.value["duration"]
else:
return False
return True
def adjust_tag_gap(self, tag):
timestamp_gap = tag.timestamp - self.timestamps_orig.get(tag.type, 0)
timestamp_sub = self.timestamps_sub.get(tag.type)
if timestamp_gap > 1000 and timestamp_sub is not None:
self.timestamps_sub[tag.type] += timestamp_gap
self.timestamps_orig[tag.type] = tag.timestamp
def adjust_tag_timestamp(self, tag):
timestamp_offset_sub = self.timestamps_sub.get(tag.type)
if timestamp_offset_sub is None and tag not in self.tags:
self.timestamps_sub[tag.type] = tag.timestamp
timestamp_offset_sub = self.timestamps_sub.get(tag.type)
timestamp_offset_add = self.timestamps_add.get(tag.type)
if timestamp_offset_add:
tag.timestamp = max(0, tag.timestamp + timestamp_offset_add)
elif timestamp_offset_sub:
tag.timestamp = max(0, tag.timestamp - timestamp_offset_sub)
def analyze_tags(self, tag_iterator):
tags = list(islice(tag_iterator, 10))
audio_tags = len(list(filter(lambda t: t.type == TAG_TYPE_AUDIO, tags)))
video_tags = len(list(filter(lambda t: t.type == TAG_TYPE_VIDEO, tags)))
self.has_audio = audio_tags > 0
self.has_video = video_tags > 0
if not (self.has_audio and self.has_video):
self.sync_headers = False
return tags
def iter_tags(self, fd=None, buf=None, skip_header=None):
if skip_header is None:
skip_header = bool(self.tags)
tags_iterator = filter(None, self.tags)
flv_iterator = iter_flv_tags(fd=fd, buf=buf, skip_header=skip_header)
for tag in chain(tags_iterator, flv_iterator):
yield tag
def iter_chunks(self, fd=None, buf=None, skip_header=None):
"""Reads FLV tags from fd or buf and returns them with adjusted
timestamps."""
timestamps = dict(self.timestamps_add)
tag_iterator = self.iter_tags(fd=fd, buf=buf, skip_header=skip_header)
if not self.flv_header_written:
analyzed_tags = self.analyze_tags(tag_iterator)
else:
analyzed_tags = []
for tag in chain(analyzed_tags, tag_iterator):
if not self.flv_header_written:
flv_header = Header(has_video=self.has_video,
has_audio=self.has_audio)
yield flv_header.serialize()
self.flv_header_written = True
if self.verify_tag(tag):
self.adjust_tag_gap(tag)
self.adjust_tag_timestamp(tag)
if self.duration:
norm_timestamp = tag.timestamp / 1000
if norm_timestamp > self.duration:
break
yield tag.serialize()
timestamps[tag.type] = tag.timestamp
if not self.flatten_timestamps:
self.timestamps_add = timestamps
self.tags = []
class FLVTagConcatWorker(Thread):
def __init__(self, iterator, stream):
self.error = None
self.stream = stream
self.stream_iterator = iterator
self.concater = FLVTagConcat(stream.duration, stream.tags,
**stream.concater_params)
Thread.__init__(self)
self.daemon = True
def run(self):
for fd in self.stream_iterator:
try:
chunks = self.concater.iter_chunks(
fd, skip_header=self.stream.skip_header
)
for chunk in chunks:
self.stream.buffer.write(chunk)
if not self.running:
return
except IOError as err:
self.error = err
break
self.stop()
def stop(self):
self.running = False
self.stream.buffer.close()
def start(self):
self.running = True
return Thread.start(self)
class FLVTagConcatIO(IOBase):
__worker__ = FLVTagConcatWorker
def __init__(self, session, duration=None, tags=[], skip_header=None,
timeout=30, **concater_params):
self.session = session
self.timeout = timeout
self.concater_params = concater_params
self.duration = duration
self.skip_header = skip_header
self.tags = tags
def open(self, iterator):
self.buffer = RingBuffer(self.session.get_option("ringbuffer-size"))
self.worker = self.__worker__(iterator, self)
self.worker.start()
def close(self):
self.worker.stop()
if self.worker.is_alive():
self.worker.join()
def read(self, size=-1):
if not self.buffer:
return b""
if self.worker.error:
raise self.worker.error
return self.buffer.read(size, block=self.worker.is_alive(),
timeout=self.timeout)
| bsd-2-clause |
certik/sfepy | sfepy/base/conf.py | 1 | 10020 | import re
from base import Struct, IndexedStruct, dict_to_struct, pause, output, copy,\
import_file, assert_, get_default
from reader import Reader
_required = ['filename_mesh', 'field_[0-9]+|fields',
'ebc_[0-9]+|ebcs', 'fe', 'equations',
'region_[0-9]+|regions', 'variable_[0-9]+|variables',
'material_[0-9]+|materials', 'integral_[0-9]+|integrals',
'solver_[0-9]+|solvers']
_other = ['epbc_[0-9]+|epbcs', 'lcbc_[0-9]+|lcbcs', 'nbc_[0-9]+|nbcs',
'ic_[0-9]+|ics', 'options']
##
# c: 19.02.2008, r: 19.02.2008
def get_standard_keywords():
return copy( _required ), copy( _other )
##
# c: 10.04.2008, r: 10.04.2008
def tuple_to_conf( name, vals, order ):
conf = Struct( name = name )
for ii, key in enumerate( order ):
setattr( conf, key, vals[ii] )
return conf
##
# Short syntax: key is suffixed with '__<number>' to prevent collisions with
# long syntax keys -> both cases can be used in a single input.
def transform_variables( adict ):
d2 = {}
for ii, (key, conf) in enumerate( adict.iteritems() ):
if isinstance( conf, tuple ):
c2 = tuple_to_conf( key, conf, ['kind', 'field'] )
if len( conf ) >= 3:
kind = c2.kind.split()[0]
if kind == 'unknown':
c2.order = conf[2]
elif kind == 'test':
c2.dual = conf[2]
elif kind == 'parameter':
c2.like = conf[2]
if len( conf ) == 4:
c2.history = conf[3]
d2['variable_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1( conf )
d2['variable_'+c2.name] = c2
return d2
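##
# Editor-added illustrative sketch (not part of the original module): the
# short (tuple) syntax handled above expands into per-variable Structs. The
# field names here are hypothetical.
def _example_short_variable_syntax():
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    # Keys come out as 'variable_u__<i>' / 'variable_v__<i>', where <i> is
    # the (arbitrary) dict enumeration index.
    return transform_variables( variables )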
##
# c: 10.04.2008, r: 06.05.2008
def transform_ebcs( adict ):
d2 = {}
for ii, (key, conf) in enumerate( adict.iteritems() ):
if isinstance( conf, tuple ):
c2 = tuple_to_conf( key, conf, ['region', 'dofs'] )
d2['ebc_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1( conf )
d2['ebc_'+c2.name] = c2
return d2
def transform_ics( adict ):
d2 = {}
for ii, (key, conf) in enumerate( adict.iteritems() ):
if isinstance( conf, tuple ):
c2 = tuple_to_conf( key, conf, ['region', 'dofs'] )
d2['ic_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1( conf )
d2['ic_'+c2.name] = c2
return d2
##
# c: 02.05.2008, r: 06.05.2008
def transform_regions( adict ):
d2 = {}
for ii, (key, conf) in enumerate( adict.iteritems() ):
if isinstance( conf, tuple ):
c2 = tuple_to_conf( key, conf, ['select', 'flags'] )
for flag, val in c2.flags.iteritems():
setattr( c2, flag, val )
delattr( c2, 'flags' )
d2['region_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1( conf )
d2['region_'+c2.name] = c2
return d2
##
# c: 20.06.2007, r: 18.02.2008
def transform_to_struct_1( adict ):
return dict_to_struct( adict, flag = (1,) )
def transform_to_i_struct_1( adict ):
return dict_to_struct( adict, flag = (1,), constructor = IndexedStruct )
def transform_to_struct_01( adict ):
return dict_to_struct( adict, flag = (0,1) )
def transform_to_struct_10( adict ):
return dict_to_struct( adict, flag = (1,0) )
transforms = {
'options' : transform_to_i_struct_1,
'solvers' : transform_to_struct_01,
'integrals' : transform_to_struct_01,
'opt' : transform_to_struct_1,
'fe' : transform_to_struct_1,
'regions' : transform_regions,
'shape_opt' : transform_to_struct_10,
'fields' : transform_to_struct_01,
'variables' : transform_variables,
'ebcs' : transform_ebcs,
'epbcs' : transform_to_struct_01,
'nbcs' : transform_to_struct_01,
'lcbcs' : transform_to_struct_01,
'ics' : transform_ics,
}
##
# 27.10.2005, c
class ProblemConf( Struct ):
"""
Problem configuration, corresponding to an input (problem description
file). It validates the input using lists of required and other keywords
that have to/can appear in the input. Default keyword lists can be obtained
by sfepy.base.conf.get_standard_keywords().
ProblemConf instance is used to construct a ProblemDefinition instance via
ProblemDefinition.from_conf( conf ).
"""
##
# c: 25.07.2006, r: 10.07.2008
def from_file( filename, required = None, other = None ):
"""
Loads the problem definition from a file.
The filename can either contain plain definitions, or it can contain
the define() function, in which case it will be called to return the
input definitions.
The job of the define() function is to return a dictionary of
parameters. How the dictionary is constructed is not our business, but
the usual way is to simply have a function define() along these lines
in the input file:
def define():
options = {
'save_eig_vectors' : None,
'eigen_solver' : 'eigen1',
}
region_2 = {
'name' : 'Surface',
'select' : 'nodes of surface',
}
...
return locals()
"""
funmod = import_file( filename )
obj = ProblemConf()
if "define" in funmod.__dict__:
define_dict = funmod.__dict__["define"]()
else:
define_dict = funmod.__dict__
obj.__dict__.update( define_dict )
obj.setup( define_dict, funmod, filename, required, other )
return obj
from_file = staticmethod( from_file )
def from_module( module, required = None, other = None ):
obj = ProblemConf()
obj.__dict__.update( module.__dict__ )
obj.setup( funmod = module, required = required, other = other )
return obj
from_module = staticmethod( from_module )
def from_dict( dict_, funmod, required = None, other = None ):
obj = ProblemConf()
obj.__dict__.update( dict_ )
obj.setup( funmod = funmod, required = required, other = other )
return obj
from_dict = staticmethod( from_dict )
def setup( self, define_dict = None, funmod = None, filename = None,
required = None, other = None ):
define_dict = get_default( define_dict, self.__dict__ )
self._filename = filename
other_missing = self.validate( required = required, other = other )
for name in other_missing:
setattr( self, name, None )
self.transform_input_trivial()
self._raw = {}
for key, val in define_dict.iteritems():
if isinstance( val, dict ):
self._raw[key] = copy( val )
self.transform_input()
self.funmod = funmod
##
# 27.10.2005, c
# 19.09.2006
# 05.06.2007
def _validate_helper( self, items, but_nots ):
keys = self.__dict__.keys()
left_over = keys[:]
if but_nots is not None:
for item in but_nots:
match = re.compile( '^' + item + '$' ).match
for key in keys:
if match( key ):
left_over.remove( key )
missing = []
if items is not None:
for item in items:
found = False
match = re.compile( '^' + item + '$' ).match
for key in keys:
if match( key ):
found = True
left_over.remove( key )
if not found:
missing.append( item )
return left_over, missing
##
# c: 27.10.2005, r: 11.07.2008
def validate( self, required = None, other = None ):
required_left_over, required_missing \
= self._validate_helper( required, other )
other_left_over, other_missing \
= self._validate_helper( other, required )
assert_( required_left_over == other_left_over )
err = False
if required_missing:
err = True
output( 'error: required missing:', required_missing )
if other_left_over:
output( 'left over:', other_left_over )
if err:
raise ValueError
return other_missing
##
# c: 31.10.2005, r: 10.07.2008
def transform_input_trivial( self ):
"""Trivial input transformations."""
##
# Unordered inputs.
tr_list = ['([a-zA-Z0-9]+)_[0-9]+']
# Keywords not in 'required', but needed even empty (e.g. for run_tests).
for key in transforms.keys():
if not self.__dict__.has_key( key ):
self.__dict__[key] = {}
keys = self.__dict__.keys()
for item in tr_list:
match = re.compile( item ).match
for key in keys:
obj = match( key )
if obj:
new = obj.group( 1 ) + 's'
result = {key : self.__dict__[key]}
try:
self.__dict__[new].update( result )
except KeyError:
self.__dict__[new] = result
del self.__dict__[key]
def transform_input( self ):
keys = self.__dict__.keys()
for key, transform in transforms.iteritems():
if not key in keys: continue
self.__dict__[key] = transform( self.__dict__[key] )
def get_raw( self, key = None ):
if key is None:
return self._raw
else:
return self._raw[key]
def edit( self, key, newval ):
self.__dict__[key] = transforms[key]( newval )
| bsd-3-clause |
keisuke-umezawa/chainer | chainer/links/connection/embed_id.py | 4 | 2230 | from chainer.functions.connection import embed_id
from chainer.initializers import normal
from chainer import link
from chainer import variable
class EmbedID(link.Link):
"""Efficient linear layer for one-hot input.
This is a link that wraps the :func:`~chainer.functions.embed_id` function.
This link holds the ID (word) embedding matrix ``W`` as a parameter.
Args:
in_size (int): Number of different identifiers (a.k.a. vocabulary
size).
out_size (int): Size of embedding vector.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 2.
ignore_label (int or None): If ``ignore_label`` is an int value,
``i``-th column of return value is filled with ``0``.
.. seealso:: :func:`~chainer.functions.embed_id`
Attributes:
W (~chainer.Variable): Embedding parameter matrix.
.. admonition:: Example
>>> W = np.array([[0, 0, 0],
... [1, 1, 1],
... [2, 2, 2]]).astype(np.float32)
>>> W
array([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.]], dtype=float32)
>>> l = L.EmbedID(W.shape[0], W.shape[1], initialW=W)
>>> x = np.array([2, 1]).astype(np.int32)
>>> x
array([2, 1], dtype=int32)
>>> y = l(x)
>>> y.array
array([[2., 2., 2.],
[1., 1., 1.]], dtype=float32)
"""
ignore_label = None
def __init__(self, in_size, out_size, initialW=None, ignore_label=None):
super(EmbedID, self).__init__()
self.ignore_label = ignore_label
with self.init_scope():
if initialW is None:
initialW = normal.Normal(1.0)
self.W = variable.Parameter(initialW, (in_size, out_size))
def forward(self, x):
"""Extracts the word embedding of given IDs.
Args:
x (~chainer.Variable): Batch vectors of IDs.
Returns:
~chainer.Variable: Batch of corresponding embeddings.
"""
return embed_id.embed_id(x, self.W, ignore_label=self.ignore_label)
| mit |
cjmayo/mapnik | scons/scons-local-2.3.6/SCons/Platform/os2.py | 4 | 2200 | """SCons.Platform.os2
Platform-specific initialization for OS/2 systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/os2.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import win32
def generate(env):
if 'ENV' not in env:
env['ENV'] = {}
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.obj'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
env['PROGPREFIX'] = ''
env['PROGSUFFIX'] = '.exe'
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
env['SHLIBPREFIX'] = ''
env['SHLIBSUFFIX'] = '.dll'
env['LIBPREFIXES'] = '$LIBPREFIX'
env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ]
env['HOST_OS'] = 'os2'
env['HOST_ARCH'] = win32.get_architecture().arch
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
Franky666/programmiersprachen-raytracer | external/boost_1_59_0/libs/mpl/doc/src/refmanual/refmanual.py | 10 | 3851 | # Copyright (c) Aleksey Gurtovoy 2001-2009
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import fnmatch
import os
import sys
import re
import string
underlines = ['+', '/']
special_cases = [ 'inserter', '_1,_2,..._n' ]
def __section_header(section):
parts = section.split('/')
underline = underlines[len(parts) - 1] * len(parts[-1])
if len(parts) > 0:
hidden_target = '.. _`label-%s`:' % '-'.join( parts )
return '\n%s\n%s\n%s\n\n' % (parts[-1], underline, hidden_target )
else:
return '\n%s\n%s\n\n' % (parts[-1], underline )
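# Editor-added illustrative note (not part of the original script):
# __section_header('Metafunctions/Arithmetic Operations') underlines the leaf
# name with the depth-2 character '/' and emits a hidden reST target:
#
#     Arithmetic Operations
#     /////////////////////
#     .. _`label-Metafunctions-Arithmetic Operations`:
#
# The section path used here is a hypothetical example.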
def __section_intro(section):
parts = section.split('/')
return '%s.rst' % '-'.join( [x.split(' ')[0] for x in parts] )
def __include_page( output, src_dir, page, name = None ):
output.write( '.. include:: %s\n' % os.path.join( src_dir, page ) )
# output.write( '.. raw:: LaTeX\n\n' )
# output.write( ' \\newpage\n\n')
if name and name not in special_cases: ref = name
else: ref = '/'.join( page.split('.')[0].split('-') )
if ref.upper() == ref or ref.lower() == ref:
output.write(
( '.. |%(ref)s| replace:: `%(ref)s`_\n' )
% { 'ref': ref }
)
else:
if ref.find( '/' ) == -1:
ref = ' '.join( filter( lambda x: len( x.strip() ) > 0, re.split( '([A-Z][a-z]+)', ref ) ) )
output.write( '.. |%(ref)s| replace:: `%(ref)s`_\n' % { 'ref': ref } )
output.write( '\n' )
def __write_index( filename, index ):
index_file = open( filename, 'w' )
index.sort()
for x in index:
index_file.write( '* |%s|\n' % x )
index_file.close()
def main( filename, src_dir, build_dir ):
sources = filter(
lambda x: fnmatch.fnmatch(x,"*.rst") and x != filename
, os.listdir( src_dir )
)
toc = [ t.strip() for t in open( os.path.join( src_dir, '%s.toc' % filename) ).readlines() ]
topics = {}
for t in toc: topics[t] = []
concept_index = []
index = []
output = open( os.path.join( build_dir, '%s.gen' % filename ), 'w')
output.writelines( open( os.path.join( src_dir, '%s.rst' % filename ), 'r' ).readlines() )
re_topic = re.compile(r'^\.\.\s+(.+?)//(.+?)(\s*\|\s*(\d+))?\s*$')
for src in sources:
placement_spec = open( os.path.join( src_dir, src ), 'r' ).readline()
topic = 'Unclassified'
name = None
order = -1
match = re_topic.match(placement_spec)
if match:
topic = match.group(1)
name = match.group(2)
if match.group(3):
order = int(match.group(4))
if not topics.has_key(topic):
topics[topic] = []
topics[topic].append((src, order, name))
if name:
if topic.find( '/Concepts' ) == -1:
index.append( name )
else:
concept_index.append( name )
for t in toc:
content = topics[t]
content.sort( lambda x,y: x[1] - y[1] )
output.write( __section_header(t) )
intro = __section_intro( t )
if os.path.exists( os.path.join( src_dir, intro ) ):
__include_page( output, src_dir, intro )
for src in content:
__include_page( output, src_dir, src[0], src[2] )
output.close()
__write_index( os.path.join( build_dir, 'concepts.gen' ), concept_index )
__write_index( os.path.join( build_dir, 'index.gen' ), index )
main( 'refmanual', os.path.dirname( __file__ ), sys.argv[1] )
| mit |
SSJohns/osf.io | scripts/impute_names.py | 64 | 2374 | """
Email users to verify citation information.
"""
import re
import logging
from framework.auth.utils import impute_names
from framework.email.tasks import send_email
from website.app import init_app
from website import models
app = init_app('website.settings', set_backends=True, routes=True)
logging.basicConfig(filename='impute_names.log', level=logging.DEBUG)
email_template = u'''Hello, {fullname},
Along with a shorter domain name (http://osf.io), the Open Science Framework
has recently introduced a citation widget on project and component dashboards.
As such, we are expanding user settings to include Citation Style Language name
specifications that will allow us to accurately produce these citations. Your full
name can be different than the parts of the name used in citations.
Based upon your full name, "{fullname}", we've done our best to automatically infer the following:
Given name: {given_name}
Middle name(s): {middle_names}
Family name: {family_name}
Suffix: {suffix}
If this information is correct, you don't need to do anything. If you'd like
to make an adjustment or test the parsing algorithm, please browse to
http://osf.io/settings
If you have any questions or comments, please contact us at feedback+citations@osf.io (don't reply to this email).
I remain,
Sincerely yours,
The OSF Robot.
'''
def clean_template(template):
cleaned = ''
for line in template.splitlines():
cleaned += line or '\n'
cleaned = re.sub(' +', ' ', cleaned)
return cleaned
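# Editor-added illustrative note (not part of the original script):
# clean_template() joins wrapped lines while keeping blank lines as paragraph
# breaks and collapsing runs of spaces, e.g.
# clean_template('a  b\n\nc') == 'a b\nc'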
email_template = clean_template(email_template)
def email_name(user):
logging.debug('Emailing user {0}'.format(user.fullname))
names = {'fullname': user.fullname}
names.update(impute_names(user.fullname))
message=email_template.format(**names).encode('utf-8')
success = send_email(
from_addr='openscienceframework-robot@osf.io',
to_addr=user.username,
subject='Open Science Framework: Verify your citation information',
message=message,
mimetype='plain',
)
if success:
logging.debug('Emailing user {0}: Success'.format(user.fullname))
else:
logging.debug('Emailing user {0}: Failure'.format(user.fullname))
def email_names():
for user in models.User.find():
email_name(user)
#if __name__ == '__main__':
# impute_names('names.tsv')
| apache-2.0 |
jollyroger/debian-buildbot | buildbot/test/fake/fakebuild.py | 2 | 2121 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
import posixpath
from buildbot import config
from buildbot import interfaces
from buildbot.process import factory
from buildbot.process import properties
from twisted.python import components
class FakeBuildStatus(properties.PropertiesMixin, mock.Mock):
# work around http://code.google.com/p/mock/issues/detail?id=105
def _get_child_mock(self, **kw):
return mock.Mock(**kw)
def getInterestedUsers(self):
return []
components.registerAdapter(
lambda build_status: build_status.properties,
FakeBuildStatus, interfaces.IProperties)
class FakeBuild(properties.PropertiesMixin):
def __init__(self, props=None):
self.build_status = FakeBuildStatus()
self.builder = mock.Mock(name='build.builder')
self.builder.config = config.BuilderConfig(
name='bldr',
slavenames=['a'],
factory=factory.BuildFactory())
self.path_module = posixpath
self.workdir = 'build'
self.sources = {}
if props is None:
props = properties.Properties()
props.build = self
self.build_status.properties = props
def getSourceStamp(self, codebase):
if codebase in self.sources:
return self.sources[codebase]
return None
components.registerAdapter(
lambda build: build.build_status.properties,
FakeBuild, interfaces.IProperties)
| gpl-2.0 |
NL66278/OCB | addons/multi_company/__openerp__.py | 52 | 1754 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multi-Company',
'version': '1.0',
'category': 'Tools',
'description': """
This module is for managing a multicompany environment.
=======================================================
This module is the base module for other multi-company modules.
""",
'author': 'OpenERP SA,SYLEAM',
'website': 'https://www.odoo.com',
'depends': [
'base',
'sale_stock',
'project',
],
'data': ['res_company_view.xml'],
'demo': ['multi_company_demo.xml'],
'installable': True,
'auto_install': False,
'images': ['images/companies.jpeg','images/default_company_per_object_form.jpeg', 'images/default_company_per_object_list.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dharmabumstead/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_launch.py | 23 | 4262 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_launch
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: Launch an Ansible Job.
description:
- Launch Ansible Tower jobs. See
U(https://www.ansible.com/tower) for an overview.
options:
job_template:
description:
- Name of the job template to use.
required: True
job_explanation:
description:
- Job explanation field.
job_type:
description:
- Job_type to use for the job, only used if prompt for job_type is set.
choices: ["run", "check", "scan"]
inventory:
description:
- Inventory to use for the job, only used if prompt for inventory is set.
credential:
description:
- Credential to use for job, only used if prompt for credential is set.
extra_vars:
description:
- Extra_vars to use for the job_template. Prepend C(@) if a file.
limit:
description:
- Limit to use for the I(job_template).
tags:
description:
- Specific tags to use for from playbook.
use_job_endpoint:
description:
- Disable launching jobs from job template.
type: bool
default: 'no'
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Launch a job
tower_job_launch:
job_template: "My Job Template"
register: job
- name: Wait for job max 120s
tower_job_wait:
job_id: job.id
timeout: 120
'''
RETURN = '''
id:
description: job id of the newly launched job
returned: success
type: int
sample: 86
status:
description: status of newly launched job
returned: success
type: string
sample: pending
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode, tower_argument_spec, HAS_TOWER_CLI
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
argument_spec = tower_argument_spec()
argument_spec.update(dict(
job_template=dict(required=True),
job_type=dict(choices=['run', 'check', 'scan']),
inventory=dict(),
credential=dict(),
limit=dict(),
tags=dict(type='list'),
extra_vars=dict(type='list'),
))
module = AnsibleModule(
argument_spec,
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
json_output = {}
tags = module.params.get('tags')
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
try:
params = module.params.copy()
if isinstance(tags, list):
params['tags'] = ','.join(tags)
job = tower_cli.get_resource('job')
lookup_fields = ('job_template', 'inventory', 'credential')
for field in lookup_fields:
try:
name = params.pop(field)
result = tower_cli.get_resource(field).get(name=name)
params[field] = result['id']
except exc.NotFound as excinfo:
module.fail_json(msg='Unable to launch job, {0}/{1} was not found: {2}'.format(field, name, excinfo), changed=False)
result = job.launch(no_input=True, **params)
json_output['id'] = result['id']
json_output['status'] = result['status']
except (exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(msg='Unable to launch job: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| gpl-3.0 |
jiangzhonghui/thumbor | vows/detector_vows.py | 11 | 1975 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from pyvows import Vows, expect
ctx = Vows.Context
from thumbor.detectors import BaseDetector
def get_detector(name):
class MockDetector:
def __init__(self, context, index, detectors):
self.context = context
self.index = index
self.detectors = detectors
self.name = name
def detect(self, callback):
callback(self.name)
return MockDetector
@Vows.batch
class BaseDetectorVows(ctx):
class CreateInstanceVows(ctx):
def topic(self):
return BaseDetector("context", 1, "detectors")
def should_not_be_null(self, topic):
expect(topic).not_to_be_null()
expect(topic).not_to_be_an_error()
class DetectShouldRaise(ctx):
@Vows.capture_error
def topic(self):
BaseDetector("context", 1, "detectors").detect(None)
def should_be_an_error(self, topic):
expect(topic).to_be_an_error()
expect(topic).to_be_an_error_like(NotImplementedError)
class NextVows(ctx):
@Vows.async_topic
def topic(self, callback):
detector = BaseDetector("context", 0, [
get_detector("a"),
get_detector("b")
])
return detector.next(callback)
def should_be_detector_b(self, topic):
expect(topic.args[0]).to_equal("b")
class LastDetectorVows(ctx):
@Vows.async_topic
def topic(self, callback):
detector = BaseDetector("context", 0, [
get_detector("a")
])
return detector.next(callback)
def should_be_null(self, topic):
expect(topic.args).to_length(0)
| mit |
muare/PCV | PCV/localdescriptors/dsift.py | 11 | 1360 | from PIL import Image
from numpy import *
import os
from PCV.localdescriptors import sift
def process_image_dsift(imagename,resultname,size=20,steps=10,force_orientation=False,resize=None):
""" Process an image with densely sampled SIFT descriptors
and save the results in a file. Optional input: size of features,
steps between locations, forcing computation of descriptor orientation
(False means all are oriented upwards), tuple for resizing the image."""
im = Image.open(imagename).convert('L')
    if resize is not None:
im = im.resize(resize)
m,n = im.size
if imagename[-3:] != 'pgm':
#create a pgm file
im.save('tmp.pgm')
imagename = 'tmp.pgm'
# create frames and save to temporary file
scale = size/3.0
x,y = meshgrid(range(steps,m,steps),range(steps,n,steps))
xx,yy = x.flatten(),y.flatten()
frame = array([xx,yy,scale*ones(xx.shape[0]),zeros(xx.shape[0])])
savetxt('tmp.frame',frame.T,fmt='%03.3f')
if force_orientation:
cmmd = str("sift "+imagename+" --output="+resultname+
" --read-frames=tmp.frame --orientations")
else:
cmmd = str("sift "+imagename+" --output="+resultname+
" --read-frames=tmp.frame")
os.system(cmmd)
print 'processed', imagename, 'to', resultname
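# Minimal usage sketch (editor illustration; the image path and output name are
# placeholders, and the VLFeat 'sift' binary is assumed to be on the PATH):
# process_image_dsift('empire.jpg', 'empire.dsift', size=20, steps=10, resize=(256, 256))
# locs, descs = sift.read_features_from_file('empire.dsift')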
| bsd-2-clause |
asposeforcloud/Aspose_Cloud_SDK_For_Python | asposecloud/email/__init__.py | 1 | 7149 | __author__ = 'assadmahmood'
import requests
import json
from asposecloud import Product
from asposecloud import AsposeApp
from asposecloud.common import Utils
# ========================================================================
# DOCUMENT CLASS
# ========================================================================
class Document:
def __init__(self, filename):
self.filename = filename
if not filename:
raise ValueError("filename not specified")
self.base_uri = Product.product_uri + 'email/' + self.filename
def get_property(self, property_name, remote_folder='', storage_type='Aspose', storage_name=None):
"""
:param property_name:
:param remote_folder: storage path to operate
:param storage_type: type of storage e.g Aspose, S3
:param storage_name: name of storage e.g. MyAmazonS3
:return:
"""
str_uri = self.base_uri + '/properties/' + property_name
str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
signed_uri = Utils.sign(str_uri)
response = None
try:
response = requests.get(signed_uri, headers={
'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0'
})
response.raise_for_status()
response = response.json()
except requests.HTTPError as e:
print e
print response.content
exit(1)
return response['EmailProperty']['Value']
def set_property(self, property_name, property_value, remote_folder='', storage_type='Aspose', storage_name=None):
"""
:param property_name:
:param property_value:
:param remote_folder: storage path to operate
:param storage_type: type of storage e.g Aspose, S3
:param storage_name: name of storage e.g. MyAmazonS3
:return:
"""
str_uri = self.base_uri + '/properties/' + property_name
str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
json_data = json.dumps({'Value': property_value})
signed_uri = Utils.sign(str_uri)
response = None
try:
response = requests.put(signed_uri, json_data, headers={
'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0'
})
response.raise_for_status()
response = response.json()
except requests.HTTPError as e:
print e
print response.content
exit(1)
return response['EmailProperty']['Value']
def get_attachment(self, attachment_name, remote_folder='', storage_type='Aspose', storage_name=None):
"""
:param attachment_name:
:param remote_folder: storage path to operate
:param storage_type: type of storage e.g Aspose, S3
:param storage_name: name of storage e.g. MyAmazonS3
:return:
"""
if not attachment_name:
raise ValueError("attachment_name not specified")
str_uri = self.base_uri + '/attachments/' + attachment_name
str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
signed_uri = Utils.sign(str_uri)
response = None
try:
response = requests.get(signed_uri, headers={
'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0'
}, stream=True)
response.raise_for_status()
except requests.HTTPError as e:
print e
print response.content
exit(1)
validate_output = Utils.validate_result(response)
if not validate_output:
output_path = AsposeApp.output_path + attachment_name
Utils.save_file(response, output_path)
return output_path
else:
return validate_output
def add_attachment(self, attachment_name, remote_folder='', storage_type='Aspose', storage_name=None):
"""
:param attachment_name:
:param remote_folder: storage path to operate
:param storage_type: type of storage e.g Aspose, S3
:param storage_name: name of storage e.g. MyAmazonS3
:return:
"""
str_uri = self.base_uri + '/attachments/' + attachment_name
str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
signed_uri = Utils.sign(str_uri)
response = None
try:
response = requests.post(signed_uri, None, headers={
'content-type': 'application/json', 'accept': 'application/json'
})
response.raise_for_status()
response = response.json()
except requests.HTTPError as e:
print e
print response.content
exit(1)
return response
# ========================================================================
# CONVERTER CLASS
# ========================================================================
class Converter:
def __init__(self, filename):
self.filename = filename
if not filename:
raise ValueError("filename not specified")
self.base_uri = Product.product_uri + 'email/' + self.filename
def convert(self, save_format, stream_out=False, output_filename=None,
remote_folder='', storage_type='Aspose', storage_name=None):
"""
        Convert an email message document to a different format.
:param save_format:
:param output_filename:
:param remote_folder: storage path to operate
:param storage_type: type of storage e.g Aspose, S3
:param storage_name: name of storage e.g. MyAmazonS3
:return:
"""
if not save_format:
raise ValueError("save_format not specified")
str_uri = self.base_uri + '?format=' + save_format
str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
signed_uri = Utils.sign(str_uri)
response = None
try:
response = requests.get(signed_uri, headers={
'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0'
}, stream=True)
response.raise_for_status()
except requests.HTTPError as e:
print e
print response.content
exit(1)
validate_output = Utils.validate_result(response)
if not validate_output:
if not stream_out:
if output_filename is None:
output_filename = self.filename
output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' + save_format
Utils.save_file(response, output_path)
return output_path
else:
return response.content
else:
return validate_output
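# Minimal usage sketch (editor illustration; the filename and the Aspose app
# configuration/credentials are assumed to be set up elsewhere):
# doc = Document('sample.eml')
# print doc.get_property('Subject')
# saved_path = Converter('sample.eml').convert('msg')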
| mit |
Glottotopia/aagd | moin/local/moin/MoinMoin/support/pygments/lexers/_clbuiltins.py | 3 | 14247 | # -*- coding: utf-8 -*-
"""
pygments.lexers._clbuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~
ANSI Common Lisp builtins.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
BUILTIN_FUNCTIONS = [ # 638 functions
'<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
'adjustable-array-p', 'adjust-array', 'allocate-instance',
'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos',
'apropos-list', 'aref', 'arithmetic-error-operands',
'arithmetic-error-operation', 'array-dimension', 'array-dimensions',
'array-displacement', 'array-element-type', 'array-has-fill-pointer-p',
'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index',
'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if',
'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1',
'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not',
'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole',
'both-case-p', 'boundp', 'break', 'broadcast-stream-streams',
'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr',
'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<',
'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character',
'characterp', 'char-code', 'char-downcase', 'char-equal',
'char-greaterp', 'char-int', 'char-lessp', 'char-name',
'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase',
'cis', 'class-name', 'class-of', 'clear-input', 'clear-output',
'close', 'clrhash', 'code-char', 'coerce', 'compile',
'compiled-function-p', 'compile-file', 'compile-file-pathname',
'compiler-macro-function', 'complement', 'complex', 'complexp',
'compute-applicable-methods', 'compute-restarts', 'concatenate',
'concatenated-stream-streams', 'conjugate', 'cons', 'consp',
'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list',
'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure',
'copy-symbol', 'copy-tree', 'cos', 'cosh', 'count', 'count-if',
'count-if-not', 'decode-float', 'decode-universal-time', 'delete',
'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not',
'delete-package', 'denominator', 'deposit-field', 'describe',
'describe-object', 'digit-char', 'digit-char-p', 'directory',
'directory-namestring', 'disassemble', 'documentation', 'dpb',
'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream',
'ed', 'eighth', 'elt', 'encode-universal-time', 'endp',
'enough-namestring', 'ensure-directories-exist',
'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error',
'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp',
'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author',
'file-error-pathname', 'file-length', 'file-namestring',
'file-position', 'file-string-length', 'file-write-date',
'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class',
'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart',
'find-symbol', 'finish-output', 'first', 'float', 'float-digits',
'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor',
'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line',
'fround', 'ftruncate', 'funcall', 'function-keywords',
'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp',
'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf',
'gethash', 'get-internal-real-time', 'get-internal-run-time',
'get-macro-character', 'get-output-stream-string', 'get-properties',
'get-setf-expansion', 'get-universal-time', 'graphic-char-p',
'hash-table-count', 'hash-table-p', 'hash-table-rehash-size',
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
'host-namestring', 'identity', 'imagpart', 'import',
'initialize-instance', 'input-stream-p', 'inspect',
'integer-decode-float', 'integer-length', 'integerp',
'interactive-stream-p', 'intern', 'intersection',
'invalid-method-error', 'invoke-debugger', 'invoke-restart',
'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm',
'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type',
'lisp-implementation-version', 'list', 'list*', 'list-all-packages',
'listen', 'list-length', 'listp', 'load',
'load-logical-pathname-translations', 'log', 'logand', 'logandc1',
'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname',
'logical-pathname-translations', 'logior', 'lognand', 'lognor',
'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name',
'lower-case-p', 'machine-instance', 'machine-type', 'machine-version',
'macroexpand', 'macroexpand-1', 'macro-function', 'make-array',
'make-broadcast-stream', 'make-concatenated-stream', 'make-condition',
'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table',
'make-instance', 'make-instances-obsolete', 'make-list',
'make-load-form', 'make-load-form-saving-slots', 'make-package',
'make-pathname', 'make-random-state', 'make-sequence', 'make-string',
'make-string-input-stream', 'make-string-output-stream', 'make-symbol',
'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map',
'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl',
'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not',
'merge', 'merge-pathnames', 'method-combination-error',
'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod',
'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc',
'next-method-p', 'nintersection', 'ninth', 'no-applicable-method',
'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse',
'nset-difference', 'nset-exclusive-or', 'nstring-capitalize',
'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if',
'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not',
'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp',
'open', 'open-stream-p', 'output-stream-p', 'package-error-package',
'package-name', 'package-nicknames', 'packagep',
'package-shadowing-symbols', 'package-used-by-list', 'package-use-list',
'pairlis', 'parse-integer', 'parse-namestring', 'pathname',
'pathname-device', 'pathname-directory', 'pathname-host',
'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type',
'pathname-version', 'peek-char', 'phase', 'plusp', 'position',
'position-if', 'position-if-not', 'pprint', 'pprint-dispatch',
'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline',
'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ',
'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim',
'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if',
'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read',
'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list',
'read-from-string', 'read-line', 'read-preserving-whitespace',
'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart',
'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove',
'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method',
'remprop', 'rename-file', 'rename-package', 'replace', 'require',
'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round',
'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar',
'search', 'second', 'set', 'set-difference',
'set-dispatch-macro-character', 'set-exclusive-or',
'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char',
'seventh', 'shadow', 'shadowing-import', 'shared-initialize',
'short-site-name', 'signal', 'signum', 'simple-bit-vector-p',
'simple-condition-format-arguments', 'simple-condition-format-control',
'simple-string-p', 'simple-vector-p', 'sin', 'sinh', 'sixth', 'sleep',
'slot-boundp', 'slot-exists-p', 'slot-makunbound', 'slot-missing',
'slot-unbound', 'slot-value', 'software-type', 'software-version',
'some', 'sort', 'special-operator-p', 'sqrt', 'stable-sort',
'standard-char-p', 'store-value', 'stream-element-type',
'stream-error-stream', 'stream-external-format', 'streamp', 'string',
'string<', 'string<=', 'string=', 'string>', 'string>=', 'string/=',
'string-capitalize', 'string-downcase', 'string-equal',
'string-greaterp', 'string-left-trim', 'string-lessp',
'string-not-equal', 'string-not-greaterp', 'string-not-lessp',
'stringp', 'string-right-trim', 'string-trim', 'string-upcase',
'sublis', 'subseq', 'subsetp', 'subst', 'subst-if', 'subst-if-not',
'substitute', 'substitute-if', 'substitute-if-not', 'subtypep','svref',
'sxhash', 'symbol-function', 'symbol-name', 'symbolp', 'symbol-package',
'symbol-plist', 'symbol-value', 'synonym-stream-symbol', 'syntax:',
'tailp', 'tan', 'tanh', 'tenth', 'terpri', 'third',
'translate-logical-pathname', 'translate-pathname', 'tree-equal',
'truename', 'truncate', 'two-way-stream-input-stream',
'two-way-stream-output-stream', 'type-error-datum',
'type-error-expected-type', 'type-of', 'typep', 'unbound-slot-instance',
'unexport', 'unintern', 'union', 'unread-char', 'unuse-package',
'update-instance-for-different-class',
'update-instance-for-redefined-class', 'upgraded-array-element-type',
'upgraded-complex-part-type', 'upper-case-p', 'use-package',
'user-homedir-pathname', 'use-value', 'values', 'values-list', 'vector',
'vectorp', 'vector-pop', 'vector-push', 'vector-push-extend', 'warn',
'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line',
'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p',
'y-or-n-p', 'zerop',
]
SPECIAL_FORMS = [
'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if',
'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet',
'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote',
'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw',
'unwind-protect',
]
MACROS = [
'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond',
'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric',
'define-compiler-macro', 'define-condition', 'define-method-combination',
'define-modify-macro', 'define-setf-expander', 'define-symbol-macro',
'defmacro', 'defmethod', 'defpackage', 'defparameter', 'defsetf',
'defstruct', 'deftype', 'defun', 'defvar', 'destructuring-bind', 'do',
'do*', 'do-all-symbols', 'do-external-symbols', 'dolist', 'do-symbols',
'dotimes', 'ecase', 'etypecase', 'formatter', 'handler-bind',
'handler-case', 'ignore-errors', 'incf', 'in-package', 'lambda', 'loop',
'loop-finish', 'make-method', 'multiple-value-bind', 'multiple-value-list',
'multiple-value-setq', 'nth-value', 'or', 'pop',
'pprint-exit-if-list-exhausted', 'pprint-logical-block', 'pprint-pop',
'print-unreadable-object', 'prog', 'prog*', 'prog1', 'prog2', 'psetf',
'psetq', 'push', 'pushnew', 'remf', 'restart-bind', 'restart-case',
'return', 'rotatef', 'setf', 'shiftf', 'step', 'time', 'trace', 'typecase',
'unless', 'untrace', 'when', 'with-accessors', 'with-compilation-unit',
'with-condition-restarts', 'with-hash-table-iterator',
'with-input-from-string', 'with-open-file', 'with-open-stream',
'with-output-to-string', 'with-package-iterator', 'with-simple-restart',
'with-slots', 'with-standard-io-syntax',
]
LAMBDA_LIST_KEYWORDS = [
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
'&rest', '&whole',
]
DECLARATIONS = [
'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special',
'ignorable', 'notinline', 'type',
]
BUILTIN_TYPES = [
'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit',
'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil',
'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float',
'simple-array', 'simple-base-string', 'simple-bit-vector', 'simple-string',
'simple-vector', 'standard-char', 'unsigned-byte',
# Condition Types
'arithmetic-error', 'cell-error', 'condition', 'control-error',
'division-by-zero', 'end-of-file', 'error', 'file-error',
'floating-point-inexact', 'floating-point-overflow',
'floating-point-underflow', 'floating-point-invalid-operation',
'parse-error', 'package-error', 'print-not-readable', 'program-error',
'reader-error', 'serious-condition', 'simple-condition', 'simple-error',
'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition',
'style-warning', 'type-error', 'unbound-variable', 'unbound-slot',
'undefined-function', 'warning',
]
BUILTIN_CLASSES = [
'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character',
'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream',
'file-stream', 'float', 'function', 'generic-function', 'hash-table',
'integer', 'list', 'logical-pathname', 'method-combination', 'method',
'null', 'number', 'package', 'pathname', 'ratio', 'rational', 'readtable',
'real', 'random-state', 'restart', 'sequence', 'standard-class',
'standard-generic-function', 'standard-method', 'standard-object',
'string-stream', 'stream', 'string', 'structure-class', 'structure-object',
'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector',
]
| mit |
GarySparrow/mFlaskWeb | venv/Lib/site-packages/pygments/styles/pastie.py | 50 | 2473 | # -*- coding: utf-8 -*-
"""
pygments.styles.pastie
~~~~~~~~~~~~~~~~~~~~~~
Style similar to the `pastie`_ default style.
.. _pastie: http://pastie.caboo.se/
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class PastieStyle(Style):
"""
Style similar to the pastie default style.
"""
default_style = ''
styles = {
Whitespace: '#bbbbbb',
Comment: '#888888',
Comment.Preproc: 'bold #cc0000',
Comment.Special: 'bg:#fff0f0 bold #cc0000',
String: 'bg:#fff0f0 #dd2200',
String.Regex: 'bg:#fff0ff #008800',
String.Other: 'bg:#f0fff0 #22bb22',
String.Symbol: '#aa6600',
String.Interpol: '#3333bb',
String.Escape: '#0044dd',
Operator.Word: '#008800',
Keyword: 'bold #008800',
Keyword.Pseudo: 'nobold',
Keyword.Type: '#888888',
Name.Class: 'bold #bb0066',
Name.Exception: 'bold #bb0066',
Name.Function: 'bold #0066bb',
Name.Property: 'bold #336699',
Name.Namespace: 'bold #bb0066',
Name.Builtin: '#003388',
Name.Variable: '#336699',
Name.Variable.Class: '#336699',
Name.Variable.Instance: '#3333bb',
Name.Variable.Global: '#dd7700',
Name.Constant: 'bold #003366',
Name.Tag: 'bold #bb0066',
Name.Attribute: '#336699',
Name.Decorator: '#555555',
Name.Label: 'italic #336699',
Number: 'bold #0000DD',
Generic.Heading: '#333',
Generic.Subheading: '#666',
Generic.Deleted: 'bg:#ffdddd #000000',
Generic.Inserted: 'bg:#ddffdd #000000',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: 'bg:#e3d2d2 #a61717'
}
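# Minimal usage sketch (editor illustration; assumes Pygments is installed --
# HtmlFormatter accepts a Style subclass directly):
# from pygments import highlight
# from pygments.lexers import PythonLexer
# from pygments.formatters import HtmlFormatter
# print(highlight('print(42)', PythonLexer(), HtmlFormatter(style=PastieStyle)))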
| mit |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/idlelib/ClassBrowser.py | 40 | 7017 | """Class browser.
XXX TO DO:
- reparse when source changed (maybe just a button would be OK?)
(or recheck on window popup)
- add popup menu with more options (e.g. doc strings, base classes, imports)
- show function argument list? (have to do pattern matching on source)
- should the classes and methods lists also be in the module's menu bar?
- add base classes to class browser tree
"""
import os
import sys
import pyclbr
from idlelib import PyShell
from idlelib.WindowList import ListedToplevel
from idlelib.TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from idlelib.configHandler import idleConf
file_open = None # Method...Item and Class...Item use this.
# Normally PyShell.flist.open, but there is no PyShell.flist for htest.
class ClassBrowser:
def __init__(self, flist, name, path, _htest=False):
# XXX This API should change, if the file doesn't end in ".py"
# XXX the code here is bogus!
"""
        _htest - bool, change box location when running htest.
"""
global file_open
if not _htest:
file_open = PyShell.flist.open
self.name = name
self.file = os.path.join(path[0], self.name + ".py")
self._htest = _htest
self.init(flist)
def close(self, event=None):
self.top.destroy()
self.node.destroy()
def init(self, flist):
self.flist = flist
# reset pyclbr
pyclbr._modules.clear()
# create top
self.top = top = ListedToplevel(flist.root)
top.protocol("WM_DELETE_WINDOW", self.close)
top.bind("<Escape>", self.close)
if self._htest: # place dialog below parent if running htest
top.geometry("+%d+%d" %
(flist.root.winfo_rootx(), flist.root.winfo_rooty() + 200))
self.settitle()
top.focus_set()
# create scrolled canvas
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = self.rootnode()
self.node = node = TreeNode(sc.canvas, None, item)
node.update()
node.expand()
def settitle(self):
self.top.wm_title("Class Browser - " + self.name)
self.top.wm_iconname("Class Browser")
def rootnode(self):
return ModuleBrowserTreeItem(self.file)
class ModuleBrowserTreeItem(TreeItem):
def __init__(self, file):
self.file = file
def GetText(self):
return os.path.basename(self.file)
def GetIconName(self):
return "python"
def GetSubList(self):
sublist = []
for name in self.listclasses():
item = ClassBrowserTreeItem(name, self.classes, self.file)
sublist.append(item)
return sublist
def OnDoubleClick(self):
if os.path.normcase(self.file[-3:]) != ".py":
return
if not os.path.exists(self.file):
return
PyShell.flist.open(self.file)
def IsExpandable(self):
return os.path.normcase(self.file[-3:]) == ".py"
def listclasses(self):
dir, file = os.path.split(self.file)
name, ext = os.path.splitext(file)
if os.path.normcase(ext) != ".py":
return []
try:
dict = pyclbr.readmodule_ex(name, [dir] + sys.path)
except ImportError:
return []
items = []
self.classes = {}
for key, cl in dict.items():
if cl.module == name:
s = key
if hasattr(cl, 'super') and cl.super:
supers = []
for sup in cl.super:
if type(sup) is type(''):
sname = sup
else:
sname = sup.name
if sup.module != cl.module:
sname = "%s.%s" % (sup.module, sname)
supers.append(sname)
s = s + "(%s)" % ", ".join(supers)
items.append((cl.lineno, s))
self.classes[s] = cl
items.sort()
list = []
for item, s in items:
list.append(s)
return list
class ClassBrowserTreeItem(TreeItem):
def __init__(self, name, classes, file):
self.name = name
self.classes = classes
self.file = file
try:
self.cl = self.classes[self.name]
except (IndexError, KeyError):
self.cl = None
self.isfunction = isinstance(self.cl, pyclbr.Function)
def GetText(self):
if self.isfunction:
return "def " + self.name + "(...)"
else:
return "class " + self.name
def GetIconName(self):
if self.isfunction:
return "python"
else:
return "folder"
def IsExpandable(self):
if self.cl:
try:
return not not self.cl.methods
except AttributeError:
return False
def GetSubList(self):
if not self.cl:
return []
sublist = []
for name in self.listmethods():
item = MethodBrowserTreeItem(name, self.cl, self.file)
sublist.append(item)
return sublist
def OnDoubleClick(self):
if not os.path.exists(self.file):
return
edit = file_open(self.file)
if hasattr(self.cl, 'lineno'):
lineno = self.cl.lineno
edit.gotoline(lineno)
def listmethods(self):
if not self.cl:
return []
items = []
for name, lineno in self.cl.methods.items():
items.append((lineno, name))
items.sort()
list = []
for item, name in items:
list.append(name)
return list
class MethodBrowserTreeItem(TreeItem):
def __init__(self, name, cl, file):
self.name = name
self.cl = cl
self.file = file
def GetText(self):
return "def " + self.name + "(...)"
def GetIconName(self):
return "python" # XXX
def IsExpandable(self):
return 0
def OnDoubleClick(self):
if not os.path.exists(self.file):
return
edit = file_open(self.file)
edit.gotoline(self.cl.methods[self.name])
def _class_browser(parent): #Wrapper for htest
try:
file = __file__
except NameError:
file = sys.argv[0]
if sys.argv[1:]:
file = sys.argv[1]
else:
file = sys.argv[0]
dir, file = os.path.split(file)
name = os.path.splitext(file)[0]
flist = PyShell.PyShellFileList(parent)
global file_open
file_open = flist.open
ClassBrowser(flist, name, [dir], _htest=True)
if __name__ == "__main__":
from idlelib.idle_test.htest import run
run(_class_browser)
| gpl-2.0 |
ormandj/stalker | stalkerweb/stalkerweb/auth.py | 3 | 2492 | from flask.ext.bcrypt import generate_password_hash, check_password_hash
from flask import request, redirect, url_for, session, abort
from functools import wraps
from stalkerweb import rdb, app
from stalkerweb.stutils import genPrimaryKey64
import rethinkdb as r
def is_valid_email_login(email, password):
uinfo = list(r.table("users").filter({"email": email}).run(rdb.conn))
if len(uinfo) != 1:
return False
else:
if check_password_hash(uinfo[0]['hash'], password):
return True
else:
return False
def is_valid_login(username, password):
uinfo = list(r.table("users").get_all(
username, index="username").run(rdb.conn))
if len(uinfo) != 1:
return False
else:
if check_password_hash(uinfo[0]['hash'], password):
return True
else:
return False
def add_user(username, password, email):
pw_hash = generate_password_hash(password)
try:
res = r.table("users").insert({'id': genPrimaryKey64("%s%s" % (
username, email)), 'username': username, 'hash': pw_hash, 'email': email}).run(rdb.conn)
if res["inserted"] == 1:
return True
else:
return False
except Exception:
return False
def change_pass(username, email, password):
pw_hash = generate_password_hash(password)
try:
q = r.table("users").get(genPrimaryKey64("%s%s" % (username, email))).update(
{"hash": pw_hash}).run(rdb.conn)
if q["replaced"]:
return True
else:
return False
except Exception as err:
print err
return False
def remove_user(username, email):
try:
q = r.table("users").get(
genPrimaryKey64("%s%s" % (username, email))).delete().run(rdb.conn)
if q["deleted"] == 1:
return True
else:
return False
except Exception as err:
print err
return False
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not request.headers.get('X-API-KEY'):
if not session.get('logged_in', False):
return redirect(url_for('signin', next=request.url))
return f(*args, **kwargs)
else:
if request.headers.get('X-API-KEY') == app.config['API_KEY']:
return f(*args, **kwargs)
else:
abort(403)
return decorated_function
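# Minimal usage sketch (editor illustration; the route and view are hypothetical):
# @app.route('/dashboard')
# @login_required
# def dashboard():
#     return 'ok'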
| apache-2.0 |
swprojects/wxPieTool | pyimager.py | 1 | 8720 | """
wxPieTool - wxPython Image Embedding Tool
Copyright 2016 Simon Wu <swprojects@gmx.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import base64
import binascii
import multiprocessing
import os
import tempfile
import wx
import importlib.util
from wx.lib.embeddedimage import PyEmbeddedImage
#------------------------------------------------------------------------------
PROCESS_COUNT = multiprocessing.cpu_count() - 1
def WritePyImageFile(output_file, pyfiledata):
""" writes a new pyImages file from pyfiledata """
py_images_file = open(output_file, 'w') # delete any existing file.
"""
    Write the relevant header portion and the import statements
    as Python statements at the top of the output pyImages file.
"""
py_images_file.write('#' + '-'*69 + '\n\n')
line = '# This file was generated by %s\n\n' %("wxImage Embedding Tool")
py_images_file.write(line)
py_images_file.write('import wx\n')
py_images_file.write('from wx.lib.embeddedimage import PyEmbeddedImage\n')
py_images_file.write('\n')
py_images_file.write('image_index = {}\n')
py_images_file.write('image_catalog = {}\n')
"""
    Writes the Python code to the output pyImages file that both defines an image
    and can generate raw data, wx.Image, wx.Bitmap and wx.Icon objects
    when the pyImages file is imported by any Python application.
"""
for index in sorted(pyfiledata.keys()):
values = pyfiledata[index]
name = values["name"]
data = values["data"]
py_images_file.write('#' + '-'*69 + '\n\n')
py_images_file.write('image_catalog["%s"] = PyEmbeddedImage(\n%s\n' % (name, data))
py_images_file.write(' )\n\n')
# When the PyImages file is imported,
# the following dictionary idName value will become a function name.
py_images_file.write('image_index[%s] = "%s"\n' % (str(index), name))
py_images_file.write('\n')
"""
Writes the Get functions at the end of the file
"""
py_images_file.write('#' + '-'*69 + '\n\n')
# get data function
py_images_file.write('def GetData(name):\n')
py_images_file.write(' ')
py_images_file.write('return image_catalog[name].GetData()\n')
py_images_file.write('\n')
# scale image function
py_images_file.write('def ScaleImage(name, width, height):\n')
py_images_file.write(' ')
py_images_file.write('image = image_catalog[name].GetImage()\n')
py_images_file.write(' ')
py_images_file.write('image = image.Scale(width, height, wx.IMAGE_QUALITY_HIGH)\n')
py_images_file.write(' ')
py_images_file.write('return image\n')
py_images_file.write('\n')
for func_name in ["Image","Bitmap","Icon"]:
py_images_file.write('def Get%s(name, width=-1, height=-1):\n' % func_name)
py_images_file.write(' ')
py_images_file.write('if (width,height) == (-1,-1):\n')
py_images_file.write(' ')
py_images_file.write('return image_catalog[name].Get%s()\n' % func_name)
py_images_file.write(' ')
py_images_file.write('else:\n')
py_images_file.write(' ')
py_images_file.write('image = ScaleImage(name, width, height)\n')
py_images_file.write(' ')
py_images_file.write('image = wx.%s(image)\n' % func_name)
py_images_file.write(' ')
py_images_file.write('return image\n')
py_images_file.write('\n')
py_images_file.close()
#end WritePyImageFile def
#------------------------------------------------------------------------------
def B64EncodeBinaryData(image_data):
"""
    B64-encodes a binary byte string. Returns a single string of newline-joined,
    quoted lines suitable for embedding in a Python file.
"""
# Encode the PNG file's lossless-compressed binary image data into a single, big b64 string.
encoded_data = binascii.b2a_base64(image_data)[:-1]
# encoded_data= image_data.encode("base64")
# encoded_data=image_data
# Chop the b64 character-encoded encoded_data into manageable
# line lengths for writing to a file.
data_list = [] # b64 linesOfEncPngImgData list.
while encoded_data:
        line_of_data = encoded_data[:57] # take 57 encoded chars per output line
encoded_data = encoded_data[57:] # The remainder of data to be encoded.
# extract the string from b"<str>"
line_of_data = line_of_data.decode("utf8")
line_of_data = ' "%s"' %(line_of_data)
data_list.append(line_of_data)
image_data_list = '\n'.join(data_list)
return image_data_list
#end B64EncodeBinaryData def
#------------------------------------------------------------------------------
def BitmapToPngFile(bitmap, tmp_file) :
"""
Save a wx.Bitmap to a PNG file. The contents of this file is intended
to be b64 encoded in order to finally save it to the output pyImages file.
"""
if bitmap.SaveFile(tmp_file, wx.BITMAP_TYPE_PNG): # SaveFile() success
return True
elif wx.Image(bitmap).SaveFile(tmp_file, wx.BITMAP_TYPE_PNG):
# wx.Bitmap.SaveFile() has failed.
# Try a different save method.
return True
else:
return None
#end BitmapToPngFile def
def CreatePngFileData(path) :
"""
    Return the data of an image file, which can then be passed to B64EncodeBinaryData.
"""
if not os.path.exists(path):
return None #"File no longer exists. Cancel import"
try:
bitmap = wx.Bitmap(path, wx.BITMAP_TYPE_ANY)
    except Exception:
        return None  # Failed to load the image. Cancel import
# Is image file bad?
if not bitmap.IsOk():
        return None  # Image data is invalid. Cancel import
# Read the original image file and write it to a new PNG file.
tmp_file = tempfile.TemporaryFile()
tmp_file = tmp_file.name # get the path of temporary file
# print(dir(tmp_file))
bmp_to_png = BitmapToPngFile(bitmap, tmp_file)
if not bmp_to_png:
print("cannot write to temporary file")
# Encode the PNG file's lossless-compressed binary image data into a single, big b64 string.
png_file = open(tmp_file, 'rb')
image_data = png_file.read()
# b64 = image_data.encode ('base64')
png_file.close()
os.remove(tmp_file)
# print("creating temporary file",tmp_file, image_data )
from wx.lib.embeddedimage import PyEmbeddedImage
return image_data
#end CreatePngFileData def
def GetPyImageData(pyimage):
"""
Import the embedded_image_file and add its images to image_dict{}.
The file's existance is expected to have been verified.
"""
file_name, file_ext = os.path.splitext(os.path.basename(pyimage))
print(file_name, file_ext)
# import using the full path of the filename
# """http://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path"""
try:
spec = importlib.util.spec_from_file_location("file_name", pyimage)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
except:
print("Failed to load file. Is it a python file?")
return
# check if the python file is actually a PyImages file.
try:
image_index = foo.image_index # should have been defined
# image_catalog = foo.image_catalog
print(image_index.items())
data = {}
for index, image_name in image_index.items():
data[index] = {"name":image_name,
"data":foo.GetData(image_name),
"bitmap":foo.GetBitmap(image_name)}
except NameError:
print("Failed to load file. Is it a valid PyEmbeddedImage File?" )
return
return data
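# Minimal usage sketch (editor illustration; 'icon.png' and 'images.py' are
# placeholder paths, and a wx.App may be required for wx.Bitmap operations):
# data = CreatePngFileData('icon.png')
# encoded = B64EncodeBinaryData(data)
# WritePyImageFile('images.py', {0: {'name': 'icon', 'data': encoded}})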
#end GetPyImageData def | gpl-2.0 |
rocket-league-replays/rocket-league-replay-parser | replay_parser/replay_parser.py | 2 | 14301 | # -*- coding: utf-8 -*-
import pprint
import re
import sys
import struct
class ReplayParser:
SERVER_REGEX = r'((EU|USE|USW|OCE|SAM)\d+(-[A-Z][a-z]+)?)'
def __init__(self, debug=False):
self.debug = debug
def parse(self, replay_file):
# Work out what type of file we're dealing with.
if hasattr(replay_file, 'read'):
replay_file.seek(0)
elif hasattr(replay_file, 'file'):
replay_file = open(replay_file.file.path, 'rb')
elif isinstance(replay_file, str):
replay_file = open(replay_file, 'rb')
else:
raise TypeError("Unable to determine file type.")
data = {}
# Length of properties section (+36)
properties_length = self._read_integer(replay_file)
# CRC check.
crc = self._read_unknown(replay_file, 4)
# Version number
data['version_number'] = '{}.{}'.format(
self._read_integer(replay_file),
self._read_integer(replay_file)
)
# Identifier
data['version'] = self._read_string(replay_file)
data['header'] = self._read_properties(replay_file)
if 'Team0Score' not in data['header']:
data['header']['Team0Score'] = 0
if 'Team1Score' not in data['header']:
data['header']['Team1Score'] = 0
self.number_of_goals = data['header']['Team0Score'] + data['header']['Team1Score']
if 'Goals' not in data['header']:
data['header']['Goals'] = []
assert replay_file.tell() == properties_length + 8
# Size of remaining data.
remaining_length = self._read_integer(replay_file)
# TODO: Potentially a CRC check?
crc_2 = self._read_unknown(replay_file, 4)
data['level_info'] = self._read_level_info(replay_file)
data['key_frames'] = self._read_key_frames(replay_file)
data['network_stream'] = self._read_network_stream(replay_file)
data['debug_strings'] = self._read_debug_strings(replay_file)
data['goal_ticks'] = self._read_goal_ticks(replay_file)
data['packages'] = self._read_packages(replay_file)
data['objects'] = self._read_objects(replay_file)
data['name_table'] = self._read_name_table(replay_file)
data['classes'] = self._read_classes(replay_file)
data['property_tree'] = self._read_property_tree(replay_file, data['objects'], data['classes'])
assert replay_file.tell() == properties_length + remaining_length + 16
# Run some manual parsing operations.
data = self.manual_parse(data, replay_file)
# data['network_stream'] = self._process_network_stream(data['network_stream'])
if hasattr(replay_file, 'file') and replay_file.file.__class__.__name__ != 'InMemoryUploadedFile':
replay_file.close()
return data
def _read_properties(self, replay_file):
results = {}
while True:
property_info = self._read_property(replay_file)
if property_info:
results[property_info['name']] = property_info['value']
else:
return results
def _read_property(self, replay_file):
name_length = self._read_integer(replay_file)
property_name = self._read_string(replay_file, name_length)
if property_name == 'None':
return None
type_name = self._read_string(replay_file)
value = None
if type_name == 'IntProperty':
value_length = self._read_integer(replay_file, 8)
value = self._read_integer(replay_file, value_length)
elif type_name == 'StrProperty':
unknown = self._read_integer(replay_file, 8)
length = self._read_integer(replay_file)
if length < 0:
length = abs(length) * 2
value = self._read_string(replay_file, length)[:-1].decode('utf-16').encode('utf-8')
else:
value = self._read_string(replay_file, length)
elif type_name == 'FloatProperty':
length = self._read_integer(replay_file, 8)
value = self._read_float(replay_file, length)
elif type_name == 'NameProperty':
unknown = self._read_integer(replay_file, 8)
value = self._read_string(replay_file)
elif type_name == 'ArrayProperty':
# I imagine that this is the length of bytes that the data
# in the "array" actually take up in the file.
unknown = self._read_integer(replay_file, 8)
array_length = self._read_integer(replay_file)
value = [
self._read_properties(replay_file)
for x in xrange(array_length)
]
elif type_name == 'ByteProperty':
# This could be a new array type.
# 25 (8) / 15 (4) / Str len 15 / Int (4) - 21 / Str len 21
self._read_integer(replay_file, 8)
key_length = self._read_integer(replay_file, 4)
byte_key = self._read_string(replay_file, length=key_length)
byte_value = self._read_string(replay_file)
value = {
byte_key: byte_value
}
elif type_name == 'QWordProperty':
# 64 bit int, 8 bytes.
length = self._read_integer(replay_file, 8)
value = self._read_integer(replay_file, length)
elif type_name == 'BoolProperty':
unknown = self._read_integer(replay_file, 8)
value = self._read_integer(replay_file, 1)
if value == 0:
value = False
elif value == 1:
value = True
else:
raise Exception("Unknown type: {}".format(type_name))
return {'name': property_name, 'value': value}
def _read_level_info(self, replay_file):
map_names = []
number_of_maps = self._read_integer(replay_file)
for x in xrange(number_of_maps):
map_names.append(self._read_string(replay_file))
return map_names
def _read_key_frames(self, replay_file):
number_of_key_frames = self._read_integer(replay_file)
key_frames = [
self._read_key_frame(replay_file)
for x in xrange(number_of_key_frames)
]
return key_frames
def _read_key_frame(self, replay_file):
time = self._read_float(replay_file, 4)
frame = self._read_integer(replay_file)
file_position = self._read_integer(replay_file)
return {
'time': time,
'frame': frame,
'file_position': file_position
}
def _read_network_stream(self, replay_file):
array_length = self._read_integer(replay_file)
network_stream = self._read_unknown(replay_file, array_length)
def _read_debug_strings(self, replay_file):
array_length = self._read_integer(replay_file)
if array_length == 0:
return []
debug_strings = []
unknown = self._read_integer(replay_file)
while len(debug_strings) < array_length:
player_name = self._read_string(replay_file)
debug_string = self._read_string(replay_file)
debug_strings.append({
'PlayerName': player_name,
'DebugString': debug_string,
})
if len(debug_strings) < array_length:
# Seems to be some nulls and an ACK?
unknown = self._read_integer(replay_file)
return debug_strings
def _read_goal_ticks(self, replay_file):
goal_ticks = []
num_goals = self._read_integer(replay_file)
for x in xrange(num_goals):
team = self._read_string(replay_file)
frame = self._read_integer(replay_file)
goal_ticks.append({
'Team': team,
'frame': frame,
})
return goal_ticks
def _read_packages(self, replay_file):
num_packages = self._read_integer(replay_file)
packages = []
for x in xrange(num_packages):
packages.append(self._read_string(replay_file))
return packages
def _read_objects(self, replay_file):
num_objects = self._read_integer(replay_file)
objects = []
for x in xrange(num_objects):
objects.append(self._read_string(replay_file))
return objects
def _read_name_table(self, replay_file):
name_table_length = self._read_integer(replay_file)
table = []
for x in xrange(name_table_length):
table.append(self._read_string(replay_file))
return table
def _read_classes(self, replay_file):
class_index_map_length = self._read_integer(replay_file)
class_index_map = {}
for x in xrange(class_index_map_length):
name = self._read_string(replay_file)
integer = self._read_integer(replay_file)
class_index_map[integer] = name
return class_index_map
def _read_property_tree(self, replay_file, objects, classes):
branches = []
property_tree_length = self._read_integer(replay_file)
for x in xrange(property_tree_length):
data = {
'class': self._read_integer(replay_file),
'parent_id': self._read_integer(replay_file),
'id': self._read_integer(replay_file),
'properties': {}
}
if data['id'] == data['parent_id']:
data['id'] = 0
length = self._read_integer(replay_file)
for x in xrange(length):
index = self._read_integer(replay_file)
value = self._read_integer(replay_file)
data['properties'][index] = value
branches.append(data)
# Map the property keys against the class list.
classed = {}
def map_properties(id):
for branch in branches:
if branch['id'] == id:
props = {}
if branch['parent_id'] > 0:
props = map_properties(branch['parent_id'])
for k, v in enumerate(branch['properties']):
props[v] = objects[k]
return props
return {}
for branch in branches:
# {'parent_id': 36, 'properties': {42: 36}, 'class': 43, 'id': 37}
classed[branch['class']] = {
'class': classes[branch['class']],
'properties': map_properties(branch['id'] if branch['id'] > 0 else branch['parent_id'])
}
return branches
# Temporary method while we learn the replay format.
def manual_parse(self, results, replay_file):
server_regexp = re.compile(self.SERVER_REGEX)
replay_file.seek(0)
search = server_regexp.search(replay_file.read())
if search:
results['header']['ServerName'] = search.group()
return results
##################
# Helper functions
##################
def _debug_bits(self, replay_file, labels=None):
byte = replay_file.read(1)
output = ()
for index in xrange(8):
i, j = divmod(index, 8)
if ord(byte[i]) & (1 << j):
value = '1'
else:
value = '0'
formatted = value.rjust(index+1, '.').ljust(8, '.')
output = output + (int(value),)
if labels and len(labels) == 8:
print('{} = {}: {}'.format(
formatted,
labels[index],
                    'Set' if value == '1' else 'Not set',
))
else:
print(value.rjust(index+1, '.').ljust(8, '.'))
return output
def _read_bit(self, string, index):
i, j = divmod(index, 8)
if ord(string[i]) & (1 << j):
return 1
else:
return 0
def _pretty_byte_string(self, bytes_read):
return ' '.join("{:02x}".format(ord(x)) for x in bytes_read)
def _read_integer(self, replay_file, length=4):
number_format = {
1: '<b',
2: '<h',
4: '<i',
8: '<q',
}[length]
bytes_read = replay_file.read(length)
value = struct.unpack(number_format, bytes_read)[0]
return value
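    # Illustration (editor note): _read_integer(f, 4) reads four bytes and unpacks
    # them with '<i' (little-endian signed 32-bit); length 8 maps to '<q'.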
def _read_float(self, replay_file, length):
number_format = {
4: '<f',
8: '<d'
}[length]
bytes_read = replay_file.read(length)
value = struct.unpack(number_format, bytes_read)[0]
return value
def _read_unknown(self, replay_file, num_bytes):
bytes_read = replay_file.read(num_bytes)
return bytes_read
def _read_string(self, replay_file, length=None):
if not length:
length = self._read_integer(replay_file)
bytes_read = replay_file.read(length)[0:-1]
return bytes_read
def _sniff_bytes(self, replay_file, size):
b = self._read_unknown(replay_file, size)
print("**** BYTES ****")
print("Bytes: {}".format(self._pretty_byte_string(b)))
print('Size:', size)
if size == 2:
print("Short: Signed: {} Unsigned: {}".format(struct.unpack('<h', b), struct.unpack('<H', b)))
else:
if size == 4:
print("Integer: Signed: {}, Unsigned: {}".format(struct.unpack('<i', b), struct.unpack('<I', b)))
print("Float: {}".format(struct.unpack('<f', b)))
print("String: {}".format(b))
if __name__ == '__main__': # pragma: no cover
filename = sys.argv[1]
if not filename.endswith('.replay'):
sys.exit('Filename {} does not appear to be a valid replay file'.format(filename))
with open(filename, 'rb') as replay_file:
try:
results = ReplayParser(debug=False).parse(replay_file)
# pprint.pprint(results)
except IOError as e:
print(e)
except struct.error as e:
print(e)
except Exception as e:
print(e)
| gpl-2.0 |
troyleak/youtube-dl | youtube_dl/extractor/hypem.py | 128 | 2101 | from __future__ import unicode_literals
import json
import time
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class HypemIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?hypem\.com/track/(?P<id>[^/]+)/'
_TEST = {
'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
'md5': 'b9cc91b5af8995e9f0c1cee04c575828',
'info_dict': {
'id': '1v6ga',
'ext': 'mp3',
'title': 'Tame',
'uploader': 'BODYWORK',
}
}
def _real_extract(self, url):
track_id = self._match_id(url)
data = {'ax': 1, 'ts': time.time()}
data_encoded = compat_urllib_parse.urlencode(data)
complete_url = url + "?" + data_encoded
request = compat_urllib_request.Request(complete_url)
response, urlh = self._download_webpage_handle(
request, track_id, 'Downloading webpage with the url')
cookie = urlh.headers.get('Set-Cookie', '')
html_tracks = self._html_search_regex(
r'(?ms)<script type="application/json" id="displayList-data">\s*(.*?)\s*</script>',
response, 'tracks')
try:
track_list = json.loads(html_tracks)
track = track_list['tracks'][0]
except ValueError:
raise ExtractorError('Hypemachine contained invalid JSON.')
key = track['key']
track_id = track['id']
artist = track['artist']
title = track['song']
serve_url = "http://hypem.com/serve/source/%s/%s" % (track_id, key)
request = compat_urllib_request.Request(
serve_url, '', {'Content-Type': 'application/json'})
request.add_header('cookie', cookie)
song_data = self._download_json(request, track_id, 'Downloading metadata')
final_url = song_data["url"]
return {
'id': track_id,
'url': final_url,
'ext': 'mp3',
'title': title,
'uploader': artist,
}
| unlicense |
pli3/e2-openwbif | plugin/controllers/views/web/powerstate.py | 1 | 5042 | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.286449
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/powerstate.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class powerstate(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(powerstate, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_30375654 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2powerstate>
\t<e2instandby>
''')
if VFFSL(SL,"instandby",True) : # generated from line 5, col 3
_v = "true"
if _v is not None: write(_filter(_v))
else:
_v = "false"
if _v is not None: write(_filter(_v))
write(u'''\t</e2instandby>
</e2powerstate>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_30375654
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_powerstate= 'respond'
## END CLASS DEFINITION
if not hasattr(powerstate, '_initCheetahAttributes'):
templateAPIClass = getattr(powerstate, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(powerstate)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=powerstate()).run()
| gpl-2.0 |
tudyzhb/yichui | django/http/multipartparser.py | 20 | 22272 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import cgi
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_unicode
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser(object):
"""
A rfc2388 multipart/form-data parser.
    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the
uploaded data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
# Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type)
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH',0)))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31-4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# so handle content-length == 0 before continuing
if self._content_length == 0:
return QueryDict(MultiValueDict(), encoding=self._encoding), MultiValueDict()
# See if the handler will want to take care of the parsing.
# This allows overriding everything if somebody wants it.
for handler in handlers:
result = handler.handle_raw_input(self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_unicode(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = str(raw_data).decode('base64')
except Exception:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_unicode(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_unicode(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type = meta_data.get('content-type', ('',))[0].strip()
content_type_extra = meta_data.get('content-type', (0, {}))[1]
if content_type_extra is None:
content_type_extra = {}
try:
charset = content_type_extra.get('charset', None)
except Exception:
charset = None
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset, content_type_extra.copy())
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
try:
chunk = str(chunk).decode('base64')
except Exception, e:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data: %r" % e)
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile, e:
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload, e:
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signalling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_unicode(old_field_name,
self._encoding,
errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\")+1:].strip()
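# --- Illustrative usage (not part of the original module) ---
# A minimal, hedged sketch of how MultiPartParser is typically driven from a
# Django request; the `request` attributes below are assumptions for
# illustration, not code from this file.
#
#     parser = MultiPartParser(request.META, request, request.upload_handlers)
#     post, files = parser.parse()
#     # `post` is a QueryDict of form fields; `files` is a MultiValueDict
#     # mapping field names to uploaded file objects.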
class LazyStream(object):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = ''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = size if size is not None else self._remaining
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield ''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream, stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = self.next()
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = ''.join(parts())
return out
def next(self):
"""
Used when the exact number of bytes to read is unimportant.
This procedure just returns whatever chunk is conveniently returned
from the iterator. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = ''
else:
output = self._producer.next()
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = ''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousOperation(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
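# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of how read()/unget() interact on a LazyStream, assuming a
# simple iterator producer; the data is made up for illustration.
#
#     stream = LazyStream(iter(['abcdef', 'ghi']))
#     head = stream.read(4)            # 'abcd'; 'ef' is stashed as leftover
#     stream.unget(head)               # position (and tell()) rewinds to 0
#     assert stream.read() == 'abcdefghi'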
class ChunkIter(object):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def next(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter(object):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def next(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(object):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
Further calls to .next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
try:
from mx.TextTools import FS
self._fs = FS(boundary).find
except ImportError:
self._fs = lambda data: data.find(boundary)
def __iter__(self):
return self
def next(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = ''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]:# and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof=False):
"""
Finds a multipart boundary in data.
If no boundary exists in the data, None is returned. Otherwise,
a tuple containing the indices of the following is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = self._fs(data)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
if data[max(0,end-1)] == '\n':
end -= 1
if data[max(0,end-1)] == '\r':
end -= 1
return end, next
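# --- Illustrative example (not part of the original module) ---
# A hedged sketch of what _find_boundary() computes for a boundary of
# '--xyz'; the data below is made up for illustration.
#
#     data = 'field body\r\n--xyz\r\nnext part'
#     # The boundary '--xyz' starts at index 12; `end` backs up over the
#     # preceding CRLF to 10, and `next` is 17 (just past the boundary
#     # bytes), so the result is (10, 17).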
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find('\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split('\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = '--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value. """
plist = _parse_header_params(';' + line)
key = plist.pop(0).lower()
pdict = {}
for p in plist:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
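# --- Illustrative example (not part of the original module) ---
# A hedged sketch of parse_header() on a typical Content-Disposition value;
# the input is made up for illustration.
#
#     parse_header('form-data; name="avatar"; filename="me.png"')
#     # -> ('form-data', {'name': 'avatar', 'filename': 'me.png'})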
def _parse_header_params(s):
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| bsd-3-clause |
huangbin0709/easyLinux | boot/u-boot-2015.01/tools/genboardscfg.py | 21 | 14502 | #!/usr/bin/env python2
#
# Author: Masahiro Yamada <yamada.m@jp.panasonic.com>
#
# SPDX-License-Identifier: GPL-2.0+
#
"""
Converter from Kconfig and MAINTAINERS to a board database.
Run 'tools/genboardscfg.py' to create a board database.
Run 'tools/genboardscfg.py -h' for available options.
Python 2.6 or later (but not Python 3.x) is required to run this script.
"""
import errno
import fnmatch
import glob
import multiprocessing
import optparse
import os
import subprocess
import sys
import tempfile
import time
sys.path.append(os.path.join(os.path.dirname(__file__), 'buildman'))
import kconfiglib
### constant variables ###
OUTPUT_FILE = 'boards.cfg'
CONFIG_DIR = 'configs'
SLEEP_TIME = 0.03
COMMENT_BLOCK = '''#
# List of boards
# Automatically generated by %s: don't edit
#
# Status, Arch, CPU, SoC, Vendor, Board, Target, Options, Maintainers
''' % __file__
### helper functions ###
def try_remove(f):
"""Remove a file ignoring 'No such file or directory' error."""
try:
os.remove(f)
except OSError as exception:
# Ignore 'No such file or directory' error
if exception.errno != errno.ENOENT:
raise
def check_top_directory():
"""Exit if we are not at the top of source directory."""
for f in ('README', 'Licenses'):
if not os.path.exists(f):
sys.exit('Please run at the top of source directory.')
def output_is_new(output):
"""Check if the output file is up to date.
Returns:
True if the given output file exists and is newer than any of
*_defconfig, MAINTAINERS and Kconfig*. False otherwise.
"""
try:
ctime = os.path.getctime(output)
except OSError as exception:
if exception.errno == errno.ENOENT:
# return False on 'No such file or directory' error
return False
else:
raise
for (dirpath, dirnames, filenames) in os.walk(CONFIG_DIR):
for filename in fnmatch.filter(filenames, '*_defconfig'):
if fnmatch.fnmatch(filename, '.*'):
continue
filepath = os.path.join(dirpath, filename)
if ctime < os.path.getctime(filepath):
return False
for (dirpath, dirnames, filenames) in os.walk('.'):
for filename in filenames:
if (fnmatch.fnmatch(filename, '*~') or
not fnmatch.fnmatch(filename, 'Kconfig*') and
not filename == 'MAINTAINERS'):
continue
filepath = os.path.join(dirpath, filename)
if ctime < os.path.getctime(filepath):
return False
# Detect a board that has been removed since the current board database
# was generated
with open(output) as f:
for line in f:
if line[0] == '#' or line == '\n':
continue
defconfig = line.split()[6] + '_defconfig'
if not os.path.exists(os.path.join(CONFIG_DIR, defconfig)):
return False
return True
### classes ###
class KconfigScanner:
"""Kconfig scanner."""
### constant variable only used in this class ###
_SYMBOL_TABLE = {
'arch' : 'SYS_ARCH',
'cpu' : 'SYS_CPU',
'soc' : 'SYS_SOC',
'vendor' : 'SYS_VENDOR',
'board' : 'SYS_BOARD',
'config' : 'SYS_CONFIG_NAME',
'options' : 'SYS_EXTRA_OPTIONS'
}
def __init__(self):
"""Scan all the Kconfig files and create a Config object."""
# Define environment variables referenced from Kconfig
os.environ['srctree'] = os.getcwd()
os.environ['UBOOTVERSION'] = 'dummy'
os.environ['KCONFIG_OBJDIR'] = ''
self._conf = kconfiglib.Config()
def __del__(self):
"""Delete a leftover temporary file before exit.
The scan() method of this class creates a temporary file and deletes
it on success. If scan() method throws an exception on the way,
the temporary file might be left over. In that case, it should be
deleted in this destructor.
"""
if hasattr(self, '_tmpfile') and self._tmpfile:
try_remove(self._tmpfile)
def scan(self, defconfig):
"""Load a defconfig file to obtain board parameters.
Arguments:
defconfig: path to the defconfig file to be processed
Returns:
A dictionary of board parameters. It has a form of:
{
'arch': <arch_name>,
'cpu': <cpu_name>,
'soc': <soc_name>,
'vendor': <vendor_name>,
'board': <board_name>,
'target': <target_name>,
'config': <config_header_name>,
'options': <extra_options>
}
"""
# strip special prefixes and save it in a temporary file
fd, self._tmpfile = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
for line in open(defconfig):
colon = line.find(':CONFIG_')
if colon == -1:
f.write(line)
else:
f.write(line[colon + 1:])
self._conf.load_config(self._tmpfile)
try_remove(self._tmpfile)
self._tmpfile = None
params = {}
# Get the value of CONFIG_SYS_ARCH, CONFIG_SYS_CPU, ... etc.
# Set '-' if the value is empty.
for key, symbol in self._SYMBOL_TABLE.items():
value = self._conf.get_symbol(symbol).get_value()
if value:
params[key] = value
else:
params[key] = '-'
defconfig = os.path.basename(defconfig)
params['target'], match, rear = defconfig.partition('_defconfig')
assert match and not rear, '%s : invalid defconfig' % defconfig
# fix-up for aarch64
if params['arch'] == 'arm' and params['cpu'] == 'armv8':
params['arch'] = 'aarch64'
# fix-up options field. It should have the form:
# <config name>[:comma separated config options]
if params['options'] != '-':
params['options'] = params['config'] + ':' + \
params['options'].replace(r'\"', '"')
elif params['config'] != params['target']:
params['options'] = params['config']
return params
def scan_defconfigs_for_multiprocess(queue, defconfigs):
"""Scan defconfig files and queue their board parameters
This function is intended to be passed to
multiprocessing.Process() constructor.
Arguments:
queue: An instance of multiprocessing.Queue().
The resulting board parameters are written into it.
defconfigs: A sequence of defconfig files to be scanned.
"""
kconf_scanner = KconfigScanner()
for defconfig in defconfigs:
queue.put(kconf_scanner.scan(defconfig))
def read_queues(queues, params_list):
"""Read the queues and append the data to the paramers list"""
for q in queues:
while not q.empty():
params_list.append(q.get())
def scan_defconfigs(jobs=1):
"""Collect board parameters for all defconfig files.
This function invokes multiple processes for faster processing.
Arguments:
jobs: The number of jobs to run simultaneously
"""
all_defconfigs = []
for (dirpath, dirnames, filenames) in os.walk(CONFIG_DIR):
for filename in fnmatch.filter(filenames, '*_defconfig'):
if fnmatch.fnmatch(filename, '.*'):
continue
all_defconfigs.append(os.path.join(dirpath, filename))
total_boards = len(all_defconfigs)
processes = []
queues = []
for i in range(jobs):
defconfigs = all_defconfigs[total_boards * i / jobs :
total_boards * (i + 1) / jobs]
q = multiprocessing.Queue(maxsize=-1)
p = multiprocessing.Process(target=scan_defconfigs_for_multiprocess,
args=(q, defconfigs))
p.start()
processes.append(p)
queues.append(q)
# The resulting data should be accumulated to this list
params_list = []
# Data in the queues should be retrieved periodically.
# Otherwise, the queues would become full and subprocesses would get stuck.
while any([p.is_alive() for p in processes]):
read_queues(queues, params_list)
# sleep for a while until the queues are filled
time.sleep(SLEEP_TIME)
# Joining subprocesses just in case
# (all subprocesses should already have finished)
for p in processes:
p.join()
# retrieve leftover data
read_queues(queues, params_list)
return params_list
class MaintainersDatabase:
"""The database of board status and maintainers."""
def __init__(self):
"""Create an empty database."""
self.database = {}
def get_status(self, target):
"""Return the status of the given board.
The board status is generally either 'Active' or 'Orphan'.
Display a warning message and return '-' if status information
is not found.
Returns:
'Active', 'Orphan' or '-'.
"""
if not target in self.database:
print >> sys.stderr, "WARNING: no status info for '%s'" % target
return '-'
tmp = self.database[target][0]
if tmp.startswith('Maintained'):
return 'Active'
elif tmp.startswith('Orphan'):
return 'Orphan'
else:
print >> sys.stderr, ("WARNING: %s: unknown status for '%s'" %
(tmp, target))
return '-'
def get_maintainers(self, target):
"""Return the maintainers of the given board.
Returns:
Maintainers of the board. If the board has two or more maintainers,
they are separated with colons.
"""
if not target in self.database:
print >> sys.stderr, "WARNING: no maintainers for '%s'" % target
return ''
return ':'.join(self.database[target][1])
def parse_file(self, file):
"""Parse a MAINTAINERS file.
Parse a MAINTAINERS file and accumulates board status and
maintainers information.
Arguments:
file: MAINTAINERS file to be parsed
"""
targets = []
maintainers = []
status = '-'
for line in open(file):
# Check also commented maintainers
if line[:3] == '#M:':
line = line[1:]
tag, rest = line[:2], line[2:].strip()
if tag == 'M:':
maintainers.append(rest)
elif tag == 'F:':
# expand wildcard and filter by 'configs/*_defconfig'
for f in glob.glob(rest):
front, match, rear = f.partition('configs/')
if not front and match:
front, match, rear = rear.rpartition('_defconfig')
if match and not rear:
targets.append(front)
elif tag == 'S:':
status = rest
elif line == '\n':
for target in targets:
self.database[target] = (status, maintainers)
targets = []
maintainers = []
status = '-'
if targets:
for target in targets:
self.database[target] = (status, maintainers)
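# --- Illustrative example (not part of the original script) ---
# A hedged sketch of the MAINTAINERS fragment parse_file() consumes; the
# entry below is made up, and the F: glob only matches when the defconfig
# actually exists on disk.
#
#     SANDBOX BOARD
#     M: John Doe <john@example.com>
#     S: Maintained
#     F: configs/sandbox_defconfig
#
# would record database['sandbox'] = ('Maintained',
# ['John Doe <john@example.com>']).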
def insert_maintainers_info(params_list):
"""Add Status and Maintainers information to the board parameters list.
Arguments:
params_list: A list of the board parameters
"""
database = MaintainersDatabase()
for (dirpath, dirnames, filenames) in os.walk('.'):
if 'MAINTAINERS' in filenames:
database.parse_file(os.path.join(dirpath, 'MAINTAINERS'))
for i, params in enumerate(params_list):
target = params['target']
params['status'] = database.get_status(target)
params['maintainers'] = database.get_maintainers(target)
params_list[i] = params
def format_and_output(params_list, output):
"""Write board parameters into a file.
Columnate the board parameters, sort lines alphabetically,
and then write them to a file.
Arguments:
params_list: The list of board parameters
output: The path to the output file
"""
FIELDS = ('status', 'arch', 'cpu', 'soc', 'vendor', 'board', 'target',
'options', 'maintainers')
# First, decide the width of each column
max_length = dict([ (f, 0) for f in FIELDS])
for params in params_list:
for f in FIELDS:
max_length[f] = max(max_length[f], len(params[f]))
output_lines = []
for params in params_list:
line = ''
for f in FIELDS:
# insert two spaces between fields like column -t would
line += ' ' + params[f].ljust(max_length[f])
output_lines.append(line.strip())
# ignore case when sorting
output_lines.sort(key=str.lower)
with open(output, 'w') as f:
f.write(COMMENT_BLOCK + '\n'.join(output_lines) + '\n')
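# --- Illustrative example (not part of the original script) ---
# A hedged sketch of the columnated layout format_and_output() writes; the
# row values are made up for illustration.
#
#     Active  arm  armv8  -  vendorA  boardA  boarda_defconfig  -  Jane <j@e.org>
#
# Each field is left-justified to the widest value in its column, with at
# least two spaces between fields, and rows are sorted case-insensitively.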
def gen_boards_cfg(output, jobs=1, force=False):
"""Generate a board database file.
Arguments:
output: The name of the output file
jobs: The number of jobs to run simultaneously
force: Force to generate the output even if it is new
"""
check_top_directory()
if not force and output_is_new(output):
print "%s is up to date. Nothing to do." % output
sys.exit(0)
params_list = scan_defconfigs(jobs)
insert_maintainers_info(params_list)
format_and_output(params_list, output)
def main():
try:
cpu_count = multiprocessing.cpu_count()
except NotImplementedError:
cpu_count = 1
parser = optparse.OptionParser()
# Add options here
parser.add_option('-f', '--force', action="store_true", default=False,
help='regenerate the output even if it is new')
parser.add_option('-j', '--jobs', type='int', default=cpu_count,
help='the number of jobs to run simultaneously')
parser.add_option('-o', '--output', default=OUTPUT_FILE,
help='output file [default=%s]' % OUTPUT_FILE)
(options, args) = parser.parse_args()
gen_boards_cfg(options.output, jobs=options.jobs, force=options.force)
if __name__ == '__main__':
main()
| gpl-3.0 |
Stranger6667/django-crispy-forms | crispy_forms/layout_slice.py | 32 | 6176 | # -*- coding: utf-8 -*-
from crispy_forms.compatibility import integer_types, string_types
from crispy_forms.exceptions import DynamicError
from crispy_forms.layout import Fieldset, MultiField
from crispy_forms.bootstrap import Container
class LayoutSlice(object):
# List of layout objects that need args passed first before fields
args_first = (Fieldset, MultiField, Container)
def __init__(self, layout, key):
self.layout = layout
if isinstance(key, integer_types):
self.slice = slice(key, key + 1, 1)
else:
self.slice = key
def wrapped_object(self, LayoutClass, fields, *args, **kwargs):
"""
Returns a layout object of type `LayoutClass` with `args` and `kwargs` that
wraps `fields` inside.
"""
if args:
if isinstance(fields, list):
fields = tuple(fields)
else:
fields = (fields,)
if LayoutClass in self.args_first:
arguments = args + fields
else:
arguments = fields + args
return LayoutClass(*arguments, **kwargs)
else:
if isinstance(fields, list):
return LayoutClass(*fields, **kwargs)
else:
return LayoutClass(fields, **kwargs)
def pre_map(self, function):
"""
Iterates over layout objects pointed in `self.slice` executing `function` on them.
It passes `function` the penultimate layout object and the position at which to find the last one
"""
if isinstance(self.slice, slice):
for i in range(*self.slice.indices(len(self.layout.fields))):
function(self.layout, i)
elif isinstance(self.slice, list):
# A list of pointers Ex: [[[0, 0], 'div'], [[0, 2, 3], 'field_name']]
for pointer in self.slice:
position = pointer[0]
# If it's pointing first level
if len(position) == 1:
function(self.layout, position[-1])
else:
layout_object = self.layout.fields[position[0]]
for i in position[1:-1]:
layout_object = layout_object.fields[i]
try:
function(layout_object, position[-1])
except IndexError:
# We could avoid this exception by recalculating pointers,
# but this case is almost always undesired behavior.
raise DynamicError("Trying to wrap a field within an already wrapped field, \
recheck your filter or layout")
def wrap(self, LayoutClass, *args, **kwargs):
"""
Wraps every layout object pointed in `self.slice` under a `LayoutClass` instance with
`args` and `kwargs` passed.
"""
def wrap_object(layout_object, j):
layout_object.fields[j] = self.wrapped_object(
LayoutClass, layout_object.fields[j], *args, **kwargs
)
self.pre_map(wrap_object)
def wrap_once(self, LayoutClass, *args, **kwargs):
"""
Wraps every layout object pointed in `self.slice` under a `LayoutClass` instance with
`args` and `kwargs` passed, unless layout object's parent is already a subclass of
`LayoutClass`.
"""
def wrap_object_once(layout_object, j):
if not isinstance(layout_object, LayoutClass):
layout_object.fields[j] = self.wrapped_object(
LayoutClass, layout_object.fields[j], *args, **kwargs
)
self.pre_map(wrap_object_once)
def wrap_together(self, LayoutClass, *args, **kwargs):
"""
Wraps all layout objects pointed in `self.slice` together under a `LayoutClass`
instance with `args` and `kwargs` passed.
"""
if isinstance(self.slice, slice):
# The start of the slice is replaced
start = self.slice.start if self.slice.start is not None else 0
self.layout.fields[start] = self.wrapped_object(
LayoutClass, self.layout.fields[self.slice], *args, **kwargs
)
# The rest of places of the slice are removed, as they are included in the previous
for i in reversed(range(*self.slice.indices(len(self.layout.fields)))):
if i != start:
del self.layout.fields[i]
elif isinstance(self.slice, list):
raise DynamicError("wrap_together doesn't work with filter, only with [] operator")
def map(self, function):
"""
Iterates over layout objects pointed in `self.slice` executing `function` on them.
It passes `function` the last layout object.
"""
if isinstance(self.slice, slice):
for i in range(*self.slice.indices(len(self.layout.fields))):
function(self.layout.fields[i])
elif isinstance(self.slice, list):
# A list of pointers Ex: [[[0, 0], 'div'], [[0, 2, 3], 'field_name']]
for pointer in self.slice:
position = pointer[0]
layout_object = self.layout.fields[position[0]]
for i in position[1:]:
previous_layout_object = layout_object
layout_object = layout_object.fields[i]
# If update_attrs is applied to a string, we call to its wrapping layout object
if (
function.__name__ == 'update_attrs'
and isinstance(layout_object, string_types)
):
function(previous_layout_object)
else:
function(layout_object)
def update_attributes(self, **kwargs):
"""
Updates attributes of every layout object pointed in `self.slice` using kwargs
"""
def update_attrs(layout_object):
if hasattr(layout_object, 'attrs'):
layout_object.attrs.update(kwargs)
self.map(update_attrs)
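# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of how a LayoutSlice is usually obtained via a Layout's
# [] operator and then manipulated; Div and the indices here are assumptions
# for illustration.
#
#     layout[0].wrap(Div, css_class='wrapper')   # wrap one object in a Div
#     layout[1:3].wrap_together(Div)             # wrap a range under one Div
#     layout[0].update_attributes(readonly=True)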
| mit |
gartung/dxr | dxr/plugins/python/tests/test_overrides/test_overrides.py | 9 | 3459 | from textwrap import dedent
from dxr.plugins.python.tests import PythonSingleFileTestCase
from dxr.testing import DxrInstanceTestCase
class OverridesTests(PythonSingleFileTestCase):
source = dedent("""
class Grandparent(object):
def overridden_all(self): #grandparent
'''Overridden in all children.'''
def not_overridden(self): #grandparent
'''Not overridden in any children.'''
class Parent(Grandparent):
def overridden_all(self): #parent
'''Overridden in all children.'''
def overridden_child(self): #parent
'''Overridden in the child class.'''
class Child(Parent):
def overridden_all(self): #child
'''Overridden in all children.'''
def overridden_child(self): #child
'''Overridden in the child class.'''
""")
def test_overrides_direct(self):
"""Make sure that overrides: finds methods overridden in child
classes.
"""
self.found_line_eq('overrides:overridden_child',
'def <b>overridden_child</b>(self): #child')
def test_overrides_multiple(self):
"""Make sure that overrides: finds methods overridden in
multiple descendants.
"""
self.found_lines_eq('overrides:overridden_all', [
'def <b>overridden_all</b>(self): #parent',
'def <b>overridden_all</b>(self): #child'
])
def test_overrides_nothing(self):
"""Make sure that overrides: finds nothing for methods that are
not overridden.
"""
self.found_nothing('overrides:not_overridden')
def test_overrides_qualname(self):
"""Make sure that overrides: supports qualnames."""
self.found_line_eq('overrides:main.Parent.overridden_child',
'def <b>overridden_child</b>(self): #child')
def test_overridden_direct(self):
"""Make sure that overridden: finds methods overridden from
parent classes.
"""
self.found_line_eq('overridden:overridden_child',
'def <b>overridden_child</b>(self): #parent')
def test_overridden_multiple(self):
"""Make sure that overridden: finds methods overridden from
multiple parents.
"""
self.found_lines_eq('overridden:overridden_all', [
'def <b>overridden_all</b>(self): #grandparent',
'def <b>overridden_all</b>(self): #parent'
])
def test_overridden_nothing(self):
"""Make sure that overridden: finds nothing for methods that are
not overridden.
"""
self.found_nothing('overridden:not_overridden')
def test_overridden_qualname(self):
"""Make sure that overridden: supports qualnames."""
self.found_line_eq('overridden:main.Child.overridden_child',
'def <b>overridden_child</b>(self): #parent')
class ImportOverrideTests(DxrInstanceTestCase):
def test_overrides(self):
"""Make sure the overrides filter works across imports."""
self.found_line_eq('overrides:parent.Parent.overridden',
'def <b>overridden</b>(self):', 5)
def test_overridden(self):
"""Make sure the overridden filter works across imports."""
self.found_line_eq('overridden:child.Child.overridden',
'def <b>overridden</b>(self):', 2)
| mit |
cstipkovic/spidermonkey-research | python/pystache/pystache/loader.py | 48 | 4771 | # coding: utf-8
"""
This module provides a Loader class for locating and reading templates.
"""
import os
import sys
from pystache import common
from pystache import defaults
from pystache.locator import Locator
# We make a function so that the current defaults take effect.
# TODO: revisit whether this is necessary.
def _make_to_unicode():
def to_unicode(s, encoding=None):
"""
Raises a TypeError exception if the given string is already unicode.
"""
if encoding is None:
encoding = defaults.STRING_ENCODING
return unicode(s, encoding, defaults.DECODE_ERRORS)
return to_unicode
class Loader(object):
"""
Loads the template associated to a name or user-defined object.
All load_*() methods return the template as a unicode string.
"""
def __init__(self, file_encoding=None, extension=None, to_unicode=None,
search_dirs=None):
"""
Construct a template loader instance.
Arguments:
extension: the template file extension, without the leading dot.
Pass False for no extension (e.g. to use extensionless template
files). Defaults to the package default.
file_encoding: the name of the encoding to use when converting file
contents to unicode. Defaults to the package default.
search_dirs: the list of directories in which to search when loading
a template by name or file name. Defaults to the package default.
to_unicode: the function to use when converting strings of type
str to unicode. The function should have the signature:
to_unicode(s, encoding=None)
It should accept a string of type str and an optional encoding
name and return a string of type unicode. Defaults to calling
Python's built-in function unicode() using the package string
encoding and decode errors defaults.
"""
if extension is None:
extension = defaults.TEMPLATE_EXTENSION
if file_encoding is None:
file_encoding = defaults.FILE_ENCODING
if search_dirs is None:
search_dirs = defaults.SEARCH_DIRS
if to_unicode is None:
to_unicode = _make_to_unicode()
self.extension = extension
self.file_encoding = file_encoding
# TODO: unit test setting this attribute.
self.search_dirs = search_dirs
self.to_unicode = to_unicode
def _make_locator(self):
return Locator(extension=self.extension)
def unicode(self, s, encoding=None):
"""
Convert a string to unicode using the given encoding, and return it.
This function uses the underlying to_unicode attribute.
Arguments:
s: a basestring instance to convert to unicode. Unlike Python's
built-in unicode() function, it is okay to pass unicode strings
to this function. (Passing a unicode string to Python's unicode()
with the encoding argument throws the error, "TypeError: decoding
Unicode is not supported.")
encoding: the encoding to pass to the to_unicode attribute.
Defaults to None.
"""
if isinstance(s, unicode):
return unicode(s)
return self.to_unicode(s, encoding)
def read(self, path, encoding=None):
"""
Read the template at the given path, and return it as a unicode string.
"""
b = common.read(path)
if encoding is None:
encoding = self.file_encoding
return self.unicode(b, encoding)
def load_file(self, file_name):
"""
Find and return the template with the given file name.
Arguments:
file_name: the file name of the template.
"""
locator = self._make_locator()
path = locator.find_file(file_name, self.search_dirs)
return self.read(path)
def load_name(self, name):
"""
Find and return the template with the given template name.
Arguments:
name: the name of the template.
"""
locator = self._make_locator()
path = locator.find_name(name, self.search_dirs)
return self.read(path)
# TODO: unit-test this method.
def load_object(self, obj):
"""
Find and return the template associated to the given object.
Arguments:
obj: an instance of a user-defined class.
search_dirs: the list of directories in which to search.
"""
locator = self._make_locator()
path = locator.find_object(obj, self.search_dirs)
return self.read(path)
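# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of loading a template by name; 'hello' is hypothetical and
# would resolve to hello.mustache in one of the search directories.
#
#     loader = Loader(search_dirs=['templates'])
#     template = loader.load_name('hello')  # unicode contents of the file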
| mpl-2.0 |
windskyer/nova | nova/scheduler/client/__init__.py | 42 | 2715 | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_utils import importutils
from nova.scheduler import utils
class LazyLoader(object):
def __init__(self, klass, *args, **kwargs):
self.klass = klass
self.args = args
self.kwargs = kwargs
self.instance = None
def __getattr__(self, name):
return functools.partial(self.__run_method, name)
def __run_method(self, __name, *args, **kwargs):
if self.instance is None:
self.instance = self.klass(*self.args, **self.kwargs)
return getattr(self.instance, __name)(*args, **kwargs)
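# --- Illustrative behavior (not part of the original module) ---
# A hedged sketch of LazyLoader's deferred construction; Foo is hypothetical.
#
#     lazy = LazyLoader(Foo, 1, 2)   # Foo is not instantiated yet
#     lazy.bar()                     # first call builds Foo(1, 2), then
#                                    # dispatches to Foo(1, 2).bar()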
class SchedulerClient(object):
"""Client library for placing calls to the scheduler."""
def __init__(self):
self.queryclient = LazyLoader(importutils.import_class(
'nova.scheduler.client.query.SchedulerQueryClient'))
self.reportclient = LazyLoader(importutils.import_class(
'nova.scheduler.client.report.SchedulerReportClient'))
@utils.retry_select_destinations
def select_destinations(self, context, request_spec, filter_properties):
return self.queryclient.select_destinations(
context, request_spec, filter_properties)
def update_aggregates(self, context, aggregates):
self.queryclient.update_aggregates(context, aggregates)
def delete_aggregate(self, context, aggregate):
self.queryclient.delete_aggregate(context, aggregate)
def update_resource_stats(self, compute_node):
self.reportclient.update_resource_stats(compute_node)
def update_instance_info(self, context, host_name, instance_info):
self.queryclient.update_instance_info(context, host_name,
instance_info)
def delete_instance_info(self, context, host_name, instance_uuid):
self.queryclient.delete_instance_info(context, host_name,
instance_uuid)
def sync_instance_info(self, context, host_name, instance_uuids):
self.queryclient.sync_instance_info(context, host_name, instance_uuids)
| gpl-2.0 |
cginternals/cppfs | source/tests/googletest/googletest/test/googletest-output-test.py | 71 | 12617 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing and Mocking Framework.
To update the golden file:
googletest_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built googletest-output-test_ file.
googletest_output_test.py --gengolden
googletest_output_test.py
"""
import difflib
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
# The flag indicating stacktraces are not supported
NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
GOLDEN_NAME = 'googletest-output-test-golden-lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('googletest-output-test_')
# At least one command we exercise must not have the
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\]((googletest-output-test_|gtest).cc)(\:\d+|\(\d+\))\: ',
r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
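# --- Illustrative example (not part of the original script) ---
# A hedged sketch of the normalization RemoveTime() performs; the input line
# is made up for illustration.
#
#     RemoveTime('[       OK ] FooTest.Bar (12 ms)')
#     # -> '[       OK ] FooTest.Bar (? ms)'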
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of googletest-output-test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS and
SUPPORTS_STACK_TRACES)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
# A mis-configured source control system can cause \r appear in EOL
# sequences when we read the golden file irrespective of an operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read().decode())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual,
'\n'.join(difflib.unified_diff(
normalized_golden.split('\n'),
normalized_actual.split('\n'),
'golden', 'actual')))
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_googletest-output-test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_googletest-output-test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if NO_STACKTRACE_SUPPORT_FLAG in sys.argv:
# unittest.main() can't handle unknown flags
sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG)
if GENGOLDEN_FLAG in sys.argv:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests,
typed tests, stack traces, and multiple threads).
Please build this test and generate the golden file using Blaze on Linux.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
| mit |
arocchi/Klampt | Python/klampt/vectorfield.py | 3 | 2694 | class VectorFieldFunction:
"""A callback class used with the rootfind module to define a vector
field f(x)=0 to be solved for during Newton-Raphson root finding.
The output is dimension m and the input is dimension n.
At the minimum, your subclass should fill out the m, n attributes, and
override the eval(), and jacobian() functions. The jacobian_numeric
function is provided for you in case you want to use differencing
to approximate the jacobian.
"""
def __init__(self):
self.n = 0
self.m = 0
def eval(self, x):
pass
def eval_i(self, x, i):
pass
def jacobian(self, x):
pass
def jacobian_ij(self, x, i, j):
pass
def num_vars(self):
return self.n
def num_fns(self):
return self.m
def jacobian_numeric(self,x,delta):
"""Helper method: returns the centered-difference jacobian
approximation with stepsize delta."""
xtemp = x[:]
J = []
for i,xi in enumerate(x):
xtemp[i] = xi - delta
e1 = self.eval(xtemp)
xtemp[i] = xi + delta
e2 = self.eval(xtemp)
xtemp[i] = xi
J.append([(ei2-ei1)/(2.0*delta) for (ei1,ei2) in zip(e1,e2)])
return J
class CompositeVectorFieldFunction(VectorFieldFunction):
"""A helper VectorFieldFunction that aggregates multiple
VectorFieldFunctions into a stacked constraint
0 = f1(x)
0 = f2(x)
...
0 = fn(x)
"""
def __init__(self, fns):
if not fns:
raise RuntimeError("Must have at least one function for composite")
self.fns = fns
self.n = self.fns[0].num_vars()
for f in self.fns:
if f.num_vars() != self.n:
raise RuntimeError("Functions must take the same vector")
self.m = sum([f.num_fns() for f in self.fns])
def eval(self, x):
res = []
for f in self.fns:
res += f.eval(x)
return res
def eval_i(self, x, i):
for f in self.fns:
if i < f.num_fns():
return f.eval_i(x, i)
i -= f.num_fns()
raise RuntimeError("eval_i: i must be between 0 and %d" % self.num_fns())
def jacobian(self, x):
res = []
for f in self.fns:
res += f.jacobian(x)
return res
def jacobian_ij(self, x, i, j):
for f in self.fns:
if i < f.num_fns():
return f.jacobian_ij(x,i,j)
i -= f.num_fns()
raise RuntimeError("jacobian_ij: i must be between 0 and %d" % self.num_fns())
| bsd-3-clause |
acogdev/ansible | contrib/inventory/landscape.py | 405 | 3467 | #!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use nodes discovered by Canonical's
# Landscape (http://www.ubuntu.com/management/landscape-features).
#
# Requires the `landscape_api` Python module
# See:
# - https://landscape.canonical.com/static/doc/api/api-client-package.html
# - https://landscape.canonical.com/static/doc/api/python-api.html
#
# Environment variables
# ---------------------
# - `LANDSCAPE_API_URI`
# - `LANDSCAPE_API_KEY`
# - `LANDSCAPE_API_SECRET`
# - `LANDSCAPE_API_SSL_CA_FILE` (optional)
import argparse
import collections
import os
import sys
from landscape_api.base import API, HTTPError
try:
import json
except ImportError:
import simplejson as json
_key = 'landscape'
class EnvironmentConfig(object):
uri = os.getenv('LANDSCAPE_API_URI')
access_key = os.getenv('LANDSCAPE_API_KEY')
secret_key = os.getenv('LANDSCAPE_API_SECRET')
ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE')
def _landscape_client():
env = EnvironmentConfig()
return API(
uri=env.uri,
access_key=env.access_key,
secret_key=env.secret_key,
ssl_ca_file=env.ssl_ca_file)
def get_landscape_members_data():
return _landscape_client().get_computers()
def get_nodes(data):
return [node['hostname'] for node in data]
def get_groups(data):
groups = collections.defaultdict(list)
for node in data:
for value in node['tags']:
groups[value].append(node['hostname'])
return groups
def get_meta(data):
meta = {'hostvars': {}}
for node in data:
meta['hostvars'][node['hostname']] = {'tags': node['tags']}
return meta
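# --- Illustrative output (not part of the original script) ---
# A hedged sketch of the JSON shape print_list() emits; the hostname and tag
# are made up for illustration.
#
#     {"landscape": ["web1"], "db": ["web1"],
#      "_meta": {"hostvars": {"web1": {"tags": ["db"]}}}}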
def print_list():
data = get_landscape_members_data()
nodes = get_nodes(data)
groups = get_groups(data)
meta = get_meta(data)
inventory_data = {_key: nodes, '_meta': meta}
inventory_data.update(groups)
print(json.dumps(inventory_data))
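# The emitted --list payload has roughly this shape (hostname and tag
# invented for illustration):
#   {"landscape": ["web1"], "web": ["web1"],
#    "_meta": {"hostvars": {"web1": {"tags": ["web"]}}}}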
def print_host(host):
data = get_landscape_members_data()
meta = get_meta(data)
print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script reading from landscape cluster')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from landscape cluster'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-3.0 |
tiangolo/ansible | test/units/parsing/test_unquote.py | 152 | 2073 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from nose import tools
from ansible.compat.tests import unittest
from ansible.parsing.splitter import unquote
# Tests using nose's test generators cannot use unittest base class.
# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators
class TestUnquote:
UNQUOTE_DATA = (
(u'1', u'1'),
(u'\'1\'', u'1'),
(u'"1"', u'1'),
(u'"1 \'2\'"', u'1 \'2\''),
(u'\'1 "2"\'', u'1 "2"'),
(u'\'1 \'2\'\'', u'1 \'2\''),
(u'"1\\"', u'"1\\"'),
(u'\'1\\\'', u'\'1\\\''),
(u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
(u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
(u'"', u'"'),
(u'\'', u'\''),
# Not entirely sure these are good but they match the current
# behaviour
(u'"1""2"', u'1""2'),
(u'\'1\'\'2\'', u'1\'\'2'),
(u'"1" 2 "3"', u'1" 2 "3'),
(u'"1"\'2\'"3"', u'1"\'2\'"3'),
)
def check_unquote(self, quoted, expected):
tools.eq_(unquote(quoted), expected)
def test_unquote(self):
for datapoint in self.UNQUOTE_DATA:
yield self.check_unquote, datapoint[0], datapoint[1]
| gpl-3.0 |
trueblue2704/AskMeAnything | lib/python2.7/site-packages/tests/test_call_feedback.py | 14 | 3476 | import unittest
from nose.tools import assert_equal
from mock import Mock, patch, ANY
from tests.tools import create_mock_json
from twilio.rest.resources import Call, Calls
AUTH = ('foo', 'bar')
class CallFeedbackTest(unittest.TestCase):
@patch('twilio.rest.resources.base.make_twilio_request')
def test_get_call_feedback(self, request):
resp = create_mock_json('tests/resources/call_feedback.json')
request.return_value = resp
mock = Mock()
mock.uri = '/base'
call = Call(mock, 'CA123')
call.load_subresources()
feedback = call.feedback.get()
        assert_equal(5, feedback.quality_score)
assert_equal(['imperfect-audio', 'post-dial-delay'], feedback.issues)
@patch('twilio.rest.resources.base.make_twilio_request')
def test_create_call_feedback(self, request):
resp = create_mock_json('tests/resources/call_feedback.json')
resp.status_code = 201
request.return_value = resp
mock = Mock()
mock.uri = '/base'
mock.auth = AUTH
call = Call(mock, 'CA123')
call.load_subresources()
feedback = call.feedback.create(
quality_score=5,
issues=['imperfect-audio', 'post-dial-delay'],
)
exp_data = {
'QualityScore': 5,
'Issues': ['imperfect-audio', 'post-dial-delay'],
}
        assert_equal(5, feedback.quality_score)
assert_equal(['imperfect-audio', 'post-dial-delay'], feedback.issues)
request.assert_called_with(
"POST", "/base/CA123/Feedback",
data=exp_data, auth=AUTH,
timeout=ANY, use_json_extension=True,
)
@patch('twilio.rest.resources.base.make_twilio_request')
def test_create_call_feedback_one_request(self, request):
resp = create_mock_json('tests/resources/call_feedback.json')
resp.status_code = 201
request.return_value = resp
base_uri = 'https://api.twilio.com/2010-04-01/Accounts/AC123'
account_sid = 'AC123'
auth = (account_sid, "token")
calls = Calls(base_uri, auth)
uri = "%s/Calls/CA123/Feedback" % base_uri
feedback = calls.feedback(
'CA123',
quality_score=5,
issue=['imperfect-audio', 'post-dial-delay']
)
exp_data = {
'QualityScore': 5,
'Issue': ['imperfect-audio', 'post-dial-delay'],
}
assert_equal(['imperfect-audio', 'post-dial-delay'], feedback.issues)
request.assert_called_with(
"POST", uri,
data=exp_data, auth=auth,
use_json_extension=True,
)
class CallFeedbackSummaryTest(unittest.TestCase):
@patch('twilio.rest.resources.base.make_twilio_request')
def test_get_call_feedback_summary(self, request):
resp = create_mock_json('tests/resources/call_feedback_summary.json')
request.return_value = resp
base_uri = 'https://api.twilio.com/2010-04-01/Accounts/AC123'
account_sid = 'AC123'
auth = (account_sid, "token")
calls = Calls(base_uri, auth)
uri = "%s/Calls/Summary" % base_uri
feedback = calls.summary.get()
assert_equal(10200, feedback.call_count)
assert_equal(729, feedback.call_feedback_count)
request.assert_called_with('GET', uri, params={}, auth=auth,
use_json_extension=True)
| mit |
tencrance/cool-config | ml_keras_learn/tutorials/theanoTUT/theano14_summary.py | 3 | 1148 | # View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 14 - summary
"""
==============================
Summary:
-----------------------------------------------
1. Understand the basic usage of Theano;
2. Built a regression neural networks;
3. Built a classification neural networks;
4. Understand the overfitting and the solutions for solving this problem;
5. Save your networks for future usage.
==============================
GPU computation:
-----------------------------------------------
Theano tutorial link: http://deeplearning.net/software/theano/tutorial/using_gpu.html
Requirement: NVIDIA cards and CUDA backend
==============================
Theano Convolutional Neural Networks:
----------------------------------------------
Theano tutorial link: http://deeplearning.net/tutorial/lenet.html
==============================
Theano Recurrent Neural Networks:
-----------------------------------------------
Theano tutorial link: http://deeplearning.net/tutorial/rnnslu.html
""" | mit |
SohKai/ChronoLogger | web/flask/lib/python2.7/site-packages/pip-1.5.4-py2.7.egg/pip/_vendor/requests/packages/charade/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel,
False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True,
hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber,
visualHebrewProber])
self.reset()
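# Usage sketch (feed/get_charset_name/get_confidence are inherited from
# CharSetGroupProber; the byte string is an arbitrary single-byte sample):
#   prober = SBCSGroupProber()
#   prober.feed(b'\xd0\xd2\xc9\xd7\xc5\xd4')
#   print(prober.get_charset_name(), prober.get_confidence())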
| mit |
fuzzing/bifuz | kivy-android/common.py | 6 | 16802 | import kivy
import random
from kivy.app import App
from kivy.core.window import Window
import glob
from jnius import cast
from jnius import autoclass
import os
from commands import *
from utils import *
from kivy.lang import Builder
from kivy.metrics import metrics
from kivy.uix.floatlayout import FloatLayout
from unicodedata import category
Environment=autoclass("android.os.Environment")
PythonActivity = autoclass('org.renpy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
Uri = autoclass('android.net.Uri')
ComponentName=autoclass('android.content.ComponentName')
ActivityInfo= autoclass("android.content.pm.ActivityInfo")
PackageInfo= autoclass("android.content.pm.PackageInfo")
PackageManager= autoclass("android.content.pm.PackageManager")
Cursor=autoclass("android.database.Cursor")
AsyncTask=autoclass("com/example/asynctask/MyAsyncTask")
Environment=autoclass("android.os.Environment");
TimeUnit=autoclass("java.util.concurrent.TimeUnit")
intents=[]
intents_package_names=[]
commands=[]
categories=[]
extra_keys=[]
extra_types=[]
activity_actions=[]
flags=[]
path_txt="txts/"
with open(path_txt + "categories.txt") as f:
categories = f.read().splitlines()
with open(path_txt + "extra_keys.txt") as f:
extra_keys = f.read().splitlines()
with open(path_txt + "extra_types.txt") as f:
extra_types = f.read().splitlines()
with open(path_txt + "flags.txt") as f:
flags = f.read().splitlines()
with open(path_txt + "activity_actions.txt") as f:
activity_actions = f.read().splitlines()
for i in range(len(flags)):
index_fl = flags[i].index(':')
if index_fl > 0:
flags[i] = flags[i][index_fl+1:]
# get_default_values("/data/local/tmp/txts/",categories,extra_keys,extra_types,flags)
def log_in_logcat(log):
log_command = "log -p f -t %s" % (str(log))
output = getoutput(log_command)
#parse all lines in a seed file and create intents/broadcast arrays
def parse_seed_line_command(command):
adb_params = command.split(" ");
package=adb_params[adb_params.index("-n")+1]
packageName=package.split("/")
intents_package_names.append(packageName[1])
if (command.find('broadcast')>-1):
intent_type=0
else:
intent_type=1
action=adb_params[adb_params.index("-a")+1]
s=adb_params[adb_params.index("-f")+2]
flag=int(s, 0)
category=adb_params[adb_params.index("-c")+1]
data=Uri.parse(adb_params[adb_params.index("-d")+1])
extra_type=adb_params[adb_params.index("-e")+1]
extra_string=adb_params[adb_params.index("-e")+2]
extra_value=adb_params[adb_params.index("-e")+3]
intent = Intent()
intent.setComponent(ComponentName(packageName[0],packageName[1]))
if (intent_type==0):
intents.append(intent)
else:
intent.setAction(action)
intent.addCategory(category)
intent.setData(data)
intent.setFlags(flag)
if extra_type=="boolean":
if extra_value=="true":
intent.putExtra(extra_string, "true")
else:
intent.putExtra(extra_string, "false")
elif extra_type=="string":
intent.putExtra(extra_string, extra_value)
elif extra_type=="int":
intent.putExtra(extra_string, int(extra_value))
intents.append(intent)
return intent_type
config = PythonActivity.mActivity.getResources().getConfiguration()
Builder.load_file("AppLayout.kv")
class Bifuz(FloatLayout):
# Window.clearcolor = (0,1,1,0)
def fontscale(self):
dpi = metrics.dpi_rounded
if dpi < 140:
return 1
elif dpi < 200:
return 2
elif dpi < 280:
return 2.5
return 3
#
# # Generate Broadcast Intent calls
#
#get all packages and activities for Broadcast
def get_all_Broadcast_packages(self):
arrayList=[]
mypackList=[]
pm = PythonActivity.mActivity.getPackageManager()
mypackList=pm.getInstalledPackages(PackageManager.GET_RECEIVERS).toArray()
for pack in mypackList:
arrayList.append(pack.packageName)
self.s41.values = arrayList
self.s41.bind(text=self.generate_intents_Receivers)
def generate_intents_Receivers(self,spinner,text):
output = getoutput("logcat -c")
PackListReceiver=[]
receivers=[]
pm = PythonActivity.mActivity.getPackageManager()
PackListReceiver=pm.getPackageInfo(text, PackageManager.GET_RECEIVERS).receivers
if (PackListReceiver is not None):
for pack in PackListReceiver:
packageName=text
packageClass=pack.name
command=' am broadcast -n ' + packageName +'/' + packageClass
PythonActivity.toastError('BIFUZ_BROADCAST ' + command)
task=AsyncTask(PythonActivity.mActivity)
task.execute("broadcast",packageName,packageClass,command)
receivers.append(packageClass)
path="/sdcard/test/"
for filename in glob.glob(os.path.join(path, '*.sh')):
file_path=filename.split("/sdcard/test/")
if file_path[1] in self.s1.values:
ok=False
else:
self.s1.values.append(file_path[1])
else:
PythonActivity.toastError("No receivers found for this app")
#
# # Generate Fuzzed Intent calls
#
    #get all Activities packages
def get_all_Activities_packages(self):
arrayList=[]
mypackList=[]
pm = PythonActivity.mActivity.getPackageManager()
mypackList=pm.getInstalledPackages(PackageManager.GET_ACTIVITIES).toArray()
for pack in mypackList:
arrayList.append(pack.packageName)
self.s31.values = arrayList
self.s31.bind(text=self.generate_intents_Activities)
#get all packages and activities
def generate_intents_Activities(self,spinner, text):
output = getoutput("logcat -c")
PackListActivities=[]
activities=[]
pm = PythonActivity.mActivity.getPackageManager()
PackListActivities=pm.getPackageInfo(text, PackageManager.GET_ACTIVITIES).activities
if (PackListActivities is not None):
for pack in PackListActivities:
packageName=text
packageClass=pack.name
activities.append(packageClass)
cat=random.choice(categories);
flag=random.choice(flags);
e_key=random.choice(extra_keys);
e_type=random.choice(extra_types);
act=random.choice(activity_actions);
data=generate_random_uri()
if e_type == "boolean":
ev = str(random.choice([True,False]))
elif e_type == "string":
ev = string_generator(random.randint(10,100))
else:
ev = str(random.randint(10,100))
command=' am start -a ' + act + ' -c ' + cat + ' -n ' + packageName +'/' + packageClass + ' -f ' + flag + ' -d ' + data +' -e ' + e_type +' '+ e_key + ' ' + ev
PythonActivity.toastError('BIFUZ_INTENT ' + command)
task=AsyncTask(PythonActivity.mActivity)
task.execute("intent",packageName,packageClass,act,cat,flag,data,e_type,e_key,ev,command)
path="/sdcard/test/"
for filename in glob.glob(os.path.join(path, '*.sh')):
file_path=filename.split("/sdcard/test/")
if (file_path[1] in self.s1.values):
ok=False
else:
self.s1.values.append(file_path[1])
else:
PythonActivity.toastError("No activities found for this app")
#
# # Run existing generated intents from file
#
#get all seed files
def parse_directory(self):
seed_files = []
path="/sdcard/test/"
for filename in glob.glob(os.path.join(path, '*.sh')):
file_path=filename.split("/sdcard/test/")
if (file_path[1] in self.s1.values):
ok=False
else:
self.s1.values.append(file_path[1])
self.s1.bind(text=self.show_selected_value)
def show_selected_value(self,spinner, text):
file_path='/sdcard/test/'+text
del intents[:]
del intents_package_names[:]
del commands[:]
if file_path:
with open(file_path) as f:
content = f.readlines()
for command in content:
if (command.find('am')>-1):
c=command.split('am',1)
c[1]="am "+ c[1]
commands.append(c[1])
self.intent_type=parse_seed_line_command(command)
self.s2.values=intents_package_names
self.s2.values.insert(0, "Test All")
self.s2.bind(text=self.run_intents)
def run_intents(self,spinner, text):
# if test all
output = getoutput("logcat -c")
if (text.find('Test All')>-1):
if (self.intent_type==0):
for c in commands:
adb_params = c.split(" ");
package=adb_params[adb_params.index("-n")+1]
packageName=package.split("/")
command=' am broadcast -n ' + package
task=AsyncTask(PythonActivity.mActivity)
task.execute("broadcast",packageName[0],packageName[1],command)
PythonActivity.toastError(command)
else:
for c in commands:
adb_params = c.split(" ");
package=adb_params[adb_params.index("-n")+1]
packageName=package.split("/")
action=adb_params[adb_params.index("-a")+1]
flag=adb_params[adb_params.index("-f")+2]
category=adb_params[adb_params.index("-c")+1]
data=adb_params[adb_params.index("-d")+1]
extra_type=adb_params[adb_params.index("-e")+1]
extra_string=adb_params[adb_params.index("-e")+2]
extra_value=adb_params[adb_params.index("-e")+3]
task=AsyncTask(PythonActivity.mActivity)
command=' am start -a ' + action + ' -c ' + category + ' -n ' + package + ' -f ' + flag + ' -d ' + data +' -e ' + extra_type +' '+ extra_string + ' ' + extra_value
task.execute("intent",packageName[0],packageName[1],action,category,flag,data,extra_type,extra_string,extra_value,command)
PythonActivity.toastError(command)
# for one selected line
else:
index=intents_package_names.index(text)
if (self.intent_type==0):
adb_params = commands[index].split(" ");
package=adb_params[adb_params.index("-n")+1]
packageName=package.split("/")
command=' am broadcast -n ' + package
task=AsyncTask(PythonActivity.mActivity)
task.execute("broadcast",packageName[0],packageName[1],command)
PythonActivity.toastError(command)
else:
adb_params = commands[index].split(" ");
package=adb_params[adb_params.index("-n")+1]
packageName=package.split("/")
action=adb_params[adb_params.index("-a")+1]
flag=adb_params[adb_params.index("-f")+2]
category=adb_params[adb_params.index("-c")+1]
data=adb_params[adb_params.index("-d")+1]
extra_type=adb_params[adb_params.index("-e")+1]
extra_string=adb_params[adb_params.index("-e")+2]
extra_value=adb_params[adb_params.index("-e")+3]
task=AsyncTask(PythonActivity.mActivity)
command=' am start -a ' + action + ' -c ' + category + ' -n ' + package + ' -f ' + flag + ' -d ' + data +' -e ' + extra_type +' '+ extra_string + ' ' + extra_value
task.execute("intent",packageName[0],packageName[1],action,category,flag,data,extra_type,extra_string,extra_value,command)
PythonActivity.toastError(command)
#
# # SQL injections for specific apk
    #get all packages and content providers
def get_all_Providers_packages(self):
arrayList=[]
mypackList=[]
pm = PythonActivity.mActivity.getPackageManager()
mypackList=pm.getInstalledPackages(PackageManager.GET_PROVIDERS).toArray()
for pack in mypackList:
string=pack.packageName
if (string.find('sieve')>-1):
# PackListProviders=pm.getPackageInfo(string, PackageManager.GET_PROVIDERS).providers
# if (PackListProviders is not None):
arrayList.append(pack.packageName)
self.s51.values = arrayList
self.s51.bind(text=self.generate_contents_providers)
def generate_contents_providers(self,spinner,text):
PackListProviders=[]
providers=[]
pm = PythonActivity.mActivity.getPackageManager()
PackListProviders=pm.getPackageInfo(text, PackageManager.GET_PROVIDERS).providers
if (PackListProviders is not None):
for pack in PackListProviders:
string=pack.authority
path_permissions=pack.pathPermissions
if (path_permissions is not None):
for p in path_permissions:
perm=str(p.getWritePermission ())
if perm is not None:
PythonActivity.toastError(perm)
path_parts=perm.split("WRITE_")
path=path_parts[1]
uri_path=path[0].upper() + path[1:].lower()
am = PythonActivity.mActivity
s='content://com.mwr.example.sieve.DBContentProvider/Passwords/'
# s='content://'+string+'/'+uri_path +'/'
PythonActivity.toastError(s)
providerInfo=am.getContentResolver
cursor = am.getContentResolver().query(Uri.parse(s),None,None,None,None)
array=cursor.getColumnNames()
columns=cursor.getColumnCount()
rows=cursor.getCount()
if (cursor.moveToFirst()==True):
r=1
row_values=""
while (r<=rows):
c=1
while (c<columns):
type=cursor.getType(c)
if type == 1:
value=cursor.getInt(c)
elif type == 2:
value=cursor.getFloat(c)
elif type == 3:
value=cursor.getString(c)
elif type == 4:
value=cursor.getBlob(c)
elif type==0: value=" "
row_values=row_values + "\n" + str(value)
c+=1
r+=1
cursor.moveToNext()
row_values=row_values+ "\n"
PythonActivity.toastError(uri_path + ":" + row_values)
providers.append(string)
else: PythonActivity.toastError("no uri_path")
# | mit |
solidgoldbomb/letsencrypt | letsencrypt/tests/reverter_test.py | 3 | 18201 | """Test letsencrypt.reverter."""
import csv
import itertools
import logging
import os
import shutil
import tempfile
import unittest
import mock
from letsencrypt import errors
class ReverterCheckpointLocalTest(unittest.TestCase):
# pylint: disable=too-many-instance-attributes, too-many-public-methods
"""Test the Reverter Class."""
def setUp(self):
from letsencrypt.reverter import Reverter
# Disable spurious errors... we are trying to test for them
logging.disable(logging.CRITICAL)
self.config = setup_work_direc()
self.reverter = Reverter(self.config)
tup = setup_test_files()
self.config1, self.config2, self.dir1, self.dir2, self.sets = tup
def tearDown(self):
shutil.rmtree(self.config.work_dir)
shutil.rmtree(self.dir1)
shutil.rmtree(self.dir2)
logging.disable(logging.NOTSET)
def test_basic_add_to_temp_checkpoint(self):
# These shouldn't conflict even though they are both named config.txt
self.reverter.add_to_temp_checkpoint(self.sets[0], "save1")
self.reverter.add_to_temp_checkpoint(self.sets[1], "save2")
self.assertTrue(os.path.isdir(self.config.temp_checkpoint_dir))
self.assertEqual(get_save_notes(
self.config.temp_checkpoint_dir), "save1save2")
self.assertFalse(os.path.isfile(
os.path.join(self.config.temp_checkpoint_dir, "NEW_FILES")))
self.assertEqual(
get_filepaths(self.config.temp_checkpoint_dir),
"{0}\n{1}\n".format(self.config1, self.config2))
def test_add_to_checkpoint_copy_failure(self):
with mock.patch("letsencrypt.reverter.shutil.copy2") as mock_copy2:
mock_copy2.side_effect = IOError("bad copy")
self.assertRaises(
errors.ReverterError, self.reverter.add_to_checkpoint,
self.sets[0], "save1")
def test_checkpoint_conflict(self):
"""Make sure that checkpoint errors are thrown appropriately."""
config3 = os.path.join(self.dir1, "config3.txt")
self.reverter.register_file_creation(True, config3)
update_file(config3, "This is a new file!")
self.reverter.add_to_checkpoint(self.sets[2], "save1")
# This shouldn't throw an error
self.reverter.add_to_temp_checkpoint(self.sets[0], "save2")
# Raise error
self.assertRaises(errors.ReverterError, self.reverter.add_to_checkpoint,
self.sets[2], "save3")
# Should not cause an error
self.reverter.add_to_checkpoint(self.sets[1], "save4")
# Check to make sure new files are also checked...
self.assertRaises(errors.ReverterError, self.reverter.add_to_checkpoint,
set([config3]), "invalid save")
def test_multiple_saves_and_temp_revert(self):
self.reverter.add_to_temp_checkpoint(self.sets[0], "save1")
update_file(self.config1, "updated-directive")
self.reverter.add_to_temp_checkpoint(self.sets[0], "save2-updated dir")
update_file(self.config1, "new directive change that we won't keep")
self.reverter.revert_temporary_config()
self.assertEqual(read_in(self.config1), "directive-dir1")
def test_multiple_registration_fail_and_revert(self):
# pylint: disable=invalid-name
config3 = os.path.join(self.dir1, "config3.txt")
update_file(config3, "Config3")
config4 = os.path.join(self.dir2, "config4.txt")
update_file(config4, "Config4")
# Test multiple registrations and two registrations at once
self.reverter.register_file_creation(True, self.config1)
self.reverter.register_file_creation(True, self.config2)
self.reverter.register_file_creation(True, config3, config4)
# Simulate Let's Encrypt crash... recovery routine is run
self.reverter.recovery_routine()
self.assertFalse(os.path.isfile(self.config1))
self.assertFalse(os.path.isfile(self.config2))
self.assertFalse(os.path.isfile(config3))
self.assertFalse(os.path.isfile(config4))
def test_multiple_registration_same_file(self):
self.reverter.register_file_creation(True, self.config1)
self.reverter.register_file_creation(True, self.config1)
self.reverter.register_file_creation(True, self.config1)
self.reverter.register_file_creation(True, self.config1)
files = get_new_files(self.config.temp_checkpoint_dir)
self.assertEqual(len(files), 1)
def test_register_file_creation_write_error(self):
m_open = mock.mock_open()
with mock.patch("letsencrypt.reverter.open", m_open, create=True):
m_open.side_effect = OSError("bad open")
self.assertRaises(
errors.ReverterError, self.reverter.register_file_creation,
True, self.config1)
def test_bad_registration(self):
# Made this mistake and want to make sure it doesn't happen again...
self.assertRaises(
errors.ReverterError, self.reverter.register_file_creation,
"filepath")
def test_register_undo_command(self):
coms = [
["a2dismod", "ssl"],
["a2dismod", "rewrite"],
["cleanslate"]
]
for com in coms:
self.reverter.register_undo_command(True, com)
act_coms = get_undo_commands(self.config.temp_checkpoint_dir)
for a_com, com in itertools.izip(act_coms, coms):
self.assertEqual(a_com, com)
def test_bad_register_undo_command(self):
m_open = mock.mock_open()
with mock.patch("letsencrypt.reverter.open", m_open, create=True):
m_open.side_effect = OSError("bad open")
self.assertRaises(
errors.ReverterError, self.reverter.register_undo_command,
True, ["command"])
@mock.patch("letsencrypt.le_util.run_script")
def test_run_undo_commands(self, mock_run):
mock_run.side_effect = ["", errors.SubprocessError]
coms = [
["invalid_command"],
["a2dismod", "ssl"],
]
for com in coms:
self.reverter.register_undo_command(True, com)
self.reverter.revert_temporary_config()
self.assertEqual(mock_run.call_count, 2)
def test_recovery_routine_in_progress_failure(self):
self.reverter.add_to_checkpoint(self.sets[0], "perm save")
# pylint: disable=protected-access
self.reverter._recover_checkpoint = mock.MagicMock(
side_effect=errors.ReverterError)
self.assertRaises(errors.ReverterError, self.reverter.recovery_routine)
def test_recover_checkpoint_revert_temp_failures(self):
# pylint: disable=invalid-name
mock_recover = mock.MagicMock(
side_effect=errors.ReverterError("e"))
# pylint: disable=protected-access
self.reverter._recover_checkpoint = mock_recover
self.reverter.add_to_temp_checkpoint(self.sets[0], "config1 save")
self.assertRaises(
errors.ReverterError, self.reverter.revert_temporary_config)
def test_recover_checkpoint_rollback_failure(self):
mock_recover = mock.MagicMock(
side_effect=errors.ReverterError("e"))
# pylint: disable=protected-access
self.reverter._recover_checkpoint = mock_recover
self.reverter.add_to_checkpoint(self.sets[0], "config1 save")
self.reverter.finalize_checkpoint("Title")
self.assertRaises(
errors.ReverterError, self.reverter.rollback_checkpoints, 1)
def test_recover_checkpoint_copy_failure(self):
self.reverter.add_to_temp_checkpoint(self.sets[0], "save1")
with mock.patch("letsencrypt.reverter.shutil.copy2") as mock_copy2:
mock_copy2.side_effect = OSError("bad copy")
self.assertRaises(
errors.ReverterError, self.reverter.revert_temporary_config)
def test_recover_checkpoint_rm_failure(self):
self.reverter.add_to_temp_checkpoint(self.sets[0], "temp save")
with mock.patch("letsencrypt.reverter.shutil.rmtree") as mock_rmtree:
mock_rmtree.side_effect = OSError("Cannot remove tree")
self.assertRaises(
errors.ReverterError, self.reverter.revert_temporary_config)
@mock.patch("letsencrypt.reverter.logger.warning")
def test_recover_checkpoint_missing_new_files(self, mock_warn):
self.reverter.register_file_creation(
True, os.path.join(self.dir1, "missing_file.txt"))
self.reverter.revert_temporary_config()
self.assertEqual(mock_warn.call_count, 1)
@mock.patch("letsencrypt.reverter.os.remove")
def test_recover_checkpoint_remove_failure(self, mock_remove):
self.reverter.register_file_creation(True, self.config1)
mock_remove.side_effect = OSError("Can't remove")
self.assertRaises(
errors.ReverterError, self.reverter.revert_temporary_config)
def test_recovery_routine_temp_and_perm(self):
# Register a new perm checkpoint file
config3 = os.path.join(self.dir1, "config3.txt")
self.reverter.register_file_creation(False, config3)
update_file(config3, "This is a new perm file!")
# Add changes to perm checkpoint
self.reverter.add_to_checkpoint(self.sets[0], "perm save1")
update_file(self.config1, "updated perm config1")
self.reverter.add_to_checkpoint(self.sets[1], "perm save2")
update_file(self.config2, "updated perm config2")
# Add changes to a temporary checkpoint
self.reverter.add_to_temp_checkpoint(self.sets[0], "temp save1")
update_file(self.config1, "second update now temp config1")
# Register a new temp checkpoint file
config4 = os.path.join(self.dir2, "config4.txt")
self.reverter.register_file_creation(True, config4)
update_file(config4, "New temporary file!")
# Now erase everything
self.reverter.recovery_routine()
# Now Run tests
# These were new files.. they should be removed
self.assertFalse(os.path.isfile(config3))
self.assertFalse(os.path.isfile(config4))
# Check to make sure everything got rolled back appropriately
self.assertEqual(read_in(self.config1), "directive-dir1")
self.assertEqual(read_in(self.config2), "directive-dir2")
class TestFullCheckpointsReverter(unittest.TestCase):
# pylint: disable=too-many-instance-attributes
"""Tests functions having to deal with full checkpoints."""
def setUp(self):
from letsencrypt.reverter import Reverter
# Disable spurious errors...
logging.disable(logging.CRITICAL)
self.config = setup_work_direc()
self.reverter = Reverter(self.config)
tup = setup_test_files()
self.config1, self.config2, self.dir1, self.dir2, self.sets = tup
def tearDown(self):
shutil.rmtree(self.config.work_dir)
shutil.rmtree(self.dir1)
shutil.rmtree(self.dir2)
logging.disable(logging.NOTSET)
def test_rollback_improper_inputs(self):
self.assertRaises(
errors.ReverterError, self.reverter.rollback_checkpoints, "-1")
self.assertRaises(
errors.ReverterError, self.reverter.rollback_checkpoints, -1000)
self.assertRaises(
errors.ReverterError, self.reverter.rollback_checkpoints, "one")
def test_rollback_finalize_checkpoint_valid_inputs(self):
# pylint: disable=invalid-name
config3 = self._setup_three_checkpoints()
# Check resulting backup directory
self.assertEqual(len(os.listdir(self.config.backup_dir)), 3)
# Check rollbacks
# First rollback
self.reverter.rollback_checkpoints(1)
self.assertEqual(read_in(self.config1), "update config1")
self.assertEqual(read_in(self.config2), "update config2")
# config3 was not included in checkpoint
self.assertEqual(read_in(config3), "Final form config3")
# Second rollback
self.reverter.rollback_checkpoints(1)
self.assertEqual(read_in(self.config1), "update config1")
self.assertEqual(read_in(self.config2), "directive-dir2")
self.assertFalse(os.path.isfile(config3))
# One dir left... check title
all_dirs = os.listdir(self.config.backup_dir)
self.assertEqual(len(all_dirs), 1)
self.assertTrue(
"First Checkpoint" in get_save_notes(
os.path.join(self.config.backup_dir, all_dirs[0])))
# Final rollback
self.reverter.rollback_checkpoints(1)
self.assertEqual(read_in(self.config1), "directive-dir1")
def test_finalize_checkpoint_no_in_progress(self):
# No need to warn for this... just make sure there are no errors.
self.reverter.finalize_checkpoint("No checkpoint...")
@mock.patch("letsencrypt.reverter.shutil.move")
def test_finalize_checkpoint_cannot_title(self, mock_move):
self.reverter.add_to_checkpoint(self.sets[0], "perm save")
mock_move.side_effect = OSError("cannot move")
self.assertRaises(
errors.ReverterError, self.reverter.finalize_checkpoint, "Title")
@mock.patch("letsencrypt.reverter.os.rename")
def test_finalize_checkpoint_no_rename_directory(self, mock_rename):
# pylint: disable=invalid-name
self.reverter.add_to_checkpoint(self.sets[0], "perm save")
mock_rename.side_effect = OSError
self.assertRaises(
errors.ReverterError, self.reverter.finalize_checkpoint, "Title")
@mock.patch("letsencrypt.reverter.logger")
def test_rollback_too_many(self, mock_logger):
self.reverter.rollback_checkpoints(1)
self.assertEqual(mock_logger.warning.call_count, 1)
def test_multi_rollback(self):
config3 = self._setup_three_checkpoints()
self.reverter.rollback_checkpoints(3)
self.assertEqual(read_in(self.config1), "directive-dir1")
self.assertEqual(read_in(self.config2), "directive-dir2")
self.assertFalse(os.path.isfile(config3))
@mock.patch("letsencrypt.reverter.zope.component.getUtility")
def test_view_config_changes(self, mock_output):
"""This is not strict as this is subject to change."""
self._setup_three_checkpoints()
# Make sure it doesn't throw any errors
self.reverter.view_config_changes()
# Make sure notification is output
self.assertEqual(mock_output().notification.call_count, 1)
@mock.patch("letsencrypt.reverter.logger")
def test_view_config_changes_no_backups(self, mock_logger):
self.reverter.view_config_changes()
self.assertTrue(mock_logger.info.call_count > 0)
def test_view_config_changes_bad_backups_dir(self):
# There shouldn't be any "in progess directories when this is called
# It must just be clean checkpoints
os.makedirs(os.path.join(self.config.backup_dir, "in_progress"))
self.assertRaises(
errors.ReverterError, self.reverter.view_config_changes)
def _setup_three_checkpoints(self):
"""Generate some finalized checkpoints."""
# Checkpoint1 - config1
self.reverter.add_to_checkpoint(self.sets[0], "first save")
self.reverter.finalize_checkpoint("First Checkpoint")
update_file(self.config1, "update config1")
# Checkpoint2 - new file config3, update config2
config3 = os.path.join(self.dir1, "config3.txt")
self.reverter.register_file_creation(False, config3)
update_file(config3, "directive-config3")
self.reverter.add_to_checkpoint(self.sets[1], "second save")
self.reverter.finalize_checkpoint("Second Checkpoint")
update_file(self.config2, "update config2")
update_file(config3, "update config3")
# Checkpoint3 - update config1, config2
self.reverter.add_to_checkpoint(self.sets[2], "third save")
self.reverter.finalize_checkpoint("Third Checkpoint - Save both")
update_file(self.config1, "Final form config1")
update_file(self.config2, "Final form config2")
update_file(config3, "Final form config3")
return config3
def setup_work_direc():
"""Setup directories.
:returns: Mocked :class:`letsencrypt.interfaces.IConfig`
"""
work_dir = tempfile.mkdtemp("work")
backup_dir = os.path.join(work_dir, "backup")
return mock.MagicMock(
work_dir=work_dir, backup_dir=backup_dir,
temp_checkpoint_dir=os.path.join(work_dir, "temp"),
in_progress_dir=os.path.join(backup_dir, "in_progress_dir"))
def setup_test_files():
"""Setup sample configuration files."""
dir1 = tempfile.mkdtemp("dir1")
dir2 = tempfile.mkdtemp("dir2")
config1 = os.path.join(dir1, "config.txt")
config2 = os.path.join(dir2, "config.txt")
with open(config1, "w") as file_fd:
file_fd.write("directive-dir1")
with open(config2, "w") as file_fd:
file_fd.write("directive-dir2")
sets = [set([config1]),
set([config2]),
set([config1, config2])]
return config1, config2, dir1, dir2, sets
def get_save_notes(dire):
"""Read save notes"""
return read_in(os.path.join(dire, "CHANGES_SINCE"))
def get_filepaths(dire):
"""Get Filepaths"""
return read_in(os.path.join(dire, "FILEPATHS"))
def get_new_files(dire):
"""Get new files."""
return read_in(os.path.join(dire, "NEW_FILES")).splitlines()
def get_undo_commands(dire):
"""Get new files."""
with open(os.path.join(dire, "COMMANDS")) as csvfile:
return list(csv.reader(csvfile))
def read_in(path):
"""Read in a file, return the str"""
with open(path, "r") as file_fd:
return file_fd.read()
def update_file(filename, string):
"""Update a file with a new value."""
with open(filename, "w") as file_fd:
file_fd.write(string)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
381426068/MissionPlanner | Lib/whichdb.py | 60 | 3470 | #!/usr/bin/env python
"""Guess which db package to use to open a db file."""
import os
import struct
import sys
try:
import dbm
_dbmerror = dbm.error
except ImportError:
dbm = None
# just some sort of valid exception which might be raised in the
# dbm test
_dbmerror = IOError
def whichdb(filename):
"""Guess which db package to use to open a db file.
Return values:
- None if the database file can't be read;
- empty string if the file can be read but can't be recognized
- the module name (e.g. "dbm" or "gdbm") if recognized.
Importing the given module may still fail, and opening the
database using that module may still fail.
"""
# Check for dbm first -- this has a .pag and a .dir file
try:
f = open(filename + os.extsep + "pag", "rb")
f.close()
# dbm linked with gdbm on OS/2 doesn't have .dir file
if not (dbm.library == "GNU gdbm" and sys.platform == "os2emx"):
f = open(filename + os.extsep + "dir", "rb")
f.close()
return "dbm"
except IOError:
# some dbm emulations based on Berkeley DB generate a .db file
# some do not, but they should be caught by the dbhash checks
try:
f = open(filename + os.extsep + "db", "rb")
f.close()
# guarantee we can actually open the file using dbm
# kind of overkill, but since we are dealing with emulations
# it seems like a prudent step
if dbm is not None:
d = dbm.open(filename)
d.close()
return "dbm"
except (IOError, _dbmerror):
pass
# Check for dumbdbm next -- this has a .dir and a .dat file
try:
# First check for presence of files
os.stat(filename + os.extsep + "dat")
size = os.stat(filename + os.extsep + "dir").st_size
# dumbdbm files with no keys are empty
if size == 0:
return "dumbdbm"
f = open(filename + os.extsep + "dir", "rb")
try:
if f.read(1) in ("'", '"'):
return "dumbdbm"
finally:
f.close()
except (OSError, IOError):
pass
# See if the file exists, return None if not
try:
f = open(filename, "rb")
except IOError:
return None
# Read the start of the file -- the magic number
s16 = f.read(16)
f.close()
s = s16[0:4]
# Return "" if not at least 4 bytes
if len(s) != 4:
return ""
# Convert to 4-byte int in native byte order -- return "" if impossible
try:
(magic,) = struct.unpack("=l", s)
except struct.error:
return ""
# Check for GNU dbm
if magic == 0x13579ace:
return "gdbm"
# Check for old Berkeley db hash file format v2
if magic in (0x00061561, 0x61150600):
return "bsddb185"
# Later versions of Berkeley db hash file have a 12-byte pad in
# front of the file type
try:
(magic,) = struct.unpack("=l", s16[-4:])
except struct.error:
return ""
# Check for BSD hash
if magic in (0x00061561, 0x61150600):
return "dbhash"
# Unknown
return ""
if __name__ == "__main__":
for filename in sys.argv[1:]:
print whichdb(filename) or "UNKNOWN", filename
| gpl-3.0 |
frankrousseau/weboob | weboob/applications/qhandjoob/qhandjoob.py | 7 | 1540 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Sébastien Monel
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.job import CapJob
from weboob.tools.application.qt import QtApplication
from weboob.tools.config.yamlconfig import YamlConfig
from .main_window import MainWindow
class QHandJoob(QtApplication):
APPNAME = 'qhandjoob'
VERSION = '1.1'
COPYRIGHT = u'Copyright(C) 2013-2014 Sébastien Monel'
DESCRIPTION = "Qt application to search for job."
SHORT_DESCRIPTION = "search for job"
CAPS = CapJob
CONFIG = {'queries': {}}
STORAGE = {'bookmarks': [], 'read': [], 'notes': {}}
def main(self, argv):
self.load_backends(CapJob)
self.create_storage()
self.load_config(klass=YamlConfig)
self.main_window = MainWindow(self.config, self.storage, self.weboob)
self.main_window.show()
return self.weboob.loop()
| agpl-3.0 |
saydulk/newfies-dialer | newfies/agent/function_def.py | 4 | 1886 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from user_profile.models import Manager
from agent.models import AgentProfile, Agent
from appointment.function_def import get_all_calendar_user_id_list
def agent_user_id_list():
agent_id_list = AgentProfile.objects.values_list('user_id', flat=True)
return agent_id_list
def manager_list():
"""Return all managers of the system"""
manager_list = []
agent_id_list = agent_user_id_list()
calendar_user_id_list = get_all_calendar_user_id_list()
list = Manager.objects.values_list('id', 'username').filter(is_staff=False, is_superuser=False)\
.exclude(id__in=agent_id_list).exclude(id__in=calendar_user_id_list).order_by('id')
for l in list:
manager_list.append((l[0], l[1]))
return manager_list
def agentprofile_list(manager_id=None):
"""Return agents which are belong to manager_id"""
agentprofile_list = []
if manager_id:
agent_list = AgentProfile.objects.filter(manager_id=int(manager_id))
else:
agent_list = AgentProfile.objects.all()
for l in agent_list:
agentprofile_list.append((l.id, l.user.username))
return agentprofile_list
def agent_list():
"""Return all agent of the system"""
agent_list = []
agent_id_list = AgentProfile.objects.values_list('user_id', flat=True).all()
list = Agent.objects.values_list('id', 'username') \
.filter(id__in=agent_id_list).order_by('id')
for l in list:
agent_list.append((l[0], l[1]))
return agent_list
| mpl-2.0 |
erigones/esdc-ce | api/mon/backends/abstract/server.py | 1 | 1705 | from django.utils.text import Truncator
from django.utils.translation import ugettext_lazy as _
# noinspection PyProtectedMember
from vms.models.base import _DummyModel, _UserTasksModel
from vms.models import Dc
class AbstractMonitoringServer(_DummyModel, _UserTasksModel):
"""
Abstract model for representing a monitoring server in a DC.
"""
_pk_key = 'mon_server_id'
uri = NotImplemented
name = NotImplemented
address = NotImplemented
connection_id = NotImplemented
# noinspection PyPep8Naming
class Meta:
# Required for api.exceptions.ObjectNotFound
verbose_name_raw = _('Monitoring Server')
# noinspection PyUnusedLocal
def __init__(self, dc):
self.dc = dc
super(AbstractMonitoringServer, self).__init__()
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.name)
@property
def id(self):
return self.dc.id
@property
def owner(self): # Required by _UserTasksModel
return self.dc.owner
@property
def pk(self): # Required by task_log
return str(self.id)
@property
def log_name(self): # Required by task_log
return Truncator(self.uri).chars(32)
@property
def log_alias(self): # Required by task_log
return self.name
@classmethod
def get_content_type(cls): # Required by task_log
return None
@classmethod
def get_object_type(cls, content_type=None): # Required by task_log
return 'monitoringserver'
@classmethod
def get_object_by_pk(cls, pk):
dc = Dc.objects.get_by_id(pk)
return cls(dc)
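# A concrete backend is expected to subclass this and fill in the
# NotImplemented attributes, e.g. (hypothetical values):
#   class ZabbixMonitoringServer(AbstractMonitoringServer):
#       uri = 'https://zabbix.example.com'
#       name = 'zabbix'
#       address = '203.0.113.10'
#       connection_id = 1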
MonitoringServerClass = AbstractMonitoringServer
| apache-2.0 |
umlfri/umlfri2 | umlfri2/qtgui/canvas/scrolledcanvaswidget.py | 1 | 1410 | from PyQt5.QtCore import QPoint
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QWheelEvent
from PyQt5.QtWidgets import QScrollArea
from .canvaswidget import CanvasWidget
class ScrolledCanvasWidget(QScrollArea):
def __init__(self, main_window, drawing_area):
super().__init__()
self.__canvas = CanvasWidget(main_window, drawing_area)
self.setWidget(self.__canvas)
self.setWidgetResizable(True)
def wheelEvent(self, event):
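        # With Shift held, treat the wheel as a horizontal scroll: the x/y
        # components of both deltas are swapped below and the event is
        # re-dispatched without the modifier.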
if event.modifiers() == Qt.ShiftModifier:
pixelDelta = event.pixelDelta()
angleDelta = event.angleDelta()
if angleDelta.x() == 0 and angleDelta.y() != 0:
delta = angleDelta.y()
orientation = Qt.Horizontal
else:
delta = angleDelta.x()
orientation = Qt.Vertical
super().wheelEvent(QWheelEvent(event.pos(), event.globalPos(),
QPoint(pixelDelta.y(), pixelDelta.x()),
QPoint(angleDelta.y(), angleDelta.x()),
delta, orientation,
event.buttons(), Qt.NoModifier))
else:
super().wheelEvent(event)
@property
def diagram(self):
return self.__canvas.diagram
| gpl-3.0 |
carbureted/shavar-prod-lists | scripts/json_verify.py | 1 | 6963 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import glob
import json
import re
from types import DictType, ListType, UnicodeType
from urlparse import urlparse
parser = argparse.ArgumentParser(description='Verify json files for shavar.')
parser.add_argument("-f", "--file", help="filename to verify")
bad_uris = []
dupe_hosts = {
"properties": [],
"resources": []
}
block_host_uris = []
entity_host_uris = []
errors = []
file_contents = []
file_name = ""
result = 0
def run(file):
global file_name
file_name = file
try:
verify(file)
except:
errors.append("\tError: Problem handling file")
finish()
def verify(file):
try:
with open(file) as f:
raw_data = f.readlines()
# save contents of file, including line numbers
for x in range(0, len(raw_data)):
line_number = x+1
file_contents.append([raw_data[x], line_number])
# attempt to parse file as json
json_obj = json.loads("".join(raw_data))
try:
# determine which schema this file uses
if ("categories" in json_obj):
# google_mapping.json
# disconnect_blacklist.json
find_uris(json_obj["categories"])
else:
# disconnect_entitylist.json
find_uris_in_entities(json_obj)
except:
errors.append("\tError: Can't parse file")
except ValueError as e:
# invalid json formatting
errors.append("\tError: %s" % e)
return
except IOError as e:
# non-existent file
errors.append("\tError: Can't open file: %s" % e)
return
"""
categories_json is expected to match this format:
"categories": {
"Disconnect": [
{
"Facebook": {
"http://www.facebook.com/": [
"facebook.com",
...
]
}
},
{
"Google": {
"http://www.google.com/": [
"2mdn.net",
...
]
}
},
...
],
"Advertising": [
{
"[x+1]": {
"http://www.xplusone.com/": [
"ru4.com",
...
]
}
},
]
...
}
"""
def find_uris(categories_json):
assert type(categories_json) is DictType
for category, category_json in categories_json.iteritems():
assert type(category) is UnicodeType
assert type(category_json) is ListType
for entity in category_json:
assert type(entity) is DictType
for entity_name, entity_json in entity.iteritems():
assert type(entity_name) is UnicodeType
assert type(entity_json) is DictType
# pop dnt out of the dict, so we can iteritems() over the rest
try:
dnt_value = entity_json.pop('dnt', '')
assert dnt_value in ["w3c", "eff", ""]
except AssertionError:
errors.append("%s has bad DNT value: %s" % (entity_name,
dnt_value))
for domain, uris in entity_json.iteritems():
assert type(domain) is UnicodeType
assert type(uris) is ListType
for uri in uris:
check_uri(uri)
block_host_uris.append(uri)
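"""
find_uris_in_entities below expects disconnect_entitylist.json to look
roughly like this (entity and hosts invented for illustration):
{
    "ExampleEntity": {
        "properties": ["example.com"],
        "resources": ["examplecdn.net"]
    }
}
"""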
def find_uris_in_entities(entitylist_json):
checked_uris = {
"properties": [],
"resources": []
}
assert len(entitylist_json.items()) > 0
assert type(entitylist_json) is DictType
for entity, types in entitylist_json.iteritems():
assert type(entity) is UnicodeType
assert type(types) is DictType
for host_type, uris in types.iteritems():
assert host_type in ["properties", "resources"]
assert type(uris) is ListType
for uri in uris:
if uri in checked_uris[host_type]:
dupe_hosts[host_type].append(uri)
check_uri(uri)
entity_host_uris.append(uri)
checked_uris[host_type].append(uri)
def check_uri(uri):
# Valid URI:
# no scheme, port, fragment, path or query string
# no disallowed characters
# no leading/trailing garbage
try:
uri.decode('ascii')
except UnicodeEncodeError:
bad_uris.append(uri)
parsed_uri = urlparse(uri)
try:
assert parsed_uri.scheme == ''
# domains of urls without schemes are parsed into 'path' so check path
# for port
assert ':' not in parsed_uri.path
assert parsed_uri.netloc == ''
assert parsed_uri.params == ''
assert parsed_uri.query == ''
assert parsed_uri.fragment == ''
assert len(parsed_uri.path) < 128
except AssertionError:
bad_uris.append(uri)
return
def find_line_number(uri):
line = 0
try:
for x in range(0, len(file_contents)):
temp = file_contents[x][0].decode("utf-8", "ignore")
if re.search(uri, temp):
line = file_contents[x][1]
file_contents.pop(x)
break
except ValueError as e:
print e
line = -1
return str(line)
def make_errors_from_bad_uris():
for bad_uri in bad_uris:
errors.append("\tError: Bad URI: %s\t: in line %s" %
(bad_uri, find_line_number(bad_uri)))
for host_type, hosts in dupe_hosts.iteritems():
for host in hosts:
errors.append("\tDupe: Dupe host: %s\t in line %s" %
(host, find_line_number(host)))
def finish():
make_errors_from_bad_uris()
if (len(errors) == 0):
print "\n" + file_name + " : valid"
else:
global result
result = 1
print "\n" + file_name + " : invalid"
for error in errors:
print error
reset()
def reset():
global bad_uris
bad_uris = []
global dupe_hosts
dupe_hosts = {
"properties": [],
"resources": []
}
global errors
errors = []
global file_contents
file_contents = []
global file_name
file_name = ""
def start(filename=None):
if (filename):
run(filename)
else:
for f in glob.glob("*.json"):
run(f)
args = parser.parse_args()
start(args.file)
print "\n block_host_uris: %s " % len(block_host_uris)
print "\n entity_host_uris: %s " % len(entity_host_uris)
assert "itisatracker.com" in block_host_uris
exit(result)
| gpl-3.0 |
deployed/django | django/contrib/staticfiles/management/commands/findstatic.py | 64 | 1767 | from __future__ import unicode_literals
import os
from optparse import make_option
from django.core.management.base import LabelCommand
from django.utils.encoding import force_text
from django.contrib.staticfiles import finders
class Command(LabelCommand):
help = "Finds the absolute paths for the given static file(s)."
args = "[file ...]"
label = 'static file'
option_list = LabelCommand.option_list + (
make_option('--first', action='store_false', dest='all', default=True,
help="Only return the first match for each static file."),
)
def handle_label(self, path, **options):
verbosity = int(options.get('verbosity', 1))
result = finders.find(path, all=options['all'])
path = force_text(path)
if verbosity >= 2:
searched_locations = ("Looking in the following locations:\n %s" %
"\n ".join(force_text(location)
for location in finders.searched_locations))
else:
searched_locations = ''
if result:
if not isinstance(result, (list, tuple)):
result = [result]
result = (force_text(os.path.realpath(path)) for path in result)
if verbosity >= 1:
file_list = '\n '.join(result)
return ("Found '%s' here:\n %s\n%s" %
(path, file_list, searched_locations))
else:
return '\n'.join(result)
else:
message = ["No matching file found for '%s'." % path]
if verbosity >= 2:
message.append(searched_locations)
if verbosity >= 1:
self.stderr.write('\n'.join(message))
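# Example invocation (hypothetical asset path):
#   python manage.py findstatic css/base.css --first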
| bsd-3-clause |
Jollytown/Garuda | server/garuda/lib/python2.7/site-packages/django/http/cookie.py | 95 | 3621 | from __future__ import unicode_literals
from django.utils.encoding import force_str
from django.utils import six
from django.utils.six.moves import http_cookies
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = http_cookies.SimpleCookie()
try:
_tc.load(str('foo:bar=1'))
_cookie_allows_colon_in_names = True
except http_cookies.CookieError:
_cookie_allows_colon_in_names = False
if _cookie_encodes_correctly and _cookie_allows_colon_in_names:
SimpleCookie = http_cookies.SimpleCookie
else:
Morsel = http_cookies.Morsel
class SimpleCookie(http_cookies.SimpleCookie):
if not _cookie_encodes_correctly:
def value_encode(self, val):
# Some browsers do not support quoted-string from RFC 2109,
# including some versions of Safari and Internet Explorer.
# These browsers split on ';', and some versions of Safari
# are known to split on ', '. Therefore, we encode ';' and ','
# SimpleCookie already does the hard work of encoding and decoding.
# It uses octal sequences like '\\012' for newline etc.
# and non-ASCII chars. We just make use of this mechanism, to
# avoid introducing two encoding schemes which would be confusing
# and especially awkward for javascript.
# NB, contrary to Python docs, value_encode returns a tuple containing
# (real val, encoded_val)
val, encoded = super(SimpleCookie, self).value_encode(val)
encoded = encoded.replace(";", "\\073").replace(",", "\\054")
# If encoded now contains any quoted chars, we need double quotes
# around the whole string.
if "\\" in encoded and not encoded.startswith('"'):
encoded = '"' + encoded + '"'
return val, encoded
if not _cookie_allows_colon_in_names:
def load(self, rawdata):
self.bad_cookies = set()
if six.PY2 and isinstance(rawdata, six.text_type):
rawdata = force_str(rawdata)
super(SimpleCookie, self).load(rawdata)
for key in self.bad_cookies:
del self[key]
# override private __set() method:
        # (needed for using our Morsel, and for laxness with CookieError)
def _BaseCookie__set(self, key, real_value, coded_value):
key = force_str(key)
try:
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
except http_cookies.CookieError:
if not hasattr(self, 'bad_cookies'):
self.bad_cookies = set()
self.bad_cookies.add(key)
dict.__setitem__(self, key, http_cookies.Morsel())
def parse_cookie(cookie):
if cookie == '':
return {}
if not isinstance(cookie, http_cookies.BaseCookie):
try:
c = SimpleCookie()
c.load(cookie)
except http_cookies.CookieError:
# Invalid cookie
return {}
else:
c = cookie
cookiedict = {}
for key in c.keys():
cookiedict[key] = c.get(key).value
return cookiedict
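# Usage sketch (header value invented):
#   parse_cookie('name=value; sessionid=abc123')
#   -> {'name': 'value', 'sessionid': 'abc123'}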
| mit |
andnovar/kivy | kivy/cache.py | 40 | 7975 | '''
Cache manager
=============
The cache manager can be used to store python objects attached to a unique
key. The cache can be controlled in two ways: with an object limit or a
timeout.
For example, we can create a new cache with a limit of 10 objects and a
timeout of 5 seconds::
# register a new Cache
Cache.register('mycache', limit=10, timeout=5)
# create an object + id
key = 'objectid'
instance = Label(text=text)
Cache.append('mycache', key, instance)
# retrieve the cached object
instance = Cache.get('mycache', key)
If the instance is None, the cache may have trashed it because you've
not used the label for 5 seconds and you've reached the limit.
'''
__all__ = ('Cache', )
from os import environ
from kivy.logger import Logger
from kivy.clock import Clock
class Cache(object):
'''See module documentation for more information.
'''
_categories = {}
_objects = {}
@staticmethod
def register(category, limit=None, timeout=None):
'''Register a new category in the cache with the specified limit.
:Parameters:
`category` : str
Identifier of the category.
`limit` : int (optional)
Maximum number of objects allowed in the cache.
If None, no limit is applied.
`timeout` : double (optional)
Time after which to delete the object if it has not been used.
If None, no timeout is applied.
'''
Cache._categories[category] = {
'limit': limit,
'timeout': timeout}
Cache._objects[category] = {}
Logger.debug(
'Cache: register <%s> with limit=%s, timeout=%s' %
(category, str(limit), str(timeout)))
@staticmethod
def append(category, key, obj, timeout=None):
'''Add a new object to the cache.
:Parameters:
`category` : str
Identifier of the category.
`key` : str
Unique identifier of the object to store.
`obj` : object
Object to store in cache.
`timeout` : double (optional)
Time after which to delete the object if it has not been used.
If None, no timeout is applied.
'''
# check first whether obj opts out of caching
if getattr(obj, '_no_cache', False):
return
try:
cat = Cache._categories[category]
except KeyError:
Logger.warning('Cache: category <%s> does not exist' % category)
return
timeout = timeout or cat['timeout']
# FIXME: activate purge when limit is hit
#limit = cat['limit']
#if limit is not None and len(Cache._objects[category]) >= limit:
# Cache._purge_oldest(category)
Cache._objects[category][key] = {
'object': obj,
'timeout': timeout,
'lastaccess': Clock.get_time(),
'timestamp': Clock.get_time()}
@staticmethod
def get(category, key, default=None):
'''Get an object from the cache.
:Parameters:
`category` : str
Identifier of the category.
`key` : str
Unique identifier of the object in the store.
`default` : anything, defaults to None
Default value to be returned if the key is not found.
'''
try:
Cache._objects[category][key]['lastaccess'] = Clock.get_time()
return Cache._objects[category][key]['object']
except Exception:
return default
@staticmethod
def get_timestamp(category, key, default=None):
'''Get the object timestamp in the cache.
:Parameters:
`category` : str
Identifier of the category.
`key` : str
Unique identifier of the object in the store.
`default` : anything, defaults to None
Default value to be returned if the key is not found.
'''
try:
return Cache._objects[category][key]['timestamp']
except Exception:
return default
@staticmethod
def get_lastaccess(category, key, default=None):
'''Get the object's last access time in the cache.
:Parameters:
`category` : str
Identifier of the category.
`key` : str
Unique identifier of the object in the store.
`default` : anything, defaults to None
Default value to be returned if the key is not found.
'''
try:
return Cache._objects[category][key]['lastaccess']
except Exception:
return default
@staticmethod
def remove(category, key=None):
'''Purge the cache.
:Parameters:
`category` : str
Identifier of the category.
`key` : str (optional)
Unique identifier of the object in the store. If this
argument is not supplied, the entire category will be purged.
'''
try:
if key is not None:
del Cache._objects[category][key]
else:
Cache._objects[category] = {}
except Exception:
pass
@staticmethod
def _purge_oldest(category, maxpurge=1):
print('PURGE', category)
import heapq
heap_list = []
for key in Cache._objects[category]:
obj = Cache._objects[category][key]
if obj['lastaccess'] == obj['timestamp']:
continue
heapq.heappush(heap_list, (obj['lastaccess'], key))
print('<<<', obj['lastaccess'])
n = 0
while n < maxpurge:
try:
lastaccess, key = heapq.heappop(heap_list)
print('=>', key, lastaccess, Clock.get_time())
except Exception:
return
del Cache._objects[category][key]
@staticmethod
def _purge_by_timeout(dt):
curtime = Clock.get_time()
for category in Cache._objects:
if category not in Cache._categories:
continue
timeout = Cache._categories[category]['timeout']
if timeout is not None and dt > timeout:
# XXX we hit a lag! That may be because the frame took a long
# time to draw and the timeout is not adapted to the current
# framerate. So, double the timeout: otherwise, if the timeout
# is 1 sec and the framerate drops to 0.7, newly added objects
# would be trashed immediately.
timeout *= 2
Cache._categories[category]['timeout'] = timeout
continue
for key in list(Cache._objects[category].keys()):
lastaccess = Cache._objects[category][key]['lastaccess']
objtimeout = Cache._objects[category][key]['timeout']
# take the per-object timeout if available, without clobbering
# the category timeout used for the remaining keys
effective_timeout = objtimeout if objtimeout is not None else timeout
# no timeout, cancel
if effective_timeout is None:
continue
if curtime - lastaccess > effective_timeout:
del Cache._objects[category][key]
@staticmethod
def print_usage():
'''Print the cache usage to the console.'''
print('Cache usage :')
for category in Cache._categories:
print(' * %s : %d / %s, timeout=%s' % (
category.capitalize(),
len(Cache._objects[category]),
str(Cache._categories[category]['limit']),
str(Cache._categories[category]['timeout'])))
if 'KIVY_DOC_INCLUDE' not in environ:
# install the schedule clock for purging
Clock.schedule_interval(Cache._purge_by_timeout, 1)
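# A minimal usage sketch (hypothetical category and key names); run this
# module directly with a working Kivy install to exercise the API
# documented above:
if __name__ == '__main__':
    Cache.register('demo', limit=10, timeout=5)
    Cache.append('demo', 'answer', 42)
    assert Cache.get('demo', 'answer') == 42
    # unknown keys fall back to the supplied default
    assert Cache.get('demo', 'missing', default='n/a') == 'n/a'
    Cache.remove('demo', 'answer')
    assert Cache.get('demo', 'answer') is None
    Cache.print_usage()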
| mit |
lovexiaov/SandwichApp | venv/lib/python2.7/site-packages/py2app/build_app.py | 9 | 77527 | """
Mac OS X .app build command for distutils
Originally (loosely) based on code from py2exe's build_exe.py by Thomas Heller.
"""
from __future__ import print_function
import imp
import sys
import os
import zipfile
import plistlib
import shlex
import shutil
import textwrap
import pkg_resources
import collections
from modulegraph import modulegraph
from py2app.apptemplate.setup import main as script_executable
from py2app.util import mergecopy, make_exec
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from itertools import chain
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
from modulegraph.find_modules import find_modules, parse_mf_results, find_needed_modules
from modulegraph.modulegraph import SourceModule, Package, Script
from modulegraph import zipio
import macholib.dyld
import macholib.MachOStandalone
import macholib.MachO
from macholib.util import flipwritable
from py2app.create_appbundle import create_appbundle
from py2app.create_pluginbundle import create_pluginbundle
from py2app.util import \
fancy_split, byte_compile, make_loader, imp_find_module, \
copy_tree, fsencoding, strip_files, in_system_path, makedirs, \
iter_platform_files, find_version, skipscm, momc, mapc, copy_file, \
copy_resource
from py2app.filters import \
not_stdlib_filter, not_system_filter, has_filename_filter
from py2app import recipes
from distutils.sysconfig import get_config_var, get_config_h_filename
PYTHONFRAMEWORK=get_config_var('PYTHONFRAMEWORK')
PLUGIN_SUFFIXES = {
'.qlgenerator': 'QuickLook',
'.mdimporter': 'Spotlight',
'.xpc': 'XPCServices',
'.service': 'Services',
'.prefPane': 'PreferencePanes',
'.iaplugin': 'InternetAccounts',
'.action': 'Automator',
}
try:
basestring
except NameError:
basestring = str
def rewrite_tkinter_load_commands(tkinter_path):
print("rewrite_tk", tkinter_path)
m = macholib.MachO.MachO(tkinter_path)
tcl_path = None
tk_path = None
rewrite_map = {}
for header in m.headers:
for idx, name, other in header.walkRelocatables():
if other.endswith('/Tk'):
if tk_path is not None and other != tk_path:
raise DistutilsPlatformError('_tkinter is linked to different Tk paths')
tk_path = other
elif other.endswith('/Tcl'):
if tcl_path is not None and other != tcl_path:
raise DistutilsPlatformError('_tkinter is linked to different Tcl paths')
tcl_path = other
if tcl_path is None or 'Tcl.framework' not in tcl_path:
raise DistutilsPlatformError('_tkinter is not linked against a Tcl.framework')
if tk_path is None or 'Tk.framework' not in tk_path:
raise DistutilsPlatformError('_tkinter is not linked against a Tk.framework')
system_tcl_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tcl.framework/Versions') if nm != 'Current']
system_tk_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tk.framework/Versions') if nm != 'Current']
if not tcl_path.startswith('/System/Library/Frameworks'):
# ../Versions/8.5/Tcl
ver = os.path.basename(os.path.dirname(tcl_path))
if ver not in system_tcl_versions:
raise DistutilsPlatformError('_tkinter is linked to a version of Tcl not in /System')
rewrite_map[tcl_path] = '/System/Library/Frameworks/Tcl.framework/Versions/%s/Tcl'%(ver,)
if not tk_path.startswith('/System/Library/Frameworks'):
# ../Versions/8.5/Tk
ver = os.path.basename(os.path.dirname(tk_path))
if ver not in system_tk_versions:
raise DistutilsPlatformError('_tkinter is linked to a version of Tk not in /System')
rewrite_map[tk_path] = '/System/Library/Frameworks/Tk.framework/Versions/%s/Tk'%(ver,)
if rewrite_map:
print("Relinking _tkinter.so to system Tcl/Tk")
rewroteAny = False
for header in m.headers:
for idx, name, other in header.walkRelocatables():
data = rewrite_map.get(other)
if data:
if header.rewriteDataForCommand(idx, data.encode(sys.getfilesystemencoding())):
rewroteAny = True
if rewroteAny:
old_mode = flipwritable(m.filename)
try:
with open(m.filename, 'rb+') as f:
for header in m.headers:
f.seek(0)
header.write(f)
f.seek(0, 2)
f.flush()
finally:
flipwritable(m.filename, old_mode)
else:
print("_tkinter already linked against system Tcl/Tk")
def get_zipfile(dist, semi_standalone=False):
if sys.version_info[0] == 3:
if semi_standalone:
return "python%d.%d/site-packages.zip"%(sys.version_info[:2])
else:
return "python%d%d.zip"%(sys.version_info[:2])
return getattr(dist, "zipfile", None) or "site-packages.zip"
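# e.g. on Python 3.4 this returns "python34.zip", or
# "python3.4/site-packages.zip" for semi-standalone builds; on Python 2 it
# falls back to dist.zipfile or "site-packages.zip".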
def framework_copy_condition(src):
# Skip Headers, .svn, and CVS dirs
return skipscm(src) and os.path.basename(src) != 'Headers'
class PythonStandalone(macholib.MachOStandalone.MachOStandalone):
def __init__(self, appbuilder, *args, **kwargs):
super(PythonStandalone, self).__init__(*args, **kwargs)
self.appbuilder = appbuilder
def copy_dylib(self, src):
dest = os.path.join(self.dest, os.path.basename(src))
if os.path.islink(src):
dest = os.path.join(self.dest, os.path.basename(os.path.realpath(src)))
# Ensure that the original name also exists; this avoids problems when
# the filename is used from Python (see issue #65)
#
# NOTE: The if statement checks that the target link won't
# point to itself, needed for systems like homebrew that
# store symlinks in "public" locations that point to
# files of the same name in a per-package install location.
link_dest = os.path.join(self.dest, os.path.basename(src))
if os.path.basename(link_dest) != os.path.basename(dest):
os.symlink(os.path.basename(dest), link_dest)
else:
dest = os.path.join(self.dest, os.path.basename(src))
return self.appbuilder.copy_dylib(src, dest)
def copy_framework(self, info):
destfn = self.appbuilder.copy_framework(info, self.dest)
dest = os.path.join(self.dest, info['shortname'] + '.framework')
self.pending.append((destfn, iter_platform_files(dest)))
return destfn
def iterRecipes(module=recipes):
for name in dir(module):
if name.startswith('_'):
continue
check = getattr(getattr(module, name), 'check', None)
if check is not None:
yield (name, check)
# A very loosely defined "target". We assume either a "script" or "modules"
# attribute. Some attributes will be target specific.
class Target(object):
def __init__(self, **kw):
self.__dict__.update(kw)
# If modules is a simple string, assume they meant list
m = self.__dict__.get("modules")
if m and isinstance(m, basestring):
self.modules = [m]
def get_dest_base(self):
dest_base = getattr(self, "dest_base", None)
if dest_base: return dest_base
script = getattr(self, "script", None)
if script:
return os.path.basename(os.path.splitext(script)[0])
modules = getattr(self, "modules", None)
assert modules, "no script, modules or dest_base specified"
return modules[0].split(".")[-1]
def validate(self):
resources = getattr(self, "resources", [])
for r_filename in resources:
if not os.path.isfile(r_filename):
raise DistutilsOptionError(
"Resource filename '%s' does not exist" % (r_filename,))
def validate_target(dist, attr, value):
res = FixupTargets(value, "script")
other = {"app": "plugin", "plugin": "app"}
if res and getattr(dist, other[attr]):
# XXX - support apps and plugins?
raise DistutilsOptionError(
"You must specify either app or plugin, not both")
def FixupTargets(targets, default_attribute):
if not targets:
return targets
try:
targets = eval(targets)
except:
pass
ret = []
for target_def in targets:
if isinstance(target_def, basestring):
# Create a default target object, with the string as the attribute
target = Target(**{default_attribute: target_def})
else:
d = getattr(target_def, "__dict__", target_def)
if default_attribute not in d:
raise DistutilsOptionError(
"This target class requires an attribute '%s'"
% (default_attribute,))
target = Target(**d)
target.validate()
ret.append(target)
return ret
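# Illustration (hypothetical script name): both forms below normalize to
# [Target(script="main.py")]:
#   FixupTargets(["main.py"], "script")
#   FixupTargets([{"script": "main.py"}], "script")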
def normalize_data_file(fn):
if isinstance(fn, basestring):
fn = convert_path(fn)
return ('', [fn])
return fn
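# e.g. normalize_data_file('extras/readme.txt') -> ('', ['extras/readme.txt']),
# while an already-normalized ('extras', ['readme.txt']) tuple passes
# through unchanged.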
def is_system():
prefix = sys.prefix
if os.path.exists(os.path.join(prefix, ".Python")):
fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
return in_system_path(prefix)
def installation_info(version=None):
if version is None:
version = sys.version
if is_system():
return version[:3] + " (FORCED: Using vendor Python)"
else:
return version[:3]
class py2app(Command):
description = "create a Mac OS X application or plugin from Python scripts"
# List of option tuples: long name, short name (None if no short
# name), and help string.
user_options = [
("app=", None,
"application bundle to be built"),
("plugin=", None,
"plugin bundle to be built"),
('optimize=', 'O',
"optimization level: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
("includes=", 'i',
"comma-separated list of modules to include"),
("packages=", 'p',
"comma-separated list of packages to include"),
("iconfile=", None,
"Icon file to use"),
("excludes=", 'e',
"comma-separated list of modules to exclude"),
("dylib-excludes=", 'E',
"comma-separated list of frameworks or dylibs to exclude"),
("datamodels=", None,
"xcdatamodels to be compiled and copied into Resources"),
("mappingmodels=", None,
"xcmappingmodels to be compiled and copied into Resources"),
("resources=", 'r',
"comma-separated list of additional data files and folders to include (not for code!)"),
("frameworks=", 'f',
"comma-separated list of additional frameworks and dylibs to include"),
("plist=", 'P',
"Info.plist template file, dict, or plistlib.Plist"),
("extension=", None,
"Bundle extension [default:.app for app, .plugin for plugin]"),
("graph", 'g',
"output module dependency graph"),
("xref", 'x',
"output module cross-reference as html"),
("no-strip", None,
"do not strip debug and local symbols from output"),
#("compressed", 'c',
# "create a compressed zipfile"),
("no-chdir", 'C',
"do not change to the data directory (Contents/Resources) [forced for plugins]"),
#("no-zip", 'Z',
# "do not use a zip file (XXX)"),
("semi-standalone", 's',
"depend on an existing installation of Python " + installation_info()),
("alias", 'A',
"Use an alias to current source file (for development only!)"),
("argv-emulation", 'a',
"Use argv emulation [disabled for plugins]."),
("argv-inject=", None,
"Inject some commands into the argv"),
("emulate-shell-environment", None,
"Emulate the shell environment you get in a Terminal window"),
("use-pythonpath", None,
"Allow PYTHONPATH to effect the interpreter's environment"),
("use-faulthandler", None,
"Enable the faulthandler in the generated bundle (Python 3.3 or later)"),
("verbose-interpreter", None,
"Start python in verbose mode"),
('bdist-base=', 'b',
'base directory for build library (default is build)'),
('dist-dir=', 'd',
"directory to put final built distributions in (default is dist)"),
('site-packages', None,
"include the system and user site-packages into sys.path"),
("strip", 'S',
"strip debug and local symbols from output (on by default, for compatibility)"),
("prefer-ppc", None,
"Force application to run translated on i386 (LSPrefersPPC=True)"),
('debug-modulegraph', None,
'Drop to pdb console after the module finding phase is complete'),
("debug-skip-macholib", None,
"skip macholib phase (app will not be standalone!)"),
("arch=", None, "set of architectures to use (fat, fat3, universal, intel, i386, ppc, x86_64; default is the set for the current python binary)"),
("qt-plugins=", None, "set of Qt plugins to include in the application bundle (default None)"),
("matplotlib-backends=", None, "set of matplotlib backends to include (default: include entire package)"),
("extra-scripts=", None, "set of scripts to include in the application bundle, next to the main application script"),
("include-plugins=", None, "List of plugins to include"),
("force-system-tk", None, "Ensure that Tkinter is linked against Apple's build of Tcl/Tk"),
("report-missing-from-imports", None, "Report the list of missing names for 'from module import name'"),
("no-report-missing-conditional-import", None, "Don't report missing modules when they appear to be conditional imports"),
]
boolean_options = [
#"compressed",
"xref",
"strip",
"no-strip",
"site-packages",
"semi-standalone",
"alias",
"argv-emulation",
#"no-zip",
"use-pythonpath",
"use-faulthandler",
"verbose-interpreter",
"no-chdir",
"debug-modulegraph",
"debug-skip-macholib",
"graph",
"prefer-ppc",
"emulate-shell-environment",
"force-system-tk",
"report-missing-from-imports",
"no-report-missing-conditional-import",
]
def initialize_options (self):
self.app = None
self.plugin = None
self.bdist_base = None
self.xref = False
self.graph = False
self.no_zip = 0
self.optimize = 0
if hasattr(sys, 'flags'):
self.optimize = sys.flags.optimize
self.arch = None
self.strip = True
self.no_strip = False
self.iconfile = None
self.extension = None
self.alias = 0
self.argv_emulation = 0
self.emulate_shell_environment = 0
self.argv_inject = None
self.no_chdir = 0
self.site_packages = False
self.use_pythonpath = False
self.use_faulthandler = False
self.verbose_interpreter = False
self.includes = None
self.packages = None
self.excludes = None
self.dylib_excludes = None
self.frameworks = None
self.resources = None
self.datamodels = None
self.mappingmodels = None
self.plist = None
self.compressed = True
self.semi_standalone = is_system()
self.dist_dir = None
self.debug_skip_macholib = False
self.debug_modulegraph = False
self.prefer_ppc = False
self.filters = []
self.eggs = []
self.qt_plugins = None
self.matplotlib_backends = None
self.extra_scripts = None
self.include_plugins = None
self.force_system_tk = False
self.report_missing_from_imports = False
self.no_report_missing_conditional_import = False
def finalize_options (self):
if not self.strip:
self.no_strip = True
elif self.no_strip:
self.strip = False
self.optimize = int(self.optimize)
if self.argv_inject and isinstance(self.argv_inject, basestring):
self.argv_inject = shlex.split(self.argv_inject)
self.includes = set(fancy_split(self.includes))
self.includes.add('encodings.*')
if self.use_faulthandler:
self.includes.add('faulthandler')
#if sys.version_info[:2] >= (3, 2):
# self.includes.add('pkgutil')
# self.includes.add('imp')
self.packages = set(fancy_split(self.packages))
self.excludes = set(fancy_split(self.excludes))
self.excludes.add('readline')
# included by apptemplate
self.excludes.add('site')
if getattr(self.distribution, 'install_requires', None):
self.includes.add('pkg_resources')
self.eggs = pkg_resources.require(self.distribution.install_requires)
# Setuptools/distribute style namespace packages uses
# __import__('pkg_resources'), and that import isn't detected at the
# moment. Forcefully include pkg_resources.
self.includes.add('pkg_resources')
dylib_excludes = fancy_split(self.dylib_excludes)
self.dylib_excludes = []
for fn in dylib_excludes:
try:
res = macholib.dyld.framework_find(fn)
except ValueError:
try:
res = macholib.dyld.dyld_find(fn)
except ValueError:
res = fn
self.dylib_excludes.append(res)
self.resources = fancy_split(self.resources)
frameworks = fancy_split(self.frameworks)
self.frameworks = []
for fn in frameworks:
try:
res = macholib.dyld.framework_find(fn)
except ValueError:
res = macholib.dyld.dyld_find(fn)
while res in self.dylib_excludes:
self.dylib_excludes.remove(res)
self.frameworks.append(res)
if not self.plist:
self.plist = {}
if isinstance(self.plist, basestring):
self.plist = plistlib.Plist.fromFile(self.plist)
if isinstance(self.plist, plistlib.Dict):
self.plist = dict(self.plist.__dict__)
else:
self.plist = dict(self.plist)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('bdist_base', 'bdist_base'))
if self.semi_standalone:
self.filters.append(not_stdlib_filter)
if self.iconfile is None and 'CFBundleIconFile' not in self.plist:
# Default is the generic applet icon in the framework
iconfile = os.path.join(sys.prefix, 'Resources', 'Python.app',
'Contents', 'Resources', 'PythonApplet.icns')
if os.path.exists(iconfile):
self.iconfile = iconfile
self.runtime_preferences = list(self.get_runtime_preferences())
self.qt_plugins = fancy_split(self.qt_plugins)
self.matplotlib_backends = fancy_split(self.matplotlib_backends)
self.extra_scripts = fancy_split(self.extra_scripts)
self.include_plugins = fancy_split(self.include_plugins)
if self.datamodels:
print("WARNING: the datamodels option is deprecated, add model files to the list of resources")
if self.mappingmodels:
print("WARNING: the mappingmodels option is deprecated, add model files to the list of resources")
def get_default_plist(self):
# XXX - this is all single target stuff
plist = {}
target = self.targets[0]
version = self.distribution.get_version()
if version == '0.0.0':
try:
version = find_version(target.script)
except ValueError:
pass
if not isinstance(version, basestring):
raise DistutilsOptionError("Version must be a string")
if sys.version_info[0] > 2 and isinstance(version, type('a'.encode('ascii'))):
raise DistutilsOptionError("Version must be a string")
plist['CFBundleVersion'] = version
name = self.distribution.get_name()
if name == 'UNKNOWN':
base = target.get_dest_base()
name = os.path.basename(base)
plist['CFBundleName'] = name
return plist
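# e.g. for a distribution named "MyApp" at version "1.0" (hypothetical
# values) this returns {'CFBundleVersion': '1.0', 'CFBundleName': 'MyApp'}.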
def get_runtime(self, prefix=None, version=None):
# XXX - this is a bit of a hack!
# ideally we'd use dylib functions to figure this out
if prefix is None:
prefix = sys.prefix
if version is None:
version = sys.version
version = version[:3]
info = None
if os.path.exists(os.path.join(prefix, ".Python")):
# We're in a virtualenv environment, locate the real prefix
fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
try:
fmwk = macholib.dyld.framework_find(prefix)
except ValueError:
info = None
else:
info = macholib.dyld.framework_info(fmwk)
if info is not None:
dylib = info['name']
runtime = os.path.join(info['location'], info['name'])
else:
dylib = 'libpython%s.dylib' % (sys.version[:3],)
runtime = os.path.join(prefix, 'lib', dylib)
return dylib, runtime
def symlink(self, src, dst):
try:
os.remove(dst)
except OSError:
pass
os.symlink(src, dst)
def get_runtime_preferences(self, prefix=None, version=None):
dylib, runtime = self.get_runtime(prefix=prefix, version=version)
yield os.path.join('@executable_path', '..', 'Frameworks', dylib)
if self.semi_standalone or self.alias:
yield runtime
def run(self):
if get_config_var('PYTHONFRAMEWORK') is None:
if not get_config_var('Py_ENABLE_SHARED'):
raise DistutilsPlatformError("This python does not have a shared library or framework")
else:
# Issue .. in py2app's tracker, and issue .. in python's tracker: a unix-style shared
# library build did not read the application environment correctly. The collection of
# if statements below gives a clean error message when py2app is started, instead of
# building a bundle that will give a confusing error message when started.
msg = "py2app is not supported for a shared library build with this version of python"
if sys.version_info[:2] < (2,7):
raise DistutilsPlatformError(msg)
elif sys.version_info[:2] == (2,7) and sys.version_info[2] < 4:
raise DistutilsPlatformError(msg)
elif sys.version_info[0] == 3 and sys.version_info[1] < 2:
raise DistutilsPlatformError(msg)
elif sys.version_info[0] == 3 and sys.version_info[1] == 2 and sys.version_info[2] < 3:
raise DistutilsPlatformError(msg)
elif sys.version_info[0] == 3 and sys.version_info[1] == 3 and sys.version_info[2] < 1:
raise DistutilsPlatformError(msg)
if hasattr(self.distribution, "install_requires") \
and self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
build = self.reinitialize_command('build')
build.build_base = self.bdist_base
build.run()
self.create_directories()
self.fixup_distribution()
self.initialize_plist()
sys_old_path = sys.path[:]
extra_paths = [
os.path.dirname(target.script)
for target in self.targets
]
extra_paths.extend([build.build_platlib, build.build_lib])
self.additional_paths = [
os.path.abspath(p)
for p in extra_paths
if p is not None
]
sys.path[:0] = self.additional_paths
# this needs additional_paths
self.initialize_prescripts()
try:
self._run()
finally:
sys.path = sys_old_path
def iter_datamodels(self, resdir):
for (path, files) in (normalize_data_file(fn) for fn in (self.datamodels or ())):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
basefn, ext = os.path.splitext(fn)
if ext != '.xcdatamodel':
basefn = fn
fn += '.xcdatamodel'
destfn = os.path.basename(basefn) + '.mom'
yield fn, os.path.join(resdir, path, destfn)
def compile_datamodels(self, resdir):
for src, dest in self.iter_datamodels(resdir):
print("compile datamodel", src, "->", dest)
self.mkpath(os.path.dirname(dest))
momc(src, dest)
def iter_mappingmodels(self, resdir):
for (path, files) in (normalize_data_file(fn) for fn in (self.mappingmodels or ())):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
basefn, ext = os.path.splitext(fn)
if ext != '.xcmappingmodel':
basefn = fn
fn += '.xcmappingmodel'
destfn = os.path.basename(basefn) + '.cdm'
yield fn, os.path.join(resdir, path, destfn)
def compile_mappingmodels(self, resdir):
for src, dest in self.iter_mappingmodels(resdir):
self.mkpath(os.path.dirname(dest))
mapc(src, dest)
def iter_extra_plugins(self):
for item in self.include_plugins:
if isinstance(item, (list, tuple)):
subdir, path = item
else:
ext = os.path.splitext(item)[1]
try:
subdir = PLUGIN_SUFFIXES[ext]
path = item
except KeyError:
raise DistutilsOptionError("Cannot determine subdirectory for plugin %s"%(item,))
yield path, os.path.join(subdir, os.path.basename(path))
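# e.g. include_plugins=['MyGen.qlgenerator'] (hypothetical name) yields
# ('MyGen.qlgenerator', 'QuickLook/MyGen.qlgenerator').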
def iter_data_files(self):
dist = self.distribution
allres = chain(getattr(dist, 'data_files', ()) or (), self.resources)
for (path, files) in (normalize_data_file(fn) for fn in allres):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
yield fn, os.path.join(path, os.path.basename(fn))
def collect_scripts(self):
# these contains file names
scripts = set()
for target in self.targets:
scripts.add(target.script)
scripts.update([
k for k in target.prescripts if isinstance(k, basestring)
])
if hasattr(target, 'extra_scripts'):
scripts.update(target.extra_scripts)
scripts.update(self.extra_scripts)
return scripts
def get_plist_options(self):
result = dict(
PyOptions=dict(
use_pythonpath=bool(self.use_pythonpath),
site_packages=bool(self.site_packages),
alias=bool(self.alias),
argv_emulation=bool(self.argv_emulation),
emulate_shell_environment=bool(self.emulate_shell_environment),
no_chdir=bool(self.no_chdir),
prefer_ppc=self.prefer_ppc,
verbose=self.verbose_interpreter,
use_faulthandler=self.use_faulthandler,
),
)
if self.optimize:
result['PyOptions']['optimize'] = self.optimize
return result
def initialize_plist(self):
plist = self.get_default_plist()
for target in self.targets:
plist.update(getattr(target, 'plist', {}))
plist.update(self.plist)
plist.update(self.get_plist_options())
if self.iconfile:
iconfile = self.iconfile
if not os.path.exists(iconfile):
iconfile = iconfile + '.icns'
if not os.path.exists(iconfile):
raise DistutilsOptionError("icon file must exist: %r"
% (self.iconfile,))
self.resources.append(iconfile)
plist['CFBundleIconFile'] = os.path.basename(iconfile)
if self.prefer_ppc:
plist['LSPrefersPPC'] = True
self.plist = plist
return plist
def run_alias(self):
self.app_files = []
for target in self.targets:
extra_scripts = list(self.extra_scripts)
if hasattr(target, 'extra_scripts'):
extra_scripts.extend(target.extra_scripts)
dst = self.build_alias_executable(target, target.script, extra_scripts)
self.app_files.append(dst)
for fn in extra_scripts:
if fn.endswith('.py'):
fn = fn[:-3]
elif fn.endswith('.pyw'):
fn = fn[:-4]
src_fn = script_executable(arch=self.arch, secondary=True)
tgt_fn = os.path.join(target.appdir, 'Contents', 'MacOS', os.path.basename(fn))
mergecopy(src_fn, tgt_fn)
make_exec(tgt_fn)
def collect_recipedict(self):
return dict(iterRecipes())
def get_modulefinder(self):
if self.debug_modulegraph:
debug = 4
else:
debug = 0
return find_modules(
scripts=self.collect_scripts(),
includes=self.includes,
packages=self.packages,
excludes=self.excludes,
debug=debug,
)
def collect_filters(self):
return [has_filename_filter] + list(self.filters)
def process_recipes(self, mf, filters, flatpackages, loader_files):
rdict = self.collect_recipedict()
while True:
for name, check in rdict.items():
rval = check(self, mf)
if rval is None:
continue
# we can pull this off so long as we stop the iter
del rdict[name]
print('*** using recipe: %s ***' % (name,))
if rval.get('packages'):
self.packages.update(rval['packages'])
find_needed_modules(mf, packages=rval['packages'])
for pkg in rval.get('flatpackages', ()):
if isinstance(pkg, basestring):
pkg = (os.path.basename(pkg), pkg)
flatpackages[pkg[0]] = pkg[1]
filters.extend(rval.get('filters', ()))
loader_files.extend(rval.get('loader_files', ()))
newbootstraps = list(map(self.get_bootstrap,
rval.get('prescripts', ())))
if rval.get('includes'):
find_needed_modules(mf, includes=rval['includes'])
if rval.get('resources'):
self.resources.extend(rval['resources'])
for fn in newbootstraps:
if isinstance(fn, basestring):
mf.run_script(fn)
for target in self.targets:
target.prescripts.extend(newbootstraps)
break
else:
break
def _run(self):
try:
if self.alias:
self.run_alias()
else:
self.run_normal()
except:
raise
# XXX - remove when not debugging
# distutils sucks
import pdb, sys, traceback
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
print("Done!")
def filter_dependencies(self, mf, filters):
print("*** filtering dependencies ***")
nodes_seen, nodes_removed, nodes_orphaned = mf.filterStack(filters)
print('%d total' % (nodes_seen,))
print('%d filtered' % (nodes_removed,))
print('%d orphaned' % (nodes_orphaned,))
print('%d remaining' % (nodes_seen - nodes_removed,))
def get_appname(self):
return self.plist['CFBundleName']
def build_xref(self, mf, flatpackages):
for target in self.targets:
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
dgraph = os.path.join(appdir, appname + '.html')
print("*** creating dependency html: %s ***"
% (os.path.basename(dgraph),))
with open(dgraph, 'w') as fp:
mf.create_xref(fp)
def build_graph(self, mf, flatpackages):
for target in self.targets:
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
dgraph = os.path.join(appdir, appname + '.dot')
print("*** creating dependency graph: %s ***"
% (os.path.basename(dgraph),))
with open(dgraph, 'w') as fp:
mf.graphreport(fp, flatpackages=flatpackages)
def finalize_modulefinder(self, mf):
for item in mf.flatten():
if isinstance(item, Package) and item.filename == '-':
if sys.version_info[:2] <= (3,3):
fn = os.path.join(self.temp_dir, 'empty_package', '__init__.py')
if not os.path.exists(fn):
dn = os.path.dirname(fn)
if not os.path.exists(dn):
os.makedirs(dn)
with open(fn, 'w') as fp:
pass
item.filename = fn
py_files, extensions = parse_mf_results(mf)
# Remove all top-level scripts from the list of python files,
# those get treated differently.
py_files = [ item for item in py_files if not isinstance(item, Script) ]
extensions = list(extensions)
return py_files, extensions
def collect_packagedirs(self):
return list(filter(os.path.exists, [
os.path.join(os.path.realpath(self.get_bootstrap(pkg)), '')
for pkg in self.packages
]))
def run_normal(self):
mf = self.get_modulefinder()
filters = self.collect_filters()
flatpackages = {}
loader_files = []
self.process_recipes(mf, filters, flatpackages, loader_files)
if self.debug_modulegraph:
import pdb
pdb.Pdb().set_trace()
self.filter_dependencies(mf, filters)
if self.graph:
self.build_graph(mf, flatpackages)
if self.xref:
self.build_xref(mf, flatpackages)
py_files, extensions = self.finalize_modulefinder(mf)
pkgdirs = self.collect_packagedirs()
self.create_binaries(py_files, pkgdirs, extensions, loader_files)
missing = []
syntax_error = []
invalid_bytecode = []
for module in mf.nodes():
if isinstance(module, modulegraph.MissingModule):
if module.identifier != '__main__':
missing.append(module)
elif isinstance(module, modulegraph.InvalidSourceModule):
syntax_error.append(module)
elif hasattr(modulegraph, 'InvalidCompiledModule') and isinstance(module, modulegraph.InvalidCompiledModule):
invalid_bytecode.append(module)
if missing:
missing_unconditional = collections.defaultdict(set)
missing_fromimport = collections.defaultdict(set)
missing_fromimport_conditional = collections.defaultdict(set)
missing_conditional = collections.defaultdict(set)
for module in sorted(missing):
for m in mf.getReferers(module):
if m is None: continue # XXX
try:
ed = mf.edgeData(m, module)
except KeyError:
ed = None
if hasattr(modulegraph, 'DependencyInfo') and isinstance(ed, modulegraph.DependencyInfo):
c = missing_unconditional
if ed.conditional or ed.function:
if ed.fromlist:
c = missing_fromimport_conditional
else:
c = missing_conditional
elif ed.fromlist:
c = missing_fromimport
c[module.identifier].add(m.identifier)
else:
missing_unconditional[module.identifier].add(m.identifier)
if missing_unconditional:
log.warn("Modules not found (unconditional imports):")
for m in sorted(missing_unconditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_unconditional[m]))))
log.warn("")
if missing_conditional and not self.no_report_missing_conditional_import:
log.warn("Modules not found (conditional imports):")
for m in sorted(missing_conditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_conditional[m]))))
log.warn("")
if self.report_missing_from_imports and (
missing_fromimport or (
not self.no_report_missing_conditional_import and missing_fromimport_conditional)):
log.warn("Modules not found ('from ... import y'):")
for m in sorted(missing_fromimport):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport[m]))))
if not self.no_report_missing_conditional_import and missing_fromimport_conditional:
log.warn("")
log.warn("Conditional:")
for m in sorted(missing_fromimport_conditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport_conditional[m]))))
log.warn("")
if syntax_error:
log.warn("Modules with syntax errors:")
for module in sorted(syntax_error):
log.warn(" * %s"%(module.identifier))
log.warn("")
if invalid_bytecode:
log.warn("Modules with invalid bytecode:")
for module in sorted(invalid_bytecode):
log.warn(" * %s"%(module.identifier))
log.warn("")
def create_directories(self):
bdist_base = self.bdist_base
if self.semi_standalone:
self.bdist_dir = os.path.join(bdist_base,
'python%s-semi_standalone' % (sys.version[:3],), 'app')
else:
self.bdist_dir = os.path.join(bdist_base,
'python%s-standalone' % (sys.version[:3],), 'app')
if os.path.exists(self.bdist_dir):
shutil.rmtree(self.bdist_dir)
self.collect_dir = os.path.abspath(
os.path.join(self.bdist_dir, "collect"))
self.mkpath(self.collect_dir)
self.temp_dir = os.path.abspath(os.path.join(self.bdist_dir, "temp"))
self.mkpath(self.temp_dir)
self.dist_dir = os.path.abspath(self.dist_dir)
self.mkpath(self.dist_dir)
self.lib_dir = os.path.join(self.bdist_dir,
os.path.dirname(get_zipfile(self.distribution, self.semi_standalone)))
self.mkpath(self.lib_dir)
self.ext_dir = os.path.join(self.lib_dir, 'lib-dynload')
self.mkpath(self.ext_dir)
self.framework_dir = os.path.join(self.bdist_dir, 'Frameworks')
self.mkpath(self.framework_dir)
def create_binaries(self, py_files, pkgdirs, extensions, loader_files):
print("*** create binaries ***")
dist = self.distribution
pkgexts = []
copyexts = []
extmap = {}
def packagefilter(mod, pkgdirs=pkgdirs):
fn = os.path.realpath(getattr(mod, 'filename', None))
if fn is None:
return None
for pkgdir in pkgdirs:
if fn.startswith(pkgdir):
return None
return fn
if pkgdirs:
py_files = list(filter(packagefilter, py_files))
for ext in extensions:
fn = packagefilter(ext)
if fn is None:
fn = os.path.realpath(getattr(ext, 'filename', None))
pkgexts.append(ext)
else:
if '.' in ext.identifier:
py_files.append(self.create_loader(ext))
copyexts.append(ext)
extmap[fn] = ext
# byte compile the python modules into the target directory
print("*** byte compile python files ***")
byte_compile(py_files,
target_dir=self.collect_dir,
optimize=self.optimize,
force=self.force,
verbose=self.verbose,
dry_run=self.dry_run)
for item in py_files:
if not isinstance(item, Package): continue
self.copy_package_data(item, self.collect_dir)
self.lib_files = []
self.app_files = []
# create the shared zipfile containing all Python modules
archive_name = os.path.join(self.lib_dir,
get_zipfile(dist, self.semi_standalone))
for path, files in loader_files:
dest = os.path.join(self.collect_dir, path)
self.mkpath(dest)
for fn in files:
destfn = os.path.join(dest, os.path.basename(fn))
if os.path.isdir(fn):
self.copy_tree(fn, destfn, preserve_symlinks=False)
else:
self.copy_file(fn, destfn)
arcname = self.make_lib_archive(archive_name,
base_dir=self.collect_dir, verbose=self.verbose,
dry_run=self.dry_run)
# XXX: this doesn't work with python3
#self.lib_files.append(arcname)
# build the executables
for target in self.targets:
extra_scripts = list(self.extra_scripts)
if hasattr(target, 'extra_scripts'):
extra_scripts.extend(target.extra_scripts)
dst = self.build_executable(
target, arcname, pkgexts, copyexts, target.script, extra_scripts)
exp = os.path.join(dst, 'Contents', 'MacOS')
execdst = os.path.join(exp, 'python')
if self.semi_standalone:
self.symlink(sys.executable, execdst)
else:
if os.path.exists(os.path.join(sys.prefix, ".Python")):
fn = os.path.join(sys.prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
rest_path = os.path.normpath(sys.executable)[len(os.path.normpath(sys.prefix))+1:]
if rest_path.startswith('.'):
rest_path = rest_path[1:]
if PYTHONFRAMEWORK:
# When we're using a python framework, bin/python refers to a stub
# executable that we don't want to use; we need the executable in
# Resources/Python.app
dpath = os.path.join(prefix, 'Resources', 'Python.app', 'Contents', 'MacOS')
self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst)
else:
self.copy_file(os.path.join(prefix, rest_path), execdst)
else:
if PYTHONFRAMEWORK:
# When we're using a python framework, bin/python refers to a stub
# executable that we don't want to use; we need the executable in
# Resources/Python.app
dpath = os.path.join(sys.prefix, 'Resources', 'Python.app', 'Contents', 'MacOS')
self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst)
else:
self.copy_file(sys.executable, execdst)
if not self.debug_skip_macholib:
if self.force_system_tk:
print("force system tk")
resdir = os.path.join(dst, 'Contents', 'Resources')
pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2]))
ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir))
tkinter_path = os.path.join(ext_dir, '_tkinter.so')
if os.path.exists(tkinter_path):
rewrite_tkinter_load_commands(tkinter_path)
else:
print("tkinter not found at", tkinter_path)
mm = PythonStandalone(self, dst, executable_path=exp)
dylib, runtime = self.get_runtime()
if self.semi_standalone:
mm.excludes.append(runtime)
else:
mm.mm.run_file(runtime)
for exclude in self.dylib_excludes:
info = macholib.dyld.framework_info(exclude)
if info is not None:
exclude = os.path.join(
info['location'], info['shortname'] + '.framework')
mm.excludes.append(exclude)
for fmwk in self.frameworks:
mm.mm.run_file(fmwk)
platfiles = mm.run()
if self.strip:
platfiles = self.strip_dsym(platfiles)
self.strip_files(platfiles)
self.app_files.append(dst)
def copy_package_data(self, package, target_dir):
"""
Copy any package data in a python package into the target_dir.
This is a bit of a hack, it would be better to identify python eggs
and copy those in whole.
"""
exts = [ i[0] for i in imp.get_suffixes() ]
exts.append('.py')
exts.append('.pyc')
exts.append('.pyo')
def datafilter(item):
for e in exts:
if item.endswith(e):
return False
return True
target_dir = os.path.join(target_dir, *(package.identifier.split('.')))
for dname in package.packagepath:
filenames = list(filter(datafilter, zipio.listdir(dname)))
for fname in filenames:
if fname in ('.svn', 'CVS', '.hg', '.git'):
# Scrub revision manager junk
continue
if fname in ('__pycache__',):
# Ignore PEP 3147 bytecode cache
continue
if fname.startswith('.') and fname.endswith('.swp'):
# Ignore vim(1) temporary files
continue
if fname.endswith('~') or fname.endswith('.orig'):
# Ignore backup files for common tools (hg, emacs, ...)
continue
pth = os.path.join(dname, fname)
# Check if we have found a package, exclude those
if zipio.isdir(pth):
# XXX: the 'and not' part is wrong, need to fix zipio.isdir
for p in zipio.listdir(pth):
if p.startswith('__init__.') and p[8:] in exts:
break
else:
if os.path.isfile(pth):
# Avoid extracting a resource file that happens
# to be zipfile.
# XXX: Need API in zipio for nicer code.
copy_file(pth, os.path.join(target_dir, fname))
else:
copy_tree(pth, os.path.join(target_dir, fname))
continue
elif zipio.isdir(pth) and (
zipio.isfile(os.path.join(pth, '__init__.py'))
or zipio.isfile(os.path.join(pth, '__init__.pyc'))
or zipio.isfile(os.path.join(pth, '__init__.pyo'))):
# Subdirectory is a python package, these will get included later on
# when the subpackage itself is included, ignore for now.
pass
else:
copy_file(pth, os.path.join(target_dir, fname))
def strip_dsym(self, platfiles):
""" Remove .dSYM directories in the bundled application """
#
# .dSYM directories contain detached debugging information and
# should be completely removed when the "strip" option is specified.
#
if self.dry_run:
return platfiles
for dirpath, dnames, fnames in os.walk(self.appdir):
for nm in list(dnames):
if nm.endswith('.dSYM'):
print("removing debug info: %s/%s"%(dirpath, nm))
shutil.rmtree(os.path.join(dirpath, nm))
dnames.remove(nm)
return [file for file in platfiles if '.dSYM' not in file]
def strip_files(self, files):
unstripped = 0
stripfiles = []
for fn in files:
unstripped += os.stat(fn).st_size
stripfiles.append(fn)
log.info('stripping %s', os.path.basename(fn))
strip_files(stripfiles, dry_run=self.dry_run, verbose=self.verbose)
stripped = 0
for fn in stripfiles:
stripped += os.stat(fn).st_size
log.info('stripping saved %d bytes (%d / %d)',
unstripped - stripped, stripped, unstripped)
def copy_dylib(self, src, dst):
# will be copied from the framework?
if src != sys.executable:
force, self.force = self.force, True
self.copy_file(src, dst)
self.force = force
return dst
def copy_versioned_framework(self, info, dst):
# XXX - Boy is this ugly, but it makes sense because the developer
# could have both Python 2.3 and 2.4, or Tk 8.4 and 8.5, etc.
# Saves a good deal of space, and I'm pretty sure this ugly
# hack is correct in the general case.
version = info['version']
if version is None:
return self.raw_copy_framework(info, dst)
short = info['shortname'] + '.framework'
infile = os.path.join(info['location'], short)
outfile = os.path.join(dst, short)
vsplit = os.path.join(infile, 'Versions').split(os.sep)
def condition(src, vsplit=vsplit, version=version):
srcsplit = src.split(os.sep)
if (
len(srcsplit) > len(vsplit) and
srcsplit[:len(vsplit)] == vsplit and
srcsplit[len(vsplit)] != version and
not os.path.islink(src)
):
return False
# Skip Headers, .svn, and CVS dirs
return framework_copy_condition(src)
return self.copy_tree(infile, outfile,
preserve_symlinks=True, condition=condition)
def copy_framework(self, info, dst):
force, self.force = self.force, True
if info['shortname'] == PYTHONFRAMEWORK:
self.copy_python_framework(info, dst)
else:
self.copy_versioned_framework(info, dst)
self.force = force
return os.path.join(dst, info['name'])
def raw_copy_framework(self, info, dst):
short = info['shortname'] + '.framework'
infile = os.path.join(info['location'], short)
outfile = os.path.join(dst, short)
return self.copy_tree(infile, outfile,
preserve_symlinks=True, condition=framework_copy_condition)
def copy_python_framework(self, info, dst):
# XXX - In this particular case we know exactly what we can
# get away with.. should this be extended to the general
# case? Per-framework recipes?
includedir = get_config_var('CONFINCLUDEPY')
configdir = get_config_var('LIBPL')
if includedir is None:
includedir = 'python%d.%d'%(sys.version_info[:2])
else:
includedir = os.path.basename(includedir)
if configdir is None:
configdir = 'config'
else:
configdir = os.path.basename(configdir)
indir = os.path.dirname(os.path.join(info['location'], info['name']))
outdir = os.path.dirname(os.path.join(dst, info['name']))
self.mkpath(os.path.join(outdir, 'Resources'))
pydir = 'python%s.%s'%(sys.version_info[:2])
# Create a symlink "for Python.frameworks/Versions/Current". This
# is required for the Mac App-store.
os.symlink(
os.path.basename(outdir),
os.path.join(os.path.dirname(outdir), "Current"))
# Likewise for two links in the root of the framework:
os.symlink(
'Versions/Current/Resources',
os.path.join(os.path.dirname(os.path.dirname(outdir)), 'Resources'))
os.symlink(
os.path.join('Versions/Current', PYTHONFRAMEWORK),
os.path.join(os.path.dirname(os.path.dirname(outdir)), PYTHONFRAMEWORK))
# Experiment for issue 57
if not os.path.exists(os.path.join(indir, 'include')):
alt = os.path.join(indir, 'Versions/Current')
if os.path.exists(os.path.join(alt, 'include')):
indir = alt
# distutils looks for some files relative to sys.executable, which
# means they have to be in the framework...
self.mkpath(os.path.join(outdir, 'include'))
self.mkpath(os.path.join(outdir, 'include', includedir))
self.mkpath(os.path.join(outdir, 'lib'))
self.mkpath(os.path.join(outdir, 'lib', pydir))
self.mkpath(os.path.join(outdir, 'lib', pydir, configdir))
fmwkfiles = [
os.path.basename(info['name']),
'Resources/Info.plist',
'include/%s/pyconfig.h'%(includedir),
]
if '_sysconfigdata' not in sys.modules:
fmwkfiles.append(
'lib/%s/%s/Makefile'%(pydir, configdir)
)
for fn in fmwkfiles:
self.copy_file(
os.path.join(indir, fn),
os.path.join(outdir, fn))
def fixup_distribution(self):
dist = self.distribution
# Trying to obtain app and plugin from dist for backward compatibility
# reasons.
app = dist.app
plugin = dist.plugin
# If we can get suitable values from self.app and self.plugin, we prefer
# them.
if self.app is not None or self.plugin is not None:
app = self.app
plugin = self.plugin
# Convert our args into target objects.
dist.app = FixupTargets(app, "script")
dist.plugin = FixupTargets(plugin, "script")
if dist.app and dist.plugin:
# XXX - support apps and plugins?
raise DistutilsOptionError(
"You must specify either app or plugin, not both")
elif dist.app:
self.style = 'app'
self.targets = dist.app
elif dist.plugin:
self.style = 'plugin'
self.targets = dist.plugin
else:
raise DistutilsOptionError(
"You must specify either app or plugin")
if len(self.targets) != 1:
# XXX - support multiple targets?
raise DistutilsOptionError(
"Multiple targets not currently supported")
if not self.extension:
self.extension = '.' + self.style
# make sure all targets use the same directory, this is
# also the directory where the pythonXX.dylib must reside
paths = set()
for target in self.targets:
paths.add(os.path.dirname(target.get_dest_base()))
if len(paths) > 1:
raise DistutilsOptionError(
"all targets must use the same directory: %s" %
([p for p in paths],))
if paths:
app_dir = paths.pop() # the only element
if os.path.isabs(app_dir):
raise DistutilsOptionError(
"app directory must be relative: %s" % (app_dir,))
self.app_dir = os.path.join(self.dist_dir, app_dir)
self.mkpath(self.app_dir)
else:
# Do we allow to specify no targets?
# We can at least build a zipfile...
self.app_dir = self.lib_dir
def initialize_prescripts(self):
prescripts = []
prescripts.append('reset_sys_path')
if self.semi_standalone:
prescripts.append('semi_standalone_path')
if 0 and sys.version_info[:2] >= (3, 2) and not self.alias:
# Python 3.2 or later requires a more complicated
# bootstrap
prescripts.append('import_encodings')
if os.path.exists(os.path.join(sys.prefix, ".Python")):
# We're in a virtualenv, which means sys.path
# will be broken in alias builds unless we fix
# it.
if self.alias or self.semi_standalone:
prescripts.append("virtualenv")
prescripts.append(StringIO('_fixup_virtualenv(%r)' % (sys.real_prefix,)))
if self.site_packages or self.alias:
import site
global_site_packages = not os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt'))
prescripts.append('virtualenv_site_packages')
prescripts.append(StringIO('_site_packages(%r, %r, %d)' % (
sys.prefix, sys.real_prefix, global_site_packages)))
elif self.site_packages or self.alias:
prescripts.append('site_packages')
if is_system():
prescripts.append('system_path_extras')
#if self.style == 'app':
# prescripts.append('setup_pkgresource')
included_subpkg = [pkg for pkg in self.packages if '.' in pkg]
if included_subpkg:
prescripts.append('setup_included_subpackages')
prescripts.append(StringIO('_path_hooks = %r'%(
included_subpkg)))
if self.emulate_shell_environment:
prescripts.append('emulate_shell_environment')
if self.argv_emulation and self.style == 'app':
prescripts.append('argv_emulation')
if 'CFBundleDocumentTypes' not in self.plist:
self.plist['CFBundleDocumentTypes'] = [
{
'CFBundleTypeOSTypes' : [
'****',
'fold',
'disk',
],
'CFBundleTypeRole': 'Viewer'
},
]
if self.argv_inject is not None:
prescripts.append('argv_inject')
prescripts.append(
StringIO('_argv_inject(%r)\n' % (self.argv_inject,)))
if self.style == 'app' and not self.no_chdir:
prescripts.append('chdir_resource')
if not self.alias:
prescripts.append('disable_linecache')
prescripts.append('boot_' + self.style)
else:
# Add ctypes prescript because it is needed to
# find libraries in the bundle, but we don't run
# recipes and hence the ctypes recipe is not used
# for alias builds.
prescripts.append('ctypes_setup')
if self.additional_paths:
prescripts.append('path_inject')
prescripts.append(
StringIO('_path_inject(%r)\n' % (self.additional_paths,)))
prescripts.append('boot_alias' + self.style)
newprescripts = []
for s in prescripts:
if isinstance(s, basestring):
newprescripts.append(
self.get_bootstrap('py2app.bootstrap.' + s))
else:
newprescripts.append(s)
for target in self.targets:
prescripts = getattr(target, 'prescripts', [])
target.prescripts = newprescripts + prescripts
def get_bootstrap(self, bootstrap):
if isinstance(bootstrap, basestring):
if not os.path.exists(bootstrap):
bootstrap = imp_find_module(bootstrap)[1]
return bootstrap
def get_bootstrap_data(self, bootstrap):
bootstrap = self.get_bootstrap(bootstrap)
if not isinstance(bootstrap, basestring):
return bootstrap.getvalue()
else:
with open(bootstrap, 'rU') as fp:
return fp.read()
def create_pluginbundle(self, target, script, use_runtime_preference=True):
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
print("*** creating plugin bundle: %s ***" % (appname,))
if self.runtime_preferences and use_runtime_preference:
self.plist.setdefault(
'PyRuntimeLocations', self.runtime_preferences)
appdir, plist = create_pluginbundle(
appdir,
appname,
plist=self.plist,
extension=self.extension,
arch=self.arch,
)
appdir = fsencoding(appdir)
resdir = os.path.join(appdir, 'Contents', 'Resources')
return appdir, resdir, plist
def create_appbundle(self, target, script, use_runtime_preference=True):
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
print("*** creating application bundle: %s ***" % (appname,))
if self.runtime_preferences and use_runtime_preference:
self.plist.setdefault(
'PyRuntimeLocations', self.runtime_preferences)
pythonInfo = self.plist.setdefault('PythonInfoDict', {})
pythonInfo.setdefault('py2app', {}).update(dict(
alias=bool(self.alias),
))
appdir, plist = create_appbundle(
appdir,
appname,
plist=self.plist,
extension=self.extension,
arch=self.arch,
)
appdir = fsencoding(appdir)
resdir = os.path.join(appdir, 'Contents', 'Resources')
return appdir, resdir, plist
def create_bundle(self, target, script, use_runtime_preference=True):
fn = getattr(self, 'create_%sbundle' % (self.style,))
return fn(
target,
script,
use_runtime_preference=use_runtime_preference
)
def iter_frameworks(self):
for fn in self.frameworks:
fmwk = macholib.dyld.framework_info(fn)
if fmwk is None:
yield fn
else:
basename = fmwk['shortname'] + '.framework'
yield os.path.join(fmwk['location'], basename)
def build_alias_executable(self, target, script, extra_scripts):
# Build an alias executable for the target
appdir, resdir, plist = self.create_bundle(target, script)
# symlink python executable
execdst = os.path.join(appdir, 'Contents', 'MacOS', 'python')
prefixPathExecutable = os.path.join(sys.prefix, 'bin', 'python')
if os.path.exists(prefixPathExecutable):
pyExecutable = prefixPathExecutable
else:
pyExecutable = sys.executable
self.symlink(pyExecutable, execdst)
# make PYTHONHOME
pyhome = os.path.join(resdir, 'lib', 'python' + sys.version[:3])
realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3])
makedirs(pyhome)
if self.optimize:
self.symlink('../../site.pyo', os.path.join(pyhome, 'site.pyo'))
else:
self.symlink('../../site.pyc', os.path.join(pyhome, 'site.pyc'))
self.symlink(
os.path.join(realhome, 'config'),
os.path.join(pyhome, 'config'))
# symlink data files
# XXX: fixme: need to integrate automatic data conversion
for src, dest in self.iter_data_files():
dest = os.path.join(resdir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
try:
copy_resource(src, dest, dry_run=self.dry_run, symlink=1)
except:
import traceback
traceback.print_exc()
raise
plugindir = os.path.join(appdir, 'Contents', 'Library')
for src, dest in self.iter_extra_plugins():
dest = os.path.join(plugindir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
try:
copy_resource(src, dest, dry_run=self.dry_run)
except:
import traceback
traceback.print_exc()
raise
# symlink frameworks
for src in self.iter_frameworks():
dest = os.path.join(
appdir, 'Contents', 'Frameworks', os.path.basename(src))
if src == dest:
continue
makedirs(os.path.dirname(dest))
self.symlink(os.path.abspath(src), dest)
self.compile_datamodels(resdir)
self.compile_mappingmodels(resdir)
bootfn = '__boot__'
bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w')
for fn in target.prescripts:
bootfile.write(self.get_bootstrap_data(fn))
bootfile.write('\n\n')
bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.realpath(script),))
script_map = {}
for fn in extra_scripts:
tgt = os.path.realpath(fn)
fn = os.path.basename(fn)
if fn.endswith('.py'):
script_map[fn[:-3]] = tgt
elif fn.endswith('.pyw'):
script_map[fn[:-4]] = tgt
else:
script_map[fn] = tgt
bootfile.write("SCRIPT_MAP=%r\n"%(script_map,))
bootfile.write('try:\n')
bootfile.write(' _run()\n')
bootfile.write('except KeyboardInterrupt:\n')
bootfile.write(' pass\n')
bootfile.close()
target.appdir = appdir
return appdir
def build_executable(self, target, arcname, pkgexts, copyexts, script, extra_scripts):
# Build an executable for the target
appdir, resdir, plist = self.create_bundle(target, script)
self.appdir = appdir
self.resdir = resdir
self.plist = plist
for fn in extra_scripts:
if fn.endswith('.py'):
fn = fn[:-3]
elif fn.endswith('.pyw'):
fn = fn[:-4]
src_fn = script_executable(arch=self.arch, secondary=True)
tgt_fn = os.path.join(self.appdir, 'Contents', 'MacOS', os.path.basename(fn))
mergecopy(src_fn, tgt_fn)
make_exec(tgt_fn)
site_path = os.path.join(resdir, 'site.py')
byte_compile([
SourceModule('site', site_path),
],
target_dir=resdir,
optimize=self.optimize,
force=self.force,
verbose=self.verbose,
dry_run=self.dry_run)
if not self.dry_run:
os.unlink(site_path)
includedir = get_config_var('CONFINCLUDEPY')
configdir = get_config_var('LIBPL')
if includedir is None:
includedir = 'python%d.%d'%(sys.version_info[:2])
else:
includedir = os.path.basename(includedir)
if configdir is None:
configdir = 'config'
else:
configdir = os.path.basename(configdir)
self.compile_datamodels(resdir)
self.compile_mappingmodels(resdir)
bootfn = '__boot__'
bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w')
for fn in target.prescripts:
bootfile.write(self.get_bootstrap_data(fn))
bootfile.write('\n\n')
bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.basename(script),))
script_map = {}
for fn in extra_scripts:
fn = os.path.basename(fn)
if fn.endswith('.py'):
script_map[fn[:-3]] = fn
elif fn.endswith('.pyw'):
script_map[fn[:-4]] = fn
else:
script_map[fn] = fn
bootfile.write("SCRIPT_MAP=%r\n"%(script_map,))
bootfile.write('_run()\n')
bootfile.close()
self.copy_file(script, resdir)
for fn in extra_scripts:
self.copy_file(fn, resdir)
pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2]))
if sys.version_info[0] == 2 or self.semi_standalone:
arcdir = os.path.join(resdir, 'lib', 'python' + sys.version[:3])
else:
arcdir = os.path.join(resdir, 'lib')
realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3])
self.mkpath(pydir)
# The site.py file needs to be in two locations:
# 1) in lib/pythonX.Y, to be found during normal startup and
# by the 'python' executable
# 2) in the resources directory next to the script for
# semi-standalone builds (the lib/pythonX.Y directory is too
# late on sys.path to be found in that case).
#
if self.optimize:
self.symlink('../../site.pyo', os.path.join(pydir, 'site.pyo'))
else:
self.symlink('../../site.pyc', os.path.join(pydir, 'site.pyc'))
cfgdir = os.path.join(pydir, configdir)
realcfg = os.path.join(realhome, configdir)
real_include = os.path.join(sys.prefix, 'include')
if self.semi_standalone:
self.symlink(realcfg, cfgdir)
self.symlink(real_include, os.path.join(resdir, 'include'))
else:
self.mkpath(cfgdir)
if '_sysconfigdata' not in sys.modules:
# Recent enough versions of Python 2.7 and 3.x have
# an _sysconfigdata module and don't need the Makefile
# to provide the sysconfig data interface. Don't copy
# them.
for fn in 'Makefile', 'Setup', 'Setup.local', 'Setup.config':
rfn = os.path.join(realcfg, fn)
if os.path.exists(rfn):
self.copy_file(rfn, os.path.join(cfgdir, fn))
inc_dir = os.path.join(resdir, 'include', includedir)
self.mkpath(inc_dir)
self.copy_file(get_config_h_filename(),
os.path.join(inc_dir, 'pyconfig.h'))
self.copy_file(arcname, arcdir)
if sys.version_info[0] != 2:
import zlib
self.copy_file(zlib.__file__, os.path.dirname(arcdir))
ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir))
self.copy_tree(self.ext_dir, ext_dir, preserve_symlinks=True)
self.copy_tree(self.framework_dir,
os.path.join(appdir, 'Contents', 'Frameworks'),
preserve_symlinks=True)
for pkg_name in self.packages:
pkg = self.get_bootstrap(pkg_name)
if self.semi_standalone:
# For semi-standalone builds don't copy packages
# from the stdlib into the app bundle, even when
# they are mentioned in self.packages.
p = Package(pkg_name, pkg)
if not not_stdlib_filter(p):
continue
dst = os.path.join(pydir, pkg_name)
self.mkpath(dst)
self.copy_tree(pkg, dst)
# FIXME: The python files should be bytecompiled
# here (see issue 101)
for copyext in copyexts:
fn = os.path.join(ext_dir,
(copyext.identifier.replace('.', os.sep) +
os.path.splitext(copyext.filename)[1])
)
self.mkpath(os.path.dirname(fn))
copy_file(copyext.filename, fn, dry_run=self.dry_run)
for src, dest in self.iter_data_files():
dest = os.path.join(resdir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
copy_resource(src, dest, dry_run=self.dry_run)
plugindir = os.path.join(appdir, 'Contents', 'Library')
for src, dest in self.iter_extra_plugins():
dest = os.path.join(plugindir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
copy_resource(src, dest, dry_run=self.dry_run)
target.appdir = appdir
return appdir
def create_loader(self, item):
# Hm, how to avoid needless recreation of this file?
slashname = item.identifier.replace('.', os.sep)
pathname = os.path.join(self.temp_dir, "%s.py" % slashname)
if os.path.exists(pathname):
if self.verbose:
print("skipping python loader for extension %r"
% (item.identifier,))
else:
self.mkpath(os.path.dirname(pathname))
# and what about dry_run?
if self.verbose:
print("creating python loader for extension %r"
% (item.identifier,))
fname = slashname + os.path.splitext(item.filename)[1]
source = make_loader(fname)
if not self.dry_run:
with open(pathname, "w") as fp:
fp.write(source)
else:
return
return SourceModule(item.identifier, pathname)
def make_lib_archive(self, zip_filename, base_dir, verbose=0,
dry_run=0):
# Like distutils "make_archive", except we can specify the
# compression to use - default is ZIP_STORED to keep the
# runtime performance up.
# Also, we don't append '.zip' to the filename.
from distutils.dir_util import mkpath
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
if self.compressed:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
if not dry_run:
z = zipfile.ZipFile(zip_filename, "w",
compression=compression)
save_cwd = os.getcwd()
os.chdir(base_dir)
for dirpath, dirnames, filenames in os.walk('.'):
if filenames:
# Ensure that there are directory entries for
# all directories in the zipfile. This is a
# workaround for <http://bugs.python.org/issue14905>:
# zipimport won't consider 'pkg/foo.py' to be in
# namespace package 'pkg' unless there is an
# entry for the directory (or there is a
# pkg/__init__.py file as well)
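# For example (illustrative): a tree containing only pkg/foo.py gets
# both entries written,
#     pkg/
#     pkg/foo.py
# so that zipimport can treat 'pkg' as a package.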
z.write(dirpath, dirpath)
for fn in filenames:
path = os.path.normpath(os.path.join(dirpath, fn))
if os.path.isfile(path):
z.write(path, path)
os.chdir(save_cwd)
z.close()
return zip_filename
def copy_tree(self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0,
level=1, condition=None):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
This version doesn't bork on existing symlinks
"""
return copy_tree(
infile, outfile,
preserve_mode, preserve_times, preserve_symlinks,
not self.force,
dry_run=self.dry_run,
condition=condition)
| apache-2.0 |
boundlessgeo/PDAL | vendor/gtest-1.7.0/test/gtest_xml_test_utils.py | 1815 | 8876 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
Exceptions are any attribute named "time", which needs only be
convertible to a floating-point number and any attribute named
"type_param" which only has to be non-empty.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
actual_attributes = actual_node.attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
actual_node.tagName, expected_attributes.keys(),
actual_attributes.keys()))
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
'expected attribute %s not found in element %s' %
(expected_attr.name, actual_node.tagName))
self.assertEquals(
expected_attr.value, actual_attr.value,
' values of attribute %s in element %s differ: %s vs %s' %
(expected_attr.name, actual_node.tagName,
expected_attr.value, actual_attr.value))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
'number of child elements differ in element ' + actual_node.tagName)
for child_id, child in expected_children.iteritems():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
'testsuites': 'name',
'testsuite': 'name',
'testcase': 'name',
'failure': 'message',
}
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite> and <testcase> elements, the ID
is the value of their "name" attribute; for <failure> elements, it is
the value of the "message" attribute; CDATA sections and non-whitespace
text nodes are concatenated into a single CDATA section with ID
"detail". An exception is raised if any element other than the above
four is encountered, if two child elements with the same identifying
attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.assert_(child.tagName in self.identifying_attribute,
'Encountered unknown element <%s>' % child.tagName)
childID = child.getAttribute(self.identifying_attribute[child.tagName])
self.assert_(childID not in children)
children[childID] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if 'detail' not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children['detail'] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children['detail'].nodeValue += child.nodeValue
else:
self.fail('Encountered unexpected node type %d' % child.nodeType)
return children
def NormalizeXml(self, element):
"""
Normalizes Google Test's XML output to eliminate references to transient
information that may change from run to run.
* The "time" attribute of <testsuites>, <testsuite> and <testcase>
elements is replaced with a single asterisk, if it contains
only a decimal number (digits, optionally with a fractional part).
* The "timestamp" attribute of <testsuites> elements is replaced with a
single asterisk, if it contains a valid ISO8601 datetime value.
* The "type_param" attribute of <testcase> elements is replaced with a
single asterisk (if it is non-empty) as it is the type name returned
by the compiler and is platform dependent.
* The line info reported in the first line of the "message"
attribute and CDATA section of <failure> elements is replaced with the
file's basename and a single asterisk for the line number.
* The directory names in file paths are removed.
* The stack traces are removed.
"""
if element.tagName == 'testsuites':
timestamp = element.getAttributeNode('timestamp')
timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
'*', timestamp.value)
if element.tagName in ('testsuites', 'testsuite', 'testcase'):
time = element.getAttributeNode('time')
time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
type_param = element.getAttributeNode('type_param')
if type_param and type_param.value:
type_param.value = '*'
elif element.tagName == 'failure':
source_line_pat = r'^.*[/\\](.*:)\d+\n'
# Replaces the source line information with a normalized form.
message = element.getAttributeNode('message')
message.value = re.sub(source_line_pat, '\\1*\n', message.value)
for child in element.childNodes:
if child.nodeType == Node.CDATA_SECTION_NODE:
# Replaces the source line information with a normalized form.
cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
# Removes the actual stack trace.
child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
'', cdata)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.NormalizeXml(child)
| bsd-3-clause |
kamalx/edx-platform | lms/djangoapps/discussion_api/tests/test_api.py | 1 | 90651 | """
Tests for Discussion API internal interface
"""
from datetime import datetime, timedelta
import itertools
from urlparse import parse_qs, urlparse, urlunparse
from urllib import urlencode
import ddt
import httpretty
import mock
from pytz import UTC
from django.core.exceptions import ValidationError
from django.http import Http404
from django.test.client import RequestFactory
from rest_framework.exceptions import PermissionDenied
from opaque_keys.edx.locator import CourseLocator
from courseware.tests.factories import BetaTesterFactory, StaffFactory
from discussion_api.api import (
create_comment,
create_thread,
delete_comment,
delete_thread,
get_comment_list,
get_course,
get_course_topics,
get_thread_list,
update_comment,
update_thread,
)
from discussion_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_comment,
make_minimal_cs_thread,
)
from django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
Role,
)
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
def _remove_discussion_tab(course, user_id):
"""
Remove the discussion tab for the course.
user_id is passed to the modulestore as the editor of the module.
"""
course.tabs = [tab for tab in course.tabs if not tab.type == 'discussion']
modulestore().update_item(course, user_id)
@ddt.ddt
class GetCourseTest(UrlResetMixin, ModuleStoreTestCase):
"""Test for get_course"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTest, self).setUp()
self.course = CourseFactory.create(org="x", course="y", run="z")
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def test_nonexistent_course(self):
with self.assertRaises(Http404):
get_course(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(Http404):
get_course(self.request, self.course.id)
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
get_course(self.request, self.course.id)
def test_basic(self):
self.assertEqual(
get_course(self.request, self.course.id),
{
"id": unicode(self.course.id),
"blackouts": [],
"thread_list_url": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz",
"topics_url": "http://testserver/api/discussion/v1/course_topics/x/y/z",
}
)
def test_blackout(self):
# A variety of formats is accepted
self.course.discussion_blackouts = [
["2015-06-09T00:00:00Z", "6-10-15"],
[1433980800000, datetime(2015, 6, 12)],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(
result["blackouts"],
[
{"start": "2015-06-09T00:00:00+00:00", "end": "2015-06-10T00:00:00+00:00"},
{"start": "2015-06-11T00:00:00+00:00", "end": "2015-06-12T00:00:00+00:00"},
]
)
@ddt.data(None, "not a datetime", "2015", [])
def test_blackout_errors(self, bad_value):
self.course.discussion_blackouts = [
[bad_value, "2015-06-09T00:00:00Z"],
["2015-06-10T00:00:00Z", "2015-06-11T00:00:00Z"],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(result["blackouts"], [])
@mock.patch.dict("django.conf.settings.FEATURES", {"DISABLE_START_DATES": False})
class GetCourseTopicsTest(UrlResetMixin, ModuleStoreTestCase):
"""Test for get_course_topics"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTopicsTest, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.partition = UserPartition(
0,
"partition",
"Test Partition",
[Group(0, "Cohort A"), Group(1, "Cohort B")],
scheme_id="cohort"
)
self.course = CourseFactory.create(
org="x",
course="y",
run="z",
start=datetime.now(UTC),
discussion_topics={"Test Topic": {"id": "non-courseware-topic-id"}},
user_partitions=[self.partition],
cohort_config={"cohorted": True},
days_early_for_beta=3
)
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def make_discussion_module(self, topic_id, category, subcategory, **kwargs):
"""Build a discussion module in self.course"""
ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id=topic_id,
discussion_category=category,
discussion_target=subcategory,
**kwargs
)
def get_thread_list_url(self, topic_id_list):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = "http://testserver/api/discussion/v1/threads/"
query_list = [("course_id", unicode(self.course.id))] + [("topic_id", topic_id) for topic_id in topic_id_list]
return urlunparse(("", "", path, "", urlencode(query_list), ""))
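# Illustrative result (assuming the course id "x/y/z" created in setUp):
#   self.get_thread_list_url(["t1", "t2"]) ==
#       "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&topic_id=t1&topic_id=t2"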
def get_course_topics(self):
"""
Get course topics for self.course, using self.request (whose user is
self.user), and generating absolute URIs with a test scheme/host.
"""
return get_course_topics(self.request, self.course.id)
def make_expected_tree(self, topic_id, name, children=None):
"""
Build an expected result tree given a topic id, display name, and
children
"""
topic_id_list = [topic_id] if topic_id else [child["id"] for child in children]
children = children or []
node = {
"id": topic_id,
"name": name,
"children": children,
"thread_list_url": self.get_thread_list_url(topic_id_list)
}
return node
def test_nonexistent_course(self):
with self.assertRaises(Http404):
get_course_topics(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(Http404):
self.get_course_topics()
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
self.get_course_topics()
def test_without_courseware(self):
actual = self.get_course_topics()
expected = {
"courseware_topics": [],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_with_courseware(self):
self.make_discussion_module("courseware-topic-id", "Foo", "Bar")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"Foo",
[self.make_expected_tree("courseware-topic-id", "Bar")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_many(self):
self.course.discussion_topics = {
"A": {"id": "non-courseware-1"},
"B": {"id": "non-courseware-2"},
}
modulestore().update_item(self.course, self.user.id)
self.make_discussion_module("courseware-1", "A", "1")
self.make_discussion_module("courseware-2", "A", "2")
self.make_discussion_module("courseware-3", "B", "1")
self.make_discussion_module("courseware-4", "B", "2")
self.make_discussion_module("courseware-5", "C", "1")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"A",
[
self.make_expected_tree("courseware-1", "1"),
self.make_expected_tree("courseware-2", "2"),
]
),
self.make_expected_tree(
None,
"B",
[
self.make_expected_tree("courseware-3", "1"),
self.make_expected_tree("courseware-4", "2"),
]
),
self.make_expected_tree(
None,
"C",
[self.make_expected_tree("courseware-5", "1")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-1", "A"),
self.make_expected_tree("non-courseware-2", "B"),
],
}
self.assertEqual(actual, expected)
def test_sort_key(self):
self.course.discussion_topics = {
"W": {"id": "non-courseware-1", "sort_key": "Z"},
"X": {"id": "non-courseware-2"},
"Y": {"id": "non-courseware-3", "sort_key": "Y"},
"Z": {"id": "non-courseware-4", "sort_key": "W"},
}
modulestore().update_item(self.course, self.user.id)
self.make_discussion_module("courseware-1", "First", "A", sort_key="D")
self.make_discussion_module("courseware-2", "First", "B", sort_key="B")
self.make_discussion_module("courseware-3", "First", "C", sort_key="E")
self.make_discussion_module("courseware-4", "Second", "A", sort_key="F")
self.make_discussion_module("courseware-5", "Second", "B", sort_key="G")
self.make_discussion_module("courseware-6", "Second", "C")
self.make_discussion_module("courseware-7", "Second", "D", sort_key="A")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "B"),
self.make_expected_tree("courseware-1", "A"),
self.make_expected_tree("courseware-3", "C"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-7", "D"),
self.make_expected_tree("courseware-6", "C"),
self.make_expected_tree("courseware-4", "A"),
self.make_expected_tree("courseware-5", "B"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-4", "Z"),
self.make_expected_tree("non-courseware-2", "X"),
self.make_expected_tree("non-courseware-3", "Y"),
self.make_expected_tree("non-courseware-1", "W"),
],
}
self.assertEqual(actual, expected)
def test_access_control(self):
"""
Test that only topics that a user has access to are returned. The
ways in which a user may not have access are:
* Module is visible to staff only
* Module has a start date in the future
* Module is accessible only to a group the user is not in
Also, there is a case that ensures that a category with no accessible
subcategories does not appear in the result.
"""
beta_tester = BetaTesterFactory.create(course_key=self.course.id)
CourseEnrollmentFactory.create(user=beta_tester, course_id=self.course.id)
staff = StaffFactory.create(course_key=self.course.id)
for user, group_idx in [(self.user, 0), (beta_tester, 1)]:
cohort = CohortFactory.create(
course_id=self.course.id,
name=self.partition.groups[group_idx].name,
users=[user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=cohort,
partition_id=self.partition.id,
group_id=self.partition.groups[group_idx].id
)
self.make_discussion_module("courseware-1", "First", "Everybody")
self.make_discussion_module(
"courseware-2",
"First",
"Cohort A",
group_access={self.partition.id: [self.partition.groups[0].id]}
)
self.make_discussion_module(
"courseware-3",
"First",
"Cohort B",
group_access={self.partition.id: [self.partition.groups[1].id]}
)
self.make_discussion_module("courseware-4", "Second", "Staff Only", visible_to_staff_only=True)
self.make_discussion_module(
"courseware-5",
"Second",
"Future Start Date",
start=datetime.now(UTC) + timedelta(days=1)
)
student_actual = self.get_course_topics()
student_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(student_actual, student_expected)
self.request.user = beta_tester
beta_actual = self.get_course_topics()
beta_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[self.make_expected_tree("courseware-5", "Future Start Date")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(beta_actual, beta_expected)
self.request.user = staff
staff_actual = self.get_course_topics()
staff_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-5", "Future Start Date"),
self.make_expected_tree("courseware-4", "Staff Only"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(staff_actual, staff_expected)
@ddt.ddt
class GetThreadListTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Test for get_thread_list"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetThreadListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
self.cohort = CohortFactory.create(course_id=self.course.id)
def get_thread_list(
self,
threads,
page=1,
page_size=1,
num_pages=1,
course=None,
topic_id_list=None,
):
"""
Register the appropriate comments service response, then call
get_thread_list and return the result.
"""
course = course or self.course
self.register_get_threads_response(threads, page, num_pages)
ret = get_thread_list(self.request, course.id, page, page_size, topic_id_list)
return ret
def test_nonexistent_course(self):
with self.assertRaises(Http404):
get_thread_list(self.request, CourseLocator.from_string("non/existent/course"), 1, 1)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
self.get_thread_list([])
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
self.get_thread_list([])
def test_empty(self):
self.assertEqual(
self.get_thread_list([]),
{
"results": [],
"next": None,
"previous": None,
"text_search_rewrite": None,
}
)
def test_get_threads_by_topic_id(self):
self.get_thread_list([], topic_id_list=["topic_x", "topic_meow"])
self.assertEqual(urlparse(httpretty.last_request().path).path, "/api/v1/threads")
self.assert_last_query_params({
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["1"],
"recursive": ["False"],
"commentable_ids": ["topic_x,topic_meow"]
})
def test_basic_query_params(self):
self.get_thread_list([], page=6, page_size=14)
self.assert_last_query_params({
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["6"],
"per_page": ["14"],
"recursive": ["False"],
})
def test_thread_content(self):
source_threads = [
{
"type": "thread",
"id": "test_thread_id_0",
"course_id": unicode(self.course.id),
"commentable_id": "topic_x",
"group_id": None,
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
"thread_type": "discussion",
"title": "Test Title",
"body": "Test body",
"pinned": False,
"closed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"comments_count": 5,
"unread_comments_count": 3,
},
{
"type": "thread",
"id": "test_thread_id_1",
"course_id": unicode(self.course.id),
"commentable_id": "topic_y",
"group_id": self.cohort.id,
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"thread_type": "question",
"title": "Another Test Title",
"body": "More content",
"pinned": False,
"closed": True,
"abuse_flaggers": [],
"votes": {"up_count": 9},
"comments_count": 18,
"unread_comments_count": 0,
},
]
expected_threads = [
{
"id": "test_thread_id_0",
"course_id": unicode(self.course.id),
"topic_id": "topic_x",
"group_id": None,
"group_name": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"comment_count": 5,
"unread_comment_count": 3,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_0",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["following", "voted"],
},
{
"id": "test_thread_id_1",
"course_id": unicode(self.course.id),
"topic_id": "topic_y",
"group_id": self.cohort.id,
"group_name": self.cohort.name,
"author": self.author.username,
"author_label": None,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"type": "question",
"title": "Another Test Title",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"pinned": False,
"closed": True,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 9,
"comment_count": 18,
"unread_comment_count": 0,
"comment_list_url": None,
"endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=True"
),
"non_endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=False"
),
"editable_fields": ["following", "voted"],
},
]
self.assertEqual(
self.get_thread_list(source_threads),
{
"results": expected_threads,
"next": None,
"previous": None,
"text_search_rewrite": None,
}
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False]
)
)
@ddt.unpack
def test_request_group(self, role_name, course_is_cohorted):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.get_thread_list([], course=cohort_course)
actual_has_group = "group_id" in httpretty.last_request().querystring
expected_has_group = (course_is_cohorted and role_name == FORUM_ROLE_STUDENT)
self.assertEqual(actual_has_group, expected_has_group)
def test_pagination(self):
# N.B. Empty thread list is not realistic but convenient for this test
self.assertEqual(
self.get_thread_list([], page=1, num_pages=3),
{
"results": [],
"next": "http://testserver/test_path?page=2",
"previous": None,
"text_search_rewrite": None,
}
)
self.assertEqual(
self.get_thread_list([], page=2, num_pages=3),
{
"results": [],
"next": "http://testserver/test_path?page=3",
"previous": "http://testserver/test_path?page=1",
"text_search_rewrite": None,
}
)
self.assertEqual(
self.get_thread_list([], page=3, num_pages=3),
{
"results": [],
"next": None,
"previous": "http://testserver/test_path?page=2",
"text_search_rewrite": None,
}
)
# Test page past the last one
self.register_get_threads_response([], page=3, num_pages=3)
with self.assertRaises(Http404):
get_thread_list(self.request, self.course.id, page=4, page_size=10)
@ddt.data(None, "rewritten search string")
def test_text_search(self, text_search_rewrite):
self.register_get_threads_search_response([], text_search_rewrite)
self.assertEqual(
get_thread_list(
self.request,
self.course.id,
page=1,
page_size=10,
text_search="test search string"
),
{
"results": [],
"next": None,
"previous": None,
"text_search_rewrite": text_search_rewrite,
}
)
self.assert_last_query_params({
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["10"],
"recursive": ["False"],
"text": ["test search string"],
})
@ddt.ddt
class GetCommentListTest(CommentsServiceMockMixin, ModuleStoreTestCase):
"""Test for get_comment_list"""
def setUp(self):
super(GetCommentListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
def make_minimal_cs_thread(self, overrides=None):
"""
Create a thread with the given overrides, plus the course_id if not
already in overrides.
"""
overrides = overrides.copy() if overrides else {}
overrides.setdefault("course_id", unicode(self.course.id))
return make_minimal_cs_thread(overrides)
def get_comment_list(self, thread, endorsed=None, page=1, page_size=1):
"""
Register the appropriate comments service response, then call
get_comment_list and return the result.
"""
self.register_get_thread_response(thread)
return get_comment_list(self.request, thread["id"], endorsed, page, page_size)
def test_nonexistent_thread(self):
thread_id = "nonexistent_thread"
self.register_get_thread_error_response(thread_id, 404)
with self.assertRaises(Http404):
get_comment_list(self.request, thread_id, endorsed=False, page=1, page_size=1)
def test_nonexistent_course(self):
with self.assertRaises(Http404):
self.get_comment_list(self.make_minimal_cs_thread({"course_id": "non/existent/course"}))
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
self.get_comment_list(self.make_minimal_cs_thread())
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
self.get_comment_list(self.make_minimal_cs_thread())
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(
self,
role_name,
course_is_cohorted,
topic_is_cohorted,
thread_group_state
):
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
thread = self.make_minimal_cs_thread({
"course_id": unicode(cohort_course.id),
"commentable_id": "test_topic",
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
topic_is_cohorted and
thread_group_state == "different_group"
)
try:
self.get_comment_list(thread)
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.data(True, False)
def test_discussion_endorsed(self, endorsed_value):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "discussion"}),
endorsed=endorsed_value
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field may not be specified for discussion threads."]}
)
def test_question_without_endorsed(self):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "question"}),
endorsed=None
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field is required for question threads."]}
)
def test_empty(self):
discussion_thread = self.make_minimal_cs_thread(
{"thread_type": "discussion", "children": [], "resp_total": 0}
)
self.assertEqual(
self.get_comment_list(discussion_thread),
{"results": [], "next": None, "previous": None}
)
question_thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [],
"non_endorsed_responses": [],
"non_endorsed_resp_total": 0
})
self.assertEqual(
self.get_comment_list(question_thread, endorsed=False),
{"results": [], "next": None, "previous": None}
)
self.assertEqual(
self.get_comment_list(question_thread, endorsed=True),
{"results": [], "next": None, "previous": None}
)
def test_basic_query_params(self):
self.get_comment_list(
self.make_minimal_cs_thread({
"children": [make_minimal_cs_comment()],
"resp_total": 71
}),
page=6,
page_size=14
)
self.assert_query_params_equal(
httpretty.httpretty.latest_requests[-2],
{
"recursive": ["True"],
"user_id": [str(self.user.id)],
"mark_as_read": ["True"],
"resp_skip": ["70"],
"resp_limit": ["14"],
}
)
def test_discussion_content(self):
source_comments = [
{
"type": "comment",
"id": "test_comment_1",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"body": "Test body",
"endorsed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"children": [],
},
{
"type": "comment",
"id": "test_comment_2",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": True,
"anonymous_to_peers": False,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"body": "More content",
"endorsed": False,
"abuse_flaggers": [str(self.user.id)],
"votes": {"up_count": 7},
"children": [],
}
]
expected_comments = [
{
"id": "test_comment_1",
"thread_id": "test_thread",
"parent_id": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"children": [],
"editable_fields": ["voted"],
},
{
"id": "test_comment_2",
"thread_id": "test_thread",
"parent_id": None,
"author": None,
"author_label": None,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": True,
"voted": False,
"vote_count": 7,
"children": [],
"editable_fields": ["voted"],
},
]
actual_comments = self.get_comment_list(
self.make_minimal_cs_thread({"children": source_comments})
)["results"]
self.assertEqual(actual_comments, expected_comments)
def test_question_content(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [make_minimal_cs_comment({"id": "endorsed_comment"})],
"non_endorsed_responses": [make_minimal_cs_comment({"id": "non_endorsed_comment"})],
"non_endorsed_resp_total": 1,
})
endorsed_actual = self.get_comment_list(thread, endorsed=True)
self.assertEqual(endorsed_actual["results"][0]["id"], "endorsed_comment")
non_endorsed_actual = self.get_comment_list(thread, endorsed=False)
self.assertEqual(non_endorsed_actual["results"][0]["id"], "non_endorsed_comment")
def test_endorsed_by_anonymity(self):
"""
Ensure thread anonymity is properly considered in serializing
endorsed_by.
"""
thread = self.make_minimal_cs_thread({
"anonymous": True,
"children": [
make_minimal_cs_comment({
"endorsement": {"user_id": str(self.author.id), "time": "2015-05-18T12:34:56Z"}
})
]
})
actual_comments = self.get_comment_list(thread)["results"]
self.assertIsNone(actual_comments[0]["endorsed_by"])
@ddt.data(
("discussion", None, "children", "resp_total"),
("question", False, "non_endorsed_responses", "non_endorsed_resp_total"),
)
@ddt.unpack
def test_cs_pagination(self, thread_type, endorsed_arg, response_field, response_total_field):
"""
Test cases in which pagination is done by the comments service.
thread_type is the type of thread (question or discussion).
endorsed_arg is the value of the endorsed argument.
response_field is the field in which responses are returned for the
given thread type.
response_total_field is the field in which the total number of responses
is returned for the given thread type.
"""
# N.B. The mismatch between the number of children and the listed total
# number of responses is unrealistic but convenient for this test
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [make_minimal_cs_comment()],
response_total_field: 5,
})
# Only page
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=5)
self.assertIsNone(actual["next"])
self.assertIsNone(actual["previous"])
# First page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=2)
self.assertEqual(actual["next"], "http://testserver/test_path?page=2")
self.assertIsNone(actual["previous"])
# Middle page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=2)
self.assertEqual(actual["next"], "http://testserver/test_path?page=3")
self.assertEqual(actual["previous"], "http://testserver/test_path?page=1")
# Last page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=3, page_size=2)
self.assertIsNone(actual["next"])
self.assertEqual(actual["previous"], "http://testserver/test_path?page=2")
# Page past the end
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [],
response_total_field: 5
})
with self.assertRaises(Http404):
self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=5)
def test_question_endorsed_pagination(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [
make_minimal_cs_comment({"id": "comment_{}".format(i)}) for i in range(10)
]
})
def assert_page_correct(page, page_size, expected_start, expected_stop, expected_next, expected_prev):
"""
Check that requesting the given page/page_size returns the expected
output
"""
actual = self.get_comment_list(thread, endorsed=True, page=page, page_size=page_size)
result_ids = [result["id"] for result in actual["results"]]
self.assertEqual(
result_ids,
["comment_{}".format(i) for i in range(expected_start, expected_stop)]
)
self.assertEqual(
actual["next"],
"http://testserver/test_path?page={}".format(expected_next) if expected_next else None
)
self.assertEqual(
actual["previous"],
"http://testserver/test_path?page={}".format(expected_prev) if expected_prev else None
)
# Only page
assert_page_correct(
page=1,
page_size=10,
expected_start=0,
expected_stop=10,
expected_next=None,
expected_prev=None
)
# First page of many
assert_page_correct(
page=1,
page_size=4,
expected_start=0,
expected_stop=4,
expected_next=2,
expected_prev=None
)
# Middle page of many
assert_page_correct(
page=2,
page_size=4,
expected_start=4,
expected_stop=8,
expected_next=3,
expected_prev=1
)
# Last page of many
assert_page_correct(
page=3,
page_size=4,
expected_start=8,
expected_stop=10,
expected_next=None,
expected_prev=2
)
# Page past the end
with self.assertRaises(Http404):
self.get_comment_list(thread, endorsed=True, page=2, page_size=10)
class CreateThreadTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for create_thread"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.minimal_data = {
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
}
@mock.patch("eventtracking.tracker.emit")
def test_basic(self, mock_emit):
self.register_post_thread_response({
"id": "test_id",
"username": self.user.username,
"created_at": "2015-05-19T00:00:00Z",
"updated_at": "2015-05-19T00:00:00Z",
})
actual = create_thread(self.request, self.minimal_data)
expected = {
"id": "test_id",
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"group_id": None,
"group_name": None,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-19T00:00:00Z",
"updated_at": "2015-05-19T00:00:00Z",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"comment_count": 0,
"unread_comment_count": 0,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_id",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["following", "raw_body", "title", "topic_id", "type", "voted"],
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["test_topic"],
"thread_type": ["discussion"],
"title": ["Test Title"],
"body": ["Test body"],
"user_id": [str(self.user.id)],
}
)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.created")
self.assertEqual(
event_data,
{
"commentable_id": "test_topic",
"group_id": None,
"thread_type": "discussion",
"title": "Test Title",
"anonymous": False,
"anonymous_to_peers": False,
"options": {"followed": False},
"id": "test_id",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
)
def test_following(self):
self.register_post_thread_response({"id": "test_id"})
self.register_subscription_response(self.user)
data = self.minimal_data.copy()
data["following"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["following"], True)
cs_request = httpretty.last_request()
self.assertEqual(
urlparse(cs_request.path).path,
"/api/v1/users/{}/subscriptions".format(self.user.id)
)
self.assertEqual(cs_request.method, "POST")
self.assertEqual(
cs_request.parsed_body,
{"source_type": ["thread"], "source_id": ["test_id"]}
)
def test_voted(self):
self.register_post_thread_response({"id": "test_id"})
self.register_thread_votes_response("test_id")
data = self.minimal_data.copy()
data["voted"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_course_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["This field is required."]})
def test_course_id_invalid(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {"course_id": "invalid!"})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_nonexistent_course(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {"course_id": "non/existent/course"})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_invalid_field(self):
data = self.minimal_data.copy()
data["type"] = "invalid_type"
with self.assertRaises(ValidationError):
create_thread(self.request, data)
@ddt.ddt
class CreateCommentTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for create_comment"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "test_topic",
})
)
self.minimal_data = {
"thread_id": "test_thread",
"raw_body": "Test body",
}
@ddt.data(None, "test_parent")
@mock.patch("eventtracking.tracker.emit")
def test_success(self, parent_id, mock_emit):
if parent_id:
self.register_get_comment_response({"id": parent_id, "thread_id": "test_thread"})
self.register_post_comment_response(
{
"id": "test_comment",
"username": self.user.username,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
},
thread_id="test_thread",
parent_id=parent_id
)
data = self.minimal_data.copy()
if parent_id:
data["parent_id"] = parent_id
actual = create_comment(self.request, data)
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["raw_body", "voted"]
}
self.assertEqual(actual, expected)
expected_url = (
"/api/v1/comments/{}".format(parent_id) if parent_id else
"/api/v1/threads/test_thread/comments"
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
expected_url
)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"body": ["Test body"],
"user_id": [str(self.user.id)]
}
)
expected_event_name = (
"edx.forum.comment.created" if parent_id else
"edx.forum.response.created"
)
expected_event_data = {
"discussion": {"id": "test_thread"},
"commentable_id": "test_topic",
"options": {"followed": False},
"id": "test_comment",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
if parent_id:
expected_event_data["response"] = {"id": parent_id}
actual_event_name, actual_event_data = mock_emit.call_args[0]
self.assertEqual(actual_event_name, expected_event_name)
self.assertEqual(actual_event_data, expected_event_data)
def test_voted(self):
self.register_post_comment_response({"id": "test_comment"}, "test_thread")
self.register_comment_votes_response("test_comment")
data = self.minimal_data.copy()
data["voted"] = "True"
result = create_comment(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_thread_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["This field is required."]})
def test_thread_id_not_found(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
def test_nonexistent_course(self):
self.register_get_thread_response(
make_minimal_cs_thread({"id": "test_thread", "course_id": "non/existent/course"})
)
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_get_thread_response(make_minimal_cs_thread({
"id": "cohort_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}))
self.register_post_comment_response({}, thread_id="cohort_thread")
data = self.minimal_data.copy()
data["thread_id"] = "cohort_thread"
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
create_comment(self.request, data)
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"thread_id": ["Invalid value."]}
)
def test_invalid_field(self):
data = self.minimal_data.copy()
del data["raw_body"]
with self.assertRaises(ValidationError):
create_comment(self.request, data)
@ddt.ddt
class UpdateThreadTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for update_thread"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "original_topic",
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"thread_type": "discussion",
"title": "Original Title",
"body": "Original body",
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_put_thread_response(cs_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
# Ensure that the default following value of False is not applied implicitly
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_thread()
update_thread(self.request, "test_thread", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
def test_basic(self):
self.register_thread()
actual = update_thread(self.request, "test_thread", {"raw_body": "Edited body"})
expected = {
"id": "test_thread",
"course_id": unicode(self.course.id),
"topic_id": "original_topic",
"group_id": None,
"group_name": None,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"type": "discussion",
"title": "Original Title",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"comment_count": 0,
"unread_comment_count": 0,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["following", "raw_body", "title", "topic_id", "type", "voted"],
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["original_topic"],
"thread_type": ["discussion"],
"title": ["Original Title"],
"body": ["Edited body"],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"closed": ["False"],
"pinned": ["False"],
}
)
def test_nonexistent_thread(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
self.register_thread()
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_thread(self.request, "test_thread", {})
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_author_only_fields(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
data = {field: "edited" for field in ["topic_id", "title", "raw_body"]}
data["type"] = "question"
expected_error = role_name == FORUM_ROLE_STUDENT
try:
update_thread(self.request, "test_thread", data)
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{field: ["This field is not editable."] for field in data.keys()}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_following(self, old_following, new_following):
"""
Test attempts to edit the "following" field.
old_following indicates whether the thread should be followed at the
start of the test. new_following indicates the value for the "following"
field in the update. If old_following and new_following are the same, no
update should be made. Otherwise, a subscription should be POSTed or
DELETEd according to the new_following value.
"""
if old_following:
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_subscription_response(self.user)
self.register_thread()
data = {"following": new_following}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["following"], new_following)
last_request_path = urlparse(httpretty.last_request().path).path
subscription_url = "/api/v1/users/{}/subscriptions".format(self.user.id)
if old_following == new_following:
self.assertNotEqual(last_request_path, subscription_url)
else:
self.assertEqual(last_request_path, subscription_url)
self.assertEqual(
httpretty.last_request().method,
"POST" if new_following else "DELETE"
)
request_data = (
httpretty.last_request().parsed_body if new_following else
parse_qs(urlparse(httpretty.last_request().path).query)
)
request_data.pop("request_id", None)
self.assertEqual(
request_data,
{"source_type": ["thread"], "source_id": ["test_thread"]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_voted(self, old_voted, new_voted):
"""
Test attempts to edit the "voted" field.
old_voted indicates whether the thread should be upvoted at the start of
the test. new_voted indicates the value for the "voted" field in the
update. If old_voted and new_voted are the same, no update should be
made. Otherwise, a vote should be PUT or DELETEd according to the
new_voted value.
"""
if old_voted:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
self.register_thread_votes_response("test_thread")
self.register_thread()
data = {"voted": new_voted}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["voted"], new_voted)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/threads/test_thread/votes"
if old_voted == new_voted:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_voted else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_voted else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_voted:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
def test_invalid_field(self):
self.register_thread()
with self.assertRaises(ValidationError) as assertion:
update_thread(self.request, "test_thread", {"raw_body": ""})
self.assertEqual(
assertion.exception.message_dict,
{"raw_body": ["This field is required."]}
)
@ddt.ddt
class UpdateCommentTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for update_comment"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment(self, overrides=None, thread_overrides=None):
"""
Make a comment with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
cs_thread_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": "test_comment",
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"body": "Original body",
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_put_comment_response(cs_comment_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
self.register_comment()
update_comment(self.request, "test_comment", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
@ddt.data(None, "test_parent")
def test_basic(self, parent_id):
self.register_comment({"parent_id": parent_id})
actual = update_comment(self.request, "test_comment", {"raw_body": "Edited body"})
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["raw_body", "voted"]
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"body": ["Edited body"],
"course_id": [unicode(self.course.id)],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"endorsed": ["False"],
}
)
def test_nonexistent_comment(self):
self.register_get_comment_error_response("test_comment", 404)
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
def test_nonexistent_course(self):
self.register_comment(thread_overrides={"course_id": "non/existent/course"})
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
def test_unenrolled(self):
self.register_comment()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
self.register_comment()
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_get_thread_response(make_minimal_cs_thread())
self.register_comment(
{"thread_id": "test_thread"},
thread_overrides={
"id": "test_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_comment(self.request, "test_comment", {})
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
))
@ddt.unpack
def test_raw_body_access(self, role_name, is_thread_author, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1))
}
)
expected_error = role_name == FORUM_ROLE_STUDENT and not is_comment_author
try:
update_comment(self.request, "test_comment", {"raw_body": "edited"})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"raw_body": ["This field is not editable."]}
)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
[True, False],
))
@ddt.unpack
def test_endorsed_access(self, role_name, is_thread_author, thread_type, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"thread_type": thread_type,
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1)),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(thread_type == "discussion" or not is_thread_author)
)
try:
update_comment(self.request, "test_comment", {"endorsed": True})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"endorsed": ["This field is not editable."]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_voted(self, old_voted, new_voted):
"""
Test attempts to edit the "voted" field.
old_voted indicates whether the comment should be upvoted at the start of
the test. new_voted indicates the value for the "voted" field in the
update. If old_voted and new_voted are the same, no update should be
made. Otherwise, a vote should be PUT or DELETEd according to the
new_voted value.
"""
if old_voted:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
self.register_comment_votes_response("test_comment")
self.register_comment()
data = {"voted": new_voted}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["voted"], new_voted)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/comments/test_comment/votes"
if old_voted == new_voted:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_voted else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_voted else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_voted:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
@ddt.ddt
class DeleteThreadTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for delete_thread"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and DELETE on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id),
"user_id": str(self.user.id),
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_delete_thread_response(cs_data["id"])
def test_basic(self):
self.register_thread()
self.assertIsNone(delete_thread(self.request, self.thread_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads/{}".format(self.thread_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(Http404):
delete_thread(self.request, "missing_thread")
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(Http404):
delete_thread(self.request, self.thread_id)
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
delete_thread(self.request, self.thread_id)
def test_discussions_disabled(self):
self.register_thread()
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
delete_thread(self.request, self.thread_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a thread.
All privileged roles are able to delete a thread. A student can delete
a thread only if the student is the author and the thread either is not
in a cohort or is in the author's cohort.
"""
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.ddt
class DeleteCommentTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for delete_comment"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
self.thread_id = "test_thread"
self.comment_id = "test_comment"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment_and_thread(self, overrides=None, thread_overrides=None):
"""
Make a comment with appropriate data overridden by the override
parameters and register mock responses for both GET and DELETE on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
cs_thread_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": self.comment_id,
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_delete_comment_response(self.comment_id)
def test_basic(self):
self.register_comment_and_thread()
self.assertIsNone(delete_comment(self.request, self.comment_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/comments/{}".format(self.comment_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_comment_id_not_found(self):
self.register_get_comment_error_response("missing_comment", 404)
with self.assertRaises(Http404):
delete_comment(self.request, "missing_comment")
def test_nonexistent_course(self):
self.register_comment_and_thread(
thread_overrides={"course_id": "non/existent/course"}
)
with self.assertRaises(Http404):
delete_comment(self.request, self.comment_id)
def test_not_enrolled(self):
self.register_comment_and_thread()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
delete_comment(self.request, self.comment_id)
def test_discussions_disabled(self):
self.register_comment_and_thread()
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
delete_comment(self.request, self.comment_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"user_id": str(self.user.id + 1)}
)
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a comment.
All privileged roles are able to delete a comment. A student can delete
a comment only if the student is the author and the comment's thread
either is not in a cohort or is in the author's cohort.
"""
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"thread_id": "test_thread"},
thread_overrides={
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
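# The group-access and author-only tests above all follow one pattern:
# compute `expected_error` from the ddt inputs, attempt the call, and
# assert that the exception is raised exactly when an error is expected.
# A condensed sketch of that shared pattern (the `assert_access` helper
# is an assumption for illustration, not part of this test suite):
#
#     def assert_access(self, call, expected_error, exc_class):
#         try:
#             call()
#             self.assertFalse(expected_error)
#         except exc_class:
#             self.assertTrue(expected_error)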
| agpl-3.0 |
OneBitSoftware/jwtSample | src/Spa/env1/Lib/site-packages/pip/_vendor/lockfile/mkdirlockfile.py | 478 | 3098 | from __future__ import absolute_import, division
import time
import os
import sys
import errno
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class MkdirLockFile(LockBase):
"""Lock file by creating a directory."""
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = MkdirLockFile('somefile')
>>> lock = MkdirLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
# Lock file itself is a directory. Place the unique file name into
# it.
self.unique_name = os.path.join(self.lock_file,
"%s.%s%s" % (self.hostname,
self.tname,
self.pid))
def acquire(self, timeout=None):
# Fall back to the instance default when no timeout is given; note that
# this and/or idiom also maps an explicit timeout of 0 to self.timeout,
# because 0 is falsy.
timeout = timeout is not None and timeout or self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
else:
wait = max(0, timeout / 10)
while True:
try:
os.mkdir(self.lock_file)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EEXIST:
# Already locked.
if os.path.exists(self.unique_name):
# Already locked by me.
return
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
# Someone else has the lock.
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(wait)
else:
# Couldn't create the lock for some other reason
raise LockFailed("failed to create %s" % self.lock_file)
else:
open(self.unique_name, "wb").close()
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not os.path.exists(self.unique_name):
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.unique_name)
os.rmdir(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name))
def break_lock(self):
if os.path.exists(self.lock_file):
for name in os.listdir(self.lock_file):
os.unlink(os.path.join(self.lock_file, name))
os.rmdir(self.lock_file)
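# A minimal usage sketch (illustrative only; the path and timeout below
# are assumptions, not part of this module): acquire the lock around a
# critical section and always release it, even on error.
#
#     lock = MkdirLockFile('/tmp/shared.txt', timeout=10)
#     lock.acquire()
#     try:
#         pass  # work with the shared resource here
#     finally:
#         lock.release()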
| mit |
jobevers/emacs.d | emacs-for-python/python-libs/rope/refactor/similarfinder.py | 59 | 12370 | """This module can be used for finding similar code"""
import re
import rope.refactor.wildcards
from rope.base import codeanalyze, evaluate, exceptions, ast, builtins
from rope.refactor import (patchedast, sourceutils, occurrences,
wildcards, importutils)
class BadNameInCheckError(exceptions.RefactoringError):
pass
class SimilarFinder(object):
"""`SimilarFinder` can be used to find similar pieces of code
See the notes in the `rope.refactor.restructure` module for more
info.
"""
def __init__(self, pymodule, wildcards=None):
"""Construct a SimilarFinder"""
self.source = pymodule.source_code
self.raw_finder = RawSimilarFinder(
pymodule.source_code, pymodule.get_ast(), self._does_match)
self.pymodule = pymodule
if wildcards is None:
self.wildcards = {}
for wildcard in [rope.refactor.wildcards.
DefaultWildcard(pymodule.pycore.project)]:
self.wildcards[wildcard.get_name()] = wildcard
else:
self.wildcards = wildcards
def get_matches(self, code, args={}, start=0, end=None):
self.args = args
if end is None:
end = len(self.source)
skip_region = None
if 'skip' in args.get('', {}):
resource, region = args['']['skip']
if resource == self.pymodule.get_resource():
skip_region = region
return self.raw_finder.get_matches(code, start=start, end=end,
skip=skip_region)
def get_match_regions(self, *args, **kwds):
for match in self.get_matches(*args, **kwds):
yield match.get_region()
def _does_match(self, node, name):
arg = self.args.get(name, '')
kind = 'default'
if isinstance(arg, (tuple, list)):
kind = arg[0]
arg = arg[1]
suspect = wildcards.Suspect(self.pymodule, node, name)
return self.wildcards[kind].matches(suspect, arg)
class RawSimilarFinder(object):
"""A class for finding similar expressions and statements"""
def __init__(self, source, node=None, does_match=None):
if node is None:
node = ast.parse(source)
if does_match is None:
self.does_match = self._simple_does_match
else:
self.does_match = does_match
self._init_using_ast(node, source)
def _simple_does_match(self, node, name):
return isinstance(node, (ast.expr, ast.Name))
def _init_using_ast(self, node, source):
self.source = source
self._matched_asts = {}
if not hasattr(node, 'region'):
patchedast.patch_ast(node, source)
self.ast = node
def get_matches(self, code, start=0, end=None, skip=None):
"""Search for `code` in source and return a list of `Match`\es
`code` can contain wildcards. ``${name}`` matches normal
names and ``${?name}`` can match any expression. You can use
`Match.get_ast()` for getting the node that has matched a
given pattern.
"""
if end is None:
end = len(self.source)
for match in self._get_matched_asts(code):
match_start, match_end = match.get_region()
if start <= match_start and match_end <= end:
if skip is not None and (skip[0] < match_end and
skip[1] > match_start):
continue
yield match
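# A hedged illustration of the wildcard syntax described above (the
# source snippet and the wildcard name `x` are assumptions):
#
#     finder = RawSimilarFinder('a = 1\nb = 1\n')
#     for match in finder.get_matches('${?x} = 1'):
#         print(match.get_region())  # one region per matching statement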
def _get_matched_asts(self, code):
if code not in self._matched_asts:
wanted = self._create_pattern(code)
matches = _ASTMatcher(self.ast, wanted,
self.does_match).find_matches()
self._matched_asts[code] = matches
return self._matched_asts[code]
def _create_pattern(self, expression):
expression = self._replace_wildcards(expression)
node = ast.parse(expression)
# Getting Module.Stmt.nodes
nodes = node.body
if len(nodes) == 1 and isinstance(nodes[0], ast.Expr):
# Getting Discard.expr
wanted = nodes[0].value
else:
wanted = nodes
return wanted
def _replace_wildcards(self, expression):
ropevar = _RopeVariable()
template = CodeTemplate(expression)
mapping = {}
for name in template.get_names():
mapping[name] = ropevar.get_var(name)
return template.substitute(mapping)
class _ASTMatcher(object):
def __init__(self, body, pattern, does_match):
"""Searches the given pattern in the body AST.
body is an AST node and pattern can be either an AST node or
a list of ASTs nodes
"""
self.body = body
self.pattern = pattern
self.matches = None
self.ropevar = _RopeVariable()
self.matches_callback = does_match
def find_matches(self):
if self.matches is None:
self.matches = []
ast.call_for_nodes(self.body, self._check_node, recursive=True)
return self.matches
def _check_node(self, node):
if isinstance(self.pattern, list):
self._check_statements(node)
else:
self._check_expression(node)
def _check_expression(self, node):
mapping = {}
if self._match_nodes(self.pattern, node, mapping):
self.matches.append(ExpressionMatch(node, mapping))
def _check_statements(self, node):
for child in ast.get_children(node):
if isinstance(child, (list, tuple)):
self.__check_stmt_list(child)
def __check_stmt_list(self, nodes):
for index in range(len(nodes)):
if len(nodes) - index >= len(self.pattern):
current_stmts = nodes[index:index + len(self.pattern)]
mapping = {}
if self._match_stmts(current_stmts, mapping):
self.matches.append(StatementMatch(current_stmts, mapping))
def _match_nodes(self, expected, node, mapping):
if isinstance(expected, ast.Name):
if self.ropevar.is_var(expected.id):
return self._match_wildcard(expected, node, mapping)
if not isinstance(expected, ast.AST):
return expected == node
if expected.__class__ != node.__class__:
return False
children1 = self._get_children(expected)
children2 = self._get_children(node)
if len(children1) != len(children2):
return False
for child1, child2 in zip(children1, children2):
if isinstance(child1, ast.AST):
if not self._match_nodes(child1, child2, mapping):
return False
elif isinstance(child1, (list, tuple)):
if not isinstance(child2, (list, tuple)) or \
len(child1) != len(child2):
return False
for c1, c2 in zip(child1, child2):
if not self._match_nodes(c1, c2, mapping):
return False
else:
if child1 != child2:
return False
return True
def _get_children(self, node):
"""Return not `ast.expr_context` children of `node`"""
children = ast.get_children(node)
return [child for child in children
if not isinstance(child, ast.expr_context)]
def _match_stmts(self, current_stmts, mapping):
if len(current_stmts) != len(self.pattern):
return False
for stmt, expected in zip(current_stmts, self.pattern):
if not self._match_nodes(expected, stmt, mapping):
return False
return True
def _match_wildcard(self, node1, node2, mapping):
name = self.ropevar.get_base(node1.id)
if name not in mapping:
if self.matches_callback(node2, name):
mapping[name] = node2
return True
return False
else:
return self._match_nodes(mapping[name], node2, {})
class Match(object):
def __init__(self, mapping):
self.mapping = mapping
def get_region(self):
"""Returns match region"""
def get_ast(self, name):
"""Return the ast node that has matched rope variables"""
return self.mapping.get(name, None)
class ExpressionMatch(Match):
def __init__(self, ast, mapping):
super(ExpressionMatch, self).__init__(mapping)
self.ast = ast
def get_region(self):
return self.ast.region
class StatementMatch(Match):
def __init__(self, ast_list, mapping):
super(StatementMatch, self).__init__(mapping)
self.ast_list = ast_list
def get_region(self):
return self.ast_list[0].region[0], self.ast_list[-1].region[1]
class CodeTemplate(object):
def __init__(self, template):
self.template = template
self._find_names()
def _find_names(self):
self.names = {}
for match in CodeTemplate._get_pattern().finditer(self.template):
if 'name' in match.groupdict() and \
match.group('name') is not None:
start, end = match.span('name')
name = self.template[start + 2:end - 1]
if name not in self.names:
self.names[name] = []
self.names[name].append((start, end))
def get_names(self):
return self.names.keys()
def substitute(self, mapping):
collector = codeanalyze.ChangeCollector(self.template)
for name, occurrences in self.names.items():
for region in occurrences:
collector.add_change(region[0], region[1], mapping[name])
result = collector.get_changed()
if result is None:
return self.template
return result
_match_pattern = None
@classmethod
def _get_pattern(cls):
if cls._match_pattern is None:
pattern = codeanalyze.get_comment_pattern() + '|' + \
codeanalyze.get_string_pattern() + '|' + \
r'(?P<name>\$\{[^\s\$\}]*\})'
cls._match_pattern = re.compile(pattern)
return cls._match_pattern
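# A small worked example of CodeTemplate (the template string and the
# mapping are assumptions for illustration):
#
#     template = CodeTemplate('${a} + ${b}')
#     sorted(template.get_names())                # ['a', 'b']
#     template.substitute({'a': 'x', 'b': 'y'})   # 'x + y'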
class _RopeVariable(object):
"""Transform and identify rope inserted wildcards"""
_normal_prefix = '__rope__variable_normal_'
_any_prefix = '__rope__variable_any_'
def get_var(self, name):
if name.startswith('?'):
return self._get_any(name)
else:
return self._get_normal(name)
def is_var(self, name):
return self._is_normal(name) or self._is_var(name)
def get_base(self, name):
if self._is_normal(name):
return name[len(self._normal_prefix):]
if self._is_var(name):
return '?' + name[len(self._any_prefix):]
def _get_normal(self, name):
return self._normal_prefix + name
def _get_any(self, name):
return self._any_prefix + name[1:]
def _is_normal(self, name):
return name.startswith(self._normal_prefix)
def _is_var(self, name):
return name.startswith(self._any_prefix)
def make_pattern(code, variables):
variables = set(variables)
collector = codeanalyze.ChangeCollector(code)
def does_match(node, name):
return isinstance(node, ast.Name) and node.id == name
finder = RawSimilarFinder(code, does_match=does_match)
for variable in variables:
for match in finder.get_matches('${%s}' % variable):
start, end = match.get_region()
collector.add_change(start, end, '${%s}' % variable)
result = collector.get_changed()
return result if result is not None else code
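# make_pattern usage sketch (the inputs are assumptions): occurrences of
# the listed variables are rewritten into ${name} wildcards.
#
#     make_pattern('a + a', ['a'])  # -> '${a} + ${a}'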
def _pydefined_to_str(pydefined):
address = []
if isinstance(pydefined, (builtins.BuiltinClass, builtins.BuiltinFunction)):
return '__builtins__.' + pydefined.get_name()
else:
while pydefined.parent is not None:
address.insert(0, pydefined.get_name())
pydefined = pydefined.parent
module_name = pydefined.pycore.modname(pydefined.resource)
return '.'.join(module_name.split('.') + address)
| gpl-3.0 |
yfauser/ansible-modules-extras | cloud/centurylink/clc_modify_server.py | 49 | 35279 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
DOCUMENTATION = '''
module: clc_modify_server
short_description: modify servers in CenturyLink Cloud.
description:
- An Ansible module to modify servers in CenturyLink Cloud.
version_added: "2.0"
options:
server_ids:
description:
- A list of server Ids to modify.
required: True
cpu:
description:
- How many CPUs to update on the server
required: False
default: None
memory:
description:
- Memory (in GB) to set to the server.
required: False
default: None
anti_affinity_policy_id:
description:
- The anti affinity policy id to be set for a hyper scale server.
This is mutually exclusive with 'anti_affinity_policy_name'
required: False
default: None
anti_affinity_policy_name:
description:
- The anti affinity policy name to be set for a hyper scale server.
This is mutually exclusive with 'anti_affinity_policy_id'
required: False
default: None
alert_policy_id:
description:
- The alert policy id to be associated to the server.
This is mutually exclusive with 'alert_policy_name'
required: False
default: None
alert_policy_name:
description:
- The alert policy name to be associated to the server.
This is mutually exclusive with 'alert_policy_id'
required: False
default: None
state:
description:
- The state to ensure that the provided resources are in.
default: 'present'
required: False
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [ True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
notes:
- To use this module, it is required to set the below environment variables which enable access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: set the cpu count to 4 on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
cpu: 4
state: present
- name: set the memory to 8GB on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
memory: 8
state: present
- name: set the anti affinity policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
anti_affinity_policy_name: 'aa_policy'
state: present
- name: remove the anti affinity policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
anti_affinity_policy_name: 'aa_policy'
state: absent
- name: add the alert policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
alert_policy_name: 'alert_policy'
state: present
- name: remove the alert policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
alert_policy_name: 'alert_policy'
state: absent
- name: set the memory to 16GB and cpu to 8 cores on a list of servers
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
cpu: 8
memory: 16
state: present
'''
RETURN = '''
changed:
description: A flag indicating if any change was made or not
returned: success
type: boolean
sample: True
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
servers:
description: The list of server objects that are changed
returned: success
type: list
sample:
[
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":1438196820,
"modifiedBy":"service.wfad",
"modifiedDate":1438196820
},
"description":"test-server",
"details":{
"alertPolicies":[
],
"cpu":1,
"customFields":[
],
"diskCount":3,
"disks":[
{
"id":"0:0",
"partitionPaths":[
],
"sizeGB":1
},
{
"id":"0:1",
"partitionPaths":[
],
"sizeGB":2
},
{
"id":"0:2",
"partitionPaths":[
],
"sizeGB":14
}
],
"hostName":"",
"inMaintenanceMode":false,
"ipAddresses":[
{
"internal":"10.1.1.1"
}
],
"memoryGB":1,
"memoryMB":1024,
"partitions":[
],
"powerState":"started",
"snapshots":[
],
"storageGB":17
},
"groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
"id":"test-server",
"ipaddress":"10.120.45.23",
"isTemplate":false,
"links":[
{
"href":"/v2/servers/wfad/test-server",
"id":"test-server",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"group"
},
{
"href":"/v2/accounts/wfad",
"id":"wfad",
"rel":"account"
},
{
"href":"/v2/billing/wfad/serverPricing/test-server",
"rel":"billing"
},
{
"href":"/v2/servers/wfad/test-server/publicIPAddresses",
"rel":"publicIPAddresses",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/credentials",
"rel":"credentials"
},
{
"href":"/v2/servers/wfad/test-server/statistics",
"rel":"statistics"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/capabilities",
"rel":"capabilities"
},
{
"href":"/v2/servers/wfad/test-server/alertPolicies",
"rel":"alertPolicyMappings",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
"rel":"antiAffinityPolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
},
{
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
"rel":"cpuAutoscalePolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
}
],
"locationId":"UC1",
"name":"test-server",
"os":"ubuntu14_64Bit",
"osType":"Ubuntu 14 64-bit",
"status":"active",
"storageType":"standard",
"type":"standard"
}
]
'''
__version__ = '${version}'
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
class ClcModifyServer:
clc = clc_sdk
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
self._set_clc_credentials_from_env()
p = self.module.params
cpu = p.get('cpu')
memory = p.get('memory')
state = p.get('state')
if state == 'absent' and (cpu or memory):
return self.module.fail_json(
msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
server_ids = p['server_ids']
if not isinstance(server_ids, list):
return self.module.fail_json(
msg='server_ids needs to be a list of instances to modify: %s' %
server_ids)
(changed, server_dict_array, changed_server_ids) = self._modify_servers(
server_ids=server_ids)
self.module.exit_json(
changed=changed,
server_ids=changed_server_ids,
servers=server_dict_array)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
server_ids=dict(type='list', required=True),
state=dict(default='present', choices=['present', 'absent']),
cpu=dict(),
memory=dict(),
anti_affinity_policy_id=dict(),
anti_affinity_policy_name=dict(),
alert_policy_id=dict(),
alert_policy_name=dict(),
wait=dict(type='bool', default=True)
)
mutually_exclusive = [
['anti_affinity_policy_id', 'anti_affinity_policy_name'],
['alert_policy_id', 'alert_policy_name']
]
return {"argument_spec": argument_spec,
"mutually_exclusive": mutually_exclusive}
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _get_servers_from_clc(self, server_list, message):
"""
Internal function to fetch list of CLC server objects from a list of server ids
:param server_list: The list of server ids
:param message: the error message to throw in case of any error
:return the list of CLC server objects
"""
try:
return self.clc.v2.Servers(server_list).servers
except CLCException as ex:
return self.module.fail_json(msg=message + ': %s' % ex.message)
def _modify_servers(self, server_ids):
"""
modify the configuration of the servers in the provided list
:param server_ids: list of server ids to modify
:return: (changed, server_dict_array, result_server_ids) - whether any
change was made, dictionaries describing the modified servers, and
the ids of the servers that were changed
"""
p = self.module.params
state = p.get('state')
server_params = {
'cpu': p.get('cpu'),
'memory': p.get('memory'),
'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
'alert_policy_id': p.get('alert_policy_id'),
'alert_policy_name': p.get('alert_policy_name'),
}
changed = False
server_changed = False
aa_changed = False
ap_changed = False
server_dict_array = []
result_server_ids = []
request_list = []
changed_servers = []
if not isinstance(server_ids, list) or len(server_ids) < 1:
return self.module.fail_json(
msg='server_ids should be a list of servers, aborting')
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
for server in servers:
if state == 'present':
server_changed, server_result = self._ensure_server_config(
server, server_params)
if server_result:
request_list.append(server_result)
aa_changed = self._ensure_aa_policy_present(
server,
server_params)
ap_changed = self._ensure_alert_policy_present(
server,
server_params)
elif state == 'absent':
aa_changed = self._ensure_aa_policy_absent(
server,
server_params)
ap_changed = self._ensure_alert_policy_absent(
server,
server_params)
if server_changed or aa_changed or ap_changed:
changed_servers.append(server)
changed = True
self._wait_for_requests(self.module, request_list)
self._refresh_servers(self.module, changed_servers)
for server in changed_servers:
server_dict_array.append(server.data)
result_server_ids.append(server.id)
return changed, server_dict_array, result_server_ids
def _ensure_server_config(
self, server, server_params):
"""
ensures the server is updated with the provided cpu and memory
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
cpu = server_params.get('cpu')
memory = server_params.get('memory')
changed = False
result = None
if not cpu:
cpu = server.cpu
if not memory:
memory = server.memory
if memory != server.memory or cpu != server.cpu:
if not self.module.check_mode:
result = self._modify_clc_server(
self.clc,
self.module,
server.id,
cpu,
memory)
changed = True
return changed, result
@staticmethod
def _modify_clc_server(clc, module, server_id, cpu, memory):
"""
Modify the memory or CPU of a clc server.
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param server_id: id of the server to modify
:param cpu: the new cpu value
:param memory: the new memory value
:return: the result of CLC API call
"""
result = None
acct_alias = clc.v2.Account.GetAlias()
try:
# Update the server configuration
job_obj = clc.v2.API.Call('PATCH',
'servers/%s/%s' % (acct_alias,
server_id),
json.dumps([{"op": "set",
"member": "memory",
"value": memory},
{"op": "set",
"member": "cpu",
"value": cpu}]))
result = clc.v2.Requests(job_obj)
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to update the server configuration for server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _wait_for_requests(module, request_list):
"""
Block until server provisioning requests are completed.
:param module: the AnsibleModule object
:param request_list: a list of clc-sdk.Request instances
:return: none
"""
wait = module.params.get('wait')
if wait:
# Requests.WaitUntilComplete() returns the count of failed requests
failed_requests_count = sum(
[request.WaitUntilComplete() for request in request_list])
if failed_requests_count > 0:
module.fail_json(
msg='Unable to process modify server request')
@staticmethod
def _refresh_servers(module, servers):
"""
Loop through a list of servers and refresh them.
:param module: the AnsibleModule object
:param servers: list of clc-sdk.Server instances to refresh
:return: none
"""
for server in servers:
try:
server.Refresh()
except CLCException as ex:
module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
server.id, ex.message
))
def _ensure_aa_policy_present(
self, server, server_params):
"""
ensures the server is updated with the provided anti affinity policy
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed: Boolean whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
aa_policy_id = server_params.get('anti_affinity_policy_id')
aa_policy_name = server_params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
aa_policy_id = self._get_aa_policy_id_by_name(
self.clc,
self.module,
acct_alias,
aa_policy_name)
current_aa_policy_id = self._get_aa_policy_id_of_server(
self.clc,
self.module,
acct_alias,
server.id)
if aa_policy_id and aa_policy_id != current_aa_policy_id:
self._modify_aa_policy(
self.clc,
self.module,
acct_alias,
server.id,
aa_policy_id)
changed = True
return changed
def _ensure_aa_policy_absent(
self, server, server_params):
"""
ensures the provided anti affinity policy is removed from the server
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed: Boolean whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
aa_policy_id = server_params.get('anti_affinity_policy_id')
aa_policy_name = server_params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
aa_policy_id = self._get_aa_policy_id_by_name(
self.clc,
self.module,
acct_alias,
aa_policy_name)
current_aa_policy_id = self._get_aa_policy_id_of_server(
self.clc,
self.module,
acct_alias,
server.id)
if aa_policy_id and aa_policy_id == current_aa_policy_id:
self._delete_aa_policy(
self.clc,
self.module,
acct_alias,
server.id)
changed = True
return changed
@staticmethod
def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
"""
modifies the anti affinity policy of the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param aa_policy_id: the anti affinity policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('PUT',
'servers/%s/%s/antiAffinityPolicy' % (
acct_alias,
server_id),
json.dumps({"id": aa_policy_id}))
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to modify anti affinity policy for server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _delete_aa_policy(clc, module, acct_alias, server_id):
"""
Delete the anti affinity policy of the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('DELETE',
'servers/%s/%s/antiAffinityPolicy' % (
acct_alias,
server_id),
json.dumps({}))
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to delete anti affinity policy from server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
"""
retrieves the anti affinity policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param aa_policy_name: the anti affinity policy name
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
aa_policies = clc.v2.API.Call(method='GET',
url='antiAffinityPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(
msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
alias, str(ex.response_text)))
for aa_policy in aa_policies.get('items'):
if aa_policy.get('name') == aa_policy_name:
if not aa_policy_id:
aa_policy_id = aa_policy.get('id')
else:
return module.fail_json(
msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
if not aa_policy_id:
module.fail_json(
msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
return aa_policy_id
@staticmethod
def _get_aa_policy_id_of_server(clc, module, alias, server_id):
"""
retrieves the anti affinity policy id of the server based on the CLC server id
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param server_id: the CLC server id
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
result = clc.v2.API.Call(
method='GET', url='servers/%s/%s/antiAffinityPolicy' %
(alias, server_id))
aa_policy_id = result.get('id')
except APIFailedResponse as ex:
if ex.response_status_code != 404:
module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
server_id, str(ex.response_text)))
return aa_policy_id
def _ensure_alert_policy_present(
self, server, server_params):
"""
ensures the server is updated with the provided alert policy
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed: Boolean whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
alert_policy_id = server_params.get('alert_policy_id')
alert_policy_name = server_params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id_by_name(
self.clc,
self.module,
acct_alias,
alert_policy_name)
if alert_policy_id and not self._alert_policy_exists(
server, alert_policy_id):
self._add_alert_policy_to_server(
self.clc,
self.module,
acct_alias,
server.id,
alert_policy_id)
changed = True
return changed
def _ensure_alert_policy_absent(
self, server, server_params):
"""
ensures the alert policy is removed from the server
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: changed: Boolean whether a change was made
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
alert_policy_id = server_params.get('alert_policy_id')
alert_policy_name = server_params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id_by_name(
self.clc,
self.module,
acct_alias,
alert_policy_name)
if alert_policy_id and self._alert_policy_exists(
server, alert_policy_id):
self._remove_alert_policy_to_server(
self.clc,
self.module,
acct_alias,
server.id,
alert_policy_id)
changed = True
return changed
@staticmethod
def _add_alert_policy_to_server(
clc, module, acct_alias, server_id, alert_policy_id):
"""
add the alert policy to the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param alert_policy_id: the alert policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('POST',
'servers/%s/%s/alertPolicies' % (
acct_alias,
server_id),
json.dumps({"id": alert_policy_id}))
except APIFailedResponse as ex:
module.fail_json(msg='Unable to add alert policy to the server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _remove_alert_policy_to_server(
clc, module, acct_alias, server_id, alert_policy_id):
"""
remove the alert policy from the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param alert_policy_id: the alert policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('DELETE',
'servers/%s/%s/alertPolicies/%s'
% (acct_alias, server_id, alert_policy_id))
except APIFailedResponse as ex:
module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
"""
retrieves the alert policy id from the account alias, based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param alert_policy_name: the alert policy name
:return: alert_policy_id: The alert policy id
"""
alert_policy_id = None
try:
alert_policies = clc.v2.API.Call(method='GET',
url='alertPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
alias, str(ex.response_text)))
for alert_policy in alert_policies.get('items'):
if alert_policy.get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = alert_policy.get('id')
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _alert_policy_exists(server, alert_policy_id):
"""
Checks if the alert policy exists for the server
:param server: the clc server object
:param alert_policy_id: the alert policy
:return: True if the given alert policy id is associated with the server, False otherwise
"""
result = False
alert_policies = server.alertPolicies
if alert_policies:
for alert_policy in alert_policies:
if alert_policy.get('id') == alert_policy_id:
result = True
return result
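    @staticmethod
    def _demo_alert_policy_exists():
        """
        Hypothetical sketch (not part of the original module): exercises the
        membership check in _alert_policy_exists with a stand-in server
        object, since the check only needs an ``alertPolicies`` list.
        """
        class _FakeServer(object):
            alertPolicies = [{'id': 'ap-1'}, {'id': 'ap-2'}]
        assert ClcModifyServer._alert_policy_exists(_FakeServer(), 'ap-2')
        assert not ClcModifyServer._alert_policy_exists(_FakeServer(), 'ap-9')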
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
argument_dict = ClcModifyServer._define_module_argument_spec()
module = AnsibleModule(supports_check_mode=True, **argument_dict)
clc_modify_server = ClcModifyServer(module)
clc_modify_server.process_request()
from ansible.module_utils.basic import * # pylint: disable=W0614
if __name__ == '__main__':
main()
| gpl-3.0 |
xen0l/ansible | lib/ansible/plugins/action/net_user.py | 648 | 1057 | # (c) 2017, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action.net_base import ActionModule as _ActionModule
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
return result
| gpl-3.0 |
40223232/final-test-6-22 | static/Brython3.1.1-20150328-091302/Lib/token.py | 743 | 3034 | """Token constants (from "token.h")."""
__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# ./python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
LBRACE = 25
RBRACE = 26
EQEQUAL = 27
NOTEQUAL = 28
LESSEQUAL = 29
GREATEREQUAL = 30
TILDE = 31
CIRCUMFLEX = 32
LEFTSHIFT = 33
RIGHTSHIFT = 34
DOUBLESTAR = 35
PLUSEQUAL = 36
MINEQUAL = 37
STAREQUAL = 38
SLASHEQUAL = 39
PERCENTEQUAL = 40
AMPEREQUAL = 41
VBAREQUAL = 42
CIRCUMFLEXEQUAL = 43
LEFTSHIFTEQUAL = 44
RIGHTSHIFTEQUAL = 45
DOUBLESTAREQUAL = 46
DOUBLESLASH = 47
DOUBLESLASHEQUAL = 48
AT = 49
RARROW = 50
ELLIPSIS = 51
OP = 52
ERRORTOKEN = 53
N_TOKENS = 54
NT_OFFSET = 256
#--end constants--
tok_name = {value: name
for name, value in globals().items()
if isinstance(value, int) and not name.startswith('_')}
__all__.extend(tok_name.values())
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
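def _demo_predicates():
    """Hypothetical sketch (not part of the generated file): the constants
    above round-trip through ``tok_name`` and classify as expected."""
    assert tok_name[NUMBER] == 'NUMBER'
    assert ISTERMINAL(NUMBER)        # terminal tokens sit below NT_OFFSET
    assert ISNONTERMINAL(NT_OFFSET)  # grammar symbols start at NT_OFFSET
    assert ISEOF(ENDMARKER)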
def _main():
import re
import sys
args = sys.argv[1:]
inFileName = args[0] if args else "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError as err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = sorted(tokens.keys())
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))
fp.close()
if __name__ == "__main__":
_main()
| gpl-3.0 |
att-comdev/deckhand | deckhand/common/validation_message.py | 1 | 2620 | # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Indicates document sanity-check validation failure pre- or post-rendering.
DOCUMENT_SANITY_CHECK_FAILURE = 'D001'
# Indicates document post-rendering validation failure.
DOCUMENT_POST_RENDERING_FAILURE = 'D002'
class ValidationMessage(object):
"""ValidationMessage per UCP convention:
https://github.com/att-comdev/ucp-integration/blob/master/docs/source/api-conventions.rst#output-structure # noqa
Construction of ``ValidationMessage`` message:
:param string message: Validation failure message.
:param boolean error: True or False, if this is an error message.
:param string name: Identifying name of the validation.
:param string level: The severity of validation result, as "Error",
"Warning", or "Info"
:param string schema: The schema of the document being validated.
:param string doc_name: The name of the document being validated.
:param string diagnostic: Information about what led to the message,
or details for resolution.
"""
def __init__(self,
message='Document validation error.',
error=True,
name='Deckhand validation error',
level='Error',
doc_schema='',
doc_name='',
doc_layer='',
diagnostic=''):
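        # NOTE: the ``level`` argument is recomputed from ``error`` here, so a
        # caller-supplied level can never disagree with the error flag.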
level = 'Error' if error else 'Info'
self._output = {
'message': message,
'error': error,
'name': name,
'documents': [],
'level': level,
'kind': self.__class__.__name__
}
self._output['documents'].append(
dict(schema=doc_schema, name=doc_name, layer=doc_layer))
if diagnostic:
self._output.update(diagnostic=diagnostic)
def format_message(self):
"""Return ``ValidationMessage`` message.
:returns: The ``ValidationMessage`` for the Validation API response.
:rtype: dict
"""
return self._output
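def _demo_validation_message():
    """Hypothetical usage sketch (not part of the original module): build a
    sanity-check failure for a document and render the API payload dict."""
    msg = ValidationMessage(
        message='Sanity check failed.',
        error=True,
        name=DOCUMENT_SANITY_CHECK_FAILURE,
        doc_schema='deckhand/DataSchema/v1',
        doc_name='example-doc',
        doc_layer='site',
        diagnostic='Check the document schema version.')
    # The dict carries: message, error, name, documents, level, kind,
    # and (because we passed one) diagnostic.
    return msg.format_message()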
| apache-2.0 |
Hellrungj/CSC-412-Networking | rpc-project/venv/lib/python2.7/site-packages/setuptools/depends.py | 462 | 6370 | import sys
import imp
import marshal
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from distutils.version import StrictVersion
from setuptools import compat
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name,self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f,p,i = find_module(self.module,paths)
if f: f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
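def _demo_require():
    """A minimal usage sketch (not part of setuptools): declare a requirement
    on the stdlib ``json`` module, which exposes ``__version__`` in CPython,
    and query it."""
    req = Require('Json', '1.0', 'json')
    assert req.full_name() == 'Json-1.0'
    # Both calls resolve via find_module/get_module_constant defined below.
    return req.is_present(), req.is_current()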
def _iter_code(code):
"""Yield '(op,arg)' pair for each operation in code object 'code'"""
from array import array
from dis import HAVE_ARGUMENT, EXTENDED_ARG
bytes = array('b',code.co_code)
eof = len(code.co_code)
ptr = 0
extended_arg = 0
while ptr<eof:
op = bytes[ptr]
if op>=HAVE_ARGUMENT:
arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
ptr += 3
if op==EXTENDED_ARG:
extended_arg = arg * compat.long_type(65536)
continue
else:
arg = None
ptr += 1
yield op,arg
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix,mode,kind) = info = imp.find_module(part, paths)
if kind==PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts,module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind==PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind==PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind==PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module, f, path, (suffix, mode, kind))
return getattr(sys.modules[module], symbol, None)
finally:
if f:
f.close()
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for op, arg in _iter_code(code):
if op==LOAD_CONST:
const = code.co_consts[arg]
elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL):
return const
else:
const = default
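def _demo_extract_constant():
    """A minimal sketch (not part of setuptools): pull a version string out of
    compiled source without importing it. Assumes the byte-oriented bytecode
    layout (pre-3.6 CPython) that _iter_code above targets."""
    code = compile("__version__ = '1.2.3'\n", '<demo>', 'exec')
    return extract_constant(code, '__version__')  # -> '1.2.3'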
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
| gpl-3.0 |
jjhelmus/scipy | scipy/signal/filter_design.py | 14 | 135076 | """Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
import numpy
import numpy as np
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array,
mintypecode)
from numpy.polynomial.polynomial import polyval as npp_polyval
from scipy import special, optimize
from scipy.special import comb, factorial
from scipy._lib._numpy_compat import polyvalfromroots
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
'sosfreqz', 'iirnotch', 'iirpeak']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute

# Tolerance used by `group_delay` below (on the order of float64 machine
# epsilon); defined here so this excerpt is self-contained.
EPSILON = 2e-16
def findfreqs(num, den, N, kind='ba'):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system, where the coefficients
are ordered from highest to lowest degree. Or, the roots of the
transfer function numerator and denominator (i.e. zeroes and poles).
N : int
The length of the array to be computed.
kind : str {'ba', 'zp'}, optional
Specifies whether the numerator and denominator are specified by their
polynomial coefficients ('ba'), or their roots ('zp').
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
if kind == 'ba':
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
elif kind == 'zp':
ep = atleast_1d(den) + 0j
tz = atleast_1d(num) + 0j
else:
raise ValueError("input must be one of {'ba', 'zp'}")
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
                b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
        H(w) = ----------------------------------------------
                a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqs_zpk(z, p, k, worN=None):
"""
Compute frequency response of analog filter.
Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its
frequency response::
                   (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1])
        H(w) = k * ----------------------------------------
                   (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1])
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqz : Compute the frequency response of a digital filter in TF form
freqz_zpk : Compute the frequency response of a digital filter in ZPK form
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.signal import freqs_zpk, iirfilter
>>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1',
... output='zpk')
>>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
k = np.asarray(k)
if k.size > 1:
raise ValueError('k must be a single scalar gain')
if worN is None:
w = findfreqs(z, p, 200, kind='zp')
elif isinstance(worN, int):
N = worN
w = findfreqs(z, p, N, kind='zp')
else:
w = worN
w = atleast_1d(w)
s = 1j * w
num = polyvalfromroots(s, z)
den = polyvalfromroots(s, p)
h = k * num/den
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
                    jw                 -jw              -jwM
           jw    B(e  )    b[0] + b[1]e    + .... + b[M]e
        H(e  ) = ------ = -----------------------------------
                    jw                 -jw              -jwN
                 A(e  )    a[0] + a[1]e    + .... + a[N]e
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
sosfreqz
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def freqz_zpk(z, p, k, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in ZPK form.
Given the Zeros, Poles and Gain of a digital filter, compute its frequency
    response:

    .. math:: H(z) = k \prod_i (z - Z[i]) / \prod_j (z - P[j])
where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are
the `poles`.
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqs_zpk : Compute the frequency response of an analog filter in ZPK form
freqz : Compute the frequency response of a digital filter in TF form
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy import signal
>>> z, p, k = signal.butter(4, 0.2, output='zpk')
>>> w, h = signal.freqz_zpk(z, p, k)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
z, p = map(atleast_1d, (z, p))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(1j * w)
h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
                  d         jw
        D(w) = - --  arg H(e  )
                 dw
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
When such a case arises the warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in SOS format.
Given `sos`, an array with shape (n, 6) of second order sections of
a digital filter, compute the frequency response of the system function::
                B0(z)   B1(z)         B{n-1}(z)
        H(z) = ----- * ----- * ... * ---------
                A0(z)   A1(z)         A{n-1}(z)
for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
denominator of the transfer function of the k-th second order section.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz, sosfilt
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 15th-order bandpass filter in SOS format.
>>> from scipy import signal
>>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='sos')
Compute the frequency response at 1500 points from DC to Nyquist.
>>> w, h = signal.sosfreqz(sos, worN=1500)
Plot the response.
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... [r'$-\\pi$', r'$-\\pi/2$', '0', r'$\\pi/2$', r'$\\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
If the same filter is implemented as a single transfer function,
numerical error corrupts the frequency response:
>>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='ba')
>>> w, h = signal.freqz(b, a, worN=1500)
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
if n_sections == 0:
raise ValueError('Cannot compute frequencies with no sections')
h = 1.
for row in sos:
w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole)
h *= rowh
return w, h
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
    >>> print(zc)
    [ 1.+1.j  2.+1.j  2.+1.j  2.+2.j]
    >>> print(zr)
    [ 1.  3.  4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
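    Examples
    --------
    A minimal illustrative example (not part of the original docstring): a
    single pole at 0.5 with gain 2, i.e. ``H(z) = 2 / (z - 0.5)``.

    >>> from scipy import signal
    >>> b, a = signal.zpk2tf([], [0.5], 2.0)  # b -> [2.], a -> [1., -0.5]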
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
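    Examples
    --------
    A minimal illustrative example (not part of the original docstring): a
    second-order transfer function maps to a single section.

    >>> from scipy import signal
    >>> sos = signal.tf2sos([1, 2, 1], [1, -0.75, 0.125])
    >>> # sos -> [[1., 2., 1., 1., -0.75, 0.125]]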
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
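    Examples
    --------
    A minimal illustrative example (not part of the original docstring):
    combine two first-order sections, ``1 / (1 - 0.5 z^-1)`` and
    ``1 / (1 - 0.25 z^-1)``.

    >>> from scipy import signal
    >>> b, a = signal.sos2tf([[1, 0, 0, 1, -0.5, 0], [1, 0, 0, 1, -0.25, 0]])
    >>> # b -> [1., 0., 0., 0., 0.], a -> [1., -0.75, 0.125, 0., 0.]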
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
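    Examples
    --------
    A minimal illustrative example (not part of the original docstring): each
    section contributes two zeros and two poles.

    >>> from scipy import signal
    >>> z, p, k = signal.sos2zpk([[1, 2, 1, 1, -0.75, 0.125]])
    >>> # z -> [-1., -1.], p -> [0.5, 0.25], k -> 1.0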
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zeros so that all numerators have the same length. Such
alignment is necessary for functions like 'tf2ss', which needs the
alignment when dealing with SIMO transfer functions.
Parameters
----------
nums: array_like
Numerator or list of numerators. Not necessarily with same length.
Returns
-------
nums: array
The numerator. If `nums` input was a list of numerators then a 2d
array with padded zeros for shorter numerators is returned. Otherwise
returns ``np.asarray(nums)``.
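    Examples
    --------
    A minimal illustrative example (not part of the original docstring): a
    scalar numerator is left-padded with zeros to match the longest one.

    >>> padded = _align_nums([5, [1, 2, 3]])
    >>> # padded -> [[0., 0., 5.], [1., 2., 3.]]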
"""
try:
            # The statement can throw a ValueError if one
            # of the numerators is a scalar and another
            # is array-like, e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
b: array_like
Numerator of the transfer function. Can be a 2d array to normalize
multiple transfer functions.
a: array_like
Denominator of the transfer function. At most 1d.
Returns
-------
num: array
The numerator of the normalized transfer function. At least a 1d
array. A 2d-array if the input `num` is a 2d array.
den: 1d-array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
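    Examples
    --------
    A minimal illustrative example (not part of the original docstring): the
    coefficients are scaled so the leading denominator coefficient becomes 1.

    >>> from scipy import signal
    >>> num, den = signal.normalize([1, 2], [2, 4, 8])
    >>> # num -> [0.5, 1.], den -> [1., 2., 4.]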
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least on nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
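    A minimal illustrative example (not part of the original docstring):
    moving ``H(s) = 1 / (s + 1)`` to a cutoff of 10 rad/s gives
    ``10 / (s + 10)``.

    >>> from scipy import signal
    >>> b, a = signal.lp2lp([1], [1, 1], wo=10)  # b -> [10.], a -> [1., 10.]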
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
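Examples
--------
A minimal numerical check: the first-order prototype ``1 / (s + 1)``
becomes the highpass ``s / (s + 10)`` for ``wo=10``:
>>> import numpy as np
>>> from scipy import signal
>>> b, a = signal.lp2hp([1.0], [1.0, 1.0], wo=10)
>>> np.allclose(b, [1.0, 0.0]) and np.allclose(a, [1.0, 10.0])
True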
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
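Examples
--------
A minimal numerical check: the substitution
``s -> (s**2 + wo**2) / (bw * s)`` turns the prototype ``1 / (s + 1)``
into ``2*s / (s**2 + 2*s + 100)`` for ``wo=10, bw=2``:
>>> import numpy as np
>>> from scipy import signal
>>> b, a = signal.lp2bp([1.0], [1.0, 1.0], wo=10, bw=2)
>>> np.allclose(b, [2.0, 0.0]) and np.allclose(a, [1.0, 2.0, 100.0])
True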
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
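Examples
--------
A minimal numerical check: the substitution
``s -> bw * s / (s**2 + wo**2)`` turns the prototype ``1 / (s + 1)``
into ``(s**2 + 100) / (s**2 + 2*s + 100)`` for ``wo=10, bw=2``:
>>> import numpy as np
>>> from scipy import signal
>>> b, a = signal.lp2bs([1.0], [1.0, 1.0], wo=10, bw=2)
>>> np.allclose(b, [1.0, 0.0, 100.0]) and np.allclose(a, [1.0, 2.0, 100.0])
True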
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
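Examples
--------
A minimal numerical check: for ``fs=1``, the analog prototype
``1 / (s + 1)`` maps to ``(z + 1) / (3*z - 1)``:
>>> import numpy as np
>>> from scipy import signal
>>> b, a = signal.bilinear([1.0], [1.0, 1.0], fs=1.0)
>>> np.allclose(b, [1.0 / 3, 1.0 / 3]) and np.allclose(a, [1.0, -1.0 / 3])
True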
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
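Examples
--------
A short sketch: design a digital elliptic (the default `ftype`) lowpass
filter with passband edge 0.2, stopband edge 0.3, at most 1 dB passband
loss and at least 40 dB stopband attenuation:
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.2, 0.3, gpass=1, gstop=40)
>>> sos = signal.iirdesign(0.2, 0.3, 1, 40, output='sos')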
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)``
for ``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
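Examples
--------
A minimal numerical check, consistent with `bilinear`: ``1 / (s + 1)``
at ``fs=1`` maps to a zero at -1, a pole at 1/3, and gain 1/3:
>>> import numpy as np
>>> z, p, k = _zpkbilinear([], [-1.0], 1.0, fs=1.0)
>>> np.allclose(z, [-1.0]) and np.allclose(p, [1.0 / 3]) and np.allclose(k, 1.0 / 3)
True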
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
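Examples
--------
A minimal numerical check: a single pole at -1 scales to -10 for
``wo=10``, and the gain is scaled by ``wo**degree``:
>>> import numpy as np
>>> z, p, k = _zpklp2lp([], [-1.0], 1.0, wo=10)
>>> k == 10.0 and np.allclose(p, [-10.0])
True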
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
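Examples
--------
A minimal numerical check: inverting a single pole at -1 about
``wo=10`` gives a pole at -10 and a zero at the origin:
>>> import numpy as np
>>> z, p, k = _zpklp2hp([], [-1.0], 1.0, wo=10)
>>> k == 1.0 and np.allclose(z, [0.0]) and np.allclose(p, [-10.0])
True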
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
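Examples
--------
A minimal numerical check: the transformed poles of a first-order
prototype lie at radius `wo`, and the gain is scaled by ``bw**degree``:
>>> import numpy as np
>>> z, p, k = _zpklp2bp([], [-1.0], 1.0, wo=10, bw=2)
>>> k == 2.0 and np.allclose(abs(p), [10.0, 10.0])
True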
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
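Examples
--------
A minimal numerical check: a first-order prototype gains a conjugate
pair of zeros at ``+/-1j*wo`` that block the stopband center:
>>> import numpy as np
>>> z, p, k = _zpklp2bs([], [-1.0], 1.0, wo=10, bw=2)
>>> k == 1.0 and np.allclose(z, [10j, -10j])
True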
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth-order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output='sos'``.
See Also
--------
ellipord, ellipap
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
"""
Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth's cutoff frequency (green):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') # -3 dB magnitude
>>> plt.axvline(10, color='green') # cutoff frequency
>>> plt.title('Magnitude-normalized Bessel filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the delay-normalized filter, showing the maximally-flat group delay
at 0.1 seconds:
>>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
>>> w, h = signal.freqs(b, a)
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.axhline(0.1, color='red') # 0.1 seconds group delay
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
References
----------
.. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel_'+norm)
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
gpass : float
Amount of ripple in the passband in dB.
gstop : float
Amount of attenuation in the stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
50 rad/s, while attenuating at least 40 dB below 14 and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
# Find the Butterworth natural frequency WN (or the "3dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while attenuating at least 40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
>>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital bandstop filter that attenuates at least 60 dB from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
>>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
>>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The elliptic natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while attenuating at least 60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
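Examples
--------
A minimal check: the prototype has no zeros, unit gain, and poles on
the unit circle:
>>> import numpy as np
>>> z, p, k = buttap(2)
>>> len(z) == 0 and k == 1 and np.allclose(abs(p), 1.0)
True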
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
def cheb2ap(N, rs):
"""
Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth-order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
ellip : Filter design function using this prototype
References
----------
.. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
Chapters 5 and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
    This can be used to calculate ratios of factorials more efficiently,
    since ``n!/m! == falling_factorial(n, n-m)`` for ``n >= m``, skipping
    the factors that cancel out.  The usual factorial is ``n! == ff(n, n)``.
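    For example (an illustrative doctest, not in the original source):
    >>> _falling_factorial(6, 3)   # 6 * 5 * 4, which equals 6! / 3!
    120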
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
def _bessel_poly(n, reverse=False):
"""
Return the coefficients of Bessel polynomial of degree `n`
If `reverse` is true, a reverse Bessel polynomial is output.
Output is a list of coefficients:
[1] = 1
[1, 1] = 1*s + 1
[1, 3, 3] = 1*s^2 + 3*s + 3
[1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15
[1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
etc.
Output is a Python list of arbitrary precision long ints, so n is only
limited by your hardware's memory.
Sequence is http://oeis.org/A001498 , and output can be confirmed to
match http://oeis.org/A001498/b001498.txt :
>>> i = 0
>>> for n in range(51):
... for x in _bessel_poly(n, reverse=True):
... print(i, x)
... i += 1
"""
if abs(int(n)) != n:
raise ValueError("Polynomial order must be a nonnegative integer")
else:
n = int(n) # np.int32 doesn't work, for instance
out = []
for k in range(n + 1):
num = _falling_factorial(2*n - k, n)
den = 2**(n - k) * factorial(k, exact=True)
out.append(num // den)
if reverse:
return out[::-1]
else:
return out
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
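    A small illustrative example (not from the original source), finding the
    roots of ``x**2 - 1`` from rough complex starting guesses:
    >>> import numpy as np
    >>> f = np.poly1d([1, 0, -1])
    >>> roots = _aberth(f, f.deriv(), [0.5 + 0.1j, -0.4 - 0.2j])
    >>> np.allclose(sorted(roots.real), [-1, 1])
    True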
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
def _bessel_zeros(N):
"""
Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
modified Bessel function of the second kind
"""
if N == 0:
return asarray([])
# Generate starting points
x0 = _campos_zeros(N)
# Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
# Bessel polynomial y_N(x)
def f(x):
return special.kve(N+0.5, 1/x)
# First derivative of above
def fp(x):
return (special.kve(N-0.5, 1/x)/(2*x**2) -
special.kve(N+0.5, 1/x)/(x**2) +
special.kve(N+1.5, 1/x)/(2*x**2))
# Starting points converge to true zeros
x = _aberth(f, fp, x0)
# Improve precision using Newton's method on each
for i in range(len(x)):
x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
# Average complex conjugates to make them exactly symmetrical
x = np.mean((x, x[::-1].conj()), 0)
# Zeros should sum to -1
if abs(np.sum(x) + 1) > 1e-15:
raise RuntimeError('Generated zeros are inaccurate')
return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
"""
Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
Parameters
----------
N : int
The order of the filter.
norm : {'phase', 'delay', 'mag'}, optional
Frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This
happens for both low-pass and high-pass filters, so this is the
"phase-matched" case. [6]_
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1 (e.g. 1 second). This is the "natural" type obtained by
            solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency 1. This is called "frequency normalization" by
Bond. [1]_
.. versionadded:: 0.18.0
Returns
-------
z : ndarray
Zeros of the transfer function. Is always an empty array.
p : ndarray
Poles of the transfer function.
k : scalar
Gain of the transfer function. For phase-normalized, this is always 1.
See Also
--------
bessel : Filter design function using this prototype
Notes
-----
To find the pole locations, approximate starting points are generated [2]_
for the zeros of the ordinary Bessel polynomial [3]_, then the
Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
calculate more accurate zeros, and these locations are then inverted about
the unit circle.
References
----------
.. [1] C.R. Bond, "Bessel Filter Constants",
http://www.crbond.com/papers/bsf.pdf
.. [2] Campos and Calderon, "Approximate closed-form formulas for the
zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
.. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
.. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
April 1973
.. [5] Ehrlich, "A modified Newton method for polynomials", Communications
of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
:DOI:`10.1145/363067.363115`
.. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
Others", RaneNote 147, 1998, http://www.rane.com/note147.html
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
if N == 0:
p = []
k = 1
else:
# Find roots of reverse Bessel polynomial
p = 1/_bessel_zeros(N)
a_last = _falling_factorial(2*N, N) // 2**N
# Shift them to a different normalization if required
if norm in ('delay', 'mag'):
# Normalized for group delay of 1
k = a_last
if norm == 'mag':
# -3 dB magnitude point is at 1 rad/sec
norm_factor = _norm_factor(p, k)
p /= norm_factor
k = norm_factor**-N * a_last
elif norm == 'phase':
# Phase-matched (1/2 max phase shift at 1 rad/sec)
# Asymptotes are same as Butterworth filter
p *= 10**(-math.log10(a_last)/N)
k = 1
else:
raise ValueError('normalization not understood')
return asarray([]), asarray(p, dtype=complex), float(k)
def iirnotch(w0, Q):
"""
Design second-order IIR notch digital filter.
A notch filter is a band-stop filter with a narrow bandwidth
(high quality factor). It rejects a narrow frequency band and
leaves the rest of the spectrum little changed.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirpeak
Notes
-----
    .. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the 60Hz component from a
signal sampled at 200Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 60.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design notch filter
>>> b, a = signal.iirnotch(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-25, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "notch")
def iirpeak(w0, Q):
"""
Design second-order IIR peak (resonant) digital filter.
A peak filter is a band-pass filter with a narrow bandwidth
(high quality factor). It rejects components outside a narrow
frequency band.
Parameters
----------
w0 : float
Normalized frequency to be retained in a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding
to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
peak filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirnotch
Notes
-----
    .. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the frequencies other than the 300Hz
component from a signal sampled at 1000Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 300.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design peak filter
>>> b, a = signal.iirpeak(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-50, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "peak")
def _design_notch_peak_filter(w0, Q, ftype):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
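    Examples
    --------
    An illustrative check (not part of the original docstring): the notch
    numerator has zeros exactly on the unit circle at ``w0``, so the
    response vanishes there.
    >>> import numpy as np
    >>> b, a = _design_notch_peak_filter(0.5, 30.0, "notch")
    >>> zval = np.exp(1j * 0.5 * np.pi)
    >>> bool(abs(np.polyval(b, zval) / np.polyval(a, zval)) < 1e-12)
    True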
"""
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
# Checks if w0 is within the range
    if w0 >= 1.0 or w0 <= 0.0:
raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0/Q
# Normalize inputs
bw = bw*np.pi
w0 = w0*np.pi
    # Compute -3dB attenuation
gb = 1/np.sqrt(2)
if ftype == "notch":
# Compute beta: formula 11.3.4 (p.575) from reference [1]
beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0)
elif ftype == "peak":
# Compute beta: formula 11.3.19 (p.579) from reference [1]
beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0)
else:
raise ValueError("Unknown ftype.")
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0/(1.0+beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0])
else:
b = (1.0-gain)*np.array([1.0, 0.0, -1.0])
a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)])
return b, a
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}
| bsd-3-clause |
gwct/grampa | lib/spec_tree.py | 1 | 5143 | import sys, os, reconcore as RC, recontree as RT, global_vars as globs
#############################################################################
def readSpecTree(spec_tree_input, starttime):
if os.path.isfile(spec_tree_input):
spec_tree = open(spec_tree_input, "r").read().replace("\n", "").replace("\r","");
else:
spec_tree = spec_tree_input;
# If the input string is a filename, read the file. Otherwise, just try it as a newick string.
hybrid_spec = "";
spec_tree = RT.remBranchLength(spec_tree);
tips = spec_tree.replace("(","").replace(")","").replace(";","").split(",");
# Remove the branch lengths from the tree string and get the tip labels.
if any(tip.isdigit() for tip in tips):
RC.errorOut(6, "Tip labels cannot be purely numbers. Please add another character.");
if globs.spec_type == 's' and any(tips.count(tip) > 1 for tip in tips):
RC.errorOut(7, "You have entered a tree type (-t) of 's' but there are labels in your tree that appear more than once!");
if globs.spec_type == 'm' and any(tips.count(tip) not in [1,2] for tip in tips):
RC.errorOut(8, "You have entered a tree type (-t) of 'm', species in your tree should appear exactly once or twice.");
# Some error checking based on the tip labels in the tree.
if globs.spec_type == 'm':
hybrid_spec = list(set([tip for tip in tips if tips.count(tip) != 1]));
for h in hybrid_spec:
spec_tree = spec_tree.replace(h, h+"*", 1);
# If the user entered a MUL-tree, some internal re-labeling must be done to those labels that appear twice.
try:
sinfo, st = RT.treeParse(spec_tree);
# Parsing of the species tree.
except:
RC.errorOut(9, "Error reading species tree!");
# Reading the species tree file.
if globs.label_opt:
if globs.v != -1:
print();
print("# The input species tree with internal nodes labeled:");
print(st + "\n");
RC.endProg(starttime);
# The output if --labeltree is set.
return sinfo, st;
#############################################################################
def hInParse(sinfo, st, h1_input, h2_input):
if globs.spec_type == 's':
hybrid_clades, hybrid_nodes = getHClades(h1_input, sinfo, "h1");
copy_clades, copy_nodes = getHClades(h2_input, sinfo, "h2");
# If the input tree is singly-labeled, use the input info from -h1 and -h2 to get the hybrid clades and nodes.
elif globs.spec_type == 'm':
mul_copy_clade = [n for n in sinfo if sinfo[n][2] == 'tip' and '*' in n];
mul_hybrid_clade = [n.replace("*","") for n in mul_copy_clade];
mul_hybrid_node, mul_hybrid_mono = RT.LCA(mul_hybrid_clade, sinfo);
mul_copy_node, mul_copy_mono = RT.LCA(mul_copy_clade, sinfo);
if not mul_hybrid_mono or not mul_copy_mono:
RC.errorOut(13, "All hybrid clades specified in your MUL-tree must be monophyletic! Hybrid clade identified as: " + ",".join(mul_copy_clade));
hybrid_clades, hybrid_nodes, copy_clades, copy_nodes = [mul_hybrid_clade], [mul_hybrid_node], [mul_copy_clade], [mul_copy_node];
# If the input tree is a MUL-tree, we have to determine what the hybrid clades and nodes are.
return hybrid_clades, hybrid_nodes, copy_clades, copy_nodes;
# Parses the input h nodes.
#############################################################################
def getHClades(h_list, sinfo, h_type):
    # This function takes the -h1 or -h2 input string and determines whether the
    # entries are clades or node labels. It then retrieves the complete lists of
    # hybrid clades and nodes.
if h_list:
if " " in h_list:
h_clades = h_list.split(" ");
h_clades = list(map(set, [tmp_h.split(",") for tmp_h in h_clades]));
else:
h_clades = list(map(set, [h_list.split(",")]));
# Split up the input info. If there is a space, multiple nodes/clades have been specified.
if not all(h in sinfo for hybrid_list in h_clades for h in hybrid_list if not h.isdigit()):
RC.errorOut(10, "Not all -" + h_type + " species are present in your species tree!");
if not all("<" + h + ">" in sinfo for hybrid_list in h_clades for h in hybrid_list if h.isdigit()):
RC.errorOut(11, "Not all -" + h_type + " nodes are present in your species tree!");
# Some error checking to make sure everything the user input is actually in the tree.
h_nodes = [];
for hybrid_clade in h_clades:
hybrid_clade = list(hybrid_clade);
if hybrid_clade[0].isdigit():
h_node = "<" + hybrid_clade[0] + ">";
# If the input was an internal node, add it to the node list here.
else:
h_node, h_mono = RT.LCA(hybrid_clade, sinfo);
if not h_mono:
RC.errorOut(12, "All hybrid clades specified h1 and h2 must be monophyletic!");
# If the input was a clade, retrieve the ancestral node and check if it is monophyletic here.
if h_node not in h_nodes:
h_nodes.append(h_node);
# Add the hybrid node to the nodes list.
# If the user input anything as -h1 or -h2 this parses it.
else:
h_nodes = list(sinfo.keys());
h_clades = [RT.getClade(node, sinfo) for node in h_nodes];
# If the user did not specify -h1 or -h2, this adds all possible nodes to the list.
return h_clades, h_nodes;
#############################################################################
| gpl-3.0 |
beeftornado/sentry | src/sentry/roles/manager.py | 1 | 2011 | from __future__ import absolute_import
import six
from collections import OrderedDict
class Role(object):
def __init__(self, priority, id, name, desc="", scopes=(), is_global=False):
assert len(id) <= 32, "Role id must be no more than 32 characters"
self.priority = priority
self.id = id
self.name = name
self.desc = desc
self.scopes = frozenset(scopes)
self.is_global = bool(is_global)
def __str__(self):
return self.name.encode("utf-8")
def __unicode__(self):
return six.text_type(self.name)
def __repr__(self):
return u"<Role: {}>".format(self.id)
def has_scope(self, scope):
return scope in self.scopes
class RoleManager(object):
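    """Ordered registry of roles, resolved by id.
    A minimal usage sketch (the role definitions below are hypothetical,
    not Sentry's real configuration):
    >>> manager = RoleManager([
    ...     {"id": "member", "name": "Member", "scopes": ["event:read"]},
    ...     {"id": "admin", "name": "Admin", "scopes": ["event:read", "org:write"]},
    ... ])
    >>> manager.get_top_dog().id
    'admin'
    >>> manager.can_manage("admin", "member")
    True
    """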
def __init__(self, config, default=None):
role_list = []
self._roles = OrderedDict()
for idx, role in enumerate(config):
role = Role(idx, **role)
role_list.append(role)
self._roles[role.id] = role
self._choices = tuple((r.id, r.name) for r in role_list)
if default:
self._default = self._roles[default]
else:
self._default = role_list[0]
self._top_dog = role_list[-1]
def __iter__(self):
return six.itervalues(self._roles)
def can_manage(self, role, other):
return self.get(role).priority >= self.get(other).priority
def get(self, id):
return self._roles[id]
def get_all(self):
return list(self._roles.values())
def get_choices(self):
return self._choices
def get_default(self):
return self._default
def get_top_dog(self):
return self._top_dog
def with_scope(self, scope):
for role in self.get_all():
if role.has_scope(scope):
yield role
def with_any_scope(self, scopes):
for role in self.get_all():
if any(role.has_scope(scope) for scope in scopes):
yield role
| bsd-3-clause |
chemelnucfin/tensorflow | tensorflow/contrib/timeseries/python/timeseries/math_utils.py | 12 | 44149 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities used by time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
from tensorflow.contrib import lookup
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def normal_log_prob(loc, scale, x):
"""Computes the Normal log pdf."""
z = (x - loc) / scale
return -0.5 * (math_ops.square(z)
+ np.log(2. * np.pi) + math_ops.log(scale))
def cauchy_log_prob(loc, scale, x):
"""Computes the Cauchy log pdf."""
z = (x - loc) / scale
return (-np.log(np.pi) - math_ops.log(scale) -
math_ops.log1p(math_ops.square(z)))
def mvn_tril_log_prob(loc, scale_tril, x):
"""Computes the MVN log pdf under tril scale. Doesn't handle batches."""
x0 = x - loc
z = linalg_ops.matrix_triangular_solve(
scale_tril, x0[..., array_ops.newaxis])[..., 0]
log_det_cov = 2. * math_ops.reduce_sum(math_ops.log(
array_ops.matrix_diag_part(scale_tril)), axis=-1)
d = math_ops.cast(array_ops.shape(scale_tril)[-1], log_det_cov.dtype)
return -0.5 * (math_ops.reduce_sum(math_ops.square(z), axis=-1)
+ d * np.log(2. * np.pi) + log_det_cov)
def clip_covariance(
covariance_matrix, maximum_variance_ratio, minimum_variance):
"""Enforce constraints on a covariance matrix to improve numerical stability.
Args:
covariance_matrix: A [..., N, N] batch of covariance matrices.
maximum_variance_ratio: The maximum allowed ratio of two diagonal
entries. Any entries lower than the maximum entry divided by this ratio
will be set to that value.
minimum_variance: A floor for diagonal entries in the returned matrix.
Returns:
A new covariance matrix with the requested constraints enforced. If the
input was positive definite, the output will be too.
"""
# TODO(allenl): Smarter scaling here so that correlations are preserved when
# fiddling with diagonal elements.
diagonal = array_ops.matrix_diag_part(covariance_matrix)
maximum = math_ops.reduce_max(diagonal, axis=-1, keepdims=True)
new_diagonal = gen_math_ops.maximum(
diagonal, maximum / maximum_variance_ratio)
return array_ops.matrix_set_diag(
covariance_matrix, math_ops.maximum(new_diagonal, minimum_variance))
def block_diagonal(matrices, dtype=dtypes.float32, name="block_diagonal"):
r"""Constructs block-diagonal matrices from a list of batched 2D tensors.
Args:
matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of
matrices with the same batch dimension).
dtype: Data type to use. The Tensors in `matrices` must match this dtype.
name: A name for the returned op.
Returns:
A matrix with the input matrices stacked along its main diagonal, having
shape [..., \sum_i N_i, \sum_i M_i].
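  For example (an illustrative sketch, assuming TensorFlow 1.x graph mode;
  only the static shape is checked, since evaluating values would require a
  session):
  >>> import tensorflow as tf
  >>> blocked = block_diagonal([tf.ones([2, 2]), tf.ones([1, 3])])
  >>> blocked.get_shape().as_list()
  [3, 5]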
"""
matrices = [ops.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
blocked_rows = tensor_shape.Dimension(0)
blocked_cols = tensor_shape.Dimension(0)
batch_shape = tensor_shape.TensorShape(None)
for matrix in matrices:
full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
blocked_rows += full_matrix_shape[-2]
blocked_cols += full_matrix_shape[-1]
ret_columns_list = []
for matrix in matrices:
matrix_shape = array_ops.shape(matrix)
ret_columns_list.append(matrix_shape[-1])
ret_columns = math_ops.add_n(ret_columns_list)
row_blocks = []
current_column = 0
for matrix in matrices:
matrix_shape = array_ops.shape(matrix)
row_before_length = current_column
current_column += matrix_shape[-1]
row_after_length = ret_columns - current_column
row_blocks.append(
array_ops.pad(
tensor=matrix,
paddings=array_ops.concat(
[
array_ops.zeros(
[array_ops.rank(matrix) - 1, 2], dtype=dtypes.int32), [(
row_before_length, row_after_length)]
],
axis=0)))
blocked = array_ops.concat(row_blocks, -2, name=name)
blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))
return blocked
def power_sums_tensor(array_size, power_matrix, multiplier):
r"""Computes \sum_{i=0}^{N-1} A^i B (A^i)^T for N=0..(array_size + 1).
Args:
array_size: The number of non-trivial sums to pre-compute.
power_matrix: The "A" matrix above.
multiplier: The "B" matrix above
Returns:
A Tensor with S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T
S[0] is the zero matrix
S[1] is B
S[2] is A B A^T + B
...and so on
"""
array_size = math_ops.cast(array_size, dtypes.int32)
power_matrix = ops.convert_to_tensor(power_matrix)
identity_like_power_matrix = linalg_ops.eye(
array_ops.shape(power_matrix)[0], dtype=power_matrix.dtype)
identity_like_power_matrix.set_shape(
ops.convert_to_tensor(power_matrix).get_shape())
transition_powers = functional_ops.scan(
lambda previous_power, _: math_ops.matmul(previous_power, power_matrix),
math_ops.range(array_size - 1),
initializer=identity_like_power_matrix)
summed = math_ops.cumsum(
array_ops.concat([
array_ops.expand_dims(multiplier, 0), math_ops.matmul(
batch_times_matrix(transition_powers, multiplier),
transition_powers,
adjoint_b=True)
], 0))
return array_ops.concat(
[array_ops.expand_dims(array_ops.zeros_like(multiplier), 0), summed], 0)
def matrix_to_powers(matrix, powers):
"""Raise a single matrix to multiple powers."""
matrix_tiled = array_ops.tile(
array_ops.expand_dims(matrix, 0), [array_ops.size(powers), 1, 1])
return batch_matrix_pow(matrix_tiled, powers)
def batch_matrix_pow(matrices, powers):
"""Compute powers of matrices, e.g. A^3 = matmul(matmul(A, A), A).
Uses exponentiation by squaring, with O(log(p)) matrix multiplications to
compute A^p.
Args:
matrices: [batch size x N x N]
powers: Which integer power to raise each matrix to [batch size]
Returns:
The matrices raised to their respective powers, same dimensions as the
"matrices" argument.
"""
def terminate_when_all_zero(current_argument, residual_powers, accumulator):
del current_argument, accumulator # not used for condition
do_exit = math_ops.reduce_any(
math_ops.greater(residual_powers, array_ops.ones_like(residual_powers)))
return do_exit
def do_iteration(current_argument, residual_powers, accumulator):
"""Compute one step of iterative exponentiation by squaring.
The recursive form is:
power(A, p) = { power(matmul(A, A), p / 2) for even p
{ matmul(A, power(matmul(A, A), (p - 1) / 2)) for odd p
power(A, 0) = I
The power(A, 0) = I case is handled by starting with accumulator set to the
identity matrix; matrices with zero residual powers are passed through
unchanged.
Args:
current_argument: On this step, what is the first argument (A^2..^2) to
the (unrolled) recursive function? [batch size x N x N]
residual_powers: On this step, what is the second argument (residual p)?
[batch_size]
accumulator: Accumulates the exterior multiplications from the odd
powers (initially the identity matrix). [batch_size x N x N]
Returns:
Updated versions of each argument for one step of the unrolled
computation. Does not change parts of the batch which have a residual
power of zero.
"""
is_even = math_ops.equal(residual_powers % 2,
array_ops.zeros(
array_ops.shape(residual_powers),
dtype=dtypes.int32))
new_accumulator = array_ops.where(is_even, accumulator,
math_ops.matmul(accumulator,
current_argument))
new_argument = math_ops.matmul(current_argument, current_argument)
do_update = math_ops.greater(residual_powers, 1)
new_residual_powers = residual_powers - residual_powers % 2
new_residual_powers //= 2
# Stop updating if we've reached our base case; some batch elements may
# finish sooner than others
accumulator = array_ops.where(do_update, new_accumulator, accumulator)
current_argument = array_ops.where(do_update, new_argument,
current_argument)
residual_powers = array_ops.where(do_update, new_residual_powers,
residual_powers)
return (current_argument, residual_powers, accumulator)
matrices = ops.convert_to_tensor(matrices)
powers = math_ops.cast(powers, dtype=dtypes.int32)
ident = array_ops.expand_dims(
array_ops.diag(
array_ops.ones([array_ops.shape(matrices)[1]], dtype=matrices.dtype)),
0)
ident_tiled = array_ops.tile(ident, [array_ops.shape(matrices)[0], 1, 1])
(final_argument,
final_residual_power, final_accumulator) = control_flow_ops.while_loop(
terminate_when_all_zero, do_iteration, [matrices, powers, ident_tiled])
return array_ops.where(
math_ops.equal(final_residual_power,
array_ops.zeros_like(
final_residual_power, dtype=dtypes.int32)),
ident_tiled, math_ops.matmul(final_argument, final_accumulator))
# TODO(allenl): would be useful if this was built into batch_matmul
def batch_times_matrix(batch, matrix, adj_x=False, adj_y=False):
"""Multiply a batch of matrices by a single matrix.
Functionally equivalent to:
tf.matmul(batch, array_ops.tile(gen_math_ops.expand_dims(matrix, 0),
[array_ops.shape(batch)[0], 1, 1]),
adjoint_a=adj_x, adjoint_b=adj_y)
Args:
batch: [batch_size x N x M] after optional transpose
matrix: [M x P] after optional transpose
adj_x: If true, transpose the second two dimensions of "batch" before
multiplying.
adj_y: If true, transpose "matrix" before multiplying.
Returns:
[batch_size x N x P]
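  For example (an illustrative sketch, assuming TensorFlow 1.x graph mode;
  only the static shape is checked):
  >>> import tensorflow as tf
  >>> out = batch_times_matrix(tf.ones([4, 2, 3]), tf.ones([3, 5]))
  >>> out.get_shape().as_list()
  [4, 2, 5]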
"""
batch = ops.convert_to_tensor(batch)
matrix = ops.convert_to_tensor(matrix)
assert batch.get_shape().ndims == 3
assert matrix.get_shape().ndims == 2
if adj_x:
batch = array_ops.transpose(batch, [0, 2, 1])
batch_dimension = batch.get_shape().dims[0].value
first_dimension = batch.get_shape().dims[1].value
tensor_batch_shape = array_ops.shape(batch)
if batch_dimension is None:
batch_dimension = tensor_batch_shape[0]
if first_dimension is None:
first_dimension = tensor_batch_shape[1]
matrix_first_dimension, matrix_second_dimension = matrix.get_shape().as_list()
batch_reshaped = array_ops.reshape(batch, [-1, tensor_batch_shape[2]])
if adj_y:
if matrix_first_dimension is None:
matrix_first_dimension = array_ops.shape(matrix)[0]
result_shape = [batch_dimension, first_dimension, matrix_first_dimension]
else:
if matrix_second_dimension is None:
matrix_second_dimension = array_ops.shape(matrix)[1]
result_shape = [batch_dimension, first_dimension, matrix_second_dimension]
return array_ops.reshape(
math_ops.matmul(batch_reshaped, matrix, adjoint_b=adj_y), result_shape)
def matrix_times_batch(matrix, batch, adj_x=False, adj_y=False):
"""Like batch_times_matrix, but with the multiplication order swapped."""
return array_ops.transpose(
batch_times_matrix(
batch=batch, matrix=matrix, adj_x=not adj_y, adj_y=not adj_x),
[0, 2, 1])
def make_toeplitz_matrix(inputs, name=None):
"""Make a symmetric Toeplitz matrix from input array of values.
Args:
inputs: a 3-D tensor of shape [num_blocks, block_size, block_size].
name: the name of the operation.
Returns:
a symmetric Toeplitz matrix of shape
[num_blocks*block_size, num_blocks*block_size].
"""
num_blocks = array_ops.shape(inputs)[0]
block_size = array_ops.shape(inputs)[1]
output_size = block_size * num_blocks
lags = array_ops.reshape(math_ops.range(num_blocks), shape=[1, -1])
indices = math_ops.abs(lags - array_ops.transpose(lags))
output = array_ops.gather(inputs, indices)
output = array_ops.reshape(
array_ops.transpose(output, [0, 2, 1, 3]), [output_size, output_size])
return array_ops.identity(output, name=name)
# TODO(allenl): Investigate alternative parameterizations.
def sign_magnitude_positive_definite(
raw, off_diagonal_scale=0., overall_scale=0.):
"""Constructs a positive definite matrix from an unconstrained input matrix.
We want to keep the whole matrix on a log scale, but also allow off-diagonal
elements to be negative, so the sign of off-diagonal elements is modeled
separately from their magnitude (using the lower and upper triangles
respectively). Specifically:
for i < j, we have:
output_cholesky[i, j] = raw[j, i] / (abs(raw[j, i]) + 1) *
exp((off_diagonal_scale + overall_scale + raw[i, j]) / 2)
output_cholesky[i, i] = exp((raw[i, i] + overall_scale) / 2)
output = output_cholesky^T * output_cholesky
where raw, off_diagonal_scale, and overall_scale are
un-constrained real-valued variables. The resulting values are stable
around zero due to the exponential (and the softsign keeps the function
smooth).
Args:
raw: A [..., M, M] Tensor.
off_diagonal_scale: A scalar or [...] shaped Tensor controlling the relative
scale of off-diagonal values in the output matrix.
overall_scale: A scalar or [...] shaped Tensor controlling the overall scale
of the output matrix.
Returns:
The `output` matrix described above, a [..., M, M] positive definite matrix.
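  For example (an illustrative sketch, assuming TensorFlow 1.x graph mode;
  only the static shape is checked):
  >>> import tensorflow as tf
  >>> out = sign_magnitude_positive_definite(tf.zeros([3, 3]))
  >>> out.get_shape().as_list()
  [3, 3]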
"""
raw = ops.convert_to_tensor(raw)
diagonal = array_ops.matrix_diag_part(raw)
def _right_pad_with_ones(tensor, target_rank):
# Allow broadcasting even if overall_scale and off_diagonal_scale have batch
# dimensions
tensor = ops.convert_to_tensor(tensor, dtype=raw.dtype.base_dtype)
return array_ops.reshape(tensor,
array_ops.concat(
[
array_ops.shape(tensor), array_ops.ones(
[target_rank - array_ops.rank(tensor)],
dtype=target_rank.dtype)
],
axis=0))
# We divide the log values by 2 to compensate for the squaring that happens
# when transforming Cholesky factors into positive definite matrices.
sign_magnitude = (gen_math_ops.exp(
(raw + _right_pad_with_ones(off_diagonal_scale, array_ops.rank(raw)) +
_right_pad_with_ones(overall_scale, array_ops.rank(raw))) / 2.) *
nn.softsign(array_ops.matrix_transpose(raw)))
sign_magnitude.set_shape(raw.get_shape())
cholesky_factor = array_ops.matrix_set_diag(
input=array_ops.matrix_band_part(sign_magnitude, 0, -1),
diagonal=gen_math_ops.exp((diagonal + _right_pad_with_ones(
overall_scale, array_ops.rank(diagonal))) / 2.))
return math_ops.matmul(cholesky_factor, cholesky_factor, transpose_a=True)
def transform_to_covariance_matrices(input_vectors, matrix_size):
"""Construct covariance matrices via transformations from input_vectors.
Args:
input_vectors: A [batch size x input size] batch of vectors to transform.
matrix_size: An integer indicating one dimension of the (square) output
matrix.
Returns:
A [batch size x matrix_size x matrix_size] batch of covariance matrices.
"""
combined_values = layers.fully_connected(
input_vectors, matrix_size**2 + 2, activation_fn=None)
return sign_magnitude_positive_definite(
raw=array_ops.reshape(combined_values[..., :-2],
array_ops.concat([
array_ops.shape(combined_values)[:-1],
[matrix_size, matrix_size]
], 0)),
off_diagonal_scale=combined_values[..., -2],
overall_scale=combined_values[..., -1])
def variable_covariance_matrix(
size, name, dtype, initial_diagonal_values=None,
initial_overall_scale_log=0.):
"""Construct a Variable-parameterized positive definite matrix.
Useful for parameterizing covariance matrices.
Args:
size: The size of the main diagonal, the returned matrix having shape [size
x size].
name: The name to use when defining variables and ops.
dtype: The floating point data type to use.
initial_diagonal_values: A Tensor with shape [size] with initial values for
the diagonal values of the returned matrix. Must be positive.
initial_overall_scale_log: Initial value of the bias term for every element
of the matrix in log space.
Returns:
A Variable-parameterized covariance matrix with shape [size x size].
"""
raw_values = variable_scope.get_variable(
name + "_pre_transform",
dtype=dtype,
shape=[size, size],
initializer=init_ops.zeros_initializer())
if initial_diagonal_values is not None:
raw_values += array_ops.matrix_diag(math_ops.log(initial_diagonal_values))
return array_ops.identity(
sign_magnitude_positive_definite(
raw=raw_values,
off_diagonal_scale=variable_scope.get_variable(
name + "_off_diagonal_scale",
dtype=dtype,
initializer=constant_op.constant(-5., dtype=dtype)),
overall_scale=ops.convert_to_tensor(
initial_overall_scale_log, dtype=dtype) +
variable_scope.get_variable(
name + "_overall_scale",
dtype=dtype,
shape=[],
initializer=init_ops.zeros_initializer())),
name=name)
def batch_start_time(times):
return times[:, 0]
def batch_end_time(times):
return times[:, -1]
def log_noninformative_covariance_prior(covariance):
"""Compute a relatively uninformative prior for noise parameters.
Helpful for avoiding noise over-estimation, where noise otherwise decreases
very slowly during optimization.
See:
Villegas, C. On the A Priori Distribution of the Covariance Matrix.
Ann. Math. Statist. 40 (1969), no. 3, 1098--1099.
Args:
covariance: A covariance matrix.
Returns:
For a [p x p] matrix:
log(det(covariance)^(-(p + 1) / 2))
"""
# Avoid zero/negative determinants due to numerical errors
covariance += array_ops.diag(1e-8 * array_ops.ones(
shape=[array_ops.shape(covariance)[0]], dtype=covariance.dtype))
power = -(math_ops.cast(array_ops.shape(covariance)[0] + 1,
covariance.dtype) / 2.)
return power * math_ops.log(linalg_ops.matrix_determinant(covariance))
def entropy_matched_cauchy_scale(covariance):
"""Approximates a similar Cauchy distribution given a covariance matrix.
Since Cauchy distributions do not have moments, entropy matching provides one
way to set a Cauchy's scale parameter in a way that provides a similar
distribution. The effect is dividing the standard deviation of an independent
Gaussian by a constant very near 3.
To set the scale of the Cauchy distribution, we first select the diagonals of
`covariance`. Since this ignores cross terms, it overestimates the entropy of
the Gaussian. For each of these variances, we solve for the Cauchy scale
parameter which gives the same entropy as the Gaussian with that
variance. This means setting the (univariate) Gaussian entropy
0.5 * ln(2 * variance * pi * e)
equal to the Cauchy entropy
ln(4 * pi * scale)
Solving, we get scale = sqrt(variance * (e / (8 pi))).
Args:
covariance: A [batch size x N x N] batch of covariance matrices to produce
Cauchy scales for.
Returns:
A [batch size x N] set of Cauchy scale parameters for each part of the batch
and each dimension of the input Gaussians.
"""
return math_ops.sqrt(math.e / (8. * math.pi) *
array_ops.matrix_diag_part(covariance))
class TensorValuedMutableDenseHashTable(lookup.MutableDenseHashTable):
"""A version of MutableDenseHashTable which stores arbitrary Tensor shapes.
Since MutableDenseHashTable only allows vectors right now, simply adds reshape
ops on both ends.
"""
def __init__(self, key_dtype, value_dtype, default_value, *args, **kwargs):
self._non_vector_value_shape = array_ops.shape(default_value)
super(TensorValuedMutableDenseHashTable, self).__init__(
key_dtype=key_dtype,
value_dtype=value_dtype,
default_value=array_ops.reshape(default_value, [-1]),
*args,
**kwargs)
def insert(self, keys, values, name=None):
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype)
keys_flat = array_ops.reshape(keys, [-1])
return super(TensorValuedMutableDenseHashTable, self).insert(
keys=keys_flat,
# Each key has one corresponding value, so the shape of the tensor of
# values for every key is key_shape + value_shape
values=array_ops.reshape(values, [array_ops.shape(keys_flat)[0], -1]),
name=name)
def lookup(self, keys, name=None):
keys_flat = array_ops.reshape(
ops.convert_to_tensor(keys, dtype=self._key_dtype), [-1])
return array_ops.reshape(
super(TensorValuedMutableDenseHashTable, self).lookup(
keys=keys_flat, name=name),
array_ops.concat([array_ops.shape(keys), self._non_vector_value_shape],
0))
class TupleOfTensorsLookup(lookup.LookupInterface):
"""A LookupInterface with nested tuples of Tensors as values.
Creates one MutableDenseHashTable per value Tensor, which has some unnecessary
overhead.
"""
def __init__(self,
key_dtype,
default_values,
empty_key,
deleted_key,
name,
checkpoint=True):
default_values_flat = nest.flatten(default_values)
self._hash_tables = nest.pack_sequence_as(default_values, [
TensorValuedMutableDenseHashTable(
key_dtype=key_dtype,
value_dtype=default_value.dtype.base_dtype,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name=name + "_{}".format(table_number),
checkpoint=checkpoint)
for table_number, default_value in enumerate(default_values_flat)
])
self._name = name
def lookup(self, keys):
return nest.pack_sequence_as(
self._hash_tables,
[hash_table.lookup(keys)
for hash_table in nest.flatten(self._hash_tables)])
def insert(self, keys, values):
nest.assert_same_structure(self._hash_tables, values)
# Avoid race conditions by requiring that all inputs are computed before any
# inserts happen (an issue if one key's update relies on another's value).
values_flat = [array_ops.identity(value) for value in nest.flatten(values)]
with ops.control_dependencies(values_flat):
insert_ops = [hash_table.insert(keys, value)
for hash_table, value
in zip(nest.flatten(self._hash_tables),
values_flat)]
return control_flow_ops.group(*insert_ops)
def check_table_dtypes(self, key_dtype, value_dtype):
# dtype checking is done in the objects in self._hash_tables
pass
def replicate_state(start_state, batch_size):
"""Create batch versions of state.
Takes a list of Tensors, adds a batch dimension, and replicates
batch_size times across that batch dimension. Used to replicate the
non-batch state returned by get_start_state in define_loss.
Args:
start_state: Model-defined state to replicate.
batch_size: Batch dimension for data.
Returns:
Replicated versions of the state.
"""
flattened_state = nest.flatten(start_state)
replicated_state = [
array_ops.tile(
array_ops.expand_dims(state_nonbatch, 0),
array_ops.concat([[batch_size], array_ops.ones(
[array_ops.rank(state_nonbatch)], dtype=dtypes.int32)], 0))
for state_nonbatch in flattened_state
]
return nest.pack_sequence_as(start_state, replicated_state)
Moments = collections.namedtuple("Moments", ["mean", "variance"])
# Currently all of these statistics are computed incrementally (i.e. are updated
# every time a new mini-batch of training data is presented) when this object is
# created in InputStatisticsFromMiniBatch.
InputStatistics = collections.namedtuple(
"InputStatistics",
["series_start_moments", # The mean and variance of each feature in a chunk
# (with a size configured in the statistics
# object) at the start of the series. A tuple of
# (mean, variance), each with shape [number of
# features], floating point. One use is in state
# space models, to keep priors calibrated even as
# earlier parts of the series are presented. If
# this object was created by
# InputStatisticsFromMiniBatch, these moments are
# computed based on the earliest chunk of data
# presented so far. However, there is a race
# condition in the update, so these may reflect
# statistics later in the series, but should
# eventually reflect statistics in a chunk at the
# series start.
"overall_feature_moments", # The mean and variance of each feature over
# the entire series. A tuple of (mean,
# variance), each with shape [number of
# features]. If this object was created by
# InputStatisticsFromMiniBatch, these moments
# are estimates based on the data seen so far.
"start_time", # The first (lowest) time in the series, a scalar
# integer. If this object was created by
# InputStatisticsFromMiniBatch, this is the lowest time seen
# so far rather than the lowest time that will ever be seen
# (guaranteed to be at least as low as the lowest time
# presented in the current minibatch).
"total_observation_count", # Count of data points, a scalar integer. If
# this object was created by
# InputStatisticsFromMiniBatch, this is an
# estimate of the total number of observations
# in the whole dataset computed based on the
# density of the series and the minimum and
# maximum times seen.
])
# TODO(allenl): It would be nice to do something with full series statistics
# when the user provides that.
class InputStatisticsFromMiniBatch(object):
"""Generate statistics from mini-batch input."""
def __init__(self, num_features, dtype, starting_variance_window_size=16):
"""Configure the input statistics object.
Args:
num_features: Number of features for the time series
dtype: The floating point data type to use.
starting_variance_window_size: The number of datapoints to use when
computing the mean and variance at the start of the series.
"""
self._starting_variance_window_size = starting_variance_window_size
self._num_features = num_features
self._dtype = dtype
def initialize_graph(self, features, update_statistics=True):
"""Create any ops needed to provide input statistics.
Should be called before statistics are requested.
Args:
features: A dictionary, the output of a `TimeSeriesInputFn` (with keys
TrainEvalFeatures.TIMES and TrainEvalFeatures.VALUES).
update_statistics: Whether `features` should be used to update adaptive
statistics. Typically True for training and false for evaluation.
Returns:
An InputStatistics object composed of Variables, which will be updated
based on mini-batches of data if requested.
"""
if (TrainEvalFeatures.TIMES in features
and TrainEvalFeatures.VALUES in features):
times = features[TrainEvalFeatures.TIMES]
values = features[TrainEvalFeatures.VALUES]
else:
# times and values may not be available, for example during prediction. We
# still need to retrieve our variables so that they can be read from, even
# if we're not going to update them.
times = None
values = None
# Create/retrieve variables representing input statistics, initialized
# without data to avoid deadlocking if variables are initialized before
# queue runners are started.
with variable_scope.variable_scope("input_statistics", use_resource=True):
statistics = self._create_variable_statistics_object()
with variable_scope.variable_scope(
"input_statistics_auxiliary", use_resource=True):
# Secondary statistics, necessary for the incremental computation of the
# primary statistics (e.g. counts and sums for computing a mean
# incrementally).
auxiliary_variables = self._AdaptiveInputAuxiliaryStatistics(
num_features=self._num_features, dtype=self._dtype)
if update_statistics and times is not None and values is not None:
# If we have times and values from mini-batch input, create update ops to
# take the new data into account.
assign_op = self._update_statistics_from_mini_batch(
statistics, auxiliary_variables, times, values)
with ops.control_dependencies([assign_op]):
stat_variables = nest.pack_sequence_as(statistics, [
array_ops.identity(tensor) for tensor in nest.flatten(statistics)
])
# Since start time updates have a race condition, ensure that the
# reported start time is at least as low as the lowest time in this
# mini-batch. The start time should converge on the correct value
# eventually even with the race condition, but for example state space
# models have an assertion which could fail without this
# post-processing.
return stat_variables._replace(start_time=gen_math_ops.minimum(
stat_variables.start_time, math_ops.reduce_min(times)))
else:
return statistics
class _AdaptiveInputAuxiliaryStatistics(collections.namedtuple(
"_AdaptiveInputAuxiliaryStatistics",
["max_time_seen", # The maximum time seen (best effort if updated from
# multiple workers; see notes about race condition
# below).
"chunk_count", # The number of chunks seen.
"inter_observation_duration_sum", # The sum across chunks of their "time
# density" (number of times per
# example).
"example_count", # The number of examples seen (each example has a
# single time associated with it and one or more
# real-valued features).
"overall_feature_sum", # The sum of values for each feature. Shape
# [number of features].
"overall_feature_sum_of_squares", # The sum of squared values for each
# feature. Shape [number of features]
])):
"""Extra statistics used to incrementally update InputStatistics."""
def __new__(cls, num_features, dtype):
return super(
InputStatisticsFromMiniBatch # pylint: disable=protected-access
._AdaptiveInputAuxiliaryStatistics,
cls).__new__(
cls,
max_time_seen=variable_scope.get_variable(
name="max_time_seen",
initializer=dtypes.int64.min,
dtype=dtypes.int64,
trainable=False),
chunk_count=variable_scope.get_variable(
name="chunk_count",
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int64,
trainable=False),
inter_observation_duration_sum=variable_scope.get_variable(
name="inter_observation_duration_sum",
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtype,
trainable=False),
example_count=variable_scope.get_variable(
name="example_count",
shape=[],
dtype=dtypes.int64,
trainable=False),
overall_feature_sum=variable_scope.get_variable(
name="overall_feature_sum",
shape=[num_features],
dtype=dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
overall_feature_sum_of_squares=variable_scope.get_variable(
name="overall_feature_sum_of_squares",
shape=[num_features],
dtype=dtype,
initializer=init_ops.zeros_initializer(),
trainable=False))
def _update_statistics_from_mini_batch(
self, statistics, auxiliary_variables, times, values):
"""Given mini-batch input, update `statistics` and `auxiliary_variables`."""
values = math_ops.cast(values, self._dtype)
# The density (measured in times per observation) that we see in each part
# of the mini-batch.
batch_inter_observation_duration = (math_ops.cast(
math_ops.reduce_max(times, axis=1) - math_ops.reduce_min(times, axis=1),
self._dtype) / math_ops.cast(
array_ops.shape(times)[1] - 1, self._dtype))
# Co-locate updates with their variables to minimize race conditions when
# updating statistics.
with ops.device(auxiliary_variables.max_time_seen.device):
# There is a race condition if this value is being updated from multiple
# workers. However, it should eventually reach the correct value if the
# last chunk is presented enough times.
max_time_seen_assign = state_ops.assign(
auxiliary_variables.max_time_seen,
gen_math_ops.maximum(auxiliary_variables.max_time_seen,
math_ops.reduce_max(times)))
with ops.device(auxiliary_variables.chunk_count.device):
chunk_count_assign = state_ops.assign_add(auxiliary_variables.chunk_count,
array_ops.shape(
times,
out_type=dtypes.int64)[0])
with ops.device(auxiliary_variables.inter_observation_duration_sum.device):
inter_observation_duration_assign = state_ops.assign_add(
auxiliary_variables.inter_observation_duration_sum,
math_ops.reduce_sum(batch_inter_observation_duration))
with ops.device(auxiliary_variables.example_count.device):
example_count_assign = state_ops.assign_add(
auxiliary_variables.example_count,
array_ops.size(times, out_type=dtypes.int64))
# Note: These mean/variance updates assume that all points are equally
# likely, which is not true if _chunks_ are sampled uniformly from the space
# of all possible contiguous chunks, since points at the start and end of
# the series are then members of fewer chunks. For series which are much
# longer than the chunk size (the usual/expected case), this effect becomes
# irrelevant.
with ops.device(auxiliary_variables.overall_feature_sum.device):
overall_feature_sum_assign = state_ops.assign_add(
auxiliary_variables.overall_feature_sum,
math_ops.reduce_sum(values, axis=[0, 1]))
with ops.device(auxiliary_variables.overall_feature_sum_of_squares.device):
overall_feature_sum_of_squares_assign = state_ops.assign_add(
auxiliary_variables.overall_feature_sum_of_squares,
math_ops.reduce_sum(values**2, axis=[0, 1]))
per_chunk_aux_updates = control_flow_ops.group(
max_time_seen_assign, chunk_count_assign,
inter_observation_duration_assign, example_count_assign,
overall_feature_sum_assign, overall_feature_sum_of_squares_assign)
with ops.control_dependencies([per_chunk_aux_updates]):
example_count_float = math_ops.cast(auxiliary_variables.example_count,
self._dtype)
new_feature_mean = (auxiliary_variables.overall_feature_sum /
example_count_float)
overall_feature_mean_update = state_ops.assign(
statistics.overall_feature_moments.mean, new_feature_mean)
overall_feature_var_update = state_ops.assign(
statistics.overall_feature_moments.variance,
# De-biased n / (n - 1) variance correction
example_count_float / (example_count_float - 1.) *
(auxiliary_variables.overall_feature_sum_of_squares /
example_count_float - new_feature_mean**2))
# TODO(b/35675805): Remove this cast
min_time_batch = math_ops.cast(math_ops.argmin(times[:, 0]), dtypes.int32)
def series_start_updates():
# If this is the lowest-time chunk that we have seen so far, update
# series start moments to reflect that. Note that these statistics are
# "best effort", as there are race conditions in the update (however,
# they should eventually converge if the start of the series is
# presented enough times).
mean, variance = nn.moments(
values[min_time_batch, :self._starting_variance_window_size],
axes=[0])
return control_flow_ops.group(
state_ops.assign(statistics.series_start_moments.mean, mean),
state_ops.assign(statistics.series_start_moments.variance,
variance))
with ops.device(statistics.start_time.device):
series_start_update = control_flow_ops.cond(
# Update moments whenever we even match the lowest time seen so far,
# to ensure that series start statistics are eventually updated to
# their correct values, despite race conditions (i.e. eventually
# statistics.start_time will reflect the global lowest time, and
# given that we will eventually update the series start moments to
# their correct values).
math_ops.less_equal(times[min_time_batch, 0],
statistics.start_time),
series_start_updates,
control_flow_ops.no_op)
with ops.control_dependencies([series_start_update]):
# There is a race condition if this update is performed in parallel on
# multiple workers. Since models may be sensitive to being presented
# with times before the putative start time, the value of this
# variable is post-processed above to guarantee that each worker is
# presented with a start time which is at least as low as the lowest
# time in its current mini-batch.
start_time_update = state_ops.assign(statistics.start_time,
gen_math_ops.minimum(
statistics.start_time,
math_ops.reduce_min(times)))
inter_observation_duration_estimate = (
auxiliary_variables.inter_observation_duration_sum / math_ops.cast(
auxiliary_variables.chunk_count, self._dtype))
# Estimate the total number of observations as:
# (end time - start time + 1) * average intra-chunk time density
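# Editor's worked example: if times run from 0 through 99 and the average
# gap between observations is 2.0, the estimate below is
# round((99 - 0 + 1) / 2.0) = 50 total observations.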
total_observation_count_update = state_ops.assign(
statistics.total_observation_count,
math_ops.cast(
gen_math_ops.round(
math_ops.cast(max_time_seen_assign -
start_time_update + 1, self._dtype) /
inter_observation_duration_estimate), dtypes.int64))
per_chunk_stat_updates = control_flow_ops.group(
overall_feature_mean_update, overall_feature_var_update,
series_start_update, start_time_update,
total_observation_count_update)
return per_chunk_stat_updates
def _create_variable_statistics_object(self):
"""Creates non-trainable variables representing input statistics."""
series_start_moments = Moments(
mean=variable_scope.get_variable(
name="series_start_mean",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
variance=variable_scope.get_variable(
name="series_start_variance",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.ones_initializer(),
trainable=False))
overall_feature_moments = Moments(
mean=variable_scope.get_variable(
name="overall_feature_mean",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
variance=variable_scope.get_variable(
name="overall_feature_var",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.ones_initializer(),
trainable=False))
start_time = variable_scope.get_variable(
name="start_time",
dtype=dtypes.int64,
initializer=dtypes.int64.max,
trainable=False)
total_observation_count = variable_scope.get_variable(
name="total_observation_count",
shape=[],
dtype=dtypes.int64,
initializer=init_ops.ones_initializer(),
trainable=False)
return InputStatistics(
series_start_moments=series_start_moments,
overall_feature_moments=overall_feature_moments,
start_time=start_time,
total_observation_count=total_observation_count)
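# --- Editor's sketch (illustration only, not part of the original file) ----
# A pure-Python sketch of the accumulator logic implemented with assign ops
# above: keep a running count, sum, and sum of squares, then recover the
# mean and the de-biased n / (n - 1) variance. All names are hypothetical.
def _illustrate_streaming_moments(chunks):
    """Accumulates moments over an iterable of lists of float values."""
    example_count = 0
    feature_sum = 0.0
    feature_sum_of_squares = 0.0
    for chunk in chunks:
        example_count += len(chunk)
        feature_sum += sum(chunk)
        feature_sum_of_squares += sum(v ** 2 for v in chunk)
    mean = feature_sum / example_count
    # Same n / (n - 1) correction as overall_feature_var_update above.
    variance = (example_count / (example_count - 1.0) *
                (feature_sum_of_squares / example_count - mean ** 2))
    return mean, variance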
| apache-2.0 |
mixturemodel-flow/tensorflow | tensorflow/contrib/learn/python/learn/ops/losses_ops.py | 81 | 3207 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Ops for loss computation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
@deprecated('2016-12-01', 'Use `tf.contrib.losses.mean_squared_error` '
'and explicit logits computation.')
def mean_squared_error_regressor(tensor_in, labels, weights, biases, name=None):
"""Returns prediction and loss for mean squared error regression."""
with ops.name_scope(name, 'mean_squared_error_regressor',
[tensor_in, labels]):
predictions = nn.xw_plus_b(tensor_in, weights, biases)
if len(labels.get_shape()) == 1 and len(predictions.get_shape()) == 2:
predictions = array_ops_.squeeze(predictions, squeeze_dims=[1])
return predictions, loss_ops.mean_squared_error(predictions, labels)
@deprecated('2016-12-01', 'Use `tf.contrib.losses.softmax_cross_entropy` '
'and explicit logits computation.')
def softmax_classifier(tensor_in,
labels,
weights,
biases,
class_weight=None,
name=None):
"""Returns prediction and loss for softmax classifier.
This function returns "probabilities" and a cross entropy loss. To obtain
predictions, use `tf.argmax` on the returned probabilities.
This function requires labels to be passed in one-hot encoding.
Args:
tensor_in: Input tensor, [batch_size, feature_size], features.
labels: Tensor, [batch_size, n_classes], one-hot labels of the output
classes.
weights: Tensor, [feature_size, n_classes], linear transformation
matrix.
biases: Tensor, [n_classes], biases.
class_weight: Tensor, optional, [n_classes], weight for each class.
If not given, all classes are supposed to have weight one.
name: Operation name.
Returns:
`tuple` of softmax predictions and loss `Tensor`s.
"""
with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
logits = nn.xw_plus_b(tensor_in, weights, biases)
if class_weight is not None:
logits = math_ops.multiply(logits, class_weight)
return nn.softmax(logits), loss_ops.softmax_cross_entropy(logits, labels)
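# --- Editor's sketch (illustration only, not part of the original file) ----
# Minimal wiring of softmax_classifier, assuming the TF 1.x-era API that
# this contrib module targets. Shapes and names are hypothetical; the code
# is wrapped in a function so nothing runs at import time.
def _example_softmax_classifier_usage():
    import tensorflow as tf  # assumed importable alongside this module
    x = tf.placeholder(tf.float32, [None, 10])  # [batch_size, feature_size]
    y = tf.placeholder(tf.float32, [None, 3])   # one-hot [batch_size, n_classes]
    weights = tf.Variable(tf.zeros([10, 3]))    # [feature_size, n_classes]
    biases = tf.Variable(tf.zeros([3]))         # [n_classes]
    probabilities, loss = softmax_classifier(x, y, weights, biases)
    predictions = tf.argmax(probabilities, 1)   # per the docstring above
    return predictions, loss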
| apache-2.0 |
akrherz/iem | htdocs/DCP/ahpsxml2wxc.py | 1 | 2188 | """Convert the AHPS XML into WXC format"""
import datetime
from paste.request import parse_formvars
from twisted.words.xish import domish, xpath
import requests
def do(nwsli):
"""work"""
res = ""
xml = requests.get(
(
"https://water.weather.gov/ahps2/"
"hydrograph_to_xml.php?gage=%s&output=xml"
)
% (nwsli,)
).content
elementStream = domish.elementStream()
roots = []
results = []
elementStream.DocumentStartEvent = roots.append
elementStream.ElementEvent = lambda elem: roots[0].addChild(elem)
elementStream.DocumentEndEvent = lambda: results.append(roots[0])
res += """IEM %s AHPS2WXC host=0 TimeStamp=%s
5
15 Station
6 UTCDate
4 UTCTime
7 Stage
7 CFS\n""" % (
nwsli,
datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"),
)
elementStream.parse(xml)
elem = results[0]
nodes = xpath.queryForNodes("/site/forecast/datum", elem)
if nodes is None:
return res
i = 0
maxval = {"val": 0, "time": None}
for node in nodes:
# [:16] keeps the full "YYYY-MM-DDTHH:MM" prefix; a shorter slice would
# drop the trailing minute digit and mis-parse times such as 15:45.
utc = datetime.datetime.strptime(
str(node.valid)[:16], "%Y-%m-%dT%H:%M"
)
res += ("%12s%03i %6s %4s %7s %7s\n") % (
nwsli,
i,
utc.strftime("%b %-d"),
utc.strftime("%H%M"),
node.primary,
node.secondary,
)
if float(str(node.primary)) > maxval["val"]:
maxval["val"] = float(str(node.primary))
maxval["time"] = utc
maxval["cfs"] = float(str(node.secondary))
i += 1
if maxval["time"] is not None:
utc = maxval["time"]
res += ("%12sMAX %6s %4s %7s %7s\n") % (
nwsli,
utc.strftime("%b %-d"),
utc.strftime("%H%M"),
maxval["val"],
maxval["cfs"],
)
return res
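# Editor's note (illustration only): each WXC data row emitted above is
# fixed-width -- a right-justified 12-char station id fused with a 3-digit
# sequence number (together the 15-wide Station column declared in the
# header), then date, time, stage, and flow columns, e.g. (hypothetical):
#        MROI4000 Sep 22 1500    12.3  4500.0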
def application(environ, start_response):
"""Do Fun Things"""
fields = parse_formvars(environ)
nwsli = fields.get("nwsli", "MROI4")[:5]
start_response("200 OK", [("Content-type", "text/plain")])
return [do(nwsli).encode("ascii")]
| mit |
koyuawsmbrtn/eclock | windows/Python27/Lib/site-packages/pygame/tests/cursors_test.py | 18 | 3025 | #################################### IMPORTS ###################################
if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils \
import test_not_implemented, fixture_path, unittest
else:
from test.test_utils \
import test_not_implemented, fixture_path, unittest
import pygame
################################################################################
class CursorsModuleTest(unittest.TestCase):
def todo_test_compile(self):
# __doc__ (as of 2008-06-25) for pygame.cursors.compile:
# pygame.cursors.compile(strings, black, white,xor) -> data, mask
# compile cursor strings into cursor data
#
# This takes a set of strings with equal length and computes
# the binary data for that cursor. The string widths must be
# divisible by 8.
#
# The black and white arguments are single letter strings that
# tells which characters will represent black pixels, and which
# characters represent white pixels. All other characters are
# considered clear.
#
# This returns a tuple containing the cursor data and cursor mask
# data. Both these arguments are used when setting a cursor with
# pygame.mouse.set_cursor().
self.fail()
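# Editor's illustration (commented out, since this test is still a
# todo): pygame.cursors ships ready-made string art such as
# thickarrow_strings, and a typical call looks like:
#   data, mask = pygame.cursors.compile(
#       pygame.cursors.thickarrow_strings, black='X', white='.')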
def test_load_xbm(self):
# __doc__ (as of 2008-06-25) for pygame.cursors.load_xbm:
# pygame.cursors.load_xbm(cursorfile, maskfile) -> cursor_args
# reads a pair of XBM files into set_cursor arguments
#
# Arguments can either be filenames or filelike objects
# with the readlines method. Not largely tested, but
# should work with typical XBM files.
# Test that load_xbm will take filenames as arguments
cursorfile = fixture_path(r"xbm_cursors/white_sizing.xbm")
maskfile = fixture_path(r"xbm_cursors/white_sizing_mask.xbm")
cursor = pygame.cursors.load_xbm(cursorfile, maskfile)
# Test that load_xbm will take file objects as arguments
cursorfile, maskfile = [open(pth) for pth in (cursorfile, maskfile)]
cursor = pygame.cursors.load_xbm(cursorfile, maskfile)
# Is it in a format that mouse.set_cursor won't blow up on?
pygame.display.init()
pygame.mouse.set_cursor(*cursor)
pygame.display.quit()
################################################################################
if __name__ == '__main__':
unittest.main()
################################################################################
| gpl-2.0 |
soaplib/soaplib | setup.py | 1 | 2916 | #!/usr/bin/env python
from unittest import TestLoader
from pkg_resources import resource_exists
from pkg_resources import resource_listdir
from setuptools import setup, find_packages
VERSION = '2.0.0'
LONG_DESC = """\
This is a simple, easily extensible soap library that provides several useful
tools for creating and publishing soap web services in python. This package
features on-demand wsdl generation for the published services, a
wsgi-compliant web application, support for complex class structures, binary
attachments, and a simple framework for creating additional serialization
mechanisms.
This project uses lxml as its XML API, providing full namespace support.
"""
SHORT_DESC = "A transport- and architecture-agnostic soap (de)serialization " \
"library that focuses on making small, rpc-like messaging work."
class NoInteropLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Load unit test (skip 'interop' package).
Hacked from the version in 'setuptools.command.test.ScanningLoader'.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self,module))
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file == 'interop':
# These tests require installing a bunch of extra
# code: see 'src/soaplib/test/README'.
continue
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(
module.__name__, file + '/__init__.py'
):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
return self.suiteClass(tests)
setup(
name='soaplib',
packages=find_packages('src'),
package_dir={'':'src'},
version=VERSION,
description=SHORT_DESC,
long_description=LONG_DESC,
classifiers=[
'Programming Language :: Python',
'Operating System :: OS Independent',
'Natural Language :: English',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
keywords=('soap', 'wsdl', 'wsgi'),
author='Soaplib Contributors',
author_email='soap@python.org',
maintainer = 'Burak Arslan',
maintainer_email = 'burak@arskom.com.tr',
url='http://soaplib.github.com/soaplib/2_0/',
license='LGPL',
zip_safe=False,
install_requires=[
'setuptools',
'pytz',
'lxml>=2.2.1',
],
test_suite='soaplib.core.test',
test_loader='__main__:NoInteropLoader',
namespace_packages=["soaplib"]
)
| lgpl-2.1 |
wangsai/oppia | core/domain/fs_domain.py | 30 | 10570 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects representing a file system and a file stream."""
__author__ = 'Sean Lip'
import logging
import os
from core.platform import models
(file_models,) = models.Registry.import_models([
models.NAMES.file
])
import feconf
import utils
CHANGE_LIST_SAVE = [{'cmd': 'save'}]
class FileMetadata(object):
"""A class representing the metadata of a file."""
def __init__(self, metadata):
self._size = metadata.size if (metadata is not None) else None
@property
def size(self):
return self._size
class FileStreamWithMetadata(object):
"""A class that wraps a file stream, but adds extra attributes to it."""
def __init__(self, content, version, metadata):
"""The args are a file content blob and a metadata model object."""
self._content = content
self._version = version
self._metadata = FileMetadata(metadata)
def read(self):
"""Emulates stream.read(). Returns all bytes and emulates EOF."""
content = self._content
self._content = ''
return content
@property
def metadata(self):
return self._metadata
@property
def version(self):
return self._version
class ExplorationFileSystem(object):
"""A datastore-backed read-write file system for a single exploration.
The conceptual intention is for each exploration to have its own asset
folder. An asset has no meaning outside its exploration, so the assets in
these asset folders should therefore not be edited directly. They should
only be modified as side-effects of some other operation (such as adding an
image to an exploration).
The content of an exploration should reference an asset together with
that asset's version number, which allows the exploration to pin the
exact asset version it was authored against.
In general, assets should be retrieved only within the context of the
exploration that contains them, and should not be retrieved outside this
context.
"""
_DEFAULT_VERSION_NUMBER = 1
def __init__(self, exploration_id):
self._exploration_id = exploration_id
@property
def exploration_id(self):
return self._exploration_id
def _get_file_metadata(self, filepath, version):
"""Return the desired file metadata.
Returns None if the file does not exist.
"""
if version is None:
return file_models.FileMetadataModel.get_model(
self._exploration_id, 'assets/%s' % filepath)
else:
return file_models.FileMetadataModel.get_version(
self._exploration_id, 'assets/%s' % filepath, version)
def _get_file_data(self, filepath, version):
"""Return the desired file content.
Returns None if the file does not exist.
"""
if version is None:
return file_models.FileModel.get_model(
self._exploration_id, 'assets/%s' % filepath)
else:
return file_models.FileModel.get_version(
self._exploration_id, 'assets/%s' % filepath, version)
def _save_file(self, user_id, filepath, raw_bytes):
"""Create or update a file."""
if len(raw_bytes) > feconf.MAX_FILE_SIZE_BYTES:
raise Exception('The maximum allowed file size is 1 MB.')
metadata = self._get_file_metadata(filepath, None)
if not metadata:
metadata = file_models.FileMetadataModel.create(
self._exploration_id, 'assets/%s' % filepath)
metadata.size = len(raw_bytes)
data = self._get_file_data(filepath, None)
if not data:
data = file_models.FileModel.create(
self._exploration_id, 'assets/%s' % filepath)
data.content = raw_bytes
data.commit(user_id, CHANGE_LIST_SAVE)
metadata.commit(user_id, CHANGE_LIST_SAVE)
def get(self, filepath, version=None, mode=None):
"""Gets a file as an unencoded stream of raw bytes.
If `version` is not supplied, the latest version is retrieved. If the
file does not exist, None is returned.
The 'mode' argument is unused. It is included so that this method
signature matches that of other file systems.
"""
metadata = self._get_file_metadata(filepath, version)
if metadata:
data = self._get_file_data(filepath, version)
if data:
if version is None:
version = data.version
return FileStreamWithMetadata(data.content, version, metadata)
else:
logging.error(
'Metadata and data for file %s (version %s) are out of '
'sync.' % (filepath, version))
return None
else:
return None
def commit(self, user_id, filepath, raw_bytes):
"""Saves a raw bytestring as a file in the database."""
self._save_file(user_id, filepath, raw_bytes)
def delete(self, user_id, filepath):
"""Marks the current version of a file as deleted."""
metadata = self._get_file_metadata(filepath, None)
if metadata:
metadata.delete(user_id, '')
data = self._get_file_data(filepath, None)
if data:
data.delete(user_id, '')
def isfile(self, filepath):
"""Checks the existence of a file."""
metadata = self._get_file_metadata(filepath, None)
return bool(metadata)
def listdir(self, dir_name):
"""Lists all files in a directory.
Args:
dir_name: The directory whose files should be listed. This should
not start with '/' or end with '/'.
Returns:
List of str. This is a lexicographically-sorted list of filenames,
each of which is prefixed with dir_name.
"""
# The trailing slash is necessary to prevent non-identical directory
# names with the same prefix from matching, e.g. /abcd/123.png should
# not match a query for files under /abc/.
prefix = '%s' % utils.vfs_construct_path(
'/', self._exploration_id, 'assets', dir_name)
if not prefix.endswith('/'):
prefix += '/'
result = set()
metadata_models = file_models.FileMetadataModel.get_undeleted()
for metadata_model in metadata_models:
filepath = metadata_model.id
if filepath.startswith(prefix):
result.add('/'.join(filepath.split('/')[3:]))
return sorted(list(result))
class DiskBackedFileSystem(object):
"""Implementation for a disk-backed file system.
This implementation ignores versioning and is used only by tests.
"""
def __init__(self, root):
"""Constructor for this class.
Args:
root: the path to append to the oppia/ directory.
"""
self._root = os.path.join(os.getcwd(), root)
self._exploration_id = 'test'
@property
def exploration_id(self):
return self._exploration_id
def isfile(self, filepath):
"""Checks if a file exists."""
return os.path.isfile(os.path.join(self._root, filepath))
def get(self, filepath, version=None, mode='r'):
"""Returns a bytestring with the file content, but no metadata."""
content = utils.get_file_contents(
os.path.join(self._root, filepath), raw_bytes=True, mode=mode)
return FileStreamWithMetadata(content, None, None)
def commit(self, user_id, filepath, raw_bytes):
raise NotImplementedError
def delete(self, user_id, filepath):
raise NotImplementedError
def listdir(self, dir_name):
raise NotImplementedError
class AbstractFileSystem(object):
"""Interface for a file system."""
def __init__(self, impl):
self._impl = impl
@property
def impl(self):
return self._impl
def _check_filepath(self, filepath):
"""Raises an error if a filepath is invalid."""
base_dir = utils.vfs_construct_path(
'/', self.impl.exploration_id, 'assets')
absolute_path = utils.vfs_construct_path(base_dir, filepath)
normalized_path = utils.vfs_normpath(absolute_path)
# This check prevents directory traversal.
if not normalized_path.startswith(base_dir):
raise IOError('Invalid filepath: %s' % filepath)
def isfile(self, filepath):
"""Checks if a file exists. Similar to os.path.isfile(...)."""
self._check_filepath(filepath)
return self._impl.isfile(filepath)
def open(self, filepath, version=None, mode='r'):
"""Returns a stream with the file content. Similar to open(...)."""
self._check_filepath(filepath)
return self._impl.get(filepath, version=version, mode=mode)
def get(self, filepath, version=None, mode='r'):
"""Returns a bytestring with the file content, but no metadata."""
file_stream = self.open(filepath, version=version, mode=mode)
if file_stream is None:
raise IOError(
'File %s (version %s) not found.'
% (filepath, version if version else 'latest'))
return file_stream.read()
def commit(self, user_id, filepath, raw_bytes):
"""Replaces the contents of the file with the given bytestring."""
raw_bytes = str(raw_bytes)
self._check_filepath(filepath)
self._impl.commit(user_id, filepath, raw_bytes)
def delete(self, user_id, filepath):
"""Deletes a file and the metadata associated with it."""
self._check_filepath(filepath)
self._impl.delete(user_id, filepath)
def listdir(self, dir_name):
"""Lists all the files in a directory. Similar to os.listdir(...)."""
self._check_filepath(dir_name)
return self._impl.listdir(dir_name)
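# --- Editor's sketch (illustration only, not part of the original file) ----
# How the layers above compose: wrap a concrete implementation in
# AbstractFileSystem so every call is validated by _check_filepath first.
# The directory and filename below are hypothetical.
def _example_fs_usage():
    fs = AbstractFileSystem(DiskBackedFileSystem('core/tests/data'))
    if fs.isfile('img.png'):
        return fs.get('img.png')  # bytestring; raises IOError if missing
    return None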
| apache-2.0 |
tictail/claw | claw/utils.py | 1 | 2061 | # -*- coding: utf-8 -*-
import logging
from random import shuffle
from claw.constants import RE_DELIMITER
log = logging.getLogger(__name__)
def safe_format(format_string, *args, **kwargs):
"""
Helper: formats string with any combination of bytestrings/unicode
strings without raising exceptions
"""
try:
if not args and not kwargs:
return format_string
else:
return format_string.format(*args, **kwargs)
# catch encoding errors and transform everything into utf-8 string
# before logging:
except (UnicodeEncodeError, UnicodeDecodeError):
format_string = to_utf8(format_string)
args = [to_utf8(p) for p in args]
kwargs = {k: to_utf8(v) for k, v in kwargs.iteritems()}
return format_string.format(*args, **kwargs)
# ignore other errors
except:
return u''
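# --- Editor's note ----------------------------------------------------------
# to_unicode() below calls detect_encoding(), which is neither defined nor
# imported in this module as it stands. The fallback sketched here assumes
# the third-party chardet package (used for this purpose by related
# projects) and degrades to utf-8 when detection is unavailable or fails.
def detect_encoding(string):
    """Best-effort guess of a bytestring's encoding; defaults to utf-8."""
    try:
        import chardet
        detected = chardet.detect(string)
        if detected and detected.get('encoding'):
            return detected['encoding']
    except ImportError:
        pass
    return 'utf-8'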
def to_unicode(str_or_unicode, precise=False):
"""
Safely returns a unicode version of a given string
>>> utils.to_unicode('привет')
u'привет'
>>> utils.to_unicode(u'привет')
u'привет'
If `precise` flag is True, tries to guess the correct encoding first.
"""
encoding = detect_encoding(str_or_unicode) if precise else 'utf-8'
if isinstance(str_or_unicode, str):
return unicode(str_or_unicode, encoding, 'replace')
return str_or_unicode
def to_utf8(str_or_unicode):
"""
Safely returns a UTF-8 version of a given string
>>> utils.to_utf8(u'hi')
'hi'
"""
if isinstance(str_or_unicode, unicode):
return str_or_unicode.encode("utf-8", "ignore")
return str(str_or_unicode)
def random_token(length=7):
vals = ("a b c d e f g h i j k l m n o p q r s t u v w x y z "
"0 1 2 3 4 5 6 7 8 9").split(' ')
shuffle(vals)
return ''.join(vals[:length])
def get_delimiter(msg_body):
delimiter = RE_DELIMITER.search(msg_body)
if delimiter:
delimiter = delimiter.group()
else:
delimiter = '\n'
return delimiter
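# --- Editor's sketch (illustration only, not part of the original file) ----
# Quick usage illustration, assuming Python 2 as the module itself does
# (see the unicode/iteritems usage above). Values are hypothetical.
def _example_usage():
    delimiter = get_delimiter(u'Hello\r\nBye')  # falls back to '\n'
    token = random_token()  # 7 shuffled alphanumeric characters by default
    return safe_format(u'{0}{1}', token, delimiter)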
| apache-2.0 |