repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
valtech-mooc/edx-platform | common/lib/xmodule/xmodule/tests/test_textannotation.py | 92 | 2829 | # -*- coding: utf-8 -*-
"Test for Annotation Xmodule functional logic."
import unittest
from mock import Mock
from lxml import etree
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.textannotation_module import TextAnnotationModule
from . import get_test_system
class TextAnnotationModuleTestCase(unittest.TestCase):
    ''' text Annotation Module Test Case '''

    # Raw <annotatable> markup used to build the module under test.
    # NOTE(review): indentation of this literal was lost upstream; the XML
    # parser is whitespace-insensitive for these tests, so flush-left is safe.
    sample_xml = '''
<annotatable>
<instructions><p>Test Instructions.</p></instructions>
<p>
One Fish. Two Fish.
Red Fish. Blue Fish.
Oh the places you'll go!
</p>
</annotatable>
'''

    def setUp(self):
        """
        Makes sure that the Module is declared and mocked with the sample xml above.
        """
        super(TextAnnotationModuleTestCase, self).setUp()

        # return anything except None to test LMS
        def test_real_user(useless):
            useless_user = Mock(email='fake@fake.com', id=useless)
            return useless_user

        # test to make sure that role is checked in LMS
        def test_user_role():
            return 'staff'

        # Stub runtime: real-user/role hooks installed above, anonymous id
        # cleared so the module follows the authenticated-user code path.
        self.system = get_test_system()
        self.system.get_real_user = test_real_user
        self.system.get_user_role = test_user_role
        self.system.anonymous_student_id = None

        # Module under test, fed the sample XML via its 'data' field.
        self.mod = TextAnnotationModule(
            Mock(),
            self.system,
            DictFieldData({'data': self.sample_xml}),
            ScopeIds(None, None, None, None)
        )

    def test_extract_instructions(self):
        """
        Tests to make sure that the instructions are correctly pulled from the sample xml above.
        It also makes sure that if no instructions exist, that it does in fact return nothing.
        """
        xmltree = etree.fromstring(self.sample_xml)
        expected_xml = u"<div><p>Test Instructions.</p></div>"
        actual_xml = self.mod._extract_instructions(xmltree)  # pylint: disable=protected-access
        self.assertIsNotNone(actual_xml)
        self.assertEqual(expected_xml.strip(), actual_xml.strip())

        # No <instructions> element present -> helper must return None.
        xmltree = etree.fromstring('<annotatable>foo</annotatable>')
        actual = self.mod._extract_instructions(xmltree)  # pylint: disable=protected-access
        self.assertIsNone(actual)

    def test_student_view(self):
        """
        Tests the function that passes in all the information in the context that will be used in templates/textannotation.html
        """
        context = self.mod.student_view({}).content
        # Every template variable consumed by textannotation.html must appear
        # in the rendered fragment content.
        for key in ['display_name', 'tag', 'source', 'instructions_html', 'content_html', 'annotation_storage', 'token', 'diacritic_marks', 'default_tab', 'annotation_mode', 'is_course_staff']:
            self.assertIn(key, context)
| agpl-3.0 |
andrewleech/SickRage | lib/sqlalchemy/pool.py | 75 | 43619 | # sqlalchemy/pool.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import time
import traceback
import weakref
from . import exc, log, event, interfaces, util
from .util import queue as sqla_queue
from .util import threading, memoized_property, \
chop_traceback
from collections import deque
proxies = {}
def manage(module, **params):
    """Return a proxy for a DB-API module that automatically
    pools connections.

    Given a DB-API 2.0 module and pool management parameters, returns
    a proxy for the module that will automatically pool connections,
    creating new connection pools for each distinct set of connection
    arguments sent to the decorated module's connect() function.

    :param module: a DB-API 2.0 database module

    :param poolclass: the class used by the pool module to provide
      pooling.  Defaults to :class:`.QueuePool`.

    :param \*\*params: will be passed through to *poolclass*

    """
    # Reuse the existing proxy for this module when one is registered;
    # setdefault keeps the registration race-free if two callers arrive
    # with the same module simultaneously.
    proxy = proxies.get(module)
    if proxy is None:
        proxy = proxies.setdefault(module, _DBProxy(module, **params))
    return proxy
def clear_managers():
    """Remove all current DB-API 2.0 managers.

    All pools and connections are disposed.
    """
    # Snapshot the values so closing managers cannot interfere with
    # iteration over the registry.
    for proxy in list(proxies.values()):
        proxy.close()
    proxies.clear()
reset_rollback = util.symbol('reset_rollback')
reset_commit = util.symbol('reset_commit')
reset_none = util.symbol('reset_none')
class _ConnDialect(object):
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`.Pool` is combined with an :class:`.Engine`,
the :class:`.Engine` replaces this with its own
:class:`.Dialect`.
"""
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
class Pool(log.Identified):
    """Abstract base class for connection pools."""

    # Fallback dialect providing do_rollback()/do_commit()/do_close();
    # replaced with the real Dialect when an Engine adopts this pool.
    _dialect = _ConnDialect()

    def __init__(self,
                 creator, recycle=-1, echo=None,
                 use_threadlocal=False,
                 logging_name=None,
                 reset_on_return=True,
                 listeners=None,
                 events=None,
                 _dispatch=None,
                 _dialect=None):
        """
        Construct a Pool.

        :param creator: a callable function that returns a DB-API
          connection object. The function will be called with
          parameters.

        :param recycle: If set to non -1, number of seconds between
          connection recycling, which means upon checkout, if this
          timeout is surpassed the connection will be closed and
          replaced with a newly opened connection. Defaults to -1.

        :param logging_name: String identifier which will be used within
          the "name" field of logging records generated within the
          "sqlalchemy.pool" logger. Defaults to a hexstring of the object's
          id.

        :param echo: If True, connections being pulled and retrieved
          from the pool will be logged to the standard output, as well
          as pool sizing information. Echoing can also be achieved by
          enabling logging for the "sqlalchemy.pool"
          namespace. Defaults to False.

        :param use_threadlocal: If set to True, repeated calls to
          :meth:`connect` within the same application thread will be
          guaranteed to return the same connection object, if one has
          already been retrieved from the pool and has not been
          returned yet. Offers a slight performance advantage at the
          cost of individual transactions by default. The
          :meth:`.Pool.unique_connection` method is provided to return
          a consistenty unique connection to bypass this behavior
          when the flag is set.

          .. warning:: The :paramref:`.Pool.use_threadlocal` flag
             **does not affect the behavior** of :meth:`.Engine.connect`.
             :meth:`.Engine.connect` makes use of the :meth:`.Pool.unique_connection`
             method which **does not use thread local context**.
             To produce a :class:`.Connection` which refers to the
             :meth:`.Pool.connect` method, use
             :meth:`.Engine.contextual_connect`.

             Note that other SQLAlchemy connectivity systems such as
             :meth:`.Engine.execute` as well as the orm
             :class:`.Session` make use of
             :meth:`.Engine.contextual_connect` internally, so these functions
             are compatible with the :paramref:`.Pool.use_threadlocal` setting.

          .. seealso::

             :ref:`threadlocal_strategy` - contains detail on the
             "threadlocal" engine strategy, which provides a more comprehensive
             approach to "threadlocal" connectivity for the specific
             use case of using :class:`.Engine` and :class:`.Connection` objects
             directly.

        :param reset_on_return: Determine steps to take on
          connections as they are returned to the pool.
          reset_on_return can have any of these values:

          * ``"rollback"`` - call rollback() on the connection,
            to release locks and transaction resources.
            This is the default value. The vast majority
            of use cases should leave this value set.
          * ``True`` - same as 'rollback', this is here for
            backwards compatibility.
          * ``"commit"`` - call commit() on the connection,
            to release locks and transaction resources.
            A commit here may be desirable for databases that
            cache query plans if a commit is emitted,
            such as Microsoft SQL Server. However, this
            value is more dangerous than 'rollback' because
            any data changes present on the transaction
            are committed unconditionally.
          * ``None`` - don't do anything on the connection.
            This setting should only be made on a database
            that has no transaction support at all,
            namely MySQL MyISAM. By not doing anything,
            performance can be improved. This
            setting should **never be selected** for a
            database that supports transactions,
            as it will lead to deadlocks and stale
            state.
          * ``False`` - same as None, this is here for
            backwards compatibility.

          .. versionchanged:: 0.7.6
              :paramref:`.Pool.reset_on_return` accepts ``"rollback"``
              and ``"commit"`` arguments.

        :param events: a list of 2-tuples, each of the form
          ``(callable, target)`` which will be passed to :func:`.event.listen`
          upon construction. Provided here so that event listeners
          can be assigned via :func:`.create_engine` before dialect-level
          listeners are applied.

        :param listeners: Deprecated. A list of
          :class:`~sqlalchemy.interfaces.PoolListener`-like objects or
          dictionaries of callables that receive events when DB-API
          connections are created, checked out and checked in to the
          pool. This has been superseded by
          :func:`~sqlalchemy.event.listen`.

        """
        if logging_name:
            self.logging_name = self._orig_logging_name = logging_name
        else:
            self._orig_logging_name = None

        log.instance_logger(self, echoflag=echo)
        self._threadconns = threading.local()
        self._creator = creator
        self._recycle = recycle
        # Timestamp of the most recent pool-wide invalidation; connections
        # created before this moment are recycled on next checkout.
        self._invalidate_time = 0
        self._use_threadlocal = use_threadlocal
        # Normalize the public reset_on_return value (string/bool/symbol)
        # to one of the three internal util.symbol constants.
        if reset_on_return in ('rollback', True, reset_rollback):
            self._reset_on_return = reset_rollback
        elif reset_on_return in (None, False, reset_none):
            self._reset_on_return = reset_none
        elif reset_on_return in ('commit', reset_commit):
            self._reset_on_return = reset_commit
        else:
            raise exc.ArgumentError(
                "Invalid value for 'reset_on_return': %r"
                % reset_on_return)

        self.echo = echo

        if _dispatch:
            self.dispatch._update(_dispatch, only_propagate=False)
        if _dialect:
            self._dialect = _dialect

        if events:
            for fn, target in events:
                event.listen(self, target, fn)
        if listeners:
            util.warn_deprecated(
                "The 'listeners' argument to Pool (and "
                "create_engine()) is deprecated. Use event.listen().")
            for l in listeners:
                self.add_listener(l)

    def _close_connection(self, connection):
        # Close a raw DBAPI connection; failures are logged, never raised,
        # except for interpreter-shutdown signals which must propagate.
        self.logger.debug("Closing connection %r", connection)
        try:
            self._dialect.do_close(connection)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            self.logger.error("Exception closing connection %r",
                              connection, exc_info=True)

    @util.deprecated(
        2.7, "Pool.add_listener is deprecated. Use event.listen()")
    def add_listener(self, listener):
        """Add a :class:`.PoolListener`-like object to this pool.

        ``listener`` may be an object that implements some or all of
        PoolListener, or a dictionary of callables containing implementations
        of some or all of the named methods in PoolListener.

        """
        interfaces.PoolListener._adapt_listener(self, listener)

    def unique_connection(self):
        """Produce a DBAPI connection that is not referenced by any
        thread-local context.

        This method is equivalent to :meth:`.Pool.connect` when the
        :paramref:`.Pool.use_threadlocal` flag is not set to True.
        When :paramref:`.Pool.use_threadlocal` is True, the :meth:`.Pool.unique_connection`
        method provides a means of bypassing the threadlocal context.

        """
        return _ConnectionFairy._checkout(self)

    def _create_connection(self):
        """Called by subclasses to create a new ConnectionRecord."""
        return _ConnectionRecord(self)

    def _invalidate(self, connection, exception=None):
        """Mark all connections established within the generation
        of the given connection as invalidated.

        If this pool's last invalidate time is before when the given
        connection was created, update the timestamp til now. Otherwise,
        no action is performed.

        Connections with a start time prior to this pool's invalidation
        time will be recycled upon next checkout.
        """
        rec = getattr(connection, "_connection_record", None)
        if not rec or self._invalidate_time < rec.starttime:
            self._invalidate_time = time.time()
        # Also invalidate the triggering connection itself, if still live.
        if getattr(connection, 'is_valid', False):
            connection.invalidate(exception)

    def recreate(self):
        """Return a new :class:`.Pool`, of the same class as this one
        and configured with identical creation arguments.

        This method is used in conjunection with :meth:`dispose`
        to close out an entire :class:`.Pool` and create a new one in
        its place.

        """
        raise NotImplementedError()

    def dispose(self):
        """Dispose of this pool.

        This method leaves the possibility of checked-out connections
        remaining open, as it only affects connections that are
        idle in the pool.

        See also the :meth:`Pool.recreate` method.

        """
        raise NotImplementedError()

    def connect(self):
        """Return a DBAPI connection from the pool.

        The connection is instrumented such that when its
        ``close()`` method is called, the connection will be returned to
        the pool.

        """
        if not self._use_threadlocal:
            return _ConnectionFairy._checkout(self)

        # Threadlocal mode: reuse this thread's live fairy if one exists
        # (the weakref may have gone stale, hence the None check).
        try:
            rec = self._threadconns.current()
        except AttributeError:
            pass
        else:
            if rec is not None:
                return rec._checkout_existing()

        return _ConnectionFairy._checkout(self, self._threadconns)

    def _return_conn(self, record):
        """Given a _ConnectionRecord, return it to the :class:`.Pool`.

        This method is called when an instrumented DBAPI connection
        has its ``close()`` method called.

        """
        if self._use_threadlocal:
            # Drop the thread's cached weakref so the next connect()
            # performs a fresh checkout.
            try:
                del self._threadconns.current
            except AttributeError:
                pass
        self._do_return_conn(record)

    def _do_get(self):
        """Implementation for :meth:`get`, supplied by subclasses."""
        raise NotImplementedError()

    def _do_return_conn(self, conn):
        """Implementation for :meth:`return_conn`, supplied by subclasses."""
        raise NotImplementedError()

    def status(self):
        # Human-readable summary of pool state; supplied by subclasses.
        raise NotImplementedError()
class _ConnectionRecord(object):
    """Internal object which maintains an individual DBAPI connection
    referenced by a :class:`.Pool`.

    The :class:`._ConnectionRecord` object always exists for any particular
    DBAPI connection whether or not that DBAPI connection has been
    "checked out". This is in contrast to the :class:`._ConnectionFairy`
    which is only a public facade to the DBAPI connection while it is checked
    out.

    A :class:`._ConnectionRecord` may exist for a span longer than that
    of a single DBAPI connection. For example, if the
    :meth:`._ConnectionRecord.invalidate`
    method is called, the DBAPI connection associated with this
    :class:`._ConnectionRecord`
    will be discarded, but the :class:`._ConnectionRecord` may be used again,
    in which case a new DBAPI connection is produced when the :class:`.Pool`
    next uses this record.

    The :class:`._ConnectionRecord` is delivered along with connection
    pool events, including :meth:`.PoolEvents.connect` and
    :meth:`.PoolEvents.checkout`, however :class:`._ConnectionRecord` still
    remains an internal object whose API and internals may change.

    .. seealso::

        :class:`._ConnectionFairy`

    """

    def __init__(self, pool):
        self.__pool = pool
        self.connection = self.__connect()
        self.finalize_callback = deque()

        # first_connect fires exactly once per pool (exec_once), then the
        # per-connection connect event fires for this connection.
        pool.dispatch.first_connect.\
            for_modify(pool.dispatch).\
            exec_once(self.connection, self)
        pool.dispatch.connect(self.connection, self)

    connection = None
    """A reference to the actual DBAPI connection being tracked.

    May be ``None`` if this :class:`._ConnectionRecord` has been marked
    as invalidated; a new DBAPI connection may replace it if the owning
    pool calls upon this :class:`._ConnectionRecord` to reconnect.

    """

    @util.memoized_property
    def info(self):
        """The ``.info`` dictionary associated with the DBAPI connection.

        This dictionary is shared among the :attr:`._ConnectionFairy.info`
        and :attr:`.Connection.info` accessors.

        """
        return {}

    @classmethod
    def checkout(cls, pool):
        # Obtain a record from the pool and wrap its connection in a new
        # fairy; if the connection can't be produced, the record goes
        # straight back to the pool.
        rec = pool._do_get()
        try:
            dbapi_connection = rec.get_connection()
        except:
            rec.checkin()
            raise
        fairy = _ConnectionFairy(dbapi_connection, rec)
        # The weakref callback returns the connection to the pool if the
        # fairy is garbage collected without being closed.  The
        # "_finalize_fairy and" guard tolerates module teardown, when the
        # global may already have been set to None.
        rec.fairy_ref = weakref.ref(
            fairy,
            lambda ref: _finalize_fairy and \
                _finalize_fairy(
                    dbapi_connection,
                    rec, pool, ref, pool._echo)
        )
        _refs.add(rec)
        if pool._echo:
            pool.logger.debug("Connection %r checked out from pool",
                              dbapi_connection)
        return fairy

    def checkin(self):
        # Return this record to the pool, running any registered
        # finalizers and firing the checkin event first.
        self.fairy_ref = None
        connection = self.connection
        pool = self.__pool
        while self.finalize_callback:
            finalizer = self.finalize_callback.pop()
            finalizer(connection)
        if pool.dispatch.checkin:
            pool.dispatch.checkin(connection, self)
        pool._return_conn(self)

    def close(self):
        if self.connection is not None:
            self.__close()

    def invalidate(self, e=None):
        """Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`.

        This method is called for all connection invalidations, including
        when the :meth:`._ConnectionFairy.invalidate` or :meth:`.Connection.invalidate`
        methods are called, as well as when any so-called "automatic invalidation"
        condition occurs.

        .. seealso::

            :ref:`pool_connection_invalidation`

        """
        self.__pool.dispatch.invalidate(self.connection, self, e)
        if e is not None:
            self.__pool.logger.info(
                "Invalidate connection %r (reason: %s:%s)",
                self.connection, e.__class__.__name__, e)
        else:
            self.__pool.logger.info(
                "Invalidate connection %r", self.connection)
        self.__close()
        self.connection = None

    def get_connection(self):
        # Return the live DBAPI connection, reconnecting first if the
        # record was invalidated, the connection exceeded the recycle
        # timeout, or a pool-wide invalidation postdates its creation.
        recycle = False
        if self.connection is None:
            self.connection = self.__connect()
            self.info.clear()
            if self.__pool.dispatch.connect:
                self.__pool.dispatch.connect(self.connection, self)
        elif self.__pool._recycle > -1 and \
                time.time() - self.starttime > self.__pool._recycle:
            self.__pool.logger.info(
                "Connection %r exceeded timeout; recycling",
                self.connection)
            recycle = True
        elif self.__pool._invalidate_time > self.starttime:
            self.__pool.logger.info(
                "Connection %r invalidated due to pool invalidation; recycling",
                self.connection
            )
            recycle = True

        if recycle:
            self.__close()
            self.connection = self.__connect()
            self.info.clear()
            if self.__pool.dispatch.connect:
                self.__pool.dispatch.connect(self.connection, self)
        return self.connection

    def __close(self):
        self.__pool._close_connection(self.connection)

    def __connect(self):
        # Create a new DBAPI connection via the pool's creator; starttime
        # is stamped first so recycle math is valid even on retry paths.
        try:
            self.starttime = time.time()
            connection = self.__pool._creator()
            self.__pool.logger.debug("Created new connection %r", connection)
            return connection
        except Exception as e:
            self.__pool.logger.debug("Error on connect(): %s", e)
            raise
def _finalize_fairy(connection, connection_record, pool, ref, echo, fairy=None):
    """Cleanup for a :class:`._ConnectionFairy` whether or not it's already
    been garbage collected.

    Called both from :meth:`._ConnectionFairy._checkin` (``ref`` is None,
    ``fairy`` is the live object) and as a weakref callback after the fairy
    has been collected (``ref`` is the dead weakref, ``fairy`` is None).
    """
    _refs.discard(connection_record)

    # Stale weakref callback: the record has since been handed to a newer
    # fairy, so there is nothing for this invocation to do.
    if ref is not None and \
            connection_record.fairy_ref is not ref:
        return

    if connection is not None:
        if connection_record and echo:
            pool.logger.debug("Connection %r being returned to pool",
                              connection)
        try:
            # Rebuild a transient fairy if needed so reset-on-return logic
            # runs through the normal path.
            fairy = fairy or _ConnectionFairy(connection, connection_record)
            assert fairy.connection is connection
            fairy._reset(pool, echo)

            # Immediately close detached instances
            if not connection_record:
                pool._close_connection(connection)
        except Exception as e:
            if connection_record:
                connection_record.invalidate(e=e)
            if isinstance(e, (SystemExit, KeyboardInterrupt)):
                raise

    # The record (if any) goes back to the pool even when reset failed;
    # invalidate() above already discarded the broken connection.
    if connection_record:
        connection_record.checkin()
_refs = set()
class _ConnectionFairy(object):
    """Proxies a DBAPI connection and provides return-on-dereference
    support.

    This is an internal object used by the :class:`.Pool` implementation
    to provide context management to a DBAPI connection delivered by
    that :class:`.Pool`.

    The name "fairy" is inspired by the fact that the :class:`._ConnectionFairy`
    object's lifespan is transitory, as it lasts only for the length of a
    specific DBAPI connection being checked out from the pool, and additionally
    that as a transparent proxy, it is mostly invisible.

    .. seealso::

        :class:`._ConnectionRecord`

    """

    def __init__(self, dbapi_connection, connection_record):
        self.connection = dbapi_connection
        self._connection_record = connection_record

    connection = None
    """A reference to the actual DBAPI connection being tracked."""

    _connection_record = None
    """A reference to the :class:`._ConnectionRecord` object associated
    with the DBAPI connection.

    This is currently an internal accessor which is subject to change.

    """

    _reset_agent = None
    """Refer to an object with a ``.commit()`` and ``.rollback()`` method;
    if non-None, the "reset-on-return" feature will call upon this object
    rather than directly against the dialect-level do_rollback() and do_commit()
    methods.

    In practice, a :class:`.Connection` assigns a :class:`.Transaction` object
    to this variable when one is in scope so that the :class:`.Transaction`
    takes the job of committing or rolling back on return if
    :meth:`.Connection.close` is called while the :class:`.Transaction`
    still exists.

    This is essentially an "event handler" of sorts but is simplified as an
    instance variable both for performance/simplicity as well as that there
    can only be one "reset agent" at a time.

    """

    @classmethod
    def _checkout(cls, pool, threadconns=None, fairy=None):
        # ``fairy`` is passed when re-checking-out an existing threadlocal
        # connection; otherwise a fresh checkout is made from the pool.
        if not fairy:
            fairy = _ConnectionRecord.checkout(pool)

            fairy._pool = pool
            fairy._counter = 0
            fairy._echo = pool._should_log_debug()

            if threadconns is not None:
                threadconns.current = weakref.ref(fairy)

        if fairy.connection is None:
            raise exc.InvalidRequestError("This connection is closed")

        # _counter tracks nested checkouts; events fire only on the first.
        fairy._counter += 1

        if not pool.dispatch.checkout or fairy._counter != 1:
            return fairy

        # Pool listeners can trigger a reconnection on checkout
        attempts = 2
        while attempts > 0:
            try:
                pool.dispatch.checkout(fairy.connection,
                                       fairy._connection_record,
                                       fairy)
                return fairy
            except exc.DisconnectionError as e:
                pool.logger.info(
                    "Disconnection detected on checkout: %s", e)
                fairy._connection_record.invalidate(e)
                fairy.connection = fairy._connection_record.get_connection()
                attempts -= 1

        pool.logger.info("Reconnection attempts exhausted on checkout")
        fairy.invalidate()
        raise exc.InvalidRequestError("This connection is closed")

    def _checkout_existing(self):
        # Threadlocal re-checkout of this same fairy (bumps _counter).
        return _ConnectionFairy._checkout(self._pool, fairy=self)

    def _checkin(self):
        # Run reset-on-return and hand the record back to the pool, then
        # sever this fairy's links so it can no longer be used.
        _finalize_fairy(self.connection, self._connection_record,
                        self._pool, None, self._echo, fairy=self)
        self.connection = None
        self._connection_record = None

    _close = _checkin

    def _reset(self, pool, echo):
        if pool.dispatch.reset:
            pool.dispatch.reset(self, self._connection_record)
        if pool._reset_on_return is reset_rollback:
            if echo:
                pool.logger.debug("Connection %s rollback-on-return%s",
                                  self.connection,
                                  ", via agent"
                                  if self._reset_agent else "")
            if self._reset_agent:
                self._reset_agent.rollback()
            else:
                # ``self`` proxies rollback()/commit() to the DBAPI
                # connection via __getattr__.
                pool._dialect.do_rollback(self)
        elif pool._reset_on_return is reset_commit:
            if echo:
                pool.logger.debug("Connection %s commit-on-return%s",
                                  self.connection,
                                  ", via agent"
                                  if self._reset_agent else "")
            if self._reset_agent:
                self._reset_agent.commit()
            else:
                pool._dialect.do_commit(self)

    @property
    def _logger(self):
        return self._pool.logger

    @property
    def is_valid(self):
        """Return True if this :class:`._ConnectionFairy` still refers
        to an active DBAPI connection."""
        return self.connection is not None

    @util.memoized_property
    def info(self):
        """Info dictionary associated with the underlying DBAPI connection
        referred to by this :class:`.ConnectionFairy`, allowing user-defined
        data to be associated with the connection.

        The data here will follow along with the DBAPI connection including
        after it is returned to the connection pool and used again
        in subsequent instances of :class:`._ConnectionFairy`. It is shared
        with the :attr:`._ConnectionRecord.info` and :attr:`.Connection.info`
        accessors.

        """
        return self._connection_record.info

    def invalidate(self, e=None):
        """Mark this connection as invalidated.

        This method can be called directly, and is also called as a result
        of the :meth:`.Connection.invalidate` method. When invoked,
        the DBAPI connection is immediately closed and discarded from
        further use by the pool. The invalidation mechanism proceeds
        via the :meth:`._ConnectionRecord.invalidate` internal method.

        .. seealso::

            :ref:`pool_connection_invalidation`

        """
        if self.connection is None:
            util.warn("Can't invalidate an already-closed connection.")
            return
        if self._connection_record:
            self._connection_record.invalidate(e=e)
        self.connection = None
        self._checkin()

    def cursor(self, *args, **kwargs):
        """Return a new DBAPI cursor for the underlying connection.

        This method is a proxy for the ``connection.cursor()`` DBAPI
        method.

        """
        return self.connection.cursor(*args, **kwargs)

    def __getattr__(self, key):
        # Transparent proxying: unknown attributes resolve against the
        # underlying DBAPI connection.
        return getattr(self.connection, key)

    def detach(self):
        """Separate this connection from its Pool.

        This means that the connection will no longer be returned to the
        pool when closed, and will instead be literally closed. The
        containing ConnectionRecord is separated from the DB-API connection,
        and will create a new connection when next used.

        Note that any overall connection limiting constraints imposed by a
        Pool implementation may be violated after a detach, as the detached
        connection is removed from the pool's knowledge and control.
        """
        if self._connection_record is not None:
            _refs.remove(self._connection_record)
            self._connection_record.fairy_ref = None
            self._connection_record.connection = None
            # TODO: should this be _return_conn?
            self._pool._do_return_conn(self._connection_record)
            # Detached connection gets its own copy of .info, no longer
            # shared with the record.
            self.info = self.info.copy()
            self._connection_record = None

    def close(self):
        # Nested-checkout aware: only the outermost close() checks in.
        self._counter -= 1
        if self._counter == 0:
            self._checkin()
class SingletonThreadPool(Pool):
    """A Pool that maintains one connection per thread.

    Maintains one connection per each thread, never moving a connection to a
    thread other than the one which it was created in.

    Options are the same as those of :class:`.Pool`, as well as:

    :param pool_size: The number of threads in which to maintain connections
        at once. Defaults to five.

    :class:`.SingletonThreadPool` is used by the SQLite dialect
    automatically when a memory-based database is used.
    See :ref:`sqlite_toplevel`.

    """

    def __init__(self, creator, pool_size=5, **kw):
        # Threadlocal behavior is intrinsic to this pool type.
        kw['use_threadlocal'] = True
        Pool.__init__(self, creator, **kw)
        self._conn = threading.local()
        self._all_conns = set()
        self.size = pool_size

    def recreate(self):
        self.logger.info("Pool recreating")
        return self.__class__(self._creator,
                              pool_size=self.size,
                              recycle=self._recycle,
                              echo=self.echo,
                              logging_name=self._orig_logging_name,
                              use_threadlocal=self._use_threadlocal,
                              reset_on_return=self._reset_on_return,
                              _dispatch=self.dispatch,
                              _dialect=self._dialect)

    def dispose(self):
        """Dispose of this pool."""
        for conn in self._all_conns:
            try:
                conn.close()
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                # pysqlite won't even let you close a conn from a thread
                # that didn't create it
                pass
        self._all_conns.clear()

    def _cleanup(self):
        # Evict arbitrary connections until we are back under pool_size.
        while len(self._all_conns) >= self.size:
            c = self._all_conns.pop()
            c.close()

    def status(self):
        return "SingletonThreadPool id:%d size: %d" % \
            (id(self), len(self._all_conns))

    def _do_return_conn(self, conn):
        # Connections stay bound to their creating thread; nothing to do.
        pass

    def _do_get(self):
        # Reuse this thread's connection if its weakref is still live.
        try:
            c = self._conn.current()
            if c:
                return c
        except AttributeError:
            pass
        c = self._create_connection()
        self._conn.current = weakref.ref(c)
        if len(self._all_conns) >= self.size:
            self._cleanup()
        self._all_conns.add(c)
        return c
class QueuePool(Pool):
    """A :class:`.Pool` that imposes a limit on the number of open connections.

    :class:`.QueuePool` is the default pooling implementation used for
    all :class:`.Engine` objects, unless the SQLite dialect is in use.

    """

    def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30,
                 **kw):
        """
        Construct a QueuePool.

        :param creator: a callable function that returns a DB-API
          connection object, same as that of :paramref:`.Pool.creator`.

        :param pool_size: The size of the pool to be maintained,
          defaults to 5. This is the largest number of connections that
          will be kept persistently in the pool. Note that the pool
          begins with no connections; once this number of connections
          is requested, that number of connections will remain.
          ``pool_size`` can be set to 0 to indicate no size limit; to
          disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
          instead.

        :param max_overflow: The maximum overflow size of the
          pool. When the number of checked-out connections reaches the
          size set in pool_size, additional connections will be
          returned up to this limit. When those additional connections
          are returned to the pool, they are disconnected and
          discarded. It follows then that the total number of
          simultaneous connections the pool will allow is pool_size +
          `max_overflow`, and the total number of "sleeping"
          connections the pool will allow is pool_size. `max_overflow`
          can be set to -1 to indicate no overflow limit; no limit
          will be placed on the total number of concurrent
          connections. Defaults to 10.

        :param timeout: The number of seconds to wait before giving up
          on returning a connection. Defaults to 30.

        :param \**kw: Other keyword arguments including :paramref:`.Pool.recycle`,
          :paramref:`.Pool.echo`, :paramref:`.Pool.reset_on_return` and others
          are passed to the :class:`.Pool` constructor.

        """
        Pool.__init__(self, creator, **kw)
        self._pool = sqla_queue.Queue(pool_size)
        # _overflow counts checked-out connections beyond pool_size; it
        # starts at -pool_size so that "overflow >= max_overflow" is only
        # true once pool_size + max_overflow connections are outstanding.
        self._overflow = 0 - pool_size
        self._max_overflow = max_overflow
        self._timeout = timeout
        self._overflow_lock = threading.Lock()

    def _do_return_conn(self, conn):
        try:
            self._pool.put(conn, False)
        except sqla_queue.Full:
            # Queue is at pool_size: this was an overflow connection;
            # close it and decrement the overflow counter.
            try:
                conn.close()
            finally:
                self._dec_overflow()

    def _do_get(self):
        use_overflow = self._max_overflow > -1

        try:
            # Only block on the queue when overflow is exhausted; otherwise
            # fall through and create an overflow connection.
            wait = use_overflow and self._overflow >= self._max_overflow
            return self._pool.get(wait, self._timeout)
        except sqla_queue.Empty:
            if use_overflow and self._overflow >= self._max_overflow:
                if not wait:
                    # State changed between the wait decision and the get;
                    # retry from the top.
                    return self._do_get()
                else:
                    raise exc.TimeoutError(
                        "QueuePool limit of size %d overflow %d reached, "
                        "connection timed out, timeout %d" %
                        (self.size(), self.overflow(), self._timeout))

            if self._inc_overflow():
                try:
                    return self._create_connection()
                except:
                    self._dec_overflow()
                    raise
            else:
                # Lost the race for the last overflow slot; retry.
                return self._do_get()

    def _inc_overflow(self):
        if self._max_overflow == -1:
            # Unlimited overflow: no lock needed for correctness of the
            # admission decision.
            self._overflow += 1
            return True
        with self._overflow_lock:
            if self._overflow < self._max_overflow:
                self._overflow += 1
                return True
            else:
                return False

    def _dec_overflow(self):
        if self._max_overflow == -1:
            self._overflow -= 1
            return True
        with self._overflow_lock:
            self._overflow -= 1
            return True

    def recreate(self):
        self.logger.info("Pool recreating")
        return self.__class__(self._creator, pool_size=self._pool.maxsize,
                              max_overflow=self._max_overflow,
                              timeout=self._timeout,
                              recycle=self._recycle, echo=self.echo,
                              logging_name=self._orig_logging_name,
                              use_threadlocal=self._use_threadlocal,
                              reset_on_return=self._reset_on_return,
                              _dispatch=self.dispatch,
                              _dialect=self._dialect)

    def dispose(self):
        # Drain and close every idle connection; checked-out connections
        # are unaffected (see Pool.dispose contract).
        while True:
            try:
                conn = self._pool.get(False)
                conn.close()
            except sqla_queue.Empty:
                break

        self._overflow = 0 - self.size()
        self.logger.info("Pool disposed. %s", self.status())

    def status(self):
        return "Pool size: %d Connections in pool: %d "\
            "Current Overflow: %d Current Checked out "\
            "connections: %d" % (self.size(),
                                 self.checkedin(),
                                 self.overflow(),
                                 self.checkedout())

    def size(self):
        return self._pool.maxsize

    def checkedin(self):
        return self._pool.qsize()

    def overflow(self):
        return self._overflow

    def checkedout(self):
        return self._pool.maxsize - self._pool.qsize() + self._overflow
class NullPool(Pool):
    """A Pool which does not pool connections.

    Instead it literally opens and closes the underlying DB-API connection
    per each connection open/close.

    Reconnect-related functions such as ``recycle`` and connection
    invalidation are not supported by this Pool implementation, since
    no connections are held persistently.

    .. versionchanged:: 0.7
        :class:`.NullPool` is used by the SQlite dialect automatically
        when a file-based database is used. See :ref:`sqlite_toplevel`.

    """

    def _do_get(self):
        # Every checkout opens a brand new connection.
        return self._create_connection()

    def _do_return_conn(self, conn):
        # Returned connections are closed outright, never retained.
        conn.close()

    def status(self):
        return "NullPool"

    def recreate(self):
        self.logger.info("Pool recreating")
        return self.__class__(
            self._creator,
            recycle=self._recycle,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            use_threadlocal=self._use_threadlocal,
            reset_on_return=self._reset_on_return,
            _dispatch=self.dispatch,
            _dialect=self._dialect,
        )

    def dispose(self):
        # Nothing is held by the pool, so there is nothing to dispose.
        pass
class StaticPool(Pool):
    """A Pool of exactly one connection, used for all requests.

    Reconnect-related functions such as ``recycle`` and connection
    invalidation (which is also used to support auto-reconnect) are not
    currently supported by this Pool implementation but may be implemented
    in a future release.
    """

    @memoized_property
    def _conn(self):
        # the single shared DB-API connection, opened lazily on first use
        return self._creator()

    @memoized_property
    def connection(self):
        return _ConnectionRecord(self)

    def status(self):
        return "StaticPool"

    def dispose(self):
        # only touch the connection if the memoized attribute was realized
        if '_conn' in self.__dict__:
            self._conn.close()
            self._conn = None

    def recreate(self):
        """Return a new StaticPool with the same configuration."""
        self.logger.info("Pool recreating")
        settings = dict(
            creator=self._creator,
            recycle=self._recycle,
            use_threadlocal=self._use_threadlocal,
            reset_on_return=self._reset_on_return,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            _dispatch=self.dispatch,
            _dialect=self._dialect,
        )
        return self.__class__(**settings)

    def _create_connection(self):
        return self._conn

    def _do_return_conn(self, conn):
        # the one connection is never released
        pass

    def _do_get(self):
        return self.connection
class AssertionPool(Pool):
    """A :class:`.Pool` that allows at most one checked out connection at
    any given time.

    Raises an :class:`AssertionError` if more than one connection is
    checked out at a time.  Useful for debugging code that is using more
    connections than desired.

    .. versionchanged:: 0.7
        :class:`.AssertionPool` also logs a traceback of where
        the original connection was checked out, and reports
        this in the assertion error raised.
    """

    def __init__(self, *args, **kw):
        self._conn = None
        self._checked_out = False
        # remember where the outstanding checkout happened (on by default)
        self._store_traceback = kw.pop('store_traceback', True)
        self._checkout_traceback = None
        Pool.__init__(self, *args, **kw)

    def status(self):
        return "AssertionPool"

    def _do_return_conn(self, conn):
        if not self._checked_out:
            raise AssertionError("connection is not checked out")
        self._checked_out = False
        assert conn is self._conn

    def dispose(self):
        self._checked_out = False
        if self._conn:
            self._conn.close()

    def recreate(self):
        """Return a new AssertionPool with the same configuration."""
        self.logger.info("Pool recreating")
        cls = self.__class__
        return cls(
            self._creator,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            _dispatch=self.dispatch,
            _dialect=self._dialect,
        )

    def _do_get(self):
        if self._checked_out:
            # include the original checkout location when we recorded one
            suffix = ''
            if self._checkout_traceback:
                suffix = ' at:\n%s' % ''.join(
                    chop_traceback(self._checkout_traceback))
            raise AssertionError("connection is already checked out" + suffix)
        if not self._conn:
            self._conn = self._create_connection()
        self._checked_out = True
        if self._store_traceback:
            self._checkout_traceback = traceback.format_stack()
        return self._conn
class _DBProxy(object):
    """Layers connection pooling behavior on top of a standard DB-API module.

    Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
    specific connect parameters. Other functions and attributes are delegated
    to the underlying DB-API module.
    """

    def __init__(self, module, poolclass=QueuePool, **kw):
        """Initializes a new proxy.

        module
          a DB-API 2.0 module

        poolclass
          a Pool class, defaulting to QueuePool

        Other parameters are sent to the Pool object's constructor.
        """
        self.module = module
        self.kw = kw
        self.poolclass = poolclass
        self.pools = {}
        self._create_pool_mutex = threading.Lock()

    def close(self):
        # drop all pool references at once; finalization of each pool
        # proceeds via normal garbage collection
        self.pools.clear()

    def __del__(self):
        self.close()

    def __getattr__(self, key):
        # anything not defined on the proxy is delegated to the DB-API module
        return getattr(self.module, key)

    def get_pool(self, *args, **kw):
        """Return the pool keyed to the given connect arguments, creating
        it on first use.

        Uses double-checked locking so concurrent callers create at most
        one pool per key.
        """
        key = self._serialize(*args, **kw)
        try:
            return self.pools[key]
        except KeyError:
            # 'with' guarantees the lock is released even if the pool
            # constructor raises (the original acquire/finally pattern,
            # written idiomatically)
            with self._create_pool_mutex:
                if key not in self.pools:
                    # 'sa_pool_key' is only a cache key, not a connect arg
                    kw.pop('sa_pool_key', None)
                    pool = self.poolclass(
                        lambda: self.module.connect(*args, **kw), **self.kw)
                    self.pools[key] = pool
                    return pool
                else:
                    return self.pools[key]

    def connect(self, *args, **kw):
        """Activate a connection to the database.

        Connect to the database using this DBProxy's module and the given
        connect arguments. If the arguments match an existing pool, the
        connection will be returned from the pool's current thread-local
        connection instance, or if there is no thread-local connection
        instance it will be checked out from the set of pooled connections.

        If the pool has no available connections and allows new connections
        to be created, a new database connection will be made.
        """
        return self.get_pool(*args, **kw).connect()

    def dispose(self, *args, **kw):
        """Dispose the pool referenced by the given connect arguments."""
        key = self._serialize(*args, **kw)
        try:
            del self.pools[key]
        except KeyError:
            pass

    def _serialize(self, *args, **kw):
        # an explicit 'sa_pool_key' short-circuits key construction;
        # otherwise the positional args plus sorted kwargs form the key
        if "sa_pool_key" in kw:
            return kw['sa_pool_key']

        return tuple(
            list(args) +
            [(k, kw[k]) for k in sorted(kw)]
        )
| gpl-3.0 |
jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/pip/commands/search.py | 27 | 4605 | from __future__ import absolute_import
import logging
import sys
import textwrap
from pip.basecommand import Command, SUCCESS
from pip.download import PipXmlrpcTransport
from pip.models import PyPI
from pip.utils import get_terminal_size
from pip.utils.logging import indent_log
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor import pkg_resources
from pip._vendor.six.moves import xmlrpc_client
logger = logging.getLogger(__name__)
class SearchCommand(Command):
    """Search for PyPI packages whose name or summary contains <query>."""
    name = 'search'
    usage = """
%prog [options] <query>"""
    summary = 'Search PyPI for packages.'

    def __init__(self, *args, **kw):
        super(SearchCommand, self).__init__(*args, **kw)
        # allow pointing the search at an alternate index
        self.cmd_opts.add_option(
            '--index',
            dest='index',
            metavar='URL',
            default=PyPI.pypi_url,
            help='Base URL of Python Package Index (default %default)')

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        # every positional argument is part of the (mandatory) query
        if not args:
            raise CommandError('Missing required argument (search query).')
        raw_hits = self.search(args, options)
        packages = transform_hits(raw_hits)

        width = None
        if sys.stdout.isatty():
            width = get_terminal_size()[0]

        print_results(packages, terminal_width=width)
        return SUCCESS if raw_hits else NO_MATCHES_FOUND

    def search(self, query, options):
        # query both the name and summary fields over PyPI's XML-RPC API
        index_url = options.index
        with self._build_session(options) as session:
            transport = PipXmlrpcTransport(index_url, session)
            pypi = xmlrpc_client.ServerProxy(index_url, transport)
            return pypi.search({'name': query, 'summary': query}, 'or')
def transform_hits(hits):
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.

    Each returned record has 'name', 'summary', 'versions' and 'score'
    keys; records are sorted by descending score.
    """
    packages = {}
    for hit in hits:
        name = hit['name']
        summary = hit['summary']
        version = hit['version']
        score = hit['_pypi_ordering']
        if score is None:
            # a missing ordering means "unranked"; treat it as lowest score
            score = 0

        # membership test directly on the dict, not on .keys()
        if name not in packages:
            packages[name] = {
                'name': name,
                'summary': summary,
                'versions': [version],
                'score': score,
            }
        else:
            packages[name]['versions'].append(version)

            # if this is the highest version, replace summary and score
            if version == highest_version(packages[name]['versions']):
                packages[name]['summary'] = summary
                packages[name]['score'] = score

    # each record has a unique name now, so we will convert the dict into a
    # list sorted by score
    return sorted(
        packages.values(),
        key=lambda x: x['score'],
        reverse=True,
    )
def print_results(hits, name_column_width=None, terminal_width=None):
    """Log one line per search hit, annotating locally installed packages
    with their installed and latest versions."""
    if not hits:
        return
    if name_column_width is None:
        # widest name plus a little padding
        name_column_width = max(len(hit['name']) for hit in hits) + 4
    installed_packages = [p.project_name for p in pkg_resources.working_set]
    for hit in hits:
        name = hit['name']
        summary = hit['summary'] or ''
        if terminal_width is not None:
            # wrap and indent summary to fit terminal
            wrapped = textwrap.wrap(
                summary,
                terminal_width - name_column_width - 5,
            )
            summary = ('\n' + ' ' * (name_column_width + 3)).join(wrapped)
        line = '%s - %s' % (name.ljust(name_column_width), summary)
        try:
            logger.info(line)
            if name in installed_packages:
                dist = pkg_resources.get_distribution(name)
                with indent_log():
                    latest = highest_version(hit['versions'])
                    if dist.version == latest:
                        logger.info('INSTALLED: %s (latest)', dist.version)
                    else:
                        logger.info('INSTALLED: %s', dist.version)
                        logger.info('LATEST: %s', latest)
        except UnicodeEncodeError:
            # some terminals cannot render the summary; skip the line
            pass
def highest_version(versions):
    """Return the largest version string in *versions*, compared by
    PEP 440 ordering via pkg_resources.parse_version."""
    # max() with a key finds the single largest element in one O(n) pass,
    # instead of sorting the whole list just to take its first element
    return max(versions, key=pkg_resources.parse_version)
| mit |
JoeGlancy/linux | scripts/analyze_suspend.py | 1537 | 120394 | #!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
from datetime import datetime
import struct
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
    """Global, single-instance container used to store system values and
    test parameters for the suspend/resume capture."""
    version = 3.0
    verbose = False                 # enables vprint() debug output
    testdir = '.'                   # output directory for this test run
    tpath = '/sys/kernel/debug/tracing/'        # ftrace debugfs root
    fpdtpath = '/sys/firmware/acpi/tables/FPDT' # ACPI firmware timing table
    epath = '/sys/kernel/debug/tracing/events/power/'
    # power trace events enabled for timeline extraction
    traceevents = [
        'suspend_resume',
        'device_pm_callback_end',
        'device_pm_callback_start'
    ]
    # human-readable names for each /sys/power/state mode
    modename = {
        'freeze': 'Suspend-To-Idle (S0)',
        'standby': 'Power-On Suspend (S1)',
        'mem': 'Suspend-to-RAM (S3)',
        'disk': 'Suspend-to-disk (S4)'
    }
    mempath = '/dev/mem'
    powerfile = '/sys/power/state'
    suspendmode = 'mem'
    hostname = 'localhost'
    prefix = 'test'                 # filename prefix for output files
    teststamp = ''                  # "# <time> <host> <mode> <kernel>" stamp
    dmesgfile = ''
    ftracefile = ''
    htmlfile = ''
    rtcwake = False                 # use the RTC to wake the system
    rtcwaketime = 10                # seconds until the RTC alarm fires
    rtcpath = ''                    # /sys/class/rtc/<dev>, set in __init__
    android = False                 # capture from an android device via adb
    adb = 'adb'
    devicefilter = []
    stamp = 0
    execcount = 1
    x2delay = 0
    usecallgraph = False
    usetraceevents = False
    usetraceeventsonly = False
    notestrun = False
    altdevname = dict()
    postresumetime = 0
    # regex formats for lines embedded in the captured log files
    tracertypefmt = '# tracer: (?P<t>.*)'
    firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
    postresumefmt = '# post resume time (?P<t>[0-9]*)$'
    stampfmt = '# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
        '(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
        ' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
    def __init__(self):
        # discover the hostname and, if present, a usable RTC device
        self.hostname = platform.node()
        if(self.hostname == ''):
            self.hostname = 'localhost'
        rtc = "rtc0"
        if os.path.exists('/dev/rtc'):
            rtc = os.readlink('/dev/rtc')
        rtc = '/sys/class/rtc/'+rtc
        # only accept the RTC if all required sysfs entries exist
        if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
            os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
            self.rtcpath = rtc
    def setOutputFile(self):
        # derive the html filename from the dmesg/ftrace input names when
        # the caller did not supply one explicitly
        if((self.htmlfile == '') and (self.dmesgfile != '')):
            m = re.match('(?P<name>.*)_dmesg\.txt$', self.dmesgfile)
            if(m):
                self.htmlfile = m.group('name')+'.html'
        if((self.htmlfile == '') and (self.ftracefile != '')):
            m = re.match('(?P<name>.*)_ftrace\.txt$', self.ftracefile)
            if(m):
                self.htmlfile = m.group('name')+'.html'
        if(self.htmlfile == ''):
            self.htmlfile = 'output.html'
    def initTestOutput(self, subdir):
        # create a timestamped output directory and name the three output
        # files (<prefix>_<mode>_dmesg.txt / _ftrace.txt / .html)
        if(not self.android):
            self.prefix = self.hostname
            v = open('/proc/version', 'r').read().strip()
            kver = string.split(v)[2]
        else:
            self.prefix = 'android'
            # read the kernel version from the device over adb
            v = os.popen(self.adb+' shell cat /proc/version').read().strip()
            kver = string.split(v)[2]
        testtime = datetime.now().strftime('suspend-%m%d%y-%H%M%S')
        if(subdir != "."):
            self.testdir = subdir+"/"+testtime
        else:
            self.testdir = testtime
        self.teststamp = \
            '# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
        self.dmesgfile = \
            self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'
        self.ftracefile = \
            self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'
        self.htmlfile = \
            self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
        os.mkdir(self.testdir)
    def setDeviceFilter(self, devnames):
        # whitespace-separated device names to restrict the timeline to
        self.devicefilter = string.split(devnames)
    def rtcWakeAlarm(self):
        # program the RTC to fire rtcwaketime seconds from now
        os.system('echo 0 > '+self.rtcpath+'/wakealarm')
        outD = open(self.rtcpath+'/date', 'r').read().strip()
        outT = open(self.rtcpath+'/time', 'r').read().strip()
        mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD)
        mT = re.match('^(?P<h>[0-9]*):(?P<m>[0-9]*):(?P<s>[0-9]*)', outT)
        if(mD and mT):
            # get the current time from hardware
            utcoffset = int((datetime.now() - datetime.utcnow()).total_seconds())
            dt = datetime(\
                int(mD.group('y')), int(mD.group('m')), int(mD.group('d')),
                int(mT.group('h')), int(mT.group('m')), int(mT.group('s')))
            nowtime = int(dt.strftime('%s')) + utcoffset
        else:
            # if hardware time fails, use the software time
            nowtime = int(datetime.now().strftime('%s'))
        alarm = nowtime + self.rtcwaketime
        os.system('echo %d > %s/wakealarm' % (alarm, self.rtcpath))
# the single shared instance used by the whole script
sysvals = SystemValues()
# Class: DeviceNode
# Description:
# A container used to create a device hierachy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
    """One node of the device hierarchy tree built by
    Data.deviceTopology(): a name, a list of child nodes and a depth."""
    name = ''
    children = 0
    depth = 0
    def __init__(self, nodename, nodedepth):
        # record position in the tree, then start with no children
        self.name = nodename
        self.depth = nodedepth
        self.children = []
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
# each test run. The data is organized into a cronological hierarchy:
# Data.dmesg {
# root structure, started as dmesg & ftrace, but now only ftrace
# contents: times for suspend start/end, resume start/end, fwdata
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes intradev trace events
# }
# }
# }
# }
#
class Data:
    """Primary container for one suspend/resume test run.

    Holds the phase dictionary (self.dmesg), timing boundaries, firmware
    data and per-device callback records; see the layout comment above
    the class for the full structure.
    """
    dmesg = {} # root data structure
    phases = [] # ordered list of phases
    start = 0.0 # test start
    end = 0.0 # test end
    tSuspended = 0.0 # low-level suspend start
    tResumed = 0.0 # low-level resume start
    tLow = 0.0 # time spent in low-level suspend (standby/freeze)
    fwValid = False # is firmware data available
    fwSuspend = 0 # time spent in firmware suspend
    fwResume = 0 # time spent in firmware resume
    dmesgtext = [] # dmesg text file in memory
    testnumber = 0
    idstr = ''          # one-letter id prefix for html element ids
    html_device_id = 0  # counter used to mint unique html device ids
    stamp = 0
    outfile = ''
    def __init__(self, num):
        # each test run gets a letter id ('a', 'b', ...) based on its index
        idchar = 'abcdefghijklmnopqrstuvwxyz'
        self.testnumber = num
        self.idstr = idchar[num]
        self.dmesgtext = []
        self.phases = []
        self.dmesg = { # fixed list of 10 phases
            'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#CCFFCC', 'order': 0},
            'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#88FF88', 'order': 1},
            'suspend_late': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#00AA00', 'order': 2},
            'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#008888', 'order': 3},
            'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#0000FF', 'order': 4},
            'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#FF0000', 'order': 5},
            'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#FF9900', 'order': 6},
            'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#FFCC00', 'order': 7},
            'resume': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#FFFF88', 'order': 8},
            'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0,
                'row': 0, 'color': '#FFFFCC', 'order': 9}
        }
        self.phases = self.sortedPhases()
    def getStart(self):
        # start of the first phase is the start of the whole test
        return self.dmesg[self.phases[0]]['start']
    def setStart(self, time):
        self.start = time
        self.dmesg[self.phases[0]]['start'] = time
    def getEnd(self):
        # end of the last phase is the end of the whole test
        return self.dmesg[self.phases[-1]]['end']
    def setEnd(self, time):
        self.end = time
        self.dmesg[self.phases[-1]]['end'] = time
    def isTraceEventOutsideDeviceCalls(self, pid, time):
        # True when no device callback of this pid spans the given time
        for phase in self.phases:
            list = self.dmesg[phase]['list']
            for dev in list:
                d = list[dev]
                if(d['pid'] == pid and time >= d['start'] and
                    time <= d['end']):
                    return False
        return True
    def addIntraDevTraceEvent(self, action, name, pid, time):
        # attach a TraceEvent to the device callback that contains (pid, time);
        # returns the device dict or 0 if no containing callback was found
        if(action == 'mutex_lock_try'):
            color = 'red'
        elif(action == 'mutex_lock_pass'):
            color = 'green'
        elif(action == 'mutex_unlock'):
            color = 'blue'
        else:
            # create separate colors based on the name
            v1 = len(name)*10 % 256
            v2 = string.count(name, 'e')*100 % 256
            v3 = ord(name[0])*20 % 256
            color = '#%06X' % ((v1*0x10000) + (v2*0x100) + v3)
        for phase in self.phases:
            list = self.dmesg[phase]['list']
            for dev in list:
                d = list[dev]
                if(d['pid'] == pid and time >= d['start'] and
                    time <= d['end']):
                    e = TraceEvent(action, name, color, time)
                    if('traceevents' not in d):
                        d['traceevents'] = []
                    d['traceevents'].append(e)
                    return d
                    break
        return 0
    def capIntraDevTraceEvent(self, action, name, pid, time):
        # close out ("cap") a previously added open trace event by setting
        # its length and marking it ready
        for phase in self.phases:
            list = self.dmesg[phase]['list']
            for dev in list:
                d = list[dev]
                if(d['pid'] == pid and time >= d['start'] and
                    time <= d['end']):
                    if('traceevents' not in d):
                        return
                    for e in d['traceevents']:
                        if(e.action == action and
                            e.name == name and not e.ready):
                            e.length = time - e.time
                            e.ready = True
                            break
                    return
    def trimTimeVal(self, t, t0, dT, left):
        # shift a single timestamp t by dT to remove the gap starting at t0;
        # 'left' selects which side of the gap t is clamped toward
        if left:
            if(t > t0):
                if(t - dT < t0):
                    return t0
                return t - dT
            else:
                return t
        else:
            if(t < t0 + dT):
                if(t > t0):
                    return t0 + dT
                return t + dT
            else:
                return t
    def trimTime(self, t0, dT, left):
        # remove a time gap of length dT at t0 from every stored timestamp
        self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
        self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
        self.start = self.trimTimeVal(self.start, t0, dT, left)
        self.end = self.trimTimeVal(self.end, t0, dT, left)
        for phase in self.phases:
            p = self.dmesg[phase]
            p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
            p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
            list = p['list']
            for name in list:
                d = list[name]
                d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
                d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
                if('ftrace' in d):
                    cg = d['ftrace']
                    cg.start = self.trimTimeVal(cg.start, t0, dT, left)
                    cg.end = self.trimTimeVal(cg.end, t0, dT, left)
                    for line in cg.list:
                        line.time = self.trimTimeVal(line.time, t0, dT, left)
                if('traceevents' in d):
                    for e in d['traceevents']:
                        e.time = self.trimTimeVal(e.time, t0, dT, left)
    def normalizeTime(self, tZero):
        # first trim out any standby or freeze clock time
        if(self.tSuspended != self.tResumed):
            if(self.tResumed > tZero):
                self.trimTime(self.tSuspended, \
                    self.tResumed-self.tSuspended, True)
            else:
                self.trimTime(self.tSuspended, \
                    self.tResumed-self.tSuspended, False)
        # shift the timeline so that tZero is the new 0
        self.tSuspended -= tZero
        self.tResumed -= tZero
        self.start -= tZero
        self.end -= tZero
        for phase in self.phases:
            p = self.dmesg[phase]
            p['start'] -= tZero
            p['end'] -= tZero
            list = p['list']
            for name in list:
                d = list[name]
                d['start'] -= tZero
                d['end'] -= tZero
                if('ftrace' in d):
                    cg = d['ftrace']
                    cg.start -= tZero
                    cg.end -= tZero
                    for line in cg.list:
                        line.time -= tZero
                if('traceevents' in d):
                    for e in d['traceevents']:
                        e.time -= tZero
    def newPhaseWithSingleAction(self, phasename, devname, start, end, color):
        # prepend a new phase containing exactly one synthetic device entry
        for phase in self.phases:
            self.dmesg[phase]['order'] += 1
        self.html_device_id += 1
        devid = '%s%d' % (self.idstr, self.html_device_id)
        list = dict()
        list[devname] = \
            {'start': start, 'end': end, 'pid': 0, 'par': '',
            'length': (end-start), 'row': 0, 'id': devid, 'drv': '' };
        self.dmesg[phasename] = \
            {'list': list, 'start': start, 'end': end,
            'row': 0, 'color': color, 'order': 0}
        self.phases = self.sortedPhases()
    def newPhase(self, phasename, start, end, color, order):
        # insert an empty phase at the given order, adjusting the
        # boundaries of its neighbors to meet it
        if(order < 0):
            order = len(self.phases)
        for phase in self.phases[order:]:
            self.dmesg[phase]['order'] += 1
        if(order > 0):
            p = self.phases[order-1]
            self.dmesg[p]['end'] = start
        if(order < len(self.phases)):
            p = self.phases[order]
            self.dmesg[p]['start'] = end
        list = dict()
        self.dmesg[phasename] = \
            {'list': list, 'start': start, 'end': end,
            'row': 0, 'color': color, 'order': order}
        self.phases = self.sortedPhases()
    def setPhase(self, phase, ktime, isbegin):
        # record either the start or the end timestamp of a phase
        if(isbegin):
            self.dmesg[phase]['start'] = ktime
        else:
            self.dmesg[phase]['end'] = ktime
    def dmesgSortVal(self, phase):
        # sort key: the phase's numeric order
        return self.dmesg[phase]['order']
    def sortedPhases(self):
        return sorted(self.dmesg, key=self.dmesgSortVal)
    def sortedDevices(self, phase):
        # device names of a phase, ordered by their start time
        list = self.dmesg[phase]['list']
        slist = []
        tmp = dict()
        for devname in list:
            dev = list[devname]
            tmp[dev['start']] = devname
        for t in sorted(tmp):
            slist.append(tmp[t])
        return slist
    def fixupInitcalls(self, phase, end):
        # if any calls never returned, clip them at system resume end
        phaselist = self.dmesg[phase]['list']
        for devname in phaselist:
            dev = phaselist[devname]
            if(dev['end'] < 0):
                dev['end'] = end
                vprint('%s (%s): callback didnt return' % (devname, phase))
    def deviceFilter(self, devicefilter):
        # remove all by the relatives of the filter devnames
        filter = []
        for phase in self.phases:
            list = self.dmesg[phase]['list']
            for name in devicefilter:
                dev = name
                # keep the device itself and its chain of ancestors
                while(dev in list):
                    if(dev not in filter):
                        filter.append(dev)
                    dev = list[dev]['par']
                # keep all of its descendants too
                children = self.deviceDescendants(name, phase)
                for dev in children:
                    if(dev not in filter):
                        filter.append(dev)
        for phase in self.phases:
            list = self.dmesg[phase]['list']
            rmlist = []
            for name in list:
                pid = list[name]['pid']
                if(name not in filter and pid >= 0):
                    rmlist.append(name)
            for name in rmlist:
                del list[name]
    def fixupInitcallsThatDidntReturn(self):
        # if any calls never returned, clip them at system resume end
        for phase in self.phases:
            self.fixupInitcalls(phase, self.getEnd())
    def newActionGlobal(self, name, start, end):
        # which phase is this device callback or action "in"
        targetphase = "none"
        overlap = 0.0
        # pick the phase with the largest time overlap with [start, end]
        for phase in self.phases:
            pstart = self.dmesg[phase]['start']
            pend = self.dmesg[phase]['end']
            o = max(0, min(end, pend) - max(start, pstart))
            if(o > overlap):
                targetphase = phase
                overlap = o
        if targetphase in self.phases:
            self.newAction(targetphase, name, -1, '', start, end, '')
            return True
        return False
    def newAction(self, phase, name, pid, parent, start, end, drv):
        # new device callback for a specific phase
        self.html_device_id += 1
        devid = '%s%d' % (self.idstr, self.html_device_id)
        list = self.dmesg[phase]['list']
        length = -1.0
        if(start >= 0 and end >= 0):
            length = end - start
        list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent,
                      'length': length, 'row': 0, 'id': devid, 'drv': drv }
    def deviceIDs(self, devlist, phase):
        # html ids of the named devices within a phase
        idlist = []
        list = self.dmesg[phase]['list']
        for devname in list:
            if devname in devlist:
                idlist.append(list[devname]['id'])
        return idlist
    def deviceParentID(self, devname, phase):
        # html id of a device's parent (or the raw parent name if the
        # parent has no entry in this phase)
        pdev = ''
        pdevid = ''
        list = self.dmesg[phase]['list']
        if devname in list:
            pdev = list[devname]['par']
        if pdev in list:
            return list[pdev]['id']
        return pdev
    def deviceChildren(self, devname, phase):
        # names of devices whose parent is devname in this phase
        devlist = []
        list = self.dmesg[phase]['list']
        for child in list:
            if(list[child]['par'] == devname):
                devlist.append(child)
        return devlist
    def deviceDescendants(self, devname, phase):
        # recursive transitive closure of deviceChildren
        children = self.deviceChildren(devname, phase)
        family = children
        for child in children:
            family += self.deviceDescendants(child, phase)
        return family
    def deviceChildrenIDs(self, devname, phase):
        devlist = self.deviceChildren(devname, phase)
        return self.deviceIDs(devlist, phase)
    def printDetails(self):
        # verbose dump of phase boundaries and device counts
        vprint(' test start: %f' % self.start)
        for phase in self.phases:
            dc = len(self.dmesg[phase]['list'])
            vprint(' %16s: %f - %f (%d devices)' % (phase, \
                self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc))
        vprint(' test end: %f' % self.end)
    def masterTopology(self, name, list, depth):
        # recursively build the DeviceNode tree from the 'resume' phase
        node = DeviceNode(name, depth)
        for cname in list:
            clist = self.deviceChildren(cname, 'resume')
            cnode = self.masterTopology(cname, clist, depth+1)
            node.children.append(cnode)
        return node
    def printTopology(self, node):
        # render a DeviceNode tree as nested html lists with per-phase times
        html = ''
        if node.name:
            info = ''
            drv = ''
            for phase in self.phases:
                list = self.dmesg[phase]['list']
                if node.name in list:
                    s = list[node.name]['start']
                    e = list[node.name]['end']
                    if list[node.name]['drv']:
                        drv = ' {'+list[node.name]['drv']+'}'
                    info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
            html += '<li><b>'+node.name+drv+'</b>'
            if info:
                html += '<ul>'+info+'</ul>'
            html += '</li>'
        if len(node.children) > 0:
            html += '<ul>'
            for cnode in node.children:
                html += self.printTopology(cnode)
            html += '</ul>'
        return html
    def rootDeviceList(self):
        # list of devices graphed
        real = []
        for phase in self.dmesg:
            list = self.dmesg[phase]['list']
            for dev in list:
                if list[dev]['pid'] >= 0 and dev not in real:
                    real.append(dev)
        # list of top-most root devices
        rootlist = []
        for phase in self.dmesg:
            list = self.dmesg[phase]['list']
            for dev in list:
                pdev = list[dev]['par']
                # skip usb-style addresses; they are not root devices
                if(re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
                    continue
                if pdev and pdev not in real and pdev not in rootlist:
                    rootlist.append(pdev)
        return rootlist
    def deviceTopology(self):
        # html rendering of the full device tree rooted at the top devices
        rootlist = self.rootDeviceList()
        master = self.masterTopology('', rootlist, 0)
        return self.printTopology(master)
# Class: TraceEvent
# Description:
# A container for trace event data found in the ftrace file
class TraceEvent:
    """A single trace event found in the ftrace file, drawn as a colored
    marker inside the device callback that contains it."""
    ready = False       # set True once the event's length has been capped
    name = ''
    time = 0.0
    color = '#FFFFFF'
    length = 0.0
    action = ''
    def __init__(self, a, n, c, t):
        # store the action, name, color and start time as given
        self.time = t
        self.color = c
        self.name = n
        self.action = a
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
class FTraceLine:
    """One parsed line of ftrace data: either a callgraph call/return/leaf
    or a trace event (see the comment block above for the six forms)."""
    time = 0.0
    length = 0.0
    fcall = False       # line is a function call
    freturn = False     # line is a function return (or a leaf: both set)
    fevent = False      # line is a trace event, not callgraph data
    depth = 0
    name = ''
    type = ''           # event source for trace events (e.g. suspend_resume)
    def __init__(self, t, m, d):
        self.time = float(t)
        # is this a trace event
        if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
            if(d == 'traceevent'):
                # nop format trace event
                msg = m
            else:
                # function_graph format trace event
                em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
                msg = em.group('msg')
            # "<source>: <payload>" splits into type and name
            emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
            if(emm):
                self.name = emm.group('msg')
                self.type = emm.group('call')
            else:
                self.name = msg
            self.fevent = True
            return
        # convert the duration to seconds
        if(d):
            self.length = float(d)/1000000
        # the indentation determines the depth
        match = re.match('^(?P<d> *)(?P<o>.*)$', m)
        if(not match):
            return
        self.depth = self.getDepth(match.group('d'))
        m = match.group('o')
        # function return
        if(m[0] == '}'):
            self.freturn = True
            if(len(m) > 1):
                # includes comment with function name
                match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
                if(match):
                    self.name = match.group('n')
        # function call
        else:
            self.fcall = True
            # function call with children
            if(m[-1] == '{'):
                match = re.match('^(?P<n>.*) *\(.*', m)
                if(match):
                    self.name = match.group('n')
            # function call with no children (leaf)
            elif(m[-1] == ';'):
                self.freturn = True
                match = re.match('^(?P<n>.*) *\(.*', m)
                if(match):
                    self.name = match.group('n')
            # something else (possibly a trace marker)
            else:
                self.name = m
    def getDepth(self, str):
        # two spaces of ftrace indentation per call depth level
        # (note: parameter name shadows the builtin 'str')
        return len(str)/2
    def debugPrint(self, dev):
        # dump this line's call/return shape, name and duration to stdout
        if(self.freturn and self.fcall):
            print('%s -- %f (%02d): %s(); (%.3f us)' % (dev, self.time, \
                self.depth, self.name, self.length*1000000))
        elif(self.freturn):
            print('%s -- %f (%02d): %s} (%.3f us)' % (dev, self.time, \
                self.depth, self.name, self.length*1000000))
        else:
            print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \
                self.depth, self.name, self.length*1000000))
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
class FTraceCallGraph:
    """The ftrace callgraph of a single recursive function, tied to one
    device in one phase; an ordered list of FTraceLine objects."""
    start = -1.0
    end = -1.0
    list = []
    invalid = False     # set True when the graph overflows or underflows
    depth = 0
    def __init__(self):
        self.start = -1.0
        self.end = -1.0
        self.list = []
        self.depth = 0
    def setDepth(self, line):
        # assign the line's depth from the running counter: calls push,
        # returns pop, leaves stay level
        if(line.fcall and not line.freturn):
            line.depth = self.depth
            self.depth += 1
        elif(line.freturn and not line.fcall):
            self.depth -= 1
            line.depth = self.depth
        else:
            line.depth = self.depth
    def addLine(self, line, match):
        # append a line to the graph; returns True when the top-level
        # return closes the graph, False otherwise
        if(not self.invalid):
            self.setDepth(line)
        if(line.depth == 0 and line.freturn):
            if(self.start < 0):
                self.start = line.time
            self.end = line.time
            self.list.append(line)
            return True
        if(self.invalid):
            return False
        # guard against runaway graphs: cap the size and reject negative
        # depth (more returns than calls)
        if(len(self.list) >= 1000000 or self.depth < 0):
            if(len(self.list) > 0):
                first = self.list[0]
                self.list = []
                self.list.append(first)
            self.invalid = True
            if(not match):
                return False
            id = 'task %s cpu %s' % (match.group('pid'), match.group('cpu'))
            window = '(%f - %f)' % (self.start, line.time)
            if(self.depth < 0):
                print('Too much data for '+id+\
                    ' (buffer overflow), ignoring this callback')
            else:
                print('Too much data for '+id+\
                    ' '+window+', ignoring this callback')
            return False
        self.list.append(line)
        if(self.start < 0):
            self.start = line.time
        return False
    def slice(self, t0, tN):
        # extract the sub-callgraph lying within the [t0, tN] time window,
        # rebased so its first call is at depth 0
        minicg = FTraceCallGraph()
        count = -1
        firstdepth = 0
        for l in self.list:
            if(l.time < t0 or l.time > tN):
                continue
            if(count < 0):
                # skip until the first real call inside the window
                if(not l.fcall or l.name == 'dev_driver_string'):
                    continue
                firstdepth = l.depth
                count = 0
            l.depth -= firstdepth
            minicg.addLine(l, 0)
            if((count == 0 and l.freturn and l.fcall) or
                (count > 0 and l.depth <= 0)):
                break
            count += 1
        return minicg
    def sanityCheck(self):
        # verify every call has a matching return; as a side effect, move
        # each return's duration onto its call line
        stack = dict()
        cnt = 0
        for l in self.list:
            if(l.fcall and not l.freturn):
                stack[l.depth] = l
                cnt += 1
            elif(l.freturn and not l.fcall):
                if(l.depth not in stack):
                    return False
                stack[l.depth].length = l.length
                stack[l.depth] = 0
                l.length = 0
                cnt -= 1
        if(cnt == 0):
            return True
        return False
    def debugPrint(self, filename):
        # dump the whole callgraph to stdout or to the named file
        if(filename == 'stdout'):
            print('[%f - %f]') % (self.start, self.end)
            for l in self.list:
                if(l.freturn and l.fcall):
                    print('%f (%02d): %s(); (%.3f us)' % (l.time, \
                        l.depth, l.name, l.length*1000000))
                elif(l.freturn):
                    print('%f (%02d): %s} (%.3f us)' % (l.time, \
                        l.depth, l.name, l.length*1000000))
                else:
                    print('%f (%02d): %s() { (%.3f us)' % (l.time, \
                        l.depth, l.name, l.length*1000000))
            print(' ')
        else:
            fp = open(filename, 'w')
            print(filename)
            for l in self.list:
                if(l.freturn and l.fcall):
                    fp.write('%f (%02d): %s(); (%.3f us)\n' % (l.time, \
                        l.depth, l.name, l.length*1000000))
                elif(l.freturn):
                    fp.write('%f (%02d): %s} (%.3f us)\n' % (l.time, \
                        l.depth, l.name, l.length*1000000))
                else:
                    fp.write('%f (%02d): %s() { (%.3f us)\n' % (l.time, \
                        l.depth, l.name, l.length*1000000))
            fp.close()
# Class: Timeline
# Description:
# A container for a suspend/resume html timeline. In older versions
# of the script there were multiple timelines, but in the latest
# there is only one.
class Timeline:
	"""Container for the single suspend/resume html timeline."""
	html = {}               # rendered html fragments, replaced by __init__
	scaleH = 0.0            # height of the scale row, percent of timeline height
	rowH = 0.0              # height of each row, percent of timeline height
	row_height_pixels = 30  # fixed pixel height of one row
	maxrows = 0             # total number of rows in the timeline
	height = 0              # total timeline height in pixels
	def __init__(self):
		self.html = {
			'timeline': '',
			'legend': '',
			'scale': ''
		}
	def setRows(self, rows):
		"""Size the timeline geometry for the given number of rows."""
		self.maxrows = int(rows)
		self.scaleH = 100.0/float(self.maxrows)
		self.height = self.maxrows*self.row_height_pixels
		# split the remaining height evenly over the non-scale rows
		divisor = float(self.maxrows - 1)
		if divisor < 1.0:
			divisor = 1.0
		self.rowH = (100.0 - self.scaleH)/divisor
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
	"""Container for one suspend/resume test run's ftrace parsing state."""
	# ftrace line format for function_graph tracer output
	ftrace_line_fmt_fg = \
		'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
		' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
		'[ +!]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
	# ftrace line format for nop tracer output
	ftrace_line_fmt_nop = \
		' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
		'(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\
		'(?P<msg>.*)'
	ftrace_line_fmt = ftrace_line_fmt_nop
	cgformat = False   # True when lines carry callgraph duration data
	ftemp = dict()     # pid -> list of FTraceCallGraph objects
	ttemp = dict()     # event name -> list of begin/end dicts
	inthepipe = False  # True while between suspend start/end markers
	tracertype = ''
	data = 0
	def __init__(self, dataobj):
		self.data = dataobj
		self.ftemp = dict()
		self.ttemp = dict()
	def isReady(self):
		"""Return True once a tracer type and data object are set."""
		# fix: tracertype and data are instance attributes; referring to
		# them as bare names raised a NameError at runtime
		if(self.tracertype == '' or not self.data):
			return False
		return True
	def setTracerType(self, tracer):
		"""Select the ftrace line format matching the tracer in use."""
		self.tracertype = tracer
		if(tracer == 'function_graph'):
			self.cgformat = True
			self.ftrace_line_fmt = self.ftrace_line_fmt_fg
		elif(tracer == 'nop'):
			self.ftrace_line_fmt = self.ftrace_line_fmt_nop
		else:
			doError('Invalid tracer format: [%s]' % tracer, False)
# ----------------- FUNCTIONS --------------------
# Function: vprint
# Description:
# verbose print (prints only with -verbose option)
# Arguments:
# msg: the debug/log message to print
def vprint(msg):
	"""Print msg only when the -verbose option is enabled.

	Arguments:
		msg: the debug/log message to print
	"""
	global sysvals
	if sysvals.verbose:
		print(msg)
# Function: initFtrace
# Description:
# Configure ftrace to use trace events and/or a callgraph
def initFtrace():
	"""Configure ftrace to use trace events and/or a callgraph.

	Writes the configuration through the tracing debugfs files under
	sysvals.tpath via shell redirection.  A no-op unless callgraph
	tracing or trace events are enabled in sysvals.
	"""
	global sysvals
	tp = sysvals.tpath
	cf = 'dpm_run_callback'
	if(sysvals.usetraceeventsonly):
		cf = '-e dpm_prepare -e dpm_complete -e dpm_run_callback'
	if(sysvals.usecallgraph or sysvals.usetraceevents):
		print('INITIALIZING FTRACE...')
		# turn trace off
		os.system('echo 0 > '+tp+'tracing_on')
		# set the trace clock to global
		os.system('echo global > '+tp+'trace_clock')
		# set trace buffer to a huge value
		os.system('echo nop > '+tp+'current_tracer')
		os.system('echo 100000 > '+tp+'buffer_size_kb')
		# initialize the callgraph trace, unless this is an x2 run
		if(sysvals.usecallgraph and sysvals.execcount == 1):
			# set trace type
			os.system('echo function_graph > '+tp+'current_tracer')
			os.system('echo "" > '+tp+'set_ftrace_filter')
			# set trace format options
			os.system('echo funcgraph-abstime > '+tp+'trace_options')
			os.system('echo funcgraph-proc > '+tp+'trace_options')
			# focus only on device suspend and resume
			os.system('cat '+tp+'available_filter_functions | grep '+\
				cf+' > '+tp+'set_graph_function')
		if(sysvals.usetraceevents):
			# turn trace events on
			events = iter(sysvals.traceevents)
			for e in events:
				os.system('echo 1 > '+sysvals.epath+e+'/enable')
		# clear the trace buffer
		os.system('echo "" > '+tp+'trace')
# Function: initFtraceAndroid
# Description:
# Configure ftrace to capture trace events
def initFtraceAndroid():
	"""Configure ftrace on an android device to capture trace events.

	Same idea as initFtrace, but every debugfs write is issued on the
	device through 'adb shell'.  Callgraph tracing is not supported on
	android, so only trace events are configured.
	"""
	global sysvals
	tp = sysvals.tpath
	if(sysvals.usetraceevents):
		print('INITIALIZING FTRACE...')
		# turn trace off
		os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'")
		# set the trace clock to global
		os.system(sysvals.adb+" shell 'echo global > "+tp+"trace_clock'")
		# set trace buffer to a huge value
		os.system(sysvals.adb+" shell 'echo nop > "+tp+"current_tracer'")
		os.system(sysvals.adb+" shell 'echo 10000 > "+tp+"buffer_size_kb'")
		# turn trace events on
		events = iter(sysvals.traceevents)
		for e in events:
			os.system(sysvals.adb+" shell 'echo 1 > "+\
				sysvals.epath+e+"/enable'")
		# clear the trace buffer
		os.system(sysvals.adb+" shell 'echo \"\" > "+tp+"trace'")
# Function: verifyFtrace
# Description:
# Check that ftrace is working on the system
# Output:
# True or False
def verifyFtrace():
	"""Check that the ftrace debugfs files needed for tracing exist.

	Output:
		True when every required tracing file is present, either on the
		local system or (in android mode) on the device via adb;
		False otherwise.
	"""
	global sysvals
	tp = sysvals.tpath
	# files needed for any trace data
	required = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
		'trace_marker', 'trace_options', 'tracing_on']
	# callgraph tracing needs a few more
	if sysvals.usecallgraph:
		required += [
			'available_filter_functions',
			'set_ftrace_filter',
			'set_graph_function'
		]
	for name in required:
		target = tp+name
		if sysvals.android:
			# 'adb shell ls' echoes the path back only when it exists
			found = os.popen(sysvals.adb+' shell ls '+target).read().strip()
			if found != target:
				return False
		elif not os.path.exists(target):
			return False
	return True
# Function: parseStamp
# Description:
# Pull in the stamp comment line from the data file(s),
# create the stamp, and add it to the global sysvals object
# Arguments:
# m: the valid re.match output for the stamp line
def parseStamp(m, data):
	"""Build data.stamp from a matched stamp line and mirror it in sysvals.

	Arguments:
		m: valid re.match output for the stamp line, with y/m/d/H/M/S,
			host, mode and kernel groups
		data: the Data object receiving the stamp
	"""
	global sysvals
	data.stamp = {'time': '', 'host': '', 'mode': ''}
	# two-digit year in the stamp is relative to 2000
	when = datetime(int(m.group('y'))+2000, int(m.group('m')),
		int(m.group('d')), int(m.group('H')), int(m.group('M')),
		int(m.group('S')))
	data.stamp['time'] = when.strftime('%B %d %Y, %I:%M:%S %p')
	for field in ('host', 'mode', 'kernel'):
		data.stamp[field] = m.group(field)
	sysvals.suspendmode = data.stamp['mode']
	# the first stamp seen becomes the global one
	if not sysvals.stamp:
		sysvals.stamp = data.stamp
# Function: diffStamp
# Description:
# compare the host, kernel, and mode fields in 3 stamps
# Arguments:
# stamp1: string array with mode, kernel, and host
# stamp2: string array with mode, kernel, and host
# Return:
# True if stamps differ, False if they're the same
def diffStamp(stamp1, stamp2):
	"""Compare the host, kernel, and mode fields of two stamps.

	Fields missing from either stamp are ignored.

	Output:
		True if the stamps differ, False if they're the same
	"""
	for key in ('host', 'kernel', 'mode'):
		if key in stamp1 and key in stamp2 and stamp1[key] != stamp2[key]:
			return True
	return False
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has some or all of the trace events
# required for primary parsing. Set the usetraceevents and/or
# usetraceeventsonly flags in the global sysvals object
def doesTraceLogHaveTraceEvents():
	"""Scan the ftrace log for the trace events needed for primary parsing.

	Sets sysvals.usetraceevents when the suspend_resume event appears in
	the log, and sysvals.usetraceeventsonly when every required event is
	present.
	"""
	global sysvals
	sysvals.usetraceeventsonly = True
	sysvals.usetraceevents = False
	# read the log once instead of spawning a 'cat | grep' shell per
	# event; an unreadable file is treated as containing no events,
	# matching the old shell pipeline's behavior
	try:
		with open(sysvals.ftracefile, 'r') as fp:
			log = fp.read()
	except IOError:
		log = ''
	for e in sysvals.traceevents:
		found = (e+': ') in log
		if(not found):
			sysvals.usetraceeventsonly = False
		if(e == 'suspend_resume' and found):
			sysvals.usetraceevents = True
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
# Legacy support of ftrace outputs that lack the device_pm_callback
# and/or suspend_resume trace events. The primary data should be
# taken from dmesg, and this ftrace is used only for callgraph data
# or custom actions in the timeline. The data is appended to the Data
# objects provided.
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
	"""[deprecated for kernel 3.15 or newer]
	Legacy support for ftrace output lacking the device_pm_callback
	and/or suspend_resume trace events.  The primary timeline data comes
	from dmesg; this pass appends callgraph data and custom trace events
	to the Data objects provided.

	Arguments:
		testruns: the array of Data objects obtained from parseKernelLog
	"""
	global sysvals
	# create TestRun vessels for ftrace parsing
	testcnt = len(testruns)
	testidx = -1
	testrun = []
	for data in testruns:
		testrun.append(TestRun(data))
	# extract the callgraph and traceevent data
	vprint('Analyzing the ftrace data...')
	tf = open(sysvals.ftracefile, 'r')
	for line in tf:
		# remove any latent carriage returns
		line = line.replace('\r\n', '')
		# grab the time stamp first (signifies the start of the test run)
		m = re.match(sysvals.stampfmt, line)
		if(m):
			testidx += 1
			parseStamp(m, testrun[testidx].data)
			continue
		# pull out any firmware data
		if(re.match(sysvals.firmwarefmt, line)):
			continue
		# if we havent found a test time stamp yet keep spinning til we do
		if(testidx < 0):
			continue
		# determine the trace data type (required for further parsing)
		m = re.match(sysvals.tracertypefmt, line)
		if(m):
			tracer = m.group('t')
			testrun[testidx].setTracerType(tracer)
			continue
		# parse only valid lines, if this isnt one move on
		m = re.match(testrun[testidx].ftrace_line_fmt, line)
		if(not m):
			continue
		# gather the basic message data from the line
		m_time = m.group('time')
		m_pid = m.group('pid')
		m_msg = m.group('msg')
		if(testrun[testidx].cgformat):
			m_param3 = m.group('dur')
		else:
			m_param3 = 'traceevent'
		if(m_time and m_pid and m_msg):
			t = FTraceLine(m_time, m_msg, m_param3)
			pid = int(m_pid)
		else:
			continue
		# the line should be a call, return, or event
		if(not t.fcall and not t.freturn and not t.fevent):
			continue
		# only parse the ftrace data during suspend/resume
		data = testrun[testidx].data
		if(not testrun[testidx].inthepipe):
			# look for the suspend start marker
			if(t.fevent):
				if(t.name == 'SUSPEND START'):
					testrun[testidx].inthepipe = True
					data.setStart(t.time)
				continue
		else:
			# trace event processing
			if(t.fevent):
				if(t.name == 'RESUME COMPLETE'):
					testrun[testidx].inthepipe = False
					data.setEnd(t.time)
					if(testidx == testcnt - 1):
						break
					continue
				# general trace events have two types, begin and end
				if(re.match('(?P<name>.*) begin$', t.name)):
					isbegin = True
				elif(re.match('(?P<name>.*) end$', t.name)):
					isbegin = False
				else:
					continue
				# strip an optional [val] suffix from the event name
				m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
				if(m):
					val = m.group('val')
					if val == '0':
						name = m.group('name')
					else:
						name = m.group('name')+'['+val+']'
				else:
					m = re.match('(?P<name>.*) .*', t.name)
					name = m.group('name')
				# special processing for trace events
				if re.match('dpm_prepare\[.*', name):
					continue
				elif re.match('machine_suspend.*', name):
					continue
				elif re.match('suspend_enter\[.*', name):
					if(not isbegin):
						data.dmesg['suspend_prepare']['end'] = t.time
					continue
				elif re.match('dpm_suspend\[.*', name):
					if(not isbegin):
						data.dmesg['suspend']['end'] = t.time
					continue
				elif re.match('dpm_suspend_late\[.*', name):
					if(isbegin):
						data.dmesg['suspend_late']['start'] = t.time
					else:
						data.dmesg['suspend_late']['end'] = t.time
					continue
				elif re.match('dpm_suspend_noirq\[.*', name):
					if(isbegin):
						data.dmesg['suspend_noirq']['start'] = t.time
					else:
						data.dmesg['suspend_noirq']['end'] = t.time
					continue
				elif re.match('dpm_resume_noirq\[.*', name):
					if(isbegin):
						data.dmesg['resume_machine']['end'] = t.time
						data.dmesg['resume_noirq']['start'] = t.time
					else:
						data.dmesg['resume_noirq']['end'] = t.time
					continue
				elif re.match('dpm_resume_early\[.*', name):
					if(isbegin):
						data.dmesg['resume_early']['start'] = t.time
					else:
						data.dmesg['resume_early']['end'] = t.time
					continue
				elif re.match('dpm_resume\[.*', name):
					if(isbegin):
						data.dmesg['resume']['start'] = t.time
					else:
						data.dmesg['resume']['end'] = t.time
					continue
				elif re.match('dpm_complete\[.*', name):
					if(isbegin):
						data.dmesg['resume_complete']['start'] = t.time
					else:
						data.dmesg['resume_complete']['end'] = t.time
					continue
				# is this trace event outside of the devices calls
				if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
					# global events (outside device calls) are simply graphed
					if(isbegin):
						# store each trace event in ttemp
						if(name not in testrun[testidx].ttemp):
							testrun[testidx].ttemp[name] = []
						testrun[testidx].ttemp[name].append(\
							{'begin': t.time, 'end': t.time})
					else:
						# finish off matching trace event in ttemp
						if(name in testrun[testidx].ttemp):
							testrun[testidx].ttemp[name][-1]['end'] = t.time
				else:
					if(isbegin):
						data.addIntraDevTraceEvent('', name, pid, t.time)
					else:
						data.capIntraDevTraceEvent('', name, pid, t.time)
			# call/return processing
			elif sysvals.usecallgraph:
				# create a callgraph object for the data
				if(pid not in testrun[testidx].ftemp):
					testrun[testidx].ftemp[pid] = []
					testrun[testidx].ftemp[pid].append(FTraceCallGraph())
				# when the call is finished, see which device matches it
				cg = testrun[testidx].ftemp[pid][-1]
				if(cg.addLine(t, m)):
					testrun[testidx].ftemp[pid].append(FTraceCallGraph())
	tf.close()
	for test in testrun:
		# add the traceevent data to the device hierarchy
		if(sysvals.usetraceevents):
			for name in test.ttemp:
				for event in test.ttemp[name]:
					begin = event['begin']
					end = event['end']
					# if event starts before timeline start, expand timeline
					if(begin < test.data.start):
						test.data.setStart(begin)
					# if event ends after timeline end, expand the timeline
					if(end > test.data.end):
						test.data.setEnd(end)
					test.data.newActionGlobal(name, begin, end)
		# add the callgraph data to the device hierarchy
		for pid in test.ftemp:
			for cg in test.ftemp[pid]:
				if(not cg.sanityCheck()):
					id = 'task %s cpu %s' % (pid, m.group('cpu'))
					vprint('Sanity check failed for '+\
						id+', ignoring this callback')
					continue
				callstart = cg.start
				callend = cg.end
				# attach the callgraph to the device whose callback spans it
				for p in test.data.phases:
					if(test.data.dmesg[p]['start'] <= callstart and
						callstart <= test.data.dmesg[p]['end']):
						list = test.data.dmesg[p]['list']
						for devname in list:
							dev = list[devname]
							if(pid == dev['pid'] and
								callstart <= dev['start'] and
								callend >= dev['end']):
								dev['ftrace'] = cg
						break
		if(sysvals.verbose):
			test.data.printDetails()
	# add the time in between the tests as a new phase so we can see it
	if(len(testruns) > 1):
		t1e = testruns[0].getEnd()
		t2s = testruns[-1].getStart()
		testruns[-1].newPhaseWithSingleAction('user mode', \
			'user mode', t1e, t2s, '#FF9966')
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog():
	"""Analyze an ftrace log generated by this app during execution.

	Used when the ftrace log is the primary data source; requires the
	suspend_resume and device_pm_callback trace events.  The ftrace
	filename is taken from sysvals.

	Output:
		An array of Data objects, one per test run found in the log
	"""
	global sysvals
	vprint('Analyzing the ftrace data...')
	if(os.path.exists(sysvals.ftracefile) == False):
		doError('%s doesnt exist' % sysvals.ftracefile, False)
	# extract the callgraph and traceevent data
	testruns = []
	testdata = []
	testrun = 0
	data = 0
	tf = open(sysvals.ftracefile, 'r')
	phase = 'suspend_prepare'
	for line in tf:
		# remove any latent carriage returns
		line = line.replace('\r\n', '')
		# stamp line: each stamp means a new test run
		m = re.match(sysvals.stampfmt, line)
		if(m):
			data = Data(len(testdata))
			testdata.append(data)
			testrun = TestRun(data)
			testruns.append(testrun)
			parseStamp(m, data)
			continue
		if(not data):
			continue
		# firmware line: pull out any firmware data
		m = re.match(sysvals.firmwarefmt, line)
		if(m):
			data.fwSuspend = int(m.group('s'))
			data.fwResume = int(m.group('r'))
			if(data.fwSuspend > 0 or data.fwResume > 0):
				data.fwValid = True
			continue
		# tracer type line: determine the trace data type
		m = re.match(sysvals.tracertypefmt, line)
		if(m):
			tracer = m.group('t')
			testrun.setTracerType(tracer)
			continue
		# post resume time line: did this test run include post-resume data
		m = re.match(sysvals.postresumefmt, line)
		if(m):
			t = int(m.group('t'))
			if(t > 0):
				sysvals.postresumetime = t
			continue
		# ftrace line: parse only valid lines
		m = re.match(testrun.ftrace_line_fmt, line)
		if(not m):
			continue
		# gather the basic message data from the line
		m_time = m.group('time')
		m_pid = m.group('pid')
		m_msg = m.group('msg')
		if(testrun.cgformat):
			m_param3 = m.group('dur')
		else:
			m_param3 = 'traceevent'
		if(m_time and m_pid and m_msg):
			t = FTraceLine(m_time, m_msg, m_param3)
			pid = int(m_pid)
		else:
			continue
		# the line should be a call, return, or event
		if(not t.fcall and not t.freturn and not t.fevent):
			continue
		# only parse the ftrace data during suspend/resume
		if(not testrun.inthepipe):
			# look for the suspend start marker
			if(t.fevent):
				if(t.name == 'SUSPEND START'):
					testrun.inthepipe = True
					data.setStart(t.time)
			continue
		# trace event processing
		if(t.fevent):
			if(t.name == 'RESUME COMPLETE'):
				if(sysvals.postresumetime > 0):
					phase = 'post_resume'
					data.newPhase(phase, t.time, t.time, '#FF9966', -1)
				else:
					testrun.inthepipe = False
				data.setEnd(t.time)
				continue
			if(phase == 'post_resume'):
				data.setEnd(t.time)
			if(t.type == 'suspend_resume'):
				# suspend_resume trace events have two types, begin and end
				if(re.match('(?P<name>.*) begin$', t.name)):
					isbegin = True
				elif(re.match('(?P<name>.*) end$', t.name)):
					isbegin = False
				else:
					continue
				# strip an optional [val] suffix from the event name
				m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
				if(m):
					val = m.group('val')
					if val == '0':
						name = m.group('name')
					else:
						name = m.group('name')+'['+val+']'
				else:
					m = re.match('(?P<name>.*) .*', t.name)
					name = m.group('name')
				# ignore these events
				if(re.match('acpi_suspend\[.*', t.name) or
					re.match('suspend_enter\[.*', name)):
					continue
				# -- phase changes --
				# suspend_prepare start
				if(re.match('dpm_prepare\[.*', t.name)):
					phase = 'suspend_prepare'
					if(not isbegin):
						data.dmesg[phase]['end'] = t.time
					continue
				# suspend start
				elif(re.match('dpm_suspend\[.*', t.name)):
					phase = 'suspend'
					data.setPhase(phase, t.time, isbegin)
					continue
				# suspend_late start
				elif(re.match('dpm_suspend_late\[.*', t.name)):
					phase = 'suspend_late'
					data.setPhase(phase, t.time, isbegin)
					continue
				# suspend_noirq start
				elif(re.match('dpm_suspend_noirq\[.*', t.name)):
					phase = 'suspend_noirq'
					data.setPhase(phase, t.time, isbegin)
					if(not isbegin):
						phase = 'suspend_machine'
						data.dmesg[phase]['start'] = t.time
					continue
				# suspend_machine/resume_machine
				elif(re.match('machine_suspend\[.*', t.name)):
					if(isbegin):
						phase = 'suspend_machine'
						data.dmesg[phase]['end'] = t.time
						data.tSuspended = t.time
					else:
						if(sysvals.suspendmode in ['mem', 'disk']):
							data.dmesg['suspend_machine']['end'] = t.time
							data.tSuspended = t.time
						phase = 'resume_machine'
						data.dmesg[phase]['start'] = t.time
						data.tResumed = t.time
						data.tLow = data.tResumed - data.tSuspended
					continue
				# resume_noirq start
				elif(re.match('dpm_resume_noirq\[.*', t.name)):
					phase = 'resume_noirq'
					data.setPhase(phase, t.time, isbegin)
					if(isbegin):
						data.dmesg['resume_machine']['end'] = t.time
					continue
				# resume_early start
				elif(re.match('dpm_resume_early\[.*', t.name)):
					phase = 'resume_early'
					data.setPhase(phase, t.time, isbegin)
					continue
				# resume start
				elif(re.match('dpm_resume\[.*', t.name)):
					phase = 'resume'
					data.setPhase(phase, t.time, isbegin)
					continue
				# resume complete start
				elif(re.match('dpm_complete\[.*', t.name)):
					phase = 'resume_complete'
					if(isbegin):
						data.dmesg[phase]['start'] = t.time
					continue
				# is this trace event outside of the devices calls
				if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
					# global events (outside device calls) are simply graphed
					if(name not in testrun.ttemp):
						testrun.ttemp[name] = []
					if(isbegin):
						# create a new list entry
						testrun.ttemp[name].append(\
							{'begin': t.time, 'end': t.time})
					else:
						if(len(testrun.ttemp[name]) > 0):
							# if an entry exists, assume this is its end
							testrun.ttemp[name][-1]['end'] = t.time
						elif(phase == 'post_resume'):
							# post resume events can just have ends
							testrun.ttemp[name].append({
								'begin': data.dmesg[phase]['start'],
								'end': t.time})
				else:
					if(isbegin):
						data.addIntraDevTraceEvent('', name, pid, t.time)
					else:
						data.capIntraDevTraceEvent('', name, pid, t.time)
			# device callback start
			elif(t.type == 'device_pm_callback_start'):
				m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
					t.name);
				if(not m):
					continue
				drv = m.group('drv')
				n = m.group('d')
				p = m.group('p')
				if(n and p):
					data.newAction(phase, n, pid, p, t.time, -1, drv)
			# device callback finish
			elif(t.type == 'device_pm_callback_end'):
				m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
				if(not m):
					continue
				n = m.group('d')
				list = data.dmesg[phase]['list']
				if(n in list):
					dev = list[n]
					dev['length'] = t.time - dev['start']
					dev['end'] = t.time
		# callgraph processing
		elif sysvals.usecallgraph:
			# this shouldn't happen, but JIC, ignore callgraph data post-res
			if(phase == 'post_resume'):
				continue
			# create a callgraph object for the data
			if(pid not in testrun.ftemp):
				testrun.ftemp[pid] = []
				testrun.ftemp[pid].append(FTraceCallGraph())
			# when the call is finished, see which device matches it
			cg = testrun.ftemp[pid][-1]
			if(cg.addLine(t, m)):
				testrun.ftemp[pid].append(FTraceCallGraph())
	tf.close()
	for test in testruns:
		# add the traceevent data to the device hierarchy
		if(sysvals.usetraceevents):
			for name in test.ttemp:
				for event in test.ttemp[name]:
					begin = event['begin']
					end = event['end']
					# if event starts before timeline start, expand timeline
					if(begin < test.data.start):
						test.data.setStart(begin)
					# if event ends after timeline end, expand the timeline
					if(end > test.data.end):
						test.data.setEnd(end)
					test.data.newActionGlobal(name, begin, end)
		# add the callgraph data to the device hierarchy
		borderphase = {
			'dpm_prepare': 'suspend_prepare',
			'dpm_complete': 'resume_complete'
		}
		for pid in test.ftemp:
			for cg in test.ftemp[pid]:
				if len(cg.list) < 2:
					continue
				if(not cg.sanityCheck()):
					id = 'task %s cpu %s' % (pid, m.group('cpu'))
					vprint('Sanity check failed for '+\
						id+', ignoring this callback')
					continue
				callstart = cg.start
				callend = cg.end
				# graphs rooted at dpm_prepare/dpm_complete are sliced
				# down to the matching device's time window
				if(cg.list[0].name in borderphase):
					p = borderphase[cg.list[0].name]
					list = test.data.dmesg[p]['list']
					for devname in list:
						dev = list[devname]
						if(pid == dev['pid'] and
							callstart <= dev['start'] and
							callend >= dev['end']):
							dev['ftrace'] = cg.slice(dev['start'], dev['end'])
					continue
				if(cg.list[0].name != 'dpm_run_callback'):
					continue
				# attach the callgraph to the device whose callback spans it
				for p in test.data.phases:
					if(test.data.dmesg[p]['start'] <= callstart and
						callstart <= test.data.dmesg[p]['end']):
						list = test.data.dmesg[p]['list']
						for devname in list:
							dev = list[devname]
							if(pid == dev['pid'] and
								callstart <= dev['start'] and
								callend >= dev['end']):
								dev['ftrace'] = cg
						break
	# fill in any missing phases
	for data in testdata:
		lp = data.phases[0]
		for p in data.phases:
			if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
				print('WARNING: phase "%s" is missing!' % p)
			if(data.dmesg[p]['start'] < 0):
				data.dmesg[p]['start'] = data.dmesg[lp]['end']
				if(p == 'resume_machine'):
					data.tSuspended = data.dmesg[lp]['end']
					data.tResumed = data.dmesg[lp]['end']
					data.tLow = 0
			if(data.dmesg[p]['end'] < 0):
				data.dmesg[p]['end'] = data.dmesg[p]['start']
			lp = p
		if(len(sysvals.devicefilter) > 0):
			data.deviceFilter(sysvals.devicefilter)
		data.fixupInitcallsThatDidntReturn()
		if(sysvals.verbose):
			data.printDetails()
	# add the time in between the tests as a new phase so we can see it
	if(len(testdata) > 1):
		t1e = testdata[0].getEnd()
		t2s = testdata[-1].getStart()
		testdata[-1].newPhaseWithSingleAction('user mode', \
			'user mode', t1e, t2s, '#FF9966')
	return testdata
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
	"""[deprecated for kernel 3.15.0 or newer]
	Load the dmesg file into memory and fix up any ordering issues.
	The dmesg filename is taken from sysvals.

	Output:
		An array of empty Data objects with only their dmesgtext
		attributes set
	"""
	global sysvals
	vprint('Analyzing the dmesg data...')
	if(os.path.exists(sysvals.dmesgfile) == False):
		doError('%s doesnt exist' % sysvals.dmesgfile, False)
	# there can be multiple test runs in a single file delineated by stamps
	testruns = []
	data = 0
	lf = open(sysvals.dmesgfile, 'r')
	for line in lf:
		line = line.replace('\r\n', '')
		# strip anything ahead of the opening timestamp bracket
		idx = line.find('[')
		if idx > 1:
			line = line[idx:]
		m = re.match(sysvals.stampfmt, line)
		if(m):
			if(data):
				testruns.append(data)
			data = Data(len(testruns))
			parseStamp(m, data)
			continue
		if(not data):
			continue
		m = re.match(sysvals.firmwarefmt, line)
		if(m):
			data.fwSuspend = int(m.group('s'))
			data.fwResume = int(m.group('r'))
			if(data.fwSuspend > 0 or data.fwResume > 0):
				data.fwValid = True
			continue
		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
		if(m):
			data.dmesgtext.append(line)
			if(re.match('ACPI: resume from mwait', m.group('msg'))):
				print('NOTE: This suspend appears to be freeze rather than'+\
					' %s, it will be treated as such' % sysvals.suspendmode)
				sysvals.suspendmode = 'freeze'
		else:
			vprint('ignoring dmesg line: %s' % line.replace('\n', ''))
	testruns.append(data)
	lf.close()
	if(not data):
		print('ERROR: analyze_suspend header missing from dmesg log')
		sys.exit()
	# fix lines with same timestamp/function with the call and return swapped
	for data in testruns:
		last = ''
		for line in data.dmesgtext:
			mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
				'(?P<f>.*)\+ @ .*, parent: .*', line)
			mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
				'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
			# a return that precedes its own call at the same timestamp
			# for the same function gets swapped back into order
			if(mc and mr and (mc.group('t') == mr.group('t')) and
				(mc.group('f') == mr.group('f'))):
				i = data.dmesgtext.index(last)
				j = data.dmesgtext.index(line)
				data.dmesgtext[i] = line
				data.dmesgtext[j] = last
			last = line
	return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Analyse a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
	"""[deprecated for kernel 3.15.0 or newer]
	Analyse a dmesg log output file generated from this app during the
	execution phase, and create the device structures in memory for
	subsequent html formatting.  Legacy support for kernels whose
	ftrace lacks the suspend_resume or device_pm_callbacks trace events.

	Arguments:
		data: an empty Data object (with dmesgtext) from loadKernelLog
	Output:
		True (the Data object is filled in place)
	"""
	global sysvals
	phase = 'suspend_runtime'
	if(data.fwValid):
		vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
			(data.fwSuspend, data.fwResume))
	# dmesg phase match table
	dm = {
		'suspend_prepare': 'PM: Syncing filesystems.*',
		'suspend': 'PM: Entering [a-z]* sleep.*',
		'suspend_late': 'PM: suspend of devices complete after.*',
		'suspend_noirq': 'PM: late suspend of devices complete after.*',
		'suspend_machine': 'PM: noirq suspend of devices complete after.*',
		'resume_machine': 'ACPI: Low-level resume complete.*',
		'resume_noirq': 'ACPI: Waking up from system sleep state.*',
		'resume_early': 'PM: noirq resume of devices complete after.*',
		'resume': 'PM: early resume of devices complete after.*',
		'resume_complete': 'PM: resume of devices complete after.*',
		'post_resume': '.*Restarting tasks \.\.\..*',
	}
	# some phases have different markers depending on the suspend mode
	if(sysvals.suspendmode == 'standby'):
		dm['resume_machine'] = 'PM: Restoring platform NVS memory'
	elif(sysvals.suspendmode == 'disk'):
		dm['suspend_late'] = 'PM: freeze of devices complete after.*'
		dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*'
		dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*'
		dm['resume_machine'] = 'PM: Restoring platform NVS memory'
		dm['resume_early'] = 'PM: noirq restore of devices complete after.*'
		dm['resume'] = 'PM: early restore of devices complete after.*'
		dm['resume_complete'] = 'PM: restore of devices complete after.*'
	elif(sysvals.suspendmode == 'freeze'):
		dm['resume_machine'] = 'ACPI: resume from mwait'
	# action table (expected events that occur and show up in dmesg)
	at = {
		'sync_filesystems': {
			'smsg': 'PM: Syncing filesystems.*',
			'emsg': 'PM: Preparing system for mem sleep.*' },
		'freeze_user_processes': {
			'smsg': 'Freezing user space processes .*',
			'emsg': 'Freezing remaining freezable tasks.*' },
		'freeze_tasks': {
			'smsg': 'Freezing remaining freezable tasks.*',
			'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
		'ACPI prepare': {
			'smsg': 'ACPI: Preparing to enter system sleep state.*',
			'emsg': 'PM: Saving platform NVS memory.*' },
		'PM vns': {
			'smsg': 'PM: Saving platform NVS memory.*',
			'emsg': 'Disabling non-boot CPUs .*' },
	}
	t0 = -1.0
	cpu_start = -1.0
	prevktime = -1.0
	actions = dict()
	for line in data.dmesgtext:
		# -- preprocessing --
		# parse each dmesg line into the time and message
		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
		if(m):
			val = m.group('ktime')
			try:
				ktime = float(val)
			except:
				doWarning('INVALID DMESG LINE: '+\
					line.replace('\n', ''), 'dmesg')
				continue
			msg = m.group('msg')
			# initialize data start to first line time
			if t0 < 0:
				data.setStart(ktime)
				t0 = ktime
		else:
			continue
		# hack for determining resume_machine end for freeze
		if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
			and phase == 'resume_machine' and \
			re.match('calling  (?P<f>.*)\+ @ .*, parent: .*', msg)):
			data.dmesg['resume_machine']['end'] = ktime
			phase = 'resume_noirq'
			data.dmesg[phase]['start'] = ktime
		# -- phase changes --
		# suspend start
		if(re.match(dm['suspend_prepare'], msg)):
			phase = 'suspend_prepare'
			data.dmesg[phase]['start'] = ktime
			data.setStart(ktime)
		# suspend start
		elif(re.match(dm['suspend'], msg)):
			data.dmesg['suspend_prepare']['end'] = ktime
			phase = 'suspend'
			data.dmesg[phase]['start'] = ktime
		# suspend_late start
		elif(re.match(dm['suspend_late'], msg)):
			data.dmesg['suspend']['end'] = ktime
			phase = 'suspend_late'
			data.dmesg[phase]['start'] = ktime
		# suspend_noirq start
		elif(re.match(dm['suspend_noirq'], msg)):
			data.dmesg['suspend_late']['end'] = ktime
			phase = 'suspend_noirq'
			data.dmesg[phase]['start'] = ktime
		# suspend_machine start
		elif(re.match(dm['suspend_machine'], msg)):
			data.dmesg['suspend_noirq']['end'] = ktime
			phase = 'suspend_machine'
			data.dmesg[phase]['start'] = ktime
		# resume_machine start
		elif(re.match(dm['resume_machine'], msg)):
			# for freeze/standby the true suspend point is the previous line
			if(sysvals.suspendmode in ['freeze', 'standby']):
				data.tSuspended = prevktime
				data.dmesg['suspend_machine']['end'] = prevktime
			else:
				data.tSuspended = ktime
				data.dmesg['suspend_machine']['end'] = ktime
			phase = 'resume_machine'
			data.tResumed = ktime
			data.tLow = data.tResumed - data.tSuspended
			data.dmesg[phase]['start'] = ktime
		# resume_noirq start
		elif(re.match(dm['resume_noirq'], msg)):
			data.dmesg['resume_machine']['end'] = ktime
			phase = 'resume_noirq'
			data.dmesg[phase]['start'] = ktime
		# resume_early start
		elif(re.match(dm['resume_early'], msg)):
			data.dmesg['resume_noirq']['end'] = ktime
			phase = 'resume_early'
			data.dmesg[phase]['start'] = ktime
		# resume start
		elif(re.match(dm['resume'], msg)):
			data.dmesg['resume_early']['end'] = ktime
			phase = 'resume'
			data.dmesg[phase]['start'] = ktime
		# resume complete start
		elif(re.match(dm['resume_complete'], msg)):
			data.dmesg['resume']['end'] = ktime
			phase = 'resume_complete'
			data.dmesg[phase]['start'] = ktime
		# post resume start
		elif(re.match(dm['post_resume'], msg)):
			data.dmesg['resume_complete']['end'] = ktime
			data.setEnd(ktime)
			phase = 'post_resume'
			break
		# -- device callbacks --
		if(phase in data.phases):
			# device init call
			if(re.match('calling  (?P<f>.*)\+ @ .*, parent: .*', msg)):
				sm = re.match('calling  (?P<f>.*)\+ @ '+\
					'(?P<n>.*), parent: (?P<p>.*)', msg);
				f = sm.group('f')
				n = sm.group('n')
				p = sm.group('p')
				if(f and n and p):
					data.newAction(phase, f, int(n), p, ktime, -1, '')
			# device init return
			elif(re.match('call (?P<f>.*)\+ returned .* after '+\
				'(?P<t>.*) usecs', msg)):
				sm = re.match('call (?P<f>.*)\+ returned .* after '+\
					'(?P<t>.*) usecs(?P<a>.*)', msg);
				f = sm.group('f')
				t = sm.group('t')
				list = data.dmesg[phase]['list']
				if(f in list):
					dev = list[f]
					dev['length'] = int(t)
					dev['end'] = ktime
		# -- non-devicecallback actions --
		# if trace events are not available, these are better than nothing
		if(not sysvals.usetraceevents):
			# look for known actions
			for a in at:
				if(re.match(at[a]['smsg'], msg)):
					if(a not in actions):
						actions[a] = []
					actions[a].append({'begin': ktime, 'end': ktime})
				if(re.match(at[a]['emsg'], msg)):
					actions[a][-1]['end'] = ktime
			# now look for CPU on/off events
			if(re.match('Disabling non-boot CPUs .*', msg)):
				# start of first cpu suspend
				cpu_start = ktime
			elif(re.match('Enabling non-boot CPUs .*', msg)):
				# start of first cpu resume
				cpu_start = ktime
			elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
				# end of a cpu suspend, start of the next
				m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
				cpu = 'CPU'+m.group('cpu')
				if(cpu not in actions):
					actions[cpu] = []
				actions[cpu].append({'begin': cpu_start, 'end': ktime})
				cpu_start = ktime
			elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
				# end of a cpu resume, start of the next
				m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
				cpu = 'CPU'+m.group('cpu')
				if(cpu not in actions):
					actions[cpu] = []
				actions[cpu].append({'begin': cpu_start, 'end': ktime})
				cpu_start = ktime
		prevktime = ktime
	# fill in any missing phases
	lp = data.phases[0]
	for p in data.phases:
		if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
			print('WARNING: phase "%s" is missing, something went wrong!' % p)
			print('    In %s, this dmesg line denotes the start of %s:' % \
				(sysvals.suspendmode, p))
			print('        "%s"' % dm[p])
		if(data.dmesg[p]['start'] < 0):
			data.dmesg[p]['start'] = data.dmesg[lp]['end']
			if(p == 'resume_machine'):
				data.tSuspended = data.dmesg[lp]['end']
				data.tResumed = data.dmesg[lp]['end']
				data.tLow = 0
		if(data.dmesg[p]['end'] < 0):
			data.dmesg[p]['end'] = data.dmesg[p]['start']
		lp = p
	# fill in any actions we've found
	for name in actions:
		for event in actions[name]:
			begin = event['begin']
			end = event['end']
			# if event starts before timeline start, expand timeline
			if(begin < data.start):
				data.setStart(begin)
			# if event ends after timeline end, expand the timeline
			if(end > data.end):
				data.setEnd(end)
			data.newActionGlobal(name, begin, end)
	if(sysvals.verbose):
		data.printDetails()
	if(len(sysvals.devicefilter) > 0):
		data.deviceFilter(sysvals.devicefilter)
	data.fixupInitcallsThatDidntReturn()
	return True
# Function: setTimelineRows
# Description:
# Organize the timeline entries into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# list: the list of devices/actions for a single phase
#	 sortedkeys: chronologically sorted key list to use
# Output:
# The total number of rows needed to display this phase of the timeline
def setTimelineRows(list, sortedkeys):
	# Greedy row packing: walk the chronologically sorted keys and drop
	# each unplaced entry into the current row whenever it does not
	# overlap any range already placed there. Returns the row count.
	# mark every entry as not yet assigned to a row
	for key in list:
		list[key]['row'] = -1
	unplaced = len(list)
	placed_by_row = dict()
	row = 0
	while unplaced > 0:
		occupants = placed_by_row.setdefault(row, [])
		for key in sortedkeys:
			entry = list[key]
			if entry['row'] >= 0:
				continue
			s = entry['start']
			e = entry['end']
			# the entry fits if it lies entirely before or entirely
			# after every range already occupying this row
			fits = all(
				((s <= occ['start']) and (e <= occ['start'])) or
				((s >= occ['end']) and (e >= occ['end']))
				for occ in occupants)
			if fits:
				occupants.append(entry)
				entry['row'] = row
				unplaced -= 1
		row += 1
	return row
# Function: createTimeScale
# Description:
# Create the timescale header for the html timeline
# Arguments:
# t0: start time (suspend begin)
# tMax: end time (resume end)
# tSuspend: time when suspend occurs, i.e. the zero time
# Output:
# The html code needed to display the time scale
def createTimeScale(t0, tMax, tSuspended):
	# Build the timescale header for the html timeline: a run of
	# absolutely-positioned tick labels expressed as right-percentages.
	tick = '<div class="t" style="right:{0}%">{1}</div>\n'
	out = '<div id="timescale">\n'
	tTotal = tMax - t0
	# nothing to draw for an empty/negative span (note: no closing div,
	# matching the original behavior)
	if tTotal <= 0:
		return out
	# pick a tick interval that keeps the label count readable
	tS = 1 if tTotal > 4 else 0.1
	if tSuspended < 0:
		# no suspend point: labels simply count up from the start
		for i in range(int(tTotal/tS)+1):
			pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal))
			label = '%0.fms' % (float(i)*tS*1000) if i > 0 else ''
			out += tick.format(pos, label)
	else:
		# anchor the ticks on the suspend point (labelled S/R), with
		# negative times before it and positive times after it
		tSuspend = tSuspended - t0
		divTotal = int(tTotal/tS) + 1
		divSuspend = int(tSuspend/tS)
		s0 = (tSuspend - tS*divSuspend)*100/tTotal
		for i in range(divTotal):
			pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal) - s0)
			if i == 0 and s0 < 3:
				label = ''
			elif i == divSuspend:
				label = 'S/R'
			else:
				label = '%0.fms' % (float(i-divSuspend)*tS*1000)
			out += tick.format(pos, label)
	out += '</div>\n'
	return out
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile):
	"""Create a summary html file for a series of tests.

	Builds the whole document in memory first, then writes it out under
	a context manager. The original opened the file before building the
	body, which leaked the handle (and left a truncated file) if any of
	the html construction raised.

	Arguments:
		testruns: array of Data objects from parseTraceLog
		htmlfile: path of the summary html file to write
	"""
	global sysvals
	# the html header (html head, css code, up to body start)
	html = '<!DOCTYPE html>\n<html>\n<head>\n\
	<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
	<title>AnalyzeSuspend Summary</title>\n\
	<style type=\'text/css\'>\n\
		body {overflow-y: scroll;}\n\
		.stamp {width: 100%;text-align:center;background-color:#495E09;line-height:30px;color:white;font: 25px Arial;}\n\
		table {width:100%;border-collapse: collapse;}\n\
		.summary {font: 22px Arial;border:1px solid;}\n\
		th {border: 1px solid black;background-color:#A7C942;color:white;}\n\
		td {text-align: center;}\n\
		tr.alt td {background-color:#EAF2D3;}\n\
		tr.avg td {background-color:#BDE34C;}\n\
		a:link {color: #90B521;}\n\
		a:visited {color: #495E09;}\n\
		a:hover {color: #B1DF28;}\n\
		a:active {color: #FFFFFF;}\n\
	</style>\n</head>\n<body>\n'
	# group test header: host/kernel/mode/date plus the test count
	count = len(testruns)
	headline_stamp = '<div class="stamp">{0} {1} {2} {3} ({4} tests)</div>\n'
	html += headline_stamp.format(sysvals.stamp['host'],
		sysvals.stamp['kernel'], sysvals.stamp['mode'],
		sysvals.stamp['time'], count)
	# only show the host/kernel/mode/time columns if any test's stamp
	# differs from the global one
	stampcolumns = False
	for data in testruns:
		if diffStamp(sysvals.stamp, data.stamp):
			stampcolumns = True
			break
	th = '\t<th>{0}</th>\n'
	td = '\t<td>{0}</td>\n'
	tdlink = '\t<td><a href="{0}">Click Here</a></td>\n'
	# table header
	html += '<table class="summary">\n<tr>\n'
	html += th.format("Test #")
	if stampcolumns:
		html += th.format("Hostname")
		html += th.format("Kernel Version")
		html += th.format("Suspend Mode")
		html += th.format("Test Time")
	html += th.format("Suspend Time")
	html += th.format("Resume Time")
	html += th.format("Detail")
	html += '</tr>\n'
	# test data, 1 row per test (alternating row shading via tr.alt)
	sTimeAvg = 0.0
	rTimeAvg = 0.0
	num = 1
	for data in testruns:
		# data.end is the end of post_resume
		resumeEnd = data.dmesg['resume_complete']['end']
		if num % 2 == 1:
			html += '<tr class="alt">\n'
		else:
			html += '<tr>\n'
		# test num
		html += td.format("test %d" % num)
		num += 1
		if stampcolumns:
			# host name
			val = "unknown"
			if('host' in data.stamp):
				val = data.stamp['host']
			html += td.format(val)
			# host kernel
			val = "unknown"
			if('kernel' in data.stamp):
				val = data.stamp['kernel']
			html += td.format(val)
			# suspend mode
			val = "unknown"
			if('mode' in data.stamp):
				val = data.stamp['mode']
			html += td.format(val)
			# test time
			val = "unknown"
			if('time' in data.stamp):
				val = data.stamp['time']
			html += td.format(val)
		# suspend time
		sTime = (data.tSuspended - data.start)*1000
		sTimeAvg += sTime
		html += td.format("%3.3f ms" % sTime)
		# resume time
		rTime = (resumeEnd - data.tResumed)*1000
		rTimeAvg += rTime
		html += td.format("%3.3f ms" % rTime)
		# link to the output html
		html += tdlink.format(data.outfile)
		html += '</tr>\n'
	# last line: test average
	if(count > 0):
		sTimeAvg /= count
		rTimeAvg /= count
	html += '<tr class="avg">\n'
	html += td.format('Average')	# name
	if stampcolumns:
		html += td.format('')		# host
		html += td.format('')		# kernel
		html += td.format('')		# mode
		html += td.format('')		# time
	html += td.format("%3.3f ms" % sTimeAvg)	# suspend time
	html += td.format("%3.3f ms" % rTimeAvg)	# resume time
	html += td.format('')		# output link
	html += '</tr>\n'
	# flush the data to file; the context manager guarantees the handle
	# is closed even if a write fails
	with open(htmlfile, 'w') as hf:
		hf.write(html+'</table>\n')
		hf.write('</body>\n</html>\n')
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns):
	"""Create the output html file (sysvals.htmlfile) from the resident
	test data.

	Arguments:
		testruns: array of Data objects from parseKernelLog or parseTraceLog
	Returns:
		True on completion
	"""
	global sysvals
	# shift all timestamps so they are relative to the last test run
	for data in testruns:
		data.normalizeTime(testruns[-1].tSuspended)
	x2changes = ['', 'absolute']
	if len(testruns) > 1:
		x2changes = ['1', 'relative']
	# html function templates
	headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
	html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail%s</button>' % x2changes[0]
	html_zoombox = '<center><button id="zoomin">ZOOM IN</button><button id="zoomout">ZOOM OUT</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
	html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
	html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
	html_device = '<div id="{0}" title="{1}" class="thread" style="left:{2}%;top:{3}%;height:{4}%;width:{5}%;">{6}</div>\n'
	html_traceevent = '<div title="{0}" class="traceevent" style="left:{1}%;top:{2}%;height:{3}%;width:{4}%;border:1px solid {5};background-color:{5}">{6}</div>\n'
	html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}%;height:{3}%;background-color:{4}">{5}</div>\n'
	html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background-color:{3}"></div>\n'
	html_legend = '<div class="square" style="left:{0}%;background-color:{1}"> {2}</div>\n'
	html_timetotal = '<table class="time1">\n<tr>'\
		'<td class="green">{2} Suspend Time: <b>{0} ms</b></td>'\
		'<td class="yellow">{2} Resume Time: <b>{1} ms</b></td>'\
		'</tr>\n</table>\n'
	html_timetotal2 = '<table class="time1">\n<tr>'\
		'<td class="green">{3} Suspend Time: <b>{0} ms</b></td>'\
		'<td class="gray">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
		'<td class="yellow">{3} Resume Time: <b>{2} ms</b></td>'\
		'</tr>\n</table>\n'
	html_timegroups = '<table class="time2">\n<tr>'\
		'<td class="green">{4}Kernel Suspend: {0} ms</td>'\
		'<td class="purple">{4}Firmware Suspend: {1} ms</td>'\
		'<td class="purple">{4}Firmware Resume: {2} ms</td>'\
		'<td class="yellow">{4}Kernel Resume: {3} ms</td>'\
		'</tr>\n</table>\n'
	# device timeline
	vprint('Creating Device Timeline...')
	devtl = Timeline()
	# Generate the header for this timeline
	textnum = ['First', 'Second']
	for data in testruns:
		tTotal = data.end - data.start
		tEnd = data.dmesg['resume_complete']['end']
		if(tTotal == 0):
			print('ERROR: No timeline data')
			sys.exit()
		if(data.tLow > 0):
			low_time = '%.0f'%(data.tLow*1000)
		if data.fwValid:
			# firmware timing is valid: fold fwSuspend/fwResume (ns)
			# into the totals and emit the kernel/firmware breakdown
			suspend_time = '%.0f'%((data.tSuspended-data.start)*1000 + \
				(data.fwSuspend/1000000.0))
			resume_time = '%.0f'%((tEnd-data.tSuspended)*1000 + \
				(data.fwResume/1000000.0))
			testdesc1 = 'Total'
			testdesc2 = ''
			if(len(testruns) > 1):
				testdesc1 = testdesc2 = textnum[data.testnumber]
				testdesc2 += ' '
			if(data.tLow == 0):
				thtml = html_timetotal.format(suspend_time, \
					resume_time, testdesc1)
			else:
				thtml = html_timetotal2.format(suspend_time, low_time, \
					resume_time, testdesc1)
			devtl.html['timeline'] += thtml
			sktime = '%.3f'%((data.dmesg['suspend_machine']['end'] - \
				data.getStart())*1000)
			sftime = '%.3f'%(data.fwSuspend / 1000000.0)
			rftime = '%.3f'%(data.fwResume / 1000000.0)
			rktime = '%.3f'%((data.getEnd() - \
				data.dmesg['resume_machine']['start'])*1000)
			devtl.html['timeline'] += html_timegroups.format(sktime, \
				sftime, rftime, rktime, testdesc2)
		else:
			# kernel-only timing
			suspend_time = '%.0f'%((data.tSuspended-data.start)*1000)
			resume_time = '%.0f'%((tEnd-data.tSuspended)*1000)
			testdesc = 'Kernel'
			if(len(testruns) > 1):
				testdesc = textnum[data.testnumber]+' '+testdesc
			if(data.tLow == 0):
				thtml = html_timetotal.format(suspend_time, \
					resume_time, testdesc)
			else:
				thtml = html_timetotal2.format(suspend_time, low_time, \
					resume_time, testdesc)
			devtl.html['timeline'] += thtml
	# time scale for potentially multiple datasets
	t0 = testruns[0].start
	tMax = testruns[-1].end
	tSuspended = testruns[-1].tSuspended
	tTotal = tMax - t0
	# determine the maximum number of rows we need to draw
	timelinerows = 0
	for data in testruns:
		for phase in data.dmesg:
			list = data.dmesg[phase]['list']
			rows = setTimelineRows(list, list)
			data.dmesg[phase]['row'] = rows
			if(rows > timelinerows):
				timelinerows = rows
	# calculate the timeline height and create bounding box, add buttons
	devtl.setRows(timelinerows + 1)
	devtl.html['timeline'] += html_devlist1
	if len(testruns) > 1:
		devtl.html['timeline'] += html_devlist2
	devtl.html['timeline'] += html_zoombox
	devtl.html['timeline'] += html_timeline.format('dmesg', devtl.height)
	# draw the colored boxes for each of the phases
	for data in testruns:
		for b in data.dmesg:
			phase = data.dmesg[b]
			length = phase['end']-phase['start']
			left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
			width = '%.3f' % ((length*100.0)/tTotal)
			devtl.html['timeline'] += html_phase.format(left, width, \
				'%.3f'%devtl.scaleH, '%.3f'%(100-devtl.scaleH), \
				data.dmesg[b]['color'], '')
	# draw the time scale, try to make the number of labels readable
	devtl.html['scale'] = createTimeScale(t0, tMax, tSuspended)
	devtl.html['timeline'] += devtl.html['scale']
	# draw one div per device, positioned by its start/end percentages
	for data in testruns:
		for b in data.dmesg:
			phaselist = data.dmesg[b]['list']
			for d in phaselist:
				name = d
				drv = ''
				dev = phaselist[d]
				if(d in sysvals.altdevname):
					name = sysvals.altdevname[d]
				if('drv' in dev and dev['drv']):
					drv = ' {%s}' % dev['drv']
				height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
				top = '%.3f' % ((dev['row']*height) + devtl.scaleH)
				left = '%.3f' % (((dev['start']-t0)*100)/tTotal)
				width = '%.3f' % (((dev['end']-dev['start'])*100)/tTotal)
				length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
				color = 'rgba(204,204,204,0.5)'
				devtl.html['timeline'] += html_device.format(dev['id'], \
					d+drv+length+b, left, top, '%.3f'%height, width, name+drv)
	# draw any trace events found
	for data in testruns:
		for b in data.dmesg:
			phaselist = data.dmesg[b]['list']
			for name in phaselist:
				dev = phaselist[name]
				if('traceevents' in dev):
					vprint('Debug trace events found for device %s' % name)
					vprint('%20s %20s %10s %8s' % ('action', \
						'name', 'time(ms)', 'length(ms)'))
					for e in dev['traceevents']:
						vprint('%20s %20s %10.3f %8.3f' % (e.action, \
							e.name, e.time*1000, e.length*1000))
					# NOTE(review): html_traceevent is emitted once per
					# device using the 'e' left over from the vprint loop
					# above, so only the LAST trace event gets a div —
					# looks like a per-event loop is missing here; confirm
					# against a newer version of this tool
					height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
					top = '%.3f' % ((dev['row']*height) + devtl.scaleH)
					left = '%.3f' % (((e.time-t0)*100)/tTotal)
					width = '%.3f' % (e.length*100/tTotal)
					color = 'rgba(204,204,204,0.5)'
					devtl.html['timeline'] += \
						html_traceevent.format(e.action+' '+e.name, \
							left, top, '%.3f'%height, \
							width, e.color, '')
	# timeline is finished
	devtl.html['timeline'] += '</div>\n</div>\n'
	# draw a legend which describes the phases by color
	data = testruns[-1]
	devtl.html['legend'] = '<div class="legend">\n'
	pdelta = 100.0/len(data.phases)
	pmargin = pdelta / 4.0
	for phase in data.phases:
		order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
		# py2 string-module spelling of phase.replace('_', ' ')
		name = string.replace(phase, '_', ' ')
		devtl.html['legend'] += html_legend.format(order, \
			data.dmesg[phase]['color'], name)
	devtl.html['legend'] += '</div>\n'
	hf = open(sysvals.htmlfile, 'w')
	# css height placeholder substituted into the header below (always 0 here)
	thread_height = 0
	# write the html header first (html head, css code, up to body start)
	html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
	<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
	<title>AnalyzeSuspend</title>\n\
	<style type=\'text/css\'>\n\
		body {overflow-y: scroll;}\n\
		.stamp {width: 100%;text-align:center;background-color:gray;line-height:30px;color:white;font: 25px Arial;}\n\
		.callgraph {margin-top: 30px;box-shadow: 5px 5px 20px black;}\n\
		.callgraph article * {padding-left: 28px;}\n\
		h1 {color:black;font: bold 30px Times;}\n\
		t0 {color:black;font: bold 30px Times;}\n\
		t1 {color:black;font: 30px Times;}\n\
		t2 {color:black;font: 25px Times;}\n\
		t3 {color:black;font: 20px Times;white-space:nowrap;}\n\
		t4 {color:black;font: bold 30px Times;line-height:60px;white-space:nowrap;}\n\
		table {width:100%;}\n\
		.gray {background-color:rgba(80,80,80,0.1);}\n\
		.green {background-color:rgba(204,255,204,0.4);}\n\
		.purple {background-color:rgba(128,0,128,0.2);}\n\
		.yellow {background-color:rgba(255,255,204,0.4);}\n\
		.time1 {font: 22px Arial;border:1px solid;}\n\
		.time2 {font: 15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
		td {text-align: center;}\n\
		r {color:#500000;font:15px Tahoma;}\n\
		n {color:#505050;font:15px Tahoma;}\n\
		.tdhl {color: red;}\n\
		.hide {display: none;}\n\
		.pf {display: none;}\n\
		.pf:checked + label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
		.pf:not(:checked) ~ label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
		.pf:checked ~ *:not(:nth-child(2)) {display: none;}\n\
		.zoombox {position: relative; width: 100%; overflow-x: scroll;}\n\
		.timeline {position: relative; font-size: 14px;cursor: pointer;width: 100%; overflow: hidden; background-color:#dddddd;}\n\
		.thread {position: absolute; height: '+'%.3f'%thread_height+'%; overflow: hidden; line-height: 30px; border:1px solid;text-align:center;white-space:nowrap;background-color:rgba(204,204,204,0.5);}\n\
		.thread:hover {background-color:white;border:1px solid red;z-index:10;}\n\
		.hover {background-color:white;border:1px solid red;z-index:10;}\n\
		.traceevent {position: absolute;opacity: 0.3;height: '+'%.3f'%thread_height+'%;width:0;overflow:hidden;line-height:30px;text-align:center;white-space:nowrap;}\n\
		.phase {position: absolute;overflow: hidden;border:0px;text-align:center;}\n\
		.phaselet {position:absolute;overflow:hidden;border:0px;text-align:center;height:100px;font-size:24px;}\n\
		.t {position:absolute;top:0%;height:100%;border-right:1px solid black;}\n\
		.legend {position: relative; width: 100%; height: 40px; text-align: center;margin-bottom:20px}\n\
		.legend .square {position:absolute;top:10px; width: 0px;height: 20px;border:1px solid;padding-left:20px;}\n\
		button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
		.devlist {position:'+x2changes[1]+';width:190px;}\n\
		#devicedetail {height:100px;box-shadow: 5px 5px 20px black;}\n\
	</style>\n</head>\n<body>\n'
	hf.write(html_header)
	# write the test title and general info header
	if(sysvals.stamp['time'] != ""):
		hf.write(headline_stamp.format(sysvals.stamp['host'],
			sysvals.stamp['kernel'], sysvals.stamp['mode'], \
			sysvals.stamp['time']))
	# write the device timeline
	hf.write(devtl.html['timeline'])
	hf.write(devtl.html['legend'])
	hf.write('<div id="devicedetailtitle"></div>\n')
	hf.write('<div id="devicedetail" style="display:none;">\n')
	# draw the colored boxes for the device detail section
	for data in testruns:
		hf.write('<div id="devicedetail%d">\n' % data.testnumber)
		for b in data.phases:
			phase = data.dmesg[b]
			length = phase['end']-phase['start']
			left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
			width = '%.3f' % ((length*100.0)/tTotal)
			hf.write(html_phaselet.format(b, left, width, \
				data.dmesg[b]['color']))
		hf.write('</div>\n')
	hf.write('</div>\n')
	# write the ftrace data (callgraph)
	data = testruns[-1]
	if(sysvals.usecallgraph):
		hf.write('<section id="callgraphs" class="callgraph">\n')
		# write out the ftrace data converted to html
		html_func_top = '<article id="{0}" class="atop" style="background-color:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
		html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
		html_func_end = '</article>\n'
		html_func_leaf = '<article>{0} {1}</article>\n'
		num = 0
		for p in data.phases:
			list = data.dmesg[p]['list']
			for devname in data.sortedDevices(p):
				if('ftrace' not in list[devname]):
					continue
				name = devname
				if(devname in sysvals.altdevname):
					name = sysvals.altdevname[devname]
				devid = list[devname]['id']
				cg = list[devname]['ftrace']
				flen = '<r>(%.3f ms @ %.3f to %.3f)</r>' % \
					((cg.end - cg.start)*1000, cg.start*1000, cg.end*1000)
				hf.write(html_func_top.format(devid, data.dmesg[p]['color'], \
					num, name+' '+p, flen))
				num += 1
				# emit nested articles: fcall opens one, freturn closes
				# one, and a call-and-return line is a leaf
				for line in cg.list:
					if(line.length < 0.000000001):
						flen = ''
					else:
						flen = '<n>(%.3f ms @ %.3f)</n>' % (line.length*1000, \
							line.time*1000)
					if(line.freturn and line.fcall):
						hf.write(html_func_leaf.format(line.name, flen))
					elif(line.freturn):
						hf.write(html_func_end)
					else:
						hf.write(html_func_start.format(num, line.name, flen))
						num += 1
				hf.write(html_func_end)
		hf.write('\n\n    </section>\n')
	# write the footer and close
	addScriptCode(hf, testruns)
	hf.write('</body>\n</html>\n')
	hf.close()
	return True
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
	"""Write the embedded javascript (zoom, hover, device-detail and
	device-list handlers) into the open html file.

	Arguments:
		hf: the open html file pointer
		testruns: array of Data objects from parseKernelLog or parseTraceLog
	"""
	# timeline bounds in ms, relative to the final test's suspend point
	t0 = (testruns[0].start - testruns[-1].tSuspended) * 1000
	tMax = (testruns[-1].end - testruns[-1].tSuspended) * 1000
	# create an array in javascript memory with the device details
	detail = '	var devtable = [];\n'
	for data in testruns:
		topo = data.deviceTopology()
		detail += '	devtable[%d] = "%s";\n' % (data.testnumber, topo)
	detail += '	var bounds = [%f,%f];\n' % (t0, tMax)
	# add the code which will manipulate the data in the browser
	script_code = \
	'<script type="text/javascript">\n'+detail+\
	'	function zoomTimeline() {\n'\
	'		var timescale = document.getElementById("timescale");\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var zoombox = document.getElementById("dmesgzoombox");\n'\
	'		var val = parseFloat(dmesg.style.width);\n'\
	'		var newval = 100;\n'\
	'		var sh = window.outerWidth / 2;\n'\
	'		if(this.id == "zoomin") {\n'\
	'			newval = val * 1.2;\n'\
	'			if(newval > 40000) newval = 40000;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
	'		} else if (this.id == "zoomout") {\n'\
	'			newval = val / 1.2;\n'\
	'			if(newval < 100) newval = 100;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
	'		} else {\n'\
	'			zoombox.scrollLeft = 0;\n'\
	'			dmesg.style.width = "100%";\n'\
	'		}\n'\
	'		var html = "";\n'\
	'		var t0 = bounds[0];\n'\
	'		var tMax = bounds[1];\n'\
	'		var tTotal = tMax - t0;\n'\
	'		var wTotal = tTotal * 100.0 / newval;\n'\
	'		for(var tS = 1000; (wTotal / tS) < 3; tS /= 10);\n'\
	'		if(tS < 1) tS = 1;\n'\
	'		for(var s = ((t0 / tS)|0) * tS; s < tMax; s += tS) {\n'\
	'			var pos = (tMax - s) * 100.0 / tTotal;\n'\
	'			var name = (s == 0)?"S/R":(s+"ms");\n'\
	'			html += "<div class=\\"t\\" style=\\"right:"+pos+"%\\">"+name+"</div>";\n'\
	'		}\n'\
	'		timescale.innerHTML = html;\n'\
	'	}\n'\
	'	function deviceHover() {\n'\
	'		var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		var cpu = -1;\n'\
	'		if(name.match("CPU_ON\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(7));\n'\
	'		else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(8));\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
	'			if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
	'				(name == dname))\n'\
	'			{\n'\
	'				dev[i].className = "thread hover";\n'\
	'			} else {\n'\
	'				dev[i].className = "thread";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function deviceUnhover() {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].className = "thread";\n'\
	'		}\n'\
	'	}\n'\
	'	function deviceTitle(title, total, cpu) {\n'\
	'		var prefix = "Total";\n'\
	'		if(total.length > 3) {\n'\
	'			prefix = "Average";\n'\
	'			total[1] = (total[1]+total[3])/2;\n'\
	'			total[2] = (total[2]+total[4])/2;\n'\
	'		}\n'\
	'		var devtitle = document.getElementById("devicedetailtitle");\n'\
	'		var name = title.slice(0, title.indexOf(" "));\n'\
	'		if(cpu >= 0) name = "CPU"+cpu;\n'\
	'		var driver = "";\n'\
	'		var tS = "<t2>(</t2>";\n'\
	'		var tR = "<t2>)</t2>";\n'\
	'		if(total[1] > 0)\n'\
	'			tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
	'		if(total[2] > 0)\n'\
	'			tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
	'		var s = title.indexOf("{");\n'\
	'		var e = title.indexOf("}");\n'\
	'		if((s >= 0) && (e >= 0))\n'\
	'			driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
	'		if(total[1] > 0 && total[2] > 0)\n'\
	'			devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
	'		else\n'\
	'			devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
	'		return name;\n'\
	'	}\n'\
	'	function deviceDetail() {\n'\
	'		var devinfo = document.getElementById("devicedetail");\n'\
	'		devinfo.style.display = "block";\n'\
	'		var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
	'		var cpu = -1;\n'\
	'		if(name.match("CPU_ON\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(7));\n'\
	'		else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(8));\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		var idlist = [];\n'\
	'		var pdata = [[]];\n'\
	'		var pd = pdata[0];\n'\
	'		var total = [0.0, 0.0, 0.0];\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
	'			if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
	'				(name == dname))\n'\
	'			{\n'\
	'				idlist[idlist.length] = dev[i].id;\n'\
	'				var tidx = 1;\n'\
	'				if(dev[i].id[0] == "a") {\n'\
	'					pd = pdata[0];\n'\
	'				} else {\n'\
	'					if(pdata.length == 1) pdata[1] = [];\n'\
	'					if(total.length == 3) total[3]=total[4]=0.0;\n'\
	'					pd = pdata[1];\n'\
	'					tidx = 3;\n'\
	'				}\n'\
	'				var info = dev[i].title.split(" ");\n'\
	'				var pname = info[info.length-1];\n'\
	'				pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
	'				total[0] += pd[pname];\n'\
	'				if(pname.indexOf("suspend") >= 0)\n'\
	'					total[tidx] += pd[pname];\n'\
	'				else\n'\
	'					total[tidx+1] += pd[pname];\n'\
	'			}\n'\
	'		}\n'\
	'		var devname = deviceTitle(this.title, total, cpu);\n'\
	'		var left = 0.0;\n'\
	'		for (var t = 0; t < pdata.length; t++) {\n'\
	'			pd = pdata[t];\n'\
	'			devinfo = document.getElementById("devicedetail"+t);\n'\
	'			var phases = devinfo.getElementsByClassName("phaselet");\n'\
	'			for (var i = 0; i < phases.length; i++) {\n'\
	'				if(phases[i].id in pd) {\n'\
	'					var w = 100.0*pd[phases[i].id]/total[0];\n'\
	'					var fs = 32;\n'\
	'					if(w < 8) fs = 4*w | 0;\n'\
	'					var fs2 = fs*3/4;\n'\
	'					phases[i].style.width = w+"%";\n'\
	'					phases[i].style.left = left+"%";\n'\
	'					phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
	'					left += w;\n'\
	'					var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
	'					var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace("_", " ")+"</t3>";\n'\
	'					phases[i].innerHTML = time+pname;\n'\
	'				} else {\n'\
	'					phases[i].style.width = "0%";\n'\
	'					phases[i].style.left = left+"%";\n'\
	'				}\n'\
	'			}\n'\
	'		}\n'\
	'		var cglist = document.getElementById("callgraphs");\n'\
	'		if(!cglist) return;\n'\
	'		var cg = cglist.getElementsByClassName("atop");\n'\
	'		for (var i = 0; i < cg.length; i++) {\n'\
	'			if(idlist.indexOf(cg[i].id) >= 0) {\n'\
	'				cg[i].style.display = "block";\n'\
	'			} else {\n'\
	'				cg[i].style.display = "none";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function devListWindow(e) {\n'\
	'		var sx = e.clientX;\n'\
	'		if(sx > window.innerWidth - 440)\n'\
	'			sx = window.innerWidth - 440;\n'\
	'		var cfg="top="+e.screenY+", left="+sx+", width=440, height=720, scrollbars=yes";\n'\
	'		var win = window.open("", "_blank", cfg);\n'\
	'		if(window.chrome) win.moveBy(sx, 0);\n'\
	'		var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
	'			"<style type=\\"text/css\\">"+\n'\
	'			"   ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
	'			"</style>"\n'\
	'		var dt = devtable[0];\n'\
	'		if(e.target.id != "devlist1")\n'\
	'			dt = devtable[1];\n'\
	'		win.document.write(html+dt);\n'\
	'	}\n'\
	'	window.addEventListener("load", function () {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		dmesg.style.width = "100%"\n'\
	'		document.getElementById("zoomin").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomout").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
	'		var devlist = document.getElementsByClassName("devlist");\n'\
	'		for (var i = 0; i < devlist.length; i++)\n'\
	'			devlist[i].onclick = devListWindow;\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].onclick = deviceDetail;\n'\
	'			dev[i].onmouseover = deviceHover;\n'\
	'			dev[i].onmouseout = deviceUnhover;\n'\
	'		}\n'\
	'		zoomTimeline();\n'\
	'	});\n'\
	'</script>\n'
	hf.write(script_code);
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
def executeSuspend():
	"""Execute sysvals.execcount system suspends through the sysfs
	interface (sysvals.powerfile), then copy the output dmesg and
	ftrace files to the test output directory.
	"""
	global sysvals
	detectUSB(False)
	t0 = time.time()*1000
	tp = sysvals.tpath
	# execute however many s/r runs requested
	for count in range(1,sysvals.execcount+1):
		# clear the kernel ring buffer just as we start
		os.system('dmesg -C')
		# enable callgraph ftrace only for the second run
		if(sysvals.usecallgraph and count == 2):
			# set trace type
			os.system('echo function_graph > '+tp+'current_tracer')
			os.system('echo "" > '+tp+'set_ftrace_filter')
			# set trace format options
			os.system('echo funcgraph-abstime > '+tp+'trace_options')
			os.system('echo funcgraph-proc > '+tp+'trace_options')
			# focus only on device suspend and resume
			os.system('cat '+tp+'available_filter_functions | '+\
				'grep dpm_run_callback > '+tp+'set_graph_function')
		# if this is test2 and there's a delay, start here
		if(count > 1 and sysvals.x2delay > 0):
			# busy-wait (1ms sleeps) until x2delay ms have elapsed
			# since t0 (set after the previous resume)
			tN = time.time()*1000
			while (tN - t0) < sysvals.x2delay:
				tN = time.time()*1000
				time.sleep(0.001)
		# start ftrace
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			print('START TRACING')
			os.system('echo 1 > '+tp+'tracing_on')
		# initiate suspend
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			os.system('echo SUSPEND START > '+tp+'trace_marker')
		if(sysvals.rtcwake):
			print('SUSPEND START')
			print('will autoresume in %d seconds' % sysvals.rtcwaketime)
			sysvals.rtcWakeAlarm()
		else:
			print('SUSPEND START (press a key to resume)')
		# writing the mode to the power file is what initiates the
		# suspend; the write/close blocks until the system resumes
		pf = open(sysvals.powerfile, 'w')
		pf.write(sysvals.suspendmode)
		# execution will pause here
		pf.close()
		t0 = time.time()*1000
		# return from suspend
		print('RESUME COMPLETE')
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			os.system('echo RESUME COMPLETE > '+tp+'trace_marker')
		# see if there's firmware timing data to be had
		t = sysvals.postresumetime
		if(t > 0):
			print('Waiting %d seconds for POST-RESUME trace events...' % t)
			time.sleep(t)
		# stop ftrace
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			os.system('echo 0 > '+tp+'tracing_on')
			print('CAPTURING TRACE')
			writeDatafileHeader(sysvals.ftracefile)
			os.system('cat '+tp+'trace >> '+sysvals.ftracefile)
			os.system('echo "" > '+tp+'trace')
		# grab a copy of the dmesg output
		print('CAPTURING DMESG')
		writeDatafileHeader(sysvals.dmesgfile)
		os.system('dmesg -c >> '+sysvals.dmesgfile)
def writeDatafileHeader(filename):
	"""Append the test stamp header (plus optional firmware timing and
	post-resume-time lines) to an output data file.

	Uses a context manager so the handle is closed even if a write
	raises (the original leaked the handle on error).

	Arguments:
		filename: path of the dmesg/ftrace output file to annotate
	"""
	global sysvals
	fw = getFPDT(False)
	prt = sysvals.postresumetime
	with open(filename, 'a') as fp:
		fp.write(sysvals.teststamp+'\n')
		if(fw):
			# firmware suspend/resume values returned by getFPDT
			fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
		if(prt > 0):
			fp.write('# post resume time %u\n' % prt)
# Function: executeAndroidSuspend
# Description:
# Execute system suspend through the sysfs interface
# on a remote android device, then transfer the output
# dmesg and ftrace files to the local output directory.
def executeAndroidSuspend():
	"""Execute system suspend through the sysfs interface on a remote
	android device, then transfer the output dmesg and ftrace files to
	the local output directory.

	Fix: the suspend body was wrapped in a second, nested copy of the
	execcount loop, so the ring-buffer clear and trace start ran once
	per outer iteration while the suspend itself ran execcount times
	inside it (execcount^2 suspends in total). There is now a single
	loop, one full clear/trace/suspend/capture cycle per requested run.
	"""
	global sysvals
	# check to see if the display is currently off
	tp = sysvals.tpath
	out = os.popen(sysvals.adb+\
		' shell dumpsys power | grep mScreenOn').read().strip()
	# if so we need to turn it on so we can issue a new suspend
	if(out.endswith('false')):
		print('Waking the device up for the test...')
		# send the KEYPAD_POWER keyevent to wake it up
		os.system(sysvals.adb+' shell input keyevent 26')
		# wait a few seconds so the user can see the device wake up
		time.sleep(3)
	# execute however many s/r runs requested
	for count in range(1,sysvals.execcount+1):
		# clear the kernel ring buffer just as we start
		os.system(sysvals.adb+' shell dmesg -c > /dev/null 2>&1')
		# start ftrace
		if(sysvals.usetraceevents):
			print('START TRACING')
			os.system(sysvals.adb+" shell 'echo 1 > "+tp+"tracing_on'")
		# initiate suspend
		if(sysvals.usetraceevents):
			os.system(sysvals.adb+\
				" shell 'echo SUSPEND START > "+tp+"trace_marker'")
		print('SUSPEND START (press a key on the device to resume)')
		os.system(sysvals.adb+" shell 'echo "+sysvals.suspendmode+\
			" > "+sysvals.powerfile+"'")
		# execution will pause here, then adb will exit
		while(True):
			# poll until the device answers adb again (i.e. it resumed)
			check = os.popen(sysvals.adb+\
				' shell pwd 2>/dev/null').read().strip()
			if(len(check) > 0):
				break
			time.sleep(1)
		if(sysvals.usetraceevents):
			os.system(sysvals.adb+" shell 'echo RESUME COMPLETE > "+tp+\
				"trace_marker'")
		# return from suspend
		print('RESUME COMPLETE')
		# stop ftrace
		if(sysvals.usetraceevents):
			os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'")
			print('CAPTURING TRACE')
			os.system('echo "'+sysvals.teststamp+'" > '+sysvals.ftracefile)
			os.system(sysvals.adb+' shell cat '+tp+\
				'trace >> '+sysvals.ftracefile)
		# grab a copy of the dmesg output
		print('CAPTURING DMESG')
		os.system('echo "'+sysvals.teststamp+'" > '+sysvals.dmesgfile)
		os.system(sysvals.adb+' shell dmesg >> '+sysvals.dmesgfile)
# Function: setUSBDevicesAuto
# Description:
# Set the autosuspend control parameter of all USB devices to auto
# This can be dangerous, so use at your own risk, most devices are set
#	 to always-on since the kernel can't determine if the device can
# properly autosuspend
def setUSBDevicesAuto():
global sysvals
rootCheck()
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
os.system('echo auto > %s/power/control' % dirname)
name = dirname.split('/')[-1]
desc = os.popen('cat %s/product 2>/dev/null' % \
dirname).read().replace('\n', '')
ctrl = os.popen('cat %s/power/control 2>/dev/null' % \
dirname).read().replace('\n', '')
print('control is %s for %6s: %s' % (ctrl, name, desc))
# Function: yesno
# Description:
#	 Print out an equivalent Y or N for a set of known parameter values
# Output:
#	 'Y', 'N', or ' ' if the value is unknown
def yesno(val):
	lookup = {
		'auto': 'Y', 'enabled': 'Y', 'active': 'Y', '1': 'Y',
		'on': 'N', 'disabled': 'N', 'suspended': 'N',
		'forbidden': 'N', 'unsupported': 'N',
	}
	return lookup.get(val, ' ')
# Function: ms2nice
# Description:
#	 Convert a millisecond count into a concise minutes/seconds string
# Arguments:
#	 val: integer or numeric string count of milliseconds
# Output:
#	 The time string, e.g. "1901m16s"
def ms2nice(val):
	try:
		ms = int(val)
	except (ValueError, TypeError):
		# non-numeric input (e.g. an empty sysfs read) used to return
		# the float 0.0 here, giving the caller an inconsistent type;
		# report zero time as a string so the return type is always str
		ms = 0
	# floor division so the arithmetic is identical on python 2 and 3
	m = ms // 60000
	s = (ms // 1000) - (m * 60)
	return '%3dm%2ds' % (m, s)
# Function: detectUSB
# Description:
#	 Detect all the USB hosts and devices currently connected and add
#	 a list of USB device names to sysvals for better timeline readability
# Arguments:
#	 output: True to output the info to stdout, False otherwise
def detectUSB(output):
	global sysvals
	# per-device sysfs identity attributes, used to build a readable name
	field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''}
	# per-device sysfs power attributes, only read when printing the table
	power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'',
		'control':'', 'persist':'', 'runtime_enabled':'',
		'runtime_status':'', 'runtime_usage':'',
		'runtime_active_time':'',
		'runtime_suspended_time':'',
		'active_duration':'',
		'connected_duration':''}
	if(output):
		print('LEGEND')
		print('---------------------------------------------------------------------------------------------')
		print('  A = async/sync PM queue Y/N                       D = autosuspend delay (seconds)')
		print('  S = autosuspend Y/N                         rACTIVE = runtime active (min/sec)')
		print('  P = persist across suspend Y/N              rSUSPEN = runtime suspend (min/sec)')
		print('  E = runtime suspend enabled/forbidden Y/N    ACTIVE = active duration (min/sec)')
		print('  R = runtime status active/suspended Y/N     CONNECT = connected duration (min/sec)')
		print('  U = runtime usage count')
		print('---------------------------------------------------------------------------------------------')
		print('  NAME       ID      DESCRIPTION         SPEED A S P E R U D rACTIVE rSUSPEN  ACTIVE CONNECT')
		print('---------------------------------------------------------------------------------------------')
	# scan sysfs: a USB device dir has both an idVendor and idProduct file
	for dirname, dirnames, filenames in os.walk('/sys/devices'):
		if(re.match('.*/usb[0-9]*.*', dirname) and
			'idVendor' in filenames and 'idProduct' in filenames):
			for i in field:
				field[i] = os.popen('cat %s/%s 2>/dev/null' % \
					(dirname, i)).read().replace('\n', '')
			name = dirname.split('/')[-1]
			# remember a friendlier alias for this kernel device name
			if(len(field['product']) > 0):
				sysvals.altdevname[name] = \
					'%s [%s]' % (field['product'], name)
			else:
				sysvals.altdevname[name] = \
					'%s:%s [%s]' % (field['idVendor'], \
						field['idProduct'], name)
			if(output):
				for i in power:
					power[i] = os.popen('cat %s/power/%s 2>/dev/null' % \
						(dirname, i)).read().replace('\n', '')
				# root hubs (usbN) are left-aligned in the table
				if(re.match('usb[0-9]*', name)):
					first = '%-8s' % name
				else:
					first = '%8s' % name
				print('%s [%s:%s] %-20s %-4s %1s %1s %1s %1s %1s %1s %1s %s %s %s %s' % \
					(first, field['idVendor'], field['idProduct'], \
					field['product'][0:20], field['speed'], \
					yesno(power['async']), \
					yesno(power['control']), \
					yesno(power['persist']), \
					yesno(power['runtime_enabled']), \
					yesno(power['runtime_status']), \
					power['runtime_usage'], \
					power['autosuspend'], \
					ms2nice(power['runtime_active_time']), \
					ms2nice(power['runtime_suspended_time']), \
					ms2nice(power['active_duration']), \
					ms2nice(power['connected_duration'])))
# Function: getModes
# Description:
#	 Determine the supported power modes on this system (reads the
#	 suspend mode list from sysfs, locally or over adb on android)
# Output:
#	 A string list of the available modes
def getModes():
	global sysvals
	modes = ''
	if(not sysvals.android):
		if(os.path.exists(sysvals.powerfile)):
			fp = open(sysvals.powerfile, 'r')
			# str.split() replaces the deprecated string.split(),
			# which was removed in python 3; behavior is identical
			# (split on runs of whitespace)
			modes = fp.read().split()
			fp.close()
	else:
		line = os.popen(sysvals.adb+' shell cat '+\
			sysvals.powerfile).read().strip()
		modes = line.split()
	return modes
# Function: getFPDT
# Description:
#	 Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
#	 output: True to output the info to stdout, False otherwise
# Output:
#	 False on any failure, otherwise a two element list:
#	 [firmware suspend time ns, firmware resume time ns]
def getFPDT(output):
	global sysvals
	# record types found in the top level FPDT table
	rectype = {}
	rectype[0] = 'Firmware Basic Boot Performance Record'
	rectype[1] = 'S3 Performance Table Record'
	# record types found inside an S3 performance table
	prectype = {}
	prectype[0] = 'Basic S3 Resume Performance Record'
	prectype[1] = 'Basic S3 Suspend Performance Record'
	rootCheck()
	# both the ACPI table file and /dev/mem must exist and be readable
	if(not os.path.exists(sysvals.fpdtpath)):
		if(output):
			doError('file doesnt exist: %s' % sysvals.fpdtpath, False)
		return False
	if(not os.access(sysvals.fpdtpath, os.R_OK)):
		if(output):
			doError('file isnt readable: %s' % sysvals.fpdtpath, False)
		return False
	if(not os.path.exists(sysvals.mempath)):
		if(output):
			doError('file doesnt exist: %s' % sysvals.mempath, False)
		return False
	if(not os.access(sysvals.mempath, os.R_OK)):
		if(output):
			doError('file isnt readable: %s' % sysvals.mempath, False)
		return False
	fp = open(sysvals.fpdtpath, 'rb')
	buf = fp.read()
	fp.close()
	# the table must at least contain the 36 byte ACPI header
	if(len(buf) < 36):
		if(output):
			doError('Invalid FPDT table data, should '+\
				'be at least 36 bytes', False)
		return False
	table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
	if(output):
		print('')
		print('Firmware Performance Data Table (%s)' % table[0])
		print('                  Signature : %s' % table[0])
		print('               Table Length : %u' % table[1])
		print('                   Revision : %u' % table[2])
		print('                   Checksum : 0x%x' % table[3])
		print('                     OEM ID : %s' % table[4])
		print('               OEM Table ID : %s' % table[5])
		print('               OEM Revision : %u' % table[6])
		print('                 Creator ID : %s' % table[7])
		print('           Creator Revision : 0x%x' % table[8])
		print('')
	if(table[0] != 'FPDT'):
		if(output):
			# BUGFIX: doError requires two arguments; the old call
			# doError('Invalid FPDT table') raised a TypeError
			doError('Invalid FPDT table', False)
		return False
	if(len(buf) <= 36):
		return False
	i = 0
	fwData = [0, 0]
	records = buf[36:]
	fp = open(sysvals.mempath, 'rb')
	while(i < len(records)):
		header = struct.unpack('HBB', records[i:i+4])
		if(header[1] == 0):
			# malformed zero-length record: stop rather than spin
			break
		if(header[0] not in rectype):
			# BUGFIX: skip unknown records by their length; the old
			# code hit "continue" without advancing i and spun forever
			i += header[1]
			continue
		if(header[1] != 16):
			# known type but unexpected length, skip it as well
			i += header[1]
			continue
		addr = struct.unpack('Q', records[i+8:i+16])[0]
		try:
			fp.seek(addr)
			first = fp.read(8)
		except:
			doError('Bad address 0x%x in %s' % (addr, sysvals.mempath), False)
		rechead = struct.unpack('4sI', first)
		recdata = fp.read(rechead[1]-8)
		if(rechead[0] == 'FBPT'):
			record = struct.unpack('HBBIQQQQQ', recdata)
			if(output):
				print('%s (%s)' % (rectype[header[0]], rechead[0]))
				print('                  Reset END : %u ns' % record[4])
				print('  OS Loader LoadImage Start : %u ns' % record[5])
				print(' OS Loader StartImage Start : %u ns' % record[6])
				print('     ExitBootServices Entry : %u ns' % record[7])
				print('      ExitBootServices Exit : %u ns' % record[8])
		elif(rechead[0] == 'S3PT'):
			if(output):
				print('%s (%s)' % (rectype[header[0]], rechead[0]))
			j = 0
			while(j < len(recdata)):
				prechead = struct.unpack('HBB', recdata[j:j+4])
				if(prechead[1] == 0):
					# malformed zero-length sub-record
					break
				if(prechead[0] not in prectype):
					# BUGFIX: same infinite loop as above - advance
					# j past the unknown sub-record before continuing
					j += prechead[1]
					continue
				if(prechead[0] == 0):
					record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
					fwData[1] = record[2]
					if(output):
						print('    %s' % prectype[prechead[0]])
						print('               Resume Count : %u' % \
							record[1])
						print('                 FullResume : %u ns' % \
							record[2])
						print('              AverageResume : %u ns' % \
							record[3])
				elif(prechead[0] == 1):
					record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
					fwData[0] = record[1] - record[0]
					if(output):
						print('    %s' % prectype[prechead[0]])
						print('               SuspendStart : %u ns' % \
							record[0])
						print('                 SuspendEnd : %u ns' % \
							record[1])
						print('                SuspendTime : %u ns' % \
							fwData[0])
				j += prechead[1]
		if(output):
			print('')
		i += header[1]
	fp.close()
	return fwData
# Function: statusCheck
# Description:
#	 Verify that the requested command and options will work, and
#	 print the results to the terminal
# Output:
#	 True if the test will work, False if not
def statusCheck():
	global sysvals
	status = True

	if(sysvals.android):
		print('Checking the android system ...')
	else:
		print('Checking this system (%s)...' % platform.node())

	# check if adb is connected to a device
	if(sysvals.android):
		res = 'NO'
		out = os.popen(sysvals.adb+' get-state').read().strip()
		if(out == 'device'):
			res = 'YES'
		print('    is android device connected: %s' % res)
		if(res != 'YES'):
			print('    Please connect the device before using this tool')
			return False

	# check we have root access
	res = 'NO (No features of this tool will work!)'
	if(sysvals.android):
		out = os.popen(sysvals.adb+' shell id').read().strip()
		if('root' in out):
			res = 'YES'
	else:
		# NOTE(review): raises KeyError if USER is unset; see rootCheck
		if(os.environ['USER'] == 'root'):
			res = 'YES'
	print('    have root access: %s' % res)
	if(res != 'YES'):
		if(sysvals.android):
			print('    Try running "adb root" to restart the daemon as root')
		else:
			print('    Try running this script with sudo')
		return False

	# check sysfs is mounted
	res = 'NO (No features of this tool will work!)'
	if(sysvals.android):
		out = os.popen(sysvals.adb+' shell ls '+\
			sysvals.powerfile).read().strip()
		if(out == sysvals.powerfile):
			res = 'YES'
	else:
		if(os.path.exists(sysvals.powerfile)):
			res = 'YES'
	print('    is sysfs mounted: %s' % res)
	if(res != 'YES'):
		return False

	# check target mode is a valid mode
	res = 'NO'
	modes = getModes()
	if(sysvals.suspendmode in modes):
		res = 'YES'
	else:
		status = False
	print('    is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
	if(res == 'NO'):
		print('      valid power modes are: %s' % modes)
		print('      please choose one with -m')

	# check if the tool can unlock the device
	if(sysvals.android):
		res = 'YES'
		out1 = os.popen(sysvals.adb+\
			' shell dumpsys power | grep mScreenOn').read().strip()
		out2 = os.popen(sysvals.adb+\
			' shell input').read().strip()
		if(not out1.startswith('mScreenOn') or not out2.startswith('usage')):
			res = 'NO (wake the android device up before running the test)'
		print('    can I unlock the screen: %s' % res)

	# check if ftrace is available
	res = 'NO'
	ftgood = verifyFtrace()
	if(ftgood):
		res = 'YES'
	elif(sysvals.usecallgraph):
		# callgraph mode (-f) cannot run without ftrace
		status = False
	print('    is ftrace supported: %s' % res)

	# what data source are we using
	res = 'DMESG'
	if(ftgood):
		# probe for every required trace event; fall back to dmesg
		# parsing for whatever is missing
		sysvals.usetraceeventsonly = True
		sysvals.usetraceevents = False
		for e in sysvals.traceevents:
			check = False
			if(sysvals.android):
				out = os.popen(sysvals.adb+' shell ls -d '+\
					sysvals.epath+e).read().strip()
				if(out == sysvals.epath+e):
					check = True
			else:
				if(os.path.exists(sysvals.epath+e)):
					check = True
			if(not check):
				sysvals.usetraceeventsonly = False
			if(e == 'suspend_resume' and check):
				sysvals.usetraceevents = True
		if(sysvals.usetraceevents and sysvals.usetraceeventsonly):
			res = 'FTRACE (all trace events found)'
		elif(sysvals.usetraceevents):
			res = 'DMESG and FTRACE (suspend_resume trace event found)'
	print('    timeline data source: %s' % res)

	# check if rtcwake
	res = 'NO'
	if(sysvals.rtcpath != ''):
		res = 'YES'
	elif(sysvals.rtcwake):
		# -rtcwake was requested but no rtc device path was found
		status = False
	print('    is rtcwake supported: %s' % res)

	return status
# Function: doError
# Description:
#	 generic error function for catastrophic failures
# Arguments:
#	 msg: the error message to print
#	 help: True if printHelp should be called after, False otherwise
def doError(msg, help):
	if(help == True):
		printHelp()
	# format the message inside the call: the old form
	# "print('ERROR: %s\n') % msg" only worked because of the python 2
	# print statement and breaks under python 3
	print('ERROR: %s\n' % msg)
	sys.exit()
# Function: doWarning
# Description:
#	 generic warning function for non-catastrophic anomalies
# Arguments:
#	 msg: the warning message to print
#	 file: If not empty, a filename to request be sent to the owner for debug
def doWarning(msg, file):
	# format inside the call (the old "print('/* %s */') % msg" relied
	# on the python 2 print statement and breaks under python 3)
	print('/* %s */' % msg)
	if(file):
		print('/* For a fix, please send this'+\
			' %s file to <todd.e.brandt@intel.com> */' % file)
# Function: rootCheck
# Description:
#	 quick check to see if we have root access
def rootCheck():
	# use .get() so a missing USER variable (some cron/su environments)
	# reports "not root" instead of raising an unhandled KeyError
	if(os.environ.get('USER') != 'root'):
		doError('This script must be run as root', False)
# Function: getArgInt
# Description:
#	 pull out an integer argument from the command line with checks
# Arguments:
#	 name: option name, used in the error messages
#	 args: iterator over the remaining command line arguments
#	 min: smallest accepted value (inclusive; shadows the builtin)
#	 max: largest accepted value (inclusive; shadows the builtin)
# Output:
#	 The validated integer value (doError exits on any failure)
def getArgInt(name, args, min, max):
	try:
		# next() works on python 2.6+ and python 3, unlike the
		# python-2-only args.next()
		arg = next(args)
	except StopIteration:
		doError(name+': no argument supplied', True)
	try:
		val = int(arg)
	except ValueError:
		doError(name+': non-integer value given', True)
	if(val < min or val > max):
		doError(name+': value should be between %d and %d' % (min, max), True)
	return val
# Function: rerunTest
# Description:
#	 generate an output from an existing set of ftrace/dmesg logs
def rerunTest():
	global sysvals
	# determine which trace events the existing ftrace log contains
	if(sysvals.ftracefile != ''):
		doesTraceLogHaveTraceEvents()
	# a dmesg log is mandatory unless the trace has every event we need
	if(sysvals.dmesgfile == '' and not sysvals.usetraceeventsonly):
		doError('recreating this html output '+\
			'requires a dmesg file', False)
	sysvals.setOutputFile()
	vprint('Output file: %s' % sysvals.htmlfile)
	print('PROCESSING DATA')
	if(sysvals.usetraceeventsonly):
		# kernel >= 3.15: everything needed is in the ftrace log
		testruns = parseTraceLog()
	else:
		# older kernels: parse dmesg first, then augment from ftrace
		testruns = loadKernelLog()
		for data in testruns:
			parseKernelLog(data)
		if(sysvals.ftracefile != ''):
			appendIncompleteTraceLog(testruns)
	createHTML(testruns)
# Function: runTest
# Description:
#	 execute a suspend/resume, gather the logs, and generate the output
# Arguments:
#	 subdir: directory in which to create the test output files
def runTest(subdir):
	global sysvals

	# prepare for the test
	if(not sysvals.android):
		initFtrace()
	else:
		initFtraceAndroid()
	sysvals.initTestOutput(subdir)

	vprint('Output files:\n    %s' % sysvals.dmesgfile)
	if(sysvals.usecallgraph or
		sysvals.usetraceevents or
		sysvals.usetraceeventsonly):
		vprint('    %s' % sysvals.ftracefile)
	vprint('    %s' % sysvals.htmlfile)

	# execute the test
	if(not sysvals.android):
		executeSuspend()
	else:
		executeAndroidSuspend()

	# analyze the data and create the html output
	print('PROCESSING DATA')
	if(sysvals.usetraceeventsonly):
		# data for kernels 3.15 or newer is entirely in ftrace
		testruns = parseTraceLog()
	else:
		# data for kernels older than 3.15 is primarily in dmesg
		testruns = loadKernelLog()
		for data in testruns:
			parseKernelLog(data)
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			appendIncompleteTraceLog(testruns)
	createHTML(testruns)
# Function: runSummary
# Description:
#	 create a summary of tests in a sub-directory
# Arguments:
#	 subdir: directory to scan recursively for *_ftrace.txt test logs
#	 output: True to print progress information to stdout
def runSummary(subdir, output):
	global sysvals

	# get a list of ftrace output files
	files = []
	for dirname, dirnames, filenames in os.walk(subdir):
		for filename in filenames:
			if(re.match('.*_ftrace.txt', filename)):
				files.append("%s/%s" % (dirname, filename))

	# process the files in order and get an array of data objects
	testruns = []
	# NOTE(review): the loop variable "file" shadows the builtin
	for file in sorted(files):
		if output:
			print("Test found in %s" % os.path.dirname(file))
		sysvals.ftracefile = file
		sysvals.dmesgfile = file.replace('_ftrace.txt', '_dmesg.txt')
		doesTraceLogHaveTraceEvents()
		sysvals.usecallgraph = False
		if not sysvals.usetraceeventsonly:
			# old-style log pair: the matching dmesg log is required
			if(not os.path.exists(sysvals.dmesgfile)):
				print("Skipping %s: not a valid test input" % file)
				continue
			else:
				if output:
					f = os.path.basename(sysvals.ftracefile)
					d = os.path.basename(sysvals.dmesgfile)
					print("\tInput files: %s and %s" % (f, d))
				testdata = loadKernelLog()
				data = testdata[0]
				parseKernelLog(data)
				testdata = [data]
				appendIncompleteTraceLog(testdata)
		else:
			if output:
				print("\tInput file: %s" % os.path.basename(sysvals.ftracefile))
			testdata = parseTraceLog()
			data = testdata[0]
		# align each test at its suspend point for comparison
		data.normalizeTime(data.tSuspended)
		link = file.replace(subdir+'/', '').replace('_ftrace.txt', '.html')
		data.outfile = link
		testruns.append(data)

	createHTMLSummarySimple(testruns, subdir+'/summary.html')
# Function: printHelp
# Description:
#	 print out the help text
# Output:
#	 Always True (so callers can chain it in an expression)
def printHelp():
	global sysvals
	modes = getModes()

	print('')
	print('AnalyzeSuspend v%.1f' % sysvals.version)
	print('Usage: sudo analyze_suspend.py <options>')
	print('')
	print('Description:')
	print('  This tool is designed to assist kernel and OS developers in optimizing')
	print('  their linux stack\'s suspend/resume time. Using a kernel image built')
	print('  with a few extra options enabled, the tool will execute a suspend and')
	print('  capture dmesg and ftrace data until resume is complete. This data is')
	print('  transformed into a device timeline and an optional callgraph to give')
	print('  a detailed view of which devices/subsystems are taking the most')
	print('  time in suspend/resume.')
	print('')
	print('  Generates output files in subdirectory: suspend-mmddyy-HHMMSS')
	print('   HTML output:                    <hostname>_<mode>.html')
	print('   raw dmesg output:               <hostname>_<mode>_dmesg.txt')
	print('   raw ftrace output:              <hostname>_<mode>_ftrace.txt')
	print('')
	print('Options:')
	print('  [general]')
	print('    -h          Print this help text')
	print('    -v          Print the current tool version')
	print('    -verbose    Print extra information during execution and analysis')
	print('    -status     Test to see if the system is enabled to run this tool')
	print('    -modes      List available suspend modes')
	# BUGFIX: the %-formatting below was outside the print() call, which
	# only worked via the python 2 print statement and breaks on python 3
	print('    -m mode     Mode to initiate for suspend %s (default: %s)' % (modes, sysvals.suspendmode))
	print('    -rtcwake t  Use rtcwake to autoresume after <t> seconds (default: disabled)')
	print('  [advanced]')
	print('    -f          Use ftrace to create device callgraphs (default: disabled)')
	print('    -filter "d1 d2 ..." Filter out all but this list of dev names')
	print('    -x2         Run two suspend/resumes back to back (default: disabled)')
	print('    -x2delay t  Minimum millisecond delay <t> between the two test runs (default: 0 ms)')
	print('    -postres t  Time after resume completion to wait for post-resume events (default: 0 S)')
	print('    -multi n d  Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
	print('                be created in a new subdirectory with a summary page.')
	print('  [utilities]')
	print('    -fpdt       Print out the contents of the ACPI Firmware Performance Data Table')
	print('    -usbtopo    Print out the current USB topology with power info')
	print('    -usbauto    Enable autosuspend for all connected USB devices')
	print('  [android testing]')
	print('    -adb binary Use the given adb binary to run the test on an android device.')
	print('                The device should already be connected and with root access.')
	print('                Commands will be executed on the device using "adb shell"')
	print('  [re-analyze data from previous runs]')
	print('    -ftrace ftracefile  Create HTML output using ftrace input')
	print('    -dmesg dmesgfile    Create HTML output using dmesg (not needed for kernel >= 3.15)')
	print('    -summary directory  Create a summary of all test in this dir')
	print('')
	return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
	# cmd/cmdarg select a one-shot utility command (-status, -fpdt, ...)
	cmd = ''
	cmdarg = ''
	# -multi settings: number of back to back test runs and the delay
	multitest = {'run': False, 'count': 0, 'delay': 0}
	# loop through the command line arguments
	args = iter(sys.argv[1:])
	for arg in args:
		if(arg == '-m'):
			try:
				val = args.next()
			except:
				doError('No mode supplied', True)
			sysvals.suspendmode = val
		elif(arg == '-adb'):
			try:
				val = args.next()
			except:
				doError('No adb binary supplied', True)
			# the adb binary must exist, be executable, and respond
			# sensibly to "adb version" before we trust it
			if(not os.path.exists(val)):
				doError('file doesnt exist: %s' % val, False)
			if(not os.access(val, os.X_OK)):
				doError('file isnt executable: %s' % val, False)
			try:
				check = os.popen(val+' version').read().strip()
			except:
				doError('adb version failed to execute', False)
			if(not re.match('Android Debug Bridge .*', check)):
				doError('adb version failed to execute', False)
			sysvals.adb = val
			sysvals.android = True
		elif(arg == '-x2'):
			if(sysvals.postresumetime > 0):
				doError('-x2 is not compatible with -postres', False)
			sysvals.execcount = 2
		elif(arg == '-x2delay'):
			sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
		elif(arg == '-postres'):
			if(sysvals.execcount != 1):
				doError('-x2 is not compatible with -postres', False)
			sysvals.postresumetime = getArgInt('-postres', args, 0, 3600)
		elif(arg == '-f'):
			sysvals.usecallgraph = True
		elif(arg == '-modes'):
			cmd = 'modes'
		elif(arg == '-fpdt'):
			cmd = 'fpdt'
		elif(arg == '-usbtopo'):
			cmd = 'usbtopo'
		elif(arg == '-usbauto'):
			cmd = 'usbauto'
		elif(arg == '-status'):
			cmd = 'status'
		elif(arg == '-verbose'):
			sysvals.verbose = True
		elif(arg == '-v'):
			print("Version %.1f" % sysvals.version)
			sys.exit()
		elif(arg == '-rtcwake'):
			sysvals.rtcwake = True
			sysvals.rtcwaketime = getArgInt('-rtcwake', args, 0, 3600)
		elif(arg == '-multi'):
			multitest['run'] = True
			multitest['count'] = getArgInt('-multi n (exec count)', args, 2, 1000000)
			multitest['delay'] = getArgInt('-multi d (delay between tests)', args, 0, 3600)
		elif(arg == '-dmesg'):
			try:
				val = args.next()
			except:
				doError('No dmesg file supplied', True)
			sysvals.notestrun = True
			sysvals.dmesgfile = val
			if(os.path.exists(sysvals.dmesgfile) == False):
				doError('%s doesnt exist' % sysvals.dmesgfile, False)
		elif(arg == '-ftrace'):
			try:
				val = args.next()
			except:
				doError('No ftrace file supplied', True)
			sysvals.notestrun = True
			sysvals.usecallgraph = True
			sysvals.ftracefile = val
			if(os.path.exists(sysvals.ftracefile) == False):
				doError('%s doesnt exist' % sysvals.ftracefile, False)
		elif(arg == '-summary'):
			try:
				val = args.next()
			except:
				doError('No directory supplied', True)
			cmd = 'summary'
			cmdarg = val
			sysvals.notestrun = True
			if(os.path.isdir(val) == False):
				doError('%s isnt accesible' % val, False)
		elif(arg == '-filter'):
			try:
				val = args.next()
			except:
				doError('No devnames supplied', True)
			sysvals.setDeviceFilter(val)
		elif(arg == '-h'):
			printHelp()
			sys.exit()
		else:
			doError('Invalid argument: '+arg, True)
	# just run a utility command and exit
	if(cmd != ''):
		if(cmd == 'status'):
			statusCheck()
		elif(cmd == 'fpdt'):
			if(sysvals.android):
				doError('cannot read FPDT on android device', False)
			getFPDT(True)
		elif(cmd == 'usbtopo'):
			if(sysvals.android):
				doError('cannot read USB topology '+\
					'on an android device', False)
			detectUSB(True)
		elif(cmd == 'modes'):
			modes = getModes()
			print modes
		elif(cmd == 'usbauto'):
			setUSBDevicesAuto()
		elif(cmd == 'summary'):
			print("Generating a summary of folder \"%s\"" % cmdarg)
			runSummary(cmdarg, True)
		sys.exit()
	# run test on android device
	if(sysvals.android):
		if(sysvals.usecallgraph):
			doError('ftrace (-f) is not yet supported '+\
				'in the android kernel', False)
		if(sysvals.notestrun):
			doError('cannot analyze test files on the '+\
				'android device', False)
	# if instructed, re-analyze existing data files
	if(sysvals.notestrun):
		rerunTest()
		sys.exit()
	# verify that we can run a test
	if(not statusCheck()):
		print('Check FAILED, aborting the test run!')
		sys.exit()
	if multitest['run']:
		# run multiple tests in a separate subdirectory
		s = 'x%d' % multitest['count']
		subdir = datetime.now().strftime('suspend-'+s+'-%m%d%y-%H%M%S')
		os.mkdir(subdir)
		for i in range(multitest['count']):
			if(i != 0):
				print('Waiting %d seconds...' % (multitest['delay']))
				time.sleep(multitest['delay'])
			print('TEST (%d/%d) START' % (i+1, multitest['count']))
			runTest(subdir)
			print('TEST (%d/%d) COMPLETE' % (i+1, multitest['count']))
		runSummary(subdir, False)
	else:
		# run the test in the current directory
		runTest(".")
| gpl-2.0 |
alikins/ansible | lib/ansible/modules/remote_management/manageiq/manageiq_tags.py | 25 | 9319 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard ansible module metadata: community supported, and the module
# interface is still in "preview" (it may change in later releases).
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
module: manageiq_tags
short_description: Management of resource tags in ManageIQ.
extends_documentation_fragment: manageiq
version_added: '2.5'
author: Daniel Korn (@dkorn)
description:
- The manageiq_tags module supports adding, updating and deleting tags in ManageIQ.
options:
state:
description:
- absent - tags should not exist,
- present - tags should exist,
- list - list current tags.
required: False
choices: ['absent', 'present', 'list']
default: 'present'
tags:
description:
- tags - list of dictionaries, each includes 'name' and 'category' keys.
- required if state is present or absent.
required: false
default: null
resource_type:
description:
- the relevant resource type in manageiq
required: true
choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
'data store', 'group', 'resource pool', 'service', 'service template',
'template', 'tenant', 'user']
default: null
resource_name:
description:
- the relevant resource name in manageiq
required: true
default: null
'''
EXAMPLES = '''
- name: Create new tags for a provider in ManageIQ
manageiq_tags:
resource_name: 'EngLab'
resource_type: 'provider'
tags:
- category: environment
name: prod
- category: owner
name: prod_ops
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Remove tags for a provider in ManageIQ
manageiq_tags:
state: absent
resource_name: 'EngLab'
resource_type: 'provider'
tags:
- category: environment
name: prod
- category: owner
name: prod_ops
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: List current tags for a provider in ManageIQ
manageiq_tags:
state: list
resource_name: 'EngLab'
resource_type: 'provider'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
def query_resource_id(manageiq, resource_type, resource_name):
    """ Query the resource name in ManageIQ.

    Returns:
        the resource id if it exists in manageiq, Fail otherwise.
    """
    found = manageiq.find_collection_resource_by(resource_type, name=resource_name)
    if not found:
        manageiq.module.fail_json(
            msg="{resource_name} {resource_type} does not exist in manageiq".format(
                resource_name=resource_name, resource_type=resource_type))
    else:
        return found["id"]
class ManageIQTags(object):
    """
    Perform tag assignment, unassignment and listing against a single
    ManageIQ resource identified by its type and id.
    """

    def __init__(self, manageiq, resource_type, resource_id):
        self.manageiq = manageiq
        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client
        self.resource_type = resource_type
        self.resource_id = resource_id
        # base rest api endpoint for this specific resource
        self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
            api_url=self.api_url,
            resource_type=resource_type,
            resource_id=resource_id)

    def full_tag_name(self, tag):
        """ Returns the full tag name in manageiq,
            e.g. '/managed/environment/prod'
        """
        return '/managed/%s/%s' % (tag['category'], tag['name'])

    def clean_tag_object(self, tag):
        """ Reduce a rest api tag object to a human readable dict:
            {full_name: STR, name: STR, display_name: STR, category: STR}
        """
        categorization = tag.get('categorization', {})
        return dict(
            full_name=tag.get('name'),
            name=categorization.get('name'),
            display_name=categorization.get('display_name'),
            category=categorization.get('category', {}).get('name'))

    def query_resource_tags(self):
        """ Returns the list of tag objects currently assigned to the
            resource, in the cleaned human readable form.
        """
        url = '{resource_url}/tags?expand=resources&attributes=categorization'.format(
            resource_url=self.resource_url)
        try:
            response = self.client.get(url)
        except Exception as e:
            self.module.fail_json(
                msg="Failed to query {resource_type} tags: {error}".format(
                    resource_type=self.resource_type, error=e))
        return [self.clean_tag_object(tag)
                for tag in response.get('resources', [])]

    def tags_to_update(self, tags, action):
        """ Return the subset of tags whose assignment state would
            actually change if the given action were performed.
        """
        current = set(tag['full_name'] for tag in self.query_resource_tags())
        pending = []
        for tag in tags:
            assigned = self.full_tag_name(tag) in current
            # unassign only what is assigned; assign only what is not
            if assigned and action == 'unassign':
                pending.append(tag)
            elif (not assigned) and action == 'assign':
                pending.append(tag)
        return pending

    def assign_or_unassign_tags(self, tags, action):
        """ Perform the assign/unassign action and return an ansible
            result dict (changed flag plus a message).
        """
        if not self.tags_to_update(tags, action):
            return dict(
                changed=False,
                msg="Tags already {action}ed, nothing to do".format(action=action))

        # NOTE(review): the full tag list is posted, matching the
        # original behavior (the server ignores no-op entries)
        url = '{resource_url}/tags'.format(resource_url=self.resource_url)
        try:
            response = self.client.post(url, action=action, resources=tags)
        except Exception as e:
            self.module.fail_json(
                msg="Failed to {action} tag: {error}".format(
                    action=action, error=e))

        # every entity in the result must report success
        for result in response['results']:
            if not result['success']:
                self.module.fail_json(
                    msg="Failed to {action}: {message}".format(
                        action=action, message=result['message']))

        return dict(
            changed=True,
            msg="Successfully {action}ed tags".format(action=action))
def main():
    # map module states to the corresponding ManageIQ tag actions
    actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}

    argument_spec = dict(
        tags=dict(type='list'),
        resource_name=dict(required=True, type='str'),
        resource_type=dict(required=True, type='str',
                           choices=manageiq_entities().keys()),
        state=dict(required=False, type='str',
                   choices=['present', 'absent', 'list'], default='present'),
    )
    # merge in the shared manageiq connection arguments
    argument_spec.update(manageiq_argument_spec())

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[
            ('state', 'present', ['tags']),
            ('state', 'absent', ['tags'])
        ],
    )

    params = module.params
    action = actions[params['state']]
    resource_type = manageiq_entities()[params['resource_type']]

    manageiq = ManageIQ(module)

    # resolve the resource id up front; fails the module if it is missing
    resource_id = query_resource_id(manageiq, resource_type, params['resource_name'])
    manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)

    if action == 'list':
        # report the current tags without changing anything
        res_args = dict(changed=False, tags=manageiq_tags.query_resource_tags())
    else:
        # assign or unassign the requested tags
        res_args = manageiq_tags.assign_or_unassign_tags(params['tags'], action)

    module.exit_json(**res_args)


if __name__ == "__main__":
    main()
| gpl-3.0 |
tux-00/ansible | lib/ansible/parsing/utils/jsonify.py | 117 | 1292 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
import json
except ImportError:
import simplejson as json
def jsonify(result, format=False):
    '''Serialize ``result`` to a JSON string.

    :param result: object to serialize; ``None`` yields the empty JSON
        object ``"{}"`` rather than the literal ``null``.
    :param format: when true, pretty-print with a 4-space indent;
        otherwise emit compact single-line output.  (Kept named
        ``format`` for backward compatibility, although it shadows
        the builtin.)
    :returns: JSON text with keys sorted for deterministic output.
    '''
    if result is None:
        return "{}"

    indent = 4 if format else None

    try:
        # Prefer raw (non-ASCII-escaped) unicode output.
        return json.dumps(result, sort_keys=True, indent=indent, ensure_ascii=False)
    except UnicodeDecodeError:
        # Undecodable byte strings can trip ensure_ascii=False on
        # Python 2; retry with the escaping encoder instead.
        return json.dumps(result, sort_keys=True, indent=indent)
| gpl-3.0 |
bssrdf/zulip | api/integrations/trac/zulip_trac.py | 114 | 5142 | # -*- coding: utf-8 -*-
# Copyright © 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Zulip trac plugin -- sends zulips when tickets change.
#
# Install by copying this file and zulip_trac_config.py to the trac
# plugins/ subdirectory, customizing the constants in
# zulip_trac_config.py, and then adding "zulip_trac" to the
# components section of the conf/trac.ini file, like so:
#
# [components]
# zulip_trac = enabled
#
# You may then need to restart trac (or restart Apache) for the bot
# (or changes to the bot) to actually be loaded by trac.
from trac.core import Component, implements
from trac.ticket import ITicketChangeListener
import sys
import os.path
sys.path.insert(0, os.path.dirname(__file__))
import zulip_trac_config as config
VERSION = "0.9"
# Allow a non-installed checkout of the Zulip API bindings to be used by
# extending sys.path before the import below.
if config.ZULIP_API_PATH is not None:
    sys.path.append(config.ZULIP_API_PATH)
import zulip
# Module-level client shared by all notification sends; the client string
# identifies this plugin and version to the Zulip server.
client = zulip.Client(
    email=config.ZULIP_USER,
    site=config.ZULIP_SITE,
    api_key=config.ZULIP_API_KEY,
    client="ZulipTrac/" + VERSION)
def markdown_ticket_url(ticket, heading="ticket"):
    """Return a Markdown link of the form '[<heading> #<id>](<base>/<id>)'."""
    ticket_id = ticket.id
    target = "%s/%s" % (config.TRAC_BASE_TICKET_URL, ticket_id)
    return "[%s #%s](%s)" % (heading, ticket_id, target)
def markdown_block(desc):
    """Quote *desc* as a Markdown blockquote, padded with surrounding newlines."""
    quoted_body = "\n> ".join(desc.split("\n"))
    return "\n\n>%s\n" % (quoted_body,)
def truncate(string, length):
    """Shorten *string* to at most *length* characters, ending in '...' when cut."""
    if len(string) > length:
        return string[:length - 3] + "..."
    return string
def trac_subject(ticket):
    """Build the Zulip topic for *ticket*: '#<id>: <summary>', capped at 60 chars."""
    subject = "#%s: %s" % (ticket.id, ticket.values.get("summary"))
    return truncate(subject, 60)
def send_update(ticket, content):
    """Post *content* to the configured notification stream.

    The topic is derived from the ticket id/summary so all updates to one
    ticket thread together in Zulip.
    """
    client.send_message({
        "type": "stream",
        "to": config.STREAM_FOR_NOTIFICATIONS,
        "content": content,
        "subject": trac_subject(ticket)
    })
class ZulipPlugin(Component):
    """Trac component that relays ticket create/change/delete events to Zulip."""
    implements(ITicketChangeListener)
    def ticket_created(self, ticket):
        """Called when a ticket is created; announces reporter, component, priority."""
        content = "%s created %s in component **%s**, priority **%s**:\n" % \
            (ticket.values.get("reporter"), markdown_ticket_url(ticket),
             ticket.values.get("component"), ticket.values.get("priority"))
        # Include the full subject if it will be truncated
        if len(ticket.values.get("summary")) > 60:
            content += "**%s**\n" % (ticket.values.get("summary"),)
        if ticket.values.get("description") != "":
            content += "%s" % (markdown_block(ticket.values.get("description")),)
        send_update(ticket, content)
    def ticket_changed(self, ticket, comment, author, old_values):
        """Called when a ticket is modified.
        `old_values` is a dictionary containing the previous values of the
        fields that have changed.
        """
        # Notify only when a watched field changed, or when a comment was
        # added and "comment" is itself listed among the watched fields.
        if not (set(old_values.keys()).intersection(set(config.TRAC_NOTIFY_FIELDS)) or
                (comment and "comment" in set(config.TRAC_NOTIFY_FIELDS))):
            return
        content = "%s updated %s" % (author, markdown_ticket_url(ticket))
        if comment:
            content += ' with comment: %s\n\n' % (markdown_block(comment),)
        else:
            content += ":\n\n"
        field_changes = []
        for key in old_values.keys():
            if key == "description":
                # Description diffs are rendered inline as blockquotes rather
                # than as a compact "old => new" field-change entry.
                content += '- Changed %s from %s to %s' % (key, markdown_block(old_values.get(key)),
                                                           markdown_block(ticket.values.get(key)))
            elif old_values.get(key) == "":
                # Field was previously empty: show only the new value.
                field_changes.append('%s: => **%s**' % (key, ticket.values.get(key)))
            elif ticket.values.get(key) == "":
                # Field was cleared: show only the old value.
                field_changes.append('%s: **%s** => ""' % (key, old_values.get(key)))
            else:
                field_changes.append('%s: **%s** => **%s**' % (key, old_values.get(key),
                                                               ticket.values.get(key)))
        content += ", ".join(field_changes)
        send_update(ticket, content)
    def ticket_deleted(self, ticket):
        """Called when a ticket is deleted."""
        content = "%s was deleted." % markdown_ticket_url(ticket, heading="Ticket")
        send_update(ticket, content)
| apache-2.0 |
harisbal/pandas | pandas/tests/series/test_missing.py | 1 | 51950 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from distutils.version import LooseVersion
import numpy as np
from numpy import nan
import pytest
import pytz
from pandas._libs.tslib import iNaT
from pandas.compat import range
from pandas.errors import PerformanceWarning
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, IntervalIndex, MultiIndex, NaT, Series,
Timestamp, date_range, isna)
from pandas.core.series import remove_na
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
try:
    import scipy
    # Flag for scipy >= 0.19.0 — presumably consumed by interpolation tests
    # elsewhere in this module whose expected results differ across scipy
    # versions (usage not visible in this chunk).
    _is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
                         LooseVersion('0.19.0'))
except ImportError:
    # No scipy at all: treat as "older than 0.19.0".
    _is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.Akima1DInterpolator missing')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestSeriesMissingData():
def test_remove_na_deprecation(self):
# see gh-16971
with tm.assert_produces_warning(FutureWarning):
remove_na(Series([]))
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interprested as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(NaT)
expected = Series([NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
result = s.fillna(NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
null_loc = pd.Series([False, True, False, True])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, {0}]'.format(tz)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00',
tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# with timezone
# GH 15855
df = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='pad'), exp)
df = pd.Series([pd.NaT, pd.Timestamp('2012-11-11 00:00:00+01:00')])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='bfill'), exp)
def test_fillna_consistency(self):
# GH 16402
# fillna with a tz aware to a tz-naive, should result in object
s = Series([Timestamp('20130101'), pd.NaT])
result = s.fillna(Timestamp('20130101', tz='US/Eastern'))
expected = Series([Timestamp('20130101'),
Timestamp('2013-01-01', tz='US/Eastern')],
dtype='object')
assert_series_equal(result, expected)
# where (we ignore the errors=)
result = s.where([True, False],
Timestamp('20130101', tz='US/Eastern'),
errors='ignore')
assert_series_equal(result, expected)
result = s.where([True, False],
Timestamp('20130101', tz='US/Eastern'),
errors='ignore')
assert_series_equal(result, expected)
# with a non-datetime
result = s.fillna('foo')
expected = Series([Timestamp('20130101'),
'foo'])
assert_series_equal(result, expected)
# assignment
s2 = s.copy()
s2[1] = 'foo'
assert_series_equal(s2, expected)
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
data = pd.Series([pd.NaT, pd.NaT,
datetime(2016, 12, 12, 22, 24, 6, 100001,
tzinfo=pytz.utc)])
filled = data.fillna(method='bfill')
expected = pd.Series([datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc)])
assert_series_equal(filled, expected)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
s = pd.Series([1., np.nan])
result = s.fillna(0, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
s = pd.Series([1., np.nan])
result = s.fillna({1: 0}, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
pytest.raises(TypeError, s.fillna, [1, 2])
pytest.raises(TypeError, s.fillna, (1, 2))
# related GH 9217, make sure limit is an int and greater than 0
s = Series([1, 2, 3, None])
for limit in [-1, 0, 1., 2.]:
for method in ['backfill', 'bfill', 'pad', 'ffill', None]:
with pytest.raises(ValueError):
s.fillna(1, limit=limit, method=method)
def test_categorical_nan_equality(self):
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(s.values.codes,
np.array([0, 1, -1, 0], dtype=np.int8))
@pytest.mark.parametrize('fill_value, expected_output', [
('a', ['a', 'a', 'b', 'a', 'a']),
({1: 'a', 3: 'b', 4: 'b'}, ['a', 'a', 'b', 'b', 'b']),
({1: 'a'}, ['a', 'a', 'b', np.nan, np.nan]),
({1: 'a', 3: 'b'}, ['a', 'a', 'b', 'b', np.nan]),
(Series('a'), ['a', np.nan, 'b', np.nan, np.nan]),
(Series('a', index=[1]), ['a', 'a', 'b', np.nan, np.nan]),
(Series({1: 'a', 3: 'b'}), ['a', 'a', 'b', 'b', np.nan]),
(Series(['a', 'b'], index=[3, 4]), ['a', np.nan, 'b', 'a', 'b'])
])
def test_fillna_categorical(self, fill_value, expected_output):
# GH 17033
# Test fillna for a Categorical series
data = ['a', np.nan, 'b', np.nan, np.nan]
s = Series(Categorical(data, categories=['a', 'b']))
exp = Series(Categorical(expected_output, categories=['a', 'b']))
tm.assert_series_equal(s.fillna(fill_value), exp)
def test_fillna_categorical_raise(self):
data = ['a', np.nan, 'b', np.nan, np.nan]
s = Series(Categorical(data, categories=['a', 'b']))
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna('d')
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna(Series('d'))
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna({1: 'd', 3: 'a'})
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar or '
'dict, but you passed a "list"'):
s.fillna(['a', 'b'])
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar or '
'dict, but you passed a "tuple"'):
s.fillna(('a', 'b'))
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar, dict '
'or Series, but you passed a "DataFrame"'):
s.fillna(DataFrame({1: ['a'], 3: ['b']}))
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_isna_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_na', True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
@tm.capture_stdout
def test_isnull_for_inf_deprecated(self):
# gh-17115
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self, datetime_series):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
pytest.raises(ValueError, ts.fillna)
pytest.raises(ValueError, datetime_series.fillna, value=0,
method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self, datetime_series):
try:
datetime_series.fillna(method='ffil')
except ValueError as inst:
assert 'ffil' in str(inst)
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isna(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isna(td1[0])
td1[1] = iNaT
assert isna(td1[1])
assert td1[1].value == iNaT
td1[1] = td[1]
assert not isna(td1[1])
td1[2] = NaT
assert isna(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isna(td1[2])
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isna(result).sum() == 7
# NumPy limitiation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= datetime_series <= 0.5
# expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
assert len(s.dropna()) == 0
s.dropna(inplace=True)
assert len(s) == 0
# invalid axis
pytest.raises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
tm.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, Asia/Tokyo]'
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
assert result.dtype == 'datetime64[ns, Asia/Tokyo]'
tm.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
tm.assert_series_equal(result, s)
assert result is not s
s2 = s.copy()
s2.dropna(inplace=True)
tm.assert_series_equal(s2, s)
def test_dropna_intervals(self):
s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays(
[np.nan, 0, 1, 2],
[np.nan, 1, 2, 3]))
result = s.dropna()
expected = s.iloc[1:]
assert_series_equal(result, expected)
def test_valid(self, datetime_series):
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.dropna()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notna(ts)])
def test_isna(self):
ser = Series([0, 5.4, 3, nan, -0.001])
expected = Series([False, False, False, True, False])
tm.assert_series_equal(ser.isna(), expected)
ser = Series(["hi", "", nan])
expected = Series([False, False, True])
tm.assert_series_equal(ser.isna(), expected)
def test_notna(self):
ser = Series([0, 5.4, 3, nan, -0.001])
expected = Series([True, True, True, False, True])
tm.assert_series_equal(ser.notna(), expected)
ser = Series(["hi", "", nan])
expected = Series([True, True, False])
tm.assert_series_equal(ser.notna(), expected)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
assert np.isnan(x[0]), np.isnan(expected[0])
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
# neither monotonic increasing or decreasing
rng2 = rng[[1, 0, 2]]
pytest.raises(ValueError, rng2.get_indexer, rng, method='pad')
def test_dropna_preserve_name(self, datetime_series):
datetime_series[:5] = np.nan
result = datetime_series.dropna()
assert result.name == datetime_series.name
name = datetime_series.name
ts = datetime_series.copy()
ts.dropna(inplace=True)
assert ts.name == name
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
# TODO: what is this test doing? why are result an expected
# the same call to fillna?
with tm.assert_produces_warning(PerformanceWarning):
# TODO: release-note fillna performance warning
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
with tm.assert_produces_warning(PerformanceWarning):
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
class TestSeriesInterpolateData():
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float),
datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in datetime_series.index],
index=datetime_series.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
tm.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = string_series.copy()
non_ts[0] = np.NaN
pytest.raises(ValueError, non_ts.interpolate, method='time')
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize("kwargs", [
{},
pytest.param({'method': 'polynomial', 'order': 1},
marks=td.skip_if_no_scipy)
])
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(**kwargs), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with pytest.raises(ValueError):
s.interpolate(method='time')
@pytest.mark.parametrize("kwargs", [
{},
pytest.param({'method': 'polynomial', 'order': 1},
marks=td.skip_if_no_scipy)
])
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
    @td.skip_if_no_scipy
    def test_interp_scipy_basic(self):
        """Exercise the scipy-backed methods (slinear, nearest, zero,
        quadratic, cubic), each with and without downcast='infer'."""
        s = Series([1, 3, np.nan, 12, np.nan, 25])
        # slinear
        expected = Series([1., 3., 7.5, 12., 18.5, 25.])
        result = s.interpolate(method='slinear')
        assert_series_equal(result, expected)
        result = s.interpolate(method='slinear', downcast='infer')
        assert_series_equal(result, expected)
        # nearest
        expected = Series([1, 3, 3, 12, 12, 25])
        result = s.interpolate(method='nearest')
        assert_series_equal(result, expected.astype('float'))
        # downcast='infer' restores the integer dtype here
        result = s.interpolate(method='nearest', downcast='infer')
        assert_series_equal(result, expected)
        # zero
        expected = Series([1, 3, 3, 12, 12, 25])
        result = s.interpolate(method='zero')
        assert_series_equal(result, expected.astype('float'))
        result = s.interpolate(method='zero', downcast='infer')
        assert_series_equal(result, expected)
        # quadratic
        # GH #15662.
        # new cubic and quadratic interpolation algorithms from scipy 0.19.0.
        # previously `splmake` was used. See scipy/scipy#6710
        # NOTE(review): `_is_scipy_ge_0190` is a module-level flag defined
        # elsewhere in this file.
        if _is_scipy_ge_0190:
            expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
        else:
            expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
        result = s.interpolate(method='quadratic')
        assert_series_equal(result, expected)
        result = s.interpolate(method='quadratic', downcast='infer')
        assert_series_equal(result, expected)
        # cubic
        expected = Series([1., 3., 6.8, 12., 18.2, 25.])
        result = s.interpolate(method='cubic')
        assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
# GH 9217, make sure limit is an int and greater than 0
methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial', None,
'from_derivatives', 'pchip', 'akima']
s = pd.Series([1, 2, np.nan, np.nan, 5])
for limit in [-1, 0, 1., 2.]:
for method in methods:
with pytest.raises(ValueError):
s.interpolate(limit=limit, method=method)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_unlimited(self):
# these test are for issue #16282 default Limit=None is unlimited
s = Series([np.nan, 1., 3., np.nan, np.nan, np.nan, 11., np.nan])
expected = Series([1., 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([np.nan, 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([1., 1., 3., 5., 7., 9., 11., np.nan])
result = s.interpolate(method='linear',
limit_direction='backward')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
pytest.raises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
# limit_area introduced GH #16284
def test_interp_limit_area(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([nan, nan, 3, nan, nan, nan, 7, nan, nan])
expected = Series([nan, nan, 3., 4., 5., 6., 7., nan, nan])
result = s.interpolate(method='linear', limit_area='inside')
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., 4., nan, nan, 7., nan, nan])
result = s.interpolate(method='linear', limit_area='inside',
limit=1)
expected = Series([nan, nan, 3., 4., nan, 6., 7., nan, nan])
result = s.interpolate(method='linear', limit_area='inside',
limit_direction='both', limit=1)
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., nan, nan, nan, 7., 7., 7.])
result = s.interpolate(method='linear', limit_area='outside')
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., nan, nan, nan, 7., 7., nan])
result = s.interpolate(method='linear', limit_area='outside',
limit=1)
expected = Series([nan, 3., 3., nan, nan, nan, 7., 7., nan])
result = s.interpolate(method='linear', limit_area='outside',
limit_direction='both', limit=1)
assert_series_equal(result, expected)
expected = Series([3., 3., 3., nan, nan, nan, 7., nan, nan])
result = s.interpolate(method='linear', limit_area='outside',
direction='backward')
# raises an error even if limit type is wrong.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_area='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
# These test are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
# These test are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_all_good(self):
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
@pytest.mark.parametrize("check_scipy", [
False,
pytest.param(True, marks=td.skip_if_no_scipy)
])
def test_interp_multiIndex(self, check_scipy):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
if check_scipy:
with pytest.raises(ValueError):
s.interpolate(method='polynomial', order=1)
@td.skip_if_no_scipy
def test_interp_nonmono_raise(self):
s = Series([1, np.nan, 3], index=[0, 2, 1])
with pytest.raises(ValueError):
s.interpolate(method='krogh')
@td.skip_if_no_scipy
def test_interp_datetime64(self):
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize("method", ['polynomial', 'spline'])
def test_no_order(self, method):
s = Series([0, 1, np.nan, 3])
with pytest.raises(ValueError):
s.interpolate(method=method)
@td.skip_if_no_scipy
def test_spline(self):
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
@td.skip_if_no('scipy', min_version='0.15')
def test_spline_extrapolate(self):
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
@td.skip_if_no_scipy
def test_spline_smooth(self):
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (s.interpolate(method='spline', order=3, s=0)[5] !=
s.interpolate(method='spline', order=3)[5])
    @td.skip_if_no_scipy
    def test_spline_interpolation(self):
        # NOTE(review): 'result1' and 'expected1' come from identical calls,
        # so this only verifies that spline interpolation runs without error
        # and is deterministic -- it does not pin specific output values.
        s = Series(np.arange(10) ** 2)
        s[np.random.randint(0, 9, 3)] = np.nan
        result1 = s.interpolate(method='spline', order=1)
        expected1 = s.interpolate(method='spline', order=1)
        assert_series_equal(result1, expected1)
@td.skip_if_no_scipy
def test_spline_error(self):
# see gh-10633
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with pytest.raises(ValueError):
s.interpolate(method='spline')
with pytest.raises(ValueError):
s.interpolate(method='spline', order=0)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method='time')
expected = Series([1., 2., 3.],
index=pd.to_timedelta([1, 2, 3]))
assert_series_equal(result, expected)
# test for non uniform spacing
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method='time')
expected = Series([1., 1.666667, 3.],
index=pd.to_timedelta([1, 2, 4]))
assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method='time')
tm.assert_numpy_array_equal(result.values, exp.values)
| bsd-3-clause |
yakovenkodenis/rethinkdb | external/v8_3.30.33.16/build/gyp/test/additional-targets/gyptest-additional.py | 139 | 1530 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple actions when using an explicit build target of 'all'.
"""
# NOTE(review): gyp functional-test driver; relies on the TestGyp harness
# shipped alongside gyp (not a stdlib module).
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('all.gyp', chdir='src')
test.relocate('src', 'relocate/src')
# Build all.
test.build('all.gyp', chdir='relocate/src')
# xcode places outputs relative to the directory of the defining .gyp file.
if test.format=='xcode':
  chdir = 'relocate/src/dir1'
else:
  chdir = 'relocate/src'
# Output is as expected.
file_content = 'Hello from emit.py\n'
test.built_file_must_match('out2.txt', file_content, chdir=chdir)
# 'out.txt' and 'foolib1' belong to additional targets, so a plain 'all'
# build must NOT have produced them yet.
test.built_file_must_not_exist('out.txt', chdir='relocate/src')
test.built_file_must_not_exist('foolib1',
                               type=test.SHARED_LIB,
                               chdir=chdir)
# TODO(mmoss) Make consistent with msvs, with 'dir1' before 'out/Default'?
if test.format in ('make', 'ninja', 'android', 'cmake'):
  chdir='relocate/src'
else:
  chdir='relocate/src/dir1'
# Build the action explicitly.
test.build('actions.gyp', 'action1_target', chdir=chdir)
# Check that things got run.
# NOTE(review): only existence is checked here (file_content is unused for
# this file), unlike the must_match check for out2.txt above.
file_content = 'Hello from emit.py\n'
test.built_file_must_exist('out.txt', chdir=chdir)
# Build the shared library explicitly.
test.build('actions.gyp', 'foolib1', chdir=chdir)
test.built_file_must_exist('foolib1',
                           type=test.SHARED_LIB,
                           chdir=chdir,
                           subdir='dir1')
test.pass_test()
| agpl-3.0 |
Mazecreator/tensorflow | tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py | 99 | 3581 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the gradient of `tf.sparse_tensor_dense_matmul()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SparseTensorDenseMatMulGradientTest(test.TestCase):
  """Numeric-vs-analytic gradient checks for sparse_tensor_dense_matmul."""

  def _sparsify(self, x, indices_dtype=np.int64):
    """Turn dense array *x* into a SparseTensor, zeroing entries < 0.5.

    Returns (SparseTensor, number of kept values).  Note: mutates *x*.
    """
    x[x < 0.5] = 0
    non_zero = np.where(x)
    # vstack gives (ndim, nnz); transpose to the (nnz, ndim) layout
    # SparseTensor expects for its indices.
    x_indices = np.vstack(non_zero).astype(indices_dtype).T
    x_values = x[non_zero]
    x_shape = x.shape
    return sparse_tensor.SparseTensor(
        indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)

  def _randomTensor(self,
                    size,
                    values_dtype,
                    adjoint=False,
                    sparse=False,
                    indices_dtype=np.int64):
    """Build a random (n, m) tensor; transposed when *adjoint*, and either
    a constant or (SparseTensor, nnz) depending on *sparse*."""
    n, m = size
    x = np.random.randn(n, m).astype(values_dtype)
    if adjoint:
      # pre-transpose so the matmul's adjoint flag undoes it
      x = x.transpose()
    if sparse:
      return self._sparsify(x, indices_dtype=indices_dtype)
    else:
      return constant_op.constant(x, dtype=values_dtype)

  def _testGradients(self, adjoint_a, adjoint_b, name, values_dtype,
                     indices_dtype):
    """Gradient-check one (adjoint_a, adjoint_b, dtypes) configuration."""
    n, k, m = np.random.randint(1, 10, size=3)
    sp_t, nnz = self._randomTensor(
        [n, k],
        values_dtype,
        adjoint=adjoint_a,
        sparse=True,
        indices_dtype=indices_dtype)
    dense_t = self._randomTensor([k, m], values_dtype, adjoint=adjoint_b)
    matmul = sparse_ops.sparse_tensor_dense_matmul(
        sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name)
    with self.test_session(use_gpu=True):
      dense_t_shape = [m, k] if adjoint_b else [k, m]
      sp_t_val_shape = [nnz]
      # Gradients are checked w.r.t. both the dense operand and the
      # sparse operand's values.
      err = gradient_checker.compute_gradient_error(
          [dense_t, sp_t.values], [dense_t_shape, sp_t_val_shape], matmul,
          [n, m])
      print("%s gradient err = %s" % (name, err))
      # 1e-3 tolerance accommodates float32 finite-difference noise.
      self.assertLess(err, 1e-3)

  def _testGradientsType(self, values_dtype, indices_dtype):
    """Check all four adjoint_a/adjoint_b combinations for a dtype pair."""
    for adjoint_a in [True, False]:
      for adjoint_b in [True, False]:
        name = "sparse_tensor_dense_matmul_%s_%s_%s_%s" % (
            adjoint_a, adjoint_b, values_dtype.__name__, indices_dtype.__name__)
        self._testGradients(adjoint_a, adjoint_b, name, values_dtype,
                            indices_dtype)

  def testGradients(self):
    np.random.seed(5)  # Fix seed to avoid flakiness
    self._testGradientsType(np.float32, np.int64)
    self._testGradientsType(np.float64, np.int64)
    self._testGradientsType(np.float32, np.int32)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
jhseu/tensorflow | tensorflow/python/client/device_lib.py | 16 | 1562 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for creating TensorFlow servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import device_attributes_pb2
from tensorflow.python import _pywrap_device_lib
def list_local_devices(session_config=None):
  """Enumerate the devices visible to the local process.

  Args:
    session_config: a session config proto, or None for the default config.

  Returns:
    A list of `DeviceAttributes` protocol buffers, one per device.
  """
  serialized = (None if session_config is None
                else session_config.SerializeToString())

  def _parse(raw):
    # Each entry from the C++ layer is a serialized DeviceAttributes proto.
    attrs = device_attributes_pb2.DeviceAttributes()
    attrs.ParseFromString(raw)
    return attrs

  return [_parse(raw)
          for raw in _pywrap_device_lib.list_devices(serialized)]
| apache-2.0 |
abhiii5459/sympy | sympy/printing/latex.py | 15 | 71982 | """
A Printer which converts an expression into its LaTeX equivalent.
"""
from __future__ import print_function, division
from sympy.core import S, Add, Symbol
from sympy.core.function import _coeff_isneg
from sympy.core.sympify import SympifyError
from sympy.core.alphabets import greeks
from sympy.core.operations import AssocOp
from sympy.core.containers import Tuple
from sympy.logic.boolalg import true
## sympy.printing imports
from .printer import Printer
from .conventions import split_super_sub, requires_partial
from .precedence import precedence, PRECEDENCE
import mpmath.libmp as mlib
from mpmath.libmp import prec_to_dps
from sympy.core.compatibility import default_sort_key, range
from sympy.utilities.iterables import has_variety
import re
# Hand-picked functions which can be used directly in both LaTeX and MathJax
# Complete list at http://www.mathjax.org/docs/1.1/tex.html#supported-latex-commands
# This variable only contains those functions which sympy uses.
accepted_latex_functions = ['arcsin', 'arccos', 'arctan', 'sin', 'cos', 'tan',
                    'sinh', 'cosh', 'tanh', 'sqrt', 'ln', 'log', 'sec', 'csc',
                    'cot', 'coth', 're', 'im', 'frac', 'root', 'arg',
                    ]

# Greek names whose LaTeX form differs from '\' + name: capitals that render
# as plain Latin letters, sympy spellings ('lamda'), and the \var* variants.
tex_greek_dictionary = {
    'Alpha': 'A',
    'Beta': 'B',
    'Epsilon': 'E',
    'Zeta': 'Z',
    'Eta': 'H',
    'Iota': 'I',
    'Kappa': 'K',
    'Mu': 'M',
    'Nu': 'N',
    'omicron': 'o',
    'Omicron': 'O',
    'Rho': 'P',
    'Tau': 'T',
    'Chi': 'X',
    'lamda': r'\lambda',
    'Lamda': r'\Lambda',
    'khi': r'\chi',
    'Khi': r'X',
    'varepsilon': r'\varepsilon',
    'varkappa': r'\varkappa',
    'varphi': r'\varphi',
    'varpi': r'\varpi',
    'varrho': r'\varrho',
    'varsigma': r'\varsigma',
    'vartheta': r'\vartheta',
}

# Symbol names printed with a leading backslash (\aleph, \hbar, ...).
other_symbols = set(['aleph', 'beth', 'daleth', 'gimel', 'ell', 'eth', 'hbar',
                     'hslash', 'mho', 'wp', ])

# Variable name modifiers
# Maps a recognised modifier suffix in a symbol name to a function wrapping
# the printed symbol in the corresponding LaTeX construct.
modifier_dict = {
    # Accents
    'mathring': lambda s: r'\mathring{'+s+r'}',
    'ddddot': lambda s: r'\ddddot{'+s+r'}',
    'dddot': lambda s: r'\dddot{'+s+r'}',
    'ddot': lambda s: r'\ddot{'+s+r'}',
    'dot': lambda s: r'\dot{'+s+r'}',
    'check': lambda s: r'\check{'+s+r'}',
    'breve': lambda s: r'\breve{'+s+r'}',
    'acute': lambda s: r'\acute{'+s+r'}',
    'grave': lambda s: r'\grave{'+s+r'}',
    'tilde': lambda s: r'\tilde{'+s+r'}',
    'hat': lambda s: r'\hat{'+s+r'}',
    'bar': lambda s: r'\bar{'+s+r'}',
    'vec': lambda s: r'\vec{'+s+r'}',
    'prime': lambda s: "{"+s+"}'",
    'prm': lambda s: "{"+s+"}'",
    # Faces
    'bold': lambda s: r'\boldsymbol{'+s+r'}',
    'bm': lambda s: r'\boldsymbol{'+s+r'}',
    'cal': lambda s: r'\mathcal{'+s+r'}',
    'scr': lambda s: r'\mathscr{'+s+r'}',
    'frak': lambda s: r'\mathfrak{'+s+r'}',
    # Brackets
    'norm': lambda s: r'\left\|{'+s+r'}\right\|',
    'avg': lambda s: r'\left\langle{'+s+r'}\right\rangle',
    'abs': lambda s: r'\left|{'+s+r'}\right|',
    'mag': lambda s: r'\left|{'+s+r'}\right|',
}

# Lowercase greek names recognised in symbol names.
greek_letters_set = frozenset(greeks)
class LatexPrinter(Printer):
    """Printer that converts sympy expressions to LaTeX strings."""
    printmethod = "_latex"

    # Default option values; callers override them via the settings dict
    # passed to __init__ (see __init__ for validation of 'mode').
    _default_settings = {
        "order": None,
        "mode": "plain",
        "itex": False,
        "fold_frac_powers": False,
        "fold_func_brackets": False,
        "fold_short_frac": None,
        "long_frac_ratio": 2,
        "mul_symbol": None,
        "inv_trig_style": "abbreviated",
        "mat_str": None,
        "mat_delim": "[",
        "symbol_names": {},
    }
    def __init__(self, settings=None):
        """Validate settings and precompute the multiplication separators.

        Raises ValueError for an unknown 'mode'.  'fold_short_frac'
        defaults to True in inline mode.
        """
        Printer.__init__(self, settings)
        if 'mode' in self._settings:
            valid_modes = ['inline', 'plain', 'equation',
                           'equation*']
            if self._settings['mode'] not in valid_modes:
                raise ValueError("'mode' must be one of 'inline', 'plain', "
                    "'equation' or 'equation*'")
        if self._settings['fold_short_frac'] is None and \
                self._settings['mode'] == 'inline':
            self._settings['fold_short_frac'] = True
        # Maps the user-facing 'mul_symbol' option to the LaTeX separator
        # inserted between factors.
        mul_symbol_table = {
            None: r" ",
            "ldot": r" \,.\, ",
            "dot": r" \cdot ",
            "times": r" \times "
        }
        self._settings['mul_symbol_latex'] = \
            mul_symbol_table[self._settings['mul_symbol']]
        # Between two bare numbers a visible symbol is always required,
        # hence the fallback to 'dot'.
        self._settings['mul_symbol_latex_numbers'] = \
            mul_symbol_table[self._settings['mul_symbol'] or 'dot']
        self._delim_dict = {'(': ')', '[': ']'}
def parenthesize(self, item, level):
if precedence(item) <= level:
return r"\left(%s\right)" % self._print(item)
else:
return self._print(item)
def doprint(self, expr):
tex = Printer.doprint(self, expr)
if self._settings['mode'] == 'plain':
return tex
elif self._settings['mode'] == 'inline':
return r"$%s$" % tex
elif self._settings['itex']:
return r"$$%s$$" % tex
else:
env_str = self._settings['mode']
return r"\begin{%s}%s\end{%s}" % (env_str, tex, env_str)
def _needs_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
printed, False otherwise. For example: a + b => True; a => False;
10 => False; -10 => True.
"""
return not ((expr.is_Integer and expr.is_nonnegative)
or (expr.is_Atom and (expr is not S.NegativeOne
and expr.is_Rational is False)))
def _needs_function_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
passed as an argument to a function, False otherwise. This is a more
liberal version of _needs_brackets, in that many expressions which need
to be wrapped in brackets when added/subtracted/raised to a power do
not need them when passed to a function. Such an example is a*b.
"""
if not self._needs_brackets(expr):
return False
else:
# Muls of the form a*b*c... can be folded
if expr.is_Mul and not self._mul_is_clean(expr):
return True
# Pows which don't need brackets can be folded
elif expr.is_Pow and not self._pow_is_clean(expr):
return True
# Add and Function always need brackets
elif expr.is_Add or expr.is_Function:
return True
else:
return False
def _needs_mul_brackets(self, expr, first=False, last=False):
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of a Mul, False otherwise. This is True for Add,
but also for some container objects that would not need brackets
when appearing last in a Mul, e.g. an Integral. ``last=True``
specifies that this expr is the last to appear in a Mul.
``first=True`` specifies that this expr is the first to appear in a Mul.
"""
from sympy import Integral, Piecewise, Product, Sum
if expr.is_Add:
return True
elif expr.is_Relational:
return True
elif expr.is_Mul:
if not first and _coeff_isneg(expr):
return True
if (not last and
any([expr.has(x) for x in (Integral, Piecewise, Product, Sum)])):
return True
return False
def _needs_add_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of an Add, False otherwise. This is False for most
things.
"""
if expr.is_Relational:
return True
return False
def _mul_is_clean(self, expr):
for arg in expr.args:
if arg.is_Function:
return False
return True
    def _pow_is_clean(self, expr):
        # A Pow is 'clean' when its base prints without brackets.
        return not self._needs_brackets(expr.base)
def _do_exponent(self, expr, exp):
if exp is not None:
return r"\left(%s\right)^{%s}" % (expr, exp)
else:
return expr
    def _print_bool(self, e):
        # Booleans print upright: \mathrm{True} / \mathrm{False}.
        return r"\mathrm{%s}" % e
    # SymPy's logic singletons reuse the same rendering.
    _print_BooleanTrue = _print_bool
    _print_BooleanFalse = _print_bool
    def _print_NoneType(self, e):
        # None prints upright as \mathrm{None}.
        return r"\mathrm{%s}" % e
def _print_Add(self, expr, order=None):
if self.order == 'none':
terms = list(expr.args)
else:
terms = self._as_ordered_terms(expr, order=order)
tex = ""
for i, term in enumerate(terms):
if i == 0:
pass
elif _coeff_isneg(term):
tex += " - "
term = -term
else:
tex += " + "
term_tex = self._print(term)
if self._needs_add_brackets(term):
term_tex = r"\left(%s\right)" % term_tex
tex += term_tex
return tex
def _print_Float(self, expr):
# Based off of that in StrPrinter
dps = prec_to_dps(expr._prec)
str_real = mlib.to_str(expr._mpf_, dps, strip_zeros=True)
# Must always have a mul symbol (as 2.5 10^{20} just looks odd)
# thus we use the number separator
separator = self._settings['mul_symbol_latex_numbers']
if 'e' in str_real:
(mant, exp) = str_real.split('e')
if exp[0] == '+':
exp = exp[1:]
return r"%s%s10^{%s}" % (mant, separator, exp)
elif str_real == "+inf":
return r"\infty"
elif str_real == "-inf":
return r"- \infty"
else:
return str_real
    def _print_Mul(self, expr):
        """Print a product, choosing between a plain factor sequence,
        a short 'a / b' form, and \\frac{...}{...} -- splitting long
        numerators across the fraction when they get too wide."""
        if _coeff_isneg(expr):
            expr = -expr
            tex = "- "
        else:
            tex = ""
        from sympy.simplify import fraction
        numer, denom = fraction(expr, exact=True)
        separator = self._settings['mul_symbol_latex']
        numbersep = self._settings['mul_symbol_latex_numbers']

        def convert(expr):
            # Render a (possibly non-Mul) factor sequence, inserting the
            # numeric separator between adjacent bare numbers so that
            # e.g. "2 3" cannot be misread as "23".
            if not expr.is_Mul:
                return str(self._print(expr))
            else:
                _tex = last_term_tex = ""
                if self.order not in ('old', 'none'):
                    args = expr.as_ordered_factors()
                else:
                    args = expr.args
                for i, term in enumerate(args):
                    term_tex = self._print(term)
                    if self._needs_mul_brackets(term, first=(i == 0),
                                                last=(i == len(args) - 1)):
                        term_tex = r"\left(%s\right)" % term_tex
                    if re.search("[0-9][} ]*$", last_term_tex) and \
                            re.match("[{ ]*[-+0-9]", term_tex):
                        # between two numbers
                        _tex += numbersep
                    elif _tex:
                        _tex += separator
                    _tex += term_tex
                    last_term_tex = term_tex
                return _tex

        if denom is S.One:
            # use the original expression here, since fraction() may have
            # altered it when producing numer and denom
            tex += convert(expr)
        else:
            snumer = convert(numer)
            sdenom = convert(denom)
            ldenom = len(sdenom.split())
            ratio = self._settings['long_frac_ratio']
            if self._settings['fold_short_frac'] \
                    and ldenom <= 2 and not "^" in sdenom:
                # handle short fractions
                if self._needs_mul_brackets(numer, last=False):
                    tex += r"\left(%s\right) / %s" % (snumer, sdenom)
                else:
                    tex += r"%s / %s" % (snumer, sdenom)
            elif len(snumer.split()) > ratio*ldenom:
                # handle long fractions
                if self._needs_mul_brackets(numer, last=True):
                    tex += r"\frac{1}{%s}%s\left(%s\right)" \
                        % (sdenom, separator, snumer)
                elif numer.is_Mul:
                    # split a long numerator: factors accumulate into 'a'
                    # (kept inside the fraction) until it gets too wide,
                    # then into 'b' (printed after the fraction); order of
                    # non-commutative factors must be preserved.
                    a = S.One
                    b = S.One
                    for x in numer.args:
                        if self._needs_mul_brackets(x, last=False) or \
                                len(convert(a*x).split()) > ratio*ldenom or \
                                (b.is_commutative is x.is_commutative is False):
                            b *= x
                        else:
                            a *= x
                    if self._needs_mul_brackets(b, last=True):
                        tex += r"\frac{%s}{%s}%s\left(%s\right)" \
                            % (convert(a), sdenom, separator, convert(b))
                    else:
                        tex += r"\frac{%s}{%s}%s%s" \
                            % (convert(a), sdenom, separator, convert(b))
                else:
                    tex += r"\frac{1}{%s}%s%s" % (sdenom, separator, snumer)
            else:
                tex += r"\frac{%s}{%s}" % (snumer, sdenom)
        return tex
    def _print_Pow(self, expr):
        """Print a power, with special cases for roots (x**(1/n)),
        folded fractional exponents, and negative rational exponents
        (delegated to _print_Mul as a fraction)."""
        # Treat x**Rational(1,n) as special case
        if expr.exp.is_Rational and abs(expr.exp.p) == 1 and expr.exp.q != 1:
            base = self._print(expr.base)
            expq = expr.exp.q
            if expq == 2:
                tex = r"\sqrt{%s}" % base
            elif self._settings['itex']:
                # itex/MathJax spells n-th roots as \root{n}{x}
                tex = r"\root{%d}{%s}" % (expq, base)
            else:
                tex = r"\sqrt[%d]{%s}" % (expq, base)
            if expr.exp.is_negative:
                return r"\frac{1}{%s}" % tex
            else:
                return tex
        elif self._settings['fold_frac_powers'] \
                and expr.exp.is_Rational \
                and expr.exp.q != 1:
            # print the exponent as p/q instead of \frac{p}{q}
            base, p, q = self._print(expr.base), expr.exp.p, expr.exp.q
            if expr.base.is_Function:
                return self._print(expr.base, "%s/%s" % (p, q))
            if self._needs_brackets(expr.base):
                return r"\left(%s\right)^{%s/%s}" % (base, p, q)
            return r"%s^{%s/%s}" % (base, p, q)
        elif expr.exp.is_Rational and expr.exp.is_negative and expr.base.is_commutative:
            # Things like 1/x
            return self._print_Mul(expr)
        else:
            if expr.base.is_Function:
                return self._print(expr.base, self._print(expr.exp))
            else:
                if expr.is_commutative and expr.exp == -1:
                    #solves issue 4129
                    #As Mul always simplify 1/x to x**-1
                    #The objective is achieved with this hack
                    #first we get the latex for -1 * expr,
                    #which is a Mul expression
                    tex = self._print(S.NegativeOne * expr).strip()
                    #the result comes with a minus and a space, so we remove
                    if tex[:1] == "-":
                        return tex[1:].strip()
                if self._needs_brackets(expr.base):
                    tex = r"\left(%s\right)^{%s}"
                else:
                    tex = r"%s^{%s}"
                return tex % (self._print(expr.base),
                              self._print(expr.exp))
def _print_Sum(self, expr):
if len(expr.limits) == 1:
tex = r"\sum_{%s=%s}^{%s} " % \
tuple([ self._print(i) for i in expr.limits[0] ])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\sum_{\substack{%s}} " % \
str.join('\\\\', [ _format_ineq(l) for l in expr.limits ])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_Product(self, expr):
if len(expr.limits) == 1:
tex = r"\prod_{%s=%s}^{%s} " % \
tuple([ self._print(i) for i in expr.limits[0] ])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\prod_{\substack{%s}} " % \
str.join('\\\\', [ _format_ineq(l) for l in expr.limits ])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
    def _print_BasisDependent(self, expr):
        """Print a vector/dyadic as a signed sum of basis components."""
        from sympy.vector import Vector
        o1 = []
        if expr == expr.zero:
            return expr.zero._latex_form
        if isinstance(expr, Vector):
            items = expr.separate().items()
        else:
            items = [(0, expr)]
        for system, vect in items:
            inneritems = list(vect.components.items())
            # sort by basis-vector name for a deterministic ordering
            inneritems.sort(key = lambda x:x[0].__str__())
            for k, v in inneritems:
                if v == 1:
                    o1.append(' + ' + k._latex_form)
                elif v == -1:
                    o1.append(' - ' + k._latex_form)
                else:
                    arg_str = '(' + LatexPrinter().doprint(v) + ')'
                    o1.append(' + ' + arg_str + k._latex_form)
        outstr = (''.join(o1))
        # every piece was emitted with a leading ' + '/' - '; strip the
        # separator from the first term, keeping a bare '-' if negative
        if outstr[1] != '-':
            outstr = outstr[3:]
        else:
            outstr = outstr[1:]
        return outstr
def _print_Indexed(self, expr):
tex = self._print(expr.base)+'_{%s}' % ','.join(
map(self._print, expr.indices))
return tex
def _print_IndexedBase(self, expr):
return self._print(expr.label)
    def _print_Derivative(self, expr):
        """Print a derivative as a d/dx (or partial) fraction prefix."""
        dim = len(expr.variables)
        if requires_partial(expr):
            diff_symbol = r'\partial'
        else:
            diff_symbol = r'd'
        if dim == 1:
            tex = r"\frac{%s}{%s %s}" % (diff_symbol, diff_symbol,
                self._print(expr.variables[0]))
        else:
            # Collapse runs of equal variables into (variable, multiplicity)
            # pairs so repeated differentiation prints as a power.
            multiplicity, i, tex = [], 1, ""
            current = expr.variables[0]
            for symbol in expr.variables[1:]:
                if symbol == current:
                    i = i + 1
                else:
                    multiplicity.append((current, i))
                    current, i = symbol, 1
            else:
                # Flush the final run (loop else always runs here).
                multiplicity.append((current, i))
            for x, i in multiplicity:
                if i == 1:
                    tex += r"%s %s" % (diff_symbol, self._print(x))
                else:
                    tex += r"%s %s^{%s}" % (diff_symbol, self._print(x), i)
            tex = r"\frac{%s^{%s}}{%s} " % (diff_symbol, dim, tex)
        # Associative operands (sums/products) need explicit parentheses.
        if isinstance(expr.expr, AssocOp):
            return r"%s\left(%s\right)" % (tex, self._print(expr.expr))
        else:
            return r"%s %s" % (tex, self._print(expr.expr))
def _print_Subs(self, subs):
expr, old, new = subs.args
latex_expr = self._print(expr)
latex_old = (self._print(e) for e in old)
latex_new = (self._print(e) for e in new)
latex_subs = r'\\ '.join(
e[0] + '=' + e[1] for e in zip(latex_old, latex_new))
return r'\left. %s \right|_{\substack{ %s }}' % (latex_expr, latex_subs)
    def _print_Integral(self, expr):
        """Print integrals; up to 4 unbounded limits use \iint-style glyphs."""
        tex, symbols = "", []
        # Only up to \iiiint exists
        if len(expr.limits) <= 4 and all(len(lim) == 1 for lim in expr.limits):
            # Use len(expr.limits)-1 so that syntax highlighters don't think
            # \" is an escaped quote
            tex = r"\i" + "i"*(len(expr.limits) - 1) + "nt"
            symbols = [r"\, d%s" % self._print(symbol[0])
                for symbol in expr.limits]
        else:
            # Innermost integral sign goes last, hence the reversal.
            for lim in reversed(expr.limits):
                symbol = lim[0]
                tex += r"\int"
                if len(lim) > 1:
                    # \limits only makes sense in display-style equations.
                    if self._settings['mode'] in ['equation', 'equation*'] \
                            and not self._settings['itex']:
                        tex += r"\limits"
                    if len(lim) == 3:
                        tex += "_{%s}^{%s}" % (self._print(lim[1]),
                            self._print(lim[2]))
                    if len(lim) == 2:
                        tex += "^{%s}" % (self._print(lim[1]))
                symbols.insert(0, r"\, d%s" % self._print(symbol))
        return r"%s %s%s" % (tex,
            str(self._print(expr.function)), "".join(symbols))
    def _print_Limit(self, expr):
        """Print a limit; finite points carry a directional superscript."""
        e, z, z0, dir = expr.args
        tex = r"\lim_{%s \to " % self._print(z)
        if z0 in (S.Infinity, S.NegativeInfinity):
            # No approach direction is shown at infinity.
            tex += r"%s}" % self._print(z0)
        else:
            tex += r"%s^%s}" % (self._print(z0), self._print(dir))
        # Associative operands need explicit parentheses.
        if isinstance(e, AssocOp):
            return r"%s\left(%s\right)" % (tex, self._print(e))
        else:
            return r"%s %s" % (tex, self._print(e))
    def _hprint_Function(self, func):
        '''
        Logic to decide how to render a function to latex
        - if it is a recognized latex name, use the appropriate latex command
        - if it is a single letter, just use that letter
        - if it is a longer name, then put \operatorname{} around it and be
        mindful of underscores in the name
        '''
        func = self._deal_with_super_sub(func)
        if func in accepted_latex_functions:
            name = r"\%s" % func
        elif len(func) == 1 or func.startswith('\\'):
            # Single letters and names that are already latex pass through.
            name = func
        else:
            name = r"\operatorname{%s}" % func
        return name
    def _print_Function(self, expr, exp=None):
        '''
        Render functions to LaTeX, handling functions that LaTeX knows about
        e.g., sin, cos, ... by using the proper LaTeX command (\sin, \cos, ...).
        For single-letter function names, render them as regular LaTeX math
        symbols. For multi-letter function names that LaTeX does not know
        about, (e.g., Li, sech) use \operatorname{} so that the function name
        is rendered in Roman font and LaTeX handles spacing properly.
        expr is the expression involving the function
        exp is an exponent
        '''
        func = expr.func.__name__
        # Prefer a dedicated printer method if one exists for this function.
        if hasattr(self, '_print_' + func):
            return getattr(self, '_print_' + func)(expr, exp)
        else:
            args = [ str(self._print(arg)) for arg in expr.args ]
            # How inverse trig functions should be displayed, formats are:
            # abbreviated: asin, full: arcsin, power: sin^-1
            inv_trig_style = self._settings['inv_trig_style']
            # If we are dealing with a power-style inverse trig function
            inv_trig_power_case = False
            # If it is applicable to fold the argument brackets
            can_fold_brackets = self._settings['fold_func_brackets'] and \
                len(args) == 1 and \
                not self._needs_function_brackets(expr.args[0])
            inv_trig_table = ["asin", "acos", "atan", "acot"]
            # If the function is an inverse trig function, handle the style
            if func in inv_trig_table:
                if inv_trig_style == "abbreviated":
                    func = func
                elif inv_trig_style == "full":
                    func = "arc" + func[1:]
                elif inv_trig_style == "power":
                    func = func[1:]
                    inv_trig_power_case = True
                    # Can never fold brackets if we're raised to a power
                    if exp is not None:
                        can_fold_brackets = False
            if inv_trig_power_case:
                if func in accepted_latex_functions:
                    name = r"\%s^{-1}" % func
                else:
                    name = r"\operatorname{%s}^{-1}" % func
            elif exp is not None:
                name = r'%s^{%s}' % (self._hprint_Function(func), exp)
            else:
                name = self._hprint_Function(func)
            if can_fold_brackets:
                if func in accepted_latex_functions:
                    # Wrap argument safely to avoid parse-time conflicts
                    # with the function name itself
                    name += r" {%s}"
                else:
                    name += r"%s"
            else:
                name += r"{\left (%s \right )}"
            # The sin^{-1}(x)^{2} style: exponent after the argument.
            if inv_trig_power_case and exp is not None:
                name += r"^{%s}" % exp
            return name % ",".join(args)
    def _print_UndefinedFunction(self, expr):
        # Unknown functions are rendered from their string name.
        return self._hprint_Function(str(expr))
    def _print_FunctionClass(self, expr):
        # A bare function class may supply its own argument-free latex form.
        if hasattr(expr, '_latex_no_arg'):
            return expr._latex_no_arg(self)
        return self._hprint_Function(str(expr))
def _print_Lambda(self, expr):
symbols, expr = expr.args
if len(symbols) == 1:
symbols = self._print(symbols[0])
else:
symbols = self._print(tuple(symbols))
args = (symbols, self._print(expr))
tex = r"\left( %s \mapsto %s \right)" % (symbols, self._print(expr))
return tex
    def _print_Min(self, expr, exp=None):
        # Sort arguments canonically so the output is deterministic.
        args = sorted(expr.args, key=default_sort_key)
        texargs = [r"%s" % self._print(symbol) for symbol in args]
        tex = r"\min\left(%s\right)" % ", ".join(texargs)
        if exp is not None:
            return r"%s^{%s}" % (tex, exp)
        else:
            return tex
    def _print_Max(self, expr, exp=None):
        # Mirror of _print_Min with \max.
        args = sorted(expr.args, key=default_sort_key)
        texargs = [r"%s" % self._print(symbol) for symbol in args]
        tex = r"\max\left(%s\right)" % ", ".join(texargs)
        if exp is not None:
            return r"%s^{%s}" % (tex, exp)
        else:
            return tex
def _print_floor(self, expr, exp=None):
tex = r"\lfloor{%s}\rfloor" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_ceiling(self, expr, exp=None):
tex = r"\lceil{%s}\rceil" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_Abs(self, expr, exp=None):
tex = r"\left|{%s}\right|" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
_print_Determinant = _print_Abs
    def _print_re(self, expr, exp=None):
        # Real part; bracket the argument only when precedence requires it.
        if self._needs_brackets(expr.args[0]):
            tex = r"\Re {\left (%s \right )}" % self._print(expr.args[0])
        else:
            tex = r"\Re{%s}" % self._print(expr.args[0])
        return self._do_exponent(tex, exp)
    def _print_im(self, expr, exp=None):
        # Imaginary part; same bracketing policy as _print_re.
        if self._needs_brackets(expr.args[0]):
            tex = r"\Im {\left ( %s \right )}" % self._print(expr.args[0])
        else:
            tex = r"\Im{%s}" % self._print(expr.args[0])
        return self._do_exponent(tex, exp)
    def _print_Not(self, e):
        # Negated equivalences/implications use slashed relation symbols.
        from sympy import Equivalent, Implies
        if isinstance(e.args[0], Equivalent):
            return self._print_Equivalent(e.args[0], r"\not\equiv")
        if isinstance(e.args[0], Implies):
            return self._print_Implies(e.args[0], r"\not\Rightarrow")
        if (e.args[0].is_Boolean):
            return r"\neg (%s)" % self._print(e.args[0])
        else:
            return r"\neg %s" % self._print(e.args[0])
    def _print_LogOp(self, args, char):
        # Join operands with the connective `char`, parenthesizing compound
        # boolean operands (negations bind tightly and stay bare).
        arg = args[0]
        if arg.is_Boolean and not arg.is_Not:
            tex = r"\left(%s\right)" % self._print(arg)
        else:
            tex = r"%s" % self._print(arg)
        for arg in args[1:]:
            if arg.is_Boolean and not arg.is_Not:
                tex += r" %s \left(%s\right)" % (char, self._print(arg))
            else:
                tex += r" %s %s" % (char, self._print(arg))
        return tex
    def _print_And(self, e):
        # Conjunction: canonically sorted operands joined by \wedge.
        args = sorted(e.args, key=default_sort_key)
        return self._print_LogOp(args, r"\wedge")
    def _print_Or(self, e):
        # Disjunction: canonically sorted operands joined by \vee.
        args = sorted(e.args, key=default_sort_key)
        return self._print_LogOp(args, r"\vee")
    def _print_Xor(self, e):
        # Exclusive or: canonically sorted operands joined by \veebar.
        args = sorted(e.args, key=default_sort_key)
        return self._print_LogOp(args, r"\veebar")
    def _print_Implies(self, e, altchar=None):
        # Implication is not commutative, so argument order is preserved.
        return self._print_LogOp(e.args, altchar or r"\Rightarrow")
    def _print_Equivalent(self, e, altchar=None):
        # Equivalence: sorted operands joined by \equiv (or a caller symbol).
        args = sorted(e.args, key=default_sort_key)
        return self._print_LogOp(args, altchar or r"\equiv")
def _print_conjugate(self, expr, exp=None):
tex = r"\overline{%s}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_polar_lift(self, expr, exp=None):
func = r"\operatorname{polar\_lift}"
arg = r"{\left (%s \right )}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (func, exp, arg)
else:
return r"%s%s" % (func, arg)
    def _print_ExpBase(self, expr, exp=None):
        # TODO should exp_polar be printed differently?
        # what about exp_polar(0), exp_polar(1)?
        tex = r"e^{%s}" % self._print(expr.args[0])
        return self._do_exponent(tex, exp)
    def _print_elliptic_k(self, expr, exp=None):
        # Complete elliptic integral of the first kind, K(m).
        tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            return r"K^{%s}%s" % (exp, tex)
        else:
            return r"K%s" % tex
    def _print_elliptic_f(self, expr, exp=None):
        # Incomplete elliptic integral of the first kind, F(phi|m).
        tex = r"\left(%s\middle| %s\right)" % \
            (self._print(expr.args[0]), self._print(expr.args[1]))
        if exp is not None:
            return r"F^{%s}%s" % (exp, tex)
        else:
            return r"F%s" % tex
    def _print_elliptic_e(self, expr, exp=None):
        # Elliptic integral of the second kind, E(phi|m) or complete E(m).
        if len(expr.args) == 2:
            tex = r"\left(%s\middle| %s\right)" % \
                (self._print(expr.args[0]), self._print(expr.args[1]))
        else:
            tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            return r"E^{%s}%s" % (exp, tex)
        else:
            return r"E%s" % tex
    def _print_elliptic_pi(self, expr, exp=None):
        # Elliptic integral of the third kind, Pi(n; phi|m) or Pi(n|m).
        if len(expr.args) == 3:
            tex = r"\left(%s; %s\middle| %s\right)" % \
                (self._print(expr.args[0]), self._print(expr.args[1]), \
                self._print(expr.args[2]))
        else:
            tex = r"\left(%s\middle| %s\right)" % \
                (self._print(expr.args[0]), self._print(expr.args[1]))
        if exp is not None:
            return r"\Pi^{%s}%s" % (exp, tex)
        else:
            return r"\Pi%s" % tex
    def _print_gamma(self, expr, exp=None):
        # Gamma function.
        tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            return r"\Gamma^{%s}%s" % (exp, tex)
        else:
            return r"\Gamma%s" % tex
    def _print_uppergamma(self, expr, exp=None):
        # Upper incomplete gamma function Gamma(s, x).
        tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
                                        self._print(expr.args[1]))
        if exp is not None:
            return r"\Gamma^{%s}%s" % (exp, tex)
        else:
            return r"\Gamma%s" % tex
    def _print_lowergamma(self, expr, exp=None):
        # Lower incomplete gamma function gamma(s, x).
        tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
                                        self._print(expr.args[1]))
        if exp is not None:
            return r"\gamma^{%s}%s" % (exp, tex)
        else:
            return r"\gamma%s" % tex
def _print_expint(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[1])
nu = self._print(expr.args[0])
if exp is not None:
return r"\operatorname{E}_{%s}^{%s}%s" % (nu, exp, tex)
else:
return r"\operatorname{E}_{%s}%s" % (nu, tex)
    def _print_fresnels(self, expr, exp=None):
        # Fresnel sine integral S(z).
        tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            return r"S^{%s}%s" % (exp, tex)
        else:
            return r"S%s" % tex
    def _print_fresnelc(self, expr, exp=None):
        # Fresnel cosine integral C(z).
        tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            return r"C^{%s}%s" % (exp, tex)
        else:
            return r"C%s" % tex
    def _print_subfactorial(self, expr, exp=None):
        # Subfactorial !n; compound arguments get brackets.
        x = expr.args[0]
        if self._needs_brackets(x):
            tex = r"!\left(%s\right)" % self._print(x)
        else:
            tex = "!" + self._print(x)
        if exp is not None:
            return r"%s^{%s}" % (tex, exp)
        else:
            return tex
    def _print_factorial(self, expr, exp=None):
        # Factorial n!; compound arguments get brackets.
        x = expr.args[0]
        if self._needs_brackets(x):
            tex = r"\left(%s\right)!" % self._print(x)
        else:
            tex = self._print(x) + "!"
        if exp is not None:
            return r"%s^{%s}" % (tex, exp)
        else:
            return tex
    def _print_factorial2(self, expr, exp=None):
        # Double factorial n!!; compound arguments get brackets.
        x = expr.args[0]
        if self._needs_brackets(x):
            tex = r"\left(%s\right)!!" % self._print(x)
        else:
            tex = self._print(x) + "!!"
        if exp is not None:
            return r"%s^{%s}" % (tex, exp)
        else:
            return tex
def _print_binomial(self, expr, exp=None):
tex = r"{\binom{%s}{%s}}" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
    def _print_RisingFactorial(self, expr, exp=None):
        # Rising factorial (Pochhammer symbol): {x}^{(k)}.
        n, k = expr.args
        if self._needs_brackets(n):
            base = r"\left(%s\right)" % self._print(n)
        else:
            base = self._print(n)
        tex = r"{%s}^{\left(%s\right)}" % (base, self._print(k))
        return self._do_exponent(tex, exp)
    def _print_FallingFactorial(self, expr, exp=None):
        # Falling factorial: {(x)}_{k}.
        n, k = expr.args
        if self._needs_brackets(k):
            sub = r"\left(%s\right)" % self._print(k)
        else:
            sub = self._print(k)
        tex = r"{\left(%s\right)}_{%s}" % (self._print(n), sub)
        return self._do_exponent(tex, exp)
    def _hprint_BesselBase(self, expr, exp, sym):
        # Attach exp to the bare symbol when it carries no caret yet;
        # otherwise defer it until the full call expression is built.
        tex = r"%s" % (sym)
        need_exp = False
        if exp is not None:
            if tex.find('^') == -1:
                tex = r"%s^{%s}" % (tex, self._print(exp))
            else:
                need_exp = True
        tex = r"%s_{%s}\left(%s\right)" % (tex, self._print(expr.order),
                                           self._print(expr.argument))
        if need_exp:
            tex = self._do_exponent(tex, exp)
        return tex
def _hprint_vec(self, vec):
if len(vec) == 0:
return ""
s = ""
for i in vec[:-1]:
s += "%s, " % self._print(i)
s += self._print(vec[-1])
return s
    # The Bessel-family printers below all delegate to _hprint_BesselBase
    # with the appropriate function symbol.
    def _print_besselj(self, expr, exp=None):
        return self._hprint_BesselBase(expr, exp, 'J')
    def _print_besseli(self, expr, exp=None):
        return self._hprint_BesselBase(expr, exp, 'I')
    def _print_besselk(self, expr, exp=None):
        return self._hprint_BesselBase(expr, exp, 'K')
    def _print_bessely(self, expr, exp=None):
        return self._hprint_BesselBase(expr, exp, 'Y')
    def _print_yn(self, expr, exp=None):
        return self._hprint_BesselBase(expr, exp, 'y')
    def _print_jn(self, expr, exp=None):
        return self._hprint_BesselBase(expr, exp, 'j')
    def _print_hankel1(self, expr, exp=None):
        return self._hprint_BesselBase(expr, exp, 'H^{(1)}')
    def _print_hankel2(self, expr, exp=None):
        return self._hprint_BesselBase(expr, exp, 'H^{(2)}')
    def _hprint_airy(self, expr, exp=None, notation=""):
        # Airy functions Ai/Bi with optional power on the notation.
        tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            return r"%s^{%s}%s" % (notation, exp, tex)
        else:
            return r"%s%s" % (notation, tex)
    def _hprint_airy_prime(self, expr, exp=None, notation=""):
        # Derivatives of the Airy functions, Ai'/Bi'.
        tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            return r"{%s^\prime}^{%s}%s" % (notation, exp, tex)
        else:
            return r"%s^\prime%s" % (notation, tex)
    def _print_airyai(self, expr, exp=None):
        return self._hprint_airy(expr, exp, 'Ai')
    def _print_airybi(self, expr, exp=None):
        return self._hprint_airy(expr, exp, 'Bi')
    def _print_airyaiprime(self, expr, exp=None):
        return self._hprint_airy_prime(expr, exp, 'Ai')
    def _print_airybiprime(self, expr, exp=None):
        return self._hprint_airy_prime(expr, exp, 'Bi')
    def _print_hyper(self, expr, exp=None):
        # Generalized hypergeometric function pFq with stacked parameters.
        tex = r"{{}_{%s}F_{%s}\left(\begin{matrix} %s \\ %s \end{matrix}" \
            r"\middle| {%s} \right)}" % \
            (self._print(len(expr.ap)), self._print(len(expr.bq)),
             self._hprint_vec(expr.ap), self._hprint_vec(expr.bq),
             self._print(expr.argument))
        if exp is not None:
            tex = r"{%s}^{%s}" % (tex, self._print(exp))
        return tex
    def _print_meijerg(self, expr, exp=None):
        # Meijer G-function with its four parameter groups in a matrix.
        tex = r"{G_{%s, %s}^{%s, %s}\left(\begin{matrix} %s & %s \\" \
            r"%s & %s \end{matrix} \middle| {%s} \right)}" % \
            (self._print(len(expr.ap)), self._print(len(expr.bq)),
             self._print(len(expr.bm)), self._print(len(expr.an)),
             self._hprint_vec(expr.an), self._hprint_vec(expr.aother),
             self._hprint_vec(expr.bm), self._hprint_vec(expr.bother),
             self._print(expr.argument))
        if exp is not None:
            tex = r"{%s}^{%s}" % (tex, self._print(exp))
        return tex
def _print_dirichlet_eta(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\eta^{%s}%s" % (self._print(exp), tex)
return r"\eta%s" % tex
    def _print_zeta(self, expr, exp=None):
        # Riemann zeta(s) or two-argument Hurwitz zeta(s, a).
        if len(expr.args) == 2:
            tex = r"\left(%s, %s\right)" % tuple(map(self._print, expr.args))
        else:
            tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            return r"\zeta^{%s}%s" % (self._print(exp), tex)
        return r"\zeta%s" % tex
    def _print_lerchphi(self, expr, exp=None):
        # Lerch transcendent Phi(z, s, a).
        tex = r"\left(%s, %s, %s\right)" % tuple(map(self._print, expr.args))
        if exp is None:
            return r"\Phi%s" % tex
        return r"\Phi^{%s}%s" % (self._print(exp), tex)
    def _print_polylog(self, expr, exp=None):
        # Polylogarithm Li_s(z).
        s, z = map(self._print, expr.args)
        tex = r"\left(%s\right)" % z
        if exp is None:
            return r"\operatorname{Li}_{%s}%s" % (s, tex)
        return r"\operatorname{Li}_{%s}^{%s}%s" % (s, self._print(exp), tex)
    # Orthogonal-polynomial and spherical-harmonic printers; each renders
    # its standard letter with the degree/order as sub/superscripts, and
    # wraps the whole expression in parentheses when raised to a power.
    def _print_jacobi(self, expr, exp=None):
        n, a, b, x = map(self._print, expr.args)
        tex = r"P_{%s}^{\left(%s,%s\right)}\left(%s\right)" % (n, a, b, x)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
    def _print_gegenbauer(self, expr, exp=None):
        n, a, x = map(self._print, expr.args)
        tex = r"C_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
    def _print_chebyshevt(self, expr, exp=None):
        n, x = map(self._print, expr.args)
        tex = r"T_{%s}\left(%s\right)" % (n, x)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
    def _print_chebyshevu(self, expr, exp=None):
        n, x = map(self._print, expr.args)
        tex = r"U_{%s}\left(%s\right)" % (n, x)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
    def _print_legendre(self, expr, exp=None):
        n, x = map(self._print, expr.args)
        tex = r"P_{%s}\left(%s\right)" % (n, x)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
    def _print_assoc_legendre(self, expr, exp=None):
        n, a, x = map(self._print, expr.args)
        tex = r"P_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
    def _print_hermite(self, expr, exp=None):
        n, x = map(self._print, expr.args)
        tex = r"H_{%s}\left(%s\right)" % (n, x)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
    def _print_laguerre(self, expr, exp=None):
        n, x = map(self._print, expr.args)
        tex = r"L_{%s}\left(%s\right)" % (n, x)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
    def _print_assoc_laguerre(self, expr, exp=None):
        n, a, x = map(self._print, expr.args)
        tex = r"L_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
    def _print_Ynm(self, expr, exp=None):
        n, m, theta, phi = map(self._print, expr.args)
        tex = r"Y_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
    def _print_Znm(self, expr, exp=None):
        n, m, theta, phi = map(self._print, expr.args)
        tex = r"Z_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
        if exp is not None:
            tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
        return tex
def _print_Rational(self, expr):
if expr.q != 1:
sign = ""
p = expr.p
if expr.p < 0:
sign = "- "
p = -p
return r"%s\frac{%d}{%d}" % (sign, p, expr.q)
else:
return self._print(expr.p)
    def _print_Order(self, expr):
        # Append "; vars \rightarrow point" when the point is nonzero or
        # the order involves several variables.
        s = self._print(expr.expr)
        if expr.point and any(p != S.Zero for p in expr.point) or \
           len(expr.variables) > 1:
            s += '; '
            if len(expr.variables) > 1:
                s += self._print(expr.variables)
            elif len(expr.variables):
                s += self._print(expr.variables[0])
            s += r'\rightarrow'
            if len(expr.point) > 1:
                s += self._print(expr.point)
            else:
                s += self._print(expr.point[0])
        return r"\mathcal{O}\left(%s\right)" % s
    def _print_Symbol(self, expr):
        # Honor user-supplied symbol_names; otherwise translate greek and
        # super/subscripts unless the name already contains raw latex.
        if expr in self._settings['symbol_names']:
            return self._settings['symbol_names'][expr]
        return self._deal_with_super_sub(expr.name) if \
            '\\' not in expr.name else expr.name
    _print_RandomSymbol = _print_Symbol
    _print_MatrixSymbol = _print_Symbol
    def _deal_with_super_sub(self, string):
        # Split a name like "x_1^a" into base/supers/subs, translate each
        # part (e.g. greek names), and re-glue as latex.
        name, supers, subs = split_super_sub(string)
        name = translate(name)
        supers = [translate(sup) for sup in supers]
        subs = [translate(sub) for sub in subs]
        # glue all items together:
        if len(supers) > 0:
            name += "^{%s}" % " ".join(supers)
        if len(subs) > 0:
            name += "_{%s}" % " ".join(subs)
        return name
def _print_Relational(self, expr):
if self._settings['itex']:
gt = r"\gt"
lt = r"\lt"
else:
gt = ">"
lt = "<"
charmap = {
"==": "=",
">": gt,
"<": lt,
">=": r"\geq",
"<=": r"\leq",
"!=": r"\neq",
}
return "%s %s %s" % (self._print(expr.lhs),
charmap[expr.rel_op], self._print(expr.rhs))
    def _print_Piecewise(self, expr):
        # Each (expr, cond) pair becomes one row of \begin{cases}; a final
        # literally-True condition renders as "otherwise".
        ecpairs = [r"%s & \text{for}\: %s" % (self._print(e), self._print(c))
                   for e, c in expr.args[:-1]]
        if expr.args[-1].cond == true:
            ecpairs.append(r"%s & \text{otherwise}" %
                           self._print(expr.args[-1].expr))
        else:
            ecpairs.append(r"%s & \text{for}\: %s" %
                           (self._print(expr.args[-1].expr),
                            self._print(expr.args[-1].cond)))
        tex = r"\begin{cases} %s \end{cases}"
        return tex % r" \\".join(ecpairs)
    def _print_MatrixBase(self, expr):
        # Build one "a & b & ..." string per row; rows join with \\ below.
        lines = []
        for line in range(expr.rows): # 'line' is a row index
            lines.append(" & ".join([ self._print(i) for i in expr[line, :] ]))
        mat_str = self._settings['mat_str']
        if mat_str is None:
            # Default environment: smallmatrix inline, matrix up to 10
            # columns, array beyond that.
            if self._settings['mode'] == 'inline':
                mat_str = 'smallmatrix'
            else:
                if (expr.cols <= 10) is True:
                    mat_str = 'matrix'
                else:
                    mat_str = 'array'
        out_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
        out_str = out_str.replace('%MATSTR%', mat_str)
        if mat_str == 'array':
            # array needs an explicit column-alignment spec.
            out_str = out_str.replace('%s', '{' + 'c'*expr.cols + '}%s')
        if self._settings['mat_delim']:
            left_delim = self._settings['mat_delim']
            right_delim = self._delim_dict[left_delim]
            out_str = r'\left' + left_delim + out_str + \
                      r'\right' + right_delim
        return out_str % r"\\".join(lines)
    _print_ImmutableMatrix = _print_MatrixBase
    _print_Matrix = _print_MatrixBase
def _print_MatrixElement(self, expr):
return self._print(expr.parent) + '_{%s, %s}'%(expr.i, expr.j)
    def _print_MatrixSlice(self, expr):
        # Compress each (start, stop, step) triple: drop a unit step, drop
        # the stop for length-1 slices, and blank a zero start.
        def latexslice(x):
            x = list(x)
            if x[2] == 1:
                del x[2]
            if x[1] == x[0] + 1:
                del x[1]
            if x[0] == 0:
                x[0] = ''
            return ':'.join(map(self._print, x))
        return (self._print(expr.parent) + r'\left[' +
                latexslice(expr.rowslice) + ', ' +
                latexslice(expr.colslice) + r'\right]')
def _print_BlockMatrix(self, expr):
return self._print(expr.blocks)
    def _print_Transpose(self, expr):
        # Parenthesize anything that is not a plain matrix symbol.
        mat = expr.arg
        from sympy.matrices import MatrixSymbol
        if not isinstance(mat, MatrixSymbol):
            return r"\left(%s\right)^T" % self._print(mat)
        else:
            return "%s^T" % self._print(mat)
def _print_Adjoint(self, expr):
mat = expr.arg
from sympy.matrices import MatrixSymbol
if not isinstance(mat, MatrixSymbol):
return r"\left(%s\right)^\dag" % self._print(mat)
else:
return "%s^\dag" % self._print(mat)
def _print_MatAdd(self, expr):
terms = list(expr.args)
tex = " + ".join(map(self._print, terms))
return tex
    def _print_MatMul(self, expr):
        # Parenthesize additive factors so the product reads unambiguously.
        from sympy import Add, MatAdd, HadamardProduct
        def parens(x):
            if isinstance(x, (Add, MatAdd, HadamardProduct)):
                return r"\left(%s\right)" % self._print(x)
            return self._print(x)
        return ' '.join(map(parens, expr.args))
def _print_HadamardProduct(self, expr):
from sympy import Add, MatAdd, MatMul
def parens(x):
if isinstance(x, (Add, MatAdd, MatMul)):
return r"\left(%s\right)" % self._print(x)
return self._print(x)
return ' \circ '.join(map(parens, expr.args))
    def _print_MatPow(self, expr):
        # Parenthesize composite bases before applying the exponent.
        base, exp = expr.base, expr.exp
        from sympy.matrices import MatrixSymbol
        if not isinstance(base, MatrixSymbol):
            return r"\left(%s\right)^{%s}" % (self._print(base), self._print(exp))
        else:
            return "%s^{%s}" % (self._print(base), self._print(exp))
def _print_ZeroMatrix(self, Z):
return r"\mathbb{0}"
def _print_Identity(self, I):
return r"\mathbb{I}"
def _print_tuple(self, expr):
return r"\left ( %s\right )" % \
r", \quad ".join([ self._print(i) for i in expr ])
    def _print_Tuple(self, expr):
        # SymPy Tuple prints exactly like a Python tuple.
        return self._print_tuple(expr)
    def _print_list(self, expr):
        # Square-bracketed, \quad-separated items.
        return r"\left [ %s\right ]" % \
            r", \quad ".join([ self._print(i) for i in expr ])
    def _print_dict(self, d):
        # Keys are sorted canonically for reproducible output.
        keys = sorted(d.keys(), key=default_sort_key)
        items = []
        for key in keys:
            val = d[key]
            items.append("%s : %s" % (self._print(key), self._print(val)))
        return r"\left \{ %s\right \}" % r", \quad ".join(items)
    def _print_Dict(self, expr):
        # SymPy Dict prints exactly like a Python dict.
        return self._print_dict(expr)
def _print_DiracDelta(self, expr, exp=None):
if len(expr.args) == 1 or expr.args[1] == 0:
tex = r"\delta\left(%s\right)" % self._print(expr.args[0])
else:
tex = r"\delta^{\left( %s \right)}\left( %s \right)" % (
self._print(expr.args[1]), self._print(expr.args[0]))
if exp:
tex = r"\left(%s\right)^{%s}" % (tex, exp)
return tex
def _print_Heaviside(self, expr, exp=None):
tex = r"\theta\left(%s\right)" % self._print(expr.args[0])
if exp:
tex = r"\left(%s\right)^{%s}" % (tex, exp)
return tex
    def _print_KroneckerDelta(self, expr, exp=None):
        # Atomic indices need no separating comma in the subscript.
        i = self._print(expr.args[0])
        j = self._print(expr.args[1])
        if expr.args[0].is_Atom and expr.args[1].is_Atom:
            tex = r'\delta_{%s %s}' % (i, j)
        else:
            tex = r'\delta_{%s, %s}' % (i, j)
        if exp:
            tex = r'\left(%s\right)^{%s}' % (tex, exp)
        return tex
    def _print_LeviCivita(self, expr, exp=None):
        # Same comma policy as KroneckerDelta, applied to \varepsilon.
        indices = map(self._print, expr.args)
        if all(x.is_Atom for x in expr.args):
            tex = r'\varepsilon_{%s}' % " ".join(indices)
        else:
            tex = r'\varepsilon_{%s}' % ", ".join(indices)
        if exp:
            tex = r'\left(%s\right)^{%s}' % (tex, exp)
        return tex
    def _print_ProductSet(self, p):
        # Identical factors collapse to a power, e.g. S x S x S -> S^3.
        if len(p.sets) > 1 and not has_variety(p.sets):
            return self._print(p.sets[0]) + "^%d" % len(p.sets)
        else:
            return r" \times ".join(self._print(set) for set in p.sets)
def _print_RandomDomain(self, d):
try:
return 'Domain: ' + self._print(d.as_boolean())
except Exception:
try:
return ('Domain: ' + self._print(d.symbols) + ' in ' +
self._print(d.set))
except:
return 'Domain on ' + self._print(d.symbols)
    def _print_FiniteSet(self, s):
        # Sort members canonically, then reuse the generic set renderer.
        items = sorted(s.args, key=default_sort_key)
        return self._print_set(items)
    def _print_set(self, s):
        # Render an iterable of members as \{a, b, ...\}.
        items = sorted(s, key=default_sort_key)
        items = ", ".join(map(self._print, items))
        return r"\left\{%s\right\}" % items
    _print_frozenset = _print_set
def _print_Range(self, s):
if len(s) > 4:
it = iter(s)
printset = next(it), next(it), '\ldots', s._last_element
else:
printset = tuple(s)
return (r"\left\{"
+ r", ".join(self._print(el) for el in printset)
+ r"\right\}")
def _print_SeqFormula(self, s):
if s.start is S.NegativeInfinity:
stop = s.stop
printset = ('\ldots', s.coeff(stop - 3), s.coeff(stop - 2),
s.coeff(stop - 1), s.coeff(stop))
elif s.stop is S.Infinity or s.length > 4:
printset = s[:4]
printset.append('\ldots')
else:
printset = tuple(s)
return (r"\left\["
+ r", ".join(self._print(el) for el in printset)
+ r"\right\]")
_print_SeqPer = _print_SeqFormula
_print_SeqAdd = _print_SeqFormula
_print_SeqMul = _print_SeqFormula
def _print_Interval(self, i):
if i.start == i.end:
return r"\left\{%s\right\}" % self._print(i.start)
else:
if i.left_open:
left = '('
else:
left = '['
if i.right_open:
right = ')'
else:
right = ']'
return r"\left%s%s, %s\right%s" % \
(left, self._print(i.start), self._print(i.end), right)
    def _print_Union(self, u):
        # Operands joined by \cup.
        return r" \cup ".join([self._print(i) for i in u.args])
    def _print_Complement(self, u):
        # Operands joined by \setminus.
        return r" \setminus ".join([self._print(i) for i in u.args])
    def _print_Intersection(self, u):
        # Operands joined by \cap.
        return r" \cap ".join([self._print(i) for i in u.args])
    def _print_SymmetricDifference(self, u):
        # Operands joined by \triangle.
        return r" \triangle ".join([self._print(i) for i in u.args])
    # Standard number sets render as fixed blackboard-bold symbols.
    def _print_EmptySet(self, e):
        return r"\emptyset"
    def _print_Naturals(self, n):
        return r"\mathbb{N}"
    def _print_Naturals0(self, n):
        return r"\mathbb{N_0}"
    def _print_Integers(self, i):
        return r"\mathbb{Z}"
    def _print_Reals(self, i):
        return r"\mathbb{R}"
    def _print_Complexes(self, i):
        return r"\mathbb{C}"
    def _print_ImageSet(self, s):
        # Set-builder form: {f(x) | x in S}.
        return r"\left\{%s\; |\; %s \in %s\right\}" % (
            self._print(s.lamda.expr),
            ', '.join([self._print(var) for var in s.lamda.variables]),
            self._print(s.base_set))
    def _print_ConditionSet(self, s):
        # Set-builder form: {x | x in S and condition}.
        vars_print = ', '.join([self._print(var) for var in Tuple(s.sym)])
        return r"\left\{%s\; |\; %s \in %s \wedge %s \right\}" % (
            vars_print,
            vars_print,
            self._print(s.base_set),
            self._print(s.condition.as_expr()))
    def _print_ComplexRegion(self, s):
        # Set-builder form: {expr | vars in sets}.
        vars_print = ', '.join([self._print(var) for var in s.args[0].variables])
        return r"\left\{%s\; |\; %s \in %s \right\}" % (
            self._print(s.args[0].expr),
            vars_print,
            self._print(s.sets))
    def _print_Contains(self, e):
        # Membership relation: element \in set.
        return r"%s \in %s" % tuple(self._print(a) for a in e.args)
def _print_FourierSeries(self, s):
return self._print_Add(s.truncate()) + self._print(' + \ldots')
def _print_FormalPowerSeries(self, s):
return self._print_Add(s.truncate())
def _print_FormalPowerSeries(self, s):
return self._print_Add(s.truncate())
    # Ground domains render as blackboard-bold symbols.
    def _print_FiniteField(self, expr):
        return r"\mathbb{F}_{%s}" % expr.mod
    def _print_IntegerRing(self, expr):
        return r"\mathbb{Z}"
    def _print_RationalField(self, expr):
        return r"\mathbb{Q}"
    def _print_RealField(self, expr):
        return r"\mathbb{R}"
    def _print_ComplexField(self, expr):
        return r"\mathbb{C}"
    def _print_PolynomialRing(self, expr):
        # Polynomial ring: domain[gens].
        domain = self._print(expr.domain)
        symbols = ", ".join(map(self._print, expr.symbols))
        return r"%s\left[%s\right]" % (domain, symbols)
    def _print_FractionField(self, expr):
        # Rational-function field: domain(gens).
        domain = self._print(expr.domain)
        symbols = ", ".join(map(self._print, expr.symbols))
        return r"%s\left(%s\right)" % (domain, symbols)
    def _print_PolynomialRingBase(self, expr):
        # Localized (non-polynomial) rings get an S_<^{-1} prefix.
        domain = self._print(expr.domain)
        symbols = ", ".join(map(self._print, expr.symbols))
        inv = ""
        if not expr.is_Poly:
            inv = r"S_<^{-1}"
        return r"%s%s\left[%s\right]" % (inv, domain, symbols)
    def _print_Poly(self, poly):
        # Print as Poly(expr, gens, domain=...); the class name becomes a
        # latex macro when it is a recognized latex function.
        cls = poly.__class__.__name__
        expr = self._print(poly.as_expr())
        gens = list(map(self._print, poly.gens))
        domain = "domain=%s" % self._print(poly.get_domain())
        args = ", ".join([expr] + gens + [domain])
        if cls in accepted_latex_functions:
            tex = r"\%s {\left (%s \right )}" % (cls, args)
        else:
            tex = r"\operatorname{%s}{\left( %s \right)}" % (cls, args)
        return tex
    def _print_RootOf(self, root):
        # Print as RootOf(expr, index).
        cls = root.__class__.__name__
        expr = self._print(root.expr)
        index = root.index
        if cls in accepted_latex_functions:
            return r"\%s {\left(%s, %d\right)}" % (cls, expr, index)
        else:
            return r"\operatorname{%s} {\left(%s, %d\right)}" % (cls, expr, index)
    def _print_RootSum(self, expr):
        # Print as RootSum(expr[, fun]); an identity map is omitted.
        cls = expr.__class__.__name__
        args = [self._print(expr.expr)]
        if expr.fun is not S.IdentityFunction:
            args.append(self._print(expr.fun))
        if cls in accepted_latex_functions:
            return r"\%s {\left(%s\right)}" % (cls, ", ".join(args))
        else:
            return r"\operatorname{%s} {\left(%s\right)}" % (cls, ", ".join(args))
    def _print_PolyElement(self, poly):
        # Delegate to the element's own str() machinery with latex templates.
        mul_symbol = self._settings['mul_symbol_latex']
        return poly.str(self, PRECEDENCE, "{%s}^{%d}", mul_symbol)
    def _print_FracElement(self, frac):
        # Whole elements print as the numerator; otherwise as a fraction.
        if frac.denom == 1:
            return self._print(frac.numer)
        else:
            numer = self._print(frac.numer)
            denom = self._print(frac.denom)
            return r"\frac{%s}{%s}" % (numer, denom)
def _print_euler(self, expr):
return r"E_{%s}" % self._print(expr.args[0])
def _print_catalan(self, expr):
return r"C_{%s}" % self._print(expr.args[0])
    # The integral-transform printers below all share the shape
    # \mathcal{X}_{var}\left[function\right]\left(transform var\right),
    # with ^{-1} on the calligraphic letter for the inverse transforms.
    # args are (function, original var, transform var).
    def _print_MellinTransform(self, expr):
        return r"\mathcal{M}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    def _print_InverseMellinTransform(self, expr):
        return r"\mathcal{M}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    def _print_LaplaceTransform(self, expr):
        return r"\mathcal{L}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    def _print_InverseLaplaceTransform(self, expr):
        return r"\mathcal{L}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    def _print_FourierTransform(self, expr):
        return r"\mathcal{F}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    def _print_InverseFourierTransform(self, expr):
        return r"\mathcal{F}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    def _print_SineTransform(self, expr):
        return r"\mathcal{SIN}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    def _print_InverseSineTransform(self, expr):
        return r"\mathcal{SIN}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    def _print_CosineTransform(self, expr):
        return r"\mathcal{COS}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    def _print_InverseCosineTransform(self, expr):
        return r"\mathcal{COS}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    def _print_DMP(self, p):
        # Dense multivariate polynomial: convert to a SymPy expression via
        # the attached ring when possible, otherwise fall back to repr().
        try:
            if p.ring is not None:
                # TODO incorporate order
                return self._print(p.ring.to_sympy(p))
        except SympifyError:
            pass
        return self._print(repr(p))
    def _print_DMF(self, p):
        # Dense multivariate fractions print the same way as DMP.
        return self._print_DMP(p)
    # Category-theory printers (sympy.categories).
    def _print_Object(self, object):
        # An object prints as its name, run through the symbol printer.
        return self._print(Symbol(object.name))
    def _print_Morphism(self, morphism):
        # domain -> codomain with a \rightarrow arrow.
        domain = self._print(morphism.domain)
        codomain = self._print(morphism.codomain)
        return "%s\\rightarrow %s" % (domain, codomain)
    def _print_NamedMorphism(self, morphism):
        # name:domain -> codomain.
        pretty_name = self._print(Symbol(morphism.name))
        pretty_morphism = self._print_Morphism(morphism)
        return "%s:%s" % (pretty_name, pretty_morphism)
    def _print_IdentityMorphism(self, morphism):
        # Identity prints as the named morphism "id".
        from sympy.categories import NamedMorphism
        return self._print_NamedMorphism(NamedMorphism(
            morphism.domain, morphism.codomain, "id"))
    def _print_CompositeMorphism(self, morphism):
        # All components of the morphism have names and it is thus
        # possible to build the name of the composite.
        # Components are stored innermost-first, so reverse them to get
        # the conventional g \circ f order.
        component_names_list = [self._print(Symbol(component.name)) for
                                component in morphism.components]
        component_names_list.reverse()
        component_names = "\\circ ".join(component_names_list) + ":"
        pretty_morphism = self._print_Morphism(morphism)
        return component_names + pretty_morphism
    def _print_Category(self, morphism):
        # A category is set in bold: \mathbf{name}.
        return "\\mathbf{%s}" % self._print(Symbol(morphism.name))
def _print_Diagram(self, diagram):
if not diagram.premises:
# This is an empty diagram.
return self._print(S.EmptySet)
latex_result = self._print(diagram.premises)
if diagram.conclusions:
latex_result += "\\Longrightarrow %s" % \
self._print(diagram.conclusions)
return latex_result
def _print_DiagramGrid(self, grid):
latex_result = "\\begin{array}{%s}\n" % ("c" * grid.width)
for i in range(grid.height):
for j in range(grid.width):
if grid[i, j]:
latex_result += latex(grid[i, j])
latex_result += " "
if j != grid.width - 1:
latex_result += "& "
if i != grid.height - 1:
latex_result += "\\\\"
latex_result += "\n"
latex_result += "\\end{array}\n"
return latex_result
    def _print_FreeModule(self, M):
        # Free module of rank n over ring R prints as {R}^{n}.
        return '{%s}^{%s}' % (self._print(M.ring), self._print(M.rank))
    def _print_FreeModuleElement(self, m):
        # Print as row vector for convenience, for now.
        return r"\left[ %s \right]" % ",".join(
            '{' + self._print(x) + '}' for x in m)
    def _print_SubModule(self, m):
        # Submodule spanned by its generators: \left< g1,g2,... \right>.
        return r"\left< %s \right>" % ",".join(
            '{' + self._print(x) + '}' for x in m.gens)
    def _print_ModuleImplementedIdeal(self, m):
        # Ideal printed via its underlying module's generators; each
        # generator is a 1-element sequence, hence the [x] unpacking.
        return r"\left< %s \right>" % ",".join(
            '{' + self._print(x) + '}' for [x] in m._module.gens)
    def _print_QuotientRing(self, R):
        # TODO nicer fractions for few generators...
        # Quotient ring R/I rendered as a fraction.
        return r"\frac{%s}{%s}" % (self._print(R.ring), self._print(R.base_ideal))
    def _print_QuotientRingElement(self, x):
        # Coset representative: data + base ideal.
        return r"{%s} + {%s}" % (self._print(x.data), self._print(x.ring.base_ideal))
    def _print_QuotientModuleElement(self, m):
        # Coset representative: data + killed submodule.
        return r"{%s} + {%s}" % (self._print(m.data),
                                 self._print(m.module.killed_module))
    def _print_QuotientModule(self, M):
        # TODO nicer fractions for few generators...
        return r"\frac{%s}{%s}" % (self._print(M.base),
                                   self._print(M.killed_module))
    def _print_MatrixHomomorphism(self, h):
        # Homomorphism shown as its matrix, with domain \to codomain.
        return r"{%s} : {%s} \to {%s}" % (self._print(h._sympy_matrix()),
                                          self._print(h.domain), self._print(h.codomain))
    def _print_BaseScalarField(self, field):
        # Coordinate function, set in bold roman: \boldsymbol{\mathrm{x}}.
        string = field._coord_sys._names[field._index]
        return r'\boldsymbol{\mathrm{%s}}' % self._print(Symbol(string))
    def _print_BaseVectorField(self, field):
        # Coordinate vector field: \partial_{x}.
        string = field._coord_sys._names[field._index]
        return r'\partial_{%s}' % self._print(Symbol(string))
def _print_Differential(self, diff):
field = diff._form_field
if hasattr(field, '_coord_sys'):
string = field._coord_sys._names[field._index]
return r'\mathrm{d}%s' % self._print(Symbol(string))
else:
return 'd(%s)' % self._print(field)
string = self._print(field)
return r'\mathrm{d}\left(%s\right)' % string
    def _print_Tr(self, p):
        #Todo: Handle indices
        # Trace operator: \mbox{Tr}(arg).
        contents = self._print(p.args[0])
        return r'\mbox{Tr}\left(%s\right)' % (contents)
    def _print_totient(self, expr):
        # Euler's totient function: \phi(n).
        return r'\phi\left( %s \right)' % self._print(expr.args[0])
    def _print_divisor_sigma(self, expr, exp=None):
        # Divisor function sigma_k(n); expr.args is (n,) or (n, k).
        if len(expr.args) == 2:
            # Subscript k comes before the argument: \sigma_k(n).
            tex = r"_%s\left(%s\right)" % tuple(map(self._print,
                                                (expr.args[1], expr.args[0])))
        else:
            tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            # A power of the whole function: \sigma^{exp}_k(n).
            return r"\sigma^{%s}%s" % (self._print(exp), tex)
        return r"\sigma%s" % tex
    def _print_udivisor_sigma(self, expr, exp=None):
        # Unitary divisor function sigma^*_k(n); expr.args is (n,) or (n, k).
        if len(expr.args) == 2:
            tex = r"_%s\left(%s\right)" % tuple(map(self._print,
                                                (expr.args[1], expr.args[0])))
        else:
            tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            # NOTE(review): "\sigma^*^{exp}" is a double superscript, which
            # LaTeX rejects; "{\sigma^*}^{exp}" would compile -- confirm
            # downstream expectations before changing the emitted string.
            return r"\sigma^*^{%s}%s" % (self._print(exp), tex)
        return r"\sigma^*%s" % tex
def translate(s):
    r'''
    Check for a modifier ending the string. If present, convert the
    modifier to latex and translate the rest recursively.
    Given a description of a Greek letter or other special character,
    return the appropriate latex.
    Let everything else pass as given.
    >>> from sympy.printing.latex import translate
    >>> translate('alphahatdotprime')
    "{\\dot{\\hat{\\alpha}}}'"
    '''
    # Explicit dictionary entries win outright.
    direct = tex_greek_dictionary.get(s)
    if direct:
        return direct
    # Bare Greek letters and special symbols just need a backslash.
    if s.lower() in greek_letters_set or s in other_symbols:
        return "\\" + s
    # Otherwise peel the longest matching modifier suffix and recurse on
    # the remainder; names with no recognized modifier pass through as-is.
    for suffix in sorted(modifier_dict, key=len, reverse=True):
        if s.lower().endswith(suffix) and len(s) > len(suffix):
            return modifier_dict[suffix](translate(s[:-len(suffix)]))
    return s
def latex(expr, **settings):
    r"""
    Convert the given expression to LaTeX representation.
    >>> from sympy import latex, pi, sin, asin, Integral, Matrix, Rational
    >>> from sympy.abc import x, y, mu, r, tau
    >>> print(latex((2*tau)**Rational(7,2)))
    8 \sqrt{2} \tau^{\frac{7}{2}}
    order: Any of the supported monomial orderings (currently "lex", "grlex", or
    "grevlex"), "old", and "none". This parameter does nothing for Mul objects.
    Setting order to "old" uses the compatibility ordering for Add defined in
    Printer. For very large expressions, set the 'order' keyword to 'none' if
    speed is a concern.
    mode: Specifies how the generated code will be delimited. 'mode' can be one
    of 'plain', 'inline', 'equation' or 'equation*'. If 'mode' is set to
    'plain', then the resulting code will not be delimited at all (this is the
    default). If 'mode' is set to 'inline' then inline LaTeX $ $ will be used.
    If 'mode' is set to 'equation' or 'equation*', the resulting code will be
    enclosed in the 'equation' or 'equation*' environment (remember to import
    'amsmath' for 'equation*'), unless the 'itex' option is set. In the latter
    case, the ``$$ $$`` syntax is used.
    >>> print(latex((2*mu)**Rational(7,2), mode='plain'))
    8 \sqrt{2} \mu^{\frac{7}{2}}
    >>> print(latex((2*tau)**Rational(7,2), mode='inline'))
    $8 \sqrt{2} \tau^{\frac{7}{2}}$
    >>> print(latex((2*mu)**Rational(7,2), mode='equation*'))
    \begin{equation*}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation*}
    >>> print(latex((2*mu)**Rational(7,2), mode='equation'))
    \begin{equation}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation}
    itex: Specifies if itex-specific syntax is used, including emitting ``$$ $$``.
    >>> print(latex((2*mu)**Rational(7,2), mode='equation', itex=True))
    $$8 \sqrt{2} \mu^{\frac{7}{2}}$$
    fold_frac_powers: Emit "^{p/q}" instead of "^{\frac{p}{q}}" for fractional
    powers.
    >>> print(latex((2*tau)**Rational(7,2), fold_frac_powers=True))
    8 \sqrt{2} \tau^{7/2}
    fold_func_brackets: Fold function brackets where applicable.
    >>> print(latex((2*tau)**sin(Rational(7,2))))
    \left(2 \tau\right)^{\sin{\left (\frac{7}{2} \right )}}
    >>> print(latex((2*tau)**sin(Rational(7,2)), fold_func_brackets = True))
    \left(2 \tau\right)^{\sin {\frac{7}{2}}}
    fold_short_frac: Emit "p / q" instead of "\frac{p}{q}" when the
    denominator is simple enough (at most two terms and no powers).
    The default value is `True` for inline mode, False otherwise.
    >>> print(latex(3*x**2/y))
    \frac{3 x^{2}}{y}
    >>> print(latex(3*x**2/y, fold_short_frac=True))
    3 x^{2} / y
    long_frac_ratio: The allowed ratio of the width of the numerator to the
    width of the denominator before we start breaking off long fractions.
    The default value is 2.
    >>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=2))
    \frac{\int r\, dr}{2 \pi}
    >>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=0))
    \frac{1}{2 \pi} \int r\, dr
    mul_symbol: The symbol to use for multiplication. Can be one of None,
    "ldot", "dot", or "times".
    >>> print(latex((2*tau)**sin(Rational(7,2)), mul_symbol="times"))
    \left(2 \times \tau\right)^{\sin{\left (\frac{7}{2} \right )}}
    inv_trig_style: How inverse trig functions should be displayed. Can be one
    of "abbreviated", "full", or "power". Defaults to "abbreviated".
    >>> print(latex(asin(Rational(7,2))))
    \operatorname{asin}{\left (\frac{7}{2} \right )}
    >>> print(latex(asin(Rational(7,2)), inv_trig_style="full"))
    \arcsin{\left (\frac{7}{2} \right )}
    >>> print(latex(asin(Rational(7,2)), inv_trig_style="power"))
    \sin^{-1}{\left (\frac{7}{2} \right )}
    mat_str: Which matrix environment string to emit. "smallmatrix", "matrix",
    "array", etc. Defaults to "smallmatrix" for inline mode, "matrix" for
    matrices of no more than 10 columns, and "array" otherwise.
    >>> print(latex(Matrix(2, 1, [x, y])))
    \left[\begin{matrix}x\\y\end{matrix}\right]
    >>> print(latex(Matrix(2, 1, [x, y]), mat_str = "array"))
    \left[\begin{array}{c}x\\y\end{array}\right]
    mat_delim: The delimiter to wrap around matrices. Can be one of "[", "(",
    or the empty string. Defaults to "[".
    >>> print(latex(Matrix(2, 1, [x, y]), mat_delim="("))
    \left(\begin{matrix}x\\y\end{matrix}\right)
    symbol_names: Dictionary of symbols and the custom strings they should be
    emitted as.
    >>> print(latex(x**2, symbol_names={x:'x_i'}))
    x_i^{2}
    ``latex`` also supports the builtin container types list, tuple, and
    dictionary.
    >>> print(latex([2/x, y], mode='inline'))
    $\left [ 2 / x, \quad y\right ]$
    """
    # A fresh printer is built per call so per-call settings never leak
    # between invocations.
    return LatexPrinter(settings).doprint(expr)
def print_latex(expr, **settings):
    """Prints LaTeX representation of the given expression."""
    rendered = latex(expr, **settings)
    print(rendered)
| bsd-3-clause |
eblossom/gnuradio | gr-analog/python/analog/qa_ctcss_squelch.py | 47 | 3066 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, analog, blocks
class test_ctcss_squelch(gr_unittest.TestCase):
    """QA tests for analog.ctcss_squelch_ff (setters plus gated and
    ungated runtime behaviour)."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_ctcss_squelch_001(self):
        # Test set/gets
        rate = 1
        rate2 = 2
        freq = 100
        level = 0.5
        length = 1
        ramp = 1
        ramp2 = 2
        gate = True
        gate2 = False

        op = analog.ctcss_squelch_ff(rate, freq, level,
                                     length, ramp, gate)

        op.set_ramp(ramp2)
        r = op.ramp()
        self.assertEqual(ramp2, r)

        op.set_gate(gate2)
        g = op.gate()
        self.assertEqual(gate2, g)

    def test_ctcss_squelch_002(self):
        # Test runtime, gate=True
        rate = 1
        freq = 100
        level = 0.0
        length = 1
        ramp = 1
        gate = True

        # list(...) so the data is subscriptable and reusable on Python 3,
        # where map() returns a one-shot, non-indexable iterator.
        src_data = list(map(lambda x: float(x) / 10.0, range(1, 40)))
        expected_result = src_data
        # NOTE(review): expected_result aliases src_data, so this also
        # zeroes the first sample fed to the source below; kept as-is to
        # preserve the original stimulus -- confirm whether a copy
        # (expected_result = src_data[:]) was intended.
        expected_result[0] = 0

        src = blocks.vector_source_f(src_data)
        op = analog.ctcss_squelch_ff(rate, freq, level,
                                     length, ramp, gate)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()

        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 4)

    def test_ctcss_squelch_003(self):
        # Test runtime, gate=False
        rate = 1
        freq = 100
        level = 0.5
        length = 1
        ramp = 1
        gate = False

        # list(...) for Python 3 compatibility (see test 002).
        src_data = list(map(lambda x: float(x) / 10.0, range(1, 40)))
        src = blocks.vector_source_f(src_data)
        op = analog.ctcss_squelch_ff(rate, freq, level,
                                     length, ramp, gate)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()

        # With the squelch closed for the ramp period, the first five
        # output samples are muted to zero.
        expected_result = src_data
        expected_result[0:5] = [0, 0, 0, 0, 0]
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 4)
if __name__ == '__main__':
    # Run the suite via GNU Radio's unittest wrapper (also writes XML results).
    gr_unittest.run(test_ctcss_squelch, "test_ctcss_squelch.xml")
| gpl-3.0 |
miles0411/pm | venv/lib/python2.7/site-packages/pip/_vendor/lockfile/pidlockfile.py | 488 | 6221 | # -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import os
import sys
import errno
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
    """ Lockfile implemented as a Unix PID file.

    The lock file is a normal file named by the attribute `path`.
    A lock's PID file contains a single line of text, containing
    the process ID (PID) of the process that acquired the lock.

    >>> lock = PIDLockFile('somefile')
    >>> lock = PIDLockFile('somefile')
    """

    def __init__(self, path, threaded=False, timeout=None):
        # pid lockfiles don't support threaded operation, so always force
        # False as the threaded arg.
        LockBase.__init__(self, path, False, timeout)
        # The PID file itself is the unique lock artifact; no per-host or
        # per-thread suffix is needed. (Previously-computed dirname/basename
        # locals were unused and have been removed.)
        self.unique_name = self.path

    def read_pid(self):
        """ Get the PID from the lock file.
        """
        return read_pid_from_pidfile(self.path)

    def is_locked(self):
        """ Test if the lock is currently held.

        The lock is held if the PID file for this lock exists.
        """
        return os.path.exists(self.path)

    def i_am_locking(self):
        """ Test if the lock is held by the current process.

        Returns ``True`` if the current process ID matches the
        number stored in the PID file.
        """
        return self.is_locked() and os.getpid() == self.read_pid()

    def acquire(self, timeout=None):
        """ Acquire the lock.

        Creates the PID file for this lock, or raises an error if
        the lock could not be acquired.

        A ``timeout`` of ``None`` falls back to the instance default;
        a timeout <= 0 fails immediately with ``AlreadyLocked`` when
        the lock is contended.
        """
        # The previous "timeout is not None and timeout or self.timeout"
        # and/or idiom silently replaced an explicit timeout of 0 with the
        # instance default; compare against None explicitly instead.
        if timeout is None:
            timeout = self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout
        while True:
            try:
                write_pid_to_pidfile(self.path)
            except OSError as exc:
                if exc.errno == errno.EEXIST:
                    # The lock creation failed. Maybe sleep a bit.
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    # Poll at a tenth of the timeout; guard against
                    # None/zero/negative values (time.sleep rejects
                    # negative arguments).
                    if timeout is not None and timeout > 0:
                        time.sleep(timeout / 10)
                    else:
                        time.sleep(0.1)
                else:
                    raise LockFailed("failed to create %s" % self.path)
            else:
                return

    def release(self):
        """ Release the lock.

        Removes the PID file to release the lock, or raises an
        error if the current process does not hold the lock.
        """
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        if not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me" % self.path)
        remove_existing_pidfile(self.path)

    def break_lock(self):
        """ Break an existing lock.

        Removes the PID file if it already exists, otherwise does
        nothing.
        """
        remove_existing_pidfile(self.path)
def read_pid_from_pidfile(pidfile_path):
    """ Read the PID recorded in the named PID file.

    Read and return the numeric PID recorded as text in the named
    PID file. If the PID file cannot be read, or if the content is
    not a valid PID, return ``None``.
    """
    try:
        pidfile = open(pidfile_path, 'r')
    except IOError:
        # Missing or unreadable file: no PID recorded.
        return None
    # According to the FHS 2.3 section on PID files in /var/run, the file
    # holds the ASCII decimal PID followed by a newline.  Readers should be
    # somewhat flexible: ignore surrounding whitespace, leading zeroes, a
    # missing trailing newline, or extra lines after the first.
    with pidfile:
        first_line = pidfile.readline().strip()
    try:
        return int(first_line)
    except ValueError:
        return None
def write_pid_to_pidfile(pidfile_path):
    """ Write the PID in the named PID file.

    Get the numeric process ID ("PID") of the current process
    and write it to the named file as a line of text.
    """
    # O_EXCL makes creation fail with EEXIST if the file is already there,
    # which is the atomic "acquire" primitive the PID lock relies on.
    fd = os.open(pidfile_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644)
    pidfile = os.fdopen(fd, 'w')
    try:
        # According to the FHS 2.3 section on PID files in /var/run:
        # the file must consist of the process identifier in ASCII-encoded
        # decimal, followed by a newline character.
        pidfile.write("%(pid)d\n" % {'pid': os.getpid()})
    finally:
        pidfile.close()
def remove_existing_pidfile(pidfile_path):
    """ Remove the named PID file if it exists.

    Removing a PID file that doesn't already exist puts us in the
    desired state, so we ignore the condition if the file does not
    exist.
    """
    try:
        os.remove(pidfile_path)
    except OSError as exc:
        # A missing file already satisfies the postcondition; anything
        # else (permissions, I/O) propagates.
        if exc.errno != errno.ENOENT:
            raise
| apache-2.0 |
roadmapper/ansible | test/units/modules/network/onyx/test_onyx_snmp.py | 9 | 6192 | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_snmp
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxSNMPModule(TestOnyxModule):
    """Unit tests for the onyx_snmp module: each test sets module args and
    checks whether the expected CLI commands would be generated."""
    module = onyx_snmp
    # NOTE(review): this class attribute appears unused by the visible
    # tests (setUp immediately shadows it on the instance) -- confirm
    # before removing.
    enabled = False
    def setUp(self):
        # Mock out the device interactions: _show_snmp_config returns the
        # fixture below, and load_config is stubbed so no device is touched.
        self.enabled = False
        super(TestOnyxSNMPModule, self).setUp()
        self.mock_get_config = patch.object(
            onyx_snmp.OnyxSNMPModule, "_show_snmp_config")
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch(
            'ansible.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()
    def tearDown(self):
        super(TestOnyxSNMPModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
    def load_fixtures(self, commands=None, transport='cli'):
        # Every test reads the same canned "show snmp" output.
        config_file = 'onyx_snmp_show.cfg'
        data = load_fixture(config_file)
        self.get_config.return_value = data
        self.load_config.return_value = None
    # Tests follow the pattern: "no_change" cases match the fixture state
    # (no commands generated); "with_change" cases assert the exact CLI
    # commands that would be pushed.
    def test_snmp_state_no_change(self):
        set_module_args(dict(state_enabled=True))
        self.execute_module(changed=False)
    def test_snmp_state_with_change(self):
        set_module_args(dict(state_enabled=False))
        commands = ['no snmp-server enable']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_contact_no_change(self):
        set_module_args(dict(contact_name='sara'))
        self.execute_module(changed=False)
    def test_snmp_contact_with_change(self):
        set_module_args(dict(contact_name='Omar'))
        commands = ['snmp-server contact Omar']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_location_no_change(self):
        set_module_args(dict(location='Jordan'))
        self.execute_module(changed=False)
    def test_snmp_location_with_change(self):
        set_module_args(dict(location='London'))
        commands = ['snmp-server location London']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_communities_state_no_change(self):
        set_module_args(dict(communities_enabled=True))
        self.execute_module(changed=False)
    def test_snmp_communities_state_with_change(self):
        set_module_args(dict(communities_enabled=False))
        commands = ['no snmp-server enable communities']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_multi_communities_state_with_no_change(self):
        set_module_args(dict(multi_communities_enabled=True))
        self.execute_module(changed=False)
    def test_snmp_multi_communities_state_with_change(self):
        set_module_args(dict(multi_communities_enabled=False))
        commands = ['no snmp-server enable mult-communities']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_communities_no_change(self):
        set_module_args(dict(snmp_communities=[dict(community_name='community_2',
                                                    community_type='read-write')]))
        self.execute_module(changed=False)
    def test_snmp_communities_with_change(self):
        set_module_args(dict(snmp_communities=[dict(community_name='community_2',
                                                    community_type='read-only')]))
        commands = ['snmp-server community community_2 ro']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_communities_delete_with_change(self):
        set_module_args(dict(snmp_communities=[dict(community_name='community_1',
                                                    state='absent')]))
        commands = ['no snmp-server community community_1']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_notify_state_no_change(self):
        set_module_args(dict(notify_enabled=True))
        self.execute_module(changed=False)
    def test_snmp_notify_state_with_change(self):
        set_module_args(dict(notify_enabled=False))
        commands = ['no snmp-server enable notify']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_notify_port_no_change(self):
        set_module_args(dict(notify_port='1'))
        self.execute_module(changed=False)
    def test_snmp_notify_port_with_change(self):
        set_module_args(dict(notify_port='2'))
        commands = ['snmp-server notify port 2']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_notify_community_no_change(self):
        set_module_args(dict(notify_community='community_1'))
        self.execute_module(changed=False)
    def test_snmp_notify_community_with_change(self):
        set_module_args(dict(notify_community='community_2'))
        commands = ['snmp-server notify community community_2']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_notify_send_test_with_change(self):
        set_module_args(dict(notify_send_test='yes'))
        commands = ['snmp-server notify send-test']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_notify_event_with_change(self):
        set_module_args(dict(notify_event='interface-up'))
        commands = ['snmp-server notify event interface-up']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_permissions_with_change(self):
        set_module_args(dict(snmp_permissions=[dict(state_enabled=True,
                                                    permission_type='RFC1213-MIB')]))
        commands = ['snmp-server enable set-permission RFC1213-MIB']
        self.execute_module(changed=True, commands=commands)
    def test_snmp_engine_id_reset_with_change(self):
        set_module_args(dict(engine_id_reset='yes'))
        commands = ['snmp-server engineID reset']
        self.execute_module(changed=True, commands=commands)
| gpl-3.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dnslib/ranges.py | 19 | 4067 | # -*- coding: utf-8 -*-
"""
Wrapper around property builtin to restrict attribute to defined
integer value range (throws ValueError).
Intended to ensure that values packed with struct are in the
correct range
>>> class T(object):
... a = range_property('a',-100,100)
... b = B('b')
... c = H('c')
... d = I('d')
... e = instance_property('e',(int,bool))
>>> t = T()
>>> for i in [0,100,-100]:
... t.a = i
... assert t.a == i
>>> t.a = 101
Traceback (most recent call last):
...
ValueError: Attribute 'a' must be between -100-100 [101]
>>> t.a = -101
Traceback (most recent call last):
...
ValueError: Attribute 'a' must be between -100-100 [-101]
>>> t.a = 'blah'
Traceback (most recent call last):
...
ValueError: Attribute 'a' must be between -100-100 [blah]
>>> t.e = 999
>>> t.e = False
>>> t.e = None
Traceback (most recent call last):
...
ValueError: Attribute 'e' must be instance of ...
>>> check_range("test",123,0,255)
>>> check_range("test",999,0,255)
Traceback (most recent call last):
...
ValueError: Attribute 'test' must be between 0-255 [999]
>>> check_instance("test",123,int)
>>> check_instance("test","xxx",int)
Traceback (most recent call last):
...
ValueError: Attribute 'test' must be instance of ...
"""
import sys
# Python 2/3 compatibility: the integer and byte-string types differ
# between the two (long and byte-carrying str only exist on Python 2).
if sys.version < '3':
    int_types = (int, long,)
    byte_types = (str, bytearray)
else:
    int_types = (int,)
    byte_types = (bytes, bytearray)

def check_instance(name, val, types):
    """Raise ValueError unless ``val`` is an instance of ``types``."""
    if isinstance(val, types):
        return
    raise ValueError("Attribute '%s' must be instance of %s [%s]" %
                     (name, types, type(val)))

def check_bytes(name, val):
    """Raise ValueError unless ``val`` is a byte type (bytes/bytearray)."""
    return check_instance(name, val, byte_types)

def instance_property(attr, types):
    """Build a property that stores ``attr`` under ``_<attr>`` and
    type-checks assigned values against ``types``."""
    key = "_%s" % attr

    def getter(obj):
        return getattr(obj, key)

    def setter(obj, val):
        if not isinstance(val, types):
            raise ValueError("Attribute '%s' must be instance of %s [%s]" %
                             (attr, types, type(val)))
        setattr(obj, key, val)
    return property(getter, setter)

def BYTES(attr):
    """Property restricted to byte types (bytes/bytearray)."""
    return instance_property(attr, byte_types)

def check_range(name, val, min, max):
    """Raise ValueError unless ``val`` is an integer with min <= val <= max."""
    in_range = isinstance(val, int_types) and min <= val <= max
    if not in_range:
        raise ValueError("Attribute '%s' must be between %d-%d [%s]" %
                         (name, min, max, val))
def range_property(attr, min, max):
    """Build a property that stores ``attr`` under ``_<attr>`` and only
    accepts integers with min <= value <= max."""
    key = "_%s" % attr

    def getter(obj):
        return getattr(obj, key)

    def setter(obj, val):
        if not (isinstance(val, int_types) and min <= val <= max):
            raise ValueError("Attribute '%s' must be between %d-%d [%s]" %
                             (attr, min, max, val))
        setattr(obj, key, val)
    return property(getter, setter)

def B(attr):
    """
    Unsigned Byte
    """
    return range_property(attr, 0, 255)

def H(attr):
    """
    Unsigned Short
    """
    return range_property(attr, 0, 65535)

def I(attr):
    """
    Unsigned Long
    """
    return range_property(attr, 0, 4294967295)
def ntuple_range(attr, n, min, max):
    """Build a property holding an n-element tuple whose elements are all
    integers in the inclusive range min..max."""
    key = "_%s" % attr

    def element_ok(x):
        return isinstance(x, int_types) and min <= x <= max

    def getter(obj):
        return getattr(obj, key)

    def setter(obj, val):
        if len(val) != n:
            raise ValueError("Attribute '%s' must be tuple with %d elements [%s]" %
                             (attr, n, val))
        if not all(element_ok(x) for x in val):
            raise ValueError("Attribute '%s' elements must be between %d-%d [%s]" %
                             (attr, min, max, val))
        setattr(obj, key, val)
    return property(getter, setter)

def IP4(attr):
    """Property for an IPv4 address as a 4-tuple of octets (0-255)."""
    return ntuple_range(attr, 4, 0, 255)

def IP6(attr):
    """Property for an IPv6 address as a 16-tuple of bytes (0-255)."""
    return ntuple_range(attr, 16, 0, 255)
if __name__ == '__main__':
    # Run the module doctests; ELLIPSIS lets examples elide long output.
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS)
| gpl-3.0 |
msebire/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_intern.py | 315 | 1405 | # Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
    """2to3 fixer rewriting ``intern(s)`` into ``sys.intern(s)``."""
    BM_compatible = True
    order = "pre"
    # Match a call to the bare name 'intern' with a single positional
    # argument (no keyword arguments), capturing the parens, the argument,
    # and any trailing trailers (e.g. attribute access on the result).
    PATTERN = """
    power< 'intern'
           trailer< lpar='('
                    ( not(arglist | argument<any '=' any>) obj=any
                      | obj=arglist<(not argument<any '=' any>) any ','> )
                    rpar=')' >
           after=any*
    >
    """
    def transform(self, node, results):
        syms = self.syms
        obj = results["obj"].clone()
        # Normalize the captured argument into an arglist node so the new
        # call trailer has a uniform shape.
        if obj.type == syms.arglist:
            newarglist = obj.clone()
        else:
            newarglist = pytree.Node(syms.arglist, [obj.clone()])
        after = results["after"]
        if after:
            after = [n.clone() for n in after]
        # Build sys.intern(<args>)<after...>, reusing the original parens
        # so whitespace/comments around them are preserved.
        new = pytree.Node(syms.power,
                          Attr(Name(u"sys"), Name(u"intern")) +
                          [pytree.Node(syms.trailer,
                                       [results["lpar"].clone(),
                                        newarglist,
                                        results["rpar"].clone()])] + after)
        new.prefix = node.prefix
        # Make sure 'import sys' exists in the transformed module.
        touch_import(None, u'sys', node)
        return new
| apache-2.0 |
SiLab-Bonn/basil | basil/utils/sim/BasilBusDriver.py | 1 | 4344 | #
# ------------------------------------------------------------
# Copyright (c) All rights reserved
# SiLab, Institute of Physics, University of Bonn
# ------------------------------------------------------------
#
# Initial version by Chris Higgs <chris.higgs@potentialventures.com>
#
# pylint: disable=pointless-statement, expression-not-assigned
from cocotb.binary import BinaryValue
from cocotb.triggers import RisingEdge, Timer
from cocotb_bus.drivers import BusDriver
class BasilBusDriver(BusDriver):
"""Abastract away interactions with the control bus."""
_signals = ["BUS_CLK", "BUS_RST", "BUS_DATA", "BUS_ADD", "BUS_RD", "BUS_WR"]
_optional_signals = ["BUS_BYTE_ACCESS"]
def __init__(self, entity):
BusDriver.__init__(self, entity, "", entity.BUS_CLK)
# Create an appropriately sized high-impedence value
self._high_impedence = BinaryValue(n_bits=len(self.bus.BUS_DATA))
self._high_impedence.binstr = "Z" * len(self.bus.BUS_DATA)
# Create an appropriately sized high-impedence value
self._x = BinaryValue(n_bits=len(self.bus.BUS_ADD))
self._x.binstr = "x" * len(self.bus.BUS_ADD)
self._has_byte_acces = False
async def init(self):
# Defaults
self.bus.BUS_RST <= 1
self.bus.BUS_RD <= 0
self.bus.BUS_WR <= 0
self.bus.BUS_ADD <= self._x
self.bus.BUS_DATA <= self._high_impedence
for _ in range(8):
await RisingEdge(self.clock)
self.bus.BUS_RST <= 0
for _ in range(2):
await RisingEdge(self.clock)
# why this does not work? hasattr(self.bus, 'BUS_BYTE_ACCESS'):
try:
getattr(self.bus, "BUS_BYTE_ACCESS")
except Exception:
self._has_byte_acces = False
else:
self._has_byte_acces = True
async def read(self, address, size):
result = []
self.bus.BUS_DATA <= self._high_impedence
self.bus.BUS_ADD <= self._x
self.bus.BUS_RD <= 0
await RisingEdge(self.clock)
byte = 0
while byte <= size:
if byte == size:
self.bus.BUS_RD <= 0
else:
self.bus.BUS_RD <= 1
self.bus.BUS_ADD <= address + byte
await RisingEdge(self.clock)
if byte != 0:
if self._has_byte_acces and self.bus.BUS_BYTE_ACCESS.value.integer == 0:
result.append(self.bus.BUS_DATA.value.integer & 0x000000FF)
result.append((self.bus.BUS_DATA.value.integer & 0x0000FF00) >> 8)
result.append((self.bus.BUS_DATA.value.integer & 0x00FF0000) >> 16)
result.append((self.bus.BUS_DATA.value.integer & 0xFF000000) >> 24)
else:
# result.append(self.bus.BUS_DATA.value[24:31].integer & 0xff)
if len(self.bus.BUS_DATA.value) == 8:
result.append(self.bus.BUS_DATA.value.integer & 0xFF)
else:
result.append(self.bus.BUS_DATA.value[24:31].integer & 0xFF)
if self._has_byte_acces and self.bus.BUS_BYTE_ACCESS.value.integer == 0:
byte += 4
else:
byte += 1
self.bus.BUS_ADD <= self._x
self.bus.BUS_DATA <= self._high_impedence
await RisingEdge(self.clock)
return result
    async def write(self, address, data):
        """Write the bytes in *data* starting at *address*, one per clock.

        Raises NotImplementedError when the bus reports word (32-bit)
        access mode, which this driver does not support for writes.
        """
        self.bus.BUS_ADD <= self._x
        self.bus.BUS_DATA <= self._high_impedence
        self.bus.BUS_WR <= 0
        await RisingEdge(self.clock)
        for index, byte in enumerate(data):
            self.bus.BUS_DATA <= byte
            self.bus.BUS_WR <= 1
            self.bus.BUS_ADD <= address + index
            await Timer(1)  # This is hack for iverilog
            # Re-drive the same values after the 1-step delay (iverilog hack).
            self.bus.BUS_DATA <= byte
            self.bus.BUS_WR <= 1
            self.bus.BUS_ADD <= address + index
            await RisingEdge(self.clock)
            if self._has_byte_acces and self.bus.BUS_BYTE_ACCESS.value.integer == 0:
                raise NotImplementedError("BUS_BYTE_ACCESS for write to be implemented.")
        self.bus.BUS_DATA <= self._high_impedence
        self.bus.BUS_ADD <= self._x
        self.bus.BUS_WR <= 0
        await RisingEdge(self.clock)
| bsd-3-clause |
Alzon/senlin | senlin/db/sqlalchemy/models.py | 1 | 14127 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for Senlin data.
"""
import uuid
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
import six
import sqlalchemy
from sqlalchemy.ext import declarative
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.orm import session as orm_session
from senlin.db.sqlalchemy import types
BASE = declarative.declarative_base()
def get_session():
    """Return the DB session from the sqlalchemy api module.

    The import is local, presumably to avoid a circular import between this
    module and senlin.db.sqlalchemy.api -- confirm before moving it.
    """
    from senlin.db.sqlalchemy import api as db_api
    return db_api.get_session()
class SenlinBase(models.ModelBase):
    """Base class for Senlin Models."""

    __table_args__ = {'mysql_engine': 'InnoDB'}

    def _resolve_session(self, session=None):
        """Return a usable session.

        Preference order: the explicitly given session, the session this
        object is already attached to, or a brand new one.
        """
        if not session:
            session = orm_session.Session.object_session(self)
        if not session:
            session = get_session()
        return session

    def expire(self, session=None, attrs=None):
        """Expire cached attributes so they are reloaded on next access."""
        self._resolve_session(session).expire(self, attrs)

    def refresh(self, session=None, attrs=None):
        """Refresh this object."""
        self._resolve_session(session).refresh(self, attrs)

    def delete(self, session=None):
        """Delete this object (hard delete) in its own transaction."""
        session = self._resolve_session(session)
        session.begin()
        session.delete(self)
        session.commit()

    def update_and_save(self, values, session=None):
        """Set the given attribute values and commit in one transaction."""
        session = self._resolve_session(session)
        session.begin()
        for k, v in six.iteritems(values):
            setattr(self, k, v)
        session.commit()
class SoftDelete(object):
    """Mixin adding soft deletion: rows are flagged as deleted, not removed."""

    def soft_delete(self, session=None):
        # Record the deletion timestamp rather than issuing a DELETE.
        deleted_at = timeutils.utcnow()
        self.update_and_save({'deleted_time': deleted_at}, session=session)
class Cluster(BASE, SenlinBase, SoftDelete):
    """Represents a cluster created by the Senlin engine."""

    __tablename__ = 'cluster'

    id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    name = sqlalchemy.Column('name', sqlalchemy.String(255))
    # Profile used when creating nodes for this cluster.
    profile_id = sqlalchemy.Column(sqlalchemy.String(36),
                                   sqlalchemy.ForeignKey('profile.id'),
                                   nullable=False)
    # Ownership/scoping ids (presumably Keystone user/project/domain -- confirm).
    user = sqlalchemy.Column(sqlalchemy.String(32), nullable=False)
    project = sqlalchemy.Column(sqlalchemy.String(32), nullable=False)
    domain = sqlalchemy.Column(sqlalchemy.String(32))
    parent = sqlalchemy.Column(sqlalchemy.String(36))
    # Lifecycle timestamps.
    init_time = sqlalchemy.Column(sqlalchemy.DateTime)
    created_time = sqlalchemy.Column(sqlalchemy.DateTime)
    updated_time = sqlalchemy.Column(sqlalchemy.DateTime)
    deleted_time = sqlalchemy.Column(sqlalchemy.DateTime)
    # Size constraints and current target size.
    min_size = sqlalchemy.Column(sqlalchemy.Integer)
    max_size = sqlalchemy.Column(sqlalchemy.Integer)
    desired_capacity = sqlalchemy.Column(sqlalchemy.Integer)
    next_index = sqlalchemy.Column(sqlalchemy.Integer)
    timeout = sqlalchemy.Column(sqlalchemy.Integer)
    status = sqlalchemy.Column(sqlalchemy.String(255))
    status_reason = sqlalchemy.Column(sqlalchemy.Text)
    # Named meta_data because 'metadata' is reserved by SQLAlchemy declarative.
    meta_data = sqlalchemy.Column(types.Dict)
    data = sqlalchemy.Column(types.Dict)
class Node(BASE, SenlinBase, SoftDelete):
    """Represents a Node created by the Senlin engine."""

    __tablename__ = 'node'

    id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    name = sqlalchemy.Column(sqlalchemy.String(255))
    # Id of the backing physical resource -- NOTE(review): exact resource
    # type not visible here; confirm.
    physical_id = sqlalchemy.Column(sqlalchemy.String(36))
    cluster_id = sqlalchemy.Column(sqlalchemy.String(36),
                                   sqlalchemy.ForeignKey('cluster.id'))
    profile_id = sqlalchemy.Column(sqlalchemy.String(36),
                                   sqlalchemy.ForeignKey('profile.id'))
    user = sqlalchemy.Column(sqlalchemy.String(32), nullable=False)
    project = sqlalchemy.Column(sqlalchemy.String(32), nullable=False)
    domain = sqlalchemy.Column(sqlalchemy.String(32))
    # Position of the node within its cluster.
    index = sqlalchemy.Column(sqlalchemy.Integer)
    role = sqlalchemy.Column(sqlalchemy.String(64))
    # Lifecycle timestamps.
    init_time = sqlalchemy.Column(sqlalchemy.DateTime)
    created_time = sqlalchemy.Column(sqlalchemy.DateTime)
    updated_time = sqlalchemy.Column(sqlalchemy.DateTime)
    deleted_time = sqlalchemy.Column(sqlalchemy.DateTime)
    status = sqlalchemy.Column(sqlalchemy.String(255))
    status_reason = sqlalchemy.Column(sqlalchemy.Text)
    # Named meta_data because 'metadata' is reserved by SQLAlchemy declarative.
    meta_data = sqlalchemy.Column(types.Dict)
    data = sqlalchemy.Column(types.Dict)
class ClusterLock(BASE, SenlinBase):
    """Store cluster locks for actions performed by multiple workers.

    Worker threads are able to grab this lock.
    """

    __tablename__ = 'cluster_lock'

    cluster_id = sqlalchemy.Column(sqlalchemy.String(36),
                                   sqlalchemy.ForeignKey('cluster.id'),
                                   primary_key=True, nullable=False)
    # Ids of the actions currently holding the lock.
    action_ids = sqlalchemy.Column(types.List)
    # NOTE(review): counting semantics of this field are not visible in this
    # module; confirm against the lock api code.
    semaphore = sqlalchemy.Column(sqlalchemy.Integer)
class NodeLock(BASE, SenlinBase):
    """Store node locks for actions performed by multiple workers.

    Worker threads are able to grab this lock.
    """

    __tablename__ = 'node_lock'

    node_id = sqlalchemy.Column(sqlalchemy.String(36),
                                sqlalchemy.ForeignKey('node.id'),
                                primary_key=True, nullable=False)
    # A node is locked by at most one action at a time (single id, unlike
    # ClusterLock's list).
    action_id = sqlalchemy.Column(sqlalchemy.String(36))
class Policy(BASE, SenlinBase, SoftDelete):
    '''A policy managed by the Senlin engine.'''

    __tablename__ = 'policy'

    id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    user = sqlalchemy.Column(sqlalchemy.String(32), nullable=False)
    project = sqlalchemy.Column(sqlalchemy.String(32), nullable=False)
    domain = sqlalchemy.Column(sqlalchemy.String(32))
    name = sqlalchemy.Column(sqlalchemy.String(255))
    type = sqlalchemy.Column(sqlalchemy.String(255))
    # Defaults that ClusterPolicies rows also carry per binding --
    # NOTE(review): override relationship not visible here; confirm.
    cooldown = sqlalchemy.Column(sqlalchemy.Integer)
    level = sqlalchemy.Column(sqlalchemy.Integer)
    created_time = sqlalchemy.Column(sqlalchemy.DateTime)
    updated_time = sqlalchemy.Column(sqlalchemy.DateTime)
    deleted_time = sqlalchemy.Column(sqlalchemy.DateTime)
    spec = sqlalchemy.Column(types.Dict)
    data = sqlalchemy.Column(types.Dict)
class ClusterPolicies(BASE, SenlinBase):
    '''Association between clusters and policies.'''

    __tablename__ = 'cluster_policy'

    id = sqlalchemy.Column('id', sqlalchemy.String(36),
                           primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    cluster_id = sqlalchemy.Column(sqlalchemy.String(36),
                                   sqlalchemy.ForeignKey('cluster.id'),
                                   nullable=False)
    policy_id = sqlalchemy.Column(sqlalchemy.String(36),
                                  sqlalchemy.ForeignKey('policy.id'),
                                  nullable=False)
    # Backrefs: Cluster.policies and Policy.bindings expose these rows.
    cluster = relationship(Cluster, backref=backref('policies'))
    policy = relationship(Policy, backref=backref('bindings'))
    # Binding-specific settings.
    cooldown = sqlalchemy.Column(sqlalchemy.Integer)
    priority = sqlalchemy.Column(sqlalchemy.Integer)
    level = sqlalchemy.Column(sqlalchemy.Integer)
    enabled = sqlalchemy.Column(sqlalchemy.Boolean)
    data = sqlalchemy.Column(types.Dict)
    # Timestamp of the last policy operation on this binding.
    last_op = sqlalchemy.Column(sqlalchemy.DateTime)
class Profile(BASE, SenlinBase, SoftDelete):
    '''A profile managed by the Senlin engine.'''

    __tablename__ = 'profile'

    id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    name = sqlalchemy.Column(sqlalchemy.String(255))
    type = sqlalchemy.Column(sqlalchemy.String(255))
    # NOTE(review): presumably the request context used to talk to backend
    # services; confirm against the profile code.
    context = sqlalchemy.Column(types.Dict)
    spec = sqlalchemy.Column(types.Dict)
    permission = sqlalchemy.Column(sqlalchemy.String(32))
    # Named meta_data because 'metadata' is reserved by SQLAlchemy declarative.
    meta_data = sqlalchemy.Column(types.Dict)
    created_time = sqlalchemy.Column(sqlalchemy.DateTime)
    updated_time = sqlalchemy.Column(sqlalchemy.DateTime)
    deleted_time = sqlalchemy.Column(sqlalchemy.DateTime)
class Trigger(BASE, SenlinBase, SoftDelete):
    '''A trigger managed by the Senlin engine.'''

    # The table cannot be named as 'trigger' because 'trigger' may be
    # a reserved word in mysql
    __tablename__ = 'triggers'

    id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    physical_id = sqlalchemy.Column(sqlalchemy.String(36))
    name = sqlalchemy.Column(sqlalchemy.String(255))
    type = sqlalchemy.Column(sqlalchemy.String(255))
    desc = sqlalchemy.Column(sqlalchemy.String(255))
    state = sqlalchemy.Column(sqlalchemy.String(32))
    enabled = sqlalchemy.Column(sqlalchemy.Boolean)
    severity = sqlalchemy.Column(sqlalchemy.String(32))
    links = sqlalchemy.Column(types.Dict)
    spec = sqlalchemy.Column(types.Dict)
    user = sqlalchemy.Column(sqlalchemy.String(32), nullable=False)
    project = sqlalchemy.Column(sqlalchemy.String(32), nullable=False)
    domain = sqlalchemy.Column(sqlalchemy.String(32))
    created_time = sqlalchemy.Column(sqlalchemy.DateTime)
    updated_time = sqlalchemy.Column(sqlalchemy.DateTime)
    deleted_time = sqlalchemy.Column(sqlalchemy.DateTime)
class Webhook(BASE, SenlinBase, SoftDelete):
    """Represents a webhook bonded with Senlin resource entity."""

    __tablename__ = 'webhook'

    id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    name = sqlalchemy.Column('name', sqlalchemy.String(255))
    user = sqlalchemy.Column(sqlalchemy.String(32))
    domain = sqlalchemy.Column(sqlalchemy.String(32))
    project = sqlalchemy.Column(sqlalchemy.String(32))
    created_time = sqlalchemy.Column(sqlalchemy.DateTime)
    deleted_time = sqlalchemy.Column(sqlalchemy.DateTime)
    # Target entity and action to invoke.
    obj_id = sqlalchemy.Column(sqlalchemy.String(36))
    obj_type = sqlalchemy.Column(sqlalchemy.String(36))
    action = sqlalchemy.Column(sqlalchemy.String(36))
    # Stored as text; presumably a serialized credential used when the
    # webhook fires -- confirm.
    credential = sqlalchemy.Column(sqlalchemy.Text)
    params = sqlalchemy.Column(types.Dict)
class Credential(BASE, SenlinBase):
    '''A table for storing user credentials.'''

    __tablename__ = 'credential'

    # Composite primary key: at most one credential per (user, project).
    user = sqlalchemy.Column(sqlalchemy.String(32), primary_key=True,
                             nullable=False)
    project = sqlalchemy.Column(sqlalchemy.String(32), primary_key=True,
                                nullable=False)
    cred = sqlalchemy.Column(types.Dict, nullable=False)
    data = sqlalchemy.Column(types.Dict)
class Action(BASE, SenlinBase, SoftDelete):
    '''An action persisted in the Senlin database.'''

    __tablename__ = 'action'

    id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    name = sqlalchemy.Column(sqlalchemy.String(63))
    context = sqlalchemy.Column(types.Dict)
    # Id of the object the action operates on.
    target = sqlalchemy.Column(sqlalchemy.String(36))
    action = sqlalchemy.Column(sqlalchemy.Text)
    cause = sqlalchemy.Column(sqlalchemy.String(255))
    # NOTE(review): presumably the id of the worker executing the action;
    # confirm against the engine code.
    owner = sqlalchemy.Column(sqlalchemy.String(36))
    interval = sqlalchemy.Column(sqlalchemy.Integer)
    start_time = sqlalchemy.Column(sqlalchemy.Float)
    end_time = sqlalchemy.Column(sqlalchemy.Float)
    timeout = sqlalchemy.Column(sqlalchemy.Integer)
    status = sqlalchemy.Column(sqlalchemy.String(255))
    status_reason = sqlalchemy.Column(sqlalchemy.Text)
    control = sqlalchemy.Column(sqlalchemy.String(255))
    inputs = sqlalchemy.Column(types.Dict)
    outputs = sqlalchemy.Column(types.Dict)
    # Inter-action dependency lists (presumably action ids -- confirm).
    depends_on = sqlalchemy.Column(types.List)
    depended_by = sqlalchemy.Column(types.List)
    created_time = sqlalchemy.Column(sqlalchemy.DateTime)
    updated_time = sqlalchemy.Column(sqlalchemy.DateTime)
    deleted_time = sqlalchemy.Column(sqlalchemy.DateTime)
    data = sqlalchemy.Column(types.Dict)
class Event(BASE, SenlinBase, SoftDelete):
    """Represents an event generated by the Senlin engine."""

    __tablename__ = 'event'

    id = sqlalchemy.Column('id', sqlalchemy.String(36),
                           primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    timestamp = sqlalchemy.Column(sqlalchemy.DateTime)
    deleted_time = sqlalchemy.Column(sqlalchemy.DateTime)
    # Entity the event is about.
    obj_id = sqlalchemy.Column(sqlalchemy.String(36))
    obj_name = sqlalchemy.Column(sqlalchemy.String(255))
    obj_type = sqlalchemy.Column(sqlalchemy.String(36))
    cluster_id = sqlalchemy.Column(sqlalchemy.String(36),
                                   sqlalchemy.ForeignKey('cluster.id'),
                                   nullable=True)
    # Backref: Cluster.events lists the events of a cluster.
    cluster = relationship(Cluster, backref=backref('events'))
    level = sqlalchemy.Column(sqlalchemy.String(64))
    user = sqlalchemy.Column(sqlalchemy.String(32))
    project = sqlalchemy.Column(sqlalchemy.String(32))
    action = sqlalchemy.Column(sqlalchemy.String(36))
    status = sqlalchemy.Column(sqlalchemy.String(255))
    status_reason = sqlalchemy.Column(sqlalchemy.Text)
    # Named meta_data because 'metadata' is reserved by SQLAlchemy declarative.
    meta_data = sqlalchemy.Column(types.Dict)
| apache-2.0 |
muckrack/python-goose | tests/extractors/base.py | 13 | 6777 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import json
import urllib2
import unittest
import socket
from StringIO import StringIO
from goose import Goose
from goose.utils import FileHelper
from goose.configuration import Configuration
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
# Response
class MockResponse():
    """\
    Base mock response class.

    ``response()`` wraps the payload from ``content(req)`` in a urllib2
    response object. Subclasses override ``content`` to serve fixtures.
    """
    code = 200
    msg = "OK"

    def __init__(self, cls):
        # cls: the test case whose id() selects the fixture (used by subclasses)
        self.cls = cls

    def content(self, req):
        # Fix: response() calls self.content(req), but the original
        # signature took no `req` argument and would raise TypeError.
        return "response"

    def response(self, req):
        data = self.content(req)
        url = req.get_full_url()
        resp = urllib2.addinfourl(StringIO(data), data, url)
        resp.code = self.code
        resp.msg = self.msg
        return resp
class MockHTTPHandler(urllib2.HTTPHandler, urllib2.HTTPSHandler):
    """\
    Mocked HTTPHandler in order to query APIs locally.

    patch() installs a global urllib2 opener routing every HTTP(S) request
    through this handler, which builds responses from the test case's
    ``callback`` factory instead of hitting the network.
    """
    # Test case attached by patch(); its `callback` builds mock responses.
    cls = None

    def https_open(self, req):
        # HTTPS requests are served exactly like HTTP ones.
        return self.http_open(req)

    def http_open(self, req):
        r = self.cls.callback(self.cls)
        return r.response(req)

    @staticmethod
    def patch(cls):
        opener = urllib2.build_opener(MockHTTPHandler)
        urllib2.install_opener(opener)
        # dirty ! -- reach into the opener's handler list to attach the
        # test case, since build_opener instantiates the handler itself.
        for h in opener.handlers:
            if isinstance(h, MockHTTPHandler):
                h.cls = cls
        return [h for h in opener.handlers if isinstance(h, MockHTTPHandler)][0]

    @staticmethod
    def unpatch():
        # urllib2: drop the global opener installed by patch()
        urllib2._opener = None
class BaseMockTests(unittest.TestCase):
    """Common scaffolding: patch DNS and urllib2 so tests run fully offline."""

    # Factory producing mock responses; subclasses override.
    callback = MockResponse

    def setUp(self):
        # Redirect every DNS lookup to localhost for the test's duration.
        self.original_getaddrinfo = socket.getaddrinfo
        socket.getaddrinfo = self.new_getaddrinfo
        MockHTTPHandler.patch(self)

    def tearDown(self):
        MockHTTPHandler.unpatch()
        # Restore real DNS resolution.
        socket.getaddrinfo = self.original_getaddrinfo

    def new_getaddrinfo(self, *args):
        return [(2, 1, 6, '', ('127.0.0.1', 0))]

    def _get_current_testname(self):
        # Last dotted component of the test id is the test method name.
        return self.id().split('.')[-1]
class MockResponseExtractors(MockResponse):
    """Mock response serving the HTML fixture that matches the running test."""

    def content(self, req):
        # tests/data/<suite>/<module>/<testname>.html
        test, suite, module, cls, func = self.cls.id().split('.')
        fixture_dir = os.path.join(
            os.path.dirname(CURRENT_PATH), "data", suite, module)
        fixture = os.path.abspath(
            os.path.join(fixture_dir, "%s.html" % func))
        return FileHelper.loadResourceFile(fixture)
class TestExtractionBase(BaseMockTests):
    """\
    Extraction test case.

    Loads an HTML fixture and a JSON file of expected values, runs Goose
    on the fixture URL, and compares each requested article field.
    """
    callback = MockResponseExtractors

    def _resource_path(self, ext):
        """Return the absolute path of this test's fixture with extension *ext*.

        Fixtures live under tests/data/<suite>/<module>/<testname>.<ext>;
        the components come from the running test's dotted id.
        """
        test, suite, module, cls, func = self.id().split('.')
        path = os.path.join(
            os.path.dirname(CURRENT_PATH),
            "data",
            suite,
            module,
            "%s.%s" % (func, ext))
        return os.path.abspath(path)

    def getRawHtml(self):
        """Load the HTML fixture for the current test."""
        return FileHelper.loadResourceFile(self._resource_path("html"))

    def loadData(self):
        """Load the JSON fixture (expected results) into ``self.data``."""
        content = FileHelper.loadResourceFile(self._resource_path("json"))
        self.data = json.loads(content)

    def assert_cleaned_text(self, field, expected_value, result_value):
        """Custom assertion: extracted text must start with the expected text."""
        # cleaned_text is Null
        msg = u"Resulting article text was NULL!"
        self.assertNotEqual(result_value, None, msg=msg)
        # cleaned_text length
        msg = u"Article text was not as long as expected beginning!"
        self.assertTrue(len(expected_value) <= len(result_value), msg=msg)
        # clean_text value: only the expected-length prefix is compared
        result_value = result_value[0:len(expected_value)]
        msg = u"The beginning of the article text was not as expected!"
        self.assertEqual(expected_value, result_value, msg=msg)

    def runArticleAssertions(self, article, fields):
        """Compare each *field* of *article* against the expected fixture data."""
        for field in fields:
            expected_value = self.data['expected'][field]
            result_value = getattr(article, field, None)
            # custom assertion for a given field (assert_<field>), if any
            assertion = 'assert_%s' % field
            if hasattr(self, assertion):
                getattr(self, assertion)(field, expected_value, result_value)
                continue
            # default assertion: strict equality
            msg = u"Error %s \nexpected: %s\nresult: %s" % (field, expected_value, result_value)
            self.assertEqual(expected_value, result_value, msg=msg)

    def extract(self, instance):
        """Run extraction for the fixture URL with the given Goose instance."""
        article = instance.extract(url=self.data['url'])
        return article

    def getConfig(self):
        """Base configuration: image fetching disabled for speed."""
        config = Configuration()
        config.enable_image_fetching = False
        return config

    def getArticle(self):
        """Load fixtures, configure Goose accordingly and extract the article."""
        # load test case data
        self.loadData()
        # basic configuration: no image fetching
        config = self.getConfig()
        self.parser = config.get_parser()
        # target language, needed for non-english content most of the time
        target_language = self.data.get('target_language')
        if target_language:
            config.target_language = target_language
            config.use_meta_language = False
        # run goose
        g = Goose(config=config)
        return self.extract(g)
| apache-2.0 |
jmesteve/saas3 | openerp/addons/website/models/test_models.py | 56 | 1285 | # -*- coding: utf-8 -*-
from openerp.osv import orm, fields
class test_converter(orm.Model):
    # Dummy model exposing one column per field type so the website
    # field-to-HTML converters can be exercised in tests.
    _name = 'website.converter.test'

    _columns = {
        'char': fields.char(),
        'integer': fields.integer(),
        'float': fields.float(),
        'numeric': fields.float(digits=(16, 2)),
        'many2one': fields.many2one('website.converter.test.sub'),
        'binary': fields.binary(),
        'date': fields.date(),
        'datetime': fields.datetime(),
        # Non-ASCII selection labels, presumably to exercise encoding
        # handling in the converters -- TODO confirm.
        'selection': fields.selection([
            (1, "réponse A"),
            (2, "réponse B"),
            (3, "réponse C"),
            (4, "réponse D"),
        ]),
        'selection_str': fields.selection([
            ('A', "Qu'il n'est pas arrivé à Toronto"),
            ('B', "Qu'il était supposé arriver à Toronto"),
            ('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
            ('D', "La réponse D"),
        ], string=u"Lorsqu'un pancake prend l'avion à destination de Toronto et "
                  u"qu'il fait une escale technique à St Claude, on dit:"),
        'html': fields.html(),
        'text': fields.text(),
    }
class test_converter_sub(orm.Model):
    # Target of test_converter's many2one field.
    _name = 'website.converter.test.sub'

    _columns = {
        'name': fields.char(),
    }
| agpl-3.0 |
ktok07b6/polyphony | tests/module/nesting03.py | 1 | 1185 | from polyphony import module
from polyphony import testbench
from polyphony import is_worker_running
from polyphony.io import Port
from polyphony.typing import int8
from polyphony.timing import clksleep, wait_value
@module
class Submodule:
    # Hardware module: continuously multiplies the input port by a
    # construction-time parameter and writes the product to the output port.
    def __init__(self, param):
        self.i = Port(int8, 'in')
        self.o = Port(int8, 'out')
        self.param = param
        self.append_worker(self.sub_worker)

    def sub_worker(self):
        # Scale every input value by `param` for as long as the worker runs.
        while is_worker_running():
            v = self.i.rd() * self.param
            self.o.wr(v)
@module
class Nesting03:
    # Nested-module test: instantiates two Submodules with different scale
    # factors and checks both results in a single worker.
    def __init__(self):
        self.sub1 = Submodule(2)
        self.sub2 = Submodule(3)
        self.append_worker(self.worker)
        self.start = Port(bool, 'in', init=False)
        self.result = Port(bool, 'out', init=False, protocol='valid')

    def worker(self):
        # Wait for the testbench trigger, drive both submodules, let them
        # settle, then report whether 10*2 and 20*3 were produced.
        wait_value(True, self.start)
        self.sub1.i.wr(10)
        self.sub2.i.wr(20)
        clksleep(10)
        result1 = self.sub1.o.rd() == 20
        result2 = self.sub2.o.rd() == 60
        self.result.wr(result1 and result2)
@testbench
def test(m):
    # Kick the DUT and block on the valid-protocol result port.
    m.start.wr(True)
    assert True == m.result.rd()
# Instantiate the design and run the testbench.
m = Nesting03()
test(m)
| mit |
dreal/dreal-next | benchmarks/drh/network/airplane/gen.py | 55 | 1248 |
# Global tables describing the hybrid system, keyed by mode/variable id.
# NOTE(review): they are empty here -- presumably populated elsewhere, or
# this file is a template to be specialized; confirm before relying on them.
flow_var = {}
flow_dec = {}
state_dec = {}
state_val = {}
cont_cond = {}
jump_cond = {}
def getHdr(n):
    """Return the next *n* sequential header ids, advancing the shared counter.

    Ids are 1-based and globally unique across calls (state lives on the
    function attribute ``getHdr.counter``).
    """
    first = getHdr.counter + 1
    getHdr.counter += n
    return list(range(first, first + n))
getHdr.counter = 0
######################
# Formula generation #
######################
def print_loop(bound, steps, keys, holder):
    """Print the unrolled continuous/jump conditions for steps 0..bound.

    For each unrolling index c: prints every key's continuous condition for
    each sub-step j (formatted with c plus `holder` fresh header ids), then,
    until the bound is reached, the jump conditions linking c to c+1.

    NOTE(review): the jump_cond lookup below uses the leftover value of j
    (steps - 1) from the inner loop -- presumably intentional (jumps happen
    after the last sub-step), but confirm.
    """
    c = 0
    while True:
        for j in range(steps):
            hd = getHdr(holder)
            for i in keys:
                print(cont_cond[i][j].format(c,*hd).strip())
        if c >= bound:
            return
        for i in keys:
            print(jump_cond[i][j].format(c,c+1).strip())
        c += 1
def generate(bound, steps, keys, holder, init, goal):
    """Print a complete QF_NRA_ODE SMT2 problem to stdout.

    bound  -- number of unrolling steps
    steps  -- sub-steps (modes) per unrolling step
    keys   -- identifiers selecting rows of the global condition tables
    holder -- number of fresh header ids consumed per sub-step
    init   -- initial-state assertion template, formatted with step 0
    goal   -- goal assertion template, formatted with the final step
    """
    print("(set-logic QF_NRA_ODE)")
    # Variable and flow (ODE) declarations.
    for i in keys:
        print(flow_var[i].strip())
    for i in keys:
        print(flow_dec[i].strip())
    # Per-step state declarations and bounds for steps 0..bound.
    for b in range(bound + 1):
        for i in keys:
            print(state_dec[i].format(b).strip())
    for b in range(bound + 1):
        for i in keys:
            print(state_val[i].format(b).strip())
    print(init.format(0).strip())
    # Unrolled transition relation.
    print_loop(bound, steps, keys, holder)
    print(goal.format(bound).strip())
    print("(check-sat)\n(exit)")
| gpl-3.0 |
NCAR/icar | helpers/erai/config.py | 2 | 4707 | import datetime,os
import argparse
import numpy as np
from bunch import Bunch
import mygis
import io_routines as io
version="1.1"
def set_bounds(info):
    """Compute the ERAi grid index bounds covering info.lat / info.lon.

    Converts one representative grib file to netCDF to read the grid, then
    stores xmin/xmax/ymin/ymax index bounds and the subset lat/lon arrays
    (lat_data / lon_data) on *info*.

    Raises IndexError when the requested longitude range would wrap around
    the ERAi grid boundary.
    """
    atm_file = info.atmdir + info.atmfile
    # Any valid date works -- only the grid is read from this file.
    erai_file = atm_file.replace("_Y_", "2000").replace("_M_", "01").replace("_D_", "01").replace("_h_", "00")
    varlist = ["g4_lat_0", "g4_lon_1"]
    output_dir = info.nc_file_dir
    try:
        os.mkdir(output_dir)
    except OSError:
        # Directory already exists; grib2nc will fail later if the
        # directory is genuinely unusable. (Was a bare `except:`.)
        pass
    ncfile = io.grib2nc(erai_file, varlist, output_dir)
    lat = mygis.read_nc(ncfile, varlist[0]).data
    lon = mygis.read_nc(ncfile, varlist[1]).data - 360
    # Normalize longitudes into [-180, 180).
    lon[lon < -180] += 360
    # Pad the requested range by one grid cell on each side.
    info.xmin = max(0, np.argmin(np.abs(lon - info.lon[0])) - 1)
    info.xmax = min(lon.size - 1, np.argmin(np.abs(lon - info.lon[1])) + 1)
    if info.xmax < info.xmin:
        print("ERROR: attempting to wrap around the ERAi boundary lon="+str(lon[0])+str(lon[-1]))
        print("    Requested East lon = "+str(info.lon[0]))
        print("    Requested West lon = "+str(info.lon[1]))
        print("Requires Custom Solution!")
        raise IndexError
    # note lat is inverted from "expected": ERAi latitudes run north to south.
    info.ymin = np.where(lat <= info.lat[1])[0][0]
    info.ymax = np.where(lat >= info.lat[0])[0][-1] + 1
    # Store 2D subset grids (lat flipped back to south-to-north order).
    lon, lat = np.meshgrid(lon[info.xmin:info.xmax], lat[info.ymin:info.ymax][::-1])
    info.lat_data = lat
    info.lon_data = lon
def make_timelist(info):
    """Populate info.ntimes and info.times with 6-hourly steps.

    Reads info.start_date / info.end_date (datetime objects) and writes:
      info.ntimes -- number of 6-hour steps between start and end
      info.times  -- [start, start+6h, ...), end date excluded
    """
    hrs = 6.0
    dt = datetime.timedelta(hrs / 24)
    # `np.int` was removed in NumPy 1.24; the builtin int() is equivalent here.
    info.ntimes = int(np.round((info.end_date - info.start_date).total_seconds() / 60. / 60. / hrs))
    info.times = [info.start_date + dt * i for i in range(info.ntimes)]
def update_info(info):
    """Fill in derived fields on *info*: the 6-hourly time list and grid bounds."""
    make_timelist(info)
    set_bounds(info)
def parse():
    """Parse command-line arguments and return a Bunch describing the run.

    Dates are read as yyyy-mm-dd; the temporary netCDF directory is
    normalized to end with a slash because later code concatenates file
    names onto it directly.
    """
    parser= argparse.ArgumentParser(description='Convert ERAi files to ICAR input forcing files')
    parser.add_argument('start_date',nargs="?", action='store', help="Specify starting date (yyyy-mm-dd)", default="2000-10-01")
    parser.add_argument('end_date', nargs="?", action='store', help="Specify end date (yyyy-mm-dd)", default="2000-10-02")
    parser.add_argument('lat_n', nargs="?", action='store', help="northern latitude boundary", default="60")
    parser.add_argument('lat_s', nargs="?", action='store', help="southern latitude boundary", default="20")
    parser.add_argument('lon_e', nargs="?", action='store', help="eastern longitude boundary", default="-50")
    parser.add_argument('lon_w', nargs="?", action='store', help="western longitude boundary", default="-140")
    parser.add_argument('dir', nargs="?", action='store', help="ERAi file location", default="/glade/collections/rda/data/ds627.0/")
    parser.add_argument('atmdir', nargs="?", action='store', help="ERAi atmospheric data file location",default="ei.oper.an.ml/_Y__M_/")
    parser.add_argument('sfcdir', nargs="?", action='store', help="ERAi surface data file location", default="ei.oper.fc.sfc/_Y__M_/")
    parser.add_argument('atmfile', nargs="?", action='store', help="ERAi primary atmospheric file", default="ei.oper.an.ml.regn128sc._Y__M__D__h_")
    parser.add_argument('atmuvfile',nargs="?", action='store', help="ERAi U/V atm file", default="ei.oper.an.ml.regn128uv._Y__M__D__h_")
    parser.add_argument('sfcfile', nargs="?", action='store', help="ERAi surface file", default="ei.oper.fc.sfc.regn128sc._Y__M__D__h_")
    parser.add_argument('temp_nc_dir',nargs="?", action='store', help="temporary directory to store netCDF files in",default="temp_nc_dir")
    parser.add_argument('-v', '--version',action='version',
            version='ERAi2ICAR v'+version)
    parser.add_argument ('--verbose', action='store_true',
            default=False, help='verbose output', dest='verbose')
    args = parser.parse_args()

    # Convert yyyy-mm-dd strings into datetime objects.
    date0=args.start_date.split("-")
    start_date=datetime.datetime(int(date0[0]),int(date0[1]),int(date0[2]))

    date0=args.end_date.split("-")
    end_date=datetime.datetime(int(date0[0]),int(date0[1]),int(date0[2]))

    # Ensure a trailing slash so later path concatenation works.
    if args.temp_nc_dir[-1]!="/":
        args.temp_nc_dir+="/"

    info=Bunch(lat=[float(args.lat_s),float(args.lat_n)],
               lon=[float(args.lon_w),float(args.lon_e)],
               start_date=start_date,end_date=end_date,
               atmdir=args.dir+args.atmdir,sfcdir=args.dir+args.sfcdir,
               atmfile=args.atmfile,uvfile=args.atmuvfile,sfcfile=args.sfcfile,
               nc_file_dir=args.temp_nc_dir,version=version)
    return info
| gpl-2.0 |
alu0100207385/dsi_3Django | django/db/models/sql/datastructures.py | 114 | 1853 | """
Useful auxilliary data structures for query construction. Not useful outside
the SQL domain.
"""
class EmptyResultSet(Exception):
    # Raised to signal that a query can match no rows at all, letting callers
    # short-circuit SQL generation -- NOTE(review): usage sites are outside
    # this chunk.
    pass
class MultiJoin(Exception):
    """
    Used by join construction code to indicate the point at which a
    multi-valued join was attempted (if the caller wants to treat that
    exceptionally).
    """
    def __init__(self, names_pos, path_with_names):
        # Position in the lookup path where the multi-valued join occurred.
        self.level = names_pos
        # The path travelled, this includes the path to the multijoin.
        self.names_with_path = path_with_names
class Empty(object):
    # Bare placeholder class -- NOTE(review): presumably used for cheap
    # object cloning elsewhere (not visible in this chunk); confirm.
    pass
class RawValue(object):
    # Thin wrapper marking a value to be used as-is by the SQL layer.
    def __init__(self, value):
        self.value = value
class Date(object):
    """
    Add a date selection column.

    ``col`` is either an (alias, column) pair or a raw SQL string;
    ``lookup_type`` is the truncation period (e.g. 'year', 'month', 'day').
    """
    def __init__(self, col, lookup_type):
        self.col = col
        self.lookup_type = lookup_type

    def relabeled_clone(self, change_map):
        # Fix: the clone must carry lookup_type too -- __init__ requires it,
        # so omitting it raised TypeError whenever aliases were relabeled.
        return self.__class__(
            (change_map.get(self.col[0], self.col[0]), self.col[1]),
            self.lookup_type)

    def as_sql(self, qn, connection):
        if isinstance(self.col, (list, tuple)):
            col = '%s.%s' % tuple([qn(c) for c in self.col])
        else:
            col = self.col
        # Returns (sql, params); date_trunc_sql produces the backend-specific
        # truncation expression.
        return connection.ops.date_trunc_sql(self.lookup_type, col), []
class DateTime(object):
    """
    Add a datetime selection column.

    Like Date, but timezone-aware: ``tzname`` is applied before truncation.
    """
    def __init__(self, col, lookup_type, tzname):
        self.col = col
        self.lookup_type = lookup_type
        self.tzname = tzname

    def relabeled_clone(self, change_map):
        # Fix: the clone must carry lookup_type and tzname too -- __init__
        # requires them, so omitting them raised TypeError on relabeling.
        return self.__class__(
            (change_map.get(self.col[0], self.col[0]), self.col[1]),
            self.lookup_type, self.tzname)

    def as_sql(self, qn, connection):
        if isinstance(self.col, (list, tuple)):
            col = '%s.%s' % tuple([qn(c) for c in self.col])
        else:
            col = self.col
        return connection.ops.datetime_trunc_sql(self.lookup_type, col, self.tzname)
| bsd-3-clause |
paulodiogo/foursquared.eclair | util/oget.py | 262 | 3416 | #!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
# Host:port used for both the auth exchange and the signed API calls.
SERVER = 'api.foursquare.com:80'
# All requests POST form-encoded bodies.
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
  """Extract (token, token_secret) from an authexchange XML response."""
  token = re.search('<oauth_token>(.*)</oauth_token>', auth_response).group(1)
  secret = re.search(
      '<oauth_token_secret>(.*)</oauth_token_secret>', auth_response).group(1)
  return (token, secret)
def create_signed_oauth_request(username, password, consumer):
  """Build and HMAC-SHA1-sign the auth-exchange POST request."""
  credentials = dict(fs_username=username, fs_password=password)
  request = oauth.OAuthRequest.from_consumer_and_token(
      consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
      parameters=credentials)
  # No access token yet: sign with the consumer secret only.
  request.sign_request(SIGNATURE_METHOD, consumer, None)
  return request
def main():
  """Fetch the oAuth-protected URL given in argv[1] and print the body.

  Credentials come from ~/.oget (4 lines: consumer key, consumer secret,
  username, password). After the first successful auth exchange the access
  token and secret are appended, producing a 6-line file that lets later
  runs skip the exchange.

  NOTE(review): Python 2 only (urllib2, httplib, print statement).
  """
  url = urlparse.urlparse(sys.argv[1])
  # Nevermind that the query can have repeated keys.
  parameters = dict(urlparse.parse_qsl(url.query))
  password_file = open(os.path.join(user.home, '.oget'))
  lines = [line.strip() for line in password_file.readlines()]
  if len(lines) == 4:
    # First run: no cached access token yet.
    cons_key, cons_key_secret, username, password = lines
    access_token = None
  else:
    cons_key, cons_key_secret, username, password, token, secret = lines
    access_token = oauth.OAuthToken(token, secret)
  consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
  if not access_token:
    # Exchange username/password for an oauth access token.
    oauth_request = create_signed_oauth_request(username, password, consumer)
    connection = httplib.HTTPConnection(SERVER)
    headers = {'Content-Type' :'application/x-www-form-urlencoded'}
    connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
        body=oauth_request.to_postdata(), headers=headers)
    auth_response = connection.getresponse().read()
    token = parse_auth_response(auth_response)
    access_token = oauth.OAuthToken(*token)
    # Cache the token so subsequent runs skip the exchange.
    open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
        cons_key, cons_key_secret, username, password, token[0], token[1])))
  # Sign and send the actual API request.
  oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
      access_token, http_method='POST', http_url=url.geturl(),
      parameters=parameters)
  oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
  connection = httplib.HTTPConnection(SERVER)
  connection.request(oauth_request.http_method, oauth_request.to_url(),
      body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
  print connection.getresponse().read()
  #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')

if __name__ == '__main__':
  main()
| apache-2.0 |
tvenkat/askbot-devel | askbot/management/commands/junk.py | 10 | 1145 | import os
import sys
import tempfile
import threading
from django.core.management.base import NoArgsCommand
from django.core import management
class SEImporterThread(threading.Thread):
    """Run the StackExchange import management command in a background thread.

    NOTE(review): the dump path is hard-coded to a developer machine; this
    looks like a throwaway debugging script (the module is named junk.py).
    """
    def __init__(self, stdout = None):
        # stdout is stored but never forwarded to call_command; the caller
        # redirects sys.stdout globally instead.
        self.stdout = stdout
        super(SEImporterThread, self).__init__()
    def run(self):
        management.call_command('load_stackexchange','/home/fadeev/personal/asksci/asksci.zip')
class Command(NoArgsCommand):
    """Run the SE importer in a thread while tailing its captured stdout.

    sys.stdout is redirected into a temp file for the duration of the
    import; this command busy-waits on that file's size and echoes each
    new line back to the real stdout.
    """
    def handle_noargs(self, **options):
        fake_stdout = tempfile.NamedTemporaryFile()
        real_stdout = sys.stdout
        # Redirect everything the importer prints into the temp file.
        sys.stdout = fake_stdout
        importer = SEImporterThread(stdout = fake_stdout)
        importer.start()
        # Open a second handle on the same file for reading (tail -f style).
        read_stdout = open(fake_stdout.name, 'r')
        file_pos = 0
        fd = read_stdout.fileno()
        # Busy-wait: poll the file size and echo any newly written line.
        while importer.isAlive():
            c_size = os.fstat(fd).st_size
            if c_size > file_pos:
                line = read_stdout.readline()
                real_stdout.write('Have line :' + line)
                file_pos = read_stdout.tell()
        fake_stdout.close()
        read_stdout.close()
        # Restore the real stdout.
        sys.stdout = real_stdout
| gpl-3.0 |
pedrobaeza/account-financial-tools | currency_rate_date_check/__openerp__.py | 4 | 2030 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Currency rate date check module for Odoo
# Copyright (C) 2012-2014 Akretion (http://www.akretion.com).
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: the module body is a single dict literal that
# the addon loader evaluates.
{
    'name': 'Currency Rate Date Check',
    'version': '1.0',
    'category': 'Financial Management/Configuration',
    'license': 'AGPL-3',
    # typo fix: "up-to-update" -> "up-to-date"
    'summary': "Make sure currency rates used are always up-to-date",
    'description': """
Currency Rate Date Check
========================
This module adds a check on dates when doing currency conversion in Odoo.
It checks that the currency rate used to make the conversion
is not more than N days away from the date of the amount to convert.
The maximum number of days of the interval can be
configured on the company form.
Please contact Alexis de Lattre from Akretion <alexis.delattre@akretion.com>
for any help or question about this module.
""",
    'author': 'Akretion',
    'website': 'http://www.akretion.com',
    'depends': ['base'],
    'data': ['company_view.xml'],
    'images': [
        'images/date_check_error_popup.jpg',
        'images/date_check_company_config.jpg',
    ],
    'installable': True,
}
| agpl-3.0 |
moreati/gatspy | gatspy/periodic/modeler.py | 1 | 9034 | from __future__ import division, print_function
import numpy as np
from .optimizer import LinearScanOptimizer
class PeriodicModeler(object):
    """Base class for periodic modeling.

    Subclasses implement ``_fit``, ``_score`` and ``_predict``; this class
    provides the public fit/score/predict API, frequency-grid helpers and
    lazy best-period computation through the supplied optimizer.
    """

    def __init__(self, optimizer=None, fit_period=False,
                 optimizer_kwds=None, *args, **kwargs):
        if optimizer is None:
            kwds = optimizer_kwds or {}
            optimizer = LinearScanOptimizer(**kwds)
        elif optimizer_kwds:
            # BUG FIX: ``warnings`` was referenced without ever being
            # imported in this module, so this branch raised NameError.
            # Import locally so the warning can actually be issued.
            import warnings
            warnings.warn("Optimizer specified, so optimizer keywords ignored")

        if not hasattr(optimizer, 'best_period'):
            raise ValueError("optimizer must be a PeriodicOptimizer instance: "
                             "{0} has no best_period method".format(optimizer))
        self.optimizer = optimizer
        self.fit_period = fit_period
        self.args = args
        self.kwargs = kwargs
        self._best_period = None

    def fit(self, t, y, dy=None):
        """Fit the multiterm Periodogram model to the data.

        Parameters
        ----------
        t : array_like, one-dimensional
            sequence of observation times
        y : array_like, one-dimensional
            sequence of observed values
        dy : float or array_like (optional)
            errors on observed values
        """
        # For linear models, dy=1 is equivalent to no errors
        if dy is None:
            dy = 1
        self.t, self.y, self.dy = np.broadcast_arrays(t, y, dy)
        self._fit(self.t, self.y, self.dy)
        self._best_period = None  # reset best period in case of refitting

        if self.fit_period:
            self._best_period = self._calc_best_period()
        return self

    def predict(self, t, period=None):
        """Compute the best-fit model at ``t`` for a given frequency omega

        Parameters
        ----------
        t : float or array_like
            times at which to predict
        period : float (optional)
            The period at which to compute the model. If not specified, it
            will be computed via the optimizer provided at initialization.

        Returns
        -------
        y : np.ndarray
            predicted model values at times t
        """
        t = np.asarray(t)
        if period is None:
            period = self.best_period
        result = self._predict(t.ravel(), period=period)
        return result.reshape(t.shape)

    def score_frequency_grid(self, f0, df, N):
        """Compute the score on a frequency grid.

        Some models can compute results faster if the inputs are passed in
        this manner.

        Parameters
        ----------
        f0, df, N : (float, float, int)
            parameters describing the frequency grid freq = f0 + df * arange(N)
            Note that these are frequencies, not angular frequencies.

        Returns
        -------
        score : ndarray
            the length-N array giving the score at each frequency
        """
        return self._score_frequency_grid(f0, df, N)

    def periodogram_auto(self, oversampling=5, nyquist_factor=3,
                         return_periods=True):
        """Compute the periodogram on an automatically-determined grid.

        This function uses heuristic arguments to choose a suitable frequency
        grid for the data. Note that depending on the data window function,
        the model may be sensitive to periodicity at higher frequencies than
        this function returns!

        The final number of frequencies will be
        Nf = oversampling * nyquist_factor * len(t) / 2

        Parameters
        ----------
        oversampling : float
            the number of samples per approximate peak width
        nyquist_factor : float
            the highest frequency, in units of the nyquist frequency for
            points spread uniformly through the data range.
        return_periods : bool
            unused; kept for backward compatibility with existing callers.

        Returns
        -------
        period : ndarray
            the grid of periods
        power : ndarray
            the power at each frequency
        """
        N = len(self.t)
        T = np.max(self.t) - np.min(self.t)
        df = 1. / T / oversampling
        f0 = df
        Nf = int(0.5 * oversampling * nyquist_factor * N)
        freq = f0 + df * np.arange(Nf)
        return 1. / freq, self._score_frequency_grid(f0, df, Nf)

    def score(self, periods=None):
        """Compute the periodogram for the given period or periods

        Parameters
        ----------
        periods : float or array_like
            Array of periods at which to compute the periodogram.
            (Doc fix: these are periods, not angular frequencies.)

        Returns
        -------
        scores : np.ndarray
            Array of normalized powers (between 0 and 1) for each frequency.
            Shape of scores matches the shape of the provided periods.
        """
        periods = np.asarray(periods)
        return self._score(periods.ravel()).reshape(periods.shape)

    periodogram = score

    @property
    def best_period(self):
        """Lazy evaluation of the best period given the model"""
        if self._best_period is None:
            self._best_period = self._calc_best_period()
        return self._best_period

    def find_best_periods(self, n_periods=5, return_scores=False):
        """Find the top several best periods for the model"""
        return self.optimizer.find_best_periods(self, n_periods,
                                                return_scores=return_scores)

    def _calc_best_period(self):
        """Compute the best period using the optimizer"""
        return self.optimizer.best_period(self)

    # The following methods should be overloaded by derived classes:

    def _score_frequency_grid(self, f0, df, N):
        # Default implementation: materialize the grid and defer to _score.
        freq = f0 + df * np.arange(N)
        return self._score(1. / freq)

    def _score(self, periods):
        """Compute the score of the model given the periods"""
        raise NotImplementedError()

    def _fit(self, t, y, dy):
        """Fit the model to the given data"""
        raise NotImplementedError()

    def _predict(self, t, period):
        """Predict the model values at the given times"""
        raise NotImplementedError()
class PeriodicModelerMultiband(PeriodicModeler):
    """Base class for periodic modeling on multiband data"""

    def fit(self, t, y, dy=None, filts=0):
        """Fit the multiterm Periodogram model to the data.

        Parameters
        ----------
        t : array_like, one-dimensional
            sequence of observation times
        y : array_like, one-dimensional
            sequence of observed values
        dy : float or array_like (optional)
            errors on observed values
        filts : array_like (optional)
            The array specifying the filter/bandpass for each observation.
        """
        self.unique_filts_ = np.unique(filts)

        # For linear models, dy=1 is equivalent to no errors
        if dy is None:
            dy = 1

        all_data = np.broadcast_arrays(t, y, dy, filts)
        self.t, self.y, self.dy, self.filts = map(np.ravel, all_data)

        self._fit(self.t, self.y, self.dy, self.filts)
        self._best_period = None  # reset best period in case of refitting

        if self.fit_period:
            self._best_period = self._calc_best_period()
        return self

    def predict(self, t, filts, period=None):
        """Compute the best-fit model at ``t`` for a given frequency omega

        Parameters
        ----------
        t : float or array_like
            times at which to predict
        filts : array_like (optional)
            the array specifying the filter/bandpass for each observation.
            This is used only in multiband periodograms.
        period : float (optional)
            The period at which to compute the model. If not specified, it
            will be computed via the optimizer provided at initialization.

        Returns
        -------
        y : np.ndarray
            predicted model values at times t
        """
        unique_filts = set(np.unique(filts))
        if not unique_filts.issubset(self.unique_filts_):
            # BUG FIX: the original message labeled the *training* filters as
            # "input" and the prediction-time filters as "output" — swapped.
            raise ValueError("filts does not match training data: "
                             "training: {0} input: {1}"
                             "".format(set(self.unique_filts_),
                                       set(unique_filts)))
        t, filts = np.broadcast_arrays(t, filts)

        if period is None:
            period = self.best_period

        result = self._predict(t.ravel(), filts=filts.ravel(), period=period)
        return result.reshape(t.shape)

    # The following methods should be overloaded by derived classes:

    def _score(self, periods):
        """Compute the score of the model given the periods"""
        raise NotImplementedError()

    def _fit(self, t, y, dy, filts):
        """Fit the model to the given data"""
        raise NotImplementedError()

    def _predict(self, t, filts, period):
        """Predict the model values at the given times & filters"""
        raise NotImplementedError()
| bsd-2-clause |
bionomicron/Redirector | util/FlatFileParser.py | 1 | 14961 | #!/usr/bin/env python
'''
@author: Graham Rockwell
@organization: Church Lab Harvard Genetics
@version: 03/04/2013
--Rename to metabolic-model parser
'''
import string, sets, re
from util.Report import Report
class TagedElement(dict):
    """A dict of parsed field values plus a side map of extra annotations.

    (Class name kept as in the original source; "Taged" is a historical typo.)
    """

    def __init__(self):
        # extra, non-required columns keyed by header name
        self.annotation = {}

    def __str__(self):
        # BUG FIX: the original built the string but never returned it, so
        # str(instance) returned None and raised TypeError.
        result = dict.__str__(self)
        result += self.annotation.__str__()
        return result
class FlatFileParser:
    '''
    @summary: Parent class for parsing flat (delimited) files
    '''
    # NOTE(review): Python 2 code throughout (``sets`` module, ``print``
    # statements, ``has_key``); left untouched here, only documented.

    def __init__( self, delimiter='\t', comment= '#', emptyField='na' ):
        # field separator and header bookkeeping
        self.delimiter = delimiter
        self.requiredHeaderMap = {}   # header name -> output data name (must be present)
        self.optionalHeaderMap = {}   # header name -> output data name (may be absent)
        self.headerIndex = {}         # header name -> column index
        # line-range configuration (0-based indices into the file)
        self.headerLine = 0
        self.startLine = 0
        self.endLine = float("inf")
        self.failLine = True          # raise on column-count mismatch when True
        self.emptyLine = ''
        self.emptyData = ''           # value substituted for empty/NA fields
        self.wrapperString = '\"'     # quote character stripped from fields
        self.comment = comment
        self.emptyField = emptyField  # token treated as "no data" (default 'na')
        # streaming-parse state (see startFile / getTagedLine)
        self.fileHandle = None
        self.index = None

    def setEmptyLine( self, emptyLine ):
        self.emptyLine = emptyLine

    def setDelimiter( self, delimiter ):
        self.delimiter = delimiter

    def getDelimiter( self ):
        return self.delimiter

    def setEmptyField( self, emptyField ):
        self.emptyField = emptyField

    def getEmptyField( self ):
        return self.emptyField

    def setWrapper(self,wrapperString):
        self.wrapperString = wrapperString

    def setStartLine( self, startLine ):
        self.startLine = startLine

    def setHeaderLine( self, headerLine ):
        # header line also becomes the first data boundary
        self.headerLine = headerLine
        self.startLine = headerLine

    def setDataNames( self, dataNames ):
        self.dataNames = dataNames

    def resetHeaders( self ):
        self.requiredHeaderMap = {}
        self.optionalHeaderMap = {}
        self.headerIndex = {}

    def addRequiredHeaders(self, headerMap):
        self.requiredHeaderMap.update(headerMap)

    def addOptionalHeaders(self, headerMap):
        self.optionalHeaderMap.update(headerMap)

    def setRequiredHeaderMap( self, headerMap ):
        self.resetHeaders()
        self.addRequiredHeaders( headerMap )

    def setHeader( self, headers ):
        # treat each given header as required, mapping it to itself
        self.resetHeaders()
        headerMap = {}
        for h in headers:
            headerMap[h] = h
        self.addRequiredHeaders(headerMap)

    def _safeSplit( self, line ):
        '''
        @type line: String
        @return: [String]
        @summary:
        Splits the line into a list
        checks for a wrapper on the elements of the list and removes them
        '''
        # NOTE(review): the result of replace() is discarded, so a trailing
        # newline survives into the last field — confirm whether intended.
        line.replace('\n','')
        nline = []
        for v in line.split(self.delimiter):
            v = string.replace(v,self.wrapperString,'')
            v = string.strip(v)
            nline.append(v)
        return nline

    def _splitLineCheck( self, line ):
        '''
        @type line: String
        @return: [String]
        @summary:
        Splits the line into a list
        checks for a wrapper on the elements of the list and removes them
        '''
        # Returns None (instead of raising) when failLine is False.
        sline = self._safeSplit( line )
        if len( sline ) != len( self.headerIndex ):
            if ( self.failLine ):
                print self.headerIndex
                raise ParseError( "Line should have %d column found %s \n [%s] \n" % ( len( self.headerIndex ), len( sline ) , sline ) )
            else:
                return None
        else:
            return sline

    def checkRequiredHeaders( self, line ):
        '''
        @var line: list of Strings to be checked for required headers
        @type line: [String]
        Checks the header line to see if required headers are present.
        @return boolean
        '''
        rheaders = sets.Set( self.requiredHeaderMap.keys() )
        sLine = sets.Set( line )
        if rheaders.issubset( sLine ):
            return True
        else:
            headerErrorTag = "Expecting headers:[%s]\n found:[%s] \n expected - found (missing): [%s]" % ( '|,|'.join( rheaders ), '|,|'.join( sLine ), '|,|'.join( rheaders - sLine ))
            raise ParseError(headerErrorTag)
            # NOTE(review): unreachable after the raise above
            return False

    def _indexHeader( self, line ):
        '''
        @summary:
        Find index of each header in the line.
        Used for matching headers to data.
        '''
        index = 0
        for value in line:
            self.headerIndex[value] = index
            index += 1
        return self.headerIndex

    def checkFieldValue( self, value ):
        '''
        Checks the value of the and strip of extra characters
        '''
        # normalize a raw field: strip whitespace, map NA/empty to emptyData
        v = string.strip( value )
        if( string.strip( v ) == self.emptyField or string.strip( v ) == '' ):
            return self.emptyData
        else:
            return v

    def parseRequired( self, line ):
        '''
        #! to be removed
        Parses map of list of data matching to required headers
        '''
        result = {}
        for header in self.requiredHeaderMap.keys():
            dataName = self.requiredHeaderMap[header]
            value = line[self.headerIndex[header]]
            result[dataName] = self.checkFieldValue( value )
        return result

    def parseOptional( self, line ):
        '''
        #! to be removed
        Parses map of list of data matching to required headers
        '''
        result = {}
        for header in self.optionalHeaderMap.keys():
            dataName = self.optionalHeaderMap[header]
            if self.headerIndex.has_key( header ):
                index = self.headerIndex[header]
                value = line[index]
                result[dataName] = self.checkFieldValue( value )
            else:
                # optional column absent from this file: emit empty data
                result[dataName] = self.emptyData
        return result

    def parseAnnotation(self,line):
        '''
        Parses columns that are present but not required in the output.
        #! currently being reviewed for redundancy.
        '''
        result = {}
        knownHeaders = self.requiredHeaderMap.keys()
        knownHeaders.extend(self.optionalHeaderMap.keys())
        for headerName in self.headerIndex.keys():
            if headerName not in knownHeaders:
                index = self.headerIndex[headerName]
                value = line[index]
                result[headerName] = self.checkFieldValue(value)
        return result

    def parseTagedLine( self, line ):
        '''
        @var line:
        @type line: String
        @return: [String]
        #! being updated to remove annotation and just return data for all headers found.
        '''
        result = TagedElement()
        # NOTE(review): if failLine is False, _splitLineCheck may return
        # None and len(None) below raises TypeError — confirm intended.
        sline = self._splitLineCheck( line )
        if len( sline ) != len( self.headerIndex ):
            print self.headerIndex
            raise ParseError( "Line should have %d column found %s \n [%s] \n" % ( len( self.headerIndex ), len( sline ) , sline ) )
        requiredValues = self.parseRequired( sline )
        optionalValues = self.parseOptional( sline )
        annotationValues = self.parseAnnotation( sline )
        result.update( requiredValues )
        result.update( optionalValues )
        result.annotation = annotationValues
        return result

    def isComment( self, line ):
        '''
        Checks a line to see if it is a comment line
        '''
        # blank lines are treated as comments (returns True for them too)
        strip_line = string.strip( line )
        if len( strip_line ) > 0 and strip_line != self.emptyLine:
            cprefix = "^%s.*" % self.comment
            m = re.match(cprefix,strip_line)
            check = m != None
            return check
            #return strip_line[0] == self.comment
        else:
            return True

    def getNextLine(self):
        # returns None at end of file instead of raising StopIteration
        try:
            line = self.fileHandle.next()
        except StopIteration:
            line = None
        return line

    def getTagedLine(self):
        # Streaming accessor: returns None at EOF, "" for comment lines,
        # and a TagedElement for data lines past startLine.
        result = None
        line = self.getNextLine()
        if line == None:
            result = None
        elif self.isComment( line ):
            result = ""
        elif self.index > self.startLine:
            value = self.parseTagedLine( line )
            result = value
        self.index += 1
        return result

    def closeFile(self):
        self.fileHandle.close()

    def checkHeader( self, headerLine):
        # validate uniqueness + required headers, then build the index
        hLine = self._safeSplit( headerLine )
        hLineSet = sets.Set( hLine )
        if len( hLineSet ) != len( hLine ):
            raise ParseError( "Duplicate column name %s" % ( hLine ) )
        if self.checkRequiredHeaders( hLine ):
            self._indexHeader( hLine )
            return True
        return False

    def parseHeader(self, headerLine, unique = True):
        hLine = self._safeSplit( headerLine )
        hLineSet = sets.Set( hLine )
        if len( hLineSet ) != len( hLine ) and unique:
            raise ParseError( "Duplicate column name %s" % ( hLine ) )
        if self.requiredHeaderMap != {}:
            if not self.checkRequiredHeaders(hLine):
                # NOTE(review): format string has two %s placeholders but
                # only one argument — would raise TypeError if reached.
                raise ParseError("Failed to find required headers [%s].\n In line [%s}" % (hLine))
        self.setHeader(hLine )
        self._indexHeader( hLine )
        return hLine

    def parseFile( self, fileName ):
        '''
        @var fileName:
        @type: String
        @summary:
        Older file parsing function
        @return: [{header:value}]
        '''
        result = []
        lines = open( fileName, 'r' )
        index = 0
        for line in lines:
            if self.isComment( line ):
                pass
            elif index < self.startLine:
                # NOTE(review): `break` here aborts the whole parse before
                # startLine is reached — `continue` may have been intended.
                break
            elif index == self.headerLine:
                self.checkHeader( line )
            elif index > self.startLine:
                value = self.parseTagedLine( line )
                result.append( value )
            index += 1
        lines.close()
        return result

    def parseArray(self,fileName,):
        # split every in-range, non-comment line into a list of fields
        result = []
        lines = open(fileName,'r')
        index = 1
        for line in lines:
            if self.isComment( line ):
                pass
            elif self.endLine >= index >= self.startLine:
                sLine = self._safeSplit(line)
                result.append(sLine)
            index += 1
        # NOTE(review): file handle is never closed here
        return result

    def parseToMap( self, fileName, keyTag, valueTag = None, multi=False ):
        # Parse the file into {key: value} where key comes from column
        # keyTag; value is either column valueTag or the whole row dict.
        # With multi=True (and a valueTag) values are accumulated in lists.
        result = {}
        self.startFile(fileName)
        d = self.getTagedLine()
        while d != None:
            if d != "":
                keyName = d[keyTag]
                del d[keyTag]
                if valueTag:
                    value = d[valueTag]
                else:
                    value = d
                if multi and valueTag:
                    if keyName not in result.keys():
                        result[keyName] = []
                    result[keyName].append(value)
                else:
                    result[keyName] = value
            d = self.getTagedLine()
        self.closeFile()
        return result

    def parseAnyToMap(self,header,fileName,keyTag,valueTags=None):
        # convenience wrapper: set the expected header, then parse to a map
        self.setHeader(header)
        result = self.parseToMap(fileName,keyTag,valueTags)
        return result

    def startFile(self,fileName):
        '''
        @summary:
        Find header and checks for required headers.
        Indexes headers
        Finds first line to start parsing delimited file.
        '''
        self.fileHandle = open(fileName,'r')
        self.index = 0
        line = self.getNextLine()
        while line != None:
            if self.isComment( line ):
                self.index += 1
            elif self.index < self.startLine:
                self.index += 1
            elif self.index >= self.headerLine:
                if not self.checkHeader( line ):
                    raise ParseError("failed to find required headers")
                self.index += 1
                return True
            line = self.getNextLine()
        return False

    def parseGenericReport(self, fileName, keyTag=None, header = None, unique = True):
        '''
        @var fileName: name of flat (delimited) in text format to be parsed
        @type fileName: String
        @var keyTag: ID of column to be used for report key row
        @type keyTag: String
        @summary:
        Primary Function
        Parses flat file returns report object.
        '''
        result = Report()
        kIndex = None
        lines = open( fileName, 'r' )
        index = 0
        for line in lines:
            if self.isComment( line ):
                pass
            elif self.endLine < index < self.startLine:
                # NOTE(review): with the default endLine=inf this condition
                # can never be true — confirm the intended range check.
                index += 1
                continue
            elif index == self.headerLine:
                header = self.parseHeader( line, unique )
                if keyTag in header:
                    kIndex = header.index(keyTag)
            elif self.endLine > index > self.startLine:
                line = line.replace('\n','')
                sLine =self._safeSplit(line)
                if kIndex != None:
                    rName = sLine[kIndex]
                else:
                    # no key column: fall back to the line number as row key
                    rName = str(index)
                for i in range(len(sLine)):
                    if i != kIndex:
                        cName = header[i]
                        value = sLine[i]
                        result.add(rName,cName,value)
            index += 1
        lines.close()
        return result

    def parseToReport( self, fileName, keyTag, header = None, unique = True):
        '''
        @var fileName: name of flat (delimited) in text format to be parsed
        @type fileName: String
        @var keyTag: ID of column to be used for report key row
        @type keyTag: String
        @summary:
        Primary Function
        Parses flat file returns report object.
        '''
        if header != None:
            self.setHeader(header)
        result = Report()
        self.startFile(fileName)
        d = self.getTagedLine()
        while d != None:
            if d != "":
                keyName = d[keyTag]
                del d[keyTag]
                for valueTag in d.keys():
                    v = d[valueTag]
                    result.addElement(keyName,valueTag,v)
            d = self.getTagedLine()
        self.closeFile()
        return result
#=====================================
#=============ParseError==============
#=====================================
class ParseError(Exception):
    """Raised when a flat file does not match the expected format."""
| mit |
ofekd/servo | tests/wpt/css-tests/tools/pywebsocket/src/example/echo_noext_wsh.py | 465 | 2404 | # Copyright 2013, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
_GOODBYE_MESSAGE = u'Goodbye'


def web_socket_do_extra_handshake(request):
    """Decline every WebSocket extension offered by the client.

    pywebsocket parses the Sec-WebSocket-Extensions header into
    request.ws_requested_extensions and builds the processor list before this
    hook runs (and never re-reads it afterwards); clearing the processor list
    here rejects all requested extensions.
    """
    request.ws_extension_processors = []
def web_socket_transfer_data(request):
    """Echo. Same as echo_wsh.py."""
    while True:
        message = request.ws_stream.receive_message()
        if message is None:
            return
        # text frames echo as text; anything else echoes as binary
        is_text = isinstance(message, unicode)
        request.ws_stream.send_message(message, binary=not is_text)
        if is_text and message == _GOODBYE_MESSAGE:
            return
# vi:sts=4 sw=4 et
| mpl-2.0 |
hfp/tensorflow-xsmm | tensorflow/python/debug/examples/debug_keras.py | 13 | 3110 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tfdbg example: debugging tf.keras models training on tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
def main(_):
  """Train a trivial Keras model on a dummy dataset, optionally under tfdbg."""
  # Create a dummy dataset.
  num_examples = 8
  steps_per_epoch = 2
  input_dims = 3
  output_dims = 1
  xs = np.zeros([num_examples, input_dims])
  ys = np.zeros([num_examples, output_dims])
  dataset = tf.data.Dataset.from_tensor_slices(
      (xs, ys)).repeat(num_examples).batch(int(num_examples / steps_per_epoch))

  # Wrap the session so tfdbg can intercept runs (TF1-style session API).
  sess = tf.Session()
  if FLAGS.debug:
    # Use the command-line interface (CLI) of tfdbg.
    sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type)
  elif FLAGS.tensorboard_debug_address:
    # Use the TensorBoard Debugger Plugin (GUI of tfdbg).
    sess = tf_debug.TensorBoardDebugWrapperSession(
        sess, FLAGS.tensorboard_debug_address)
  # Make Keras use the (possibly wrapped) session.
  tf.keras.backend.set_session(sess)

  # Create a dummy model.
  model = tf.keras.Sequential([
      tf.keras.layers.Dense(1, input_shape=[input_dims])])
  model.compile(loss="mse", optimizer="sgd")

  # Train the model using the dummy dataset created above.
  model.fit(dataset, epochs=FLAGS.epochs, steps_per_epoch=steps_per_epoch)
if __name__ == "__main__":
  # Command-line flags; --debug and --tensorboard_debug_address select
  # mutually exclusive tfdbg front-ends.
  parser = argparse.ArgumentParser()
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--debug",
      type="bool",
      nargs="?",
      const=True,
      default=False,
      help="Use debugger to track down bad values during training. "
      "Mutually exclusive with the --tensorboard_debug_address flag.")
  parser.add_argument(
      "--ui_type",
      type=str,
      default="curses",
      help="Command-line user interface type (curses | readline).")
  parser.add_argument(
      "--tensorboard_debug_address",
      type=str,
      default=None,
      help="Connect to the TensorBoard Debugger Plugin backend specified by "
      "the gRPC address (e.g., localhost:1234). Mutually exclusive with the "
      "--debug flag.")
  parser.add_argument(
      "--epochs",
      type=int,
      default=2,
      help="Number of epochs to train the model for.")
  # Unparsed args are forwarded to tf.app.run (which re-invokes main).
  FLAGS, unparsed = parser.parse_known_args()
  with tf.Graph().as_default():
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
hcmlab/nova | bin/cml/models/templates/continuous/video/tensorflow/interface.py | 1 | 9317 |
import sys
import importlib
if not hasattr(sys, 'argv'):
sys.argv = ['']
#import tensorflow as tf
import numpy as np
import random
#import time
from xml.dom import minidom
import os
import shutil
import site
import pprint
import h5py
import imageio
from skimage.transform import resize
import site as s
s.getusersitepackages()
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras import backend, optimizers
from tensorflow.keras.preprocessing import image as kerasimage
from nova_data_generator import DataGenerator
from PIL import Image
# from numpy.random import seed
# seed(1234)
# from tensorflow import set_random_seed
# set_random_seed(1234)
#interface
def getModelType(types, opts, vars):
    """Map the ``is_regression`` option onto the corresponding model type."""
    if opts["is_regression"]:
        return types.REGRESSION
    return types.CLASSIFICATION
def getOptions(opts, vars):
    """Populate ``vars`` (runtime state) and ``opts`` (defaults) in place.

    All defaults set here can be overwritten by the ``conf`` dict of the
    network module (see set_opts_from_config).
    """
    try:
        # runtime state filled in later by train()/load()
        vars['x'] = None
        vars['y'] = None
        vars['session'] = None
        vars['model'] = None
        vars['model_id'] = "."
        vars['monitor'] = ""
        '''Setting the default options. All options can be overwritten by adding them to the conf-dictionary in the same file as the network'''
        opts['network'] = ''
        opts['experiment_id'] = ''
        opts['is_regression'] = True
        opts['n_fp'] = 1
        opts['loss_function'] = 'mse'
        opts['optimizer'] = 'adam'
        opts['metrics'] = ['mse']
        opts['lr'] = 0.0001
        opts['batch_size'] = 32
        opts['n_epoch'] = 10
        # input image geometry; 0 means "not configured yet"
        opts['image_width'] = 0
        opts['image_height'] = 0
        opts['n_channels'] = 1
        opts['shuffle'] = True
        opts['max_queue_size'] = 20
        opts['workers'] = 4
        opts['batch_size_train'] = 32
        opts['batch_size_val'] = 32
        opts['data_path_train'] = ''
        opts['data_path_val'] = ''
        # Keras ImageDataGenerator-style augmentation parameters
        opts['datagen_rescale'] = 1./255
        opts['datagen_rotation_range'] = 20
        opts['datagen_width_shift_range'] = 0.2
        opts['datagen_height_shift_range'] = 0.2
    except Exception as e:
        print_exception(e, 'getOptions')
        sys.exit()
def train(data, label_score, opts, vars):
    """Build and train the Keras model named by ``opts['network']``.

    The network module must expose a ``conf`` dict (merged into opts) and a
    ``getModel(shape, n_classes)`` factory. The best checkpoint and the
    trained model handle are stored in ``vars``.
    """
    try:
        # the network module is resolved by name at runtime
        module = __import__(opts['network'])
        set_opts_from_config(opts, module.conf)

        n_input = opts['image_width'] * opts['image_height'] * opts['n_channels']

        if not opts['is_regression']:
            #adding one output for the restclass
            n_output = int(max(label_score)+1)
            vars['monitor'] = "accuracy"
        else:
            n_output = 1
            vars['monitor'] = "mean_squared_error"

        # callbacks
        log_path, ckp_path = get_paths()
        experiment_id = opts['experiment_id'] if opts['experiment_id'] else opts['network']
        print('Checkpoint dir: {}\nLogdir: {}\nExperiment ID: {}'.format(ckp_path, log_path, experiment_id))

        tensorboard = TensorBoard(
            log_dir=os.path.join(log_path, experiment_id),
            write_graph=True,
            write_images=True,
            update_freq='batch')

        # keeps only the best model (by vars['monitor']) on disk
        checkpoint = ModelCheckpoint(
            filepath = os.path.join(ckp_path, experiment_id + '.trainer.PythonModel.model.h5'),
            monitor=vars['monitor'],
            verbose=1,
            save_best_only=True,
            save_weights_only=False,
            mode='auto',
            period=1)

        callbacklist = [tensorboard, checkpoint]

        # data
        # NOTE(review): ``data`` and the label values are apparently consumed
        # by DataGenerator via the database, not passed here — confirm.
        training_generator = DataGenerator(dim=(opts['image_width'], opts['image_height']), n_channels=opts['n_channels'] ,batch_size=opts['batch_size'], n_classes=n_output)

        # model
        print(opts['image_width'], opts['image_height'], opts['n_channels'])
        model = module.getModel(shape=(opts['image_width'], opts['image_height'], opts['n_channels']), n_classes=n_output)
        model.compile(optimizer=opts['optimizer'], loss=opts['loss_function'], metrics=opts['metrics'])
        print(model.summary())

        model.fit_generator(generator=training_generator,
                            shuffle=opts['shuffle'],
                            workers=opts['workers'],
                            max_queue_size=opts['max_queue_size'],
                            verbose=1,
                            epochs=opts['n_epoch'],
                            callbacks=callbacklist)

        # setting variables
        vars['model_id'] = experiment_id
        vars['model'] = model
    except Exception as e:
        print_exception(e, 'train')
        sys.exit()
def forward(data, probs_or_score, opts, vars):
    """Run one prediction on a single frame and fill ``probs_or_score``.

    Returns the maximum score on success, or 1 when no trained model is
    available. Requires load()/train() to have populated ``vars``.
    """
    try:
        model = vars['model']
        sess = vars['session']
        graph = vars['graph']
        if model and sess and graph:
            n_output = len(probs_or_score)
            npdata = np.asarray(data)
            img = Image.fromarray(npdata)
            # channel swap: assumes the incoming frame is BGR (OpenCV-style)
            # and the model was trained on RGB — TODO confirm with caller.
            b, g, r = img.split()
            img = Image.merge("RGB", (r, g, b))
            # NOTE(review): PIL.resize takes (width, height); passing
            # (image_height, image_width) only matches for square inputs.
            x = img.resize((opts['image_height'], opts['image_width']))
            x = kerasimage.img_to_array(x)
            x = np.expand_dims(x, axis=0)
            # same rescaling as used during training
            x = x*(1./255)
            with sess.as_default():
                with graph.as_default():
                    pred = model.predict(x, batch_size=1, verbose=0)
            #sanity_check(probs_or_score)
            for i in range(len(pred[0])):
                probs_or_score[i] = pred[0][i]
            return max(probs_or_score)
        else:
            print('Train model first')
            return 1
    except Exception as e:
        print_exception(e, 'forward')
        sys.exit()
def save(path, opts, vars):
    """Persist the trained model and the scripts needed to reload it.

    Moves the best checkpoint next to ``path`` and copies the interface,
    helper and network scripts into the destination directory.
    """
    try:
        # save model
        _, ckp_path = get_paths()
        model_path = path + '.' + opts['network']
        print('Move best checkpoint to ' + model_path + '.h5')
        shutil.move(os.path.join(ckp_path, vars['model_id'] + '.trainer.PythonModel.model.h5'), model_path + '.h5')

        # copy scripts
        src_dir = os.path.dirname(os.path.realpath(__file__))
        dst_dir = os.path.dirname(path)

        print('copy scripts from \'' + src_dir + '\' to \'' + dst_dir + '\'')
        srcFiles = os.listdir(src_dir)
        for fileName in srcFiles:
            full_file_name = os.path.join(src_dir, fileName)
            # helper scripts are copied verbatim ...
            if os.path.isfile(full_file_name) and (fileName.endswith('interface.py') or fileName.endswith('customlayer.py') or fileName.endswith('nova_data_generator.py') or fileName.endswith('db_handler.py')):
                shutil.copy(full_file_name, dst_dir)
            # ... while the network definition is renamed to match the model
            elif os.path.isfile(full_file_name) and fileName.endswith(opts['network']+'.py'):
                shutil.copy(full_file_name, os.path.join(dst_dir, model_path + '.py' ))
    except Exception as e:
        print_exception(e, 'save')
        sys.exit()
def load(path, opts, vars):
    """Load a trained Keras model from disk into a fresh TF1 session/graph.

    Stores the session, graph and model in ``vars`` for later forward()
    calls, and runs one dummy prediction to warm up the predict function.
    """
    try:
        # BUG FIX: the module-level ``import tensorflow as tf`` is commented
        # out at the top of this file, so ``tf`` was undefined here and this
        # function raised NameError. Import it locally instead.
        import tensorflow as tf

        print('\nLoading model\nCreating session and graph')
        server = tf.train.Server.create_local_server()
        sess = tf.Session(server.target)
        graph = tf.get_default_graph()
        backend.set_session(sess)

        model_path = path + '.' + opts['network'] + '.h5'
        print('Loading model from {}'.format(model_path))
        model = load_model(model_path)

        print('Create prediction function')
        model._make_predict_function()
        # Warm-up: run one dummy prediction inside the stored graph/session
        # so later forward() calls are thread-safe and fast.
        with graph.as_default():
            with sess.as_default():
                input_shape = list(model.layers[0].input_shape)
                input_shape[0] = 1
                model.predict(np.zeros(tuple(input_shape)))

        vars['graph'] = graph
        vars['session'] = sess
        vars['model'] = model
    except Exception as e:
        print_exception(e, 'load')
        sys.exit()
# helper functions
def convert_to_one_hot(label, n_classes):
    """Return a one-hot numpy vector of length *n_classes* for *label*."""
    encoded = np.zeros(n_classes)
    encoded[int(label)] = 1.0
    return encoded
def print_exception(exception, function):
    """Print a short diagnostic for *exception* raised inside *function*.

    Should be called from an ``except`` block so that ``sys.exc_info()``
    still refers to the active exception and a traceback is available.
    """
    exc_type, exc_obj, exc_tb = sys.exc_info()
    if exc_tb is None:
        # No active exception context (called outside an except block):
        # the original code would crash here; print a minimal message instead.
        print('Exception in {}: {}'.format(function, exception))
        return
    # Bug fix: os.path.split() returns a (head, tail) tuple; the intended
    # file name is the tail component.
    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
    print('Exception in {}: {} \nType: {} Fname: {} LN: {} '.format(function, exception, exc_type, fname, exc_tb.tb_lineno))
def set_opts_from_config(opts, conf):
    """Merge every entry of *conf* into *opts* (mutated in place) and
    pretty-print the resulting options."""
    # dict.update does exactly what the former key-by-key loop did.
    opts.update(conf)
    print('\nOptions have been set to:\n')  # fixed typo: was "haven been"
    pprint.pprint(opts)
    print('\n')
# checking the input for corrupted values
def sanity_check(x):
    """Warn on stdout if *x* contains NaN or infinite entries."""
    values = np.asarray(x)
    if np.isnan(values).any():
        print('At least one input is not a number!')
    if np.isinf(values).any():
        print('At least one input is inf!')
# retreives the paths for log and checkpoint directories. paths are created if they do not exist
def get_paths():
    """Return ``(log_path, ckp_path)`` next to this file, creating each
    directory on first use."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    ckp_path = os.path.join(base_dir, 'checkpoints')
    log_path = os.path.join(base_dir, 'logs')
    # Create missing folders; checkpoint folder first, as before.
    for folder, label in ((ckp_path, 'checkpoint'), (log_path, 'log')):
        if not os.path.exists(folder):
            os.makedirs(folder)
            print('Created {} folder: {}'.format(label, folder))
    return (log_path, ckp_path)
| gpl-3.0 |
viniciusgama/blog_gae | django/contrib/gis/geos/prototypes/topology.py | 311 | 2226 | """
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
# Public API of this module; extended at the bottom when GEOS 3.1+ features
# (cascaded union) are available.
__all__ = ['geos_boundary', 'geos_buffer', 'geos_centroid', 'geos_convexhull',
           'geos_difference', 'geos_envelope', 'geos_intersection',
           'geos_linemerge', 'geos_pointonsurface', 'geos_preservesimplify',
           'geos_simplify', 'geos_symdifference', 'geos_union', 'geos_relate']
from ctypes import c_char_p, c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
def topology(func, *args):
    "For GEOS unary topology functions."
    # Every topology routine takes a geometry pointer first; any extra
    # ctypes argument types follow it.
    signature = [GEOM_PTR]
    signature.extend(args)
    func.argtypes = signature
    func.restype = GEOM_PTR
    func.errcheck = check_geom
    return func
### Topology Routines ###
# Each routine wraps a GEOS C function via topology(): the first argument is
# always a geometry pointer, extra ctypes argtypes are listed explicitly, and
# the result is a new geometry pointer.
geos_boundary = topology(GEOSFunc('GEOSBoundary'))
geos_buffer = topology(GEOSFunc('GEOSBuffer'), c_double, c_int)
geos_centroid = topology(GEOSFunc('GEOSGetCentroid'))
geos_convexhull = topology(GEOSFunc('GEOSConvexHull'))
geos_difference = topology(GEOSFunc('GEOSDifference'), GEOM_PTR)
geos_envelope = topology(GEOSFunc('GEOSEnvelope'))
geos_intersection = topology(GEOSFunc('GEOSIntersection'), GEOM_PTR)
geos_linemerge = topology(GEOSFunc('GEOSLineMerge'))
geos_preservesimplify = topology(GEOSFunc('GEOSTopologyPreserveSimplify'), c_double)
geos_pointonsurface = topology(GEOSFunc('GEOSPointOnSurface'))
geos_simplify = topology(GEOSFunc('GEOSSimplify'), c_double)
geos_symdifference = topology(GEOSFunc('GEOSSymDifference'), GEOM_PTR)
geos_union = topology(GEOSFunc('GEOSUnion'), GEOM_PTR)
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFunc('GEOSRelate')
geos_relate.argtypes = [GEOM_PTR, GEOM_PTR]
geos_relate.restype = geos_char_p
geos_relate.errcheck = check_string
# Routines only in GEOS 3.1+
if GEOS_PREPARE:
    geos_cascaded_union = GEOSFunc('GEOSUnionCascaded')
    geos_cascaded_union.argtypes = [GEOM_PTR]
    geos_cascaded_union.restype = GEOM_PTR
    __all__.append('geos_cascaded_union')
| bsd-3-clause |
kdmurray91/ooni-probe | ooni/tests/test_nettest.py | 5 | 11006 | import os
from tempfile import mkstemp
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.python.usage import UsageError
from ooni.settings import config
from ooni.errors import MissingRequiredOption, OONIUsageError, IncoherentOptions
from ooni.nettest import NetTest, NetTestLoader
from ooni.director import Director
from ooni.tests.bases import ConfigTestCase
net_test_string = """
from twisted.python import usage
from ooni.nettest import NetTestCase
class UsageOptions(usage.Options):
optParameters = [['spam', 's', None, 'ham']]
class DummyTestCase(NetTestCase):
usageOptions = UsageOptions
def test_a(self):
self.report['bar'] = 'bar'
def test_b(self):
self.report['foo'] = 'foo'
"""
double_net_test_string = """
from twisted.python import usage
from ooni.nettest import NetTestCase
class UsageOptions(usage.Options):
optParameters = [['spam', 's', None, 'ham']]
class DummyTestCaseA(NetTestCase):
usageOptions = UsageOptions
def test_a(self):
self.report['bar'] = 'bar'
class DummyTestCaseB(NetTestCase):
usageOptions = UsageOptions
def test_b(self):
self.report['foo'] = 'foo'
"""
double_different_options_net_test_string = """
from twisted.python import usage
from ooni.nettest import NetTestCase
class UsageOptionsA(usage.Options):
optParameters = [['spam', 's', None, 'ham']]
class UsageOptionsB(usage.Options):
optParameters = [['spam', 's', None, 'ham']]
class DummyTestCaseA(NetTestCase):
usageOptions = UsageOptionsA
def test_a(self):
self.report['bar'] = 'bar'
class DummyTestCaseB(NetTestCase):
usageOptions = UsageOptionsB
def test_b(self):
self.report['foo'] = 'foo'
"""
net_test_root_required = net_test_string + """
requiresRoot = True
"""
net_test_string_with_file = """
from twisted.python import usage
from ooni.nettest import NetTestCase
class UsageOptions(usage.Options):
optParameters = [['spam', 's', None, 'ham']]
class DummyTestCase(NetTestCase):
inputFile = ['file', 'f', None, 'The input File']
usageOptions = UsageOptions
def test_a(self):
self.report['bar'] = 'bar'
def test_b(self):
self.report['foo'] = 'foo'
"""
net_test_string_with_required_option = """
from twisted.python import usage
from ooni.nettest import NetTestCase
class UsageOptions(usage.Options):
optParameters = [['spam', 's', None, 'ham'],
['foo', 'o', None, 'moo'],
['bar', 'o', None, 'baz'],
]
class DummyTestCase(NetTestCase):
inputFile = ['file', 'f', None, 'The input File']
requiredOptions = ['foo', 'bar']
usageOptions = UsageOptions
def test_a(self):
self.report['bar'] = 'bar'
def test_b(self):
self.report['foo'] = 'foo'
"""
http_net_test = """
from twisted.internet import defer
from twisted.python import usage, failure
from ooni.utils import log
from ooni.utils.net import userAgents
from ooni.templates import httpt
from ooni.errors import failureToString, handleAllFailures
class UsageOptions(usage.Options):
optParameters = [
['url', 'u', None, 'Specify a single URL to test.'],
]
class HTTPBasedTest(httpt.HTTPTest):
usageOptions = UsageOptions
def test_get(self):
return self.doRequest(self.localOptions['url'], method="GET",
use_tor=False)
"""
dummyInputs = range(1)
dummyArgs = ('--spam', 'notham')
dummyOptions = {'spam': 'notham'}
dummyInvalidArgs = ('--cram', 'jam')
dummyInvalidOptions = {'cram': 'jam'}
dummyArgsWithRequiredOptions = ('--foo', 'moo', '--bar', 'baz')
dummyRequiredOptions = {'foo': 'moo', 'bar': 'baz'}
dummyArgsWithFile = ('--spam', 'notham', '--file', 'dummyInputFile.txt')
dummyInputFile = 'dummyInputFile.txt'
class TestNetTest(unittest.TestCase):
    """Unit tests for NetTestLoader / NetTest option parsing and inputs."""
    timeout = 1

    def setUp(self):
        # Write a 10-line input file that file-based net tests will consume.
        self.filename = ""
        with open(dummyInputFile, 'w') as f:
            for i in range(10):
                f.write("%s\n" % i)

    def tearDown(self):
        os.remove(dummyInputFile)
        if self.filename != "":
            os.remove(self.filename)

    def assertCallable(self, thing):
        self.assertIn('__call__', dir(thing))

    def verifyMethods(self, testCases):
        """Check that every loaded test method is callable and that the set
        of method names is exactly {test_a, test_b}."""
        uniq_test_methods = set()
        for test_class, test_methods in testCases:
            instance = test_class()
            for test_method in test_methods:
                c = getattr(instance, test_method)
                self.assertCallable(c)
                uniq_test_methods.add(test_method)
        self.assertEqual(set(['test_a', 'test_b']), uniq_test_methods)

    def verifyClasses(self, test_cases, control_classes):
        # Compare the set of loaded test-case class names with the expected set.
        actual_classes = set()
        for test_class, test_methods in test_cases:
            actual_classes.add(test_class.__name__)
        self.assertEqual(actual_classes, control_classes)

    def test_load_net_test_from_file(self):
        """
        Given a file verify that the net test cases are properly
        generated.
        """
        __, net_test_file = mkstemp()
        with open(net_test_file, 'w') as f:
            f.write(net_test_string)
            f.close()
        ntl = NetTestLoader(dummyArgs)
        ntl.loadNetTestFile(net_test_file)
        self.verifyMethods(ntl.testCases)
        os.unlink(net_test_file)

    def test_load_net_test_from_str(self):
        """
        Given a file like object verify that the net test cases are properly
        generated.
        """
        ntl = NetTestLoader(dummyArgs)
        ntl.loadNetTestString(net_test_string)
        self.verifyMethods(ntl.testCases)

    def test_load_net_test_multiple(self):
        # Two test case classes sharing one UsageOptions must load cleanly.
        ntl = NetTestLoader(dummyArgs)
        ntl.loadNetTestString(double_net_test_string)
        self.verifyMethods(ntl.testCases)
        self.verifyClasses(ntl.testCases, set(('DummyTestCaseA', 'DummyTestCaseB')))
        ntl.checkOptions()

    def test_load_net_test_multiple_different_options(self):
        # Distinct UsageOptions classes across test cases are incoherent.
        ntl = NetTestLoader(dummyArgs)
        ntl.loadNetTestString(double_different_options_net_test_string)
        self.verifyMethods(ntl.testCases)
        self.verifyClasses(ntl.testCases, set(('DummyTestCaseA', 'DummyTestCaseB')))
        self.assertRaises(IncoherentOptions, ntl.checkOptions)

    def test_load_with_option(self):
        ntl = NetTestLoader(dummyArgs)
        ntl.loadNetTestString(net_test_string)
        self.assertIsInstance(ntl, NetTestLoader)
        for test_klass, test_meth in ntl.testCases:
            for option in dummyOptions.keys():
                self.assertIn(option, test_klass.usageOptions())

    def test_load_with_invalid_option(self):
        ntl = NetTestLoader(dummyInvalidArgs)
        ntl.loadNetTestString(net_test_string)
        self.assertRaises(UsageError, ntl.checkOptions)
        self.assertRaises(OONIUsageError, ntl.checkOptions)

    def test_load_with_required_option(self):
        ntl = NetTestLoader(dummyArgsWithRequiredOptions)
        ntl.loadNetTestString(net_test_string_with_required_option)
        self.assertIsInstance(ntl, NetTestLoader)

    def test_load_with_missing_required_option(self):
        ntl = NetTestLoader(dummyArgs)
        ntl.loadNetTestString(net_test_string_with_required_option)
        self.assertRaises(MissingRequiredOption, ntl.checkOptions)

    def test_net_test_inputs(self):
        ntl = NetTestLoader(dummyArgsWithFile)
        ntl.loadNetTestString(net_test_string_with_file)
        ntl.checkOptions()
        nt = NetTest(ntl, None)
        nt.initializeInputProcessor()
        # XXX: if you use the same test_class twice you will have consumed all
        # of its inputs!
        tested = set([])
        for test_class, test_method in ntl.testCases:
            if test_class not in tested:
                tested.update([test_class])
                self.assertEqual(len(list(test_class.inputs)), 10)

    def test_setup_local_options_in_test_cases(self):
        ntl = NetTestLoader(dummyArgs)
        ntl.loadNetTestString(net_test_string)
        ntl.checkOptions()
        for test_class, test_method in ntl.testCases:
            self.assertEqual(test_class.localOptions, dummyOptions)

    def test_generate_measurements_size(self):
        # 10 inputs x 2 test methods = 20 measurements.
        ntl = NetTestLoader(dummyArgsWithFile)
        ntl.loadNetTestString(net_test_string_with_file)
        ntl.checkOptions()
        net_test = NetTest(ntl, None)
        net_test.initializeInputProcessor()
        measurements = list(net_test.generateMeasurements())
        self.assertEqual(len(measurements), 20)

    def test_net_test_completed_callback(self):
        ntl = NetTestLoader(dummyArgsWithFile)
        ntl.loadNetTestString(net_test_string_with_file)
        ntl.checkOptions()
        director = Director()
        self.filename = 'dummy_report.yamloo'
        d = director.startNetTest(ntl, self.filename)

        @d.addCallback
        def complete(result):
            self.assertEqual(result, None)
            self.assertEqual(director.successfulMeasurements, 20)
        return d

    def test_require_root_succeed(self):
        # XXX: will require root to run
        ntl = NetTestLoader(dummyArgs)
        ntl.loadNetTestString(net_test_root_required)
        for test_class, method in ntl.testCases:
            self.assertTrue(test_class.requiresRoot)
class TestNettestTimeout(ConfigTestCase):
    """Runs an HTTP net test against a server that never answers, to check
    that measurements time out and are counted as failed."""

    @defer.inlineCallbacks
    def setUp(self):
        super(TestNettestTimeout, self).setUp()
        from twisted.internet.protocol import Protocol, Factory
        from twisted.internet.endpoints import TCP4ServerEndpoint

        # A protocol that accepts connections but never replies, so any
        # HTTP request against it will hang until the measurement timeout.
        class DummyProtocol(Protocol):
            def dataReceived(self, data):
                pass

        class DummyFactory(Factory):
            def __init__(self):
                self.protocols = []

            def buildProtocol(self, addr):
                proto = DummyProtocol()
                self.protocols.append(proto)
                return proto

            def stopFactory(self):
                for proto in self.protocols:
                    proto.transport.loseConnection()

        self.factory = DummyFactory()
        endpoint = TCP4ServerEndpoint(reactor, 8007)
        self.port = yield endpoint.listen(self.factory)
        # Short timeout so the test completes quickly.
        config.advanced.measurement_timeout = 2

    def tearDown(self):
        super(TestNettestTimeout, self).tearDown()
        self.factory.stopFactory()
        self.port.stopListening()
        os.remove(self.filename)

    def test_nettest_timeout(self):
        ntl = NetTestLoader(('-u', 'http://localhost:8007/'))
        ntl.loadNetTestString(http_net_test)
        ntl.checkOptions()
        director = Director()
        self.filename = 'dummy_report.yamloo'
        d = director.startNetTest(ntl, self.filename)

        @d.addCallback
        def complete(result):
            assert director.failedMeasurements == 1
        return d
| bsd-2-clause |
chirilo/mozillians | vendor-local/lib/python/unidecode/x05d.py | 252 | 4670 | data = (
'Lang ', # 0x00
'Kan ', # 0x01
'Lao ', # 0x02
'Lai ', # 0x03
'Xian ', # 0x04
'Que ', # 0x05
'Kong ', # 0x06
'Chong ', # 0x07
'Chong ', # 0x08
'Ta ', # 0x09
'Lin ', # 0x0a
'Hua ', # 0x0b
'Ju ', # 0x0c
'Lai ', # 0x0d
'Qi ', # 0x0e
'Min ', # 0x0f
'Kun ', # 0x10
'Kun ', # 0x11
'Zu ', # 0x12
'Gu ', # 0x13
'Cui ', # 0x14
'Ya ', # 0x15
'Ya ', # 0x16
'Gang ', # 0x17
'Lun ', # 0x18
'Lun ', # 0x19
'Leng ', # 0x1a
'Jue ', # 0x1b
'Duo ', # 0x1c
'Zheng ', # 0x1d
'Guo ', # 0x1e
'Yin ', # 0x1f
'Dong ', # 0x20
'Han ', # 0x21
'Zheng ', # 0x22
'Wei ', # 0x23
'Yao ', # 0x24
'Pi ', # 0x25
'Yan ', # 0x26
'Song ', # 0x27
'Jie ', # 0x28
'Beng ', # 0x29
'Zu ', # 0x2a
'Jue ', # 0x2b
'Dong ', # 0x2c
'Zhan ', # 0x2d
'Gu ', # 0x2e
'Yin ', # 0x2f
'[?] ', # 0x30
'Ze ', # 0x31
'Huang ', # 0x32
'Yu ', # 0x33
'Wei ', # 0x34
'Yang ', # 0x35
'Feng ', # 0x36
'Qiu ', # 0x37
'Dun ', # 0x38
'Ti ', # 0x39
'Yi ', # 0x3a
'Zhi ', # 0x3b
'Shi ', # 0x3c
'Zai ', # 0x3d
'Yao ', # 0x3e
'E ', # 0x3f
'Zhu ', # 0x40
'Kan ', # 0x41
'Lu ', # 0x42
'Yan ', # 0x43
'Mei ', # 0x44
'Gan ', # 0x45
'Ji ', # 0x46
'Ji ', # 0x47
'Huan ', # 0x48
'Ting ', # 0x49
'Sheng ', # 0x4a
'Mei ', # 0x4b
'Qian ', # 0x4c
'Wu ', # 0x4d
'Yu ', # 0x4e
'Zong ', # 0x4f
'Lan ', # 0x50
'Jue ', # 0x51
'Yan ', # 0x52
'Yan ', # 0x53
'Wei ', # 0x54
'Zong ', # 0x55
'Cha ', # 0x56
'Sui ', # 0x57
'Rong ', # 0x58
'Yamashina ', # 0x59
'Qin ', # 0x5a
'Yu ', # 0x5b
'Kewashii ', # 0x5c
'Lou ', # 0x5d
'Tu ', # 0x5e
'Dui ', # 0x5f
'Xi ', # 0x60
'Weng ', # 0x61
'Cang ', # 0x62
'Dang ', # 0x63
'Hong ', # 0x64
'Jie ', # 0x65
'Ai ', # 0x66
'Liu ', # 0x67
'Wu ', # 0x68
'Song ', # 0x69
'Qiao ', # 0x6a
'Zi ', # 0x6b
'Wei ', # 0x6c
'Beng ', # 0x6d
'Dian ', # 0x6e
'Cuo ', # 0x6f
'Qian ', # 0x70
'Yong ', # 0x71
'Nie ', # 0x72
'Cuo ', # 0x73
'Ji ', # 0x74
'[?] ', # 0x75
'Tao ', # 0x76
'Song ', # 0x77
'Zong ', # 0x78
'Jiang ', # 0x79
'Liao ', # 0x7a
'Kang ', # 0x7b
'Chan ', # 0x7c
'Die ', # 0x7d
'Cen ', # 0x7e
'Ding ', # 0x7f
'Tu ', # 0x80
'Lou ', # 0x81
'Zhang ', # 0x82
'Zhan ', # 0x83
'Zhan ', # 0x84
'Ao ', # 0x85
'Cao ', # 0x86
'Qu ', # 0x87
'Qiang ', # 0x88
'Zui ', # 0x89
'Zui ', # 0x8a
'Dao ', # 0x8b
'Dao ', # 0x8c
'Xi ', # 0x8d
'Yu ', # 0x8e
'Bo ', # 0x8f
'Long ', # 0x90
'Xiang ', # 0x91
'Ceng ', # 0x92
'Bo ', # 0x93
'Qin ', # 0x94
'Jiao ', # 0x95
'Yan ', # 0x96
'Lao ', # 0x97
'Zhan ', # 0x98
'Lin ', # 0x99
'Liao ', # 0x9a
'Liao ', # 0x9b
'Jin ', # 0x9c
'Deng ', # 0x9d
'Duo ', # 0x9e
'Zun ', # 0x9f
'Jiao ', # 0xa0
'Gui ', # 0xa1
'Yao ', # 0xa2
'Qiao ', # 0xa3
'Yao ', # 0xa4
'Jue ', # 0xa5
'Zhan ', # 0xa6
'Yi ', # 0xa7
'Xue ', # 0xa8
'Nao ', # 0xa9
'Ye ', # 0xaa
'Ye ', # 0xab
'Yi ', # 0xac
'E ', # 0xad
'Xian ', # 0xae
'Ji ', # 0xaf
'Xie ', # 0xb0
'Ke ', # 0xb1
'Xi ', # 0xb2
'Di ', # 0xb3
'Ao ', # 0xb4
'Zui ', # 0xb5
'[?] ', # 0xb6
'Ni ', # 0xb7
'Rong ', # 0xb8
'Dao ', # 0xb9
'Ling ', # 0xba
'Za ', # 0xbb
'Yu ', # 0xbc
'Yue ', # 0xbd
'Yin ', # 0xbe
'[?] ', # 0xbf
'Jie ', # 0xc0
'Li ', # 0xc1
'Sui ', # 0xc2
'Long ', # 0xc3
'Long ', # 0xc4
'Dian ', # 0xc5
'Ying ', # 0xc6
'Xi ', # 0xc7
'Ju ', # 0xc8
'Chan ', # 0xc9
'Ying ', # 0xca
'Kui ', # 0xcb
'Yan ', # 0xcc
'Wei ', # 0xcd
'Nao ', # 0xce
'Quan ', # 0xcf
'Chao ', # 0xd0
'Cuan ', # 0xd1
'Luan ', # 0xd2
'Dian ', # 0xd3
'Dian ', # 0xd4
'[?] ', # 0xd5
'Yan ', # 0xd6
'Yan ', # 0xd7
'Yan ', # 0xd8
'Nao ', # 0xd9
'Yan ', # 0xda
'Chuan ', # 0xdb
'Gui ', # 0xdc
'Chuan ', # 0xdd
'Zhou ', # 0xde
'Huang ', # 0xdf
'Jing ', # 0xe0
'Xun ', # 0xe1
'Chao ', # 0xe2
'Chao ', # 0xe3
'Lie ', # 0xe4
'Gong ', # 0xe5
'Zuo ', # 0xe6
'Qiao ', # 0xe7
'Ju ', # 0xe8
'Gong ', # 0xe9
'Kek ', # 0xea
'Wu ', # 0xeb
'Pwu ', # 0xec
'Pwu ', # 0xed
'Chai ', # 0xee
'Qiu ', # 0xef
'Qiu ', # 0xf0
'Ji ', # 0xf1
'Yi ', # 0xf2
'Si ', # 0xf3
'Ba ', # 0xf4
'Zhi ', # 0xf5
'Zhao ', # 0xf6
'Xiang ', # 0xf7
'Yi ', # 0xf8
'Jin ', # 0xf9
'Xun ', # 0xfa
'Juan ', # 0xfb
'Phas ', # 0xfc
'Xun ', # 0xfd
'Jin ', # 0xfe
'Fu ', # 0xff
)
| bsd-3-clause |
haxwithaxe/qutebrowser | tests/unit/misc/test_msgbox.py | 4 | 2768 | # Copyright 2015-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.misc.msgbox."""
import sys
import pytest
from qutebrowser.misc import msgbox
from PyQt5.QtCore import pyqtSlot, Qt
from PyQt5.QtWidgets import QMessageBox, QWidget
def test_attributes(qtbot):
    """Test basic QMessageBox attributes."""
    title = 'title'
    text = 'text'
    parent = QWidget()
    qtbot.add_widget(parent)
    icon = QMessageBox.Critical
    buttons = QMessageBox.Ok | QMessageBox.Cancel
    box = msgbox.msgbox(parent=parent, title=title, text=text, icon=icon,
                        buttons=buttons)
    qtbot.add_widget(box)
    # The window title is not applied to message boxes on macOS.
    if sys.platform != 'darwin':
        assert box.windowTitle() == title
    assert box.icon() == icon
    assert box.standardButtons() == buttons
    assert box.text() == text
    assert box.parent() is parent
@pytest.mark.parametrize('plain_text, expected', [
    (True, Qt.PlainText),
    (False, Qt.RichText),
    (None, Qt.AutoText),  # None leaves the format auto-detected by Qt
])
def test_plain_text(qtbot, plain_text, expected):
    """The plain_text argument should map onto the matching Qt text format."""
    box = msgbox.msgbox(parent=None, title='foo', text='foo',
                        icon=QMessageBox.Information, plain_text=plain_text)
    qtbot.add_widget(box)
    assert box.textFormat() == expected
def test_finished_signal(qtbot):
    """Make sure we can pass a slot to be called when the dialog finished."""
    signal_triggered = False

    @pyqtSlot()
    def on_finished():
        nonlocal signal_triggered
        signal_triggered = True

    box = msgbox.msgbox(parent=None, title='foo', text='foo',
                        icon=QMessageBox.Information, on_finished=on_finished)
    qtbot.add_widget(box)
    # Accepting the dialog emits finished(), which should invoke our slot.
    with qtbot.waitSignal(box.finished):
        box.accept()
    assert signal_triggered
def test_information(qtbot):
    """msgbox.information() should preset the information icon."""
    box = msgbox.information(parent=None, title='foo', text='bar')
    qtbot.add_widget(box)
    # The window title is not applied to message boxes on macOS.
    if sys.platform != 'darwin':
        assert box.windowTitle() == 'foo'
    assert box.text() == 'bar'
    assert box.icon() == QMessageBox.Information
| gpl-3.0 |
Daniel-CA/odoo | addons/l10n_be_intrastat/wizard/xml_decl.py | 32 | 17798 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2014-2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import xml.etree.ElementTree as ET
from collections import namedtuple
from datetime import datetime
from openerp import exceptions, SUPERUSER_ID, tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
# XML namespace required by the NBB OneGate Intrastat declaration schema.
INTRASTAT_XMLNS = 'http://www.onegate.eu/2010-01-01'
class xml_decl(osv.TransientModel):
    """
    Intrastat XML Declaration
    """
    _name = "l10n_be_intrastat_xml.xml_decl"
    _description = 'Intrastat XML Declaration'

    def _get_tax_code(self, cr, uid, context=None):
        # Default: the root tax chart of the current user's company.
        obj_tax_code = self.pool.get('account.tax.code')
        obj_user = self.pool.get('res.users')
        company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id
        tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id),
                                                     ('parent_id', '=', False)],
                                           context=context)
        return tax_code_ids and tax_code_ids[0] or False

    def _get_def_monthyear(self, cr, uid, context=None):
        # Returns (year, month) of "today" as zero-padded strings.
        td = datetime.strptime(fields.date.context_today(self, cr, uid, context=context),
                               tools.DEFAULT_SERVER_DATE_FORMAT).date()
        return td.strftime('%Y'), td.strftime('%m')

    def _get_def_month(self, cr, uid, context=None):
        return self._get_def_monthyear(cr, uid, context=context)[1]

    def _get_def_year(self, cr, uid, context=None):
        return self._get_def_monthyear(cr, uid, context=context)[0]

    _columns = {
        'name': fields.char('File Name'),
        'month': fields.selection([('01','January'), ('02','February'), ('03','March'),
                                   ('04','April'), ('05','May'), ('06','June'), ('07','July'),
                                   ('08','August'), ('09','September'), ('10','October'),
                                   ('11','November'), ('12','December')], 'Month', required=True),
        'year': fields.char('Year', size=4, required=True),
        'tax_code_id': fields.many2one('account.tax.code', 'Company Tax Chart',
                                       domain=[('parent_id', '=', False)], required=True),
        'arrivals': fields.selection([('be-exempt', 'Exempt'),
                                      ('be-standard', 'Standard'),
                                      ('be-extended', 'Extended')],
                                     'Arrivals', required=True),
        'dispatches': fields.selection([('be-exempt', 'Exempt'),
                                        ('be-standard', 'Standard'),
                                        ('be-extended', 'Extended')],
                                       'Dispatches', required=True),
        'file_save': fields.binary('Intrastat Report File', readonly=True),
        'state': fields.selection([('draft', 'Draft'), ('download', 'Download')], string="State"),
    }

    _defaults = {
        'arrivals': 'be-standard',
        'dispatches': 'be-standard',
        'name': 'intrastat.xml',
        'tax_code_id': _get_tax_code,
        'month': _get_def_month,
        'year': _get_def_year,
        'state': 'draft',
    }

    def _company_warning(self, cr, uid, translated_msg, context=None):
        """ Raise a error with custom message, asking user to configure company settings """
        xmlid_mod = self.pool['ir.model.data']
        action_id = xmlid_mod.xmlid_to_res_id(cr, uid, 'base.action_res_company_form')
        raise exceptions.RedirectWarning(
            translated_msg, action_id, _('Go to company configuration screen'))

    def create_xml(self, cr, uid, ids, context=None):
        """Creates xml that is to be exported and sent to estate for partner vat intra.
        :return: Value for next action.
        :rtype: dict
        """
        decl_datas = self.browse(cr, uid, ids[0])
        company = decl_datas.tax_code_id.company_id
        # The declaration cannot be produced without a fully configured company.
        if not (company.partner_id and company.partner_id.country_id and
                company.partner_id.country_id.id):
            self._company_warning(
                cr, uid,
                _('The country of your company is not set, '
                  'please make sure to configure it first.'),
                context=context)
        kbo = company.company_registry
        if not kbo:
            self._company_warning(
                cr, uid,
                _('The registry number of your company is not set, '
                  'please make sure to configure it first.'),
                context=context)
        if len(decl_datas.year) != 4:
            raise exceptions.Warning(_('Year must be 4 digits number (YYYY)'))
        #Create root declaration
        decl = ET.Element('DeclarationReport')
        decl.set('xmlns', INTRASTAT_XMLNS)
        #Add Administration elements
        admin = ET.SubElement(decl, 'Administration')
        fromtag = ET.SubElement(admin, 'From')
        fromtag.text = kbo
        fromtag.set('declarerType', 'KBO')
        ET.SubElement(admin, 'To').text = "NBB"
        ET.SubElement(admin, 'Domain').text = "SXX"
        # One <Report> per direction (arrivals=19, dispatches=29), standard or
        # extended layout; 'exempt' produces no report for that direction.
        if decl_datas.arrivals == 'be-standard':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=False, extendedmode=False, context=context))
        elif decl_datas.arrivals == 'be-extended':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=False, extendedmode=True, context=context))
        if decl_datas.dispatches == 'be-standard':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=True, extendedmode=False, context=context))
        elif decl_datas.dispatches == 'be-extended':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=True, extendedmode=True, context=context))
        #Get xml string with declaration
        data_file = ET.tostring(decl, encoding='UTF-8', method='xml')
        #change state of the wizard
        self.write(cr, uid, ids,
                   {'name': 'intrastat_%s%s.xml' % (decl_datas.year, decl_datas.month),
                    'file_save': base64.encodestring(data_file),
                    'state': 'download'},
                   context=context)
        return {
            'name': _('Save'),
            'context': context,
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'l10n_be_intrastat_xml.xml_decl',
            'type': 'ir.actions.act_window',
            'target': 'new',
            'res_id': ids[0],
        }

    def _get_lines(self, cr, uid, ids, decl_datas, company, dispatchmode=False,
                   extendedmode=False, context=None):
        """Build one <Report> element by aggregating the period's invoice
        lines per (transaction, country, region, commodity, ...) key."""
        intrastatcode_mod = self.pool['report.intrastat.code']
        invoiceline_mod = self.pool['account.invoice.line']
        product_mod = self.pool['product.product']
        region_mod = self.pool['l10n_be_intrastat.region']
        warehouse_mod = self.pool['stock.warehouse']
        if dispatchmode:
            mode1 = 'out_invoice'
            mode2 = 'in_refund'
            declcode = "29"
        else:
            mode1 = 'in_invoice'
            mode2 = 'out_refund'
            declcode = "19"
        decl = ET.Element('Report')
        if not extendedmode:
            decl.set('code', 'EX%sS' % declcode)
        else:
            decl.set('code', 'EX%sE' % declcode)
        decl.set('date', '%s-%s' % (decl_datas.year, decl_datas.month))
        datas = ET.SubElement(decl, 'Data')
        if not extendedmode:
            datas.set('form', 'EXF%sS' % declcode)
        else:
            datas.set('form', 'EXF%sE' % declcode)
        datas.set('close', 'true')
        # Aggregation key: one declaration line per distinct combination.
        intrastatkey = namedtuple("intrastatkey",
                                  ['EXTRF', 'EXCNT', 'EXTTA', 'EXREG',
                                   'EXGO', 'EXTPC', 'EXDELTRM'])
        entries = {}
        # Select the period's posted invoice lines for physical goods moving
        # to/from an intrastat country other than the company's own.
        sqlreq = """
            select
                inv_line.id
            from
                account_invoice_line inv_line
                join account_invoice inv on inv_line.invoice_id=inv.id
                left join res_country on res_country.id = inv.intrastat_country_id
                left join res_partner on res_partner.id = inv.partner_id
                left join res_country countrypartner on countrypartner.id = res_partner.country_id
                join product_product on inv_line.product_id=product_product.id
                join product_template on product_product.product_tmpl_id=product_template.id
                left join account_period on account_period.id=inv.period_id
            where
                inv.state in ('open','paid')
                and inv.company_id=%s
                and not product_template.type='service'
                and (res_country.intrastat=true or (inv.intrastat_country_id is null
                                                    and countrypartner.intrastat=true))
                and ((res_country.code is not null and not res_country.code=%s)
                     or (res_country.code is null and countrypartner.code is not null
                         and not countrypartner.code=%s))
                and inv.type in (%s, %s)
                and to_char(account_period.date_start, 'YYYY')=%s
                and to_char(account_period.date_start, 'MM')=%s
            """
        cr.execute(sqlreq, (company.id, company.partner_id.country_id.code,
                            company.partner_id.country_id.code, mode1, mode2,
                            decl_datas.year, decl_datas.month))
        lines = cr.fetchall()
        invoicelines_ids = [rec[0] for rec in lines]
        invoicelines = invoiceline_mod.browse(cr, uid, invoicelines_ids, context=context)
        for inv_line in invoicelines:
            #Check type of transaction
            if inv_line.invoice_id.intrastat_transaction_id:
                extta = inv_line.invoice_id.intrastat_transaction_id.code
            else:
                extta = "1"
            #Check country
            if inv_line.invoice_id.intrastat_country_id:
                excnt = inv_line.invoice_id.intrastat_country_id.code
            else:
                excnt = inv_line.invoice_id.partner_id.country_id.code
            #Check region
            #If purchase, comes from purchase order, linked to a location,
            #which is linked to the warehouse
            #if sales, the sale order is linked to the warehouse
            #if sales, from a delivery order, linked to a location,
            #which is linked to the warehouse
            #If none found, get the company one.
            exreg = None
            if inv_line.invoice_id.type in ('in_invoice', 'in_refund'):
                #comes from purchase
                POL = self.pool['purchase.order.line']
                poline_ids = POL.search(
                    cr, uid, [('invoice_lines', 'in', inv_line.id)], context=context)
                if poline_ids:
                    purchaseorder = POL.browse(cr, uid, poline_ids[0], context=context).order_id
                    region_id = warehouse_mod.get_regionid_from_locationid(
                        cr, uid, purchaseorder.location_id.id, context=context)
                    if region_id:
                        exreg = region_mod.browse(cr, uid, region_id).code
            elif inv_line.invoice_id.type in ('out_invoice', 'out_refund'):
                #comes from sales
                soline_ids = self.pool['sale.order.line'].search(
                    cr, uid, [('invoice_lines', 'in', inv_line.id)], context=context)
                if soline_ids:
                    saleorder = self.pool['sale.order.line'].browse(
                        cr, uid, soline_ids[0], context=context).order_id
                    if saleorder and saleorder.warehouse_id and saleorder.warehouse_id.region_id:
                        exreg = region_mod.browse(
                            cr, uid, saleorder.warehouse_id.region_id.id, context=context).code
            if not exreg:
                if company.region_id:
                    exreg = company.region_id.code
                else:
                    self._company_warning(
                        cr, uid,
                        _('The Intrastat Region of the selected company is not set, '
                          'please make sure to configure it first.'),
                        context=context)
            #Check commodity codes
            intrastat_id = product_mod.get_intrastat_recursively(
                cr, uid, inv_line.product_id.id, context=context)
            if intrastat_id:
                exgo = intrastatcode_mod.browse(cr, uid, intrastat_id, context=context).name
            else:
                raise exceptions.Warning(
                    _('Product "%s" has no intrastat code, please configure it') %
                    inv_line.product_id.display_name)
            #In extended mode, 2 more fields required
            if extendedmode:
                #Check means of transport
                if inv_line.invoice_id.transport_mode_id:
                    extpc = inv_line.invoice_id.transport_mode_id.code
                elif company.transport_mode_id:
                    extpc = company.transport_mode_id.code
                else:
                    self._company_warning(
                        cr, uid,
                        _('The default Intrastat transport mode of your company '
                          'is not set, please make sure to configure it first.'),
                        context=context)
                #Check incoterm
                if inv_line.invoice_id.incoterm_id:
                    exdeltrm = inv_line.invoice_id.incoterm_id.code
                elif company.incoterm_id:
                    exdeltrm = company.incoterm_id.code
                else:
                    self._company_warning(
                        cr, uid,
                        _('The default Incoterm of your company is not set, '
                          'please make sure to configure it first.'),
                        context=context)
            else:
                extpc = ""
                exdeltrm = ""
            linekey = intrastatkey(EXTRF=declcode, EXCNT=excnt,
                                   EXTTA=extta, EXREG=exreg, EXGO=exgo,
                                   EXTPC=extpc, EXDELTRM=exdeltrm)
            #We have the key
            #calculate amounts
            if inv_line.price_unit and inv_line.quantity:
                amount = inv_line.price_unit * inv_line.quantity
            else:
                amount = 0
            weight = (inv_line.product_id.weight_net or 0.0) * \
                self.pool.get('product.uom')._compute_qty(cr, uid, inv_line.uos_id.id, inv_line.quantity, inv_line.product_id.uom_id.id)
            if (not inv_line.uos_id.category_id or not inv_line.product_id.uom_id.category_id
                    or inv_line.uos_id.category_id.id != inv_line.product_id.uom_id.category_id.id):
                supply_units = inv_line.quantity
            else:
                supply_units = inv_line.quantity * inv_line.uos_id.factor
            # Accumulate (value, weight, units) per declaration key.
            amounts = entries.setdefault(linekey, (0, 0, 0))
            amounts = (amounts[0] + amount, amounts[1] + weight, amounts[2] + supply_units)
            entries[linekey] = amounts
        numlgn = 0
        for linekey in entries:
            amounts = entries[linekey]
            # Skip lines whose rounded value is zero.
            if round(amounts[0], 0) == 0:
                continue
            numlgn += 1
            item = ET.SubElement(datas, 'Item')
            self._set_Dim(item, 'EXSEQCODE', unicode(numlgn))
            self._set_Dim(item, 'EXTRF', unicode(linekey.EXTRF))
            self._set_Dim(item, 'EXCNT', unicode(linekey.EXCNT))
            self._set_Dim(item, 'EXTTA', unicode(linekey.EXTTA))
            self._set_Dim(item, 'EXREG', unicode(linekey.EXREG))
            self._set_Dim(item, 'EXTGO', unicode(linekey.EXGO))
            if extendedmode:
                self._set_Dim(item, 'EXTPC', unicode(linekey.EXTPC))
                self._set_Dim(item, 'EXDELTRM', unicode(linekey.EXDELTRM))
            # OneGate expects decimal commas, not points.
            self._set_Dim(item, 'EXTXVAL', unicode(round(amounts[0], 0)).replace(".", ","))
            self._set_Dim(item, 'EXWEIGHT', unicode(round(amounts[1], 0)).replace(".", ","))
            self._set_Dim(item, 'EXUNITS', unicode(round(amounts[2], 0)).replace(".", ","))
        if numlgn == 0:
            #no datas
            datas.set('action', 'nihil')
        return decl

    def _set_Dim(self, item, prop, value):
        # Helper: append a OneGate <Dim prop="..."> element with the given text.
        dim = ET.SubElement(item, 'Dim')
        dim.set('prop', prop)
        dim.text = value
| agpl-3.0 |
Ahmad31/Web_Flask_Cassandra | flask/lib/python2.7/site-packages/pony/orm/tests/test_lazy.py | 2 | 2091 | from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
class TestLazy(unittest.TestCase):
    """Tests for Pony ORM lazy attribute loading.

    Fix: test_lazy_3 previously used ``self.assertTrue(X.b, x1._vals_)``,
    which only asserts that ``X.b`` is truthy (the second argument is the
    failure *message*); the intended membership check is ``assertIn``.
    Membership assertions are now expressed with assertIn/assertNotIn
    throughout for consistent failure reporting.
    """
    def setUp(self):
        """Create an in-memory SQLite database with one entity whose ``b``
        attribute is declared lazy, and seed three rows."""
        self.db = Database('sqlite', ':memory:')

        class X(self.db.Entity):
            a = Required(int)
            b = Required(unicode, lazy=True)
        self.X = X
        self.db.generate_mapping(create_tables=True)
        with db_session:
            x1 = X(a=1, b='first')
            x2 = X(a=2, b='second')
            x3 = X(a=3, b='third')

    @db_session
    def test_lazy_1(self):
        """A lazy attribute is absent until first access, then loads."""
        X = self.X
        x1 = X[1]
        self.assertIn(X.a, x1._vals_)
        self.assertNotIn(X.b, x1._vals_)
        b = x1.b
        self.assertEqual(b, 'first')

    @db_session
    def test_lazy_2(self):
        """Accessing a lazy attribute loads it only for that instance."""
        X = self.X
        x1 = X[1]
        x2 = X[2]
        x3 = X[3]
        self.assertNotIn(X.b, x1._vals_)
        self.assertNotIn(X.b, x2._vals_)
        self.assertNotIn(X.b, x3._vals_)
        b = x1.b
        self.assertIn(X.b, x1._vals_)
        self.assertNotIn(X.b, x2._vals_)
        self.assertNotIn(X.b, x3._vals_)

    @db_session
    def test_lazy_3(self):  # coverage of https://github.com/ponyorm/pony/issues/49
        """A lazy attribute used as a lookup filter is loaded on the result."""
        X = self.X
        x1 = X.get(b='first')
        self.assertTrue(X._bits_[X.b] & x1._rbits_)
        # Fix: was assertTrue(X.b, x1._vals_) -- a no-op truthiness check.
        self.assertIn(X.b, x1._vals_)

    @db_session
    def test_lazy_4(self):  # coverage of https://github.com/ponyorm/pony/issues/49
        X = self.X
        result = select(x for x in X if x.b == 'first')[:]
        for x in result:
            self.assertTrue(X._bits_[X.b] & x._rbits_)
            self.assertIn(X.b, x._vals_)

    @db_session
    def test_lazy_5(self):  # coverage of https://github.com/ponyorm/pony/issues/49
        X = self.X
        result = select(x for x in X if x.b == 'first' if count() > 0)[:]
        for x in result:
            self.assertFalse(X._bits_[X.b] & x._rbits_)
            self.assertNotIn(X.b, x._vals_)
| apache-2.0 |
Bolton-and-Menk-GIS/restapi | restapi/admin/utils.py | 1 | 14423 | from __future__ import print_function
from .. import admin, has_arcpy, munch
from ..rest_utils import JsonGetter, NameEncoder
import os
import json
from six.moves import range
__all__ = ['ServerAdministrator']
if has_arcpy:
import arcpy
mapping = getattr(arcpy, 'mapping' if hasattr(arcpy, 'mapping') else '_mp')
layer_types = (mapping.Layer, getattr(mapping, 'TableView' if hasattr(mapping, 'TableView') else 'Table'))
    class AdiminstratorBase(object):
        """Admin base class (arcpy-enabled variant)."""
        @staticmethod
        def find_ws(path, ws_type='', return_type=False):
            """Finds a valid workspace path for an arcpy.da.Editor() Session.

            Args:
                path: Path to features or workspace (string or layer object).
                ws_type: Option to find specific workspace type
                    (FileSystem|LocalDatabase|RemoteDatabase). Defaults to ''.
                return_type: Optional boolean to return workspace type as well.
                    If this option is selected, a tuple of the full workspace
                    path and type are returned.

            Returns:
                A valid workspace path (or (path, workspaceType) tuple).
            """
            def find_existing(path):
                """Returns the nearest existing ancestor path (per arcpy.Exists)."""
                if arcpy.Exists(path):
                    return path
                else:
                    if not arcpy.Exists(path):
                        return find_existing(os.path.dirname(path))
            # try original path first
            if isinstance(path, layer_types):
                path = path.dataSource
            if os.sep not in str(path):
                # Not a filesystem-style path: resolve layers/datasets to a
                # catalog path via their data source or arcpy.Describe.
                if hasattr(path, 'dataSource'):
                    path = path.dataSource
                else:
                    path = arcpy.Describe(path).catalogPath
            path = find_existing(path)
            desc = arcpy.Describe(path)
            if hasattr(desc, 'workspaceType'):
                # NOTE(review): both branches below return the same thing
                # regardless of whether ws_type matched -- verify intent.
                if ws_type == desc.workspaceType:
                    if return_type:
                        return (path, desc.workspaceType)
                    else:
                        return path
                else:
                    if return_type:
                        return (path, desc.workspaceType)
                    else:
                        return path
            # search until finding a valid workspace
            path = str(path)
            # NOTE(review): under Python 3, filter() returns an iterator, so
            # split[0] assignment and len(split) below would fail -- this
            # branch presumably still targets Python 2; verify.
            split = filter(None, str(path).split(os.sep))
            if path.startswith('\\\\'):
                # restore the UNC prefix lost by splitting on os.sep
                split[0] = r'\\{0}'.format(split[0])
            # find valid workspace
            for i in range(1, len(split)):
                sub_dir = os.sep.join(split[:-i])
                desc = arcpy.Describe(sub_dir)
                if hasattr(desc, 'workspaceType'):
                    if ws_type == desc.workspaceType:
                        if return_type:
                            return (sub_dir, desc.workspaceType)
                        else:
                            return sub_dir
                    else:
                        if return_type:
                            return (sub_dir, desc.workspaceType)
                        else:
                            return sub_dir

        @staticmethod
        def form_connection_string(ws):
            """Builds a KEY=value;... connection string for a workspace.

            Esri's describe workspace connection string does not work at
            10.4 (bug???), so the string is reassembled from the described
            connection properties for SDE workspaces; other workspaces get
            a plain 'DATABASE=<path>' string.
            """
            desc = arcpy.Describe(ws)
            if 'SdeWorkspaceFactory' in desc.workspaceFactoryProgID:
                cp = desc.connectionProperties
                props = ['server', 'instance', 'database', 'version', 'authentication_mode']
                db_client = cp.instance.split(':')[1]
                con_properties = cp.server
                parts = []
                for prop in props:
                    parts.append('{}={}'.format(prop.upper(), getattr(cp, prop)))
                # DBCLIENT/DB_CONNECTION_PROPERTIES are spliced in at the
                # positions the .sde file format expects
                parts.insert(2, 'DBCLIENT={}'.format(db_client))
                parts.insert(3, 'DB_CONNECTION_PROPERTIES={}'.format(cp.server))
                return ';'.join(parts)
            else:
                return 'DATABASE=' + ws

        def stopServiceAndCompressDatabase(self, sde_loc, service_url_or_name):
            """Stops a service and compresses all SDE databases within the map
            service, then restarts it.

            Args:
                sde_loc: Location containing .sde connections.
                service_url_or_name: Full path to REST endpoint or service name.

            Returns:
                A list of the workspaces, if found.
            """
            service = self.ags.service(service_url_or_name)
            workspaces = []
            manifest = service.manifest()
            if hasattr(manifest, 'databases'):
                for db in manifest.databases:
                    # read layer xmls to find all workspaces
                    dbType = db.onServerWorkspaceFactoryProgID
                    if 'SdeWorkspaceFactory' in dbType:
                        cs = db.onServerConnectionString or db.onPremiseConnectionString
                        db_name = {k:v for k, v in iter(s.split('=') for s in cs.split(';'))}['DATABASE']
                        sde = os.path.join(sde_loc, db_name + '.sde')
                        workspaces.append(sde)
                if workspaces:
                    # stop service
                    service.stop()
                    # NOTE(review): name mangling resolves this attribute to
                    # _AdiminstratorBase__stopped_services, but the list is
                    # created in ServerAdministrator.__init__ (mangled to
                    # _ServerAdministrator__stopped_services) -- likely an
                    # AttributeError at runtime; verify.
                    self.__stopped_services.append(service)
                    print('Stopped Service...\n')
                    # compress databases
                    for ws in workspaces:
                        arcpy.management.Compress(ws)
                    # start service
                    service.start()
                    self.__started_services.append(service)
                    print('\nStarted Service')
                return workspaces
else:
class AdiminstratorBase(object):
@staticmethod
def find_ws(path, ws_type='', return_type=False):
"""Finds a valid workspace path for an arcpy.da.Editor() Session.
Args:
path: Path to features or workspace.
Args:*
ws_type: Option to find specific workspace type
(FileSystem|LocalDatabase|RemoteDatabase). Defaults to ''.
return_type: Optional boolean to return workspace type as well.
If this option is True, a tuple of the full workspace
path and type are returned. Defaults to False.
Returns:
A valid workspace.
"""
if os.path.splitext(path)[1] in ('.gdb', '.mdb', '.sde') and ws_type != 'FileSystem':
if return_type:
return (path, 'RemoteDatabase' if os.path.splitext(path)[1] == '.sde' else 'LocalDatabase')
return path
elif os.path.isdir(path):
if return_type:
return (path, 'FileSystem')
return path
elif os.path.isfile(path):
return find_ws(os.path.dirname(path))
@staticmethod
def form_connection_string(ws):
"""Forms connection string by parsing .sde connection files."""
if ws.endswith('.sde'):
with open(ws, 'rb') as f:
data = f.read()
datastr = data.replace('\x00','')
server = datastr.split('SERVER\x08\x0e')[1].split('\x12')[0]
instance = datastr.split('INSTANCE\x08*')[1].split('\x12')[0]
dbclient = ''.join(s for s in datastr.split('DBCLIENT\x08\x14')[1].split('DB_CONNECTION')[0] if s.isalpha())
db_connection_properties = datastr.split('DB_CONNECTION_PROPERTIES\x08\x0e')[1].split('\x12')[0]
database = datastr.split('DATABASE\x08\x16')[1].split('\x1e')[0]
version = datastr.split('VERSION\x08\x18')[1].split('\x1a')[0]
authentication_mode = datastr.split('AUTHENTICATION_MODE\x08\x08')[1].split('\x10')[0]
parts = [server, instance, dbclient, db_connection_properties, database, version, authentication_mode]
props = ['SERVER', 'INSTANCE', 'DBCLIENT', 'DB_CONNECTION_PROPERTIES', 'DATABASE', 'VERSION', 'AUTHENTICATION_MODE']
return ';'.join(map(lambda p: '{}={}'.format(*p), zip(props, parts)))
else:
return 'DATABASE=' + ws
def stopServiceAndCompressDatabase(self, sde_loc, service_url_or_name):
"""Stops service and compresses all associated databases.
Raises:
NotImplementedError: 'No access to the Arcpy Module!'
"""
raise NotImplementedError('No access to the Arcpy Module!')
class MunchEncoder(munch.Munch):
    """Munch subclass whose repr/str render as indented JSON via NameEncoder."""

    def __repr__(self):
        return json.dumps(self, indent=2, cls=NameEncoder)

    # str() and repr() are intentionally identical
    __str__ = __repr__
class ServerResources(JsonGetter):
    """JsonGetter wrapper around a JSON-able mapping of server resources.

    Attribute:
        json: The wrapped mapping, stored as a MunchEncoder.
    """
    def __init__(self, json):
        self.json = MunchEncoder(json)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        import json as _json  # avoid the constructor parameter shadowing
        return _json.dumps(self.json, indent=2, cls=NameEncoder)
class ServerAdministrator(AdiminstratorBase):
    """High-level helper for administering an ArcGIS Server instance."""
    def __init__(self, server_url, usr='', pw='', token=''):
        """Inits class with server info.

        Args:
            server_url: URL for the ArcGIS Server admin endpoint.
            usr: Username. Defaults to ''.
            pw: Password. Defaults to ''.
            token: Optional token to use instead of username/password.
        """
        self.ags = admin.ArcServerAdmin(server_url, usr, pw, token)
        # services stopped/started through this instance; consumed by
        # startStoppedServices() so only what we stopped gets restarted
        self.__stopped_services = []
        self.__started_services = []

    @staticmethod
    def test_connection_string(string1, string2, match_version=False):
        """Tests if two connection strings reference the same database.

        Args:
            string1: First 'KEY=value;...' connection string.
            string2: Second 'KEY=value;...' connection string.
            match_version: Also require the geodatabase VERSION to match.
                Defaults to False.

        Returns:
            True when both strings point at the same DATABASE/INSTANCE
            (and VERSION when requested).
        """
        db_props1 = {k:v for k, v in iter(s.split('=') for s in string1.split(';'))}
        db_props2 = {k:v for k, v in iter(s.split('=') for s in string2.split(';'))}
        db_info1 = ';'.join(filter(None, [db_props1.get('DATABASE'), db_props1.get('INSTANCE','NULL'), db_props1.get('VERSION') if match_version else None]))
        db_info2 = ';'.join(filter(None, [db_props2.get('DATABASE'), db_props2.get('INSTANCE','NULL'), db_props2.get('VERSION') if match_version else None]))
        return db_info1 == db_info2

    def find_services_containing(self, ws, fcs=None, stop=False, match_version=False):
        """Finds services containing an entire workspace and any specific
        feature classes.

        Args:
            ws: SDE workspace path.
            fcs: Optional list of specific feature classes to search for.
            stop: Optional boolean, stops each matching service if True.
                Defaults to False.
            match_version: Only return services where the schema version
                matches the schema version of the target database.
                Defaults to False.

        Returns:
            A ServerResources mapping of the services that were found.
        """
        # fcs defaults to None to avoid the shared mutable-default pitfall
        fcs = fcs or []
        ws = self.find_ws(ws)
        con_str = self.form_connection_string(ws)
        service_map = {'workspace': [], 'feature_classes': {}}
        toStop = []
        for fc in fcs:
            service_map['feature_classes'][fc.split('.')[-1]] = []
        # iterate through services and find matching workspace/layers
        for service in self.ags.iter_services():
            if hasattr(service, 'type') and service.type == 'MapServer':
                # feature servers have map servers too
                manifest = service.manifest()
                if hasattr(manifest, 'databases'):
                    for db in manifest.databases:
                        # iterate through all layers to find workspaces/fc's
                        if self.test_connection_string(con_str, db.onServerConnectionString, match_version) or self.test_connection_string(con_str, db.onPremiseConnectionString, match_version):
                            service_map['workspace'].append(MunchEncoder({
                                'name': service.serviceName,
                                'service': service
                            }))
                            if service not in toStop:
                                toStop.append(service)
                        # check for specific feature classes
                        for ds in db.datasets:
                            lyr_name = ds.onServerName
                            if lyr_name in service_map['feature_classes']:
                                service_map['feature_classes'][lyr_name].append(MunchEncoder({
                                    'name': service.serviceName,
                                    'service': service
                                }))
                                if service not in toStop:
                                    toStop.append(service)
        if stop:
            for service in toStop:
                service.stop()
                print('Stopped service: "{}"'.format(service.serviceName))
                self.__stopped_services.append(service)
        return ServerResources(service_map)

    def startStoppedServices(self):
        """Starts all services stopped through this instance.

        Fix: the original removed items from self.__stopped_services while
        iterating over it, which skips every other service; the list is now
        drained explicitly.
        """
        while self.__stopped_services:
            s = self.__stopped_services.pop(0)
            s.start()
            print('Started service: "{}"'.format(s.serviceName))
            self.__started_services.append(s)
zhexiao/estory | vendor/bower-asset/bootstrap/test-infra/s3_cache.py | 2166 | 5734 | #!/usr/bin/env python2.7
# pylint: disable=C0301
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, chdir, remove as _delete_file
from os.path import dirname, basename, abspath, realpath, expandvars
from hashlib import sha256
from subprocess import check_call as run
from json import load, dump as save
from contextlib import contextmanager
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
CONFIG_FILE = './S3Cachefile.json'
UPLOAD_TODO_FILE = './S3CacheTodo.json'
BYTES_PER_MB = 1024 * 1024
@contextmanager
def timer():
    """Context manager that prints elapsed wall-clock seconds on exit.

    Nothing is printed if the wrapped block raises.
    """
    began = datetime.utcnow()
    yield
    delta = datetime.utcnow() - began
    print("\tDone. Took", int(delta.total_seconds()), "second(s).")
@contextmanager
def todo_file(writeback=True):
    """Yield the upload-TODO dict loaded from UPLOAD_TODO_FILE.

    A missing/unreadable/corrupt file yields an empty dict. When
    *writeback* is true, the (possibly mutated) dict is saved back on
    exit; save failures are reported but not raised.
    """
    try:
        with open(UPLOAD_TODO_FILE, 'rt') as fh:
            pending = load(fh)
    except (IOError, OSError, ValueError):
        pending = {}
    yield pending
    if writeback:
        try:
            with open(UPLOAD_TODO_FILE, 'wt') as fh:
                save(pending, fh)
        except (OSError, IOError) as save_err:
            print("Error saving {}:".format(UPLOAD_TODO_FILE), save_err)
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def mark_needs_uploading(cache_name):
    """Record in the TODO file that *cache_name* must be (re)uploaded."""
    with todo_file() as todo:
        todo[cache_name] = True
def mark_uploaded(cache_name):
    """Clear *cache_name* from the TODO file (no-op if absent)."""
    with todo_file() as todo:
        todo.pop(cache_name, None)
def need_to_upload(cache_name):
    """Return True if the TODO file marks *cache_name* as pending upload."""
    with todo_file(writeback=False) as todo:
        return todo.get(cache_name, False)
def _tarball_size(directory):
    """Return the tarball's size as a human-readable '<n> MiB' string.

    Fix: the local was previously named ``kib`` although the value is in
    mebibytes (BYTES_PER_MB == 1024 * 1024), matching the 'MiB' unit in
    the returned string.
    """
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
    """Create <basename>.tar.gz (in the CWD) from *directory* via tar."""
    print("Creating tarball of {}...".format(directory))
    with timer():
        # -C keeps archive entries relative to the directory's parent
        run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
    """Unpack *directory*'s tarball into its parent directory via tar."""
    print("Extracting tarball of {}...".format(directory))
    with timer():
        run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
    """Fetch the cached tarball for *directory* from S3 and unpack it.

    NOTE(review): relies on the module-level globals ``cache_name`` and
    ``key`` that are only assigned in the __main__ block -- not reusable
    as a library call; verify before extracting.
    """
    mark_uploaded(cache_name)  # reset
    try:
        print("Downloading {} tarball from S3...".format(cache_name))
        with timer():
            key.get_contents_to_filename(_tarball_filename_for(directory))
    except S3ResponseError as err:
        # on failure, flag the cache so the next 'upload' run refreshes it
        # NOTE(review): err is captured but never reported -- consider logging
        mark_needs_uploading(cache_name)
        raise SystemExit("Cached {} download failed!".format(cache_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(cache_name))
def upload(directory):
    """Pack *directory* into a tarball and upload it to S3.

    NOTE(review): uses the module-level globals ``cache_name`` and ``key``
    assigned in the __main__ block; not reusable as a library call.
    """
    _create_tarball(directory)
    print("Uploading {} tarball to S3... ({})".format(cache_name, _tarball_size(directory)))
    with timer():
        key.set_contents_from_filename(_tarball_filename_for(directory))
    print("{} cache successfully updated.".format(cache_name))
    mark_uploaded(cache_name)
if __name__ == '__main__':
    # Uses environment variables:
    #    AWS_ACCESS_KEY_ID -- AWS Access Key ID
    #    AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
    argv.pop(0)
    if len(argv) != 2:
        raise SystemExit("USAGE: s3_cache.py <download | upload> <cache name>")
    mode, cache_name = argv
    # Run relative to this script's directory so CONFIG_FILE resolves.
    script_dir = dirname(realpath(__file__))
    chdir(script_dir)
    try:
        with open(CONFIG_FILE, 'rt') as config_file:
            config = load(config_file)
    except (IOError, OSError, ValueError) as config_err:
        print(config_err)
        raise SystemExit("Error when trying to load config from JSON file!")
    try:
        cache_info = config[cache_name]
        key_file = expandvars(cache_info["key"])
        fallback_cmd = cache_info["generate"]
        directory = expandvars(cache_info["cache"])
    except (TypeError, KeyError) as load_err:
        print(load_err)
        raise SystemExit("Config for cache named {!r} is missing or malformed!".format(cache_name))
    try:
        try:
            BUCKET_NAME = environ['TWBS_S3_BUCKET']
        except KeyError:
            raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
        conn = S3Connection()
        bucket = conn.lookup(BUCKET_NAME)
        if bucket is None:
            raise SystemExit("Could not access bucket!")
        # The S3 object key is the SHA-256 of the cache's key file, so a
        # changed key file automatically addresses a fresh cache entry.
        key_file_hash = _sha256_of_file(key_file)
        key = Key(bucket, key_file_hash)
        key.storage_class = 'REDUCED_REDUNDANCY'
        if mode == 'download':
            download(directory)
        elif mode == 'upload':
            if need_to_upload(cache_name):
                upload(directory)
            else:
                print("No need to upload anything.")
        else:
            raise SystemExit("Unrecognized mode {!r}".format(mode))
    except BaseException as exc:
        # Downloads fall back to regenerating the cache; uploads re-raise.
        if mode != 'download':
            raise
        print("Error!:", exc)
        print("Unable to download from cache.")
        print("Running fallback command to generate cache directory {!r}: {}".format(directory, fallback_cmd))
        with timer():
            run(fallback_cmd, shell=True)
libos-nuse/net-next-nuse | tools/perf/scripts/python/net_dropmonitor.py | 1812 | 1749 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
    """Load /proc/kallsyms into the global table, sorted by load address.

    Best-effort: if the file cannot be opened (e.g. not running on
    Linux), the table is simply left empty.
    """
    global kallsyms

    try:
        f = open("/proc/kallsyms", "r")
    except:
        return

    for line in f:
        # format: '<hex address> <type> <symbol name> ...'
        loc = int(line.split()[0], 16)
        name = line.split()[2]
        kallsyms.append((loc, name))
    kallsyms.sort()
def get_sym(sloc):
    """Map an address string to (symbol_name, offset) via the sorted
    kallsyms table, or (None, 0) when the address precedes every symbol."""
    addr = int(sloc)

    # Binary search for the last entry with load address <= addr.
    # Invariant: kallsyms[i][0] <= addr for all 0 <= i <= lo
    #            kallsyms[i][0] > addr for all hi <= i < len(kallsyms)
    lo, hi = -1, len(kallsyms)
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if kallsyms[mid][0] <= addr:
            lo = mid
        else:
            hi = mid
    # Now (lo == -1 or kallsyms[lo][0] <= addr)
    # and (lo == len(kallsyms) - 1 or addr < kallsyms[lo + 1][0])
    if lo < 0:
        return (None, 0)
    base, sym_name = kallsyms[lo]
    return (sym_name, addr - base)
def print_drop_table():
    """Print one row per drop location, resolved to symbol+offset."""
    print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
    for i in drop_log.keys():
        (sym, off) = get_sym(i)
        if sym == None:
            # address not in the symbol table; report the raw location
            sym = i
        print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
    """Called by perf once when the trace session starts."""
    print "Starting trace (Ctrl-C to dump results)"
def trace_end():
    """Called by perf when tracing stops: load symbols, print the report."""
    print "Gathering kallsyms data"
    get_kallsyms_table()
    print_drop_table()
# called from perf, when it finds a corresponding skb:kfree_skb event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
                   skbaddr, location, protocol):
    """Tracepoint handler: increment the drop count for this location.

    Fix: replaces a bare try/except counter increment (which would also
    hide unrelated errors) with an equivalent dict.get() lookup.
    """
    slocation = str(location)
    drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
dropbox/changes | tests/changes/api/test_plan_options.py | 1 | 1754 | from changes.config import db
from changes.models.option import ItemOption
from changes.testutils import APITestCase
class PlanOptionsListTest(APITestCase):
    """Tests GET on a plan's options endpoint."""
    def test_simple(self):
        """The default option value is returned until a stored ItemOption
        overrides it."""
        project = self.create_project()
        plan = self.create_plan(project)

        path = '/api/0/plans/{0}/options/'.format(plan.id.hex)

        # no stored option yet: default value
        resp = self.client.get(path)
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert data['build.expect-tests'] == '0'

        db.session.add(ItemOption(
            name='build.expect-tests',
            value='1',
            item_id=plan.id,
        ))
        db.session.commit()

        # stored option now takes precedence
        resp = self.client.get(path)
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert data['build.expect-tests'] == '1'
class PlanOptionsUpdateTest(APITestCase):
    """Tests POST on a plan's options endpoint, including authorization."""
    def test_simple(self):
        """Anonymous -> 401, non-admin -> 403, project admin -> 200 and the
        option is persisted."""
        project = self.create_project()
        plan = self.create_plan(project)

        path = '/api/0/plans/{0}/options/'.format(plan.id.hex)

        # not logged in
        resp = self.client.post(path, data={
            'build.expect-tests': '1',
        })
        assert resp.status_code == 401

        # logged in but not a project admin
        self.login_default()
        resp = self.client.post(path, data={
            'build.expect-tests': '1',
        })
        assert resp.status_code == 403

        # project admin may update
        self.create_and_login_project_admin([project.slug])
        resp = self.client.post(path, data={
            'build.expect-tests': '1',
        })
        assert resp.status_code == 200

        # verify the option was written to the database
        options = dict(db.session.query(
            ItemOption.name, ItemOption.value
        ).filter(
            ItemOption.item_id == plan.id,
        ))
        assert options.get('build.expect-tests') == '1'
| apache-2.0 |
kevinthesun/mxnet | example/neural-style/end_to_end/model_vgg19.py | 43 | 6553 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import os, sys
from collections import namedtuple
ConvExecutor = namedtuple('ConvExecutor', ['executor', 'data', 'data_grad', 'style', 'content', 'arg_dict'])
def get_vgg_symbol(prefix, content_only=False):
    """Build the VGG-19 convolutional symbol graph up through relu5_1.

    Args:
        prefix: String prepended to every layer/variable name so multiple
            copies of the network can coexist in one executor graph.
        content_only: If True, return only the content-layer symbol
            (relu4_2).

    Returns:
        relu4_2 when content_only, otherwise a (style, content) pair of
        symbol groups: style = [relu1_1, relu2_1, relu3_1, relu4_1,
        relu5_1], content = [relu4_2].
    """
    # declare symbol
    data = mx.sym.Variable("%s_data" % prefix)
    conv1_1 = mx.symbol.Convolution(name='%s_conv1_1' % prefix, data=data , num_filter=64, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu1_1 = mx.symbol.Activation(data=conv1_1 , act_type='relu')
    conv1_2 = mx.symbol.Convolution(name='%s_conv1_2' % prefix, data=relu1_1 , num_filter=64, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu1_2 = mx.symbol.Activation(data=conv1_2 , act_type='relu')
    pool1 = mx.symbol.Pooling(data=relu1_2 , pad=(0,0), kernel=(2,2), stride=(2,2), pool_type='avg')
    conv2_1 = mx.symbol.Convolution(name='%s_conv2_1' % prefix, data=pool1 , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu2_1 = mx.symbol.Activation(data=conv2_1 , act_type='relu')
    conv2_2 = mx.symbol.Convolution(name='%s_conv2_2' % prefix, data=relu2_1 , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu2_2 = mx.symbol.Activation(data=conv2_2 , act_type='relu')
    pool2 = mx.symbol.Pooling(data=relu2_2 , pad=(0,0), kernel=(2,2), stride=(2,2), pool_type='avg')
    conv3_1 = mx.symbol.Convolution(name='%s_conv3_1' % prefix, data=pool2 , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu3_1 = mx.symbol.Activation(data=conv3_1 , act_type='relu')
    conv3_2 = mx.symbol.Convolution(name='%s_conv3_2' % prefix, data=relu3_1 , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu3_2 = mx.symbol.Activation(data=conv3_2 , act_type='relu')
    conv3_3 = mx.symbol.Convolution(name='%s_conv3_3' % prefix, data=relu3_2 , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu3_3 = mx.symbol.Activation(data=conv3_3 , act_type='relu')
    conv3_4 = mx.symbol.Convolution(name='%s_conv3_4' % prefix, data=relu3_3 , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu3_4 = mx.symbol.Activation(data=conv3_4 , act_type='relu')
    pool3 = mx.symbol.Pooling(data=relu3_4 , pad=(0,0), kernel=(2,2), stride=(2,2), pool_type='avg')
    conv4_1 = mx.symbol.Convolution(name='%s_conv4_1' % prefix, data=pool3 , num_filter=512, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu4_1 = mx.symbol.Activation(data=conv4_1 , act_type='relu')
    conv4_2 = mx.symbol.Convolution(name='%s_conv4_2' % prefix, data=relu4_1 , num_filter=512, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu4_2 = mx.symbol.Activation(data=conv4_2 , act_type='relu')
    conv4_3 = mx.symbol.Convolution(name='%s_conv4_3' % prefix, data=relu4_2 , num_filter=512, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu4_3 = mx.symbol.Activation(data=conv4_3 , act_type='relu')
    conv4_4 = mx.symbol.Convolution(name='%s_conv4_4' % prefix, data=relu4_3 , num_filter=512, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu4_4 = mx.symbol.Activation(data=conv4_4 , act_type='relu')
    pool4 = mx.symbol.Pooling(data=relu4_4 , pad=(0,0), kernel=(2,2), stride=(2,2), pool_type='avg')
    conv5_1 = mx.symbol.Convolution(name='%s_conv5_1' % prefix, data=pool4 , num_filter=512, pad=(1,1), kernel=(3,3), stride=(1,1), workspace=1024)
    relu5_1 = mx.symbol.Activation(data=conv5_1 , act_type='relu')

    if content_only:
        return relu4_2
    # style and content layers
    style = mx.sym.Group([relu1_1, relu2_1, relu3_1, relu4_1, relu5_1])
    content = mx.sym.Group([relu4_2])
    return style, content
def get_executor_with_style(style, content, input_size, ctx):
    """Bind an executor exposing both style and content layer outputs.

    Args:
        style: Symbol group of the style layers.
        content: Symbol (group) of the content layer.
        input_size: (height, width) of the input image.
        ctx: mxnet context to bind on.

    Returns:
        ConvExecutor whose outputs[:-1] are the style layers and
        outputs[-1] is the content layer; only 'data' receives gradients.
    """
    out = mx.sym.Group([style, content])
    # make executor
    arg_shapes, output_shapes, aux_shapes = out.infer_shape(data=(1, 3, input_size[0], input_size[1]))
    arg_names = out.list_arguments()
    arg_dict = dict(zip(arg_names, [mx.nd.zeros(shape, ctx=ctx) for shape in arg_shapes]))
    # only the input image gets a gradient buffer (grad_req below)
    grad_dict = {"data": arg_dict["data"].copyto(ctx)}
    # init with pretrained weight
    pretrained = mx.nd.load("./model/vgg19.params")
    for name in arg_names:
        if name == "data":
            continue
        key = "arg:" + name
        if key in pretrained:
            pretrained[key].copyto(arg_dict[name])
        else:
            print("Skip argument %s" % name)
    executor = out.bind(ctx=ctx, args=arg_dict, args_grad=grad_dict, grad_req="write")
    return ConvExecutor(executor=executor,
                        data=arg_dict["data"],
                        data_grad=grad_dict["data"],
                        style=executor.outputs[:-1],
                        content=executor.outputs[-1],
                        arg_dict=arg_dict)
def get_executor_content(content, input_size, ctx):
    """Bind an executor that exposes only the content layer (no gradients).

    Fix: the original referenced an undefined name ``out`` for
    list_arguments() and bind() -- a NameError on every call; the
    ``content`` argument was clearly intended.

    Args:
        content: Content-layer symbol (e.g. from get_vgg_symbol(...,
            content_only=True)).
        input_size: (height, width) of the input image.
        ctx: mxnet context to bind on.

    Returns:
        ConvExecutor with only the content output populated (data_grad and
        style are None).
    """
    arg_shapes, output_shapes, aux_shapes = content.infer_shape(data=(1, 3, input_size[0], input_size[1]))
    arg_names = content.list_arguments()
    arg_dict = dict(zip(arg_names, [mx.nd.zeros(shape, ctx=ctx) for shape in arg_shapes]))
    # init with pretrained weights; 'data' is supplied by the caller
    pretrained = mx.nd.load("./model/vgg19.params")
    for name in arg_names:
        if name == "data":
            continue
        key = "arg:" + name
        if key in pretrained:
            pretrained[key].copyto(arg_dict[name])
        else:
            print("Skip argument %s" % name)
    # no gradients needed for pure content extraction
    executor = content.bind(ctx=ctx, args=arg_dict, args_grad=[], grad_req="null")
    return ConvExecutor(executor=executor,
                        data=arg_dict["data"],
                        data_grad=None,
                        style=None,
                        content=executor.outputs[0],
                        arg_dict=arg_dict)
| apache-2.0 |
endlessm/chromium-browser | third_party/llvm/llvm/utils/extract_vplan.py | 29 | 1614 | #!/usr/bin/env python
# This script extracts the VPlan digraphs from the vectoriser debug messages
# and saves them in individual dot files (one for each plan). Optionally, and
# providing 'dot' is installed, it can also render the dot into a PNG file.
from __future__ import print_function
import sys
import re
import argparse
import shutil
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('--png', action='store_true')
args = parser.parse_args()

dot = shutil.which('dot')
if args.png and not dot:
    raise RuntimeError("Can't export to PNG without 'dot' in the system")

# Fix: regex patterns are now raw strings; '"graph \["' relied on Python
# passing unknown escapes through, which is deprecated and will become a
# SyntaxError in future versions. The patterns themselves are unchanged.
pattern = re.compile(r"(digraph VPlan {.*?\n})", re.DOTALL)
matches = re.findall(pattern, sys.stdin.read())

for vplan in matches:
    # Derive a filesystem-safe name from the 'VF=...,UF...' graph label.
    m = re.search(r"graph \[.+(VF=.+,UF.+), ", vplan)
    if not m:
        raise ValueError("Can't get the right VPlan name")
    name = re.sub(r'[^a-zA-Z0-9]', '', m.group(1))

    if args.png:
        filename = 'VPlan' + name + '.png'
        print("Exporting " + name + " to PNG via dot: " + filename)
        p = subprocess.Popen([dot, '-Tpng', '-o', filename],
                             encoding='utf-8',
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate(input=vplan)
        if err:
            raise RuntimeError("Error running dot: " + err)

    else:
        filename = 'VPlan' + name + '.dot'
        print("Exporting " + name + " to DOT: " + filename)
        with open(filename, 'w') as out:
            out.write(vplan)
| bsd-3-clause |
yury-s/v8-inspector | Source/chrome/tools/telemetry/telemetry/core/platform/android_platform_backend.py | 2 | 27874 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import shutil
import subprocess
import tempfile
from telemetry.core.backends import adb_commands
from telemetry.core import exceptions
from telemetry.core.forwarders import android_forwarder
from telemetry.core import platform
from telemetry.core.platform import android_device
from telemetry.core.platform import android_platform
from telemetry.core.platform import linux_based_platform_backend
from telemetry.core.platform.power_monitor import android_ds2784_power_monitor
from telemetry.core.platform.power_monitor import android_dumpsys_power_monitor
from telemetry.core.platform.power_monitor import android_temperature_monitor
from telemetry.core.platform.power_monitor import monsoon_power_monitor
from telemetry.core.platform.power_monitor import power_monitor_controller
from telemetry.core.platform.profiler import android_prebuilt_profiler_helper
from telemetry.core import util
from telemetry.core import video
from telemetry import decorators
from telemetry.util import exception_formatter
from telemetry.util import external_modules
psutil = external_modules.ImportOptionalModule('psutil')
util.AddDirToPythonPath(util.GetChromiumSrcDir(),
'third_party', 'webpagereplay')
import adb_install_cert # pylint: disable=import-error
import certutils # pylint: disable=import-error
import platformsettings # pylint: disable=import-error
# Get build/android scripts into our path.
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
from pylib import constants # pylint: disable=import-error
from pylib import screenshot # pylint: disable=import-error
from pylib.device import battery_utils # pylint: disable=import-error
from pylib.device import device_errors # pylint: disable=import-error
from pylib.perf import cache_control # pylint: disable=import-error
from pylib.perf import perf_control # pylint: disable=import-error
from pylib.perf import thermal_throttle # pylint: disable=import-error
from pylib.utils import device_temp_file # pylint: disable=import-error
try:
from pylib.perf import surface_stats_collector # pylint: disable=import-error
except Exception:
surface_stats_collector = None
_DEVICE_COPY_SCRIPT_FILE = os.path.join(
constants.DIR_SOURCE_ROOT, 'build', 'android', 'pylib',
'efficient_android_directory_copy.sh')
_DEVICE_COPY_SCRIPT_LOCATION = (
'/data/local/tmp/efficient_android_directory_copy.sh')
class AndroidPlatformBackend(
linux_based_platform_backend.LinuxBasedPlatformBackend):
  def __init__(self, device, finder_options):
    """Initializes the backend for a remote Android device.

    Sets up adb (attempting root), battery/perf/thermal helpers, the
    power monitor chain and forwarding configuration.

    Raises:
      exceptions.PlatformError: if the prebuilt android tools could not
          be installed on the device.
    """
    assert device, (
        'AndroidPlatformBackend can only be initialized from remote device')
    super(AndroidPlatformBackend, self).__init__(device)
    self._adb = adb_commands.AdbCommands(device=device.device_id)
    installed_prebuilt_tools = adb_commands.SetupPrebuiltTools(self._adb)
    if not installed_prebuilt_tools:
      logging.error(
          '%s detected, however prebuilt android tools could not '
          'be used. To run on Android you must build them first:\n'
          '  $ ninja -C out/Release android_tools' % device.name)
      raise exceptions.PlatformError()
    # Trying to root the device, if possible.
    if not self._adb.IsRootEnabled():
      # Ignore result.
      self._adb.EnableAdbRoot()
    self._device = self._adb.device()
    self._battery = battery_utils.BatteryUtils(self._device)
    self._enable_performance_mode = device.enable_performance_mode
    self._surface_stats_collector = None
    self._perf_tests_setup = perf_control.PerfControl(self._device)
    self._thermal_throttle = thermal_throttle.ThermalThrottle(self._device)
    self._raw_display_frame_rate_measurements = []
    # protected files require either root or su access
    self._can_access_protected_file_contents = (
        self._device.HasRoot() or self._device.NeedsSU())
    self._device_copy_script = None
    power_controller = power_monitor_controller.PowerMonitorController([
        monsoon_power_monitor.MonsoonPowerMonitor(self._device, self),
        android_ds2784_power_monitor.DS2784PowerMonitor(self._device, self),
        android_dumpsys_power_monitor.DumpsysPowerMonitor(self._battery, self),
    ])
    self._power_monitor = android_temperature_monitor.AndroidTemperatureMonitor(
        power_controller, self._device)
    self._video_recorder = None
    self._installed_applications = None
    self._wpr_ca_cert_path = None
    self._device_cert_util = None
    self._is_test_ca_installed = False
    # RNDIS is required when explicitly requested, when netsim is in use,
    # or when the host is not Linux.
    self._use_rndis_forwarder = (
        finder_options.android_rndis or
        finder_options.browser_options.netsim or
        platform.GetHostPlatform().GetOSName() != 'linux')
    _FixPossibleAdbInstability()
@classmethod
def SupportsDevice(cls, device):
return isinstance(device, android_device.AndroidDevice)
@classmethod
def CreatePlatformForDevice(cls, device, finder_options):
assert cls.SupportsDevice(device)
platform_backend = AndroidPlatformBackend(device, finder_options)
return android_platform.AndroidPlatform(platform_backend)
@property
def forwarder_factory(self):
if not self._forwarder_factory:
self._forwarder_factory = android_forwarder.AndroidForwarderFactory(
self._adb, self._use_rndis_forwarder)
return self._forwarder_factory
@property
def use_rndis_forwarder(self):
return self._use_rndis_forwarder
@property
def adb(self):
return self._adb
def IsDisplayTracingSupported(self):
return bool(self.GetOSVersionName() >= 'J')
def StartDisplayTracing(self):
assert not self._surface_stats_collector
# Clear any leftover data from previous timed out tests
self._raw_display_frame_rate_measurements = []
self._surface_stats_collector = \
surface_stats_collector.SurfaceStatsCollector(self._device)
self._surface_stats_collector.Start()
def StopDisplayTracing(self):
if not self._surface_stats_collector:
return
refresh_period, timestamps = self._surface_stats_collector.Stop()
pid = self._surface_stats_collector.GetSurfaceFlingerPid()
self._surface_stats_collector = None
# TODO(sullivan): should this code be inline, or live elsewhere?
events = []
for ts in timestamps:
events.append({
'cat': 'SurfaceFlinger',
'name': 'vsync_before',
'ts': ts,
'pid': pid,
'tid': pid,
'args': {'data': {
'frame_count': 1,
'refresh_period': refresh_period,
}}
})
return events
def SetFullPerformanceModeEnabled(self, enabled):
if not self._enable_performance_mode:
logging.warning('CPU governor will not be set!')
return
if enabled:
self._perf_tests_setup.SetHighPerfMode()
else:
self._perf_tests_setup.SetDefaultPerfMode()
def CanMonitorThermalThrottling(self):
return True
def IsThermallyThrottled(self):
return self._thermal_throttle.IsThrottled()
def HasBeenThermallyThrottled(self):
return self._thermal_throttle.HasBeenThrottled()
def GetCpuStats(self, pid):
if not self._can_access_protected_file_contents:
logging.warning('CPU stats cannot be retrieved on non-rooted device.')
return {}
return super(AndroidPlatformBackend, self).GetCpuStats(pid)
def GetCpuTimestamp(self):
if not self._can_access_protected_file_contents:
logging.warning('CPU timestamp cannot be retrieved on non-rooted device.')
return {}
return super(AndroidPlatformBackend, self).GetCpuTimestamp()
def PurgeUnpinnedMemory(self):
"""Purges the unpinned ashmem memory for the whole system.
This can be used to make memory measurements more stable. Requires root.
"""
if not self._can_access_protected_file_contents:
logging.warning('Cannot run purge_ashmem. Requires a rooted device.')
return
if not android_prebuilt_profiler_helper.InstallOnDevice(
self._device, 'purge_ashmem'):
raise Exception('Error installing purge_ashmem.')
output = self._device.RunShellCommand(
android_prebuilt_profiler_helper.GetDevicePath('purge_ashmem'),
check_return=True)
for l in output:
logging.info(l)
def GetMemoryStats(self, pid):
memory_usage = self._device.GetMemoryUsageForPid(pid)
if not memory_usage:
return {}
return {'ProportionalSetSize': memory_usage['Pss'] * 1024,
'SharedDirty': memory_usage['Shared_Dirty'] * 1024,
'PrivateDirty': memory_usage['Private_Dirty'] * 1024,
'VMPeak': memory_usage['VmHWM'] * 1024}
def GetChildPids(self, pid):
child_pids = []
ps = self.GetPsOutput(['pid', 'name'])
for curr_pid, curr_name in ps:
if int(curr_pid) == pid:
name = curr_name
for curr_pid, curr_name in ps:
if curr_name.startswith(name) and curr_name != name:
child_pids.append(int(curr_pid))
break
return child_pids
@decorators.Cache
def GetCommandLine(self, pid):
ps = self.GetPsOutput(['pid', 'name'], pid)
if not ps:
raise exceptions.ProcessGoneException()
return ps[0][1]
def GetOSName(self):
return 'android'
@decorators.Cache
def GetOSVersionName(self):
return self._device.GetProp('ro.build.id')[0]
def CanFlushIndividualFilesFromSystemCache(self):
return False
def FlushEntireSystemCache(self):
cache = cache_control.CacheControl(self._device)
cache.DropRamCaches()
def FlushSystemCacheForDirectory(self, directory):
raise NotImplementedError()
def FlushDnsCache(self):
self._device.RunShellCommand('ndc resolver flushdefaultif', as_root=True)
def StopApplication(self, application):
"""Stop the given |application|.
Args:
application: The full package name string of the application to stop.
"""
self._device.ForceStop(application)
def KillApplication(self, application):
"""Kill the given |application|.
Might be used instead of ForceStop for efficiency reasons.
Args:
application: The full package name string of the application to kill.
"""
self._device.KillAll(application, blocking=True, quiet=True)
def LaunchApplication(
self, application, parameters=None, elevate_privilege=False):
"""Launches the given |application| with a list of |parameters| on the OS.
Args:
application: The full package name string of the application to launch.
parameters: A list of parameters to be passed to the ActivityManager.
elevate_privilege: Currently unimplemented on Android.
"""
if elevate_privilege:
raise NotImplementedError("elevate_privilege isn't supported on android.")
if not parameters:
parameters = ''
result_lines = self._device.RunShellCommand('am start %s %s' %
(parameters, application))
for line in result_lines:
if line.startswith('Error: '):
raise ValueError('Failed to start "%s" with error\n %s' %
(application, line))
def IsApplicationRunning(self, application):
return len(self._device.GetPids(application)) > 0
def CanLaunchApplication(self, application):
if not self._installed_applications:
self._installed_applications = self._device.RunShellCommand(
'pm list packages')
return 'package:' + application in self._installed_applications
def InstallApplication(self, application):
self._installed_applications = None
self._device.Install(application)
@decorators.Cache
def CanCaptureVideo(self):
return self.GetOSVersionName() >= 'K'
def StartVideoCapture(self, min_bitrate_mbps):
"""Starts the video capture at specified bitrate."""
min_bitrate_mbps = max(min_bitrate_mbps, 0.1)
if min_bitrate_mbps > 100:
raise ValueError('Android video capture cannot capture at %dmbps. '
'Max capture rate is 100mbps.' % min_bitrate_mbps)
if self.is_video_capture_running:
self._video_recorder.Stop()
self._video_recorder = screenshot.VideoRecorder(
self._device, megabits_per_second=min_bitrate_mbps)
self._video_recorder.Start()
util.WaitFor(self._video_recorder.IsStarted, 5)
@property
def is_video_capture_running(self):
return self._video_recorder is not None
def StopVideoCapture(self):
assert self.is_video_capture_running, 'Must start video capture first'
self._video_recorder.Stop()
video_file_obj = tempfile.NamedTemporaryFile()
self._video_recorder.Pull(video_file_obj.name)
self._video_recorder = None
return video.Video(video_file_obj)
def CanMonitorPower(self):
return self._power_monitor.CanMonitorPower()
def StartMonitoringPower(self, browser):
self._power_monitor.StartMonitoringPower(browser)
def StopMonitoringPower(self):
return self._power_monitor.StopMonitoringPower()
def CanMonitorNetworkData(self):
if (self._device.build_version_sdk <
constants.ANDROID_SDK_VERSION_CODES.LOLLIPOP):
return False
return True
def GetNetworkData(self, browser):
return self._battery.GetNetworkData(browser._browser_backend.package)
def GetFileContents(self, fname):
if not self._can_access_protected_file_contents:
logging.warning('%s cannot be retrieved on non-rooted device.' % fname)
return ''
return self._device.ReadFile(fname, as_root=True)
def GetPsOutput(self, columns, pid=None):
assert columns == ['pid', 'name'] or columns == ['pid'], \
'Only know how to return pid and name. Requested: ' + columns
command = 'ps'
if pid:
command += ' -p %d' % pid
with device_temp_file.DeviceTempFile(self._device.adb) as ps_out:
command += ' > %s' % ps_out.name
self._device.RunShellCommand(command)
# Get rid of trailing new line and header.
ps = self._device.ReadFile(ps_out.name).split('\n')[1:-1]
output = []
for line in ps:
data = line.split()
curr_pid = data[1]
curr_name = data[-1]
if columns == ['pid', 'name']:
output.append([curr_pid, curr_name])
else:
output.append([curr_pid])
return output
def RunCommand(self, command):
return '\n'.join(self._device.RunShellCommand(command))
@staticmethod
def ParseCStateSample(sample):
sample_stats = {}
for cpu in sample:
values = sample[cpu].splitlines()
# Each state has three values after excluding the time value.
num_states = (len(values) - 1) / 3
names = values[:num_states]
times = values[num_states:2 * num_states]
cstates = {'C0': int(values[-1]) * 10 ** 6}
for i, state in enumerate(names):
if state == 'C0':
# The Exynos cpuidle driver for the Nexus 10 uses the name 'C0' for
# its WFI state.
# TODO(tmandel): We should verify that no other Android device
# actually reports time in C0 causing this to report active time as
# idle time.
state = 'WFI'
cstates[state] = int(times[i])
cstates['C0'] -= int(times[i])
sample_stats[cpu] = cstates
return sample_stats
def SetRelaxSslCheck(self, value):
old_flag = self._device.GetProp('socket.relaxsslcheck')
self._device.SetProp('socket.relaxsslcheck', value)
return old_flag
def ForwardHostToDevice(self, host_port, device_port):
self._adb.Forward('tcp:%d' % host_port, device_port)
def DismissCrashDialogIfNeeded(self):
"""Dismiss any error dialogs.
Limit the number in case we have an error loop or we are failing to dismiss.
"""
for _ in xrange(10):
if not self._device.old_interface.DismissCrashDialogIfNeeded():
break
def IsAppRunning(self, process_name):
"""Determine if the given process is running.
Args:
process_name: The full package name string of the process.
"""
pids = self._adb.ExtractPid(process_name)
return len(pids) != 0
@property
def wpr_ca_cert_path(self):
"""Path to root certificate installed on browser (or None).
If this is set, web page replay will use it to sign HTTPS responses.
"""
if self._wpr_ca_cert_path:
assert os.path.isfile(self._wpr_ca_cert_path)
return self._wpr_ca_cert_path
def InstallTestCa(self):
"""Install a randomly generated root CA on the android device.
This allows transparent HTTPS testing with WPR server without need
to tweak application network stack.
"""
# TODO(slamm): Move certificate creation related to webpagereplay.py.
# The only code that needs to be in platform backend is installing the cert.
if certutils.openssl_import_error:
logging.warning(
'The OpenSSL module is unavailable. '
'Will fallback to ignoring certificate errors.')
return
if not platformsettings.HasSniSupport():
logging.warning(
'Web Page Replay requires SNI support (pyOpenSSL 0.13 or greater) '
'to generate certificates from a test CA. '
'Will fallback to ignoring certificate errors.')
return
try:
self._wpr_ca_cert_path = os.path.join(tempfile.mkdtemp(), 'testca.pem')
certutils.write_dummy_ca_cert(*certutils.generate_dummy_ca_cert(),
cert_path=self._wpr_ca_cert_path)
self._device_cert_util = adb_install_cert.AndroidCertInstaller(
self._adb.device_serial(), None, self._wpr_ca_cert_path)
logging.info('Installing test certificate authority on device: %s',
self._adb.device_serial())
self._device_cert_util.install_cert(overwrite_cert=True)
self._is_test_ca_installed = True
except Exception as e:
# Fallback to ignoring certificate errors.
self.RemoveTestCa()
logging.warning(
'Unable to install test certificate authority on device: %s. '
'Will fallback to ignoring certificate errors. Install error: %s',
self._adb.device_serial(), e)
@property
def is_test_ca_installed(self):
return self._is_test_ca_installed
def RemoveTestCa(self):
"""Remove root CA generated by previous call to InstallTestCa().
Removes the test root certificate from both the device and host machine.
"""
if not self._wpr_ca_cert_path:
return
if self._is_test_ca_installed:
try:
self._device_cert_util.remove_cert()
except Exception:
# Best effort cleanup - show the error and continue.
exception_formatter.PrintFormattedException(
msg=('Error while trying to remove certificate authority: %s. '
% self._adb.device_serial()))
self._is_test_ca_installed = False
shutil.rmtree(os.path.dirname(self._wpr_ca_cert_path), ignore_errors=True)
self._wpr_ca_cert_path = None
self._device_cert_util = None
def PushProfile(self, package, new_profile_dir):
"""Replace application profile with files found on host machine.
Pushing the profile is slow, so we don't want to do it every time.
Avoid this by pushing to a safe location using PushChangedFiles, and
then copying into the correct location on each test run.
Args:
package: The full package name string of the application for which the
profile is to be updated.
new_profile_dir: Location where profile to be pushed is stored on the
host machine.
"""
(profile_parent, profile_base) = os.path.split(new_profile_dir)
# If the path ends with a '/' python split will return an empty string for
# the base name; so we now need to get the base name from the directory.
if not profile_base:
profile_base = os.path.basename(profile_parent)
saved_profile_location = '/sdcard/profile/%s' % profile_base
self._device.PushChangedFiles([(new_profile_dir, saved_profile_location)])
profile_dir = self._GetProfileDir(package)
self._EfficientDeviceDirectoryCopy(
saved_profile_location, profile_dir)
dumpsys = self._device.RunShellCommand('dumpsys package %s' % package)
id_line = next(line for line in dumpsys if 'userId=' in line)
uid = re.search(r'\d+', id_line).group()
files = self._device.RunShellCommand(
'ls "%s"' % profile_dir, as_root=True)
files.remove('lib')
paths = ['%s%s' % (profile_dir, f) for f in files]
for path in paths:
extended_path = '%s %s/* %s/*/* %s/*/*/*' % (path, path, path, path)
self._device.RunShellCommand(
'chown %s.%s %s' % (uid, uid, extended_path))
def _EfficientDeviceDirectoryCopy(self, source, dest):
if not self._device_copy_script:
self._device.adb.Push(
_DEVICE_COPY_SCRIPT_FILE,
_DEVICE_COPY_SCRIPT_LOCATION)
self._device_copy_script = _DEVICE_COPY_SCRIPT_FILE
self._device.RunShellCommand(
['sh', self._device_copy_script, source, dest],
check_return=True)
def RemoveProfile(self, package, ignore_list):
"""Delete application profile on device.
Args:
package: The full package name string of the application for which the
profile is to be deleted.
ignore_list: List of files to keep.
"""
profile_dir = self._GetProfileDir(package)
files = self._device.RunShellCommand(
'ls "%s"' % profile_dir, as_root=True)
paths = ['"%s%s"' % (profile_dir, f) for f in files
if f not in ignore_list]
self._device.RunShellCommand('rm -r %s' % ' '.join(paths), as_root=True)
def PullProfile(self, package, output_profile_path):
"""Copy application profile from device to host machine.
Args:
package: The full package name string of the application for which the
profile is to be copied.
output_profile_dir: Location where profile to be stored on host machine.
"""
profile_dir = self._GetProfileDir(package)
logging.info("Pulling profile directory from device: '%s'->'%s'.",
profile_dir, output_profile_path)
# To minimize bandwidth it might be good to look at whether all the data
# pulled down is really needed e.g. .pak files.
if not os.path.exists(output_profile_path):
os.makedirs(output_profile_path)
files = self._device.RunShellCommand(
['ls', profile_dir], check_return=True)
for f in files:
# Don't pull lib, since it is created by the installer.
if f != 'lib':
source = '%s%s' % (profile_dir, f)
dest = os.path.join(output_profile_path, f)
try:
self._device.PullFile(source, dest, timeout=240)
except device_errors.CommandFailedError:
logging.exception('Failed to pull %s to %s', source, dest)
def _GetProfileDir(self, package):
"""Returns the on-device location where the application profile is stored
based on Android convention.
Args:
package: The full package name string of the application.
"""
return '/data/data/%s/' % package
def SetDebugApp(self, package):
"""Set application to debugging.
Args:
package: The full package name string of the application.
"""
if self._adb.IsUserBuild():
logging.debug('User build device, setting debug app')
self._device.RunShellCommand('am set-debug-app --persistent %s' % package)
def GetStandardOutput(self, number_of_lines=500):
"""Returns most recent lines of logcat dump.
Args:
number_of_lines: Number of lines of log to return.
"""
return '\n'.join(self.adb.device().RunShellCommand(
'logcat -d -t %d' % number_of_lines))
def GetStackTrace(self, target_arch):
"""Returns stack trace.
The stack trace consists of raw logcat dump, logcat dump with symbols,
and stack info from tomstone files.
Args:
target_arch: String specifying device architecture (eg. arm, arm64, mips,
x86, x86_64)
"""
def Decorate(title, content):
return "%s\n%s\n%s\n" % (title, content, '*' * 80)
# Get the last lines of logcat (large enough to contain stacktrace)
logcat = self.GetStandardOutput()
ret = Decorate('Logcat', logcat)
stack = os.path.join(util.GetChromiumSrcDir(), 'third_party',
'android_platform', 'development', 'scripts', 'stack')
# Try to symbolize logcat.
if os.path.exists(stack):
cmd = [stack]
if target_arch:
cmd.append('--arch=%s' % target_arch)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ret += Decorate('Stack from Logcat', p.communicate(input=logcat)[0])
# Try to get tombstones.
tombstones = os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'tombstones.py')
if os.path.exists(tombstones):
ret += Decorate('Tombstones',
subprocess.Popen([tombstones, '-w', '--device',
self._adb.device_serial()],
stdout=subprocess.PIPE).communicate()[0])
return ret
@staticmethod
def _IsScreenOn(input_methods):
"""Parser method of IsScreenOn()
Args:
input_methods: Output from dumpsys input_methods
Returns:
boolean: True if screen is on, false if screen is off.
Raises:
ValueError: An unknown value is found for the screen state.
AndroidDeviceParsingError: Error in detecting screen state.
"""
for line in input_methods:
if 'mScreenOn' in line or 'mInteractive' in line:
for pair in line.strip().split(' '):
key, value = pair.split('=', 1)
if key == 'mScreenOn' or key == 'mInteractive':
if value == 'true':
return True
elif value == 'false':
return False
else:
raise ValueError('Unknown value for %s: %s' % (key, value))
raise exceptions.AndroidDeviceParsingError(str(input_methods))
  def IsScreenOn(self):
    """Determines if device screen is on."""
    # Parsing is delegated to the static helper so it can be exercised
    # without a live device.
    input_methods = self._device.RunShellCommand('dumpsys input_method')
    return self._IsScreenOn(input_methods)
@staticmethod
def _IsScreenLocked(input_methods):
"""Parser method for IsScreenLocked()
Args:
input_methods: Output from dumpsys input_methods
Returns:
boolean: True if screen is locked, false if screen is not locked.
Raises:
ValueError: An unknown value is found for the screen lock state.
AndroidDeviceParsingError: Error in detecting screen state.
"""
for line in input_methods:
if 'mHasBeenInactive' in line:
for pair in line.strip().split(' '):
key, value = pair.split('=', 1)
if key == 'mHasBeenInactive':
if value == 'true':
return True
elif value == 'false':
return False
else:
raise ValueError('Unknown value for %s: %s' % (key, value))
raise exceptions.AndroidDeviceParsingError(str(input_methods))
  def IsScreenLocked(self):
    """Determines if device screen is locked."""
    # Same dumpsys source as IsScreenOn(); parsing delegated for testability.
    input_methods = self._device.RunShellCommand('dumpsys input_method')
    return self._IsScreenLocked(input_methods)
def _FixPossibleAdbInstability():
  """Host side workaround for crbug.com/268450 (adb instability).

  The adb server has a race which is mitigated by binding to a single core.
  Silently does nothing when psutil is not available.
  """
  if not psutil:
    return
  for process in psutil.process_iter():
    try:
      # psutil 2.0 changed name/cpu_affinity from attributes to methods.
      if psutil.version_info >= (2, 0):
        if 'adb' in process.name():
          process.cpu_affinity([0])
      else:
        if 'adb' in process.name:
          process.set_cpu_affinity([0])
    except (psutil.NoSuchProcess, psutil.AccessDenied):
      # logging.warn is a deprecated alias; use logging.warning.
      logging.warning('Failed to set adb process CPU affinity')
| bsd-3-clause |
rahulunair/nova | nova/compute/build_results.py | 96 | 1039 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible results from instance build
Results represent the ultimate result of an attempt to build an instance.
Results describe whether an instance was actually built, failed to build, or
was rescheduled.
"""
ACTIVE = 'active' # Instance is running
FAILED = 'failed' # Instance failed to build and was not rescheduled
RESCHEDULED = 'rescheduled' # Instance failed to build, but was rescheduled
| apache-2.0 |
sassoftware/mirrorball | updatebot/lib/util.py | 1 | 7770 | #
# Copyright (c) SAS Institute, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Module for common utility functions.
"""
# W0611 - Unused import rmtree
# pylint: disable=W0611
import os
import epdb
import signal
import resource
from rmake.lib import osutil
from conary.lib.util import rmtree
from conary.lib.util import convertPackageNameToClassName as _pkgNameToClassName
from rpmutils import rpmvercmp
def join(a, *b):
    """
    Version of os.path.join that doesn't reroot when it finds a leading /.

    Each segment is normalized independently and appended with a single
    separator, so absolute later segments are treated as relative.
    """
    first = os.path.normpath(a)
    if first == '/':
        first = ''
    pieces = [first]
    pieces.extend(os.sep + os.path.normpath(part) for part in b)
    return os.path.abspath(''.join(pieces))
def srpmToConaryVersion(srcPkg):
    """
    Get the equvialent conary version from a srcPkg object.
    @param srcPkg: package object for a srpm
    @type srcPkg: repomd.packagexml._Package
    @return conary trailing version

    Dashes are not allowed in conary versions, so any '-' in the rpm
    version/release is mapped to '_'.
    """
    return '_'.join((srcPkg.version.replace('-', '_'),
                     srcPkg.release.replace('-', '_')))
def packagevercmp(a, b):
    """
    Compare two package objects using rpm epoch/version/release semantics.
    @param a: package object from repo metadata
    @type a: repomd.packagexml._Package
    @param b: package object from repo metadata
    @type b: repomd.packagexml._Package
    @return: cmp-style result (-1, 0, or 1) from rpmvercmp
    """
    # Not all "packages" have epoch set. If comparing between two packages, at
    # least one without an epoch specified, ignore epoch.
    if a.epoch is not None and b.epoch is not None:
        epochcmp = rpmvercmp(a.epoch, b.epoch)
        if epochcmp != 0:
            return epochcmp
    vercmp = rpmvercmp(a.version, b.version)
    if vercmp != 0:
        return vercmp
    relcmp = rpmvercmp(a.release, b.release)
    if relcmp != 0:
        return relcmp
    return 0
def packageCompare(a, b):
    """
    Compare package objects by epoch/version/release, then by arch.
    @return: cmp-style result (-1, 0, or 1)
    """
    pkgvercmp = packagevercmp(a, b)
    if pkgvercmp != 0:
        return pkgvercmp
    # Fall back to a plain string comparison of architectures so that
    # otherwise-identical packages still sort deterministically.
    archcmp = cmp(a.arch, b.arch)
    if archcmp != 0:
        return archcmp
    return 0
def packageCompareByName(a, b):
    """
    Compare packages by name first, then by packagevercmp.
    @return: cmp-style result (-1, 0, or 1)
    """
    nameCmp = cmp(a.name, b.name)
    if nameCmp != 0:
        return nameCmp
    return packagevercmp(a, b)
class Metadata(object):
    """
    Base class for repository metadata.

    Indexes the given package objects by file location and maps every
    package to the (last seen) source package of the set.
    """
    def __init__(self, pkgs):
        self.pkgs = pkgs
        # location/path string -> package object
        self.locationMap = {}
        # package object -> source package of this set (or None)
        self.binPkgMap = {}
        srcPkg = None
        for pkg in self.pkgs:
            if hasattr(pkg, 'location'):
                self.locationMap[pkg.location] = pkg
            elif hasattr(pkg, 'files'):
                self.locationMap.update((fn, pkg) for fn in pkg.files)
            if pkg.arch == 'src':
                srcPkg = pkg
        self.binPkgMap = dict((pkg, srcPkg) for pkg in self.pkgs)
def isKernelModulePackage(paths):
    """
    Check if a package file name or location is a kernel module.
    @param paths: a single path string or an iterable of path strings
    @return: True if any basename matches a kernel-module naming pattern
    """
    # Use isinstance rather than an exact type check so str subclasses
    # (and unicode under Python 2 via str subclassing there) are handled.
    if isinstance(paths, str):
        paths = [ paths, ]
    for path in paths:
        basePath = os.path.basename(path)
        if (basePath.startswith('kmod-') or
            basePath.startswith('kernel-module') or
            '-kmod' in basePath):
            return True
    return False
def setMaxRLimit():
    """
    Raise the file-descriptor soft limit to the hard limit.
    @return: the hard limit now in effect as the soft limit
    """
    hard = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
    return hard
def getRLimit():
    """
    Get the current soft limit on the number of file descriptors.
    """
    return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
def getAvailableFileDescriptors(setMax=False):
    """
    Get the number of file descriptors this process may still open.
    @param setMax: when True, first raise the soft limit to the hard limit
    """
    # Count currently open descriptors via procfs (Linux-specific).
    openfds = len(os.listdir('/proc/self/fd'))
    if setMax:
        setMaxRLimit()
    return getRLimit() - openfds
def setupDebugHandler(serve=False):
    """
    Sets up a USR1 signal handler to trigger epdb.serv().
    @param serve: when True, SIGUSR1 starts a remote epdb server; otherwise
                  it drops into a local epdb session.
    """
    def handler(signum, sigtb):
        # Standard handler signature (signum, frame); both are unused.
        if serve:
            epdb.serve()
        else:
            epdb.st()
    signal.signal(signal.SIGUSR1, handler)
def convertPackageNameToClassName(pkgName):
    # Conary's conversion can leave dots in the result; dots are not valid
    # in Python class names, so flatten them to underscores.
    name = _pkgNameToClassName(pkgName)
    return name.replace('.', '_')
def askYn(prompt, default=None):
    """
    Interactively ask a yes/no question until a valid answer is given.
    Accepts y/yes, n/no, d/debug (drops into the epdb debugger and asks
    again), or an empty response (returns @default). EOF returns False.
    """
    while True:
        try:
            resp = raw_input(prompt + ' ')
        except EOFError:
            return False
        resp = resp.lower()
        if resp in ('y', 'yes'):
            return True
        elif resp in ('n', 'no'):
            return False
        elif resp in ('d', 'debug'):
            epdb.st()
        elif not resp:
            return default
        else:
            print "Unknown response '%s'." % resp
def setproctitle(title):
    """
    Best-effort update of the process title shown in ps/top.
    @param title: suffix appended after the 'mirrorball' prefix
    """
    try:
        osutil.setproctitle('mirrorball %s' % (title,))
    except Exception:
        # Changing the title is purely cosmetic; never let a failure break
        # the caller. Narrowed from a bare except so SystemExit and
        # KeyboardInterrupt still propagate.
        pass
def recurseDeps(pkg):
    """
    Collect the transitive dependency closure of @pkg.
    @return: list of dependency objects, deepest-first, without duplicates
             and excluding @pkg itself (unless reached through a cycle).
    """
    ordered = []
    visited = set()
    def _walk(node):
        # Depth-first walk; a node's dependencies are appended before the
        # node itself is appended by its own parent.
        if node in visited:
            return
        visited.add(node)
        for dep in node.dependencies:
            _walk(dep)
            if dep not in ordered:
                ordered.append(dep)
    _walk(pkg)
    return ordered
class BoundedCounter(object):
    """
    Basic counter that can be incremented and decremented while enforcing
    bounds.

    The counter is truthy while above its lower bound. When boundsErrors is
    False, moves past either bound are silently ignored.
    """
    def __init__(self, low, high, cur, boundsErrors=True):
        self._low = low
        self._high = high
        self._cur = cur
        self._boundsErrors = boundsErrors
    def __str__(self):
        return str(self._cur)
    def __repr__(self):
        return '<Counter(%s, %s, %s)>' % (self._low, self._high, self._cur)
    def __bool__(self):
        # NOTE(review): __bool__ is the Python 3 hook; under Python 2 (this
        # file uses py2-only raise syntax) truth testing falls back to
        # __len__, which agrees: False exactly when _cur == _low.
        if self._cur == self._low:
            return False
        else:
            return True
    def __len__(self):
        return self._cur - self._low
    def __add__(self, other):
        # Implemented as repeated increment so bounds are enforced at every
        # step. NOTE(review): a negative integer would loop indefinitely
        # (or until a bounds error) -- callers appear to pass positives only.
        if isinstance(other, int):
            while other:
                self.increment()
                other -= 1
        else:
            raise RuntimeError, 'Counters only support adding integers'
        return self
    def __sub__(self, other):
        if isinstance(other, int):
            while other:
                self.decrement()
                other -= 1
        else:
            raise RuntimeError, 'Counters only support subtracting integers'
        return self
    def __cmp__(self, other):
        # Python 2 comparison protocol; compares by current value.
        if isinstance(other, int):
            return cmp(self._cur, other)
        elif isinstance(other, self.__class__):
            return cmp(self._cur, other._cur)
        else:
            raise (RuntimeError, 'Counters only support comparision operations '
                                 'against integers and other Counter instances')
    @property
    def upperlimit(self):
        return self._high
    @property
    def lowerlimit(self):
        return self._low
    def increment(self):
        # Move up by one; past the upper bound either raise or ignore.
        if self._cur + 1 <= self._high:
            self._cur += 1
        elif self._boundsErrors:
            raise RuntimeError, 'Counter has been incremented past upper bounds'
    def decrement(self):
        # Move down by one; past the lower bound either raise or ignore.
        if self._cur - 1 >= self._low:
            self._cur -= 1
        elif self._boundsErrors:
            raise RuntimeError, 'Counter has been decremented past lower bounds'
| apache-2.0 |
kmARC/Cloud99 | runners/baseRunner.py | 1 | 1077 | import ha_engine.ha_infra as common
LOG = common.ha_logging(__name__)
class BaseRunner(object):
    """Abstract base class for HA runner plugins.

    Subclasses implement setup/execute/teardown and the completion check.
    NOTE(review): get_ha_interval()/get_ha_start_delay() read attributes
    that are never assigned here -- presumably subclasses set ha_interval
    and ha_start_delay; confirm before relying on them.
    """
    def __init__(self, input_args):
        self.ha_report = []
        self.input_args = {}
        if input_args:
            self.set_input_arguments(input_args)
    def set_input_arguments(self, input_args):
        # Replaces (does not merge with) any previously stored arguments.
        self.input_args = input_args
        LOG.info("Self, input %s ", str(self.input_args))
    def get_input_arguments(self):
        return self.input_args
    def execute(self, sync=None, finish_execution=None):
        raise NotImplementedError('Subclass should implement this method')
    def setup(self):
        raise NotImplementedError('Subclass should implement this method')
    def teardown(self):
        raise NotImplementedError('Subclass should implement this method')
    def is_module_exeution_completed(self, finish_exection):
        # NOTE(review): the method and parameter names contain typos
        # ("exeution", "exection"), but renaming would break callers.
        raise NotImplementedError('Subclass should implement this method')
    def get_ha_interval(self):
        return self.ha_interval
    def get_ha_start_delay(self):
        return self.ha_start_delay
| apache-2.0 |
lamby/python-social-auth | social/strategies/cherrypy_strategy.py | 77 | 1924 | import six
import cherrypy
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class CherryPyJinja2TemplateStrategy(BaseTemplateStrategy):
    """Template strategy rendering through the cherrypy-attached Jinja2 env."""
    def __init__(self, strategy):
        self.strategy = strategy
        # Relies on the application having attached a Jinja2 Environment to
        # cherrypy.tools as 'jinja2env'.
        self.env = cherrypy.tools.jinja2env
    def render_template(self, tpl, context):
        # Look the template up by name through the environment's loader.
        return self.env.get_template(tpl).render(context)
    def render_string(self, html, context):
        # Treat the given string itself as the template source.
        return self.env.from_string(html).render(context)
class CherryPyStrategy(BaseStrategy):
    """python-social-auth strategy backed by cherrypy request/session state."""
    DEFAULT_TEMPLATE_STRATEGY = CherryPyJinja2TemplateStrategy
    def get_setting(self, name):
        # Settings come straight from the global cherrypy config; missing
        # names raise KeyError.
        return cherrypy.config[name]
    def request_data(self, merge=True):
        if merge:
            # cherrypy merges query-string and body parameters here.
            data = cherrypy.request.params
        elif cherrypy.request.method == 'POST':
            # NOTE(review): 'cherrypy.body' (rather than
            # cherrypy.request.body) is unusual -- confirm this resolves at
            # runtime for body-only parameters.
            data = cherrypy.body.params
        else:
            data = cherrypy.request.params
        return data
    def request_host(self):
        return cherrypy.request.base
    def redirect(self, url):
        # cherrypy implements redirects as raised exceptions.
        raise cherrypy.HTTPRedirect(url)
    def html(self, content):
        return content
    def authenticate(self, backend, *args, **kwargs):
        # Inject strategy/storage/backend so backends stay framework-neutral.
        kwargs['strategy'] = self
        kwargs['storage'] = self.storage
        kwargs['backend'] = backend
        return backend.authenticate(*args, **kwargs)
    def session_get(self, name, default=None):
        return cherrypy.session.get(name, default)
    def session_set(self, name, value):
        cherrypy.session[name] = value
    def session_pop(self, name):
        cherrypy.session.pop(name, None)
    def session_setdefault(self, name, value):
        return cherrypy.session.setdefault(name, value)
    def build_absolute_uri(self, path=None):
        return cherrypy.url(path or '')
    def is_response(self, value):
        # A response is either a rendered string or a cherrypy control-flow
        # exception (e.g. the HTTPRedirect raised by redirect()).
        return isinstance(value, six.string_types) or \
            isinstance(value, cherrypy.CherryPyException)
| bsd-3-clause |
jiangwei1221/django-virtualenv-demo | env/lib/python2.7/site-packages/debug_toolbar/panels/headers.py | 30 | 2025 | from __future__ import absolute_import, unicode_literals
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import Panel
class HeadersPanel(Panel):
    """
    A panel to display HTTP headers.
    """
    # List of environment variables we want to display
    ENVIRON_FILTER = set((
        'CONTENT_LENGTH',
        'CONTENT_TYPE',
        'DJANGO_SETTINGS_MODULE',
        'GATEWAY_INTERFACE',
        'QUERY_STRING',
        'PATH_INFO',
        'PYTHONPATH',
        'REMOTE_ADDR',
        'REMOTE_HOST',
        'REQUEST_METHOD',
        'SCRIPT_NAME',
        'SERVER_NAME',
        'SERVER_PORT',
        'SERVER_PROTOCOL',
        'SERVER_SOFTWARE',
        'TZ',
    ))
    title = _("Headers")
    template = 'debug_toolbar/panels/headers.html'
    def process_request(self, request):
        # Sort for a stable display; META mixes HTTP_* headers with plain
        # WSGI environment keys.
        wsgi_env = list(sorted(request.META.items()))
        self.request_headers = OrderedDict(
            (unmangle(k), v) for (k, v) in wsgi_env if is_http_header(k))
        # Cookies get their own panel; avoid duplicating a large value here.
        if 'Cookie' in self.request_headers:
            self.request_headers['Cookie'] = '=> see Request panel'
        self.environ = OrderedDict(
            (k, v) for (k, v) in wsgi_env if k in self.ENVIRON_FILTER)
        self.record_stats({
            'request_headers': self.request_headers,
            'environ': self.environ,
        })
    def process_response(self, request, response):
        self.response_headers = OrderedDict(sorted(response.items()))
        self.record_stats({
            'response_headers': self.response_headers,
        })
def is_http_header(wsgi_key):
    # The WSGI spec says that keys should be str objects in the environ dict,
    # but this isn't true in practice. See issues #449 and #482.
    if not isinstance(wsgi_key, str):
        return False
    return wsgi_key.startswith('HTTP_')
def unmangle(wsgi_key):
    # Turn a WSGI key like 'HTTP_USER_AGENT' back into 'User-Agent'.
    stripped = wsgi_key[len('HTTP_'):]
    return stripped.replace('_', '-').title()
| unlicense |
dharmabumstead/ansible | lib/ansible/modules/remote_management/hpilo/hpilo_facts.py | 46 | 7753 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: hpilo_facts
version_added: "2.3"
author: Dag Wieers (@dagwieers)
short_description: Gather facts through an HP iLO interface
description:
- This module gathers facts for a specific system using its HP iLO interface.
These facts include hardware and network related information useful
for provisioning (e.g. macaddress, uuid).
- This module requires the hpilo python module.
options:
host:
description:
- The HP iLO hostname/address that is linked to the physical system.
required: true
login:
description:
- The login name to authenticate to the HP iLO interface.
default: Administrator
password:
description:
- The password to authenticate to the HP iLO interface.
default: admin
ssl_version:
description:
- Change the ssl_version used.
default: TLSv1
choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
version_added: '2.4'
requirements:
- hpilo
notes:
- This module ought to be run from a system that can access the HP iLO
interface directly, either by using C(local_action) or using C(delegate_to).
'''
EXAMPLES = r'''
# Task to gather facts from a HP iLO interface only if the system is an HP server
- hpilo_facts:
host: YOUR_ILO_ADDRESS
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
when: cmdb_hwmodel.startswith('HP ')
delegate_to: localhost
- fail:
msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ hw_system_serial }}) !'
when: cmdb_serialno != hw_system_serial
'''
RETURN = r'''
# Typical output of HP iLO_facts for a physical system
hw_bios_date:
description: BIOS date
returned: always
type: string
sample: 05/05/2011
hw_bios_version:
description: BIOS version
returned: always
type: string
sample: P68
hw_ethX:
description: Interface information (for each interface)
returned: always
type: dictionary
sample:
- macaddress: 00:11:22:33:44:55
macaddress_dash: 00-11-22-33-44-55
hw_eth_ilo:
description: Interface information (for the iLO network interface)
returned: always
type: dictionary
sample:
- macaddress: 00:11:22:33:44:BA
- macaddress_dash: 00-11-22-33-44-BA
hw_product_name:
description: Product name
returned: always
type: string
sample: ProLiant DL360 G7
hw_product_uuid:
description: Product UUID
returned: always
type: string
sample: ef50bac8-2845-40ff-81d9-675315501dac
hw_system_serial:
description: System serial number
returned: always
type: string
sample: ABC12345D6
hw_uuid:
description: Hardware UUID
returned: always
type: string
sample: 123456ABC78901D2
'''
import re
import warnings
try:
import hpilo
HAS_HPILO = True
except ImportError:
HAS_HPILO = False
from ansible.module_utils.basic import AnsibleModule
# Suppress warnings from hpilo
warnings.simplefilter('ignore')
def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
    """Build an Ansible fact name and MAC facts from a flat NIC entry.

    :param entry: dict from iLO host data with at least a 'MAC' key and,
        for regular NICs, a numeric 'Port' key (1-based).
    :param non_numeric: fact name to use when 'Port' is missing or not a
        number (e.g. the iLO management interface itself).
    :return: tuple ``(factname, facts)`` where facts holds the MAC address
        in colon- and dash-separated form.
    """
    try:
        # Ports are numbered from 1 in the iLO data; facts use 0-based names.
        factname = 'hw_eth' + str(int(entry['Port']) - 1)
    except (KeyError, ValueError, TypeError):
        # 'Port' absent or non-numeric: fall back to the supplied name.
        factname = non_numeric

    facts = {
        'macaddress': entry['MAC'].replace('-', ':'),
        'macaddress_dash': entry['MAC']
    }
    return (factname, facts)
def main():
    """Module entry point: gather hardware facts from the iLO and exit.

    Connects to the iLO with the given credentials, walks the SMBIOS-style
    host data for BIOS/system/NIC facts, adds the embedded-health report,
    and returns everything through ``module.exit_json(ansible_facts=...)``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', required=True),
            login=dict(type='str', default='Administrator'),
            password=dict(type='str', default='admin', no_log=True),
            ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
        ),
        supports_check_mode=True,
    )

    if not HAS_HPILO:
        module.fail_json(msg='The hpilo python module is required')

    host = module.params['host']
    login = module.params['login']
    password = module.params['password']
    # Map the option value (e.g. 'TLSv1_2') onto the matching
    # ssl.PROTOCOL_TLSv1_2 constant re-exported through the hpilo module.
    ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))

    ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)

    facts = {
        'module_hw': True,
    }

    # TODO: Count number of CPUs, DIMMs and total memory
    data = ilo.get_host_data()
    for entry in data:
        if 'type' not in entry:
            continue
        elif entry['type'] == 0:  # BIOS Information
            facts['hw_bios_version'] = entry['Family']
            facts['hw_bios_date'] = entry['Date']
        elif entry['type'] == 1:  # System Information
            facts['hw_uuid'] = entry['UUID']
            facts['hw_system_serial'] = entry['Serial Number'].rstrip()
            facts['hw_product_name'] = entry['Product Name']
            facts['hw_product_uuid'] = entry['cUUID']
        elif entry['type'] == 209:  # Embedded NIC MAC Assignment
            if 'fields' in entry:
                # Default in case a 'MAC' field appears before any 'Port'
                # field; previously that raised UnboundLocalError.
                factname = 'hw_eth_ilo'
                for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
                    if name.startswith('Port'):
                        try:
                            factname = 'hw_eth' + str(int(value) - 1)
                        except (ValueError, TypeError):
                            factname = 'hw_eth_ilo'
                    elif name.startswith('MAC'):
                        facts[factname] = {
                            'macaddress': value.replace('-', ':'),
                            'macaddress_dash': value
                        }
            else:
                (factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
                facts[factname] = entry_facts
        elif entry['type'] == 209:  # HPQ NIC iSCSI MAC Info
            # NOTE(review): unreachable — the branch above already matches
            # type 209, so this iSCSI parsing never runs. Kept verbatim for
            # reference; the intended SMBIOS type number should be confirmed
            # against the HP host-data documentation.
            factname = 'hw_iscsi_ilo'
            for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
                if name.startswith('Port'):
                    try:
                        factname = 'hw_iscsi' + str(int(value) - 1)
                    except (ValueError, TypeError):
                        factname = 'hw_iscsi_ilo'
                elif name.startswith('MAC'):
                    facts[factname] = {
                        'macaddress': value.replace('-', ':'),
                        'macaddress_dash': value
                    }
        elif entry['type'] == 233:  # Embedded NIC MAC Assignment (Alternate data format)
            (factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
            facts[factname] = entry_facts

    # Collect health (RAM/CPU data)
    health = ilo.get_embedded_health()
    facts['hw_health'] = health

    memory_details_summary = health.get('memory', {}).get('memory_details_summary')
    # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
    if memory_details_summary:
        facts['hw_memory_details_summary'] = memory_details_summary
        facts['hw_memory_total'] = 0
        for cpu, details in memory_details_summary.items():
            cpu_total_memory_size = details.get('total_memory_size')
            if cpu_total_memory_size:
                ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
                if ram:
                    # Only GB-denominated sizes are accumulated; other units
                    # are ignored, matching the original behaviour.
                    if ram.group(2) == 'GB':
                        facts['hw_memory_total'] = facts['hw_memory_total'] + int(ram.group(1))
        # reformat into a text friendly format
        facts['hw_memory_total'] = "{0} GB".format(facts['hw_memory_total'])

    module.exit_json(ansible_facts=facts)
if __name__ == '__main__':
main()
| gpl-3.0 |
tommyip/zulip | zerver/migrations/0221_subscription_notifications_data_migration.py | 3 | 2687 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-13 20:13
from __future__ import unicode_literals
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
RECIPIENT_STREAM = 2
SETTINGS_MAP = {
'desktop_notifications': 'enable_stream_desktop_notifications',
'audible_notifications': 'enable_stream_sounds',
'push_notifications': 'enable_stream_push_notifications',
'email_notifications': 'enable_stream_email_notifications',
}
def update_notification_settings(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Forward migration: null out per-subscription stream notification
    settings that are identical to the owner's global default, so those
    subscriptions fall back to the user-level setting."""
    Subscription = apps.get_model('zerver', 'Subscription')
    UserProfile = apps.get_model('zerver', 'UserProfile')
    for sub_setting_name, user_setting_name in SETTINGS_MAP.items():
        for setting_value in (True, False):
            matching_users = UserProfile.objects.filter(**{user_setting_name: setting_value})
            Subscription.objects.filter(
                user_profile__in=matching_users,
                recipient__type=RECIPIENT_STREAM,
                **{sub_setting_name: setting_value}
            ).update(**{sub_setting_name: None})
def reverse_notification_settings(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Reverse migration: re-materialize NULL per-subscription settings from
    the owner's global default for stream subscriptions, then force any
    remaining NULLs on personal/huddle recipients (types 1 and 3) to True."""
    Subscription = apps.get_model('zerver', 'Subscription')
    UserProfile = apps.get_model('zerver', 'UserProfile')
    for sub_setting_name, user_setting_name in SETTINGS_MAP.items():
        for setting_value in (True, False):
            matching_users = UserProfile.objects.filter(**{user_setting_name: setting_value})
            Subscription.objects.filter(
                user_profile__in=matching_users,
                recipient__type=RECIPIENT_STREAM,
                **{sub_setting_name: None}
            ).update(**{sub_setting_name: setting_value})
    for sub_setting_name in SETTINGS_MAP:
        Subscription.objects.filter(
            recipient__type__in=[1, 3],
            **{sub_setting_name: None}
        ).update(**{sub_setting_name: True})
class Migration(migrations.Migration):
    # Data migration: collapse per-subscription stream notification settings
    # into NULL when they match the user's global default (reversible).

    dependencies = [
        ('zerver', '0220_subscription_notification_settings'),
    ]

    operations = [
        migrations.RunPython(update_notification_settings,
                             reverse_notification_settings),
    ]
| apache-2.0 |
davisein/jitsudone | django/django/contrib/gis/db/backends/postgis/creation.py | 99 | 2854 | from django.conf import settings
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
class PostGISCreation(DatabaseCreation):
    """Database creation backend for PostGIS: emits the extra SQL needed to
    create geometry columns and their spatial indexes."""

    # Index type and operator class used for spatial indexes.
    geom_index_type = 'GIST'
    geom_index_opts = 'GIST_GEOMETRY_OPS'

    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField

        output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)

        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table

            if f.geography:
                # Geography columns are created normally.
                pass
            else:
                # Geometry columns are created by `AddGeometryColumn`
                # stored procedure.
                output.append(style.SQL_KEYWORD('SELECT ') +
                              style.SQL_TABLE('AddGeometryColumn') + '(' +
                              style.SQL_TABLE(gqn(db_table)) + ', ' +
                              style.SQL_FIELD(gqn(f.column)) + ', ' +
                              style.SQL_FIELD(str(f.srid)) + ', ' +
                              style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
                              style.SQL_KEYWORD(str(f.dim)) + ');')

                if not f.null:
                    # Add a NOT NULL constraint to the field
                    output.append(style.SQL_KEYWORD('ALTER TABLE ') +
                                  style.SQL_TABLE(qn(db_table)) +
                                  style.SQL_KEYWORD(' ALTER ') +
                                  style.SQL_FIELD(qn(f.column)) +
                                  style.SQL_KEYWORD(' SET NOT NULL') + ';')

            if f.spatial_index:
                # Spatial indexes created the same way for both Geometry and
                # Geography columns
                if f.geography:
                    index_opts = ''
                else:
                    index_opts = ' ' + style.SQL_KEYWORD(self.geom_index_opts)
                output.append(style.SQL_KEYWORD('CREATE INDEX ') +
                              style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
                              style.SQL_KEYWORD(' ON ') +
                              style.SQL_TABLE(qn(db_table)) +
                              style.SQL_KEYWORD(' USING ') +
                              style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
                              style.SQL_FIELD(qn(f.column)) + index_opts + ' );')
        return output

    def sql_table_creation_suffix(self):
        # Test databases are cloned from the PostGIS template database so the
        # spatial types/functions are available without re-running the setup.
        qn = self.connection.ops.quote_name
        return ' TEMPLATE %s' % qn(getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis'))
| bsd-3-clause |
Chilledheart/chromium | tools/telemetry/telemetry/internal/util/command_line_unittest.py | 17 | 1373 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.internal.util import command_line
class BenchmarkFoo(object):
    """Stub benchmark whose name is matched against in the tests below."""

    @classmethod
    def Name(cls):
        return 'FooBenchmark'
class BenchmarkBar(object):
    """Stub benchmark with a longer name, for long-description matching."""

    @classmethod
    def Name(cls):
        return 'BarBenchmarkkkkk'
class UnusualBenchmark(object):
    """Stub benchmark with an atypical, multi-word name."""

    @classmethod
    def Name(cls):
        return 'I have a very unusual name'
class CommandLineUnittest(unittest.TestCase):
    def testGetMostLikelyMatchedObject(self):
        # Test moved from telemetry/benchmark_runner_unittest.py
        benchmarks = [BenchmarkFoo, BenchmarkBar, UnusualBenchmark]

        def matches(query):
            return command_line.GetMostLikelyMatchedObject(
                benchmarks, query, name_func=lambda x: x.Name())

        self.assertEquals([BenchmarkFoo, BenchmarkBar],
                          matches('BenchmarkFooz'))
        self.assertEquals([BenchmarkBar, BenchmarkFoo],
                          matches('BarBenchmark'))
        self.assertEquals([UnusualBenchmark], matches('unusual'))
| bsd-3-clause |
alangwansui/mtl_ordercenter | openerp/addons/audittrail/audittrail.py | 29 | 29115 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.osv.osv import object_proxy
from openerp.tools.translate import _
from openerp import pooler
import time
from openerp import tools
from openerp import SUPERUSER_ID
class audittrail_rule(osv.osv):
    """
    Configuration of which operations are logged for one model.

    A rule targets a single model (``object_id``) and optionally a set of
    users; the boolean ``log_*`` flags select which methods (create, read,
    write, unlink, actions, workflow) produce audittrail.log records.
    """
    _name = 'audittrail.rule'
    _description = "Audittrail Rule"
    _columns = {
        "name": fields.char("Rule Name", size=32, required=True),
        "object_id": fields.many2one('ir.model', 'Object', required=True, help="Select object for which you want to generate log."),
        "user_id": fields.many2many('res.users', 'audittail_rules_users',
                                    'user_id', 'rule_id', 'Users', help="if User is not added then it will applicable for all users"),
        "log_read": fields.boolean("Log Reads", help="Select this if you want to keep track of read/open on any record of the object of this rule"),
        "log_write": fields.boolean("Log Writes", help="Select this if you want to keep track of modification on any record of the object of this rule"),
        "log_unlink": fields.boolean("Log Deletes", help="Select this if you want to keep track of deletion on any record of the object of this rule"),
        "log_create": fields.boolean("Log Creates",help="Select this if you want to keep track of creation on any record of the object of this rule"),
        "log_action": fields.boolean("Log Action",help="Select this if you want to keep track of actions on the object of this rule"),
        "log_workflow": fields.boolean("Log Workflow",help="Select this if you want to keep track of workflow on any record of the object of this rule"),
        "state": fields.selection((("draft", "Draft"), ("subscribed", "Subscribed")), "Status", required=True),
        "action_id": fields.many2one('ir.actions.act_window', "Action ID"),
    }
    _defaults = {
        'state': 'draft',
        'log_create': 1,
        'log_unlink': 1,
        'log_write': 1,
    }
    _sql_constraints = [
        ('model_uniq', 'unique (object_id)', """There is already a rule defined on this object\n You cannot define another: please edit the existing one.""")
    ]
    # rule_id -> list of (object, attribute_name, original_value) tuples,
    # restored by unsubscribe(). NOTE(review): nothing visible in this file
    # ever populates this dict — confirm against the rest of the module.
    __functions = {}

    def subscribe(self, cr, uid, ids, *args):
        """
        Subscribe Rule for auditing changes on object and apply shortcut for logs on that object.
        Creates a 'View Log' window action bound to the rule's model and
        marks the rule as 'subscribed'.
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of Audittrail Rule's IDs.
        @return: True
        """
        obj_action = self.pool.get('ir.actions.act_window')
        obj_model = self.pool.get('ir.model.data')
        #start Loop
        for thisrule in self.browse(cr, uid, ids):
            obj = self.pool.get(thisrule.object_id.model)
            if not obj:
                raise osv.except_osv(
                        _('WARNING: audittrail is not part of the pool'),
                        _('Change audittrail depends -- Setting rule as DRAFT'))
                # NOTE(review): unreachable — the raise above exits first.
                self.write(cr, uid, [thisrule.id], {"state": "draft"})
            val = {
                 "name": 'View Log',
                 "res_model": 'audittrail.log',
                 "src_model": thisrule.object_id.model,
                 "domain": "[('object_id','=', " + str(thisrule.object_id.id) + "), ('res_id', '=', active_id)]"
            }
            # Action is created as superuser so non-admin subscribers work.
            action_id = obj_action.create(cr, SUPERUSER_ID, val)
            self.write(cr, uid, [thisrule.id], {"state": "subscribed", "action_id": action_id})
            keyword = 'client_action_relate'
            value = 'ir.actions.act_window,' + str(action_id)
            # Register the sidebar "View log" shortcut on the target model.
            res = obj_model.ir_set(cr, SUPERUSER_ID, 'action', keyword, 'View_log_' + thisrule.object_id.model, [thisrule.object_id.model], value, replace=True, isobject=True, xml_id=False)
        #End Loop
        return True

    def unsubscribe(self, cr, uid, ids, *args):
        """
        Unsubscribe Auditing Rule on object: removes the 'View Log' action
        and shortcut created by subscribe() and resets the rule to 'draft'.
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of Audittrail Rule's IDs.
        @return: True
        """
        obj_action = self.pool.get('ir.actions.act_window')
        ir_values_obj = self.pool.get('ir.values')
        value=''
        #start Loop
        for thisrule in self.browse(cr, uid, ids):
            if thisrule.id in self.__functions:
                # Restore any functions patched for this rule.
                for function in self.__functions[thisrule.id]:
                    setattr(function[0], function[1], function[2])
            w_id = obj_action.search(cr, uid, [('name', '=', 'View Log'), ('res_model', '=', 'audittrail.log'), ('src_model', '=', thisrule.object_id.model)])
            if w_id:
                obj_action.unlink(cr, SUPERUSER_ID, w_id)
                value = "ir.actions.act_window" + ',' + str(w_id[0])
                val_id = ir_values_obj.search(cr, uid, [('model', '=', thisrule.object_id.model), ('value', '=', value)])
                if val_id:
                    ir_values_obj = pooler.get_pool(cr.dbname).get('ir.values')
                    res = ir_values_obj.unlink(cr, uid, [val_id[0]])
            self.write(cr, uid, [thisrule.id], {"state": "draft"})
        #End Loop
        return True
class audittrail_log(osv.osv):
    """
    One audittrail log entry: which user ran which method on which record
    of which model, and when. Field-level changes live in the related
    audittrail.log.line records.
    """
    _name = 'audittrail.log'
    _description = "Audittrail Log"

    def _name_get_resname(self, cr, uid, ids, *args):
        # Resolve the display name of the logged resource by reading its
        # 'name' field; False when the log has no model/record reference.
        data = {}
        for resname in self.browse(cr, uid, ids,[]):
            model_object = resname.object_id
            res_id = resname.res_id
            if model_object and res_id:
                model_pool = self.pool.get(model_object.model)
                res = model_pool.read(cr, uid, res_id, ['name'])
                data[resname.id] = res['name']
            else:
                data[resname.id] = False
        return data

    _columns = {
        "name": fields.char("Resource Name",size=64),
        "object_id": fields.many2one('ir.model', 'Object'),
        "user_id": fields.many2one('res.users', 'User'),
        "method": fields.char("Method", size=64),
        "timestamp": fields.datetime("Date"),
        "res_id": fields.integer('Resource Id'),
        "line_ids": fields.one2many('audittrail.log.line', 'log_id', 'Log lines'),
    }

    _defaults = {
        # Server-local timestamp at record creation.
        "timestamp": lambda *a: time.strftime("%Y-%m-%d %H:%M:%S")
    }
    # Newest entries first.
    _order = "timestamp desc"
class audittrail_log_line(osv.osv):
    """
    One field-level change within an audittrail log entry: the field that
    changed plus its old/new raw values and textual representations.
    """
    _name = 'audittrail.log.line'
    _description = "Log Line"
    _columns = {
        'field_id': fields.many2one('ir.model.fields', 'Fields', required=True),
        'log_id': fields.many2one('audittrail.log', 'Log'),
        'log': fields.integer("Log ID"),
        'old_value': fields.text("Old Value"),
        'new_value': fields.text("New Value"),
        'old_value_text': fields.text('Old value Text'),
        'new_value_text': fields.text('New value Text'),
        'field_description': fields.char('Field Description', size=64),
    }
class audittrail_objects_proxy(object_proxy):
""" Uses Object proxy for auditing changes on object of subscribed Rules"""
def get_value_text(self, cr, uid, pool, resource_pool, method, field, value):
"""
Gets textual values for the fields.
If the field is a many2one, it returns the name.
If it's a one2many or a many2many, it returns a list of name.
In other cases, it just returns the value.
:param cr: the current row, from the database cursor,
:param uid: the current user’s ID for security checks,
:param pool: current db's pooler object.
:param resource_pool: pooler object of the model which values are being changed.
:param field: for which the text value is to be returned.
:param value: value of the field.
:param recursive: True or False, True will repeat the process recursively
:return: string value or a list of values(for O2M/M2M)
"""
field_obj = (resource_pool._all_columns.get(field)).column
if field_obj._type in ('one2many','many2many'):
data = pool.get(field_obj._obj).name_get(cr, uid, value)
#return the modifications on x2many fields as a list of names
res = map(lambda x:x[1], data)
elif field_obj._type == 'many2one':
#return the modifications on a many2one field as its value returned by name_get()
res = value and value[1] or value
else:
res = value
return res
    def create_log_line(self, cr, uid, log_id, model, lines=None):
        """
        Create audittrail.log.line records for changed fields with their old
        and new values, attached to an existing audittrail.log entry.
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param log_id: id of the audittrail.log record the lines belong to
        @param model: ir.model browse record of the object whose values changed
        @param lines: list of dicts (one per changed field) with keys
            'name', 'old_value', 'new_value', 'old_value_text', 'new_value_text'
        """
        if lines is None:
            lines = []
        pool = pooler.get_pool(cr.dbname)
        obj_pool = pool.get(model.model)
        model_pool = pool.get('ir.model')
        field_pool = pool.get('ir.model.fields')
        log_line_pool = pool.get('audittrail.log.line')
        for line in lines:
            field_obj = obj_pool._all_columns.get(line['name'])
            assert field_obj, _("'%s' field does not exist in '%s' model" %(line['name'], model.model))
            field_obj = field_obj.column
            old_value = line.get('old_value', '')
            new_value = line.get('new_value', '')
            search_models = [model.id]
            if obj_pool._inherits:
                # fields inherited through _inherits are declared on the
                # parent models, so include those in the ir.model.fields search
                search_models += model_pool.search(cr, uid, [('model', 'in', obj_pool._inherits.keys())])
            field_id = field_pool.search(cr, uid, [('name', '=', line['name']), ('model_id', 'in', search_models)])
            if field_obj._type == 'many2one':
                # store only the id half of the (id, name) pair
                old_value = old_value and old_value[0] or old_value
                new_value = new_value and new_value[0] or new_value
            vals = {
                "log_id": log_id,
                "field_id": field_id and field_id[0] or False,
                "old_value": old_value,
                "new_value": new_value,
                "old_value_text": line.get('old_value_text', ''),
                "new_value_text": line.get('new_value_text', ''),
                "field_description": field_obj.string
            }
            line_id = log_line_pool.create(cr, uid, vals)
        return True
    def log_fct(self, cr, uid_orig, model, method, fct_src, *args, **kw):
        """
        Logging function: run the original ORM call through fct_src while
        capturing before/after values and delegating log creation to
        process_data().
        @param model: name of the object whose values are being changed
        @param method: method to log: create, read, write, unlink, action or workflow action
        @param fct_src: execute method of Object proxy
        @return: Returns result as per method of Object proxy
        """
        pool = pooler.get_pool(cr.dbname)
        resource_pool = pool.get(model)
        model_pool = pool.get('ir.model')
        model_ids = model_pool.search(cr, SUPERUSER_ID, [('model', '=', model)])
        model_id = model_ids and model_ids[0] or False
        assert model_id, _("'%s' Model does not exist..." %(model))
        # From here on, `model` is the ir.model browse record, not the name.
        model = model_pool.browse(cr, SUPERUSER_ID, model_id)
        # fields to log. currently only used by log on read()
        field_list = []
        # NOTE(review): both names are bound to the SAME dict here; only the
        # read() branch mutates it, where the aliasing appears intentional
        # (change detection is skipped for reads) — confirm before changing.
        old_values = new_values = {}
        if method == 'create':
            res = fct_src(cr, uid_orig, model.model, method, *args, **kw)
            if res:
                res_ids = [res]
                new_values = self.get_data(cr, uid_orig, pool, res_ids, model, method)
        elif method == 'read':
            res = fct_src(cr, uid_orig, model.model, method, *args, **kw)
            if isinstance(res, dict):
                records = [res]
            else:
                records = res
            # build the res_ids and the old_values dict. Here we don't use get_data() to
            # avoid performing an additional read()
            res_ids = []
            for record in records:
                res_ids.append(record['id'])
                old_values[(model.id, record['id'])] = {'value': record, 'text': record}
            # log only the fields read
            field_list = args[1]
        elif method == 'unlink':
            res_ids = args[0]
            old_values = self.get_data(cr, uid_orig, pool, res_ids, model, method)
            # process_data first as fct_src will unlink the record
            self.process_data(cr, uid_orig, pool, res_ids, model, method, old_values, new_values, field_list)
            return fct_src(cr, uid_orig, model.model, method, *args, **kw)
        else: # method is write, action or workflow action
            res_ids = []
            if args:
                res_ids = args[0]
                if isinstance(res_ids, (long, int)):
                    res_ids = [res_ids]
            if res_ids:
                # store the old values into a dictionary
                old_values = self.get_data(cr, uid_orig, pool, res_ids, model, method)
            # process the original function, workflow trigger...
            res = fct_src(cr, uid_orig, model.model, method, *args, **kw)
            if method == 'copy':
                res_ids = [res]
            if res_ids:
                # check the new values and store them into a dictionary
                new_values = self.get_data(cr, uid_orig, pool, res_ids, model, method)
        # compare the old and new values and create audittrail log if needed
        self.process_data(cr, uid_orig, pool, res_ids, model, method, old_values, new_values, field_list)
        return res
    def get_data(self, cr, uid, pool, res_ids, model, method):
        """
        Read all the fields of the given res_ids, recursing into records of
        x2many fields that are themselves covered by an audittrail rule, and
        return the result in a structure used as comparison basis.
        :param cr: the current row, from the database cursor,
        :param uid: the current user's ID. Currently unused: every read here
            is performed as superuser.
        :param pool: current db's pooler object.
        :param res_ids: ids of resources to be logged/compared.
        :param model: ir.model browse record of the object whose values change
        :param method: method to log: create, read, unlink, write, actions, workflow actions
        :return: dict mapping a tuple (model_id, resource_id) with its value and textual value
            { (model_id, resource_id): { 'value': ...
                                         'text': ...
                                       },
            }
        """
        data = {}
        resource_pool = pool.get(model.model)
        # read all the fields of the given resources in super admin mode
        for resource in resource_pool.read(cr, SUPERUSER_ID, res_ids, resource_pool._all_columns):
            values = {}
            values_text = {}
            resource_id = resource['id']
            # loop on each field on the res_ids we just have read
            for field in resource:
                if field in ('__last_update', 'id'):
                    continue
                values[field] = resource[field]
                # get the textual value of that field for this record
                values_text[field] = self.get_value_text(cr, SUPERUSER_ID, pool, resource_pool, method, field, resource[field])
                field_obj = resource_pool._all_columns.get(field).column
                if field_obj._type in ('one2many','many2many'):
                    # check if an audittrail rule apply in super admin mode
                    if self.check_rules(cr, SUPERUSER_ID, field_obj._obj, method):
                        # check if the model associated to a *2m field exists, in super admin mode
                        x2m_model_ids = pool.get('ir.model').search(cr, SUPERUSER_ID, [('model', '=', field_obj._obj)])
                        x2m_model_id = x2m_model_ids and x2m_model_ids[0] or False
                        assert x2m_model_id, _("'%s' Model does not exist..." %(field_obj._obj))
                        x2m_model = pool.get('ir.model').browse(cr, SUPERUSER_ID, x2m_model_id)
                        field_resource_ids = list(set(resource[field]))
                        if model.model == x2m_model.model:
                            # we need to remove current resource_id from the many2many to prevent an infinit loop
                            if resource_id in field_resource_ids:
                                field_resource_ids.remove(resource_id)
                        data.update(self.get_data(cr, SUPERUSER_ID, pool, field_resource_ids, x2m_model, method))
            data[(model.id, resource_id)] = {'text':values_text, 'value': values}
        return data
    def prepare_audittrail_log_line(self, cr, uid, pool, model, resource_id, method, old_values, new_values, field_list=None):
        """
        Compare the data captured before and after the method was executed
        and return a structure with all the information needed to log the
        differences, recursing into x2many fields covered by a rule.
        :param cr: the current row, from the database cursor,
        :param uid: the current user's ID. Currently unused: recursion is
            performed as superuser.
        :param pool: current db's pooler object.
        :param model: ir.model browse record of the object whose values change
        :param resource_id: id of the record whose values are being changed
        :param method: method to log: create, read, unlink, write, actions, workflow actions
        :param old_values: dict of values read before execution of the method
        :param new_values: dict of values read after execution of the method
        :param field_list: optional list of fields to log (currently only
            used for read(); could restrict write logging later on).
        :return: dictionary with
            * keys: tuples built as (model id, resource id) to log
            * values: list of all the changes in field values for this couple
              (model, resource)
            return {
                (model.id, resource_id): []
            }
        The structure is keyed per (model, resource) because modifying a
        record may also require logging changes in records reached through
        its x2many fields.
        """
        if field_list is None:
            field_list = []
        key = (model.id, resource_id)
        lines = {
            key: []
        }
        # loop on all the fields
        for field_name, field_definition in pool.get(model.model)._all_columns.items():
            if field_name in ('__last_update', 'id'):
                continue
            #if the field_list param is given, skip all the fields not in that list
            if field_list and field_name not in field_list:
                continue
            field_obj = field_definition.column
            if field_obj._type in ('one2many','many2many'):
                # checking if an audittrail rule apply in super admin mode
                if self.check_rules(cr, SUPERUSER_ID, field_obj._obj, method):
                    # checking if the model associated to a *2m field exists, in super admin mode
                    x2m_model_ids = pool.get('ir.model').search(cr, SUPERUSER_ID, [('model', '=', field_obj._obj)])
                    x2m_model_id = x2m_model_ids and x2m_model_ids[0] or False
                    assert x2m_model_id, _("'%s' Model does not exist..." %(field_obj._obj))
                    x2m_model = pool.get('ir.model').browse(cr, SUPERUSER_ID, x2m_model_id)
                    # the resource_ids that need to be checked are the sum of both old and previous values (because we
                    # need to log also creation or deletion in those lists).
                    x2m_old_values_ids = old_values.get(key, {'value': {}})['value'].get(field_name, [])
                    x2m_new_values_ids = new_values.get(key, {'value': {}})['value'].get(field_name, [])
                    # We use list(set(...)) to remove duplicates.
                    res_ids = list(set(x2m_old_values_ids + x2m_new_values_ids))
                    if model.model == x2m_model.model:
                        # we need to remove current resource_id from the many2many to prevent an infinit loop
                        if resource_id in res_ids:
                            res_ids.remove(resource_id)
                    for res_id in res_ids:
                        lines.update(self.prepare_audittrail_log_line(cr, SUPERUSER_ID, pool, x2m_model, res_id, method, old_values, new_values, field_list))
            # if the value value is different than the old value: record the change
            if key not in old_values or key not in new_values or old_values[key]['value'][field_name] != new_values[key]['value'][field_name]:
                data = {
                    'name': field_name,
                    'new_value': key in new_values and new_values[key]['value'].get(field_name),
                    'old_value': key in old_values and old_values[key]['value'].get(field_name),
                    'new_value_text': key in new_values and new_values[key]['text'].get(field_name),
                    'old_value_text': key in old_values and old_values[key]['text'].get(field_name)
                }
                lines[key].append(data)
            # On read log add current values for fields.
            if method == 'read':
                data={
                    'name': field_name,
                    'old_value': key in old_values and old_values[key]['value'].get(field_name),
                    'old_value_text': key in old_values and old_values[key]['text'].get(field_name)
                }
                lines[key].append(data)
        return lines
def process_data(self, cr, uid, pool, res_ids, model, method, old_values=None, new_values=None, field_list=None):
"""
This function processes and iterates recursively to log the difference between the old
data (i.e before the method was executed) and the new data and creates audittrail log
accordingly.
:param cr: the current row, from the database cursor,
:param uid: the current user’s ID,
:param pool: current db's pooler object.
:param res_ids: Id's of resource to be logged/compared.
:param model: model object which values are being changed
:param method: method to log: create, read, unlink, write, actions, workflow actions
:param old_values: dict of values read before execution of the method
:param new_values: dict of values read after execution of the method
:param field_list: optional argument containing the list of fields to log. Currently only
used when performing a read, it could be usefull later on if we want to log the write
on specific fields only.
:return: True
"""
if field_list is None:
field_list = []
# loop on all the given ids
for res_id in res_ids:
# compare old and new values and get audittrail log lines accordingly
lines = self.prepare_audittrail_log_line(cr, uid, pool, model, res_id, method, old_values, new_values, field_list)
# if at least one modification has been found
for model_id, resource_id in lines:
line_model = pool.get('ir.model').browse(cr, SUPERUSER_ID, model_id).model
vals = {
'method': method,
'object_id': model_id,
'user_id': uid,
'res_id': resource_id,
}
if (model_id, resource_id) not in old_values and method not in ('copy', 'read'):
# the resource was not existing so we are forcing the method to 'create'
# (because it could also come with the value 'write' if we are creating
# new record through a one2many field)
vals.update({'method': 'create'})
if (model_id, resource_id) not in new_values and method not in ('copy', 'read'):
# the resource is not existing anymore so we are forcing the method to 'unlink'
# (because it could also come with the value 'write' if we are deleting the
# record through a one2many field)
name = old_values[(model_id, resource_id)]['value'].get('name',False)
vals.update({'method': 'unlink'})
else :
name = pool[line_model].name_get(cr, uid, [resource_id])[0][1]
vals.update({'name': name})
# create the audittrail log in super admin mode, only if a change has been detected
if lines[(model_id, resource_id)]:
log_id = pool.get('audittrail.log').create(cr, SUPERUSER_ID, vals)
model = pool.get('ir.model').browse(cr, uid, model_id)
self.create_log_line(cr, SUPERUSER_ID, log_id, model, lines[(model_id, resource_id)])
return True
def check_rules(self, cr, uid, model, method):
"""
Checks if auditrails is installed for that db and then if one rule match
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID,
@param model: value of _name of the object which values are being changed
@param method: method to log: create, read, unlink,write,actions,workflow actions
@return: True or False
"""
pool = pooler.get_pool(cr.dbname)
if 'audittrail.rule' in pool.models:
model_ids = pool.get('ir.model').search(cr, SUPERUSER_ID, [('model', '=', model)])
model_id = model_ids and model_ids[0] or False
if model_id:
rule_ids = pool.get('audittrail.rule').search(cr, SUPERUSER_ID, [('object_id', '=', model_id), ('state', '=', 'subscribed')])
for rule in pool.get('audittrail.rule').read(cr, SUPERUSER_ID, rule_ids, ['user_id','log_read','log_write','log_create','log_unlink','log_action','log_workflow']):
if len(rule['user_id']) == 0 or uid in rule['user_id']:
if rule.get('log_'+method,0):
return True
elif method not in ('default_get','read','fields_view_get','fields_get','search','search_count','name_search','name_get','get','request_get', 'get_sc', 'unlink', 'write', 'create', 'read_group', 'import_data'):
if rule['log_action']:
return True
def execute_cr(self, cr, uid, model, method, *args, **kw):
fct_src = super(audittrail_objects_proxy, self).execute_cr
if self.check_rules(cr,uid,model,method):
return self.log_fct(cr, uid, model, method, fct_src, *args, **kw)
return fct_src(cr, uid, model, method, *args, **kw)
def exec_workflow_cr(self, cr, uid, model, method, *args, **kw):
fct_src = super(audittrail_objects_proxy, self).exec_workflow_cr
if self.check_rules(cr,uid,model,'workflow'):
return self.log_fct(cr, uid, model, method, fct_src, *args, **kw)
return fct_src(cr, uid, model, method, *args, **kw)
# Instantiating the proxy presumably registers it with the service layer so
# ORM calls are routed through execute_cr/exec_workflow_cr above — confirm
# against the netsvc/osv proxy registration convention.
audittrail_objects_proxy()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
abdoosh00/edx-platform | lms/djangoapps/psychometrics/management/commands/init_psychometrics.py | 22 | 2391 | #!/usr/bin/python
#
# generate pyschometrics data from tracking logs and student module data
import json
from courseware.models import StudentModule
from track.models import TrackingLog
from psychometrics.models import PsychometricData
from django.conf import settings
from django.core.management.base import BaseCommand
#db = "ocwtutor" # for debugging
#db = "default"
db = getattr(settings, 'DATABASE_FOR_PSYCHOMETRICS', 'default')
class Command(BaseCommand):
help = "initialize PsychometricData tables from StudentModule instances (and tracking data, if in SQL)."
help += "Note this is done for all courses for which StudentModule instances exist."
def handle(self, *args, **options):
# delete all pmd
#PsychometricData.objects.all().delete()
#PsychometricData.objects.using(db).all().delete()
smset = StudentModule.objects.using(db).exclude(max_grade=None)
for sm in smset:
usage_key = sm.module_state_key
if not usage_key.block_type == "problem":
continue
try:
state = json.loads(sm.state)
done = state['done']
except:
print "Oops, failed to eval state for %s (state=%s)" % (sm, sm.state)
continue
if done: # only keep if problem completed
try:
pmd = PsychometricData.objects.using(db).get(studentmodule=sm)
except PsychometricData.DoesNotExist:
pmd = PsychometricData(studentmodule=sm)
pmd.done = done
pmd.attempts = state['attempts']
# get attempt times from tracking log
uname = sm.student.username
tset = TrackingLog.objects.using(db).filter(username=uname, event_type__contains='save_problem_check')
tset = tset.filter(event_source='server')
tset = tset.filter(event__contains="'%s'" % url)
checktimes = [x.dtcreated for x in tset]
pmd.checktimes = checktimes
if not len(checktimes) == pmd.attempts:
print "Oops, mismatch in number of attempts and check times for %s" % pmd
#print pmd
pmd.save(using=db)
print "%d PMD entries" % PsychometricData.objects.using(db).all().count()
| agpl-3.0 |
wojla/kozaczko | biblio/settings.py | 1 | 2066 | # """
# Django settings for biblio project.
#
# For more information on this file, see
# https://docs.djangoproject.com/en/1.7/topics/settings/
#
# For the full list of settings and their values, see
# https://docs.djangoproject.com/en/1.7/ref/settings/
# """
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_kycwdxsi3%@&**(5b%dsvmhlb%ij8hoo-!w%yoi$qb*fj($a7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# NOTE(review): empty ALLOWED_HOSTS is fine while DEBUG is True, but it must
# be populated before deploying with DEBUG = False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'biblio.urls'
WSGI_APPLICATION = 'biblio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Development database: a single SQLite file alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'pl'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| mit |
losnikitos/googleads-python-lib | examples/dfp/v201411/forecast_service/get_forecast.py | 4 | 3292 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets a forecast for a prospective line item.
To determine which placements exist, run get_all_placements.py."""
from datetime import date
# Import appropriate modules from the client library.
from googleads import dfp
# Set the placement that the prospective line item will target.
TARGET_PLACEMENT_IDS = ['INSERT_PLACEMENT_IDS_HERE']
def main(client, target_placement_ids):
# Initialize appropriate service.
forecast_service = client.GetService('ForecastService', version='v201411')
# Create prospective line item.
line_item = {
'targeting': {
'inventoryTargeting': {
'targetedPlacementIds': target_placement_ids
}
},
'creativePlaceholders': [
{
'size': {
'width': '300',
'height': '250'
}
},
{
'size': {
'width': '120',
'height': '600'
}
}
],
'lineItemType': 'SPONSORSHIP',
'startDateTimeType': 'IMMEDIATELY',
'endDateTime': {
'date': {
'year': str(date.today().year + 1),
'month': '9',
'day': '30'
},
'hour': '0',
'minute': '0',
'second': '0'
},
'costType': 'CPM',
'costPerUnit': {
'currencyCode': 'USD',
'microAmount': '2000000'
},
'primaryGoal': {
'units': '50',
'unitType': 'IMPRESSIONS',
'goalType': 'DAILY'
},
'contractedUnitsBought': '100',
'creativeRotationType': 'EVEN',
'discountType': 'PERCENTAGE',
}
# Get forecast.
forecast = forecast_service.getForecast(line_item)
matched = long(forecast['matchedUnits'])
available_percent = (((long(forecast['availableUnits'])/
(matched * 1.0)) * 100)
if matched != 0 else 0)
contending_line_items = ([] if 'contendingLineItems' not in forecast
else forecast['contendingLineItems'])
# Display results.
print '%s %s matched.' % (matched, forecast['unitType'].lower())
print '%s%% %s available.' % (available_percent, forecast['unitType'].lower())
print '%d contending line items.' % len(contending_line_items)
if 'possibleUnits' in forecast:
possible_percent = (long(forecast['possibleUnits'])/(matched * 1.0)) * 100
print '%s%% %s possible' % (possible_percent, forecast['unitType'].lower())
if __name__ == '__main__':
  # Initialize client object.
  # LoadFromStorage presumably reads credentials from the default
  # googleads.yaml file — see the googleads client library docs.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, TARGET_PLACEMENT_IDS)
| apache-2.0 |
Idematica/django-oscar | oscar/apps/offer/migrations/0027_add_rangeproduct.py | 11 | 17664 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
""" Manually written migration to add the priority parameter to
Range.included_products.
South suggested to drop original table and create a new table.
It would be painful for existing sites. Instead, rename table and add
the priority column. The risk when handling data should be minimal """
OLD_TABLE = u'offer_range_included_products'
NEW_TABLE = u'offer_rangeproduct'
ORDER_COL = u'display_order'
    def forwards(self, orm):
        # Rename the old auto-created M2M table in place (preserving all
        # existing rows) and bolt on the new ordering column with a default
        # of 0, instead of dropping and recreating the table.
        db.rename_table(self.OLD_TABLE, self.NEW_TABLE)
        db.add_column(
            self.NEW_TABLE, self.ORDER_COL,
            self.gf('django.db.models.fields.IntegerField')(default=0))
    def backwards(self, orm):
        # Mirror of forwards(): drop the ordering column first is not needed
        # after renaming back, so rename, then remove the column.
        db.rename_table(self.NEW_TABLE, self.OLD_TABLE)
        db.delete_column(self.OLD_TABLE, self.ORDER_COL)
models = {
u'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': u"orm['catalogue.AttributeEntityType']"})
},
u'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
u'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': u"orm['catalogue.AttributeOptionGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
u'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.ProductAttribute']", 'through': u"orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Category']", 'through': u"orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': u"orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Product']", 'symmetrical': 'False', 'through': u"orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
u'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ProductAttribute']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': u"orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': u"orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'offer.benefit': {
'Meta': {'object_name': 'Benefit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
u'offer.condition': {
'Meta': {'object_name': 'Condition'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
u'offer.conditionaloffer': {
'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'},
'applies_to_tax_exclusive_prices': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'benefit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Benefit']"}),
'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Condition']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_basket_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'max_global_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_user_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'num_applications': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '64'}),
'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
u'offer.range': {
'Meta': {'object_name': 'Range'},
'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': u"orm['catalogue.ProductClass']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': u"orm['catalogue.Category']"}),
'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'through': u"orm['offer.RangeProduct']", 'to': u"orm['catalogue.Product']"}),
'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True'})
},
u'offer.rangeproduct': {
'Meta': {'unique_together': "(('range', 'product'),)", 'object_name': 'RangeProduct'},
'display_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Range']"})
}
}
complete_apps = ['offer']
| bsd-3-clause |
Godiyos/python-for-android | python3-alpha/extra_modules/gdata/Crypto/PublicKey/qNEW.py | 45 | 5540 | #
# qNEW.py : The q-NEW signature algorithm.
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $"
from Crypto.PublicKey import pubkey
from Crypto.Util.number import *
from Crypto.Hash import SHA
class error (Exception):
pass
HASHBITS = 160 # Size of SHA digests
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)

    Generate a qNEW key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.
    """
    obj = qNEWobj()

    # Generate prime numbers p and q.  q is a 160-bit prime number.  p is
    # another prime number (the modulus) whose bit size is chosen by the
    # caller, and is generated so that p-1 is a multiple of q.
    #
    # Note that only a single seed is used to generate p and q; if someone
    # generates a key for you, you can use the seed to duplicate the key
    # generation.  This can protect you from someone generating values of
    # p, q that have some special form that's easy to break.
    if progress_func:
        progress_func('p,q\n')
    while (1):
        obj.q = getPrime(160, randfunc)
        # assert pow(2, 159) < obj.q < pow(2, 160)
        obj.seed = S = long_to_bytes(obj.q)
        C, N, V = 0, 2, {}
        # Compute b and n such that bits-1 = b + n*HASHBITS.
        # BUGFIX: use floor division -- true division ('/') yields a float
        # on Python 3, which breaks range(), dict indexing and the shift
        # assembly below.
        n = (bits-1) // HASHBITS
        b = (bits-1) % HASHBITS ; powb = 2 << b
        powL1 = pow(int(2), bits-1)
        while C < 4096:
            # The V array will contain (bits-1) bits of random
            # data, that are assembled to produce a candidate
            # value for p.
            for k in range(0, n+1):
                # BUGFIX: S is a byte string (long_to_bytes), so the counter
                # text must be encoded before concatenation on Python 3; the
                # resulting hash input bytes match the original Python 2 code.
                V[k] = bytes_to_long(SHA.new(S + str(N).encode() + str(k).encode()).digest())
            p = V[n] % powb
            for k in range(n-1, -1, -1):
                p = (p << int(HASHBITS)) + V[k]
            p = p + powL1  # Ensure the high bit is set
            # Ensure that p-1 is a multiple of q
            p = p - (p % (2*obj.q) - 1)
            # If p is still the right size, and it's prime, we're done!
            if powL1 <= p and isPrime(p):
                break
            # Otherwise, increment the counter and try again
            C, N = C+1, N+n+1
        if C < 4096:
            break  # Ended early, so exit the while loop
        if progress_func:
            progress_func('4096 values of p tried\n')
    obj.p = p
    # BUGFIX: the exponent (p-1)/q must be an integer; use floor division.
    power = (p-1) // obj.q

    # Next parameter: g = h**((p-1)/q) mod p, such that h is any
    # number <p-1, and g>1.  g is kept; h can be discarded.
    if progress_func:
        progress_func('h,g\n')
    while (1):
        h = bytes_to_long(randfunc(bits)) % (p-1)
        g = pow(h, power, p)
        if 1 < h < p-1 and g > 1:
            break
    obj.g = g

    # x is the private key information, and is just a random number
    # between 0 and q.  y=g**x mod p, and is part of the public information.
    if progress_func:
        progress_func('x,y\n')
    while (1):
        x = bytes_to_long(randfunc(20))
        if 0 < x < obj.q:
            break
    obj.x, obj.y = x, pow(g, x, p)

    return obj
# Construct a qNEW object
def construct(tuple):
    """construct(tuple:(long,long,long,long)|(long,long,long,long,long)
    Construct a qNEW object from a 4- or 5-tuple of numbers.
    """
    obj = qNEWobj()
    if len(tuple) not in (4, 5):
        raise error('argument for construct() wrong length')
    # Pair each supplied value with its key component name, in keydata
    # order: p, q, g, y and -- for a 5-tuple -- the private exponent x.
    for field, value in zip(obj.keydata, tuple):
        setattr(obj, field, value)
    return obj
class qNEWobj(pubkey.pubkey):
    """qNEW key object: domain parameters (p, q, g), public value y and,
    for private keys, the secret exponent x."""
    # Ordered key components; the first four are public, 'x' is private
    # and only present on private keys (see has_private()).
    keydata=['p', 'q', 'g', 'y', 'x']

    def _sign(self, M, K=''):
        # NOTE(review): the default K='' is not a usable nonce -- comparing
        # an int with a str is type-dependent on Python 2 and raises
        # TypeError on Python 3.  Callers must supply an integer 0 < K < q.
        if (self.q<=K):
            raise error('K is greater than q')
        if M<0:
            raise error('Illegal value of M (<0)')
        if M>=pow(2,161):
            raise error('Illegal value of M (too large)')
        # Signature pair: r = (g^K mod p) mod q, s = K - r*M*x mod q.
        r=pow(self.g, K, self.p) % self.q
        s=(K- (r*M*self.x % self.q)) % self.q
        return (r,s)
    def _verify(self, M, sig):
        # Verify (r, s) against message representative M; returns 1/0.
        r, s = sig
        if r<=0 or r>=self.q or s<=0 or s>=self.q:
            return 0
        if M<0:
            raise error('Illegal value of M (<0)')
        if M<=0 or M>=pow(2,161):
            return 0
        # Accept when (g^s * y^(M*r) mod p) mod q equals r.
        v1 = pow(self.g, s, self.p)
        v2 = pow(self.y, M*r, self.p)
        v = ((v1*v2) % self.p)
        v = v % self.q
        if v==r:
            return 1
        return 0
    def size(self):
        "Return the maximum number of bits that can be handled by this key."
        return 160
    def has_private(self):
        """Return a Boolean denoting whether the object contains
        private components."""
        return hasattr(self, 'x')
    def can_sign(self):
        """Return a Boolean value recording whether this algorithm can generate signatures."""
        return 1
    def can_encrypt(self):
        """Return a Boolean value recording whether this algorithm can encrypt data."""
        return 0
    def publickey(self):
        """Return a new key object containing only the public information."""
        return construct((self.p, self.q, self.g, self.y))
object = qNEWobj
| apache-2.0 |
ubuntuvim/GoAgent | local/gevent-1.0rc2/greentest/test__examples.py | 2 | 1359 | import sys
import os
import glob
import time
import util
# Example scripts live alongside this test directory.
cwd = '../examples/'

# Examples never run directly by this test.
ignore = ['wsgiserver.py', 'wsgiserver_ssl.py', 'webproxy.py', 'webpy.py']
if sys.platform == 'win32':
    ignore += ['geventsendfile.py', 'psycopg2_pool.py']
# Examples that have a dedicated test__example_<name>.py are covered
# there instead (14 == len('test__example_')).
ignore += [x[14:] for x in glob.glob('test__example_*.py')]

# (min_seconds, max_seconds) an example is expected to run for;
# per-example overrides below.
default_time_range = (2, 4)
time_ranges = {
    'concurrent_download.py': (0, 30),
    'geventsendfile.py': (0, 4),
    'processes.py': (0, 4)}
def main(tests=None):
    """Run each example script, failing it if util.run reports failure
    or if it exits faster than its expected minimum runtime."""
    if not tests:
        tests = set(os.path.basename(x) for x in glob.glob('../examples/*.py'))
    tests = sorted(tests)
    failed = []
    for filename in tests:
        if filename in ignore:
            continue
        min_time, max_time = time_ranges.get(filename, default_time_range)
        start = time.time()
        # util.run presumably returns a truthy value on failure
        # (non-zero exit or timeout) -- see the append below.
        if util.run([sys.executable, '-u', filename], timeout=max_time, cwd=cwd):
            failed.append(filename)
        else:
            # Exiting too quickly suggests the example crashed silently.
            took = time.time() - start
            if took < min_time:
                util.log('! Failed example %s: exited too quickly, after %.1fs (expected %.1fs)', filename, took, min_time)
                failed.append(filename)
    if failed:
        util.log('! Failed examples:\n! - %s', '\n! - '.join(failed))
        sys.exit(1)
    if not tests:
        sys.exit('No tests.')


if __name__ == '__main__':
    main()
| mit |
dwaynebailey/translate | translate/lang/tr.py | 3 | 1065 | # -*- coding: utf-8 -*-
#
# Copyright 2009,2013 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Turkish language.
"""
from __future__ import unicode_literals
from translate.lang import common
class tr(common.Common):
    """This class represents Turkish."""

    # Characters considered valid as accelerator (hotkey) markers:
    # ASCII letters/digits extended with Turkish-specific letters.
    validaccel = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890" + "ığüşöçĞÜŞİÖÇ"
| gpl-2.0 |
beingzy/user_recommender_framework | network_simulator/GuidedUserClickSimulator.py | 1 | 1477 | """ objective introduction
Author: Yi Zhang <beingzy@gmail.com>
Date: 2016/05/09
"""
from .UserClickSimulator import UserClickSimulatorMixin
def convert_pair_dictionary(user_connections):
    """Translate user pairs into a user-wise adjacency dictionary.

    Parameters
    ----------
    user_connections : iterable of (uid_a, uid_b) pairs
        each pair is an undirected connection between two users

    Returns
    -------
    dict
        maps every user id appearing in any pair to the list of user
        ids it is connected with, in input order; both directions of
        each pair are recorded.
    """
    user_conn_dict = {}
    for uid_a, uid_b in user_connections:
        # setdefault collapses the original "if key present: append,
        # else: create singleton list" branches into one expression.
        user_conn_dict.setdefault(uid_a, []).append(uid_b)
        user_conn_dict.setdefault(uid_b, []).append(uid_a)
    return user_conn_dict
class GuidedUserClickSimulator(UserClickSimulatorMixin):
    """Click simulator guided by the true state of the social network."""

    def __init__(self, reference_user_connections):
        """
        Parameters:
        ==========
        reference_user_connections: {matrix-like} (n, 2)
            pair of users to represent true status of social network
        """
        self._ref_user_connections = convert_pair_dictionary(
            reference_user_connections)

    def click(self, target_user_id, rec_list):
        """Split a recommendation list into (accepted, rejected) ids,
        accepting exactly the users already connected to the target."""
        neighbors = self._ref_user_connections[target_user_id]
        accepted = [uid for uid in rec_list if uid in neighbors]
        rejected = [uid for uid in rec_list if uid not in neighbors]
        return accepted, rejected
| gpl-3.0 |
geoenvo/geonode | geonode/contrib/geosites/post_settings.py | 21 | 3197 | # flake8: noqa
##### Settings to be included last
###############################################
# Master Geosite settings
# These settings are called at/near the end of a GeoSite settings
# to finalize some derived settings
###############################################
# geonode local_settings
try:
# load in local_settings from system installed geonode
execfile(os.path.join(GEONODE_ROOT, 'local_settings.py'))
except:
# there are no system geonode local_settings to import
pass
# master local_settings
try:
# load in local_settings (usually for setting SITEURL and DATABASES for production)
execfile(os.path.join(SITE_ROOT, '../', 'local_settings.py'))
except:
# there are no master local_settings to import
pass
# site local_settings
try:
# load in local_settings (usually for setting SITEURL and DATABASES for production)
execfile(os.path.join(SITE_ROOT, 'local_settings.py'))
except:
# there are no site local_settings to import
pass
OGC_SERVER['default']['LOCATION'] = GEOSERVER_URL
#OGC_SERVER['default']['LOCATION'] = os.path.join(SITEURL, 'geoserver/')
OGC_SERVER['default']['PUBLIC_LOCATION'] = os.path.join(SITEURL, 'geoserver/')
CATALOGUE['default']['URL'] = '%scatalogue/csw' % SITEURL
PYCSW['CONFIGURATION']['metadata:main']['provider_url'] = SITEURL
LOCAL_GEOSERVER['source']['url'] = OGC_SERVER['default']['PUBLIC_LOCATION'] + 'wms'
# Directories to search for templates
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates/'),
os.path.join(PROJECT_ROOT, 'templates/'),
os.path.join(GEONODE_ROOT, 'templates/'),
)
# Directories which hold static files
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, 'static/'),
os.path.join(PROJECT_ROOT, 'static/'),
os.path.join(GEONODE_ROOT, 'static/')
)
# Update databases if site has own database
if SITE_DATABASES:
DATABASES.update(SITE_DATABASES)
# Update apps if site has own apps
if SITE_APPS:
INSTALLED_APPS += SITE_APPS
# Put static files in root
STATIC_ROOT = os.path.join(SERVE_PATH, 'static')
# Put media files in root
MEDIA_ROOT = os.path.join(SERVE_PATH, 'uploaded')
#OGC_SERVER['default']['LOCATION'] = os.path.join(GEOSERVER_URL, 'geoserver/')
# add datastore if defined
if DATASTORE in DATABASES.keys():
OGC_SERVER['default']['DATASTORE'] = DATASTORE
# If using nginx/gunicorn this should be added
# add gunicorn logging
# LOGGING['handlers']['gunicorn'] = {
# 'level': 'DEBUG',
# 'class': 'logging.handlers.RotatingFileHandler',
# 'formatter': 'verbose',
# 'filename': '/geo/logs/gunicorn.errors',
# }
# LOGGING['loggers']['gunicorn'] = {
# 'level': 'DEBUG',
# 'handlers': ['gunicorn'],
# 'propagate': True,
# }
# DEBUG_TOOLBAR can interfere with Django - keep it off until needed
if DEBUG_TOOLBAR:
DEBUG_TOOLBAR_PATCH_SETTINGS = False
def show_if_superuser(request):
return True if request.user.is_superuser else False
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar',)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': 'cdesign.settings.show_if_superuser',
}
| gpl-3.0 |
tumf/p2pool | p2pool/bitcoin/height_tracker.py | 227 | 4678 | from twisted.internet import defer
from twisted.python import log
import p2pool
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral, forest, jsonrpc, variable
class HeaderWrapper(object):
    """Minimal stand-in for a block header: its own hash plus the hash
    of the header it builds on."""

    __slots__ = ['hash', 'previous_hash']

    @classmethod
    def from_header(cls, header):
        """Wrap a full parsed header dict."""
        header_hash = bitcoin_data.hash256(
            bitcoin_data.block_header_type.pack(header))
        return cls(header_hash, header['previous_block'])

    def __init__(self, hash, previous_hash):
        self.hash = hash
        self.previous_hash = previous_hash
class HeightTracker(object):
    '''Point this at a factory and let it take care of getting block heights'''

    def __init__(self, best_block_func, factory, backlog_needed):
        self._best_block_func = best_block_func
        self._factory = factory
        self._backlog_needed = backlog_needed

        # Forest of HeaderWrapper objects keyed by hash, linked by
        # previous_hash.
        self._tracker = forest.Tracker()

        self._watch1 = self._factory.new_headers.watch(self._heard_headers)
        self._watch2 = self._factory.new_block.watch(self._request)

        # Hashes already asked for; cleared every 60s so failed
        # requests can be retried.
        self._requested = set()
        self._clear_task = deferral.RobustLoopingCall(self._requested.clear)
        self._clear_task.start(60)

        self._last_notified_size = 0

        # Fired whenever new headers are added to the tracker.
        self.updated = variable.Event()

        self._think_task = deferral.RobustLoopingCall(self._think)
        self._think_task.start(15)
        self._think2_task = deferral.RobustLoopingCall(self._think2)
        self._think2_task.start(15)

    def _think(self):
        # Extend the backlog: keep requesting headers behind the highest
        # head until we hold at least _backlog_needed of them.
        try:
            highest_head = max(self._tracker.heads, key=lambda h: self._tracker.get_height_and_last(h)[0]) if self._tracker.heads else None
            if highest_head is None:
                return # wait for think2
            height, last = self._tracker.get_height_and_last(highest_head)
            if height < self._backlog_needed:
                self._request(last)
        except:
            log.err(None, 'Error in HeightTracker._think:')

    def _think2(self):
        # Make sure the current best block's header chain gets tracked.
        self._request(self._best_block_func())

    def _heard_headers(self, headers):
        # Callback for factory.new_headers: add unseen headers, then
        # immediately try to extend the backlog further.
        changed = False
        for header in headers:
            hw = HeaderWrapper.from_header(header)
            if hw.hash in self._tracker.items:
                continue
            changed = True
            self._tracker.add(hw)
        if changed:
            self.updated.happened()
        self._think()

        # Progress report every 100 headers.
        if len(self._tracker.items) >= self._last_notified_size + 100:
            print 'Have %i/%i block headers' % (len(self._tracker.items), self._backlog_needed)
            self._last_notified_size = len(self._tracker.items)

    @defer.inlineCallbacks
    def _request(self, last):
        # Ask a peer for headers ending at `last`, unless already known
        # or already in flight.
        if last in self._tracker.items:
            return
        if last in self._requested:
            return
        self._requested.add(last)
        (yield self._factory.getProtocol()).send_getheaders(version=1, have=[], last=last)

    def get_height_rel_highest(self, block_hash):
        # callers: highest height can change during yields!
        best_height, best_last = self._tracker.get_height_and_last(self._best_block_func())
        height, last = self._tracker.get_height_and_last(block_hash)
        if last != best_last:
            return -1000000000 # XXX hack
        return height - best_height
@defer.inlineCallbacks
def get_height_rel_highest_func(bitcoind, factory, best_block_func, net):
    """Return a function mapping a block hash to its height relative to
    the highest-known block: via bitcoind's getblock RPC when the help
    text advertises it, otherwise via a header-downloading HeightTracker.
    """
    if '\ngetblock ' in (yield deferral.retry()(bitcoind.rpc_help)()):
        @deferral.DeferredCacher
        @defer.inlineCallbacks
        def height_cacher(block_hash):
            try:
                x = yield bitcoind.rpc_getblock('%x' % (block_hash,))
            except jsonrpc.Error_for_code(-5): # Block not found
                if not p2pool.DEBUG:
                    raise deferral.RetrySilentlyException()
                else:
                    raise
            # Some bitcoind variants report 'blockcount' instead of 'height'.
            defer.returnValue(x['blockcount'] if 'blockcount' in x else x['height'])
        best_height_cached = variable.Variable((yield deferral.retry()(height_cacher)(best_block_func())))
        def get_height_rel_highest(block_hash):
            # call_now returns the cached value immediately (0 if a fetch
            # is still pending) -- presumably; confirm in deferral module.
            this_height = height_cacher.call_now(block_hash, 0)
            best_height = height_cacher.call_now(best_block_func(), 0)
            best_height_cached.set(max(best_height_cached.value, this_height, best_height))
            return this_height - best_height_cached.value
    else:
        get_height_rel_highest = HeightTracker(best_block_func, factory, 5*net.SHARE_PERIOD*net.CHAIN_LENGTH/net.PARENT.BLOCK_PERIOD).get_height_rel_highest
    defer.returnValue(get_height_rel_highest)
| gpl-3.0 |
a-b/PopClip-Extensions | source/OneNote/requests/packages/chardet/langgreekmodel.py | 2763 | 12628 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# ISO-8859-7 (Latin-7) Greek model: shares GreekLangModel above and
# differs from the windows-1253 model only in its byte-to-order map.
Latin7GreekModel = {
    'charToOrderMap': Latin7_CharToOrderMap,
    'precedenceMatrix': GreekLangModel,
    'mTypicalPositiveRatio': 0.982851,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-7"
}

# windows-1253 Greek model.
Win1253GreekModel = {
    'charToOrderMap': win1253_CharToOrderMap,
    'precedenceMatrix': GreekLangModel,
    'mTypicalPositiveRatio': 0.982851,
    'keepEnglishLetter': False,
    'charsetName': "windows-1253"
}
# flake8: noqa
| mit |
yeldartoktasynov/app-landing-page | vendor/bundle/ruby/2.2.0/gems/pygments.rb-0.6.3/vendor/pygments-main/pygments/styles/pastie.py | 135 | 2473 | # -*- coding: utf-8 -*-
"""
pygments.styles.pastie
~~~~~~~~~~~~~~~~~~~~~~
Style similar to the `pastie`_ default style.
.. _pastie: http://pastie.caboo.se/
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class PastieStyle(Style):
    """
    Style similar to the pastie default style.
    """

    default_style = ''

    # Token type -> style string: '#rrggbb' foreground, 'bg:#rrggbb'
    # background, 'bold'/'italic' flags, 'nobold' to cancel an
    # inherited flag.
    styles = {
        Whitespace:             '#bbbbbb',
        Comment:                '#888888',
        Comment.Preproc:        'bold #cc0000',
        Comment.Special:        'bg:#fff0f0 bold #cc0000',

        String:                 'bg:#fff0f0 #dd2200',
        String.Regex:           'bg:#fff0ff #008800',
        String.Other:           'bg:#f0fff0 #22bb22',
        String.Symbol:          '#aa6600',
        String.Interpol:        '#3333bb',
        String.Escape:          '#0044dd',

        Operator.Word:          '#008800',

        Keyword:                'bold #008800',
        Keyword.Pseudo:         'nobold',
        Keyword.Type:           '#888888',

        Name.Class:             'bold #bb0066',
        Name.Exception:         'bold #bb0066',
        Name.Function:          'bold #0066bb',
        Name.Property:          'bold #336699',
        Name.Namespace:         'bold #bb0066',
        Name.Builtin:           '#003388',
        Name.Variable:          '#336699',
        Name.Variable.Class:    '#336699',
        Name.Variable.Instance: '#3333bb',
        Name.Variable.Global:   '#dd7700',
        Name.Constant:          'bold #003366',
        Name.Tag:               'bold #bb0066',
        Name.Attribute:         '#336699',
        Name.Decorator:         '#555555',
        Name.Label:             'italic #336699',

        Number:                 'bold #0000DD',

        Generic.Heading:        '#333',
        Generic.Subheading:     '#666',
        Generic.Deleted:        'bg:#ffdddd #000000',
        Generic.Inserted:       'bg:#ddffdd #000000',
        Generic.Error:          '#aa0000',
        Generic.Emph:           'italic',
        Generic.Strong:         'bold',
        Generic.Prompt:         '#555555',
        Generic.Output:         '#888888',
        Generic.Traceback:      '#aa0000',

        Error:                  'bg:#e3d2d2 #a61717'
    }
| mit |
ask/python-github2 | github2/teams.py | 1 | 3026 | # Copyright (C) 2011-2012 James Rowe <jnrowe@gmail.com>
# Patryk Zawadzki <patrys@pld-linux.org>
#
# This file is part of python-github2, and is made available under the 3-clause
# BSD license. See LICENSE for the full details.
from github2.core import BaseData, GithubCommand, Attribute, requires_auth
from github2.repositories import Repository
from github2.users import User
class Team(BaseData):
    """Team container.

    .. versionadded:: 0.4.0
    """
    # Declarative field descriptors -- presumably populated by BaseData
    # from the GitHub API response; confirm in github2.core.
    id = Attribute("The team id")
    name = Attribute("Name of the team")
    permission = Attribute("Permissions of the team")

    def __repr__(self):
        return "<Team: %s>" % self.name
class Teams(GithubCommand):
    """GitHub API teams functionality.

    .. versionadded:: 0.4.0
    """
    domain = "teams"

    def show(self, team_id):
        """Fetch a single team's metadata.

        :param int team_id: team to get information for
        """
        ident = str(team_id)
        return self.get_value(ident, filter="team", datatype=Team)

    def members(self, team_id):
        """Fetch every member of a team.

        :param int team_id: team to get information for
        """
        ident = str(team_id)
        return self.get_values(ident, "members", filter="users",
                               datatype=User)

    @requires_auth
    def add_member(self, team_id, username):
        """Add a new member to a team.

        :param int team_id: team to add new member to
        :param str username: GitHub username to add to team
        """
        payload = {'name': username}
        return self.get_values(str(team_id), 'members', method='POST',
                               post_data=payload, filter='users',
                               datatype=User)

    def repositories(self, team_id):
        """Fetch every repository owned by a team.

        :param int team_id: team to get information for
        """
        return self.get_values(str(team_id), "repositories",
                               filter="repositories", datatype=Repository)

    @requires_auth
    def add_project(self, team_id, project):
        """Add a project to a team.

        :param int team_id: team to add repository to
        :param str project: GitHub project
        """
        # Accept either a Repository object or its project string.
        name = project.project if isinstance(project, Repository) else project
        return self.get_values(str(team_id), "repositories", method="POST",
                               post_data={'name': name},
                               filter="repositories", datatype=Repository)

    @requires_auth
    def remove_project(self, team_id, project):
        """Remove a project from a team.

        :param int team_id: team to remove project from
        :param str project: GitHub project
        """
        name = project.project if isinstance(project, Repository) else project
        return self.get_values(str(team_id), "repositories", method="DELETE",
                               post_data={'name': name},
                               filter="repositories", datatype=Repository)
| bsd-3-clause |
windyuuy/opera | chromium/src/tools/telemetry/telemetry/page/page.py | 3 | 4481 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import time
import urlparse
from telemetry.core import util
def _UrlPathJoin(*args):
"""Joins each path in |args| for insertion into a URL path.
This is distinct from os.path.join in that:
1. Forward slashes are always used.
2. Paths beginning with '/' are not treated as absolute.
For example:
_UrlPathJoin('a', 'b') => 'a/b'
_UrlPathJoin('a/', 'b') => 'a/b'
_UrlPathJoin('a', '/b') => 'a/b'
_UrlPathJoin('a/', '/b') => 'a/b'
"""
if not args:
return ''
if len(args) == 1:
return str(args[0])
else:
args = [str(arg).replace('\\', '/') for arg in args]
work = [args[0]]
for arg in args[1:]:
if not arg:
continue
if arg.startswith('/'):
work.append(arg[1:])
else:
work.append(arg)
joined = reduce(os.path.join, work)
return joined.replace('\\', '/')
class Page(object):
    """One page in a telemetry page set: its URL plus any per-page
    attributes supplied by the page-set description."""

    def __init__(self, url, page_set, attributes=None, base_dir=None):
        parsed_url = urlparse.urlparse(url)
        if not parsed_url.scheme:
            # Scheme-less URLs are treated as paths relative to base_dir
            # and turned into file:// URLs if they exist on disk.
            abspath = os.path.abspath(os.path.join(base_dir, parsed_url.path))
            if os.path.exists(abspath):
                url = 'file://%s' % os.path.abspath(os.path.join(base_dir, url))
            else:
                raise Exception('URLs must be fully qualified: %s' % url)
        self.url = url
        self.page_set = page_set
        self.base_dir = base_dir

        # These attributes can be set dynamically by the page.
        self.credentials = None
        self.disabled = False
        self.script_to_evaluate_on_commit = None

        if attributes:
            # iteritems(): this module targets Python 2.
            for k, v in attributes.iteritems():
                setattr(self, k, v)

    def __getattr__(self, name):
        # Unknown attributes fall back to the containing page set.
        if self.page_set and hasattr(self.page_set, name):
            return getattr(self.page_set, name)
        raise AttributeError()

    @property
    def serving_dirs_and_file(self):
        """Return (serving dir or list of dirs, file path relative to it)."""
        parsed_url = urlparse.urlparse(self.url)
        path = _UrlPathJoin(self.base_dir, parsed_url.netloc, parsed_url.path)

        if hasattr(self.page_set, 'serving_dirs'):
            url_base_dir = os.path.commonprefix(self.page_set.serving_dirs)
            base_path = _UrlPathJoin(self.base_dir, url_base_dir)
            return ([_UrlPathJoin(self.base_dir, d)
                     for d in self.page_set.serving_dirs],
                    path.replace(base_path, ''))

        return os.path.split(path)

    # A version of this page's URL that's safe to use as a filename.
    @property
    def url_as_file_safe_name(self):
        # Just replace all special characters in the url with underscore.
        return re.sub('[^a-zA-Z0-9]', '_', self.display_url)

    @property
    def display_url(self):
        """URL shortened for display: file:// pages are shown relative to
        the common directory prefix of all file:// pages in the set."""
        if self.url.startswith('http'):
            return self.url
        url_paths = ['/'.join(p.url.strip('/').split('/')[:-1])
                     for p in self.page_set
                     if p.url.startswith('file://')]
        common_prefix = os.path.commonprefix(url_paths)
        return self.url[len(common_prefix):].strip('/')

    @property
    def archive_path(self):
        # Web Page Replay archive location for this page.
        return self.page_set.WprFilePathForPage(self)

    def __str__(self):
        return self.url

    def WaitToLoad(self, tab, timeout, poll_interval=0.1):
        Page.WaitForPageToLoad(self, tab, timeout, poll_interval)

    # TODO(dtu): Remove this method when no page sets use a click interaction
    # with a wait condition. crbug.com/168431
    @staticmethod
    def WaitForPageToLoad(obj, tab, timeout, poll_interval=0.1):
        """Waits for various wait conditions present in obj."""
        if hasattr(obj, 'wait_seconds'):
            time.sleep(obj.wait_seconds)
        if hasattr(obj, 'wait_for_element_with_text'):
            callback_code = 'function(element) { return element != null; }'
            util.WaitFor(
                lambda: util.FindElementAndPerformAction(
                    tab, obj.wait_for_element_with_text, callback_code),
                timeout, poll_interval)
        if hasattr(obj, 'wait_for_element_with_selector'):
            util.WaitFor(lambda: tab.EvaluateJavaScript(
                'document.querySelector(\'' + obj.wait_for_element_with_selector +
                '\') != null'), timeout, poll_interval)
        if hasattr(obj, 'post_navigate_javascript_to_execute'):
            tab.EvaluateJavaScript(obj.post_navigate_javascript_to_execute)
        if hasattr(obj, 'wait_for_javascript_expression'):
            util.WaitFor(
                lambda: tab.EvaluateJavaScript(obj.wait_for_javascript_expression),
                timeout, poll_interval)
| bsd-3-clause |
emidln/django_roa | env/lib/python2.7/site-packages/PIL/FitsStubImagePlugin.py | 40 | 1643 | #
# The Python Imaging Library
# $Id$
#
# FITS stub adapter
#
# Copyright (c) 1998-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image, ImageFile
# Currently installed application-specific FITS handler; None until
# register_handler() is called.
_handler = None

##
# Install application-specific FITS image handler.
#
# @param handler Handler object.

def register_handler(handler):
    """Install the application-specific FITS load/save handler."""
    global _handler
    _handler = handler
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix):
return prefix[:6] == "SIMPLE"
class FITSStubImageFile(ImageFile.StubImageFile):
    """Stub image file for FITS data.

    Actual decoding is delegated to the application handler installed via
    register_handler(); this class only sniffs the header and fills in
    placeholder mode/size values.
    """

    format = "FITS"
    format_description = "FITS"

    def _open(self):
        # Remember the stream position so it can be restored after sniffing.
        offset = self.fp.tell()
        if not _accept(self.fp.read(6)):
            raise SyntaxError("Not a FITS file")

        # FIXME: add more sanity checks here; mandatory header items
        # include SIMPLE, BITPIX, NAXIS, etc.

        self.fp.seek(offset)

        # make something up
        self.mode = "F"
        self.size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        # The installed application handler (or None) does the real loading.
        return _handler
def _save(im, fp, filename):
    """Save *im* through the installed FITS handler.

    Raises IOError when no usable handler has been registered.

    BUG FIX: the original wrote hasattr("_handler", "save"), which tests the
    *string* "_handler" (always False) instead of the handler object — so a
    registered handler without a save method slipped past the guard and
    crashed with AttributeError instead of raising the intended IOError.
    """
    if _handler is None or not hasattr(_handler, "save"):
        raise IOError("FITS save handler not installed")
    _handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry
# Register the stub with PIL's plugin machinery: _accept sniffs the header,
# _save routes saves through the installed handler, and both common FITS
# file extensions map to this format.

Image.register_open(FITSStubImageFile.format, FITSStubImageFile, _accept)
Image.register_save(FITSStubImageFile.format, _save)

Image.register_extension(FITSStubImageFile.format, ".fit")
Image.register_extension(FITSStubImageFile.format, ".fits")
| bsd-3-clause |
acutesoftware/AIKIF | aikif/.z_prototype/tools.py | 1 | 4110 | # coding: utf-8
# tools.py written by Duncan Murray 20/3/2014 (C) Acute Software
# Script to configure the functional toolbox of AIKIF
import os
import sys
import time
from random import randint
import aikif.toolbox.Toolbox as mod_tool
import aikif.config as mod_cfg
# Resolve the AIKIF toolbox folder from the configured core folder and make
# it importable.
aikif_dir = mod_cfg.core_folder  # os.path.dirname(os.path.abspath(__file__))
fldr = os.path.abspath(aikif_dir + os.sep + "aikif" + os.sep + "toolbox" )
print('tools.py : fldr = ' + fldr)
sys.path.append(fldr)     # YUCK - but doesnt seem to work otherwise
def main():
    """
    Script to define tools, which currently all are functions in
    python programs.
    TODO - this should be registered via cls_log in the program source
    # attempt at imported tools via AIKIF, but doesnt work
    # (is better to leave as full folder names anyway for
    # external programs
    tl.add({'file':'aikif.toolbox.maths_ml_algorithms.py', 'function':'ml_entropy', 'args':['list'], 'return':['float']})
    tl.add({'file':'aikif.toolbox.test_tool.py', 'function':'get_min_even_num', 'args':['list'], 'return':['int']})
    tl.add({'file':'aikif.toolbox.test_tool.py', 'function':'test_function', 'args':[], 'return':['int']})
    progName = 'aikif.toolbox.solve_knapsack.py'
    tl.add({'file':progName, 'function':'solve_greedy_trivial', 'args':['int', 'dict'], 'return':['int', 'list']})
    tl.add({'file':progName, 'function':'solve_smallest_items_first', 'args':['int', 'dict'], 'return':['int', 'list']})
    tl.add({'file':progName, 'function':'solve_expensive_items_first', 'args':['int', 'dict'], 'return':['int', 'list']})
    """
    # Register every known tool (file + function + signature) in the toolbox.
    tl = mod_tool.Toolbox()
    tl.add({'file':fldr + os.sep + 'maths_ml_algorithms.py', 'function':'ml_entropy', 'args':['list'], 'return':['float']})
    tl.add({'file':fldr + os.sep + 'test_tool.py', 'function':'get_min_even_num', 'args':['list'], 'return':['int']})
    tl.add({'file':fldr + os.sep + 'test_tool.py', 'function':'test_function', 'args':[], 'return':['int']})
    progName = fldr + os.sep + 'solve_knapsack.py'
    tl.add({'file':progName, 'function':'solve_greedy_trivial', 'args':['int', 'dict'], 'return':['int', 'list']})
    tl.add({'file':progName, 'function':'solve_smallest_items_first', 'args':['int', 'dict'], 'return':['int', 'list']})
    tl.add({'file':progName, 'function':'solve_expensive_items_first', 'args':['int', 'dict'], 'return':['int', 'list']})
    tl.add({'file':progName, 'function':'solve_value_density', 'args':['int', 'dict'], 'return':['int', 'list']})
    tl.add({'file':progName, 'function':'main', 'args':['int', 'dict'], 'return':['int', 'list']})
    progName = fldr + os.sep + 'game_board_utils.py'
    tl.add({'file':progName, 'function':'build_board_2048', 'args':[], 'return':['list']})
    tl.add({'file':progName, 'function':'build_board_checkers', 'args':[], 'return':['list']})
    progName = fldr + os.sep + 'crypt_utils.py'
    tl.add({'file':progName, 'function':'solve', 'args':['string'], 'return':['string']})
    tl.add({'file':aikif_dir + os.sep + 'dataTools' + os.sep + 'if_excel.py', 'function':'xls_to_csv', 'args':['string'], 'return':['string']})
    # Persist the registry, smoke-test the first tool, then benchmark it.
    tl.save('tools.txt')
    args = [1,2,3,4,5,6,7]
    for ndx in range(0,1):
        testResult = tl.run(tl.lstTools[ndx], args, 'N')
        print('Ran test on ', os.path.basename(tl.lstTools[ndx]['file']) + '->' + tl.lstTools[ndx]['function'], ' Result = ', testResult)

    run_multiple(tl, tl.lstTools[0], 5)
def run_multiple(t1, tool, numIterations, silent='Y'):
    """Run *tool* from toolbox *t1* numIterations times with random int args.

    Each call uses a fresh random-length list of random two-digit ints.
    Prints elapsed time and returns a list of
    {'tool': name, 'args': args, 'result': answer} records.
    """
    results = []
    start_time = time.time()
    for _ in range(0,numIterations):
        # 1 to 3 random integers in [10, 99]
        args = [randint(10,99) for _ in range(1,randint(2,5))]
        testname = tool['file'] + '.' + tool['function']
        #print('testname = ', testname)
        answer = t1.run(tool, args, silent)
        results.append({'tool':testname, 'args':args, 'result':answer})
    print("Method1 = ", time.time() - start_time, "seconds")
    print('Done processing ' + str(len(results)) + ' calculations')
    return results
# Script entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
michalochman/complex-networks | code/calc-users-topics-count.py | 1 | 1552 | from collections import OrderedDict
import os
from multiprocessing import Process, Queue, JoinableQueue, cpu_count
import math
import MySQLdb
from MySQLdb.cursors import SSDictCursor
from scur_mgr import DB_HOST, DB_USER, DB_PASS, DB_NAME, DB_TABLE_RELATIONSHIPS
__DIR__ = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
db = MySQLdb.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASS, db=DB_NAME, cursorclass=SSDictCursor)
cur = db.cursor()
data_dir = '%s/results/' % __DIR__
users = {}
users_set = set()
limit = 1000000
for i in range(0, 19):
cur.execute('SELECT word_id, user_id FROM %s r WHERE skip = "n" LIMIT %s,%s' % (DB_TABLE_RELATIONSHIPS, i*limit, limit))
row = cur.fetchone()
while row is not None:
user_id = row.get('user_id')
word_id = row.get('word_id')
if user_id not in users:
users[user_id] = set()
else:
users[user_id].add(word_id)
row = cur.fetchone()
for user_id, uwords in users.iteritems():
degree = len(uwords)
if degree >= 1000:
users_set.add(user_id)
print len(users_set)/float(len(users))
# single_topics = 0
# for user_id in users_set:
# cur.execute('SELECT COUNT(DISTINCT topic_id) topics FROM headfi_posts WHERE user_id = %s' % user_id)
# topics = cur.fetchall()[0]['topics']
# if topics == 1:
# single_topics += 1
#
# print single_topics/float(len(users_set)) | mit |
Hokutosei/kubernetes | cluster/juju/charms/trusty/kubernetes-master/hooks/setup.py | 213 | 1409 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pre_install():
    """
    Do any setup required before the install hook.

    Ensures the charm's Python dependencies (charmhelpers, path.py) are
    available before the rest of the hooks import them.
    """
    install_charmhelpers()
    install_path()
def install_charmhelpers():
    """
    Install the charmhelpers library, if not present.
    """
    try:
        import charmhelpers  # noqa
    except ImportError:
        # Not importable yet: bootstrap pip via apt, then pip-install it.
        import subprocess
        for command in (['apt-get', 'install', '-y', 'python-pip'],
                        ['pip', 'install', 'charmhelpers']):
            subprocess.check_call(command)
def install_path():
    """
    Install the path.py library, when not present.
    """
    try:
        import path  # noqa
    except ImportError:
        # Not importable yet: bootstrap pip via apt, then pip-install it.
        import subprocess
        for command in (['apt-get', 'install', '-y', 'python-pip'],
                        ['pip', 'install', 'path.py']):
            subprocess.check_call(command)
| apache-2.0 |
soundcloud/selenium | py/selenium/webdriver/firefox/webdriver.py | 5 | 2988 | #!/usr/bin/python
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import shutil
import socket
import sys
from .firefox_binary import FirefoxBinary
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.extension_connection import ExtensionConnection
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
class WebDriver(RemoteWebDriver):
    """Firefox WebDriver: a RemoteWebDriver talking to the Firefox extension."""

    # There is no native event support on Mac
    NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"

    def __init__(self, firefox_profile=None, firefox_binary=None, timeout=30,
                 capabilities=None, proxy=None):
        """Start Firefox with the given profile/binary and connect to it.

        :param firefox_profile: FirefoxProfile to use (fresh one if None).
        :param firefox_binary: FirefoxBinary to launch (default binary if None).
        :param timeout: seconds to wait for the extension connection.
        :param capabilities: desired capabilities dict (FIREFOX defaults if None).
        :param proxy: optional Proxy whose settings are merged into capabilities.
        """
        self.binary = firefox_binary
        self.profile = firefox_profile

        if self.profile is None:
            self.profile = FirefoxProfile()

        # Native events stay enabled only where the platform allows them.
        self.profile.native_events_enabled = (
            self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)

        if self.binary is None:
            self.binary = FirefoxBinary()

        if capabilities is None:
            capabilities = DesiredCapabilities.FIREFOX

        if proxy is not None:
            proxy.add_to_capabilities(capabilities)

        # The ExtensionConnection launches the browser and serves as the
        # command executor for the remote driver machinery.
        RemoteWebDriver.__init__(self,
            command_executor=ExtensionConnection("127.0.0.1", self.profile,
            self.binary, timeout),
            desired_capabilities=capabilities,
            keep_alive=True)
        self._is_remote = False

    def quit(self):
        """Quits the driver and close every associated window."""
        try:
            RemoteWebDriver.quit(self)
        except (http_client.BadStatusLine, socket.error):
            # Happens if Firefox shutsdown before we've read the response from
            # the socket.
            pass
        self.binary.kill()
        try:
            # Best-effort cleanup of the temporary profile on disk.
            shutil.rmtree(self.profile.path)
            if self.profile.tempfolder is not None:
                shutil.rmtree(self.profile.tempfolder)
        except Exception as e:
            print(str(e))

    @property
    def firefox_profile(self):
        """The FirefoxProfile this driver was started with."""
        return self.profile
IDMIPPM/reliability-aware-resynthesis | resynthesis/resynthesis_create_subckts.py | 1 | 39197 | # coding: utf-8
__author__ = 'IDM.IPPM (Roman Solovyev)'
import os
import subprocess
import re
import random
from resynthesis.resynthesis_external_stats import get_project_directory
import resynthesis as sa
# Получает список уникальных имен узлов из структуры merge
def getUniqueNames(m):
lst = []
for o in m:
for term in m[o]:
for name in term:
if lst.count(name) == 0:
lst.append(name)
return lst
def createMergeStructure(q, ckt):
    """Translate the reduced truth table *q* into named-literal terms.

    *q* maps an output index to a list of term strings over the inputs,
    where position i is '1' (input asserted), '0' (input negated, emitted
    with a '!' prefix) or anything else (don't-care, omitted).
    Returns {output_name: [[literal, ...], ...]}.
    """
    merge = dict()
    for out_idx in q:
        out_name = ckt.__outputs__[out_idx]
        terms = []
        for term in q[out_idx]:
            literals = []
            for pos in range(ckt.inputs()):
                bit = term[pos]
                if bit == '1':
                    literals.append(ckt.__inputs__[pos])
                elif bit == '0':
                    literals.append('!' + ckt.__inputs__[pos])
            terms.append(literals)
        merge[out_name] = terms
    return merge
# q - reduced truth table, ckt - initial ckt
# Method 1: first merge the most frequent AND pairs,
# then combine everything through OR
def createSubckt_method1(q, ckt):
    """Deterministic greedy two-level synthesis of *q* into a scheme_alt.

    Phase 1 repeatedly replaces the most frequent literal pair inside terms
    with an AND/NOR/INV gate; phase 2 repeatedly merges the most frequent
    term pair per output with an OR/NAND/INV gate. Ties prefer pairs whose
    literals are both plain or both negated.
    """
    sckt = sa.scheme_alt()
    for i in ckt.__inputs__:
        sckt.__inputs__.append(i)
    for o in ckt.__outputs__:
        sckt.__outputs__.append(o)
    # Build a convenient data structure
    merge = createMergeStructure(q, ckt)
    # print(merge)
    # Find the most frequent pairs and replace them.
    # Prefer pairs where both literals are negated, or both are plain.
    # The loop ends once every term is compressed to a single node.
    num = 0
    while (1):
        uniqueList = getUniqueNames(merge)
        total = len(uniqueList)
        # Build the pair-count matrix
        check = [0] * total
        for i in range(total):
            check[i] = [0] * total
        # Count the pairs
        for i in range(0, total):
            for j in range(i+1, total):
                n1 = uniqueList[i]
                n2 = uniqueList[j]
                check[i][j] = 0
                for o in merge:
                    for term in merge[o]:
                        if term.count(n1) > 0 and term.count(n2) > 0:
                            check[i][j] = check[i][j] + 1
        # Pick the most frequent pair
        max = 0
        maxi = -1
        maxj = -1
        for i in range(0, total):
            for j in range(i+1, total):
                if check[i][j] > 0:
                    if check[i][j] > max:
                        max = check[i][j]
                        maxi = i
                        maxj = j
                    elif check[i][j] == max:
                        if (uniqueList[i][0] != '!' and uniqueList[j][0] != '!') or (uniqueList[i][0] == '!' and uniqueList[j][0] == '!'):
                            maxi = i
                            maxj = j
        # If a pair remains, add a gate to the circuit and replace the pair
        # by a single node; if there are no pairs left, leave the loop.
        if (max > 0):
            n1 = uniqueList[maxi]
            n2 = uniqueList[maxj]
            newname = "INT_{0}".format(num)
            num = num + 1
            if (n1[0] != '!' and n2[0] != '!'):
                sckt.__elements__[newname] = ('AND', [n1, n2])
            elif (n1[0] == '!' and n2[0] == '!'):
                sckt.__elements__[newname] = ('NOR', [n1[1:], n2[1:]])
            elif (n1[0] == '!'):
                newname1 = "INT_{0}".format(num)
                num = num + 1
                sckt.__elements__[newname1] = ('INV', [n1[1:]])
                sckt.__elements__[newname] = ('AND', [newname1, n2])
            elif (n2[0] == '!'):
                newname1 = "INT_{0}".format(num)
                num = num + 1
                sckt.__elements__[newname1] = ('INV', [n2[1:]])
                sckt.__elements__[newname] = ('AND', [newname1, n1])
            # Replace the pair inside the terms
            for o in merge:
                for term in merge[o]:
                    if term.count(n1) > 0 and term.count(n2) > 0:
                        term.remove(n1)
                        term.remove(n2)
                        term.append(newname)
        else:
            break
    check = []
    # print(sckt)
    # print(merge)
    # Loop that pairwise merges terms (via OR) within each output
    while (1):
        uniqueList = getUniqueNames(merge)
        total = len(uniqueList)
        # Build the pair-count matrix
        check = [0] * total
        for i in range(total):
            check[i] = [0] * total
        # Count pairs (check which pair occurs most often across all outputs)
        for i in range(0, total):
            for j in range(i+1, total):
                n1 = uniqueList[i]
                n2 = uniqueList[j]
                check[i][j] = 0
                for o in merge:
                    flag = 0
                    for term in merge[o]:
                        if term.count(n1) > 0:
                            flag = flag + 1
                    for term in merge[o]:
                        if term.count(n2) > 0:
                            flag = flag + 1
                    if flag == 2:
                        check[i][j] = check[i][j] + 1
        # Pick the most frequent pair
        max = 0
        maxi = -1
        maxj = -1
        for i in range(0, total):
            for j in range(i+1, total):
                if check[i][j] > 0:
                    if check[i][j] > max:
                        max = check[i][j]
                        maxi = i
                        maxj = j
                    elif check[i][j] == max:
                        if (uniqueList[i][0] != '!' and uniqueList[j][0] != '!') or (uniqueList[i][0] == '!' and uniqueList[j][0] == '!'):
                            maxi = i
                            maxj = j
        # If a pair remains, add a gate to the circuit and replace the pair
        # by a single node; if there are no pairs left, leave the loop.
        if (max > 0):
            n1 = uniqueList[maxi]
            n2 = uniqueList[maxj]
            newname = "INT_{0}".format(num)
            num = num + 1
            if (n1[0] != '!' and n2[0] != '!'):
                sckt.__elements__[newname] = ('OR', [n1, n2])
            elif (n1[0] == '!' and n2[0] == '!'):
                sckt.__elements__[newname] = ('NAND', [n1[1:], n2[1:]])
            elif (n1[0] == '!'):
                newname1 = "INT_{0}".format(num)
                num = num + 1
                sckt.__elements__[newname1] = ('INV', [n1[1:]])
                sckt.__elements__[newname] = ('OR', [newname1, n2])
            elif (n2[0] == '!'):
                newname1 = "INT_{0}".format(num)
                num = num + 1
                sckt.__elements__[newname1] = ('INV', [n2[1:]])
                sckt.__elements__[newname] = ('OR', [newname1, n1])
            # print('N1: ' + n1)
            # print('N2: ' + n2)
            # Replace the pair inside the terms
            for o in merge:
                flag = 0
                for term in merge[o]:
                    if term.count(n1) > 0:
                        flag = flag + 1
                    if term.count(n2) > 0:
                        flag = flag + 1
                if (flag == 2):
                    for term in merge[o]:
                        if term.count(n1) > 0:
                            term.remove(n1)
                            continue
                        if term.count(n2) > 0:
                            term.remove(n2)
                            continue
                    merge[o].append([newname])
            # Remove empty entries from the term lists
            for o in merge:
                for term in merge[o]:
                    while (merge[o].count([]) > 0):
                        merge[o].remove([])
        else:
            break
    check = []
    # Now replace intermediate node names with the real output names
    for o in merge:
        find = merge[o][0][0]
        replace = o
        # First rename the dictionary key
        if find in sckt.__elements__:
            sckt.__elements__[replace] = sckt.__elements__[find]
            del sckt.__elements__[find]
        # Then rename inside the element fan-in lists
        for el in sckt.__elements__:
            lst = sckt.__elements__[el][1]
            for key,i in enumerate(lst):
                if i == find:
                    lst[key] = replace
    # Fix for the case where several outputs share a single logic function
    # (and some outputs disappeared during the renaming above):
    # add buffers for the missing outputs.
    fix_happened = 0
    for o in merge:
        if o not in sckt.__elements__:
            flag = 0
            for el in sckt.__elements__:
                lst = sckt.__elements__[el][1]
                for key,i in enumerate(lst):
                    if i == o:
                        flag += 1
            if flag == 0:
                f1 = merge[o][0][0]
                fname = ''
                for o1 in merge:
                    if o1 in sckt.__elements__:
                        f2 = merge[o1][0][0]
                        if f1 == f2:
                            fname = o1
                if fname == '':
                    print('Some unexpected error here')
                    break
                sckt.__elements__[o] = ('BUF', [fname])
                fix_happened += 1
    # Fix for the case where an output simply repeats an input or its
    # inversion (and would be lost): add a (BUF in out) or (INV in out).
    for o in merge:
        if (len(merge[o]) == 1) & (len(merge[o][0]) == 1):
            if (merge[o][0][0][0] != '!') & (merge[o][0][0] in ckt.__inputs__):
                sckt.__elements__[o] = ('BUF', merge[o][0])
            if (merge[o][0][0][0] == '!') & (merge[o][0][0][1:] in ckt.__inputs__):
                sckt.__elements__[o] = ('INV', [merge[o][0][0][1:]])
    # if fix_happened > 0:
    #     print('Merge:')
    #     print(merge)
    #     print(sckt)
    #     exit()
    return sckt
# q - reduced truth table, ckt - initial ckt
# Method 2: first merge the most frequent AND pairs,
# then combine everything through OR.
# Randomly replace X&~Y with INV + AND or INV + NOR.
# Randomly replace ~X&Y with INV + OR or INV + NAND.
def createSubckt_method2(q, ckt):
    """Greedy synthesis like method 1, but mixed-polarity pairs are mapped
    randomly to one of two equivalent gate decompositions (randomized via
    random.randint), producing structurally different but logically
    equivalent circuits across calls.
    """
    sckt = sa.scheme_alt()
    for i in ckt.__inputs__:
        sckt.__inputs__.append(i)
    for o in ckt.__outputs__:
        sckt.__outputs__.append(o)
    # Build a convenient data structure
    merge = createMergeStructure(q, ckt)
    # Find the most frequent pairs and replace them.
    # Prefer pairs where both literals are negated, or both are plain.
    # The loop ends once every term is compressed to a single node.
    num = 0
    while (1):
        uniqueList = getUniqueNames(merge)
        total = len(uniqueList)
        # Build the pair-count matrix
        check = [0] * total
        for i in range(total):
            check[i] = [0] * total
        # Count the pairs
        for i in range(0, total):
            for j in range(i+1, total):
                n1 = uniqueList[i]
                n2 = uniqueList[j]
                check[i][j] = 0
                for o in merge:
                    for term in merge[o]:
                        if term.count(n1) > 0 and term.count(n2) > 0:
                            check[i][j] = check[i][j] + 1
        # Pick the most frequent pair
        max = 0
        maxi = -1
        maxj = -1
        for i in range(0, total):
            for j in range(i+1, total):
                if check[i][j] > 0:
                    if check[i][j] > max:
                        max = check[i][j]
                        maxi = i
                        maxj = j
                    elif check[i][j] == max:
                        if (uniqueList[i][0] != '!' and uniqueList[j][0] != '!') or (uniqueList[i][0] == '!' and uniqueList[j][0] == '!'):
                            maxi = i
                            maxj = j
        # If a pair remains, add a gate to the circuit and replace the pair
        # by a single node; if there are no pairs left, leave the loop.
        if (max > 0):
            n1 = uniqueList[maxi]
            n2 = uniqueList[maxj]
            newname = "INT_{0}".format(num)
            num = num + 1
            if (n1[0] != '!' and n2[0] != '!'):
                sckt.__elements__[newname] = ('AND', [n1, n2])
            elif (n1[0] == '!' and n2[0] == '!'):
                sckt.__elements__[newname] = ('NOR', [n1[1:], n2[1:]])
            elif (n1[0] == '!'):
                newname1 = "INT_{0}".format(num)
                num = num + 1
                # Randomly pick one of two equivalent decompositions:
                # !a & b == AND(INV(a), b) == NOR(INV(b), a)
                if random.randint(0, 1) == 0:
                    sckt.__elements__[newname1] = ('INV', [n1[1:]])
                    sckt.__elements__[newname] = ('AND', [newname1, n2])
                else:
                    sckt.__elements__[newname1] = ('INV', [n2])
                    sckt.__elements__[newname] = ('NOR', [newname1, n1[1:]])
            elif (n2[0] == '!'):
                newname1 = "INT_{0}".format(num)
                num = num + 1
                if random.randint(0, 1) == 0:
                    sckt.__elements__[newname1] = ('INV', [n2[1:]])
                    sckt.__elements__[newname] = ('AND', [newname1, n1])
                else:
                    sckt.__elements__[newname1] = ('INV', [n1])
                    sckt.__elements__[newname] = ('NOR', [newname1, n2[1:]])
            # Replace the pair inside the terms
            for o in merge:
                for term in merge[o]:
                    if term.count(n1) > 0 and term.count(n2) > 0:
                        term.remove(n1)
                        term.remove(n2)
                        term.append(newname)
        else:
            break
    check = []
    # print(sckt)
    # print(merge)
    # Loop that pairwise merges terms (via OR) within each output
    while (1):
        uniqueList = getUniqueNames(merge)
        total = len(uniqueList)
        # Build the pair-count matrix
        check = [0] * total
        for i in range(total):
            check[i] = [0] * total
        # Count pairs (check which pair occurs most often across all outputs)
        for i in range(0, total):
            for j in range(i+1, total):
                n1 = uniqueList[i]
                n2 = uniqueList[j]
                check[i][j] = 0
                for o in merge:
                    flag = 0
                    for term in merge[o]:
                        if term.count(n1) > 0:
                            flag = flag + 1
                    for term in merge[o]:
                        if term.count(n2) > 0:
                            flag = flag + 1
                    if flag == 2:
                        check[i][j] = check[i][j] + 1
        # Pick the most frequent pair
        max = 0
        maxi = -1
        maxj = -1
        for i in range(0, total):
            for j in range(i+1, total):
                if check[i][j] > 0:
                    if check[i][j] > max:
                        max = check[i][j]
                        maxi = i
                        maxj = j
                    elif check[i][j] == max:
                        if (uniqueList[i][0] != '!' and uniqueList[j][0] != '!') or (uniqueList[i][0] == '!' and uniqueList[j][0] == '!'):
                            maxi = i
                            maxj = j
        # If a pair remains, add a gate to the circuit and replace the pair
        # by a single node; if there are no pairs left, leave the loop.
        if (max > 0):
            n1 = uniqueList[maxi]
            n2 = uniqueList[maxj]
            newname = "INT_{0}".format(num)
            num = num + 1
            if (n1[0] != '!' and n2[0] != '!'):
                sckt.__elements__[newname] = ('OR', [n1, n2])
            elif (n1[0] == '!' and n2[0] == '!'):
                sckt.__elements__[newname] = ('NAND', [n1[1:], n2[1:]])
            elif (n1[0] == '!'):
                newname1 = "INT_{0}".format(num)
                num = num + 1
                # Randomly pick one of two equivalent decompositions:
                # !a | b == OR(INV(a), b) == NAND(INV(b), a)
                if random.randint(0, 1) == 0:
                    sckt.__elements__[newname1] = ('INV', [n1[1:]])
                    sckt.__elements__[newname] = ('OR', [newname1, n2])
                else:
                    sckt.__elements__[newname1] = ('INV', [n2])
                    sckt.__elements__[newname] = ('NAND', [newname1, n1[1:]])
            elif (n2[0] == '!'):
                newname1 = "INT_{0}".format(num)
                num = num + 1
                if random.randint(0, 1) == 0:
                    sckt.__elements__[newname1] = ('INV', [n2[1:]])
                    sckt.__elements__[newname] = ('OR', [newname1, n1])
                else:
                    sckt.__elements__[newname1] = ('INV', [n1])
                    sckt.__elements__[newname] = ('NAND', [newname1, n2[1:]])
            # print('N1: ' + n1)
            # print('N2: ' + n2)
            # Replace the pair inside the terms
            for o in merge:
                flag = 0
                for term in merge[o]:
                    if term.count(n1) > 0:
                        flag = flag + 1
                    if term.count(n2) > 0:
                        flag = flag + 1
                if (flag == 2):
                    for term in merge[o]:
                        if term.count(n1) > 0:
                            term.remove(n1)
                            continue
                        if term.count(n2) > 0:
                            term.remove(n2)
                            continue
                    merge[o].append([newname])
            # Remove empty entries from the term lists
            for o in merge:
                for term in merge[o]:
                    while (merge[o].count([]) > 0):
                        merge[o].remove([])
        else:
            break
    check = []
    # Now replace intermediate node names with the real output names
    for o in merge:
        find = merge[o][0][0]
        replace = o
        # First rename the dictionary key
        if find in sckt.__elements__:
            sckt.__elements__[replace] = sckt.__elements__[find]
            del sckt.__elements__[find]
        # Then rename inside the element fan-in lists
        for el in sckt.__elements__:
            lst = sckt.__elements__[el][1]
            for key,i in enumerate(lst):
                if i == find:
                    lst[key] = replace
    # Fix for the case where several outputs share a single logic function
    # (and some outputs disappeared during the renaming above):
    # add buffers for the missing outputs.
    fix_happened = 0
    for o in merge:
        if o not in sckt.__elements__:
            flag = 0
            for el in sckt.__elements__:
                lst = sckt.__elements__[el][1]
                for key,i in enumerate(lst):
                    if i == o:
                        flag += 1
            if flag == 0:
                f1 = merge[o][0][0]
                fname = ''
                for o1 in merge:
                    if o1 in sckt.__elements__:
                        f2 = merge[o1][0][0]
                        if f1 == f2:
                            fname = o1
                if fname == '':
                    print('Some unexpected error here')
                    break
                sckt.__elements__[o] = ('BUF', [fname])
                fix_happened += 1
    # Fix for the case where an output simply repeats an input or its
    # inversion (and would be lost): add a (BUF in out) or (INV in out).
    for o in merge:
        if (len(merge[o]) == 1) & (len(merge[o][0]) == 1):
            if (merge[o][0][0][0] != '!') & (merge[o][0][0] in ckt.__inputs__):
                sckt.__elements__[o] = ('BUF', merge[o][0])
            if (merge[o][0][0][0] == '!') & (merge[o][0][0][1:] in ckt.__inputs__):
                sckt.__elements__[o] = ('INV', [merge[o][0][0][1:]])
    return sckt
# q - reduced truth table, ckt - initial ckt
# Method 3: merge arbitrary AND pairs,
# then combine everything through arbitrary OR pairs
def createSubckt_method3(q, ckt):
    """Randomized synthesis: like method 2, but instead of the most frequent
    pair a uniformly random co-occurring pair is merged at every step.
    """
    sckt = sa.scheme_alt()
    for i in ckt.__inputs__:
        sckt.__inputs__.append(i)
    for o in ckt.__outputs__:
        sckt.__outputs__.append(o)
    # Build a convenient data structure
    merge = createMergeStructure(q, ckt)
    # Replace co-occurring pairs until every term is a single node.
    # Prefer pairs where both literals are negated, or both are plain.
    # The loop ends once every term is compressed to a single node.
    num = 0
    while (1):
        uniqueList = getUniqueNames(merge)
        total = len(uniqueList)
        # Build the pair-count matrix
        check = [0] * total
        for i in range(total):
            check[i] = [0] * total
        # Count the pairs
        for i in range(0, total):
            for j in range(i+1, total):
                n1 = uniqueList[i]
                n2 = uniqueList[j]
                check[i][j] = 0
                for o in merge:
                    for term in merge[o]:
                        if term.count(n1) > 0 and term.count(n2) > 0:
                            check[i][j] = check[i][j] + 1
        # Pick a random pair
        count = 0
        for i in range(0, total):
            for j in range(i+1, total):
                if check[i][j] > 0:
                    count += 1
        if count == 0:
            break
        numPair = random.randint(0, count-1)
        count = 0
        # NOTE(review): the inner 'break' only leaves the j-loop, so later
        # i-rows with count == numPair re-assign n1/n2; still a valid pair,
        # but not the uniformly random one intended — confirm.
        for i in range(0, total):
            for j in range(i+1, total):
                if check[i][j] > 0:
                    if count == numPair:
                        n1 = uniqueList[i]
                        n2 = uniqueList[j]
                        break
                    count += 1
        newname = "INT_{0}".format(num)
        num = num + 1
        if (n1[0] != '!' and n2[0] != '!'):
            sckt.__elements__[newname] = ('AND', [n1, n2])
        elif (n1[0] == '!' and n2[0] == '!'):
            sckt.__elements__[newname] = ('NOR', [n1[1:], n2[1:]])
        elif (n1[0] == '!'):
            newname1 = "INT_{0}".format(num)
            num = num + 1
            # Randomly pick one of two equivalent decompositions.
            if random.randint(0, 1) == 0:
                sckt.__elements__[newname1] = ('INV', [n1[1:]])
                sckt.__elements__[newname] = ('AND', [newname1, n2])
            else:
                sckt.__elements__[newname1] = ('INV', [n2])
                sckt.__elements__[newname] = ('NOR', [newname1, n1[1:]])
        elif (n2[0] == '!'):
            newname1 = "INT_{0}".format(num)
            num = num + 1
            if random.randint(0, 1) == 0:
                sckt.__elements__[newname1] = ('INV', [n2[1:]])
                sckt.__elements__[newname] = ('AND', [newname1, n1])
            else:
                sckt.__elements__[newname1] = ('INV', [n1])
                sckt.__elements__[newname] = ('NOR', [newname1, n2[1:]])
        # Replace the pair inside the terms
        for o in merge:
            for term in merge[o]:
                if term.count(n1) > 0 and term.count(n2) > 0:
                    term.remove(n1)
                    term.remove(n2)
                    term.append(newname)
    # Loop that pairwise merges terms (via OR) within each output
    while (1):
        uniqueList = getUniqueNames(merge)
        total = len(uniqueList)
        # Build the pair-count matrix
        check = [0] * total
        for i in range(total):
            check[i] = [0] * total
        # Count pairs (check which pair occurs most often across all outputs)
        for i in range(0, total):
            for j in range(i+1, total):
                n1 = uniqueList[i]
                n2 = uniqueList[j]
                check[i][j] = 0
                for o in merge:
                    flag = 0
                    for term in merge[o]:
                        if term.count(n1) > 0:
                            flag = flag + 1
                    for term in merge[o]:
                        if term.count(n2) > 0:
                            flag = flag + 1
                    if flag == 2:
                        check[i][j] = check[i][j] + 1
        # Pick a random pair
        count = 0
        for i in range(0, total):
            for j in range(i+1, total):
                if check[i][j] > 0:
                    count += 1
        if count == 0:
            break
        numPair = random.randint(0, count-1)
        count = 0
        for i in range(0, total):
            for j in range(i+1, total):
                if check[i][j] > 0:
                    if count == numPair:
                        n1 = uniqueList[i]
                        n2 = uniqueList[j]
                        break
                    count += 1
        # A pair still exists: add a gate and replace the pair by one node
        newname = "INT_{0}".format(num)
        num = num + 1
        if (n1[0] != '!' and n2[0] != '!'):
            sckt.__elements__[newname] = ('OR', [n1, n2])
        elif (n1[0] == '!' and n2[0] == '!'):
            sckt.__elements__[newname] = ('NAND', [n1[1:], n2[1:]])
        elif (n1[0] == '!'):
            newname1 = "INT_{0}".format(num)
            num = num + 1
            # Randomly pick one of two equivalent decompositions.
            if random.randint(0, 1) == 0:
                sckt.__elements__[newname1] = ('INV', [n1[1:]])
                sckt.__elements__[newname] = ('OR', [newname1, n2])
            else:
                sckt.__elements__[newname1] = ('INV', [n2])
                sckt.__elements__[newname] = ('NAND', [newname1, n1[1:]])
        elif (n2[0] == '!'):
            newname1 = "INT_{0}".format(num)
            num = num + 1
            if random.randint(0, 1) == 0:
                sckt.__elements__[newname1] = ('INV', [n2[1:]])
                sckt.__elements__[newname] = ('OR', [newname1, n1])
            else:
                sckt.__elements__[newname1] = ('INV', [n1])
                sckt.__elements__[newname] = ('NAND', [newname1, n2[1:]])
        # Replace the pair inside the terms
        for o in merge:
            flag = 0
            for term in merge[o]:
                if term.count(n1) > 0:
                    flag = flag + 1
                if term.count(n2) > 0:
                    flag = flag + 1
            if (flag == 2):
                for term in merge[o]:
                    if term.count(n1) > 0:
                        term.remove(n1)
                        continue
                    if term.count(n2) > 0:
                        term.remove(n2)
                        continue
                merge[o].append([newname])
        # Remove empty entries from the term lists
        for o in merge:
            for term in merge[o]:
                while (merge[o].count([]) > 0):
                    merge[o].remove([])
    # Now replace intermediate node names with the real output names
    for o in merge:
        find = merge[o][0][0]
        replace = o
        # First rename the dictionary key
        if find in sckt.__elements__:
            sckt.__elements__[replace] = sckt.__elements__[find]
            del sckt.__elements__[find]
        # Then rename inside the element fan-in lists
        for el in sckt.__elements__:
            lst = sckt.__elements__[el][1]
            for key,i in enumerate(lst):
                if i == find:
                    lst[key] = replace
    # Fix for the case where several outputs share a single logic function
    # (and some outputs disappeared during the renaming above):
    # add buffers for the missing outputs.
    fix_happened = 0
    for o in merge:
        if o not in sckt.__elements__:
            flag = 0
            for el in sckt.__elements__:
                lst = sckt.__elements__[el][1]
                for key,i in enumerate(lst):
                    if i == o:
                        flag += 1
            if flag == 0:
                f1 = merge[o][0][0]
                fname = ''
                for o1 in merge:
                    if o1 in sckt.__elements__:
                        f2 = merge[o1][0][0]
                        if f1 == f2:
                            fname = o1
                if fname == '':
                    print('Some unexpected error here')
                    break
                sckt.__elements__[o] = ('BUF', [fname])
                fix_happened += 1
    # Fix for the case where an output simply repeats an input or its
    # inversion (and would be lost): add a (BUF in out) or (INV in out).
    for o in merge:
        if (len(merge[o]) == 1) & (len(merge[o][0]) == 1):
            if (merge[o][0][0][0] != '!') & (merge[o][0][0] in ckt.__inputs__):
                sckt.__elements__[o] = ('BUF', merge[o][0])
            if (merge[o][0][0][0] == '!') & (merge[o][0][0][1:] in ckt.__inputs__):
                sckt.__elements__[o] = ('INV', [merge[o][0][0][1:]])
    return sckt
def createTMRCirc(ckt):
    """Build a triple-modular-redundancy (TMR) version of *ckt*.

    The circuit is copied three times (element names suffixed _COPY_n) and a
    2-of-3 majority voter (three ANDs + two ORs) drives each original output.

    NOTE(review): __inputs__/__outputs__ are assigned by reference, so the
    returned circuit shares those lists with *ckt* — later in-place mutation
    of either would affect both. Consider copying; confirm callers.
    """
    out = sa.scheme_alt()
    out.__inputs__ = ckt.__inputs__
    out.__outputs__ = ckt.__outputs__
    # Make 3 copies of the circuit
    for copy in range(1,4):
        for el in ckt.__elements__:
            el1 = "{}_COPY_{}".format(el, copy)
            data = ckt.__elements__[el]
            eltype = data[0]
            lst = []
            for d in data[1]:
                # Primary inputs are shared; internal nets get the copy suffix.
                if d not in ckt.__inputs__:
                    lst.append("{}_COPY_{}".format(d, copy))
                else:
                    lst.append(d)
            out.__elements__[el1] = (eltype, lst)
    # Majority voting element on every output: OR of the three pairwise ANDs.
    for o in ckt.__outputs__:
        out.__elements__["{}_AND1".format(o)] = ('AND', ["{}_COPY_1".format(o), "{}_COPY_2".format(o)])
        out.__elements__["{}_AND2".format(o)] = ('AND', ["{}_COPY_2".format(o), "{}_COPY_3".format(o)])
        out.__elements__["{}_AND3".format(o)] = ('AND', ["{}_COPY_1".format(o), "{}_COPY_3".format(o)])
        out.__elements__["{}_OR1".format(o)] = ('OR', ["{}_AND1".format(o), "{}_AND2".format(o)])
        out.__elements__[o] = ('OR', ["{}_AND3".format(o), "{}_OR1".format(o)])
    return out
def get_verilog_type(tp):
    """Map an internal gate-type mnemonic to the Verilog primitive name.

    Returns 'UNKNOWN' for an unrecognized type (same as the original
    if/elif chain, rewritten as a single dict lookup).
    """
    mapping = {
        'INV': 'not',
        'BUF': 'buf',
        'AND': 'and',
        'NAND': 'nand',
        'OR': 'or',
        'NOR': 'nor',
        'XOR': 'xor',
        'XNOR': 'xnor',
    }
    return mapping.get(tp, 'UNKNOWN')
def print_circuit_in_verilog_file(circ, circname, file_name):
    """Write *circ* as a structural Verilog netlist to *file_name*.

    Emits a module named *circname* with input/output declarations, wire
    declarations for every undeclared internal net, and one gate instance
    (e0, e1, ...) per element.

    Improvements over the original: the locals no longer shadow the builtins
    'str' and 'type', and the file is managed with a 'with' block so it is
    closed even if an exception is raised mid-write. Output bytes unchanged.
    """
    with open(file_name, 'w') as f:
        # Module header: every input (each followed by ', '), then outputs.
        header = 'module ' + circname + ' ('
        for i in range(circ.inputs()):
            header += circ.__inputs__[i] + ', '
        for i in range(circ.outputs()):
            if i > 0:
                header += ', '
            header += circ.__outputs__[i]
        header += ');\n'
        f.write(header)
        for i in range(circ.inputs()):
            f.write("\tinput " + circ.__inputs__[i] + ';\n')
        for i in range(circ.outputs()):
            f.write("\toutput " + circ.__outputs__[i] + ';\n')

        # Collect undeclared nets: element outputs first, then element inputs.
        wires = []
        for el in circ.__elements__:
            if el in circ.__inputs__ or el in circ.__outputs__ or el in wires:
                continue
            wires.append(el)
        for el in circ.__elements__:
            for inp in circ.__elements__[el][1]:
                if inp in circ.__inputs__ or inp in circ.__outputs__ or inp in wires:
                    continue
                wires.append(inp)
        for w in wires:
            f.write("\twire " + w + ';\n')

        # One gate instance per element; output net first, then the fan-ins.
        for elindex, el in enumerate(circ.__elements__):
            gate = get_verilog_type(circ.__elements__[el][0])
            line = "\t" + gate + " " + "e" + str(elindex) + " (" + el
            for inp in circ.__elements__[el][1]:
                line += ', ' + inp
            line += ');\n'
            f.write(line)
        f.write('endmodule')
def print_run_file(run_file, verilog_file, synth_file, graph):
    """Write a Yosys synthesis script to `run_file`.

    The script reads `verilog_file`, synthesizes the top module `circ`
    against std.lib and writes the result to `synth_file`.  `graph` is kept
    for interface compatibility; SVG graph output is currently disabled.

    Fix over the original: the file is opened with a context manager so the
    handle cannot leak on exception.
    """
    with open(run_file, 'w') as f:
        f.write('read_verilog ' + verilog_file + '\n')
        f.write('synth -top circ\n')
        f.write('dfflibmap -liberty std.lib\n')
        f.write('abc -liberty std.lib\n')
        f.write('clean\n')
        f.write('write_verilog ' + synth_file + '\n')
        # f.write('show -format svg -prefix ' + graph + '\n')
def convert_file_to_relic_format(circuit, synth_file, converted_circuit_file):
    """Convert a Yosys-synthesized Verilog netlist into the internal text format.

    Output layout:
        <#inputs> <input names...>
        <#outputs> <output names...>
        <#gates>
        <GATE_TYPE> <input node(s)> <output node>    (one line per gate)
    `assign lhs = rhs;` statements are emitted as BUF gates.

    Bug fix over the original: the one-input check compared against the
    lowercase string 'buf', which can never match the uppercase 'BUF'
    produced by the regex, so every BUF cell wrongly took the two-input
    branch and failed looking for a nonexistent .B(...) port.
    """
    with open(converted_circuit_file, 'w') as f:
        f.write(str(circuit.inputs()))
        for inp in circuit.__inputs__:
            f.write(' ' + inp)
        f.write('\n')
        f.write(str(circuit.outputs()))
        for out in circuit.__outputs__:
            f.write(' ' + out)
        f.write('\n')
        with open(synth_file, 'r') as f1:
            content = f1.read()
        # All gate instantiations.
        matches = re.findall(r"\s*?(INV|BUF|AND|NAND|OR|NOR|XOR|XNOR) (.*?) \((.*?);", content, re.DOTALL)
        # All continuous assignments.
        matches2 = re.findall(r"\s*?assign (.*?) = (.*?);", content, re.DOTALL)
        f.write(str(len(matches) + len(matches2)) + '\n')
        for m in matches:
            cell_type = m[0]
            # .Y(...) is the gate output; .A(...)/.B(...) are its inputs.
            # NOTE: a missing port still raises AttributeError right after the
            # error print, preserving the original fail-fast behaviour.
            nodes = re.search(r"\.Y\((.*?)\)", m[2], re.M)
            if nodes is None:
                print('Error converting verilog file (3)')
            out = nodes.group(1)
            nodes = re.search(r"\.A\((.*?)\)", m[2], re.M)
            if nodes is None:
                print('Error converting verilog file (1)')
            node1 = nodes.group(1)
            if cell_type != 'INV' and cell_type != 'BUF':
                nodes = re.search(r"\.B\((.*?)\)", m[2], re.M)
                if nodes is None:
                    print('Error converting verilog file (2)')
                node2 = nodes.group(1)
                f.write(cell_type + ' ' + node1 + ' ' + node2 + ' ' + out + '\n')
            else:
                f.write(cell_type + ' ' + node1 + ' ' + out + '\n')
        for m in matches2:
            f.write('BUF ' + m[1] + ' ' + m[0] + '\n')
# Function for synthesizing a circuit with the open-source logic synthesizer Yosys:
# http://www.clifford.at/yosys/about.html
def create_circuit_external_yosys(circuit):
    """Synthesize `circuit` with the external Yosys logic synthesizer.

    Writes the circuit to a temporary Verilog file, runs yosys.exe on it,
    converts the synthesized netlist back into the internal format and
    re-reads it.  Returns the new circuit, or None on failure.

    Fixes over the original: the bare `except:` (which would also swallow
    KeyboardInterrupt/SystemExit) is narrowed to `except Exception`, the
    `== False` comparison is replaced by `not`, and the unused `ret`
    variable is removed.
    """
    dfile = get_project_directory()
    run_path = os.path.join(dfile, "utils", "bin", "win32", "yosys")
    yosys_exe = os.path.join(run_path, "yosys.exe")
    circuit_file = os.path.join(dfile, "temp", "tmp_sheme_yosys.v")
    run_file = os.path.join(dfile, "temp", "tmp_runfile_yosys.txt")
    synth_file = os.path.join(dfile, "temp", "tmp_synth.v")
    converted_circuit_file = os.path.join(dfile, "temp", "tmp_synth_conv.txt")
    graph_file = os.path.join(dfile, "temp", "synth.svg")
    debug_file = os.path.join(dfile, "temp", "yosys_fail.txt")
    # Remove stale intermediate files left over from a previous run.
    for stale in (circuit_file, run_file, synth_file, converted_circuit_file):
        if os.path.isfile(stale):
            os.remove(stale)
    print_circuit_in_verilog_file(circuit, "circ", circuit_file)
    print_run_file(run_file, circuit_file, synth_file, graph_file)
    exe = yosys_exe + " < " + run_file
    try:
        subprocess.check_output(exe, shell=True, cwd=run_path).decode('UTF-8')
    except Exception:
        # Yosys failure is detected below via the missing synth_file.
        pass
    if not os.path.isfile(synth_file):
        # If Yosys failed, dump the circuit for later debugging.
        circuit.print_circuit_in_file(debug_file)
        print('Yosys error')
        return None
    convert_file_to_relic_format(circuit, synth_file, converted_circuit_file)
    if not os.path.isfile(converted_circuit_file):
        return None
    return sa.read_scheme(converted_circuit_file)
| apache-2.0 |
phoebusliang/parallel-lettuce | tests/integration/lib/Django-1.2.5/django/contrib/localflavor/uk/forms.py | 313 | 1943 | """
UK-specific Form helpers
"""
import re
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class UKPostcodeField(CharField):
    """
    A form field that validates its input is a UK postcode.

    The validation regular expression is sourced from the schema for British
    Standard BS7666 address types:
    http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd

    Cleaning uppercases the value and inserts the separating space in the
    correct place if it is missing.
    """
    default_error_messages = {
        'invalid': _(u'Enter a valid postcode.'),
    }
    outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
    incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
    postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
    space_regex = re.compile(r' *(%s)$' % incode_pattern)

    def clean(self, value):
        """Return the normalised postcode or raise ValidationError."""
        cleaned = super(UKPostcodeField, self).clean(value)
        # An empty value is allowed through; requiredness is handled upstream.
        if cleaned == u'':
            return cleaned
        candidate = cleaned.upper().strip()
        # Normalise to exactly one space before the incode (second part).
        candidate = self.space_regex.sub(r' \1', candidate)
        if self.postcode_regex.search(candidate):
            return candidate
        raise ValidationError(self.error_messages['invalid'])
class UKCountySelect(Select):
    """
    A Select widget that uses a list of UK Counties/Regions as its choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily inside __init__ -- presumably to defer loading the
        # large choices module until a widget is instantiated; confirm.
        from uk_regions import UK_REGION_CHOICES
        super(UKCountySelect, self).__init__(attrs, choices=UK_REGION_CHOICES)
class UKNationSelect(Select):
    """
    A Select widget that uses a list of UK Nations as its choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily inside __init__, mirroring UKCountySelect.
        from uk_regions import UK_NATIONS_CHOICES
        super(UKNationSelect, self).__init__(attrs, choices=UK_NATIONS_CHOICES)
| gpl-3.0 |
richardcs/ansible | lib/ansible/modules/network/onyx/onyx_ptp_interface.py | 59 | 8135 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_ptp_interface
version_added: '2.8'
author: 'Anas Badaha (@anasb)'
short_description: 'Configures PTP on interface'
description:
- "This module provides declarative management of PTP interfaces configuration\non Mellanox ONYX network devices."
notes:
- 'Tested on ONYX 3.6.8130'
- 'PTP Protocol must be enabled on switch.'
- 'Interface must not be a switch port interface.'
options:
name:
description:
- 'ethernet or vlan interface name that we want to configure PTP on it'
required: true
state:
description:
- 'Enable/Disable PTP on Interface'
default: enabled
choices:
- enabled
- disabled
delay_request:
description:
- 'configure PTP delay request interval, Range 0-5'
announce_interval:
description:
- 'configure PTP announce setting for interval, Range -3-1'
announce_timeout:
description:
- 'configure PTP announce setting for timeout, Range 2-10'
sync_interval:
description:
- 'configure PTP sync interval, Range -7--1'
"""
EXAMPLES = """
- name: configure PTP interface
onyx_ptp_interface:
state: enabled
name: Eth1/1
delay_request: 0
announce_interval: -2
announce_timeout: 3
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- interface ethernet 1/16 ptp enable
- interface ethernet 1/16 ptp delay-req interval 0
- interface ethernet 1/16 ptp announce interval -1
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.onyx.onyx import show_cmd
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxPtpInterfaceModule(BaseOnyxModule):
    """Declarative management of PTP configuration on ONYX ethernet/vlan interfaces."""

    IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|Eth\d+\/\d+\d+)$")
    IF_VLAN_REGEX = re.compile(r"^Vlan (\d+)$")

    IF_TYPE_ETH = "ethernet"
    IF_TYPE_VLAN = "vlan"

    IF_TYPE_MAP = {
        IF_TYPE_ETH: IF_ETH_REGEX,
        IF_TYPE_VLAN: IF_VLAN_REGEX
    }

    # Valid (min, max) bounds for each numeric PTP attribute.
    # NOTE(review): DOCUMENTATION describes announce_interval as "Range -3-1"
    # while the code enforces (-3, -1) -- confirm which is intended.
    RANGE_ATTR = {
        "delay_request": (0, 5),
        "announce_interval": (-3, -1),
        "announce_timeout": (2, 10),
        "sync_interval": (-7, -1)
    }

    # Parsed from the 'name' module parameter by _set_if_type().
    _interface_type = None
    _interface_id = None

    def init_module(self):
        """ initialize module
        """
        element_spec = dict(
            name=dict(required=True),
            state=dict(choices=['enabled', 'disabled'], default='enabled'),
            delay_request=dict(type=int),
            announce_interval=dict(type=int),
            announce_timeout=dict(type=int),
            sync_interval=dict(type=int)
        )
        argument_spec = dict()
        argument_spec.update(element_spec)
        self._module = AnsibleModule(
            argument_spec=argument_spec,
            supports_check_mode=True)

    @classmethod
    def _get_interface_type(cls, if_name):
        """Return (type, id) parsed from `if_name`, or (None, None) if unsupported."""
        if_type = None
        if_id = None
        for interface_type, interface_regex in iteritems(cls.IF_TYPE_MAP):
            match = interface_regex.match(if_name)
            if match:
                if_type = interface_type
                if_id = match.group(1)
                break
        return if_type, if_id

    def _set_if_type(self, module_params):
        """Cache interface type/id from the 'name' parameter; fail on unsupported names."""
        if_name = module_params['name']
        self._interface_type, self._interface_id = self._get_interface_type(if_name)
        if not self._interface_id:
            self._module.fail_json(
                msg='unsupported interface name/type: %s' % if_name)

    def get_required_config(self):
        """Collect the requested configuration from module params and validate it."""
        module_params = self._module.params
        self._required_config = dict(module_params)
        self._set_if_type(self._required_config)
        self.validate_param_values(self._required_config)

    def _validate_attr_is_not_none(self, attr_name, attr_value):
        """Fail if a PTP attribute is supplied while state is 'disabled'."""
        if attr_value is not None:
            self._module.fail_json(msg='Can not set %s value on switch while state is disabled' % attr_name)

    def validate_param_values(self, obj, param=None):
        """Reject PTP attributes when disabling, then run the base validation."""
        if obj['state'] == 'disabled':
            for attr_name in self.RANGE_ATTR:
                self._validate_attr_is_not_none(attr_name, obj[attr_name])
        super(OnyxPtpInterfaceModule, self).validate_param_values(obj, param)

    def _validate_range(self, value, attr_name):
        """Fail unless `value` is None or inside RANGE_ATTR[attr_name]."""
        min_value, max_value = self.RANGE_ATTR[attr_name]
        # Bug fix: the original guard was `if value and ...`, so a supplied
        # value of 0 (falsy) skipped validation entirely -- e.g.
        # announce_timeout=0 slipped past its 2..10 range.  Test for None
        # explicitly instead.
        if value is not None and not min_value <= int(value) <= max_value:
            self._module.fail_json(msg='%s value must be between %d and %d' % (attr_name, min_value, max_value))

    def validate_delay_request(self, value):
        self._validate_range(value, "delay_request")

    def validate_announce_interval(self, value):
        self._validate_range(value, "announce_interval")

    def validate_announce_timeout(self, value):
        self._validate_range(value, "announce_timeout")

    def validate_sync_interval(self, value):
        self._validate_range(value, "sync_interval")

    def _set_ptp_interface_config(self, ptp_interface_config):
        """Populate _current_config from the device's PTP interface state."""
        if ptp_interface_config is None:
            # No PTP config returned -> PTP is disabled on the interface.
            self._current_config['state'] = 'disabled'
            return
        ptp_interface_config = ptp_interface_config[0]
        self._current_config['state'] = 'enabled'
        self._current_config['delay_request'] = int(ptp_interface_config['Delay request interval(log mean)'])
        self._current_config['announce_interval'] = int(ptp_interface_config['Announce interval(log mean)'])
        self._current_config['announce_timeout'] = int(ptp_interface_config['Announce receipt time out'])
        self._current_config['sync_interval'] = int(ptp_interface_config['Sync interval(log mean)'])

    def _show_ptp_interface_config(self):
        """Query the switch for the interface's current PTP configuration."""
        cmd = "show ptp interface %s %s" % (self._interface_type, self._interface_id)
        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)

    def load_current_config(self):
        """Load the device's current PTP state into _current_config."""
        self._current_config = dict()
        ptp_interface_config = self._show_ptp_interface_config()
        self._set_ptp_interface_config(ptp_interface_config)

    def _generate_attr_command(self, attr_name, attr_cmd_name):
        """Emit a config command for `attr_name` if it differs from the device."""
        attr_val = self._required_config.get(attr_name)
        if attr_val is not None:
            curr_val = self._current_config.get(attr_name)
            if attr_val != curr_val:
                self._commands.append(
                    'interface %s %s ptp %s %d' % (self._interface_type, self._interface_id, attr_cmd_name, attr_val))

    def generate_commands(self):
        """Build the full list of commands needed to reach the requested state."""
        state = self._required_config.get("state", "enabled")
        self._gen_ptp_commands(state)
        self._generate_attr_command("delay_request", "delay-req interval")
        self._generate_attr_command("announce_interval", "announce interval")
        self._generate_attr_command("announce_timeout", "announce timeout")
        self._generate_attr_command("sync_interval", "sync interval")

    def _add_if_ptp_cmd(self, req_state):
        """Append the enable/disable PTP command for the interface."""
        if req_state == 'enabled':
            if_ptp_cmd = 'interface %s %s ptp enable' % (self._interface_type, self._interface_id)
        else:
            if_ptp_cmd = 'no interface %s %s ptp enable' % (self._interface_type, self._interface_id)
        self._commands.append(if_ptp_cmd)

    def _gen_ptp_commands(self, req_state):
        """Toggle PTP only when the device state differs from the request."""
        curr_state = self._current_config.get('state')
        if curr_state != req_state:
            self._add_if_ptp_cmd(req_state)
def main():
    """ main entry point for module execution
    """
    # Delegates to main() on the module class (presumably inherited from
    # BaseOnyxModule -- confirm in the base class).
    OnyxPtpInterfaceModule.main()
if __name__ == '__main__':
main()
| gpl-3.0 |
rduivenvoorde/QGIS | tests/src/python/test_qgsmapboxglconverter.py | 10 | 30272 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMapBoxGlStyleConverter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2020 by Nyall Dawson'
__date__ = '29/07/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.PyQt.QtGui import (QColor)
from qgis.core import (QgsMapBoxGlStyleConverter,
QgsMapBoxGlStyleConversionContext,
QgsEffectStack
)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsMapBoxGlStyleConverter(unittest.TestCase):
maxDiff = 100000
def testNoLayer(self):
    """A style JSON without a 'layers' list must be rejected with NoLayerList."""
    c = QgsMapBoxGlStyleConverter()
    self.assertEqual(c.convert({'x': 'y'}), QgsMapBoxGlStyleConverter.NoLayerList)
    self.assertEqual(c.errorMessage(), 'Could not find layers list in JSON')
    # A failed conversion must not leave a partial renderer or labeling behind.
    self.assertIsNone(c.renderer())
    self.assertIsNone(c.labeling())
def testInterpolateExpression(self):
    """interpolateExpression() emits scale_linear for base 1, scale_exp otherwise."""
    # base == 1 -> linear interpolation between the two zoom stops
    self.assertEqual(QgsMapBoxGlStyleConverter.interpolateExpression(5, 13, 27, 29, 1),
                     'scale_linear(@vector_tile_zoom,5,13,27,29)')
    # base != 1 -> exponential interpolation with the base as exponent
    self.assertEqual(QgsMapBoxGlStyleConverter.interpolateExpression(5, 13, 27, 29, 1.5),
                     'scale_exp(@vector_tile_zoom,5,13,27,29,1.5)')
    self.assertEqual(QgsMapBoxGlStyleConverter.interpolateExpression(5, 13, 27, 29, 1.5),
                     'scale_exp(@vector_tile_zoom,5,13,27,29,1.5)')
    # same values, return nice and simple expression!
    self.assertEqual(QgsMapBoxGlStyleConverter.interpolateExpression(5, 13, 27, 27, 1.5),
                     '27')
    # ... scaled by the optional multiplier argument (27 * 2)
    self.assertEqual(QgsMapBoxGlStyleConverter.interpolateExpression(5, 13, 27, 27, 1.5, 2),
                     '54')
def testColorAsHslaComponents(self):
    """colorAsHslaComponents() must split a QColor into (h, s, l, a) parts."""
    # QColor.fromHsl takes 0-255 saturation/lightness; the converter reports
    # them rescaled (50 -> 19, 70 -> 27, consistent with a percentage scale),
    # hue unchanged and alpha in 0-255.
    self.assertEqual(QgsMapBoxGlStyleConverter.colorAsHslaComponents(QColor.fromHsl(30, 50, 70)), (30, 19, 27, 255))
def testParseInterpolateColorByZoom(self):
conversion_context = QgsMapBoxGlStyleConversionContext()
props, default_col = QgsMapBoxGlStyleConverter.parseInterpolateColorByZoom({}, conversion_context)
self.assertEqual(props.isActive(),
False)
props, default_col = QgsMapBoxGlStyleConverter.parseInterpolateColorByZoom({'base': 1,
'stops': [[0, '#f1f075'],
[150, '#b52e3e'],
[250, '#e55e5e']]
},
conversion_context)
self.assertEqual(props.expressionString(),
'CASE WHEN @vector_tile_zoom >= 0 AND @vector_tile_zoom < 150 THEN color_hsla(scale_linear(@vector_tile_zoom,0,150,59,352), scale_linear(@vector_tile_zoom,0,150,81,59), scale_linear(@vector_tile_zoom,0,150,70,44), 255) WHEN @vector_tile_zoom >= 150 AND @vector_tile_zoom < 250 THEN color_hsla(scale_linear(@vector_tile_zoom,150,250,352,0), scale_linear(@vector_tile_zoom,150,250,59,72), scale_linear(@vector_tile_zoom,150,250,44,63), 255) WHEN @vector_tile_zoom >= 250 THEN color_hsla(0, 72, 63, 255) ELSE color_hsla(0, 72, 63, 255) END')
self.assertEqual(default_col.name(), '#f1f075')
props, default_col = QgsMapBoxGlStyleConverter.parseInterpolateColorByZoom({'base': 2,
'stops': [[0, '#f1f075'],
[150, '#b52e3e'],
[250, '#e55e5e']]
},
conversion_context)
self.assertEqual(props.expressionString(),
'CASE WHEN @vector_tile_zoom >= 0 AND @vector_tile_zoom < 150 THEN color_hsla(scale_exp(@vector_tile_zoom,0,150,59,352,2), scale_exp(@vector_tile_zoom,0,150,81,59,2), scale_exp(@vector_tile_zoom,0,150,70,44,2), 255) WHEN @vector_tile_zoom >= 150 AND @vector_tile_zoom < 250 THEN color_hsla(scale_exp(@vector_tile_zoom,150,250,352,0,2), scale_exp(@vector_tile_zoom,150,250,59,72,2), scale_exp(@vector_tile_zoom,150,250,44,63,2), 255) WHEN @vector_tile_zoom >= 250 THEN color_hsla(0, 72, 63, 255) ELSE color_hsla(0, 72, 63, 255) END')
self.assertEqual(default_col.name(), '#f1f075')
def testParseStops(self):
conversion_context = QgsMapBoxGlStyleConversionContext()
self.assertEqual(QgsMapBoxGlStyleConverter.parseStops(1, [[1, 10], [2, 20], [5, 100]], 1, conversion_context),
'CASE WHEN @vector_tile_zoom > 1 AND @vector_tile_zoom <= 2 THEN scale_linear(@vector_tile_zoom,1,2,10,20) WHEN @vector_tile_zoom > 2 AND @vector_tile_zoom <= 5 THEN scale_linear(@vector_tile_zoom,2,5,20,100) WHEN @vector_tile_zoom > 5 THEN 100 END')
self.assertEqual(QgsMapBoxGlStyleConverter.parseStops(1.5, [[1, 10], [2, 20], [5, 100]], 1, conversion_context),
'CASE WHEN @vector_tile_zoom > 1 AND @vector_tile_zoom <= 2 THEN scale_exp(@vector_tile_zoom,1,2,10,20,1.5) WHEN @vector_tile_zoom > 2 AND @vector_tile_zoom <= 5 THEN scale_exp(@vector_tile_zoom,2,5,20,100,1.5) WHEN @vector_tile_zoom > 5 THEN 100 END')
self.assertEqual(QgsMapBoxGlStyleConverter.parseStops(1, [[1, 10], [2, 20], [5, 100]], 8, conversion_context),
'CASE WHEN @vector_tile_zoom > 1 AND @vector_tile_zoom <= 2 THEN scale_linear(@vector_tile_zoom,1,2,10,20) * 8 WHEN @vector_tile_zoom > 2 AND @vector_tile_zoom <= 5 THEN scale_linear(@vector_tile_zoom,2,5,20,100) * 8 WHEN @vector_tile_zoom > 5 THEN 800 END')
self.assertEqual(QgsMapBoxGlStyleConverter.parseStops(1.5, [[1, 10], [2, 20], [5, 100]], 8, conversion_context),
'CASE WHEN @vector_tile_zoom > 1 AND @vector_tile_zoom <= 2 THEN scale_exp(@vector_tile_zoom,1,2,10,20,1.5) * 8 WHEN @vector_tile_zoom > 2 AND @vector_tile_zoom <= 5 THEN scale_exp(@vector_tile_zoom,2,5,20,100,1.5) * 8 WHEN @vector_tile_zoom > 5 THEN 800 END')
def testParseMatchList(self):
conversion_context = QgsMapBoxGlStyleConversionContext()
res, default_color, default_number = QgsMapBoxGlStyleConverter.parseMatchList([
"match",
["get", "type"],
["Air Transport", "Airport"],
"#e6e6e6",
["Education"],
"#f7eaca",
["Medical Care"],
"#f3d8e7",
["Road Transport"],
"#f7f3ca",
["Water Transport"],
"#d8e6f3",
"#e7e7e7"
], QgsMapBoxGlStyleConverter.Color, conversion_context, 2.5, 200)
self.assertEqual(res.asExpression(),
'CASE WHEN "type" IN (\'Air Transport\',\'Airport\') THEN \'#e6e6e6\' WHEN "type" IN (\'Education\') THEN \'#f7eaca\' WHEN "type" IN (\'Medical Care\') THEN \'#f3d8e7\' WHEN "type" IN (\'Road Transport\') THEN \'#f7f3ca\' WHEN "type" IN (\'Water Transport\') THEN \'#d8e6f3\' ELSE \'#e7e7e7\' END')
self.assertEqual(default_color.name(), '#e7e7e7')
res, default_color, default_number = QgsMapBoxGlStyleConverter.parseMatchList([
"match",
["get", "type"],
["Normal"],
0.25,
["Index"],
0.5,
0.2
], QgsMapBoxGlStyleConverter.Numeric, conversion_context, 2.5, 200)
self.assertEqual(res.asExpression(),
'CASE WHEN "type" IN (\'Normal\') THEN 0.625 WHEN "type" IN (\'Index\') THEN 1.25 ELSE 0.5 END')
self.assertEqual(default_number, 0.5)
def testParseValueList(self):
conversion_context = QgsMapBoxGlStyleConversionContext()
res, default_color, default_number = QgsMapBoxGlStyleConverter.parseValueList([
"match",
["get", "type"],
["Air Transport", "Airport"],
"#e6e6e6",
["Education"],
"#f7eaca",
["Medical Care"],
"#f3d8e7",
["Road Transport"],
"#f7f3ca",
["Water Transport"],
"#d8e6f3",
"#e7e7e7"
], QgsMapBoxGlStyleConverter.Color, conversion_context, 2.5, 200)
self.assertEqual(res.asExpression(),
'CASE WHEN "type" IN (\'Air Transport\',\'Airport\') THEN \'#e6e6e6\' WHEN "type" IN (\'Education\') THEN \'#f7eaca\' WHEN "type" IN (\'Medical Care\') THEN \'#f3d8e7\' WHEN "type" IN (\'Road Transport\') THEN \'#f7f3ca\' WHEN "type" IN (\'Water Transport\') THEN \'#d8e6f3\' ELSE \'#e7e7e7\' END')
self.assertEqual(default_color.name(), '#e7e7e7')
res, default_color, default_number = QgsMapBoxGlStyleConverter.parseValueList([
"interpolate",
["linear"],
["zoom"],
10,
0.1,
15,
0.3,
18,
0.6
], QgsMapBoxGlStyleConverter.Numeric, conversion_context, 2.5, 200)
self.assertEqual(res.asExpression(),
'CASE WHEN @vector_tile_zoom > 10 AND @vector_tile_zoom <= 15 THEN scale_linear(@vector_tile_zoom,10,15,0.1,0.3) * 2.5 WHEN @vector_tile_zoom > 15 AND @vector_tile_zoom <= 18 THEN scale_linear(@vector_tile_zoom,15,18,0.3,0.6) * 2.5 WHEN @vector_tile_zoom > 18 THEN 1.5 END')
self.assertEqual(default_number, 0.25)
def testInterpolateByZoom(self):
conversion_context = QgsMapBoxGlStyleConversionContext()
prop, default_val = QgsMapBoxGlStyleConverter.parseInterpolateByZoom({'base': 1,
'stops': [[0, 11],
[150, 15],
[250, 22]]
}, conversion_context)
self.assertEqual(prop.expressionString(),
'CASE WHEN @vector_tile_zoom > 0 AND @vector_tile_zoom <= 150 THEN scale_linear(@vector_tile_zoom,0,150,11,15) WHEN @vector_tile_zoom > 150 AND @vector_tile_zoom <= 250 THEN scale_linear(@vector_tile_zoom,150,250,15,22) WHEN @vector_tile_zoom > 250 THEN 22 END')
self.assertEqual(default_val, 11.0)
prop, default_val = QgsMapBoxGlStyleConverter.parseInterpolateByZoom({'base': 1,
'stops': [[0, 11],
[150, 15]]
}, conversion_context)
self.assertEqual(prop.expressionString(),
'scale_linear(@vector_tile_zoom,0,150,11,15)')
self.assertEqual(default_val, 11.0)
prop, default_val = QgsMapBoxGlStyleConverter.parseInterpolateByZoom({'base': 2,
'stops': [[0, 11],
[150, 15]]
}, conversion_context)
self.assertEqual(prop.expressionString(),
'scale_exp(@vector_tile_zoom,0,150,11,15,2)')
self.assertEqual(default_val, 11.0)
prop, default_val = QgsMapBoxGlStyleConverter.parseInterpolateByZoom({'base': 2,
'stops': [[0, 11],
[150, 15]]
}, conversion_context, multiplier=5)
self.assertEqual(prop.expressionString(),
'scale_exp(@vector_tile_zoom,0,150,11,15,2) * 5')
self.assertEqual(default_val, 55.0)
def testInterpolateOpacityByZoom(self):
self.assertEqual(QgsMapBoxGlStyleConverter.parseInterpolateOpacityByZoom({'base': 1,
'stops': [[0, 0.1],
[150, 0.15],
[250, 0.2]]
}, 255).expressionString(),
"CASE WHEN @vector_tile_zoom < 0 THEN set_color_part(@symbol_color, 'alpha', 25.5) WHEN @vector_tile_zoom >= 0 AND @vector_tile_zoom < 150 THEN set_color_part(@symbol_color, 'alpha', scale_linear(@vector_tile_zoom,0,150,25.5,38.25)) WHEN @vector_tile_zoom >= 150 AND @vector_tile_zoom < 250 THEN set_color_part(@symbol_color, 'alpha', scale_linear(@vector_tile_zoom,150,250,38.25,51)) WHEN @vector_tile_zoom >= 250 THEN set_color_part(@symbol_color, 'alpha', 51) END")
self.assertEqual(QgsMapBoxGlStyleConverter.parseInterpolateOpacityByZoom({'base': 1,
'stops': [[0, 0.1],
[150, 0.15],
[250, 0.2]]
}, 100).expressionString(),
"CASE WHEN @vector_tile_zoom < 0 THEN set_color_part(@symbol_color, 'alpha', 10) WHEN @vector_tile_zoom >= 0 AND @vector_tile_zoom < 150 THEN set_color_part(@symbol_color, 'alpha', scale_linear(@vector_tile_zoom,0,150,10,15)) WHEN @vector_tile_zoom >= 150 AND @vector_tile_zoom < 250 THEN set_color_part(@symbol_color, 'alpha', scale_linear(@vector_tile_zoom,150,250,15,20)) WHEN @vector_tile_zoom >= 250 THEN set_color_part(@symbol_color, 'alpha', 20) END")
self.assertEqual(QgsMapBoxGlStyleConverter.parseInterpolateOpacityByZoom({'base': 1,
'stops': [[0, 0.1],
[150, 0.15]]
}, 255).expressionString(),
"set_color_part(@symbol_color, 'alpha', scale_linear(@vector_tile_zoom,0,150,25.5,38.25))")
self.assertEqual(QgsMapBoxGlStyleConverter.parseInterpolateOpacityByZoom({'base': 2,
'stops': [[0, 0.1],
[150, 0.15]]
}, 255).expressionString(),
"set_color_part(@symbol_color, 'alpha', scale_exp(@vector_tile_zoom,0,150,25.5,38.25,2))")
self.assertEqual(QgsMapBoxGlStyleConverter.parseInterpolateOpacityByZoom({'base': 2,
'stops': [[0, 0.1],
[150, 0.1]]
}, 255).expressionString(),
"set_color_part(@symbol_color, 'alpha', 25.5)")
def testInterpolateListByZoom(self):
conversion_context = QgsMapBoxGlStyleConversionContext()
prop, default_color, default_val = QgsMapBoxGlStyleConverter.parseInterpolateListByZoom([
"interpolate",
["linear"],
["zoom"],
10,
0.1,
15,
0.3,
18,
0.6
], QgsMapBoxGlStyleConverter.Opacity, conversion_context, 2)
self.assertEqual(prop.expressionString(),
"CASE WHEN @vector_tile_zoom < 10 THEN set_color_part(@symbol_color, 'alpha', 25.5) WHEN @vector_tile_zoom >= 10 AND @vector_tile_zoom < 15 THEN set_color_part(@symbol_color, 'alpha', scale_linear(@vector_tile_zoom,10,15,25.5,76.5)) WHEN @vector_tile_zoom >= 15 AND @vector_tile_zoom < 18 THEN set_color_part(@symbol_color, 'alpha', scale_linear(@vector_tile_zoom,15,18,76.5,153)) WHEN @vector_tile_zoom >= 18 THEN set_color_part(@symbol_color, 'alpha', 153) END")
prop, default_color, default_val = QgsMapBoxGlStyleConverter.parseInterpolateListByZoom([
"interpolate",
["linear"],
["zoom"],
10,
0.1,
15,
0.3,
18,
0.6
], QgsMapBoxGlStyleConverter.Numeric, conversion_context, 2)
self.assertEqual(prop.expressionString(),
"CASE WHEN @vector_tile_zoom > 10 AND @vector_tile_zoom <= 15 THEN scale_linear(@vector_tile_zoom,10,15,0.1,0.3) * 2 WHEN @vector_tile_zoom > 15 AND @vector_tile_zoom <= 18 THEN scale_linear(@vector_tile_zoom,15,18,0.3,0.6) * 2 WHEN @vector_tile_zoom > 18 THEN 1.2 END")
self.assertEqual(default_val, 0.2)
def testParseExpression(self):
conversion_context = QgsMapBoxGlStyleConversionContext()
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression([
"all",
["==", ["get", "level"], 0],
["match", ["get", "type"], ["Restricted"], True, False]
], conversion_context),
'''(level IS 0) AND ("type" = 'Restricted')''')
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression([
"match", ["get", "type"], ["Restricted"], True, False
], conversion_context),
'''"type" = 'Restricted\'''')
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression([
"match", ["get", "type"], ["Restricted"], "r", ["Local"], "l", ["Secondary", "Main"], "m", "n"
], conversion_context),
'''CASE WHEN "type" = 'Restricted' THEN 'r' WHEN "type" = 'Local' THEN 'l' WHEN "type" IN ('Secondary', 'Main') THEN 'm' ELSE 'n' END''')
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression([
"all",
["==", ["get", "level"], 0],
["match", ["get", "type"], ["Restricted", "Temporary"], True, False]
], conversion_context),
'''(level IS 0) AND ("type" IN ('Restricted', 'Temporary'))''')
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression([
"any",
["match", ["get", "level"], [1], True, False],
["match", ["get", "type"], ["Local"], True, False]
], conversion_context),
'''("level" = 1) OR ("type" = 'Local')''')
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression([
"none",
["match", ["get", "level"], [1], True, False],
["match", ["get", "type"], ["Local"], True, False]
], conversion_context),
'''NOT ("level" = 1) AND NOT ("type" = 'Local')''')
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression([
"match",
["get", "type"],
["Primary", "Motorway"],
False,
True
], conversion_context),
'''CASE WHEN "type" IN ('Primary', 'Motorway') THEN FALSE ELSE TRUE END''')
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression(["==", "_symbol", 0], conversion_context),
'''"_symbol" IS 0''')
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression(["all", ["==", "_symbol", 8], ["!in", "Viz", 3]],
conversion_context),
'''("_symbol" IS 8) AND (("Viz" IS NULL OR "Viz" NOT IN (3)))''')
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression(["get", "name"],
conversion_context),
'''"name"''')
self.assertEqual(QgsMapBoxGlStyleConverter.parseExpression(["to-string", ["get", "name"]],
conversion_context),
'''to_string("name")''')
def testConvertLabels(self):
context = QgsMapBoxGlStyleConversionContext()
style = {
"layout": {
"text-field": "{name_en}",
"text-font": [
"Open Sans Semibold",
"Arial Unicode MS Bold"
],
"text-max-width": 8,
"text-anchor": "top",
"text-size": 11,
"icon-size": 1
},
"type": "symbol",
"id": "poi_label",
"paint": {
"text-color": "#666",
"text-halo-width": 1.5,
"text-halo-color": "rgba(255,255,255,0.95)",
"text-halo-blur": 1
},
"source-layer": "poi_label"
}
renderer, has_renderer, labeling, has_labeling = QgsMapBoxGlStyleConverter.parseSymbolLayer(style, context)
self.assertFalse(has_renderer)
self.assertTrue(has_labeling)
self.assertEqual(labeling.labelSettings().fieldName, 'name_en')
self.assertFalse(labeling.labelSettings().isExpression)
style = {
"layout": {
"text-field": "name_en",
"text-font": [
"Open Sans Semibold",
"Arial Unicode MS Bold"
],
"text-max-width": 8,
"text-anchor": "top",
"text-size": 11,
"icon-size": 1
},
"type": "symbol",
"id": "poi_label",
"paint": {
"text-color": "#666",
"text-halo-width": 1.5,
"text-halo-color": "rgba(255,255,255,0.95)"
},
"source-layer": "poi_label"
}
renderer, has_renderer, labeling, has_labeling = QgsMapBoxGlStyleConverter.parseSymbolLayer(style, context)
self.assertFalse(has_renderer)
self.assertTrue(has_labeling)
self.assertEqual(labeling.labelSettings().fieldName, 'name_en')
self.assertFalse(labeling.labelSettings().isExpression)
style = {
"layout": {
"text-field": ["format",
"foo", {"font-scale": 1.2},
"bar", {"font-scale": 0.8}
],
"text-font": [
"Open Sans Semibold",
"Arial Unicode MS Bold"
],
"text-max-width": 8,
"text-anchor": "top",
"text-size": 11,
"icon-size": 1
},
"type": "symbol",
"id": "poi_label",
"paint": {
"text-color": "#666",
"text-halo-width": 1.5,
"text-halo-color": "rgba(255,255,255,0.95)",
"text-halo-blur": 1
},
"source-layer": "poi_label"
}
renderer, has_renderer, labeling, has_labeling = QgsMapBoxGlStyleConverter.parseSymbolLayer(style, context)
self.assertFalse(has_renderer)
self.assertTrue(has_labeling)
self.assertEqual(labeling.labelSettings().fieldName, 'concat("foo","bar")')
self.assertTrue(labeling.labelSettings().isExpression)
style = {
"layout": {
"text-field": "{name_en} - {name_fr}",
"text-font": [
"Open Sans Semibold",
"Arial Unicode MS Bold"
],
"text-max-width": 8,
"text-anchor": "top",
"text-size": 11,
"icon-size": 1
},
"type": "symbol",
"id": "poi_label",
"paint": {
"text-color": "#666",
"text-halo-width": 1.5,
"text-halo-color": "rgba(255,255,255,0.95)",
"text-halo-blur": 1
},
"source-layer": "poi_label"
}
renderer, has_renderer, labeling, has_labeling = QgsMapBoxGlStyleConverter.parseSymbolLayer(style, context)
self.assertFalse(has_renderer)
self.assertTrue(has_labeling)
self.assertEqual(labeling.labelSettings().fieldName, '''concat("name_en",' - ',"name_fr")''')
self.assertTrue(labeling.labelSettings().isExpression)
style = {
"layout": {
"text-field": ["format",
"{name_en} - {name_fr}", {"font-scale": 1.2},
"bar", {"font-scale": 0.8}
],
"text-font": [
"Open Sans Semibold",
"Arial Unicode MS Bold"
],
"text-max-width": 8,
"text-anchor": "top",
"text-size": 11,
"icon-size": 1
},
"type": "symbol",
"id": "poi_label",
"paint": {
"text-color": "#666",
"text-halo-width": 1.5,
"text-halo-color": "rgba(255,255,255,0.95)",
"text-halo-blur": 1
},
"source-layer": "poi_label"
}
renderer, has_renderer, labeling, has_labeling = QgsMapBoxGlStyleConverter.parseSymbolLayer(style, context)
self.assertFalse(has_renderer)
self.assertTrue(has_labeling)
self.assertEqual(labeling.labelSettings().fieldName, '''concat(concat("name_en",' - ',"name_fr"),"bar")''')
self.assertTrue(labeling.labelSettings().isExpression)
style = {
"layout": {
"text-field": ["to-string", ["get", "name"]],
"text-font": [
"Open Sans Semibold",
"Arial Unicode MS Bold"
],
"text-max-width": 8,
"text-anchor": "top",
"text-size": 11,
"icon-size": 1
},
"type": "symbol",
"id": "poi_label",
"paint": {
"text-color": "#666",
"text-halo-width": 1.5,
"text-halo-color": "rgba(255,255,255,0.95)",
"text-halo-blur": 1
},
"source-layer": "poi_label"
}
renderer, has_renderer, labeling, has_labeling = QgsMapBoxGlStyleConverter.parseSymbolLayer(style, context)
self.assertFalse(has_renderer)
self.assertTrue(has_labeling)
self.assertEqual(labeling.labelSettings().fieldName, '''to_string("name")''')
self.assertTrue(labeling.labelSettings().isExpression)
# text-transform
style = {
"layout": {
"text-field": "name_en",
"text-font": [
"Open Sans Semibold",
"Arial Unicode MS Bold"
],
"text-transform": "uppercase",
"text-max-width": 8,
"text-anchor": "top",
"text-size": 11,
"icon-size": 1
},
"type": "symbol",
"id": "poi_label",
"paint": {
"text-color": "#666",
"text-halo-width": 1.5,
"text-halo-color": "rgba(255,255,255,0.95)",
"text-halo-blur": 1
},
"source-layer": "poi_label"
}
renderer, has_renderer, labeling, has_labeling = QgsMapBoxGlStyleConverter.parseSymbolLayer(style, context)
self.assertFalse(has_renderer)
self.assertTrue(has_labeling)
self.assertEqual(labeling.labelSettings().fieldName, 'upper("name_en")')
self.assertTrue(labeling.labelSettings().isExpression)
style = {
"layout": {
"text-field": ["format",
"{name_en} - {name_fr}", {"font-scale": 1.2},
"bar", {"font-scale": 0.8}
],
"text-font": [
"Open Sans Semibold",
"Arial Unicode MS Bold"
],
"text-transform": "lowercase",
"text-max-width": 8,
"text-anchor": "top",
"text-size": 11,
"icon-size": 1
},
"type": "symbol",
"id": "poi_label",
"paint": {
"text-color": "#666",
"text-halo-width": 1.5,
"text-halo-color": "rgba(255,255,255,0.95)",
"text-halo-blur": 1
},
"source-layer": "poi_label"
}
renderer, has_renderer, labeling, has_labeling = QgsMapBoxGlStyleConverter.parseSymbolLayer(style, context)
self.assertFalse(has_renderer)
self.assertTrue(has_labeling)
self.assertEqual(labeling.labelSettings().fieldName, '''lower(concat(concat("name_en",' - ',"name_fr"),"bar"))''')
self.assertTrue(labeling.labelSettings().isExpression)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
elkingtonmcb/shogun | examples/undocumented/python_modular/structure_discrete_hmsvm_bmrm.py | 15 | 1113 | #!/usr/bin/env python
import numpy
import scipy
from scipy import io
# Load the example data once at import time so it can serve both as the
# default argument below and as the entry in parameter_list.
data_dict = scipy.io.loadmat('../data/hmsvm_data_large_integer.mat', struct_as_record=False)
parameter_list=[[data_dict]]

def structure_discrete_hmsvm_bmrm (m_data_dict=data_dict):
	"""Train and evaluate a discrete two-state HM-SVM using the BMRM solver.

	m_data_dict: mapping loaded from the hmsvm .mat file; this function
	reads the keys 'label' (per-position sequence labels) and 'signal'
	(feature matrix).
	"""
	from modshogun import RealMatrixFeatures, SequenceLabels, HMSVMModel, Sequence, TwoStateModel
	from modshogun import StructuredAccuracy, DualLibQPBMSOSVM, SMT_TWO_STATE

	labels_array = m_data_dict['label'][0]
	# Map label value -1 to 0 -- the two-state model presumably expects
	# state ids {0, 1} (confirm against the shogun docs).
	idxs = numpy.nonzero(labels_array == -1)
	labels_array[idxs] = 0

	# 250 example sequences, each of length 500, over 2 states.
	labels = SequenceLabels(labels_array, 250, 500, 2)
	features = RealMatrixFeatures(m_data_dict['signal'].astype(float), 250, 500)

	num_obs = 4 # given by the data file used

	model = HMSVMModel(features, labels, SMT_TWO_STATE, num_obs)
	# 5000.0 is the regularization constant handed to the BMRM solver.
	sosvm = DualLibQPBMSOSVM(model, labels, 5000.0)
	sosvm.train()
	#print sosvm.get_w()

	# Evaluate training accuracy on the same features.
	predicted = sosvm.apply(features)
	evaluator = StructuredAccuracy()
	acc = evaluator.evaluate(predicted, labels)
	#print('Accuracy = %.4f' % acc)

if __name__ == '__main__':
	print("Discrete HMSVM BMRM")
	structure_discrete_hmsvm_bmrm(*parameter_list[0])
| gpl-3.0 |
beezee/GAE-Django-base-app | django/contrib/gis/maps/google/zoom.py | 327 | 6628 | from django.contrib.gis.geos import GEOSGeometry, LinearRing, Polygon, Point
from django.contrib.gis.maps.google.gmap import GoogleMapException
from math import pi, sin, cos, log, exp, atan
# Constants used for degree to radian conversion, and vice-versa.
DTOR = pi / 180.
RTOD = 180. / pi
class GoogleZoom(object):
"""
GoogleZoom is a utility for performing operations related to the zoom
levels on Google Maps.
This class is inspired by the OpenStreetMap Mapnik tile generation routine
`generate_tiles.py`, and the article "How Big Is the World" (Hack #16) in
"Google Maps Hacks" by Rich Gibson and Schuyler Erle.
`generate_tiles.py` may be found at:
http://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles.py
"Google Maps Hacks" may be found at http://safari.oreilly.com/0596101619
"""
def __init__(self, num_zoom=19, tilesize=256):
"Initializes the Google Zoom object."
# Google's tilesize is 256x256, square tiles are assumed.
self._tilesize = tilesize
# The number of zoom levels
self._nzoom = num_zoom
# Initializing arrays to hold the parameters for each one of the
# zoom levels.
self._degpp = [] # Degrees per pixel
self._radpp = [] # Radians per pixel
self._npix = [] # 1/2 the number of pixels for a tile at the given zoom level
# Incrementing through the zoom levels and populating the parameter arrays.
z = tilesize # The number of pixels per zoom level.
for i in xrange(num_zoom):
# Getting the degrees and radians per pixel, and the 1/2 the number of
# for every zoom level.
self._degpp.append(z / 360.) # degrees per pixel
self._radpp.append(z / (2 * pi)) # radians per pixl
self._npix.append(z / 2) # number of pixels to center of tile
# Multiplying `z` by 2 for the next iteration.
z *= 2
def __len__(self):
"Returns the number of zoom levels."
return self._nzoom
def get_lon_lat(self, lonlat):
"Unpacks longitude, latitude from GEOS Points and 2-tuples."
if isinstance(lonlat, Point):
lon, lat = lonlat.coords
else:
lon, lat = lonlat
return lon, lat
def lonlat_to_pixel(self, lonlat, zoom):
"Converts a longitude, latitude coordinate pair for the given zoom level."
# Setting up, unpacking the longitude, latitude values and getting the
# number of pixels for the given zoom level.
lon, lat = self.get_lon_lat(lonlat)
npix = self._npix[zoom]
# Calculating the pixel x coordinate by multiplying the longitude value
# with with the number of degrees/pixel at the given zoom level.
px_x = round(npix + (lon * self._degpp[zoom]))
# Creating the factor, and ensuring that 1 or -1 is not passed in as the
# base to the logarithm. Here's why:
# if fac = -1, we'll get log(0) which is undefined;
# if fac = 1, our logarithm base will be divided by 0, also undefined.
fac = min(max(sin(DTOR * lat), -0.9999), 0.9999)
# Calculating the pixel y coordinate.
px_y = round(npix + (0.5 * log((1 + fac)/(1 - fac)) * (-1.0 * self._radpp[zoom])))
# Returning the pixel x, y to the caller of the function.
return (px_x, px_y)
def pixel_to_lonlat(self, px, zoom):
"Converts a pixel to a longitude, latitude pair at the given zoom level."
if len(px) != 2:
raise TypeError('Pixel should be a sequence of two elements.')
# Getting the number of pixels for the given zoom level.
npix = self._npix[zoom]
# Calculating the longitude value, using the degrees per pixel.
lon = (px[0] - npix) / self._degpp[zoom]
# Calculating the latitude value.
lat = RTOD * ( 2 * atan(exp((px[1] - npix)/ (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
# Returning the longitude, latitude coordinate pair.
return (lon, lat)
def tile(self, lonlat, zoom):
"""
Returns a Polygon corresponding to the region represented by a fictional
Google Tile for the given longitude/latitude pair and zoom level. This
tile is used to determine the size of a tile at the given point.
"""
# The given lonlat is the center of the tile.
delta = self._tilesize / 2
# Getting the pixel coordinates corresponding to the
# the longitude/latitude.
px = self.lonlat_to_pixel(lonlat, zoom)
# Getting the lower-left and upper-right lat/lon coordinates
# for the bounding box of the tile.
ll = self.pixel_to_lonlat((px[0]-delta, px[1]-delta), zoom)
ur = self.pixel_to_lonlat((px[0]+delta, px[1]+delta), zoom)
# Constructing the Polygon, representing the tile and returning.
return Polygon(LinearRing(ll, (ll[0], ur[1]), ur, (ur[0], ll[1]), ll), srid=4326)
def get_zoom(self, geom):
"Returns the optimal Zoom level for the given geometry."
# Checking the input type.
if not isinstance(geom, GEOSGeometry) or geom.srid != 4326:
raise TypeError('get_zoom() expects a GEOS Geometry with an SRID of 4326.')
# Getting the envelope for the geometry, and its associated width, height
# and centroid.
env = geom.envelope
env_w, env_h = self.get_width_height(env.extent)
center = env.centroid
for z in xrange(self._nzoom):
# Getting the tile at the zoom level.
tile_w, tile_h = self.get_width_height(self.tile(center, z).extent)
# When we span more than one tile, this is an approximately good
# zoom level.
if (env_w > tile_w) or (env_h > tile_h):
if z == 0:
raise GoogleMapException('Geometry width and height should not exceed that of the Earth.')
return z-1
# Otherwise, we've zoomed in to the max.
return self._nzoom-1
def get_width_height(self, extent):
"""
Returns the width and height for the given extent.
"""
# Getting the lower-left, upper-left, and upper-right
# coordinates from the extent.
ll = Point(extent[:2])
ul = Point(extent[0], extent[3])
ur = Point(extent[2:])
# Calculating the width and height.
height = ll.distance(ul)
width = ul.distance(ur)
return width, height
| bsd-3-clause |
ambikeshwar1991/sandhi-2 | module/gr36/gr-howto-write-a-block/docs/doxygen/doxyxml/base.py | 333 | 6794 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
A base class is created.
Classes based upon this are used to make more user-friendly interfaces
to the doxygen xml docs than the generated classes provide.
"""
import os
import pdb
from xml.parsers.expat import ExpatError
from generated import compound
class Base(object):
    """Base class for building friendlier interfaces on top of the generated
    doxygen xml wrapper classes.

    Parsing is lazy: the xml file identified by ``refid`` is only read when
    member data is first requested.  A shared ``top`` object keeps a registry
    of already-instantiated wrappers keyed by refid, so each compound is
    wrapped at most once.
    """

    class Duplicate(StandardError):
        # Raised when a member name resolves to more than one member.
        pass

    class NoSuchMember(StandardError):
        # Raised when a requested member cannot be found.
        pass

    class ParsingError(StandardError):
        # Raised when the xml file backing this object failed to parse.
        pass

    def __init__(self, parse_data, top=None):
        self._parsed = False
        self._error = False
        self._parse_data = parse_data
        self._members = []
        self._dict_members = {}
        self._in_category = {}
        self._data = {}
        if top is not None:
            self._xml_path = top._xml_path
        else:
            # Set up holder of references
            top = self
            self._refs = {}
            self._xml_path = parse_data
        self.top = top

    @classmethod
    def from_refid(cls, refid, top=None):
        """ Instantiate class from a refid rather than parsing object. """
        # First check to see if its already been instantiated.
        if top is not None and refid in top._refs:
            return top._refs[refid]
        # Otherwise create a new instance and set refid.
        inst = cls(None, top=top)
        inst.refid = refid
        inst.add_ref(inst)
        return inst

    @classmethod
    def from_parse_data(cls, parse_data, top=None):
        """ Instantiate from parsed xml data, reusing a registered instance
        when the data carries a refid we have already seen. """
        refid = getattr(parse_data, 'refid', None)
        if refid is not None and top is not None and refid in top._refs:
            return top._refs[refid]
        inst = cls(parse_data, top=top)
        if refid is not None:
            inst.refid = refid
            inst.add_ref(inst)
        return inst

    def add_ref(self, obj):
        # Register obj in the shared refid registry (only if it has a refid).
        if hasattr(obj, 'refid'):
            self.top._refs[obj.refid] = obj

    # Subclasses list here the wrapper classes their members may belong to.
    mem_classes = []

    def get_cls(self, mem):
        """ Return the first wrapper class from mem_classes that can parse
        `mem`; raises if none matches. """
        for cls in self.mem_classes:
            if cls.can_parse(mem):
                return cls
        raise StandardError(("Did not find a class for object '%s'." \
                                 % (mem.get_name())))

    def convert_mem(self, mem):
        """ Wrap `mem` in its matching wrapper class and register it.
        Errors are printed and swallowed (returns None in that case). """
        try:
            cls = self.get_cls(mem)
            converted = cls.from_parse_data(mem, self.top)
            if converted is None:
                raise StandardError('No class matched this object.')
            self.add_ref(converted)
            return converted
        except StandardError, e:
            print e

    @classmethod
    def includes(cls, inst):
        # Category membership test; categories are the wrapper classes.
        return isinstance(inst, cls)

    @classmethod
    def can_parse(cls, obj):
        # Overridden by subclasses that know how to parse `obj`.
        return False

    def _parse(self):
        self._parsed = True

    def _get_dict_members(self, cat=None):
        """
        For given category a dictionary is returned mapping member names to
        members of that category. For names that are duplicated the name is
        mapped to None.
        """
        self.confirm_no_error()
        if cat not in self._dict_members:
            new_dict = {}
            for mem in self.in_category(cat):
                if mem.name() not in new_dict:
                    new_dict[mem.name()] = mem
                else:
                    new_dict[mem.name()] = self.Duplicate
            self._dict_members[cat] = new_dict
        return self._dict_members[cat]

    def in_category(self, cat):
        """ Return (and cache) members belonging to category `cat`;
        `None` means all members. """
        self.confirm_no_error()
        if cat is None:
            return self._members
        if cat not in self._in_category:
            self._in_category[cat] = [mem for mem in self._members
                                      if cat.includes(mem)]
        return self._in_category[cat]

    def get_member(self, name, cat=None):
        """ Look up a member by (possibly '::'-qualified) name, descending
        recursively through namespaces/classes. """
        self.confirm_no_error()
        # Check if it's in a namespace or class.
        bits = name.split('::')
        first = bits[0]
        rest = '::'.join(bits[1:])
        member = self._get_dict_members(cat).get(first, self.NoSuchMember)
        # Raise any errors that are returned.
        if member in set([self.NoSuchMember, self.Duplicate]):
            raise member()
        if rest:
            return member.get_member(rest, cat=cat)
        return member

    def has_member(self, name, cat=None):
        """ True if a member of the given name (and category) exists.
        Note: a Duplicate error still propagates to the caller. """
        try:
            mem = self.get_member(name, cat=cat)
            return True
        except self.NoSuchMember:
            return False

    def data(self):
        self.confirm_no_error()
        return self._data

    def members(self):
        self.confirm_no_error()
        return self._members

    def process_memberdefs(self):
        """ Collect the memberdef entries of all sections into _members. """
        mdtss = []
        for sec in self._retrieved_data.compounddef.sectiondef:
            mdtss += sec.memberdef
        # At the moment we lose all information associated with sections.
        # Sometimes a memberdef is in several sectiondef.
        # We make sure we don't get duplicates here.
        uniques = set([])
        for mem in mdtss:
            converted = self.convert_mem(mem)
            pair = (mem.name, mem.__class__)
            if pair not in uniques:
                uniques.add(pair)
                self._members.append(converted)

    def retrieve_data(self):
        """ Parse the xml file belonging to this refid; on failure the
        object is flagged as errored rather than raising immediately. """
        filename = os.path.join(self._xml_path, self.refid + '.xml')
        try:
            self._retrieved_data = compound.parse(filename)
        except ExpatError:
            print('Error in xml in file %s' % filename)
            self._error = True
            self._retrieved_data = None

    def check_parsed(self):
        # Lazily trigger parsing on first use.
        if not self._parsed:
            self._parse()

    def confirm_no_error(self):
        self.check_parsed()
        if self._error:
            raise self.ParsingError()

    def error(self):
        self.check_parsed()
        return self._error

    def name(self):
        # first see if we can do it without processing.
        if self._parse_data is not None:
            return self._parse_data.name
        self.check_parsed()
        return self._retrieved_data.compounddef.name
| gpl-3.0 |
michalliu/watchman | tests/integration/WatchmanTapTests.py | 18 | 4275 | import unittest
import os
import os.path
import subprocess
import glob
import re
import WatchmanInstance
import signal
import Interrupt
import tempfile
class TapExeTestCase(unittest.TestCase):
def __init__(self, executable):
super(TapExeTestCase, self).__init__()
self.executable = executable
def id(self):
return self.executable
def getCommandArgs(self):
return [self.executable]
def run(self, result=None):
if result is not None:
result.setFlavour(None, None)
return super(TapExeTestCase, self).run(result)
def runTest(self):
env = os.environ.copy()
env['WATCHMAN_SOCK'] = WatchmanInstance.getSharedInstance().getSockPath()
dotted = os.path.normpath(self.id()).replace(os.sep, '.').replace(
'tests.integration.', '').replace('.php', '')
env['TMPDIR'] = os.path.join(tempfile.tempdir, dotted)
if os.name != 'nt' and len(env['TMPDIR']) > 94:
self.fail('temp dir name %s is too long for unix domain sockets' %
env['TMPDIR'])
os.mkdir(env['TMPDIR'])
env['TMP'] = env['TMPDIR']
env['TEMP'] = env['TMPDIR']
env['IN_PYTHON_HARNESS'] = '1'
proc = subprocess.Popen(
self.getCommandArgs(),
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
status = proc.poll()
if status == -signal.SIGINT:
Interrupt.setInterrupted()
self.fail('Interrupted by SIGINT')
return
if status != 0:
self.fail("Exit status %d\n%s\n%s\n" % (status, stdout, stderr))
return
res_pat = re.compile('^(not )?ok (\d+) (.*)$')
diag_pat = re.compile('^# (.*)$')
plan_pat = re.compile('^1\.\.(\d+)$')
# Now parse the TAP output
lines = stdout.replace('\r\n', '\n').split('\n')
last_test = 0
diags = None
plan = None
for line in lines:
res = plan_pat.match(line)
if res:
plan = int(res.group(1))
continue
res = res_pat.match(line)
if res:
this_test = int(res.group(2))
if this_test != last_test + 1:
print(stdout, stderr)
self.fail('Expected test number %d, got %d' % (
last_test + 1,
this_test))
last_test = this_test
if res.group(1) == 'not ':
# Failed
msg = line
if diags is not None:
msg = msg + '\n' + '\n'.join(diags)
self.fail(msg)
failed
diags = None
continue
res = diag_pat.match(line)
if res:
if diags is None:
diags = []
diags.append(res.group(1))
continue
if line != '':
print('Invalid tap output from %s: %s' %
(self.id(), line))
if plan is None:
self.fail('no plan was observed')
else:
self.assertEqual(last_test, plan,
'%s planned %d but executed %s tests' % (
self.id(),
plan,
last_test))
class PhpTestCase(TapExeTestCase):
    """TAP test case whose producer is a PHP script run via the phprunner."""

    def __init__(self, phpfile):
        # Deliberately skips TapExeTestCase.__init__ (which requires an
        # executable) and initializes the plain TestCase machinery instead.
        super(TapExeTestCase, self).__init__()
        self.phpfile = phpfile

    def id(self):
        return self.phpfile

    def getCommandArgs(self):
        """Build the php invocation that feeds the script to phprunner."""
        interpreter = ['php', '-d register_argc_argv=1']
        return interpreter + ['tests/integration/phprunner', self.phpfile]
def discover(filematcher, path):
    """Collect TAP-emitting test executables matching *path* into a suite.

    Candidates rejected by *filematcher*, or whose basename begins with
    '.' or '_', are ignored.  ``*.php`` files are wrapped in PhpTestCase,
    anything else in TapExeTestCase.
    """
    suite = unittest.TestSuite()
    for candidate in glob.glob(path):
        if not filematcher(candidate):
            continue
        base = os.path.basename(candidate)
        if base.startswith(('.', '_')):
            continue
        if candidate.endswith('.php'):
            suite.addTest(PhpTestCase(candidate))
        else:
            suite.addTest(TapExeTestCase(candidate))
    return suite
| apache-2.0 |
Smart-Torvy/torvy-home-assistant | homeassistant/components/scene/homeassistant.py | 23 | 2523 | """
Allow users to set and activate scenes.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/scene/
"""
from collections import namedtuple
from homeassistant.components.scene import Scene
from homeassistant.const import (
ATTR_ENTITY_ID, STATE_OFF, STATE_ON)
from homeassistant.core import State
from homeassistant.helpers.state import reproduce_state
# This platform needs the group component to be loaded first.
DEPENDENCIES = ['group']

# State reported by a scene while it is being applied.
STATE = 'scening'

# Config key holding the per-entity desired states.
CONF_ENTITIES = "entities"

# Parsed scene description: display name plus a dict of entity_id -> State.
SceneConfig = namedtuple('SceneConfig', ['name', 'states'])
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup home assistant scene entries."""
    raw_scenes = config.get("states")

    # A single scene may be given as a bare mapping; normalize to a list.
    if not isinstance(raw_scenes, list):
        raw_scenes = [raw_scenes]

    add_devices(HomeAssistantScene(hass, _process_config(entry))
                for entry in raw_scenes)

    return True
def _process_config(scene_config):
    """Process passed in config into a format to work with."""
    name = scene_config.get('name')
    states = {}
    c_entities = dict(scene_config.get(CONF_ENTITIES, {}))

    for entity_id, entity_conf in c_entities.items():
        if isinstance(entity_conf, dict):
            attributes = entity_conf.copy()
            state = attributes.pop('state', None)
        else:
            state = entity_conf
            attributes = {}

        # YAML translates 'on' to a boolean
        # http://yaml.org/type/bool.html
        if isinstance(state, bool):
            state = STATE_ON if state else STATE_OFF
        else:
            state = str(state)

        states[entity_id.lower()] = State(entity_id, state, attributes)

    return SceneConfig(name, states)
class HomeAssistantScene(Scene):
    """Representation of a scene: a named group of desired entity states."""

    def __init__(self, hass, scene_config):
        """Store the hass reference and the parsed SceneConfig."""
        self.hass = hass
        self.scene_config = scene_config

    @property
    def name(self):
        """Name of the scene."""
        return self.scene_config.name

    @property
    def device_state_attributes(self):
        """Expose the entity ids that this scene controls."""
        return {ATTR_ENTITY_ID: list(self.scene_config.states.keys())}

    def activate(self):
        """Apply the stored states to their entities."""
        reproduce_state(self.hass, self.scene_config.states.values(), True)
| mit |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/object_detection/matchers/argmax_matcher_test.py | 21 | 9704 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.matchers.argmax_matcher."""
import numpy as np
import tensorflow as tf
from object_detection.matchers import argmax_matcher
class ArgMaxMatcherTest(tf.test.TestCase):
  """Tests for ArgMaxMatcher.

  Each test builds a small similarity matrix, runs the matcher inside a
  session and checks the resulting matched/unmatched column partition and
  the row matched to each matched column.
  """

  def test_return_correct_matches_with_default_thresholds(self):
    """With no threshold, every column is matched to its argmax row."""
    similarity = np.array([[1., 1, 1, 3, 1],
                           [2, -1, 2, 0, 4],
                           [3, 0, -1, 0, 0]])
    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
    expected_matched_rows = np.array([2, 0, 1, 0, 1])

    sim = tf.constant(similarity)
    match = matcher.match(sim)
    matched_cols = match.matched_column_indices()
    matched_rows = match.matched_row_indices()
    unmatched_cols = match.unmatched_column_indices()

    with self.test_session() as sess:
      res_matched_cols = sess.run(matched_cols)
      res_matched_rows = sess.run(matched_rows)
      res_unmatched_cols = sess.run(unmatched_cols)
      self.assertAllEqual(res_matched_rows, expected_matched_rows)
      self.assertAllEqual(res_matched_cols, np.arange(similarity.shape[1]))
      self.assertEmpty(res_unmatched_cols)

  def test_return_correct_matches_with_empty_rows(self):
    """With zero rows nothing can be matched; all columns are unmatched."""
    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
    sim = 0.2*tf.ones([0, 5])
    match = matcher.match(sim)
    unmatched_cols = match.unmatched_column_indices()

    with self.test_session() as sess:
      res_unmatched_cols = sess.run(unmatched_cols)
      self.assertAllEqual(res_unmatched_cols, np.arange(5))

  def test_return_correct_matches_with_matched_threshold(self):
    """Columns whose best similarity is below the threshold stay unmatched."""
    similarity = np.array([[1, 1, 1, 3, 1],
                           [2, -1, 2, 0, 4],
                           [3, 0, -1, 0, 0]], dtype=np.int32)

    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3)
    expected_matched_cols = np.array([0, 3, 4])
    expected_matched_rows = np.array([2, 0, 1])
    expected_unmatched_cols = np.array([1, 2])

    sim = tf.constant(similarity)
    match = matcher.match(sim)
    matched_cols = match.matched_column_indices()
    matched_rows = match.matched_row_indices()
    unmatched_cols = match.unmatched_column_indices()

    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      res_matched_cols = sess.run(matched_cols)
      res_matched_rows = sess.run(matched_rows)
      res_unmatched_cols = sess.run(unmatched_cols)
      self.assertAllEqual(res_matched_rows, expected_matched_rows)
      self.assertAllEqual(res_matched_cols, expected_matched_cols)
      self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)

  def test_return_correct_matches_with_matched_and_unmatched_threshold(self):
    """Columns between the two thresholds are ignored (neither matched nor
    unmatched)."""
    similarity = np.array([[1, 1, 1, 3, 1],
                           [2, -1, 2, 0, 4],
                           [3, 0, -1, 0, 0]], dtype=np.int32)

    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
                                           unmatched_threshold=2)
    expected_matched_cols = np.array([0, 3, 4])
    expected_matched_rows = np.array([2, 0, 1])
    expected_unmatched_cols = np.array([1])  # col 2 has too high maximum val

    sim = tf.constant(similarity)
    match = matcher.match(sim)
    matched_cols = match.matched_column_indices()
    matched_rows = match.matched_row_indices()
    unmatched_cols = match.unmatched_column_indices()

    with self.test_session() as sess:
      res_matched_cols = sess.run(matched_cols)
      res_matched_rows = sess.run(matched_rows)
      res_unmatched_cols = sess.run(unmatched_cols)
      self.assertAllEqual(res_matched_rows, expected_matched_rows)
      self.assertAllEqual(res_matched_cols, expected_matched_cols)
      self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)

  def test_return_correct_matches_negatives_lower_than_unmatched_false(self):
    """With negatives_lower_than_unmatched=False, the in-between band is
    treated as unmatched instead of ignored."""
    similarity = np.array([[1, 1, 1, 3, 1],
                           [2, -1, 2, 0, 4],
                           [3, 0, -1, 0, 0]], dtype=np.int32)

    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
                                           unmatched_threshold=2,
                                           negatives_lower_than_unmatched=False)
    expected_matched_cols = np.array([0, 3, 4])
    expected_matched_rows = np.array([2, 0, 1])
    expected_unmatched_cols = np.array([2])  # col 1 has too low maximum val

    sim = tf.constant(similarity)
    match = matcher.match(sim)
    matched_cols = match.matched_column_indices()
    matched_rows = match.matched_row_indices()
    unmatched_cols = match.unmatched_column_indices()

    with self.test_session() as sess:
      res_matched_cols = sess.run(matched_cols)
      res_matched_rows = sess.run(matched_rows)
      res_unmatched_cols = sess.run(unmatched_cols)
      self.assertAllEqual(res_matched_rows, expected_matched_rows)
      self.assertAllEqual(res_matched_cols, expected_matched_cols)
      self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)

  def test_return_correct_matches_unmatched_row_not_using_force_match(self):
    """Without force matching, a row may end up with no matched column."""
    similarity = np.array([[1, 1, 1, 3, 1],
                           [-1, 0, -2, -2, -1],
                           [3, 0, -1, 2, 0]], dtype=np.int32)

    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
                                           unmatched_threshold=2)
    expected_matched_cols = np.array([0, 3])
    expected_matched_rows = np.array([2, 0])
    expected_unmatched_cols = np.array([1, 2, 4])

    sim = tf.constant(similarity)
    match = matcher.match(sim)
    matched_cols = match.matched_column_indices()
    matched_rows = match.matched_row_indices()
    unmatched_cols = match.unmatched_column_indices()

    with self.test_session() as sess:
      res_matched_cols = sess.run(matched_cols)
      res_matched_rows = sess.run(matched_rows)
      res_unmatched_cols = sess.run(unmatched_cols)
      self.assertAllEqual(res_matched_rows, expected_matched_rows)
      self.assertAllEqual(res_matched_cols, expected_matched_cols)
      self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)

  def test_return_correct_matches_unmatched_row_while_using_force_match(self):
    """With force_match_for_each_row, every row claims its best column even
    below the matched threshold."""
    similarity = np.array([[1, 1, 1, 3, 1],
                           [-1, 0, -2, -2, -1],
                           [3, 0, -1, 2, 0]], dtype=np.int32)

    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
                                           unmatched_threshold=2,
                                           force_match_for_each_row=True)
    expected_matched_cols = np.array([0, 1, 3])
    expected_matched_rows = np.array([2, 1, 0])
    expected_unmatched_cols = np.array([2, 4])  # col 2 has too high max val

    sim = tf.constant(similarity)
    match = matcher.match(sim)
    matched_cols = match.matched_column_indices()
    matched_rows = match.matched_row_indices()
    unmatched_cols = match.unmatched_column_indices()

    with self.test_session() as sess:
      res_matched_cols = sess.run(matched_cols)
      res_matched_rows = sess.run(matched_rows)
      res_unmatched_cols = sess.run(unmatched_cols)
      self.assertAllEqual(res_matched_rows, expected_matched_rows)
      self.assertAllEqual(res_matched_cols, expected_matched_cols)
      self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)

  def test_valid_arguments_corner_case(self):
    """Equal matched/unmatched thresholds are accepted."""
    argmax_matcher.ArgMaxMatcher(matched_threshold=1,
                                 unmatched_threshold=1)

  def test_invalid_arguments_corner_case_negatives_lower_than_thres_false(self):
    """Equal thresholds are invalid when negatives are not below unmatched."""
    with self.assertRaises(ValueError):
      argmax_matcher.ArgMaxMatcher(matched_threshold=1,
                                   unmatched_threshold=1,
                                   negatives_lower_than_unmatched=False)

  def test_invalid_arguments_no_matched_threshold(self):
    """An unmatched threshold alone (no matched threshold) is invalid."""
    with self.assertRaises(ValueError):
      argmax_matcher.ArgMaxMatcher(matched_threshold=None,
                                   unmatched_threshold=4)

  def test_invalid_arguments_unmatched_thres_larger_than_matched_thres(self):
    """unmatched_threshold must not exceed matched_threshold."""
    with self.assertRaises(ValueError):
      argmax_matcher.ArgMaxMatcher(matched_threshold=1,
                                   unmatched_threshold=2)

  def test_set_values_using_indicator(self):
    """_set_values_using_indicator overwrites the indicated entries."""
    input_a = np.array([3, 4, 5, 1, 4, 3, 2])
    expected_b = np.array([3, 0, 0, 1, 0, 3, 2])  # Set a>3 to 0
    expected_c = np.array(
        [3., 4., 5., -1., 4., 3., -1.])  # Set a<3 to -1. Float32

    idxb_ = input_a > 3
    idxc_ = input_a < 3

    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
    a = tf.constant(input_a)
    idxb = tf.constant(idxb_)
    idxc = tf.constant(idxc_)
    b = matcher._set_values_using_indicator(a, idxb, 0)
    c = matcher._set_values_using_indicator(tf.cast(a, tf.float32), idxc, -1)
    with self.test_session() as sess:
      res_b = sess.run(b)
      res_c = sess.run(c)
      self.assertAllEqual(res_b, expected_b)
      self.assertAllEqual(res_c, expected_c)


if __name__ == '__main__':
  tf.test.main()
| bsd-2-clause |
Attorney-Fee/django-wiki | wiki/conf/settings.py | 1 | 6506 | # -*- coding: utf-8 -*-
from django.conf import settings as django_settings
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext as _
# Should urls be case sensitive?
URL_CASE_SENSITIVE = getattr( django_settings, 'WIKI_URL_CASE_SENSITIVE', False )

# Non-configurable (at the moment)
APP_LABEL = 'wiki'
WIKI_LANGUAGE = 'markdown'

# The editor class to use -- maybe a 3rd party or your own...? You can always
# extend the built-in editor and customize it....
EDITOR = getattr( django_settings, 'WIKI_EDITOR', 'wiki.editors.markitup.MarkItUp' )

# Default keyword arguments handed to python-markdown; user-supplied
# WIKI_MARKDOWN_KWARGS entries are merged on top and may override these.
MARKDOWN_KWARGS = {
    'extensions': ['footnotes', 'headerid', 'extra',],
    'safe_mode': 'replace',
    'extension_configs': {'toc': {'title': _('Table of Contents')}},
}
MARKDOWN_KWARGS.update(getattr( django_settings, 'WIKI_MARKDOWN_KWARGS', {} ))

# This slug is used in URLPath if an article has been deleted. The children of the
# URLPath of that article are moved to lost and found. They keep their permissions
# and all their content.
LOST_AND_FOUND_SLUG = getattr( django_settings, 'WIKI_LOST_AND_FOUND_SLUG', 'lost-and-found' )

# Do we want to log IPs?
LOG_IPS_ANONYMOUS = getattr( django_settings, 'WIKI_LOG_IPS_ANONYMOUS', True )
LOG_IPS_USERS = getattr( django_settings, 'WIKI_LOG_IPS_USERS', False )

####################################
# PERMISSIONS AND ACCOUNT HANDLING #
####################################

# NB! None of these callables need to handle anonymous users as they are treated
# in separate settings...

# A function returning True/False if a user has permission to assign
# permissions on an article
# Relevance: changing owner and group membership
CAN_ASSIGN = getattr( django_settings, 'WIKI_CAN_ASSIGN', lambda article, user: user.has_perm( 'wiki.assign' ) )

# A function returning True/False if the owner of an article has permission to change
# the group to a user's own groups
# Relevance: changing group membership
# NOTE(review): looked up as 'WIKI_ASSIGN_OWNER', not 'WIKI_CAN_ASSIGN_OWNER'
# like the naming pattern of the other callables suggests -- confirm before
# renaming, as existing deployments may rely on the current key.
CAN_ASSIGN_OWNER = getattr( django_settings, 'WIKI_ASSIGN_OWNER', lambda article, user: False )

# A function returning True/False if a user has permission to change
# read/write access for groups and others
CAN_CHANGE_PERMISSIONS = getattr( django_settings, 'WIKI_CAN_CHANGE_PERMISSIONS', lambda article, user: article.owner == user or user.has_perm( 'wiki.assign' ) )

# Specifies if a user has access to soft deletion of articles
CAN_DELETE = getattr( django_settings, 'WIKI_CAN_DELETE', lambda article, user: article.can_write( user = user ) )

# A function returning True/False if a user has permission to change
# moderate, ie. lock articles and permanently delete content.
CAN_MODERATE = getattr( django_settings, 'WIKI_CAN_MODERATE', lambda article, user: user.has_perm( 'wiki.moderate' ) )

# A function returning True/False if a user has permission to create
# new groups and users for the wiki.
CAN_ADMIN = getattr( django_settings, 'WIKI_CAN_ADMIN', lambda article, user: user.has_perm( 'wiki.admin' ) )

# Treat anonymous (non logged in) users as the "other" user group
ANONYMOUS = getattr( django_settings, 'WIKI_ANONYMOUS', True )

# Globally enable write access for anonymous users, if true anonymous users will be treated
# as the others_write boolean field on models.Article.
ANONYMOUS_WRITE = getattr( django_settings, 'WIKI_ANONYMOUS_WRITE', False )

# Globally enable create access for anonymous users
# Defaults to ANONYMOUS_WRITE.
ANONYMOUS_CREATE = getattr( django_settings, 'WIKI_ANONYMOUS_CREATE', ANONYMOUS_WRITE )

# Default setting to allow anonymous users upload access (used in
# plugins.attachments and plugins.images).
ANONYMOUS_UPLOAD = getattr( django_settings, 'WIKI_ANONYMOUS_UPLOAD', False )

# Sign up, login and logout views should be accessible
ACCOUNT_HANDLING = getattr( django_settings, 'WIKI_ACCOUNT_HANDLING', True )

# When the wiki handles accounts itself, point at its own auth views;
# otherwise fall back to project-wide URLs.
if ACCOUNT_HANDLING:
    LOGIN_URL = reverse_lazy("wiki:login")
    LOGOUT_URL = reverse_lazy("wiki:logout")
    SIGNUP_URL = reverse_lazy("wiki:signup")
else:
    LOGIN_URL = getattr( django_settings, "LOGIN_URL", "/" )
    LOGOUT_URL = getattr( django_settings, "LOGOUT_URL", "/" )
    SIGNUP_URL = getattr( django_settings, "WIKI_SIGNUP_URL", "/" )

##################
# OTHER SETTINGS #
##################

# Maximum amount of children to display in a menu before going "+more"
# NEVER set this to 0 as it will wrongly inform the user that there are no
# children and for instance that an article can be safely deleted.
SHOW_MAX_CHILDREN = getattr( django_settings, 'WIKI_SHOW_MAX_CHILDREN', 20 )

USE_BOOTSTRAP_SELECT_WIDGET = getattr( django_settings, 'WIKI_USE_BOOTSTRAP_SELECT_WIDGET', True )

#: dottedname of class used to construct urlpatterns for wiki.
#:
#: Default is wiki.urls.WikiURLPatterns. To customize urls or view handlers,
#: you can derive from this.
URL_CONFIG_CLASS = getattr( django_settings, 'WIKI_URL_CONFIG_CLASS', 'wiki.urls.WikiURLPatterns' )

# Seconds of timeout before renewing article cache. Articles are automatically
# renewed whenever an edit occurs but article content may be generated from
# other objects that are changed.
CACHE_TIMEOUT = getattr( django_settings, 'WIKI_CACHE_TIMEOUT', 600 )

###################
# SPAM PROTECTION #
###################

# Maximum allowed revisions per hour for any given user or IP
REVISIONS_PER_HOUR = getattr( django_settings, 'WIKI_REVISIONS_PER_HOUR', 60 )

# Maximum allowed revisions per lookback window (minutes) for any given user or IP
REVISIONS_PER_MINUTES = getattr( django_settings, 'WIKI_REVISIONS_PER_MINUTES', 5 )

# Maximum allowed revisions per hour for any given anonymous user or IP
REVISIONS_PER_HOUR_ANONYMOUS = getattr( django_settings, 'WIKI_REVISIONS_PER_HOUR_ANONYMOUS', 10 )

# Maximum allowed revisions per lookback window (minutes) for anonymous users
REVISIONS_PER_MINUTES_ANONYMOUS = getattr( django_settings, 'WIKI_REVISIONS_PER_MINUTES_ANONYMOUS', 2 )

# Number of minutes for looking up REVISIONS_PER_MINUTES and REVISIONS_PER_MINUTES_ANONYMOUS
REVISIONS_MINUTES_LOOKBACK = getattr( django_settings, 'WIKI_REVISIONS_MINUTES_LOOKBACK', 2 )
###########
# STORAGE #
###########

from django.core.files.storage import default_storage

# File storage backend used for attachments/images; defaults to Django's
# project-wide default storage.
STORAGE_BACKEND = getattr(django_settings, 'WIKI_STORAGE_BACKEND', default_storage)

####################
# PLANNED SETTINGS #
####################

# Maximum revisions to keep for an article, 0=unlimited
MAX_REVISIONS = getattr( django_settings, 'WIKI_MAX_REVISIONS', 100 )

# Maximum age of revisions in days, 0=unlimited
# Consistency fix: every other setting in this module is read under a
# 'WIKI_' prefix; this one historically consulted the unprefixed name.
# Prefer the prefixed key, falling back to the legacy unprefixed one so
# existing deployments keep working.
MAX_REVISION_AGE = getattr(
    django_settings, 'WIKI_MAX_REVISION_AGE',
    getattr( django_settings, 'MAX_REVISION_AGE', 365 )
)
| gpl-3.0 |
Yukarumya/Yukarum-Redfoxes | python/mozbuild/mozbuild/frontend/emitter.py | 1 | 61700 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, unicode_literals
import itertools
import logging
import os
import traceback
import sys
import time
from collections import defaultdict, OrderedDict
from mach.mixin.logging import LoggingMixin
from mozbuild.util import (
memoize,
OrderedDefaultDict,
)
import mozpack.path as mozpath
import mozinfo
import pytoml
from .data import (
AndroidAssetsDirs,
AndroidExtraPackages,
AndroidExtraResDirs,
AndroidResDirs,
BaseSources,
BrandingFiles,
ChromeManifestEntry,
ConfigFileSubstitution,
ContextWrapped,
Defines,
DirectoryTraversal,
Exports,
FinalTargetFiles,
FinalTargetPreprocessedFiles,
GeneratedEventWebIDLFile,
GeneratedFile,
GeneratedSources,
GeneratedWebIDLFile,
ExampleWebIDLInterface,
ExternalStaticLibrary,
ExternalSharedLibrary,
HostDefines,
HostLibrary,
HostProgram,
HostRustProgram,
HostSimpleProgram,
HostSources,
InstallationTarget,
IPDLFile,
JARManifest,
Library,
Linkable,
LocalInclude,
ObjdirFiles,
ObjdirPreprocessedFiles,
PerSourceFlag,
PreprocessedTestWebIDLFile,
PreprocessedWebIDLFile,
Program,
RustLibrary,
RustProgram,
SharedLibrary,
SimpleProgram,
Sources,
StaticLibrary,
TestHarnessFiles,
TestWebIDLFile,
TestManifest,
UnifiedSources,
VariablePassthru,
WebIDLFile,
XPIDLFile,
)
from mozpack.chrome.manifest import (
ManifestBinaryComponent,
Manifest,
)
from .reader import SandboxValidationError
from ..testing import (
TEST_MANIFESTS,
REFTEST_FLAVORS,
WEB_PLATFORM_TESTS_FLAVORS,
SupportFilesConverter,
)
from .context import (
Context,
SourcePath,
ObjDirPath,
Path,
SubContext,
TemplateContext,
)
from mozbuild.base import ExecutionSummary
class TreeMetadataEmitter(LoggingMixin):
"""Converts the executed mozbuild files into data structures.
This is a bridge between reader.py and data.py. It takes what was read by
reader.BuildReader and converts it into the classes defined in the data
module.
"""
def __init__(self, config):
    """Initialize the emitter with a build ``config``.

    Seeds mozinfo from the object directory and sets up the bookkeeping
    containers that emit() and _emit_libs_derived() fill in while
    moz.build contexts are processed.
    """
    self.populate_logger()
    self.config = config
    mozinfo.find_and_update_from_json(config.topobjdir)
    # Python 2.6 doesn't allow unicode keys to be used for keyword
    # arguments. This gross hack works around the problem until we
    # rid ourselves of 2.6.
    self.info = {}
    for k, v in mozinfo.info.items():
        if isinstance(k, unicode):
            k = k.encode('ascii')
        self.info[k] = v
    # Libraries by name, binaries by program name, and pending
    # (context, obj, variable) USE_LIBS requests resolved later in
    # _emit_libs_derived().
    self._libs = OrderedDefaultDict(list)
    self._binaries = OrderedDict()
    self._linkage = []
    self._static_linking_shared = set()
    self._crate_verified_local = set()
    self._crate_directories = dict()
    # Keep track of external paths (third party build systems), starting
    # from what we run a subconfigure in. We'll eliminate some directories
    # as we traverse them with moz.build (e.g. js/src).
    subconfigures = os.path.join(self.config.topobjdir, 'subconfigures')
    paths = []
    if os.path.exists(subconfigures):
        # Use a context manager so the file handle is closed promptly
        # rather than leaked until garbage collection.
        with open(subconfigures) as f:
            paths = f.read().splitlines()
    self._external_paths = set(mozpath.normsep(d) for d in paths)
    # Accounting surfaced by summary().
    self._emitter_time = 0.0
    self._object_count = 0
    self._test_files_converter = SupportFilesConverter()
def summary(self):
    """Return an ExecutionSummary describing how much work was done."""
    message = ('Processed into {object_count:d} build config descriptors in '
               '{execution_time:.2f}s')
    return ExecutionSummary(message,
                            execution_time=self._emitter_time,
                            object_count=self._object_count)
def emit(self, output):
    """Convert the BuildReader output into data structures.
    The return value from BuildReader.read_topsrcdir() (a generator) is
    typically fed into this function.
    """
    # All main Contexts seen, keyed by objdir; _emit_libs_derived() uses
    # them for error reporting.
    contexts = {}
    def emit_objs(objs):
        # Yield each object while keeping the running count for summary().
        for o in objs:
            self._object_count += 1
            yield o
    for out in output:
        # Nothing in sub-contexts is currently of interest to us. Filter
        # them all out.
        if isinstance(out, SubContext):
            continue
        if isinstance(out, Context):
            # Keep all contexts around, we will need them later.
            contexts[out.objdir] = out
            start = time.time()
            # We need to expand the generator for the timings to work.
            objs = list(self.emit_from_context(out))
            self._emitter_time += time.time() - start
            for o in emit_objs(objs): yield o
        else:
            raise Exception('Unhandled output type: %s' % type(out))
    # Don't emit Linkable objects when COMPILE_ENVIRONMENT is not set
    if self.config.substs.get('COMPILE_ENVIRONMENT'):
        start = time.time()
        objs = list(self._emit_libs_derived(contexts))
        self._emitter_time += time.time() - start
        for o in emit_objs(objs): yield o
def _emit_libs_derived(self, contexts):
    """Resolve library linkage once all contexts have been read.

    Performs FINAL_LIBRARY and USE_LIBS resolution, verifies
    static-links-shared constraints, propagates LIBRARY_DEFINES, and
    finally yields every library and binary object collected so far.
    `contexts` maps objdir -> Context and is used for error reporting.
    """
    # First do FINAL_LIBRARY linkage.
    for lib in (l for libs in self._libs.values() for l in libs):
        if not isinstance(lib, (StaticLibrary, RustLibrary)) or not lib.link_into:
            continue
        if lib.link_into not in self._libs:
            raise SandboxValidationError(
                'FINAL_LIBRARY ("%s") does not match any LIBRARY_NAME'
                % lib.link_into, contexts[lib.objdir])
        candidates = self._libs[lib.link_into]
        # When there are multiple candidates, but all are in the same
        # directory and have a different type, we want all of them to
        # have the library linked. The typical usecase is when building
        # both a static and a shared library in a directory, and having
        # that as a FINAL_LIBRARY.
        if len(set(type(l) for l in candidates)) == len(candidates) and \
                len(set(l.objdir for l in candidates)) == 1:
            for c in candidates:
                c.link_library(lib)
        else:
            raise SandboxValidationError(
                'FINAL_LIBRARY ("%s") matches a LIBRARY_NAME defined in '
                'multiple places:\n %s' % (lib.link_into,
                '\n '.join(l.objdir for l in candidates)),
                contexts[lib.objdir])
    # Next, USE_LIBS linkage.
    for context, obj, variable in self._linkage:
        self._link_libraries(context, obj, variable)
    def recurse_refs(lib):
        # Walk everything referencing `lib`, following references through
        # static libraries transitively.
        for o in lib.refs:
            yield o
            if isinstance(o, StaticLibrary):
                for q in recurse_refs(o):
                    yield q
    # Check that all static libraries refering shared libraries in
    # USE_LIBS are linked into a shared library or program.
    for lib in self._static_linking_shared:
        if all(isinstance(o, StaticLibrary) for o in recurse_refs(lib)):
            shared_libs = sorted(l.basename for l in lib.linked_libraries
                                 if isinstance(l, SharedLibrary))
            raise SandboxValidationError(
                'The static "%s" library is not used in a shared library '
                'or a program, but USE_LIBS contains the following shared '
                'library names:\n %s\n\nMaybe you can remove the '
                'static "%s" library?' % (lib.basename,
                '\n '.join(shared_libs), lib.basename),
                contexts[lib.objdir])
    # Propagate LIBRARY_DEFINES to all child libraries recursively.
    def propagate_defines(outerlib, defines):
        outerlib.lib_defines.update(defines)
        for lib in outerlib.linked_libraries:
            # Propagate defines only along FINAL_LIBRARY paths, not USE_LIBS
            # paths.
            if (isinstance(lib, StaticLibrary) and
                    lib.link_into == outerlib.basename):
                propagate_defines(lib, defines)
    for lib in (l for libs in self._libs.values() for l in libs):
        if isinstance(lib, Library):
            propagate_defines(lib, lib.lib_defines)
        yield lib
    for obj in self._binaries.values():
        yield obj
# Map from a Linkable's KIND ('host' or 'target') to the moz.build
# variable that names libraries of that kind; used in the error messages
# of _link_libraries() below.
LIBRARY_NAME_VAR = {
    'host': 'HOST_LIBRARY_NAME',
    'target': 'LIBRARY_NAME',
}
def _link_libraries(self, context, obj, variable):
    """Add linkage declarations to a given object.

    Resolves each entry of ``context[variable]`` (a USE_LIBS-style list)
    to a known library and records the linkage on ``obj``; also links
    system libraries from the matching OS_LIBS-style variable.
    """
    assert isinstance(obj, Linkable)
    for path in context.get(variable, []):
        # A 'static:' prefix forces the static flavor of a library;
        # only honored for target (non-host) linkables.
        force_static = path.startswith('static:') and obj.KIND == 'target'
        if force_static:
            path = path[7:]
        name = mozpath.basename(path)
        dir = mozpath.dirname(path)
        # Start from all known libraries of the same KIND with this name,
        # then narrow by directory if one was given.
        candidates = [l for l in self._libs[name] if l.KIND == obj.KIND]
        if dir:
            # A leading '/' means relative to the top objdir; otherwise
            # relative to this object's objdir.
            if dir.startswith('/'):
                dir = mozpath.normpath(
                    mozpath.join(obj.topobjdir, dir[1:]))
            else:
                dir = mozpath.normpath(
                    mozpath.join(obj.objdir, dir))
            dir = mozpath.relpath(dir, obj.topobjdir)
            candidates = [l for l in candidates if l.relobjdir == dir]
            if not candidates:
                # If the given directory is under one of the external
                # (third party) paths, use a fake library reference to
                # there.
                for d in self._external_paths:
                    if dir.startswith('%s/' % d):
                        candidates = [self._get_external_library(dir, name,
                            force_static)]
                        break
            if not candidates:
                raise SandboxValidationError(
                    '%s contains "%s", but there is no "%s" %s in %s.'
                    % (variable, path, name,
                    self.LIBRARY_NAME_VAR[obj.KIND], dir), context)
        if len(candidates) > 1:
            # If there's more than one remaining candidate, it could be
            # that there are instances for the same library, in static and
            # shared form.
            libs = {}
            for l in candidates:
                key = mozpath.join(l.relobjdir, l.basename)
                if force_static:
                    if isinstance(l, StaticLibrary):
                        libs[key] = l
                else:
                    # Prefer the shared flavor when both exist at the
                    # same location.
                    if key in libs and isinstance(l, SharedLibrary):
                        libs[key] = l
                    if key not in libs:
                        libs[key] = l
            candidates = libs.values()
        if force_static and not candidates:
            if dir:
                raise SandboxValidationError(
                    '%s contains "static:%s", but there is no static '
                    '"%s" %s in %s.' % (variable, path, name,
                    self.LIBRARY_NAME_VAR[obj.KIND], dir), context)
            raise SandboxValidationError(
                '%s contains "static:%s", but there is no static "%s" '
                '%s in the tree' % (variable, name, name,
                self.LIBRARY_NAME_VAR[obj.KIND]), context)
        if not candidates:
            raise SandboxValidationError(
                '%s contains "%s", which does not match any %s in the tree.'
                % (variable, path, self.LIBRARY_NAME_VAR[obj.KIND]),
                context)
        elif len(candidates) > 1:
            paths = (mozpath.join(l.relativedir, 'moz.build')
                     for l in candidates)
            raise SandboxValidationError(
                '%s contains "%s", which matches a %s defined in multiple '
                'places:\n %s' % (variable, path,
                self.LIBRARY_NAME_VAR[obj.KIND],
                '\n '.join(paths)), context)
        elif force_static and not isinstance(candidates[0], StaticLibrary):
            raise SandboxValidationError(
                '%s contains "static:%s", but there is only a shared "%s" '
                'in %s. You may want to add FORCE_STATIC_LIB=True in '
                '%s/moz.build, or remove "static:".' % (variable, path,
                name, candidates[0].relobjdir, candidates[0].relobjdir),
                context)
        elif isinstance(obj, StaticLibrary) and isinstance(candidates[0],
                SharedLibrary):
            # Record for the later check that a static library linking a
            # shared one ultimately ends up in a shared lib or program.
            self._static_linking_shared.add(obj)
        obj.link_library(candidates[0])
    # Link system libraries from OS_LIBS/HOST_OS_LIBS.
    for lib in context.get(variable.replace('USE', 'OS'), []):
        obj.link_system_library(lib)
@memoize
def _get_external_library(self, dir, name, force_static):
    """Build a fake library object for a third-party (external) directory.

    The returned ExternalStaticLibrary/ExternalSharedLibrary carries a
    context more or less truthful about where the external library is.
    """
    fake_context = Context(config=self.config)
    fake_context.add_source(mozpath.join(self.config.topsrcdir, dir, 'dummy'))
    lib_cls = ExternalStaticLibrary if force_static else ExternalSharedLibrary
    return lib_cls(fake_context, name)
def _parse_cargo_file(self, context):
    """Parse the Cargo.toml file in context and return a Python object
    representation of it. Raise a SandboxValidationError if the Cargo.toml
    file does not exist. Return a tuple of (config, cargo_file)."""
    toml_path = mozpath.join(context.srcdir, 'Cargo.toml')
    if not os.path.exists(toml_path):
        raise SandboxValidationError(
            'No Cargo.toml file found in %s' % toml_path, context)
    with open(toml_path, 'r') as fh:
        parsed = pytoml.load(fh)
    return parsed, toml_path
def _verify_deps(self, context, crate_dir, crate_name, dependencies, description='Dependency'):
    """Verify that a crate's dependencies all specify local paths."""
    for dep_crate_name, values in dependencies.iteritems():
        # A bare version string carries no path information at all.
        if isinstance(values, (str, unicode)):
            raise SandboxValidationError(
                '%s %s of crate %s does not list a path' % (description, dep_crate_name, crate_name),
                context)
        dep_path = values.get('path', None)
        if not dep_path:
            raise SandboxValidationError(
                '%s %s of crate %s does not list a path' % (description, dep_crate_name, crate_name),
                context)
        # An absolute path usually means somebody committed a local
        # development setup; reject it.
        if os.path.isabs(dep_path):
            raise SandboxValidationError(
                '%s %s of crate %s has a non-relative path' % (description, dep_crate_name, crate_name),
                context)
        resolved = mozpath.join(context.config.topsrcdir, crate_dir, dep_path)
        if not os.path.exists(resolved):
            raise SandboxValidationError(
                '%s %s of crate %s refers to a non-existent path' % (description, dep_crate_name, crate_name),
                context)
def _rust_library(self, context, libname, static_args):
    """Create a RustLibrary for ``libname`` after validating its Cargo.toml.

    Validates that the Cargo package name matches ``libname``, that the
    crate is built as a staticlib, that both dev and release profiles
    set ``panic = "abort"``, and that RUST_LIBRARY_FEATURES contains no
    duplicates. Raises SandboxValidationError on any violation.
    """
    # We need to note any Rust library for linking purposes.
    config, cargo_file = self._parse_cargo_file(context)
    crate_name = config['package']['name']
    if crate_name != libname:
        raise SandboxValidationError(
            'library %s does not match Cargo.toml-defined package %s' % (libname, crate_name),
            context)
    # Check that the [lib.crate-type] field is correct
    lib_section = config.get('lib', None)
    if not lib_section:
        raise SandboxValidationError(
            'Cargo.toml for %s has no [lib] section' % libname,
            context)
    crate_type = lib_section.get('crate-type', None)
    if not crate_type:
        raise SandboxValidationError(
            'Can\'t determine a crate-type for %s from Cargo.toml' % libname,
            context)
    # crate-type is a list in Cargo.toml; only the first entry is used.
    crate_type = crate_type[0]
    if crate_type != 'staticlib':
        raise SandboxValidationError(
            'crate-type %s is not permitted for %s' % (crate_type, libname),
            context)
    # Check that the [profile.{dev,release}.panic] field is "abort"
    profile_section = config.get('profile', None)
    if not profile_section:
        raise SandboxValidationError(
            'Cargo.toml for %s has no [profile] section' % libname,
            context)
    for profile_name in ['dev', 'release']:
        profile = profile_section.get(profile_name, None)
        if not profile:
            raise SandboxValidationError(
                'Cargo.toml for %s has no [profile.%s] section' % (libname, profile_name),
                context)
        panic = profile.get('panic', None)
        if panic != 'abort':
            raise SandboxValidationError(
                ('Cargo.toml for %s does not specify `panic = "abort"`'
                 ' in [profile.%s] section') % (libname, profile_name),
                context)
    dependencies = set(config.get('dependencies', {}).iterkeys())
    features = context.get('RUST_LIBRARY_FEATURES', [])
    unique_features = set(features)
    if len(features) != len(unique_features):
        raise SandboxValidationError(
            'features for %s should not contain duplicates: %s' % (libname, features),
            context)
    return RustLibrary(context, libname, cargo_file, crate_type,
                       dependencies, features, **static_args)
def _handle_linkables(self, context, passthru, generated_files):
    """Process program and library definitions from ``context``.

    Registers PROGRAM/HOST_PROGRAM, Rust programs, SIMPLE_PROGRAMS and
    libraries (host, shared, static, Rust) in self._binaries/self._libs,
    records pending USE_LIBS linkage requests, and yields source-file,
    manifest and generated-file objects for the linkables defined here.
    ``generated_files`` is both read (SYMBOLS_FILE validation) and
    extended (shared library names).
    """
    linkables = []
    host_linkables = []
    def add_program(prog, var):
        # HOST_* variables describe host (build-machine) linkables.
        if var.startswith('HOST_'):
            host_linkables.append(prog)
        else:
            linkables.append(prog)
    def check_unique_binary(program, kind):
        if program in self._binaries:
            raise SandboxValidationError(
                'Cannot use "%s" as %s name, '
                'because it is already used in %s' % (program, kind,
                self._binaries[program].relativedir), context)
    for kind, cls in [('PROGRAM', Program), ('HOST_PROGRAM', HostProgram)]:
        program = context.get(kind)
        if program:
            check_unique_binary(program, kind)
            self._binaries[program] = cls(context, program)
            self._linkage.append((context, self._binaries[program],
                kind.replace('PROGRAM', 'USE_LIBS')))
            add_program(self._binaries[program], kind)
    all_rust_programs = []
    for kind, cls in [('RUST_PROGRAMS', RustProgram),
                      ('HOST_RUST_PROGRAMS', HostRustProgram)]:
        programs = context[kind]
        if not programs:
            continue
        all_rust_programs.append((programs, kind, cls))
    # Verify Rust program definitions.
    if all_rust_programs:
        # (dropped a stray trailing semicolon that used to be here)
        config, cargo_file = self._parse_cargo_file(context)
        bin_section = config.get('bin', None)
        if not bin_section:
            raise SandboxValidationError(
                'Cargo.toml in %s has no [bin] section' % context.srcdir,
                context)
        defined_binaries = {b['name'] for b in bin_section}
        for programs, kind, cls in all_rust_programs:
            for program in programs:
                if program not in defined_binaries:
                    raise SandboxValidationError(
                        'Cannot find Cargo.toml definition for %s' % program,
                        context)
                check_unique_binary(program, kind)
                self._binaries[program] = cls(context, program, cargo_file)
    for kind, cls in [
            ('SIMPLE_PROGRAMS', SimpleProgram),
            ('CPP_UNIT_TESTS', SimpleProgram),
            ('HOST_SIMPLE_PROGRAMS', HostSimpleProgram)]:
        for program in context[kind]:
            if program in self._binaries:
                raise SandboxValidationError(
                    'Cannot use "%s" in %s, '
                    'because it is already used in %s' % (program, kind,
                    self._binaries[program].relativedir), context)
            self._binaries[program] = cls(context, program,
                is_unit_test=kind == 'CPP_UNIT_TESTS')
            self._linkage.append((context, self._binaries[program],
                'HOST_USE_LIBS' if kind == 'HOST_SIMPLE_PROGRAMS'
                else 'USE_LIBS'))
            add_program(self._binaries[program], kind)
    host_libname = context.get('HOST_LIBRARY_NAME')
    libname = context.get('LIBRARY_NAME')
    if host_libname:
        if host_libname == libname:
            raise SandboxValidationError('LIBRARY_NAME and '
                'HOST_LIBRARY_NAME must have a different value', context)
        lib = HostLibrary(context, host_libname)
        self._libs[host_libname].append(lib)
        self._linkage.append((context, lib, 'HOST_USE_LIBS'))
        host_linkables.append(lib)
    final_lib = context.get('FINAL_LIBRARY')
    if not libname and final_lib:
        # If no LIBRARY_NAME is given, create one.
        libname = context.relsrcdir.replace('/', '_')
    static_lib = context.get('FORCE_STATIC_LIB')
    shared_lib = context.get('FORCE_SHARED_LIB')
    static_name = context.get('STATIC_LIBRARY_NAME')
    shared_name = context.get('SHARED_LIBRARY_NAME')
    is_framework = context.get('IS_FRAMEWORK')
    is_component = context.get('IS_COMPONENT')
    soname = context.get('SONAME')
    lib_defines = context.get('LIBRARY_DEFINES')
    shared_args = {}
    static_args = {}
    # FINAL_LIBRARY implies a static library linked into the named final
    # library; it is mutually exclusive with the FORCE_*/IS_* knobs.
    if final_lib:
        if static_lib:
            raise SandboxValidationError(
                'FINAL_LIBRARY implies FORCE_STATIC_LIB. '
                'Please remove the latter.', context)
        if shared_lib:
            raise SandboxValidationError(
                'FINAL_LIBRARY conflicts with FORCE_SHARED_LIB. '
                'Please remove one.', context)
        if is_framework:
            raise SandboxValidationError(
                'FINAL_LIBRARY conflicts with IS_FRAMEWORK. '
                'Please remove one.', context)
        if is_component:
            raise SandboxValidationError(
                'FINAL_LIBRARY conflicts with IS_COMPONENT. '
                'Please remove one.', context)
        static_args['link_into'] = final_lib
        static_lib = True
    if libname:
        if is_component:
            if static_lib:
                raise SandboxValidationError(
                    'IS_COMPONENT conflicts with FORCE_STATIC_LIB. '
                    'Please remove one.', context)
            shared_lib = True
            shared_args['variant'] = SharedLibrary.COMPONENT
        if is_framework:
            if soname:
                raise SandboxValidationError(
                    'IS_FRAMEWORK conflicts with SONAME. '
                    'Please remove one.', context)
            shared_lib = True
            shared_args['variant'] = SharedLibrary.FRAMEWORK
        # Default to a static library when nothing forces a flavor.
        if not static_lib and not shared_lib:
            static_lib = True
        if static_name:
            if not static_lib:
                raise SandboxValidationError(
                    'STATIC_LIBRARY_NAME requires FORCE_STATIC_LIB',
                    context)
            static_args['real_name'] = static_name
        if shared_name:
            if not shared_lib:
                raise SandboxValidationError(
                    'SHARED_LIBRARY_NAME requires FORCE_SHARED_LIB',
                    context)
            shared_args['real_name'] = shared_name
        if soname:
            if not shared_lib:
                raise SandboxValidationError(
                    'SONAME requires FORCE_SHARED_LIB', context)
            shared_args['soname'] = soname
        if context.get('NO_EXPAND_LIBS'):
            if not static_lib:
                raise SandboxValidationError(
                    'NO_EXPAND_LIBS can only be set for static libraries.',
                    context)
            static_args['no_expand_lib'] = True
        # When building both flavors, the two on-disk names must not
        # collide with each other or with LIBRARY_NAME.
        if shared_lib and static_lib:
            if not static_name and not shared_name:
                raise SandboxValidationError(
                    'Both FORCE_STATIC_LIB and FORCE_SHARED_LIB are True, '
                    'but neither STATIC_LIBRARY_NAME or '
                    'SHARED_LIBRARY_NAME is set. At least one is required.',
                    context)
            if static_name and not shared_name and static_name == libname:
                raise SandboxValidationError(
                    'Both FORCE_STATIC_LIB and FORCE_SHARED_LIB are True, '
                    'but STATIC_LIBRARY_NAME is the same as LIBRARY_NAME, '
                    'and SHARED_LIBRARY_NAME is unset. Please either '
                    'change STATIC_LIBRARY_NAME or LIBRARY_NAME, or set '
                    'SHARED_LIBRARY_NAME.', context)
            if shared_name and not static_name and shared_name == libname:
                raise SandboxValidationError(
                    'Both FORCE_STATIC_LIB and FORCE_SHARED_LIB are True, '
                    'but SHARED_LIBRARY_NAME is the same as LIBRARY_NAME, '
                    'and STATIC_LIBRARY_NAME is unset. Please either '
                    'change SHARED_LIBRARY_NAME or LIBRARY_NAME, or set '
                    'STATIC_LIBRARY_NAME.', context)
            if shared_name and static_name and shared_name == static_name:
                raise SandboxValidationError(
                    'Both FORCE_STATIC_LIB and FORCE_SHARED_LIB are True, '
                    'but SHARED_LIBRARY_NAME is the same as '
                    'STATIC_LIBRARY_NAME. Please change one of them.',
                    context)
        symbols_file = context.get('SYMBOLS_FILE')
        if symbols_file:
            if not shared_lib:
                raise SandboxValidationError(
                    'SYMBOLS_FILE can only be used with a SHARED_LIBRARY.',
                    context)
            if context.get('DEFFILE') or context.get('LD_VERSION_SCRIPT'):
                raise SandboxValidationError(
                    'SYMBOLS_FILE cannot be used along DEFFILE or '
                    'LD_VERSION_SCRIPT.', context)
            if isinstance(symbols_file, SourcePath):
                if not os.path.exists(symbols_file.full_path):
                    raise SandboxValidationError(
                        'Path specified in SYMBOLS_FILE does not exist: %s '
                        '(resolved to %s)' % (symbols_file,
                        symbols_file.full_path), context)
                shared_args['symbols_file'] = True
            else:
                if symbols_file.target_basename not in generated_files:
                    raise SandboxValidationError(
                        ('Objdir file specified in SYMBOLS_FILE not in ' +
                         'GENERATED_FILES: %s') % (symbols_file,), context)
                shared_args['symbols_file'] = symbols_file.target_basename
        if shared_lib:
            lib = SharedLibrary(context, libname, **shared_args)
            self._libs[libname].append(lib)
            self._linkage.append((context, lib, 'USE_LIBS'))
            linkables.append(lib)
            generated_files.add(lib.lib_name)
            if is_component and not context['NO_COMPONENTS_MANIFEST']:
                yield ChromeManifestEntry(context,
                    'components/components.manifest',
                    ManifestBinaryComponent('components', lib.lib_name))
            if symbols_file and isinstance(symbols_file, SourcePath):
                # Source symbols files are preprocessed into the real
                # symbols file via the generate_symbols_file action.
                script = mozpath.join(
                    mozpath.dirname(mozpath.dirname(__file__)),
                    'action', 'generate_symbols_file.py')
                defines = ()
                if lib.defines:
                    defines = lib.defines.get_defines()
                yield GeneratedFile(context, script,
                    'generate_symbols_file', lib.symbols_file,
                    [symbols_file], defines)
        if static_lib:
            is_rust_library = context.get('IS_RUST_LIBRARY')
            if is_rust_library:
                lib = self._rust_library(context, libname, static_args)
            else:
                lib = StaticLibrary(context, libname, **static_args)
            self._libs[libname].append(lib)
            self._linkage.append((context, lib, 'USE_LIBS'))
            linkables.append(lib)
        if lib_defines:
            if not libname:
                raise SandboxValidationError('LIBRARY_DEFINES needs a '
                    'LIBRARY_NAME to take effect', context)
            lib.lib_defines.update(lib_defines)
    # Only emit sources if we have linkables defined in the same context.
    # Note the linkables are not emitted in this function, but much later,
    # after aggregation (because of e.g. USE_LIBS processing).
    if not (linkables or host_linkables):
        return
    sources = defaultdict(list)
    gen_sources = defaultdict(list)
    all_flags = {}
    for symbol in ('SOURCES', 'HOST_SOURCES', 'UNIFIED_SOURCES'):
        srcs = sources[symbol]
        gen_srcs = gen_sources[symbol]
        context_srcs = context.get(symbol, [])
        for f in context_srcs:
            full_path = f.full_path
            if isinstance(f, SourcePath):
                srcs.append(full_path)
            else:
                assert isinstance(f, Path)
                gen_srcs.append(full_path)
            if symbol == 'SOURCES':
                flags = context_srcs[f]
                if flags:
                    all_flags[full_path] = flags
            if isinstance(f, SourcePath) and not os.path.exists(full_path):
                raise SandboxValidationError('File listed in %s does not '
                    'exist: \'%s\'' % (symbol, full_path), context)
    # HOST_SOURCES and UNIFIED_SOURCES only take SourcePaths, so
    # there should be no generated source in here
    assert not gen_sources['HOST_SOURCES']
    assert not gen_sources['UNIFIED_SOURCES']
    no_pgo = context.get('NO_PGO')
    no_pgo_sources = [f for f, flags in all_flags.iteritems()
                      if flags.no_pgo]
    if no_pgo:
        if no_pgo_sources:
            raise SandboxValidationError('NO_PGO and SOURCES[...].no_pgo '
                'cannot be set at the same time', context)
        passthru.variables['NO_PROFILE_GUIDED_OPTIMIZE'] = no_pgo
    if no_pgo_sources:
        passthru.variables['NO_PROFILE_GUIDED_OPTIMIZE'] = no_pgo_sources
    # A map from "canonical suffixes" for a particular source file
    # language to the range of suffixes associated with that language.
    #
    # We deliberately don't list the canonical suffix in the suffix list
    # in the definition; we'll add it in programmatically after defining
    # things.
    suffix_map = {
        '.s': set(['.asm']),
        '.c': set(),
        '.m': set(),
        '.mm': set(),
        '.cpp': set(['.cc', '.cxx']),
        '.S': set(),
    }
    # The inverse of the above, mapping suffixes to their canonical suffix.
    canonicalized_suffix_map = {}
    for suffix, alternatives in suffix_map.iteritems():
        alternatives.add(suffix)
        for a in alternatives:
            canonicalized_suffix_map[a] = suffix
    def canonical_suffix_for_file(f):
        return canonicalized_suffix_map[mozpath.splitext(f)[1]]
    # A map from moz.build variables to the canonical suffixes of file
    # kinds that can be listed therein.
    all_suffixes = list(suffix_map.keys())
    varmap = dict(
        SOURCES=(Sources, GeneratedSources, all_suffixes),
        HOST_SOURCES=(HostSources, None, ['.c', '.mm', '.cpp']),
        UNIFIED_SOURCES=(UnifiedSources, None, ['.c', '.mm', '.cpp']),
    )
    # Track whether there are any C++ source files.
    # Technically this won't do the right thing for SIMPLE_PROGRAMS in
    # a directory with mixed C and C++ source, but it's not that important.
    cxx_sources = defaultdict(bool)
    for variable, (klass, gen_klass, suffixes) in varmap.items():
        allowed_suffixes = set().union(*[suffix_map[s] for s in suffixes])
        # First ensure that we haven't been given filetypes that we don't
        # recognize.
        for f in itertools.chain(sources[variable], gen_sources[variable]):
            ext = mozpath.splitext(f)[1]
            if ext not in allowed_suffixes:
                raise SandboxValidationError(
                    '%s has an unknown file type.' % f, context)
        for srcs, cls in ((sources[variable], klass),
                          (gen_sources[variable], gen_klass)):
            # Now sort the files to let groupby work.
            sorted_files = sorted(srcs, key=canonical_suffix_for_file)
            for canonical_suffix, files in itertools.groupby(
                    sorted_files, canonical_suffix_for_file):
                if canonical_suffix in ('.cpp', '.mm'):
                    cxx_sources[variable] = True
                arglist = [context, list(files), canonical_suffix]
                if (variable.startswith('UNIFIED_') and
                        'FILES_PER_UNIFIED_FILE' in context):
                    arglist.append(context['FILES_PER_UNIFIED_FILE'])
                obj = cls(*arglist)
                yield obj
    for f, flags in all_flags.iteritems():
        if flags.flags:
            # (an unused `ext = mozpath.splitext(f)[1]` local was removed
            # here; the suffix is not needed for per-source flags)
            yield PerSourceFlag(context, f, flags.flags)
    # If there are any C++ sources, set all the linkables defined here
    # to require the C++ linker.
    for vars, linkable_items in ((('SOURCES', 'UNIFIED_SOURCES'), linkables),
                                 (('HOST_SOURCES',), host_linkables)):
        for var in vars:
            if cxx_sources[var]:
                for l in linkable_items:
                    l.cxx_link = True
                break
def emit_from_context(self, context):
"""Convert a Context to tree metadata objects.
This is a generator of mozbuild.frontend.data.ContextDerived instances.
"""
# We only want to emit an InstallationTarget if one of the consulted
# variables is defined. Later on, we look up FINAL_TARGET, which has
# the side-effect of populating it. So, we need to do this lookup
# early.
if any(k in context for k in ('FINAL_TARGET', 'XPI_NAME', 'DIST_SUBDIR')):
yield InstallationTarget(context)
# We always emit a directory traversal descriptor. This is needed by
# the recursive make backend.
for o in self._emit_directory_traversal_from_context(context): yield o
for obj in self._process_xpidl(context):
yield obj
# Proxy some variables as-is until we have richer classes to represent
# them. We should aim to keep this set small because it violates the
# desired abstraction of the build definition away from makefiles.
passthru = VariablePassthru(context)
varlist = [
'ALLOW_COMPILER_WARNINGS',
'ANDROID_APK_NAME',
'ANDROID_APK_PACKAGE',
'ANDROID_GENERATED_RESFILES',
'DISABLE_STL_WRAPPING',
'EXTRA_DSO_LDOPTS',
'RCFILE',
'RESFILE',
'RCINCLUDE',
'DEFFILE',
'WIN32_EXE_LDFLAGS',
'LD_VERSION_SCRIPT',
'USE_EXTENSION_MANIFEST',
'NO_JS_MANIFEST',
'HAS_MISC_RULE',
]
for v in varlist:
if v in context and context[v]:
passthru.variables[v] = context[v]
if context.config.substs.get('OS_TARGET') == 'WINNT' and \
context['DELAYLOAD_DLLS']:
context['LDFLAGS'].extend([('-DELAYLOAD:%s' % dll)
for dll in context['DELAYLOAD_DLLS']])
context['OS_LIBS'].append('delayimp')
for v in ['CFLAGS', 'CXXFLAGS', 'CMFLAGS', 'CMMFLAGS', 'ASFLAGS',
'LDFLAGS', 'HOST_CFLAGS', 'HOST_CXXFLAGS']:
if v in context and context[v]:
passthru.variables['MOZBUILD_' + v] = context[v]
# NO_VISIBILITY_FLAGS is slightly different
if context['NO_VISIBILITY_FLAGS']:
passthru.variables['VISIBILITY_FLAGS'] = ''
if isinstance(context, TemplateContext) and context.template == 'Gyp':
passthru.variables['IS_GYP_DIR'] = True
dist_install = context['DIST_INSTALL']
if dist_install is True:
passthru.variables['DIST_INSTALL'] = True
elif dist_install is False:
passthru.variables['NO_DIST_INSTALL'] = True
# Ideally, this should be done in templates, but this is difficult at
# the moment because USE_STATIC_LIBS can be set after a template
# returns. Eventually, with context-based templates, it will be
# possible.
if (context.config.substs.get('OS_ARCH') == 'WINNT' and
not context.config.substs.get('GNU_CC')):
use_static_lib = (context.get('USE_STATIC_LIBS') and
not context.config.substs.get('MOZ_ASAN'))
rtl_flag = '-MT' if use_static_lib else '-MD'
if (context.config.substs.get('MOZ_DEBUG') and
not context.config.substs.get('MOZ_NO_DEBUG_RTL')):
rtl_flag += 'd'
# Use a list, like MOZBUILD_*FLAGS variables
passthru.variables['RTL_FLAGS'] = [rtl_flag]
generated_files = set()
for obj in self._process_generated_files(context):
for f in obj.outputs:
generated_files.add(f)
yield obj
for path in context['CONFIGURE_SUBST_FILES']:
sub = self._create_substitution(ConfigFileSubstitution, context,
path)
generated_files.add(str(sub.relpath))
yield sub
defines = context.get('DEFINES')
if defines:
yield Defines(context, defines)
host_defines = context.get('HOST_DEFINES')
if host_defines:
yield HostDefines(context, host_defines)
simple_lists = [
('GENERATED_EVENTS_WEBIDL_FILES', GeneratedEventWebIDLFile),
('GENERATED_WEBIDL_FILES', GeneratedWebIDLFile),
('IPDL_SOURCES', IPDLFile),
('PREPROCESSED_TEST_WEBIDL_FILES', PreprocessedTestWebIDLFile),
('PREPROCESSED_WEBIDL_FILES', PreprocessedWebIDLFile),
('TEST_WEBIDL_FILES', TestWebIDLFile),
('WEBIDL_FILES', WebIDLFile),
('WEBIDL_EXAMPLE_INTERFACES', ExampleWebIDLInterface),
]
for context_var, klass in simple_lists:
for name in context.get(context_var, []):
yield klass(context, name)
for local_include in context.get('LOCAL_INCLUDES', []):
if (not isinstance(local_include, ObjDirPath) and
not os.path.exists(local_include.full_path)):
raise SandboxValidationError('Path specified in LOCAL_INCLUDES '
'does not exist: %s (resolved to %s)' % (local_include,
local_include.full_path), context)
yield LocalInclude(context, local_include)
for obj in self._handle_linkables(context, passthru, generated_files):
yield obj
generated_files.update(['%s%s' % (k, self.config.substs.get('BIN_SUFFIX', '')) for k in self._binaries.keys()])
components = []
for var, cls in (
('BRANDING_FILES', BrandingFiles),
('EXPORTS', Exports),
('FINAL_TARGET_FILES', FinalTargetFiles),
('FINAL_TARGET_PP_FILES', FinalTargetPreprocessedFiles),
('OBJDIR_FILES', ObjdirFiles),
('OBJDIR_PP_FILES', ObjdirPreprocessedFiles),
('TEST_HARNESS_FILES', TestHarnessFiles),
):
all_files = context.get(var)
if not all_files:
continue
if dist_install is False and var != 'TEST_HARNESS_FILES':
raise SandboxValidationError(
'%s cannot be used with DIST_INSTALL = False' % var,
context)
has_prefs = False
has_resources = False
for base, files in all_files.walk():
if var == 'TEST_HARNESS_FILES' and not base:
raise SandboxValidationError(
'Cannot install files to the root of TEST_HARNESS_FILES', context)
if base == 'components':
components.extend(files)
if base == 'defaults/pref':
has_prefs = True
if mozpath.split(base)[0] == 'res':
has_resources = True
for f in files:
if ((var == 'FINAL_TARGET_PP_FILES' or
var == 'OBJDIR_PP_FILES') and
not isinstance(f, SourcePath)):
raise SandboxValidationError(
('Only source directory paths allowed in ' +
'%s: %s')
% (var, f,), context)
if not isinstance(f, ObjDirPath):
path = f.full_path
if '*' not in path and not os.path.exists(path):
raise SandboxValidationError(
'File listed in %s does not exist: %s'
% (var, path), context)
else:
# TODO: Bug 1254682 - The '/' check is to allow
# installing files generated from other directories,
# which is done occasionally for tests. However, it
# means we don't fail early if the file isn't actually
# created by the other moz.build file.
if f.target_basename not in generated_files and '/' not in f:
raise SandboxValidationError(
('Objdir file listed in %s not in ' +
'GENERATED_FILES: %s') % (var, f), context)
# Addons (when XPI_NAME is defined) and Applications (when
# DIST_SUBDIR is defined) use a different preferences directory
# (default/preferences) from the one the GRE uses (defaults/pref).
# Hence, we move the files from the latter to the former in that
# case.
if has_prefs and (context.get('XPI_NAME') or
context.get('DIST_SUBDIR')):
all_files.defaults.preferences += all_files.defaults.pref
del all_files.defaults._children['pref']
if has_resources and (context.get('DIST_SUBDIR') or
context.get('XPI_NAME')):
raise SandboxValidationError(
'RESOURCES_FILES cannot be used with DIST_SUBDIR or '
'XPI_NAME.', context)
yield cls(context, all_files)
# Check for manifest declarations in EXTRA_{PP_,}COMPONENTS.
if any(e.endswith('.js') for e in components) and \
not any(e.endswith('.manifest') for e in components) and \
not context.get('NO_JS_MANIFEST', False):
raise SandboxValidationError('A .js component was specified in EXTRA_COMPONENTS '
'or EXTRA_PP_COMPONENTS without a matching '
'.manifest file. See '
'https://developer.mozilla.org/en/XPCOM/XPCOM_changes_in_Gecko_2.0 .',
context);
for c in components:
if c.endswith('.manifest'):
yield ChromeManifestEntry(context, 'chrome.manifest',
Manifest('components',
mozpath.basename(c)))
for obj in self._process_test_manifests(context):
yield obj
for obj in self._process_jar_manifests(context):
yield obj
for name, jar in context.get('JAVA_JAR_TARGETS', {}).items():
yield ContextWrapped(context, jar)
for name, data in context.get('ANDROID_ECLIPSE_PROJECT_TARGETS', {}).items():
yield ContextWrapped(context, data)
if context.get('USE_YASM') is True:
yasm = context.config.substs.get('YASM')
if not yasm:
raise SandboxValidationError('yasm is not available', context)
passthru.variables['AS'] = yasm
passthru.variables['ASFLAGS'] = context.config.substs.get('YASM_ASFLAGS')
passthru.variables['AS_DASH_C_FLAG'] = ''
for (symbol, cls) in [
('ANDROID_RES_DIRS', AndroidResDirs),
('ANDROID_EXTRA_RES_DIRS', AndroidExtraResDirs),
('ANDROID_ASSETS_DIRS', AndroidAssetsDirs)]:
paths = context.get(symbol)
if not paths:
continue
for p in paths:
if isinstance(p, SourcePath) and not os.path.isdir(p.full_path):
raise SandboxValidationError('Directory listed in '
'%s is not a directory: \'%s\'' %
(symbol, p.full_path), context)
yield cls(context, paths)
android_extra_packages = context.get('ANDROID_EXTRA_PACKAGES')
if android_extra_packages:
yield AndroidExtraPackages(context, android_extra_packages)
if passthru.variables:
yield passthru
def _create_substitution(self, cls, context, path):
sub = cls(context)
sub.input_path = '%s.in' % path.full_path
sub.output_path = path.translated
sub.relpath = path
return sub
def _process_xpidl(self, context):
    # XPIDL source files get processed and turned into .h and .xpt files.
    # If there are multiple XPIDL files in a directory, they get linked
    # together into a final .xpt, which has the name defined by
    # XPIDL_MODULE.
    xpidl_module = context['XPIDL_MODULE']

    # XPIDL_MODULE and XPIDL_SOURCES must be declared together: the
    # module names the linked .xpt, the sources feed it.
    if context['XPIDL_SOURCES'] and not xpidl_module:
        raise SandboxValidationError('XPIDL_MODULE must be defined if '
            'XPIDL_SOURCES is defined.', context)

    if xpidl_module and not context['XPIDL_SOURCES']:
        raise SandboxValidationError('XPIDL_MODULE cannot be defined '
            'unless there are XPIDL_SOURCES', context)

    # DIST_INSTALL = False has no effect on XPIDL files; warn rather than
    # silently ignore the author's intent.
    if context['XPIDL_SOURCES'] and context['DIST_INSTALL'] is False:
        self.log(logging.WARN, 'mozbuild_warning', dict(
            path=context.main_path),
            '{path}: DIST_INSTALL = False has no effect on XPIDL_SOURCES.')

    # Emit one XPIDLFile per source; registration in the interfaces
    # manifest can be suppressed with XPIDL_NO_MANIFEST.
    for idl in context['XPIDL_SOURCES']:
        yield XPIDLFile(context, mozpath.join(context.srcdir, idl),
            xpidl_module, add_to_manifest=not context['XPIDL_NO_MANIFEST'])
def _process_generated_files(self, context):
    """Emit GeneratedFile objects for CONFIGURE_DEFINE_FILES and
    GENERATED_FILES declared in *context*."""
    # CONFIGURE_DEFINE_FILES run the stock process_define_files.py action
    # on the matching ``<path>.in`` template.
    for path in context['CONFIGURE_DEFINE_FILES']:
        script = mozpath.join(mozpath.dirname(mozpath.dirname(__file__)),
                              'action', 'process_define_files.py')
        yield GeneratedFile(context, script, 'process_define_file',
                            unicode(path),
                            [Path(context, path + '.in')])

    generated_files = context.get('GENERATED_FILES')
    if not generated_files:
        return

    for f in generated_files:
        flags = generated_files[f]
        outputs = f
        inputs = []
        if flags.script:
            # Default entry point unless the script spec names one.
            method = "main"
            script = SourcePath(context, flags.script).full_path

            # Deal with cases like "C:\\path\\to\\script.py:function".
            if '.py:' in script:
                script, method = script.rsplit('.py:', 1)
                script += '.py'

            # The generating script must exist and be a Python file.
            if not os.path.exists(script):
                raise SandboxValidationError(
                    'Script for generating %s does not exist: %s'
                    % (f, script), context)
            if os.path.splitext(script)[1] != '.py':
                raise SandboxValidationError(
                    'Script for generating %s does not end in .py: %s'
                    % (f, script), context)

            # Source-tree inputs must exist now; other path kinds are
            # not checked here.
            for i in flags.inputs:
                p = Path(context, i)
                if (isinstance(p, SourcePath) and
                        not os.path.exists(p.full_path)):
                    raise SandboxValidationError(
                        'Input for generating %s does not exist: %s'
                        % (f, p.full_path), context)
                inputs.append(p)
        else:
            # No script declared: emitted with neither script nor method.
            script = None
            method = None
        yield GeneratedFile(context, script, method, outputs, inputs,
                            flags.flags)
def _process_test_manifests(self, context):
    """Emit objects for every test manifest variable declared in *context*."""
    # Manifestparser-based manifests (one variable per TEST_MANIFESTS key).
    for prefix, info in TEST_MANIFESTS.items():
        for path, manifest in context.get('%s_MANIFESTS' % prefix, []):
            for obj in self._process_test_manifest(context, info, path, manifest):
                yield obj

    # Reftest-style manifests use their own parser and emitter.
    for flavor in REFTEST_FLAVORS:
        for path, manifest in context.get('%s_MANIFESTS' % flavor.upper(), []):
            for obj in self._process_reftest_manifest(context, flavor, path, manifest):
                yield obj

    # web-platform-tests flavors: '-' in the flavor maps to '_' in the
    # moz.build variable name.
    for flavor in WEB_PLATFORM_TESTS_FLAVORS:
        for path, manifest in context.get("%s_MANIFESTS" % flavor.upper().replace('-', '_'), []):
            for obj in self._process_web_platform_tests_manifest(context, path, manifest):
                yield obj
def _process_test_manifest(self, context, info, manifest_path, mpmanifest):
    """Emit a TestManifest (with install entries) for one parsed manifest.

    *info* is the TEST_MANIFESTS tuple describing this manifest's flavor
    and install layout; *mpmanifest* is the manifestparser result.
    """
    flavor, install_root, install_subdir, package_tests = info

    path = mozpath.normpath(mozpath.join(context.srcdir, manifest_path))
    manifest_dir = mozpath.dirname(path)
    manifest_reldir = mozpath.dirname(mozpath.relpath(path,
                                                      context.config.topsrcdir))
    manifest_sources = [mozpath.relpath(pth, context.config.topsrcdir)
                        for pth in mpmanifest.source_files]
    install_prefix = mozpath.join(install_root, install_subdir)

    try:
        if not mpmanifest.tests:
            raise SandboxValidationError('Empty test manifest: %s'
                % path, context)

        defaults = mpmanifest.manifest_defaults[os.path.normpath(path)]
        obj = TestManifest(context, path, mpmanifest, flavor=flavor,
            install_prefix=install_prefix,
            relpath=mozpath.join(manifest_reldir, mozpath.basename(path)),
            sources=manifest_sources,
            dupe_manifest='dupe-manifest' in defaults)

        filtered = mpmanifest.tests

        # Jetpack add-on tests are expected to be generated during the
        # build process so they won't exist here.
        if flavor != 'jetpack-addon':
            missing = [t['name'] for t in filtered if not os.path.exists(t['path'])]
            if missing:
                raise SandboxValidationError('Test manifest (%s) lists '
                    'test that does not exist: %s' % (
                    path, ', '.join(missing)), context)

        out_dir = mozpath.join(install_prefix, manifest_reldir)
        if 'install-to-subdir' in defaults:
            # This is terrible, but what are you going to do?
            out_dir = mozpath.join(out_dir, defaults['install-to-subdir'])
            obj.manifest_obj_relpath = mozpath.join(manifest_reldir,
                                                    defaults['install-to-subdir'],
                                                    mozpath.basename(path))

        def process_support_files(test):
            # Resolve support-files entries into concrete install actions
            # on the emitted TestManifest object.
            install_info = self._test_files_converter.convert_support_files(
                test, install_root, manifest_dir, out_dir)

            obj.pattern_installs.extend(install_info.pattern_installs)
            for source, dest in install_info.installs:
                obj.installs[source] = (dest, False)
            obj.external_installs |= install_info.external_installs
            for install_path in install_info.deferred_installs:
                # A deferred (``!/...``) entry must be a pattern, exist in
                # the srcdir, or be satisfied by another manifest.
                if all(['*' not in install_path,
                        not os.path.isfile(mozpath.join(context.config.topsrcdir,
                                                        install_path[2:])),
                        install_path not in install_info.external_installs]):
                    raise SandboxValidationError('Error processing test '
                       'manifest %s: entry in support-files not present '
                       'in the srcdir: %s' % (path, install_path), context)

            obj.deferred_installs |= install_info.deferred_installs

        for test in filtered:
            obj.tests.append(test)

            # Some test files are compiled and should not be copied into the
            # test package. They function as identifiers rather than files.
            if package_tests:
                manifest_relpath = mozpath.relpath(test['path'],
                    mozpath.dirname(test['manifest']))
                obj.installs[mozpath.normpath(test['path'])] = \
                    ((mozpath.join(out_dir, manifest_relpath)), True)

            process_support_files(test)

        # NOTE(review): this loop rebinds `path`, shadowing the manifest
        # path used in the error messages below — confirm intended.
        for path, m_defaults in mpmanifest.manifest_defaults.items():
            process_support_files(m_defaults)

        # We also copy manifests into the output directory,
        # including manifests from [include:foo] directives.
        for mpath in mpmanifest.manifests():
            mpath = mozpath.normpath(mpath)
            out_path = mozpath.join(out_dir, mozpath.basename(mpath))
            obj.installs[mpath] = (out_path, False)

        # Some manifests reference files that are auto generated as
        # part of the build or shouldn't be installed for some
        # reason. Here, we prune those files from the install set.
        # FUTURE we should be able to detect autogenerated files from
        # other build metadata. Once we do that, we can get rid of this.
        for f in defaults.get('generated-files', '').split():
            # We re-raise otherwise the stack trace isn't informative.
            try:
                del obj.installs[mozpath.join(manifest_dir, f)]
            except KeyError:
                raise SandboxValidationError('Error processing test '
                    'manifest %s: entry in generated-files not present '
                    'elsewhere in manifest: %s' % (path, f), context)

        yield obj
    # NOTE(review): AssertionError is a subclass of Exception, so this
    # tuple is equivalent to `except Exception:` — the broad catch wraps
    # any failure (including manifest parser assertions) with a readable
    # traceback for the moz.build author.
    except (AssertionError, Exception):
        raise SandboxValidationError('Error processing test '
            'manifest file %s: %s' % (path,
                '\n'.join(traceback.format_exception(*sys.exc_info()))),
            context)
def _process_reftest_manifest(self, context, flavor, manifest_path, manifest):
    """Emit a TestManifest describing one reftest-style manifest.

    Reftest manifests don't come from manifestparser, but they are
    similar enough that the same emitted object type can be reused.
    No install entries are generated for reftests.
    """
    full_path = mozpath.normpath(
        mozpath.join(context.srcdir, manifest_path))
    reldir = mozpath.dirname(
        mozpath.relpath(full_path, context.config.topsrcdir))

    obj = TestManifest(context, full_path, manifest,
                       flavor=flavor, install_prefix='%s/' % flavor,
                       relpath=mozpath.join(reldir,
                                            mozpath.basename(manifest_path)))

    for test, source_manifest in sorted(manifest.tests):
        entry = {
            'path': test,
            'here': mozpath.dirname(test),
            'manifest': source_manifest,
            'name': mozpath.basename(test),
            'head': '',
            'support-files': '',
            'subsuite': '',
        }
        obj.tests.append(entry)

    yield obj
def _process_web_platform_tests_manifest(self, context, paths, manifest):
    """Emit a TestManifest for a web-platform-tests manifest.

    *paths* is a (manifest path, tests root) pair, both srcdir-relative.
    """
    manifest_path, tests_root = paths

    manifest_full_path = mozpath.normpath(mozpath.join(
        context.srcdir, manifest_path))
    manifest_reldir = mozpath.dirname(mozpath.relpath(manifest_full_path,
        context.config.topsrcdir))
    tests_root = mozpath.normpath(mozpath.join(context.srcdir, tests_root))

    # Create an equivalent TestManifest object
    obj = TestManifest(context, manifest_full_path, manifest,
                       flavor="web-platform-tests",
                       relpath=mozpath.join(manifest_reldir,
                                            mozpath.basename(manifest_path)),
                       install_prefix="web-platform/")

    for test_type, path, tests in manifest:
        path = mozpath.join(tests_root, path)
        # Only these test types become test entries; anything else
        # (e.g. support files) is skipped.
        if test_type not in ["testharness", "reftest", "wdspec"]:
            continue

        for test in tests:
            obj.tests.append({
                'path': path,
                'here': mozpath.dirname(path),
                'manifest': manifest_path,
                'name': test.id,
                'head': '',
                'support-files': '',
                'subsuite': '',
            })

    yield obj
def _process_jar_manifests(self, context):
    """Emit JARManifest objects from JAR_MANIFESTS.

    Also rejects a ``jar.mn`` that exists on disk but is not declared,
    since the old discovery-based handling is gone.
    """
    jar_manifests = context.get('JAR_MANIFESTS', [])
    if len(jar_manifests) > 1:
        raise SandboxValidationError('While JAR_MANIFESTS is a list, '
            'it is currently limited to one value.', context)

    for path in jar_manifests:
        yield JARManifest(context, path)

    # Temporary test to look for jar.mn files that creep in without using
    # the new declaration. Previously, jar.mn files did not need to be
    # declared anywhere (they were discovered); this detects people
    # relying on the old behavior.
    jar_mn_on_disk = os.path.exists(os.path.join(context.srcdir, 'jar.mn'))
    if jar_mn_on_disk and 'jar.mn' not in jar_manifests:
        raise SandboxValidationError('A jar.mn exists but it '
            'is not referenced in the moz.build file. '
            'Please define JAR_MANIFESTS.', context)
def _emit_directory_traversal_from_context(self, context):
    """Emit the DirectoryTraversal object describing DIRS for *context*."""
    traversal = DirectoryTraversal(context)
    traversal.dirs = context.get('DIRS', [])

    # Some paths have a subconfigure, yet also have a moz.build. Those
    # shouldn't end up in self._external_paths.
    if traversal.objdir:
        self._external_paths -= {traversal.relobjdir}

    yield traversal
| mpl-2.0 |
Zentyal/openchange | mapiproxy/services/ocsmanager/ocsmanager/config/middleware.py | 12 | 3101 | """Pylons middleware initialization"""
from beaker.middleware import SessionMiddleware
from paste.cascade import Cascade
from paste.registry import RegistryManager
from paste.urlparser import StaticURLParser
from paste.deploy.converters import asbool
from pylons.middleware import ErrorHandler, StatusCodeRedirect
from pylons.wsgiapp import PylonsApp
from routes.middleware import RoutesMiddleware
from openchange.web.auth.NTLMAuthHandler import NTLMAuthHandler
from ocsmanager.config.environment import load_environment
# from paste.auth.basic import AuthBasicHandler
# from ocsmanager.model.OCSAuthenticator import *
def make_app(global_conf, full_stack=True, static_files=True, **app_conf):
    """Create a Pylons WSGI application and return it

    ``global_conf``
        The inherited configuration for this application. Normally from
        the [DEFAULT] section of the Paste ini file.

    ``full_stack``
        Whether this application provides a full WSGI stack (by default,
        meaning it handles its own exceptions and errors). Disable
        full_stack when this application is "managed" by another WSGI
        middleware.

    ``static_files``
        Whether this application serves its own static files; disable
        when another web server is responsible for serving them.

    ``app_conf``
        The application's local configuration. Normally specified in
        the [app:<name>] section of the Paste ini file (where <name>
        defaults to main).

    """
    # Configure the Pylons environment
    config = load_environment(global_conf, app_conf)

    # The Pylons WSGI app
    app = PylonsApp(config=config)

    # Routing/Session Middleware (order matters: each wrapper below sees
    # requests before the one it wraps)
    app = RoutesMiddleware(app, config['routes.map'])
    app = SessionMiddleware(app, config)

    # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)

    if asbool(full_stack):
        # Handle Python exceptions
        app = ErrorHandler(app, global_conf, **config['pylons.errorware'])

        # Display error documents for 401, 403, 404 status codes (and
        # 500 when debug is disabled)
        if asbool(config['debug']):
            app = StatusCodeRedirect(app, [417])
        else:
            app = StatusCodeRedirect(app, [400, 401, 403, 404, 417, 500])

    # authenticator = OCSAuthenticator(config)
    # app = AuthBasicHandler(app, "OCSManager", authenticator)

    # NOTE(review): `fqdn` is computed but never used below — presumably
    # left over from an earlier auth setup; confirm before removing.
    fqdn = "%(hostname)s.%(dnsdomain)s" % config["samba"]
    auth_handler = NTLMAuthHandler(app)

    def ntlm_env_setter(environ, start_response):
        # Copy selected settings from the paste app config into the WSGI
        # environ so NTLMAuthHandler can read them per request.
        for var in ["SAMBA_HOST", "NTLMAUTHHANDLER_WORKDIR"]:
            try:
                environ[var] = app_conf[var]
            except KeyError:
                # FIXME: logging?
                pass
        return auth_handler(environ, start_response)

    # Establish the Registry for this application
    app = RegistryManager(ntlm_env_setter)

    if asbool(static_files):
        # Serve static files
        static_app = StaticURLParser(config['pylons.paths']['static_files'])
        app = Cascade([static_app, app])
    app.config = config
    return app
| gpl-3.0 |
tempbottle/grpc | src/python/grpcio_test/grpc_test/_adapter/_future_invocation_asynchronous_event_service_test.py | 14 | 2007 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""One of the tests of the Face layer of RPC Framework."""
import unittest
from grpc_test._adapter import _face_test_case
from grpc_test.framework.face.testing import future_invocation_asynchronous_event_service_test_case as test_case
class FutureInvocationAsynchronousEventServiceTest(
    _face_test_case.FaceTestCase,
    test_case.FutureInvocationAsynchronousEventServiceTestCase,
    unittest.TestCase):
  """Binds the shared Face-layer test case to the _adapter fixture.

  All test methods are inherited from the mixin base classes; this class
  only combines them with unittest.TestCase so they are collected.
  """
  pass


if __name__ == '__main__':
  unittest.main(verbosity=2)
| bsd-3-clause |
aptrishu/coala | tests/results/SourceRangeTest.py | 21 | 5938 | import unittest
from collections import namedtuple
from os.path import abspath
from coalib.results.SourcePosition import SourcePosition
from coalib.results.SourceRange import SourceRange
from coalib.results.AbsolutePosition import AbsolutePosition
from coalib.results.Diff import Diff
class SourceRangeTest(unittest.TestCase):
    """Tests for construction, validation, comparison and helpers of
    SourceRange."""

    def setUp(self):
        # Fixture positions: with and without line numbers, in two files.
        self.result_fileA_noline = SourcePosition('A')
        self.result_fileA_line2 = SourcePosition('A', 2)
        self.result_fileB_noline = SourcePosition('B')
        self.result_fileB_line2 = SourcePosition('B', 2)
        self.result_fileB_line4 = SourcePosition('B', 4)

    def test_construction(self):
        # Omitting the end position makes the range end at its start.
        uut1 = SourceRange(self.result_fileA_noline)
        self.assertEqual(uut1.end, self.result_fileA_noline)

        uut2 = SourceRange.from_values('A')
        self.assertEqual(uut1, uut2)

        uut = SourceRange.from_values('B', start_line=2, end_line=4)
        self.assertEqual(uut.start, self.result_fileB_line2)
        self.assertEqual(uut.end, self.result_fileB_line4)

    def test_from_clang_range(self):
        # Simulating a clang SourceRange is easier than setting one up without
        # actually parsing a complete C file.
        ClangRange = namedtuple('ClangRange', 'start end')
        ClangPosition = namedtuple('ClangPosition', 'file line column')
        ClangFile = namedtuple('ClangFile', 'name')
        file = ClangFile('t.c')
        start = ClangPosition(file, 1, 2)
        end = ClangPosition(file, 3, 4)

        uut = SourceRange.from_clang_range(ClangRange(start, end))
        compare = SourceRange.from_values('t.c', 1, 2, 3, 4)
        self.assertEqual(uut, compare)

    def test_from_absolute_position(self):
        text = ('a\n', 'b\n')
        start = AbsolutePosition(text, 0)
        end = AbsolutePosition(text, 2)
        uut = SourceRange.from_absolute_position('F', start, end)
        compare = SourceRange.from_values('F', 1, 1, 2, 1)
        self.assertEqual(uut, compare)

        # A missing end position is preserved as None.
        uut = SourceRange.from_absolute_position('F', start, None)
        compare = SourceRange(SourcePosition('F', 1, 1), None)
        self.assertEqual(uut, compare)

    def test_file_property(self):
        # `file` is an absolute path, so only the trailing name is checked.
        uut = SourceRange(self.result_fileA_line2)
        self.assertRegex(uut.file, '.*A')

    def test_invalid_arguments(self):
        # arguments must be SourcePositions
        with self.assertRaises(TypeError):
            SourceRange(1, self.result_fileA_noline)

        with self.assertRaises(TypeError):
            SourceRange(self.result_fileA_line2, 1)

    def test_argument_file(self):
        # both SourcePositions should describe the same file
        with self.assertRaises(ValueError):
            SourceRange(self.result_fileA_noline, self.result_fileB_noline)

    def test_argument_order(self):
        # end should come after the start
        with self.assertRaises(ValueError):
            SourceRange(self.result_fileA_line2, self.result_fileA_noline)

    def test_invalid_comparison(self):
        # Ordering against a non-SourceRange is undefined.
        with self.assertRaises(TypeError):
            SourceRange(self.result_fileB_noline, self.result_fileB_line2) < 1

    def test_json(self):
        uut = SourceRange.from_values('B', start_line=2,
                                      end_line=4).__json__(use_relpath=True)
        self.assertEqual(uut['start'], self.result_fileB_line2)

    def test_contains(self):
        # Identical ranges contain each other.
        a = SourceRange.from_values('test_file', 1, 2, 1, 20)
        b = SourceRange.from_values('test_file', 1, 2, 1, 20)
        self.assertIn(a, b)

        # A range lying inside a wider range is contained.
        a = SourceRange.from_values('test_file', 1, 2, 2, 20)
        b = SourceRange.from_values('test_file', 1, 1, 2, 20)
        self.assertIn(a, b)

        # Ranges in different files never contain each other.
        a = SourceRange.from_values('test_file', 1, 2, 1, 20)
        b = SourceRange.from_values('test_file2', 1, 2, 1, 20)
        self.assertNotIn(a, b)

        # A range extending past the other is not contained.
        a = SourceRange.from_values('test_file', 2, 2, 64, 20)
        b = SourceRange.from_values('test_file', 1, 1, 50, 20)
        self.assertNotIn(a, b)

    def test_overlaps(self):
        a = SourceRange.from_values('test_file', 2, None, 3)
        b = SourceRange.from_values('test_file', 3, None, 5)
        self.assertTrue(a.overlaps(b))
        self.assertTrue(b.overlaps(a))

        # Ranges in different files never overlap.
        a = SourceRange.from_values('test_file1', 2, None, 3)
        b = SourceRange.from_values('test_file2', 3, None, 5)
        self.assertFalse(a.overlaps(b))
        self.assertFalse(b.overlaps(a))

        a = SourceRange.from_values('test_file', 2, None, 2, None)
        b = SourceRange.from_values('test_file', 2, 2, 2, 80)
        self.assertTrue(a.overlaps(b))
        self.assertTrue(b.overlaps(a))

        a = SourceRange.from_values('test_file1', 1, None, None, None)
        b = SourceRange.from_values('test_file2', 1, None, 1, None)
        self.assertFalse(a.overlaps(b))
        self.assertFalse(b.overlaps(a))

        a = SourceRange.from_values('test_file', 1, None, None, None)
        b = SourceRange.from_values('test_file', 1, None, 1, None)
        self.assertTrue(a.overlaps(b))
        self.assertTrue(b.overlaps(a))

    def test_renamed_file(self):
        # Without a rename diff, the (absolute) original path is returned.
        src_range = SourceRange(SourcePosition('test_file'))
        self.assertEqual(src_range.renamed_file({}), abspath('test_file'))
        self.assertEqual(
            src_range.renamed_file({abspath('test_file'): Diff([])}),
            abspath('test_file'))
        # A rename recorded in the diff is honored.
        self.assertEqual(
            src_range.renamed_file(
                {abspath('test_file'): Diff([], rename='another_file')}),
            'another_file')
class SourceRangeExpandTest(unittest.TestCase):
    """Tests for SourceRange.expand."""

    def test_expand(self):
        # A range built from two positions with no line/column information
        # expands over the entire file contents.
        empty_position = SourcePosition('filename')
        file = ['abc\n', 'def\n', 'ghi\n']
        empty_range = SourceRange(empty_position, empty_position)
        full_range = SourceRange.from_values('filename', 1, 1, 3, 4)
        self.assertEqual(empty_range.expand(file), full_range)
| agpl-3.0 |
fumen/gae-fumen | lib/click/utils.py | 201 | 14916 | import os
import sys
from .globals import resolve_color_default
from ._compat import text_type, open_stream, get_filesystem_encoding, \
get_streerror, string_types, PY2, binary_streams, text_streams, \
filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \
_default_text_stdout, _default_text_stderr, is_bytes, WIN
if not PY2:
from ._compat import _find_binary_writer
elif WIN:
from ._winconsole import _get_windows_argv, \
_hash_py_argv, _initial_argv_hash
echo_native_types = string_types + (bytes, bytearray)
def _posixify(name):
return '-'.join(name.split()).lower()
def safecall(func):
    """Wraps a function so that it swallows exceptions.

    The returned wrapper passes through the return value of a successful
    call and returns ``None`` when *func* raises.  ``functools.wraps``
    preserves *func*'s ``__name__``/``__doc__`` so the wrapper remains
    debuggable.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # Deliberate best-effort semantics: any failure is ignored.
            pass
    return wrapper
def make_str(value):
    """Converts a value into a valid string."""
    if not isinstance(value, bytes):
        return text_type(value)
    # Bytes: decode with the filesystem encoding, falling back to a
    # lossy UTF-8 decode so this never raises.
    try:
        return value.decode(get_filesystem_encoding())
    except UnicodeError:
        return value.decode('utf-8', 'replace')
def make_default_short_help(help, max_length=45):
    """Derive a one-line short help from *help*.

    Words are accumulated until either the first sentence ends (a word
    ending in ``.``) or *max_length* would be exceeded, in which case the
    text is cut off with ``'...'``.
    """
    pieces = []
    used = 0
    stop = False
    for word in help.split():
        if word.endswith('.'):
            stop = True
        # A space is needed before every word except the first.
        cost = len(word) + (1 if pieces else 0)
        if used + cost > max_length:
            pieces.append('...')
            stop = True
        else:
            if pieces:
                pieces.append(' ')
            pieces.append(word)
        if stop:
            break
        used += cost
    return ''.join(pieces)
class LazyFile(object):
    """A lazy file works like a regular file but it does not fully open
    the file but it does perform some basic checks early to see if the
    filename parameter does make sense.  This is useful for safely opening
    files for writing.
    """

    def __init__(self, filename, mode='r', encoding=None, errors='strict',
                 atomic=False):
        self.name = filename
        self.mode = mode
        self.encoding = encoding
        self.errors = errors
        self.atomic = atomic

        if filename == '-':
            # '-' means stdin/stdout: resolve it immediately.
            self._f, self.should_close = open_stream(filename, mode,
                                                     encoding, errors)
        else:
            if 'r' in mode:
                # Open and close the file in case we're opening it for
                # reading so that we can catch at least some errors in
                # some cases early.
                open(filename, mode).close()
            # Real open is deferred until first use.
            self._f = None
            self.should_close = True

    def __getattr__(self, name):
        # Any file attribute access forces the real file open first.
        return getattr(self.open(), name)

    def __repr__(self):
        if self._f is not None:
            return repr(self._f)
        return '<unopened file %r %s>' % (self.name, self.mode)

    def open(self):
        """Opens the file if it's not yet open.  This call might fail with
        a :exc:`FileError`.  Not handling this error will produce an error
        that Click shows.
        """
        if self._f is not None:
            return self._f
        try:
            rv, self.should_close = open_stream(self.name, self.mode,
                                                self.encoding,
                                                self.errors,
                                                atomic=self.atomic)
        except (IOError, OSError) as e:
            from .exceptions import FileError
            raise FileError(self.name, hint=get_streerror(e))
        self._f = rv
        return rv

    def close(self):
        """Closes the underlying file, no matter what."""
        if self._f is not None:
            self._f.close()

    def close_intelligently(self):
        """This function only closes the file if it was opened by the lazy
        file wrapper.  For instance this will never close stdin.
        """
        if self.should_close:
            self.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Leaving the context closes only files we opened ourselves.
        self.close_intelligently()

    def __iter__(self):
        # Iteration opens the file and delegates to the real iterator.
        self.open()
        return iter(self._f)
class KeepOpenFile(object):
    """Proxy around an open file object whose context manager does NOT
    close the underlying file.

    Used to wrap standard streams so ``with open_file('-') as f:`` cannot
    accidentally close stdin/stdout.
    """

    def __init__(self, file):
        self._file = file

    def __getattr__(self, name):
        # Everything not defined here is delegated to the wrapped file.
        return getattr(self._file, name)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Intentionally do not close the underlying file.
        pass

    def __repr__(self):
        return repr(self._file)

    def __iter__(self):
        return iter(self._file)
def echo(message=None, file=None, nl=True, err=False, color=None):
    """Prints a message plus a newline to the given file or stdout. On
    first sight, this looks like the print function, but it has improved
    support for handling Unicode and binary data that does not fail no
    matter how badly configured the system is.

    Primarily it means that you can print binary data as well as Unicode
    data on both 2.x and 3.x to the given file in the most appropriate way
    possible. This is a very carefree function as in that it will try its
    best to not fail. As of Click 6.0 this includes support for unicode
    output on the Windows console.

    In addition to that, if `colorama`_ is installed, the echo function will
    also support clever handling of ANSI codes. Essentially it will then
    do the following:

    -   add transparent handling of ANSI color codes on Windows.
    -   hide ANSI codes automatically if the destination file is not a
        terminal.

    .. _colorama: http://pypi.python.org/pypi/colorama

    .. versionchanged:: 6.0
       As of Click 6.0 the echo function will properly support unicode
       output on the windows console. Note that click does not modify
       the interpreter in any way which means that `sys.stdout` or the
       print statement or function will still not provide unicode support.

    .. versionchanged:: 2.0
       Starting with version 2.0 of Click, the echo function will work
       with colorama if it's installed.

    .. versionadded:: 3.0
       The `err` parameter was added.

    .. versionchanged:: 4.0
       Added the `color` flag.

    :param message: the message to print
    :param file: the file to write to (defaults to ``stdout``)
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``.  This is faster and easier than calling
                :func:`get_text_stderr` yourself.
    :param nl: if set to `True` (the default) a newline is printed afterwards.
    :param color: controls if the terminal supports ANSI colors or not.  The
                  default is autodetection.
    """
    if file is None:
        if err:
            file = _default_text_stderr()
        else:
            file = _default_text_stdout()

    # Convert non bytes/text into the native string type.
    if message is not None and not isinstance(message, echo_native_types):
        message = text_type(message)

    if nl:
        # Append the newline in the message's own type (text vs bytes).
        message = message or u''
        if isinstance(message, text_type):
            message += u'\n'
        else:
            message += b'\n'

    # If there is a message, and we're in Python 3, and the value looks
    # like bytes, we manually need to find the binary stream and write the
    # message in there.  This is done separately so that most stream
    # types will work as you would expect.  Eg: you can write to StringIO
    # for other cases.
    if message and not PY2 and is_bytes(message):
        binary_file = _find_binary_writer(file)
        if binary_file is not None:
            # Flush the text layer first so output order is preserved.
            file.flush()
            binary_file.write(message)
            binary_file.flush()
            return

    # ANSI-style support.  If there is no message or we are dealing with
    # bytes nothing is happening.  If we are connected to a file we want
    # to strip colors.  If we are on windows we either wrap the stream
    # to strip the color or we use the colorama support to translate the
    # ansi codes to API calls.
    if message and not is_bytes(message):
        color = resolve_color_default(color)
        if should_strip_ansi(file, color):
            message = strip_ansi(message)
        elif WIN:
            if auto_wrap_for_ansi is not None:
                file = auto_wrap_for_ansi(file)
            elif not color:
                message = strip_ansi(message)

    if message:
        file.write(message)
    file.flush()
def get_binary_stream(name):
    """Return the named standard stream opened for byte processing.

    This is essentially the stream from the :mod:`sys` module, but it
    smooths over compatibility issues between Python versions (notably
    getting binary streams on Python 3).

    :param name: the name of the stream to open.  Valid names are
                 ``'stdin'``, ``'stdout'`` and ``'stderr'``
    """
    try:
        opener = binary_streams[name]
    except KeyError:
        raise TypeError('Unknown standard stream %r' % name)
    return opener()
def get_text_stream(name, encoding=None, errors='strict'):
    """Return the named standard stream for text processing.

    Usually this wraps the binary stream returned from
    :func:`get_binary_stream`, but on Python 3 it can short-cut to an
    already correctly configured stream.

    :param name: the name of the stream to open.  Valid names are
                 ``'stdin'``, ``'stdout'`` and ``'stderr'``
    :param encoding: overrides the detected default encoding.
    :param errors: overrides the default error mode.
    """
    try:
        opener = text_streams[name]
    except KeyError:
        raise TypeError('Unknown standard stream %r' % name)
    return opener(encoding, errors)
def open_file(filename, mode='r', encoding=None, errors='strict',
              lazy=False, atomic=False):
    """Open *filename* like the :class:`File` type does, for manual use.
    Files are opened non lazy by default.  ``'-'`` opens stdin/stdout.

    When stdin/stdout is returned, the stream is wrapped so that the
    context manager will not close it accidentally, so it is always safe
    to write::

        with open_file(filename) as f:
            ...

    .. versionadded:: 3.0

    :param filename: the name of the file to open (or ``'-'`` for
                     stdin/stdout).
    :param mode: the mode in which to open the file.
    :param encoding: the encoding to use.
    :param errors: the error handling for this file.
    :param lazy: can be flipped to true to open the file lazily.
    :param atomic: in atomic mode writes go into a temporary file and it's
                   moved on close.
    """
    if lazy:
        return LazyFile(filename, mode, encoding, errors, atomic=atomic)
    stream, should_close = open_stream(filename, mode, encoding, errors,
                                       atomic=atomic)
    if should_close:
        return stream
    return KeepOpenFile(stream)
def get_os_args():
    """This returns the argument part of sys.argv in the most appropriate
    form for processing.  What this means is that this return value is in
    a format that works for Click to process but does not necessarily
    correspond well to what's actually standard for the interpreter.

    On most environments the return value is ``sys.argv[1:]`` unchanged.
    However if you are on Windows and running Python 2 the return value
    will actually be a list of unicode strings instead because the
    default behavior on that platform otherwise will not be able to
    carry all possible values that sys.argv can have.

    .. versionadded:: 6.0
    """
    # We can only extract the unicode argv if sys.argv has not been
    # changed since the startup of the application.
    if PY2 and WIN and _initial_argv_hash == _hash_py_argv():
        return _get_windows_argv()
    return sys.argv[1:]
def format_filename(filename, shorten=False):
    """Format a filename for user display.

    The main purpose of this function is to ensure that the filename can
    be displayed at all.  This will decode the filename to unicode if
    necessary in a way that it will not fail.  Optionally, it can shorten
    the filename to not include the full path to the filename.

    :param filename: formats a filename for UI display.  This will also
                     convert the filename into unicode without failing.
    :param shorten: this optionally shortens the filename to strip of the
                    path that leads up to it.
    """
    display_name = os.path.basename(filename) if shorten else filename
    return filename_to_ui(display_name)
def get_app_dir(app_name, roaming=True, force_posix=False):
    r"""Returns the config folder for the application.  The default behavior
    is to return whatever is most appropriate for the operating system.

    To give you an idea, for an app called ``"Foo Bar"``, something like
    the following folders could be returned:

    Mac OS X:
      ``~/Library/Application Support/Foo Bar``
    Mac OS X (POSIX):
      ``~/.foo-bar``
    Unix:
      ``~/.config/foo-bar``
    Unix (POSIX):
      ``~/.foo-bar``
    Win XP (roaming):
      ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
    Win XP (not roaming):
      ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
    Win 7 (roaming):
      ``C:\Users\<user>\AppData\Roaming\Foo Bar``
    Win 7 (not roaming):
      ``C:\Users\<user>\AppData\Local\Foo Bar``

    .. versionadded:: 2.0

    :param app_name: the application name.  This should be properly capitalized
                     and can contain whitespace.
    :param roaming: controls if the folder should be roaming or not on Windows.
                    Has no effect otherwise.
    :param force_posix: if this is set to `True` then on any POSIX system the
                        folder will be stored in the home folder with a leading
                        dot instead of the XDG config home or darwin's
                        application support folder.
    """
    if WIN:
        # Conditional expression instead of the old `and/or` trick, which
        # is fragile and harder to read.
        key = 'APPDATA' if roaming else 'LOCALAPPDATA'
        folder = os.environ.get(key)
        if folder is None:
            # APPDATA/LOCALAPPDATA may be unset on stripped-down accounts.
            folder = os.path.expanduser('~')
        return os.path.join(folder, app_name)
    if force_posix:
        # os.path.join() with a single argument was a no-op here;
        # expanduser alone produces the same path.
        return os.path.expanduser('~/.' + _posixify(app_name))
    if sys.platform == 'darwin':
        return os.path.join(os.path.expanduser(
            '~/Library/Application Support'), app_name)
    return os.path.join(
        os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')),
        _posixify(app_name))
| bsd-3-clause |
fgesora/odoo | addons/account_budget/wizard/account_budget_crossovered_summary_report.py | 373 | 2191 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_budget_crossvered_summary_report(osv.osv_memory):
    """Wizard that launches the crossovered budget summary report.

    NOTE: the historical misspelling "crossvered" in the class/model name
    is kept on purpose for backwards compatibility.
    """
    _name = 'account.budget.crossvered.summary.report'
    _description = 'Account Budget crossvered summary report'
    _columns = {
        'date_from': fields.date('Start of period', required=True),
        'date_to': fields.date('End of period', required=True),
    }
    _defaults = {
        # Default period: January 1st of the current year through today.
        'date_from': lambda *a: time.strftime('%Y-01-01'),
        'date_to': lambda *a: time.strftime('%Y-%m-%d'),
    }
    def check_report(self, cr, uid, ids, context=None):
        """Read the wizard values and trigger the report action."""
        if context is None:
            context = {}
        wizard_values = self.read(cr, uid, ids, context=context)[0]
        active_ids = context.get('active_ids', [])
        # The report expects the target ids and report key inside 'form'.
        wizard_values['ids'] = active_ids
        wizard_values['report'] = 'analytic-one'
        report_data = {
            'ids': active_ids,
            'model': 'crossovered.budget',
            'form': wizard_values,
        }
        return self.pool['report'].get_action(
            cr, uid, [], 'account_budget.report_crossoveredbudget',
            data=report_data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
SanchayanMaity/gem5 | tests/testing/tests.py | 8 | 12237 | #!/usr/bin/env python
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from abc import ABCMeta, abstractmethod
import os
from collections import namedtuple
from units import *
from results import TestResult
import shutil
# Root of the test tree, relative to this file (tests/testing/..).
_test_base = os.path.join(os.path.dirname(__file__), "..")
# One classic test configuration.  The fields mirror the on-disk layout of
# reference outputs: /category/mode/workload/ref/isa/os/config/ -- see
# get_tests() for the discovery logic.
ClassicConfig = namedtuple("ClassicConfig", (
    "category",
    "mode",
    "workload",
    "isa",
    "os",
    "config",
))
# There are currently two "classes" of test
# configurations. Architecture-specific ones and generic ones
# (typically SE mode tests). In both cases, the configuration name
# matches a file in tests/configs/ that will be picked up by the test
# runner (run.py).
#
# Architecture specific configurations are listed in the arch_configs
# dictionary. This is indexed by a (cpu architecture, gpu
# architecture) tuple. GPU architecture is optional and may be None.
#
# Generic configurations are listed in the generic_configs tuple.
#
# When discovering available test cases, this script look uses the
# test list as a list of /candidate/ configurations. A configuration
# is only used if a test has a reference output for that
# configuration. In addition to the base configurations from
# arch_configs and generic_configs, a Ruby configuration may be
# appended to the base name (this is probed /in addition/ to the
# original name. See get_tests() for details.
#
# Maps (cpu ISA, gpu ISA) -> tuple of architecture-specific config names.
# Each name matches a file in tests/configs/ picked up by run.py.
arch_configs = {
    ("alpha", None) : (
        'tsunami-simple-atomic',
        'tsunami-simple-timing',
        'tsunami-simple-atomic-dual',
        'tsunami-simple-timing-dual',
        'twosys-tsunami-simple-atomic',
        'tsunami-o3', 'tsunami-o3-dual',
        'tsunami-minor', 'tsunami-minor-dual',
        'tsunami-switcheroo-full',
    ),
    ("arm", None) : (
        'simple-atomic-dummychecker',
        'o3-timing-checker',
        'realview-simple-atomic',
        'realview-simple-atomic-dual',
        'realview-simple-atomic-checkpoint',
        'realview-simple-timing',
        'realview-simple-timing-dual',
        'realview-o3',
        'realview-o3-checker',
        'realview-o3-dual',
        'realview-minor',
        'realview-minor-dual',
        'realview-switcheroo-atomic',
        'realview-switcheroo-timing',
        'realview-switcheroo-o3',
        'realview-switcheroo-full',
        'realview64-simple-atomic',
        'realview64-simple-atomic-checkpoint',
        'realview64-simple-atomic-dual',
        'realview64-simple-timing',
        'realview64-simple-timing-dual',
        'realview64-o3',
        'realview64-o3-checker',
        'realview64-o3-dual',
        'realview64-minor',
        'realview64-minor-dual',
        'realview64-switcheroo-atomic',
        'realview64-switcheroo-timing',
        'realview64-switcheroo-o3',
        'realview64-switcheroo-full',
    ),
    ("sparc", None) : (
        't1000-simple-atomic',
        't1000-simple-x86',
    ),
    # NOTE(review): "timing" is not an ISA name; the pc-* configs below are
    # x86 configs, so this key looks like it should be ("x86", None) --
    # verify against upstream gem5 before relying on it.
    ("timing", None) : (
        'pc-simple-atomic',
        'pc-simple-timing',
        'pc-o3-timing',
        'pc-switcheroo-full',
    ),
    ("x86", "hsail") : (
        'gpu',
    ),
}
# ISA-agnostic (mostly SE-mode) configurations probed for every ISA.
generic_configs = (
    'simple-atomic',
    'simple-atomic-mp',
    'simple-timing',
    'simple-timing-mp',
    'minor-timing',
    'minor-timing-mp',
    'o3-timing',
    'o3-timing-mt',
    'o3-timing-mp',
    'rubytest',
    'memcheck',
    'memtest',
    'memtest-filter',
    'tgen-simple-mem',
    'tgen-dram-ctrl',
    'learning-gem5-p1-simple',
    'learning-gem5-p1-two-level',
)
# Valid values for the category and mode components of a test filter.
all_categories = ("quick", "long")
all_modes = ("fs", "se")
class Test(object):
    """Base class for test cases.

    A test case consists of one or more test units that are executed in
    two phases: a run phase (units produced by run_units()) and a verify
    phase (units from verify_units()).  The verify phase is skipped
    whenever the run phase fails.
    """
    __metaclass__ = ABCMeta
    def __init__(self, name):
        self.test_name = name
    @abstractmethod
    def ref_files(self):
        """Return an iterable of reference files used by this test case."""
        pass
    @abstractmethod
    def run_units(self):
        """Return the units (typically RunGem5 instances) that make up the
        run phase of this test.
        """
        pass
    @abstractmethod
    def verify_units(self):
        """Return the units that verify the output of the run phase."""
        pass
    @abstractmethod
    def update_ref(self):
        """Update reference files with files from a test run."""
        pass
    def run(self):
        """Execute this test case and return its TestResult."""
        run_results = [unit.run() for unit in self.run_units()]
        # The run phase succeeded only if every unit ran (not skipped)
        # and reported a truthy (passing) result.
        run_ok = all(not res.skipped() and res for res in run_results)
        if run_ok:
            verify_results = [unit.run() for unit in self.verify_units()]
        else:
            verify_results = [unit.skip() for unit in self.verify_units()]
        return TestResult(self.test_name,
                          run_results=run_results,
                          verify_results=verify_results)
    def __str__(self):
        return self.test_name
class ClassicTest(Test):
    """A classic gem5 regression test driven by tests/testing/run.py.

    The test runs gem5 with a config script and then diffs the produced
    output files against the reference outputs stored in the tree.
    """
    # The diff ignore list contains all files that shouldn't be diffed
    # using DiffOutFile. These files typically use special-purpose
    # diff tools (e.g., DiffStatFile).
    diff_ignore_files = FileIgnoreList(
        names=(
            # Stat files use a special stat differ
            "stats.txt",
        ), rex=(
        ))
    # These files should never be included in the list of
    # reference files. This list should include temporary files
    # and other files that we don't care about.
    ref_ignore_files = FileIgnoreList(
        names=(
            "EMPTY",
        ), rex=(
            # Mercurial sometimes leaves backups when applying MQ patches
            r"\.orig$",
            r"\.rej$",
        ))
    def __init__(self, gem5, output_dir, config_tuple,
                 timeout=None,
                 skip=False, skip_diff_out=False, skip_diff_stat=False):
        # config_tuple is a ClassicConfig; its components determine both
        # the test name and the location of the reference outputs.
        super(ClassicTest, self).__init__("/".join(config_tuple))
        ct = config_tuple
        self.gem5 = os.path.abspath(gem5)
        self.script = os.path.join(_test_base, "run.py")
        self.config_tuple = ct
        self.timeout = timeout
        self.output_dir = output_dir
        self.ref_dir = os.path.join(_test_base,
                                    ct.category, ct.mode, ct.workload,
                                    "ref", ct.isa, ct.os, ct.config)
        # skip disables everything; the more specific flags only disable
        # the corresponding verify units.
        self.skip_run = skip
        self.skip_diff_out = skip or skip_diff_out
        self.skip_diff_stat = skip or skip_diff_stat
    def ref_files(self):
        """Yield reference file paths relative to the reference directory."""
        ref_dir = os.path.abspath(self.ref_dir)
        for root, dirs, files in os.walk(ref_dir, topdown=False):
            for f in files:
                # Strip the leading "<ref_dir>/" prefix.
                fpath = os.path.join(root[len(ref_dir) + 1:], f)
                if fpath not in ClassicTest.ref_ignore_files:
                    yield fpath
    def run_units(self):
        """Return a single unit that runs gem5 with this configuration."""
        args = [
            self.script,
            "/".join(self.config_tuple),
        ]
        return [
            RunGem5(self.gem5, args,
                    ref_dir=self.ref_dir, test_dir=self.output_dir,
                    skip=self.skip_run),
        ]
    def verify_units(self):
        """Return diff units comparing test output to reference output."""
        ref_files = set(self.ref_files())
        units = []
        # Stats get their own fuzzy differ; everything else is a plain diff.
        if "stats.txt" in ref_files:
            units.append(
                DiffStatFile(ref_dir=self.ref_dir, test_dir=self.output_dir,
                             skip=self.skip_diff_stat))
        units += [
            DiffOutFile(f,
                        ref_dir=self.ref_dir, test_dir=self.output_dir,
                        skip=self.skip_diff_out)
            for f in ref_files if f not in ClassicTest.diff_ignore_files
        ]
        return units
    def update_ref(self):
        """Copy the outputs of a test run over the reference files."""
        for fname in self.ref_files():
            shutil.copy(
                os.path.join(self.output_dir, fname),
                os.path.join(self.ref_dir, fname))
def parse_test_filter(test_filter):
    """Parse an "isa/category/mode" test filter string.

    The category and mode components are optional; an empty string or
    "*" acts as a wildcard matching all known categories/modes.  The ISA
    component is mandatory.

    Returns a tuple (isa, categories, modes) where categories and modes
    are tuples of the selected values.

    Raises RuntimeError if the filter has more than three components or
    no ISA is given.
    """
    wildcards = ("", "*")
    # str.split() already returns a list; no extra list() needed.
    _filter = test_filter.split("/")
    if len(_filter) > 3:
        raise RuntimeError("Illegal test filter string")
    # Pad omitted trailing components with wildcards.
    _filter += [ "", ] * (3 - len(_filter))
    isa, cat, mode = _filter
    if isa in wildcards:
        raise RuntimeError("No ISA specified")
    cat = all_categories if cat in wildcards else (cat, )
    mode = all_modes if mode in wildcards else (mode, )
    return isa, cat, mode
def get_tests(isa,
              categories=all_categories, modes=all_modes,
              ruby_protocol=None, gpu_isa=None):
    """Yield a ClassicConfig for every test discovered on disk.

    Candidate configuration names are taken from arch_configs (for the
    given ISA/GPU-ISA pair) plus generic_configs; a candidate is only
    yielded when a matching reference-output directory exists and does
    not contain a "skip" marker file.
    """
    # Generate a list of candidate configs
    configs = list(arch_configs.get((isa, gpu_isa), []))
    if (isa, gpu_isa) == ("x86", "hsail"):
        if ruby_protocol == "GPU_RfO":
            configs += ['gpu-randomtest']
    else:
        configs += generic_configs
    if ruby_protocol == 'MI_example':
        configs += [ "%s-ruby" % (c, ) for c in configs ]
    elif ruby_protocol is not None:
        # Override generic ISA configs when using Ruby (excluding
        # MI_example which is included in all ISAs by default). This
        # reduces the number of generic tests we re-run for when
        # compiling Ruby targets.
        configs = [ "%s-ruby-%s" % (c, ruby_protocol) for c in configs ]
    # Walk the on-disk layout:
    # /(quick|long)/(fs|se)/workload/ref/arch/guest/config/
    for conf_script in configs:
        for cat in categories:
            for mode in modes:
                mode_dir = os.path.join(_test_base, cat, mode)
                if not os.path.exists(mode_dir):
                    continue
                for workload in os.listdir(mode_dir):
                    isa_dir = os.path.join(mode_dir, workload, "ref", isa)
                    if not os.path.isdir(isa_dir):
                        continue
                    for _os in os.listdir(isa_dir):
                        test_dir = os.path.join(isa_dir, _os, conf_script)
                        # A "skip" marker file disables the test.
                        if not os.path.exists(test_dir) or \
                           os.path.exists(os.path.join(test_dir, "skip")):
                            continue
                        yield ClassicConfig(cat, mode, workload, isa, _os,
                                            conf_script)
| bsd-3-clause |
protochron/aurora | 3rdparty/javascript/scheduler/assets/bower_components/bootstrap/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
# Marker file left behind when a cache download fails so that a later
# "upload" invocation knows the cache needs refreshing.
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
# NOTE(review): despite the name, this is the number of bytes per mebibyte.
BYTES_PER_MB = 1024 * 1024
try:
    BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
    # Fail fast: without a bucket name nothing below can work.
    raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
    """Delete the given file, silently ignoring any OS-level error
    (e.g. the file not existing)."""
    try:
        _delete_file(filename)
    except (OSError, IOError):
        pass
def _tarball_size(directory):
    """Return the size of the directory's tarball as a human-readable
    string, e.g. ``"12 MiB"``."""
    # The local was previously named "kib", but BYTES_PER_MB is 1024*1024,
    # so the value is actually in mebibytes (matching the "MiB" label).
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
    """Pack ``directory`` into its gzipped tarball (see
    :func:`_tarball_filename_for`)."""
    print("Creating tarball of {}...".format(directory))
    tarball = _tarball_filename_for(directory)
    # -C dirname(...) so the archive contains only the directory's basename.
    run(['tar', '-czf', tarball, '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
    """Unpack the directory's gzipped tarball into its parent directory."""
    print("Extracting tarball of {}...".format(directory))
    target_parent = dirname(directory)
    run(['tar', '-xzf', _tarball_filename_for(directory), '-C', target_parent])
def download(directory):
    """Fetch the cached tarball for ``directory`` from S3 and unpack it.

    Relies on the module-level ``key`` and ``friendly_name`` globals set
    up in ``__main__``.  Exits the process if the download fails.
    """
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
    tarball = _tarball_filename_for(directory)
    print("Downloading {} tarball from S3...".format(friendly_name))
    try:
        key.get_contents_to_filename(tarball)
    except S3ResponseError as s3_error:
        # Leave a marker so a later "upload" run knows the cache is stale.
        open(NEED_TO_UPLOAD_MARKER, 'a').close()
        print(s3_error)
        raise SystemExit("Cached {} download failed!".format(friendly_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
    """Pack ``directory`` and push its tarball to S3 under ``key``."""
    _create_tarball(directory)
    size_text = _tarball_size(directory)
    print("Uploading {} tarball to S3... ({})".format(friendly_name, size_text))
    key.set_contents_from_filename(_tarball_filename_for(directory))
    print("{} cache successfully updated.".format(friendly_name))
    # The cache is fresh again, so drop the "needs upload" marker.
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
    # Uses environment variables:
    # AWS_ACCESS_KEY_ID -- AWS Access Key ID
    # AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
    argv.pop(0)
    if len(argv) != 4:
        raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
    mode, friendly_name, dependencies_file, directory = argv
    conn = S3Connection()
    bucket = conn.lookup(BUCKET_NAME, validate=False)
    if bucket is None:
        raise SystemExit("Could not access bucket!")
    # The S3 object key is the hash of the dependencies file, so a change
    # in dependencies automatically invalidates the cache.
    dependencies_file_hash = _sha256_of_file(dependencies_file)
    key = Key(bucket, dependencies_file_hash)
    key.storage_class = 'REDUCED_REDUNDANCY'
    if mode == 'download':
        download(directory)
    elif mode == 'upload':
        if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
            upload(directory)
        else:
            print("No need to upload anything.")
    else:
        raise SystemExit("Unrecognized mode {!r}".format(mode))
| apache-2.0 |
Evervolv/android_external_chromium_org | chrome/common/extensions/docs/server2/instance_servlet.py | 24 | 3697 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from appengine_wrappers import IsDevServer
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from empty_dir_file_system import EmptyDirFileSystem
from github_file_system import GithubFileSystem
from host_file_system_creator import HostFileSystemCreator
from third_party.json_schema_compiler.memoize import memoize
from render_servlet import RenderServlet
from object_store_creator import ObjectStoreCreator
from server_instance import ServerInstance
class OfflineRenderServletDelegate(RenderServlet.Delegate):
  '''AppEngine instances should never need to call out to SVN. That should only
  ever be done by the cronjobs, which then write the result into DataStore,
  which is as far as instances look. To enable this, crons can pass a custom
  (presumably online) ServerInstance into Get().

  Why? SVN is slow and a bit flaky. Cronjobs failing is annoying but temporary.
  Instances failing affects users, and is really bad.

  Anyway - to enforce this, we actually don't give instances access to SVN. If
  anything is missing from datastore, it'll be a 404. If the cronjobs don't
  manage to catch everything - uhoh. On the other hand, we'll figure it out
  pretty soon, and it also means that legitimate 404s are caught before a round
  trip to SVN.
  '''
  def __init__(self, delegate):
    self._delegate = delegate

  @memoize
  def CreateServerInstance(self):
    '''Builds the offline ServerInstance (once, thanks to @memoize).'''
    store_creator = ObjectStoreCreator(start_empty=False)
    branch_utility = self._delegate.CreateBranchUtility(store_creator)
    fs_creator = self._delegate.CreateHostFileSystemCreator(store_creator)
    host_fs = fs_creator.Create()
    samples_fs = self._delegate.CreateAppSamplesFileSystem(store_creator)
    compiled_fs_factory = CompiledFileSystem.Factory(host_fs, store_creator)
    return ServerInstance(store_creator,
                          host_fs,
                          samples_fs,
                          '',
                          compiled_fs_factory,
                          branch_utility,
                          fs_creator)
class InstanceServlet(object):
  '''Servlet for running on normal AppEngine instances.
  Create this via GetConstructor() so that cache state can be shared amongst
  them via the memoizing Delegate.
  '''
  class Delegate(object):
    '''Allows runtime dependencies to be overridden for testing.'''
    def CreateBranchUtility(self, object_store_creator):
      return BranchUtility.Create(object_store_creator)

    def CreateHostFileSystemCreator(self, object_store_creator):
      return HostFileSystemCreator(object_store_creator, offline=True)

    def CreateAppSamplesFileSystem(self, object_store_creator):
      # TODO(kalman): OfflineServerInstance wrapper for GithubFileSystem, but
      # the cron job doesn't crawl the samples yet.
      if IsDevServer():
        return EmptyDirFileSystem()
      return GithubFileSystem.Create(object_store_creator)

  @staticmethod
  def GetConstructor(delegate_for_test=None):
    delegate = delegate_for_test or InstanceServlet.Delegate()
    render_delegate = OfflineRenderServletDelegate(delegate)
    def constructor(request):
      return RenderServlet(request, render_delegate)
    return constructor

# NOTE: if this were a real Servlet it would implement a Get() method, but
# GetConstructor returns an appropriate lambda function (Request -> Servlet).
| bsd-3-clause |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Doc/includes/mp_benchmarks.py | 57 | 5535 | #
# Simple benchmarks for the multiprocessing package
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import time, sys, multiprocessing, threading, Queue, gc
# time.clock has the best resolution on Windows; time.time elsewhere.
if sys.platform == 'win32':
    _timer = time.clock
else:
    _timer = time.time
# Minimum elapsed time (in seconds) each benchmark must accumulate before
# its measurement is considered meaningful.
delta = 1
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
    # Producer half of the queue benchmark: signal readiness on the
    # condition, then push `iterations` payloads followed by a sentinel.
    payload = '0' * 256
    with c:
        c.notify()
    for _ in xrange(iterations):
        q.put(payload)
    q.put('STOP')
def test_queuespeed(Process, q, c):
    """Benchmark object throughput of queue `q` using worker type `Process`.

    Doubles the iteration count until the measured time exceeds `delta`.
    """
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        p = Process(target=queuespeed_func, args=(q, c, iterations))
        # Wait until the producer has started before timing.
        c.acquire()
        p.start()
        c.wait()
        c.release()
        result = None
        t = _timer()
        while result != 'STOP':
            result = q.get()
        elapsed = _timer() - t
        p.join()
    print iterations, 'objects passed through the queue in', elapsed, 'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_PIPESPEED
def pipe_func(c, cond, iterations):
    # Producer half of the pipe benchmark: signal readiness on the
    # condition, then send `iterations` payloads followed by a sentinel.
    payload = '0' * 256
    with cond:
        cond.notify()
    for _ in xrange(iterations):
        c.send(payload)
    c.send('STOP')
def test_pipespeed():
    """Benchmark object throughput of a multiprocessing.Pipe."""
    c, d = multiprocessing.Pipe()
    cond = multiprocessing.Condition()
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        p = multiprocessing.Process(target=pipe_func,
                                    args=(d, cond, iterations))
        # Wait until the producer has started before timing.
        cond.acquire()
        p.start()
        cond.wait()
        cond.release()
        result = None
        t = _timer()
        while result != 'STOP':
            result = c.recv()
        elapsed = _timer() - t
        p.join()
    print iterations, 'objects passed through connection in',elapsed,'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_SEQSPEED
def test_seqspeed(seq):
    """Benchmark indexed read access (seq[5]) on the given sequence."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        t = _timer()
        for i in xrange(iterations):
            a = seq[5]
        elapsed = _timer()-t
    print iterations, 'iterations in', elapsed, 'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_LOCK
def test_lockspeed(l):
    """Benchmark acquire/release round trips on the given lock."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        t = _timer()
        for i in xrange(iterations):
            l.acquire()
            l.release()
        elapsed = _timer()-t
    print iterations, 'iterations in', elapsed, 'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_CONDITION
def conditionspeed_func(c, N):
    # Child half of the condition ping-pong benchmark: answer each of the
    # parent's notifies with one of its own, N times.
    with c:
        c.notify()
        for _ in xrange(N):
            c.wait()
            c.notify()
def test_conditionspeed(Process, c):
    """Benchmark notify/wait round trips on condition `c` against a child
    created with worker type `Process`."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        c.acquire()
        p = Process(target=conditionspeed_func, args=(c, iterations))
        p.start()
        # Wait for the child's initial notify before timing.
        c.wait()
        t = _timer()
        for i in xrange(iterations):
            c.notify()
            c.wait()
        elapsed = _timer()-t
        c.release()
        p.join()
    print iterations * 2, 'waits in', elapsed, 'seconds'
    print 'average number/sec:', iterations * 2 / elapsed
####
def test():
    """Run the full benchmark suite across threading/multiprocessing/manager
    variants of queues, pipes, sequences, locks and conditions."""
    manager = multiprocessing.Manager()
    # Disable the garbage collector so collection pauses don't skew timings.
    gc.disable()
    print '\n\t######## testing Queue.Queue\n'
    test_queuespeed(threading.Thread, Queue.Queue(),
                    threading.Condition())
    print '\n\t######## testing multiprocessing.Queue\n'
    test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
                    multiprocessing.Condition())
    print '\n\t######## testing Queue managed by server process\n'
    test_queuespeed(multiprocessing.Process, manager.Queue(),
                    manager.Condition())
    print '\n\t######## testing multiprocessing.Pipe\n'
    test_pipespeed()
    print
    print '\n\t######## testing list\n'
    test_seqspeed(range(10))
    print '\n\t######## testing list managed by server process\n'
    test_seqspeed(manager.list(range(10)))
    print '\n\t######## testing Array("i", ..., lock=False)\n'
    test_seqspeed(multiprocessing.Array('i', range(10), lock=False))
    print '\n\t######## testing Array("i", ..., lock=True)\n'
    test_seqspeed(multiprocessing.Array('i', range(10), lock=True))
    print
    print '\n\t######## testing threading.Lock\n'
    test_lockspeed(threading.Lock())
    print '\n\t######## testing threading.RLock\n'
    test_lockspeed(threading.RLock())
    print '\n\t######## testing multiprocessing.Lock\n'
    test_lockspeed(multiprocessing.Lock())
    print '\n\t######## testing multiprocessing.RLock\n'
    test_lockspeed(multiprocessing.RLock())
    print '\n\t######## testing lock managed by server process\n'
    test_lockspeed(manager.Lock())
    print '\n\t######## testing rlock managed by server process\n'
    test_lockspeed(manager.RLock())
    print
    print '\n\t######## testing threading.Condition\n'
    test_conditionspeed(threading.Thread, threading.Condition())
    print '\n\t######## testing multiprocessing.Condition\n'
    test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
    print '\n\t######## testing condition managed by a server process\n'
    test_conditionspeed(multiprocessing.Process, manager.Condition())
    # Re-enable garbage collection once all timing is done.
    gc.enable()
if __name__ == '__main__':
    # Required so frozen Windows executables can spawn worker processes.
    multiprocessing.freeze_support()
    test()
| gpl-3.0 |
mbareta/edx-platform-ft | lms/djangoapps/certificates/queue.py | 8 | 21616 | """Interface for adding certificate generation tasks to the XQueue. """
import json
import random
import logging
import lxml.html
from lxml.etree import XMLSyntaxError, ParserError
from uuid import uuid4
from django.test.client import RequestFactory
from django.conf import settings
from django.core.urlresolvers import reverse
from requests.auth import HTTPBasicAuth
from courseware import grades
from xmodule.modulestore.django import modulestore
from capa.xqueue_interface import XQueueInterface
from capa.xqueue_interface import make_xheader, make_hashkey
from course_modes.models import CourseMode
from student.models import UserProfile, CourseEnrollment
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from certificates.models import (
CertificateStatuses,
GeneratedCertificate,
certificate_status_for_student,
CertificateStatuses as status,
CertificateWhitelist,
ExampleCertificate
)
LOGGER = logging.getLogger(__name__)
class XQueueAddToQueueError(Exception):
    """Raised when a certificate generation task could not be added to the
    XQueue."""
    def __init__(self, error_code, error_msg):
        self.error_code = error_code
        self.error_msg = error_msg
        # Initialize Exception with the human-readable description so that
        # str()/logging show the code and message.
        super(XQueueAddToQueueError, self).__init__(unicode(self))
    def __unicode__(self):
        message = (
            u"Could not add certificate to the XQueue. "
            u"The error code was '{code}' and the message was '{msg}'."
        )
        return message.format(code=self.error_code, msg=self.error_msg)
class XQueueCertInterface(object):
"""
XQueueCertificateInterface provides an
interface to the xqueue server for
managing student certificates.
Instantiating an object will create a new
connection to the queue server.
See models.py for valid state transitions,
summary of methods:
add_cert: Add a new certificate. Puts a single
request on the queue for the student/course.
Once the certificate is generated a post
will be made to the update_certificate
view which will save the certificate
download URL.
regen_cert: Regenerate an existing certificate.
For a user that already has a certificate
this will delete the existing one and
generate a new cert.
del_cert: Delete an existing certificate
For a user that already has a certificate
this will delete his cert.
"""
    def __init__(self, request=None):
        """Open a connection to the XQueue server.

        :param request: optional Django request used for URL reversal; a
            synthetic GET '/' request is created when omitted.
        """
        # Get basic auth (username/password) for
        # xqueue connection if it's in the settings
        if settings.XQUEUE_INTERFACE.get('basic_auth') is not None:
            requests_auth = HTTPBasicAuth(
                *settings.XQUEUE_INTERFACE['basic_auth'])
        else:
            requests_auth = None
        if request is None:
            factory = RequestFactory()
            self.request = factory.get('/')
        else:
            self.request = request
        self.xqueue_interface = XQueueInterface(
            settings.XQUEUE_INTERFACE['url'],
            settings.XQUEUE_INTERFACE['django_auth'],
            requests_auth,
        )
        # Cached querysets of whitelisted users and users who may not
        # receive certificates (allow_certificate=False).
        self.whitelist = CertificateWhitelist.objects.all()
        self.restricted = UserProfile.objects.filter(allow_certificate=False)
        self.use_https = True
    def regen_cert(self, student, course_id, course=None, forced_grade=None, template_file=None, generate_pdf=True):
        """(Re-)Make certificate for a particular student in a particular course

        Arguments:
          student - User.object
          course_id - courseenrollment.course_id (string)

        WARNING: this command will leave the old certificate, if one exists,
                 laying around in AWS taking up space. If this is a problem,
                 take pains to clean up storage before running this command.

        Change the certificate status to unavailable (if it exists) and request
        grading. Passing grades will put a certificate request on the queue.

        Return the certificate.
        """
        # TODO: when del_cert is implemented and plumbed through certificates
        #       repo also, do a deletion followed by a creation r/t a simple
        #       recreation. XXX: this leaves orphan cert files laying around in
        #       AWS. See note in the docstring too.
        try:
            # Mark any existing eligible certificate unavailable before
            # requesting a fresh one.
            certificate = GeneratedCertificate.eligible_certificates.get(user=student, course_id=course_id)
            LOGGER.info(
                (
                    u"Found an existing certificate entry for student %s "
                    u"in course '%s' "
                    u"with status '%s' while regenerating certificates. "
                ),
                student.id,
                unicode(course_id),
                certificate.status
            )
            certificate.status = status.unavailable
            certificate.save()
            LOGGER.info(
                (
                    u"The certificate status for student %s "
                    u"in course '%s' has been changed to '%s'."
                ),
                student.id,
                unicode(course_id),
                certificate.status
            )
        except GeneratedCertificate.DoesNotExist:
            # No prior certificate: nothing to invalidate, just add a new one.
            pass
        return self.add_cert(
            student,
            course_id,
            course=course,
            forced_grade=forced_grade,
            template_file=template_file,
            generate_pdf=generate_pdf
        )
def del_cert(self, student, course_id):
"""
Arguments:
student - User.object
course_id - courseenrollment.course_id (string)
Removes certificate for a student, will change
the certificate status to 'deleting'.
Certificate must be in the 'error' or 'downloadable' state
otherwise it will return the current state
"""
raise NotImplementedError
    # pylint: disable=too-many-statements
    def add_cert(self, student, course_id, course=None, forced_grade=None, template_file=None, generate_pdf=True):
        """
        Request a new certificate for a student.

        Arguments:
          student - User.object
          course_id - courseenrollment.course_id (CourseKey)
          course - optional course object; avoids a modulestore re-fetch
          forced_grade - a string indicating a grade parameter to pass with
                         the certificate request. If this is given, grading
                         will be skipped.
          template_file - optional PDF template override
          generate_pdf - Boolean should a message be sent in queue to generate
                         certificate PDF

        Will change the certificate status to 'generating' or
        `downloadable` in case of web view certificates.

        Certificate must be in the 'unavailable', 'error',
        'deleted' or 'generating' state.

        If a student has a passing grade or is in the whitelist
        table for the course a request will be made for a new cert.

        If a student has allow_certificate set to False in the
        userprofile table the status will change to 'restricted'

        If a student does not have a passing grade the status
        will change to status.notpassing

        Returns the newly created certificate instance, or None when the
        current certificate status does not allow (re)generation.
        """
        # Only these statuses may transition into a new generation request;
        # anything else (e.g. an in-flight 'regenerating') is left alone.
        valid_statuses = [
            status.generating,
            status.unavailable,
            status.deleted,
            status.error,
            status.notpassing,
            status.downloadable,
            status.auditing,
            status.audit_passing,
            status.audit_notpassing,
        ]
        cert_status = certificate_status_for_student(student, course_id)['status']
        cert = None
        if cert_status not in valid_statuses:
            LOGGER.warning(
                (
                    u"Cannot create certificate generation task for user %s "
                    u"in the course '%s'; "
                    u"the certificate status '%s' is not one of %s."
                ),
                student.id,
                unicode(course_id),
                cert_status,
                unicode(valid_statuses)
            )
            return None
        # The caller can optionally pass a course in to avoid
        # re-fetching it from Mongo. If they have not provided one,
        # get it from the modulestore.
        if course is None:
            course = modulestore().get_course(course_id, depth=0)
        profile = UserProfile.objects.get(user=student)
        profile_name = profile.name
        # Needed for access control in grading.
        self.request.user = student
        self.request.session = {}
        is_whitelisted = self.whitelist.filter(user=student, course_id=course_id, whitelist=True).exists()
        grade = grades.grade(student, course)
        enrollment_mode, __ = CourseEnrollment.enrollment_mode_for_user(student, course_id)
        mode_is_verified = enrollment_mode in GeneratedCertificate.VERIFIED_CERTS_MODES
        user_is_verified = SoftwareSecurePhotoVerification.user_is_verified(student)
        cert_mode = enrollment_mode
        is_eligible_for_certificate = is_whitelisted or CourseMode.is_eligible_for_certificate(enrollment_mode)
        unverified = False
        # For credit mode generate verified certificate
        if cert_mode == CourseMode.CREDIT_MODE:
            cert_mode = CourseMode.VERIFIED
        # Template selection: an explicit template wins; otherwise verified
        # enrollments get the "-verified" template only when the user has
        # completed ID verification.
        if template_file is not None:
            template_pdf = template_file
        elif mode_is_verified and user_is_verified:
            template_pdf = "certificate-template-{id.org}-{id.course}-verified.pdf".format(id=course_id)
        elif mode_is_verified and not user_is_verified:
            template_pdf = "certificate-template-{id.org}-{id.course}.pdf".format(id=course_id)
            # Verified-track user without ID verification: fall back to an
            # honor certificate if the course offers that mode, otherwise
            # mark the request as unverified (handled further below).
            if CourseMode.mode_for_course(course_id, CourseMode.HONOR):
                cert_mode = GeneratedCertificate.MODES.honor
            else:
                unverified = True
        else:
            # honor code and audit students
            template_pdf = "certificate-template-{id.org}-{id.course}.pdf".format(id=course_id)
        if forced_grade:
            grade['grade'] = forced_grade
        LOGGER.info(
            (
                u"Certificate generated for student %s in the course: %s with template: %s. "
                u"given template: %s, "
                u"user is verified: %s, "
                u"mode is verified: %s"
            ),
            student.username,
            unicode(course_id),
            template_pdf,
            template_file,
            user_is_verified,
            mode_is_verified
        )
        cert, created = GeneratedCertificate.objects.get_or_create(user=student, course_id=course_id)  # pylint: disable=no-member
        cert.mode = cert_mode
        cert.user = student
        cert.grade = grade['percent']
        cert.course_id = course_id
        cert.name = profile_name
        cert.download_url = ''
        # Strip HTML from grade range label
        # NOTE(review): a parseable grade label is treated as "passing" --
        # presumably grade['grade'] is None for non-passing students, which
        # makes lxml raise below; confirm against the grading code.
        grade_contents = grade.get('grade', None)
        try:
            grade_contents = lxml.html.fromstring(grade_contents).text_content()
            passing = True
        except (TypeError, XMLSyntaxError, ParserError) as exc:
            LOGGER.info(
                (
                    u"Could not retrieve grade for student %s "
                    u"in the course '%s' "
                    u"because an exception occurred while parsing the "
                    u"grade contents '%s' as HTML. "
                    u"The exception was: '%s'"
                ),
                student.id,
                unicode(course_id),
                grade_contents,
                unicode(exc)
            )
            # Log if the student is whitelisted
            # Whitelisted students pass regardless of their grade.
            if is_whitelisted:
                LOGGER.info(
                    u"Student %s is whitelisted in '%s'",
                    student.id,
                    unicode(course_id)
                )
                passing = True
            else:
                passing = False
        # If this user's enrollment is not eligible to receive a
        # certificate, mark it as such for reporting and
        # analytics. Only do this if the certificate is new, or
        # already marked as ineligible -- we don't want to mark
        # existing audit certs as ineligible.
        cutoff = settings.AUDIT_CERT_CUTOFF_DATE
        if (cutoff and cert.created_date >= cutoff) and not is_eligible_for_certificate:
            # NOTE(review): `status` elsewhere in this class appears to alias
            # CertificateStatuses -- confirm; this branch uses the class name
            # directly.
            cert.status = CertificateStatuses.audit_passing if passing else CertificateStatuses.audit_notpassing
            cert.save()
            LOGGER.info(
                u"Student %s with enrollment mode %s is not eligible for a certificate.",
                student.id,
                enrollment_mode
            )
            return cert
        # If they are not passing, short-circuit and don't generate cert
        elif not passing:
            cert.status = status.notpassing
            cert.save()
            LOGGER.info(
                (
                    u"Student %s does not have a grade for '%s', "
                    u"so their certificate status has been set to '%s'. "
                    u"No certificate generation task was sent to the XQueue."
                ),
                student.id,
                unicode(course_id),
                cert.status
            )
            return cert
        # Check to see whether the student is on the the embargoed
        # country restricted list. If so, they should not receive a
        # certificate -- set their status to restricted and log it.
        if self.restricted.filter(user=student).exists():
            cert.status = status.restricted
            cert.save()
            LOGGER.info(
                (
                    u"Student %s is in the embargoed country restricted "
                    u"list, so their certificate status has been set to '%s' "
                    u"for the course '%s'. "
                    u"No certificate generation task was sent to the XQueue."
                ),
                student.id,
                cert.status,
                unicode(course_id)
            )
            return cert
        # Verified-track students who never completed ID verification (and
        # had no honor fallback) get an 'unverified' certificate record.
        if unverified:
            cert.status = status.unverified
            cert.save()
            LOGGER.info(
                (
                    u"User %s has a verified enrollment in course %s "
                    u"but is missing ID verification. "
                    u"Certificate status has been set to unverified"
                ),
                student.id,
                unicode(course_id),
            )
            return cert
        # Finally, generate the certificate and send it off.
        return self._generate_cert(cert, course, student, grade_contents, template_pdf, generate_pdf)
def _generate_cert(self, cert, course, student, grade_contents, template_pdf, generate_pdf):
"""
Generate a certificate for the student. If `generate_pdf` is True,
sends a request to XQueue.
"""
course_id = unicode(course.id)
key = make_hashkey(random.random())
cert.key = key
contents = {
'action': 'create',
'username': student.username,
'course_id': course_id,
'course_name': course.display_name or course_id,
'name': cert.name,
'grade': grade_contents,
'template_pdf': template_pdf,
}
if generate_pdf:
cert.status = status.generating
else:
cert.status = status.downloadable
cert.verify_uuid = uuid4().hex
cert.save()
if generate_pdf:
try:
self._send_to_xqueue(contents, key)
except XQueueAddToQueueError as exc:
cert.status = ExampleCertificate.STATUS_ERROR
cert.error_reason = unicode(exc)
cert.save()
LOGGER.critical(
(
u"Could not add certificate task to XQueue. "
u"The course was '%s' and the student was '%s'."
u"The certificate task status has been marked as 'error' "
u"and can be re-submitted with a management command."
), course_id, student.id
)
else:
LOGGER.info(
(
u"The certificate status has been set to '%s'. "
u"Sent a certificate grading task to the XQueue "
u"with the key '%s'. "
),
cert.status,
key
)
return cert
    def add_example_cert(self, example_cert):
        """Add a task to create an example certificate.

        Unlike other certificates, an example certificate is
        not associated with any particular user and is never
        shown to students.

        If an error occurs when adding the example certificate
        to the queue, the example certificate status
        will be set to "error".

        Arguments:
            example_cert (ExampleCertificate)
        """
        contents = {
            'action': 'create',
            'course_id': unicode(example_cert.course_key),
            'name': example_cert.full_name,
            'template_pdf': example_cert.template,
            # Example certificates are not associated with a particular user.
            # However, we still need to find the example certificate when
            # we receive a response from the queue. For this reason,
            # we use the example certificate's unique identifier as a username.
            # Note that the username is *not* displayed on the certificate;
            # it is used only to identify the certificate task in the queue.
            'username': example_cert.uuid,
            # We send this extra parameter to differentiate
            # example certificates from other certificates.
            # This is not used by the certificates workers or XQueue.
            'example_certificate': True,
        }
        # The callback for example certificates is different than the callback
        # for other certificates. Although both tasks use the same queue,
        # we can distinguish whether the certificate was an example cert based
        # on which end-point XQueue uses once the task completes.
        callback_url_path = reverse('certificates.views.update_example_certificate')
        try:
            # access_key plays the role that the random hash key plays for
            # student certs; the uuid doubles as the task identifier so the
            # callback can look the example cert back up.
            self._send_to_xqueue(
                contents,
                example_cert.access_key,
                task_identifier=example_cert.uuid,
                callback_url_path=callback_url_path
            )
            LOGGER.info(u"Started generating example certificates for course '%s'.", example_cert.course_key)
        except XQueueAddToQueueError as exc:
            # Best-effort: record the failure on the example cert itself
            # rather than propagating -- callers treat this as fire-and-forget.
            example_cert.update_status(
                ExampleCertificate.STATUS_ERROR,
                error_reason=unicode(exc)
            )
            LOGGER.critical(
                (
                    u"Could not add example certificate with uuid '%s' to XQueue. "
                    u"The exception was %s. "
                    u"The example certificate has been marked with status 'error'."
                ), example_cert.uuid, unicode(exc)
            )
def _send_to_xqueue(self, contents, key, task_identifier=None, callback_url_path='/update_certificate'):
"""Create a new task on the XQueue.
Arguments:
contents (dict): The contents of the XQueue task.
key (str): An access key for the task. This will be sent
to the callback end-point once the task completes,
so that we can validate that the sender is the same
entity that received the task.
Keyword Arguments:
callback_url_path (str): The path of the callback URL.
If not provided, use the default end-point for student-generated
certificates.
"""
callback_url = u'{protocol}://{base_url}{path}'.format(
protocol=("https" if self.use_https else "http"),
base_url=settings.SITE_NAME,
path=callback_url_path
)
# Append the key to the URL
# This is necessary because XQueue assumes that only one
# submission is active for a particular URL.
# If it receives a second submission with the same callback URL,
# it "retires" any other submission with the same URL.
# This was a hack that depended on the URL containing the user ID
# and courseware location; an assumption that does not apply
# to certificate generation.
# XQueue also truncates the callback URL to 128 characters,
# but since our key lengths are shorter than that, this should
# not affect us.
callback_url += "?key={key}".format(
key=(
task_identifier
if task_identifier is not None
else key
)
)
xheader = make_xheader(callback_url, key, settings.CERT_QUEUE)
(error, msg) = self.xqueue_interface.send_to_queue(
header=xheader, body=json.dumps(contents))
if error:
exc = XQueueAddToQueueError(error, msg)
LOGGER.critical(unicode(exc))
raise exc
| agpl-3.0 |
sarutobi/Rynda | rynda/users/migrations/0001_initial.py | 3 | 1629 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: create the user ``Profile`` table."""

    dependencies = [
        # Profile holds a OneToOneField to the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # NOTE(review): camelCase attribute names mapped onto
                # snake_case db_column values look like a schema carried over
                # from a previous system -- confirm before renaming anything.
                ('forgotCode', models.CharField(max_length=40, null=True, editable=False, db_column=b'forgotten_password_code')),
                ('rememberCode', models.CharField(max_length=40, null=True, editable=False, db_column=b'remember_code')),
                ('forgotten_time', models.DateTimeField(null=True, editable=False, db_column=b'forgotten_password_time')),
                # Integer bit-flags; semantics are defined by the application.
                ('flags', models.IntegerField(default=0, editable=False, db_column=b'flags')),
                ('phones', models.CharField(max_length=255, blank=True)),
                ('about_me', models.TextField(default=b'', blank=True)),
                ('birthday', models.DateField(null=True, blank=True)),
                ('gender', models.IntegerField(default=0, verbose_name='Gender', choices=[(0, 'Unknown'), (1, 'Male'), (2, 'Female')])),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['user'],
                'verbose_name': 'Profile',
                'verbose_name_plural': 'Profiles',
            },
            bases=(models.Model,),
        ),
    ]
| mit |
tsli/test | swift/common/middleware/name_check.py | 9 | 4941 | # Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Created on February 27, 2012
A filter that disallows any paths that contain defined forbidden characters
or that exceed a defined length.
Place in proxy filter before proxy, e.g.
[pipeline:main]
pipeline = catch_errors healthcheck name_check cache ratelimit tempauth sos
proxy-logging proxy-server
[filter:name_check]
use = egg:swift#name_check
forbidden_chars = '"`<>
maximum_length = 255
There are default settings for forbidden_chars (FORBIDDEN_CHARS) and
maximum_length (MAX_LENGTH)
The filter returns HTTPBadRequest if path is invalid.
@author: eamonn-otoole
'''
import re
from swift.common.utils import get_logger
from urllib2 import unquote
from swift.common.swob import Request, HTTPBadRequest
# Default characters that may never appear in an (unquoted) request path.
FORBIDDEN_CHARS = "\'\"`<>"
# Default cap on the unquoted path length.
MAX_LENGTH = 255
# Default pattern rejecting "/./" and "/../" segments (and a trailing
# "/." or "/..") to block relative-path trickery.
FORBIDDEN_REGEXP = "/\./|/\.\./|/\.$|/\.\.$"
class NameCheckMiddleware(object):
    """WSGI filter that rejects requests whose (unquoted) paths contain
    forbidden characters, exceed a maximum length, or match a forbidden
    regular expression. Returns HTTPBadRequest for invalid paths and
    passes valid requests downstream unchanged."""

    def __init__(self, app, conf):
        self.app = app
        self.conf = conf
        self.forbidden_chars = self.conf.get('forbidden_chars',
                                             FORBIDDEN_CHARS)
        # Values supplied through the paste config arrive as strings.
        # Coerce to int so check_length() compares numbers: previously a
        # configured 'maximum_length' left this attribute as a str, and in
        # Python 2 `int > str` is always False, so the length limit was
        # silently never enforced.
        self.maximum_length = int(self.conf.get('maximum_length', MAX_LENGTH))
        self.forbidden_regexp = self.conf.get('forbidden_regexp',
                                              FORBIDDEN_REGEXP)
        if self.forbidden_regexp:
            self.forbidden_regexp_compiled = re.compile(self.forbidden_regexp)
        else:
            self.forbidden_regexp_compiled = None
        self.logger = get_logger(self.conf, log_route='name_check')

    def check_character(self, req):
        '''
        Checks req.path for any forbidden characters.

        Returns True if there are any forbidden characters;
        False otherwise.
        '''
        self.logger.debug("name_check: path %s" % req.path)
        self.logger.debug("name_check: self.forbidden_chars %s" %
                          self.forbidden_chars)
        return any(c in self.forbidden_chars for c in unquote(req.path))

    def check_length(self, req):
        '''
        Checks that req.path doesn't exceed the defined maximum length.

        Returns True if the length exceeds the maximum;
        False if the length is <= the maximum.
        '''
        return len(unquote(req.path)) > self.maximum_length

    def check_regexp(self, req):
        '''
        Checks that req.path doesn't contain a substring matching regexps.

        Returns True if there is any forbidden substring;
        False otherwise (including when no regexp is configured).
        '''
        if self.forbidden_regexp_compiled is None:
            return False
        self.logger.debug("name_check: path %s" % req.path)
        self.logger.debug("name_check: self.forbidden_regexp %s" %
                          self.forbidden_regexp)
        return self.forbidden_regexp_compiled.search(unquote(req.path)) is not None

    def __call__(self, env, start_response):
        # Run the three checks in order; the first failure short-circuits
        # with a 400 response, otherwise hand off to the wrapped app.
        req = Request(env)
        if self.check_character(req):
            return HTTPBadRequest(
                request=req,
                body=("Object/Container name contains forbidden chars from %s"
                      % self.forbidden_chars))(env, start_response)
        elif self.check_length(req):
            return HTTPBadRequest(
                request=req,
                body=("Object/Container name longer than the allowed maximum "
                      "%s" % self.maximum_length))(env, start_response)
        elif self.check_regexp(req):
            return HTTPBadRequest(
                request=req,
                body=("Object/Container name contains a forbidden substring "
                      "from regular expression %s"
                      % self.forbidden_regexp))(env, start_response)
        else:
            # Pass on to downstream WSGI component
            return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Standard paste.deploy factory: returns a callable that wraps a WSGI
    app in NameCheckMiddleware, with local settings overriding globals."""
    conf = dict(global_conf)
    conf.update(local_conf)

    def name_check_filter(app):
        return NameCheckMiddleware(app, conf)
    return name_check_filter
| apache-2.0 |
cstavr/synnefo | snf-django-lib/snf_django/lib/db/fields.py | 9 | 2414 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core import exceptions
from django.db.models import DecimalField, SubfieldBase
from django import forms
from django.utils.translation import ugettext_lazy as _
from south.modelsinspector import add_introspection_rules
import decimal
DECIMAL_DIGITS = 38
class IntDecimalField(DecimalField):
    """Store (possibly very large) integers in a DECIMAL database column.

    Values are exposed to Python as ``long`` integers, while the database
    sees zero-decimal-place decimals sized by ``max_digits``.
    """
    __metaclass__ = SubfieldBase
    description = _("Integer number as decimal")

    def to_python(self, value):
        """Coerce a DB/serialized value to a Python long (None passes through)."""
        if value is None:
            return None
        try:
            return long(value)
        except (ValueError, TypeError):
            raise exceptions.ValidationError(self.error_messages['invalid'])

    def _to_decimal(self, value):
        """Coerce to decimal.Decimal, mapping bad input to a validation error."""
        if value is None:
            return None
        try:
            return decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise exceptions.ValidationError(self.error_messages['invalid'])

    def get_db_prep_save(self, value, connection):
        """Serialize for storage as a decimal with this field's digits/places."""
        prepared = self._to_decimal(value)
        return connection.ops.value_to_db_decimal(
            prepared, self.max_digits, self.decimal_places)

    def get_prep_value(self, value):
        """Query parameters are compared as decimals."""
        return self._to_decimal(value)

    def formfield(self, **kwargs):
        """Render as a plain integer form field unless a form_class is given."""
        options = dict(kwargs)
        options.setdefault('form_class', forms.IntegerField)
        return super(IntDecimalField, self).formfield(**options)
# Teach south how to introspect the custom field so that migrations can be
# generated for models that use it.
add_introspection_rules(
    [], ["^snf_django\.lib\.db\.fields\.IntDecimalField"])
def intDecimalField(verbose_name=None, name=None, **kwargs):
    """Convenience constructor: an IntDecimalField wide enough for any
    DECIMAL_DIGITS-digit integer.

    decimal_places/max_digits are fixed here rather than inside the field's
    own constructor in order to convince south.
    """
    return IntDecimalField(
        verbose_name,
        name,
        max_digits=DECIMAL_DIGITS,
        decimal_places=0,
        **kwargs
    )
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.