repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
gmarkall/numba | numba/core/decorators.py | 1 | 11579 | """
Define @jit and related decorators.
"""
import sys
import warnings
import inspect
import logging
from numba.core.errors import DeprecationError, NumbaDeprecationWarning
from numba.stencils.stencil import stencil
from numba.core import config, extending, sigutils, registry
_logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Decorators
_msg_deprecated_signature_arg = ("Deprecated keyword argument `{0}`. "
"Signatures should be passed as the first "
"positional argument.")
def jit(signature_or_function=None, locals=None, cache=False,
        pipeline_class=None, boundscheck=None, **options):
    """
    This decorator is used to compile a Python function into native code.

    Args
    -----
    signature_or_function:
        The (optional) signature or list of signatures to be compiled.
        If not passed, required signatures will be compiled when the
        decorated function is called, depending on the argument values.
        As a convenience, you can directly pass the function to be compiled
        instead.

    locals: dict
        Mapping of local variable names to Numba types. Used to override the
        types deduced by Numba's type inference engine.

    target (deprecated): str
        Specifies the target platform to compile for. Valid targets are cpu,
        gpu, npyufunc, and cuda. Defaults to cpu.

    pipeline_class: type numba.compiler.CompilerBase
        The compiler pipeline type for customizing the compilation stages.

    options:
        For a cpu target, valid options are:
            nopython: bool
                Set to True to disable the use of PyObjects and Python API
                calls. The default behavior is to allow the use of PyObjects
                and Python API. Default value is False.

            forceobj: bool
                Set to True to force the use of PyObjects for every value.
                Default value is False.

            looplift: bool
                Set to True to enable jitting loops in nopython mode while
                leaving surrounding code in object mode. This allows functions
                to allocate NumPy arrays and use Python objects, while the
                tight loops in the function can still be compiled in nopython
                mode. Any arrays that the tight loop uses should be created
                before the loop is entered. Default value is True.

            error_model: str
                The error-model affects divide-by-zero behavior.
                Valid values are 'python' and 'numpy'. The 'python' model
                raises exception. The 'numpy' model sets the result to
                *+/-inf* or *nan*. Default value is 'python'.

            inline: str or callable
                The inline option will determine whether a function is inlined
                at into its caller if called. String options are 'never'
                (default) which will never inline, and 'always', which will
                always inline. If a callable is provided it will be called with
                the call expression node that is requesting inlining, the
                caller's IR and callee's IR as arguments, it is expected to
                return Truthy as to whether to inline.
                NOTE: This inlining is performed at the Numba IR level and is in
                no way related to LLVM inlining.

            boundscheck: bool or None
                Set to True to enable bounds checking for array indices. Out
                of bounds accesses will raise IndexError. The default is to
                not do bounds checking. If False, bounds checking is disabled,
                out of bounds accesses can produce garbage results or segfaults.
                However, enabling bounds checking will slow down typical
                functions, so it is recommended to only use this flag for
                debugging. You can also set the NUMBA_BOUNDSCHECK environment
                variable to 0 or 1 to globally override this flag. The default
                value is None, which under normal execution equates to False,
                but if debug is set to True then bounds checking will be
                enabled.

    Returns
    --------
    A callable usable as a compiled function. Actual compiling will be
    done lazily if no explicit signatures are passed.

    Examples
    --------
    The function can be used in the following ways:

    1) jit(signatures, target='cpu', **targetoptions) -> jit(function)

        Equivalent to:

            d = dispatcher(function, targetoptions)
            for signature in signatures:
                d.compile(signature)

        Create a dispatcher object for a python function. Then, compile
        the function with the given signature(s).

        Example:

            @jit("int32(int32, int32)")
            def foo(x, y):
                return x + y

            @jit(["int32(int32, int32)", "float32(float32, float32)"])
            def bar(x, y):
                return x + y

    2) jit(function, target='cpu', **targetoptions) -> dispatcher

        Create a dispatcher function object that specializes at call site.

        Examples:

            @jit
            def foo(x, y):
                return x + y

            @jit(target='cpu', nopython=True)
            def bar(x, y):
                return x + y
    """
    # Avoid the mutable-default-argument pitfall: a shared dict default would
    # leak state between unrelated @jit call sites.  Passing locals={}
    # explicitly still behaves exactly as before.
    if locals is None:
        locals = {}

    # 'argtypes' / 'restype' predate signature objects; reject them loudly.
    if 'argtypes' in options:
        raise DeprecationError(_msg_deprecated_signature_arg.format('argtypes'))
    if 'restype' in options:
        raise DeprecationError(_msg_deprecated_signature_arg.format('restype'))
    if options.get('nopython', False) and options.get('forceobj', False):
        raise ValueError("Only one of 'nopython' or 'forceobj' can be True.")

    # 'target' is deprecated in favor of the internal '_target' plumbing.
    if 'target' in options:
        target = options.pop('target')
        warnings.warn("The 'target' keyword argument is deprecated.", NumbaDeprecationWarning)
    else:
        target = options.pop('_target', 'cpu')

    options['boundscheck'] = boundscheck

    # Handle signature
    if signature_or_function is None:
        # No signature, no function
        pyfunc = None
        sigs = None
    elif isinstance(signature_or_function, list):
        # A list of signatures is passed
        pyfunc = None
        sigs = signature_or_function
    elif sigutils.is_signature(signature_or_function):
        # A single signature is passed
        pyfunc = None
        sigs = [signature_or_function]
    else:
        # A function is passed
        pyfunc = signature_or_function
        sigs = None

    dispatcher_args = {}
    if pipeline_class is not None:
        dispatcher_args['pipeline_class'] = pipeline_class
    wrapper = _jit(sigs, locals=locals, target=target, cache=cache,
                   targetoptions=options, **dispatcher_args)
    if pyfunc is not None:
        # Bare @jit usage: decorate immediately.
        return wrapper(pyfunc)
    else:
        # @jit(...) usage: return the decorator itself.
        return wrapper
def _jit(sigs, locals, target, cache, targetoptions, **dispatcher_args):
    """Shared implementation behind the public decorators: look up the
    dispatcher class for *target* and return the actual decorator closure.

    :param sigs: list of signatures for eager compilation, or None for lazy.
    :param locals: mapping of local variable names to Numba types.
    :param target: target platform name used to index the dispatcher registry.
    :param cache: enable on-disk caching of compiled functions when True.
    :param targetoptions: remaining target-specific compilation options.
    :param dispatcher_args: extra keyword arguments forwarded to the
        dispatcher constructor (e.g. ``pipeline_class``).
    """
    dispatcher = registry.dispatcher_registry[target]

    def wrapper(func):
        # Re-decorating an existing dispatcher is almost always a mistake.
        if extending.is_jitted(func):
            raise TypeError(
                "A jit decorator was called on an already jitted function "
                f"{func}. If trying to access the original python "
                f"function, use the {func}.py_func attribute."
            )
        if not inspect.isfunction(func):
            raise TypeError(
                "The decorated object is not a function (got type "
                f"{type(func)})."
            )
        # Under the CUDA simulator, delegate straight to cuda.jit.
        if config.ENABLE_CUDASIM and target == 'cuda':
            from numba import cuda
            return cuda.jit(func)
        # config.DISABLE_JIT returns the plain Python function unchanged,
        # except for the npyufunc target which cannot fall back this way.
        if config.DISABLE_JIT and not target == 'npyufunc':
            return func
        disp = dispatcher(py_func=func, locals=locals,
                          targetoptions=targetoptions,
                          **dispatcher_args)
        if cache:
            disp.enable_caching()
        if sigs is not None:
            # Register the Dispatcher to the type inference mechanism,
            # even though the decorator hasn't returned yet.
            from numba.core import typeinfer
            with typeinfer.register_dispatcher(disp):
                for sig in sigs:
                    disp.compile(sig)
                # Eager compilation: restrict the dispatcher to exactly the
                # signatures given above.
                disp.disable_compile()
        return disp
    return wrapper
def generated_jit(function=None, target='cpu', cache=False,
                  pipeline_class=None, **options):
    """
    This decorator allows flexible type-based compilation
    of a jitted function.  It works as `@jit`, except that the decorated
    function is called at compile-time with the *types* of the arguments
    and should return an implementation function for those types.
    """
    # Forward pipeline_class only when explicitly supplied, so the
    # dispatcher keeps its own default otherwise.
    extra_args = {} if pipeline_class is None else {'pipeline_class': pipeline_class}
    decorator = _jit(sigs=None, locals={}, target=target, cache=cache,
                     targetoptions=options, impl_kind='generated',
                     **extra_args)
    # Support both bare @generated_jit and parameterized @generated_jit(...).
    return decorator if function is None else decorator(function)
def njit(*args, **kws):
    """
    Equivalent to jit(nopython=True)

    See documentation for jit function/decorator for full description.
    """
    # Both keywords below are redundant here: njit always forces nopython
    # mode, so a user-supplied value is ignored with a warning.
    if 'nopython' in kws:
        warnings.warn('nopython is set for njit and is ignored', RuntimeWarning)
    if 'forceobj' in kws:
        warnings.warn('forceobj is set for njit and is ignored', RuntimeWarning)
        # forceobj conflicts with nopython mode; drop it before delegating.
        kws.pop('forceobj')
    kws['nopython'] = True
    return jit(*args, **kws)
def cfunc(sig, locals=None, cache=False, pipeline_class=None, **options):
    """
    This decorator is used to compile a Python function into a C callback
    usable with foreign C libraries.

    Usage::
        @cfunc("float64(float64, float64)", nopython=True, cache=True)
        def add(a, b):
            return a + b

    :param sig: the single C signature to compile for.
    :param locals: mapping of local variable names to Numba types.
    :param cache: enable on-disk caching of the compiled callback when True.
    :param pipeline_class: optional custom compiler pipeline type.
    :param options: additional target options forwarded to compilation.
    """
    # Avoid the mutable-default-argument pitfall: a shared dict default would
    # leak state between unrelated @cfunc call sites.
    if locals is None:
        locals = {}
    sig = sigutils.normalize_signature(sig)

    def wrapper(func):
        from numba.core.ccallback import CFunc
        additional_args = {}
        if pipeline_class is not None:
            additional_args['pipeline_class'] = pipeline_class
        res = CFunc(func, sig, locals=locals, options=options, **additional_args)
        if cache:
            res.enable_caching()
        # Unlike @jit, the callback is compiled eagerly at decoration time.
        res.compile()
        return res
    return wrapper
def jit_module(**kwargs):
    """ Automatically ``jit``-wraps functions defined in a Python module

    Note that ``jit_module`` should only be called at the end of the module to
    be jitted. In addition, only functions which are defined in the module
    ``jit_module`` is called from are considered for automatic jit-wrapping.
    See the Numba documentation for more information about what can/cannot be
    jitted.

    :param kwargs: Keyword arguments to pass to ``jit`` such as ``nopython``
                   or ``error_model``.
    """
    # Get the module jit_module is being called from
    frame = inspect.stack()[1]
    module = inspect.getmodule(frame[0])
    # Replace functions in module with jit-wrapped versions
    for name, obj in module.__dict__.items():
        # Only wrap plain functions actually defined in the calling module;
        # imported functions keep their original __module__ and are skipped.
        if inspect.isfunction(obj) and inspect.getmodule(obj) == module:
            _logger.debug("Auto decorating function {} from module {} with jit "
                          "and options: {}".format(obj, module.__name__, kwargs))
            module.__dict__[name] = jit(obj, **kwargs)
| bsd-2-clause |
craynot/django | django/contrib/gis/db/backends/base/operations.py | 263 | 4865 | class BaseSpatialOperations(object):
"""
This module holds the base `BaseSpatialBackend` object, which is
instantiated by each spatial database backend with the features
it has.
"""
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
area = False
bounding_circle = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
disallowed_aggregates = ()
geom_func_prefix = ''
# Mapping between Django function names and backend names, when names do not
# match; used in spatial_function_name().
function_names = {}
# Blacklist/set of known unsupported functions of the backend
unsupported_functions = {
'Area', 'AsGeoHash', 'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG',
'BoundingCircle', 'Centroid', 'Difference', 'Distance', 'Envelope',
'ForceRHR', 'Intersection', 'Length', 'MemSize', 'NumGeometries',
'NumPoints', 'Perimeter', 'PointOnSurface', 'Reverse', 'Scale',
'SnapToGrid', 'SymDifference', 'Transform', 'Translate',
'Union',
}
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
    def convert_extent(self, box, srid):
        # Convert a raw 2D extent aggregate result; backends that support the
        # Extent aggregate must override.
        raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
    def convert_extent3d(self, box, srid):
        # Convert a raw 3D extent aggregate result; backends that support the
        # Extent3D aggregate must override.
        raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
    def convert_geom(self, geom_val, geom_field):
        # Convert a raw geometry aggregate result; backends that support
        # geometry-returning aggregates must override.
        raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
    def geo_db_type(self, f):
        """
        Returns the database column type for the geometry field on
        the spatial backend.
        """
        # Abstract: every concrete spatial backend must override.
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
    def get_distance(self, f, value, lookup_type):
        """
        Returns the distance parameters for the given geometry field,
        lookup value, and lookup type.
        """
        # Abstract: backends that support distance lookups must override.
        raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value, compiler):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_placeholder() method')
    def check_expression_support(self, expression):
        """Raise NotImplementedError if *expression* is a spatial aggregate
        this backend has declared in ``disallowed_aggregates``."""
        if isinstance(expression, self.disallowed_aggregates):
            raise NotImplementedError(
                "%s spatial aggregation is not supported by this database backend." % expression.name
            )
        # Delegate remaining checks to the non-spatial operations class.
        super(BaseSpatialOperations, self).check_expression_support(expression)
    def spatial_aggregate_name(self, agg_name):
        # Abstract: map a Django aggregate name to the backend's SQL name.
        raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
    def spatial_function_name(self, func_name):
        """Return the backend-specific SQL name for the Django spatial
        function *func_name*, raising if the backend cannot support it."""
        if func_name in self.unsupported_functions:
            raise NotImplementedError("This backend doesn't support the %s function." % func_name)
        # Fall back to prefixing with geom_func_prefix when no explicit
        # mapping is declared in function_names.
        return self.function_names.get(func_name, self.geom_func_prefix + func_name)
# Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        # Abstract: return the model for the OGC geometry_columns table.
        raise NotImplementedError('Subclasses of BaseSpatialOperations must provide a geometry_columns() method.')
def spatial_ref_sys(self):
raise NotImplementedError('subclasses of BaseSpatialOperations must a provide spatial_ref_sys() method')
| bsd-3-clause |
BT-fgarbely/odoo | addons/sale/report/invoice_report.py | 336 | 1680 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
    # Extends the core invoice analysis report with the sales-team dimension.
    _inherit = 'account.invoice.report'
    _columns = {
        # Sales team responsible for the invoice (from crm.case.section).
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
    _depends = {
        # Rebuild the report view when account.invoice.section_id changes.
        'account.invoice': ['section_id'],
    }

    def _select(self):
        # Surface the sub-select's section_id in the outer SELECT clause.
        return super(account_invoice_report, self)._select() + ", sub.section_id as section_id"

    def _sub_select(self):
        # Pull section_id from the account_invoice ("ai") table.
        return super(account_invoice_report, self)._sub_select() + ", ai.section_id as section_id"

    def _group_by(self):
        # section_id must appear in GROUP BY since it is selected above.
        return super(account_invoice_report, self)._group_by() + ", ai.section_id"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
googleapis/googleapis-gen | google/cloud/domains/v1beta1/domains-v1beta1-py/google/cloud/domains/__init__.py | 1 | 3740 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.domains_v1beta1.services.domains.client import DomainsClient
from google.cloud.domains_v1beta1.services.domains.async_client import DomainsAsyncClient
from google.cloud.domains_v1beta1.types.domains import AuthorizationCode
from google.cloud.domains_v1beta1.types.domains import ConfigureContactSettingsRequest
from google.cloud.domains_v1beta1.types.domains import ConfigureDnsSettingsRequest
from google.cloud.domains_v1beta1.types.domains import ConfigureManagementSettingsRequest
from google.cloud.domains_v1beta1.types.domains import ContactSettings
from google.cloud.domains_v1beta1.types.domains import DeleteRegistrationRequest
from google.cloud.domains_v1beta1.types.domains import DnsSettings
from google.cloud.domains_v1beta1.types.domains import ExportRegistrationRequest
from google.cloud.domains_v1beta1.types.domains import GetRegistrationRequest
from google.cloud.domains_v1beta1.types.domains import ListRegistrationsRequest
from google.cloud.domains_v1beta1.types.domains import ListRegistrationsResponse
from google.cloud.domains_v1beta1.types.domains import ManagementSettings
from google.cloud.domains_v1beta1.types.domains import OperationMetadata
from google.cloud.domains_v1beta1.types.domains import RegisterDomainRequest
from google.cloud.domains_v1beta1.types.domains import RegisterParameters
from google.cloud.domains_v1beta1.types.domains import Registration
from google.cloud.domains_v1beta1.types.domains import ResetAuthorizationCodeRequest
from google.cloud.domains_v1beta1.types.domains import RetrieveAuthorizationCodeRequest
from google.cloud.domains_v1beta1.types.domains import RetrieveRegisterParametersRequest
from google.cloud.domains_v1beta1.types.domains import RetrieveRegisterParametersResponse
from google.cloud.domains_v1beta1.types.domains import SearchDomainsRequest
from google.cloud.domains_v1beta1.types.domains import SearchDomainsResponse
from google.cloud.domains_v1beta1.types.domains import UpdateRegistrationRequest
from google.cloud.domains_v1beta1.types.domains import ContactNotice
from google.cloud.domains_v1beta1.types.domains import ContactPrivacy
from google.cloud.domains_v1beta1.types.domains import DomainNotice
from google.cloud.domains_v1beta1.types.domains import TransferLockState
# Public API re-exported at the google.cloud.domains namespace: the two
# clients plus every request/response/enum type imported above.
__all__ = ('DomainsClient',
    'DomainsAsyncClient',
    'AuthorizationCode',
    'ConfigureContactSettingsRequest',
    'ConfigureDnsSettingsRequest',
    'ConfigureManagementSettingsRequest',
    'ContactSettings',
    'DeleteRegistrationRequest',
    'DnsSettings',
    'ExportRegistrationRequest',
    'GetRegistrationRequest',
    'ListRegistrationsRequest',
    'ListRegistrationsResponse',
    'ManagementSettings',
    'OperationMetadata',
    'RegisterDomainRequest',
    'RegisterParameters',
    'Registration',
    'ResetAuthorizationCodeRequest',
    'RetrieveAuthorizationCodeRequest',
    'RetrieveRegisterParametersRequest',
    'RetrieveRegisterParametersResponse',
    'SearchDomainsRequest',
    'SearchDomainsResponse',
    'UpdateRegistrationRequest',
    'ContactNotice',
    'ContactPrivacy',
    'DomainNotice',
    'TransferLockState',
)
| apache-2.0 |
jralls/gramps | gramps/gen/lib/childreftype.py | 10 | 2770 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Provide the different child reference types.
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .grampstype import GrampsType
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
class ChildRefType(GrampsType):
    """
    Provide the different ChildRef types.

    .. attribute NONE : None - no relationship
    .. attribute BIRTH : Birth - relation by birth. Implicates genetic
       relationship if no other families with other types are present
    .. attribute ADOPTED : Adopted - adopted child. The real parents have
       given up the child for adoption
    .. attribute STEPCHILD : Stepchild - stepchild, the child is from the other
       partner, relationship is due to the forming of the marriage
    .. attribute SPONSORED : Sponsored - parent is sponsoring the child
    .. attribute FOSTER : Foster - taking care of the child while the real
       parents are around and know of it. This can be due to the parents
       not being able to care for the child, or because government has
       ordered this
    .. attribute UNKNOWN : Unknown - unknown relationship
    .. attribute CUSTOM : Custom - a relationship given by the user
    """
    # Integer codes for each child reference type (stored in the database).
    NONE = 0
    BIRTH = 1
    ADOPTED = 2
    STEPCHILD = 3
    SPONSORED = 4
    FOSTER = 5
    UNKNOWN = 6
    CUSTOM = 7

    _CUSTOM = CUSTOM    # value treated as user-defined by GrampsType
    _DEFAULT = BIRTH    # value used when none is specified

    # (value, localized display string, XML string) triples consumed by
    # the GrampsType machinery.
    _DATAMAP = [
        (NONE, _("None"), "None"),
        (BIRTH, _("Birth"), "Birth"),
        (ADOPTED, _("Adopted"), "Adopted"),
        (STEPCHILD, _("Stepchild"), "Stepchild"),
        (SPONSORED, _("Sponsored"), "Sponsored"),
        (FOSTER, _("Foster"), "Foster"),
        (UNKNOWN, _("Unknown"), "Unknown"),
        (CUSTOM, _("Custom"), "Custom"),
    ]

    def __init__(self, value=None):
        GrampsType.__init__(self, value)
| gpl-2.0 |
balloob/home-assistant | tests/components/sentry/test_init.py | 7 | 10213 | """Tests for Sentry integration."""
import logging
import pytest
from homeassistant.components.sentry import get_channel, process_before_send
from homeassistant.components.sentry.const import (
CONF_DSN,
CONF_ENVIRONMENT,
CONF_EVENT_CUSTOM_COMPONENTS,
CONF_EVENT_HANDLED,
CONF_EVENT_THIRD_PARTY_PACKAGES,
CONF_TRACING,
CONF_TRACING_SAMPLE_RATE,
DOMAIN,
)
from homeassistant.const import __version__ as current_version
from homeassistant.core import HomeAssistant
from tests.async_mock import MagicMock, Mock, patch
from tests.common import MockConfigEntry
async def test_setup_entry(hass: HomeAssistant) -> None:
    """Test integration setup from entry."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_DSN: "http://public@example.com/1", CONF_ENVIRONMENT: "production"},
    )
    entry.add_to_hass(hass)
    # Patch every Sentry SDK integration class so no real telemetry is set up.
    with patch(
        "homeassistant.components.sentry.AioHttpIntegration"
    ) as sentry_aiohttp_mock, patch(
        "homeassistant.components.sentry.SqlalchemyIntegration"
    ) as sentry_sqlalchemy_mock, patch(
        "homeassistant.components.sentry.LoggingIntegration"
    ) as sentry_logging_mock, patch(
        "homeassistant.components.sentry.sentry_sdk"
    ) as sentry_mock:
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    # Test CONF_ENVIRONMENT is migrated to entry options
    assert CONF_ENVIRONMENT not in entry.data
    assert CONF_ENVIRONMENT in entry.options
    assert entry.options[CONF_ENVIRONMENT] == "production"
    assert sentry_logging_mock.call_count == 1
    # NOTE(review): `called_once_with` is not a real Mock assertion method
    # (that is `assert_called_once_with`); attribute access on a Mock returns
    # a truthy child Mock, so this assert always passes — confirm intent.
    assert sentry_logging_mock.called_once_with(
        level=logging.WARNING, event_level=logging.WARNING
    )
    assert sentry_aiohttp_mock.call_count == 1
    assert sentry_sqlalchemy_mock.call_count == 1
    assert sentry_mock.init.call_count == 1
    # Inspect the keyword arguments passed to sentry_sdk.init().
    call_args = sentry_mock.init.call_args[1]
    assert set(call_args) == {
        "dsn",
        "environment",
        "integrations",
        "release",
        "before_send",
    }
    assert call_args["dsn"] == "http://public@example.com/1"
    assert call_args["environment"] == "production"
    assert call_args["integrations"] == [
        sentry_logging_mock.return_value,
        sentry_aiohttp_mock.return_value,
        sentry_sqlalchemy_mock.return_value,
    ]
    assert call_args["release"] == current_version
    assert call_args["before_send"]
async def test_setup_entry_with_tracing(hass: HomeAssistant) -> None:
    """Test integration setup from entry with tracing enabled."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_DSN: "http://public@example.com/1"},
        options={CONF_TRACING: True, CONF_TRACING_SAMPLE_RATE: 0.5},
    )
    entry.add_to_hass(hass)
    with patch("homeassistant.components.sentry.AioHttpIntegration"), patch(
        "homeassistant.components.sentry.SqlalchemyIntegration"
    ), patch("homeassistant.components.sentry.LoggingIntegration"), patch(
        "homeassistant.components.sentry.sentry_sdk"
    ) as sentry_mock:
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    # With tracing enabled, sentry_sdk.init() must also receive the
    # traces_sample_rate option from the entry options.
    call_args = sentry_mock.init.call_args[1]
    assert set(call_args) == {
        "dsn",
        "environment",
        "integrations",
        "release",
        "before_send",
        "traces_sample_rate",
    }
    assert call_args["traces_sample_rate"] == 0.5
@pytest.mark.parametrize(
    "version,channel",
    [
        # (Home Assistant version string, expected release channel)
        ("0.115.0.dev20200815", "nightly"),
        ("0.115.0", "stable"),
        ("0.115.0b4", "beta"),
        ("0.115.0dev0", "dev"),
    ],
)
async def test_get_channel(version, channel) -> None:
    """Test if channel detection works from Home Assistant version number."""
    assert get_channel(version) == channel
async def test_process_before_send(hass: HomeAssistant):
    """Test regular use of the Sentry process before sending function."""
    hass.config.components.add("puppies")
    hass.config.components.add("a_integration")
    # These should not show up in the result.
    hass.config.components.add("puppies.light")
    hass.config.components.add("auth")
    result = process_before_send(
        hass,
        options={},
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=["ironing_robot", "fridge_opener"],
        event={},
        hint={},
    )
    assert result
    assert result["tags"]
    assert result["contexts"]
    # NOTE(review): the assert above is duplicated in the original source.
    assert result["contexts"]
    ha_context = result["contexts"]["Home Assistant"]
    assert ha_context["channel"] == "test"
    # Component/custom-component lists are rendered sorted, newline-joined.
    assert ha_context["custom_components"] == "fridge_opener\nironing_robot"
    assert ha_context["integrations"] == "a_integration\npuppies"
    tags = result["tags"]
    assert tags["channel"] == "test"
    assert tags["uuid"] == "12345"
    assert tags["installation_type"] == "pytest"
    user = result["user"]
    assert user["id"] == "12345"
async def test_event_with_platform_context(hass: HomeAssistant):
    """Test extraction of platform context information during Sentry events."""
    # Simulate an event raised while a core (non-custom) platform is active.
    current_platform_mock = Mock()
    current_platform_mock.get().platform_name = "hue"
    current_platform_mock.get().domain = "light"
    with patch(
        "homeassistant.components.sentry.entity_platform.current_platform",
        new=current_platform_mock,
    ):
        result = process_before_send(
            hass,
            options={},
            channel="test",
            huuid="12345",
            system_info={"installation_type": "pytest"},
            custom_components=["ironing_robot"],
            event={},
            hint={},
        )
    assert result
    assert result["tags"]["integration"] == "hue"
    assert result["tags"]["platform"] == "light"
    assert result["tags"]["custom_component"] == "no"
    # Now simulate the same with a custom component's platform active.
    current_platform_mock.get().platform_name = "ironing_robot"
    current_platform_mock.get().domain = "switch"
    with patch(
        "homeassistant.components.sentry.entity_platform.current_platform",
        new=current_platform_mock,
    ):
        result = process_before_send(
            hass,
            options={CONF_EVENT_CUSTOM_COMPONENTS: True},
            channel="test",
            huuid="12345",
            system_info={"installation_type": "pytest"},
            custom_components=["ironing_robot"],
            event={},
            hint={},
        )
    assert result
    assert result["tags"]["integration"] == "ironing_robot"
    assert result["tags"]["platform"] == "switch"
    assert result["tags"]["custom_component"] == "yes"
@pytest.mark.parametrize(
    "logger,tags",
    [
        # (logger name on the event, tags expected to be extracted from it)
        ("adguard", {"package": "adguard"}),
        (
            "homeassistant.components.hue.coordinator",
            {"integration": "hue", "custom_component": "no"},
        ),
        (
            "homeassistant.components.hue.light",
            {"integration": "hue", "platform": "light", "custom_component": "no"},
        ),
        (
            "homeassistant.components.ironing_robot.switch",
            {
                "integration": "ironing_robot",
                "platform": "switch",
                "custom_component": "yes",
            },
        ),
        (
            "homeassistant.components.ironing_robot",
            {"integration": "ironing_robot", "custom_component": "yes"},
        ),
        ("homeassistant.helpers.network", {"helpers": "network"}),
        ("tuyapi.test", {"package": "tuyapi"}),
    ],
)
async def test_logger_event_extraction(hass: HomeAssistant, logger, tags):
    """Test extraction of information from Sentry logger events."""
    result = process_before_send(
        hass,
        options={
            CONF_EVENT_CUSTOM_COMPONENTS: True,
            CONF_EVENT_THIRD_PARTY_PACKAGES: True,
        },
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=["ironing_robot"],
        event={"logger": logger},
        hint={},
    )
    assert result
    # Base tags are always present; logger-derived tags are merged in.
    assert result["tags"] == {
        "channel": "test",
        "uuid": "12345",
        "installation_type": "pytest",
        **tags,
    }
@pytest.mark.parametrize(
    "logger,options,event",
    [
        # (logger name, options dict, whether the event should be forwarded)
        ("adguard", {CONF_EVENT_THIRD_PARTY_PACKAGES: True}, True),
        ("adguard", {CONF_EVENT_THIRD_PARTY_PACKAGES: False}, False),
        (
            "homeassistant.components.ironing_robot.switch",
            {CONF_EVENT_CUSTOM_COMPONENTS: True},
            True,
        ),
        (
            "homeassistant.components.ironing_robot.switch",
            {CONF_EVENT_CUSTOM_COMPONENTS: False},
            False,
        ),
    ],
)
async def test_filter_log_events(hass: HomeAssistant, logger, options, event):
    """Test filtering of events based on configuration options."""
    result = process_before_send(
        hass,
        options=options,
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=["ironing_robot"],
        event={"logger": logger},
        hint={},
    )
    # Returning None from before_send drops the event entirely.
    if event:
        assert result
    else:
        assert result is None
@pytest.mark.parametrize(
    "handled,options,event",
    [
        # (event "handled" tag, options dict, whether event is forwarded)
        ("yes", {CONF_EVENT_HANDLED: True}, True),
        ("yes", {CONF_EVENT_HANDLED: False}, False),
        ("no", {CONF_EVENT_HANDLED: False}, True),
        ("no", {CONF_EVENT_HANDLED: True}, True),
    ],
)
async def test_filter_handled_events(hass: HomeAssistant, handled, options, event):
    """Tests filtering of handled events based on configuration options."""
    # Fake a Sentry event object exposing only a "tags" member.
    # NOTE(review): assigning a list to __iter__ looks dubious (it is not a
    # callable); the containment behavior relies on __contains__ below.
    event_mock = MagicMock()
    event_mock.__iter__ = ["tags"]
    event_mock.__contains__ = lambda _, val: val == "tags"
    event_mock.tags = {"handled": handled}
    result = process_before_send(
        hass,
        options=options,
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=[],
        event=event_mock,
        hint={},
    )
    if event:
        assert result
    else:
        assert result is None
| apache-2.0 |
larroy/clearskies_core | tools/gyp/test/mac/gyptest-rebuild.py | 299 | 1260 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are rebuilt correctly.
"""
import TestGyp
import sys
# Entire test only runs on macOS; on other platforms the script is a no-op.
if sys.platform == 'darwin':
    test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

    CHDIR = 'rebuild'
    test.run_gyp('test.gyp', chdir=CHDIR)
    test.build('test.gyp', 'test_app', chdir=CHDIR)

    # Touch a source file, rebuild, and check that the app target is up-to-date.
    test.touch('rebuild/main.c')
    test.build('test.gyp', 'test_app', chdir=CHDIR)
    test.up_to_date('test.gyp', 'test_app', chdir=CHDIR)

    # Xcode runs postbuilds on every build, so targets with postbuilds are
    # never marked as up_to_date.
    if test.format != 'xcode':
        # Same for a framework bundle.
        test.build('test.gyp', 'test_framework_postbuilds', chdir=CHDIR)
        test.up_to_date('test.gyp', 'test_framework_postbuilds', chdir=CHDIR)

        # Test that an app bundle with a postbuild that touches the app binary needs
        # to be built only once.
        test.build('test.gyp', 'test_app_postbuilds', chdir=CHDIR)
        test.up_to_date('test.gyp', 'test_app_postbuilds', chdir=CHDIR)

    test.pass_test()
| lgpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/elan/Error_Email_Backup.py | 2 | 4409 | import os
from email.MIMEBase import MIMEBase
from email import Encoders
import smtplib
from elan import ElanSettings
from email.mime.multipart import MIMEMultipart
from email.MIMEText import MIMEText
from elan.ElanSettings import Script_Runner_Log,Say_Log_File_Path
Say_Log_File = Say_Log_File_Path
zip_file = r"C:\Elan_Tools\ImageScripter\ProgramData\error"
send_from ="kennyshay123test@gmail.com"
send_from ="kennyshay123test@gmail.com"
send_from_password = "corebrands123"
send_to = "kennyshay123@gmail.com"
recipients = ["kennyshay123@gmail.com",
"ben.bickell@corebrands.com",
"anthony.nelson@nortekcontrol.com",
"elantestertools@gmail.com",
"ken.shay@corebrands.com"]
#recipients = ["kennyshay123@gmail.com","elantestertools@gmail.com"]
class Error_Email_Class():
    """Compose a failure-report e-mail from recent log output plus a zip attachment.

    The body is built from the tail of the "say" log and the script-runner log
    (paths come from elan.ElanSettings).  NOTE(review): the SMTP login/send
    sequence is intentionally disabled (it was dead code in the original);
    Send() only opens the connection.  NOTE(review): the module-level
    credentials (send_from / send_from_password) are hard-coded in source —
    they should be moved to an environment variable or secret store.
    """

    def __init__(self, SCRIPT_NAME):
        """Remember the failing script's name and this machine's name.

        Args:
            SCRIPT_NAME: Name of the script whose failure is being reported.
        """
        self.SCRIPT_NAME = SCRIPT_NAME
        # COMPUTERNAME is a Windows environment variable; this raises
        # KeyError on other platforms.
        self.compName = str(os.environ['COMPUTERNAME'])

    def Send(self):
        """Build the failure e-mail (log excerpts + zip attachment).

        Returns None.  The final sendmail call is currently disabled; see the
        comment at the bottom of this method.
        """
        body_bottom = ''
        # Tail of the "say" log: last 50 lines, newest first, with the most
        # recent line flagged so it stands out in the e-mail body.
        with open(Say_Log_File, 'r') as log_file:
            say_lines = log_file.read().split('\n')
        last_lines = say_lines[-50:]
        newest = last_lines.pop()
        last_lines.append(newest + " <----------------------Last")
        last_lines.reverse()
        body_top = ''.join(line + ',' + '\r\n' for line in last_lines)
        # Script-runner log: print both halves around the section separator
        # for console debugging, then keep the newest 100 lines (reversed).
        with open(Script_Runner_Log, 'r') as log_file:
            body_log = log_file.read()
        sections = body_log.split('_ _ _ _ _ _ _ _ _ _ ')
        print("############TWO#################")
        print(sections[0])
        print("#################################")
        print("############THREE#################")
        print(sections[1])
        print("#################################")
        log_lines = body_log.split('\n')[::-1][:100]
        log_lines.append('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
        body_log = "\n".join(log_lines)
        body_Text = body_top + '\n' + body_log + '\n' + body_bottom
        # Assemble the MIME message.
        msg = MIMEMultipart()
        msg['From'] = send_from
        msg['To'] = ", ".join(recipients)
        msg['Date'] = " Use any date time module to insert or use email.utils formatdate"
        msg['Subject'] = 'FAIL ( ' + ElanSettings.Elan_Build + ' ) ' + self.compName + ' ' + self.SCRIPT_NAME
        msg.attach(MIMEText(str(body_Text)))  # attach the plain-text body
        # Zip attachment; fall back to an empty placeholder if it is missing.
        part = MIMEBase('application', "octet-stream")
        zip_file_with_extention = zip_file + ".zip"
        try:
            fo = open(zip_file_with_extention, "rb")
        except Exception as e:
            # Best-effort: report, create an empty placeholder, retry.
            print("Exception 112 -> " + str(e))
            open(zip_file_with_extention, 'w').close()
            fo = open(zip_file_with_extention, "rb")
        part.set_payload(fo.read())
        fo.close()  # fix: this handle was previously leaked
        Encoders.encode_base64(part)
        part.add_header('Content-Disposition',
                        'attachment; filename="%s"' % os.path.basename(zip_file_with_extention))
        msg.attach(part)
        # Sending is intentionally disabled; only the connection is opened
        # (preserved from the original).  To enable delivery:
        #   server.starttls()
        #   server.login(send_from, send_from_password)
        #   sent = server.sendmail(send_from, recipients, msg.as_string())
        #   server.close()
        server = smtplib.SMTP("smtp.gmail.com", 587)
if __name__ == "__main__":
    # Manual smoke test: compose a report for a placeholder script name
    # (Send() opens the SMTP connection but the actual send is disabled).
    SCRIPT_NAME = "Default"
    #Send_Certified_By_Automation_Email()
    Error_Email = Error_Email_Class(SCRIPT_NAME)
Error_Email.Send() | gpl-3.0 |
ProtractorNinja/qutebrowser | tests/unit/utils/test_qtutils.py | 6 | 34264 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.utils.qtutils."""
import io
import os
import sys
import operator
import os.path
try:
from test import test_file # pylint: disable=no-name-in-module
except ImportError:
# Debian patches Python to remove the tests...
test_file = None
import pytest
import unittest
import unittest.mock
from PyQt5.QtCore import (QDataStream, QPoint, QUrl, QByteArray, QIODevice,
QTimer, QBuffer, QFile, QProcess, QFileDevice)
from qutebrowser import qutebrowser
from qutebrowser.utils import qtutils
import overflow_test_cases
@pytest.mark.parametrize('qversion, version, op, expected', [
('5.4.0', '5.4.0', operator.ge, True),
('5.4.0', '5.4.0', operator.eq, True),
('5.4.0', '5.4', operator.eq, True),
('5.4.1', '5.4', operator.ge, True),
('5.3.2', '5.4', operator.ge, False),
('5.3.0', '5.3.2', operator.ge, False),
])
def test_version_check(monkeypatch, qversion, version, op, expected):
"""Test for version_check().
Args:
monkeypatch: The pytest monkeypatch fixture.
qversion: The version to set as fake qVersion().
version: The version to compare with.
op: The operator to use when comparing.
expected: The expected result.
"""
monkeypatch.setattr('qutebrowser.utils.qtutils.qVersion', lambda: qversion)
assert qtutils.version_check(version, op) == expected
class TestCheckOverflow:
"""Test check_overflow."""
@pytest.mark.parametrize('ctype, val',
overflow_test_cases.iter_good_values())
def test_good_values(self, ctype, val):
"""Test values which are inside bounds."""
qtutils.check_overflow(val, ctype)
@pytest.mark.parametrize('ctype, val',
[(ctype, val) for (ctype, val, _) in
overflow_test_cases.iter_bad_values()])
def test_bad_values_fatal(self, ctype, val):
"""Test values which are outside bounds with fatal=True."""
with pytest.raises(OverflowError):
qtutils.check_overflow(val, ctype)
@pytest.mark.parametrize('ctype, val, repl',
overflow_test_cases.iter_bad_values())
def test_bad_values_nonfatal(self, ctype, val, repl):
"""Test values which are outside bounds with fatal=False."""
newval = qtutils.check_overflow(val, ctype, fatal=False)
assert newval == repl
class TestGetQtArgs:
"""Tests for get_args."""
@pytest.fixture
def parser(self, mocker):
"""Fixture to provide an argparser.
Monkey-patches .exit() of the argparser so it doesn't exit on errors.
"""
parser = qutebrowser.get_argparser()
mocker.patch.object(parser, 'exit', side_effect=Exception)
return parser
@pytest.mark.parametrize('args, expected', [
# No Qt arguments
(['--debug'], [sys.argv[0]]),
# Qt flag
(['--debug', '--qt-reverse', '--nocolor'], [sys.argv[0], '-reverse']),
# Qt argument with value
(['--qt-stylesheet', 'foo'], [sys.argv[0], '-stylesheet', 'foo']),
])
def test_qt_args(self, args, expected, parser):
"""Test commandline with no Qt arguments given."""
parsed = parser.parse_args(args)
assert qtutils.get_args(parsed) == expected
def test_qt_both(self, parser):
"""Test commandline with a Qt argument and flag."""
args = parser.parse_args(['--qt-stylesheet', 'foobar', '--qt-reverse'])
qt_args = qtutils.get_args(args)
assert qt_args[0] == sys.argv[0]
assert '-reverse' in qt_args
assert '-stylesheet' in qt_args
assert 'foobar' in qt_args
@pytest.mark.parametrize('os_name, qversion, expected', [
('linux', '5.2.1', True), # unaffected OS
('linux', '5.4.1', True), # unaffected OS
('nt', '5.2.1', False),
('nt', '5.3.0', True), # unaffected Qt version
('nt', '5.4.1', True), # unaffected Qt version
])
def test_check_print_compat(os_name, qversion, expected, monkeypatch):
"""Test check_print_compat.
Args:
os_name: The fake os.name to set.
qversion: The fake qVersion() to set.
expected: The expected return value.
"""
monkeypatch.setattr('qutebrowser.utils.qtutils.os.name', os_name)
monkeypatch.setattr('qutebrowser.utils.qtutils.qVersion', lambda: qversion)
assert qtutils.check_print_compat() == expected
class QtObject:
    """Fake Qt object for test_ensure.

    Mimics the minimal Qt surface (isValid/isNull/errorString) with
    configurable answers so both good and bad paths can be exercised.
    """
    def __init__(self, valid=True, null=False, error=None):
        # Keep all configured answers in one dict.  An error of None means
        # errorString() is "unavailable" and raises AttributeError, like a
        # Qt object that lacks the method.
        self._state = {'valid': valid, 'null': null, 'error': error}
    def __repr__(self):
        return '<QtObject>'
    def errorString(self):
        """Return the configured error, or raise AttributeError if unset."""
        message = self._state['error']
        if message is None:
            raise AttributeError
        return message
    def isValid(self):
        return self._state['valid']
    def isNull(self):
        return self._state['null']
@pytest.mark.parametrize('func_name, obj, raising, exc_reason, exc_str', [
# ensure_valid, good examples
('ensure_valid', QtObject(valid=True, null=True), False, None, None),
('ensure_valid', QtObject(valid=True, null=False), False, None, None),
# ensure_valid, bad examples
('ensure_valid', QtObject(valid=False, null=True), True, None,
'<QtObject> is not valid'),
('ensure_valid', QtObject(valid=False, null=False), True, None,
'<QtObject> is not valid'),
('ensure_valid', QtObject(valid=False, null=True, error='Test'), True,
'Test', '<QtObject> is not valid: Test'),
# ensure_not_null, good examples
('ensure_not_null', QtObject(valid=True, null=False), False, None, None),
('ensure_not_null', QtObject(valid=False, null=False), False, None, None),
# ensure_not_null, bad examples
('ensure_not_null', QtObject(valid=True, null=True), True, None,
'<QtObject> is null'),
('ensure_not_null', QtObject(valid=False, null=True), True, None,
'<QtObject> is null'),
('ensure_not_null', QtObject(valid=False, null=True, error='Test'), True,
'Test', '<QtObject> is null: Test'),
])
def test_ensure(func_name, obj, raising, exc_reason, exc_str):
"""Test ensure_valid and ensure_not_null.
The function is parametrized as they do nearly the same.
Args:
func_name: The name of the function to call.
obj: The object to test with.
raising: Whether QtValueError is expected to be raised.
exc_reason: The expected .reason attribute of the exception.
exc_str: The expected string of the exception.
"""
func = getattr(qtutils, func_name)
if raising:
with pytest.raises(qtutils.QtValueError) as excinfo:
func(obj)
assert excinfo.value.reason == exc_reason
assert str(excinfo.value) == exc_str
else:
func(obj)
@pytest.mark.parametrize('status, raising, message', [
(QDataStream.Ok, False, None),
(QDataStream.ReadPastEnd, True, "The data stream has read past the end of "
"the data in the underlying device."),
(QDataStream.ReadCorruptData, True, "The data stream has read corrupt "
"data."),
(QDataStream.WriteFailed, True, "The data stream cannot write to the "
"underlying device."),
])
def test_check_qdatastream(status, raising, message):
"""Test check_qdatastream.
Args:
status: The status to set on the QDataStream we test with.
raising: Whether check_qdatastream is expected to raise OSError.
message: The expected exception string.
"""
stream = QDataStream()
stream.setStatus(status)
if raising:
with pytest.raises(OSError) as excinfo:
qtutils.check_qdatastream(stream)
assert str(excinfo.value) == message
else:
qtutils.check_qdatastream(stream)
def test_qdatastream_status_count():
    """Guard against QDataStream.Status growing new members.

    test_check_qdatastream covers exactly four status values; if Qt adds a
    fifth, this fails so the handling can be extended.
    """
    members = [attr for attr in vars(QDataStream).values()
               if isinstance(attr, QDataStream.Status)]
    assert len(members) == 4
@pytest.mark.parametrize('obj', [
    QPoint(23, 42),
    QUrl('http://www.qutebrowser.org/'),
])
def test_serialize(obj):
    """Test a serialize/deserialize round trip.
    Args:
        obj: The object to test with.
    """
    # Deserialize into a default-constructed instance of the same type and
    # check it compares equal to the original.
    new_obj = type(obj)()
    qtutils.deserialize(qtutils.serialize(obj), new_obj)
    assert new_obj == obj
class TestSerializeStream:
"""Tests for serialize_stream and deserialize_stream."""
def _set_status(self, stream, status):
"""Helper function so mocks can set an error status when used."""
stream.status.return_value = status
@pytest.fixture
def stream_mock(self):
"""Fixture providing a QDataStream-like mock."""
m = unittest.mock.MagicMock(spec=QDataStream)
m.status.return_value = QDataStream.Ok
return m
def test_serialize_pre_error_mock(self, stream_mock):
"""Test serialize_stream with an error already set."""
stream_mock.status.return_value = QDataStream.ReadCorruptData
with pytest.raises(OSError) as excinfo:
qtutils.serialize_stream(stream_mock, QPoint())
assert not stream_mock.__lshift__.called
assert str(excinfo.value) == "The data stream has read corrupt data."
def test_serialize_post_error_mock(self, stream_mock):
"""Test serialize_stream with an error while serializing."""
obj = QPoint()
stream_mock.__lshift__.side_effect = lambda _other: self._set_status(
stream_mock, QDataStream.ReadCorruptData)
with pytest.raises(OSError) as excinfo:
qtutils.serialize_stream(stream_mock, obj)
assert stream_mock.__lshift__.called_once_with(obj)
assert str(excinfo.value) == "The data stream has read corrupt data."
def test_deserialize_pre_error_mock(self, stream_mock):
"""Test deserialize_stream with an error already set."""
stream_mock.status.return_value = QDataStream.ReadCorruptData
with pytest.raises(OSError) as excinfo:
qtutils.deserialize_stream(stream_mock, QPoint())
assert not stream_mock.__rshift__.called
assert str(excinfo.value) == "The data stream has read corrupt data."
def test_deserialize_post_error_mock(self, stream_mock):
"""Test deserialize_stream with an error while deserializing."""
obj = QPoint()
stream_mock.__rshift__.side_effect = lambda _other: self._set_status(
stream_mock, QDataStream.ReadCorruptData)
with pytest.raises(OSError) as excinfo:
qtutils.deserialize_stream(stream_mock, obj)
assert stream_mock.__rshift__.called_once_with(obj)
assert str(excinfo.value) == "The data stream has read corrupt data."
def test_round_trip_real_stream(self):
"""Test a round trip with a real QDataStream."""
src_obj = QPoint(23, 42)
dest_obj = QPoint()
data = QByteArray()
write_stream = QDataStream(data, QIODevice.WriteOnly)
qtutils.serialize_stream(write_stream, src_obj)
read_stream = QDataStream(data, QIODevice.ReadOnly)
qtutils.deserialize_stream(read_stream, dest_obj)
assert src_obj == dest_obj
@pytest.mark.qt_log_ignore('^QIODevice::write.*: ReadOnly device')
def test_serialize_readonly_stream(self):
"""Test serialize_stream with a read-only stream."""
data = QByteArray()
stream = QDataStream(data, QIODevice.ReadOnly)
with pytest.raises(OSError) as excinfo:
qtutils.serialize_stream(stream, QPoint())
assert str(excinfo.value) == ("The data stream cannot write to the "
"underlying device.")
@pytest.mark.qt_log_ignore('QIODevice::read.*: WriteOnly device')
def test_deserialize_writeonly_stream(self):
"""Test deserialize_stream with a write-only stream."""
data = QByteArray()
obj = QPoint()
stream = QDataStream(data, QIODevice.WriteOnly)
with pytest.raises(OSError) as excinfo:
qtutils.deserialize_stream(stream, obj)
assert str(excinfo.value) == ("The data stream has read past the end "
"of the data in the underlying device.")
class SavefileTestException(Exception):
    """Exception raised in TestSavefileOpen for testing.

    A dedicated type guarantees the tests catch exactly the exception they
    planted inside the savefile_open block, not an unrelated error.
    """
    pass
class TestSavefileOpen:
"""Tests for savefile_open."""
## Tests with a mock testing that the needed methods are called.
@pytest.yield_fixture
def qsavefile_mock(self, mocker):
"""Mock for QSaveFile."""
m = mocker.patch('qutebrowser.utils.qtutils.QSaveFile')
instance = m()
yield instance
instance.commit.assert_called_once_with()
def test_mock_open_error(self, qsavefile_mock):
"""Test with a mock and a failing open()."""
qsavefile_mock.open.return_value = False
qsavefile_mock.errorString.return_value = "Hello World"
with pytest.raises(OSError) as excinfo:
with qtutils.savefile_open('filename'):
pass
qsavefile_mock.open.assert_called_once_with(QIODevice.WriteOnly)
qsavefile_mock.cancelWriting.assert_called_once_with()
assert str(excinfo.value) == "Hello World"
def test_mock_exception(self, qsavefile_mock):
"""Test with a mock and an exception in the block."""
qsavefile_mock.open.return_value = True
with pytest.raises(SavefileTestException):
with qtutils.savefile_open('filename'):
raise SavefileTestException
qsavefile_mock.open.assert_called_once_with(QIODevice.WriteOnly)
qsavefile_mock.cancelWriting.assert_called_once_with()
def test_mock_commit_failed(self, qsavefile_mock):
"""Test with a mock and an exception in the block."""
qsavefile_mock.open.return_value = True
qsavefile_mock.commit.return_value = False
with pytest.raises(OSError) as excinfo:
with qtutils.savefile_open('filename'):
pass
qsavefile_mock.open.assert_called_once_with(QIODevice.WriteOnly)
assert not qsavefile_mock.cancelWriting.called
assert not qsavefile_mock.errorString.called
assert str(excinfo.value) == "Commit failed!"
def test_mock_successful(self, qsavefile_mock):
"""Test with a mock and a successful write."""
qsavefile_mock.open.return_value = True
qsavefile_mock.errorString.return_value = "Hello World"
qsavefile_mock.commit.return_value = True
qsavefile_mock.write.side_effect = len
qsavefile_mock.isOpen.return_value = True
with qtutils.savefile_open('filename') as f:
f.write("Hello World")
qsavefile_mock.open.assert_called_once_with(QIODevice.WriteOnly)
assert not qsavefile_mock.cancelWriting.called
qsavefile_mock.write.assert_called_once_with(b"Hello World")
## Tests with real files
@pytest.mark.parametrize('data', ["Hello World", "Snowman! ☃"])
def test_utf8(self, data, tmpdir):
"""Test with UTF8 data."""
filename = tmpdir / 'foo'
filename.write("Old data")
with qtutils.savefile_open(str(filename)) as f:
f.write(data)
assert tmpdir.listdir() == [filename]
assert filename.read_text(encoding='utf-8') == data
def test_binary(self, tmpdir):
"""Test with binary data."""
filename = tmpdir / 'foo'
with qtutils.savefile_open(str(filename), binary=True) as f:
f.write(b'\xde\xad\xbe\xef')
assert tmpdir.listdir() == [filename]
assert filename.read_binary() == b'\xde\xad\xbe\xef'
def test_exception(self, tmpdir):
"""Test with an exception in the block."""
filename = tmpdir / 'foo'
filename.write("Old content")
with pytest.raises(SavefileTestException):
with qtutils.savefile_open(str(filename)) as f:
f.write("Hello World!")
raise SavefileTestException
assert tmpdir.listdir() == [filename]
assert filename.read_text(encoding='utf-8') == "Old content"
def test_existing_dir(self, tmpdir):
"""Test with the filename already occupied by a directory."""
filename = tmpdir / 'foo'
filename.mkdir()
with pytest.raises(OSError) as excinfo:
with qtutils.savefile_open(str(filename)):
pass
errors = ["Filename refers to a directory", # Qt >= 5.4
"Commit failed!"] # older Qt versions
assert str(excinfo.value) in errors
assert tmpdir.listdir() == [filename]
def test_failing_commit(self, tmpdir):
"""Test with the file being closed before comitting."""
filename = tmpdir / 'foo'
with pytest.raises(OSError) as excinfo:
with qtutils.savefile_open(str(filename), binary=True) as f:
f.write(b'Hello')
f.dev.commit() # provoke failing "real" commit
assert str(excinfo.value) == "Commit failed!"
assert tmpdir.listdir() == [filename]
def test_line_endings(self, tmpdir):
"""Make sure line endings are translated correctly.
See https://github.com/The-Compiler/qutebrowser/issues/309
"""
filename = tmpdir / 'foo'
with qtutils.savefile_open(str(filename)) as f:
f.write('foo\nbar\nbaz')
data = filename.read_binary()
if os.name == 'nt':
assert data == b'foo\r\nbar\r\nbaz'
else:
assert data == b'foo\nbar\nbaz'
@pytest.mark.parametrize('orgname, expected', [(None, ''), ('test', 'test')])
def test_unset_organization(qapp, orgname, expected):
"""Test unset_organization.
Args:
orgname: The organizationName to set initially.
expected: The organizationName which is expected when reading back.
"""
qapp.setOrganizationName(orgname)
assert qapp.organizationName() == expected # sanity check
with qtutils.unset_organization():
assert qapp.organizationName() == ''
assert qapp.organizationName() == expected
if test_file is not None:
# If we were able to import Python's test_file module, we run some code
# here which defines unittest TestCases to run the python tests over
# PyQIODevice.
@pytest.yield_fixture(scope='session', autouse=True)
def clean_up_python_testfile():
"""Clean up the python testfile after tests if tests didn't."""
yield
try:
os.remove(test_file.TESTFN)
except FileNotFoundError:
pass
class PyIODeviceTestMixin:
"""Some helper code to run Python's tests with PyQIODevice.
Attributes:
_data: A QByteArray containing the data in memory.
f: The opened PyQIODevice.
"""
def setUp(self):
"""Set up self.f using a PyQIODevice instead of a real file."""
self._data = QByteArray()
self.f = self.open(test_file.TESTFN, 'wb')
def open(self, _fname, mode):
"""Open an in-memory PyQIODevice instead of a real file."""
modes = {
'wb': QIODevice.WriteOnly | QIODevice.Truncate,
'w': QIODevice.WriteOnly | QIODevice.Text | QIODevice.Truncate,
'rb': QIODevice.ReadOnly,
'r': QIODevice.ReadOnly | QIODevice.Text,
}
try:
qt_mode = modes[mode]
except KeyError:
raise ValueError("Invalid mode {}!".format(mode))
f = QBuffer(self._data)
f.open(qt_mode)
qiodev = qtutils.PyQIODevice(f)
# Make sure tests using name/mode don't blow up.
qiodev.name = test_file.TESTFN
qiodev.mode = mode
# Create empty TESTFN file because the Python tests try to unlink
# it.after the test.
open(test_file.TESTFN, 'w', encoding='utf-8').close()
return qiodev
class PyAutoFileTests(PyIODeviceTestMixin, test_file.AutoFileTests,
unittest.TestCase):
"""Unittest testcase to run Python's AutoFileTests."""
def testReadinto_text(self):
"""Skip this test as BufferedIOBase seems to fail it."""
pass
class PyOtherFileTests(PyIODeviceTestMixin, test_file.OtherFileTests,
unittest.TestCase):
"""Unittest testcase to run Python's OtherFileTests."""
def testSetBufferSize(self):
"""Skip this test as setting buffer size is unsupported."""
pass
def testTruncateOnWindows(self):
"""Skip this test truncating is unsupported."""
pass
class FailingQIODevice(QIODevice):
    """A fake QIODevice where reads/writes fail.

    Always reports itself open, readable and writable, so callers'
    precondition checks pass and the failing read/write paths are reached.
    """
    def isOpen(self):
        # Pretend to be open so operations are attempted at all.
        return True
    def isReadable(self):
        return True
    def isWritable(self):
        return True
    def write(self, _data):
        """Simulate failed write."""
        self.setErrorString("Writing failed")
        return -1
    def read(self, _maxsize):
        """Simulate failed read."""
        self.setErrorString("Reading failed")
        return None
    def readAll(self):
        # Delegate to read() so the same error string gets set.
        return self.read(0)
    def readLine(self, maxsize):
        return self.read(maxsize)
class TestPyQIODevice:
"""Tests for PyQIODevice."""
@pytest.yield_fixture
def pyqiodev(self):
"""Fixture providing a PyQIODevice with a QByteArray to test."""
data = QByteArray()
f = QBuffer(data)
qiodev = qtutils.PyQIODevice(f)
yield qiodev
qiodev.close()
@pytest.fixture
def pyqiodev_failing(self):
"""Fixture providing a PyQIODevice with a FailingQIODevice to test."""
failing = FailingQIODevice()
return qtutils.PyQIODevice(failing)
@pytest.mark.parametrize('method, args', [
('seek', [0]),
('flush', []),
('isatty', []),
('readline', []),
('tell', []),
('write', [b'']),
('read', []),
])
def test_closed_device(self, pyqiodev, method, args):
"""Test various methods with a closed device.
Args:
method: The name of the method to call.
args: The arguments to pass.
"""
func = getattr(pyqiodev, method)
with pytest.raises(ValueError) as excinfo:
func(*args)
assert str(excinfo.value) == "IO operation on closed device!"
@pytest.mark.parametrize('method', ['readline', 'read'])
def test_unreadable(self, pyqiodev, method):
"""Test methods with an unreadable device.
Args:
method: The name of the method to call.
"""
pyqiodev.open(QIODevice.WriteOnly)
func = getattr(pyqiodev, method)
with pytest.raises(OSError) as excinfo:
func()
assert str(excinfo.value) == "Trying to read unreadable file!"
def test_unwritable(self, pyqiodev):
"""Test writing with a read-only device."""
pyqiodev.open(QIODevice.ReadOnly)
with pytest.raises(OSError) as excinfo:
pyqiodev.write(b'')
assert str(excinfo.value) == "Trying to write to unwritable file!"
@pytest.mark.parametrize('data', [b'12345', b''])
def test_len(self, pyqiodev, data):
"""Test len()/__len__.
Args:
data: The data to write before checking if the length equals
len(data).
"""
pyqiodev.open(QIODevice.WriteOnly)
pyqiodev.write(data)
assert len(pyqiodev) == len(data)
def test_failing_open(self, tmpdir):
"""Test open() which fails (because it's an existant directory)."""
qf = QFile(str(tmpdir))
dev = qtutils.PyQIODevice(qf)
with pytest.raises(qtutils.QtOSError) as excinfo:
dev.open(QIODevice.WriteOnly)
assert excinfo.value.qt_errno == QFileDevice.OpenError
assert dev.closed
def test_fileno(self, pyqiodev):
with pytest.raises(io.UnsupportedOperation):
pyqiodev.fileno()
@pytest.mark.qt_log_ignore('^QBuffer::seek: Invalid pos:')
@pytest.mark.parametrize('offset, whence, pos, data, raising', [
(0, io.SEEK_SET, 0, b'1234567890', False),
(42, io.SEEK_SET, 0, b'1234567890', True),
(8, io.SEEK_CUR, 8, b'90', False),
(-5, io.SEEK_CUR, 0, b'1234567890', True),
(-2, io.SEEK_END, 8, b'90', False),
(2, io.SEEK_END, 0, b'1234567890', True),
(0, io.SEEK_END, 10, b'', False),
])
def test_seek_tell(self, pyqiodev, offset, whence, pos, data, raising):
"""Test seek() and tell().
The initial position when these tests run is 0.
Args:
offset: The offset to pass to .seek().
whence: The whence argument to pass to .seek().
pos: The expected position after seeking.
data: The expected data to read after seeking.
raising: Whether seeking should raise OSError.
"""
with pyqiodev.open(QIODevice.WriteOnly) as f:
f.write(b'1234567890')
pyqiodev.open(QIODevice.ReadOnly)
if raising:
with pytest.raises(OSError) as excinfo:
pyqiodev.seek(offset, whence)
assert str(excinfo.value) == "seek failed!"
else:
pyqiodev.seek(offset, whence)
assert pyqiodev.tell() == pos
assert pyqiodev.read() == data
def test_seek_unsupported(self, pyqiodev):
"""Test seeking with unsupported whence arguments."""
if hasattr(os, 'SEEK_HOLE'):
whence = os.SEEK_HOLE # pylint: disable=no-member
elif hasattr(os, 'SEEK_DATA'):
whence = os.SEEK_DATA # pylint: disable=no-member
else:
pytest.skip("Needs os.SEEK_HOLE or os.SEEK_DATA available.")
pyqiodev.open(QIODevice.ReadOnly)
with pytest.raises(io.UnsupportedOperation):
pyqiodev.seek(0, whence)
@pytest.mark.not_frozen
def test_qprocess(self, py_proc):
"""Test PyQIODevice with a QProcess which is non-sequential.
This also verifies seek() and tell() behave as expected.
"""
proc = QProcess()
proc.start(*py_proc('print("Hello World")'))
dev = qtutils.PyQIODevice(proc)
assert not dev.closed
with pytest.raises(OSError) as excinfo:
dev.seek(0)
assert str(excinfo.value) == 'Random access not allowed!'
with pytest.raises(OSError) as excinfo:
dev.tell()
assert str(excinfo.value) == 'Random access not allowed!'
proc.waitForFinished(1000)
proc.kill()
assert bytes(dev.read()).rstrip() == b'Hello World'
def test_truncate(self, pyqiodev):
with pytest.raises(io.UnsupportedOperation):
pyqiodev.truncate()
def test_closed(self, pyqiodev):
"""Test the closed attribute."""
assert pyqiodev.closed
pyqiodev.open(QIODevice.ReadOnly)
assert not pyqiodev.closed
pyqiodev.close()
assert pyqiodev.closed
def test_contextmanager(self, pyqiodev):
"""Make sure using the PyQIODevice as context manager works."""
assert pyqiodev.closed
with pyqiodev.open(QIODevice.ReadOnly) as f:
assert not f.closed
assert f is pyqiodev
assert pyqiodev.closed
def test_flush(self, pyqiodev):
"""Make sure flushing doesn't raise an exception."""
pyqiodev.open(QIODevice.WriteOnly)
pyqiodev.write(b'test')
pyqiodev.flush()
@pytest.mark.parametrize('method, ret', [
('isatty', False),
('seekable', True),
])
def test_bools(self, method, ret, pyqiodev):
"""Make sure simple bool arguments return the right thing.
Args:
method: The name of the method to call.
ret: The return value we expect.
"""
pyqiodev.open(QIODevice.WriteOnly)
func = getattr(pyqiodev, method)
assert func() == ret
@pytest.mark.parametrize('mode, readable, writable', [
(QIODevice.ReadOnly, True, False),
(QIODevice.ReadWrite, True, True),
(QIODevice.WriteOnly, False, True),
])
def test_readable_writable(self, mode, readable, writable, pyqiodev):
"""Test readable() and writable().
Args:
mode: The mode to open the PyQIODevice in.
readable: Whether the device should be readable.
writable: Whether the device should be writable.
"""
assert not pyqiodev.readable()
assert not pyqiodev.writable()
pyqiodev.open(mode)
assert pyqiodev.readable() == readable
assert pyqiodev.writable() == writable
@pytest.mark.parametrize('size, chunks', [
(-1, [b'one\n', b'two\n', b'three', b'']),
(0, [b'', b'', b'', b'']),
(2, [b'on', b'e\n', b'tw', b'o\n', b'th', b're', b'e']),
(10, [b'one\n', b'two\n', b'three', b'']),
])
def test_readline(self, size, chunks, pyqiodev):
"""Test readline() with different sizes.
Args:
size: The size to pass to readline()
chunks: A list of expected chunks to read.
"""
with pyqiodev.open(QIODevice.WriteOnly) as f:
f.write(b'one\ntwo\nthree')
pyqiodev.open(QIODevice.ReadOnly)
for i, chunk in enumerate(chunks, start=1):
print("Expecting chunk {}: {!r}".format(i, chunk))
assert pyqiodev.readline(size) == chunk
def test_write(self, pyqiodev):
"""Make sure writing and re-reading works."""
with pyqiodev.open(QIODevice.WriteOnly) as f:
f.write(b'foo\n')
f.write(b'bar\n')
pyqiodev.open(QIODevice.ReadOnly)
assert pyqiodev.read() == b'foo\nbar\n'
def test_write_error(self, pyqiodev_failing):
"""Test writing with FailingQIODevice."""
with pytest.raises(OSError) as excinfo:
pyqiodev_failing.write(b'x')
assert str(excinfo.value) == 'Writing failed'
@pytest.mark.posix
@pytest.mark.skipif(not os.path.exists('/dev/full'),
reason="Needs /dev/full.")
def test_write_error_real(self):
"""Test a real write error with /dev/full on supported systems."""
qf = QFile('/dev/full')
qf.open(QIODevice.WriteOnly | QIODevice.Unbuffered)
dev = qtutils.PyQIODevice(qf)
with pytest.raises(OSError) as excinfo:
dev.write(b'foo')
qf.close()
assert str(excinfo.value) == 'No space left on device'
@pytest.mark.parametrize('size, chunks', [
(-1, [b'1234567890']),
(0, [b'']),
(3, [b'123', b'456', b'789', b'0']),
(20, [b'1234567890'])
])
def test_read(self, size, chunks, pyqiodev):
"""Test reading with different sizes.
Args:
size: The size to pass to read()
chunks: A list of expected data chunks.
"""
with pyqiodev.open(QIODevice.WriteOnly) as f:
f.write(b'1234567890')
pyqiodev.open(QIODevice.ReadOnly)
for i, chunk in enumerate(chunks):
print("Expecting chunk {}: {!r}".format(i, chunk))
assert pyqiodev.read(size) == chunk
@pytest.mark.parametrize('method, args', [
('read', []),
('read', [5]),
('readline', []),
('readline', [5]),
])
def test_failing_reads(self, method, args, pyqiodev_failing):
"""Test reading with a FailingQIODevice.
Args:
method: The name of the method to call.
args: A list of arguments to pass.
"""
func = getattr(pyqiodev_failing, method)
with pytest.raises(OSError) as excinfo:
func(*args)
assert str(excinfo.value) == 'Reading failed'
@pytest.mark.usefixtures('qapp')
class TestEventLoop:
    """Tests for EventLoop.
    Attributes:
        loop: The EventLoop we're testing.
    """
    def _assert_executing(self):
        """Slot which gets called from timers to be sure the loop runs."""
        assert self.loop._executing
    def _double_exec(self):
        """Slot which gets called from timers to assert double-exec fails."""
        with pytest.raises(AssertionError):
            self.loop.exec_()
    def test_normal_exec(self):
        """Test exec_ without double-executing."""
        self.loop = qtutils.EventLoop()
        # The timers fire while the loop is running: first verify _executing
        # is set, then quit so the test terminates.
        QTimer.singleShot(100, self._assert_executing)
        QTimer.singleShot(200, self.loop.quit)
        self.loop.exec_()
        assert not self.loop._executing
    def test_double_exec(self):
        """Test double-executing."""
        self.loop = qtutils.EventLoop()
        # While running: verify executing, attempt a nested exec_ (which must
        # raise AssertionError), verify the loop still runs, then quit.
        QTimer.singleShot(100, self._assert_executing)
        QTimer.singleShot(200, self._double_exec)
        QTimer.singleShot(300, self._assert_executing)
        QTimer.singleShot(400, self.loop.quit)
        self.loop.exec_()
        assert not self.loop._executing
| gpl-3.0 |
patacrep/patacrep | patacrep/latex/detex.py | 1 | 2402 | """Render `very simple` TeX commands in a simple TeX code."""
import logging

LOGGER = logging.getLogger()

# Ordered replacement table mapping simple TeX commands to their plain
# unicode equivalent.  Replacements are applied sequentially, in order.
MATCH = [
    # Diacritics: a
    (r"\'a", "á"),
    (r"\'A", "Á"),
    (r"\`a", "à"),
    (r"\`A", "À"),
    (r"\^a", "â"),
    (r"\^A", "Â"),
    (r"\"a", "ä"),
    (r"\"A", "Ä"),
    # Diacritics: e
    (r"\'e", "é"),
    (r"\'E", "É"),
    (r"\`e", "è"),
    (r"\`E", "È"),
    (r"\^e", "ê"),
    (r"\^E", "Ê"),
    (r"\"e", "ë"),
    (r"\"E", "Ë"),
    # Diacritics: i
    (r"\'i", "í"),
    (r"\'I", "Í"),
    (r"\`i", "ì"),
    (r"\`I", "Ì"),
    (r"\^i", "î"),
    (r"\^I", "Î"),
    (r"\"i", "ï"),
    (r"\"I", "Ï"),
    (r"\'\i", "í"),
    (r"\'\I", "Í"),
    (r"\`\i", "ì"),
    (r"\`\I", "Ì"),
    (r"\^\i", "î"),
    (r"\^\I", "Î"),
    (r"\"\i", "ï"),
    (r"\"\I", "Ï"),
    # Diacritics: o
    (r"\'o", "ó"),
    (r"\'O", "Ó"),
    (r"\`o", "ò"),
    (r"\`O", "Ò"),
    (r"\^o", "ô"),
    (r"\^O", "Ô"),
    (r"\"o", "ö"),
    (r"\"O", "Ö"),
    # Diacritics: u
    (r"\'u", "ú"),
    (r"\'U", "Ú"),
    (r"\`u", "ù"),
    (r"\`U", "Ù"),
    (r"\^u", "û"),
    (r"\^U", "Û"),
    (r"\"u", "ü"),
    (r"\"U", "Ü"),
    # Cedille
    (r"\c c", "ç"),
    (r"\c C", "Ç"),
    # œ, æ
    (r"\oe", "œ"),
    (r"\OE", "Œ"),
    (r"\ae", "æ"),
    (r"\AE", "Æ"),
    # Spaces
    (r"\ ", " "),
    (r"\,", " "),
    (r"\~", " "),
    # IeC
    (r"\IeC ", ""),
    # Miscellaneous
    (r"\dots", "…"),
    (r"\%", "%"),
    (r"\&", "&"),
    (r"\_", "_"),
]


def detex(arg):
    """Render very simple TeX commands from argument.

    Argument can be:
    - a string: it is processed and returned stripped;
    - a list, dict or set: its values are processed recursively and a
      container of the same type is returned.

    Anything else is converted to a string first.  If a backslash remains
    after all substitutions, a warning is logged for the unhandled command.
    """
    if isinstance(arg, dict):
        # Idiomatic dict comprehension instead of dict([...]) over a list.
        return {key: detex(value) for key, value in arg.items()}
    elif isinstance(arg, list):
        return [detex(item) for item in arg]
    elif isinstance(arg, set):
        return set(detex(list(arg)))
    elif isinstance(arg, str):
        string = arg
        for (latex, plain) in MATCH:
            string = string.replace(latex, plain)
        if '\\' in string:
            # Lazy %-style logging: the message is only built if emitted.
            LOGGER.warning("Remaining command in string '%s'.", string)
        return string.strip()
    else:
        return detex(str(arg))
| gpl-2.0 |
brownman/selenium-webdriver | selenium/src/py/lib/docutils/parsers/rst/languages/zh_tw.py | 5 | 5164 | # -*- coding: utf-8 -*-
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 4229 $
# Date: $Date: 2005-12-23 00:46:16 +0100 (Fri, 23 Dec 2005) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Traditional Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'attention (translation required)': 'attention',
'caution (translation required)': 'caution',
'danger (translation required)': 'danger',
'error (translation required)': 'error',
'hint (translation required)': 'hint',
'important (translation required)': 'important',
'note (translation required)': 'note',
'tip (translation required)': 'tip',
'warning (translation required)': 'warning',
'admonition (translation required)': 'admonition',
'sidebar (translation required)': 'sidebar',
'topic (translation required)': 'topic',
'line-block (translation required)': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubric (translation required)': 'rubric',
'epigraph (translation required)': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote',
'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#'questions (translation required)': 'questions',
'table (translation required)': 'table',
'csv-table (translation required)': 'csv-table',
'list-table (translation required)': 'list-table',
#'qa (translation required)': 'questions',
#'faq (translation required)': 'questions',
'meta (translation required)': 'meta',
#'imagemap (translation required)': 'imagemap',
'image (translation required)': 'image',
'figure (translation required)': 'figure',
'include (translation required)': 'include',
'raw (translation required)': 'raw',
'replace (translation required)': 'replace',
'unicode (translation required)': 'unicode',
u'日期': 'date',
'class (translation required)': 'class',
'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'contents (translation required)': 'contents',
'sectnum (translation required)': 'sectnum',
'section-numbering (translation required)': 'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#'footnotes (translation required)': 'footnotes',
#'citations (translation required)': 'citations',
'target-notes (translation required)': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Traditional Chinese name to registered (in directives/__init__.py)
directive name mapping."""
roles = {
# language-dependent: fixed
'abbreviation (translation required)': 'abbreviation',
'ab (translation required)': 'abbreviation',
'acronym (translation required)': 'acronym',
'ac (translation required)': 'acronym',
'index (translation required)': 'index',
'i (translation required)': 'index',
'subscript (translation required)': 'subscript',
'sub (translation required)': 'subscript',
'superscript (translation required)': 'superscript',
'sup (translation required)': 'superscript',
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
't (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'pep (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'rfc (translation required)': 'rfc-reference',
'emphasis (translation required)': 'emphasis',
'strong (translation required)': 'strong',
'literal (translation required)': 'literal',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'target (translation required)': 'target',
'uri-reference (translation required)': 'uri-reference',
'uri (translation required)': 'uri-reference',
'url (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Traditional Chinese role names to canonical role names for
interpreted text."""
| apache-2.0 |
Venturi/cms | env/lib/python2.7/site-packages/django/db/__init__.py | 146 | 2374 | from django.core import signals
from django.db.utils import (DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY,
DataError, OperationalError, IntegrityError, InternalError, ProgrammingError,
NotSupportedError, DatabaseError, InterfaceError, Error, ConnectionHandler,
ConnectionRouter)
__all__ = [
'backend', 'connection', 'connections', 'router', 'DatabaseError',
'IntegrityError', 'InternalError', 'ProgrammingError', 'DataError',
'NotSupportedError', 'Error', 'InterfaceError', 'OperationalError',
'DEFAULT_DB_ALIAS', 'DJANGO_VERSION_PICKLE_KEY'
]
connections = ConnectionHandler()
router = ConnectionRouter()
# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
# for backend bits.
# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so
# we manually create the dictionary from the settings, passing only the
# settings that the database backends care about. Note that TIME_ZONE is used
# by the PostgreSQL backends.
# We load all these up for backwards compatibility, you should use
# connections['default'] instead.
class DefaultConnectionProxy(object):
    """
    Proxy for accessing the default DatabaseWrapper object's attributes. If you
    need to access the DatabaseWrapper object itself, use
    connections[DEFAULT_DB_ALIAS] instead.
    """
    def __getattr__(self, item):
        # __getattr__ is only invoked for names not found on the proxy, so
        # every attribute read falls through to the current default
        # connection (looked up lazily on each access).
        return getattr(connections[DEFAULT_DB_ALIAS], item)

    def __setattr__(self, name, value):
        # Writes are forwarded too; as a consequence nothing can be stored
        # on the proxy instance itself.
        return setattr(connections[DEFAULT_DB_ALIAS], name, value)

    def __delattr__(self, name):
        return delattr(connections[DEFAULT_DB_ALIAS], name)

    def __eq__(self, other):
        # Equality delegates to the underlying connection so that
        # `connection == connections[DEFAULT_DB_ALIAS]` holds.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 — confirm intended for this Django version.
        return connections[DEFAULT_DB_ALIAS] == other

    def __ne__(self, other):
        return connections[DEFAULT_DB_ALIAS] != other
# Module-level singleton: the backwards-compatible `connection` alias.
connection = DefaultConnectionProxy()


# Register an event to reset saved queries when a Django request is started.
def reset_queries(**kwargs):
    # Clear each connection's accumulated query log at request start.
    for conn in connections.all():
        conn.queries_log.clear()
signals.request_started.connect(reset_queries)


# Register an event to reset transaction state and close connections past
# their lifetime.
def close_old_connections(**kwargs):
    for conn in connections.all():
        conn.close_if_unusable_or_obsolete()
# Run both at request start and at request end so stale connections never
# survive across requests.
signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections)
| gpl-2.0 |
NoahKow/ziwuquan | crawler/ziwu/spiders/cnblog_spider.py | 3 | 2942 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ziwu.components.redis.spiders import RedisMixin
from scrapy.selector import Selector
from scrapy.http import Request
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from lxml.html.clean import Cleaner
from ziwu.misc.util import filter_tags
from ziwu.items import ZiwuItem
import re
import datetime
class CnblogSpider(RedisMixin, CrawlSpider):
    """Redis-fed crawler for cnblogs.com.

    Search-result pages (zzk.cnblogs.com) are expanded into further
    requests; article pages are scraped into ZiwuItem objects.
    NOTE(review): this is Python 2 / legacy-Scrapy code (SgmlLinkExtractor,
    urljoin_rfc) — keep byte/str handling as-is.
    """
    name = 'cnblog'
    # Redis list that seeds start URLs (see RedisMixin).
    redis_key = 'cnblog:start_urls'
    allowed_domains = ['zzk.cnblogs.com', 'www.cnblogs.com', 'kb.cnblogs.com', 'q.cnblogs.com']
    rules = (
        # NOTE(review): callback='parse' overrides CrawlSpider's built-in
        # parse dispatcher — presumably intentional here; verify.
        Rule(SgmlLinkExtractor(), callback='parse', follow=True),
    )

    def set_crawler(self, crawler):
        # Wire up both bases: CrawlSpider needs the crawler, RedisMixin
        # needs its redis connection configured afterwards.
        CrawlSpider.set_crawler(self, crawler)
        RedisMixin.setup_redis(self)

    def parse(self, response):
        """Yield follow-up Requests for search pages, or a ZiwuItem for
        article pages with a non-empty body."""
        sel = Selector(response)
        # Matches search-result URLs of the form zzk.cnblogs.com/s?w=...
        re_article_list = re.compile("zzk\.cnblogs\.com\/s\?w\=")
        if re_article_list.search(response.url):
            # Search page: collect result links and pager links alike.
            urlposts = sel.xpath('//div[@class="searchItem"]/h3[@class="searchItemTitle"]/a/@href').extract()
            urlpages = sel.xpath('//div[@id="paging_block"]/div[@class="pager"]/a/@href').extract()
            urls = urlposts + urlpages
            for url in urls:
                utf8_url = url.encode('utf-8')
                base_url = get_base_url(response)
                # Resolve relative links against the page's base URL.
                if not utf8_url.startswith('http://'):
                    url = urljoin_rfc(base_url, utf8_url)
                yield Request(url, callback=self.parse)
                # yield Request(url, meta={'renderjs': "true"}, callback=self.parse)
        else:
            # Article page: extract title, body and post date.
            title = sel.xpath('//*[@id="cb_post_title_url"]/text()').extract()
            content = sel.xpath('//*[@id="cnblogs_post_body"]/node()').extract()
            created = sel.xpath('//*[@id="post-date"]/text()').extract()
            if len(content) != 0:
                item = ZiwuItem()
                item['url'] = response.url
                # Fall back to the <title> tag when the post-title anchor
                # is missing.
                if len(title) == 0:
                    title = sel.xpath('//title/text()').extract()
                item['title'] = ''.join(title).strip()
                item_content = ''.join(content).strip()
                # Strip all attributes from the HTML before storing it.
                cleaner = Cleaner(page_structure=False, links=False, safe_attrs_only=True, safe_attrs = frozenset([]))
                clean_content = cleaner.clean_html(item_content)
                item['content'] = clean_content
                item['description'] = filter_tags(clean_content)
                # Sentinel date for posts without a visible timestamp.
                if len(created) == 0:
                    created = ['1900-1-1 00:00']
                item['created'] = datetime.datetime.strptime(''.join(created).strip(), "%Y-%m-%d %H:%M")
                item['type'] = 1
                item['pagerank'] = 1
                yield item
            else:
                # No article body found — nothing to emit for this page.
                pass
| bsd-2-clause |
logston/cunidecode | data/x074.py | 252 | 4696 | data = (
'Han ', # 0x00
'Xuan ', # 0x01
'Yan ', # 0x02
'Qiu ', # 0x03
'Quan ', # 0x04
'Lang ', # 0x05
'Li ', # 0x06
'Xiu ', # 0x07
'Fu ', # 0x08
'Liu ', # 0x09
'Ye ', # 0x0a
'Xi ', # 0x0b
'Ling ', # 0x0c
'Li ', # 0x0d
'Jin ', # 0x0e
'Lian ', # 0x0f
'Suo ', # 0x10
'Chiisai ', # 0x11
'[?] ', # 0x12
'Wan ', # 0x13
'Dian ', # 0x14
'Pin ', # 0x15
'Zhan ', # 0x16
'Cui ', # 0x17
'Min ', # 0x18
'Yu ', # 0x19
'Ju ', # 0x1a
'Chen ', # 0x1b
'Lai ', # 0x1c
'Wen ', # 0x1d
'Sheng ', # 0x1e
'Wei ', # 0x1f
'Dian ', # 0x20
'Chu ', # 0x21
'Zhuo ', # 0x22
'Pei ', # 0x23
'Cheng ', # 0x24
'Hu ', # 0x25
'Qi ', # 0x26
'E ', # 0x27
'Kun ', # 0x28
'Chang ', # 0x29
'Qi ', # 0x2a
'Beng ', # 0x2b
'Wan ', # 0x2c
'Lu ', # 0x2d
'Cong ', # 0x2e
'Guan ', # 0x2f
'Yan ', # 0x30
'Diao ', # 0x31
'Bei ', # 0x32
'Lin ', # 0x33
'Qin ', # 0x34
'Pi ', # 0x35
'Pa ', # 0x36
'Que ', # 0x37
'Zhuo ', # 0x38
'Qin ', # 0x39
'Fa ', # 0x3a
'[?] ', # 0x3b
'Qiong ', # 0x3c
'Du ', # 0x3d
'Jie ', # 0x3e
'Hun ', # 0x3f
'Yu ', # 0x40
'Mao ', # 0x41
'Mei ', # 0x42
'Chun ', # 0x43
'Xuan ', # 0x44
'Ti ', # 0x45
'Xing ', # 0x46
'Dai ', # 0x47
'Rou ', # 0x48
'Min ', # 0x49
'Zhen ', # 0x4a
'Wei ', # 0x4b
'Ruan ', # 0x4c
'Huan ', # 0x4d
'Jie ', # 0x4e
'Chuan ', # 0x4f
'Jian ', # 0x50
'Zhuan ', # 0x51
'Yang ', # 0x52
'Lian ', # 0x53
'Quan ', # 0x54
'Xia ', # 0x55
'Duan ', # 0x56
'Yuan ', # 0x57
'Ye ', # 0x58
'Nao ', # 0x59
'Hu ', # 0x5a
'Ying ', # 0x5b
'Yu ', # 0x5c
'Huang ', # 0x5d
'Rui ', # 0x5e
'Se ', # 0x5f
'Liu ', # 0x60
'Shi ', # 0x61
'Rong ', # 0x62
'Suo ', # 0x63
'Yao ', # 0x64
'Wen ', # 0x65
'Wu ', # 0x66
'Jin ', # 0x67
'Jin ', # 0x68
'Ying ', # 0x69
'Ma ', # 0x6a
'Tao ', # 0x6b
'Liu ', # 0x6c
'Tang ', # 0x6d
'Li ', # 0x6e
'Lang ', # 0x6f
'Gui ', # 0x70
'Zhen ', # 0x71
'Qiang ', # 0x72
'Cuo ', # 0x73
'Jue ', # 0x74
'Zhao ', # 0x75
'Yao ', # 0x76
'Ai ', # 0x77
'Bin ', # 0x78
'Tu ', # 0x79
'Chang ', # 0x7a
'Kun ', # 0x7b
'Zhuan ', # 0x7c
'Cong ', # 0x7d
'Jin ', # 0x7e
'Yi ', # 0x7f
'Cui ', # 0x80
'Cong ', # 0x81
'Qi ', # 0x82
'Li ', # 0x83
'Ying ', # 0x84
'Suo ', # 0x85
'Qiu ', # 0x86
'Xuan ', # 0x87
'Ao ', # 0x88
'Lian ', # 0x89
'Man ', # 0x8a
'Zhang ', # 0x8b
'Yin ', # 0x8c
'[?] ', # 0x8d
'Ying ', # 0x8e
'Zhi ', # 0x8f
'Lu ', # 0x90
'Wu ', # 0x91
'Deng ', # 0x92
'Xiou ', # 0x93
'Zeng ', # 0x94
'Xun ', # 0x95
'Qu ', # 0x96
'Dang ', # 0x97
'Lin ', # 0x98
'Liao ', # 0x99
'Qiong ', # 0x9a
'Su ', # 0x9b
'Huang ', # 0x9c
'Gui ', # 0x9d
'Pu ', # 0x9e
'Jing ', # 0x9f
'Fan ', # 0xa0
'Jin ', # 0xa1
'Liu ', # 0xa2
'Ji ', # 0xa3
'[?] ', # 0xa4
'Jing ', # 0xa5
'Ai ', # 0xa6
'Bi ', # 0xa7
'Can ', # 0xa8
'Qu ', # 0xa9
'Zao ', # 0xaa
'Dang ', # 0xab
'Jiao ', # 0xac
'Gun ', # 0xad
'Tan ', # 0xae
'Hui ', # 0xaf
'Huan ', # 0xb0
'Se ', # 0xb1
'Sui ', # 0xb2
'Tian ', # 0xb3
'[?] ', # 0xb4
'Yu ', # 0xb5
'Jin ', # 0xb6
'Lu ', # 0xb7
'Bin ', # 0xb8
'Shou ', # 0xb9
'Wen ', # 0xba
'Zui ', # 0xbb
'Lan ', # 0xbc
'Xi ', # 0xbd
'Ji ', # 0xbe
'Xuan ', # 0xbf
'Ruan ', # 0xc0
'Huo ', # 0xc1
'Gai ', # 0xc2
'Lei ', # 0xc3
'Du ', # 0xc4
'Li ', # 0xc5
'Zhi ', # 0xc6
'Rou ', # 0xc7
'Li ', # 0xc8
'Zan ', # 0xc9
'Qiong ', # 0xca
'Zhe ', # 0xcb
'Gui ', # 0xcc
'Sui ', # 0xcd
'La ', # 0xce
'Long ', # 0xcf
'Lu ', # 0xd0
'Li ', # 0xd1
'Zan ', # 0xd2
'Lan ', # 0xd3
'Ying ', # 0xd4
'Mi ', # 0xd5
'Xiang ', # 0xd6
'Xi ', # 0xd7
'Guan ', # 0xd8
'Dao ', # 0xd9
'Zan ', # 0xda
'Huan ', # 0xdb
'Gua ', # 0xdc
'Bo ', # 0xdd
'Die ', # 0xde
'Bao ', # 0xdf
'Hu ', # 0xe0
'Zhi ', # 0xe1
'Piao ', # 0xe2
'Ban ', # 0xe3
'Rang ', # 0xe4
'Li ', # 0xe5
'Wa ', # 0xe6
'Dekaguramu ', # 0xe7
'Jiang ', # 0xe8
'Qian ', # 0xe9
'Fan ', # 0xea
'Pen ', # 0xeb
'Fang ', # 0xec
'Dan ', # 0xed
'Weng ', # 0xee
'Ou ', # 0xef
'Deshiguramu ', # 0xf0
'Miriguramu ', # 0xf1
'Thon ', # 0xf2
'Hu ', # 0xf3
'Ling ', # 0xf4
'Yi ', # 0xf5
'Ping ', # 0xf6
'Ci ', # 0xf7
'Hekutogura ', # 0xf8
'Juan ', # 0xf9
'Chang ', # 0xfa
'Chi ', # 0xfb
'Sarake ', # 0xfc
'Dang ', # 0xfd
'Meng ', # 0xfe
'Pou ', # 0xff
)
| gpl-2.0 |
wakatime/sublime-wakatime | packages/wakatime/packages/pytz/reference.py | 839 | 3649 | '''
Reference tzinfo implementations from the Python docs.
Used for testing against as they are only correct for the years
1987 to 2006. Do not use these for real code.
'''
from datetime import tzinfo, timedelta, datetime
from pytz import utc, UTC, HOUR, ZERO
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
    """A tzinfo with a constant offset (minutes east of UTC) and no DST."""

    def __init__(self, offset, name):
        # The offset never changes, so convert it to a timedelta once.
        self._offset = timedelta(minutes=offset)
        self._name = name

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        return self._name

    def dst(self, dt):
        # Fixed-offset zones never observe daylight saving time.
        return ZERO
# A class capturing the platform's idea of local time.
import time as _time

# Offsets taken from the C library's view of the local zone at import time.
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
    DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
    DSTOFFSET = STDOFFSET

# The DST shift itself (zero when the local zone has no DST).
DSTDIFF = DSTOFFSET - STDOFFSET


class LocalTimezone(tzinfo):
    """tzinfo reflecting the platform's local time via the time module."""

    def utcoffset(self, dt):
        if self._isdst(dt):
            return DSTOFFSET
        else:
            return STDOFFSET

    def dst(self, dt):
        if self._isdst(dt):
            return DSTDIFF
        else:
            return ZERO

    def tzname(self, dt):
        # tzname is a (standard, dst) pair; index by the DST flag.
        return _time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # Round-trip the naive local time through mktime/localtime so the
        # C library decides whether DST applies (isdst=-1 means "ask").
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, -1)
        stamp = _time.mktime(tt)
        tt = _time.localtime(stamp)
        return tt.tm_isdst > 0

# Singleton instance for convenience.
Local = LocalTimezone()
# A complete implementation of current DST rules for major US time zones.
def first_sunday_on_or_after(dt):
    """Return `dt` moved forward to the nearest Sunday (possibly `dt`
    itself, when it already falls on a Sunday)."""
    # weekday() is 0 for Monday through 6 for Sunday, so the gap to the
    # coming Sunday is simply 6 - weekday() (zero on Sundays).
    return dt + timedelta(days=6 - dt.weekday())
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct.
# which is the first Sunday on or after Oct 25.
DSTEND = datetime(1, 10, 25, 1)


class USTimeZone(tzinfo):
    """A US time zone under the pre-2007 federal DST rules — per the module
    docstring, correct only for 1987-2006; do not use in real code."""

    def __init__(self, hours, reprname, stdname, dstname):
        # hours: standard-time offset from UTC (negative in the US).
        self.stdoffset = timedelta(hours=hours)
        self.reprname = reprname
        self.stdname = stdname
        self.dstname = dstname

    def __repr__(self):
        return self.reprname

    def tzname(self, dt):
        if self.dst(dt):
            return self.dstname
        else:
            return self.stdname

    def utcoffset(self, dt):
        # Standard offset plus the (possibly zero) DST correction.
        return self.stdoffset + self.dst(dt)

    def dst(self, dt):
        if dt is None or dt.tzinfo is None:
            # An exception may be sensible here, in one or both cases.
            # It depends on how you want to treat them.  The default
            # fromutc() implementation (called by the default astimezone()
            # implementation) passes a datetime with dt.tzinfo is self.
            return ZERO
        assert dt.tzinfo is self
        # Find first Sunday in April & the last in October.
        start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
        end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
        # Can't compare naive to aware objects, so strip the timezone from
        # dt first.
        if start <= dt.replace(tzinfo=None) < end:
            return HOUR
        else:
            return ZERO


# The four continental US zones.
Eastern  = USTimeZone(-5, "Eastern",  "EST", "EDT")
Central  = USTimeZone(-6, "Central",  "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific  = USTimeZone(-8, "Pacific",  "PST", "PDT")
| bsd-3-clause |
dbaxa/django | tests/generic_views/test_list.py | 309 | 12129 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.utils.encoding import force_str
from django.views.generic.base import View
from .models import Artist, Author, Book, Page
@override_settings(ROOT_URLCONF='generic_views.urls')
class ListViewTests(TestCase):
    """Integration tests for the generic ListView: pagination, template
    selection and context-variable naming, driven through the URL patterns
    in generic_views.urls."""

    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: one artist, two authors, two books, one page.
        cls.artist1 = Artist.objects.create(name='Rene Magritte')
        cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
        cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
        cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
        cls.book1.authors.add(cls.author1)
        cls.book2 = Book.objects.create(
            name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
        )
        cls.page1 = Page.objects.create(
            content='I was once bitten by a moose.', template='generic_views/page_template.html'
        )

    def test_items(self):
        # A ListView over a plain list of dicts (no queryset).
        res = self.client.get('/list/dict/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(res.context['object_list'][0]['first'], 'John')

    def test_queryset(self):
        res = self.client.get('/list/authors/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIsInstance(res.context['view'], View)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])

    def test_paginated_queryset(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        # The view paginates by 30, so 100 authors make 4 pages.
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTrue(res.context['is_paginated'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 4)
        self.assertEqual(res.context['author_list'][0].name, 'Author 00')
        self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')

    def test_paginated_queryset_shortdata(self):
        # Test that short datasets ALSO result in a paginated view.
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        self.assertFalse(res.context['is_paginated'])

    def test_paginated_get_page_by_query_string(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)

    def test_paginated_get_last_page_by_query_string(self):
        self._make_authors(100)
        # 'page=last' is a special token understood by MultipleObjectMixin.
        res = self.client.get('/list/authors/paginated/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 10)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 90')
        self.assertEqual(res.context['page_obj'].number, 4)

    def test_paginated_get_page_by_urlvar(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/3/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 60')
        self.assertEqual(res.context['page_obj'].number, 3)

    def test_paginated_page_out_of_range(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/42/')
        self.assertEqual(res.status_code, 404)

    def test_paginated_invalid_page(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/?page=frog')
        self.assertEqual(res.status_code, 404)

    def test_paginated_custom_paginator_class(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_class/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)

    def test_paginated_custom_page_kwarg(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)

    def test_paginated_custom_paginator_constructor(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_constructor/')
        self.assertEqual(res.status_code, 200)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)

    def test_paginated_orphaned_queryset(self):
        self._make_authors(92)
        res = self.client.get('/list/authors/paginated-orphaned/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 1)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 3)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': '3'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 3)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': '4'})
        self.assertEqual(res.status_code, 404)

    def test_paginated_non_queryset(self):
        res = self.client.get('/list/dict/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 1)

    def test_verbose_name(self):
        # Context variable name is derived from the model's verbose_name.
        res = self.client.get('/list/artists/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
        self.assertIs(res.context['artist_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])

    def test_allow_empty_false(self):
        # allow_empty=False turns an empty list into a 404.
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 200)
        Author.objects.all().delete()
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 404)

    def test_template_name(self):
        res = self.client.get('/list/authors/template_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/list.html')

    def test_template_name_suffix(self):
        res = self.client.get('/list/authors/template_name_suffix/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_objects.html')

    def test_context_object_name(self):
        res = self.client.get('/list/authors/context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_list.html')

    def test_duplicate_context_object_name(self):
        # context_object_name='object_list' must not create extra aliases.
        res = self.client.get('/list/authors/dupe_context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertNotIn('author_list', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')

    def test_missing_items(self):
        # A view with neither queryset nor model must raise.
        self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')

    def test_paginated_list_view_does_not_load_entire_table(self):
        # Regression test for #17535
        self._make_authors(3)
        # 1 query for authors
        with self.assertNumQueries(1):
            self.client.get('/list/authors/notempty/')
        # same as above + 1 query to test if authors exist + 1 query for pagination
        with self.assertNumQueries(3):
            self.client.get('/list/authors/notempty/paginated/')

    def test_explicitly_ordered_list_view(self):
        Book.objects.create(name="Zebras for Dummies", pages=800, pubdate=datetime.date(2006, 9, 1))
        res = self.client.get('/list/books/sorted/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object_list'][0].name, '2066')
        self.assertEqual(res.context['object_list'][1].name, 'Dreaming in Code')
        self.assertEqual(res.context['object_list'][2].name, 'Zebras for Dummies')
        res = self.client.get('/list/books/sortedbypagesandnamedec/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object_list'][0].name, 'Dreaming in Code')
        self.assertEqual(res.context['object_list'][1].name, 'Zebras for Dummies')
        self.assertEqual(res.context['object_list'][2].name, '2066')

    @override_settings(DEBUG=True)
    def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
        # test for #19240
        # tests that source exception's message is included in page
        self._make_authors(1)
        res = self.client.get('/list/authors/paginated/2/')
        self.assertEqual(res.status_code, 404)
        self.assertEqual(force_str(res.context.get('reason')),
                         "Invalid page (2): That page contains no results")

    def _make_authors(self, n):
        # Helper: replace all authors with n predictably-named ones.
        Author.objects.all().delete()
        for i in range(n):
            Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
| bsd-3-clause |
waheedahmed/edx-platform | lms/djangoapps/commerce/models.py | 14 | 1394 | """
Commerce-related models.
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from config_models.models import ConfigurationModel
class CommerceConfiguration(ConfigurationModel):
    """ Commerce configuration """

    class Meta(object):
        app_label = "commerce"

    # Name under which the E-Commerce API is registered.
    API_NAME = 'commerce'
    # Cache key prefix used for cached API responses.
    CACHE_KEY = 'commerce.api.data'

    # Whether checkout is delegated to the external E-Commerce service.
    checkout_on_ecommerce_service = models.BooleanField(
        default=False,
        help_text=_('Use the checkout page hosted by the E-Commerce service.')
    )
    # Path (on the E-Commerce service) for single-course checkout.
    single_course_checkout_page = models.CharField(
        max_length=255,
        default='/basket/single-item/',
        help_text=_('Path to single course checkout page hosted by the E-Commerce service.')
    )
    # TTL in seconds for cached API responses; 0 disables caching.
    cache_ttl = models.PositiveIntegerField(
        verbose_name=_('Cache Time To Live'),
        default=0,
        help_text=_(
            'Specified in seconds. Enable caching by setting this to a value greater than 0.'
        )
    )
    # Receipt-page path; the order number is appended to this prefix.
    receipt_page = models.CharField(
        max_length=255,
        default='/commerce/checkout/receipt/?orderNum=',
        help_text=_('Path to order receipt page.')
    )

    def __unicode__(self):
        # Python 2-style string representation (codebase predates py3-only).
        return "Commerce configuration"

    @property
    def is_cache_enabled(self):
        """Whether responses from the Ecommerce API will be cached."""
        return self.cache_ttl > 0
| agpl-3.0 |
sudosurootdev/external_chromium_org | tools/perf/page_sets/gmail_compose_discard.py | 33 | 2494 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
def _CreateXpathFunction(xpath):
return ('document.evaluate("%s",'
'document,'
'null,'
'XPathResult.FIRST_ORDERED_NODE_TYPE,'
'null)'
'.singleNodeValue' % re.escape(xpath))
class GmailComposeDiscardPage(page_module.Page):

  """ Why: Compose and discard a new email """

  def __init__(self, page_set):
    super(GmailComposeDiscardPage, self).__init__(
        url='https://mail.google.com/mail/',
        page_set=page_set)
    self.credentials_path = 'data/credentials.json'
    self.credentials = 'google'
    self.user_agent_type = 'desktop'

  def RunNavigateSteps(self, action_runner):
    action_runner.NavigateToPage(self)
    # Wait until Gmail's gmonkey API object and top bar ("gb") exist
    # before interacting with the page.
    action_runner.WaitForJavaScriptCondition(
        'window.gmonkey !== undefined &&'
        'document.getElementById("gb") !== null')

  def ComposeClick(self, action_runner):
    # Synthesize mousedown + mouseup on the COMPOSE button; Gmail's
    # handlers listen for these rather than a plain click().
    action_runner.ExecuteJavaScript('''
        var button=document.evaluate('//div[text()="COMPOSE"]',
            document,null,XPathResult.FIRST_ORDERED_NODE_TYPE,null)
            .singleNodeValue;
        var mousedownevent=new MouseEvent('mousedown',true,true,window,0,0,0,0,0,
            false,false,false,false,0,null);
        var mouseupevent=new MouseEvent('mouseup',true,true,window,0,0,0,0,0,
            false,false,false,false,0,null);
        button.dispatchEvent(mousedownevent);
        button.dispatchEvent(mouseupevent);''')

  def RunEndure(self, action_runner):
    # One endure iteration: open the compose view, then discard the draft.
    action_runner.WaitForElement(
        element_function=_CreateXpathFunction('//div[text()="COMPOSE"]'))
    self.ComposeClick(action_runner)
    action_runner.Wait(1)
    action_runner.WaitForElement(
        'div[class~="oh"][data-tooltip="Discard draft"]')
    action_runner.ClickElement('div[class~="oh"][data-tooltip="Discard draft"]')
    action_runner.Wait(1)
class GmailComposeDiscardPageSet(page_set_module.PageSet):
  """
  Description: Gmail endure test: compose and discard an email.
  """

  def __init__(self):
    super(GmailComposeDiscardPageSet, self).__init__(
        credentials_path='data/credentials.json',
        user_agent_type='desktop')
    # A single page; the endurance harness cycles it repeatedly.
    self.AddPage(GmailComposeDiscardPage(self))
| bsd-3-clause |
stephenfloor/tripseq-analysis | HumanCodonTable.py | 2 | 2046 | # Class to create a codon table for humans
# data from http://www.kazusa.or.jp/codon/cgi-bin/showcodon.cgi?species=9606&aa=1&style=N retrieved on 11/20/2014
# accessor methods to return avg codon frequency for a sequence or to find the stretch of N codons with minimum frequency, and what that frequency is
class HumanCodonTable:
    """Human codon-usage frequency table.

    Data from
    http://www.kazusa.or.jp/codon/cgi-bin/showcodon.cgi?species=9606&aa=1&style=N
    retrieved on 11/20/2014. Provides the average codon frequency of a
    sequence and the minimum-frequency window of N codons.
    """

    def __init__(self):
        # Relative usage frequency of each codon (fraction among synonymous
        # codons for the same amino acid). The original table listed 'TTT'
        # twice with the same value (0.46); the duplicate has been removed.
        self.codonTable = {
            'TTT': 0.46, 'TTC': 0.54, 'TTA': 0.08, 'TTG': 0.13,
            'CTT': 0.13, 'CTC': 0.2, 'CTA': 0.07, 'CTG': 0.4,
            'ATT': 0.36, 'ATC': 0.47, 'ATA': 0.17, 'ATG': 1,
            'GTT': 0.18, 'GTC': 0.24, 'GTA': 0.12, 'GTG': 0.46,
            'TCT': 0.19, 'TCC': 0.22, 'TCA': 0.15, 'TCG': 0.05,
            'CCT': 0.29, 'CCC': 0.32, 'CCA': 0.28, 'CCG': 0.11,
            'ACT': 0.25, 'ACC': 0.36, 'ACA': 0.28, 'ACG': 0.11,
            'GCT': 0.27, 'GCC': 0.4, 'GCA': 0.23, 'GCG': 0.11,
            'TAT': 0.44, 'TAC': 0.56, 'TAA': 0.3, 'TAG': 0.24,
            'CAT': 0.42, 'CAC': 0.58, 'CAA': 0.27, 'CAG': 0.73,
            'AAT': 0.47, 'AAC': 0.53, 'AAA': 0.43, 'AAG': 0.57,
            'GAT': 0.46, 'GAC': 0.54, 'GAA': 0.42, 'GAG': 0.58,
            'TGT': 0.46, 'TGC': 0.54, 'TGA': 0.47, 'TGG': 1,
            'CGT': 0.08, 'CGC': 0.18, 'CGA': 0.11, 'CGG': 0.2,
            'AGT': 0.15, 'AGC': 0.24, 'AGA': 0.21, 'AGG': 0.21,
            'GGT': 0.16, 'GGC': 0.34, 'GGA': 0.25, 'GGG': 0.25}

    def averageCodonFreq(self, sequence):
        """Return the mean codon frequency across *sequence*.

        The sequence is read in consecutive non-overlapping triplets;
        trailing bases beyond a multiple of 3 are ignored.
        Raises ZeroDivisionError on a sequence shorter than one codon.
        """
        # Floor division: under Python 3 a bare '/' would yield a float and
        # break range(); '//' gives the same result the Python 2 code had.
        numCodons = len(sequence) // 3
        return sum(self.codonTable[sequence[i * 3:i * 3 + 3]]
                   for i in range(numCodons)) / numCodons

    def minCodonFreq(self, sequence, codonWindowSize):
        """Return the lowest average frequency over any window of
        *codonWindowSize* consecutive codons (window size clamped to the
        sequence length).

        Note: codonWindowSize is a number of codons, hence multiplied by 3.
        """
        numCodons = len(sequence) // 3
        if numCodons < codonWindowSize:
            codonWindowSize = numCodons
        allWindows = [sequence[i * 3:(i + codonWindowSize) * 3]
                      for i in range(numCodons - codonWindowSize + 1)]
        return min(map(self.averageCodonFreq, allWindows))
| gpl-2.0 |
inessadl/kinect-2-libras | Kinect2Libras/KinectFingerTracking/Lib/distutils/spawn.py | 72 | 6467 | """distutils.spawn
Provides the 'spawn()' function, a front-end to various platform-
specific functions for launching another program in a sub-process.
Also provides the 'find_executable()' to search the path for a given
executable name.
"""
__revision__ = "$Id$"
import sys
import os
from distutils.errors import DistutilsPlatformError, DistutilsExecError
from distutils import log
def spawn(cmd, search_path=1, verbose=0, dry_run=0):
    """Run another program, specified as a command list 'cmd', in a new process.
    'cmd' is just the argument list for the new process, ie.
    cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
    There is no way to run a program with a name different from that of its
    executable.
    If 'search_path' is true (the default), the system's executable
    search path will be used to find the program; otherwise, cmd[0]
    must be the exact path to the executable.  If 'dry_run' is true,
    the command will not actually be run.
    Raise DistutilsExecError if running the program fails in any way; just
    return on success.
    """
    # Dispatch on the host OS; 'verbose' is accepted for backward
    # compatibility but unused by the platform helpers.
    if os.name == 'posix':
        _spawn_posix(cmd, search_path, dry_run=dry_run)
    elif os.name == 'nt':
        _spawn_nt(cmd, search_path, dry_run=dry_run)
    elif os.name == 'os2':
        _spawn_os2(cmd, search_path, dry_run=dry_run)
    else:
        raise DistutilsPlatformError, \
              "don't know how to spawn programs on platform '%s'" % os.name
def _nt_quote_args(args):
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i, arg in enumerate(args):
if ' ' in arg:
args[i] = '"%s"' % arg
return args
def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0):
    """Run 'cmd' on Windows via os.spawnv; raise DistutilsExecError on failure."""
    executable = cmd[0]
    cmd = _nt_quote_args(cmd)
    if search_path:
        # either we find one or it stays the same
        executable = find_executable(executable) or executable
    log.info(' '.join([executable] + cmd[1:]))
    if not dry_run:
        # spawn for NT requires a full path to the .exe
        try:
            rc = os.spawnv(os.P_WAIT, executable, cmd)
        except OSError, exc:
            # this seems to happen when the command isn't found
            raise DistutilsExecError, \
                  "command '%s' failed: %s" % (cmd[0], exc[-1])
        if rc != 0:
            # and this reflects the command running but failing
            raise DistutilsExecError, \
                  "command '%s' failed with exit status %d" % (cmd[0], rc)
def _spawn_os2(cmd, search_path=1, verbose=0, dry_run=0):
    """Run 'cmd' on OS/2 (EMX) via os.spawnv; raise DistutilsExecError on failure."""
    executable = cmd[0]
    if search_path:
        # either we find one or it stays the same
        executable = find_executable(executable) or executable
    log.info(' '.join([executable] + cmd[1:]))
    if not dry_run:
        # spawnv for OS/2 EMX requires a full path to the .exe
        try:
            rc = os.spawnv(os.P_WAIT, executable, cmd)
        except OSError, exc:
            # this seems to happen when the command isn't found
            raise DistutilsExecError, \
                  "command '%s' failed: %s" % (cmd[0], exc[-1])
        if rc != 0:
            # and this reflects the command running but failing
            log.debug("command '%s' failed with exit status %d" % (cmd[0], rc))
            raise DistutilsExecError, \
                  "command '%s' failed with exit status %d" % (cmd[0], rc)
def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
    """Fork and exec 'cmd'; wait for it and raise DistutilsExecError on failure."""
    log.info(' '.join(cmd))
    if dry_run:
        return
    # execvp consults PATH; execv requires cmd[0] to be an exact path.
    exec_fn = search_path and os.execvp or os.execv
    pid = os.fork()
    if pid == 0:  # in the child
        try:
            exec_fn(cmd[0], cmd)
        except OSError, e:
            sys.stderr.write("unable to execute %s: %s\n" %
                             (cmd[0], e.strerror))
            # _exit (not exit) so the child skips parent cleanup handlers.
            os._exit(1)
        sys.stderr.write("unable to execute %s for unknown reasons" % cmd[0])
        os._exit(1)
    else:  # in the parent
        # Loop until the child either exits or is terminated by a signal
        # (ie. keep waiting if it's merely stopped)
        while 1:
            try:
                pid, status = os.waitpid(pid, 0)
            except OSError, exc:
                import errno
                # waitpid may be interrupted by an unrelated signal; retry.
                if exc.errno == errno.EINTR:
                    continue
                raise DistutilsExecError, \
                      "command '%s' failed: %s" % (cmd[0], exc[-1])
            if os.WIFSIGNALED(status):
                raise DistutilsExecError, \
                      "command '%s' terminated by signal %d" % \
                      (cmd[0], os.WTERMSIG(status))
            elif os.WIFEXITED(status):
                exit_status = os.WEXITSTATUS(status)
                if exit_status == 0:
                    return  # hey, it succeeded!
                else:
                    raise DistutilsExecError, \
                          "command '%s' failed with exit status %d" % \
                          (cmd[0], exit_status)
            elif os.WIFSTOPPED(status):
                continue
            else:
                raise DistutilsExecError, \
                      "unknown error executing '%s': termination status %d" % \
                      (cmd[0], status)
def find_executable(executable, path=None):
    """Tries to find 'executable' in the directories listed in 'path'.

    'path' is a string of directories separated by os.pathsep; it defaults
    to os.environ['PATH'].  Returns the complete filename, or None if the
    executable cannot be found.
    """
    if path is None:
        path = os.environ['PATH']
    # Windows/OS2 executables carry an implicit .exe suffix.
    _, ext = os.path.splitext(executable)
    if ext != '.exe' and (sys.platform == 'win32' or os.name == 'os2'):
        executable = executable + '.exe'
    if os.path.isfile(executable):
        # The name already resolves as given (absolute or cwd-relative).
        return executable
    for directory in path.split(os.pathsep):
        candidate = os.path.join(directory, executable)
        if os.path.isfile(candidate):
            # the file exists, we have a shot at spawn working
            return candidate
    return None
| apache-2.0 |
seasidesun/shadowsocks-bak | shadowsocks/crypto/sodium.py | 1032 | 3778 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
__all__ = ['ciphers']
libsodium = None
loaded = False
buf_size = 2048
# for salsa20 and chacha20
BLOCK_SIZE = 64
def load_libsodium():
    """Locate libsodium, declare the ctypes signatures used, allocate buf.

    Sets the module globals `libsodium`, `buf` and `loaded`; raises if the
    shared library cannot be found.
    """
    global loaded, libsodium, buf
    libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic',
                                  'libsodium')
    if libsodium is None:
        raise Exception('libsodium not found')
    # int crypto_stream_*_xor_ic(out, msg, msglen, nonce, ic, key):
    # argtypes/restype must be declared so ctypes marshals arguments safely.
    libsodium.crypto_stream_salsa20_xor_ic.restype = c_int
    libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                       c_ulonglong,
                                                       c_char_p, c_ulonglong,
                                                       c_char_p)
    libsodium.crypto_stream_chacha20_xor_ic.restype = c_int
    libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                        c_ulonglong,
                                                        c_char_p, c_ulonglong,
                                                        c_char_p)
    # Shared scratch buffer; grown on demand by SodiumCrypto.update().
    buf = create_string_buffer(buf_size)
    loaded = True
class SodiumCrypto(object):
    """Stream cipher (salsa20/chacha20) backed by libsodium's *_xor_ic calls."""

    def __init__(self, cipher_name, key, iv, op):
        # 'op' (encrypt vs decrypt) is ignored: a stream cipher XORs the same
        # keystream in both directions.
        if not loaded:
            load_libsodium()
        self.key = key
        self.iv = iv
        self.key_ptr = c_char_p(key)
        self.iv_ptr = c_char_p(iv)
        if cipher_name == 'salsa20':
            self.cipher = libsodium.crypto_stream_salsa20_xor_ic
        elif cipher_name == 'chacha20':
            self.cipher = libsodium.crypto_stream_chacha20_xor_ic
        else:
            raise Exception('Unknown cipher')
        # byte counter, not block counter
        self.counter = 0

    def update(self, data):
        """Encrypt/decrypt 'data', preserving keystream position across calls."""
        global buf_size, buf
        l = len(data)
        # we can only prepend some padding to make the encryption align to
        # blocks
        padding = self.counter % BLOCK_SIZE
        if buf_size < padding + l:
            # Grow the module-level scratch buffer geometrically.
            buf_size = (padding + l) * 2
            buf = create_string_buffer(buf_size)
        if padding:
            data = (b'\0' * padding) + data
        # The 'ic' argument is the 64-byte block index where this chunk's
        # keystream starts.
        self.cipher(byref(buf), c_char_p(data), padding + l,
                    self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
        self.counter += l
        # buf is copied to a str object when we access buf.raw
        # strip off the padding
        return buf.raw[padding:padding + l]
# Cipher registry: name -> (key size in bytes, IV size in bytes, crypto class).
ciphers = {
    'salsa20': (32, 8, SodiumCrypto),
    'chacha20': (32, 8, SodiumCrypto),
}
def test_salsa20():
    """Round-trip self-test of the salsa20 stream cipher via util.run_cipher."""
    cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_chacha20():
    """Round-trip self-test of the chacha20 stream cipher via util.run_cipher."""
    cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
# Manual smoke test: run both self-tests when executed directly.
if __name__ == '__main__':
    test_chacha20()
    test_salsa20()
| apache-2.0 |
jhaux/tensorflow | tensorflow/python/kernel_tests/candidate_sampler_ops_test.py | 110 | 5343 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CandidateSamplerOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class RangeSamplerOpsTest(test.TestCase):
  """Tests for all_candidate_sampler / log_uniform_candidate_sampler ops."""

  BATCH_SIZE = 3
  NUM_TRUE = 2
  RANGE = 5
  # all_candidate_sampler enumerates every class in [0, RANGE).
  NUM_SAMPLED = RANGE
  TRUE_LABELS = [[1, 2], [0, 4], [3, 3]]

  def testTrueCandidates(self):
    """Sanity-check reshaping the flat label vector into (batch, num_true)."""
    with self.test_session() as sess:
      indices = constant_op.constant([0, 0, 1, 1, 2, 2])
      true_candidates_vec = constant_op.constant([1, 2, 0, 4, 3, 3])
      true_candidates_matrix = array_ops.reshape(
          true_candidates_vec, [self.BATCH_SIZE, self.NUM_TRUE])
      indices_val, true_candidates_val = sess.run(
          [indices, true_candidates_matrix])
      self.assertAllEqual(indices_val, [0, 0, 1, 1, 2, 2])
      self.assertAllEqual(true_candidates_val, self.TRUE_LABELS)

  def testSampledCandidates(self):
    """all_candidate_sampler must return every id in [0, RANGE), in order."""
    with self.test_session():
      true_classes = constant_op.constant(
          [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
      sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
          true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
      result = sampled_candidates.eval()
      expected_ids = [0, 1, 2, 3, 4]
      self.assertAllEqual(result, expected_ids)
      self.assertEqual(sampled_candidates.get_shape(), [self.NUM_SAMPLED])

  def testTrueLogExpectedCount(self):
    """With exhaustive sampling, each true label's expected count is 1."""
    with self.test_session():
      true_classes = constant_op.constant(
          [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
      _, true_expected_count, _ = candidate_sampling_ops.all_candidate_sampler(
          true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
      true_log_expected_count = math_ops.log(true_expected_count)
      result = true_log_expected_count.eval()
      # log(1) == 0 for every entry.
      self.assertAllEqual(result, [[0.0] * self.NUM_TRUE] * self.BATCH_SIZE)
      self.assertEqual(true_expected_count.get_shape(),
                       [self.BATCH_SIZE, self.NUM_TRUE])
      self.assertEqual(true_log_expected_count.get_shape(),
                       [self.BATCH_SIZE, self.NUM_TRUE])

  def testSampledLogExpectedCount(self):
    """With exhaustive sampling, each sampled id's expected count is 1."""
    with self.test_session():
      true_classes = constant_op.constant(
          [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
      _, _, sampled_expected_count = candidate_sampling_ops.all_candidate_sampler(
          true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
      sampled_log_expected_count = math_ops.log(sampled_expected_count)
      result = sampled_log_expected_count.eval()
      self.assertAllEqual(result, [0.0] * self.NUM_SAMPLED)
      self.assertEqual(sampled_expected_count.get_shape(), [self.NUM_SAMPLED])
      self.assertEqual(sampled_log_expected_count.get_shape(), [self.NUM_SAMPLED])

  def testAccidentalHits(self):
    """Every accidental hit must be a true label with a large negative weight."""
    with self.test_session() as sess:
      true_classes = constant_op.constant(
          [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
      sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
          true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
      accidental_hits = candidate_sampling_ops.compute_accidental_hits(
          true_classes, sampled_candidates, self.NUM_TRUE)
      indices, ids, weights = sess.run(accidental_hits)
      self.assertEqual(1, accidental_hits[0].get_shape().ndims)
      self.assertEqual(1, accidental_hits[1].get_shape().ndims)
      self.assertEqual(1, accidental_hits[2].get_shape().ndims)
      for index, id_, weight in zip(indices, ids, weights):
        self.assertTrue(id_ in self.TRUE_LABELS[index])
        # Weights are ~ -FLT_MAX so hits are masked out of the softmax.
        self.assertLess(weight, -1.0e37)

  def testSeed(self):
    """A fixed seed must reproduce draws; seed=None must (usually) vary them."""

    def draw(seed):
      with self.test_session():
        true_classes = constant_op.constant(
            [[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
        sampled, _, _ = candidate_sampling_ops.log_uniform_candidate_sampler(
            true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True, 5, seed=seed)
        return sampled.eval()
    # Non-zero seed. Repeatable.
    for seed in [1, 12, 123, 1234]:
      self.assertAllEqual(draw(seed), draw(seed))
    # Seed=0 means random seeds.
    num_same = 0
    for _ in range(10):
      if np.allclose(draw(None), draw(None)):
        num_same += 1
    # Accounts for the fact that the same random seed may be picked
    # twice very rarely.
    self.assertLessEqual(num_same, 2)
# Standard TF test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
mrshu/scikit-learn | sklearn/base.py | 1 | 13740 | """Base class for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD Style
import copy
import inspect
import numpy as np
from scipy import sparse
from .metrics import r2_score
###############################################################################
def clone(estimator, safe=True):
    """Constructs a new estimator with the same parameters.
    Clone does a deep copy of the model in an estimator
    without actually copying attached data. It yields a new estimator
    with the same parameters that has not been fit on any data.
    Parameters
    ----------
    estimator: estimator object, or list, tuple or set of objects
        The estimator or group of estimators to be cloned
    safe: boolean, optional
        If safe is false, clone will fall back to a deepcopy on objects
        that are not estimators.
    """
    estimator_type = type(estimator)
    # XXX: not handling dictionaries
    if estimator_type in (list, tuple, set, frozenset):
        # Clone each member of the collection, preserving the container type.
        return estimator_type([clone(e, safe=safe) for e in estimator])
    elif not hasattr(estimator, 'get_params'):
        if not safe:
            return copy.deepcopy(estimator)
        else:
            raise TypeError("Cannot clone object '%s' (type %s): "
                            "it does not seem to be a scikit-learn estimator a"
                            " it does not implement a 'get_params' methods."
                            % (repr(estimator), type(estimator)))
    # Rebuild the estimator from its constructor parameters, cloning each
    # parameter value recursively (non-estimators are deep-copied).
    klass = estimator.__class__
    new_object_params = estimator.get_params(deep=False)
    for name, param in new_object_params.iteritems():
        new_object_params[name] = clone(param, safe=False)
    new_object = klass(**new_object_params)
    params_set = new_object.get_params(deep=False)
    # quick sanity check of the parameters of the clone: the constructor is
    # expected to store every parameter unchanged; cheap spot-checks are used
    # for arrays/sparse matrices instead of full equality.
    for name in new_object_params:
        param1 = new_object_params[name]
        param2 = params_set[name]
        if isinstance(param1, np.ndarray):
            # For most ndarrays, we do not test for complete equality
            if not isinstance(param2, type(param1)):
                equality_test = False
            elif (param1.ndim > 0
                    and param1.shape[0] > 0
                    and isinstance(param2, np.ndarray)
                    and param2.ndim > 0
                    and param2.shape[0] > 0):
                # Compare shape, dtype and the first/last elements only.
                equality_test = (
                    param1.shape == param2.shape
                    and param1.dtype == param2.dtype
                    # We have to use '.flat' for 2D arrays
                    and param1.flat[0] == param2.flat[0]
                    and param1.flat[-1] == param2.flat[-1]
                )
            else:
                equality_test = np.all(param1 == param2)
        elif sparse.issparse(param1):
            # For sparse matrices equality doesn't work
            if not sparse.issparse(param2):
                equality_test = False
            elif param1.size == 0 or param2.size == 0:
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.size == 0
                    and param2.size == 0
                )
            else:
                # Spot-check class, first/last stored values, nnz and shape.
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.data[0] == param2.data[0]
                    and param1.data[-1] == param2.data[-1]
                    and param1.nnz == param2.nnz
                    and param1.shape == param2.shape
                )
        else:
            equality_test = new_object_params[name] == params_set[name]
        if not equality_test:
            raise RuntimeError('Cannot clone object %s, as the constructor '
                               'does not seem to set parameter %s' %
                               (estimator, name))
    return new_object
###############################################################################
def _pprint(params, offset=0, printer=repr):
    """Pretty print the dictionary 'params'
    Parameters
    ----------
    params: dict
        The dictionary to pretty print
    offset: int
        The offset in characters to add at the begin of each line.
    printer:
        The function to convert entries to strings, typically
        the builtin str or repr
    """
    # Do a multi-line justified repr:
    # Temporarily shrink numpy's print options so array params stay compact.
    options = np.get_printoptions()
    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
    params_list = list()
    this_line_length = offset
    line_sep = ',\n' + (1 + offset // 2) * ' '
    for i, (k, v) in enumerate(sorted(params.iteritems())):
        if type(v) is float:
            # use str for representing floating point numbers
            # this way we get consistent representation across
            # architectures and versions.
            this_repr = '%s=%s' % (k, str(v))
        else:
            # use repr of the rest
            this_repr = '%s=%s' % (k, printer(v))
        # Elide very long reprs, keeping the head and tail.
        if len(this_repr) > 500:
            this_repr = this_repr[:300] + '...' + this_repr[-100:]
        if i > 0:
            # Wrap before ~75 columns or whenever a repr is itself multi-line.
            if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
                params_list.append(line_sep)
                this_line_length = len(line_sep)
            else:
                params_list.append(', ')
                this_line_length += 2
        params_list.append(this_repr)
        this_line_length += len(this_repr)
    # Restore the caller's numpy print options.
    np.set_printoptions(**options)
    lines = ''.join(params_list)
    # Strip trailing space to avoid nightmare in doctests
    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
    return lines
###############################################################################
class BaseEstimator(object):
    """Base class for all estimators in scikit-learn
    Notes
    -----
    All estimators should specify all the parameters that can be set
    at the class level in their __init__ as explicit keyword
    arguments (no *args, **kwargs).
    """

    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator"""
        try:
            # fetch the constructor or the original constructor before
            # deprecation wrapping if any
            init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
            # introspect the constructor arguments to find the model parameters
            # to represent
            args, varargs, kw, default = inspect.getargspec(init)
            if not varargs is None:
                raise RuntimeError('scikit learn estimators should always '
                                   'specify their parameters in the signature'
                                   ' of their init (no varargs).')
            # Remove 'self'
            # XXX: This is going to fail if the init is a staticmethod, but
            # who would do this?
            args.pop(0)
        except TypeError:
            # No explicit __init__
            args = []
        args.sort()
        return args

    def get_params(self, deep=True):
        """Get parameters for the estimator
        Parameters
        ----------
        deep: boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.
        """
        out = dict()
        for key in self._get_param_names():
            value = getattr(self, key, None)
            # XXX: should we rather test if instance of estimator?
            if deep and hasattr(value, 'get_params'):
                # Prefix nested parameters with '<param>__'.
                deep_items = value.get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out

    def set_params(self, **params):
        """Set the parameters of the estimator.
        The method works on simple estimators as well as on nested objects
        (such as pipelines). The former have parameters of the form
        ``<component>__<parameter>`` so that it's possible to update each
        component of a nested object.
        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return
        valid_params = self.get_params(deep=True)
        for key, value in params.iteritems():
            # Keys like 'component__param' address nested estimators.
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case
                name, sub_name = split
                if not name in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s' %
                                     (name, self))
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if not key in valid_params:
                    raise ValueError('Invalid parameter %s ' 'for estimator %s'
                                     % (key, self.__class__.__name__))
                setattr(self, key, value)
        return self

    def __repr__(self):
        class_name = self.__class__.__name__
        # Shallow params only; keeps the repr from exploding on pipelines.
        return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
                                               offset=len(class_name),),)

    def __str__(self):
        class_name = self.__class__.__name__
        return '%s(%s)' % (class_name,
                           _pprint(self.get_params(deep=True),
                                   offset=len(class_name), printer=str,),)
###############################################################################
class ClassifierMixin(object):
    """Mixin class for all classifiers in scikit-learn"""

    def score(self, X, y):
        """Returns the mean accuracy on the given test data and labels.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training set.
        y : array-like, shape = [n_samples]
            Labels for X.
        Returns
        -------
        z : float
        """
        # Fraction of predictions that exactly match the reference labels.
        predictions = self.predict(X)
        return np.mean(predictions == y)
###############################################################################
class RegressorMixin(object):
    """Mixin class for all regression estimators in scikit-learn"""

    def score(self, X, y):
        """Returns the coefficient of determination R^2 of the prediction.
        The coefficient R^2 is defined as (1 - u/v), where u is the
        regression sum of squares ((y - y_pred) ** 2).sum() and v is the
        residual sum of squares ((y_true - y_true.mean()) ** 2).sum().
        Best possible score is 1.0, lower values are worse.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training set.
        y : array-like, shape = [n_samples]
        Returns
        -------
        z : float
        """
        # Delegate the metric computation to sklearn.metrics.r2_score.
        predicted = self.predict(X)
        return r2_score(y, predicted)
###############################################################################
class ClusterMixin(object):
    """Mixin class for all cluster estimators in scikit-learn"""

    def fit_predict(self, X, y=None):
        """Performs clustering on X and returns cluster labels.
        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        # Generic fallback: run fit(), then expose the labels_ attribute
        # that clustering estimators are expected to populate. Subclasses
        # may override this with something more efficient.
        self.fit(X)
        labels = self.labels_
        return labels
###############################################################################
class TransformerMixin(object):
    """Mixin class for all transformers in scikit-learn"""

    def fit_transform(self, X, y=None, **fit_params):
        """Fit to data, then transform it
        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.
        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.
        y : numpy array of shape [n_samples]
            Target values.
        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.
        """
        # Generic fallback; subclasses may override with a fused,
        # more efficient implementation.
        if y is None:
            # Unsupervised transformation: fit takes X only.
            fitted = self.fit(X, **fit_params)
        else:
            # Supervised transformation: fit also needs the targets.
            fitted = self.fit(X, y, **fit_params)
        return fitted.transform(X)
###############################################################################
class MetaEstimatorMixin(object):
    """Mixin class for all meta estimators in scikit-learn"""
    # this is just a tag for the moment -- no behaviour, only used for
    # isinstance() checks elsewhere.
###############################################################################
# XXX: Temporary solution to figure out if an estimator is a classifier
def _get_sub_estimator(estimator):
"""Returns the final estimator if there is any."""
if hasattr(estimator, 'estimator'):
# GridSearchCV and other CV-tuned estimators
return _get_sub_estimator(estimator.estimator)
if hasattr(estimator, 'steps'):
# Pipeline
return _get_sub_estimator(estimator.steps[-1][1])
return estimator
def is_classifier(estimator):
    """Returns True if the given estimator is (probably) a classifier."""
    # Unwrap meta-estimators first, then test the tag mixin.
    final_estimator = _get_sub_estimator(estimator)
    return isinstance(final_estimator, ClassifierMixin)
| bsd-3-clause |
MetricsGrimoire/sortinghat | tests/test_matcher_github.py | 1 | 18031 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2017 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
#
import sys
import unittest
if '..' not in sys.path:
sys.path.insert(0, '..')
from sortinghat.db.model import UniqueIdentity, Identity, MatchingBlacklist
from sortinghat.matching.github import GitHubMatcher, GitHubUsernameIdentity
class TestGitHubMatcher(unittest.TestCase):
    def test_match(self):
        """Test match method"""
        # Let's define some identities first.
        # NOTE(review): the fixtures suggest only identities whose source is
        # a GitHub variant ('github', 'GitHub-API') take part in matching --
        # confirm against GitHubMatcher's implementation.
        jsmith = UniqueIdentity(uuid='jsmith')
        jsmith.identities = [Identity(name='John Smith', email='jsmith@example.com', source='scm'),
                             Identity(name='John Smith', source='scm'),
                             Identity(username='jsmith', source='github'),
                             Identity(email='', source='scm')]
        john_smith = UniqueIdentity(uuid='js')
        john_smith.identities = [Identity(name='J. Smith', username='john_smith', source='scm'),
                                 Identity(username='jsmith', source='scm'),
                                 Identity(name='Smith. J', source='mls'),
                                 Identity(name='Smith. J', email='JSmith@example.com', source='mls')]
        jsmith_alt = UniqueIdentity(uuid='J. Smith')
        jsmith_alt.identities = [Identity(name='J. Smith', username='john_smith', source='alt'),
                                 Identity(name='John Smith', username='jsmith', source='GitHub-API'),
                                 Identity(email='', source='alt'),
                                 Identity(email='jsmith', source='alt')]
        jsmith_not_email = UniqueIdentity(uuid='John Smith')
        jsmith_not_email.identities = [Identity(email='jsmith', source='mls')]
        # Tests
        matcher = GitHubMatcher()
        # First and third must match: both hold username 'jsmith' under a
        # GitHub source.
        result = matcher.match(jsmith, jsmith_alt)
        self.assertEqual(result, True)
        result = matcher.match(jsmith_alt, jsmith)
        self.assertEqual(result, True)
        # Comparing with the second and fourth does not produce any match
        result = matcher.match(jsmith, john_smith)
        self.assertEqual(result, False)
        result = matcher.match(jsmith, jsmith_not_email)
        self.assertEqual(result, False)
        result = matcher.match(jsmith_alt, john_smith)
        self.assertEqual(result, False)
        result = matcher.match(jsmith_alt, jsmith_not_email)
        self.assertEqual(result, False)
    def test_match_with_blacklist(self):
        """Test match when there are entries in the blacklist"""
        # Let's define some identities first
        jsmith = UniqueIdentity(uuid='jsmith')
        jsmith.identities = [Identity(name='John Smith', email='jsmith@example.com', source='scm'),
                             Identity(name='John Smith', source='scm'),
                             Identity(username='jsmith', source='github'),
                             Identity(email='', source='scm')]
        john_smith = UniqueIdentity(uuid='js')
        john_smith.identities = [Identity(name='J. Smith', username='john_smith', source='scm'),
                                 Identity(username='jsmith', source='GitHub-API'),
                                 Identity(name='Smith. J', source='mls'),
                                 Identity(name='Smith. J', email='JSmith@example.com', source='mls')]
        jrae = UniqueIdentity(uuid='jrae')
        jrae.identities = [Identity(name='Jane Rae', source='scm', uuid='jrae'),
                           Identity(username='janerae', email='jane.rae@example.net', source='github', uuid='jrae'),
                           Identity(name='jrae', source='github', uuid='jrae'),
                           Identity(email='JRAE@example.net', source='scm', uuid='jrae')]
        jane_rae = UniqueIdentity(uuid='Jane Rae')
        jane_rae.identities = [Identity(username='janerae', source='github', uuid='Jane Rae'),
                               Identity(username='jrae', email='jane.rae@example.net', source='github', uuid='Jane Rae')]
        # Check matching
        matcher = GitHubMatcher()
        # First identities must match (without any blacklist yet).
        result = matcher.match(jsmith, john_smith)
        self.assertEqual(result, True)
        result = matcher.match(john_smith, jsmith)
        self.assertEqual(result, True)
        result = matcher.match(jrae, jane_rae)
        self.assertEqual(result, True)
        result = matcher.match(jane_rae, jrae)
        self.assertEqual(result, True)
        # Add a blacklist: excluding 'jsmith' kills the jsmith/john_smith
        # match, while jrae/jane_rae still match through 'janerae'.
        bl = [MatchingBlacklist(excluded='jsmith'),
              MatchingBlacklist(excluded='jrae')]
        matcher = GitHubMatcher(blacklist=bl)
        result = matcher.match(jsmith, john_smith)
        self.assertEqual(result, False)
        result = matcher.match(john_smith, jsmith)
        self.assertEqual(result, False)
        result = matcher.match(jrae, jane_rae)
        self.assertEqual(result, True)
        result = matcher.match(jane_rae, jrae)
        self.assertEqual(result, True)
        # In this case, no match will be found: every shared username is
        # now excluded.
        bl = [MatchingBlacklist(excluded='jsmith'),
              MatchingBlacklist(excluded='jrae'),
              MatchingBlacklist(excluded='janerae')]
        matcher = GitHubMatcher(blacklist=bl)
        result = matcher.match(jsmith, john_smith)
        self.assertEqual(result, False)
        result = matcher.match(john_smith, jsmith)
        self.assertEqual(result, False)
        result = matcher.match(jrae, jane_rae)
        self.assertEqual(result, False)
        result = matcher.match(jane_rae, jrae)
        self.assertEqual(result, False)
def test_match_with_sources_list(self):
    """Test match when a list of sources to filter is given"""
    jsmith = UniqueIdentity(uuid='jsmith')
    jsmith.identities = [
        Identity(name='John Smith', email='jsmith@example.com', source='scm', uuid='jsmith'),
        Identity(name='John Smith', source='scm', uuid='jsmith'),
        Identity(username='jsmith', source='github', uuid='jsmith'),
        Identity(email='', source='scm', uuid='jsmith'),
    ]
    john_smith = UniqueIdentity(uuid='js')
    john_smith.identities = [
        Identity(name='J. Smith', username='john_smith', source='scm'),
        Identity(username='jsmith', source='GitHub-API'),
        Identity(name='Smith. J', source='mls'),
        Identity(name='Smith. J', email='JSmith@example.com', source='mls'),
    ]

    # Filtering on either source alone yields no match
    for sources in (['scm'], ['github']):
        matcher = GitHubMatcher(sources=sources)
        self.assertEqual(matcher.match(jsmith, john_smith), False)

    # Only when github-api and github are combined is there a match
    matcher = GitHubMatcher(sources=['github-api', 'github'])
    self.assertEqual(matcher.match(jsmith, john_smith), True)
def test_match_same_identity(self):
    """Test whether there is a match comparing the same identity"""
    identity = UniqueIdentity(uuid='John Smith')
    # An identity always matches itself.
    self.assertEqual(GitHubMatcher().match(identity, identity), True)
def test_match_same_uuid(self):
    """Test if there is a match when compares identities with the same UUID"""
    matcher = GitHubMatcher()

    uid1 = UniqueIdentity(uuid='John Smith')
    uid2 = UniqueIdentity(uuid='John Smith')
    # Equal UUIDs match regardless of argument order.
    self.assertEqual(matcher.match(uid1, uid2), True)
    self.assertEqual(matcher.match(uid2, uid1), True)

    # Two identities whose UUID is None must not match.
    uid1 = UniqueIdentity(uuid=None)
    uid2 = UniqueIdentity(uuid=None)
    self.assertEqual(matcher.match(uid1, uid2), False)
def test_match_identities_instances(self):
    """Test whether it raises an error when ids are not UniqueIdentities"""
    uid = UniqueIdentity(uuid='John Smith')
    matcher = GitHubMatcher()
    bad_pairs = [
        ('John Smith', uid),
        (uid, 'John Smith'),
        (None, uid),
        (uid, None),
        ('John Smith', 'John Doe'),
    ]
    # Every non-UniqueIdentity operand must raise ValueError.
    for left, right in bad_pairs:
        self.assertRaises(ValueError, matcher.match, left, right)
def test_match_filtered_identities(self):
    """Test whether filtered identities match"""
    jsmith = GitHubUsernameIdentity('1', None, 'jsmith', 'github')
    jsmith_alt = GitHubUsernameIdentity('2', 'jsmith', 'jsmith', 'GitHub-API')
    jsmith_uuid = GitHubUsernameIdentity('3', 'jsmith', 'john.smith', 'github')

    matcher = GitHubMatcher()
    cases = [
        (jsmith, jsmith_alt, True),
        (jsmith, jsmith_uuid, False),
        (jsmith_alt, jsmith, True),
        (jsmith_alt, jsmith_uuid, True),
        (jsmith_uuid, jsmith, False),
        (jsmith_uuid, jsmith_alt, True),
    ]
    for fid_a, fid_b, expected in cases:
        self.assertEqual(matcher.match_filtered_identities(fid_a, fid_b), expected)
def test_match_filtered_identities_with_blacklist(self):
    """Test whether filtered identities match when there is a blacklist"""
    jsmith = GitHubUsernameIdentity('1', None, 'jsmith', 'github-commits')
    jsmith_alt = GitHubUsernameIdentity('2', 'jsmith', 'jsmith', 'github')
    jsmith_uuid = GitHubUsernameIdentity('3', 'jsmith', 'jsmith', 'GitHub-API')
    john_none = GitHubUsernameIdentity('4', None, 'jsmith', 'github-issues')

    matcher = GitHubMatcher(blacklist=[MatchingBlacklist(excluded='jsmith')])

    cases = [
        (jsmith, jsmith_alt, False),
        (jsmith, jsmith_uuid, False),
        (jsmith_alt, jsmith, False),
        (jsmith_alt, jsmith_uuid, True),   # same UUID still matches
        (jsmith_uuid, jsmith, False),
        (jsmith, john_none, False),        # both UUIDs None: no match
    ]
    for fid_a, fid_b, expected in cases:
        self.assertEqual(matcher.match_filtered_identities(fid_a, fid_b), expected)
def test_match_filtered_identities_instances(self):
    """Test whether it raises an error when ids are not GitHubUsernameIdentity"""
    fid = GitHubUsernameIdentity('1', None, 'jsmith', 'github')
    matcher = GitHubMatcher()
    bad_pairs = [
        ('John Smith', fid),
        (fid, 'John Smith'),
        (None, fid),
        (fid, None),
        ('John Smith', 'John Doe'),
    ]
    for left, right in bad_pairs:
        self.assertRaises(ValueError, matcher.match_filtered_identities, left, right)
def test_filter_identities(self):
    """Test if identities are filtered"""
    # Let's define some identities first
    jsmith = UniqueIdentity(uuid='jsmith')
    jsmith.identities = [
        Identity(name='John Smith', email='jsmith@example.com', source='scm', uuid='jsmith'),
        Identity(name='John Smith', source='scm', uuid='jsmith'),
        Identity(username='jsmith', source='github', uuid='jsmith'),
        Identity(email='', source='scm', uuid='jsmith'),
    ]
    jrae = UniqueIdentity(uuid='jrae')
    jrae.identities = [
        Identity(username='janerae', source='GitHub-API', uuid='jrae'),
        Identity(name='Jane Rae Doe', email='jane.rae@example.net', source='mls', uuid='jrae'),
        Identity(username='jrae', source='github', uuid='jrae'),
        Identity(email='JRAE@example.net', source='scm', uuid='jrae'),
    ]

    matcher = GitHubMatcher()

    # Only the single github identity of jsmith survives the filter.
    result = matcher.filter(jsmith)
    self.assertEqual(len(result), 1)
    for fid, (uuid, username, source) in zip(result, [('jsmith', 'jsmith', 'github')]):
        self.assertIsInstance(fid, GitHubUsernameIdentity)
        self.assertEqual(fid.uuid, uuid)
        self.assertEqual(fid.username, username)
        self.assertEqual(fid.source, source)

    # Both github-flavoured identities of jrae are kept, in order.
    result = matcher.filter(jrae)
    self.assertEqual(len(result), 2)
    expected = [('jrae', 'janerae', 'GitHub-API'),
                ('jrae', 'jrae', 'github')]
    for fid, (uuid, username, source) in zip(result, expected):
        self.assertIsInstance(fid, GitHubUsernameIdentity)
        self.assertEqual(fid.uuid, uuid)
        self.assertEqual(fid.username, username)
        self.assertEqual(fid.source, source)
def test_filter_identities_with_blacklist(self):
    """Test if identities are filtered when there is a blacklist"""
    # Let's define some identities first
    jsmith = UniqueIdentity(uuid='jsmith')
    jsmith.identities = [
        Identity(name='John Smith', email='jsmith@example.com', source='scm', uuid='jsmith'),
        Identity(name='John Smith', source='scm', uuid='jsmith'),
        Identity(username='jsmith', source='github', uuid='jsmith'),
        Identity(email='', source='scm', uuid='jsmith'),
    ]
    jrae = UniqueIdentity(uuid='jrae')
    jrae.identities = [
        Identity(username='janerae', source='GitHub-API', uuid='jrae'),
        Identity(name='Jane Rae Doe', email='jane.rae@example.net', source='mls', uuid='jrae'),
        Identity(username='jrae', source='github', uuid='jrae'),
        Identity(email='JRAE@example.net', source='scm', uuid='jrae'),
    ]

    matcher = GitHubMatcher(blacklist=[MatchingBlacklist(excluded='jrae')])

    # jsmith is unaffected by the blacklist.
    result = matcher.filter(jsmith)
    self.assertEqual(len(result), 1)
    fid = result[0]
    self.assertIsInstance(fid, GitHubUsernameIdentity)
    self.assertEqual(fid.uuid, 'jsmith')
    self.assertEqual(fid.username, 'jsmith')
    self.assertEqual(fid.source, 'github')

    # The blacklisted 'jrae' username is dropped; only 'janerae' remains.
    result = matcher.filter(jrae)
    self.assertEqual(len(result), 1)
    fid = result[0]
    self.assertIsInstance(fid, GitHubUsernameIdentity)
    self.assertEqual(fid.uuid, 'jrae')
    self.assertEqual(fid.username, 'janerae')
    self.assertEqual(fid.source, 'GitHub-API')
def test_filter_identities_with_sources_list(self):
    """Test if identities are filtered when there is a sources list"""
    # Let's define some identities first
    jsmith = UniqueIdentity(uuid='jsmith')
    jsmith.identities = [
        Identity(name='John Smith', email='jsmith@example.com', source='scm', uuid='jsmith'),
        Identity(name='John Smith', source='scm', uuid='jsmith'),
        Identity(username='jsmith', source='github', uuid='jsmith'),
        Identity(email='', source='scm', uuid='jsmith'),
    ]
    jrae = UniqueIdentity(uuid='jrae')
    jrae.identities = [
        Identity(username='janerae', source='GitHub-API', uuid='jrae'),
        Identity(username='jane_rae', source='GitHub', uuid='jrae'),
        Identity(name='Jane Rae Doe', email='jane.rae@example.net', source='mls', uuid='jrae'),
        Identity(username='jrae', source='github', uuid='jrae'),
        Identity(email='JRAE@example.net', source='scm', uuid='jrae'),
    ]

    # Restrict the filter to the 'GitHub-API' source only.
    matcher = GitHubMatcher(sources=['GitHub-API'])

    self.assertEqual(len(matcher.filter(jsmith)), 0)

    result = matcher.filter(jrae)
    self.assertEqual(len(result), 1)
    fid = result[0]
    self.assertIsInstance(fid, GitHubUsernameIdentity)
    self.assertEqual(fid.uuid, 'jrae')
    self.assertEqual(fid.username, 'janerae')
def test_filter_identities_instances(self):
    """Test whether it raises an error when id is not a UniqueIdentity"""
    matcher = GitHubMatcher()
    for bad_input in ('John Smith', None):
        self.assertRaises(ValueError, matcher.filter, bad_input)
def test_matching_criteria(self):
    """Test whether it returns the matching criteria keys"""
    # The GitHub matcher compares identities by username only.
    self.assertListEqual(GitHubMatcher.matching_criteria(), ['username'])
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
googleapis/googleapis-gen | google/cloud/recommender/v1beta1/recommender-v1beta1-py/google/cloud/recommender_v1beta1/services/recommender/transports/base.py | 1 | 11953 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.recommender_v1beta1.types import insight
from google.cloud.recommender_v1beta1.types import recommendation
from google.cloud.recommender_v1beta1.types import recommender_service
# Resolve the installed package version for the user-agent string sent with
# API requests; fall back to a blank ClientInfo when the distribution
# metadata is not available (e.g. running from an uninstalled source tree).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-recommender',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Detect the installed google-auth version; used by _get_scopes_kwargs to
# decide whether the `default_scopes` keyword is supported. None means the
# version could not be determined.
try:
    # google.auth.__version__ was added in 1.26.0
    _GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
    try:  # try pkg_resources if it is available
        _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
    except pkg_resources.DistributionNotFound:  # pragma: NO COVER
        _GOOGLE_AUTH_VERSION = None
class RecommenderTransport(abc.ABC):
    """Abstract transport class for Recommender.

    Concrete subclasses (gRPC, gRPC-asyncio) implement the RPC properties
    below; this base class handles credential resolution, scope selection
    and per-method retry/timeout policy.
    """

    # OAuth scopes requested when no explicit scopes are supplied.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    # Default service endpoint; ':443' is appended in __init__ when the host
    # has no explicit port.
    DEFAULT_HOST: str = 'recommender.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are given.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        # Keyword arguments for google-auth, shaped per installed version.
        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )

        elif credentials is None:
            # Fall back to Application Default Credentials.
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials is service account credentials, then always try to use self signed JWT.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    # TODO(busunkim): This method is in the base transport
    # to avoid duplicating code across the transport classes. These functions
    # should be deleted once the minimum required versions of google-auth is increased.

    # TODO: Remove this function once google-auth >= 1.25.0 is required
    @classmethod
    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
        scopes_kwargs = {}

        # google-auth >= 1.25.0 supports `default_scopes`; older versions
        # only accept `scopes`, so the defaults are merged in manually.
        if _GOOGLE_AUTH_VERSION and (
            packaging.version.parse(_GOOGLE_AUTH_VERSION)
            >= packaging.version.parse("1.25.0")
        ):
            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
        else:
            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}

        return scopes_kwargs

    def _prep_wrapped_messages(self, client_info):
        """Wrap each RPC callable with its default retry/timeout policy.

        Read-only RPCs (list/get) retry on DeadlineExceeded and
        ServiceUnavailable; mutating (mark_*) RPCs get a timeout only.
        """
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.list_insights: gapic_v1.method.wrap_method(
                self.list_insights,
                default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3,                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.get_insight: gapic_v1.method.wrap_method(
                self.get_insight,
                default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3,                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.mark_insight_accepted: gapic_v1.method.wrap_method(
                self.mark_insight_accepted,
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.list_recommendations: gapic_v1.method.wrap_method(
                self.list_recommendations,
                default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3,                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.get_recommendation: gapic_v1.method.wrap_method(
                self.get_recommendation,
                default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3,                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.mark_recommendation_claimed: gapic_v1.method.wrap_method(
                self.mark_recommendation_claimed,
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.mark_recommendation_succeeded: gapic_v1.method.wrap_method(
                self.mark_recommendation_succeeded,
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.mark_recommendation_failed: gapic_v1.method.wrap_method(
                self.mark_recommendation_failed,
                default_timeout=60.0,
                client_info=client_info,
            ),
         }

    @property
    def list_insights(self) -> Callable[
            [recommender_service.ListInsightsRequest],
            Union[
                recommender_service.ListInsightsResponse,
                Awaitable[recommender_service.ListInsightsResponse]
            ]]:
        """Return the callable for the ``list_insights`` RPC (sync or async)."""
        raise NotImplementedError()

    @property
    def get_insight(self) -> Callable[
            [recommender_service.GetInsightRequest],
            Union[
                insight.Insight,
                Awaitable[insight.Insight]
            ]]:
        """Return the callable for the ``get_insight`` RPC (sync or async)."""
        raise NotImplementedError()

    @property
    def mark_insight_accepted(self) -> Callable[
            [recommender_service.MarkInsightAcceptedRequest],
            Union[
                insight.Insight,
                Awaitable[insight.Insight]
            ]]:
        """Return the callable for the ``mark_insight_accepted`` RPC (sync or async)."""
        raise NotImplementedError()

    @property
    def list_recommendations(self) -> Callable[
            [recommender_service.ListRecommendationsRequest],
            Union[
                recommender_service.ListRecommendationsResponse,
                Awaitable[recommender_service.ListRecommendationsResponse]
            ]]:
        """Return the callable for the ``list_recommendations`` RPC (sync or async)."""
        raise NotImplementedError()

    @property
    def get_recommendation(self) -> Callable[
            [recommender_service.GetRecommendationRequest],
            Union[
                recommendation.Recommendation,
                Awaitable[recommendation.Recommendation]
            ]]:
        """Return the callable for the ``get_recommendation`` RPC (sync or async)."""
        raise NotImplementedError()

    @property
    def mark_recommendation_claimed(self) -> Callable[
            [recommender_service.MarkRecommendationClaimedRequest],
            Union[
                recommendation.Recommendation,
                Awaitable[recommendation.Recommendation]
            ]]:
        """Return the callable for the ``mark_recommendation_claimed`` RPC (sync or async)."""
        raise NotImplementedError()

    @property
    def mark_recommendation_succeeded(self) -> Callable[
            [recommender_service.MarkRecommendationSucceededRequest],
            Union[
                recommendation.Recommendation,
                Awaitable[recommendation.Recommendation]
            ]]:
        """Return the callable for the ``mark_recommendation_succeeded`` RPC (sync or async)."""
        raise NotImplementedError()

    @property
    def mark_recommendation_failed(self) -> Callable[
            [recommender_service.MarkRecommendationFailedRequest],
            Union[
                recommendation.Recommendation,
                Awaitable[recommendation.Recommendation]
            ]]:
        """Return the callable for the ``mark_recommendation_failed`` RPC (sync or async)."""
        raise NotImplementedError()
# Public API of this module.
__all__ = (
    'RecommenderTransport',
)
| apache-2.0 |
MSOpenTech/edx-platform | lms/djangoapps/django_comment_client/base/tests.py | 39 | 47093 | import logging
import json
from django.test.client import Client, RequestFactory
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.urlresolvers import reverse
from mock import patch, ANY, Mock
from nose.tools import assert_true, assert_equal # pylint: disable=no-name-in-module
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from lms.lib.comment_client import Thread
from django_comment_client.base import views
from django_comment_client.tests.group_id import CohortedTopicGroupIdTestMixin, NonCohortedTopicGroupIdTestMixin, GroupIdAssertionMixin
from django_comment_client.tests.utils import CohortedTestCase
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_common.models import Role
from django_comment_common.utils import seed_permissions_roles
from student.tests.factories import CourseEnrollmentFactory, UserFactory, CourseAccessRoleFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
log = logging.getLogger(__name__)

# Base URL of the (mocked) comment service; request URLs are asserted
# against this prefix throughout the tests below.
CS_PREFIX = "http://localhost:4567/api/v1"

# pylint: disable=missing-docstring
class MockRequestSetupMixin(object):
    """Helpers for stubbing out comment-service HTTP responses in tests."""

    def _create_response_mock(self, data):
        # Mimic a `requests` response object: `.text` holds the serialized
        # payload and `.json()` returns the parsed payload.
        serialized = json.dumps(data)
        return Mock(text=serialized, json=Mock(return_value=data))

    def _set_mock_request_data(self, mock_request, data):
        # Make the patched request callable return the canned response.
        mock_request.return_value = self._create_response_mock(data)
@patch('lms.lib.comment_client.utils.requests.request')
class CreateThreadGroupIdTestCase(
        MockRequestSetupMixin,
        CohortedTestCase,
        CohortedTopicGroupIdTestMixin,
        NonCohortedTopicGroupIdTestMixin
):
    """Group-id checks for the create_thread view (see mixins for the cases)."""
    cs_endpoint = "/threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """Invoke create_thread as `user` on `commentable_id`; return the response."""
        self._set_mock_request_data(mock_request, {})
        mock_request.return_value.status_code = 200

        post_data = {"body": "body", "title": "title", "thread_type": "discussion"}
        if pass_group_id:
            post_data["group_id"] = group_id

        request = RequestFactory().post("dummy_url", post_data)
        request.user = user
        request.view_name = "create_thread"

        return views.create_thread(
            request,
            course_id=self.course.id.to_deprecated_string(),
            commentable_id=commentable_id
        )

    def test_group_info_in_response(self, mock_request):
        response = self.call_view(mock_request, "cohorted_topic", self.student, None)
        self._assert_json_response_contains_group_info(response)
@patch('lms.lib.comment_client.utils.requests.request')
class ThreadActionGroupIdTestCase(
        MockRequestSetupMixin,
        CohortedTestCase,
        GroupIdAssertionMixin
):
    """Check that thread-action views echo cohort group info in their responses."""

    def call_view(
            self,
            view_name,
            mock_request,
            user=None,
            post_params=None,
            view_args=None
    ):
        """Invoke `view_name` against a canned cohorted thread; return the response."""
        thread_data = {
            "user_id": str(self.student.id),
            "group_id": self.student_cohort.id,
            "closed": False,
            "type": "thread"
        }
        self._set_mock_request_data(mock_request, thread_data)
        mock_request.return_value.status_code = 200

        request = RequestFactory().post("dummy_url", post_params or {})
        request.user = user or self.student
        request.view_name = view_name

        view = getattr(views, view_name)
        return view(
            request,
            course_id=self.course.id.to_deprecated_string(),
            thread_id="dummy",
            **(view_args or {})
        )

    def test_update(self, mock_request):
        response = self.call_view(
            "update_thread",
            mock_request,
            post_params={"body": "body", "title": "title"}
        )
        self._assert_json_response_contains_group_info(response)

    def test_delete(self, mock_request):
        self._assert_json_response_contains_group_info(
            self.call_view("delete_thread", mock_request)
        )

    def test_vote(self, mock_request):
        # An up-vote followed by undoing it.
        response = self.call_view("vote_for_thread", mock_request, view_args={"value": "up"})
        self._assert_json_response_contains_group_info(response)
        response = self.call_view("undo_vote_for_thread", mock_request)
        self._assert_json_response_contains_group_info(response)

    def test_flag(self, mock_request):
        for view_name in ("flag_abuse_for_thread", "un_flag_abuse_for_thread"):
            self._assert_json_response_contains_group_info(
                self.call_view(view_name, mock_request)
            )

    def test_pin(self, mock_request):
        # Pinning requires moderator privileges.
        for view_name in ("pin_thread", "un_pin_thread"):
            response = self.call_view(view_name, mock_request, user=self.moderator)
            self._assert_json_response_contains_group_info(response)

    def test_openclose(self, mock_request):
        response = self.call_view("openclose_thread", mock_request, user=self.moderator)
        # openclose wraps the thread under a 'content' key.
        self._assert_json_response_contains_group_info(response, lambda d: d['content'])
@patch('lms.lib.comment_client.utils.requests.request')
class ViewsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
    """Create a course plus an enrolled, logged-in student for the view tests."""
    # Patching ENABLE_DISCUSSION_SERVICE affects the contents of urls.py, so
    # super().setUp() must run first to reload the URL conf (UrlResetMixin).
    super(ViewsTestCase, self).setUp(create_user=False)

    # Build a course with one discussion topic.
    self.course = CourseFactory.create(
        org='MITx', course='999',
        discussion_topics={"Some Topic": {"id": "some_topic"}},
        display_name='Robot Super Course',
    )
    self.course_id = self.course.id

    # Seed the forum permissions and roles for the new course.
    call_command('seed_permissions_roles', self.course_id.to_deprecated_string())

    # Patch the comment client user save method so it does not try
    # to create a new cc user when creating a django user.
    with patch('student.models.cc.User.save'):
        username, email, password = 'student', 'student@edx.org', 'test'

        # Create the user and make them active so we can log them in.
        self.student = User.objects.create_user(username, email, password)
        self.student.is_active = True
        self.student.save()

        # Enroll the student in the course.
        CourseEnrollmentFactory(user=self.student, course_id=self.course_id)

        self.client = Client()
        assert_true(self.client.login(username='student', password='test'))
def test_create_thread(self, mock_request):
    """Posting a new thread should forward the payload to the comment service."""
    mock_request.return_value.status_code = 200
    # Canned comment-service response for the created thread.
    self._set_mock_request_data(mock_request, {
        "thread_type": "discussion",
        "title": "Hello",
        "body": "this is a post",
        "course_id": "MITx/999/Robot_Super_Course",
        "anonymous": False,
        "anonymous_to_peers": False,
        "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
        "created_at": "2013-05-10T18:53:43Z",
        "updated_at": "2013-05-10T18:53:43Z",
        "at_position_list": [],
        "closed": False,
        "id": "518d4237b023791dca00000d",
        "user_id": "1",
        "username": "robot",
        "votes": {
            "count": 0,
            "up_count": 0,
            "down_count": 0,
            "point": 0
        },
        "abuse_flaggers": [],
        "type": "thread",
        "group_id": None,
        "pinned": False,
        "endorsed": False,
        "unread_comments_count": 0,
        "read": False,
        "comments_count": 0,
    })
    # Form-encoded POST body as the browser would send it (list values).
    thread = {
        "thread_type": "discussion",
        "body": ["this is a post"],
        "anonymous_to_peers": ["false"],
        "auto_subscribe": ["false"],
        "anonymous": ["false"],
        "title": ["Hello"],
    }
    url = reverse('create_thread', kwargs={'commentable_id': 'i4x-MITx-999-course-Robot_Super_Course',
                                           'course_id': self.course_id.to_deprecated_string()})
    response = self.client.post(url, data=thread)
    assert_true(mock_request.called)
    # The view must POST the normalized thread data to the comment service.
    mock_request.assert_called_with(
        'post',
        '{prefix}/i4x-MITx-999-course-Robot_Super_Course/threads'.format(prefix=CS_PREFIX),
        data={
            'thread_type': 'discussion',
            'body': u'this is a post',
            'anonymous_to_peers': False, 'user_id': 1,
            'title': u'Hello',
            'commentable_id': u'i4x-MITx-999-course-Robot_Super_Course',
            'anonymous': False,
            'course_id': u'MITx/999/Robot_Super_Course',
        },
        params={'request_id': ANY},
        headers=ANY,
        timeout=5
    )
    assert_equal(response.status_code, 200)
def test_delete_comment(self, mock_request):
    """delete_comment should issue a DELETE for the comment to the service."""
    self._set_mock_request_data(mock_request, {
        "user_id": str(self.student.id),
        "closed": False,
    })
    comment_id = "test_comment_id"

    request = RequestFactory().post("dummy_url", {"id": comment_id})
    request.user = self.student
    request.view_name = "delete_comment"

    response = views.delete_comment(
        request,
        course_id=self.course.id.to_deprecated_string(),
        comment_id=comment_id,
    )
    self.assertEqual(response.status_code, 200)
    self.assertTrue(mock_request.called)

    # The last comment-service call must be a DELETE on this comment's URL.
    method, url = mock_request.call_args[0][0], mock_request.call_args[0][1]
    self.assertEqual(method, "delete")
    self.assertTrue(url.endswith("/{}".format(comment_id)))
def _setup_mock_request(self, mock_request, include_depth=False):
    """
    Ensure that mock_request returns the data necessary to make views
    function correctly
    """
    mock_request.return_value.status_code = 200
    payload = {
        "user_id": str(self.student.id),
        "closed": False,
    }
    if include_depth:
        # Sub-comment views read the parent's depth.
        payload["depth"] = 0
    self._set_mock_request_data(mock_request, payload)
def _test_request_error(self, view_name, view_kwargs, data, mock_request):
    """
    Submit a request against the given view with the given data and ensure
    that the result is a 400 error and that no data was posted using
    mock_request
    """
    include_depth = (view_name == "create_sub_comment")
    self._setup_mock_request(mock_request, include_depth=include_depth)

    response = self.client.post(reverse(view_name, kwargs=view_kwargs), data=data)
    self.assertEqual(response.status_code, 400)

    # Any comment-service traffic must have been read-only.
    for call in mock_request.call_args_list:
        self.assertEqual(call[0][0].lower(), "get")
def test_create_thread_no_title(self, mock_request):
    """A thread POSTed without a title must be rejected with a 400."""
    view_kwargs = {"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("create_thread", view_kwargs, {"body": "foo"}, mock_request)
def test_create_thread_empty_title(self, mock_request):
    """A thread with a whitespace-only title must be rejected with a 400."""
    view_kwargs = {"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("create_thread", view_kwargs, {"body": "foo", "title": " "}, mock_request)
def test_create_thread_no_body(self, mock_request):
    """A thread POSTed without a body must be rejected with a 400."""
    view_kwargs = {"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("create_thread", view_kwargs, {"title": "foo"}, mock_request)
def test_create_thread_empty_body(self, mock_request):
    """A thread with a whitespace-only body must be rejected with a 400."""
    view_kwargs = {"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("create_thread", view_kwargs, {"body": " ", "title": "foo"}, mock_request)
def test_update_thread_no_title(self, mock_request):
    """A thread update without a title must be rejected with a 400."""
    view_kwargs = {"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("update_thread", view_kwargs, {"body": "foo"}, mock_request)
def test_update_thread_empty_title(self, mock_request):
    """A thread update with a whitespace-only title must be rejected with a 400."""
    view_kwargs = {"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("update_thread", view_kwargs, {"body": "foo", "title": " "}, mock_request)
def test_update_thread_no_body(self, mock_request):
    """A thread update without a body must be rejected with a 400."""
    view_kwargs = {"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("update_thread", view_kwargs, {"title": "foo"}, mock_request)
def test_update_thread_empty_body(self, mock_request):
    """A thread update with a whitespace-only body must be rejected with a 400."""
    view_kwargs = {"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("update_thread", view_kwargs, {"body": " ", "title": "foo"}, mock_request)
def test_update_thread_course_topic(self, mock_request):
    """Updating a thread with a valid course topic id should succeed."""
    self._setup_mock_request(mock_request)
    url = reverse(
        "update_thread",
        kwargs={"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
    )
    response = self.client.post(
        url, data={"body": "foo", "title": "foo", "commentable_id": "some_topic"}
    )
    self.assertEqual(response.status_code, 200)
@patch('django_comment_client.base.views.get_discussion_categories_ids', return_value=["test_commentable"])
def test_update_thread_wrong_commentable_id(self, mock_get_discussion_id_map, mock_request):
    """A thread update naming an unknown commentable id must be rejected with a 400."""
    view_kwargs = {"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    post_data = {"body": "foo", "title": "foo", "commentable_id": "wrong_commentable"}
    self._test_request_error("update_thread", view_kwargs, post_data, mock_request)
def test_create_comment_no_body(self, mock_request):
    """A comment POSTed without a body must be rejected with a 400."""
    view_kwargs = {"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("create_comment", view_kwargs, {}, mock_request)
def test_create_comment_empty_body(self, mock_request):
    """A comment with a whitespace-only body must be rejected with a 400."""
    view_kwargs = {"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("create_comment", view_kwargs, {"body": " "}, mock_request)
def test_create_sub_comment_no_body(self, mock_request):
    """A sub-comment POSTed without a body must be rejected with a 400."""
    view_kwargs = {"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("create_sub_comment", view_kwargs, {}, mock_request)
def test_create_sub_comment_empty_body(self, mock_request):
    """A sub-comment with a whitespace-only body must be rejected with a 400."""
    view_kwargs = {"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("create_sub_comment", view_kwargs, {"body": " "}, mock_request)
def test_update_comment_no_body(self, mock_request):
    """A comment update without a body must be rejected with a 400."""
    view_kwargs = {"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("update_comment", view_kwargs, {}, mock_request)
def test_update_comment_empty_body(self, mock_request):
    """A comment update with a whitespace-only body must be rejected with a 400."""
    view_kwargs = {"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()}
    self._test_request_error("update_comment", view_kwargs, {"body": " "}, mock_request)
def test_update_comment_basic(self, mock_request):
    """A valid comment update returns 200 and PUTs the new body to the comments service."""
    self._setup_mock_request(mock_request)
    comment_id = "test_comment_id"
    updated_body = "updated body"
    response = self.client.post(
        reverse(
            "update_comment",
            kwargs={"course_id": self.course_id.to_deprecated_string(), "comment_id": comment_id}
        ),
        data={"body": updated_body}
    )
    self.assertEqual(response.status_code, 200)
    # The service must receive a PUT on the comment resource carrying the new body.
    mock_request.assert_called_with(
        "put",
        "{prefix}/comments/{comment_id}".format(prefix=CS_PREFIX, comment_id=comment_id),
        headers=ANY,
        params=ANY,
        timeout=ANY,
        data={"body": updated_body}
    )
def test_flag_thread_open(self, mock_request):
    """Flagging works on an open thread."""
    self.flag_thread(mock_request, False)
def test_flag_thread_close(self, mock_request):
    """Flagging works on a closed thread."""
    self.flag_thread(mock_request, True)
def flag_thread(self, mock_request, is_closed):
    """Flag a thread for abuse and verify the exact comments-service call sequence.

    ``is_closed`` parameterizes the mocked thread's closed state; flagging is
    expected to work either way. The expected sequence is: GET the thread,
    PUT abuse_flag, then GET the thread again.
    """
    mock_request.return_value.status_code = 200
    # Mocked thread as returned by the comments service; abuse_flaggers is
    # non-empty, matching the post-flag state the views re-fetch.
    self._set_mock_request_data(mock_request, {
        "title": "Hello",
        "body": "this is a post",
        "course_id": "MITx/999/Robot_Super_Course",
        "anonymous": False,
        "anonymous_to_peers": False,
        "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
        "created_at": "2013-05-10T18:53:43Z",
        "updated_at": "2013-05-10T18:53:43Z",
        "at_position_list": [],
        "closed": is_closed,
        "id": "518d4237b023791dca00000d",
        "user_id": "1", "username": "robot",
        "votes": {
            "count": 0,
            "up_count": 0,
            "down_count": 0,
            "point": 0
        },
        "abuse_flaggers": [1],
        "type": "thread",
        "group_id": None,
        "pinned": False,
        "endorsed": False,
        "unread_comments_count": 0,
        "read": False,
        "comments_count": 0,
    })
    url = reverse('flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
    response = self.client.post(url)
    assert_true(mock_request.called)
    # Expected (method, url) / kwargs pairs, in order: fetch, flag, re-fetch.
    call_list = [
        (
            ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
            {
                'data': None,
                'params': {'mark_as_read': True, 'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        ),
        (
            ('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
            {
                'data': {'user_id': '1'},
                'params': {'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        ),
        (
            ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
            {
                'data': None,
                'params': {'mark_as_read': True, 'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        )
    ]
    assert_equal(call_list, mock_request.call_args_list)
    assert_equal(response.status_code, 200)
def test_un_flag_thread_open(self, mock_request):
    """Un-flagging works on an open thread."""
    self.un_flag_thread(mock_request, False)
def test_un_flag_thread_close(self, mock_request):
    """Un-flagging works on a closed thread."""
    self.un_flag_thread(mock_request, True)
def un_flag_thread(self, mock_request, is_closed):
    """Remove an abuse flag from a thread and verify the service call sequence.

    Mirrors ``flag_thread`` but the mocked thread has no abuse_flaggers and
    the middle call is a PUT to abuse_unflag.
    """
    mock_request.return_value.status_code = 200
    self._set_mock_request_data(mock_request, {
        "title": "Hello",
        "body": "this is a post",
        "course_id": "MITx/999/Robot_Super_Course",
        "anonymous": False,
        "anonymous_to_peers": False,
        "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
        "created_at": "2013-05-10T18:53:43Z",
        "updated_at": "2013-05-10T18:53:43Z",
        "at_position_list": [],
        "closed": is_closed,
        "id": "518d4237b023791dca00000d",
        "user_id": "1",
        "username": "robot",
        "votes": {
            "count": 0,
            "up_count": 0,
            "down_count": 0,
            "point": 0
        },
        "abuse_flaggers": [],
        "type": "thread",
        "group_id": None,
        "pinned": False,
        "endorsed": False,
        "unread_comments_count": 0,
        "read": False,
        "comments_count": 0
    })
    url = reverse('un_flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
    response = self.client.post(url)
    assert_true(mock_request.called)
    # Expected sequence: fetch thread, unflag it, re-fetch thread.
    call_list = [
        (
            ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
            {
                'data': None,
                'params': {'mark_as_read': True, 'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        ),
        (
            ('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
            {
                'data': {'user_id': '1'},
                'params': {'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        ),
        (
            ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
            {
                'data': None,
                'params': {'mark_as_read': True, 'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        )
    ]
    assert_equal(call_list, mock_request.call_args_list)
    assert_equal(response.status_code, 200)
def test_flag_comment_open(self, mock_request):
    """Flagging works on a comment in an open thread."""
    self.flag_comment(mock_request, False)
def test_flag_comment_close(self, mock_request):
    """Flagging works on a comment in a closed thread."""
    self.flag_comment(mock_request, True)
def flag_comment(self, mock_request, is_closed):
    """Flag a comment for abuse and verify the comments-service call sequence.

    Comment analogue of ``flag_thread``: GET the comment, PUT abuse_flag,
    GET the comment again. Note comment GETs carry no mark_as_read param.
    """
    mock_request.return_value.status_code = 200
    self._set_mock_request_data(mock_request, {
        "body": "this is a comment",
        "course_id": "MITx/999/Robot_Super_Course",
        "anonymous": False,
        "anonymous_to_peers": False,
        "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
        "created_at": "2013-05-10T18:53:43Z",
        "updated_at": "2013-05-10T18:53:43Z",
        "at_position_list": [],
        "closed": is_closed,
        "id": "518d4237b023791dca00000d",
        "user_id": "1",
        "username": "robot",
        "votes": {
            "count": 0,
            "up_count": 0,
            "down_count": 0,
            "point": 0
        },
        "abuse_flaggers": [1],
        "type": "comment",
        "endorsed": False
    })
    url = reverse('flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
    response = self.client.post(url)
    assert_true(mock_request.called)
    # Expected sequence: fetch comment, flag it, re-fetch comment.
    call_list = [
        (
            ('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
            {
                'data': None,
                'params': {'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        ),
        (
            ('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
            {
                'data': {'user_id': '1'},
                'params': {'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        ),
        (
            ('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
            {
                'data': None,
                'params': {'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        )
    ]
    assert_equal(call_list, mock_request.call_args_list)
    assert_equal(response.status_code, 200)
def test_un_flag_comment_open(self, mock_request):
    """Un-flagging works on a comment in an open thread."""
    self.un_flag_comment(mock_request, False)
def test_un_flag_comment_close(self, mock_request):
    """Un-flagging works on a comment in a closed thread."""
    self.un_flag_comment(mock_request, True)
def un_flag_comment(self, mock_request, is_closed):
    """Remove an abuse flag from a comment and verify the service call sequence.

    Mirrors ``flag_comment`` with an empty abuse_flaggers list and an
    abuse_unflag PUT as the middle call.
    """
    mock_request.return_value.status_code = 200
    self._set_mock_request_data(mock_request, {
        "body": "this is a comment",
        "course_id": "MITx/999/Robot_Super_Course",
        "anonymous": False,
        "anonymous_to_peers": False,
        "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
        "created_at": "2013-05-10T18:53:43Z",
        "updated_at": "2013-05-10T18:53:43Z",
        "at_position_list": [],
        "closed": is_closed,
        "id": "518d4237b023791dca00000d",
        "user_id": "1",
        "username": "robot",
        "votes": {
            "count": 0,
            "up_count": 0,
            "down_count": 0,
            "point": 0
        },
        "abuse_flaggers": [],
        "type": "comment",
        "endorsed": False
    })
    url = reverse('un_flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
    response = self.client.post(url)
    assert_true(mock_request.called)
    # Expected sequence: fetch comment, unflag it, re-fetch comment.
    call_list = [
        (
            ('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
            {
                'data': None,
                'params': {'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        ),
        (
            ('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
            {
                'data': {'user_id': '1'},
                'params': {'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        ),
        (
            ('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
            {
                'data': None,
                'params': {'request_id': ANY},
                'headers': ANY,
                'timeout': 5
            }
        )
    ]
    assert_equal(call_list, mock_request.call_args_list)
    assert_equal(response.status_code, 200)
@patch("lms.lib.comment_client.utils.requests.request")
class ViewPermissionsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ViewPermissionsTestCase, self).setUp()
self.password = "test password"
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create(password=self.password)
self.moderator = UserFactory.create(password=self.password)
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
def test_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_un_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_un_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def _set_mock_request_thread_and_comment(self, mock_request, thread_data, comment_data):
def handle_request(*args, **kwargs):
url = args[1]
if "/threads/" in url:
return self._create_response_mock(thread_data)
elif "/comments/" in url:
return self._create_response_mock(comment_data)
else:
raise ArgumentError("Bad url to mock request")
mock_request.side_effect = handle_request
def test_endorse_response_as_staff(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_endorse_response_as_student(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.moderator.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_endorse_response_as_student_question_author(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
class CreateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
    """Verify that thread creation round-trips unicode body/title text."""

    def setUp(self):
        super(CreateThreadUnicodeTestCase, self).setUp()
        self.course = CourseFactory.create()
        seed_permissions_roles(self.course.id)
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)

    @patch('lms.lib.comment_client.utils.requests.request')
    def _test_unicode_data(self, text, mock_request,):
        """
        Test to make sure unicode data in a thread doesn't break it.
        """
        self._set_mock_request_data(mock_request, {})
        request = RequestFactory().post("dummy_url", {"thread_type": "discussion", "body": text, "title": text})
        request.user = self.student
        request.view_name = "create_thread"
        response = views.create_thread(request, course_id=self.course.id.to_deprecated_string(), commentable_id="test_commentable")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(mock_request.called)
        # The unicode text must reach the service unchanged in both fields.
        self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
        self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
class UpdateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
    """Verify that thread updates round-trip unicode text and metadata."""

    def setUp(self):
        super(UpdateThreadUnicodeTestCase, self).setUp()
        self.course = CourseFactory.create()
        seed_permissions_roles(self.course.id)
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)

    @patch('django_comment_client.base.views.get_discussion_categories_ids', return_value=["test_commentable"])
    @patch('lms.lib.comment_client.utils.requests.request')
    def _test_unicode_data(self, text, mock_request, mock_get_discussion_id_map):
        """Update a thread with unicode body/title and check the outgoing payload."""
        self._set_mock_request_data(mock_request, {
            "user_id": str(self.student.id),
            "closed": False,
        })
        request = RequestFactory().post("dummy_url", {"body": text, "title": text, "thread_type": "question", "commentable_id": "test_commentable"})
        request.user = self.student
        request.view_name = "update_thread"
        response = views.update_thread(request, course_id=self.course.id.to_deprecated_string(), thread_id="dummy_thread_id")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(mock_request.called)
        self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
        self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
        self.assertEqual(mock_request.call_args[1]["data"]["thread_type"], "question")
        self.assertEqual(mock_request.call_args[1]["data"]["commentable_id"], "test_commentable")
class CreateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
    """Verify that comment creation round-trips unicode body text."""

    def setUp(self):
        super(CreateCommentUnicodeTestCase, self).setUp()
        self.course = CourseFactory.create()
        seed_permissions_roles(self.course.id)
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)

    @patch('lms.lib.comment_client.utils.requests.request')
    def _test_unicode_data(self, text, mock_request):
        self._set_mock_request_data(mock_request, {
            "closed": False,
        })
        # We have to get clever here due to Thread's setters and getters.
        # Patch won't work with it.
        try:
            Thread.commentable_id = Mock()
            request = RequestFactory().post("dummy_url", {"body": text})
            request.user = self.student
            request.view_name = "create_comment"
            response = views.create_comment(
                request, course_id=unicode(self.course.id), thread_id="dummy_thread_id"
            )
            self.assertEqual(response.status_code, 200)
            self.assertTrue(mock_request.called)
            self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
        finally:
            # Always remove the class-level patch so other tests see the real attribute.
            del Thread.commentable_id
class UpdateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
    """Verify that comment updates round-trip unicode body text."""

    def setUp(self):
        super(UpdateCommentUnicodeTestCase, self).setUp()
        self.course = CourseFactory.create()
        seed_permissions_roles(self.course.id)
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)

    @patch('lms.lib.comment_client.utils.requests.request')
    def _test_unicode_data(self, text, mock_request):
        self._set_mock_request_data(mock_request, {
            "user_id": str(self.student.id),
            "closed": False,
        })
        request = RequestFactory().post("dummy_url", {"body": text})
        request.user = self.student
        request.view_name = "update_comment"
        response = views.update_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(mock_request.called)
        self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
class CreateSubCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
    """
    Make sure comments under a response can handle unicode.
    """

    def setUp(self):
        super(CreateSubCommentUnicodeTestCase, self).setUp()
        self.course = CourseFactory.create()
        seed_permissions_roles(self.course.id)
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)

    @patch('lms.lib.comment_client.utils.requests.request')
    def _test_unicode_data(self, text, mock_request):
        """
        Create a comment with unicode in it.
        """
        self._set_mock_request_data(mock_request, {
            "closed": False,
            "depth": 1,
            "thread_id": "test_thread"
        })
        request = RequestFactory().post("dummy_url", {"body": text})
        request.user = self.student
        request.view_name = "create_sub_comment"
        # Patch the class attribute directly (Thread's getters/setters defeat
        # mock.patch); the finally block guarantees cleanup.
        Thread.commentable_id = Mock()
        try:
            response = views.create_sub_comment(
                request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id"
            )
            self.assertEqual(response.status_code, 200)
            self.assertTrue(mock_request.called)
            self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
        finally:
            del Thread.commentable_id
class ForumEventTestCase(ModuleStoreTestCase, MockRequestSetupMixin):
    """
    Forum actions are expected to launch analytics events. Test these here.
    """

    def setUp(self):
        super(ForumEventTestCase, self).setUp()
        self.course = CourseFactory.create()
        seed_permissions_roles(self.course.id)
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
        self.student.roles.add(Role.objects.get(name="Student", course_id=self.course.id))
        CourseAccessRoleFactory(course_id=self.course.id, user=self.student, role='Wizard')

    @patch('eventtracking.tracker.emit')
    @patch('lms.lib.comment_client.utils.requests.request')
    def test_thread_event(self, __, mock_emit):
        """Creating a thread emits edx.forum.thread.created with full metadata."""
        request = RequestFactory().post(
            "dummy_url", {
                "thread_type": "discussion",
                "body": "Test text",
                "title": "Test",
                "auto_subscribe": True
            }
        )
        request.user = self.student
        request.view_name = "create_thread"
        views.create_thread(request, course_id=self.course.id.to_deprecated_string(), commentable_id="test_commentable")
        event_name, event = mock_emit.call_args[0]
        self.assertEqual(event_name, 'edx.forum.thread.created')
        self.assertEqual(event['body'], 'Test text')
        self.assertEqual(event['title'], 'Test')
        self.assertEqual(event['commentable_id'], 'test_commentable')
        self.assertEqual(event['user_forums_roles'], ['Student'])
        self.assertEqual(event['options']['followed'], True)
        self.assertEqual(event['user_course_roles'], ['Wizard'])
        self.assertEqual(event['anonymous'], False)
        self.assertEqual(event['group_id'], None)
        self.assertEqual(event['thread_type'], 'discussion')
        # Consistency fix: use assertEqual (the deprecated assertEquals alias
        # was used here while the rest of the class uses assertEqual).
        self.assertEqual(event['anonymous_to_peers'], False)

    @patch('eventtracking.tracker.emit')
    @patch('lms.lib.comment_client.utils.requests.request')
    def test_response_event(self, mock_request, mock_emit):
        """
        Check to make sure an event is fired when a user responds to a thread.
        """
        mock_request.return_value.status_code = 200
        self._set_mock_request_data(mock_request, {
            "closed": False,
            "commentable_id": 'test_commentable_id',
            'thread_id': 'test_thread_id',
        })
        request = RequestFactory().post("dummy_url", {"body": "Test comment", 'auto_subscribe': True})
        request.user = self.student
        request.view_name = "create_comment"
        views.create_comment(request, course_id=self.course.id.to_deprecated_string(), thread_id='test_thread_id')
        event_name, event = mock_emit.call_args[0]
        self.assertEqual(event_name, 'edx.forum.response.created')
        self.assertEqual(event['body'], "Test comment")
        self.assertEqual(event['commentable_id'], 'test_commentable_id')
        self.assertEqual(event['user_forums_roles'], ['Student'])
        self.assertEqual(event['user_course_roles'], ['Wizard'])
        self.assertEqual(event['discussion']['id'], 'test_thread_id')
        self.assertEqual(event['options']['followed'], True)

    @patch('eventtracking.tracker.emit')
    @patch('lms.lib.comment_client.utils.requests.request')
    def test_comment_event(self, mock_request, mock_emit):
        """
        Ensure an event is fired when someone comments on a response.
        """
        self._set_mock_request_data(mock_request, {
            "closed": False,
            "depth": 1,
            "thread_id": "test_thread_id",
            "commentable_id": "test_commentable_id",
            "parent_id": "test_response_id"
        })
        request = RequestFactory().post("dummy_url", {"body": "Another comment"})
        request.user = self.student
        request.view_name = "create_sub_comment"
        views.create_sub_comment(
            request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id"
        )
        event_name, event = mock_emit.call_args[0]
        self.assertEqual(event_name, "edx.forum.comment.created")
        self.assertEqual(event['body'], 'Another comment')
        self.assertEqual(event['discussion']['id'], 'test_thread_id')
        self.assertEqual(event['response']['id'], 'test_response_id')
        self.assertEqual(event['user_forums_roles'], ['Student'])
        self.assertEqual(event['user_course_roles'], ['Wizard'])
        self.assertEqual(event['options']['followed'], False)
class UsersEndpointTestCase(ModuleStoreTestCase, MockRequestSetupMixin):
    """Tests for the forum users-search endpoint (exact-username matching)."""

    def set_post_counts(self, mock_request, threads_count=1, comments_count=1):
        """
        sets up a mock response from the comments service for getting post counts for our other_user
        """
        self._set_mock_request_data(mock_request, {
            "threads_count": threads_count,
            "comments_count": comments_count,
        })

    def setUp(self):
        super(UsersEndpointTestCase, self).setUp()
        self.course = CourseFactory.create()
        seed_permissions_roles(self.course.id)
        self.student = UserFactory.create()
        self.enrollment = CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
        self.other_user = UserFactory.create(username="other")
        CourseEnrollmentFactory(user=self.other_user, course_id=self.course.id)

    def make_request(self, method='get', course_id=None, **kwargs):
        """Issue a users request as self.student; extra kwargs become query params."""
        course_id = course_id or self.course.id
        request = getattr(RequestFactory(), method)("dummy_url", kwargs)
        request.user = self.student
        request.view_name = "users"
        return views.users(request, course_id=course_id.to_deprecated_string())

    @patch('lms.lib.comment_client.utils.requests.request')
    def test_finds_exact_match(self, mock_request):
        """An exact username match is returned."""
        self.set_post_counts(mock_request)
        response = self.make_request(username="other")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            json.loads(response.content)["users"],
            [{"id": self.other_user.id, "username": self.other_user.username}]
        )

    @patch('lms.lib.comment_client.utils.requests.request')
    def test_finds_no_match(self, mock_request):
        """A near-miss username returns an empty result, not a fuzzy match."""
        self.set_post_counts(mock_request)
        response = self.make_request(username="othor")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content)["users"], [])

    def test_requires_GET(self):
        """Non-GET methods are rejected with 405."""
        response = self.make_request(method='post', username="other")
        self.assertEqual(response.status_code, 405)

    def test_requires_username_param(self):
        """Omitting the username parameter is a 400 with errors and no users key."""
        response = self.make_request()
        self.assertEqual(response.status_code, 400)
        content = json.loads(response.content)
        self.assertIn("errors", content)
        self.assertNotIn("users", content)

    def test_course_does_not_exist(self):
        """An unknown course id yields 404 with errors."""
        course_id = SlashSeparatedCourseKey.from_deprecated_string("does/not/exist")
        response = self.make_request(course_id=course_id, username="other")
        self.assertEqual(response.status_code, 404)
        content = json.loads(response.content)
        self.assertIn("errors", content)
        self.assertNotIn("users", content)

    def test_requires_requestor_enrolled_in_course(self):
        """Only enrolled users may search; unenrolled requestors get 404."""
        # unenroll self.student from the course.
        self.enrollment.delete()
        response = self.make_request(username="other")
        self.assertEqual(response.status_code, 404)
        content = json.loads(response.content)
        self.assertIn("errors", content)
        self.assertNotIn("users", content)

    @patch('lms.lib.comment_client.utils.requests.request')
    def test_requires_matched_user_has_forum_content(self, mock_request):
        """Users with zero threads and zero comments are filtered out of results."""
        self.set_post_counts(mock_request, 0, 0)
        response = self.make_request(username="other")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content)["users"], [])
| agpl-3.0 |
andrewcmyers/tensorflow | tensorflow/python/util/protobuf/compare_test.py | 165 | 19171 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python.util.protobuf.compare."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import textwrap
import six
from google.protobuf import text_format
from tensorflow.python.platform import googletest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.protobuf import compare_test_pb2
def LargePbs(*args):
  """Converts ASCII string Large PBs to messages."""
  def _parse(text):
    # Each argument is an independent text-format serialization of a Large.
    message = compare_test_pb2.Large()
    text_format.Merge(text, message)
    return message
  return [_parse(text) for text in args]
class ProtoEqTest(googletest.TestCase):
  """Exercises compare.ProtoEq over text-format Large protos."""

  def assertNotEquals(self, a, b):
    """Asserts that ProtoEq says a != b."""
    a, b = LargePbs(a, b)
    googletest.TestCase.assertEquals(self, compare.ProtoEq(a, b), False)

  def assertEquals(self, a, b):
    """Asserts that ProtoEq says a == b."""
    a, b = LargePbs(a, b)
    googletest.TestCase.assertEquals(self, compare.ProtoEq(a, b), True)

  def testPrimitives(self):
    # Non-message values fall back to ordinary equality.
    googletest.TestCase.assertEqual(self, True, compare.ProtoEq('a', 'a'))
    googletest.TestCase.assertEqual(self, False, compare.ProtoEq('b', 'a'))

  def testEmpty(self):
    self.assertEquals('', '')

  def testPrimitiveFields(self):
    # Presence of a scalar field differs from its absence, even at defaults.
    self.assertNotEquals('string_: "a"', '')
    self.assertEquals('string_: "a"', 'string_: "a"')
    self.assertNotEquals('string_: "b"', 'string_: "a"')
    self.assertNotEquals('string_: "ab"', 'string_: "aa"')
    self.assertNotEquals('int64_: 0', '')
    self.assertEquals('int64_: 0', 'int64_: 0')
    self.assertNotEquals('int64_: -1', '')
    self.assertNotEquals('int64_: 1', 'int64_: 0')
    self.assertNotEquals('int64_: 0', 'int64_: -1')
    self.assertNotEquals('float_: 0.0', '')
    self.assertEquals('float_: 0.0', 'float_: 0.0')
    self.assertNotEquals('float_: -0.1', '')
    self.assertNotEquals('float_: 3.14', 'float_: 0')
    self.assertNotEquals('float_: 0', 'float_: -0.1')
    self.assertEquals('float_: -0.1', 'float_: -0.1')
    self.assertNotEquals('bool_: true', '')
    self.assertNotEquals('bool_: false', '')
    self.assertNotEquals('bool_: true', 'bool_: false')
    self.assertEquals('bool_: false', 'bool_: false')
    self.assertEquals('bool_: true', 'bool_: true')
    self.assertNotEquals('enum_: A', '')
    self.assertNotEquals('enum_: B', 'enum_: A')
    self.assertNotEquals('enum_: C', 'enum_: B')
    self.assertEquals('enum_: C', 'enum_: C')

  def testRepeatedPrimitives(self):
    # Repeated fields compare element-wise, with length as a tiebreaker.
    self.assertNotEquals('int64s: 0', '')
    self.assertEquals('int64s: 0', 'int64s: 0')
    self.assertNotEquals('int64s: 1', 'int64s: 0')
    self.assertNotEquals('int64s: 0 int64s: 0', '')
    self.assertNotEquals('int64s: 0 int64s: 0', 'int64s: 0')
    self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0')
    self.assertNotEquals('int64s: 0 int64s: 1', 'int64s: 0')
    self.assertNotEquals('int64s: 1', 'int64s: 0 int64s: 2')
    self.assertNotEquals('int64s: 2 int64s: 0', 'int64s: 1')
    self.assertEquals('int64s: 0 int64s: 0', 'int64s: 0 int64s: 0')
    self.assertEquals('int64s: 0 int64s: 1', 'int64s: 0 int64s: 1')
    self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 0')
    self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 1')
    self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 2')
    self.assertNotEquals('int64s: 1 int64s: 1', 'int64s: 1 int64s: 0')
    self.assertNotEquals('int64s: 1 int64s: 1', 'int64s: 1 int64s: 0 int64s: 2')

  def testMessage(self):
    # Submessages compare recursively; presence of an empty submessage matters.
    self.assertNotEquals('small <>', '')
    self.assertEquals('small <>', 'small <>')
    self.assertNotEquals('small < strings: "a" >', '')
    self.assertNotEquals('small < strings: "a" >', 'small <>')
    self.assertEquals('small < strings: "a" >', 'small < strings: "a" >')
    self.assertNotEquals('small < strings: "b" >', 'small < strings: "a" >')
    self.assertNotEquals('small < strings: "a" strings: "b" >',
                         'small < strings: "a" >')
    self.assertNotEquals('string_: "a"', 'small <>')
    self.assertNotEquals('string_: "a"', 'small < strings: "b" >')
    self.assertNotEquals('string_: "a"', 'small < strings: "b" strings: "c" >')
    self.assertNotEquals('string_: "a" small <>', 'small <>')
    self.assertNotEquals('string_: "a" small <>', 'small < strings: "b" >')
    self.assertEquals('string_: "a" small <>', 'string_: "a" small <>')
    self.assertNotEquals('string_: "a" small < strings: "a" >',
                         'string_: "a" small <>')
    self.assertEquals('string_: "a" small < strings: "a" >',
                      'string_: "a" small < strings: "a" >')
    self.assertNotEquals('string_: "a" small < strings: "a" >',
                         'int64_: 1 small < strings: "a" >')
    self.assertNotEquals('string_: "a" small < strings: "a" >', 'int64_: 1')
    self.assertNotEquals('string_: "a"', 'int64_: 1 small < strings: "a" >')
    self.assertNotEquals('string_: "a" int64_: 0 small < strings: "a" >',
                         'int64_: 1 small < strings: "a" >')
    self.assertNotEquals('string_: "a" int64_: 1 small < strings: "a" >',
                         'string_: "a" int64_: 0 small < strings: "a" >')
    self.assertEquals('string_: "a" int64_: 0 small < strings: "a" >',
                      'string_: "a" int64_: 0 small < strings: "a" >')

  def testNestedMessage(self):
    self.assertNotEquals('medium <>', '')
    self.assertEquals('medium <>', 'medium <>')
    self.assertNotEquals('medium < smalls <> >', 'medium <>')
    self.assertEquals('medium < smalls <> >', 'medium < smalls <> >')
    self.assertNotEquals('medium < smalls <> smalls <> >',
                         'medium < smalls <> >')
    self.assertEquals('medium < smalls <> smalls <> >',
                      'medium < smalls <> smalls <> >')
    self.assertNotEquals('medium < int32s: 0 >', 'medium < smalls <> >')
    self.assertNotEquals('medium < smalls < strings: "a"> >',
                         'medium < smalls <> >')

  def testTagOrder(self):
    """Tests that different fields are ordered by tag number.

    For reference, here are the relevant tag numbers from compare_test.proto:
      optional string string_ = 1;
      optional int64 int64_ = 2;
      optional float float_ = 3;
      optional Medium medium = 7;
      optional Small small = 8;
    """
    self.assertNotEquals('string_: "a" ',
                         '             int64_: 1 ')
    self.assertNotEquals('string_: "a" int64_: 2 ',
                         '             int64_: 1 ')
    self.assertNotEquals('string_: "b" int64_: 1 ',
                         'string_: "a" int64_: 2 ')
    self.assertEquals('string_: "a" int64_: 1 ',
                      'string_: "a" int64_: 1 ')
    self.assertNotEquals('string_: "a" int64_: 1 float_: 0.0',
                         'string_: "a" int64_: 1 ')
    self.assertEquals('string_: "a" int64_: 1 float_: 0.0',
                      'string_: "a" int64_: 1 float_: 0.0')
    self.assertNotEquals('string_: "a" int64_: 1 float_: 0.1',
                         'string_: "a" int64_: 1 float_: 0.0')
    self.assertNotEquals('string_: "a" int64_: 2 float_: 0.0',
                         'string_: "a" int64_: 1 float_: 0.1')
    self.assertNotEquals('string_: "a" ',
                         '             int64_: 1 float_: 0.1')
    self.assertNotEquals('string_: "a" float_: 0.0',
                         '             int64_: 1 ')
    self.assertNotEquals('string_: "b" float_: 0.0',
                         'string_: "a" int64_: 1 ')
    self.assertNotEquals('string_: "a"', 'small < strings: "a" >')
    self.assertNotEquals('string_: "a" small < strings: "a" >',
                         'small < strings: "b" >')
    self.assertNotEquals('string_: "a" small < strings: "b" >',
                         'string_: "a" small < strings: "a" >')
    self.assertEquals('string_: "a" small < strings: "a" >',
                      'string_: "a" small < strings: "a" >')
    self.assertNotEquals('string_: "a" medium <>',
                         'string_: "a" small < strings: "a" >')
    self.assertNotEquals('string_: "a" medium < smalls <> >',
                         'string_: "a" small < strings: "a" >')
    self.assertNotEquals('medium <>', 'small < strings: "a" >')
    self.assertNotEquals('medium <> small <>', 'small < strings: "a" >')
    self.assertNotEquals('medium < smalls <> >', 'small < strings: "a" >')
    self.assertNotEquals('medium < smalls < strings: "a" > >',
                         'small < strings: "b" >')
class NormalizeNumbersTest(googletest.TestCase):
    """Tests for NormalizeNumberFields()."""

    def testNormalizesInts(self):
        # Small and large int64 values must all end up as a Python int type.
        pb = compare_test_pb2.Large()
        pb.int64_ = 4
        compare.NormalizeNumberFields(pb)
        self.assertTrue(isinstance(pb.int64_, six.integer_types))

        # NOTE(review): this case is identical to the previous one here;
        # upstream presumably used a Python 2 long literal (4L) that was
        # lost in extraction -- confirm against the original file.
        pb.int64_ = 4
        compare.NormalizeNumberFields(pb)
        self.assertTrue(isinstance(pb.int64_, six.integer_types))

        pb.int64_ = 9999999999999999
        compare.NormalizeNumberFields(pb)
        self.assertTrue(isinstance(pb.int64_, six.integer_types))

    def testNormalizesRepeatedInts(self):
        pb = compare_test_pb2.Large()
        pb.int64s.extend([1, 400, 999999999999999])
        compare.NormalizeNumberFields(pb)
        self.assertTrue(isinstance(pb.int64s[0], six.integer_types))
        self.assertTrue(isinstance(pb.int64s[1], six.integer_types))
        self.assertTrue(isinstance(pb.int64s[2], six.integer_types))

    def testNormalizesFloats(self):
        # Two floats differing only beyond float32 precision must become
        # equal after normalization.
        pb1 = compare_test_pb2.Large()
        pb1.float_ = 1.2314352351231
        pb2 = compare_test_pb2.Large()
        pb2.float_ = 1.231435
        self.assertNotEqual(pb1.float_, pb2.float_)
        compare.NormalizeNumberFields(pb1)
        compare.NormalizeNumberFields(pb2)
        self.assertEqual(pb1.float_, pb2.float_)

    def testNormalizesRepeatedFloats(self):
        pb = compare_test_pb2.Large()
        pb.medium.floats.extend([0.111111111, 0.111111])
        compare.NormalizeNumberFields(pb)
        for value in pb.medium.floats:
            self.assertAlmostEqual(0.111111, value)

    def testNormalizesDoubles(self):
        pb1 = compare_test_pb2.Large()
        pb1.double_ = 1.2314352351231
        pb2 = compare_test_pb2.Large()
        pb2.double_ = 1.2314352
        self.assertNotEqual(pb1.double_, pb2.double_)
        compare.NormalizeNumberFields(pb1)
        compare.NormalizeNumberFields(pb2)
        self.assertEqual(pb1.double_, pb2.double_)

    def testNormalizesMaps(self):
        # Smoke test: normalization must traverse map values without error.
        pb = compare_test_pb2.WithMap()
        pb.value_message[4].strings.extend(['a', 'b', 'c'])
        pb.value_string['d'] = 'e'
        compare.NormalizeNumberFields(pb)
class AssertTest(googletest.TestCase):
    """Tests assertProtoEqual().

    NOTE(review): the expected-diff literals in this class appear to have
    lost their internal indentation during extraction; they are dedented
    and regex-escaped before matching, so verify them against the real
    diff output of assertProtoEqual().
    """

    def assertProtoEqual(self, a, b, **kwargs):
        # Accept either proto messages or text-format strings.
        if isinstance(a, six.string_types) and isinstance(b, six.string_types):
            a, b = LargePbs(a, b)
        compare.assertProtoEqual(self, a, b, **kwargs)

    def assertAll(self, a, **kwargs):
        """Checks that all possible asserts pass."""
        self.assertProtoEqual(a, a, **kwargs)

    def assertSameNotEqual(self, a, b):
        """Checks that assertProtoEqual() fails."""
        self.assertRaises(AssertionError, self.assertProtoEqual, a, b)

    def assertNone(self, a, b, message, **kwargs):
        """Checks that all possible asserts fail with the given message."""
        message = re.escape(textwrap.dedent(message))
        self.assertRaisesRegexp(AssertionError, message, self.assertProtoEqual, a,
                                b, **kwargs)

    def testCheckInitialized(self):
        # neither is initialized
        a = compare_test_pb2.Labeled()
        a.optional = 1
        self.assertNone(a, a, 'Initialization errors: ', check_initialized=True)
        self.assertAll(a, check_initialized=False)

        # a is initialized, b isn't
        b = copy.deepcopy(a)
        a.required = 2
        self.assertNone(a, b, 'Initialization errors: ', check_initialized=True)
        self.assertNone(
            a,
            b,
            """
- required: 2
optional: 1
""",
            check_initialized=False)

        # both are initialized
        a = compare_test_pb2.Labeled()
        a.required = 2
        self.assertAll(a, check_initialized=True)
        self.assertAll(a, check_initialized=False)

        b = copy.deepcopy(a)
        b.required = 3
        message = """
- required: 2
? ^
+ required: 3
? ^
"""
        self.assertNone(a, b, message, check_initialized=True)
        self.assertNone(a, b, message, check_initialized=False)

    def testAssertEqualWithStringArg(self):
        # A text-format string may be compared directly against a message.
        pb = compare_test_pb2.Large()
        pb.string_ = 'abc'
        pb.float_ = 1.234
        compare.assertProtoEqual(self, """
string_: 'abc'
float_: 1.234
""", pb)

    def testNormalizesNumbers(self):
        pb1 = compare_test_pb2.Large()
        pb1.int64_ = 4
        pb2 = compare_test_pb2.Large()
        pb2.int64_ = 4
        compare.assertProtoEqual(self, pb1, pb2)

    def testNormalizesFloat(self):
        # 4.0 and 4 must compare equal with normalize_numbers=True.
        pb1 = compare_test_pb2.Large()
        pb1.double_ = 4.0
        pb2 = compare_test_pb2.Large()
        pb2.double_ = 4
        compare.assertProtoEqual(self, pb1, pb2, normalize_numbers=True)

    def testPrimitives(self):
        self.assertAll('string_: "x"')
        self.assertNone('string_: "x"', 'string_: "y"', """
- string_: "x"
? ^
+ string_: "y"
? ^
""")

    def testRepeatedPrimitives(self):
        self.assertAll('int64s: 0 int64s: 1')

        # Differently-ordered repeated fields do not compare equal.
        self.assertSameNotEqual('int64s: 0 int64s: 1', 'int64s: 1 int64s: 0')
        self.assertSameNotEqual('int64s: 0 int64s: 1 int64s: 2',
                                'int64s: 2 int64s: 1 int64s: 0')

        self.assertSameNotEqual('int64s: 0', 'int64s: 0 int64s: 0')
        self.assertSameNotEqual('int64s: 0 int64s: 1',
                                'int64s: 1 int64s: 0 int64s: 1')

        self.assertNone('int64s: 0', 'int64s: 0 int64s: 2', """
int64s: 0
+ int64s: 2
""")
        self.assertNone('int64s: 0 int64s: 1', 'int64s: 0 int64s: 2', """
int64s: 0
- int64s: 1
? ^
+ int64s: 2
? ^
""")

    def testMessage(self):
        self.assertAll('medium: {}')
        self.assertAll('medium: { smalls: {} }')
        self.assertAll('medium: { int32s: 1 smalls: {} }')

        self.assertAll('medium: { smalls: { strings: "x" } }')
        self.assertAll(
            'medium: { smalls: { strings: "x" } } small: { strings: "y" }')

        self.assertSameNotEqual('medium: { smalls: { strings: "x" strings: "y" } }',
                                'medium: { smalls: { strings: "y" strings: "x" } }')
        self.assertSameNotEqual(
            'medium: { smalls: { strings: "x" } smalls: { strings: "y" } }',
            'medium: { smalls: { strings: "y" } smalls: { strings: "x" } }')

        self.assertSameNotEqual(
            'medium: { smalls: { strings: "x" strings: "y" strings: "x" } }',
            'medium: { smalls: { strings: "y" strings: "x" } }')
        self.assertSameNotEqual(
            'medium: { smalls: { strings: "x" } int32s: 0 }',
            'medium: { int32s: 0 smalls: { strings: "x" } int32s: 0 }')

        self.assertNone('medium: {}', 'medium: { smalls: { strings: "x" } }', """
medium {
+ smalls {
+ strings: "x"
+ }
}
""")
        self.assertNone('medium: { smalls: { strings: "x" } }',
                        'medium: { smalls: {} }', """
medium {
smalls {
- strings: "x"
}
}
""")
        self.assertNone('medium: { int32s: 0 }', 'medium: { int32s: 1 }', """
medium {
- int32s: 0
? ^
+ int32s: 1
? ^
}
""")

    def testMsgPassdown(self):
        # The msg= keyword must be included in the raised AssertionError.
        self.assertRaisesRegexp(
            AssertionError,
            'test message passed down',
            self.assertProtoEqual,
            'medium: {}',
            'medium: { smalls: { strings: "x" } }',
            msg='test message passed down')

    def testRepeatedMessage(self):
        self.assertAll('medium: { smalls: {} smalls: {} }')
        self.assertAll('medium: { smalls: { strings: "x" } } medium: {}')
        self.assertAll('medium: { smalls: { strings: "x" } } medium: { int32s: 0 }')
        self.assertAll('medium: { smalls: {} smalls: { strings: "x" } } small: {}')

        self.assertSameNotEqual('medium: { smalls: { strings: "x" } smalls: {} }',
                                'medium: { smalls: {} smalls: { strings: "x" } }')

        self.assertSameNotEqual('medium: { smalls: {} }',
                                'medium: { smalls: {} smalls: {} }')
        self.assertSameNotEqual('medium: { smalls: {} smalls: {} } medium: {}',
                                'medium: {} medium: {} medium: { smalls: {} }')

        self.assertSameNotEqual(
            'medium: { smalls: { strings: "x" } smalls: {} }',
            'medium: { smalls: {} smalls: { strings: "x" } smalls: {} }')

        self.assertNone('medium: {}', 'medium: {} medium { smalls: {} }', """
medium {
+ smalls {
+ }
}
""")
        self.assertNone('medium: { smalls: {} smalls: { strings: "x" } }',
                        'medium: { smalls: {} smalls: { strings: "y" } }', """
medium {
smalls {
}
smalls {
- strings: "x"
? ^
+ strings: "y"
? ^
}
}
""")
class MixinTests(compare.ProtoAssertions, googletest.TestCase):
    """Exercises ProtoAssertions mixed into a TestCase subclass."""

    def testAssertEqualWithStringArg(self):
        pb = compare_test_pb2.Large()
        pb.string_ = 'abc'
        pb.float_ = 1.234
        # assertProtoEqual comes from the ProtoAssertions mixin.
        self.assertProtoEqual("""
string_: 'abc'
float_: 1.234
""", pb)


if __name__ == '__main__':
    googletest.main()
| apache-2.0 |
nvoron23/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 47 | 8566 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.base import clone
# Module-wide RNG: tests draw from this shared, seeded generator, so the
# data a test sees depends on how many draws earlier tests performed.
random_state = np.random.mtrand.RandomState(0)


def test_initialize_nn_output():
    # Test that initialization does not return negative values
    data = np.abs(random_state.randn(10, 10))
    for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
        W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
        assert_false((W < 0).any() or (H < 0).any())


@ignore_warnings
def test_parameter_checking():
    A = np.ones((2, 2))
    name = 'spam'
    # Each invalid keyword must raise ValueError with a descriptive message.
    msg = "Invalid solver parameter: got 'spam' instead of one of"
    assert_raise_message(ValueError, msg, nmf.NMF(solver=name).fit, A)
    msg = "Invalid init parameter: got 'spam' instead of one of"
    assert_raise_message(ValueError, msg, nmf.NMF(init=name).fit, A)
    msg = "Invalid sparseness parameter: got 'spam' instead of one of"
    assert_raise_message(ValueError, msg, nmf.NMF(sparseness=name).fit, A)

    # Negative data must be rejected by fit, _initialize_nmf and transform.
    msg = "Negative values in data passed to"
    assert_raise_message(ValueError, msg, nmf.NMF().fit, -A)
    assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
                         2, 'nndsvd')
    clf = nmf.NMF(2, tol=0.1).fit(A)
    assert_raise_message(ValueError, msg, clf.transform, -A)


def test_initialize_close():
    # Test NNDSVD error
    # Test that _initialize_nmf error is less than the standard deviation of
    # the entries in the matrix.
    A = np.abs(random_state.randn(10, 10))
    W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
    error = linalg.norm(np.dot(W, H) - A)
    sdev = linalg.norm(A - A.mean())
    assert_true(error <= sdev)


def test_initialize_variants():
    # Test NNDSVD variants correctness
    # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
    # 'nndsvd' only where the basic version has zeros.
    data = np.abs(random_state.randn(10, 10))
    W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
    Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
    War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
                                   random_state=0)
    # The variants only fill in zeros of the basic factors, so all non-zero
    # entries must be preserved exactly.
    for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
        assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@ignore_warnings
def test_nmf_fit_nn_output():
    # Test that the decomposition does not contain negative values
    A = np.c_[5 * np.ones(5) - np.arange(1, 6),
              5 * np.ones(5) + np.arange(1, 6)]
    for solver in ('pg', 'cd'):
        for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
            model = nmf.NMF(n_components=2, solver=solver, init=init,
                            random_state=0)
            transf = model.fit_transform(A)
            assert_false((model.components_ < 0).any() or
                         (transf < 0).any())


@ignore_warnings
def test_nmf_fit_close():
    # Test that the fit is not too far away
    for solver in ('pg', 'cd'):
        pnmf = nmf.NMF(5, solver=solver, init='nndsvd', random_state=0)
        X = np.abs(random_state.randn(6, 5))
        # 5 components for 5 features: reconstruction should be near-exact.
        assert_less(pnmf.fit(X).reconstruction_err_, 0.05)


def test_nls_nn_output():
    # Test that NLS solver doesn't return negative values
    A = np.arange(1, 5).reshape(1, -1)
    # np.dot(A.T, -A) pushes the unconstrained optimum negative, so the
    # non-negativity constraint must clip the solution at zero.
    Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
    assert_false((Ap < 0).any())


def test_nls_close():
    # Test that the NLS results should be close
    A = np.arange(1, 5).reshape(1, -1)
    Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
                                   0.001, 100)
    assert_true((np.abs(Ap - A) < 0.01).all())
@ignore_warnings
def test_nmf_transform():
    # Test that NMF.transform returns close values
    A = np.abs(random_state.randn(6, 5))
    for solver in ('pg', 'cd'):
        m = nmf.NMF(solver=solver, n_components=4, init='nndsvd',
                    random_state=0)
        ft = m.fit_transform(A)
        t = m.transform(A)
        # transform() re-solves for W with the fitted H, so it only matches
        # fit_transform() approximately.
        assert_array_almost_equal(ft, t, decimal=2)


@ignore_warnings
def test_n_components_greater_n_features():
    # Smoke test for the case of more components than features.
    A = np.abs(random_state.randn(30, 10))
    nmf.NMF(n_components=15, random_state=0, tol=1e-2).fit(A)


@ignore_warnings
def test_projgrad_nmf_sparseness():
    # Test sparseness
    # Test that sparsity constraints actually increase sparseness in the
    # part where they are applied.
    tol = 1e-2
    A = np.abs(random_state.randn(10, 10))
    # Unconstrained fit as the baseline for both sparseness measures.
    m = nmf.ProjectedGradientNMF(n_components=5, random_state=0,
                                 tol=tol).fit(A)
    data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
                                       random_state=0,
                                       tol=tol).fit(A).data_sparseness_
    comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
                                       random_state=0,
                                       tol=tol).fit(A).comp_sparseness_
    assert_greater(data_sp, m.data_sparseness_)
    assert_greater(comp_sp, m.comp_sparseness_)
@ignore_warnings
def test_sparse_input():
    # Test that sparse matrices are accepted as input.
    A = np.abs(random_state.randn(10, 10))
    A[:, 2 * np.arange(5)] = 0
    # csc_matrix is already imported at module level; the former
    # function-local re-import shadowing it has been removed.
    A_sparse = csc_matrix(A)

    for solver in ('pg', 'cd'):
        est1 = nmf.NMF(solver=solver, n_components=5, init='random',
                       random_state=0, tol=1e-2)
        est2 = clone(est1)

        W1 = est1.fit_transform(A)
        W2 = est2.fit_transform(A_sparse)
        H1 = est1.components_
        H2 = est2.components_

        # Dense and sparse input must give identical factorizations.
        assert_array_almost_equal(W1, W2)
        assert_array_almost_equal(H1, H2)


@ignore_warnings
def test_sparse_transform():
    # Test that transform works on sparse data.  Issue #2124
    A = np.abs(random_state.randn(3, 2))
    A[A > 1.0] = 0
    A = csc_matrix(A)

    for solver in ('pg', 'cd'):
        model = nmf.NMF(solver=solver, random_state=0, tol=1e-4,
                        n_components=2)
        A_fit_tr = model.fit_transform(A)
        A_tr = model.transform(A)
        assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
@ignore_warnings
def test_non_negative_factorization_consistency():
    """The function API and the NMF estimator must give identical results."""
    X = np.abs(random_state.randn(10, 10))
    X[:, 2 * np.arange(5)] = 0

    for solver in ('pg', 'cd'):
        # Direct function calls: one full factorization, one with H fixed.
        W_func, H_func, _ = nmf.non_negative_factorization(
            X, solver=solver, random_state=1, tol=1e-2)
        W_func_fixed, _, _ = nmf.non_negative_factorization(
            X, H=H_func, update_H=False, solver=solver, random_state=1,
            tol=1e-2)

        # The same computation through the estimator interface.
        estimator = nmf.NMF(solver=solver, random_state=1, tol=1e-2)
        W_est = estimator.fit_transform(X)
        W_est_fixed = estimator.transform(X)

        assert_array_almost_equal(W_func, W_est, decimal=10)
        assert_array_almost_equal(W_func_fixed, W_est_fixed, decimal=10)


@ignore_warnings
def test_non_negative_factorization_checking():
    """Parameter validation of the public non_negative_factorization()."""
    A = np.ones((2, 2))
    nnmf = nmf.non_negative_factorization
    # (expected message, call arguments) pairs, checked in order.
    cases = [
        ("Number of components must be positive; got (n_components='2')",
         (A, A, A, '2')),
        ("Negative values in data passed to NMF (input H)",
         (A, A, -A, 2, 'custom')),
        ("Negative values in data passed to NMF (input W)",
         (A, -A, A, 2, 'custom')),
        ("Array passed to NMF (input H) is full of zeros",
         (A, A, 0 * A, 2, 'custom')),
    ]
    for message, args in cases:
        assert_raise_message(ValueError, message, nnmf, *args)


def test_safe_compute_error():
    """_safe_compute_error must agree between dense and sparse input."""
    dense = np.abs(random_state.randn(10, 10))
    dense[:, 2 * np.arange(5)] = 0
    sparse = csc_matrix(dense)

    W, H = nmf._initialize_nmf(dense, 5, init='random', random_state=0)

    assert_almost_equal(nmf._safe_compute_error(dense, W, H),
                        nmf._safe_compute_error(sparse, W, H))
| bsd-3-clause |
pjg101/SickRage | lib/guessit/rules/properties/screen_size.py | 11 | 4294 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
screen_size property
"""
from rebulk.remodule import re
from rebulk import Rebulk, Rule, RemoveMatch
from ..common.validators import seps_surround
from ..common import dash, seps
def screen_size():
    """
    Builder for rebulk object.

    :return: Created Rebulk object
    :rtype: Rebulk
    """
    def conflict_solver(match, other):
        """
        Conflict solver for most screen_size.
        """
        if other.name == 'screen_size':
            if 'resolution' in other.tags:
                # The chtouile to solve conflict in "720 x 432" string matching both 720p pattern
                # (keep this match only when the explicit WxH resolution
                # starts with the same digits, e.g. "720x432" vs "720p").
                # NOTE: _digits_re is assigned later in screen_size(); the
                # closure is only called at match time, after it exists.
                int_value = _digits_re.findall(match.raw)[-1]
                if other.value.startswith(int_value):
                    return match
            return other
        return '__default__'

    rebulk = Rebulk().string_defaults(ignore_case=True).regex_defaults(flags=re.IGNORECASE)
    rebulk.defaults(name="screen_size", validator=seps_surround, conflict_solver=conflict_solver)

    # Named vertical resolutions, optionally preceded by "<width>x"/"<width>*".
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?360(?:i|p?x?)", value="360p")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?368(?:i|p?x?)", value="368p")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?480(?:i|p?x?)", value="480p")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?576(?:i|p?x?)", value="576p")
    # NOTE(review): the next two 720 patterns (and the 1080p pair below)
    # look partially redundant -- confirm the intended registration order
    # against upstream before touching them; order affects match priority.
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?720(?:i|p?(?:50|60)?x?)", value="720p")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?720(?:p(?:50|60)?x?)", value="720p")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?720p?hd", value="720p")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?900(?:i|p?x?)", value="900p")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080i", value="1080i")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080p?x?", value="1080p")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080(?:p(?:50|60)?x?)", value="1080p")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080p?hd", value="1080p")
    rebulk.regex(r"(?:\d{3,}(?:x|\*))?2160(?:i|p?x?)", value="4K")
    rebulk.string('4k', value='4K')

    _digits_re = re.compile(r'\d+')

    # Explicit "<width>x<height>" resolutions, normalized to "WxH".
    rebulk.defaults(name="screen_size", validator=seps_surround)
    rebulk.regex(r'\d{3,}-?(?:x|\*)-?\d{3,}',
                 formatter=lambda value: 'x'.join(_digits_re.findall(value)),
                 abbreviations=[dash],
                 tags=['resolution'],
                 conflict_solver=lambda match, other: '__default__' if other.name == 'screen_size' else other)

    rebulk.rules(ScreenSizeOnlyOne, RemoveScreenSizeConflicts)

    return rebulk
class ScreenSizeOnlyOne(Rule):
    """
    Keep a single screen_size per filepath part.
    """
    consequence = RemoveMatch

    def when(self, matches, context):
        removals = []
        for filepart in matches.markers.named('path'):
            sizes = matches.range(filepart.start, filepart.end,
                                  lambda match: match.name == 'screen_size')
            # Keep only the last screen_size of the part; drop the earlier
            # ones, reported from the end of the part backwards.
            if len(sizes) > 1:
                removals.extend(reversed(sizes[:-1]))
        return removals
class RemoveScreenSizeConflicts(Rule):
    """
    Remove season and episode matches which conflicts with screen_size match.
    """
    consequence = RemoveMatch

    def when(self, matches, context):
        to_remove = []
        for filepart in matches.markers.named('path'):
            # First screen_size of this filepath part, if any.
            screensize = matches.range(filepart.start, filepart.end,
                                       lambda match: match.name == 'screen_size', 0)
            if not screensize:
                continue

            conflicts = matches.conflicting(screensize, lambda match: match.name in ('season', 'episode'))
            if not conflicts:
                continue

            # Drop conflicting season/episode when the screen_size is
            # directly followed by a video_profile (no meaningful text in
            # the hole between them) ...
            video_profile = matches.range(screensize.end, filepart.end,
                                          lambda match: match.name == 'video_profile', 0)
            if video_profile and not matches.holes(screensize.end, video_profile.start,
                                                   predicate=lambda h: h.value and h.value.strip(seps)):
                to_remove.extend(conflicts)

            # ... or directly preceded by a date match.
            date = matches.previous(screensize, lambda match: match.name == 'date', 0)
            if date and not matches.holes(date.end, screensize.start,
                                          predicate=lambda h: h.value and h.value.strip(seps)):
                to_remove.extend(conflicts)
        return to_remove
| gpl-3.0 |
MungoRae/home-assistant | tests/components/test_updater.py | 6 | 6734 | """The tests for the Updater component."""
import asyncio
from datetime import timedelta
from unittest.mock import patch, Mock
from freezegun import freeze_time
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.components import updater
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, mock_coro
# Version strings used to fake the running and the latest available release.
NEW_VERSION = '10000.0'
MOCK_VERSION = '10.0'
MOCK_DEV_VERSION = '10.0.dev0'
# Fake installation uuid used when analytics reporting is exercised.
MOCK_HUUID = 'abcdefg'
# Canned payload mimicking the updater service response.
MOCK_RESPONSE = {
    'version': '0.15',
    'release-notes': 'https://home-assistant.io'
}
MOCK_CONFIG = {updater.DOMAIN: {
    'reporting': True
}}


@pytest.fixture
def mock_get_newest_version():
    """Fixture to mock get_newest_version."""
    with patch('homeassistant.components.updater.get_newest_version') as mock:
        yield mock


@pytest.fixture
def mock_get_uuid():
    """Fixture to mock get_uuid."""
    with patch('homeassistant.components.updater._load_uuid') as mock:
        yield mock
@asyncio.coroutine
@freeze_time("Mar 15th, 2017")
def test_new_version_shows_entity_after_hour(
        hass, mock_get_uuid, mock_get_newest_version):
    """Test if new entity is created if new version is available."""
    mock_get_uuid.return_value = MOCK_HUUID
    mock_get_newest_version.return_value = mock_coro((NEW_VERSION, ''))

    res = yield from async_setup_component(
        hass, updater.DOMAIN, {updater.DOMAIN: {}})
    assert res, 'Updater failed to setup'

    # The updater only checks after its first scheduled interval has passed.
    with patch('homeassistant.components.updater.current_version',
               MOCK_VERSION):
        async_fire_time_changed(hass, dt_util.utcnow() + timedelta(hours=1))
        yield from hass.async_block_till_done()

    assert hass.states.is_state(updater.ENTITY_ID, NEW_VERSION)


@asyncio.coroutine
@freeze_time("Mar 15th, 2017")
def test_same_version_not_show_entity(
        hass, mock_get_uuid, mock_get_newest_version):
    """Test that no entity is created when the version is unchanged."""
    mock_get_uuid.return_value = MOCK_HUUID
    mock_get_newest_version.return_value = mock_coro((MOCK_VERSION, ''))

    res = yield from async_setup_component(
        hass, updater.DOMAIN, {updater.DOMAIN: {}})
    assert res, 'Updater failed to setup'

    with patch('homeassistant.components.updater.current_version',
               MOCK_VERSION):
        async_fire_time_changed(hass, dt_util.utcnow() + timedelta(hours=1))
        yield from hass.async_block_till_done()

    assert hass.states.get(updater.ENTITY_ID) is None


@asyncio.coroutine
@freeze_time("Mar 15th, 2017")
def test_disable_reporting(hass, mock_get_uuid, mock_get_newest_version):
    """Test that no analytics id is sent when reporting is disabled."""
    mock_get_uuid.return_value = MOCK_HUUID
    mock_get_newest_version.return_value = mock_coro((MOCK_VERSION, ''))

    res = yield from async_setup_component(
        hass, updater.DOMAIN, {updater.DOMAIN: {
            'reporting': False
        }})
    assert res, 'Updater failed to setup'

    with patch('homeassistant.components.updater.current_version',
               MOCK_VERSION):
        async_fire_time_changed(hass, dt_util.utcnow() + timedelta(hours=1))
        yield from hass.async_block_till_done()

    assert hass.states.get(updater.ENTITY_ID) is None

    res = yield from updater.get_newest_version(hass, MOCK_HUUID, MOCK_CONFIG)

    # With reporting disabled the component must have called
    # get_newest_version with huuid=None (no analytics id).
    call = mock_get_newest_version.mock_calls[0][1]
    assert call[0] is hass
    assert call[1] is None
@asyncio.coroutine
def test_enabled_component_info(hass, mock_get_uuid):
    """Test that the component list is included when requested."""
    with patch('homeassistant.components.updater.platform.system',
               Mock(return_value="junk")):
        res = yield from updater.get_system_info(hass, True)
        assert 'components' in res, 'Updater failed to generate component list'


@asyncio.coroutine
def test_disable_component_info(hass, mock_get_uuid):
    """Test that the component list is omitted when not requested."""
    with patch('homeassistant.components.updater.platform.system',
               Mock(return_value="junk")):
        res = yield from updater.get_system_info(hass, False)
        assert 'components' not in res, 'Updater failed, components generate'
@asyncio.coroutine
def test_get_newest_version_no_analytics_when_no_huuid(hass, aioclient_mock):
    """Test we do not gather analytics when no huuid is passed in."""
    aioclient_mock.post(updater.UPDATER_URL, json=MOCK_RESPONSE)

    # get_system_info raising proves it is never called without a huuid.
    with patch('homeassistant.components.updater.get_system_info',
               side_effect=Exception):
        res = yield from updater.get_newest_version(hass, None, False)
        assert res == (MOCK_RESPONSE['version'],
                       MOCK_RESPONSE['release-notes'])


@asyncio.coroutine
def test_get_newest_version_analytics_when_huuid(hass, aioclient_mock):
    """Test we gather analytics when a huuid is passed in."""
    aioclient_mock.post(updater.UPDATER_URL, json=MOCK_RESPONSE)

    with patch('homeassistant.components.updater.get_system_info',
               Mock(return_value=mock_coro({'fake': 'bla'}))):
        res = yield from updater.get_newest_version(hass, MOCK_HUUID, False)
        assert res == (MOCK_RESPONSE['version'],
                       MOCK_RESPONSE['release-notes'])


@asyncio.coroutine
def test_error_fetching_new_version_timeout(hass):
    """Test we return None on a timeout while fetching the new version."""
    with patch('homeassistant.components.updater.get_system_info',
               Mock(return_value=mock_coro({'fake': 'bla'}))), \
            patch('async_timeout.timeout', side_effect=asyncio.TimeoutError):
        res = yield from updater.get_newest_version(hass, MOCK_HUUID, False)
        assert res is None


@asyncio.coroutine
def test_error_fetching_new_version_bad_json(hass, aioclient_mock):
    """Test we return None when the version response is not JSON."""
    aioclient_mock.post(updater.UPDATER_URL, text='not json')

    with patch('homeassistant.components.updater.get_system_info',
               Mock(return_value=mock_coro({'fake': 'bla'}))):
        res = yield from updater.get_newest_version(hass, MOCK_HUUID, False)
        assert res is None


@asyncio.coroutine
def test_error_fetching_new_version_invalid_response(hass, aioclient_mock):
    """Test we return None when the version response misses fields."""
    aioclient_mock.post(updater.UPDATER_URL, json={
        'version': '0.15'
        # 'release-notes' is missing
    })

    with patch('homeassistant.components.updater.get_system_info',
               Mock(return_value=mock_coro({'fake': 'bla'}))):
        res = yield from updater.get_newest_version(hass, MOCK_HUUID, False)
        assert res is None
| apache-2.0 |
Baz2013/blog_demo | leetcode/easy/binary_watch.py | 1 | 3526 | # -*- coding:utf-8 -*-
# 401. Binary Watch QuestionEditorial Solution My Submissions
# Difficulty: Easy
# A binary watch has 4 LEDs on the top which represent the hours (0-11), and the 6 LEDs on the bottom
# represent the minutes (0-59).
#
# Each LED represents a zero or one, with the least significant bit on the right.
#
# For example, the above binary watch reads "3:25".
#
# Given a non-negative integer n which represents the number of LEDs that are currently on,
# return all possible times the watch could represent.
#
# Example:
# Input: n = 1
# Return: ["1:00", "2:00", "4:00", "8:00", "0:01", "0:02", "0:04", "0:08", "0:16", "0:32"]
#
# Note:
# The order of output does not matter.
# The hour must not contain a leading zero, for example "01:00" is not valid, it should be "1:00".
# The minute must be consist of two digits and may contain a leading zero, for example "10:2" is not valid,
# it should be "10:02".
import itertools
class Solution(object):
    """LeetCode 401, Binary Watch.

    Enumerate every readable time "H:MM" for a watch with `num` LEDs lit:
    up to 4 hour LEDs (0-11) and up to 6 minute LEDs (0-59).
    """

    def get_group(self, r_num):
        """Return all (hour_leds, minute_leds) splits of r_num lit LEDs.

        :param r_num: total number of lit LEDs
        :return: list of (i, j) with 0 <= i <= 4, 0 <= j <= 6, i + j == r_num
        """
        lst = list()
        for i in range(0, 5):
            for j in range(0, 7):
                if i + j == r_num:
                    lst.append((i, j))
        return lst

    def get_binary_time(self, r_hour, r_min):
        """Cross every hour bit-string with every minute bit-string.

        :param r_hour: number of lit hour LEDs
        :param r_min: number of lit minute LEDs
        :return: list of (hour_bits, minute_bits) string pairs
        """
        rst_lst = list()
        for h in self.get_binary_str(r_hour, 4):
            for m in self.get_binary_str(r_min, 6):
                rst_lst.append((h, m))
        return rst_lst

    def get_binary_str(self, r_hour, r_max_num):
        """Return all binary strings of length r_max_num with r_hour ones.

        Uses itertools.combinations over bit positions: C(n, k) results
        directly, instead of the original approach of generating all
        factorial(r_max_num) permutations of a multiset and deduplicating
        through a set (exponentially more work for the same output).
        """
        b_h_lst = list()
        for ones in itertools.combinations(range(r_max_num), r_hour):
            bits = ['0'] * r_max_num
            for pos in ones:
                bits[pos] = '1'
            b_h_lst.append(''.join(bits))
        return b_h_lst

    def get_real_time(self, r_item):
        """Convert an (hour_bits, minute_bits) pair to "H:MM", or None.

        Returns None for unreadable times (hour > 11 or minute > 59).
        """
        hour = int(r_item[0], 2)
        minute = int(r_item[1], 2)  # renamed from `min` to avoid shadowing the builtin
        if hour > 11 or minute > 59:
            return None
        return "%d:%02d" % (hour, minute)

    def readBinaryWatch(self, num):
        """
        :type num: int
        :rtype: List[str]
        """
        rst_lst = list()
        for hour_leds, minute_leds in self.get_group(num):
            for item in self.get_binary_time(hour_leds, minute_leds):
                real_time = self.get_real_time(item)
                if real_time is not None:
                    rst_lst.append(real_time)
        # Debug output kept from the original; parenthesized so the module
        # is valid under both Python 2 and 3 (rest of the file used the
        # Python 2 print statement only here).
        print(rst_lst)
        return rst_lst
if __name__ == '__main__':
    # Smoke-run the solver for every possible LED count from 1 through 8;
    # readBinaryWatch prints each result list itself.
    solver = Solution()
    for led_count in range(1, 9):
        solver.readBinaryWatch(led_count)
| gpl-3.0 |
msimacek/koschei | test/model_test.py | 1 | 14719 | # Copyright (C) 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Author: Mikolaj Izdebski <mizdebsk@redhat.com>
from mock import patch
from sqlalchemy import literal_column
from datetime import datetime, timedelta
from koschei.models import (
Package, Collection, Build, ResourceConsumptionStats, ScalarStats, KojiTask,
PackageGroup,
)
from test.common import DBTest
class BuildURLTest(DBTest):
    """Tests which Koji hub (primary vs. secondary) build/task URLs point at."""

    # Expected taskinfo/result URLs on the primary and the secondary hub.
    PRI_BUILD_URL = 'https://primary-koji.test/koji/taskinfo?taskID=123456'
    SEC_BUILD_URL = 'https://secondary-koji.test/koji/taskinfo?taskID=123456'
    PRI_TASK_URL = 'https://primary-koji.test/koji/taskinfo?taskID=456789'
    SEC_TASK_URL = 'https://secondary-koji.test/koji/taskinfo?taskID=456789'
    PRI_RESULT_URL = 'https://primary-koji.test/work/tasks/6789/456789'
    SEC_RESULT_URL = 'https://secondary-koji.test/work/tasks/6789/456789'

    def prepare_data(self, *, secondary_mode, real):
        """Create a collection/package/build/task fixture.

        :param secondary_mode: whether the collection uses a secondary hub
        :param real: whether the build is a real (non-scratch) build
        :return: (build, task) pair
        """
        collection = self.prepare_collection('f29', secondary_mode=secondary_mode)
        package = self.prepare_package('rnv', collection=collection)
        build = self.prepare_build(package, real=real, task_id=123456, state='complete')
        task = self.prepare_task(build, task_id=456789)
        return build, task

    def test_scratch_build_primary_url(self):
        build, task = self.prepare_data(secondary_mode=False, real=False)
        self.assertEqual(self.PRI_BUILD_URL, build.taskinfo_url)
        self.assertEqual(self.PRI_TASK_URL, task.taskinfo_url)
        self.assertEqual(self.PRI_RESULT_URL, task.results_url)

    def test_scratch_build_secondary_url(self):
        # Scratch builds run on the primary hub even in secondary mode.
        build, task = self.prepare_data(secondary_mode=True, real=False)
        self.assertEqual(self.PRI_BUILD_URL, build.taskinfo_url)
        self.assertEqual(self.PRI_TASK_URL, task.taskinfo_url)
        self.assertEqual(self.PRI_RESULT_URL, task.results_url)

    def test_real_build_primary_url(self):
        build, task = self.prepare_data(secondary_mode=False, real=True)
        self.assertEqual(self.PRI_BUILD_URL, build.taskinfo_url)
        self.assertEqual(self.PRI_TASK_URL, task.taskinfo_url)
        self.assertEqual(self.PRI_RESULT_URL, task.results_url)

    def test_real_build_secondary_url(self):
        # Only real builds in secondary mode point at the secondary hub.
        build, task = self.prepare_data(secondary_mode=True, real=True)
        self.assertEqual(self.SEC_BUILD_URL, build.taskinfo_url)
        self.assertEqual(self.SEC_TASK_URL, task.taskinfo_url)
        self.assertEqual(self.SEC_RESULT_URL, task.results_url)
class GroupTest(DBTest):
    """Tests of PackageGroup naming and package-count semantics."""

    def test_group_name_format(self):
        """full_name prefixes the namespace with a slash when present."""
        group1 = self.prepare_group('foo', content=['foo'])
        group2 = self.prepare_group('bar', namespace='ns', content=['foo'])
        self.assertEqual('foo', group1.full_name)
        self.assertEqual('ns/bar', group2.full_name)

    def test_group_name_parse(self):
        """parse_name splits an optional namespace prefix from the name."""
        self.assertEqual((None, 'foo'), PackageGroup.parse_name('foo'))
        self.assertEqual(('ns', 'bar'), PackageGroup.parse_name('ns/bar'))

    def test_group_cardinality(self):
        group = self.prepare_group('xyzzy', content=['foo', 'bar', 'baz'])
        self.assertEqual(3, group.package_count)

    def test_group_cardinality_multiple_groups(self):
        group1 = self.prepare_group('xyzzy', content=['foo', 'bar', 'baz'])
        group2 = self.prepare_group('dsfla', content=['abc', 'def', 'ghi', 'jkl'])
        self.assertEqual(3, group1.package_count)
        self.assertEqual(4, group2.package_count)

    def test_group_cardinality_multiple_collections(self):
        """A package present in two collections is counted only once."""
        group = self.prepare_group('xyzzy', content=['foo', 'bar', 'baz'])
        collection = self.prepare_collection(
            name="new", display_name="New",
            target="foo", dest_tag="tag2", build_tag="build_tag2",
            priority_coefficient=2.0,
        )
        # Creating the package is the point; its handle is not needed
        # (the previously unused `pkg = ...` binding was dropped).
        self.prepare_package('bar', collection=collection)
        self.assertEqual(3, group.package_count)

    def test_group_cardinality_blocked(self):
        """Blocked packages are excluded from the count."""
        group = self.prepare_group('xyzzy', content=['foo', 'bar', 'baz'])
        self.prepare_packages('bar')[0].blocked = True
        self.db.commit()
        self.assertEqual(2, group.package_count)

    def test_group_cardinality_partially_blocked(self):
        # Package xalan-j2 is blocked in one collection only.
        group = self.prepare_group('xyzzy', content=['xalan-j2'])
        self.prepare_packages('xalan-j2')[0].blocked = True
        self.db.commit()
        collection = self.prepare_collection(
            name="new", display_name="New",
            target="foo", dest_tag="tag2", build_tag="build_tag2",
            priority_coefficient=2.0,
        )
        self.prepare_package('xalan-j2', collection=collection)
        # Still counted: it remains unblocked in the second collection.
        self.assertEqual(1, group.package_count)

    def test_group_cardinality_fully_blocked(self):
        # Package xalan-j2 is blocked in all collections.
        group = self.prepare_group('xyzzy', content=['xalan-j2'])
        self.prepare_packages('xalan-j2')[0].blocked = True
        self.db.commit()
        collection = self.prepare_collection(
            name="new", display_name="New",
            target="foo", dest_tag="tag2", build_tag="build_tag2",
            priority_coefficient=2.0,
        )
        self.prepare_package('xalan-j2', collection=collection, blocked=True)
        self.assertEqual(0, group.package_count)
class PackageStateStringTest(DBTest):
    """Checks Package.state_string both as a Python-side property and as a
    SQL expression — the two implementations must agree."""

    def verify_state_string(self, state_string, **pkg_kwargs):
        pkg = self.prepare_package(**pkg_kwargs)
        # Python-level property.
        self.assertEqual(state_string, pkg.state_string)
        # SQL-level expression evaluated by the database.
        from_db = (
            self.db.query(Package.state_string)
            .filter(Package.id == pkg.id)
            .scalar()
        )
        self.assertEqual(state_string, from_db)

    def test_state_string(self):
        cases = [
            ('blocked', dict(blocked=True)),
            ('untracked', dict(tracked=False)),
            ('unresolved', dict(resolved=False)),
            ('ok', dict(resolved=True,
                        last_complete_build_state=Build.COMPLETE)),
            ('failing', dict(resolved=True,
                             last_complete_build_state=Build.FAILED)),
            ('unknown', dict(resolved=None,
                             last_complete_build_state=None)),
            ('unknown', dict(resolved=True,
                             last_complete_build_state=None)),
        ]
        for expected, kwargs in cases:
            self.verify_state_string(expected, **kwargs)
@patch('sqlalchemy.sql.expression.func.clock_timestamp',
       return_value=literal_column("'2017-10-10 10:00:00'"))
class PackagePriorityTest(DBTest):
    """Tests for Package.current_priority_expression.

    clock_timestamp is patched so "now" is pinned to 2017-10-10 10:00:00,
    making the time-based priority component deterministic.
    """

    def setUp(self):
        super(PackagePriorityTest, self).setUp()
        self.pkg = self.prepare_packages('rnv')[0]
        self.pkg.resolved = True
        self.build = self.prepare_build('rnv', state=True)
        self.build.started = '2017-10-10 10:00:00'

    def get_priority(self, pkg):
        # Evaluate the expression with concrete collection/build objects.
        expression = Package.current_priority_expression(
            collection=pkg.collection,
            last_build=pkg.last_build,
        )
        return self.db.query(expression).filter(Package.id == pkg.id).scalar()

    def get_priority_join(self, pkg):
        # Evaluate the same expression against joined entities instead.
        expression = Package.current_priority_expression(
            collection=Collection,
            last_build=Build,
        )
        return (
            self.db.query(expression)
            .join(Package.collection)
            .join(Package.last_build)
            .filter(Package.id == pkg.id)
            .scalar()
        )

    def verify_priority(self, expected, pkg=None):
        # Both evaluation strategies must agree with the expected value
        # (or both be NULL when no priority applies).
        pkg = pkg or self.pkg
        self.db.commit()
        direct = self.get_priority(pkg)
        joined = self.get_priority_join(pkg)
        if expected:
            self.assertIsNotNone(direct)
            self.assertIsNotNone(joined)
            self.assertAlmostEqual(expected, direct)
            self.assertAlmostEqual(expected, joined)
        else:
            self.assertIsNone(direct)
            self.assertIsNone(joined)

    def test_basic(self, _):
        # Time priority for a just-completed build, no other inputs.
        self.verify_priority(-30)

    def test_coefficient(self, _):
        self.pkg.manual_priority = 10
        self.pkg.static_priority = 20
        self.pkg.dependency_priority = 40
        self.pkg.build_priority = 50
        self.pkg.collection.priority_coefficient = 0.5
        # Manual and static priorities are not scaled by the coefficient.
        self.verify_priority(10 + 20 + 0.5 * (-30 + 40 + 50))

    def test_time(self, _):
        # Expected values follow the time-priority curve at fixed offsets.
        for started, expected in (
            ('2017-10-10 08:00:00', -30),                # 2 h difference
            ('2017-10-10 00:00:00', 39.2446980024098),   # 10 h difference
            ('2017-10-9 00:00:00', 133.26248998925),     # 1 day difference
            ('2017-9-10 00:00:00', 368.863607520133),    # 1 month difference
        ):
            self.build.started = started
            self.verify_priority(expected)

    def test_untracked(self, _):
        self.pkg.tracked = False
        self.verify_priority(None)

    def test_blocked(self, _):
        self.pkg.blocked = True
        self.verify_priority(None)

    def test_unresolved(self, _):
        self.pkg.resolved = False
        self.verify_priority(None)

    def test_running_build(self, _):
        self.prepare_build('rnv', started='2017-10-10 11:00:00')
        self.verify_priority(None)

    def test_no_build(self, _):
        other = self.prepare_packages('foo')[0]
        other.resolved = True
        self.verify_priority(None, other)

    def test_resolution_not_attempted(self, _):
        self.pkg.resolved = None
        self.verify_priority(None)

    def test_resolution_skipped(self, _):
        self.pkg.resolved = None
        self.pkg.skip_resolution = True
        self.verify_priority(-30)
class StatsTest(DBTest):
    """Tests for the statistics materialized views (refresh semantics and
    aggregated values)."""

    def add_task(self, build, arch, started, finished):
        # Helper: attach a koji task to a build; `started`/`finished` are
        # POSIX timestamps, `finished=None` models a still-running task.
        task = KojiTask(
            task_id=7541,
            arch=arch,
            state=1,
            started=datetime.fromtimestamp(started),
            finished=datetime.fromtimestamp(finished) if finished else None,
            build_id=build.id,
        )
        self.db.add(task)
        self.db.commit()

    def test_time_consumption_per_package(self):
        rnv = self.prepare_build('rnv')
        self.add_task(rnv, 'x86_64', 123, 456)
        self.add_task(rnv, 'aarch64', 125, 666)
        # Before refresh the materialized view should be empty.
        self.assertEqual(0, self.db.query(ResourceConsumptionStats).count())
        # After refresh it should contain some entries.
        self.db.refresh_materialized_view(ResourceConsumptionStats)
        self.assertEqual(2, self.db.query(ResourceConsumptionStats).count())
        # Now add more data.
        self.add_task(rnv, 'x86_64', 1000, 1100)
        self.add_task(rnv, 'x86_64', 2000, 2500)
        self.add_task(rnv, 'x86_64', 5000, None)
        self.add_task(self.prepare_build('xpp3'), 'x86_64', 111, 444)
        self.add_task(self.prepare_build('junit'), 'noarch', 24, 42)
        # Until refreshed again, the view must still contain only 2 rows.
        self.assertEqual(2, self.db.query(ResourceConsumptionStats).count())
        self.db.refresh_materialized_view(ResourceConsumptionStats)
        self.assertEqual(4, self.db.query(ResourceConsumptionStats).count())
        rows = (
            self.db.query(ResourceConsumptionStats)
            .order_by(ResourceConsumptionStats.time)
            .all()
        )
        # Rows sorted by total time, ascending; unfinished tasks contribute
        # nothing to the totals.
        expected = [
            ('junit', 'noarch', timedelta(0, 42 - 24), 0.0099),
            ('xpp3', 'x86_64', timedelta(0, 333), 0.1825),
            ('rnv', 'aarch64', timedelta(0, 666 - 125), 0.2964),
            ('rnv', 'x86_64', timedelta(0, 333 + 100 + 500), 0.5112),
        ]
        for row, (name, arch, time, percentage) in zip(rows, expected):
            self.assertEqual(name, row.name)
            self.assertEqual(arch, row.arch)
            self.assertEqual(time, row.time)
            self.assertAlmostEqual(percentage, row.time_percentage, 4)

    def test_time_consumption_only_running(self):
        rnv = self.prepare_build('rnv')
        self.add_task(rnv, 'x86_64', 123, None)
        self.db.refresh_materialized_view(ResourceConsumptionStats)
        self.assertEqual(1, self.db.query(ResourceConsumptionStats).count())
        row = self.db.query(ResourceConsumptionStats).one()
        self.assertEqual('rnv', row.name)
        self.assertEqual('x86_64', row.arch)
        # A task without a finish time has no duration, hence no percentage.
        self.assertIsNone(row.time)
        self.assertIsNone(row.time_percentage)

    def test_package_counts(self):
        self.db.refresh_materialized_view(ScalarStats)
        self.assertEqual(0, self.db.query(ScalarStats).one().packages)
        self.prepare_packages('rnv')[0].tracked = False
        self.prepare_packages('junit')[0].blocked = True
        self.prepare_packages('xpp3')
        self.db.refresh_materialized_view(ScalarStats)
        row = self.db.query(ScalarStats).one()
        self.assertEqual(3, row.packages)
        self.assertEqual(2, row.tracked_packages)
        self.assertEqual(1, row.blocked_packages)

    def test_build_counts(self):
        # 7 real, 5 failed scratch, 4 running builds.
        for _ in range(7):
            self.prepare_build('rnv', True).real = True
        for _ in range(5):
            self.prepare_build('rnv', False)
        for _ in range(4):
            self.prepare_build('rnv', None)
        self.db.refresh_materialized_view(ScalarStats)
        row = self.db.query(ScalarStats).one()
        self.assertEqual(16, row.builds)
        self.assertEqual(7, row.real_builds)
        self.assertEqual(9, row.scratch_builds)
| gpl-2.0 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import (WindowsAzureError,
TABLE_SERVICE_HOST_BASE,
DEV_TABLE_HOST,
xml_escape,
_convert_class_to_xml,
_convert_response_to_feeds,
_dont_fail_not_exist,
_dont_fail_on_exist,
_get_request_body,
_int_or_none,
_parse_response,
_parse_response_for_dict,
_parse_response_for_dict_filter,
_str,
_str_or_none,
_update_request_uri_query_local_storage,
_validate_not_none,
)
from azure.http import HTTPRequest
from azure.http.batchclient import _BatchClient
from azure.storage import (StorageServiceProperties,
_convert_entity_to_xml,
_convert_response_to_entity,
_convert_table_to_xml,
_convert_xml_to_entity,
_convert_xml_to_table,
_sign_storage_table_request,
_update_storage_table_header,
)
from azure.storage.storageclient import _StorageClient
class TableService(_StorageClient):
    '''
    This is the main class managing Table resources.

    Requests can be batched: between begin_batch() and commit_batch(),
    operations are buffered by a _BatchClient instead of being sent
    immediately.
    '''

    def __init__(self, account_name = None, account_key = None, protocol = 'https', host_base = TABLE_SERVICE_HOST_BASE, dev_host = DEV_TABLE_HOST):
        '''
        account_name: your storage account name, required for all operations.
        account_key: your storage account key, required for all operations.
        protocol: Optional. Protocol. Defaults to https.
        host_base:
            Optional. Live host base url. Defaults to Azure url. Override this
            for on-premise.
        dev_host: Optional. Dev host url. Defaults to localhost.
        '''
        return super(TableService, self).__init__(account_name, account_key, protocol, host_base, dev_host)

    def begin_batch(self):
        # Lazily create the batch client on first use; subsequent requests
        # are buffered until commit_batch() or cancel_batch().
        if self._batchclient is None:
            self._batchclient = _BatchClient(service_instance=self, account_key=self.account_key, account_name=self.account_name)
        return self._batchclient.begin_batch()

    def commit_batch(self):
        # Send the buffered batch. The batch client is always dropped
        # afterwards so a failed commit does not leave us in batch mode.
        try:
            ret = self._batchclient.commit_batch()
        finally:
            self._batchclient = None
        return ret

    def cancel_batch(self):
        # Discard any buffered batch requests without sending them.
        self._batchclient = None

    def get_table_service_properties(self):
        '''
        Gets the properties of a storage account's Table service, including
        Windows Azure Storage Analytics.
        '''
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/?restype=service&comp=properties'
        # Rewrites path/query when targeting the local storage emulator.
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        response = self._perform_request(request)
        return _parse_response(response, StorageServiceProperties)

    def set_table_service_properties(self, storage_service_properties):
        '''
        Sets the properties of a storage account's Table Service, including
        Windows Azure Storage Analytics.

        storage_service_properties: StorageServiceProperties object.
        '''
        _validate_not_none('storage_service_properties', storage_service_properties)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        request.path = '/?restype=service&comp=properties'
        request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        response = self._perform_request(request)
        return _parse_response_for_dict(response)

    def query_tables(self, table_name = None, top=None, next_table_name=None):
        '''
        Returns a list of tables under the specified account.

        table_name: Optional. The specific table to query.
        top: Optional. Maximum number of tables to return.
        next_table_name:
            Optional. When top is used, the next table name is stored in
            result.x_ms_continuation['NextTableName']
        '''
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        # With a table name the request addresses a single table resource:
        # /Tables('name'); without it, the whole collection: /Tables.
        if table_name is not None:
            uri_part_table_name = "('" + table_name + "')"
        else:
            uri_part_table_name = ""
        request.path = '/Tables' + uri_part_table_name + ''
        request.query = [
            ('$top', _int_or_none(top)),
            ('NextTableName', _str_or_none(next_table_name))
        ]
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        response = self._perform_request(request)
        return _convert_response_to_feeds(response, _convert_xml_to_table)

    def create_table(self, table, fail_on_exist=False):
        '''
        Creates a new table in the storage account.

        table:
            Name of the table to create. Table name may contain only
            alphanumeric characters and cannot begin with a numeric character.
            It is case-insensitive and must be from 3 to 63 characters long.
        fail_on_exist: Specify whether throw exception when table exists.

        Returns True on success; False when the table already exists and
        fail_on_exist is False.
        '''
        _validate_not_none('table', table)
        request = HTTPRequest()
        request.method = 'POST'
        request.host = self._get_host()
        request.path = '/Tables'
        request.body = _get_request_body(_convert_table_to_xml(table))
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        if not fail_on_exist:
            # Swallow only the "already exists" error; re-raise anything else.
            try:
                self._perform_request(request)
                return True
            except WindowsAzureError as e:
                _dont_fail_on_exist(e)
                return False
        else:
            self._perform_request(request)
            return True

    def delete_table(self, table_name, fail_not_exist=False):
        '''
        table_name: Name of the table to delete.
        fail_not_exist:
            Specify whether throw exception when table doesn't exist.

        Returns True on success; False when the table does not exist and
        fail_not_exist is False.
        '''
        _validate_not_none('table_name', table_name)
        request = HTTPRequest()
        request.method = 'DELETE'
        request.host = self._get_host()
        request.path = '/Tables(\'' + _str(table_name) + '\')'
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        if not fail_not_exist:
            # Swallow only the "does not exist" error; re-raise anything else.
            try:
                self._perform_request(request)
                return True
            except WindowsAzureError as e:
                _dont_fail_not_exist(e)
                return False
        else:
            self._perform_request(request)
            return True

    def get_entity(self, table_name, partition_key, row_key, select=''):
        '''
        Get an entity in a table; includes the $select options.

        partition_key: PartitionKey of the entity.
        row_key: RowKey of the entity.
        select: Property names to select.
        '''
        _validate_not_none('table_name', table_name)
        _validate_not_none('partition_key', partition_key)
        _validate_not_none('row_key', row_key)
        _validate_not_none('select', select)
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/' + _str(table_name) + '(PartitionKey=\'' + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')?$select=' + _str(select) + ''
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        response = self._perform_request(request)
        return _convert_response_to_entity(response)

    def query_entities(self, table_name, filter=None, select=None, top=None, next_partition_key=None, next_row_key=None):
        '''
        Get entities in a table; includes the $filter and $select options.

        table_name: Table to query.
        filter:
            Optional. Filter as described at
            http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
        select: Optional. Property names to select from the entities.
        top: Optional. Maximum number of entities to return.
        next_partition_key:
            Optional. When top is used, the next partition key is stored in
            result.x_ms_continuation['NextPartitionKey']
        next_row_key:
            Optional. When top is used, the next row key is stored in
            result.x_ms_continuation['NextRowKey']
        '''
        _validate_not_none('table_name', table_name)
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/' + _str(table_name) + '()'
        request.query = [
            ('$filter', _str_or_none(filter)),
            ('$select', _str_or_none(select)),
            ('$top', _int_or_none(top)),
            ('NextPartitionKey', _str_or_none(next_partition_key)),
            ('NextRowKey', _str_or_none(next_row_key))
        ]
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        response = self._perform_request(request)
        return _convert_response_to_feeds(response, _convert_xml_to_entity)

    def insert_entity(self, table_name, entity, content_type='application/atom+xml'):
        '''
        Inserts a new entity into a table.

        table_name: Table name.
        entity:
            Required. The entity object to insert. Could be a dict format or
            entity object.
        content_type: Required. Must be set to application/atom+xml
        '''
        _validate_not_none('table_name', table_name)
        _validate_not_none('entity', entity)
        _validate_not_none('content_type', content_type)
        request = HTTPRequest()
        request.method = 'POST'
        request.host = self._get_host()
        request.path = '/' + _str(table_name) + ''
        request.headers = [('Content-Type', _str_or_none(content_type))]
        request.body = _get_request_body(_convert_entity_to_xml(entity))
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        # NOTE(review): presumably _update_storage_table_header merges the
        # already-set headers with the required storage headers — confirm.
        request.headers = _update_storage_table_header(request)
        response = self._perform_request(request)
        return _convert_response_to_entity(response)

    def update_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
        '''
        Updates an existing entity in a table. The Update Entity operation
        replaces the entire entity and can be used to remove properties.

        table_name: Table name.
        partition_key: PartitionKey of the entity.
        row_key: RowKey of the entity.
        entity:
            Required. The entity object to insert. Could be a dict format or
            entity object.
        content_type: Required. Must be set to application/atom+xml
        if_match:
            Optional. Specifies the condition for which the update should be
            performed. To force an unconditional update, set to the wildcard
            character (*).
        '''
        _validate_not_none('table_name', table_name)
        _validate_not_none('partition_key', partition_key)
        _validate_not_none('row_key', row_key)
        _validate_not_none('entity', entity)
        _validate_not_none('content_type', content_type)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        request.path = '/' + _str(table_name) + '(PartitionKey=\'' + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
        request.headers = [
            ('Content-Type', _str_or_none(content_type)),
            ('If-Match', _str_or_none(if_match))
        ]
        request.body = _get_request_body(_convert_entity_to_xml(entity))
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        response = self._perform_request(request)
        # Only the entity's new etag is of interest to callers.
        return _parse_response_for_dict_filter(response, filter=['etag'])

    def merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
        '''
        Updates an existing entity by updating the entity's properties. This
        operation does not replace the existing entity as the Update Entity
        operation does.

        table_name: Table name.
        partition_key: PartitionKey of the entity.
        row_key: RowKey of the entity.
        entity:
            Required. The entity object to insert. Can be a dict format or
            entity object.
        content_type: Required. Must be set to application/atom+xml
        if_match:
            Optional. Specifies the condition for which the merge should be
            performed. To force an unconditional merge, set to the wildcard
            character (*).
        '''
        _validate_not_none('table_name', table_name)
        _validate_not_none('partition_key', partition_key)
        _validate_not_none('row_key', row_key)
        _validate_not_none('entity', entity)
        _validate_not_none('content_type', content_type)
        request = HTTPRequest()
        request.method = 'MERGE'
        request.host = self._get_host()
        request.path = '/' + _str(table_name) + '(PartitionKey=\'' + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
        request.headers = [
            ('Content-Type', _str_or_none(content_type)),
            ('If-Match', _str_or_none(if_match))
        ]
        request.body = _get_request_body(_convert_entity_to_xml(entity))
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        response = self._perform_request(request)
        return _parse_response_for_dict_filter(response, filter=['etag'])

    def delete_entity(self, table_name, partition_key, row_key, content_type='application/atom+xml', if_match='*'):
        '''
        Deletes an existing entity in a table.

        table_name: Table name.
        partition_key: PartitionKey of the entity.
        row_key: RowKey of the entity.
        content_type: Required. Must be set to application/atom+xml
        if_match:
            Optional. Specifies the condition for which the delete should be
            performed. To force an unconditional delete, set to the wildcard
            character (*).
        '''
        _validate_not_none('table_name', table_name)
        _validate_not_none('partition_key', partition_key)
        _validate_not_none('row_key', row_key)
        _validate_not_none('content_type', content_type)
        _validate_not_none('if_match', if_match)
        request = HTTPRequest()
        request.method = 'DELETE'
        request.host = self._get_host()
        request.path = '/' + _str(table_name) + '(PartitionKey=\'' + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
        request.headers = [
            ('Content-Type', _str_or_none(content_type)),
            ('If-Match', _str_or_none(if_match))
        ]
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        # No return value; failures surface as exceptions from
        # _perform_request.
        response = self._perform_request(request)

    def insert_or_replace_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml'):
        '''
        Replaces an existing entity or inserts a new entity if it does not
        exist in the table. Because this operation can insert or update an
        entity, it is also known as an "upsert" operation.

        table_name: Table name.
        partition_key: PartitionKey of the entity.
        row_key: RowKey of the entity.
        entity:
            Required. The entity object to insert. Could be a dict format or
            entity object.
        content_type: Required. Must be set to application/atom+xml
        '''
        _validate_not_none('table_name', table_name)
        _validate_not_none('partition_key', partition_key)
        _validate_not_none('row_key', row_key)
        _validate_not_none('entity', entity)
        _validate_not_none('content_type', content_type)
        request = HTTPRequest()
        # PUT without If-Match is the REST API's insert-or-replace form.
        request.method = 'PUT'
        request.host = self._get_host()
        request.path = '/' + _str(table_name) + '(PartitionKey=\'' + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
        request.headers = [('Content-Type', _str_or_none(content_type))]
        request.body = _get_request_body(_convert_entity_to_xml(entity))
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        response = self._perform_request(request)
        return _parse_response_for_dict_filter(response, filter=['etag'])

    def insert_or_merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml'):
        '''
        Merges an existing entity or inserts a new entity if it does not exist
        in the table. Because this operation can insert or update an entity,
        it is also known as an "upsert" operation.

        table_name: Table name.
        partition_key: PartitionKey of the entity.
        row_key: RowKey of the entity.
        entity:
            Required. The entity object to insert. Could be a dict format or
            entity object.
        content_type: Required. Must be set to application/atom+xml
        '''
        _validate_not_none('table_name', table_name)
        _validate_not_none('partition_key', partition_key)
        _validate_not_none('row_key', row_key)
        _validate_not_none('entity', entity)
        _validate_not_none('content_type', content_type)
        request = HTTPRequest()
        # MERGE without If-Match is the REST API's insert-or-merge form.
        request.method = 'MERGE'
        request.host = self._get_host()
        request.path = '/' + _str(table_name) + '(PartitionKey=\'' + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
        request.headers = [('Content-Type', _str_or_none(content_type))]
        request.body = _get_request_body(_convert_entity_to_xml(entity))
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_table_header(request)
        response = self._perform_request(request)
        return _parse_response_for_dict_filter(response, filter=['etag'])

    def _perform_request_worker(self, request):
        # Sign the request with the account's shared key before sending.
        auth = _sign_storage_table_request(request,
                                          self.account_name,
                                          self.account_key)
        request.headers.append(('Authorization', auth))
        return self._httpclient.perform_request(request)
| apache-2.0 |
"""
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import importlib
import os
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import LazyObject, empty
from django.utils import six
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
    """
    A lazy proxy for either global Django settings or a custom settings object.
    The user can manually configure settings prior to using them. Otherwise,
    Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
    """
    def _setup(self, name=None):
        """
        Load the settings module pointed to by the environment variable. This
        is used the first time we need any settings at all, if the user has not
        previously configured the settings manually.
        """
        settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
        if settings_module:
            self._wrapped = Settings(settings_module)
            return
        # No module configured: fail with a hint naming the attribute that
        # triggered the lookup, if we know it.
        desc = ("setting %s" % name) if name else "settings"
        raise ImproperlyConfigured(
            "Requested %s, but settings are not configured. "
            "You must either define the environment variable %s "
            "or call settings.configure() before accessing settings."
            % (desc, ENVIRONMENT_VARIABLE))

    def __getattr__(self, name):
        # Invoked only for attributes missing on the proxy itself; perform
        # the deferred setup on first access.
        if self._wrapped is empty:
            self._setup(name)
        return getattr(self._wrapped, name)

    def configure(self, default_settings=global_settings, **options):
        """
        Called to manually configure the settings. The 'default_settings'
        parameter sets where to retrieve any unspecified values from (its
        argument must support attribute access (__getattr__)).
        """
        if self._wrapped is not empty:
            raise RuntimeError('Settings already configured.')
        holder = UserSettingsHolder(default_settings)
        for key, value in options.items():
            setattr(holder, key, value)
        self._wrapped = holder

    @property
    def configured(self):
        """
        Returns True if the settings have already been configured.
        """
        return self._wrapped is not empty
class BaseSettings(object):
    """
    Common logic for settings whether set by a module or by the user.
    """
    def __setattr__(self, name, value):
        # URL-prefix settings, when non-empty, are required to end in '/'.
        is_url_prefix = name in ("MEDIA_URL", "STATIC_URL")
        if is_url_prefix and value and not value.endswith('/'):
            raise ImproperlyConfigured("If set, %s must end with a slash" % name)
        object.__setattr__(self, name, value)
class Settings(BaseSettings):
    """
    Settings read from the module named by ``settings_module``, layered on
    top of ``django.conf.global_settings`` defaults.
    """
    def __init__(self, settings_module):
        # update this dict from global settings (but only for ALL_CAPS settings)
        for setting in dir(global_settings):
            if setting.isupper():
                setattr(self, setting, getattr(global_settings, setting))

        # store the settings module in case someone later cares
        self.SETTINGS_MODULE = settings_module

        mod = importlib.import_module(self.SETTINGS_MODULE)

        # Settings that must be sequences of strings; a bare string here is
        # almost always a mistake (e.g. INSTALLED_APPS = "myapp").
        tuple_settings = (
            "ALLOWED_INCLUDE_ROOTS",
            "INSTALLED_APPS",
            "TEMPLATE_DIRS",
            "LOCALE_PATHS",
        )
        # Records which settings the user's module set itself, for
        # is_overridden().
        self._explicit_settings = set()
        for setting in dir(mod):
            if setting.isupper():
                setting_value = getattr(mod, setting)

                if (setting in tuple_settings and
                        isinstance(setting_value, six.string_types)):
                    raise ImproperlyConfigured("The %s setting must be a tuple. "
                                               "Please fix your settings." % setting)
                setattr(self, setting, setting_value)
                self._explicit_settings.add(setting)

        if not self.SECRET_KEY:
            raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")

        if ('django.contrib.auth.middleware.AuthenticationMiddleware' in self.MIDDLEWARE_CLASSES and
                'django.contrib.auth.middleware.SessionAuthenticationMiddleware' not in self.MIDDLEWARE_CLASSES):
            warnings.warn(
                "Session verification will become mandatory in Django 2.0. "
                "Please add 'django.contrib.auth.middleware.SessionAuthenticationMiddleware' "
                "to your MIDDLEWARE_CLASSES setting when you are ready to opt-in after "
                "reading the upgrade considerations in the 1.8 release notes.",
                RemovedInDjango20Warning
            )

        if hasattr(time, 'tzset') and self.TIME_ZONE:
            # When we can, attempt to validate the timezone. If we can't find
            # this file, no check happens and it's harmless.
            zoneinfo_root = '/usr/share/zoneinfo'
            if (os.path.exists(zoneinfo_root) and not
                    os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
                raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
            # Move the time zone info into os.environ. See ticket #2315 for why
            # we don't do this unconditionally (breaks Windows).
            # Note this mutates process-wide state (TZ + tzset()).
            os.environ['TZ'] = self.TIME_ZONE
            time.tzset()

    def is_overridden(self, setting):
        # True when the user's settings module defined the setting itself
        # (as opposed to inheriting the global default).
        return setting in self._explicit_settings
class UserSettingsHolder(BaseSettings):
    """
    Holder for user configured settings.
    """
    # SETTINGS_MODULE doesn't make much sense in the manually configured
    # (standalone) case.
    SETTINGS_MODULE = None

    def __init__(self, default_settings):
        """
        Requests for configuration variables not in this class are satisfied
        from the module specified in default_settings (if possible).
        """
        # Write through __dict__ to avoid triggering our own __setattr__
        # before _deleted exists.
        self.__dict__['_deleted'] = set()
        self.default_settings = default_settings

    def __getattr__(self, name):
        # Explicitly deleted names must not fall back to the defaults.
        if name in self._deleted:
            raise AttributeError
        return getattr(self.default_settings, name)

    def __setattr__(self, name, value):
        # Setting a name revives it if it was previously deleted.
        self._deleted.discard(name)
        super(UserSettingsHolder, self).__setattr__(name, value)

    def __delattr__(self, name):
        self._deleted.add(name)
        if hasattr(self, name):
            super(UserSettingsHolder, self).__delattr__(name)

    def __dir__(self):
        return list(self.__dict__) + dir(self.default_settings)

    def is_overridden(self, setting):
        if setting in self._deleted:
            return True
        if setting in self.__dict__:
            return True
        # Defer to the wrapped settings object when it supports the check.
        parent_check = getattr(self.default_settings, 'is_overridden',
                               lambda s: False)
        return parent_check(setting)
settings = LazySettings()
| lgpl-3.0 |
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from wordcloud import create_cloud
try:
from gensim import corpora, models, matutils
except:
print("import gensim failed.")
print()
print("Please install it")
raise
import matplotlib.pyplot as plt
import numpy as np
from os import path
# Number of LDA topics to fit.
NUM_TOPICS = 100

# Check that data exists; abort early with instructions instead of falling
# through and failing later with a confusing gensim error (previously the
# script printed the message but kept running).
if not path.exists('./data/ap/ap.dat'):
    print('Error: Expected data to be present at data/ap/')
    print('Please cd into ./data & run ./download_ap.sh')
    raise SystemExit(1)

# Load the AP corpus stored in Blei's LDA-C format.
corpus = corpora.BleiCorpus('./data/ap/ap.dat', './data/ap/vocab.txt')
# Build the topic model with the default (None -> symmetric) alpha prior.
model = models.ldamodel.LdaModel(
    corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=None)

# Write the top words of every topic into a single report file.
# FIX: the file was previously reopened with mode 'w' *inside* the loop,
# truncating it on every iteration so only the last topic was ever saved.
with open('topics.txt', 'w') as output:
    for ti in range(model.num_topics):
        words = model.show_topic(ti, 64)
        # NOTE(review): the unpacking assumes show_topic yields
        # (weight, word) pairs (old gensim API); newer gensim returns
        # (word, weight) -- verify against the installed version.
        tf = sum(f for f, w in words)
        output.write('\n'.join('{}:{}'.format(w, int(1000. * f / tf)) for f, w in words))
        output.write("\n\n\n")
# We first identify the most discussed topic, i.e., the one with the
# highest total weight
topics = matutils.corpus2dense(model[corpus], num_terms=model.num_topics)
weight = topics.sum(1)
max_topic = weight.argmax()
# Get the top 64 words for this topic
# Without the argument, show_topic would return only 10 words
words = model.show_topic(max_topic, 64)
# This function will actually check for the presence of pytagcloud and is otherwise a no-op
create_cloud('cloud_blei_lda.png', words)
# Histogram of how many topics each document uses (default alpha).
num_topics_used = [len(model[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist(num_topics_used, np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
fig.tight_layout()
fig.savefig('Figure_04_01.png')
# Now, repeat the same exercise using alpha=1.0
# You can edit the constant below to play around with this parameter
ALPHA = 1.0
model1 = models.ldamodel.LdaModel(
    corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=ALPHA)
num_topics_used1 = [len(model1[doc]) for doc in corpus]
# Overlayed histograms comparing topic usage for the two alpha settings.
fig,ax = plt.subplots()
ax.hist([num_topics_used, num_topics_used1], np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
# The coordinates below were fit by trial and error to look good
ax.text(9, 223, r'default alpha')
ax.text(26, 156, 'alpha=1.0')
fig.tight_layout()
fig.savefig('Figure_04_02.png')
| mit |
bowang/tensorflow | tensorflow/contrib/seq2seq/python/ops/decoder.py | 30 | 11351 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Seq2seq layer operations for use in neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = ["Decoder", "dynamic_decode"]
_transpose_batch_time = rnn._transpose_batch_time # pylint: disable=protected-access
@six.add_metaclass(abc.ABCMeta)
class Decoder(object):
  """An RNN Decoder abstract interface object.
  Concepts used by this interface:
  - `inputs`: (structure of) tensors and TensorArrays that is passed as input to
    the RNNCell composing the decoder, at each time step.
  - `state`: (structure of) tensors and TensorArrays that is passed to the
    RNNCell instance as the state.
  - `finished`: boolean tensor telling whether each sequence in the batch is
    finished.
  - `outputs`: Instance of BasicDecoderOutput. Result of the decoding, at each
    time step.
  """
  @property
  def batch_size(self):
    """The batch size of input values."""
    raise NotImplementedError
  @property
  def output_size(self):
    """A (possibly nested tuple of...) integer[s] or `TensorShape` object[s]."""
    raise NotImplementedError
  @property
  def output_dtype(self):
    """A (possibly nested tuple of...) dtype[s]."""
    raise NotImplementedError
  @abc.abstractmethod
  def initialize(self, name=None):
    """Called before any decoding iterations.
    This method must compute initial input values and initial state.
    Args:
      name: Name scope for any created operations.
    Returns:
      `(finished, initial_inputs, initial_state)`: initial values of
      'finished' flags, inputs and state.
    """
    raise NotImplementedError
  @abc.abstractmethod
  def step(self, time, inputs, state, name=None):
    """Called per step of decoding (but only once for dynamic decoding).
    Args:
      time: Scalar `int32` tensor. Current step number.
      inputs: RNNCell input (possibly nested tuple of) tensor[s] for this time
        step.
      state: RNNCell state (possibly nested tuple of) tensor[s] from previous
        time step.
      name: Name scope for any created operations.
    Returns:
      `(outputs, next_state, next_inputs, finished)`: `outputs` is an object
      containing the decoder output, `next_state` is a (structure of) state
      tensors and TensorArrays, `next_inputs` is the tensor that should be
      used as input for the next step, `finished` is a boolean tensor telling
      whether the sequence is complete, for each sequence in the batch.
    """
    raise NotImplementedError
  def finalize(self, outputs, final_state, sequence_lengths):
    """Optional post-processing hook run once after decoding completes.
    Subclasses may override it; `dynamic_decode` catches the default
    NotImplementedError, so leaving it unimplemented is fine.
    Args:
      outputs: (structure of) stacked output tensors for all time steps.
      final_state: (structure of) final decoder state.
      sequence_lengths: `int32` tensor of per-sequence finish times.
    Returns:
      Possibly modified `(outputs, final_state)`.
    """
    raise NotImplementedError
def _create_zero_outputs(size, dtype, batch_size):
  """Create a (possibly nested) structure of all-zero output tensors."""
  def _shape_tensor(shape):
    # Pass Tensors through untouched; convert static shapes to an int32
    # shape-suffix tensor.
    if isinstance(shape, ops.Tensor):
      return shape
    return constant_op.constant(
        tensor_shape.TensorShape(shape).as_list(),
        dtype=dtypes.int32,
        name="zero_suffix_shape")
  def _zeros_for(shape, d):
    # Full shape is [batch_size] + per-slot output shape.
    full_shape = array_ops.concat(([batch_size], _shape_tensor(shape)), axis=0)
    return array_ops.zeros(full_shape, dtype=d)
  return nest.map_structure(_zeros_for, size, dtype)
def dynamic_decode(decoder,
                   output_time_major=False,
                   impute_finished=False,
                   maximum_iterations=None,
                   parallel_iterations=32,
                   swap_memory=False,
                   scope=None):
  """Perform dynamic decoding with `decoder`.
  Calls initialize() once and step() repeatedly on the Decoder object.
  Args:
    decoder: A `Decoder` instance.
    output_time_major: Python boolean. Default: `False` (batch major). If
      `True`, outputs are returned as time major tensors (this mode is faster).
      Otherwise, outputs are returned as batch major tensors (this adds extra
      time to the computation).
    impute_finished: Python boolean. If `True`, then states for batch
      entries which are marked as finished get copied through and the
      corresponding outputs get zeroed out. This causes some slowdown at
      each time step, but ensures that the final state and outputs have
      the correct values and that backprop ignores time steps that were
      marked as finished.
    maximum_iterations: `int32` scalar, maximum allowed number of decoding
      steps. Default is `None` (decode until the decoder is fully done).
    parallel_iterations: Argument passed to `tf.while_loop`.
    swap_memory: Argument passed to `tf.while_loop`.
    scope: Optional variable scope to use.
  Returns:
    `(final_outputs, final_state, final_sequence_lengths)`.
  Raises:
    TypeError: if `decoder` is not an instance of `Decoder`.
    ValueError: if `maximum_iterations` is provided but is not a scalar.
  """
  if not isinstance(decoder, Decoder):
    raise TypeError("Expected decoder to be type Decoder, but saw: %s" %
                    type(decoder))
  with variable_scope.variable_scope(scope, "decoder") as varscope:
    # Properly cache variable values inside the while_loop
    if varscope.caching_device is None:
      varscope.set_caching_device(lambda op: op.device)
    if maximum_iterations is not None:
      maximum_iterations = ops.convert_to_tensor(
          maximum_iterations, dtype=dtypes.int32, name="maximum_iterations")
      if maximum_iterations.get_shape().ndims != 0:
        raise ValueError("maximum_iterations must be a scalar")
    initial_finished, initial_inputs, initial_state = decoder.initialize()
    # Zero-valued outputs used to pad out already-finished sequences.
    zero_outputs = _create_zero_outputs(decoder.output_size,
                                        decoder.output_dtype,
                                        decoder.batch_size)
    if maximum_iterations is not None:
      # With maximum_iterations == 0, every sequence starts out finished.
      initial_finished = math_ops.logical_or(
          initial_finished, 0 >= maximum_iterations)
    initial_sequence_lengths = array_ops.zeros_like(
        initial_finished, dtype=dtypes.int32)
    initial_time = constant_op.constant(0, dtype=dtypes.int32)
    def _shape(batch_size, from_shape):
      # Static element shape [batch_size] + from_shape when known;
      # otherwise a fully unknown shape.
      if not isinstance(from_shape, tensor_shape.TensorShape):
        return tensor_shape.TensorShape(None)
      else:
        batch_size = tensor_util.constant_value(
            ops.convert_to_tensor(
                batch_size, name="batch_size"))
        return tensor_shape.TensorShape([batch_size]).concatenate(from_shape)
    def _create_ta(s, d):
      # One dynamically-sized TensorArray per slot of the output structure.
      return tensor_array_ops.TensorArray(
          dtype=d,
          size=0,
          dynamic_size=True,
          element_shape=_shape(decoder.batch_size, s))
    initial_outputs_ta = nest.map_structure(_create_ta, decoder.output_size,
                                            decoder.output_dtype)
    def condition(unused_time, unused_outputs_ta, unused_state, unused_inputs,
                  finished, unused_sequence_lengths):
      # Keep looping while any sequence in the batch is unfinished.
      return math_ops.logical_not(math_ops.reduce_all(finished))
    def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
      """Internal while_loop body.
      Args:
        time: scalar int32 tensor.
        outputs_ta: structure of TensorArray.
        state: (structure of) state tensors and TensorArrays.
        inputs: (structure of) input tensors.
        finished: bool tensor (keeping track of what's finished).
        sequence_lengths: int32 tensor (keeping track of time of finish).
      Returns:
        `(time + 1, outputs_ta, next_state, next_inputs, next_finished,
          next_sequence_lengths)`.
      """
      (next_outputs, decoder_state, next_inputs,
       decoder_finished) = decoder.step(time, inputs, state)
      next_finished = math_ops.logical_or(decoder_finished, finished)
      if maximum_iterations is not None:
        next_finished = math_ops.logical_or(
            next_finished, time + 1 >= maximum_iterations)
      # Record the step at which each sequence first became finished.
      next_sequence_lengths = array_ops.where(
          math_ops.logical_and(math_ops.logical_not(finished), next_finished),
          array_ops.fill(array_ops.shape(sequence_lengths), time + 1),
          sequence_lengths)
      nest.assert_same_structure(state, decoder_state)
      nest.assert_same_structure(outputs_ta, next_outputs)
      nest.assert_same_structure(inputs, next_inputs)
      # Zero out output values past finish
      if impute_finished:
        emit = nest.map_structure(
            lambda out, zero: array_ops.where(finished, zero, out),
            next_outputs,
            zero_outputs)
      else:
        emit = next_outputs
      # Copy through states past finish
      def _maybe_copy_state(new, cur):
        # TensorArrays and scalar states get passed through.
        if isinstance(cur, tensor_array_ops.TensorArray):
          pass_through = True
        else:
          new.set_shape(cur.shape)
          pass_through = (new.shape.ndims == 0)
        return new if pass_through else array_ops.where(finished, cur, new)
      if impute_finished:
        next_state = nest.map_structure(
            _maybe_copy_state, decoder_state, state)
      else:
        next_state = decoder_state
      outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
                                      outputs_ta, emit)
      return (time + 1, outputs_ta, next_state, next_inputs, next_finished,
              next_sequence_lengths)
    res = control_flow_ops.while_loop(
        condition,
        body,
        loop_vars=[
            initial_time, initial_outputs_ta, initial_state, initial_inputs,
            initial_finished, initial_sequence_lengths,
        ],
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory)
    final_outputs_ta = res[1]
    final_state = res[2]
    final_sequence_lengths = res[5]
    final_outputs = nest.map_structure(lambda ta: ta.stack(), final_outputs_ta)
    # Decoders may optionally post-process (finalize is optional -- see the
    # Decoder base class); the default raises NotImplementedError.
    try:
      final_outputs, final_state = decoder.finalize(
          final_outputs, final_state, final_sequence_lengths)
    except NotImplementedError:
      pass
    if not output_time_major:
      # Convert [time, batch, ...] results back to [batch, time, ...].
      final_outputs = nest.map_structure(_transpose_batch_time, final_outputs)
  return final_outputs, final_state, final_sequence_lengths
| apache-2.0 |
gautamMalu/rootfs_xen_arndale | usr/lib/python2.7/bsddb/dbutils.py | 157 | 2964 | #------------------------------------------------------------------------
#
# Copyright (C) 2000 Autonomous Zone Industries
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
# Author: Gregory P. Smith <greg@krypto.org>
#
# Note: I don't know how useful this is in reality since when a
# DBLockDeadlockError happens the current transaction is supposed to be
# aborted. If it doesn't then when the operation is attempted again
# the deadlock is still happening...
# --Robin
#
#------------------------------------------------------------------------
#
# import the time.sleep function in a namespace safe way to allow
# "from bsddb.dbutils import *"
#
from time import sleep as _sleep
import sys
# On Python 3, the intra-package import must be explicit-relative.
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
    # Because this syntax is not valid before Python 2.5, it is hidden
    # inside exec() so the module still parses on older interpreters.
    exec("from . import db")
else :
    import db
# always sleep at least N seconds between retrys
_deadlock_MinSleepTime = 1.0/128
# never sleep more than N seconds between retrys
_deadlock_MaxSleepTime = 3.14159
# Assign a file object to this for a "sleeping" message to be written to it
# each retry
_deadlock_VerboseFile = None
def DeadlockWrap(function, *_args, **_kwargs):
    """DeadlockWrap(function, *_args, **_kwargs) - automatically retries
    function in case of a database deadlock.
    This is a function intended to be used to wrap database calls such
    that they perform retrys with exponentially backing off sleeps in
    between when a DBLockDeadlockError exception is raised.
    A 'max_retries' keyword parameter may optionally be passed to prevent it
    from retrying forever (in which case the exception will be reraised).
    d = DB(...)
    d.open(...)
    DeadlockWrap(d.put, "foo", data="bar") # set key "foo" to "bar"
    """
    sleeptime = _deadlock_MinSleepTime
    # pop() combines the get + del of the original implementation;
    # max_retries must not be forwarded to the wrapped function.
    max_retries = _kwargs.pop('max_retries', -1)
    while True:
        try:
            return function(*_args, **_kwargs)
        except db.DBLockDeadlockError:
            # FIX: check for retry exhaustion *before* sleeping. The
            # original slept (up to _deadlock_MaxSleepTime seconds) and
            # only then re-raised, wasting one full backoff interval on
            # the final failure. Retry counts are unchanged: the default
            # of -1 still retries forever.
            max_retries -= 1
            if max_retries == -1:
                raise
            if _deadlock_VerboseFile:
                _deadlock_VerboseFile.write(
                    'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
            _sleep(sleeptime)
            # exponential backoff in the sleep time, capped at the maximum
            sleeptime *= 2
            if sleeptime > _deadlock_MaxSleepTime:
                sleeptime = _deadlock_MaxSleepTime
#------------------------------------------------------------------------
| gpl-2.0 |
kailIII/geraldo | site/newsite/site-geraldo/django/contrib/localflavor/fr/fr_department.py | 39 | 3391 | # -*- coding: utf-8 -*-
# (code, label) choices for French departments, ASCII-only labels.
# FIX: the Corsican departments '2A' and '2B' appeared twice (once among the
# numeric codes and again at the end with different labels), producing
# duplicate options in any select widget; the later duplicates were removed.
DEPARTMENT_ASCII_CHOICES = (
    ('01', '01 - Ain'),
    ('02', '02 - Aisne'),
    ('03', '03 - Allier'),
    ('04', '04 - Alpes-de-Haute-Provence'),
    ('05', '05 - Hautes-Alpes'),
    ('06', '06 - Alpes-Maritimes'),
    ('07', '07 - Ardeche'),
    ('08', '08 - Ardennes'),
    ('09', '09 - Ariege'),
    ('10', '10 - Aube'),
    ('11', '11 - Aude'),
    ('12', '12 - Aveyron'),
    ('13', '13 - Bouches-du-Rhone'),
    ('14', '14 - Calvados'),
    ('15', '15 - Cantal'),
    ('16', '16 - Charente'),
    ('17', '17 - Charente-Maritime'),
    ('18', '18 - Cher'),
    ('19', '19 - Correze'),
    ('21', '21 - Cote-d\'Or'),
    ('22', '22 - Cotes-d\'Armor'),
    ('23', '23 - Creuse'),
    ('24', '24 - Dordogne'),
    ('25', '25 - Doubs'),
    ('26', '26 - Drome'),
    ('27', '27 - Eure'),
    ('28', '28 - Eure-et-Loire'),
    ('29', '29 - Finistere'),
    ('2A', '2A - Corse-du-Sud'),
    ('2B', '2B - Haute-Corse'),
    ('30', '30 - Gard'),
    ('31', '31 - Haute-Garonne'),
    ('32', '32 - Gers'),
    ('33', '33 - Gironde'),
    ('34', '34 - Herault'),
    ('35', '35 - Ille-et-Vilaine'),
    ('36', '36 - Indre'),
    ('37', '37 - Indre-et-Loire'),
    ('38', '38 - Isere'),
    ('39', '39 - Jura'),
    ('40', '40 - Landes'),
    ('41', '41 - Loir-et-Cher'),
    ('42', '42 - Loire'),
    ('43', '43 - Haute-Loire'),
    ('44', '44 - Loire-Atlantique'),
    ('45', '45 - Loiret'),
    ('46', '46 - Lot'),
    ('47', '47 - Lot-et-Garonne'),
    ('48', '48 - Lozere'),
    ('49', '49 - Maine-et-Loire'),
    ('50', '50 - Manche'),
    ('51', '51 - Marne'),
    ('52', '52 - Haute-Marne'),
    ('53', '53 - Mayenne'),
    ('54', '54 - Meurthe-et-Moselle'),
    ('55', '55 - Meuse'),
    ('56', '56 - Morbihan'),
    ('57', '57 - Moselle'),
    ('58', '58 - Nievre'),
    ('59', '59 - Nord'),
    ('60', '60 - Oise'),
    ('61', '61 - Orne'),
    ('62', '62 - Pas-de-Calais'),
    ('63', '63 - Puy-de-Dome'),
    ('64', '64 - Pyrenees-Atlantiques'),
    ('65', '65 - Hautes-Pyrenees'),
    ('66', '66 - Pyrenees-Orientales'),
    ('67', '67 - Bas-Rhin'),
    ('68', '68 - Haut-Rhin'),
    ('69', '69 - Rhone'),
    ('70', '70 - Haute-Saone'),
    ('71', '71 - Saone-et-Loire'),
    ('72', '72 - Sarthe'),
    ('73', '73 - Savoie'),
    ('74', '74 - Haute-Savoie'),
    ('75', '75 - Paris'),
    ('76', '76 - Seine-Maritime'),
    ('77', '77 - Seine-et-Marne'),
    ('78', '78 - Yvelines'),
    ('79', '79 - Deux-Sevres'),
    ('80', '80 - Somme'),
    ('81', '81 - Tarn'),
    ('82', '82 - Tarn-et-Garonne'),
    ('83', '83 - Var'),
    ('84', '84 - Vaucluse'),
    ('85', '85 - Vendee'),
    ('86', '86 - Vienne'),
    ('87', '87 - Haute-Vienne'),
    ('88', '88 - Vosges'),
    ('89', '89 - Yonne'),
    ('90', '90 - Territoire de Belfort'),
    ('91', '91 - Essonne'),
    ('92', '92 - Hauts-de-Seine'),
    ('93', '93 - Seine-Saint-Denis'),
    ('94', '94 - Val-de-Marne'),
    ('95', '95 - Val-d\'Oise'),
    ('971', '971 - Guadeloupe'),
    ('972', '972 - Martinique'),
    ('973', '973 - Guyane'),
    ('974', '974 - La Reunion'),
    ('975', '975 - Saint-Pierre-et-Miquelon'),
    ('976', '976 - Mayotte'),
    ('984', '984 - Terres Australes et Antarctiques'),
    ('986', '986 - Wallis et Futuna'),
    ('987', '987 - Polynesie Francaise'),
    ('988', '988 - Nouvelle-Caledonie'),
)
| lgpl-3.0 |
PKRoma/python-for-android | pythonforandroid/recipes/libglob/__init__.py | 4 | 2376 | """
android libglob
available via '-lglob' LDFLAG
"""
from os.path import exists, join
from pythonforandroid.recipe import Recipe
from pythonforandroid.toolchain import current_directory
from pythonforandroid.logger import info, shprint
import sh
class LibGlobRecipe(Recipe):
    """Make a glob.h and glob.so for the python_install_dir()"""
    version = '0.0.1'
    url = None
    #
    # glob.h and glob.c extracted from
    # https://github.com/white-gecko/TokyoCabinet, e.g.:
    # https://raw.githubusercontent.com/white-gecko/TokyoCabinet/master/glob.h
    # https://raw.githubusercontent.com/white-gecko/TokyoCabinet/master/glob.c
    # and pushed in via patch
    name = 'libglob'
    built_libraries = {'libglob.so': '.'}
    depends = ['hostpython3']
    patches = ['glob.patch']
    def should_build(self, arch):
        """It's faster to build than check"""
        return True
    def prebuild_arch(self, arch):
        """Make the build and target directories"""
        path = self.get_build_dir(arch.arch)
        if not exists(path):
            info("creating {}".format(path))
            shprint(sh.mkdir, '-p', path)
    def build_arch(self, arch):
        """Compile glob.c and link it into libglob.so (simple shared compile)."""
        env = self.get_recipe_env(arch, with_flags_in_cc=False)
        # Ensure the build dir and the target python's Lib/Include dirs exist.
        for path in (
                self.get_build_dir(arch.arch),
                join(self.ctx.python_recipe.get_build_dir(arch.arch), 'Lib'),
                join(self.ctx.python_recipe.get_build_dir(arch.arch), 'Include')):
            if not exists(path):
                info("creating {}".format(path))
                shprint(sh.mkdir, '-p', path)
        cli = env['CC'].split()[0]
        # makes sure first CC command is the compiler rather than ccache, refs:
        # https://github.com/kivy/python-for-android/issues/1399
        if 'ccache' in cli:
            cli = env['CC'].split()[1]
        cc = sh.Command(cli)
        with current_directory(self.get_build_dir(arch.arch)):
            # Compile step: glob.c -> glob.o
            # NOTE(review): '-l.' looks like a typo for '-L.' (or is simply
            # ignored at the compile stage) -- verify before changing.
            cflags = env['CFLAGS'].split()
            cflags.extend(['-I.', '-c', '-l.', 'glob.c', '-I.'])
            shprint(cc, *cflags, _env=env)
            # Link step: glob.o -> libglob.so
            cflags = env['CFLAGS'].split()
            cflags.extend(['-shared', '-I.', 'glob.o', '-o', 'libglob.so'])
            cflags.extend(env['LDFLAGS'].split())
            shprint(cc, *cflags, _env=env)
recipe = LibGlobRecipe()
| mit |
kevin-coder/tensorflow-fork | tensorflow/contrib/keras/api/keras/applications/inception_v3/__init__.py | 39 | 1125 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception V3 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.inception_v3 import decode_predictions
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.applications.inception_v3 import preprocess_input
del absolute_import
del division
del print_function
| apache-2.0 |
dosiecki/NewsBlur | vendor/paypalapi/exceptions.py | 20 | 1422 | # coding=utf-8
"""
Various PayPal API related exceptions.
"""
class PayPalError(Exception):
    """
    Generic PayPal-related error. Errors reported by PayPal API responses
    are raised through the more specific subclasses below instead.
    """
    def __init__(self, message, error_code=None):
        # Keep args identical to the original so pickling/str(args) match.
        Exception.__init__(self, message, error_code)
        self.message = message
        self.error_code = error_code
    def __str__(self):
        # A falsy error code (None, 0, '') renders just the message repr.
        if not self.error_code:
            return repr(self.message)
        return "%s (Error Code: %s)" % (repr(self.message), self.error_code)
class PayPalConfigError(PayPalError):
    """
    Raised when a configuration problem arises. Carries the same
    message/error_code attributes as PayPalError.
    """
    pass
class PayPalAPIResponseError(PayPalError):
    """
    Raised when a PayPal NVP API response reports an error. The error code
    and messages are copied off the response object onto the exception.
    """
    def __init__(self, response):
        """Extract the NVP error fields from *response*."""
        def _field(name, default=None):
            return getattr(response, name, default)
        self.response = response
        self.error_code = int(_field('L_ERRORCODE0', -1))
        self.message = _field('L_LONGMESSAGE0')
        self.short_message = _field('L_SHORTMESSAGE0')
        self.correlation_id = _field('CORRELATIONID')
        super(PayPalAPIResponseError, self).__init__(self.message, self.error_code)
| mit |
erdc/proteus | proteus/tests/sandbox/testRKDG.py | 1 | 25769 | #!/usr/bin/env python
from ScalarTransport import *
from ScalarTransportTests import *
from LevelSetTests import *
"""
test RKDG via ScalarTransport interface with quadrature for simple
advection problems
"""
def buildProblems(testFlag=0,
                  verbose=0):
    """
    build data structures necessary for specifying test problems:
    testFlag says which one to run?
    0 --- LinearAD_DiracIC (1d)
    1 --- rotating Cone (2d)

    Returns a dict bundling, per test name: space dimension, final time,
    PDE coefficients, initial/Dirichlet conditions and analytical solution.
    """
    testProblems = []
    nd = {}
    T = {}
    coefficients = {}
    getInitialConditions = {}
    getDirichletConditions = {}
    analyticalSolution = {}
    if testFlag == 1:
        # 2d solid-body rotation of a cone in the unit square.
        test = 'RotatingCone2D'
        testProblems.append(test)
        nd[test]=2
        getDirichletConditions[test]=getHomogeneousDBC2D
        N=3.0
        analyticalSolution[test] = RotatingCone2D(1.0/8.0)
        #mwf correct one, but DG has problems for some reason
        #coefficients[test]=UnitSquareRotation()
        #mwf works better with this, still have no diffusion
        coefficients[test]=UnitSquareRotationWithDiffusion(A0=0.0)
        T[test]=0.5
        getInitialConditions[test] = analyticalSolution[test]
        coefficients[test].mass = 'linear'
        coefficients[test].advection = 'linear'
        #mwf correct coefficients[test].diffusion = None
        #mwf correct coefficients[test].potential = None
        #mwf worked better
        coefficients[test].diffusion = 'constant'
        #mwf worked better
        coefficients[test].potential = 'linear'
        coefficients[test].reaction = None
    else:
        #1d linear advection-diffusion with dirac initial condition:
        #
        #u_t + (bu - a u_x)_x = 0; u(0) = 1; u(1) = 0
        #
        test='LinearAD_DiracIC'
        testProblems.append(test)
        nd[test]=1
        getDirichletConditions[test]=getDBC_hom
        #a0=1.0e-4
        a0=1.0e-2
        #a0=1.0
        #a0=0.0
        A0=Numeric.array([[a0]])
        b0=1.0
        #b0=0.0
        B0=Numeric.array([b0])
        C0=1.0
        M0=0.0
        coefficients[test] = LinearADR_ConstantCoefficients(M=1.0,A=A0,B=B0,C=0.0)
        analyticalSolution[test] = LinearAD_DiracIC(b=B0,a=a0,tStart=0.25)
        T[test]=0.1 #0.5
        getInitialConditions[test] = analyticalSolution[test]
        coefficients[test].mass = 'linear'
        coefficients[test].advection = 'linear'
        coefficients[test].diffusion = 'constant'
        coefficients[test].potential = 'linear'
        coefficients[test].reaction = None
    #end else on testFlag
    #just put these in one dictionary so I know what all
    #has to be specified
    problems = {}
    problems['testProblems'] =testProblems
    problems['nd'] =nd
    problems['T'] =T
    problems['coefficients'] =coefficients
    problems['initialConditions'] =getInitialConditions
    problems['dirichletConditions']=getDirichletConditions
    problems['analyticalSolution'] =analyticalSolution
    return problems
#end buildProblems
def buildSimParams(test,TimeIntegrationClass,verbose=0):
    """
    define the necessary flags and tolerances for performing test problems

    Returns a dict of solver/discretization parameters keyed by name; the
    DG flag below switches between discontinuous and continuous FEM setups.
    """
    #mwf debug
    print 'building simulation details for ',test
    computeEigenvalues = False
    if computeEigenvalues:
        #set flags for eigenvalue computation
        linearSolverType= levelLinearSolverType = 'DenseLU'
        levelNonlinearSolverType = 'Newton'
        nonlinearSolverType = 'NLNI'
    else:
        linearSolverType = levelLinearSolverType = 'SparseLU'
        levelNonlinearSolverType = 'Newton'
        nonlinearSolverType = 'Newton'
    #end else on evalues
    #tolerances
    tolFac = 1.0e-4
    linTolFac= 1.0e-2
    #time stepping control
    runCFL = 0.1
    #order of approximation (1 unless doing SSPRK)
    tOrder = 2
    #pick finite element spaces
    DG = True #False
    if DG:
        # Discontinuous Galerkin: single mesh level, numerical fluxes,
        # no stabilization/shock capturing.
        FemSpace = DG_AffineLinearOnSimplexWithNodalBasis
        conservativeFlux = False
        numericalFlux = True
        stabilization= None
        shockCapturing=None
        #mwf added
        shockCapturingDiffusion = None
        quadratureOrder=3
        preSmooths = None
        postSmooths = None
        cycles = None
        nLevels=1
        if test == 'RotatingCone2D':
            nn =31
        else:
            nn = 51
    else:
        # Continuous Galerkin with stabilization and a mesh hierarchy.
        FemSpace = C0_AffineLinearOnSimplexWithNodalBasis
        conservativeFlux = None#'pwc'
        numericalFlux = None
        stabilization='2'
        shockCapturing= None#'1'
        shockCapturingDiffusion = 0.15
        quadratureOrder=3
        preSmooths = 2
        postSmooths = 2
        cycles = 2
        if test == 'RotatingCone2D':
            nLevels = 3
        else:
            nLevels=6 #1d problem
        nn=3 #number of nodes on the coarsest mesh
    #end if on DG
    #collect run parameters
    par = {}
    par['computeEigenvalues'] =computeEigenvalues
    par['linearSolverType'] =linearSolverType
    par['levelLinearSolverType'] =levelLinearSolverType
    par['levelNonlinearSolverType']=levelNonlinearSolverType
    par['nonlinearSolverType'] =nonlinearSolverType
    par['tolFac'] =tolFac
    par['linTolFac'] =linTolFac
    par['runCFL'] =runCFL
    par['DG'] =DG
    par['FemSpace'] =FemSpace
    par['conservativeFlux'] =conservativeFlux
    par['numericalFlux'] =numericalFlux
    par['stabilization'] =stabilization
    par['shockCapturing'] =shockCapturing
    par['shockCapturingDiffusion'] =shockCapturingDiffusion
    par['quadratureOrder'] =quadratureOrder
    par['preSmooths'] =preSmooths
    par['postSmooths'] =postSmooths
    par['cycles'] =cycles
    par['nLevels'] =nLevels
    par['nn'] =nn
    par['timeIntegration'] =TimeIntegrationClass
    par['fullNewtonFlag'] =False
    par['timeIntOrder'] =tOrder
    #par[''] =
    #
    return par
#end buildSimParams
def buildQuadrature(test,tpars,problems):
    """
    setup numerical quadrature data structures

    Installs element and element-boundary Gauss quadrature rules into
    tpars ('quadrature' and 'elementBoundaryQuadrature') and returns tpars.
    """
    quadrature = {}
    # Element quadrature rule on the simplex of the problem's dimension.
    gq = SimplexGaussQuadrature(problems['nd'][test])
    gq.setOrder(tpars['quadratureOrder'])
    for integral in OneLevelScalarTransport.integralKeys:
        quadrature[integral] = gq
    #end for
    # Extra rules only needed when stabilization/shock capturing are on.
    if tpars['stabilization'] is not None:
        quadrature['stab'] = gq
    if tpars['shockCapturing'] is not None:
        quadrature['numDiff'] = gq
    elementBoundaryQuadrature={}
    # Boundary rule lives on the (nd-1)-dimensional simplex.
    ebgq = SimplexGaussQuadrature(problems['nd'][test]-1)
    ebgq.setOrder(tpars['quadratureOrder'])
    for elementBoundaryIntegral in OneLevelScalarTransport.elementBoundaryIntegralKeys:
        elementBoundaryQuadrature[elementBoundaryIntegral] = ebgq
    #end boundary quad integral
    tpars['quadrature']= quadrature
    tpars['elementBoundaryQuadrature']=elementBoundaryQuadrature
    return tpars
#end build quadrature
def buildMultilevelMesh(test,tpars,problems):
    """
    Construct the multilevel mesh hierarchy (edge/triangular/tetrahedral)
    matching the problem's space dimension; returns [] for unsupported nd.
    """
    nn = tpars['nn']
    nLevels = tpars['nLevels']
    nd = problems['nd'][test]
    mlMesh = []
    if nd == 1:
        mlMesh = MultiLevelEdgeMesh(nn,1,1,refinementLevels=nLevels)
    elif nd == 2:
        mlMesh = MultiLevelTriangularMesh(nn,nn,1,refinementLevels=nLevels)
    elif nd == 3:
        mlMesh = MultiLevelTetrahedralMesh(nn,nn,nn,refinementLevels=nLevels)
    return mlMesh
#end buildMultilevelMesh
def buildMultiLevelScalarTransport(test,tpars,problems,mlMesh):
    """
    Assemble the MultiLevelScalarTransport object for the given problem
    and mesh hierarchy; also computes per-level nonlinear/linear tolerances
    (scaled by h**2) and stores them in tpars. Returns (model, tpars).
    """
    tolList=[]
    linTolList=[]
    for l in range(tpars['nLevels']):
        mlMesh.meshList[l].computeGeometricInfo()
        # Tolerances tighten with mesh refinement (proportional to h^2).
        tolList.append(tpars['tolFac']*(mlMesh.meshList[l].h**2))
        linTolList.append(tpars['linTolFac']*(mlMesh.meshList[l].h**2))
    #end l
    atol = min(tolList)
    lin_atol = min(linTolList)
    # Dense matrices are required for eigenvalue computation or DenseLU.
    if (tpars['computeEigenvalues'] or
        tpars['linearSolverType'] == 'DenseLU'):
        MatType = Mat
        matType = 'dense'
    else:
        MatType = SparseMat
        matType = 'csr'
    #end if
    mlScalarTransport = MultiLevelScalarTransport(
        problems['nd'][test],
        mlMesh,
        tpars['FemSpace'],
        tpars['FemSpace'],
        matType,
        problems['dirichletConditions'][test],
        problems['coefficients'][test],
        tpars['quadrature'],
        tpars['elementBoundaryQuadrature'],
        tpars['stabilization'],
        tpars['shockCapturing'],
        tpars['shockCapturingDiffusion'],
        tpars['conservativeFlux'],
        tpars['numericalFlux'],
        tpars['timeIntegration'],
        tpars['timeIntOrder'])
    tpars['MatType'] =MatType
    tpars['atol'] = atol
    tpars['lin_atol']= lin_atol
    tpars['tolList'] = tolList
    tpars['linTolList']= linTolList
    return mlScalarTransport,tpars
#end build mlScalarTransport
def buildSolvers(test,tpars,problems,mlScalarTransport,verbose=0):
    """
    create linear and nonlinear solvers

    Forces SparseLU + per-level Newton regardless of the requested solver
    types (with warnings), and returns
    (linearSolver, nonlinearSolver, levelLinearSolver) lists.
    """
    #how loud should nonlinear solver be
    printNLinfo=False
    if verbose > 3:
        printNLinfo=True
    levelLinearSolver = None
    #force linearSolver to be SparseLU
    if tpars['linearSolverType'] != 'SparseLU':
        print 'WARNING setting linearSolverType to SparseLU'
        print 'you need to check MatType to make sure SparseMat'
        tpars['linearSolverType'] = 'SparseLU'
    #end if
    levelLinearSolverList=[]
    for l in range(tpars['nLevels']):
        levelLinearSolverList.append(
            SparseLU(mlScalarTransport.jacobianList[l]))
    #end l
    levelLinearSolver = levelLinearSolverList
    linearSolver = None
    #do just plain Newton
    linearSolver = levelLinearSolver
    for l in range(tpars['nLevels']):
        linearSolver[l].printInfo=False
    #end l
    directSolverFlag=True
    #print "Setting up NonlinearSolver"
    #for levelnonlinear solver to be Newton
    if tpars['levelNonlinearSolverType'] != 'Newton':
        print 'WARNING setting levelNonlinearSolverType to Newton'
        tpars['levelNonlinearSolverType'] = 'Newton'
    #end if
    levelNonlinearSolverList=[]
    for l in range(tpars['nLevels']):
        # One Newton solver per level, sharing that level's direct
        # linear solver and residual tolerances.
        levelNonlinearSolverList.append(
            Newton(linearSolver=linearSolver[l],
                   F=mlScalarTransport.modelList[l],
                   J=mlScalarTransport.jacobianList[l],
                   rtol_r=tpars['tolList'][l],
                   atol_r=tpars['atol'],
                   maxIts=500,
                   convergenceTest = 'r',
                   printInfo=printNLinfo,
                   fullNewton=tpars['fullNewtonFlag'],
                   directSolver=directSolverFlag))
    #end for l
    #for nonlinear solver to be Newton
    if tpars['nonlinearSolverType'] != 'Newton':
        print 'WARNING setting nonlinearSolverType to Newton!'
        tpars['nonlinearSolverType'] = 'Newton'
    #end if
    nonlinearSolver = levelNonlinearSolverList
    return linearSolver,nonlinearSolver,levelLinearSolver
#end buildSolvers
def computeErrors(eSpace,eSpaceTime,eSpaceLast,
tn,mlScalarTransport,mlMesh,
test,pars,problems,verbose):
"""
go through and calculate errors on mesh hierarchy
"""
eCoarse=1.0
eFine=1.0
hCoarse=1.0
hFine=1.0
analyticalSolution = problems['analyticalSolution']
for m,jac,mesh in zip(mlScalarTransport.modelList,
mlScalarTransport.jacobianList,
mlMesh.meshList):
if analyticalSolution[test] is not None:
eCoarse=eFine
hCoarse=hFine
hFine = mesh.h
eFine = L2errorSFEMvsAF(analyticalSolution[test],
m.q['x'],
m.q['dx_m'],
m.q['u'],tn)
if eSpace.has_key(hFine):
eSpaceLast[hFine] = eSpace[hFine]
if eSpaceTime.has_key(hFine):
eSpaceTime[hFine] +=\
mlScalarTransport.DT*0.5*(eSpaceLast[hFine]**2 + eFine**2)
else:
eSpaceTime[hFine] =\
mlScalarTransport.DT*0.5*(eSpaceLast[hFine]**2 + eFine**2)
#end else on spaceTime
#end if on eSpace
eSpace[hFine] = eFine
#end analytical solution not none
#end for
if analyticalSolution[test] is not None:
hFine = 0
errors='||e||_{2}'
errorsSpaceTime=''
orders='|| ||e||_2 ||_2'
for mesh in mlMesh.meshList:
hCoarse=hFine
hFine = mesh.h
if hCoarse != 0:
if eSpace[hFine] != 0.0 and eSpace[hCoarse] != 0.0:
p = (log(eSpace[hFine]) - log(eSpace[hCoarse]))/(log(hFine) - log(hCoarse))
else:
p=0
else:
p = 0
#end if on hCoarse != 0
errors+="& %4.2e" % eSpace[hFine]
orders+="& %4.2e" % p
if eSpaceTime.has_key(hFine): #mwf added if
errorsSpaceTime+="& %4.2e" % sqrt(eSpaceTime[hFine])
#end for
print errors
print orders
print errorsSpaceTime
#end if analytical solution
return eSpace,eSpaceTime,eSpaceLast
def plotInitial(tn,test,tpars,problems,
                mlMesh,mlScalarTransport):
    """
    Plot the initial numerical solution and, when available, the
    analytical solution on the finest mesh.

    Returns (solPlot, aSolPlot): two Gnuplot session objects, or
    (None, None) when tpars['DG'] is True (no plotting for DG runs).

    NOTE(review): the `with=` keyword argument below is only valid on
    Python versions where `with` is not a reserved word (pre-2.6) --
    this code targets the old Gnuplot.py API.
    """
    solPlot = None
    aSolPlot= None
    if tpars['DG'] == False:
        # one window for the numerical solution, one for the analytical one
        solPlot = Gnuplot.Gnuplot()
        solPlot("set terminal x11")
        aSolPlot = Gnuplot.Gnuplot()
        aSolPlot("set terminal x11")
        if problems['nd'][test] == 1:
            if problems['analyticalSolution'][test] is not None:
                solPlot.title(test)
                # sample the analytical solution on a uniform 101-point grid
                nap=101
                dxap=Numeric.array([1.0/(nap - 1.0),0.0,0.0])
                P = [(i*dxap) for i in range(nap)]
                Px = [x[0] for x in P]
                solPlot.plot(Gnuplot.Data(mlMesh.meshList[-1].nodeArray[:,0],
                                          mlScalarTransport.modelList[-1].u.dof,
                                          with='linespoints',
                                          title='numerical solution'),
                             Gnuplot.Data(Px,
                                          [problems['analyticalSolution'][test].uOfXT(x,tn) for x in P],
                                          with='lines',
                                          title='analytical solution'))
                aSolPlot.plot(Gnuplot.Data(Px,
                                           [problems['analyticalSolution'][test].uOfXT(x,tn) for x in P],
                                           with='lines'))
            #end if on analytical solution
        elif problems['nd'][test]==2:
            # 2-d: surface plot of the dof vector reshaped onto the
            # (assumed structured) nx-by-ny fine grid
            nx = (tpars['nn']-1)*(2**(tpars['nLevels']-1))+1
            ny = nx
            x = Numeric.arange(nx)/float(nx-1)
            y = Numeric.arange(nx)/float(nx-1)
            nSol = Numeric.reshape(mlScalarTransport.modelList[-1].u.dof,
                                   (nx,ny))
            solPlot('set parametric')
            solPlot('set data style lines')
            solPlot('set hidden')
            solPlot('set contour base')
            solPlot('set cntrparam levels incremental 0.1,0.1,1.0')
            solPlot.xlabel('x')
            solPlot.ylabel('y')
            solPlot.splot(Gnuplot.GridData(nSol,
                                           x,
                                           y,
                                           binary=0,
                                           inline=0))
            if problems['analyticalSolution'][test] is not None:
                # evaluate the analytical solution on the same grid
                aSol = Numeric.zeros((nx,ny),Numeric.Float)
                for i in range(nx):
                    for j in range(ny):
                        aSol[i,j]=problems['analyticalSolution'][test].uOfXT(Numeric.array([x[i],y[j],0.0]),tn)
                aSolPlot('set parametric')
                aSolPlot('set data style lines')
                aSolPlot('set hidden')
                aSolPlot('set contour base')
                aSolPlot('set cntrparam levels incremental 0.1,0.1,1.0')
                aSolPlot.xlabel('x')
                aSolPlot.ylabel('y')
                aSolPlot.splot(Gnuplot.GridData(aSol,
                                                x,
                                                y,
                                                binary=0,
                                                inline=0))
            #end if on analytical solution
        #end if on nd ==2
    #end if on not DG
    return solPlot,aSolPlot
def plotTimeStep(solPlot,aSolPlot,tn,test,tpars,problems,
                 mlMesh,mlScalarTransport,testOut):
    """
    Refresh the solution plots after a time step (mirrors plotInitial,
    but reuses the Gnuplot sessions created there and titles the plot
    with `testOut`).

    Returns (solPlot, aSolPlot) unchanged; both may be None for DG runs,
    in which case nothing is plotted.
    """
    if solPlot is None or aSolPlot is None:
        return solPlot,aSolPlot
    #end nothing to plot with
    if tpars['DG'] == False:
        if problems['nd'][test] == 1:
            if problems['analyticalSolution'][test] is not None:
                solPlot.title(testOut)
                # sample the analytical solution on a uniform 101-point grid
                nap=101
                dxap=Numeric.array([1.0/(nap - 1.0),0.0,0.0])
                P = [(i*dxap) for i in range(nap)]
                Px = [x[0] for x in P]
                solPlot.plot(Gnuplot.Data(mlMesh.meshList[-1].nodeArray[:,0],
                                          mlScalarTransport.modelList[-1].u.dof,
                                          with='linespoints',
                                          title='numerical solution'),
                             Gnuplot.Data(Px,
                                          [problems['analyticalSolution'][test].uOfXT(x,tn) for x in P],
                                          with='lines',
                                          title='analytical solution'))
            else:
                # no analytical solution: plot the numerical solution alone
                solPlot.title(testOut)
                solPlot.plot(Gnuplot.Data(mlMesh.meshList[-1].nodeArray[:,0],
                                          mlScalarTransport.modelList[-1].u.dof,
                                          with='linespoints',
                                          title='numerical solution'))
            #end if on analytical solution
        elif problems['nd'][test]==2:
            # 2-d: surface plot of the dof vector reshaped onto the
            # (assumed structured) nx-by-ny fine grid
            nx = (tpars['nn']-1)*(2**(tpars['nLevels']-1))+1
            ny = nx
            x = Numeric.arange(nx)/float(nx-1)
            y = Numeric.arange(nx)/float(nx-1)
            nSol = Numeric.reshape(mlScalarTransport.modelList[-1].u.dof,
                                   (nx,ny))
            solPlot('set parametric')
            solPlot('set data style lines')
            solPlot('set hidden')
            solPlot('set contour base')
            solPlot('set cntrparam levels incremental 0.1,0.1,1.0')
            solPlot.xlabel('x')
            solPlot.ylabel('y')
            solPlot.splot(Gnuplot.GridData(nSol,
                                           x,
                                           y,
                                           binary=0,
                                           inline=0))
            if problems['analyticalSolution'][test] is not None:
                aSol = Numeric.zeros((nx,ny),Numeric.Float)
                for i in range(nx):
                    for j in range(ny):
                        aSol[i,j]=problems['analyticalSolution'][test].uOfXT(Numeric.array([x[i],y[j],0.0]),tn)
                    #end j
                #end i
                aSolPlot('set parametric')
                aSolPlot('set data style lines')
                aSolPlot('set hidden')
                aSolPlot('set contour base')
                aSolPlot('set cntrparam levels incremental 0.1,0.1,1.0')
                aSolPlot.xlabel('x')
                aSolPlot.ylabel('y')
                aSolPlot.splot(Gnuplot.GridData(aSol,
                                                x,
                                                y,
                                                binary=0,
                                                inline=0))
            #end if on analytical solution
        #end if on nd ==2
    #end if on not DG
    return solPlot,aSolPlot
def plotFinal(solPlot,aSolPlot,tn,test,tpars,problems,
              mlMesh,mlScalarTransport,testOut):
    """
    Write the final solution and mesh to disk: EPS hardcopies of the two
    Gnuplot windows (non-DG runs only) plus Ensight mesh/solution files.
    Returns the two plot objects unchanged.
    """
    if tpars['DG'] == False:
        # dump each live Gnuplot window to its own EPS file
        for plot, suffix in ((solPlot, '_sol.eps'), (aSolPlot, '_asol.eps')):
            plot.hardcopy(testOut + suffix, eps=1,enhanced=1,color=1)
    #end if
    fineModel = mlScalarTransport.modelList[-1]
    mlMesh.meshList[-1].writeMeshEnsight(test,test)
    fineModel.u.name='u'
    fineModel.u.writeFunctionEnsight(test,append=False)
    return solPlot,aSolPlot
# Driver script: build a transport test problem, integrate it in time with
# SSP Runge-Kutta stages + Newton solves per level, and plot/record errors.
if __name__ == '__main__':
    import sys
    import numpy
    from ScalarTransport import *
    from LinearSolvers import *
    from TimeIntegrationTools import *
    verbose = 5
    #testFlag = 0 # LinearAD_Dirac_IC
    testFlag = 1 # rotating clone
    problems = buildProblems(testFlag,verbose)
    test = problems['testProblems'][0] #first test I hope
    #pars = buildSimParams(test,BackwardEuler)
    #pars = buildSimParams(test,ForwardEuler)
    pars = buildSimParams(test,SSPRKintegration)
    pars = buildQuadrature(test,pars,problems)
    mlMesh = buildMultilevelMesh(test,pars,problems)
    mlScalarTransport,pars = buildMultiLevelScalarTransport(test,pars,
                                                            problems,
                                                            mlMesh)
    linearSolver,nonlinearSolver,levelLinearSolver = \
                 buildSolvers(test,pars,problems,mlScalarTransport,verbose=verbose)
    #start time loop?
    nstages= pars['timeIntOrder']
    tn = 0.0
    nSteps = 0
    maxSteps= 1000
    # error bookkeeping dicts keyed by mesh size h (see computeErrors)
    eSpace={}
    eSpaceTime={}
    eSpaceLast={}
    mlScalarTransport.setInitialConditions(problems['initialConditions'][test],
                                           tn)
    #end ic set
    solPlot,aSolPlot = plotInitial(tn,test,pars,problems,
                                   mlMesh,mlScalarTransport)
    mlScalarTransport.modelList[-1].timeIntegration.runCFL= pars['runCFL']
    done = False
    while not done:
        # pick a CFL-based dt, then clip it so we land exactly on T
        mlScalarTransport.chooseDT()
        dtMin = min(problems['T'][test]-tn,mlScalarTransport.DT)
        mlScalarTransport.chooseDT(DTSET=dtMin)
        if nSteps == 0:
            # NOTE(review): initializeTimeIntegration is called twice here;
            # presumably deliberate for the multi-stage scheme -- confirm
            mlScalarTransport.initializeTimeIntegration()
            mlScalarTransport.initializeTimeIntegration()
        #end if
        tn += mlScalarTransport.DT
        print 'taking step to t= ',tn
        nSteps += 1
        testOut = test + ('%4.4i' % nSteps)
        #only Newton iteration for now
        if pars['nonlinearSolverType'] != 'Newton':
            print 'nonlinearSolverType must be Newton'
            sys.exit(1)
        #end if
        #loop through stages
        for s in range(nstages):
            # one Newton solve per mesh level per RK stage
            for l in range(pars['nLevels']):
                mlScalarTransport.modelList[l].getResidual(u =
                                                           mlScalarTransport.uList[l],
                                                           r =
                                                           mlScalarTransport.rList[l])
                nonlinearSolver[l].solve(u = mlScalarTransport.uList[l],
                                         r = mlScalarTransport.rList[l])
            #end l loop
            mlScalarTransport.updateStage()
        #end s loop
        print 'max u on fine= ',max(mlScalarTransport.modelList[-1].u.dof.flat)
        print 'min u on fine= ',min(mlScalarTransport.modelList[-1].u.dof.flat)
        mlScalarTransport.modelList[-1].u.name = test
        mlScalarTransport.modelList[-1].u.writeFunctionGnuplot(test,
                                                               append=False)
        # optional conservative-flux postprocessing / mass-balance check
        if pars['conservativeFlux'] == 'pwc':
            mlScalarTransport.modelList[-1].getConservationFluxPWC()
        elif pars['conservativeFlux'] == 'pwl':
            mlScalarTransport.modelList[-1].getConservationFluxPWL()
        elif pars['numericalFlux'] is not None:
            mlScalarTransport.modelList[-1].e['conservationResidual'].flat[:]=0.0
            for eN in range(mlScalarTransport.modelList[-1].mesh.nElements_global):
                for i in range(mlScalarTransport.modelList[-1].nDOF_element):
                    mlScalarTransport.modelList[-1].e['conservationResidual'][eN]+=mlScalarTransport.modelList[-1].elementResidual[eN,i]
            #end for eN
            #print 'consRes=',mlScalarTransport.modelList[-1].e['conservationResidual']
            print "Max mass cons error "+`max(abs(mlScalarTransport.modelList[-1].e['conservationResidual']))`
        #end numerical flux is not None
        mlScalarTransport.updateTimeHistory()
        solPlot,aSolPlot = plotTimeStep(solPlot,aSolPlot,tn,test,pars,problems,
                                        mlMesh,mlScalarTransport,testOut)
        #compute error
        eSpace,eSpaceTime,eSpaceLast = computeErrors(
            eSpace,eSpaceTime,eSpaceLast,
            tn,mlScalarTransport,mlMesh,test,pars,problems,verbose)
        #figure out if done or not
        done = (abs(tn - problems['T'][test]) < 1.0e-10
                or nSteps >= maxSteps)
    #end while
    solPlot,aSolPlot = plotFinal(solPlot,aSolPlot,tn,test,pars,problems,
                                 mlMesh,mlScalarTransport,testOut)
| mit |
fnordahl/nova | nova/virt/libvirt/volume/volume.py | 29 | 5066 | # Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
from oslo_config import cfg
from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _LE
from nova.i18n import _LW
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
# Module-level logger for this volume-driver module.
LOG = logging.getLogger(__name__)
# Options registered under the [libvirt] config group below.
volume_opts = [
    cfg.ListOpt('qemu_allowed_storage_drivers',
                default=[],
                help='Protocols listed here will be accessed directly '
                     'from QEMU. Currently supported protocols: [gluster]'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
    """Base class for volume drivers.

    Builds the common libvirt <disk> guest configuration shared by all
    volume back ends; subclasses set the source type/path/protocol.
    """

    def __init__(self, connection, is_block_dev):
        # `connection` is the libvirt driver; `is_block_dev` influences
        # which QEMU disk driver name is picked in get_config().
        self.connection = connection
        self.is_block_dev = is_block_dev

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt.

        Populates driver name/format/cache, target device/bus, serial,
        optional block-size tuning, QoS rate limits and access mode from
        `connection_info` / `disk_info`.

        :raises exception.InvalidVolumeAccessMode: if access_mode is
            neither 'ro' nor 'rw'.
        """
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.driver_name = libvirt_utils.pick_disk_driver_name(
            self.connection._host.get_version(),
            self.is_block_dev
        )
        conf.source_device = disk_info['type']
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = disk_info['dev']
        conf.target_bus = disk_info['bus']
        conf.serial = connection_info.get('serial')

        # Support for block size tuning
        data = {}
        if 'data' in connection_info:
            data = connection_info['data']
        if 'logical_block_size' in data:
            conf.logical_block_size = data['logical_block_size']
        if 'physical_block_size' in data:
            conf.physical_block_size = data['physical_block_size']

        # Extract rate_limit control parameters
        if 'qos_specs' in data and data['qos_specs']:
            tune_opts = ['total_bytes_sec', 'read_bytes_sec',
                         'write_bytes_sec', 'total_iops_sec',
                         'read_iops_sec', 'write_iops_sec']
            specs = data['qos_specs']
            if isinstance(specs, dict):
                for k, v in six.iteritems(specs):
                    if k in tune_opts:
                        new_key = 'disk_' + k
                        setattr(conf, new_key, v)
            else:
                # Logger.warn() is a deprecated alias; use warning().
                LOG.warning(_LW('Unknown content in connection_info/'
                                'qos_specs: %s'), specs)

        # Extract access_mode control parameters
        if 'access_mode' in data and data['access_mode']:
            access_mode = data['access_mode']
            if access_mode in ('ro', 'rw'):
                conf.readonly = access_mode == 'ro'
            else:
                LOG.error(_LE('Unknown content in '
                              'connection_info/access_mode: %s'),
                          access_mode)
                raise exception.InvalidVolumeAccessMode(
                    access_mode=access_mode)

        return conf

    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        pass

    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver for volumes exposed to the host as local block devices."""

    def __init__(self, connection):
        super(LibvirtVolumeDriver, self).__init__(connection,
                                                  is_block_dev=True)

    def get_config(self, connection_info, disk_info):
        """Return the libvirt <disk> config for a block-backed volume."""
        parent = super(LibvirtVolumeDriver, self)
        conf = parent.get_config(connection_info, disk_info)
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
    """Test-only driver that attaches a fake network volume."""

    def __init__(self, connection):
        super(LibvirtFakeVolumeDriver, self).__init__(connection,
                                                      is_block_dev=True)

    def get_config(self, connection_info, disk_info):
        """Return a libvirt <disk> config pointing at a fake protocol."""
        parent = super(LibvirtFakeVolumeDriver, self)
        conf = parent.get_config(connection_info, disk_info)
        conf.source_type = "network"
        conf.source_protocol = "fake"
        conf.source_name = "fake"
        return conf
| apache-2.0 |
Opticalp/instrumentall | testsuite/python/parameterGetterTest.py | 1 | 4696 | # -*- coding: utf-8 -*-
## @file testsuite/python/parameterGetterTest.py
## @date Jul 2016
## @author PhRG - opticalp.fr
##
## Test the features of the parameter getters
#
# Copyright (c) 2016 Ph. Renaud-Goud / Opticalp
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def myMain(baseDir):
    """Main function. Run the tests.

    Exercises the ParameterGetter feature of the embedded `instru` API:
    creates a module with int/float/str parameters, builds a getter per
    parameter, binds each getter to a data logger, runs the module, and
    verifies the forwarded values.  Raises RuntimeError on mismatch.
    """
    print("Test the basic features of the parameter getters. ")
    # star import brings Factory, DataLogger, DataSource, DataTarget,
    # bind, runModule, waitAll into scope (instru is the host app module)
    from instru import *
    fac = Factory("DemoRootFactory")
    print("Retrieved factory: " + fac.name)
    print("Create module from leafParam factory")
    modParam = fac.select("branch").select("leafParam").create("modParam")
    print("module " + modParam.name + " created. ")
    print("Retrieve the module parameters: ")
    params = modParam.getParameterSet()
    for param in params:
        value = modParam.getParameterValue(param["name"])
        # basestring: Python 2 only -- this script targets the embedded
        # Python 2 interpreter shipped with InstrumentAll
        if not isinstance(value, basestring):
            value = str(value)
        print ( " - " + param["name"] + ": " + param["descr"] +
                " ; value = " + value )
    print("Trying to set some parameter values: ")
    print(" - set intParam to 666")
    modParam.setParameterValue("intParam", 666)
    print(" - set floatParam to 0")
    modParam.setParameterValue("floatParam", 0)
    print(" - set strParam to mojo")
    modParam.setParameterValue("strParam", "mojo")
    print("Build param getters using those parameters...")
    getInt = modParam.buildParameterGetter("intParam")
    getFloat = modParam.buildParameterGetter("floatParam")
    getStr = modParam.buildParameterGetter("strParam")
    print("Parameter names: ")
    print(" - " + getInt.parameterName())
    print(" - " + getFloat.parameterName())
    print(" - " + getStr.parameterName())
    print("Create Data loggers to print the output of the param getters")
    loggerInt = DataLogger("DataPocoLogger")
    loggerFloat = DataLogger("DataPocoLogger")
    loggerStr = DataLogger("DataPocoLogger")
    print("Test DataTarget cast (DataLogger)")
    targetLogger = DataTarget(loggerInt)
    print("target: " + targetLogger.name + " (" + targetLogger.description + ")")
    print("Test DataSource cast (ParameterGetter)")
    sourceInt = DataSource(getInt)
    print("source: " + sourceInt.name + " (" + sourceInt.description + ")")
    print("Bind the loggers to the param getters")
    bind(DataSource(getInt), DataTarget(loggerInt))
    bind(DataSource(getFloat), DataTarget(loggerFloat))
    bind(DataSource(getStr), DataTarget(loggerStr))
    print("Run the parent module of the getters")
    runModule(modParam)
    waitAll()
    # verify the values forwarded through each getter match what was set
    if (DataSource(getInt).getDataValue() != 666):
        raise RuntimeError("wrong param getter int forwarded value")
    if (abs(DataSource(getFloat).getDataValue()) > 0.1):
        raise RuntimeError("wrong param getter float forwarded value")
    if (DataSource(getStr).getDataValue() != "mojo"):
        raise RuntimeError("wrong param getter str forwarded value")
    print("Returned values OK")
    print("The data attribute forwarding is not tested here...")
    print("End of script parameterGetterTest.py")
# main body
# Guard: this script only makes sense when executed by the InstrumentAll
# host application, which provides the `instru` module imported in myMain.
import sys
import os
from os.path import dirname
if len(sys.argv) >= 1:
    # probably called from InstrumentAll
    checker = os.path.basename(sys.argv[0])
    if checker == "instrumentall" or checker == "instrumentall.exe":
        print("current script: ",os.path.realpath(__file__))
        # baseDir = testsuite/ (two levels up from this file)
        baseDir = dirname(dirname(__file__))
        myMain(baseDir)
        exit(0)
print("Presumably not called from InstrumentAll >> Exiting...")
exit("This script has to be launched from inside InstrumentAll")
| mit |
anirudhSK/chromium | build/android/pylib/utils/test_environment.py | 2 | 1407 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import psutil
import signal
from pylib import android_commands
def _KillWebServers():
  """Kill any leftover lighttpd/webpagereplay test-server processes.

  Escalates through progressively harsher signals (TERM, INT, QUIT,
  KILL); after each signal round, waits up to one second per signalled
  process before moving on to the next signal.
  """
  for s in [signal.SIGTERM, signal.SIGINT, signal.SIGQUIT, signal.SIGKILL]:
    signalled = []
    for server in ['lighttpd', 'webpagereplay']:
      for p in psutil.process_iter():
        try:
          # NOTE(review): p.cmdline is an attribute in the psutil version
          # pinned here; newer psutil makes it a method -- confirm on
          # upgrade.  PEP 8 idiom: `x not in y` rather than `not x in y`.
          if server not in ' '.join(p.cmdline):
            continue
          logging.info('Killing %s %s %s', s, server, p.pid)
          p.send_signal(s)
          signalled.append(p)
        except Exception as e:
          logging.warning('Failed killing %s %s %s', server, p.pid, e)
    for p in signalled:
      try:
        p.wait(1)
      except Exception as e:
        logging.warning('Failed waiting for %s to die. %s', p.pid, e)
def CleanupLeftoverProcesses():
  """Clean up the test environment, restarting fresh adb and HTTP daemons."""
  _KillWebServers()
  host_adb_restarted = False
  for device in android_commands.GetAttachedDevices():
    adb = android_commands.AndroidCommands(device)
    if not host_adb_restarted:
      # The host adb server is shared across devices: restart it once only.
      adb.RestartAdbServer()
      host_adb_restarted = True
    adb.RestartAdbdOnDevice()
    adb.EnableAdbRoot()
    adb.WaitForDevicePm()
| bsd-3-clause |
elainenaomi/sciwonc-dataflow-examples | sbbd2016/experiments/1-postgres/3_workflow_full_10files_primary_nosh_nors_annot_with_proj_3s/pegasus.bDkvI/pegasus-4.6.0/lib/python2.7/dist-packages/Pegasus/service/monitoring/queries.py | 1 | 57269 | # Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
import hashlib
from sqlalchemy.orm import aliased, defer
from sqlalchemy.orm.exc import NoResultFound
from Pegasus.db import connection
from Pegasus.db.modules import SQLAlchemyInit
from Pegasus.db.schema import *
from Pegasus.db.errors import StampedeDBNotFoundError
from Pegasus.db.admin.admin_loader import DBAdminError
from Pegasus.service import cache
from Pegasus.service.base import PagedResponse, BaseQueryParser, BaseOrderParser, InvalidQueryError, InvalidOrderError
from Pegasus.service.base import OrderedSet, OrderedDict
from Pegasus.service.monitoring.resources import RootWorkflowResource, RootWorkflowstateResource, CombinationResource
from Pegasus.service.monitoring.resources import WorkflowResource, WorkflowMetaResource, WorkflowstateResource
from Pegasus.service.monitoring.resources import RCLFNResource, RCPFNResource, RCMetaResource
from Pegasus.service.monitoring.resources import JobResource, HostResource, JobInstanceResource, JobstateResource
from Pegasus.service.monitoring.resources import TaskResource, TaskMetaResource, InvocationResource
from Pegasus.service.monitoring.utils import csv_to_json
log = logging.getLogger(__name__)
class WorkflowQueries(SQLAlchemyInit):
    """Base class for monitoring queries: DB connection setup, cached
    query execution, and generic filter/order/pagination helpers applied
    to SQLAlchemy Query objects."""
    def __init__(self, connection_string, use_cache=True):
        """
        :param connection_string: SQLAlchemy database URL (required)
        :param use_cache: enable the query-result cache
        :raises ValueError: if connection_string is None
        :raises StampedeDBNotFoundError: if the DB cannot be opened
        """
        if connection_string is None:
            raise ValueError('Connection string is required')
        # Checksum of the connection string; namespaces cache keys per DB.
        # NOTE(review): hashlib.md5 requires bytes on Python 3; this module
        # appears to target Python 2 (see `unicode` below) -- confirm
        # before porting.
        self._conn_string_csum = hashlib.md5(connection_string).hexdigest()
        try:
            SQLAlchemyInit.__init__(self, connection_string)
        except (connection.ConnectionError, DBAdminError) as e:
            log.exception(e)
            raise StampedeDBNotFoundError
        # Assign via the property setter so the bool type check runs.
        self._use_cache = True
        self.use_cache = use_cache
    @property
    def use_cache(self):
        # Whether _get_count/_get_all/_get_one consult the cache.
        return self._use_cache
    @use_cache.setter
    def use_cache(self, use_cache):
        if isinstance(use_cache, bool):
            self._use_cache = use_cache
        else:
            raise TypeError('Expecting boolean, found %s' % type(use_cache))
    def _cache_key_from_query(self, q):
        # Derive a stable cache key from the compiled SQL text plus its
        # bound parameter values (sorted for determinism) and the DB csum.
        statement = q.with_labels().statement
        compiled = statement.compile()
        params = compiled.params
        cache_key = ' '.join([self._conn_string_csum, str(compiled)] + [str(params[k]) for k in sorted(params)])
        return hashlib.md5(cache_key).hexdigest()
    def _get_count(self, q, use_cache=True, timeout=60):
        # Cached q.count(); `timeout` may be a value or a callable that
        # computes the TTL from the result.
        cache_key = '%s.count' % self._cache_key_from_query(q)
        if use_cache and cache.get(cache_key):
            log.debug('Cache Hit: %s' % cache_key)
            count = cache.get(cache_key)
        else:
            log.debug('Cache Miss: %s' % cache_key)
            count = q.count()
            t = timeout(count) if hasattr(timeout, '__call__') else timeout
            cache.set(cache_key, count, t)
        return count
    def _get_all(self, q, use_cache=True, timeout=60):
        # Cached q.all(); same timeout semantics as _get_count.
        cache_key = '%s.all' % self._cache_key_from_query(q)
        if use_cache and cache.get(cache_key):
            log.debug('Cache Hit: %s' % cache_key)
            record = cache.get(cache_key)
        else:
            log.debug('Cache Miss: %s' % cache_key)
            record = q.all()
            t = timeout(record) if hasattr(timeout, '__call__') else timeout
            cache.set(cache_key, record, t)
        return record
    def _get_one(self, q, use_cache=True, timeout=60):
        # Cached q.one(); propagates NoResultFound on a cache miss.
        cache_key = '%s.one' % self._cache_key_from_query(q)
        if use_cache and cache.get(cache_key):
            log.debug('Cache Hit: %s' % cache_key)
            record = cache.get(cache_key)
        else:
            log.debug('Cache Miss: %s' % cache_key)
            record = q.one()
            t = timeout(record) if hasattr(timeout, '__call__') else timeout
            cache.set(cache_key, record, t)
        return record
    @staticmethod
    def _evaluate_query(q, query, resource):
        # Translate a textual filter expression (parsed by BaseQueryParser
        # into postfix tokens) into SQLAlchemy filter criteria on `q`.
        # :raises InvalidQueryError: on unknown fields or malformed input
        if not query:
            return q
        # Maps the query language's comparison operators to the
        # corresponding SQLAlchemy column operator method names.
        comparator = {
            '=': '__eq__',
            '!=': '__ne__',
            '<': '__lt__',
            '<=': '__le__',
            '>': '__gt__',
            '>=': '__ge__',
            'LIKE': 'like',
            'IN': 'in_'
        }
        operators = {
            'AND': and_,
            'OR': or_
        }
        operands = []
        def condition_expansion(expr, field):
            # Push field <op> value onto the operand stack.
            operands.append(getattr(field, comparator[expr[1]])(expr[2]))
        try:
            expression = BaseQueryParser(query).evaluate()
            for token in expression:
                if isinstance(token, tuple):
                    # Field-vs-field comparison: resolve the right-hand
                    # identifier to its mapped column first.
                    if isinstance(token[2], tuple):
                        identifier = token[2][1]
                        token = (token[0], token[1], resource.get_mapped_field(token[2][1]))
                    identifier = token[0]
                    condition_expansion(token, resource.get_mapped_field(identifier))
                elif isinstance(token, str) or isinstance(token, unicode):
                    # Boolean operator token: combine the top two operands.
                    # (`unicode` is Python 2 only.)
                    operand_2 = operands.pop()
                    operand_1 = operands.pop()
                    if token in operators:
                        operands.append(operators[token](operand_1, operand_2))
            q = q.filter(operands.pop())
        except (KeyError, AttributeError):
            log.exception('Invalid field %s' % identifier)
            raise InvalidQueryError('Invalid field %s' % identifier)
        except IndexError:
            log.exception('Invalid expression %s' % query)
            raise InvalidQueryError('Invalid expression %s' % query)
        return q
    @staticmethod
    def _add_ordering(q, order, resource):
        # Apply "field ASC/DESC, ..." ordering criteria to `q`.
        # :raises InvalidOrderError: on unknown fields
        if not q or not order or not resource:
            return q
        order_parser = BaseOrderParser(order)
        sort_order = order_parser.get_sort_order()
        for identifier, sort_dir in sort_order:
            try:
                if isinstance(resource, CombinationResource):
                    field = resource.get_mapped_field(identifier)
                else:
                    field = resource.get_mapped_field(identifier, ignore_prefix=True)
                if sort_dir == 'ASC':
                    q = q.order_by(field)
                else:
                    q = q.order_by(desc(field))
            except (KeyError, AttributeError):
                log.exception('Invalid field %r' % identifier)
                raise InvalidOrderError('Invalid field %r' % identifier)
        return q
    @staticmethod
    def _add_pagination(q, start_index=None, max_results=None, total_records=None):
        """
        LIMIT <skip>, <count> - Valid
        LIMIT <count> OFFSET <skip> - Valid
        OFFSET <skip> - Invalid
        If only start_index is provided and total_records is known then we can compute both limit and offset to
        effectively support OFFSET <skip>
        """
        if start_index and max_results:
            q = q.offset(start_index)
            q = q.limit(max_results)
        elif not start_index and not max_results:
            # No pagination requested.
            return q
        else:
            if max_results:
                q = q.limit(max_results)
            elif total_records:
                # OFFSET without LIMIT is not portable SQL; emulate it by
                # limiting to the known total record count.
                q = q.offset(start_index)
                q = q.limit(total_records)
        return q
class MainWorkflowQueries(WorkflowQueries):
    """Queries against the master (dashboard) database: root workflows
    and their most recent workflow state."""
    def get_root_workflows(self, start_index=None, max_results=None, query=None, order=None, use_cache=True, **kwargs):
        """
        Returns a collection of the Root Workflow objects.
        :param start_index: Return results starting from record `start_index`
        :param max_results: Return a maximum of `max_results` records
        :param query: Filtering criteria
        :param order: Sorting criteria
        :param use_cache: If available, use cached results
        :return: Collection of tuples (DashboardWorkflow, DashboardWorkflowstate)
        """
        #
        # Construct SQLAlchemy Query `q` to count.
        #
        q = self.session.query(DashboardWorkflow)
        total_records = total_filtered = self._get_count(q, use_cache)
        if total_records == 0:
            log.debug('total_records 0')
            return PagedResponse([], 0, 0)
        #
        # Finish Construction of Base SQLAlchemy Query `q`
        #
        # Outer-join each workflow to its latest recorded state so that
        # workflows with no state rows are still returned.
        qws = self._get_max_main_workflow_state()
        qws = qws.subquery('main_workflowstate')
        alias = aliased(DashboardWorkflowstate, qws)
        q = q.outerjoin(qws, DashboardWorkflow.wf_id == qws.c.wf_id)
        q = q.add_entity(alias)
        #
        # Construct SQLAlchemy Query `q` to filter.
        #
        if query:
            resource = CombinationResource(RootWorkflowResource(), RootWorkflowstateResource(alias))
            q = self._evaluate_query(q, query, resource)
            total_filtered = self._get_count(q, use_cache)
            if total_filtered == 0 or (start_index and start_index >= total_filtered):
                log.debug('total_filtered is 0 or start_index >= total_filtered')
                return PagedResponse([], total_records, total_filtered)
        #
        # Construct SQLAlchemy Query `q` to sort
        #
        if order:
            q = self._add_ordering(q, order, RootWorkflowResource())
        #
        # Construct SQLAlchemy Query `q` to paginate.
        #
        q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
        records = self._get_all(q, use_cache)
        # Flatten each (workflow, state) tuple into a workflow object
        # carrying its state as the `workflow_state` attribute.
        for i in range(len(records)):
            new_record = records[i][0]
            new_record.workflow_state = records[i][1]
            records[i] = new_record
        return PagedResponse(records, total_records, total_filtered)
    def get_root_workflow(self, m_wf_id, use_cache=True):
        """
        Returns a Root Workflow object identified by m_wf_id.
        :param m_wf_id: m_wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
        :param use_cache: If available, use cached results
        :raises ValueError: if m_wf_id is None
        :raises NoResultFound: if no matching workflow exists
        :return: Root Workflow object
        """
        q = self.session.query(DashboardWorkflow)
        if m_wf_id is None:
            raise ValueError('m_wf_id cannot be None')
        # Digits-only ids are primary keys; anything else is a wf_uuid.
        m_wf_id = str(m_wf_id)
        if m_wf_id.isdigit():
            q = q.filter(DashboardWorkflow.wf_id == m_wf_id)
        else:
            q = q.filter(DashboardWorkflow.wf_uuid == m_wf_id)
        #
        # Finish Construction of Base SQLAlchemy Query `q`
        #
        qws = self._get_max_main_workflow_state(m_wf_id=m_wf_id)
        qws = qws.subquery('main_workflowstate')
        q = q.outerjoin(qws, DashboardWorkflow.wf_id == qws.c.wf_id)
        q = q.add_entity(aliased(DashboardWorkflowstate, qws))
        try:
            record_tuple = self._get_one(q, use_cache)
            record = record_tuple[0]
            record.workflow_state = record_tuple[1]
            return record
        except NoResultFound as e:
            log.exception('Not Found: Root Workflow for given m_wf_id (%s)' % m_wf_id)
            raise e
    def _get_max_main_workflow_state(self, m_wf_id=None, mws=DashboardWorkflowstate):
        # Query selecting, per workflow, the state row whose timestamp
        # equals that workflow's maximum (i.e. the latest state).
        qmax = self._get_recent_main_workflow_state(m_wf_id, mws)
        qmax = qmax.subquery('max_timestamp')
        q = self.session.query(mws)
        q = q.join(qmax, and_(mws.wf_id == qmax.c.wf_id,
                              mws.timestamp == qmax.c.max_time))
        return q
    def _get_recent_main_workflow_state(self, m_wf_id=None, mws=DashboardWorkflowstate):
        # Query of (wf_id, max(timestamp)) pairs, optionally restricted
        # to a single workflow id.
        q = self.session.query(mws.wf_id)
        q = q.add_column(func.max(mws.timestamp).label('max_time'))
        if m_wf_id:
            log.debug('filter on m_wf_id')
            q = q.filter(mws.wf_id == m_wf_id)
        q = q.group_by(mws.wf_id)
        return q
class StampedeWorkflowQueries(WorkflowQueries):
def wf_uuid_to_wf_id(self, wf_id):
if wf_id is None:
raise ValueError('wf_id cannot be None')
wf_id = str(wf_id)
if not wf_id.isdigit():
q = self.session.query(Workflow.wf_id)
q = q.filter(Workflow.wf_uuid == wf_id)
try:
q = self._get_one(q, True, timeout=600)
wf_id = q.wf_id
except NoResultFound, e:
raise e
return wf_id
# Workflow
def get_workflows(self, m_wf_id, start_index=None, max_results=None, query=None, order=None, use_cache=False,
**kwargs):
"""
Returns a collection of the Workflow objects.
:param start_index: Return results starting from record `start_index`
:param max_results: Return a maximum of `max_results` records
:param query: Filtering criteria
:param order: Sorting criteria
:param use_cache: If available, use cached results
:return: Collection of Workflow objects
"""
m_wf_id = self.wf_uuid_to_wf_id(m_wf_id)
#
# Construct SQLAlchemy Query `q` to count.
#
q = self.session.query(Workflow)
q = q.filter(Workflow.root_wf_id == m_wf_id)
total_records = total_filtered = self._get_count(q, use_cache)
if total_records == 0:
return PagedResponse([], 0, 0)
#
# Construct SQLAlchemy Query `q` to filter.
#
if query:
q = self._evaluate_query(q, query, WorkflowResource())
total_filtered = self._get_count(q, use_cache)
if total_filtered == 0 or (start_index and start_index >= total_filtered):
log.debug('total_filtered is 0 or start_index >= total_filtered')
return PagedResponse([], total_records, total_filtered)
#
# Construct SQLAlchemy Query `q` to sort
#
if order:
q = self._add_ordering(q, order, WorkflowResource())
#
# Construct SQLAlchemy Query `q` to paginate.
#
q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
records = self._get_all(q, use_cache)
return PagedResponse(records, total_records, total_filtered)
def get_workflow(self, wf_id, use_cache=True):
"""
Returns a Workflow object identified by wf_id.
:param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
:return: Workflow object
"""
wf_id = self.wf_uuid_to_wf_id(wf_id)
q = self.session.query(Workflow)
q = q.filter(Workflow.wf_id == wf_id)
try:
return self._get_one(q, use_cache)
except NoResultFound, e:
raise e
# Workflow Meta
def get_workflow_meta(self, wf_id, start_index=None, max_results=None, query=None, order=None, use_cache=False,
**kwargs):
"""
Returns a collection of the Workflowstate objects.
:param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
:param start_index: Return results starting from record `start_index`
:param max_results: Return a maximum of `max_results` records
:param query: Filtering criteria
:param order: Sorting criteria
:param use_cache: If available, use cached results
:return: Workflow Meta collection, total records count, total filtered records count
"""
wf_id = self.wf_uuid_to_wf_id(wf_id)
#
# Construct SQLAlchemy Query `q` to count.
#
q = self.session.query(WorkflowMeta)
q = q.filter(WorkflowMeta.wf_id == wf_id)
total_records = total_filtered = self._get_count(q, use_cache)
if total_records == 0:
return PagedResponse([], 0, 0)
#
# Construct SQLAlchemy Query `q` to filter.
#
if query:
q = self._evaluate_query(q, query, WorkflowMetaResource())
total_filtered = self._get_count(q, use_cache)
if total_filtered == 0 or (start_index and start_index >= total_filtered):
log.debug('total_filtered is 0 or start_index >= total_filtered')
return PagedResponse([], total_records, total_filtered)
#
# Construct SQLAlchemy Query `q` to sort
#
if order:
q = self._add_ordering(q, order, WorkflowMetaResource())
#
# Construct SQLAlchemy Query `q` to paginate.
#
q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
records = self._get_all(q, use_cache)
return PagedResponse(records, total_records, total_filtered)
# Workflow Files
def get_workflow_files(self, wf_id, start_index=None, max_results=None, query=None, order=None,
                       use_cache=False, **kwargs):
    """
    Returns a collection of all files associated with the Workflow.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: Collection of Workflow Files
    """
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(WorkflowFiles)
    q = q.filter(WorkflowFiles.wf_id == wf_id)
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    # Inner query: distinct LFN ids that belong to this workflow, with PFN and
    # metadata tables outer-joined so the user `query` can reference them.
    # NOTE(review): the join condition `WorkflowFiles.wf_id == wf_id` does not
    # relate WorkflowFiles to RCLFN rows -- confirm this produces the
    # intended LFN set rather than a cross product.
    q_in = self.session.query(distinct(RCLFN.lfn_id).label('lfn_id'))
    q_in = q_in.join(WorkflowFiles, WorkflowFiles.wf_id == wf_id)
    q_in = q_in.outerjoin(RCPFN, RCLFN.lfn_id == RCPFN.lfn_id)
    q_in = q_in.outerjoin(RCMeta, RCLFN.lfn_id == RCMeta.lfn_id)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q_in = self._evaluate_query(q_in, query,
                                    CombinationResource(RCLFNResource(), RCPFNResource(), RCMetaResource()))
        total_filtered = self._get_count(q_in, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    # Pagination is applied to the distinct-LFN subquery, not the outer join,
    # so each page holds `max_results` LFNs regardless of PFN/meta fan-out.
    q_in = WorkflowQueries._add_pagination(q_in, start_index, max_results, total_filtered)
    #
    # Finish Construction of Base SQLAlchemy Query `q`
    #
    q_in = q_in.subquery('distinct_lfns')
    q = self.session.query(RCLFN, WorkflowFiles, RCPFN, RCMeta)
    q = q.outerjoin(WorkflowFiles, RCLFN.lfn_id == WorkflowFiles.lfn_id)
    q = q.outerjoin(RCPFN, RCLFN.lfn_id == RCPFN.lfn_id)
    q = q.outerjoin(RCMeta, RCLFN.lfn_id == RCMeta.lfn_id)
    q = q.join(q_in, RCLFN.lfn_id == q_in.c.lfn_id)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, CombinationResource(RCLFNResource(), RCPFNResource(), RCMetaResource()))
    records = self._get_all(q, use_cache)
    # Fold the flat 4-column rows into a nested JSON-style structure rooted
    # at each LFN (extras, pfns, meta attached per LFN).
    schema = OrderedDict([
        (RCLFN, 'root'),
        (WorkflowFiles, ('extras', RCLFN, None)),
        (RCPFN, ('pfns', RCLFN, OrderedSet)),
        (RCMeta, ('meta', RCLFN, OrderedSet))
    ])
    index = OrderedDict([
        (RCLFN, 0),
        (WorkflowFiles, 1),
        (RCPFN, 2),
        (RCMeta, 3)
    ])
    records = csv_to_json(records, schema, index)
    return PagedResponse(records, total_records, total_filtered)
# Workflow State
def get_workflow_state(self, wf_id, recent=False, start_index=None, max_results=None, query=None, order=None,
                       use_cache=True, **kwargs):
    """
    Returns a collection of the Workflowstate objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param recent: Get the most recent results
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: Workflow States collection, total records count, total filtered records count
    """
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    # Use shorter caching timeout
    # (workflow state changes while a workflow runs, so stale entries must
    # expire quickly).
    timeout = 5
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(Workflowstate)
    q = q.filter(Workflowstate.wf_id == wf_id)
    total_records = total_filtered = self._get_count(q, use_cache, timeout=timeout)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    if recent:
        # Restrict to the row carrying the latest timestamp per workflow.
        qws = self._get_recent_workflow_state(wf_id)
        qws = qws.subquery('max_ws')
        q = q.join(qws, and_(Workflowstate.wf_id == qws.c.wf_id, Workflowstate.timestamp == qws.c.max_time))
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    # `recent` also forces a recount because the join above shrinks the set.
    if query or recent:
        q = self._evaluate_query(q, query, WorkflowstateResource())
        total_filtered = self._get_count(q, use_cache, timeout=timeout)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, WorkflowstateResource())
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache, timeout=timeout)
    return PagedResponse(records, total_records, total_filtered)
def _get_max_workflow_state(self, wf_id=None, ws=Workflowstate):
    """
    Build a query returning the workflowstate row(s) carrying the latest
    timestamp, optionally restricted to a single workflow.
    :param wf_id: Optional workflow id to restrict the query to
    :param ws: Workflowstate model (or alias) to query against
    :return: SQLAlchemy query object
    """
    newest = self._get_recent_workflow_state(wf_id, ws).subquery('max_timestamp')
    state_q = self.session.query(ws)
    return state_q.join(newest, and_(ws.wf_id == newest.c.wf_id, ws.timestamp == newest.c.max_time))
def _get_recent_workflow_state(self, wf_id=None, ws=Workflowstate):
    """
    Build a grouped query mapping each wf_id to its latest workflowstate
    timestamp, labelled `max_time`.
    :param wf_id: Optional workflow id to restrict the query to
    :param ws: Workflowstate model (or alias) to query against
    :return: SQLAlchemy query object
    """
    latest = self.session.query(ws.wf_id)
    latest = latest.add_column(func.max(ws.timestamp).label('max_time'))
    if wf_id:
        log.debug('filter on wf_id')
        latest = latest.filter(ws.wf_id == wf_id)
    return latest.group_by(ws.wf_id)
# Job
def get_workflow_jobs(self, wf_id, start_index=None, max_results=None, query=None, order=None, use_cache=True,
                      **kwargs):
    """
    Returns a collection of the Job objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: Jobs collection, total jobs count, filtered jobs count
    """
    # Resolve a wf_uuid to the numeric wf_id (no-op when already numeric).
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(Job)
    q = q.filter(Job.wf_id == wf_id)
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, JobResource())
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, JobResource())
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    return PagedResponse(records, total_records, total_filtered)
def get_job(self, job_id, use_cache=True):
    """
    Returns a Job object identified by job_id.
    :param job_id: ID of the job
    :param use_cache: If available, use cached results
    :return: job record
    :raises ValueError: If `job_id` is None
    :raises NoResultFound: If no job matches `job_id`
    """
    if job_id is None:
        raise ValueError('job_id cannot be None')
    q = self.session.query(Job)
    q = q.filter(Job.job_id == job_id)
    try:
        # Jobs are immutable once recorded, so a long cache timeout is safe.
        return self._get_one(q, use_cache, timeout=600)
    except NoResultFound:
        # Bare `raise` preserves the traceback; the old
        # `except NoResultFound, e: raise e` was Python-2-only syntax.
        raise
# Host
def get_workflow_hosts(self, wf_id, start_index=None, max_results=None, query=None, order=None, use_cache=True,
                       **kwargs):
    """
    Returns a collection of the Host objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: hosts collection, total records count, filtered records count
    """
    # Resolve a wf_uuid to the numeric wf_id (no-op when already numeric).
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(Host)
    q = q.filter(Host.wf_id == wf_id)
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, HostResource())
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, HostResource())
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    return PagedResponse(records, total_records, total_filtered)
def get_host(self, host_id, use_cache=True):
    """
    Returns a Host object identified by host_id.
    :param host_id: Id of the host; must consist only of digits
    :param use_cache: If available, use cached results
    :return: host record
    :raises ValueError: If `host_id` is None or not numeric
    :raises NoResultFound: If no host matches `host_id`
    """
    if host_id is None or not str(host_id).isdigit():
        # Message now covers both rejected cases; it previously claimed only
        # "cannot be None" even when rejecting non-numeric input.
        raise ValueError('host_id cannot be None and must consist only of digits')
    q = self.session.query(Host)
    q = q.filter(Host.host_id == host_id)
    try:
        return self._get_one(q, use_cache)
    except NoResultFound:
        # Bare `raise` preserves the traceback; the old
        # `except NoResultFound, e: raise e` was Python-2-only syntax.
        raise
# Job State
def get_job_instance_states(self, wf_id, job_id, job_instance_id, recent=False, start_index=None, max_results=None,
                            query=None, order=None, use_cache=True, **kwargs):
    """
    Returns a collection of the JobInstanceState objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param job_id: job_id associated with the job instance states
    :param job_instance_id: job_instance_id associated with the job instance states
    :param recent: Get the most recent results
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: state record
    """
    # NOTE(review): unlike the other get_* methods, `wf_id` is used as-is
    # here (no wf_uuid_to_wf_id translation) -- confirm callers always pass
    # the numeric id.
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(Jobstate)
    q = q.join(JobInstance, JobInstance.job_instance_id == Jobstate.job_instance_id)
    q = q.join(Job, Job.job_id == JobInstance.job_id)
    q = q.filter(Job.wf_id == wf_id)
    q = q.filter(Job.job_id == job_id)
    q = q.filter(JobInstance.job_instance_id == job_instance_id)
    q = q.filter(Jobstate.job_instance_id == job_instance_id)
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    if recent:
        # Restrict to the state with the highest submit sequence number.
        qjsss = self._get_recent_job_state(job_instance_id)
        qjsss = qjsss.subquery('max_jsss')
        q = q.join(qjsss, and_(Jobstate.job_instance_id == qjsss.c.job_instance_id,
                               Jobstate.jobstate_submit_seq == qjsss.c.max_jsss))
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    # `recent` also forces a recount because the join above shrinks the set.
    if query or recent:
        q = self._evaluate_query(q, query, JobstateResource())
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, JobstateResource())
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    return PagedResponse(records, total_records, total_filtered)
def _get_recent_job_state(self, job_instance_id=None, js=Jobstate):
    """
    Build a grouped query mapping each job_instance_id to its highest
    jobstate_submit_seq, labelled `max_jsss`.
    :param job_instance_id: Optional job-instance id to restrict the query to
    :param js: Jobstate model (or alias) to query against
    :return: SQLAlchemy query object
    """
    latest = self.session.query(js.job_instance_id)
    latest = latest.add_column(func.max(js.jobstate_submit_seq).label('max_jsss'))
    if job_instance_id:
        log.debug('filter on job_instance_id')
        latest = latest.filter(js.job_instance_id == job_instance_id)
    return latest.group_by(js.job_instance_id)
# Task
def get_workflow_tasks(self, wf_id, start_index=None, max_results=None, query=None, order=None, use_cache=True,
                       **kwargs):
    """
    Returns a collection of the Task objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: Collection of Task objects
    """
    # Resolve a wf_uuid to the numeric wf_id (no-op when already numeric).
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(Task)
    q = q.filter(Task.wf_id == wf_id)
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, TaskResource())
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, TaskResource())
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    return PagedResponse(records, total_records, total_filtered)
def get_job_tasks(self, wf_id, job_id, start_index=None, max_results=None, query=None, order=None, use_cache=True,
                  **kwargs):
    """
    Returns a collection of the Task objects.
    :param wf_id: Id of the workflow owning the tasks
    :param job_id: Id of the job whose tasks are returned
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: Collection of Task objects
    """
    # NOTE(review): `wf_id` is used as-is here (no wf_uuid_to_wf_id
    # translation) -- confirm callers always pass the numeric id.
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(Task)
    q = q.filter(Task.wf_id == wf_id)
    q = q.filter(Task.job_id == job_id)
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, TaskResource())
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, TaskResource())
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    return PagedResponse(records, total_records, total_filtered)
def get_task(self, task_id, use_cache=True):
    """
    Returns a Task object identified by task_id.
    :param task_id: Id of the task
    :param use_cache: If available, use cached results
    :return: task record
    :raises ValueError: If `task_id` is None
    :raises NoResultFound: If no task matches `task_id`
    """
    # Validate before touching the session (the original built the query
    # object first, which was wasted work on invalid input).
    if task_id is None:
        raise ValueError('task_id cannot be None')
    q = self.session.query(Task)
    q = q.filter(Task.task_id == task_id)
    try:
        return self._get_one(q, use_cache)
    except NoResultFound:
        # Bare `raise` preserves the traceback; the old
        # `except NoResultFound, e: raise e` was Python-2-only syntax.
        raise
# Task Meta
def get_task_meta(self, task_id, start_index=None, max_results=None, query=None, order=None, use_cache=False,
                  **kwargs):
    """
    Returns a collection of the TaskMeta objects.
    :param task_id: Id of the task
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: TaskMeta collection, total records count, total filtered records count
    """
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(TaskMeta)
    q = q.filter(TaskMeta.task_id == task_id)
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, TaskMetaResource())
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, TaskMetaResource())
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    return PagedResponse(records, total_records, total_filtered)
# Job Instance
def get_job_instances(self, wf_id, job_id, recent=False, start_index=None, max_results=None, query=None, order=None,
                      use_cache=True, **kwargs):
    """
    Returns a collection of the JobInstance objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param job_id: job_id associated with the job instances
    :param recent: Get the most recent results
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: job-instance collection, total jobs count, filtered jobs count
    """
    # NOTE(review): `wf_id` is used as-is here (no wf_uuid_to_wf_id
    # translation) -- confirm callers always pass the numeric id.
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(JobInstance)
    q = q.join(Job, Job.job_id == JobInstance.job_id)
    q = q.filter(Job.wf_id == wf_id)
    q = q.filter(Job.job_id == job_id)
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    if recent:
        # Restrict to the instance with the highest job_submit_seq (latest retry).
        qjss = self._get_recent_job_instance(job_id)
        qjss = qjss.subquery('max_jss')
        q = q.join(qjss, and_(JobInstance.job_id == qjss.c.job_id, JobInstance.job_submit_seq == qjss.c.max_jss))
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    # `recent` also forces a recount because the join above shrinks the set.
    if query or recent:
        q = self._evaluate_query(q, query, JobInstanceResource())
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, JobInstanceResource())
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    return PagedResponse(records, total_records, total_filtered)
def get_job_instance(self, job_instance_id, use_cache=True, timeout=5):
    """
    Returns a JobInstance object identified by job_instance_id.
    :param job_instance_id: Id of the job instance
    :param use_cache: If available, use cached results
    :param timeout: Duration for which the job-instance should be cached, if exitcode is None i.e. Job is running
    :return: job-instance record
    :raises ValueError: If `job_instance_id` is None
    :raises NoResultFound: If no job instance matches `job_instance_id`
    """
    # Validate first so invalid input fails before any other work is done.
    if job_instance_id is None:
        raise ValueError('job_instance_id cannot be None')

    def timeout_duration(ji):
        # Finished instances (exitcode set) no longer change, so they may be
        # cached for 5 minutes; running ones only for the short `timeout`.
        return 300 if ji and ji.exitcode is not None else timeout

    q = self.session.query(JobInstance)
    q = q.filter(JobInstance.job_instance_id == job_instance_id)
    try:
        return self._get_one(q, use_cache, timeout=timeout_duration)
    except NoResultFound:
        # Bare `raise` preserves the traceback; the old
        # `except NoResultFound, e: raise e` was Python-2-only syntax.
        raise
def _get_recent_job_instance(self, job_id=None, ji=JobInstance):
    """
    Build a grouped query mapping each job_id to its highest
    job_submit_seq, labelled `max_jss`.
    :param job_id: Optional job id to restrict the query to
    :param ji: JobInstance model (or alias) to query against
    :return: SQLAlchemy query object
    """
    latest = self.session.query(ji.job_id)
    latest = latest.add_column(func.max(ji.job_submit_seq).label('max_jss'))
    if job_id:
        log.debug('filter on job_id')
        latest = latest.filter(ji.job_id == job_id)
    return latest.group_by(ji.job_id)
# Invocation
def get_workflow_invocations(self, wf_id, start_index=None, max_results=None, query=None, order=None,
                             use_cache=True, **kwargs):
    """
    Returns a collection of the Invocation objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: invocations record
    """
    # Resolve a wf_uuid to the numeric wf_id (no-op when already numeric).
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(Invocation)
    q = q.filter(Invocation.wf_id == wf_id)
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, InvocationResource())
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, InvocationResource())
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    return PagedResponse(records, total_records, total_filtered)
def get_job_instance_invocations(self, wf_id, job_id, job_instance_id, start_index=None, max_results=None,
                                 query=None, order=None, use_cache=True, **kwargs):
    """
    Returns a collection of the Invocation objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param job_id: Id of the job associated with the invocation
    :param job_instance_id: Id of the job instance associated with the invocation
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: invocations record
    """
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    # NOTE(review): `job_id` is accepted but only `job_instance_id` is used
    # in the filters below -- confirm this is intentional.
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    q = self.session.query(Invocation)
    q = q.filter(Invocation.wf_id == wf_id)
    q = q.filter(Invocation.job_instance_id == job_instance_id)
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, InvocationResource())
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, InvocationResource())
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    return PagedResponse(records, total_records, total_filtered)
def get_invocation(self, invocation_id, use_cache=True):
    """
    Returns a Invocation object identified by invocation_id.
    :param invocation_id: Id of the invocation; must consist only of digits
    :param use_cache: If available, use cached results
    :return: invocation record
    :raises ValueError: If `invocation_id` is None or not numeric
    :raises NoResultFound: If no invocation matches `invocation_id`
    """
    if invocation_id is None or not str(invocation_id).isdigit():
        # Message now covers both rejected cases; it previously claimed only
        # "cannot be None" even when rejecting non-numeric input.
        raise ValueError('invocation_id cannot be None and must consist only of digits')
    q = self.session.query(Invocation)
    q = q.filter(Invocation.invocation_id == invocation_id)
    try:
        return self._get_one(q, use_cache)
    except NoResultFound:
        # Bare `raise` preserves the traceback; the old
        # `except NoResultFound, e: raise e` was Python-2-only syntax.
        raise
# Views
def get_running_jobs(self, wf_id, start_index=None, max_results=None, query=None, order=None, use_cache=True,
                     **kwargs):
    """
    Returns a collection of the running Job objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: Jobs collection, total jobs count, filtered jobs count
    """
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    # stdout/stderr columns are deferred: they can be large and are not
    # needed for listings.
    q = self.session.query(Job, JobInstance).options(defer(JobInstance.stdout_text), defer(JobInstance.stderr_text))
    q = q.filter(Job.job_id == JobInstance.job_id)
    q = q.filter(Job.wf_id == wf_id)
    q = q.filter(JobInstance.exitcode == None)
    # Recent
    # NOTE(review): the subquery keeps only instances with exitcode != None
    # while the outer query requires exitcode == None -- confirm this join
    # selects the intended "currently running" rows.
    qjss = self._get_recent_job_instance()
    qjss = qjss.filter(JobInstance.exitcode != None)
    qjss = qjss.subquery('max_jss')
    q = q.join(qjss, and_(JobInstance.job_id == qjss.c.job_id, JobInstance.job_submit_seq == qjss.c.max_jss))
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, CombinationResource(JobResource(), JobInstanceResource()))
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, CombinationResource(JobResource(), JobInstanceResource()))
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    # Collapse (Job, JobInstance) tuples into Job objects with `.job_instance`.
    records = self._merge_job_instance(records)
    return PagedResponse(records, total_records, total_filtered)
def get_successful_jobs(self, wf_id, start_index=None, max_results=None, query=None, order=None, use_cache=True,
                        **kwargs):
    """
    Returns a collection of the successful Job objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: Jobs collection, total jobs count, filtered jobs count
    """
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    # stdout/stderr columns are deferred: they can be large and are not
    # needed for listings.
    q = self.session.query(Job, JobInstance).options(defer(JobInstance.stdout_text), defer(JobInstance.stderr_text))
    q = q.filter(Job.job_id == JobInstance.job_id)
    q = q.filter(Job.wf_id == wf_id)
    # Successful == finished (exitcode set) with exit status 0.
    q = q.filter(JobInstance.exitcode != None).filter(JobInstance.exitcode == 0)
    # Recent
    # Only the latest (max job_submit_seq) successful instance per job counts.
    qjss = self._get_recent_job_instance()
    qjss = qjss.filter(JobInstance.exitcode != None).filter(JobInstance.exitcode == 0)
    qjss = qjss.subquery('max_jss')
    q = q.join(qjss, and_(JobInstance.job_id == qjss.c.job_id, JobInstance.job_submit_seq == qjss.c.max_jss))
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, CombinationResource(JobResource(), JobInstanceResource()))
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, CombinationResource(JobResource(), JobInstanceResource()))
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    # Collapse (Job, JobInstance) tuples into Job objects with `.job_instance`.
    records = self._merge_job_instance(records)
    return PagedResponse(records, total_records, total_filtered)
def get_failed_jobs(self, wf_id, start_index=None, max_results=None, query=None, order=None, use_cache=True,
                    **kwargs):
    """
    Returns a collection of the failed Job objects.
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: Jobs collection, total jobs count, filtered jobs count
    """
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    # stdout/stderr columns are deferred: they can be large and are not
    # needed for listings.
    q = self.session.query(Job, JobInstance).options(defer(JobInstance.stdout_text), defer(JobInstance.stderr_text))
    q = q.filter(Job.job_id == JobInstance.job_id)
    q = q.filter(Job.wf_id == wf_id)
    # Failed == finished (exitcode set) with non-zero exit status.
    q = q.filter(JobInstance.exitcode != None).filter(JobInstance.exitcode != 0)
    # Recent
    # Only the latest (max job_submit_seq) failed instance per job counts.
    qjss = self._get_recent_job_instance()
    qjss = qjss.filter(JobInstance.exitcode != None).filter(JobInstance.exitcode != 0)
    qjss = qjss.subquery('max_jss')
    q = q.join(qjss, and_(JobInstance.job_id == qjss.c.job_id, JobInstance.job_submit_seq == qjss.c.max_jss))
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, CombinationResource(JobResource(), JobInstanceResource()))
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, CombinationResource(JobResource(), JobInstanceResource()))
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    # Collapse (Job, JobInstance) tuples into Job objects with `.job_instance`.
    records = self._merge_job_instance(records)
    return PagedResponse(records, total_records, total_filtered)
def get_failing_jobs(self, wf_id, start_index=None, max_results=None, query=None, order=None, use_cache=True,
                     **kwargs):
    """
    Returns a collection of the failing Job objects.
    Failing means: the latest finished instance of the job failed AND the job
    also has a currently running instance (exitcode is NULL).
    :param wf_id: wf_id is wf_id iff it consists only of digits, otherwise it is wf_uuid
    :param start_index: Return results starting from record `start_index`
    :param max_results: Return a maximum of `max_results` records
    :param query: Filtering criteria
    :param order: Sorting criteria
    :param use_cache: If available, use cached results
    :return: Jobs collection, total jobs count, filtered jobs count
    """
    wf_id = self.wf_uuid_to_wf_id(wf_id)
    #
    # Construct SQLAlchemy Query `q` to count.
    #
    # stdout/stderr columns are deferred: they can be large and are not
    # needed for listings.
    q = self.session.query(Job, JobInstance).options(defer(JobInstance.stdout_text), defer(JobInstance.stderr_text))
    q = q.filter(Job.wf_id == wf_id)
    q = q.filter(JobInstance.exitcode != None).filter(JobInstance.exitcode != 0)
    q = q.filter(Job.job_id == JobInstance.job_id)
    # Running
    # Aliased subquery: ids of jobs that currently have a running instance.
    j = orm.aliased(Job, name='j')
    ji = orm.aliased(JobInstance, name='ji')
    qr = self.session.query(distinct(j.job_id))
    qr = qr.filter(j.wf_id == wf_id)
    qr = qr.filter(ji.exitcode == None)
    qr = qr.filter(j.job_id == ji.job_id)
    qr = qr.subquery()
    q = q.filter(Job.job_id.in_(qr))
    # Recent
    # Keep only the latest failed instance per job (max job_submit_seq).
    qjss = self._get_recent_job_instance()
    qjss = qjss.filter(Job.wf_id == wf_id)
    qjss = qjss.filter(JobInstance.exitcode != None).filter(JobInstance.exitcode != 0)
    qjss = qjss.filter(Job.job_id == JobInstance.job_id)
    qjss = qjss.subquery('allmaxjss')
    q = q.filter(and_(JobInstance.job_id == qjss.c.job_id, JobInstance.job_submit_seq == qjss.c.max_jss))
    total_records = total_filtered = self._get_count(q, use_cache)
    if total_records == 0:
        return PagedResponse([], 0, 0)
    #
    # Construct SQLAlchemy Query `q` to filter.
    #
    if query:
        q = self._evaluate_query(q, query, CombinationResource(JobResource(), JobInstanceResource()))
        total_filtered = self._get_count(q, use_cache)
        if total_filtered == 0 or (start_index and start_index >= total_filtered):
            log.debug('total_filtered is 0 or start_index >= total_filtered')
            return PagedResponse([], total_records, total_filtered)
    #
    # Construct SQLAlchemy Query `q` to sort
    #
    if order:
        q = self._add_ordering(q, order, CombinationResource(JobResource(), JobInstanceResource()))
    #
    # Construct SQLAlchemy Query `q` to paginate.
    #
    q = WorkflowQueries._add_pagination(q, start_index, max_results, total_filtered)
    records = self._get_all(q, use_cache)
    # Collapse (Job, JobInstance) tuples into Job objects with `.job_instance`.
    records = self._merge_job_instance(records)
    return PagedResponse(records, total_records, total_filtered)
@staticmethod
def _merge_job_instance(records):
if records:
for i in range(len(records)):
new_record = records[i][0]
new_record.job_instance = records[i][1]
records[i] = new_record
return records
| gpl-3.0 |
misisnik/ExternalInterface | ENV/Lib/encodings/iso2022_jp_ext.py | 816 | 1069 | #
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
# _codecs_iso2022 is the shared C implementation for all ISO-2022 variants;
# _multibytecodec supplies the generic incremental/stream machinery.
import _codecs_iso2022, codecs
import _multibytecodec as mbc

# Stateful C codec object for the iso2022_jp_ext charset; every class below
# delegates to it.
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    # One-shot encode/decode entry points, taken directly from the C codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec  # class attribute consumed by the multibytecodec base
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec  # class attribute consumed by the multibytecodec base
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec  # class attribute consumed by the multibytecodec base
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec  # class attribute consumed by the multibytecodec base
def getregentry():
    """Build the CodecInfo registration entry for iso2022_jp_ext."""
    entry = Codec()
    return codecs.CodecInfo(
        name='iso2022_jp_ext',
        encode=entry.encode,
        decode=entry.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mit |
jumpstarter-io/horizon | openstack_dashboard/dashboards/admin/volumes/volumes/views.py | 12 | 2478 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.volumes.volumes \
import forms as volumes_forms
from openstack_dashboard.dashboards.project.volumes.volumes \
import views as volumes_views
class DetailView(volumes_views.DetailView):
    # Admin variant of the project-level volume detail view: only the
    # template and the post-action redirect target differ.
    template_name = "admin/volumes/volumes/detail.html"

    def get_redirect_url(self):
        return reverse('horizon:admin:volumes:index')
class CreateVolumeTypeView(forms.ModalFormView):
    """Modal dialog for creating a new Cinder volume type."""
    form_class = volumes_forms.CreateVolumeType
    template_name = 'admin/volumes/volumes/create_volume_type.html'
    # Stored as a URL pattern name and resolved lazily in get_success_url,
    # unlike UpdateStatusView which uses reverse_lazy directly.
    success_url = 'horizon:admin:volumes:volumes_tab'

    def get_success_url(self):
        return reverse(self.success_url)
class UpdateStatusView(forms.ModalFormView):
    """Modal dialog letting an admin force a volume into a given status."""
    form_class = volumes_forms.UpdateStatus
    template_name = 'admin/volumes/volumes/update_status.html'
    success_url = reverse_lazy('horizon:admin:volumes:index')

    def get_context_data(self, **kwargs):
        context = super(UpdateStatusView, self).get_context_data(**kwargs)
        context["volume_id"] = self.kwargs['volume_id']
        return context

    @memoized.memoized_method
    def get_data(self):
        # Memoized: the volume is fetched from Cinder at most once per view.
        try:
            volume_id = self.kwargs['volume_id']
            volume = cinder.volume_get(self.request, volume_id)
        except Exception:
            # NOTE(review): on failure `volume` is unbound at the return
            # below -- presumably exceptions.handle() with redirect= raises
            # and never falls through; confirm.
            exceptions.handle(self.request,
                              _('Unable to retrieve volume details.'),
                              redirect=self.success_url)
        return volume

    def get_initial(self):
        volume = self.get_data()
        return {'volume_id': self.kwargs["volume_id"],
                'status': volume.status}
| apache-2.0 |
simonmulser/bitcoin | test/functional/test_framework/bignum.py | 59 | 1914 | #!/usr/bin/env python3
#
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Big number routines.
This file is copied from python-bitcoinlib.
"""
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
    """Return the byte length of v's big-endian magnitude.

    When have_ext is true, one extra byte is counted for the MPI sign
    extension byte.
    """
    return (v.bit_length() + 7) // 8 + (1 if have_ext else 0)
def bn2bin(v):
    """Serialize a non-negative integer to its big-endian magnitude bytes.

    Returns a bytearray; zero serializes to an empty bytearray.
    """
    length = (v.bit_length() + 7) // 8
    return bytearray(v.to_bytes(length, 'big'))
def bin2bn(s):
    """Parse big-endian magnitude bytes into a non-negative integer."""
    return int.from_bytes(bytes(s), 'big')
def bn2mpi(v):
    """Encode integer v in OpenSSL MPI format.

    Layout: 4-byte big-endian body length, then the big-endian magnitude;
    a leading zero "extension" byte is inserted when the magnitude's top
    bit is set, and the sign is stored in the body's top bit.
    """
    magnitude = abs(v)
    bits = magnitude.bit_length()
    body = bytearray(magnitude.to_bytes((bits + 7) // 8, 'big'))
    # Extension byte needed when the magnitude exactly fills its bytes,
    # i.e. its most significant bit would collide with the sign bit.
    if bits > 0 and bits % 8 == 0:
        body.insert(0, 0)
    if v < 0:
        body[0] |= 0x80
    return struct.pack(b">I", len(body)) + body
def mpi2bn(s):
    """Decode an OpenSSL-MPI-encoded integer.

    Returns None when the buffer is truncated or its length prefix does
    not match the payload size.
    """
    if len(s) < 4:
        return None
    (claimed_len,) = struct.unpack(b">I", bytes(s[:4]))
    if len(s) != claimed_len + 4:
        return None
    if claimed_len == 0:
        return 0
    payload = bytearray(s[4:])
    # Sign lives in the top bit of the first payload byte.
    negative = bool(payload[0] & 0x80)
    payload[0] &= 0x7f
    value = int.from_bytes(payload, 'big')
    return -value if negative else value
# bitcoin-specific little endian format, with implicit size
def mpi2vch(s):
    """Convert an MPI buffer to bitcoin's little-endian value format:
    drop the 4-byte size prefix and reverse the remaining bytes."""
    return s[4:][::-1]
def bn2vch(v):
    """Encode integer v in bitcoin's implicit-size little-endian format.

    Equivalent to stripping the size prefix from bn2mpi(v) and reversing
    the bytes; implemented directly so it stands alone.
    """
    magnitude = abs(v)
    bits = magnitude.bit_length()
    body = bytearray(magnitude.to_bytes((bits + 7) // 8, 'big'))
    # Sign-extension byte when the magnitude's top bit is set.
    if bits > 0 and bits % 8 == 0:
        body.insert(0, 0)
    if v < 0:
        body[0] |= 0x80
    body.reverse()
    return bytes(body)
def vch2mpi(s):
    """Convert a bitcoin little-endian value back to MPI format:
    prepend the 4-byte big-endian size and reverse the bytes."""
    return struct.pack(b">I", len(s)) + s[::-1]
def vch2bn(s):
    """Decode a bitcoin little-endian value into a signed integer.

    Equivalent to mpi2bn(vch2mpi(s)); implemented directly so it stands
    alone.
    """
    if not s:
        return 0
    payload = bytearray(s[::-1])
    # After reversal the sign bit sits in the first (most significant) byte.
    negative = bool(payload[0] & 0x80)
    payload[0] &= 0x7f
    value = int.from_bytes(payload, 'big')
    return -value if negative else value
| mit |
sudosurootdev/external_chromium_org | third_party/protobuf/python/google/protobuf/internal/cpp_message.py | 223 | 23539 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains helper functions used to create protocol message classes from
Descriptor objects at runtime backed by the protocol buffer C++ API.
"""
__author__ = 'petar@google.com (Petar Petrov)'
import copy_reg
import operator
from google.protobuf.internal import _net_proto2___python
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import message
_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED
_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL
_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE
_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE
def GetDescriptorPool():
  """Creates a new DescriptorPool C++ object."""
  return _net_proto2___python.NewCDescriptorPool()


# Process-wide descriptor pool shared by the lookup helpers below.
_pool = GetDescriptorPool()


def GetFieldDescriptor(full_field_name):
  """Searches for a field descriptor given a full field name."""
  return _pool.FindFieldByName(full_field_name)


def BuildFile(content):
  """Registers a new proto file in the underlying C++ descriptor pool."""
  _net_proto2___python.BuildFile(content)


def GetExtensionDescriptor(full_extension_name):
  """Searches for extension descriptor given a full field name."""
  return _pool.FindExtensionByName(full_extension_name)


def NewCMessage(full_message_name):
  """Creates a new C++ protocol message by its name."""
  return _net_proto2___python.NewCMessage(full_message_name)
def ScalarProperty(cdescriptor):
  """Build a property that reads/writes a scalar field through the
  instance's underlying C message (``self._cmsg``)."""
  def _get(self):
    return self._cmsg.GetScalar(cdescriptor)

  def _set(self, value):
    self._cmsg.SetScalar(cdescriptor, value)

  return property(_get, _set)
def CompositeProperty(cdescriptor, message_type):
  """Returns a Python property the given composite field."""
  def Getter(self):
    # Lazily wrap the C++ sub-message and cache the Python wrapper so
    # repeated attribute access returns the same object.
    sub_message = self._composite_fields.get(cdescriptor.name, None)
    if sub_message is None:
      cmessage = self._cmsg.NewSubMessage(cdescriptor)
      sub_message = message_type._concrete_class(__cmessage=cmessage)
      self._composite_fields[cdescriptor.name] = sub_message
    return sub_message

  # Read-only: composite fields cannot be assigned, only mutated in place.
  return property(Getter)
class RepeatedScalarContainer(object):
  """Container for repeated scalar fields.

  Most mutators follow a read-modify-write pattern: fetch the full value
  list from the C message, modify it in Python, then assign it back.
  """

  __slots__ = ['_message', '_cfield_descriptor', '_cmsg']

  def __init__(self, msg, cfield_descriptor):
    self._message = msg
    self._cmsg = msg._cmsg
    self._cfield_descriptor = cfield_descriptor

  def append(self, value):
    self._cmsg.AddRepeatedScalar(
        self._cfield_descriptor, value)

  def extend(self, sequence):
    for element in sequence:
      self.append(element)

  def insert(self, key, value):
    values = self[slice(None, None, None)]
    values.insert(key, value)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)

  def remove(self, value):
    values = self[slice(None, None, None)]
    values.remove(value)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)

  def __setitem__(self, key, value):
    values = self[slice(None, None, None)]
    values[key] = value
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)

  def __getitem__(self, key):
    return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key)

  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key)

  def __len__(self):
    return len(self[slice(None, None, None)])

  def __eq__(self, other):
    if self is other:
      return True
    # Python 2 only: operator.isSequenceType was removed in Python 3.
    if not operator.isSequenceType(other):
      raise TypeError(
          'Can only compare repeated scalar fields against sequences.')
    # We are presumably comparing against some other sequence type.
    return other == self[slice(None, None, None)]

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    raise TypeError('unhashable object')

  def sort(self, *args, **kwargs):
    # Maintain compatibility with the previous interface: 'sort_function'
    # is accepted as an alias for the (Python 2) 'cmp' keyword.
    if 'sort_function' in kwargs:
      kwargs['cmp'] = kwargs.pop('sort_function')
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor,
                                    sorted(self, *args, **kwargs))
def RepeatedScalarProperty(cdescriptor):
  """Returns a Python property the given repeated scalar field."""
  def Getter(self):
    # One cached container per field name, stored on the owning message.
    container = self._composite_fields.get(cdescriptor.name, None)
    if container is None:
      container = RepeatedScalarContainer(self, cdescriptor)
      self._composite_fields[cdescriptor.name] = container
    return container

  def Setter(self, new_value):
    # Repeated fields may only be mutated through the container.
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)

  doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
  return property(Getter, Setter, doc=doc)
class RepeatedCompositeContainer(object):
  """Container for repeated composite fields."""

  __slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg']

  def __init__(self, msg, cfield_descriptor, subclass):
    self._message = msg
    self._cmsg = msg._cmsg
    self._subclass = subclass
    self._cfield_descriptor = cfield_descriptor

  def add(self, **kwargs):
    # The owning message keeps the C++ storage alive; pass it as __owner.
    cmessage = self._cmsg.AddMessage(self._cfield_descriptor)
    return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs)

  def extend(self, elem_seq):
    """Extends by appending the given sequence of elements of the same type
    as this one, copying each individual message.
    """
    for message in elem_seq:
      self.add().MergeFrom(message)

  def remove(self, value):
    # TODO(protocol-devel): This is inefficient as it needs to generate a
    # message pointer for each message only to do index().  Move this to a C++
    # extension function.
    self.__delitem__(self[slice(None, None, None)].index(value))

  def MergeFrom(self, other):
    for message in other[:]:
      self.add().MergeFrom(message)

  def __getitem__(self, key):
    cmessages = self._cmsg.GetRepeatedMessage(
        self._cfield_descriptor, key)
    subclass = self._subclass
    # A slice yields a list of C messages; a plain index yields one.
    if not isinstance(cmessages, list):
      return subclass(__cmessage=cmessages, __owner=self._message)
    return [subclass(__cmessage=m, __owner=self._message) for m in cmessages]

  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(
        self._cfield_descriptor, key)

  def __len__(self):
    return self._cmsg.FieldLength(self._cfield_descriptor)

  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      raise TypeError('Can only compare repeated composite fields against '
                      'other repeated composite fields.')
    messages = self[slice(None, None, None)]
    other_messages = other[slice(None, None, None)]
    return messages == other_messages

  def __hash__(self):
    raise TypeError('unhashable object')

  def sort(self, cmp=None, key=None, reverse=False, **kwargs):
    # Maintain compatibility with the old interface.
    if cmp is None and 'sort_function' in kwargs:
      cmp = kwargs.pop('sort_function')

    # The cmp function, if provided, is passed the results of the key function,
    # so we only need to wrap one of them.
    if key is None:
      index_key = self.__getitem__
    else:
      index_key = lambda i: key(self[i])

    # Sort the list of current indexes by the underlying object.
    # Python 2 only: range() returns a list here, and list.sort(cmp=...)
    # does not exist in Python 3.
    indexes = range(len(self))
    indexes.sort(cmp=cmp, key=index_key, reverse=reverse)

    # Apply the transposition by swapping elements in the C message.
    for dest, src in enumerate(indexes):
      if dest == src:
        continue
      self._cmsg.SwapRepeatedFieldElements(self._cfield_descriptor, dest, src)
      # Don't swap the same value twice.
      indexes[src] = src
def RepeatedCompositeProperty(cdescriptor, message_type):
  """Returns a Python property for the given repeated composite field."""
  def Getter(self):
    # One cached container per field name, stored on the owning message.
    container = self._composite_fields.get(cdescriptor.name, None)
    if container is None:
      container = RepeatedCompositeContainer(
          self, cdescriptor, message_type._concrete_class)
      self._composite_fields[cdescriptor.name] = container
    return container

  def Setter(self, new_value):
    # Repeated fields may only be mutated through the container.
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)

  doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
  return property(Getter, Setter, doc=doc)
class ExtensionDict(object):
  """Extension dictionary added to each protocol message."""

  def __init__(self, msg):
    self._message = msg
    self._cmsg = msg._cmsg
    # Cache of Python wrappers for composite/repeated extension values.
    self._values = {}

  def __setitem__(self, extension, value):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    # Direct assignment is only valid for optional scalar extensions.
    if (cdescriptor.label != _LABEL_OPTIONAL or
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      raise TypeError('Extension %r is repeated and/or a composite type.' % (
          extension.full_name,))
    self._cmsg.SetScalar(cdescriptor, value)
    self._values[extension] = value

  def __getitem__(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type != _CPPTYPE_MESSAGE):
      return self._cmsg.GetScalar(cdescriptor)
    # Composite/repeated extensions get a cached Python wrapper.
    ext = self._values.get(extension, None)
    if ext is not None:
      return ext
    ext = self._CreateNewHandle(extension)
    self._values[extension] = ext
    return ext

  def ClearExtension(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    self._cmsg.ClearFieldByDescriptor(extension._cdescriptor)
    if extension in self._values:
      del self._values[extension]

  def HasExtension(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    return self._cmsg.HasFieldByDescriptor(extension._cdescriptor)

  def _FindExtensionByName(self, name):
    """Tries to find a known extension with the specified name.

    Args:
      name: Extension full name.

    Returns:
      Extension field descriptor.
    """
    return self._message._extensions_by_name.get(name, None)

  def _CreateNewHandle(self, extension):
    """Builds the Python wrapper for a composite or repeated extension."""
    cdescriptor = extension._cdescriptor
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      cmessage = self._cmsg.NewSubMessage(cdescriptor)
      return extension.message_type._concrete_class(__cmessage=cmessage)

    if cdescriptor.label == _LABEL_REPEATED:
      if cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        return RepeatedCompositeContainer(
            self._message, cdescriptor, extension.message_type._concrete_class)
      else:
        return RepeatedScalarContainer(self._message, cdescriptor)
    # This shouldn't happen!
    assert False
    return None
def NewMessage(bases, message_descriptor, dictionary):
  """Creates a new protocol message *class* (metaclass hook).

  Populates the class dictionary with nested-extension attributes, enum
  values, and the field descriptor map before the class is created.
  """
  _AddClassAttributesForNestedExtensions(message_descriptor, dictionary)
  _AddEnumValues(message_descriptor, dictionary)
  _AddDescriptors(message_descriptor, dictionary)
  return bases
def InitMessage(message_descriptor, cls):
  """Constructs a new message instance (called before instance's __init__)."""
  cls._extensions_by_name = {}
  _AddInitMethod(message_descriptor, cls)
  _AddMessageMethods(message_descriptor, cls)
  _AddPropertiesForExtensions(message_descriptor, cls)
  # Python 2 pickle support (copy_reg was renamed copyreg in Python 3).
  copy_reg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
def _AddDescriptors(message_descriptor, dictionary):
  """Sets up a new protocol message class dictionary.

  Args:
    message_descriptor: A Descriptor instance describing this message type.
    dictionary: Class dictionary to which we'll add a '__slots__' entry.
  """
  dictionary['__descriptors'] = {}
  for field in message_descriptor.fields:
    dictionary['__descriptors'][field.name] = GetFieldDescriptor(
        field.full_name)

  # One slot per proto field plus the internal bookkeeping attributes.
  dictionary['__slots__'] = list(dictionary['__descriptors'].iterkeys()) + [
      '_cmsg', '_owner', '_composite_fields', 'Extensions', '_HACK_REFCOUNTS']
def _AddEnumValues(message_descriptor, dictionary):
  """Sets class-level attributes for all enum fields defined in this message.

  Args:
    message_descriptor: Descriptor object for this message type.
    dictionary: Class dictionary that should be populated.
  """
  for enum_type in message_descriptor.enum_types:
    dictionary[enum_type.name] = enum_type_wrapper.EnumTypeWrapper(enum_type)
    # Also expose each enum value as a bare class constant.
    for enum_value in enum_type.values:
      dictionary[enum_value.name] = enum_value.number
def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
  """Adds class attributes for the nested extensions."""
  extension_dict = message_descriptor.extensions_by_name
  for extension_name, extension_field in extension_dict.iteritems():
    # Extension names must not collide with anything already in the class.
    assert extension_name not in dictionary
    dictionary[extension_name] = extension_field
def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls."""

  # Create and attach message field properties to the message class.
  # This can be done just once per message class, since property setters and
  # getters are passed the message instance.
  # This makes message instantiation extremely fast, and at the same time it
  # doesn't require the creation of property objects for each message instance,
  # which saves a lot of memory.
  for field in message_descriptor.fields:
    field_cdescriptor = cls.__descriptors[field.name]
    if field.label == _LABEL_REPEATED:
      if field.cpp_type == _CPPTYPE_MESSAGE:
        value = RepeatedCompositeProperty(field_cdescriptor, field.message_type)
      else:
        value = RepeatedScalarProperty(field_cdescriptor)
    elif field.cpp_type == _CPPTYPE_MESSAGE:
      value = CompositeProperty(field_cdescriptor, field.message_type)
    else:
      value = ScalarProperty(field_cdescriptor)
    setattr(cls, field.name, value)

    # Attach a constant with the field number.
    constant_name = field.name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, field.number)

  def Init(self, **kwargs):
    """Message constructor."""
    # __cmessage/__owner are internal kwargs used when wrapping an existing
    # C++ message rather than allocating a fresh one.
    cmessage = kwargs.pop('__cmessage', None)
    if cmessage:
      self._cmsg = cmessage
    else:
      self._cmsg = NewCMessage(message_descriptor.full_name)

    # Keep a reference to the owner, as the owner keeps a reference to the
    # underlying protocol buffer message.
    owner = kwargs.pop('__owner', None)
    if owner:
      self._owner = owner

    if message_descriptor.is_extendable:
      self.Extensions = ExtensionDict(self)
    else:
      # Reference counting in the C++ code is broken and depends on
      # the Extensions reference to keep this object alive during unit
      # tests (see b/4856052).  Remove this once b/4945904 is fixed.
      self._HACK_REFCOUNTS = self
    self._composite_fields = {}

    # Remaining kwargs are initial field values.
    for field_name, field_value in kwargs.iteritems():
      field_cdescriptor = self.__descriptors.get(field_name, None)
      if not field_cdescriptor:
        raise ValueError('Protocol message has no "%s" field.' % field_name)
      if field_cdescriptor.label == _LABEL_REPEATED:
        if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
          field_name = getattr(self, field_name)
          for val in field_value:
            field_name.add().MergeFrom(val)
        else:
          getattr(self, field_name).extend(field_value)
      elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        getattr(self, field_name).MergeFrom(field_value)
      else:
        setattr(self, field_name, field_value)

  Init.__module__ = None
  Init.__doc__ = None
  cls.__init__ = Init
def _IsMessageSetExtension(field):
  """Checks if a field is a message set extension.

  True only for an optional message-typed extension whose containing type
  uses the MessageSet wire format and whose scope matches its message type.
  """
  return (field.is_extension and
          field.containing_type.has_options and
          field.containing_type.GetOptions().message_set_wire_format and
          field.type == _TYPE_MESSAGE and
          field.message_type == field.extension_scope and
          field.label == _LABEL_OPTIONAL)
def _AddMessageMethods(message_descriptor, cls):
  """Adds the methods to a protocol message class.

  Every function defined in this scope is attached to cls by the
  locals() loop near the end, so the local function names below become
  the public message API.
  """
  if message_descriptor.is_extendable:

    def ClearExtension(self, extension):
      self.Extensions.ClearExtension(extension)

    def HasExtension(self, extension):
      return self.Extensions.HasExtension(extension)

  def HasField(self, field_name):
    return self._cmsg.HasField(field_name)

  def ClearField(self, field_name):
    # If a cached Python wrapper exists for a singular composite child,
    # detach it first so it no longer aliases the C++ storage being cleared.
    child_cmessage = None
    if field_name in self._composite_fields:
      child_field = self._composite_fields[field_name]
      del self._composite_fields[field_name]

      child_cdescriptor = self.__descriptors[field_name]
      # TODO(anuraag): Support clearing repeated message fields as well.
      if (child_cdescriptor.label != _LABEL_REPEATED and
          child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
        child_field._owner = None
        child_cmessage = child_field._cmsg

    if child_cmessage is not None:
      self._cmsg.ClearField(field_name, child_cmessage)
    else:
      self._cmsg.ClearField(field_name)

  def Clear(self):
    cmessages_to_release = []
    for field_name, child_field in self._composite_fields.iteritems():
      child_cdescriptor = self.__descriptors[field_name]
      # TODO(anuraag): Support clearing repeated message fields as well.
      if (child_cdescriptor.label != _LABEL_REPEATED and
          child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
        child_field._owner = None
        cmessages_to_release.append((child_cdescriptor, child_field._cmsg))
    self._composite_fields.clear()
    self._cmsg.Clear(cmessages_to_release)

  def IsInitialized(self, errors=None):
    if self._cmsg.IsInitialized():
      return True
    if errors is not None:
      errors.extend(self.FindInitializationErrors());
    return False

  def SerializeToString(self):
    if not self.IsInitialized():
      raise message.EncodeError(
          'Message %s is missing required fields: %s' % (
              self._cmsg.full_name, ','.join(self.FindInitializationErrors())))
    return self._cmsg.SerializeToString()

  def SerializePartialToString(self):
    return self._cmsg.SerializePartialToString()

  def ParseFromString(self, serialized):
    self.Clear()
    self.MergeFromString(serialized)

  def MergeFromString(self, serialized):
    # The C++ parser returns a negative size on failure.
    byte_size = self._cmsg.MergeFromString(serialized)
    if byte_size < 0:
      raise message.DecodeError('Unable to merge from string.')
    return byte_size

  def MergeFrom(self, msg):
    if not isinstance(msg, cls):
      raise TypeError(
          "Parameter to MergeFrom() must be instance of same class: "
          "expected %s got %s." % (cls.__name__, type(msg).__name__))
    self._cmsg.MergeFrom(msg._cmsg)

  def CopyFrom(self, msg):
    self._cmsg.CopyFrom(msg._cmsg)

  def ByteSize(self):
    return self._cmsg.ByteSize()

  def SetInParent(self):
    return self._cmsg.SetInParent()

  def ListFields(self):
    all_fields = []
    field_list = self._cmsg.ListFields()
    fields_by_name = cls.DESCRIPTOR.fields_by_name
    for is_extension, field_name in field_list:
      if is_extension:
        extension = cls._extensions_by_name[field_name]
        all_fields.append((extension, self.Extensions[extension]))
      else:
        field_descriptor = fields_by_name[field_name]
        all_fields.append(
            (field_descriptor, getattr(self, field_name)))
    # Sort by field number, matching the pure-Python implementation.
    all_fields.sort(key=lambda item: item[0].number)
    return all_fields

  def FindInitializationErrors(self):
    return self._cmsg.FindInitializationErrors()

  def __str__(self):
    return self._cmsg.DebugString()

  def __eq__(self, other):
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      return False
    return self.ListFields() == other.ListFields()

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    raise TypeError('unhashable object')

  def __unicode__(self):
    # Lazy import to prevent circular import when text_format imports this file.
    from google.protobuf import text_format
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')

  # Attach the local methods to the message class.  (Note this also copies
  # the locals not in the exclusion tuple, e.g. this function's parameters.)
  for key, value in locals().copy().iteritems():
    if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
      setattr(cls, key, value)

  # Static methods:

  def RegisterExtension(extension_handle):
    extension_handle.containing_type = cls.DESCRIPTOR
    cls._extensions_by_name[extension_handle.full_name] = extension_handle

    if _IsMessageSetExtension(extension_handle):
      # MessageSet extension.  Also register under type name.
      cls._extensions_by_name[
          extension_handle.message_type.full_name] = extension_handle
  cls.RegisterExtension = staticmethod(RegisterExtension)

  def FromString(string):
    msg = cls()
    msg.MergeFromString(string)
    return msg
  cls.FromString = staticmethod(FromString)
def _AddPropertiesForExtensions(message_descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  extension_dict = message_descriptor.extensions_by_name
  for extension_name, extension_field in extension_dict.iteritems():
    # Only the *_FIELD_NUMBER constant is attached here; extension values
    # themselves are accessed through the Extensions dict.
    constant_name = extension_name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, extension_field.number)
| bsd-3-clause |
CENDARI/editorsnotes | cendari/forms.py | 1 | 9303 | from django import forms
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
class ImportFromJigsawForm(forms.Form):
    """Upload form for importing a Jigsaw export file, with optional topics."""
    file = forms.FileField(label='Select a Jigsaw file')
    topics = forms.CharField(max_length=100, required=False)

    def _clean(self):
        """Validate the upload and derive the storage path.

        Bug fix: the cleaned_data key for the FileField declared above is
        'file', not 'filename'; the old check always early-returned, so the
        upload path was never built.
        """
        super(ImportFromJigsawForm, self).clean()
        upload_to = 'jigsaw'
        if 'file' not in self.cleaned_data:
            return self.cleaned_data
        upload_to += self.cleaned_data['file'].name
        # print() call form works under both Python 2 and Python 3.
        print("Finishing cleaning jigsaw with path %s" % upload_to)
        return self.cleaned_data
from django.utils.translation import ugettext, ugettext_lazy as _
import django.contrib.auth.forms
from django.forms.models import (
BaseModelFormSet, ModelForm, modelformset_factory, ValidationError)
from editorsnotes.main.models import (
User, Project, ProjectInvitation, ProjectRole)
from editorsnotes.settings import AUTHENTICATION_BACKENDS
import cendari.utils
# True when logins are delegated to an external (REMOTE_USER) backend; the
# forms below hide password fields in that case.
has_remote_auth = 'django.contrib.auth.backends.RemoteUserBackend' in AUTHENTICATION_BACKENDS

# Permission bootstrap: Django auto-creates add/change/delete permissions,
# but the custom 'view_project' permission must be created on first import.
content_type = ContentType.objects.get_for_model(Project)
if not Permission.objects.filter(content_type=content_type, codename='view_project'):
    obj = Permission.objects.create(content_type=content_type, codename='view_project')
    obj.save()

# Module-level handles to the four Project permissions toggled by the
# _add/_remove helpers below.
_permission_add = Permission.objects.get(content_type=content_type, codename='add_project')
_permission_chg = Permission.objects.get(content_type=content_type, codename='change_project')
_permission_del = Permission.objects.get(content_type=content_type, codename='delete_project')
_permission_view = Permission.objects.get(content_type=content_type, codename='view_project')
def _add_project_perms(user):
    """Grant the user any of the four Project permissions they lack."""
    wanted = (
        ('main.add_project', _permission_add),
        ('main.change_project', _permission_chg),
        ('main.delete_project', _permission_del),
        ('main.view_project', _permission_view),
    )
    for perm_name, permission in wanted:
        if not user.has_perm(perm_name):
            user.user_permissions.add(permission)
def _remove_project_perms(user):
    """Revoke each of the four Project permissions the user currently has."""
    granted = (
        ('main.add_project', _permission_add),
        ('main.change_project', _permission_chg),
        ('main.delete_project', _permission_del),
        ('main.view_project', _permission_view),
    )
    for perm_name, permission in granted:
        if user.has_perm(perm_name):
            user.user_permissions.remove(permission)
class UserCreationForm(ModelForm):
    """
    Form for creating a user
    """
    # Password fields only appear when authentication is handled locally
    # (no RemoteUserBackend configured).
    if not has_remote_auth:
        password1 = forms.CharField(label=_("Password"),
                                    widget=forms.PasswordInput)
        password2 = forms.CharField(label=_("Password confirmation"),
                                    widget=forms.PasswordInput,
                                    help_text=_("Enter the same password as above, for verification."))
    is_project_creator = forms.BooleanField(required=False, label=_("Can create projects"), help_text="The user (not necessarily superuser) can create/edit projects and manage access privileges on his own projects", widget=forms.CheckboxInput)

    class Meta:
        model = User
        fields = ("username","first_name","last_name","email","groups","is_superuser","is_staff","is_project_creator","is_active")

    def save(self, commit=True):
        # NOTE(review): `commit` is accepted but ignored -- the user is saved
        # unconditionally.  Also, password2 is never compared to password1
        # here; presumably that validation happens elsewhere -- confirm.
        user = super(UserCreationForm, self).save()
        if not has_remote_auth:
            user.set_password(self.cleaned_data["password1"])
        if self.cleaned_data["is_project_creator"]:
            # Project creators need staff status plus the project permissions.
            user.is_staff = True
            _add_project_perms(user)
        user.save()
        return user
class UserChangeForm(ModelForm):
    """Admin form for editing an existing user."""
    username = forms.RegexField(
        label=_("Username"), max_length=30, regex=r"^[\w.@+-]+$",
        help_text=_("Required. 30 characters or fewer. Letters, digits and "
                    "@/./+/-/_ only."),
        error_messages={
            'invalid': _("This value may contain only letters, numbers and "
                         "@/./+/-/_ characters.")})
    # The (read-only) password hash field only exists for local auth.
    if not has_remote_auth:
        password = django.contrib.auth.forms.ReadOnlyPasswordHashField(
            label=_("Password"),
            help_text=_("Raw passwords are not stored, so there is no way to see "
                        "this user's password, but you can change the password "
                        "using <a href=\"password/\">this form</a>."))
    is_project_creator = forms.BooleanField(
        required=False,
        label=_("Can create projects"),
        help_text="The user (not necessarily superuser) can create/edit projects and manage access privileges on his own projects",
        widget=forms.CheckboxInput)

    class Meta:
        model = User
        # 'password' is only part of the form when local auth is enabled.
        if not has_remote_auth:
            fields = ("username","password","first_name","last_name","email","groups","is_superuser","is_staff","is_project_creator","is_active")
        else:
            fields = ("username","first_name","last_name","email","groups","is_superuser","is_staff","is_project_creator","is_active")

    def __init__(self, *args, **kwargs):
        super(UserChangeForm, self).__init__(*args, **kwargs)
        # Seed the checkbox from the model's current permission state.
        self.fields['is_project_creator'].initial = self.instance.is_project_creator()
        f = self.fields.get('user_permissions', None)
        if f is not None:
            f.queryset = f.queryset.select_related('content_type')

    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]

    def save(self, commit=True):
        # NOTE(review): `commit` is accepted but ignored -- the user is
        # saved unconditionally.
        user = super(UserChangeForm, self).save()
        if self.cleaned_data["is_project_creator"]:
            user.is_staff = True
            _add_project_perms(user)
        else:
            #user.is_staff = False
            _remove_project_perms(user)
        user.save()
        return user
class ProjectCreationForm(ModelForm):
    """Admin form for creating a Project and assigning its editors."""
    editors = forms.MultipleChoiceField( widget=forms.CheckboxSelectMultiple,
                                         choices=[])

    class Meta:
        model = Project
        fields = ('name', 'slug', 'image', 'description', 'default_license','editors')

    def __init__(self, *args, **kwargs):
        # The creating user is injected by the admin site and must be
        # removed before ModelForm.__init__ sees the kwargs.
        user = kwargs['user']
        del kwargs['user']
        super(ProjectCreationForm, self).__init__(*args, **kwargs)
        project = self.instance
        # NOTE(review): the comprehension variable `user` shadows (and under
        # Python 2 scoping, clobbers) the `user` popped from kwargs above.
        owners = [user for role in project.roles.filter(role='Owner') for user in role.group.user_set.all()]
        users = [(u.username, "{} {}".format(u.first_name,u.last_name) if u.last_name else u.username) for u in User.objects.all() if u not in owners]
        # NOTE(review): `users` holds (username, label) tuples while `owners`
        # holds User objects, so this second filter is presumably a no-op.
        self.fields["editors"].choices = [u for u in users if u not in owners]
        current_members = [u.username for u in project.members if u not in owners]
        self.fields["editors"].initial = current_members

    def save(self, commit=True):
        project = super(ProjectCreationForm, self).save()
        # Diff the previous member list against the selected editors and
        # sync the project's Editor role accordingly.
        previous_members = [u.username for u in project.members]
        current_members = self.cleaned_data["editors"]
        to_remove = [e for e in previous_members if e not in current_members]
        to_add = [e for e in current_members if e not in previous_members]
        role = project.roles.get(role='Editor')
        for user in User.objects.filter(username__in=to_add):
            role.users.add(user)
        for user in User.objects.filter(username__in=to_remove):
            role.users.remove(user)
        return project
class ProjectChangeForm(ModelForm):
    """Change form for a Project; manages membership of the Editor role."""
    # Checkbox list of candidate editors; filled per-instance in __init__.
    editors = forms.MultipleChoiceField(
        widget=forms.CheckboxSelectMultiple, choices=[])

    class Meta:
        model = Project
        fields = ('name', 'slug', 'image', 'description', 'default_license',
                  'editors')

    def __init__(self, *args, **kwargs):
        """Populate the editor choices and pre-tick current members."""
        super(ProjectChangeForm, self).__init__(*args, **kwargs)
        project = self.instance
        # Owners are managed through their role group and never appear in
        # the editor checkboxes.
        owners = []
        for owner_role in project.roles.filter(role='Owner'):
            owners.extend(owner_role.group.user_set.all())
        choices = []
        for candidate in User.objects.all():
            if candidate in owners:
                continue
            if candidate.last_name:
                label = "{} {}".format(candidate.first_name, candidate.last_name)
            else:
                label = candidate.username
            choices.append((candidate.username, label))
        self.fields["editors"].choices = choices
        # Pre-select everyone who is already a (non-owner) member.
        self.fields["editors"].initial = [
            member.username for member in project.members if member not in owners
        ]

    def save(self, commit=True):
        """Synchronise the Editor role with the submitted checkbox set.

        Note: the project row itself is obtained with commit=False and is
        not written here; only role membership is updated.
        """
        project = super(ProjectChangeForm, self).save(commit=False)
        existing = [member.username for member in project.members]
        submitted = self.cleaned_data["editors"]
        editor_role = project.roles.get(role='Editor')
        newly_checked = [name for name in submitted if name not in existing]
        for user in User.objects.filter(username__in=newly_checked):
            editor_role.users.add(user)
        unchecked = [name for name in existing if name not in submitted]
        for user in User.objects.filter(username__in=unchecked):
            editor_role.users.remove(user)
        return project
| agpl-3.0 |
feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/bricker_v2-2-1/ui/view_3d/debugging_tools.py | 1 | 5277 | # Copyright (C) 2020 Christopher Gearhart
# chris@bblanimation.com
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
# NONE!
# Blender imports
from addon_utils import check, paths, enable
import bpy
from bpy.types import Panel
from bpy.props import *
# Module imports
from ..panel_info import *
from ...lib.caches import cache_exists
from ...functions import *
class VIEW3D_PT_bricker_debugging_tools(BrickerPanel, Panel):
    """Sidebar panel exposing Bricker's debugging operators."""
    bl_label = "Debugging Tools"
    bl_idname = "VIEW3D_PT_bricker_debugging_tools"
    bl_options = {"DEFAULT_CLOSED"}
    @classmethod
    def poll(self, context):
        # Hidden unless the add-on preferences opt in to debugging tools.
        if not settings_can_be_drawn():
            return False
        prefs = get_addon_preferences()
        return prefs.show_debugging_tools
    def draw(self, context):
        layout = self.layout
        scn, cm, n = get_active_context_info()
        col = layout.column(align=True)
        row = col.row(align=True)
        row.operator("bricker.clear_cache", text="Clear Cache", icon="CON_TRANSFORM_CACHE")
        source_name = cm.source_obj.name if cm.source_obj else ""
        layout.operator("bricker.generate_brick", icon="MOD_BUILD")
        # Eye icon reflects whether the source object is currently in the scene.
        layout.operator("bricker.debug_toggle_view_source", icon="RESTRICT_VIEW_OFF" if source_name in scn.objects else "RESTRICT_VIEW_ON")
class VIEW3D_PT_bricker_matrix_details(BrickerPanel, Panel):
    """ Display Matrix details for specified brick location """
    bl_label = "Brick Details"
    bl_idname = "VIEW3D_PT_bricker_matrix_details"
    bl_parent_id = "VIEW3D_PT_bricker_debugging_tools"
    bl_options = {"DEFAULT_CLOSED"}
    @classmethod
    def poll(self, context):
        """Only show the panel when a brick model (or animation) exists."""
        if not settings_can_be_drawn():
            return False
        scn, cm, _ = get_active_context_info()
        # if created_with_unsupported_version(cm):
        #     return False
        if not (cm.model_created or cm.animated):
            return False
        return True
    def draw(self, context):
        """Draw a key/value table for the brick at ``cm.active_key``."""
        layout = self.layout
        scn, cm, _ = get_active_context_info()
        if matrix_really_is_dirty(cm):
            layout.label(text="Matrix is dirty!")
            return
        if not cache_exists(cm):
            layout.label(text="Matrix not cached!")
            return
        col1 = layout.column(align=True)
        row = col1.row(align=True)
        row.prop(cm, "active_key", text="")
        # Fetch the bricksdict for the current frame (animations) or model.
        # poll() guarantees one branch runs, but initialize defensively so a
        # stale panel state cannot hit an unbound local.
        bricksdict = None
        if cm.animated:
            bricksdict = get_bricksdict(cm, d_type="ANIM", cur_frame=get_anim_adjusted_frame(scn.frame_current, cm.last_start_frame, cm.last_stop_frame, cm.last_step_frame))
        elif cm.model_created:
            bricksdict = get_bricksdict(cm)
        if bricksdict is None:
            layout.label(text="Matrix not available")
            return
        # FIX: initialize dkey before the try block.  Previously, if
        # list_to_str() raised, dkey was unbound and the except handler
        # itself crashed with a NameError on 'str(e)[1:-1] == dkey', and the
        # 'dkey is None' branch below was unreachable.
        dkey = None
        try:
            dkey = list_to_str(tuple(cm.active_key))
            brick_d = bricksdict[dkey]
        except Exception as e:
            layout.label(text="No brick details available")
            if len(bricksdict) == 0:
                print("[Bricker] Skipped drawing Brick Details")
            elif str(e)[1:-1] == dkey:
                # KeyError for the active key itself -- expected, stay quiet.
                pass
                # print("[Bricker] Key '" + str(dkey) + "' not found")
            elif dkey is None:
                print("[Bricker] Key not set (entered else)")
            else:
                print("[Bricker] Error fetching brick_d:", e)
            return
        col1 = layout.column(align=True)
        split = layout_split(col1, factor=0.35)
        # hard code keys so that they are in the order I want
        keys = [
            "name",
            "val",
            "draw",
            "co",
            "omitted",
            "near_face",
            "near_intersection",
            "near_normal",
            "mat_name",
            "custom_mat_name",
            "rgba",
            "parent",
            "size",
            "attempted_merge",
            "top_exposed",
            "bot_exposed",
            "type",
            "flipped",
            "rotated",
            "created_from",
        ]
        # draw keys (left column)
        col = split.column(align=True)
        col.scale_y = 0.65
        row = col.row(align=True)
        row.label(text="key:")
        for key in keys:
            row = col.row(align=True)
            row.label(text=key + ":")
        # draw values (right column); entries missing from brick_d are skipped
        col = split.column(align=True)
        col.scale_y = 0.65
        row = col.row(align=True)
        row.label(text=dkey)
        for key in keys:
            try:
                row = col.row(align=True)
                row.label(text=str(brick_d[key]))
            except KeyError:
                continue
| gpl-3.0 |
TeachAtTUM/edx-platform | openedx/core/lib/course_tabs.py | 17 | 1412 | """
Tabs for courseware.
"""
from openedx.core.lib.plugins import PluginManager
# Stevedore extension point namespaces
COURSE_TAB_NAMESPACE = 'openedx.course_tab'
class CourseTabPluginManager(PluginManager):
    """
    Manager for all of the course tabs that have been made available.

    All course tabs should implement `CourseTab`.
    """
    NAMESPACE = COURSE_TAB_NAMESPACE

    @classmethod
    def get_tab_types(cls):
        """
        Returns the list of available course tabs in their canonical order.

        Tabs are ordered by ascending ``priority``; tabs without a priority
        sort after all prioritized tabs.  Ties are broken alphabetically by
        the tab's ``type``.
        """
        def tab_sort_key(tab_type):
            """Sort key reproducing the historical cmp-based ordering."""
            # Leading bool pushes priority-less tabs behind all others; the
            # placeholder 0 keeps the tuple comparable when priority is None.
            return (
                tab_type.priority is None,
                tab_type.priority if tab_type.priority is not None else 0,
                tab_type.type,
            )

        # FIX: the previous implementation called list.sort(cmp=...) on
        # dict.values(), which breaks on Python 3 (cmp= was removed and dict
        # views have no sort()).  sorted() with a key function behaves
        # identically on both Python 2 and 3 and still returns a list.
        return sorted(cls.get_available_plugins().values(), key=tab_sort_key)
| agpl-3.0 |
Deepakkothandan/ansible | test/legacy/cleanup_gce.py | 83 | 2720 | '''
Find and delete GCE resources matching the provided --match string. Unless
--yes|-y is provided, the prompt for confirmation prior to deleting resources.
Please use caution, you can easily delete your *ENTIRE* GCE infrastructure.
'''
import optparse
import os
import re
import sys
import yaml
try:
from libcloud.common.google import (
GoogleBaseError,
QuotaExceededError,
ResourceExistsError,
ResourceInUseError,
ResourceNotFoundError,
)
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
_ = Provider.GCE
except ImportError:
print("failed=True msg='libcloud with GCE support (0.13.3+) required for this module'")
sys.exit(1)
import gce_credentials
from ansible.module_utils.six.moves import input
def delete_gce_resources(get_func, attr, opts):
    """Offer to delete every resource from get_func() whose *attr* value
    matches opts.match_re (case-insensitive regex search)."""
    for resource in get_func():
        value = getattr(resource, attr)
        if not re.search(opts.match_re, value, re.IGNORECASE):
            continue
        prompt_and_delete(resource,
                          "Delete matching %s? [y/n]: " % (resource,),
                          opts.assumeyes)
def prompt_and_delete(item, prompt, assumeyes):
    """Destroy *item* after an optional interactive confirmation.

    With assumeyes=True the prompt is skipped entirely; otherwise the user
    must answer 'y' for the deletion to proceed.
    """
    confirmed = assumeyes or input(prompt).lower() == 'y'
    assert hasattr(item, 'destroy'), "Class <%s> has no delete attribute" % item.__class__
    if confirmed:
        item.destroy()
        print("Deleted %s" % item)
def parse_args():
    """Build the optparse parser, parse sys.argv and validate credentials.

    Returns the (options, args) tuple produced by optparse.
    """
    parser = optparse.OptionParser(
        usage="%s [options]" % sys.argv[0],
        description=__doc__
    )
    # Shared GCE credential options (service account, key file, project id).
    gce_credentials.add_credentials_options(parser)
    parser.add_option(
        "--yes", "-y",
        action="store_true", dest="assumeyes",
        default=False,
        help="Don't prompt for confirmation"
    )
    parser.add_option(
        "--match",
        action="store", dest="match_re",
        default="^ansible-testing-",
        help="Regular expression used to find GCE resources (default: %default)"
    )
    (opts, args) = parser.parse_args()
    # Aborts with a usage error if mandatory credential options are missing.
    gce_credentials.check_required(opts, parser)
    return (opts, args)
if __name__ == '__main__':
    (opts, args) = parse_args()
    # Connect to GCE
    gce = gce_credentials.get_gce_driver(opts)
    try:
        # Delete matching instances
        delete_gce_resources(gce.list_nodes, 'name', opts)
        # Delete matching snapshots
        def get_snapshots():
            # Generator over every snapshot of every volume in the project.
            for volume in gce.list_volumes():
                for snapshot in gce.list_volume_snapshots(volume):
                    yield snapshot
        delete_gce_resources(get_snapshots, 'name', opts)
        # Delete matching disks (after snapshots, which depend on them)
        delete_gce_resources(gce.list_volumes, 'name', opts)
    except KeyboardInterrupt as e:
        # Ctrl-C simply ends the run cleanly; the exception object is unused.
        print("\nExiting on user command.")
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pywinauto/win32functions.py | 2 | 9589 | # GUI Application automation and testing library
# Copyright (C) 2015 Intel Corporation
# Copyright (C) 2010 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Defines Windows(tm) functions"
from __future__ import absolute_import
__revision__ = "$Revision$"
import ctypes
from ctypes import *
import sys
# Under cygwin the stdcall (windll) loader is unavailable; fall back to cdll.
if sys.platform == "cygwin":
    windll = cdll
# Windows type aliases used by the (commented-out) prototypes below.
HRESULT = c_long
UINT = c_uint
SHORT = c_short
# --- gdi32 / user32: drawing primitives and metrics ---
CreateBrushIndirect = ctypes.windll.gdi32.CreateBrushIndirect
CreateDC = ctypes.windll.gdi32.CreateDCW
CreateFontIndirect = ctypes.windll.gdi32.CreateFontIndirectW
CreatePen = ctypes.windll.gdi32.CreatePen
DeleteDC = ctypes.windll.gdi32.DeleteDC
GetObject = ctypes.windll.gdi32.GetObjectW
DeleteObject = ctypes.windll.gdi32.DeleteObject
DrawText = ctypes.windll.user32.DrawTextW
TextOut = ctypes.windll.gdi32.TextOutW
Rectangle = ctypes.windll.gdi32.Rectangle
SelectObject = ctypes.windll.gdi32.SelectObject
GetStockObject = ctypes.windll.gdi32.GetStockObject
GetSystemMetrics = ctypes.windll.user32.GetSystemMetrics
GetSystemMetrics.restype = ctypes.c_int
GetSystemMetrics.argtypes = (ctypes.c_int, )
GetTextMetrics = ctypes.windll.gdi32.GetTextMetricsW
# --- window enumeration ---
EnumChildWindows = ctypes.windll.user32.EnumChildWindows
EnumDesktopWindows = ctypes.windll.user32.EnumDesktopWindows
EnumWindows = ctypes.windll.user32.EnumWindows
GetDC = ctypes.windll.user32.GetDC
GetDesktopWindow = ctypes.windll.user32.GetDesktopWindow
# --- input and cursor ---
SendInput = ctypes.windll.user32.SendInput
SetCursorPos = ctypes.windll.user32.SetCursorPos
GetCursorPos = ctypes.windll.user32.GetCursorPos
GetCaretPos = ctypes.windll.user32.GetCaretPos
# menu functions
DrawMenuBar = ctypes.windll.user32.DrawMenuBar
GetMenu = ctypes.windll.user32.GetMenu
GetMenuBarInfo = ctypes.windll.user32.GetMenuBarInfo
GetMenuInfo = ctypes.windll.user32.GetMenuInfo
GetMenuItemCount = ctypes.windll.user32.GetMenuItemCount
GetMenuItemInfo = ctypes.windll.user32.GetMenuItemInfoW
SetMenuItemInfo = ctypes.windll.user32.SetMenuItemInfoW
GetMenuItemRect = ctypes.windll.user32.GetMenuItemRect
CheckMenuItem = ctypes.windll.user32.CheckMenuItem
GetMenuState = ctypes.windll.user32.GetMenuState
GetSubMenu = ctypes.windll.user32.GetSubMenu
GetSystemMenu = ctypes.windll.user32.GetSystemMenu
HiliteMenuItem = ctypes.windll.user32.HiliteMenuItem
IsMenu = ctypes.windll.user32.IsMenu
MenuItemFromPoint = ctypes.windll.user32.MenuItemFromPoint
# --- window state, geometry and text ---
BringWindowToTop = ctypes.windll.user32.BringWindowToTop
GetVersion = ctypes.windll.kernel32.GetVersion
GetParent = ctypes.windll.user32.GetParent
GetWindow = ctypes.windll.user32.GetWindow
ShowWindow = ctypes.windll.user32.ShowWindow
GetWindowContextHelpId = ctypes.windll.user32.GetWindowContextHelpId
GetWindowLong = ctypes.windll.user32.GetWindowLongW
GetWindowPlacement = ctypes.windll.user32.GetWindowPlacement
GetWindowRect = ctypes.windll.user32.GetWindowRect
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
GetClassName = ctypes.windll.user32.GetClassNameW
GetClientRect = ctypes.windll.user32.GetClientRect
IsChild = ctypes.windll.user32.IsChild
IsWindow = ctypes.windll.user32.IsWindow
IsWindowUnicode = ctypes.windll.user32.IsWindowUnicode
IsWindowVisible = ctypes.windll.user32.IsWindowVisible
IsWindowEnabled = ctypes.windll.user32.IsWindowEnabled
ClientToScreen = ctypes.windll.user32.ClientToScreen
ScreenToClient = ctypes.windll.user32.ScreenToClient
# --- threads and processes ---
GetCurrentThreadId = ctypes.windll.Kernel32.GetCurrentThreadId
GetWindowThreadProcessId = ctypes.windll.user32.GetWindowThreadProcessId
GetGUIThreadInfo = ctypes.windll.user32.GetGUIThreadInfo
AttachThreadInput = ctypes.windll.user32.AttachThreadInput
#GetWindowThreadProcessId = ctypes.windll.user32.GetWindowThreadProcessId
GetLastError = ctypes.windll.kernel32.GetLastError
OpenProcess = ctypes.windll.kernel32.OpenProcess
CloseHandle = ctypes.windll.kernel32.CloseHandle
CreateProcess = ctypes.windll.kernel32.CreateProcessW
TerminateProcess = ctypes.windll.kernel32.TerminateProcess
ExitProcess = ctypes.windll.kernel32.ExitProcess
ReadProcessMemory = ctypes.windll.kernel32.ReadProcessMemory
GlobalAlloc = ctypes.windll.kernel32.GlobalAlloc
GlobalLock = ctypes.windll.kernel32.GlobalLock
GlobalUnlock = ctypes.windll.kernel32.GlobalUnlock
# --- window messaging and focus ---
SendMessage = ctypes.windll.user32.SendMessageW
SendMessageTimeout = ctypes.windll.user32.SendMessageTimeoutW
SendMessageA = ctypes.windll.user32.SendMessageA
PostMessage = ctypes.windll.user32.PostMessageW
GetMessage = ctypes.windll.user32.GetMessageW
MoveWindow = ctypes.windll.user32.MoveWindow
EnableWindow = ctypes.windll.user32.EnableWindow
SetActiveWindow = ctypes.windll.user32.SetActiveWindow
GetFocus = ctypes.windll.user32.GetFocus
SetFocus = ctypes.windll.user32.SetFocus
SetForegroundWindow = ctypes.windll.user32.SetForegroundWindow
GetForegroundWindow = ctypes.windll.user32.GetForegroundWindow
SetWindowLong = ctypes.windll.user32.SetWindowLongW
SystemParametersInfo = ctypes.windll.user32.SystemParametersInfoW
# --- (remote-)process memory management ---
VirtualAllocEx = ctypes.windll.kernel32.VirtualAllocEx
VirtualAllocEx.restype = ctypes.c_void_p
VirtualFreeEx = ctypes.windll.kernel32.VirtualFreeEx
DebugBreakProcess = ctypes.windll.kernel32.DebugBreakProcess
VirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
VirtualFree = ctypes.windll.kernel32.VirtualFree
WriteProcessMemory = ctypes.windll.kernel32.WriteProcessMemory
GetActiveWindow = ctypes.windll.user32.GetActiveWindow
GetLastActivePopup = ctypes.windll.user32.GetLastActivePopup
FindWindow = ctypes.windll.user32.FindWindowW
GetTopWindow = ctypes.windll.user32.GetTopWindow
SetCapture = ctypes.windll.user32.SetCapture
ReleaseCapture = ctypes.windll.user32.ReleaseCapture
ShowOwnedPopups = ctypes.windll.user32.ShowOwnedPopups
WindowFromPoint = ctypes.windll.user32.WindowFromPoint
WideCharToMultiByte = ctypes.windll.kernel32.WideCharToMultiByte
GetACP = ctypes.windll.kernel32.GetACP
WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject
WaitForInputIdle = ctypes.windll.user32.WaitForInputIdle
GetModuleFileNameEx = ctypes.windll.psapi.GetModuleFileNameExW
# --- clipboard ---
GetClipboardData = ctypes.windll.user32.GetClipboardData
OpenClipboard = ctypes.windll.user32.OpenClipboard
EmptyClipboard = ctypes.windll.user32.EmptyClipboard
CloseClipboard = ctypes.windll.user32.CloseClipboard
CountClipboardFormats = ctypes.windll.user32.CountClipboardFormats
EnumClipboardFormats = ctypes.windll.user32.EnumClipboardFormats
GetClipboardFormatName = ctypes.windll.user32.GetClipboardFormatNameW
GetQueueStatus = ctypes.windll.user32.GetQueueStatus
LoadString = ctypes.windll.user32.LoadStringW
#def VkKeyScanW(p1):
# # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4225
# return VkKeyScanW._api_(p1)
#VkKeyScan = stdcall(SHORT, 'user32', [c_wchar]) (VkKeyScanW)
#
#def MapVirtualKeyExW(p1, p2, p3):
# # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4376
# return MapVirtualKeyExW._api_(p1, p2, p3)
#MapVirtualKeyEx = stdcall(
# UINT, 'user32', [c_uint, c_uint, c_long]) (MapVirtualKeyExW)
#
#def MapVirtualKeyW(p1, p2):
# # C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4355
# return MapVirtualKeyW._api_(p1, p2)
#MapVirtualKey = stdcall(UINT, 'user32', [c_uint, c_uint]) (MapVirtualKeyW)
def MakeLong(high, low):
    """Pack *high* into the high word and *low* into the low word of a LONG."""
    # Truncate both inputs to WORD (16-bit) size first; callers sometimes
    # pass values wider than a normal short.
    high_word = high & 0xFFFF
    low_word = low & 0xFFFF
    return (high_word << 16) | low_word
#====================================================================
def HiWord(value):
    """Return the upper 16 bits (high word) of a 32-bit value."""
    # Mask first, then shift -- equivalent to (value >> 16) & 0xFFFF for any
    # Python integer, including negative ones.
    return (value & 0xFFFF0000) >> 16
#====================================================================
def LoWord(value):
    """Return the lower 16 bits (low word) of a value."""
    # value % 0x10000 == value & 0xFFFF for all Python ints (also negative).
    return value % 0x10000
#====================================================================
def WaitGuiThreadIdle(handle, timeout = 1):
    "Wait until the thread of the specified handle is ready"
    from . import win32defines
    process_id = ctypes.c_int()
    # Resolve the window handle to its owning process id.
    GetWindowThreadProcessId(handle, ctypes.byref(process_id))
    # ask the control if it has finished processing the message
    hprocess = OpenProcess(
        win32defines.PROCESS_QUERY_INFORMATION,
        0,
        process_id.value)
    # wait timout number of seconds
    # (timeout is in seconds; WaitForInputIdle expects milliseconds)
    ret = WaitForInputIdle(hprocess, timeout * 1000)
    CloseHandle(hprocess)
    # NOTE(review): ret is WaitForInputIdle's raw result and is returned
    # uninterpreted -- callers must distinguish success/timeout/failure.
    return ret
| gpl-3.0 |
sureshthalamati/spark | examples/src/main/python/ml/multilayer_perceptron_classification.py | 123 | 2172 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.classification import MultilayerPerceptronClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    spark = SparkSession\
        .builder.appName("multilayer_perceptron_classification_example").getOrCreate()
    # $example on$
    # Load training data
    data = spark.read.format("libsvm")\
        .load("data/mllib/sample_multiclass_classification_data.txt")
    # Split the data into train and test
    # (fixed seed 1234 keeps the split -- and the printed accuracy -- reproducible)
    splits = data.randomSplit([0.6, 0.4], 1234)
    train = splits[0]
    test = splits[1]
    # specify layers for the neural network:
    # input layer of size 4 (features), two intermediate of size 5 and 4
    # and output of size 3 (classes)
    layers = [4, 5, 4, 3]
    # create the trainer and set its parameters
    trainer = MultilayerPerceptronClassifier(maxIter=100, layers=layers, blockSize=128, seed=1234)
    # train the model
    model = trainer.fit(train)
    # compute accuracy on the test set
    result = model.transform(test)
    predictionAndLabels = result.select("prediction", "label")
    evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
    print("Test set accuracy = " + str(evaluator.evaluate(predictionAndLabels)))
    # $example off$
    spark.stop()
| apache-2.0 |
double-y/django | tests/template_tests/filter_tests/test_iriencode.py | 388 | 1603 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import iriencode, urlencode
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class IriencodeTests(SimpleTestCase):
    """
    Ensure iriencode keeps safe strings.
    """
    @setup({'iriencode01': '{{ url|iriencode }}'})
    def test_iriencode01(self):
        # Autoescaping on: '&' in the unsafe input is HTML-escaped.
        output = self.engine.render_to_string('iriencode01', {'url': '?test=1&me=2'})
        self.assertEqual(output, '?test=1&amp;me=2')
    @setup({'iriencode02': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
    def test_iriencode02(self):
        # Autoescaping off: the value passes through unescaped.
        output = self.engine.render_to_string('iriencode02', {'url': '?test=1&me=2'})
        self.assertEqual(output, '?test=1&me=2')
    @setup({'iriencode03': '{{ url|iriencode }}'})
    def test_iriencode03(self):
        # mark_safe input is not re-escaped even with autoescaping on.
        output = self.engine.render_to_string('iriencode03', {'url': mark_safe('?test=1&me=2')})
        self.assertEqual(output, '?test=1&me=2')
    @setup({'iriencode04': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
    def test_iriencode04(self):
        # mark_safe input with autoescaping off behaves the same way.
        output = self.engine.render_to_string('iriencode04', {'url': mark_safe('?test=1&me=2')})
        self.assertEqual(output, '?test=1&me=2')
class FunctionTests(SimpleTestCase):
    """Direct tests of the iriencode filter function (no template engine)."""
    def test_unicode(self):
        # Non-ASCII characters are percent-encoded as UTF-8 bytes.
        self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'), 'S%C3%B8r-Tr%C3%B8ndelag')
    def test_urlencoded(self):
        # Already urlencoded input is preserved (no double-encoding of '%').
        self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')), 'fran%C3%A7ois%20%26%20jill')
| bsd-3-clause |
xy515258/moose | framework/scripts/mooseExamplesLateX/gen_tex.py | 35 | 2444 | #!/usr/bin/env python
import os, re
from subprocess import Popen
# Root of the checked-out moose_examples tree, relative to this script.
moose_examples_dir = "../../../moose_examples"
# LaTeX preamble copied verbatim to the top of the generated document.
preamble_file = 'preamble.tex'
# Listing template; <STYLE>, <CAPTION> and <PATH> are substituted per file
# by writeTex().
line_template = '\\lstinputlisting[style=<STYLE>, caption=<CAPTION>]{<PATH>}\n\clearpage\n\n'
def genPreamble(out_file):
    """Copy the LaTeX preamble file verbatim into *out_file*.

    Fix: the input file is now opened via a context manager so its handle
    is closed even if the write fails (it was previously leaked).
    """
    with open(preamble_file, 'r') as in_file:
        out_file.write(in_file.read())
def genFileList(out_file):
    """Visit every top-level directory under moose_examples_dir and emit
    its listings via readOrTraverseDirectory()."""
    for entry in os.listdir(moose_examples_dir):
        candidate = moose_examples_dir + '/' + entry
        # Plain files at the top level (READMEs etc.) are ignored.
        if not os.path.isdir(candidate):
            continue
        readOrTraverseDirectory(out_file, moose_examples_dir, entry)
def readOrTraverseDirectory(out_file, dirpath, dir):
    """Emit listings for one example directory (exNN).

    If the directory provides a training_list.txt, only the files named
    there are emitted (missing ones produce a warning); otherwise every
    *.C, *.h and *.i file found by walking the tree is emitted.
    """
    # Only directories named exNN (digits, optional leading zero) are examples.
    m = re.search(r'ex0?(\d+)', dir)
    if not m:
        return
    # get the example number
    example_number = m.group(1)
    curr_path = dirpath + '/' + dir
    # see if there is an explicit file list
    if os.path.isfile(curr_path + '/training_list.txt'):
        # FIX: the file handle was previously leaked; 'with' closes it.
        with open(curr_path + '/training_list.txt') as f:
            for line in f.readlines():
                line = line.strip()
                if os.path.isfile(curr_path + '/' + line):
                    writeTex(out_file, curr_path + '/' + line, example_number)
                elif line != '':  # FIX: '<>' is Python-2-only; '!=' works everywhere
                    # ignore blank lines, warn about listed-but-missing files
                    print('Warning: File ' + curr_path + '/' + line + ' does not exist\n')
    else:
        # file list doesn't exist so recurse and pick up the common files
        for dirpath, dirnames, filenames in os.walk(curr_path):
            for fname in filenames:  # renamed: 'file' shadowed the builtin
                suffix = os.path.splitext(fname)
                if suffix[-1] == '.C' or suffix[-1] == '.h' or suffix[-1] == '.i':
                    writeTex(out_file, dirpath + '/' + fname, example_number)
def writeTex(out_file, file_name, example_number):
    """Write one lstinputlisting entry for *file_name* to *out_file*.

    The caption is "Example <n>: <basename>" with underscores TeX-escaped;
    the listing style is chosen from the extension (.h/.C -> C++, .i -> ini).
    """
    base_name = file_name.split("/")[-1].replace('_', r'\_')
    extension = os.path.splitext(file_name)[-1]
    if extension in ('.h', '.C'):
        style = 'C++'
    elif extension == '.i':
        style = 'ini'
    caption = 'Example ' + example_number + ': ' + base_name
    text = line_template.replace('<PATH>', file_name)
    text = text.replace('<CAPTION>', caption)
    out_file.write(text.replace('<STYLE>', style))
def genPostscript(out_file):
    """Terminate the LaTeX document body."""
    closing = '\n\\end{document}'
    out_file.write(closing)
if __name__ == '__main__':
    # Generate examples.tex, then hand it to pdflatex.
    tex_file = open('examples.tex', 'w')
    genPreamble(tex_file)
    genFileList(tex_file)
    genPostscript(tex_file)
    # NOTE(review): tex_file is never explicitly closed before pdflatex is
    # launched, and the Popen handle is not wait()ed -- the script may exit
    # before the PDF build finishes.  Confirm this is acceptable.
    p = Popen('pdflatex examples.tex', shell=True)
| lgpl-2.1 |
tlevine/be | libbe/command/comment.py | 5 | 7305 | # Copyright (C) 2005-2012 Aaron Bentley <abentley@panoramicfeedback.com>
# Chris Ball <cjb@laptop.org>
# Gianluca Montecchi <gian@grys.it>
# Niall Douglas (s_sourceforge@nedprod.com) <spam@spamtrap.com>
# Robert Lehmann <mail@robertlehmann.de>
# W. Trevor King <wking@tremily.us>
#
# This file is part of Bugs Everywhere.
#
# Bugs Everywhere is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# Bugs Everywhere is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Bugs Everywhere. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import libbe
import libbe.command
import libbe.command.util
import libbe.comment
import libbe.ui.util.editor
import libbe.util.id
class Comment (libbe.command.Command):
    """Add a comment to a bug
    >>> import time
    >>> import libbe.bugdir
    >>> import libbe.util.id
    >>> bd = libbe.bugdir.SimpleBugDir(memory=False)
    >>> io = libbe.command.StringInputOutput()
    >>> io.stdout = sys.stdout
    >>> ui = libbe.command.UserInterface(io=io)
    >>> ui.storage_callbacks.set_storage(bd.storage)
    >>> cmd = Comment(ui=ui)
    >>> uuid_gen = libbe.util.id.uuid_gen
    >>> libbe.util.id.uuid_gen = lambda: 'X'
    >>> ui._user_id = u'Fran\\xe7ois'
    >>> ret = ui.run(cmd, args=['/a', 'This is a comment about a'])
    Created comment with ID abc/a/X
    >>> libbe.util.id.uuid_gen = uuid_gen
    >>> bd.flush_reload()
    >>> bug = bd.bug_from_uuid('a')
    >>> bug.load_comments(load_full=False)
    >>> comment = bug.comment_root[0]
    >>> comment.id.storage() == comment.uuid
    True
    >>> print comment.body
    This is a comment about a
    <BLANKLINE>
    >>> comment.author
    u'Fran\\xe7ois'
    >>> comment.time <= int(time.time())
    True
    >>> comment.in_reply_to is None
    True
    >>> if 'EDITOR' in os.environ:
    ...     del os.environ['EDITOR']
    >>> if 'VISUAL' in os.environ:
    ...     del os.environ['VISUAL']
    >>> ui._user_id = u'Frank'
    >>> ret = ui.run(cmd, args=['/b'])
    Traceback (most recent call last):
    UserError: No comment supplied, and EDITOR not specified.
    >>> os.environ['EDITOR'] = "echo 'I like cheese' > "
    >>> libbe.util.id.uuid_gen = lambda: 'Y'
    >>> ret = ui.run(cmd, args=['/b'])
    Created comment with ID abc/b/Y
    >>> libbe.util.id.uuid_gen = uuid_gen
    >>> bd.flush_reload()
    >>> bug = bd.bug_from_uuid('b')
    >>> bug.load_comments(load_full=False)
    >>> comment = bug.comment_root[0]
    >>> print comment.body
    I like cheese
    <BLANKLINE>
    >>> ui.cleanup()
    >>> bd.cleanup()
    >>> del os.environ["EDITOR"]
    """
    name = 'comment'
    def __init__(self, *args, **kwargs):
        # Register this command's CLI options and positional arguments.
        libbe.command.Command.__init__(self, *args, **kwargs)
        self.options.extend([
                libbe.command.Option(name='author', short_name='a',
                    help='Set the comment author',
                    arg=libbe.command.Argument(
                        name='author', metavar='AUTHOR')),
                libbe.command.Option(name='alt-id',
                    help='Set an alternate comment ID',
                    arg=libbe.command.Argument(
                        name='alt-id', metavar='ID')),
                libbe.command.Option(name='content-type', short_name='c',
                    help='Set comment content-type (e.g. text/plain)',
                    arg=libbe.command.Argument(name='content-type',
                        metavar='MIME')),
                libbe.command.Option(name='full-uuid', short_name='f',
                    help='Print the full UUID for the new bug')
                ])
        self.args.extend([
                libbe.command.Argument(
                    name='id', metavar='ID', default=None,
                    completion_callback=libbe.command.util.complete_bug_comment_id),
                libbe.command.Argument(
                    name='comment', metavar='COMMENT', default=None,
                    optional=True,
                    completion_callback=libbe.command.util.complete_assigned),
                ])
    def _run(self, **params):
        # Resolve the user-supplied ID into its bugdir/bug/parent-comment.
        bugdirs = self._get_bugdirs()
        bugdir,bug,parent = (
            libbe.command.util.bugdir_bug_comment_from_user_id(
                bugdirs, params['id']))
        # Three ways to obtain the comment body: editor, stdin, or argument.
        if params['comment'] == None:
            # try to launch an editor for comment-body entry
            try:
                if parent == bug.comment_root:
                    header = "Subject: %s" % bug.summary
                    parent_body = parent.string_thread() or "No comments"
                else:
                    header = "From: %s\nTo: %s" % (parent.author, bug)
                    parent_body = parent.body
                estr = 'Please enter your comment above\n\n%s\n\n> %s\n' \
                    % (header, '\n> '.join(parent_body.splitlines()))
                body = libbe.ui.util.editor.editor_string(estr)
            except libbe.ui.util.editor.CantFindEditor, e:
                raise libbe.command.UserError(
                    'No comment supplied, and EDITOR not specified.')
            if body is None:
                raise libbe.command.UserError('No comment entered.')
        elif params['comment'] == '-': # read body from stdin
            # Binary payloads bypass the decoding stdin wrapper entirely.
            binary = not (params['content-type'] == None
                          or params['content-type'].startswith("text/"))
            if not binary:
                body = self.stdin.read()
                if not body.endswith('\n'):
                    body += '\n'
            else: # read-in without decoding
                body = sys.stdin.read()
        else: # body given on command line
            body = params['comment']
            if not body.endswith('\n'):
                body+='\n'
        if params['author'] == None:
            params['author'] = self._get_user_id()
        # Create the reply and copy optional metadata onto it.
        new = parent.new_reply(body=body, content_type=params['content-type'])
        for key in ['alt-id', 'author']:
            if params[key] != None:
                setattr(new, new._setting_name_to_attr_name(key), params[key])
        if params['full-uuid']:
            comment_id = new.id.long_user()
        else:
            comment_id = new.id.user()
        self.stdout.write('Created comment with ID %s\n' % (comment_id))
        return 0
    def _long_help(self):
        return """
To add a comment to a bug, use the bug ID as the argument. To reply
to another comment, specify the comment name (as shown in "be show"
output). COMMENT, if specified, should be either the text of your
comment or "-", in which case the text will be read from stdin. If
you do not specify a COMMENT, $EDITOR is used to launch an editor. If
COMMENT is unspecified and EDITOR is not set, no comment will be
created.
"""
| gpl-2.0 |
MarkoVogle3/Marko | py/openage/codegen/__main__.py | 46 | 8158 | from ..convert.util import set_verbosity, dbg
import argparse
import os
def _read_cache(path):
    """Read one codegen cache file; return the set of its stripped lines.

    Raises OSError if the file cannot be opened (expected on the very
    first invocation, when no cache exists yet).
    """
    with open(path) as cache_file:
        return {line.strip() for line in cache_file}


def main():
    """
    this codegen script auto-generates sourcefiles in the cpp/ subtree,
    and is designed for usage by the build system
    (see buildsystem/codegen.cmake).

    invocation synopsis:

    python3 -m openage.codegen
        (mandatory)
        --target-cache=filea
        --depend-cache=fileb
        --cpp-src-dir=dira

        (commands, optional)
        --write-to-sourcedir
        --touch-file-on-cache-change=CMakeLists.txt
        --force-rerun-on-targetcache-change
        --clean

    all file and directory names SHOULD be absolute paths.
    this is not enforced, but relative paths may violate assumptions made by
    codegen.cmake.

    for each invocation, all code generation is performed, and the generated
    sourcefiles are stored in an internal dict.

    in addition, text data is written to the specified cache files:

    - a list of all generated files (targets) to target-cache
    - a list of all loaded python module files to depend-cache

    depending on the specified invocation commands,

    - generated sources are written to the source dir
    - generated sources are cleaned
    - cmake re-builds are triggered if a cache has changed
    """
    # parse arguments
    ap = argparse.ArgumentParser(
        description=("generates c++ code within the source tree. "
                     "designed to be used by [buildsystem/codegen.cmake]"),
        epilog=("all file and directory names should be absolute; otherwise, "
                "assumptions made by this script or the cmake script might "
                "not be fulfilled"))

    ap.add_argument("--target-cache", required=True,
                    help=("filename for target cache. a list of all generated "
                          "sources is written there for every invocation. if "
                          "the list changes, --touch-file-on-cache-change and "
                          "--force-rerun-on-targetcache-change trigger cmake "
                          "re-runs"))
    ap.add_argument("--depend-cache", required=True,
                    help=("filename for dependency cache. a list of all "
                          "python files and other resources that were used "
                          "during source generation. if the list changes, "
                          "--touch-file-on-cache-change will trigger cmake "
                          "re-runs"))
    ap.add_argument("--cpp-src-dir", required=True,
                    help=("c++ source directory; used to determine generated "
                          "file names"))
    ap.add_argument("--write-to-sourcedir", action='store_true',
                    help=("causes the sources to be actually written to "
                          "cpp-src-dir. otherwise, a dry run is performed. "
                          "even during a dry run, all code generation is "
                          "performed in order to determine generation targets "
                          "and dependencies."))
    ap.add_argument("--clean", action='store_true',
                    help=("all generated files are deleted from the source "
                          "directory"))
    # note: help text previously read "one of thecaches" (missing space)
    ap.add_argument("--touch-file-on-cache-change",
                    help=("the file passed here will be touched if one of the "
                          "caches changes. designed for use with a "
                          "CMakeLists.txt file, to trigger cmake re-runs"))
    ap.add_argument("--force-rerun-on-targetcache-change", action='store_true',
                    help=("a bit more drastic than --touch-file-on-change, "
                          "this causes codegen to abort with an error message "
                          "if the target cache has changed."))
    ap.add_argument("--verbose", "-v", action='count', default=0)

    args = ap.parse_args()

    # process and validate arguments
    if not args.verbose and 'VERBOSE' in os.environ:
        try:
            args.verbose = int(os.environ['VERBOSE'])
        except ValueError:
            # VERBOSE is set, but not numeric: fall back to "very verbose"
            args.verbose = 2

    set_verbosity(args.verbose)

    file_to_touch = args.touch_file_on_cache_change
    if file_to_touch and not os.path.isfile(file_to_touch):
        ap.error("file doesn't exist: %s" % file_to_touch)

    cache_actions_requested = (file_to_touch or
                               args.force_rerun_on_targetcache_change)

    # read the old caches; a missing cache is normal on the first run,
    # but warn if cache-dependent actions were requested
    try:
        old_target_cache = _read_cache(args.target_cache)
    except OSError:
        old_target_cache = set()
        if cache_actions_requested:
            dbg("warning: cache actions were requested, " +
                "but the target cache could not be read!", 0)

    try:
        old_depend_cache = _read_cache(args.depend_cache)
    except OSError:
        old_depend_cache = set()
        if cache_actions_requested:
            dbg("warning: cache actions were requested, " +
                "but the depends cache could not be read!", 0)

    cpp_src_dir = args.cpp_src_dir
    if not os.path.isdir(cpp_src_dir):
        ap.error("not a directory: %s" % cpp_src_dir)

    # arguments are OK.

    # generate sources (always fully performed, even on a dry run)
    generated_files = {}
    from . import codegen
    for absfilename, filename, content in codegen.generate_all(cpp_src_dir):
        generated_files[absfilename] = filename, content

    # calculate dependencies (all used python modules), write-through to cache
    new_depend_cache = set()
    with open(args.depend_cache, 'w') as depend_cache_file:
        for depend in codegen.get_depends():
            depend_cache_file.write("%s\n" % depend)
            new_depend_cache.add(depend)

    # calculate targets, write-through to cache
    new_target_cache = set()
    with open(args.target_cache, 'w') as target_cache_file:
        for filename in generated_files:
            target_cache_file.write("%s\n" % filename)
            new_target_cache.add(filename)

    def print_set_difference(fun, old, new):
        """Report (via fun) which entries were added to / removed from old."""
        if old:
            if old - new:
                fun("removed:\n\t%s" % "\n\t".join(old - new))
            if new - old:
                fun("added:\n\t%s" % "\n\t".join(new - old))
        else:
            fun("\n\t".join(new))

    # check whether the caches have changed
    depend_cache_changed = False
    if old_depend_cache != new_depend_cache:
        depend_cache_changed = True
        dbg("codegen dependencies:", 1)
        print_set_difference(lambda s: dbg(s, 1),
                             old_depend_cache, new_depend_cache)

    target_cache_changed = False
    if old_target_cache != new_target_cache:
        target_cache_changed = True
        dbg("codegen target sources:", 1)
        print_set_difference(lambda s: dbg(s, 1),
                             old_target_cache, new_target_cache)

    if file_to_touch and (depend_cache_changed or target_cache_changed):
        try:
            # bare os.utime sets both timestamps to the current time
            os.utime(file_to_touch)
        except OSError:
            dbg("warning: couldn't update the timestamp for %s"
                % file_to_touch, 0)

    if target_cache_changed and args.force_rerun_on_targetcache_change:
        print("""\n\n\n\
The list of generated sourcefiles has changed.
A build update has been triggered; you need to build again.
\n\n\n""")
        # fail, so the build system notices and re-runs cmake
        raise SystemExit(1)

    # clean sourcedir
    if args.clean:
        for absfilename, (filename, content) in generated_files.items():
            if os.path.isfile(absfilename):
                dbg("cleaning file: %s" % filename, 0)
                os.unlink(absfilename)

    # write generated files to sourcedir, skipping files whose content
    # is unchanged (avoids needless rebuilds of dependent objects)
    if args.write_to_sourcedir:
        for absfilename, (filename, content) in generated_files.items():
            if os.path.isfile(absfilename):
                with open(absfilename) as sourcefile:
                    if sourcefile.read() == content:
                        dbg("file unchanged: %s" % filename, 1)
                        continue

            dbg("generating file: %s" % filename, 0)
            with open(absfilename, 'w') as sourcefile:
                sourcefile.write(content)
# entry point when invoked as "python3 -m openage.codegen"
if __name__ == '__main__':
    main()
| gpl-3.0 |
vornne/pw_module_system | header_triggers.py | 3 | 11505 | ###################################################
# header_triggers.py
# This file contains declarations for triggers
# DO NOT EDIT THIS FILE!
###################################################
ti_once = 100000000.0
#-----------------------------------------------------------------------------
# SIMPLE TRIGGERS
#-----------------------------------------------------------------------------
ti_simulate_battle = -5.0
ti_on_party_encounter = -6.0
#-----------------------------------------------------------------------------
# MISSION TEMPLATE TRIGGERS
#-----------------------------------------------------------------------------
ti_question_answered = -8.0
# trigger param 1: answer
ti_server_player_joined = -15.0
# trigger param 1: player id
ti_on_multiplayer_mission_end = -16.0
ti_before_mission_start = -19.0
ti_after_mission_start = -20.0
ti_tab_pressed = -21.0
ti_inventory_key_pressed = -22.0
ti_escape_pressed = -23.0
ti_battle_window_opened = -24.0
ti_on_agent_spawn = -25.0
# trigger param 1: agent id
ti_on_agent_killed_or_wounded = -26.0
# trigger param 1: dead agent id
# trigger param 2: killer agent id
# trigger param 3: 0 = killed, 1 = wounded
# trigger result: 1 = force kill, 2 = force wounded
ti_on_agent_knocked_down = -27.0
# trigger param 1: knocked down agent id
# trigger param 1: attacker agent id
ti_on_agent_hit = -28.0
# trigger param 1: hit agent id
# trigger param 2: attacker agent id
# trigger param 3: inflicted damage
# trigger param 4: hit bone
# trigger param 5: missile item id
# reg0: attacker item_id
# pos0: position of the blow, rotation gives the direction of the blow
# trigger result: if greater than or equal to zero, inflicted damage is set to the value specified
ti_on_player_exit = -29.0
# trigger param 1: player id
ti_on_leave_area = -30.0
#-----------------------------------------------------------------------------
# SCENE PROP TRIGGERS
#-----------------------------------------------------------------------------
ti_on_scene_prop_init = -40.0
# trigger param 1: instance id
ti_on_scene_prop_hit = -42.0
# trigger param 1: instance id
# trigger param 2: hit damage (server only)
# trigger param 3: attacker agent id (server only)
# trigger param 4: weapon item id (server only)
# trigger param 5: weapon item modifier (server only)
# trigger param 6: missile item id (server only)
# trigger param 7: missile item modifier (server only)
# pos1: hit position
# pos2: x holds attacker agent id, use (set_fixed_point_multiplier, 1)
# trigger result: if greater than or equal to zero, inflicted damage is set to the value specified
ti_on_scene_prop_destroy = -43.0
# trigger param 1: instance id
# trigger param 1: agent id
ti_on_scene_prop_use = -44.0
# trigger param 1: agent id
# trigger param 2: instance id
ti_on_scene_prop_is_animating = -45.0
# trigger param 1: instance id
# trigger param 2: remaining animation time
ti_on_scene_prop_animation_finished = -46.0
# trigger param 1: instance id
ti_on_scene_prop_start_use = -47.0
# trigger param 1: agent id
# trigger param 2: instance id
ti_on_scene_prop_cancel_use = -48.0
# trigger param 1: agent id
# trigger param 2: instance id
ti_scene_prop_deformation_finished = -76.0
# trigger param 1: instance id
#-----------------------------------------------------------------------------
# ITEM TRIGGERS
#-----------------------------------------------------------------------------
ti_on_init_item = -50.0
# trigger param 1: agent id
# trigger param 2: troop id
ti_on_weapon_attack = -51.0
# trigger param 1: attacker agent id
# pos1: weapon item position
ti_on_missile_hit = -52.0
# trigger param 1: shooter agent id
# trigger param 2: collision_type: 0 = world terrain, 1 = hostile agent, 2 = dynamic prop, 3 = world prop (flora), 4 = mission object (scene prop), 8 = friendly agent, 9 = neutral agent, 10 = under water
# pos1: missile position
ti_on_shield_hit = -80.0
# trigger param 1: defender agent id
# trigger param 2: attacker agent id
# trigger param 3: inflicted damage
# trigger param 4: weapon item id
# trigger param 5: missile item id
# trigger result: if set, override damage dealt to shield
#-----------------------------------------------------------------------------
# MISSION TEMPLATE TRIGGERS
#-----------------------------------------------------------------------------
ti_on_item_picked_up = -53.0
# trigger param 1: agent id
# trigger param 2: item id
# trigger param 3: prop instance id (will be deleted after this trigger)
ti_on_item_dropped = -54.0
# trigger param 1: agent id
# trigger param 2: item id
# trigger param 3: prop instance id
ti_on_agent_mount = -55.0
# trigger param 1: agent id
# trigger param 2: horse agent id
ti_on_agent_dismount = -56.0
# trigger param 1: agent id
# trigger param 2: horse agent id
ti_on_item_wielded = -57.0
# trigger param 1: agent id
# trigger param 2: item id
ti_on_item_unwielded = -58.0
# trigger param 1: agent id
# trigger param 2: item id
ti_on_order_issued = -71.0
# trigger param 1: order no
# trigger param 2: agent id
#-----------------------------------------------------------------------------
# PRESENTATION TRIGGERS
#-----------------------------------------------------------------------------
ti_on_presentation_load = -60.0
ti_on_presentation_run = -61.0
# trigger param 1: current time in miliseconds
ti_on_presentation_event_state_change = -62.0
# trigger param 1: object (overlay) id
# trigger param 2: value (when available)
ti_on_presentation_mouse_enter_leave = -63.0
# trigger param 1: object (overlay) id that the mouse enters or leaves
# trigger param 2: 0 = enters, 1 = leaves
ti_on_presentation_mouse_press = -64.0
# trigger param 1: object(overlay) id that the mouse is pressed on
# trigger param 2: 0 = left button, 1 = right button, 2 = middle button
#-----------------------------------------------------------------------------
# MAP TRIGGERS
#-----------------------------------------------------------------------------
ti_on_init_map_icon = -70.0
# trigger param 1: id of the owner party
ti_on_switch_to_map = -75.0
# Reverse lookup table mapping each trigger time constant (all of which
# are floats, unlike the integer key/game-key codes) back to its variable
# name. dict.items() is used instead of the Python-2-only iteritems(), so
# this works under both Python 2 and Python 3.
trigger_names = dict((trigger, name) for name, trigger in globals().items()
                     if isinstance(trigger, float))

def get_trigger_name(trigger):
    """Return the symbolic name of a trigger constant.

    Falls back to repr() for unknown values and for unhashable
    arguments (which raise TypeError on dict lookup).
    """
    try:
        return trigger_names[trigger]
    except (KeyError, TypeError):
        return repr(trigger)
# keys that can be checked by key_is_down and key_clicked
key_1 = 0x02
key_2 = 0x03
key_3 = 0x04
key_4 = 0x05
key_5 = 0x06
key_6 = 0x07
key_7 = 0x08
key_8 = 0x09
key_9 = 0x0a
key_0 = 0x0b
key_a = 0x1e
key_b = 0x30
key_c = 0x2e
key_d = 0x20
key_e = 0x12
key_f = 0x21
key_g = 0x22
key_h = 0x23
key_i = 0x17
key_j = 0x24
key_k = 0x25
key_l = 0x26
key_m = 0x32
key_n = 0x31
key_o = 0x18
key_p = 0x19
key_q = 0x10
key_r = 0x13
key_s = 0x1f
key_t = 0x14
key_u = 0x16
key_v = 0x2f
key_w = 0x11
key_x = 0x2d
key_y = 0x15
key_z = 0x2c
key_numpad_0 = 0x52
key_numpad_1 = 0x4f
key_numpad_2 = 0x50
key_numpad_3 = 0x51
key_numpad_4 = 0x4b
key_numpad_5 = 0x4c
key_numpad_6 = 0x4d
key_numpad_7 = 0x47
key_numpad_8 = 0x48
key_numpad_9 = 0x49
key_num_lock = 0x45
key_numpad_slash = 0xb5
key_numpad_multiply = 0x37
key_numpad_minus = 0x4a
key_numpad_plus = 0x4e
key_numpad_enter = 0x9c
key_numpad_period = 0x53
key_insert = 0xd2
key_delete = 0xd3
key_home = 0xc7
key_end = 0xcf
key_page_up = 0xc9
key_page_down = 0xd1
key_up = 0xc8
key_down = 0xd0
key_left = 0xcb
key_right = 0xcd
key_f1 = 0x3b
key_f2 = 0x3c
key_f3 = 0x3d
key_f4 = 0x3e
key_f5 = 0x3f
key_f6 = 0x40
key_f7 = 0x41
key_f8 = 0x42
key_f9 = 0x43
key_f10 = 0x44
key_f11 = 0x57
key_f12 = 0x58
key_space = 0x39
key_escape = 0x01
key_enter = 0x1c
key_tab = 0x0f
key_back_space = 0x0e
key_open_braces = 0x1a
key_close_braces = 0x1b
key_comma = 0x33
key_period = 0x34
key_slash = 0x35
key_back_slash = 0x2b
key_equals = 0x0d
key_minus = 0x0c
key_semicolon = 0x27
key_apostrophe = 0x28
key_tilde = 0x29
key_caps_lock = 0x3a
key_left_shift = 0x2a
key_right_shift = 0x36
key_left_control = 0x1d
key_right_control = 0x9d
key_left_alt = 0x38
key_right_alt = 0xb8
key_left_mouse_button = 0xe0
key_right_mouse_button = 0xe1
key_middle_mouse_button = 0xe2
key_mouse_button_4 = 0xe3
key_mouse_button_5 = 0xe4
key_mouse_button_6 = 0xe5
key_mouse_button_7 = 0xe6
key_mouse_button_8 = 0xe7
key_mouse_scroll_up = 0xee
key_mouse_scroll_down = 0xef
key_xbox_a = 0xf0
key_xbox_b = 0xf1
key_xbox_x = 0xf2
key_xbox_y = 0xf3
key_xbox_dpad_up = 0xf4
key_xbox_dpad_down = 0xf5
key_xbox_dpad_right = 0xf6
key_xbox_dpad_left = 0xf7
key_xbox_start = 0xf8
key_xbox_back = 0xf9
key_xbox_rbumper = 0xfa
key_xbox_lbumper = 0xfb
key_xbox_ltrigger = 0xfc
key_xbox_rtrigger = 0xfd
key_xbox_rstick = 0xfe
key_xbox_lstick = 0xff
# game keys that can be checked by game_key_is_down and game_key_clicked
gk_move_forward = 0
gk_move_backward = 1
gk_move_left = 2
gk_move_right = 3
gk_action = 4
gk_jump = 5
gk_attack = 6
gk_defend = 7
gk_kick = 8
gk_toggle_weapon_mode = 9
gk_equip_weapon_1 = 10
gk_equip_weapon_2 = 11
gk_equip_weapon_3 = 12
gk_equip_weapon_4 = 13
gk_equip_primary_weapon = 14
gk_equip_secondary_weapon = 15
gk_drop_weapon = 16
gk_sheath_weapon = 17
gk_leave = 18
gk_zoom = 19
gk_view_char = 20
gk_cam_toggle = 21
gk_view_orders = 22
gk_order_1 = 23
gk_order_2 = 24
gk_order_3 = 25
gk_order_4 = 26
gk_order_5 = 27
gk_order_6 = 28
gk_everyone_hear = 29
gk_infantry_hear = 30
gk_archers_hear = 31
gk_cavalry_hear = 32
gk_group0_hear = gk_infantry_hear
gk_group1_hear = gk_archers_hear
gk_group2_hear = gk_cavalry_hear
gk_group3_hear = 33
gk_group4_hear = 34
gk_group5_hear = 35
gk_group6_hear = 36
gk_group7_hear = 37
gk_group8_hear = 38
gk_reverse_order_group = 39
gk_everyone_around_hear = 40
gk_mp_message_all = 41
gk_mp_message_team = 42
gk_character_window = 43
gk_inventory_window = 44
gk_party_window = 45
gk_quests_window = 46
gk_game_log_window = 47
gk_quick_save = 48
gk_crouch = 49
gk_order_7 = 50
gk_order_8 = 51
gk_stats_chart = gk_leave
gk_local_chat = gk_quests_window
gk_faction_chat = gk_inventory_window
gk_admin_chat = gk_view_orders
gk_money_bag = gk_party_window
gk_target_agent = gk_quick_save
gk_action_menu = gk_character_window
gk_animation_menu = gk_everyone_around_hear
# trigger positions: indices of the fields within a trigger tuple,
# in the order (check, delay, rearm, conditions, consequences)
trigger_check_pos = 0
trigger_delay_pos = 1
trigger_rearm_pos = 2
trigger_conditions_pos = 3
trigger_consequences_pos = 4
| bsd-3-clause |
jhnnsnk/nest-simulator | pynest/nest/tests/test_regression_issue-1409.py | 19 | 2198 | # -*- coding: utf-8 -*-
#
# test_regression_issue-1409.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import numpy as np
import unittest
HAVE_OPENMP = nest.ll_api.sli_func("is_threaded")


@unittest.skipIf(not HAVE_OPENMP, 'NEST was compiled without multi-threading')
class MultiplePoissonGeneratorsTestCase(unittest.TestCase):

    def test_multiple_poisson_generators(self):
        """Invariable number of spikes with multiple poisson generators"""
        n_threads = 4
        sim_time = 100
        n_parrots = 1
        n_generators = 50
        n_repeats = 50

        spike_counts = []
        for trial in range(n_repeats):
            # start every repetition from a fresh kernel so that the
            # trials are fully independent and identically seeded
            nest.ResetKernel()
            nest.SetKernelStatus({'local_num_threads': n_threads})
            nest.set_verbosity('M_WARNING')
            print('num iter {:>5d}/{}'.format(trial + 1, n_repeats), end='\r')

            parrots = nest.Create('parrot_neuron', n_parrots)
            generators = nest.Create('poisson_generator', n_generators)
            generators.rate = 2000.
            nest.Connect(generators, parrots, 'all_to_all')
            nest.Simulate(sim_time)

            spike_counts.append(nest.GetKernelStatus('local_spike_counter'))

        # every repetition must have produced exactly the same spike count
        self.assertEqual(len(set(spike_counts)), 1)
def suite():
    """Collect this module's test cases into a single test suite."""
    cases = unittest.TestLoader().loadTestsFromTestCase(
        MultiplePoissonGeneratorsTestCase)
    return unittest.TestSuite([cases])
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
| gpl-2.0 |
2014cdbg3/2015cdbg9 | static/Brython3.1.1-20150328-091302/Lib/fractions.py | 722 | 23203 | # Originally contributed by Sjoerd Mullender.
# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
"""Fraction, infinite-precision, real numbers."""
from decimal import Decimal
import math
import numbers
import operator
import re
import sys
__all__ = ['Fraction', 'gcd']
def gcd(a, b):
    """Calculate the Greatest Common Divisor of a and b.

    Unless b==0, the result will have the same sign as b (so that when
    b is divided by it, the result comes out positive).
    """
    # Euclid's algorithm, written recursively. Python's % takes the sign
    # of the divisor, which produces the sign behaviour documented above.
    if b == 0:
        return a
    return gcd(b, a % b)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo the prime _PyHASH_MODULUS.
_PyHASH_MODULUS = sys.hash_info.modulus
# Value to be used for rationals that reduce to infinity modulo
# _PyHASH_MODULUS.
_PyHASH_INF = sys.hash_info.inf
_RATIONAL_FORMAT = re.compile(r"""
\A\s* # optional whitespace at the start, then
(?P<sign>[-+]?) # an optional sign, then
(?=\d|\.\d) # lookahead for digit or .digit
(?P<num>\d*) # numerator (possibly empty)
(?: # followed by
(?:/(?P<denom>\d+))? # an optional denominator
| # or
(?:\.(?P<decimal>\d*))? # an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and optional exponent
)
\s*\Z # and optional whitespace to finish
""", re.VERBOSE | re.IGNORECASE)
class Fraction(numbers.Rational):
"""This class implements rational numbers.
In the two-argument form of the constructor, Fraction(8, 6) will
produce a rational number equivalent to 4/3. Both arguments must
be Rational. The numerator defaults to 0 and the denominator
defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
Fractions can also be constructed from:
- numeric strings similar to those accepted by the
float constructor (for example, '-2.3' or '1e10')
- strings of the form '123/456'
- float and Decimal instances
- other Rational instances (including integers)
"""
__slots__ = ('_numerator', '_denominator')
# We're immutable, so use __new__ not __init__
def __new__(cls, numerator=0, denominator=None):
"""Constructs a Rational.
Takes a string like '3/2' or '1.5', another Rational instance, a
numerator/denominator pair, or a float.
Examples
--------
>>> Fraction(10, -8)
Fraction(-5, 4)
>>> Fraction(Fraction(1, 7), 5)
Fraction(1, 35)
>>> Fraction(Fraction(1, 7), Fraction(2, 3))
Fraction(3, 14)
>>> Fraction('314')
Fraction(314, 1)
>>> Fraction('-35/4')
Fraction(-35, 4)
>>> Fraction('3.1415') # conversion from numeric string
Fraction(6283, 2000)
>>> Fraction('-47e-2') # string may include a decimal exponent
Fraction(-47, 100)
>>> Fraction(1.47) # direct construction from float (exact conversion)
Fraction(6620291452234629, 4503599627370496)
>>> Fraction(2.25)
Fraction(9, 4)
>>> Fraction(Decimal('1.47'))
Fraction(147, 100)
"""
self = super(Fraction, cls).__new__(cls)
if denominator is None:
if isinstance(numerator, numbers.Rational):
self._numerator = numerator.numerator
self._denominator = numerator.denominator
return self
elif isinstance(numerator, float):
# Exact conversion from float
value = Fraction.from_float(numerator)
self._numerator = value._numerator
self._denominator = value._denominator
return self
elif isinstance(numerator, Decimal):
value = Fraction.from_decimal(numerator)
self._numerator = value._numerator
self._denominator = value._denominator
return self
elif isinstance(numerator, str):
# Handle construction from strings.
m = _RATIONAL_FORMAT.match(numerator)
if m is None:
raise ValueError('Invalid literal for Fraction: %r' %
numerator)
numerator = int(m.group('num') or '0')
denom = m.group('denom')
if denom:
denominator = int(denom)
else:
denominator = 1
decimal = m.group('decimal')
if decimal:
scale = 10**len(decimal)
numerator = numerator * scale + int(decimal)
denominator *= scale
exp = m.group('exp')
if exp:
exp = int(exp)
if exp >= 0:
numerator *= 10**exp
else:
denominator *= 10**-exp
if m.group('sign') == '-':
numerator = -numerator
else:
raise TypeError("argument should be a string "
"or a Rational instance")
elif (isinstance(numerator, numbers.Rational) and
isinstance(denominator, numbers.Rational)):
numerator, denominator = (
numerator.numerator * denominator.denominator,
denominator.numerator * numerator.denominator
)
else:
raise TypeError("both arguments should be "
"Rational instances")
if denominator == 0:
raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
g = gcd(numerator, denominator)
self._numerator = numerator // g
self._denominator = denominator // g
return self
    @classmethod
    def from_float(cls, f):
        """Converts a finite float to a rational number, exactly.

        Beware that Fraction.from_float(0.3) != Fraction(3, 10).
        """
        # ints (and bools, which are Integral) are accepted as a convenience
        if isinstance(f, numbers.Integral):
            return cls(f)
        elif not isinstance(f, float):
            raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
                            (cls.__name__, f, type(f).__name__))
        # NaN and the infinities have no exact rational representation
        if math.isnan(f):
            raise ValueError("Cannot convert %r to %s." % (f, cls.__name__))
        if math.isinf(f):
            raise OverflowError("Cannot convert %r to %s." % (f, cls.__name__))
        # float.as_integer_ratio() yields the exact (numerator, denominator)
        return cls(*f.as_integer_ratio())
    @classmethod
    def from_decimal(cls, dec):
        """Converts a finite Decimal instance to a rational number, exactly."""
        from decimal import Decimal
        # ints (and other Integral types) are accepted as a convenience
        if isinstance(dec, numbers.Integral):
            dec = Decimal(int(dec))
        elif not isinstance(dec, Decimal):
            raise TypeError(
                "%s.from_decimal() only takes Decimals, not %r (%s)" %
                (cls.__name__, dec, type(dec).__name__))
        # infinities and NaN have no exact rational representation
        if dec.is_infinite():
            raise OverflowError(
                "Cannot convert %s to %s." % (dec, cls.__name__))
        if dec.is_nan():
            raise ValueError("Cannot convert %s to %s." % (dec, cls.__name__))
        # rebuild the exact value from Decimal's (sign, digits, exponent)
        sign, digits, exp = dec.as_tuple()
        digits = int(''.join(map(str, digits)))
        if sign:
            digits = -digits
        if exp >= 0:
            return cls(digits * 10 ** exp)
        else:
            return cls(digits, 10 ** -exp)
def limit_denominator(self, max_denominator=1000000):
"""Closest Fraction to self with denominator at most max_denominator.
>>> Fraction('3.141592653589793').limit_denominator(10)
Fraction(22, 7)
>>> Fraction('3.141592653589793').limit_denominator(100)
Fraction(311, 99)
>>> Fraction(4321, 8765).limit_denominator(10000)
Fraction(4321, 8765)
"""
# Algorithm notes: For any real number x, define a *best upper
# approximation* to x to be a rational number p/q such that:
#
# (1) p/q >= x, and
# (2) if p/q > r/s >= x then s > q, for any rational r/s.
#
# Define *best lower approximation* similarly. Then it can be
# proved that a rational number is a best upper or lower
# approximation to x if, and only if, it is a convergent or
# semiconvergent of the (unique shortest) continued fraction
# associated to x.
#
# To find a best rational approximation with denominator <= M,
# we find the best upper and lower approximations with
# denominator <= M and take whichever of these is closer to x.
# In the event of a tie, the bound with smaller denominator is
# chosen. If both denominators are equal (which can happen
# only when max_denominator == 1 and self is midway between
# two integers) the lower bound---i.e., the floor of self, is
# taken.
if max_denominator < 1:
raise ValueError("max_denominator should be at least 1")
if self._denominator <= max_denominator:
return Fraction(self)
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = self._numerator, self._denominator
while True:
a = n//d
q2 = q0+a*q1
if q2 > max_denominator:
break
p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
n, d = d, n-a*d
k = (max_denominator-q0)//q1
bound1 = Fraction(p0+k*p1, q0+k*q1)
bound2 = Fraction(p1, q1)
if abs(bound2 - self) <= abs(bound1-self):
return bound2
else:
return bound1
    @property
    def numerator(a):
        """Numerator of the fraction in lowest terms (read-only)."""
        return a._numerator

    @property
    def denominator(a):
        """Denominator of the fraction in lowest terms (read-only)."""
        return a._denominator
def __repr__(self):
"""repr(self)"""
return ('Fraction(%s, %s)' % (self._numerator, self._denominator))
def __str__(self):
"""str(self)"""
if self._denominator == 1:
return str(self._numerator)
else:
return '%s/%s' % (self._numerator, self._denominator)
def _operator_fallbacks(monomorphic_operator, fallback_operator):
"""Generates forward and reverse operators given a purely-rational
operator and a function from the operator module.
Use this like:
__op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)
In general, we want to implement the arithmetic operations so
that mixed-mode operations either call an implementation whose
author knew about the types of both arguments, or convert both
to the nearest built in type and do the operation there. In
Fraction, that means that we define __add__ and __radd__ as:
def __add__(self, other):
# Both types have numerators/denominator attributes,
# so do the operation directly
if isinstance(other, (int, Fraction)):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
# float and complex don't have those operations, but we
# know about those types, so special case them.
elif isinstance(other, float):
return float(self) + other
elif isinstance(other, complex):
return complex(self) + other
# Let the other type take over.
return NotImplemented
def __radd__(self, other):
# radd handles more types than add because there's
# nothing left to fall back to.
if isinstance(other, numbers.Rational):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
elif isinstance(other, Real):
return float(other) + float(self)
elif isinstance(other, Complex):
return complex(other) + complex(self)
return NotImplemented
There are 5 different cases for a mixed-type addition on
Fraction. I'll refer to all of the above code that doesn't
refer to Fraction, float, or complex as "boilerplate". 'r'
will be an instance of Fraction, which is a subtype of
Rational (r : Fraction <: Rational), and b : B <:
Complex. The first three involve 'r + b':
1. If B <: Fraction, int, float, or complex, we handle
that specially, and all is well.
2. If Fraction falls back to the boilerplate code, and it
were to return a value from __add__, we'd miss the
possibility that B defines a more intelligent __radd__,
so the boilerplate should return NotImplemented from
__add__. In particular, we don't handle Rational
here, even though we could get an exact answer, in case
the other type wants to do something special.
3. If B <: Fraction, Python tries B.__radd__ before
Fraction.__add__. This is ok, because it was
implemented with knowledge of Fraction, so it can
handle those instances before delegating to Real or
Complex.
The next two situations describe 'b + r'. We assume that b
didn't know about Fraction in its implementation, and that it
uses similar boilerplate code:
4. If B <: Rational, then __radd_ converts both to the
builtin rational type (hey look, that's us) and
proceeds.
5. Otherwise, __radd__ tries to find the nearest common
base ABC, and fall back to its builtin type. Since this
class doesn't subclass a concrete type, there's no
implementation to fall back to, so we need to try as
hard as possible to return an actual value, or the user
will get a TypeError.
"""
def forward(a, b):
if isinstance(b, (int, Fraction)):
return monomorphic_operator(a, b)
elif isinstance(b, float):
return fallback_operator(float(a), b)
elif isinstance(b, complex):
return fallback_operator(complex(a), b)
else:
return NotImplemented
forward.__name__ = '__' + fallback_operator.__name__ + '__'
forward.__doc__ = monomorphic_operator.__doc__
def reverse(b, a):
if isinstance(a, numbers.Rational):
# Includes ints.
return monomorphic_operator(a, b)
elif isinstance(a, numbers.Real):
return fallback_operator(float(a), float(b))
elif isinstance(a, numbers.Complex):
return fallback_operator(complex(a), complex(b))
else:
return NotImplemented
reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
reverse.__doc__ = monomorphic_operator.__doc__
return forward, reverse
    def _add(a, b):
        """a + b"""
        # exact cross-multiplied sum; the Fraction constructor normalizes
        return Fraction(a.numerator * b.denominator +
                        b.numerator * a.denominator,
                        a.denominator * b.denominator)

    # _operator_fallbacks wires up mixed-type dispatch (int/Fraction stay
    # exact; float/complex operands fall back to the builtin operator)
    __add__, __radd__ = _operator_fallbacks(_add, operator.add)

    def _sub(a, b):
        """a - b"""
        return Fraction(a.numerator * b.denominator -
                        b.numerator * a.denominator,
                        a.denominator * b.denominator)

    __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)

    def _mul(a, b):
        """a * b"""
        return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)

    __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)

    def _div(a, b):
        """a / b"""
        # a ZeroDivisionError for b == 0 comes from the Fraction constructor
        return Fraction(a.numerator * b.denominator,
                        a.denominator * b.numerator)

    __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
    def __floordiv__(a, b):
        """a // b"""
        # true division first (exact for rational operands), then floor
        return math.floor(a / b)

    def __rfloordiv__(b, a):
        """a // b"""
        return math.floor(a / b)

    def __mod__(a, b):
        """a % b"""
        # defined so that a == (a // b) * b + (a % b)
        div = a // b
        return a - b * div

    def __rmod__(b, a):
        """a % b"""
        div = a // b
        return a - b * div
    def __pow__(a, b):
        """a ** b

        If b is not an integer, the result will be a float or complex
        since roots are generally irrational. If b is an integer, the
        result will be rational.
        """
        if isinstance(b, numbers.Rational):
            if b.denominator == 1:
                # integral exponent: stay exact; invert for negative powers
                power = b.numerator
                if power >= 0:
                    return Fraction(a._numerator ** power,
                                    a._denominator ** power)
                else:
                    return Fraction(a._denominator ** -power,
                                    a._numerator ** -power)
            else:
                # A fractional power will generally produce an
                # irrational number.
                return float(a) ** float(b)
        else:
            # non-rational exponent (e.g. float, complex): defer to its **
            return float(a) ** b

    def __rpow__(b, a):
        """a ** b"""
        if b._denominator == 1 and b._numerator >= 0:
            # If a is an int, keep it that way if possible.
            return a ** b._numerator

        if isinstance(a, numbers.Rational):
            # exact path: convert the base to Fraction and reuse __pow__
            return Fraction(a.numerator, a.denominator) ** b

        if b._denominator == 1:
            # negative integral exponent with a non-rational base
            return a ** b._numerator

        return a ** float(b)
    def __pos__(a):
        """+a: Coerces a subclass instance to Fraction"""
        return Fraction(a._numerator, a._denominator)
    def __neg__(a):
        """-a"""
        # The sign always lives on the numerator.
        return Fraction(-a._numerator, a._denominator)
    def __abs__(a):
        """abs(a)"""
        # The denominator is kept positive, so only the numerator needs abs().
        return Fraction(abs(a._numerator), a._denominator)
    def __trunc__(a):
        """trunc(a)"""
        # Truncate toward zero: floor-divide the magnitude, then
        # reapply the sign.
        if a._numerator < 0:
            return -(-a._numerator // a._denominator)
        else:
            return a._numerator // a._denominator
    def __floor__(a):
        """Will be math.floor(a) in 3.0."""
        # Python's floor division already rounds toward -infinity.
        return a.numerator // a.denominator
    def __ceil__(a):
        """Will be math.ceil(a) in 3.0."""
        # The negations cleverly convince floordiv to return the ceiling.
        return -(-a.numerator // a.denominator)
    def __round__(self, ndigits=None):
        """Will be round(self, ndigits) in 3.0.
        Rounds half toward even.
        """
        if ndigits is None:
            floor, remainder = divmod(self.numerator, self.denominator)
            if remainder * 2 < self.denominator:
                return floor
            elif remainder * 2 > self.denominator:
                return floor + 1
            # Deal with the half case:
            # exactly halfway -> round to the even neighbour
            # (banker's rounding), matching round() on floats.
            elif floor % 2 == 0:
                return floor
            else:
                return floor + 1
        shift = 10**abs(ndigits)
        # See _operator_fallbacks.forward to check that the results of
        # these operations will always be Fraction and therefore have
        # round().
        if ndigits > 0:
            return Fraction(round(self * shift), shift)
        else:
            return Fraction(round(self / shift) * shift)
    def __hash__(self):
        """hash(self)"""
        # XXX since this method is expensive, consider caching the result
        # In order to make sure that the hash of a Fraction agrees
        # with the hash of a numerically equal integer, float or
        # Decimal instance, we follow the rules for numeric hashes
        # outlined in the documentation. (See library docs, 'Built-in
        # Types').
        # dinv is the inverse of self._denominator modulo the prime
        # _PyHASH_MODULUS, or 0 if self._denominator is divisible by
        # _PyHASH_MODULUS.  (Fermat's little theorem: d**(P-2) == d**-1
        # (mod P) for prime P when P does not divide d.)
        dinv = pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
        if not dinv:
            # Denominator divisible by the modulus: hash like infinity.
            hash_ = _PyHASH_INF
        else:
            hash_ = abs(self._numerator) * dinv % _PyHASH_MODULUS
        result = hash_ if self >= 0 else -hash_
        # CPython reserves -1 as an error code for hashes; remap it.
        return -2 if result == -1 else result
    def __eq__(a, b):
        """a == b"""
        if isinstance(b, numbers.Rational):
            # Both sides exact: compare numerators/denominators directly
            # (fractions are stored in lowest terms).
            return (a._numerator == b.numerator and
                    a._denominator == b.denominator)
        if isinstance(b, numbers.Complex) and b.imag == 0:
            b = b.real
        if isinstance(b, float):
            if math.isnan(b) or math.isinf(b):
                # comparisons with an infinity or nan should behave in
                # the same way for any finite a, so treat a as zero.
                return 0.0 == b
            else:
                # Convert the float exactly to a fraction and compare.
                return a == a.from_float(b)
        else:
            # Since a doesn't know how to compare with b, let's give b
            # a chance to compare itself with a.
            return NotImplemented
    def _richcmp(self, other, op):
        """Helper for comparison operators, for internal use only.
        Implement comparison between a Rational instance `self`, and
        either another Rational instance or a float `other`. If
        `other` is not a Rational instance or a float, return
        NotImplemented. `op` should be one of the six standard
        comparison operators.
        """
        # convert other to a Rational instance where reasonable.
        if isinstance(other, numbers.Rational):
            # Cross-multiply; denominators are positive, so the
            # direction of the comparison is preserved.
            return op(self._numerator * other.denominator,
                      self._denominator * other.numerator)
        if isinstance(other, float):
            if math.isnan(other) or math.isinf(other):
                # As in __eq__, any finite self behaves like zero here.
                return op(0.0, other)
            else:
                return op(self, self.from_float(other))
        else:
            return NotImplemented
    # All four orderings delegate to _richcmp with the matching operator.
    def __lt__(a, b):
        """a < b"""
        return a._richcmp(b, operator.lt)
    def __gt__(a, b):
        """a > b"""
        return a._richcmp(b, operator.gt)
    def __le__(a, b):
        """a <= b"""
        return a._richcmp(b, operator.le)
    def __ge__(a, b):
        """a >= b"""
        return a._richcmp(b, operator.ge)
    def __bool__(a):
        """a != 0"""
        # A fraction is truthy iff its numerator is nonzero.
        return a._numerator != 0
    # support for pickling, copy, and deepcopy
    def __reduce__(self):
        # Serialize via the string form; the constructor parses it back.
        return (self.__class__, (str(self),))
    def __copy__(self):
        if type(self) == Fraction:
            return self     # I'm immutable; therefore I am my own clone
        # Subclass instances are rebuilt so copies are real copies.
        return self.__class__(self._numerator, self._denominator)
    def __deepcopy__(self, memo):
        if type(self) == Fraction:
            return self     # My components are also immutable
        return self.__class__(self._numerator, self._denominator)
| gpl-3.0 |
PsychoTV/PsychoTeam.repository | plugin.video.specto/resources/lib/libraries/favourites.py | 14 | 3253 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
try:
from sqlite3 import dbapi2 as database
except:
from pysqlite2 import dbapi2 as database
import json
from resources.lib.libraries import control
def getFavourites(content):
    """Return all favourites stored in table *content*.
    Each row comes back as an (id, item_dict) tuple; item_dict is
    rebuilt with eval() from the repr() stored by addFavourite.
    On any error (including the table not existing yet) an empty list
    is returned, preserving the original best-effort behaviour.
    """
    items = []
    dbcon = None
    try:
        dbcon = database.connect(control.favouritesFile)
        dbcur = dbcon.cursor()
        # NOTE: the table name cannot be bound as a SQL parameter, so
        # *content* must only ever be a trusted, internally generated name.
        dbcur.execute("SELECT * FROM %s" % content)
        items = dbcur.fetchall()
        # NOTE(review): eval() on stored text is only safe because the
        # values were written by addFavourite via repr().
        items = [(i[0].encode('utf-8'), eval(i[1].encode('utf-8'))) for i in items]
    except:
        items = []
    finally:
        # The original leaked the connection; always release it.
        if dbcon is not None:
            dbcon.close()
    return items
def addFavourite(meta, content, query):
    """Insert or replace a favourite in table *content*.
    *meta* is a JSON string of item metadata; the row id is the imdb id
    (falling back to tvdb).  Shows a confirmation dialog on success and
    silently returns on any error, as before.
    """
    dbcon = None
    try:
        item = dict()
        meta = json.loads(meta)
        try: id = meta['imdb']
        except: id = meta['tvdb']
        if 'title' in meta: title = item['title'] = meta['title']
        if 'tvshowtitle' in meta: title = item['title'] = meta['tvshowtitle']
        if 'year' in meta: item['year'] = meta['year']
        if 'poster' in meta: item['poster'] = meta['poster']
        if 'fanart' in meta: item['fanart'] = meta['fanart']
        if 'imdb' in meta: item['imdb'] = meta['imdb']
        if 'tmdb' in meta: item['tmdb'] = meta['tmdb']
        if 'tvdb' in meta: item['tvdb'] = meta['tvdb']
        if 'tvrage' in meta: item['tvrage'] = meta['tvrage']
        control.makeFile(control.dataPath)
        dbcon = database.connect(control.favouritesFile)
        dbcur = dbcon.cursor()
        # Table names cannot be parameterized; *content* must be trusted.
        dbcur.execute("CREATE TABLE IF NOT EXISTS %s (""id TEXT, ""items TEXT, ""UNIQUE(id)"");" % content)
        # Bind the id as a parameter (the original interpolated it into
        # the SQL string, which broke on ids containing quotes).
        dbcur.execute("DELETE FROM %s WHERE id = ?" % content, (id,))
        dbcur.execute("INSERT INTO %s Values (?, ?)" % content, (id, repr(item)))
        dbcon.commit()
        if query == None: control.refresh()
        control.infoDialog(control.lang(30411).encode('utf-8'), heading=title)
    except:
        return
    finally:
        # The original leaked the connection; always release it.
        if dbcon is not None:
            dbcon.close()
def deleteFavourite(meta, content):
    """Delete a favourite from table *content* by its imdb or tvdb id.
    *meta* is a JSON string; missing ids and database errors are
    ignored (best effort), matching the original behaviour.
    """
    dbcon = None
    try:
        meta = json.loads(meta)
        if 'title' in meta: title = meta['title']
        if 'tvshowtitle' in meta: title = meta['tvshowtitle']
        try:
            dbcon = database.connect(control.favouritesFile)
            dbcur = dbcon.cursor()
            # Try both id namespaces; a missing key or SQL error for one
            # must not prevent deletion under the other.  Ids are bound
            # as parameters (the original interpolated them into SQL).
            try: dbcur.execute("DELETE FROM %s WHERE id = ?" % content, (meta['imdb'],))
            except: pass
            try: dbcur.execute("DELETE FROM %s WHERE id = ?" % content, (meta['tvdb'],))
            except: pass
            dbcon.commit()
        except:
            pass
        control.refresh()
        control.infoDialog(control.lang(30412).encode('utf-8'), heading=title)
    except:
        return
    finally:
        # The original leaked the connection; always release it.
        if dbcon is not None:
            dbcon.close()
| gpl-2.0 |
adaussy/eclipse-monkey-revival | plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_file_newlines.py | 24 | 9997 | """Test handling of newlines via file's read and readline
Made for Jython.
"""
import os
import sys
import tempfile
import test.test_support as test_support
import unittest
# A bare-'\r' line separator (classic Mac OS) is not supported here.
assert not os.linesep == '\r', ('os.linesep of %r is not supported' %
                                os.linesep)
# Which newline convention the host platform uses.
LF = os.linesep == '\n'
CRLF = os.linesep == '\r\n'
# Sample data exercising each newline style plus an unterminated tail.
CRLF_TEST = 'CR\rLF\nCRLF\r\nEOF'
if sys.platform.startswith('java'):
    from org.python.core.io import TextIOBase
    # Size of the readahead chunk used by Jython's text I/O layer.
    READAHEAD_SIZE = TextIOBase.CHUNK_SIZE
else:
    READAHEAD_SIZE = 300
class BaseTestCase(unittest.TestCase):
    """Write self.data to a temp file, then reopen it for reading.
    Subclasses override data/write_mode/mode/bufsize to exercise the
    various newline-translation modes.
    """
    data = CRLF_TEST
    write_mode = 'wb'
    mode = 'r'
    bufsize = -1
    def setUp(self):
        # NOTE: mktemp is race-prone in general, but acceptable for this
        # single-process test fixture.
        self.filename = tempfile.mktemp()
        self.write_fp = open(self.filename, self.write_mode, self.bufsize)
        self.write_fp.write(self.data)
        # Flush so self.fp sees the data even with buffering enabled.
        self.write_fp.flush()
        self.fp = open(self.filename, self.mode, self.bufsize)
    def tearDown(self):
        if self.write_fp:
            self.write_fp.close()
        if self.fp:
            self.fp.close()
        os.remove(self.filename)
class ReadBinaryNewlinesTestCase(BaseTestCase):
    """Binary mode must return every byte untranslated."""
    mode = 'rb'
    def test_binary_read(self):
        # Whole-file, exact-size and piecewise reads all see raw bytes.
        read(self.fp, CRLF_TEST)
        self.fp.seek(0)
        read(self.fp, CRLF_TEST, len(CRLF_TEST))
        self.fp.seek(0)
        read(self.fp, 'CR\r', 3)
        read(self.fp, 'LF\n', 3)
        read(self.fp, 'CRLF\r\n', 6)
        read(self.fp, 'EOF', 3)
    def test_binary_readline(self):
        # Only '\n' terminates a line in binary mode, so the lone '\r'
        # does not split the first line.
        readline(self.fp, 'CR\rLF\n')
        readline(self.fp, 'CRLF\r\n')
        readline(self.fp, 'EOF')
        self.fp.seek(0)
        readline(self.fp, 'CR\rLF\n', 6)
        readline(self.fp, 'CRLF\r\n', 6)
        readline(self.fp, 'EOF', 3)
if LF:
    # On LF platforms text mode performs no translation, so the binary
    # expectations apply unchanged.
    class ReadTextNewlinesTestCase(ReadBinaryNewlinesTestCase):
        mode = 'r'
else:
    # CRLF platform: text mode collapses '\r\n' pairs to '\n'.
    class ReadTextNewlinesTestCase(BaseTestCase):
        def test_text_read(self):
            read(self.fp, 'CR\rLF\nCRLF\nEOF')
            self.fp.seek(0)
            read(self.fp, 'CR\rLF\nCRLF\nEOF', len('CR\rLF\nCRLF\nEOF'))
            self.fp.seek(0)
            read(self.fp, 'CR\r', 3)
            read(self.fp, 'LF\n', 3)
            read(self.fp, 'CRLF\n', 5)
            read(self.fp, 'EOF', 3)
        def test_text_readline(self):
            readline(self.fp, 'CR\rLF\n')
            readline(self.fp, 'CRLF\n')
            readline(self.fp, 'EOF')
            self.fp.seek(0)
            readline(self.fp, 'CR\rLF\n', 6)
            readline(self.fp, 'CRLF\n', 5)
            readline(self.fp, 'EOF', 3)
# A file ending in a bare '\r' must still be read back in full.
class ReadTextBasicBoundaryTestCase(BaseTestCase):
    data = 'CR\r'
    read_data = 'CR\r'
    def test_read_basic_boundary(self):
        read(self.fp, self.read_data)
        self.fp.seek(0)
        read(self.fp, self.read_data, 3)
    def test_readline_basic_boundary(self):
        readline(self.fp, self.read_data)
        self.fp.seek(0)
        readline(self.fp, self.read_data, 3)
class ReadUniversalBasicBoundaryTestCase(ReadTextBasicBoundaryTestCase):
    # Universal mode translates the trailing '\r' to '\n'.
    mode = 'U'
    read_data = 'CR\n'
class BinaryReadaheadBoundaryTestCase(BaseTestCase):
    # Data sized so the newline sits right at the readahead buffer edge.
    mode = 'rb'
    data = 'foo\n' + ('x' * READAHEAD_SIZE)
    def test_read_boundary(self):
        readline(self.fp, 'foo\n')
        read(self.fp, 'x' * READAHEAD_SIZE, READAHEAD_SIZE)
    def test_readline_boundary(self):
        readline(self.fp, 'foo\n')
        readline(self.fp, 'x' * READAHEAD_SIZE, READAHEAD_SIZE)
class TextReadaheadBoundaryTestCase(BinaryReadaheadBoundaryTestCase):
    mode = 'r'
class UniversalReadaheadBoundaryTestCase(BinaryReadaheadBoundaryTestCase):
    mode = 'U'
class TextReadaheadBoundary2TestCase(BaseTestCase):
    """For CRLF platforms only"""
    # The '\r\n' pair straddles the readahead boundary.
    data = ('x' * (READAHEAD_SIZE + 1)) + '\r\n'
    def test_read_boundary2(self):
        read(self.fp, 'x' * (READAHEAD_SIZE + 1), READAHEAD_SIZE + 1)
        read(self.fp, '\n')
class UniversalReadaheadBoundary2TestCase(TextReadaheadBoundary2TestCase):
    mode = 'U'
class TextReadaheadBoundary3TestCase(BaseTestCase):
    """For CRLF platforms only"""
    mode = 'r'
    # Here the '\r' lands exactly on the buffer edge.
    data = ('x' * (READAHEAD_SIZE - 1)) + '\r\n'
    def test_read_boundary3(self):
        size = READAHEAD_SIZE - 1
        read(self.fp, 'x' * size, size)
        read(self.fp, '\n')
        self.fp.seek(0)
        read(self.fp, ('x' * size) + '\n', READAHEAD_SIZE)
    def test_readline_boundary3(self):
        size = READAHEAD_SIZE - 1
        readline(self.fp, 'x' * size, size)
        readline(self.fp, '\n')
    def test_read_boundary3_with_extra(self):
        # Append data past the newline, then read across the boundary.
        self.write_fp.seek(0, 2)
        self.write_fp.write('foo')
        self.write_fp.flush()
        self.write_fp.seek(0)
        size = READAHEAD_SIZE - 1
        read(self.fp, ('x' * size) + '\nfoo', READAHEAD_SIZE + 3)
class UniversalReadaheadBoundary3TestCase(TextReadaheadBoundary3TestCase):
    mode = 'U'
class TextReadaheadBoundary4TestCase(BaseTestCase):
    """For CRLF platforms only"""
    mode = 'r'
    data = ('x' * (READAHEAD_SIZE - 2)) + '\n\r'
    def test_read_boundary4(self):
        readline(self.fp, 'x' * (READAHEAD_SIZE - 2) + '\n')
        # Complete the trailing '\r' into a '\r\n' pair mid-read.
        self.write_fp.write('\n')
        self.write_fp.flush()
        read(self.fp, '\n')
    def test_readline_boundary4(self):
        readline(self.fp, 'x' * (READAHEAD_SIZE - 2) + '\n')
        self.write_fp.write('\n')
        self.write_fp.flush()
        readline(self.fp, '\n')
class UniversalReadaheadBoundary4TestCase(TextReadaheadBoundary4TestCase):
    mode = 'U'
    data = ('x' * (READAHEAD_SIZE - 2)) + '\n\r'
class TextReadaheadBoundary5TestCase(BaseTestCase):
    """For CRLF platforms only"""
    mode = 'r'
    data = 'foobar\n' + ('x' * (READAHEAD_SIZE + 1))
    def test_boundary5(self):
        readline(self.fp, 'foobar\n')
        read(self.fp, 'x' * (READAHEAD_SIZE + 1), READAHEAD_SIZE + 1)
class UniversalReadaheadBoundary5TestCase(TextReadaheadBoundary5TestCase):
    mode = 'U'
class TextCRAtReadheadBoundaryTestCase(BaseTestCase):
    """For CRLF platforms only"""
    # A lone '\r' at the buffer edge must not be mistaken for CRLF.
    data = ('x' * (READAHEAD_SIZE - 1)) + '\rfoo'
    read_data = data
    def test_readline_cr_at_boundary(self):
        readline(self.fp, self.read_data, len(self.read_data))
        self.fp.seek(0)
        readline(self.fp, self.read_data)
class TextCRAtReadheadBoundary2TestCase(TextCRAtReadheadBoundaryTestCase):
    """For CRLF platforms only"""
    data = ('x' * (READAHEAD_SIZE - 1)) + '\r' + ('x' * 300)
    read_data = data
class UniversalCRAtReadaheadBoundaryTestCase(BaseTestCase):
    mode = 'U'
    bufsize = 0
    data = ('-' * 1023) + '\r\n' + ('-' * 10233)
    def test_read_cr_at_boundary(self):
        # Used to raise a BufferOverflowException w/ bufsize of 0
        read(self.fp, ('-' * 1023) + '\n', 1024)
class WriteTextNewlinesTestCase(BaseTestCase):
    # Write through text mode, read back raw to see what hit the disk.
    write_mode = 'w'
    mode = 'rb'
    def test_text_written(self):
        if LF:
            # LF platform: text mode writes bytes through unchanged.
            readline(self.fp, 'CR\rLF\n')
            readline(self.fp, 'CRLF\r\n')
        elif CRLF:
            # CRLF platform: every written '\n' became '\r\n'.
            readline(self.fp, 'CR\rLF\r\n')
            readline(self.fp, 'CRLF\r\r\n')
        readline(self.fp, 'EOF')
class ReadUniversalNewlinesTestCase(BaseTestCase):
    """Universal mode maps '\\r', '\\n' and '\\r\\n' all to '\\n'."""
    mode = 'rU'
    def test_read(self):
        read(self.fp, 'CR\nLF\nCRLF\nEOF')
        self.fp.seek(0)
        read(self.fp, 'CR\nLF\nCRLF\nEOF', 14)
    def test_readline(self):
        # fp.newlines accumulates the newline styles seen so far.
        readline(self.fp, 'CR\n')
        assert self.fp.newlines == None, repr(self.fp.newlines)
        readline(self.fp, 'LF\n')
        assert self.fp.newlines == ('\r', '\n'), repr(self.fp.newlines)
        readline(self.fp, 'CRLF\n')
        assert self.fp.newlines == ('\r', '\n'), repr(self.fp.newlines)
        readline(self.fp, 'EOF')
        assert self.fp.newlines == ('\r', '\n', '\r\n'), repr(self.fp.newlines)
        self.fp.seek(0)
        readline(self.fp, 'CR\n', 3)
        readline(self.fp, 'LF\n', 3)
        readline(self.fp, 'CRLF\n', 5)
        readline(self.fp, 'EOF', 3)
    def test_seek(self):
        # Ensure seek doesn't confuse CRLF newline identification
        self.fp.seek(6)
        readline(self.fp, 'CRLF\n')
        assert self.fp.newlines == None
        self.fp.seek(5)
        readline(self.fp, '\n')
        assert self.fp.newlines == '\n'
class WriteUniversalNewlinesTestCase(unittest.TestCase):
    """Universal-newline mode is read-only: opening 'wU' must fail."""
    def test_fails(self):
        path = tempfile.mktemp()
        raised = False
        try:
            open(path, 'wU')
        except ValueError:
            raised = True
        if not raised:
            raise AssertionError("file mode 'wU' did not raise a "
                                 "ValueError")
def read(fp, data, size=-1):
    """Read *size* units from *fp* and assert they equal *data*."""
    actual = fp.read(size)
    assert actual == data, 'read: %r expected: %r' % (actual, data)
def readline(fp, data, size=-1):
    """Read one line (at most *size*) from *fp* and assert it equals *data*."""
    actual = fp.readline(size)
    assert actual == data, 'readline: %r expected: %r' % (actual, data)
def test_main():
    """Run the platform-independent cases, plus CRLF-only ones where
    applicable."""
    tests = [ReadBinaryNewlinesTestCase,
             ReadTextNewlinesTestCase,
             ReadTextBasicBoundaryTestCase,
             ReadUniversalBasicBoundaryTestCase,
             BinaryReadaheadBoundaryTestCase,
             TextReadaheadBoundaryTestCase,
             UniversalReadaheadBoundaryTestCase,
             UniversalReadaheadBoundary2TestCase,
             UniversalReadaheadBoundary3TestCase,
             UniversalReadaheadBoundary4TestCase,
             UniversalReadaheadBoundary5TestCase,
             UniversalCRAtReadaheadBoundaryTestCase,
             WriteTextNewlinesTestCase,
             ReadUniversalNewlinesTestCase,
             WriteUniversalNewlinesTestCase]
    if CRLF:
        # These exercise text-mode CRLF translation and only make sense
        # where os.linesep is '\r\n'.
        tests.extend([TextReadaheadBoundary2TestCase,
                      TextReadaheadBoundary3TestCase,
                      TextReadaheadBoundary4TestCase,
                      TextReadaheadBoundary5TestCase,
                      TextCRAtReadheadBoundaryTestCase,
                      TextCRAtReadheadBoundary2TestCase])
    test_support.run_unittest(*tests)
if __name__ == '__main__':
    test_main()
| epl-1.0 |
pombredanne/django-rest-framework-nested-resource | setup.py | 3 | 1608 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import drf_nested_resource
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = drf_nested_resource.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-rest-framework-nested-resource',
version=version,
description="""DRF view mixin for nested resources""",
long_description=readme + '\n\n' + history,
author='SimpleEnergy',
author_email='piper@simpleenergy.com',
url='https://github.com/simpleenergy/django-rest-framework-nested-resource',
packages=[
'drf_nested_resource',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='django-rest-framework-nested-resource',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
) | bsd-3-clause |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/ordereddict.py | 1047 | 4094 | # Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
    """dict subclass remembering insertion order (pre-2.7 recipe).
    Order is kept in a circular doubly linked list of [key, prev, next]
    nodes rooted at the self.__end sentinel; self.__map maps each key
    to its node so deletion is O(1).
    """
    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__end
        except AttributeError:
            # First construction: build the sentinel and the key map.
            self.clear()
        self.update(*args, **kwds)
    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)
    def __setitem__(self, key, value):
        if key not in self:
            # New key: link a node at the end (just before the sentinel).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the node from the list in O(1).
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev
    def __iter__(self):
        # Walk the linked list forward, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def popitem(self, last=True):
        if not self:
            raise KeyError('dictionary is empty')
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value
    def __reduce__(self):
        # Temporarily strip the self-referential list machinery so the
        # remaining instance dict can be pickled.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def keys(self):
        return list(self)
    # The remaining mapping methods are supplied by DictMixin in terms
    # of the primitives defined above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    def copy(self):
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        if isinstance(other, OrderedDict):
            # Order-sensitive comparison between two OrderedDicts.
            if len(self) != len(other):
                return False
            for p, q in zip(self.items(), other.items()):
                if p != q:
                    return False
            return True
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
| mit |
azumimuo/family-xbmc-addon | plugin.video.SportsDevil/service/ordereddict.py | 1047 | 4094 | # Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
    """dict subclass remembering insertion order (pre-2.7 recipe).
    Order is kept in a circular doubly linked list of [key, prev, next]
    nodes rooted at the self.__end sentinel; self.__map maps each key
    to its node so deletion is O(1).
    """
    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__end
        except AttributeError:
            # First construction: build the sentinel and the key map.
            self.clear()
        self.update(*args, **kwds)
    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)
    def __setitem__(self, key, value):
        if key not in self:
            # New key: link a node at the end (just before the sentinel).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the node from the list in O(1).
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev
    def __iter__(self):
        # Walk the linked list forward, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def popitem(self, last=True):
        if not self:
            raise KeyError('dictionary is empty')
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value
    def __reduce__(self):
        # Temporarily strip the self-referential list machinery so the
        # remaining instance dict can be pickled.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def keys(self):
        return list(self)
    # The remaining mapping methods are supplied by DictMixin in terms
    # of the primitives defined above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    def copy(self):
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        if isinstance(other, OrderedDict):
            # Order-sensitive comparison between two OrderedDicts.
            if len(self) != len(other):
                return False
            for p, q in zip(self.items(), other.items()):
                if p != q:
                    return False
            return True
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
| gpl-2.0 |
skosukhin/spack | var/spack/repos/builtin/packages/py-colormath/package.py | 1 | 1679 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyColormath(PythonPackage):
    """Color math and conversion library."""
    # Upstream project page and source distribution on PyPI.
    homepage = "https://pypi.python.org/pypi/colormath/2.1.1"
    url      = "https://pypi.io/packages/source/c/colormath/colormath-2.1.1.tar.gz"
    # Known release with its md5 checksum.
    version('2.1.1', '10a0fb17e3c24363d0e1a3f2dccaa33b')
    # Build-time and run-time Python dependencies.
    depends_on('py-setuptools', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-networkx', type=('build', 'run'))
| lgpl-2.1 |
EmreAtes/spack | var/spack/repos/builtin/packages/xcb-demo/package.py | 5 | 1800 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class XcbDemo(AutotoolsPackage):
    """xcb-demo: A collection of demo programs that use the XCB library."""
    homepage = "https://xcb.freedesktop.org/"
    url      = "https://xcb.freedesktop.org/dist/xcb-demo-0.1.tar.gz"
    # Known release with its md5 checksum.
    version('0.1', '803c5c91d54e734e6f6fa3f04f2463ff')
    # XCB runtime libraries the demos link against.
    depends_on('libxcb')
    depends_on('xcb-util')
    depends_on('xcb-util-image')
    depends_on('xcb-util-wm')
    depends_on('pkgconfig', type='build')
    # FIXME: crashes with the following error message
    # X11/XCB/xcb.h: No such file or directory
| lgpl-2.1 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/format/stockholm.py | 1 | 3403 | #!/usr/bin/env python
"""
Writer for Stockholm format.
"""
from cogent.core.alignment import SequenceCollection
from copy import copy
__author__ = "Jeremy Widmann"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Jeremy Widmann"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Jeremy Widmann"
__email__ = "jeremy.widmann@colorado.edu"
__status__ = "Development"
def stockholm_from_alignment(aln, interleave_len=None, GC_annotation=None):
"""Returns a string in Stockholm format.
- aln: can be an Alignment object or a dict.
- interleave_len: sequence line width. Only available if sequences are
aligned.
- GC_annotation: dict containing Per-column annotation {<tag>:<s>},
added to Stockholm file in the following format: #=GC <tag> <s>
- <s> is an aligned text line of annotation type <tag>.
- #=GC lines are associated with a sequence alignment block;
- <s> is aligned to the residues in the alignment block, and has the
same length as the rest of the block. #=GC lines are
placed at the end of each block.
"""
if not aln:
return ''
# get seq output order
try:
order = aln.RowOrder
except:
order = aln.keys()
order.sort()
seqs = SequenceCollection(aln)
stockholm_list = ["# STOCKHOLM 1.0\n"]
if seqs.isRagged():
raise ValueError,\
"Sequences in alignment are not all the same length." +\
"Cannot generate Stockholm format."
aln_len = seqs.SeqLen
#Get all labels
labels = copy(seqs.Names)
#Get ordered seqs
ordered_seqs = [seqs.NamedSeqs[label] for label in order]
if GC_annotation is not None:
GC_annotation_list = \
[(k,GC_annotation[k]) for k in sorted(GC_annotation.keys())]
#Add GC_annotation to list of labels.
labels.extend(['#=GC '+ k for k in GC_annotation.keys()])
for k,v in GC_annotation.items():
if len(v) != aln_len:
raise ValueError, """GC annotation %s is not same length as alignment. Cannot generate Stockholm format."""%(k)
#Find all label lengths in order to get padding.
label_lengths = [len(l) for l in labels]
label_max = max(label_lengths)
max_spaces = label_max+4
if interleave_len is not None:
curr_ix = 0
while curr_ix < aln_len:
stockholm_list.extend(["%s%s%s"%(x,' '*(max_spaces-len(x)),\
y[curr_ix:curr_ix+ \
interleave_len]) for x,y in zip(order, ordered_seqs)])
if GC_annotation is not None:
stockholm_list.extend(["#=GC %s%s%s"%(x,\
' '*(max_spaces-len(x)-5),\
y[curr_ix:curr_ix + interleave_len]) for x,y in\
GC_annotation_list])
stockholm_list.append("")
curr_ix += interleave_len
else:
stockholm_list.extend(["%s%s%s"%(x,' '*(max_spaces-len(x)),y) \
for x,y in zip(order, ordered_seqs)])
if GC_annotation is not None:
stockholm_list.extend(["#=GC %s%s%s"%(x,' '*(max_spaces-len(x)-5),\
y) for x,y in GC_annotation_list])
stockholm_list.append("")
return '\n'.join(stockholm_list)+'//'
| mit |
edlane/python-debug-harness | launch-perf.py | 1 | 2326 | #!/usr/bin/python
__author__ = 'ed'
import os
import sys
from resource import getrusage as resource_usage, RUSAGE_SELF
from time import time as timestamp
from multiprocessing import Process, Queue, Pipe
def trueit_local():
    """Baseline benchmark body: a plain in-process call yielding True."""
    result = True
    return result
def trueit_queue(q):
    """Child-process benchmark body: report success by queueing True on *q*."""
    payload = True
    q.put(payload)
def trueit_pipe(conn):
    """Child-process benchmark body: send True down *conn*, then close our end."""
    payload = True
    conn.send(payload)
    conn.close()
if __name__ == '__main__':
    # argv[1]: repetition count; argv[2:]: ids of the tests to run.
    reps = int(sys.argv[1])
    if reps != 0:
        for test in sys.argv[2:]:
            # Snapshot wall-clock time and CPU usage before each test.
            start_time, start_resources = timestamp(), resource_usage(RUSAGE_SELF)
            if test == '1':
                # Shell out to the 'true' binary.
                print('os.system(\'true\') =')
                for i in xrange(0, reps):
                    os.system('true')
            elif test == '2':
                # Plain in-process Python call.
                print('python local call, \'True\' =')
                for i in xrange(0, reps):
                    trueit_local()
            elif test == '3':
                # Re-run this script with 0 reps so it exits immediately,
                # measuring interpreter startup cost.
                print('os.system, python executable =')
                for i in xrange(0, reps):
                    command = sys.argv[0] + " 0 2"
                    os.system(command)
            elif test == '4':
                # Spawn a child process per iteration; result via Queue.
                q = Queue()
                print('multiprocess, queue =')
                for i in xrange(0, reps):
                    p = Process(target=trueit_queue, args=(q,))
                    p.start()
                    r = q.get()
                    p.join()
            elif test == '5':
                # Spawn a child process per iteration; result via Pipe.
                print('multiprocess, pipe =')
                parent_conn, child_conn = Pipe()
                for i in xrange(0, reps):
                    p = Process(target=trueit_pipe, args=(child_conn,))
                    p.start()
                    r = parent_conn.recv()
                    p.join()
            elif test == '6':
                # Shell out to salt-call (requires salt installed).
                print('os.system, python executable =')
                for i in xrange(0, reps):
                    command = "/usr/bin/salt-call test.ping"
                    os.system(command)
            # Report elapsed real/sys/user time and calls per second.
            end_resources, end_time = resource_usage(RUSAGE_SELF), timestamp()
            results = {'real': end_time - start_time,
                       'sys': end_resources.ru_stime - start_resources.ru_stime,
                       'user': end_resources.ru_utime - start_resources.ru_utime}
            print results
            print '=', reps / results['real'], 'calls per second\n'
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/231_test_new.py | 9 | 2934 | from test.test_support import verbose, verify
import sys
# NOTE: the 'new' module is Python-2 only; it was removed in Python 3
# (replaced by the 'types' module).  This whole script is a CPython 2.x
# regression test exercising new.module/classobj/instance/function/code.
import new
class Eggs:
    def get_yolks(self):
        return self.yolks
# --- new.module(): build a module object from scratch -----------------------
print 'new.module()'
m = new.module('Spam')
if verbose:
    print m
m.Eggs = Eggs
# Register the hand-made module so a regular import statement can find it.
sys.modules['Spam'] = m
import Spam
def get_more_yolks(self):
    return self.yolks + 3
# --- new.classobj(): build an old-style class dynamically -------------------
print 'new.classobj()'
C = new.classobj('Spam', (Spam.Eggs,), {'get_more_yolks': get_more_yolks})
if verbose:
    print C
# --- new.instance(): create instances without calling __init__ --------------
print 'new.instance()'
c = new.instance(C, {'yolks': 3})
if verbose:
    print c
o = new.instance(C)
verify(o.__dict__ == {},
       "new __dict__ should be empty")
del o
# Passing None for the dict must behave like omitting it.
o = new.instance(C, None)
verify(o.__dict__ == {},
       "new __dict__ should be empty")
del o
def break_yolks(self):
    self.yolks = self.yolks - 2
# --- new.instancemethod(): bind a plain function to an instance -------------
print 'new.instancemethod()'
im = new.instancemethod(break_yolks, c, C)
if verbose:
    print im
verify(c.get_yolks() == 3 and c.get_more_yolks() == 6,
       'Broken call of hand-crafted class instance')
im()
verify(c.get_yolks() == 1 and c.get_more_yolks() == 4,
       'Broken call of hand-crafted instance method')
# It's unclear what the semantics should be for a code object compiled at
# module scope, but bound and run in a function. In CPython, `c' is global
# (by accident?) while in Jython, `c' is local. The intent of the test
# clearly is to make `c' global, so let's be explicit about it.
codestr = '''
global c
a = 1
b = 2
c = a + b
'''
ccode = compile(codestr, '<string>', 'exec')
# Jython doesn't have a __builtins__, so use a portable alternative
import __builtin__
g = {'c': 0, '__builtins__': __builtin__}
# this test could be more robust
# --- new.function(): build a function from a code object + globals ----------
print 'new.function()'
func = new.function(ccode, g)
if verbose:
    print func
func()
verify(g['c'] == 3,
       'Could not create a proper function object')
# test the various extended flavors of function.new
def f(x):
    def g(y):
        return x + y
    return g
g = f(4)
new.function(f.func_code, {}, "blah")
# Defaults tuple (2,) supplies y; closure carries x=4, so g2() == 6.
g2 = new.function(g.func_code, {}, "blah", (2,), g.func_closure)
verify(g2() == 6)
g3 = new.function(g.func_code, {}, "blah", None, g.func_closure)
verify(g3(5) == 9)
def test_closure(func, closure, exc):
    # Creating a function with a mismatched closure must raise *exc*.
    try:
        new.function(func.func_code, {}, "", None, closure)
    except exc:
        pass
    else:
        print "corrupt closure accepted"
test_closure(g, None, TypeError) # invalid closure
test_closure(g, (1,), TypeError) # non-cell in closure
test_closure(g, (1, 1), ValueError) # closure is wrong size
test_closure(f, g.func_closure, ValueError) # no closure needed
print 'new.code()'
# bogus test of new.code()
# Note: Jython will never have new.code()
if hasattr(new, 'code'):
    d = new.code(3, 3, 3, 3, codestr, (), (), (),
                 "<string>", "<name>", 1, "", (), ())
    # test backwards-compatibility version with no freevars or cellvars
    d = new.code(3, 3, 3, 3, codestr, (), (), (),
                 "<string>", "<name>", 1, "")
    if verbose:
        print d
| gpl-3.0 |
gkoelln/youtube-dl | youtube_dl/extractor/uplynk.py | 65 | 2625 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
ExtractorError,
)
class UplynkIE(InfoExtractor):
    """Extractor for direct uplynk.com content URLs (.m3u8 / .json)."""
    IE_NAME = 'uplynk'
    _VALID_URL = r'https?://.*?\.uplynk\.com/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.(?:m3u8|json)(?:.*?\bpbs=(?P<session_id>[^&]+))?'
    _TEST = {
        'url': 'http://content.uplynk.com/e89eaf2ce9054aa89d92ddb2d817a52e.m3u8',
        'info_dict': {
            'id': 'e89eaf2ce9054aa89d92ddb2d817a52e',
            'ext': 'mp4',
            'title': '030816-kgo-530pm-solar-eclipse-vid_web.mp4',
            'uploader_id': '4413701bf5a1488db55b767f8ae9d4fa',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _extract_uplynk_info(self, uplynk_content_url):
        """Resolve an uplynk content URL into an info dict.

        Fetches the HLS formats for the asset, propagates the per-session
        ``pbs`` token to every segment URL when present, and merges in the
        asset metadata from the assetinfo endpoint.
        """
        path, external_id, video_id, session_id = re.match(UplynkIE._VALID_URL, uplynk_content_url).groups()
        display_id = video_id or external_id
        formats = self._extract_m3u8_formats(
            'http://content.uplynk.com/%s.m3u8' % path,
            display_id, 'mp4', 'm3u8_native')
        if session_id:
            # Each media segment must carry the playback-session token.
            for f in formats:
                f['extra_param_to_segment_url'] = 'pbs=' + session_id
        self._sort_formats(formats)
        asset = self._download_json('http://content.uplynk.com/player/assetinfo/%s.json' % path, display_id)
        if asset.get('error') == 1:
            # Fix: the original format string was '% said: %s' — the space
            # after '%' acted as a printf conversion flag, so the message
            # rendered as e.g. "uplynkaid: ..." instead of "uplynk said: ...".
            raise ExtractorError('%s said: %s' % (self.IE_NAME, asset['msg']), expected=True)
        return {
            'id': asset['asset'],
            'title': asset['desc'],
            'thumbnail': asset.get('default_poster_url'),
            'duration': float_or_none(asset.get('duration')),
            'uploader_id': asset.get('owner'),
            'formats': formats,
        }

    def _real_extract(self, url):
        return self._extract_uplynk_info(url)
class UplynkPreplayIE(UplynkIE):
    """Extractor for uplynk 'preplay' JSON endpoints.

    The preplay response supplies a playback session id ('sid') that is
    appended to the canonical content URL before delegating to UplynkIE.
    """
    IE_NAME = 'uplynk:preplay'
    _VALID_URL = r'https?://.*?\.uplynk\.com/preplay2?/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.json'
    _TEST = None

    def _real_extract(self, url):
        match = re.match(self._VALID_URL, url)
        path, external_id, video_id = match.groups()
        preplay = self._download_json(url, video_id or external_id)
        content_url = 'http://content.uplynk.com/%s.m3u8' % path
        sid = preplay.get('sid')
        if sid:
            content_url = content_url + '?pbs=' + sid
        return self._extract_uplynk_info(content_url)
| unlicense |
loveairbear/Kin | kin/scheduling/tz_mgmt.py | 2 | 1088 | from datetime import datetime, timedelta
import pytz
# do edge case of daylight savings transition
# which raise error type pytz.exceptions.AmbiguousTimeError
def utc_to_tz(gmtoffset):
    """
    Convert a UTC offset (in hours) to an Olson timezone name for pytz.

    :param gmtoffset: UTC offset in hours; an integer or float
        (e.g. -5, or 5.5 for India).
    :returns: the name of the first common timezone whose current offset
        matches, or None when no common timezone matches right now.
    :raises ValueError: if the offset lies outside UTC-12 .. UTC+14.
    """
    # Validate before doing any work.  The original code raised a plain
    # string here, which is itself a TypeError at runtime (string
    # exceptions are not supported); raise a real exception instead.
    if gmtoffset > 14 or gmtoffset < -12:
        raise ValueError('incorrect utc offset: %r' % (gmtoffset,))
    offset = timedelta(hours=gmtoffset)
    now = datetime.now()
    for tz in map(pytz.timezone, pytz.common_timezones_set):
        if tz.utcoffset(now) == offset:
            return str(tz)
    # Explicitly signal "no match" (the original fell off the end).
    return None
def find_day(date, day):
    """Return *date* advanced to the next occurrence of weekday *day*.

    If *date* already falls on that weekday, it is returned unchanged.
    Timezone-aware datetimes are supported (timedelta arithmetic keeps
    the tzinfo).

    :params date: datetime object, could be timezone aware
            day : integer from 0 to 6 (0 is Monday, 6 is Sunday)
    """
    assert day < 7
    assert day >= 0
    # Days until the requested weekday, modulo one week (0 == today).
    days_ahead = (day - date.weekday()) % 7
    return date + timedelta(days=days_ahead)
SUSE/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/network_features.py | 3 | 3148 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class NetworkFeatures(Resource):
    """Full view of network features for an app (presently VNET integration and
    Hybrid Connections).

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :param name: Resource Name.
    :type name: str
    :param kind: Kind of resource.
    :type kind: str
    :param location: Resource Location.
    :type location: str
    :param type: Resource type.
    :type type: str
    :param tags: Resource tags.
    :type tags: dict
    :ivar virtual_network_name: The Virtual Network name.
    :vartype virtual_network_name: str
    :ivar virtual_network_connection: The Virtual Network summary view.
    :vartype virtual_network_connection: :class:`VnetInfo
     <azure.mgmt.web.models.VnetInfo>`
    :ivar hybrid_connections: The Hybrid Connections summary view.
    :vartype hybrid_connections: list of :class:`RelayServiceConnectionEntity
     <azure.mgmt.web.models.RelayServiceConnectionEntity>`
    :ivar hybrid_connections_v2: The Hybrid Connection V2 (Service Bus) view.
    :vartype hybrid_connections_v2: list of :class:`HybridConnection
     <azure.mgmt.web.models.HybridConnection>`
    """

    # Fields marked 'readonly' are populated by the service and must not be
    # sent on write; consumed by the AutoRest (de)serializer.
    _validation = {
        'id': {'readonly': True},
        'location': {'required': True},
        'virtual_network_name': {'readonly': True},
        'virtual_network_connection': {'readonly': True},
        'hybrid_connections': {'readonly': True},
        'hybrid_connections_v2': {'readonly': True},
    }

    # Maps Python attribute names to their wire-format keys and types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'virtual_network_name': {'key': 'properties.virtualNetworkName', 'type': 'str'},
        'virtual_network_connection': {'key': 'properties.virtualNetworkConnection', 'type': 'VnetInfo'},
        'hybrid_connections': {'key': 'properties.hybridConnections', 'type': '[RelayServiceConnectionEntity]'},
        'hybrid_connections_v2': {'key': 'properties.hybridConnectionsV2', 'type': '[HybridConnection]'},
    }

    def __init__(self, location, name=None, kind=None, type=None, tags=None):
        # Server-populated (readonly) views start as None and are filled in
        # during deserialization of a service response.
        super(NetworkFeatures, self).__init__(name=name, kind=kind, location=location, type=type, tags=tags)
        self.virtual_network_name = None
        self.virtual_network_connection = None
        self.hybrid_connections = None
        self.hybrid_connections_v2 = None
| mit |
vslavik/poedit | deps/boost/libs/python/test/test_pointer_adoption.py | 20 | 1710 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from test_pointer_adoption_ext import *
>>> num_a_instances()
0
>>> a = create('dynamically allocated')
>>> num_a_instances()
1
>>> a.content()
'dynamically allocated'
>>> innards = a.get_inner()
>>> innards.change('with an exposed reference')
>>> a.content()
'with an exposed reference'
# The a instance should be kept alive...
>>> a = None
>>> num_a_instances()
1
# ...until we're done with its innards
>>> innards = None
>>> num_a_instances()
0
>>> b = B()
>>> a = create('another')
>>> b.a_content()
'empty'
>>> innards = b.adopt(a);
>>> b.a_content()
'another'
>>> num_a_instances()
1
>>> del a # innards and b are both holding a reference
>>> num_a_instances()
1
>>> innards.change('yet another')
>>> b.a_content()
'yet another'
>>> del innards
>>> num_a_instances() # b still owns a reference to a
1
>>> del b
>>> num_a_instances()
0
Test call policies for constructors here
>>> a = create('second a')
>>> num_a_instances()
1
>>> b = B(a)
>>> num_a_instances()
1
>>> a.content()
'second a'
>>> del a
>>> num_a_instances()
1
>>> b.a_content()
'second a'
>>> del b
>>> num_a_instances()
0
>>> assert as_A(create('dynalloc')) is not None
>>> base = Base()
>>> assert as_A(base) is None
"""
def run(args=None):
    """Run this module's doctests.

    :param args: optional list used to replace ``sys.argv`` before running
    :return: a ``(failure_count, test_count)`` doctest result tuple
    """
    import doctest
    import sys

    if args is not None:
        sys.argv = args
    module = sys.modules.get(__name__)
    return doctest.testmod(module)
if __name__ == '__main__':
    print("running...")
    import sys
    # run() returns (failure_count, test_count); 0 failures means success.
    status = run()[0]
    if (status == 0): print("Done.")
    # Propagate the failure count as the process exit code.
    sys.exit(status)
| mit |
kalikaneko/bonafide | src/leap/bonafide/provider.py | 2 | 5990 | # -*- coding: utf-8 -*-
# provier.py
# Copyright (C) 2015 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
LEAP Provider API.
"""
from copy import deepcopy
import re
from urlparse import urlparse
class _MetaActionDispatcher(type):
    """
    A metaclass that will create dispatcher methods dynamically for each
    action made available by the LEAP provider API.
    The new methods will be created according to the values contained in an
    `_actions` dictionary, with the following format::
        {'action_name': (uri_template, method)}
    where `uri_template` is a string that will be formatted with an arbitrary
    number of keyword arguments.
    Any class that uses this one as its metaclass needs to implement two
    private methods::
        _get_uri(self, action_name, **extra_params)
        _get_method(self, action_name)
    Beware that currently they cannot be inherited from bases.
    """

    def __new__(meta, name, bases, dct):

        def _generate_action_funs(dct):
            # Look the hooks up in the class dict directly (not via MRO),
            # which is why subclasses must define them themselves.
            _get_uri = dct['_get_uri']
            _get_method = dct['_get_method']
            newdct = deepcopy(dct)
            actions = dct['_actions']

            # Factory functions capture action_name by value, avoiding the
            # classic late-binding-closure bug inside the loop below.
            def create_uri_fun(action_name):
                return lambda self, **kw: _get_uri(
                    self, action_name=action_name, **kw)

            def create_met_fun(action_name):
                return lambda self: _get_method(
                    self, action_name=action_name)

            # For every action, inject get_<action>_uri / get_<action>_method
            # into the class namespace before the class is created.
            for action in actions:
                uri, method = actions[action]
                _action_uri = 'get_%s_uri' % action
                _action_met = 'get_%s_method' % action
                newdct[_action_uri] = create_uri_fun(action)
                newdct[_action_met] = create_met_fun(action)
            return newdct

        newdct = _generate_action_funs(dct)
        return super(_MetaActionDispatcher, meta).__new__(
            meta, name, bases, newdct)
class BaseProvider(object):
    """Common base for objects that talk to a LEAP provider.

    Validates that the configured location uses https and stores the
    network location plus the API version used to build endpoint URLs.
    """

    def __init__(self, netloc, version=1):
        url = urlparse(netloc)
        # Refuse anything that is not https: provider traffic carries
        # credentials and certificates.
        if url.scheme != 'https':
            raise ValueError(
                'ProviderApi needs to be passed a url with https scheme')
        self.netloc = url.netloc
        self.version = version

    def get_hostname(self):
        """Return the bare hostname of the provider's base URL."""
        return urlparse(self._get_base_url()).hostname

    def _get_base_url(self):
        """Return ``https://<netloc>/<version>``."""
        return "https://{0}/{1}".format(self.netloc, self.version)
class Api(BaseProvider):
    """
    An object that has all the information that a client needs to communicate
    with the remote methods exposed by the web API of a LEAP provider.
    The actions are described in https://leap.se/bonafide
    By using the _MetaActionDispatcher as a metaclass, the _actions dict will
    be translated dynamically into a set of instance methods that will allow
    getting the uri and method for each action.
    The keyword arguments specified in the format string will automatically
    raise a KeyError if the needed keyword arguments are not passed to the
    dynamically created methods.
    """

    # TODO when should the provider-api object be created?
    # TODO pass a Provider object to constructor, with autoconf flag.
    # TODO make the actions attribute depend on the api version
    # TODO missing UPDATE USER RECORD

    __metaclass__ = _MetaActionDispatcher
    # action name -> (uri template relative to the base url, HTTP verb).
    # The metaclass turns each entry into get_<name>_uri/get_<name>_method.
    _actions = {
        'signup': ('users', 'POST'),
        'update_user': ('users/{uid}', 'PUT'),
        'handshake': ('sessions', 'POST'),
        'authenticate': ('sessions/{login}', 'PUT'),
        'logout': ('logout', 'DELETE'),
        'vpn_cert': ('cert', 'POST'),
        'smtp_cert': ('smtp_cert', 'POST'),
    }

    # Methods expected by the dispatcher metaclass

    def _get_uri(self, action_name, **extra_params):
        # Fill the uri template's {placeholders} from the keyword args;
        # missing ones surface as KeyError, by design (see class docstring).
        resource, _ = self._actions.get(action_name)
        uri = '{0}/{1}'.format(
            bytes(self._get_base_url()),
            bytes(resource)).format(**extra_params)
        return uri

    def _get_method(self, action_name):
        # Return the HTTP verb registered for the given action.
        _, method = self._actions.get(action_name)
        return method
class Discovery(BaseProvider):
    """
    Discover basic information about a provider, including the provided
    services.
    """

    __metaclass__ = _MetaActionDispatcher
    # action name -> (uri template, HTTP verb); expanded by the metaclass
    # into get_<name>_uri / get_<name>_method instance methods.
    _actions = {
        'provider_info': ('provider.json', 'GET'),
        'configs': ('1/configs.json', 'GET'),
    }

    # Optional overrides for the discovery endpoint; when unset, the
    # netloc passed to the constructor is used.
    api_uri = None
    api_port = None

    def _get_base_url(self):
        # Unlike Api, discovery URLs carry no version path segment and may
        # target an alternative host/port advertised by the provider.
        if self.api_uri:
            base = self.api_uri
        else:
            base = self.netloc
        uri = "https://{0}".format(base)
        if self.api_port:
            uri = uri + ':%s' % self.api_port
        return uri

    def get_base_uri(self):
        """Public accessor for the discovery base URL."""
        return self._get_base_url()

    # Methods expected by the dispatcher metaclass

    def _get_uri(self, action_name, **extra_params):
        # Fill the uri template's {placeholders} from the keyword args.
        resource, _ = self._actions.get(action_name)
        uri = '{0}/{1}'.format(
            bytes(self._get_base_url()),
            bytes(resource)).format(**extra_params)
        return uri

    def _get_method(self, action_name):
        # Return the HTTP verb registered for the given action.
        _, method = self._actions.get(action_name)
        return method
def validate_username(username):
    """Validate a LEAP username.

    Usernames may contain only lowercase ascii letters, digits, dots,
    dashes and underscores.  The empty string is accepted (the pattern
    uses ``*``), matching the original behaviour.

    :raises ValueError: if *username* contains any other character.
    """
    # Raw string: the original non-raw literal carried invalid escape
    # sequences (\-, \_, \.) which emit warnings on Python 3.6+.
    accepted_characters = r'^[a-z0-9\-\_\.]*$'
    if not re.match(accepted_characters, username):
        raise ValueError('Only lowercase letters, digits, . - and _ allowed.')
| gpl-3.0 |
biddisco/VTK | ThirdParty/Twisted/twisted/internet/reactor.py | 63 | 1863 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The reactor is the Twisted event loop within Twisted, the loop which drives
applications using Twisted. The reactor provides APIs for networking,
threading, dispatching events, and more.
The default reactor depends on the platform and will be installed if this
module is imported without another reactor being explicitly installed
beforehand. Regardless of which reactor is installed, importing this module is
the correct way to get a reference to it.
New application code should prefer to pass and accept the reactor as a
parameter where it is needed, rather than relying on being able to import this
module to get a reference. This simplifies unit testing and may make it easier
to one day support multiple reactors (as a performance enhancement), though
this is not currently possible.
@see: L{IReactorCore<twisted.internet.interfaces.IReactorCore>}
@see: L{IReactorTime<twisted.internet.interfaces.IReactorTime>}
@see: L{IReactorProcess<twisted.internet.interfaces.IReactorProcess>}
@see: L{IReactorTCP<twisted.internet.interfaces.IReactorTCP>}
@see: L{IReactorSSL<twisted.internet.interfaces.IReactorSSL>}
@see: L{IReactorUDP<twisted.internet.interfaces.IReactorUDP>}
@see: L{IReactorMulticast<twisted.internet.interfaces.IReactorMulticast>}
@see: L{IReactorUNIX<twisted.internet.interfaces.IReactorUNIX>}
@see: L{IReactorUNIXDatagram<twisted.internet.interfaces.IReactorUNIXDatagram>}
@see: L{IReactorFDSet<twisted.internet.interfaces.IReactorFDSet>}
@see: L{IReactorThreads<twisted.internet.interfaces.IReactorThreads>}
@see: L{IReactorPluggableResolver<twisted.internet.interfaces.IReactorPluggableResolver>}
"""
from __future__ import division, absolute_import
import sys
# Remove this half-initialized module from sys.modules so that installing
# the default reactor below can re-bind the 'twisted.internet.reactor'
# name to the concrete reactor object.  After this, any later
# 'import twisted.internet.reactor' yields the installed reactor itself.
del sys.modules['twisted.internet.reactor']
from twisted.internet import default
# Pick and install the best reactor for this platform (side effect: this
# module is replaced in sys.modules by the reactor instance).
default.install()
| bsd-3-clause |
p0psicles/SickRage | lib/fanart/__init__.py | 60 | 2956 | __author__ = 'Andrea De Marco <24erre@gmail.com>'
__version__ = '1.4.0'
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
]
__copyright__ = "2012, %s " % __author__
__license__ = """
Copyright %s.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied.
See the License for the specific language governing permissions and
limitations under the License.
""" % __copyright__
__docformat__ = 'restructuredtext en'
__doc__ = """
:abstract: Python interface to fanart.tv API
:version: %s
:author: %s
:contact: http://z4r.github.com/
:date: 2012-04-04
:copyright: %s
""" % (__version__, __author__, __license__)
def values(obj):
    """Return the values of *obj*'s public (non-underscore) attributes."""
    # dict.items() works on both Python 2 and 3; the original used
    # iteritems(), which does not exist on Python 3 dictionaries.
    return [v for k, v in obj.__dict__.items() if not k.startswith('_')]
BASEURL = 'http://webservice.fanart.tv/v3/%s/%s?api_key=%s'
class FORMAT(object):
    # Response serialization formats accepted by the fanart.tv API.
    JSON = 'JSON'
    XML = 'XML'
    PHP = 'PHP'
class WS(object):
    # Web-service endpoints, i.e. the first path segment of BASEURL.
    MUSIC = 'music'
    MOVIE = 'movies'
    TV = 'tv'
class TYPE(object):
    # Artwork type identifiers, grouped per web service; the nested
    # classes map friendly names to the API's artwork type strings.
    ALL = 'all'

    class TV(object):
        # Artwork kinds available for TV shows.
        LOGO = 'clearlogo'
        CHARACTER = 'characterart'
        BACKGROUND = 'showbackground'
        HDLOGO = 'hdtvlogo'
        HDART = 'hdclearart'
        ART = 'clearart'
        THUMB = 'tvthumb'
        POSTER = 'tvposter'
        BANNER = 'tvbanner'
        SEASONTHUMB = 'seasonthumb'
        SEASONPOSTER = 'seasonposter'
        SEASONBANNER = 'seasonbanner'

    class MUSIC(object):
        # Artwork kinds available for artists/albums.
        DISC = 'cdart'
        LOGO = 'musiclogo'
        BACKGROUND = 'artistbackground'
        COVER = 'albumcover'
        THUMB = 'artistthumb'

    class MOVIE(object):
        # Artwork kinds available for movies.
        ART = 'movieart'
        LOGO = 'movielogo'
        DISC = 'moviedisc'
        POSTER = 'movieposter'
        BACKGROUND = 'moviebackground'
        HDLOGO = 'hdmovielogo'
        HDART = 'hdmovieclearart'
        BANNER = 'moviebanner'
        THUMB = 'moviethumb'
class SORT(object):
    # Sort-order codes accepted by the API.
    POPULAR = 1
    NEWEST = 2
    OLDEST = 3
class LIMIT(object):
    # Result-limit codes accepted by the API.
    ONE = 1
    ALL = 2
# Flattened value lists of the constant namespaces above, convenient for
# validating user-supplied parameters.
FORMAT_LIST = values(FORMAT)
WS_LIST = values(WS)
TYPE_LIST = values(TYPE.MUSIC) + values(TYPE.TV) + values(TYPE.MOVIE) + [TYPE.ALL]
MUSIC_TYPE_LIST = values(TYPE.MUSIC) + [TYPE.ALL]
TV_TYPE_LIST = values(TYPE.TV) + [TYPE.ALL]
MOVIE_TYPE_LIST = values(TYPE.MOVIE) + [TYPE.ALL]
SORT_LIST = values(SORT)
LIMIT_LIST = values(LIMIT)
| gpl-3.0 |
vrenaville/OCB | openerp/addons/base/module/module.py | 29 | 37426 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2013 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from docutils import nodes
from docutils.core import publish_string
from docutils.transforms import Transform, writer_aux
from docutils.writers.html4css1 import Writer
import imp
import logging
from operator import attrgetter
import os
import re
import shutil
import tempfile
import urllib
import urllib2
import urlparse
import zipfile
import zipimport
import lxml.html
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # NOQA
import openerp
import openerp.exceptions
from openerp import modules, tools
from openerp.modules.db import create_categories
from openerp.modules import get_module_resource
from openerp.tools.parse_version import parse_version
from openerp.tools.translate import _
from openerp.osv import osv, orm, fields
from openerp import api, fields as fields2
_logger = logging.getLogger(__name__)
# Window-action template returned by module wizards to open the
# base.module.upgrade dialog after scheduling an (un)install/upgrade.
ACTION_DICT = {
    'view_type': 'form',
    'view_mode': 'form',
    'res_model': 'base.module.upgrade',
    'target': 'new',
    'type': 'ir.actions.act_window',
    'nodestroy': True,
}
def backup(path, raise_exception=True):
    """Move *path* aside to the first free ``<path>~<n>`` backup name.

    :param path: file or directory to move out of the way
    :param raise_exception: when True (the default) raise OSError if
        *path* does not exist; when False, return None instead
    :return: the backup path the entry was moved to, or None
    :raises OSError: if *path* does not exist and raise_exception is True
    """
    path = os.path.normpath(path)
    if not os.path.exists(path):
        if not raise_exception:
            return None
        # Fix: message previously read "path does not exists"; also name
        # the offending path to make the error actionable.
        raise OSError('path does not exist: %r' % (path,))
    cnt = 1
    while True:
        bck = '%s~%d' % (path, cnt)
        if not os.path.exists(bck):
            shutil.move(path, bck)
            return bck
        cnt += 1
class module_category(osv.osv):
    """Hierarchical category ("Application") grouping addon modules."""
    _name = "ir.module.category"
    _description = "Application"

    def _module_nbr(self, cr, uid, ids, prop, unknow_none, context):
        """Functional-field getter: number of modules per category,
        counting modules of direct child categories into the parent."""
        cr.execute('SELECT category_id, COUNT(*) \
                    FROM ir_module_module \
                    WHERE category_id IN %(ids)s \
                    OR category_id IN (SELECT id \
                                       FROM ir_module_category \
                                       WHERE parent_id IN %(ids)s) \
                    GROUP BY category_id', {'ids': tuple(ids)}
                   )
        result = dict(cr.fetchall())
        for id in ids:
            # Fold each direct child's count into its parent's total.
            cr.execute('select id from ir_module_category where parent_id=%s', (id,))
            result[id] = sum([result.get(c, 0) for (c,) in cr.fetchall()],
                             result.get(id, 0))
        return result

    _columns = {
        'name': fields.char("Name", required=True, translate=True, select=True),
        'parent_id': fields.many2one('ir.module.category', 'Parent Application', select=True),
        'child_ids': fields.one2many('ir.module.category', 'parent_id', 'Child Applications'),
        'module_nr': fields.function(_module_nbr, string='Number of Modules', type='integer'),
        'module_ids': fields.one2many('ir.module.module', 'category_id', 'Modules'),
        'description': fields.text("Description", translate=True),
        'sequence': fields.integer('Sequence'),
        'visible': fields.boolean('Visible'),
        'xml_id': fields.function(osv.osv.get_external_id, type='char', string="External ID"),
    }
    _order = 'name'

    _defaults = {
        'visible': 1,
    }
class MyFilterMessages(Transform):
    """
    Custom docutils transform to remove `system message` for a document and
    generate warnings.
    (The standard filter removes them based on some `report_level` passed in
    the `settings_override` dictionary, but if we use it, we can't see them
    and generate warnings.)
    """
    # Priority chosen so this runs among docutils' late "universal"
    # transforms, after the document tree is fully built.
    default_priority = 870

    def apply(self):
        # Log every docutils system message, then strip it from the tree
        # so it never appears in the rendered output.
        for node in self.document.traverse(nodes.system_message):
            _logger.warning("docutils' system message present: %s", str(node))
            node.parent.remove(node)
class MyWriter(Writer):
    """
    Custom docutils html4ccs1 writer that doesn't add the warnings to the
    output document.
    """

    def get_transforms(self):
        # Replace the stock writer transforms with our warning-logging
        # filter (plus the standard admonitions support).
        return [MyFilterMessages, writer_aux.Admonitions]
class module(osv.osv):
_name = "ir.module.module"
_rec_name = "shortdesc"
_description = "Module"
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(module, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'action_server_module_immediate_install')[1]
if view_type == 'form':
if res.get('toolbar',False):
list = [rec for rec in res['toolbar']['action'] if rec.get('id', False) != result]
res['toolbar'] = {'action': list}
return res
    @classmethod
    def get_module_info(cls, name):
        """Return the manifest dict (__openerp__.py data) for module *name*,
        or an empty dict when it cannot be read (error is logged, not raised)."""
        info = {}
        try:
            info = modules.load_information_from_description_file(name)
        except Exception:
            # Best-effort: a broken/missing manifest must not abort callers.
            _logger.debug('Error when trying to fetch informations for '
                          'module %s', name, exc_info=True)
        return info
    def _get_desc(self, cr, uid, ids, field_name=None, arg=None, context=None):
        """Functional-field getter for description_html.

        Prefers the module's static/description/index.html (with relative
        asset links rewritten to the module's static path); falls back to
        rendering the manifest's reStructuredText description to HTML.
        """
        res = dict.fromkeys(ids, '')
        for module in self.browse(cr, uid, ids, context=context):
            path = get_module_resource(module.name, 'static/description/index.html')
            if path:
                with tools.file_open(path, 'rb') as desc_file:
                    doc = desc_file.read()
                    html = lxml.html.document_fromstring(doc)
                    for element, attribute, link, pos in html.iterlinks():
                        # Rewrite relative src attributes so images resolve
                        # against this module's static directory.
                        if element.get('src') and not '//' in element.get('src') and not 'static/' in element.get('src'):
                            element.set('src', "/%s/static/description/%s" % (module.name, element.get('src')))
                    res[module.id] = lxml.html.tostring(html)
            else:
                # No static page: render the RST description with a writer
                # that logs (rather than embeds) docutils system messages.
                overrides = {
                    'embed_stylesheet': False,
                    'doctitle_xform': False,
                    'output_encoding': 'unicode',
                    'xml_declaration': False,
                }
                output = publish_string(source=module.description or '', settings_overrides=overrides, writer=MyWriter())
                res[module.id] = output
        return res
    def _get_latest_version(self, cr, uid, ids, field_name=None, arg=None, context=None):
        """Functional-field getter for installed_version: the version string
        declared by the manifest on disk (not the one stored in database),
        defaulting to an adapted '1.0' when the manifest has none."""
        default_version = modules.adapt_version('1.0')
        res = dict.fromkeys(ids, default_version)
        for m in self.browse(cr, uid, ids):
            res[m.id] = self.get_module_info(m.name).get('version', default_version)
        return res
    def _get_views(self, cr, uid, ids, field_name=None, arg=None, context=None):
        """Functional-field getter for menus_by_module / reports_by_module /
        views_by_module: newline-joined, sorted listings of the records each
        module contributed, discovered through ir.model.data."""
        res = {}
        model_data_obj = self.pool.get('ir.model.data')
        dmodels = []
        # Only collect the models corresponding to the requested field(s).
        if field_name is None or 'views_by_module' in field_name:
            dmodels.append('ir.ui.view')
        if field_name is None or 'reports_by_module' in field_name:
            dmodels.append('ir.actions.report.xml')
        if field_name is None or 'menus_by_module' in field_name:
            dmodels.append('ir.ui.menu')
        assert dmodels, "no models for %s" % field_name
        for module_rec in self.browse(cr, uid, ids, context=context):
            res_mod_dic = res[module_rec.id] = {
                'menus_by_module': [],
                'reports_by_module': [],
                'views_by_module': []
            }
            # Skip uninstalled modules below, no data to find anyway.
            if module_rec.state not in ('installed', 'to upgrade', 'to remove'):
                continue
            # then, search and group ir.model.data records
            imd_models = dict([(m, []) for m in dmodels])
            imd_ids = model_data_obj.search(cr, uid, [
                ('module', '=', module_rec.name),
                ('model', 'in', tuple(dmodels))
            ])
            for imd_res in model_data_obj.read(cr, uid, imd_ids, ['model', 'res_id'], context=context):
                imd_models[imd_res['model']].append(imd_res['res_id'])
            def browse(model):
                M = self.pool[model]
                # as this method is called before the module update, some xmlid may be invalid at this stage
                # explictly filter records before reading them
                ids = M.exists(cr, uid, imd_models.get(model, []), context)
                return M.browse(cr, uid, ids, context)
            def format_view(v):
                # Prefix inherited views so they stand out in the listing.
                aa = v.inherit_id and '* INHERIT ' or ''
                return '%s%s (%s)' % (aa, v.name, v.type)
            res_mod_dic['views_by_module'] = map(format_view, browse('ir.ui.view'))
            res_mod_dic['reports_by_module'] = map(attrgetter('name'), browse('ir.actions.report.xml'))
            res_mod_dic['menus_by_module'] = map(attrgetter('complete_name'), browse('ir.ui.menu'))
        # Flatten each collected list into a sorted, newline-joined string.
        for key in res.iterkeys():
            for k, v in res[key].iteritems():
                res[key][k] = "\n".join(sorted(v))
        return res
    def _get_icon_image(self, cr, uid, ids, field_name=None, arg=None, context=None):
        """Functional-field getter for icon_image: the module's
        static/description/icon.png as a base64 string ('' when absent)."""
        res = dict.fromkeys(ids, '')
        for module in self.browse(cr, uid, ids, context=context):
            path = get_module_resource(module.name, 'static', 'description', 'icon.png')
            if path:
                image_file = tools.file_open(path, 'rb')
                try:
                    res[module.id] = image_file.read().encode('base64')
                finally:
                    # Always release the file handle, even on read errors.
                    image_file.close()
        return res
_columns = {
'name': fields.char("Technical Name", readonly=True, required=True, select=True),
'category_id': fields.many2one('ir.module.category', 'Category', readonly=True, select=True),
'shortdesc': fields.char('Module Name', readonly=True, translate=True),
'summary': fields.char('Summary', readonly=True, translate=True),
'description': fields.text("Description", readonly=True, translate=True),
'description_html': fields.function(_get_desc, string='Description HTML', type='html', method=True, readonly=True),
'author': fields.char("Author", readonly=True),
'maintainer': fields.char('Maintainer', readonly=True),
'contributors': fields.text('Contributors', readonly=True),
'website': fields.char("Website", readonly=True),
# attention: Incorrect field names !!
# installed_version refers the latest version (the one on disk)
# latest_version refers the installed version (the one in database)
# published_version refers the version available on the repository
'installed_version': fields.function(_get_latest_version, string='Latest Version', type='char'),
'latest_version': fields.char('Installed Version', readonly=True),
'published_version': fields.char('Published Version', readonly=True),
'url': fields.char('URL', readonly=True),
'sequence': fields.integer('Sequence'),
'dependencies_id': fields.one2many('ir.module.module.dependency', 'module_id', 'Dependencies', readonly=True),
'auto_install': fields.boolean('Automatic Installation',
help='An auto-installable module is automatically installed by the '
'system when all its dependencies are satisfied. '
'If the module has no dependency, it is always installed.'),
'state': fields.selection([
('uninstallable', 'Not Installable'),
('uninstalled', 'Not Installed'),
('installed', 'Installed'),
('to upgrade', 'To be upgraded'),
('to remove', 'To be removed'),
('to install', 'To be installed')
], string='Status', readonly=True, select=True),
'demo': fields.boolean('Demo Data', readonly=True),
'license': fields.selection([
('GPL-2', 'GPL Version 2'),
('GPL-2 or any later version', 'GPL-2 or later version'),
('GPL-3', 'GPL Version 3'),
('GPL-3 or any later version', 'GPL-3 or later version'),
('AGPL-3', 'Affero GPL-3'),
('Other OSI approved licence', 'Other OSI Approved Licence'),
('Other proprietary', 'Other Proprietary')
], string='License', readonly=True),
'menus_by_module': fields.function(_get_views, string='Menus', type='text', multi="meta", store=True),
'reports_by_module': fields.function(_get_views, string='Reports', type='text', multi="meta", store=True),
'views_by_module': fields.function(_get_views, string='Views', type='text', multi="meta", store=True),
'application': fields.boolean('Application', readonly=True),
'icon': fields.char('Icon URL'),
'icon_image': fields.function(_get_icon_image, string='Icon', type="binary"),
}
    # Default column values for newly created module records.
    _defaults = {
        'state': 'uninstalled',
        'sequence': 100,
        'demo': False,
        'license': 'AGPL-3',
    }
    # Modules are listed by explicit sequence first, then alphabetically.
    _order = 'sequence,name'
    def _name_uniq_msg(self, cr, uid, ids, context=None):
        # User-facing message shown when the name_uniq constraint is violated.
        return _('The name of the module must be unique !')
    _sql_constraints = [
        ('name_uniq', 'UNIQUE (name)', _name_uniq_msg),
    ]
    def unlink(self, cr, uid, ids, context=None):
        """Delete module records, refusing any module that is installed or
        scheduled for installation/upgrade/removal.

        :raises orm.except_orm: if one of the modules is not safe to delete.
        """
        if not ids:
            return True
        # Accept a single id as well as a list of ids (Python 2 `long` too).
        if isinstance(ids, (int, long)):
            ids = [ids]
        mod_names = []
        for mod in self.read(cr, uid, ids, ['state', 'name'], context):
            if mod['state'] in ('installed', 'to upgrade', 'to remove', 'to install'):
                raise orm.except_orm(_('Error'), _('You try to remove a module that is installed or will be installed'))
            mod_names.append(mod['name'])
        #Removing the entry from ir_model_data
        #ids_meta = self.pool.get('ir.model.data').search(cr, uid, [('name', '=', 'module_meta_information'), ('module', 'in', mod_names)])
        #if ids_meta:
        #    self.pool.get('ir.model.data').unlink(cr, uid, ids_meta, context)
        return super(module, self).unlink(cr, uid, ids, context=context)
@staticmethod
def _check_external_dependencies(terp):
depends = terp.get('external_dependencies')
if not depends:
return
for pydep in depends.get('python', []):
parts = pydep.split('.')
parts.reverse()
path = None
while parts:
part = parts.pop()
try:
_, path, _ = imp.find_module(part, path and [path] or None)
except ImportError:
raise ImportError('No module named %s' % (pydep,))
for binary in depends.get('bin', []):
if tools.find_in_path(binary) is None:
raise Exception('Unable to find %r in path' % (binary,))
    @classmethod
    def check_external_dependencies(cls, module_name, newstate='to install'):
        """Check the external dependencies of `module_name`, wrapping any
        failure into a user-facing error whose wording depends on the
        operation being attempted (`newstate`).
        """
        terp = cls.get_module_info(module_name)
        try:
            cls._check_external_dependencies(terp)
        except Exception, e:
            # Pick a message matching the operation in progress.
            if newstate == 'to install':
                msg = _('Unable to install module "%s" because an external dependency is not met: %s')
            elif newstate == 'to upgrade':
                msg = _('Unable to upgrade module "%s" because an external dependency is not met: %s')
            else:
                msg = _('Unable to process module "%s" because an external dependency is not met: %s')
            raise orm.except_orm(_('Error'), msg % (module_name, e.args[0]))
    @api.multi
    def state_update(self, newstate, states_to_update, level=100):
        """Recursively set `newstate` on these modules and their
        dependencies, touching only records whose current state is in
        `states_to_update`.

        :param level: recursion budget; a dependency cycle exhausts it and
            raises instead of looping forever.
        :return: True if demo data will be loaded for at least one of the
            modules involved.
        """
        if level < 1:
            raise orm.except_orm(_('Error'), _('Recursion error in modules dependencies !'))
        # whether some modules are installed with demo data
        demo = False
        for module in self:
            # determine dependency modules to update/others
            update_mods, ready_mods = self.browse(), self.browse()
            for dep in module.dependencies_id:
                if dep.state == 'unknown':
                    raise orm.except_orm(_('Error'), _("You try to install module '%s' that depends on module '%s'.\nBut the latter module is not available in your system.") % (module.name, dep.name,))
                if dep.depend_id.state == newstate:
                    ready_mods += dep.depend_id
                else:
                    update_mods += dep.depend_id
            # update dependency modules that require it, and determine demo for module
            update_demo = update_mods.state_update(newstate, states_to_update, level=level-1)
            module_demo = module.demo or update_demo or any(mod.demo for mod in ready_mods)
            demo = demo or module_demo
            # check dependencies and update module itself
            self.check_external_dependencies(module.name, newstate)
            if module.state in states_to_update:
                module.write({'state': newstate, 'demo': module_demo})
        return demo
    def button_install(self, cr, uid, ids, context=None):
        """Schedule installation of the given modules, then recursively
        schedule any auto-installable module whose dependencies become
        satisfied as a consequence."""
        # Mark the given modules to be installed.
        self.state_update(cr, uid, ids, 'to install', ['uninstalled'], context=context)
        # Mark (recursively) the newly satisfied modules to also be installed
        # Select all auto-installable (but not yet installed) modules.
        domain = [('state', '=', 'uninstalled'), ('auto_install', '=', True)]
        uninstalled_ids = self.search(cr, uid, domain, context=context)
        uninstalled_modules = self.browse(cr, uid, uninstalled_ids, context=context)
        # Keep those with:
        #  - all dependencies satisfied (installed or to be installed),
        #  - at least one dependency being 'to install'
        satisfied_states = frozenset(('installed', 'to install', 'to upgrade'))
        def all_depencies_satisfied(m):
            states = set(d.state for d in m.dependencies_id)
            return states.issubset(satisfied_states) and ('to install' in states)
        to_install_modules = filter(all_depencies_satisfied, uninstalled_modules)
        to_install_ids = map(lambda m: m.id, to_install_modules)
        # Mark them to be installed.
        if to_install_ids:
            # Recursion terminates because each pass moves modules out of
            # the 'uninstalled' state.
            self.button_install(cr, uid, to_install_ids, context=context)
        return dict(ACTION_DICT, name=_('Install'))
    def button_immediate_install(self, cr, uid, ids, context=None):
        """ Installs the selected module(s) immediately and fully,
        returns the next res.config action to execute

        :param ids: identifiers of the modules to install
        :returns: next res.config item to execute
        :rtype: dict[str, object]
        """
        return self._button_immediate_function(cr, uid, ids, self.button_install, context=context)
    def button_install_cancel(self, cr, uid, ids, context=None):
        # Abort a scheduled installation: revert modules to 'uninstalled'.
        self.write(cr, uid, ids, {'state': 'uninstalled', 'demo': False})
        return True
    def module_uninstall(self, cr, uid, ids, context=None):
        """Perform the various steps required to uninstall a module completely
        including the deletion of all database structures created by the module:
        tables, columns, constraints, etc."""
        ir_model_data = self.pool.get('ir.model.data')
        modules_to_remove = [m.name for m in self.browse(cr, uid, ids, context)]
        # Removing the modules' ir.model.data records cascades into dropping
        # everything those records describe (models, fields, views, ...).
        ir_model_data._module_data_uninstall(cr, uid, modules_to_remove, context)
        self.write(cr, uid, ids, {'state': 'uninstalled'})
        return True
def downstream_dependencies(self, cr, uid, ids, known_dep_ids=None,
exclude_states=['uninstalled', 'uninstallable', 'to remove'],
context=None):
"""Return the ids of all modules that directly or indirectly depend
on the given module `ids`, and that satisfy the `exclude_states`
filter"""
if not ids:
return []
known_dep_ids = set(known_dep_ids or [])
cr.execute('''SELECT DISTINCT m.id
FROM
ir_module_module_dependency d
JOIN
ir_module_module m ON (d.module_id=m.id)
WHERE
d.name IN (SELECT name from ir_module_module where id in %s) AND
m.state NOT IN %s AND
m.id NOT IN %s ''',
(tuple(ids), tuple(exclude_states), tuple(known_dep_ids or ids)))
new_dep_ids = set([m[0] for m in cr.fetchall()])
missing_mod_ids = new_dep_ids - known_dep_ids
known_dep_ids |= new_dep_ids
if missing_mod_ids:
known_dep_ids |= set(self.downstream_dependencies(cr, uid, list(missing_mod_ids),
known_dep_ids, exclude_states, context))
return list(known_dep_ids)
    def _button_immediate_function(self, cr, uid, ids, function, context=None):
        """Run `function` on `ids` now, rebuild the registry (which applies
        any scheduled module operations), then return either the next
        res.config wizard action or a client reload of the first root menu.
        """
        function(cr, uid, ids, context=context)
        # The actual (un)installation happens while rebuilding the registry
        # with update_module=True, so the scheduled states must be committed
        # first.
        cr.commit()
        api.Environment.reset()
        registry = openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)
        config = registry['res.config'].next(cr, uid, [], context=context) or {}
        if config.get('type') not in ('ir.actions.act_window_close',):
            return config
        # reload the client; open the first available root menu
        menu_obj = registry['ir.ui.menu']
        menu_ids = menu_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
            'params': {'menu_id': menu_ids and menu_ids[0] or False}
        }
    #TODO remove me in master, not called anymore
    def button_immediate_uninstall(self, cr, uid, ids, context=None):
        """
        Uninstall the selected module(s) immediately and fully,
        returns the next res.config action to execute
        """
        return self._button_immediate_function(cr, uid, ids, self.button_uninstall, context=context)
    def button_uninstall(self, cr, uid, ids, context=None):
        """Mark the given modules and every module that depends on them as
        'to remove'; refuses to touch the mandatory `base` module."""
        if any(m.name == 'base' for m in self.browse(cr, uid, ids, context=context)):
            raise orm.except_orm(_('Error'), _("The `base` module cannot be uninstalled"))
        # Dependent modules must be uninstalled together with their parents.
        dep_ids = self.downstream_dependencies(cr, uid, ids, context=context)
        self.write(cr, uid, ids + dep_ids, {'state': 'to remove'})
        return dict(ACTION_DICT, name=_('Uninstall'))
    def button_uninstall_cancel(self, cr, uid, ids, context=None):
        # Abort a scheduled uninstallation: revert modules to 'installed'.
        self.write(cr, uid, ids, {'state': 'installed'})
        return True
    def button_immediate_upgrade(self, cr, uid, ids, context=None):
        """
        Upgrade the selected module(s) immediately and fully,
        return the next res.config action to execute
        """
        return self._button_immediate_function(cr, uid, ids, self.button_upgrade, context=context)
    def button_upgrade(self, cr, uid, ids, context=None):
        """Schedule the given modules - plus every installed module that
        depends on them - for upgrade, and schedule installation of any
        still-missing dependency."""
        depobj = self.pool.get('ir.module.module.dependency')
        todo = list(self.browse(cr, uid, ids, context=context))
        self.update_list(cr, uid)
        # Walk `todo` while extending it: installed modules depending on an
        # upgraded module must be upgraded too.
        i = 0
        while i < len(todo):
            mod = todo[i]
            i += 1
            if mod.state not in ('installed', 'to upgrade'):
                raise orm.except_orm(_('Error'), _("Can not upgrade module '%s'. It is not installed.") % (mod.name,))
            self.check_external_dependencies(mod.name, 'to upgrade')
            iids = depobj.search(cr, uid, [('name', '=', mod.name)], context=context)
            for dep in depobj.browse(cr, uid, iids, context=context):
                if dep.module_id.state == 'installed' and dep.module_id not in todo:
                    todo.append(dep.module_id)
        ids = map(lambda x: x.id, todo)
        self.write(cr, uid, ids, {'state': 'to upgrade'}, context=context)
        # New dependencies introduced by the upgraded versions must be
        # installed alongside.
        to_install = []
        for mod in todo:
            for dep in mod.dependencies_id:
                if dep.state == 'unknown':
                    raise orm.except_orm(_('Error'), _('You try to upgrade a module that depends on the module: %s.\nBut this module is not available in your system.') % (dep.name,))
                if dep.state == 'uninstalled':
                    ids2 = self.search(cr, uid, [('name', '=', dep.name)])
                    to_install.extend(ids2)
        self.button_install(cr, uid, to_install, context=context)
        return dict(ACTION_DICT, name=_('Apply Schedule Upgrade'))
    def button_upgrade_cancel(self, cr, uid, ids, context=None):
        # Abort a scheduled upgrade: revert modules to 'installed'.
        self.write(cr, uid, ids, {'state': 'installed'})
        return True
    def button_update_translations(self, cr, uid, ids, context=None):
        # Reload translation terms for the selected (installed) modules.
        self.update_translations(cr, uid, ids)
        return True
@staticmethod
def get_values_from_terp(terp):
return {
'description': terp.get('description', ''),
'shortdesc': terp.get('name', ''),
'author': terp.get('author', 'Unknown'),
'maintainer': terp.get('maintainer', False),
'contributors': ', '.join(terp.get('contributors', [])) or False,
'website': terp.get('website', ''),
'license': terp.get('license', 'AGPL-3'),
'sequence': terp.get('sequence', 100),
'application': terp.get('application', False),
'auto_install': terp.get('auto_install', False),
'icon': terp.get('icon', False),
'summary': terp.get('summary', ''),
}
    def create(self, cr, uid, vals, context=None):
        """Create the module record plus a matching, noupdate ir.model.data
        entry so the record is tracked as belonging to the `base` module."""
        new_id = super(module, self).create(cr, uid, vals, context=context)
        module_metadata = {
            'name': 'module_%s' % vals['name'],
            'model': 'ir.module.module',
            'module': 'base',
            'res_id': new_id,
            # noupdate so a later -u base does not overwrite the record.
            'noupdate': True,
        }
        self.pool['ir.model.data'].create(cr, uid, module_metadata)
        return new_id
    # update the list of available packages
    def update_list(self, cr, uid, context=None):
        """Scan the addons paths and synchronise the module list in the
        database with what is available on disk.

        :return: a two-element list ``[updated_count, added_count]``.
        """
        res = [0, 0] # [update, add]
        default_version = modules.adapt_version('1.0')
        known_mods = self.browse(cr, uid, self.search(cr, uid, []))
        known_mods_names = dict([(m.name, m) for m in known_mods])
        # iterate through detected modules and update/create them in db
        for mod_name in modules.get_modules():
            mod = known_mods_names.get(mod_name)
            terp = self.get_module_info(mod_name)
            values = self.get_values_from_terp(terp)
            if mod:
                # Existing record: write back only fields whose manifest
                # value actually differs from what is stored.
                updated_values = {}
                for key in values:
                    old = getattr(mod, key)
                    updated = isinstance(values[key], basestring) and tools.ustr(values[key]) or values[key]
                    if (old or updated) and updated != old:
                        updated_values[key] = values[key]
                # A previously uninstallable module may have become
                # installable again.
                if terp.get('installable', True) and mod.state == 'uninstallable':
                    updated_values['state'] = 'uninstalled'
                if parse_version(terp.get('version', default_version)) > parse_version(mod.latest_version or default_version):
                    res[0] += 1
                if updated_values:
                    self.write(cr, uid, mod.id, updated_values)
            else:
                # New module on disk: create its record unless it is
                # missing a path or flagged not installable.
                mod_path = modules.get_module_path(mod_name)
                if not mod_path:
                    continue
                if not terp or not terp.get('installable', True):
                    continue
                id = self.create(cr, uid, dict(name=mod_name, state='uninstalled', **values))
                mod = self.browse(cr, uid, id)
                res[1] += 1
            self._update_dependencies(cr, uid, mod, terp.get('depends', []))
            self._update_category(cr, uid, mod, terp.get('category', 'Uncategorized'))
        # Trigger load_addons if new module have been discovered it exists on
        # wsgi handlers, so they can react accordingly
        if tuple(res) != (0, 0):
            for handler in openerp.service.wsgi_server.module_handlers:
                if hasattr(handler, 'load_addons'):
                    handler.load_addons()
        return res
    def download(self, cr, uid, ids, download=True, context=None):
        # Kept for API compatibility: downloading individual modules via
        # this entry point is no longer supported.
        return []
    def install_from_urls(self, cr, uid, urls, context=None):
        """Download, extract and install the modules described by `urls`.

        :param urls: mapping of module name to download URL; a falsy URL
            means the local version is already up to date.

        Only members of base.group_system may call this, and every URL must
        point at the configured apps server (same scheme and host).
        """
        if not self.pool['res.users'].has_group(cr, uid, 'base.group_system'):
            raise openerp.exceptions.AccessDenied()
        apps_server = urlparse.urlparse(self.get_apps_server(cr, uid, context=context))
        OPENERP = 'openerp'
        tmp = tempfile.mkdtemp()
        _logger.debug('Install from url: %r', urls)
        try:
            # 1. Download & unzip missing modules
            for module_name, url in urls.items():
                if not url:
                    continue    # nothing to download, local version is already the last one
                # Refuse downloads from anywhere but the apps server.
                up = urlparse.urlparse(url)
                if up.scheme != apps_server.scheme or up.netloc != apps_server.netloc:
                    raise openerp.exceptions.AccessDenied()
                try:
                    _logger.info('Downloading module `%s` from OpenERP Apps', module_name)
                    content = urllib2.urlopen(url).read()
                except Exception:
                    _logger.exception('Failed to fetch module %s', module_name)
                    raise osv.except_osv(_('Module not found'),
                                         _('The `%s` module appears to be unavailable at the moment, please try again later.') % module_name)
                else:
                    zipfile.ZipFile(StringIO(content)).extractall(tmp)
                    assert os.path.isdir(os.path.join(tmp, module_name))
            # 2a. Copy/Replace module source in addons path
            for module_name, url in urls.items():
                if module_name == OPENERP or not url:
                    continue    # OPENERP is special case, handled below, and no URL means local module
                module_path = modules.get_module_path(module_name, downloaded=True, display_warning=False)
                bck = backup(module_path, False)
                _logger.info('Copy downloaded module `%s` to `%s`', module_name, module_path)
                shutil.move(os.path.join(tmp, module_name), module_path)
                if bck:
                    shutil.rmtree(bck)
            # 2b. Copy/Replace server+base module source if downloaded
            if urls.get(OPENERP, None):
                # special case. it contains the server and the base module.
                # extract path is not the same
                base_path = os.path.dirname(modules.get_module_path('base'))
                # copy all modules in the SERVER/openerp/addons directory to the new "openerp" module (except base itself)
                for d in os.listdir(base_path):
                    if d != 'base' and os.path.isdir(os.path.join(base_path, d)):
                        destdir = os.path.join(tmp, OPENERP, 'addons', d)   # XXX 'openerp' subdirectory ?
                        shutil.copytree(os.path.join(base_path, d), destdir)
                # then replace the server by the new "base" module
                server_dir = openerp.tools.config['root_path']  # XXX or dirname()
                bck = backup(server_dir)
                _logger.info('Copy downloaded module `openerp` to `%s`', server_dir)
                shutil.move(os.path.join(tmp, OPENERP), server_dir)
                #if bck:
                #    shutil.rmtree(bck)
            self.update_list(cr, uid, context=context)
            with_urls = [m for m, u in urls.items() if u]
            downloaded_ids = self.search(cr, uid, [('name', 'in', with_urls)], context=context)
            already_installed = self.search(cr, uid, [('id', 'in', downloaded_ids), ('state', '=', 'installed')], context=context)
            to_install_ids = self.search(cr, uid, [('name', 'in', urls.keys()), ('state', '=', 'uninstalled')], context=context)
            post_install_action = self.button_immediate_install(cr, uid, to_install_ids, context=context)
            if already_installed:
                # in this case, force server restart to reload python code...
                cr.commit()
                openerp.service.server.restart()
                return {
                    'type': 'ir.actions.client',
                    'tag': 'home',
                    'params': {'wait': True},
                }
            return post_install_action
        finally:
            shutil.rmtree(tmp)
    def get_apps_server(self, cr, uid, context=None):
        # URL of the OpenERP Apps server; overridable via server config.
        return tools.config.get('apps_server', 'https://apps.openerp.com/apps')
def _update_dependencies(self, cr, uid, mod_browse, depends=None):
if depends is None:
depends = []
existing = set(x.name for x in mod_browse.dependencies_id)
needed = set(depends)
for dep in (needed - existing):
cr.execute('INSERT INTO ir_module_module_dependency (module_id, name) values (%s, %s)', (mod_browse.id, dep))
for dep in (existing - needed):
cr.execute('DELETE FROM ir_module_module_dependency WHERE module_id = %s and name = %s', (mod_browse.id, dep))
self.invalidate_cache(cr, uid, ['dependencies_id'], [mod_browse.id])
def _update_category(self, cr, uid, mod_browse, category='Uncategorized'):
current_category = mod_browse.category_id
current_category_path = []
while current_category:
current_category_path.insert(0, current_category.name)
current_category = current_category.parent_id
categs = category.split('/')
if categs != current_category_path:
cat_id = create_categories(cr, categs)
mod_browse.write({'category_id': cat_id})
    def update_translations(self, cr, uid, ids, filter_lang=None, context=None):
        """(Re)load translation terms for the given installed modules.

        :param filter_lang: None (all translatable languages), a single
            language code, or a list of codes.
        """
        if not filter_lang:
            res_lang = self.pool.get('res.lang')
            lang_ids = res_lang.search(cr, uid, [('translatable', '=', True)])
            filter_lang = [lang.code for lang in res_lang.browse(cr, uid, lang_ids)]
        elif not isinstance(filter_lang, (list, tuple)):
            filter_lang = [filter_lang]
        modules = [m.name for m in self.browse(cr, uid, ids) if m.state == 'installed']
        self.pool.get('ir.translation').load_module_terms(cr, modules, filter_lang, context=context)
    def check(self, cr, uid, ids, context=None):
        # Sanity check: warn about modules shipped without a description.
        for mod in self.browse(cr, uid, ids, context=context):
            if not mod.description:
                _logger.warning('module %s: description is empty !', mod.name)
# Selection values for a dependency's status: the module states plus
# 'unknown' for dependencies that are not available on this system.
DEP_STATES = [
    ('uninstallable', 'Uninstallable'),
    ('uninstalled', 'Not Installed'),
    ('installed', 'Installed'),
    ('to upgrade', 'To be upgraded'),
    ('to remove', 'To be removed'),
    ('to install', 'To be installed'),
    ('unknown', 'Unknown'),
]
class module_dependency(osv.Model):
    """One dependency link declared by a module's manifest."""
    _name = "ir.module.module.dependency"
    _description = "Module dependency"
    # the dependency name
    name = fields2.Char(index=True)
    # the module that depends on it
    module_id = fields2.Many2one('ir.module.module', 'Module', ondelete='cascade')
    # the module corresponding to the dependency, and its status
    depend_id = fields2.Many2one('ir.module.module', 'Dependency', compute='_compute_depend')
    state = fields2.Selection(DEP_STATES, string='Status', compute='_compute_state')
    @api.multi
    @api.depends('name')
    def _compute_depend(self):
        """Resolve each dependency name to its ir.module.module record
        (empty when no module with that name exists on this system)."""
        # retrieve all modules corresponding to the dependency names
        names = list(set(dep.name for dep in self))
        mods = self.env['ir.module.module'].search([('name', 'in', names)])
        # index modules by name, and assign dependencies
        name_mod = dict((mod.name, mod) for mod in mods)
        for dep in self:
            dep.depend_id = name_mod.get(dep.name)
    @api.one
    @api.depends('depend_id.state')
    def _compute_state(self):
        # Mirror the target module's state; 'unknown' when unresolved.
        self.state = self.depend_id.state or 'unknown'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sreejithr/emacs.d | pyenv/emacs/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py | 163 | 11979 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, fsencode, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
class ScriptMaker(object):
    """
    A class to copy or create scripts from source scripts or callable
    specifications.
    """
    # Template used for generated (callable-spec) scripts; subclasses may
    # override it.
    script_template = SCRIPT_TEMPLATE
    executable = None # for shebangs
    def __init__(self, source_dir, target_dir, add_launchers=True,
                 dry_run=False, fileop=None):
        self.source_dir = source_dir
        self.target_dir = target_dir
        self.add_launchers = add_launchers
        self.force = False
        self.clobber = False
        # It only makes sense to set mode bits on POSIX.
        self.set_mode = (os.name == 'posix')
        # Name variants to generate: '' -> "foo", 'X' -> "foo3",
        # 'X.Y' -> "foo-3.4".
        self.variants = set(('', 'X.Y'))
        self._fileop = fileop or FileOperator(dry_run)
    def _get_alternate_executable(self, executable, options):
        """Return the GUI interpreter (pythonw) on Windows when the 'gui'
        option is set; otherwise return `executable` unchanged."""
        if options.get('gui', False) and os.name == 'nt':
            dn, fn = os.path.split(executable)
            fn = fn.replace('python', 'pythonw')
            executable = os.path.join(dn, fn)
        return executable
    def _get_shebang(self, encoding, post_interp=b'', options=None):
        """Build the shebang line (as bytes, with trailing newline) for a
        script whose source is in `encoding`.

        :raises ValueError: if the interpreter path cannot be represented
            in UTF-8 or in the script's own encoding.
        """
        if self.executable:
            executable = self.executable
        elif not sysconfig.is_python_build():
            executable = get_executable()
        elif in_venv():
            # NOTE(review): this branch is only reached for in-tree Python
            # builds (is_python_build() True) - confirm the ordering is
            # intentional.
            executable = os.path.join(sysconfig.get_path('scripts'),
                            'python%s' % sysconfig.get_config_var('EXE'))
        else:
            executable = os.path.join(
                sysconfig.get_config_var('BINDIR'),
               'python%s%s' % (sysconfig.get_config_var('VERSION'),
                               sysconfig.get_config_var('EXE')))
        if options:
            executable = self._get_alternate_executable(executable, options)
        executable = fsencode(executable)
        shebang = b'#!' + executable + post_interp + b'\n'
        # Python parser starts to read a script using UTF-8 until
        # it gets a #coding:xxx cookie. The shebang has to be the
        # first line of a file, the #coding:xxx cookie cannot be
        # written before. So the shebang has to be decodable from
        # UTF-8.
        try:
            shebang.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError(
                'The shebang (%r) is not decodable from utf-8' % shebang)
        # If the script is encoded to a custom encoding (use a
        # #coding:xxx cookie), the shebang has to be decodable from
        # the script encoding too.
        if encoding != 'utf-8':
            try:
                shebang.decode(encoding)
            except UnicodeDecodeError:
                raise ValueError(
                    'The shebang (%r) is not decodable '
                    'from the script encoding (%r)' % (shebang, encoding))
        return shebang
    def _get_script_text(self, entry):
        # Render the script template for one export entry
        # ("module:function" specification).
        return self.script_template % dict(module=entry.prefix,
                                           func=entry.suffix)
    # Manifest used for Windows launchers; subclasses may override.
    manifest = _DEFAULT_MANIFEST
    def get_manifest(self, exename):
        """Return the Windows manifest XML for the executable `exename`."""
        base = os.path.basename(exename)
        return self.manifest % base
    def _write_script(self, names, shebang, script_bytes, filenames, ext):
        """Write `script_bytes` (prefixed by `shebang`) under each name in
        `names`, wrapping it in a Windows .exe launcher when enabled.
        Every path written is appended to `filenames`."""
        use_launcher = self.add_launchers and os.name == 'nt'
        linesep = os.linesep.encode('utf-8')
        if not use_launcher:
            script_bytes = shebang + linesep + script_bytes
        else:
            # The launcher executable is followed by the shebang and a zip
            # archive whose __main__.py is the actual script ('t' launcher
            # for console scripts, 'w' for GUI ones).
            if ext == 'py':
                launcher = self._get_launcher('t')
            else:
                launcher = self._get_launcher('w')
            stream = BytesIO()
            with ZipFile(stream, 'w') as zf:
                zf.writestr('__main__.py', script_bytes)
            zip_data = stream.getvalue()
            script_bytes = launcher + shebang + linesep + zip_data
        for name in names:
            outname = os.path.join(self.target_dir, name)
            if use_launcher:
                n, e = os.path.splitext(outname)
                if e.startswith('.py'):
                    outname = n
                outname = '%s.exe' % outname
                try:
                    self._fileop.write_binary_file(outname, script_bytes)
                except Exception:
                    # Failed writing an executable - it might be in use.
                    logger.warning('Failed to write executable - trying to '
                                   'use .deleteme logic')
                    dfname = '%s.deleteme' % outname
                    if os.path.exists(dfname):
                        os.remove(dfname) # Not allowed to fail here
                    os.rename(outname, dfname) # nor here
                    self._fileop.write_binary_file(outname, script_bytes)
                    logger.debug('Able to replace executable using '
                                 '.deleteme logic')
                    try:
                        os.remove(dfname)
                    except Exception:
                        pass # still in use - ignore error
            else:
                if os.name == 'nt' and not outname.endswith('.' + ext):
                    outname = '%s.%s' % (outname, ext)
                if os.path.exists(outname) and not self.clobber:
                    logger.warning('Skipping existing file %s', outname)
                    continue
                self._fileop.write_binary_file(outname, script_bytes)
                if self.set_mode:
                    self._fileop.set_executable_mode([outname])
            filenames.append(outname)
    def _make_script(self, entry, filenames, options=None):
        """Generate a script invoking the callable described by the export
        entry `entry`, under each enabled name variant."""
        shebang = self._get_shebang('utf-8', options=options)
        script = self._get_script_text(entry).encode('utf-8')
        name = entry.name
        scriptnames = set()
        if '' in self.variants:
            scriptnames.add(name)
        if 'X' in self.variants:
            scriptnames.add('%s%s' % (name, sys.version[0]))
        if 'X.Y' in self.variants:
            # NOTE(review): sys.version[:3] yields '3.1' on Python 3.10+;
            # sys.version_info would be the robust source - confirm with
            # upstream distlib before changing.
            scriptnames.add('%s-%s' % (name, sys.version[:3]))
        if options and options.get('gui', False):
            ext = 'pyw'
        else:
            ext = 'py'
        self._write_script(scriptnames, shebang, script, filenames, ext)
    def _copy_script(self, script, filenames):
        """Copy the source script `script` into the target directory,
        rewriting its shebang when the first line invokes Python."""
        adjust = False
        script = os.path.join(self.source_dir, convert_path(script))
        outname = os.path.join(self.target_dir, os.path.basename(script))
        if not self.force and not self._fileop.newer(script, outname):
            logger.debug('not copying %s (up-to-date)', script)
            return
        # Always open the file, but ignore failures in dry-run mode --
        # that way, we'll get accurate feedback if we can read the
        # script.
        try:
            f = open(script, 'rb')
        except IOError:
            if not self.dry_run:
                raise
            f = None
        else:
            encoding, lines = detect_encoding(f.readline)
            f.seek(0)
            first_line = f.readline()
            if not first_line:
                # NOTE(review): get_command_name() is not defined on this
                # class - confirm this distutils-inherited call is reachable.
                logger.warning('%s: %s is an empty file (skipping)',
                               self.get_command_name(), script)
                return
            match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
            if match:
                adjust = True
                post_interp = match.group(1) or b''
        if not adjust:
            if f:
                f.close()
            self._fileop.copy_file(script, outname)
            if self.set_mode:
                self._fileop.set_executable_mode([outname])
            filenames.append(outname)
        else:
            logger.info('copying and adjusting %s -> %s', script,
                        self.target_dir)
            if not self._fileop.dry_run:
                shebang = self._get_shebang(encoding, post_interp)
                if b'pythonw' in first_line:
                    ext = 'pyw'
                else:
                    ext = 'py'
                n = os.path.basename(outname)
                self._write_script([n], shebang, f.read(), filenames, ext)
            if f:
                f.close()
    @property
    def dry_run(self):
        # Dry-run state is delegated to the underlying FileOperator.
        return self._fileop.dry_run
    @dry_run.setter
    def dry_run(self, value):
        self._fileop.dry_run = value
    if os.name == 'nt':
        # Executable launcher support.
        # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
        def _get_launcher(self, kind):
            # Choose the 32- or 64-bit launcher matching this interpreter
            # (pointer size distinguishes the two).
            if struct.calcsize('P') == 8:   # 64-bit
                bits = '64'
            else:
                bits = '32'
            name = '%s%s.exe' % (kind, bits)
            # Issue 31: don't hardcode an absolute package name, but
            # determine it relative to the current package
            distlib_package = __name__.rsplit('.', 1)[0]
            result = finder(distlib_package).find(name).bytes
            return result
    # Public API follows
    def make(self, specification, options=None):
        """
        Make a script.

        :param specification: The specification, which is either a valid export
                              entry specification (to make a script from a
                              callable) or a filename (to make a script by
                              copying from a source location).
        :param options: A dictionary of options controlling script generation.
        :return: A list of all absolute pathnames written to.
        """
        filenames = []
        entry = get_export_entry(specification)
        if entry is None:
            self._copy_script(specification, filenames)
        else:
            self._make_script(entry, filenames, options=options)
        return filenames
    def make_multiple(self, specifications, options=None):
        """
        Take a list of specifications and make scripts from them,

        :param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to,
        """
        filenames = []
        for specification in specifications:
            filenames.extend(self.make(specification, options))
        return filenames
| mit |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/cronscripts/daily_product_jobs.py | 1 | 1141 | #!/usr/bin/python -S
#
# Copyright 2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Request jobs to update products and send emails."""
__metaclass__ = type
import _pythonpath
import transaction
from lp.registry.model.productjob import ProductJobManager
from lp.services.config import config
from lp.services.scripts.base import LaunchpadCronScript
from lp.services.webapp.errorlog import globalErrorUtility
class RequestProductJobs(LaunchpadCronScript):
    """Create `ProductJobs` for products that need updating."""
    def __init__(self):
        # Run under the same database user as the commercial-expiration
        # job source, per the service configuration.
        name = 'daily_product_jobs'
        dbuser = config.ICommercialExpiredJobSource.dbuser
        LaunchpadCronScript.__init__(self, name, dbuser)
    def main(self):
        """Schedule all daily product jobs and commit the transaction."""
        globalErrorUtility.configure(self.name)
        manager = ProductJobManager(self.logger)
        job_count = manager.createAllDailyJobs()
        self.logger.info('Requested %d total product jobs.' % job_count)
        transaction.commit()
if __name__ == '__main__':
    # Standard Launchpad cron entry point: take the script lock, then run.
    script = RequestProductJobs()
    script.lock_and_run()
| agpl-3.0 |
suiyuan2009/tensorflow | tensorflow/contrib/learn/python/learn/estimators/run_config.py | 9 | 16198 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run Config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import six
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as core_run_config
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
# A list of the property names in RunConfig user allows to change. They will
# not affect the execution framework, so when execution framework checks the
# `uid` of the RunConfig, it should be ignored.
_DEFAULT_UID_WHITE_LIST = [
    'tf_random_seed',
    'save_summary_steps',
    'save_checkpoints_steps',
    'save_checkpoints_secs',
    'session_config',
    'keep_checkpoint_max',
    'keep_checkpoint_every_n_hours',
    'log_step_count_steps',
]
class Environment(object):
  """Names accepted in the `environment` attribute of `TF_CONFIG`."""
  # For running general distributed training.
  CLOUD = 'cloud'
  # For running Google-internal distributed training.
  GOOGLE = 'google'
  # For running on local desktop.
  LOCAL = 'local'
class TaskType(object):
  """Canonical task-type names used in a TF cluster specification."""
  MASTER = 'master'
  PS = 'ps'
  WORKER = 'worker'
class ClusterConfig(object):
  """This class specifies the configurations for a distributed run.

  If you're using `tf.learn` `Estimators`, you should probably use the subclass
  RunConfig instead.
  """

  def __init__(self, master=None, evaluation_master=None):
    """Constructor.

    Sets the properties `cluster_spec`, `is_chief`, `master` (if `None` in the
    args), `num_ps_replicas`, `task_id`, and `task_type` based on the
    `TF_CONFIG` environment variable, if the pertinent information is
    present. The `TF_CONFIG` environment variable is a JSON object with
    attributes: `cluster`, `environment`, and `task`.

    `cluster` is a JSON serialized version of `ClusterSpec`'s Python dict from
    `server_lib.py`, mapping task types (usually one of the TaskType enums) to a
    list of task addresses.

    `environment` specifies the runtime environment for the job (usually one of
    the `Environment` enums). Defaults to `LOCAL`.

    `task` has two attributes: `type` and `index`, where `type` can be any of
    the task types in `cluster`. When `TF_CONFIG` contains said information, the
    following properties are set on this class:

    * `task_type` is set to `TF_CONFIG['task']['type']`. Defaults to `None`.
    * `task_id` is set to `TF_CONFIG['task']['index']`. Defaults to 0.
    * `cluster_spec` is parsed from `TF_CONFIG['cluster']`. Defaults to {}.
    * `master` is determined by looking up `task_type` and `task_id` in the
      `cluster_spec`. Defaults to ''.
    * `num_ps_replicas` is set by counting the number of nodes listed
      in the `ps` attribute of `cluster_spec`. Defaults to 0.
    * `num_worker_replicas` is set by counting the number of nodes listed
      in the `worker` attribute of `cluster_spec`. Defaults to 0.
    * `is_chief` is determined based on `task_type`, `task_id`, and
      `environment`.

    Example:
    ```
      cluster = {'ps': ['host1:2222', 'host2:2222'],
                 'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
      os.environ['TF_CONFIG'] = json.dumps(
          {'cluster': cluster,
           'task': {'type': 'worker', 'index': 1}})
      config = ClusterConfig()
      assert config.master == 'host4:2222'
      assert config.task_id == 1
      assert config.num_ps_replicas == 2
      assert config.num_worker_replicas == 3
      assert config.cluster_spec == server_lib.ClusterSpec(cluster)
      assert config.task_type == 'worker'
      assert not config.is_chief
    ```

    Args:
      master: TensorFlow master. Defaults to empty string for local.
      evaluation_master: The master on which to perform evaluation.
    """
    # If not explicitly specified in the constructor and the TF_CONFIG
    # environment variable is present, load cluster_spec from TF_CONFIG.
    config = json.loads(os.environ.get('TF_CONFIG') or '{}')

    # Set task_type and task_id if the TF_CONFIG environment variable is
    # present.  Otherwise, use the respective default (None / 0).  The task
    # index is read from the `config` dict parsed above rather than through
    # `get_task_id()`, which would redundantly re-read and re-parse the
    # TF_CONFIG environment variable.
    task_env = config.get('task', {})
    self._task_type = task_env.get('type', None)
    task_index = task_env.get('index')
    self._task_id = int(task_index) if task_index else 0

    self._cluster_spec = server_lib.ClusterSpec(config.get('cluster', {}))
    self._master = (master if master is not None else
                    _get_master(self._cluster_spec, self._task_type,
                                self._task_id) or '')
    self._num_ps_replicas = _count_ps(self._cluster_spec) or 0
    self._num_worker_replicas = _count_worker(self._cluster_spec) or 0

    # Set is_chief.
    self._environment = config.get('environment', Environment.LOCAL)
    self._is_chief = None
    if self._task_type is None:
      # No task information at all: treat task 0 as the chief.
      self._is_chief = (self._task_id == 0)
    elif self._environment == Environment.CLOUD:
      # When the TF_CONFIG environment variable is set, is_chief defaults to
      # True iff task_type is "master" and task_id is 0.
      self._is_chief = (self._task_type == TaskType.MASTER and
                        self._task_id == 0)
    else:
      # Legacy behavior: worker 0 is the chief.
      self._is_chief = (self._task_type == TaskType.WORKER and
                        self._task_id == 0)

    self._evaluation_master = evaluation_master or ''

  @property
  def cluster_spec(self):
    return self._cluster_spec

  @property
  def environment(self):
    return self._environment

  @property
  def evaluation_master(self):
    return self._evaluation_master

  @property
  def is_chief(self):
    return self._is_chief

  @property
  def master(self):
    return self._master

  @property
  def num_ps_replicas(self):
    return self._num_ps_replicas

  @property
  def num_worker_replicas(self):
    return self._num_worker_replicas

  @property
  def task_id(self):
    return self._task_id

  @property
  def task_type(self):
    return self._task_type

  @staticmethod
  def get_task_id():
    """Returns task index from `TF_CONFIG` environmental variable.

    If you have a ClusterConfig instance, you can just access its task_id
    property instead of calling this function and re-parsing the environmental
    variable.

    Returns:
      `TF_CONFIG['task']['index']`. Defaults to 0.
    """
    config = json.loads(os.environ.get('TF_CONFIG') or '{}')
    task_env = config.get('task', {})
    task_index = task_env.get('index')
    return int(task_index) if task_index else 0
class RunConfig(ClusterConfig, core_run_config.RunConfig):
  """This class specifies the configurations for an `Estimator` run.

  This class is the implementation of ${tf.estimator.RunConfig} interface.

  If you're a Google-internal user using command line flags with
  `learn_runner.py` (for instance, to do distributed training or to use
  parameter servers), you probably want to use `learn_runner.EstimatorConfig`
  instead.
  """

  # Sentinel default for `save_checkpoints_secs`, so that "not specified" can
  # be distinguished from an explicit `None`.
  _USE_DEFAULT = 0

  def __init__(self,
               master=None,
               num_cores=0,
               log_device_placement=False,
               gpu_memory_fraction=1,
               tf_random_seed=None,
               save_summary_steps=100,
               save_checkpoints_secs=_USE_DEFAULT,
               save_checkpoints_steps=None,
               keep_checkpoint_max=5,
               keep_checkpoint_every_n_hours=10000,
               log_step_count_steps=100,
               evaluation_master='',
               model_dir=None,
               session_config=None):
    """Constructor.

    Note that the superclass `ClusterConfig` may set properties like
    `cluster_spec`, `is_chief`, `master` (if `None` in the args),
    `num_ps_replicas`, `task_id`, and `task_type` based on the `TF_CONFIG`
    environment variable. See `ClusterConfig` for more details.

    Args:
      master: TensorFlow master. Defaults to empty string for local.
      num_cores: Number of cores to be used. If 0, the system picks an
        appropriate number (default: 0).
      log_device_placement: Log the op placement to devices (default: False).
      gpu_memory_fraction: Fraction of GPU memory used by the process on
        each GPU uniformly on the same machine.
      tf_random_seed: Random seed for TensorFlow initializers.
        Setting this value allows consistency between reruns.
      save_summary_steps: Save summaries every this many steps.
      save_checkpoints_secs: Save checkpoints every this many seconds. Can not
        be specified with `save_checkpoints_steps`.
      save_checkpoints_steps: Save checkpoints every this many steps. Can not be
        specified with `save_checkpoints_secs`.
      keep_checkpoint_max: The maximum number of recent checkpoint files to
        keep. As new files are created, older files are deleted. If None or 0,
        all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
        checkpoint files are kept.)
      keep_checkpoint_every_n_hours: Number of hours between each checkpoint
        to be saved. The default value of 10,000 hours effectively disables
        the feature.
      log_step_count_steps: The frequency, in number of global steps, that the
        global step/sec will be logged during training.
      evaluation_master: the master on which to perform evaluation.
      model_dir: directory where model parameters, graph etc are saved. If
        `None`, will use `model_dir` property in `TF_CONFIG` environment
        variable. If both are set, must have same value. If both are `None`, see
        `Estimator` about where the model will be saved.
      session_config: a ConfigProto used to set session parameters, or None.
        Note - using this argument, it is easy to provide settings which break
        otherwise perfectly good models. Use with care.
    """
    # ClusterConfig parses TF_CONFIG and derives cluster/master/task info.
    super(RunConfig, self).__init__(
        master=master, evaluation_master=evaluation_master)

    # Build the session ConfigProto once; num_cores == 0 lets TensorFlow pick
    # the thread-pool sizes.
    gpu_options = config_pb2.GPUOptions(
        per_process_gpu_memory_fraction=gpu_memory_fraction)
    self._tf_config = config_pb2.ConfigProto(
        log_device_placement=log_device_placement,
        inter_op_parallelism_threads=num_cores,
        intra_op_parallelism_threads=num_cores,
        gpu_options=gpu_options)

    self._tf_random_seed = tf_random_seed
    self._save_summary_steps = save_summary_steps
    self._save_checkpoints_secs = save_checkpoints_secs
    self._log_step_count_steps = log_step_count_steps
    self._session_config = session_config
    # If neither checkpoint interval was given explicitly, default to
    # time-based checkpointing every 600 seconds; an explicit
    # `save_checkpoints_steps` disables the time-based default.
    if save_checkpoints_secs == RunConfig._USE_DEFAULT:
      if save_checkpoints_steps is None:
        self._save_checkpoints_secs = 600
      else:
        self._save_checkpoints_secs = None
    self._save_checkpoints_steps = save_checkpoints_steps

    # TODO(weiho): Remove these after ModelFn refactoring, when users can
    # create Scaffold and Saver in their model_fn to set these.
    self._keep_checkpoint_max = keep_checkpoint_max
    self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
    self._model_dir = _get_model_dir(model_dir)

  @experimental
  def uid(self, whitelist=None):
    """Generates a 'Unique Identifier' based on all internal fields.

    Caller should use the uid string to check `RunConfig` instance integrity
    in one session use, but should not rely on the implementation details, which
    is subject to change.

    Args:
      whitelist: A list of the string names of the properties uid should not
        include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
        includes most properties user allows to change.

    Returns:
      A uid string.
    """
    if whitelist is None:
      whitelist = _DEFAULT_UID_WHITE_LIST

    state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}
    # Pop out the keys in whitelist.
    for k in whitelist:
      state.pop('_' + k, None)

    ordered_state = collections.OrderedDict(
        sorted(state.items(), key=lambda t: t[0]))
    # For class instance without __repr__, some special cares are required.
    # Otherwise, the object address will be used.
    if '_cluster_spec' in ordered_state:
      ordered_state['_cluster_spec'] = ordered_state['_cluster_spec'].as_dict()
    return ', '.join(
        '%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))

  @property
  def model_dir(self):
    return self._model_dir

  @property
  def tf_config(self):
    return self._tf_config

  @property
  def tf_random_seed(self):
    return self._tf_random_seed

  @property
  def save_summary_steps(self):
    return self._save_summary_steps

  @property
  def save_checkpoints_secs(self):
    return self._save_checkpoints_secs

  @property
  def save_checkpoints_steps(self):
    return self._save_checkpoints_steps

  @property
  def session_config(self):
    return self._session_config

  @property
  def keep_checkpoint_max(self):
    return self._keep_checkpoint_max

  @property
  def keep_checkpoint_every_n_hours(self):
    return self._keep_checkpoint_every_n_hours

  @property
  def log_step_count_steps(self):
    return self._log_step_count_steps
def _count_ps(cluster_spec):
  """Counts the number of parameter servers in cluster_spec."""
  if not cluster_spec:
    return 0
  return len(cluster_spec.as_dict().get('ps', []))
def _count_worker(cluster_spec):
  """Counts the number of workers in cluster_spec."""
  if not cluster_spec:
    return 0
  return len(cluster_spec.as_dict().get('worker', []))
def _get_master(cluster_spec, task_type, task_id):
  """Returns the appropriate string for the TensorFlow master."""
  if not cluster_spec:
    return ''

  job_names = cluster_spec.jobs

  # A cluster with exactly one node runs everything locally.
  if len(job_names) == 1 and len(cluster_spec.job_tasks(job_names[0])) == 1:
    return ''

  # For backwards compatibility, an unset task_type yields an empty master
  # (task_type did not previously exist).
  if not task_type:
    return ''

  # Look up the master in cluster_spec using task_type and task_id.
  if task_type not in job_names:
    raise ValueError(
        '%s is not a valid task_type in the cluster_spec:\n'
        '%s\n\n'
        'Note that these values may be coming from the TF_CONFIG environment '
        'variable.' % (task_type, cluster_spec))

  task_addresses = cluster_spec.job_tasks(task_type)
  if not 0 <= task_id < len(task_addresses):
    raise ValueError(
        '%d is not a valid task_id for task_type %s in the '
        'cluster_spec:\n'
        '%s\n\n'
        'Note that these value may be coming from the TF_CONFIG environment '
        'variable.' % (task_id, task_type, cluster_spec))

  return 'grpc://' + task_addresses[task_id]
def _get_model_dir(model_dir):
  """Returns `model_dir` based user provided `model_dir` or `TF_CONFIG`."""
  tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')
  tf_config_model_dir = tf_config.get('model_dir', None)
  if tf_config_model_dir is not None:
    # When both sources specify a model_dir they must agree.
    if model_dir is not None and model_dir != tf_config_model_dir:
      raise ValueError(
          '`model_dir` provided in RunConfig construct, if set, '
          'must have the same value as the model_dir in TF_CONFIG. '
          'model_dir: {}\nTF_CONFIG["model_dir"]: {}.\n'.format(
              model_dir, tf_config_model_dir))
    logging.info('Using model_dir in TF_CONFIG: %s', tf_config_model_dir)
  return model_dir or tf_config_model_dir
| apache-2.0 |
amilan/dev-maxiv-pynutaq | src/pynutaq/extra/extra.py | 1 | 12088 | #!/usr/bin/env python
###############################################################################
# Extra methods to be used by the special attributes in pynutaq device
# server.
#
# Copyright (C) 2013 Max IV Laboratory, Lund Sweden
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
###############################################################################
"""This module defines the extra methods used for the special attributes in the
device servers
"""
__author__ = 'antmil'
__docformat__ = 'restructuredtext'
import math
try:
from pynutaq.perseus.perseusdefs import *
except ImportError, e:
print "#############################################"
print "It's not possible to import perseusdefs. "
print "This device can run only in simulated mode. "
print "#############################################"
raise
from pynutaq.perseus.perseusutils import read_direct, write_direct, read_diag_direct, get_offset
def get_GainTetrode1(perseus, GainTetrode1_address=None, cavity=None, *args):
    """Deprecated signature guard -- see below."""


def _removed():
    pass
def get_GainTetrode2(perseus, address, cavity):
    """Read the tetrode 2 gain for `cavity`.

    Writes the register address to the cavity's read-offset port, then reads
    the raw value back and divides by the firmware scale factor 19898.0.
    """
    offset = get_offset('read', cavity)
    perseus.write(offset, address)
    return perseus.read(offset) / 19898.0


def set_GainTetrode2(perseus, GainTetrode2, address, cavity):
    """Write the tetrode 2 gain for `cavity`.

    The gain is scaled by 19898.0 and packed with the register address in
    bits 17 and above.
    """
    offset = get_offset('write', cavity)
    value = address << 17 | (int(GainTetrode2 * 19898.0))
    perseus.write(offset, value)
def get_GainOl(perseus, address, cavity):
    """Read the OL (presumably open-loop) gain for `cavity`.

    Returns the raw register value mapped from the 0-127 register scale back
    to engineering units (raw * 2 / 127).
    """
    offset = get_offset('read', cavity)
    perseus.write(offset, address)
    value = perseus.read(offset)
    return (value * 2.0) / 127


def set_GainOl(perseus, GainOl, address, cavity):
    """Write the OL gain for `cavity`.

    The gain is mapped onto the 0-127 register scale (rounded up) and packed
    with the register address in bits 17 and above.
    """
    offset = get_offset('write', cavity)
    value = math.ceil((GainOl / 2.0) * 127)
    perseus.write(offset, address << 17 | int(value))
def get_Freqsquare(perseus, address, cavity):
    """Read the square-wave frequency setting for `cavity` (raw / 80000.0)."""
    offset = get_offset('read', cavity)
    # @warning kept from original: unclear whether this should use
    # read_direct() instead -- confirm against the firmware interface.
    perseus.write(offset, address)
    return perseus.read(offset) / 80000.0


def set_Freqsquare(perseus, FreqsquareA, address, cavity):
    """Write the square-wave frequency for `cavity`.

    The period (1/f scaled by 1e6, then divided by 12.5 -- presumably
    12.5 ns ticks of the 80 MHz clock; confirm units) is packed with the
    register address in bits 17 and above.
    """
    offset = get_offset('write', cavity)
    value = ((1 / FreqsquareA) * 1000000.0) / 12.5
    perseus.write(offset, address << 17 | int(value))
def get_ConditioningdutyCicle(perseus, address, cavity):
    """Read the conditioning duty cycle for `cavity`, in percent.

    Note: the 'Cicle' spelling is kept for API compatibility with callers.
    """
    value = read_direct(perseus, address, cavity)
    return (value / 8000000.0) * 256 * 100.0


def set_ConditioningdutyCicle(perseus, ConditioningdutyCicleA, address, cavity):
    """Write the conditioning duty cycle (percent) for `cavity`."""
    value = ((ConditioningdutyCicleA * 8000000.0) / 100.0) / 256
    write_direct(perseus, value, address, cavity)
def get_MDivider(perseus, address, cavity):
    """Read the M divider for `cavity`; the register stores (value - 1)."""
    # @warning kept from original: unclear whether read_direct or the +1 is
    # correct -- confirm against the firmware documentation.
    return read_direct(perseus, address, cavity) + 1


def set_MDivider(perseus, MDivider, address, cavity):
    """Write the M divider for `cavity` (stored as value - 1)."""
    write_direct(perseus, MDivider - 1, address, cavity)
def get_NDivider(perseus, address, cavity):
    """Read the N divider for `cavity`; the register stores (value - 1)."""
    # @warning kept from original: unclear whether read_direct or the +1 is
    # correct -- confirm against the firmware documentation.
    return read_direct(perseus, address, cavity) + 1


def set_NDivider(perseus, NDivider, address, cavity):
    """Write the N divider for `cavity` (stored as value - 1)."""
    write_direct(perseus, NDivider - 1, address, cavity)
def get_Pilimit(perseus, address, cavity):
    """Read the PI limit for `cavity` (raw register * 1000.0 / 32767)."""
    value = read_direct(perseus, address, cavity)
    return (value * 1000.0) / 32767


def set_Pilimit(perseus, PiLimitA, address, cavity):
    """Write the PI limit for `cavity` (scaled onto the 15-bit range)."""
    write_direct(perseus, (PiLimitA / 1000.0) * 32767, address, cavity)
def get_Fwmin(perseus, address, cavity):
    """Read the forward-minimum threshold for `cavity` (raw * 1000.0 / 32767)."""
    value = read_direct(perseus, address, cavity)
    return (value * 1000.0) / 32767


def set_Fwmin(perseus, Fwmina, address, cavity):
    """Write the forward-minimum threshold for `cavity` (scaled onto 15 bits)."""
    write_direct(perseus, (Fwmina / 1000.0) * 32767, address, cavity)
def get_Tuningdelay(perseus, address, cavity):
    """Read the tuning delay for `cavity`.

    Conversion: raw / 80000000 * 2**12 (80 MHz clock; confirm units).
    """
    value = read_direct(perseus, address, cavity)
    return (value / 80000000.0) * (2 ** 12)


def set_Tuningdelay(perseus, TuningDelay, address, cavity):
    """Write the tuning delay for `cavity` (inverse of the get conversion)."""
    value = (TuningDelay * 80000000.0) / (2 ** 12)
    write_direct(perseus, value, address, cavity)
def get_InterlocksDelay(perseus, address, cavity):
    """Read the interlocks delay for `cavity` (raw / 80; 80 MHz ticks, presumably)."""
    value = read_direct(perseus, address, cavity)
    return value / 80.0


def set_InterlocksDelay(perseus, value, address, cavity):
    """Write the interlocks delay for `cavity` (value * 80)."""
    write_direct(perseus, value * 80.0, address, cavity)
def get_FdlTriggerDelay(perseus, address, cavity):
    """Read the FDL trigger delay for `cavity` (raw / 80000 * 2**12)."""
    value = read_direct(perseus, address, cavity)
    return (value / 80000.0) * (2 ** 12)


def set_FdlTriggerDelay(perseus, value, address, cavity):
    """Write the FDL trigger delay for `cavity` (inverse of the get conversion)."""
    value = (value * 80000.0) / (2 ** 12)
    write_direct(perseus, value, address, cavity)
# Diagnostics device methods
# def get_Rvtet1(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 0
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_Rvtet2(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 1
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_Rvcirc(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 2
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_Fwload(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 3
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_Fwhybload(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 4
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_Rvcav(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 5
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_Arcs(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 6
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_Vacuum(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 7
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_ManualInterlock(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 8
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_ExternalItck(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 9
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_PlungerEndSwitchUp(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 10
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
#
# def get_PlungerEndSwitchDown(perseus, address, itck_number):
# try:
# if itck_number == 0:
# address = 150
# pos = 11
# address = address + itck_number
# value = read_direct(perseus, address)
# return bool((value >> pos) & 1)
# except Exception, e:
# raise e
def read_bit_direct(perseus, address, position, cavity):
    """Return bit `position` of the register at `address` as a bool."""
    value = read_direct(perseus, address, cavity)
    return bool((value >> position) & 1)
def read_diag_bit_direct(perseus, address, position, cavity):
    """Return bit `position` of the diagnostics register at `address` as a bool."""
    value = read_diag_direct(perseus, address, cavity)
    return bool((value >> position) & 1)
def read_diag_timestamp(perseus, address, cavity):
    """Read a diagnostics timestamp for `cavity`.

    Conversion: raw * 12.5 / 1000 (12.5 ns ticks -> microseconds, presumably;
    confirm units against the firmware documentation).
    """
    value = read_diag_direct(perseus, address, cavity)
    return (value * 12.5) / 1000.0
# Per-attribute aliases: diagnostics timestamps 1-7 all use the same
# conversion, so each name simply points at read_diag_timestamp.
read_Diag_Timestamp1 = read_diag_timestamp
read_Diag_Timestamp2 = read_diag_timestamp
read_Diag_Timestamp3 = read_diag_timestamp
read_Diag_Timestamp4 = read_diag_timestamp
read_Diag_Timestamp5 = read_diag_timestamp
read_Diag_Timestamp6 = read_diag_timestamp
read_Diag_Timestamp7 = read_diag_timestamp
| gpl-3.0 |
xiandaicxsj/qemu-copy | roms/u-boot/tools/patman/series.py | 32 | 9358 | # Copyright (c) 2011 The Chromium OS Authors.
#
# SPDX-License-Identifier: GPL-2.0+
#
import itertools
import os
import get_maintainer
import gitutil
import terminal
# Series-xxx tags that we understand
# Valid values for the 'xxx' part of a 'Series-xxx:' commit tag.  Note that
# AddTag() converts '-' to '_' before storing a tag on the Series object.
valid_series = ['to', 'cc', 'version', 'changes', 'prefix', 'notes', 'name',
                'cover-cc', 'process_log']
class Series(dict):
"""Holds information about a patch series, including all tags.
Vars:
cc: List of aliases/emails to Cc all patches to
commits: List of Commit objects, one for each patch
cover: List of lines in the cover letter
notes: List of lines in the notes
changes: (dict) List of changes for each version, The key is
the integer version number
allow_overwrite: Allow tags to overwrite an existing tag
"""
def __init__(self):
self.cc = []
self.to = []
self.cover_cc = []
self.commits = []
self.cover = None
self.notes = []
self.changes = {}
self.allow_overwrite = False
# Written in MakeCcFile()
# key: name of patch file
# value: list of email addresses
self._generated_cc = {}
# These make us more like a dictionary
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
return self[name]
def AddTag(self, commit, line, name, value):
"""Add a new Series-xxx tag along with its value.
Args:
line: Source line containing tag (useful for debug/error messages)
name: Tag name (part after 'Series-')
value: Tag value (part after 'Series-xxx: ')
"""
# If we already have it, then add to our list
name = name.replace('-', '_')
if name in self and not self.allow_overwrite:
values = value.split(',')
values = [str.strip() for str in values]
if type(self[name]) != type([]):
raise ValueError("In %s: line '%s': Cannot add another value "
"'%s' to series '%s'" %
(commit.hash, line, values, self[name]))
self[name] += values
# Otherwise just set the value
elif name in valid_series:
self[name] = value
else:
raise ValueError("In %s: line '%s': Unknown 'Series-%s': valid "
"options are %s" % (commit.hash, line, name,
', '.join(valid_series)))
def AddCommit(self, commit):
"""Add a commit into our list of commits
We create a list of tags in the commit subject also.
Args:
commit: Commit object to add
"""
commit.CheckTags()
self.commits.append(commit)
def ShowActions(self, args, cmd, process_tags):
"""Show what actions we will/would perform
Args:
args: List of patch files we created
cmd: The git command we would have run
process_tags: Process tags as if they were aliases
"""
col = terminal.Color()
print 'Dry run, so not doing much. But I would do this:'
print
print 'Send a total of %d patch%s with %scover letter.' % (
len(args), '' if len(args) == 1 else 'es',
self.get('cover') and 'a ' or 'no ')
# TODO: Colour the patches according to whether they passed checks
for upto in range(len(args)):
commit = self.commits[upto]
print col.Color(col.GREEN, ' %s' % args[upto])
cc_list = list(self._generated_cc[commit.patch])
# Skip items in To list
if 'to' in self:
try:
map(cc_list.remove, gitutil.BuildEmailList(self.to))
except ValueError:
pass
for email in cc_list:
if email == None:
email = col.Color(col.YELLOW, "<alias '%s' not found>"
% tag)
if email:
print ' Cc: ',email
print
for item in gitutil.BuildEmailList(self.get('to', '<none>')):
print 'To:\t ', item
for item in gitutil.BuildEmailList(self.cc):
print 'Cc:\t ', item
print 'Version: ', self.get('version')
print 'Prefix:\t ', self.get('prefix')
if self.cover:
print 'Cover: %d lines' % len(self.cover)
cover_cc = gitutil.BuildEmailList(self.get('cover_cc', ''))
all_ccs = itertools.chain(cover_cc, *self._generated_cc.values())
for email in set(all_ccs):
print ' Cc: ',email
if cmd:
print 'Git command: %s' % cmd
def MakeChangeLog(self, commit):
"""Create a list of changes for each version.
Return:
The change log as a list of strings, one per line
Changes in v4:
- Jog the dial back closer to the widget
Changes in v3: None
Changes in v2:
- Fix the widget
- Jog the dial
etc.
"""
final = []
process_it = self.get('process_log', '').split(',')
process_it = [item.strip() for item in process_it]
need_blank = False
for change in sorted(self.changes, reverse=True):
out = []
for this_commit, text in self.changes[change]:
if commit and this_commit != commit:
continue
if 'uniq' not in process_it or text not in out:
out.append(text)
line = 'Changes in v%d:' % change
have_changes = len(out) > 0
if 'sort' in process_it:
out = sorted(out)
if have_changes:
out.insert(0, line)
else:
out = [line + ' None']
if need_blank:
out.insert(0, '')
final += out
need_blank = have_changes
if self.changes:
final.append('')
return final
def DoChecks(self):
"""Check that each version has a change log
Print an error if something is wrong.
"""
col = terminal.Color()
if self.get('version'):
changes_copy = dict(self.changes)
for version in range(1, int(self.version) + 1):
if self.changes.get(version):
del changes_copy[version]
else:
if version > 1:
str = 'Change log missing for v%d' % version
print col.Color(col.RED, str)
for version in changes_copy:
str = 'Change log for unknown version v%d' % version
print col.Color(col.RED, str)
elif self.changes:
str = 'Change log exists, but no version is set'
print col.Color(col.RED, str)
def MakeCcFile(self, process_tags, cover_fname, raise_on_error):
"""Make a cc file for us to use for per-commit Cc automation
Also stores in self._generated_cc to make ShowActions() faster.
Args:
process_tags: Process tags as if they were aliases
cover_fname: If non-None the name of the cover letter.
raise_on_error: True to raise an error when an alias fails to match,
False to just print a message.
Return:
Filename of temp file created
"""
# Look for commit tags (of the form 'xxx:' at the start of the subject)
fname = '/tmp/patman.%d' % os.getpid()
fd = open(fname, 'w')
all_ccs = []
for commit in self.commits:
list = []
if process_tags:
list += gitutil.BuildEmailList(commit.tags,
raise_on_error=raise_on_error)
list += gitutil.BuildEmailList(commit.cc_list,
raise_on_error=raise_on_error)
list += get_maintainer.GetMaintainer(commit.patch)
all_ccs += list
print >>fd, commit.patch, ', '.join(list)
self._generated_cc[commit.patch] = list
if cover_fname:
cover_cc = gitutil.BuildEmailList(self.get('cover_cc', ''))
print >>fd, cover_fname, ', '.join(set(cover_cc + all_ccs))
fd.close()
return fname
def AddChange(self, version, commit, info):
"""Add a new change line to a version.
This will later appear in the change log.
Args:
version: version number to add change list to
info: change line for this version
"""
if not self.changes.get(version):
self.changes[version] = []
self.changes[version].append([commit, info])
def GetPatchPrefix(self):
"""Get the patch version string
Return:
Patch string, like 'RFC PATCH v5' or just 'PATCH'
"""
version = ''
if self.get('version'):
version = ' v%s' % self['version']
# Get patch name prefix
prefix = ''
if self.get('prefix'):
prefix = '%s ' % self['prefix']
return '%sPATCH%s' % (prefix, version)
| gpl-2.0 |
eepalms/gem5-newcache | src/arch/x86/isa/insts/general_purpose/compare_and_test/__init__.py | 91 | 2398 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["bit_scan",
"bit_test",
"bounds",
"compare",
"set_byte_on_condition",
"test"]
microcode = ""
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
| bsd-3-clause |
janebeckman/gpdb | src/test/tinc/ext/modgrammar/__init__.py | 21 | 55458 | import sys
import re
import textwrap
import modgrammar.util
# The module docstring is assigned explicitly rather than written as a leading
# string literal.
__doc__ = """
This module provides a full-featured pure-python framework for building tokenizing LR language parsers and interpreters for context-free grammars. (The :mod:`modgrammar` parsing engine is implemented as a recursive-descent parser with backtracking, using an object-oriented grammar model.)
The :mod:`modgrammar` parser is designed such that language grammars can be defined in python modules using standard python syntax. To create a new grammar, simply create a new class definition (or multiple class definitions) derived from the :class:`Grammar` base class, and set its :attr:`grammar` attribute to a list of sub-grammars to match. (Such definitions can be combined together into full grammar trees.) Several basic pre-defined grammar constructs are also available in this module which larger grammars can be built up from.
Once a grammar is defined, the :meth:`~Grammar.parser` method can be called on the toplevel grammar class to obtain a :class:`GrammarParser` object, which can be used to parse text against the defined grammar.
"""

# A note on how different descriptive attrs/methods are used:
# grammar_name = alternative to class name (used by repr, str, ebnf)
# grammar_desc = description of grammar used in error messages
# grammar_details() = full description of grammar (used by repr)

# Public API of this module. Note that "ReferenceError" deliberately shadows
# the builtin of the same name for importers using `from modgrammar import *`.
__all__ = [
    "ReferenceError", "UnknownReferenceError", "BadReferenceError", "ParseError", "Grammar",
    "Terminal",
    "Literal", "Word", "Repetition", "ListRepetition", "Reference",
    "GRAMMAR", "G", "ANY", "EMPTY", "REF", "LITERAL", "L", "OR", "EXCEPT", "WORD", "REPEAT", "LIST_OF", "OPTIONAL",
    "NOT_FOLLOWED_BY",
    "ZERO_OR_MORE", "ONE_OR_MORE", "ANY_EXCEPT", "BOL", "EOL", "EOF",
    "REST_OF_LINE", "SPACE",
    "generate_ebnf",
]

# Module-wide default for whether grammars consume surrounding whitespace.
# Grammar classes that leave grammar_whitespace as None pick this up (or their
# defining module's override) in GrammarClass.__init__.
grammar_whitespace = True
class _Singleton(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
# Unique sentinel used to detect "argument not supplied" in APIs where None is
# itself a legitimate value.
DEFAULT = _Singleton("DEFAULT")  # singleton used for detecting default arguments
# Maximum number of characters of input text quoted in ParseError messages.
PARSEERROR_FOUNDTXT_LEN = 16
def _gclass_reconstructor(name, bases, cdict):
    # Pickle helper: rebuild a grammar *class* from its constituent parts
    # (used by GrammarClass.__reduce__).
    return GrammarClass(name, bases, cdict)


def _ginstance_reconstructor(name, bases, cdict):
    # Pickle helper: rebuild an *instance* of a reconstructed grammar class.
    # __new__ is used directly so __init__ is not re-run during unpickling.
    cls = GrammarClass(name, bases, cdict)
    return cls.__new__(cls)
###############################################################################
# Exceptions #
###############################################################################
class InternalError(Exception):
    """
    This exception is raised by the parser if something happens which should never happen. It usually indicates that a grammar with a custom :meth:`~Grammar.grammar_parse` definition has done something it shouldn't.
    """
    pass


class GrammarDefError(Exception):
    """
    This exception is raised when creating/defining new grammar classes if there is a problem with the definition which cannot be resolved.
    """
    pass


class ReferenceError(Exception):
    """
    This is the base class for :exc:`UnknownReferenceError` and :exc:`BadReferenceError`. It can be used to easily catch either exception.
    """
    # NOTE: within this module, this intentionally shadows the builtin
    # ReferenceError (it is also re-exported via __all__).
    pass


class UnknownReferenceError(ReferenceError):
    """
    An attempt was made to resolve a :func:`REF` reference, but no grammar with the given name could be found, and no default was provided in the :func:`REF` declaration.
    """
    pass


class BadReferenceError(ReferenceError):
    """
    An attempt was made to resolve a :func:`REF` reference, and the reference name was resolved to an object, but the object is not a valid grammar object.
    """
    pass
class ParseError(Exception):
    """
    Raised by the parser when the provided text cannot be matched against the grammar.

    This exception has several useful attributes:

    .. attribute:: grammar

       The top-level grammar the parser was attempting to match.

    .. attribute:: buffer

       The contents of the text buffer the parser was attempting to match against the grammar.

    .. attribute:: pos

       The position within the buffer at which the problem occurred.

    .. attribute:: char

       The (total parsing) character position at which the problem occurred (similar to the :attr:`GrammarParser.char` attribute).

    .. attribute:: line

       The line at which the problem occurred (similar to the :attr:`GrammarParser.line` attribute).

    .. attribute:: col

       The column position within the line at which the problem occurred (similar to the :attr:`GrammarParser.col` attribute).

    .. attribute:: expected

       A list of possible sub-grammars which the parser expected to find at this position (but didn't).

    .. attribute:: message

       The text message which would be printed if this exception were printed. (This is of the form "Expected ...: Found ...")
    """

    def __init__(self, grammar, buf, pos, char, line=None, col=None, expected=None, message=None):
        if message is None:
            if not expected:
                message = ""
            else:
                expected_txt = " or ".join(sorted(e.grammar_desc for e in expected))
                # Quote at most PARSEERROR_FOUNDTXT_LEN characters of the
                # offending input so messages stay readable.
                end = min(len(buf), pos + PARSEERROR_FOUNDTXT_LEN)
                found_txt = buf[pos:end]
                if found_txt:
                    found_txt = repr(found_txt)
                else:
                    found_txt = "(end of input)"
                message = "Expected {0}: Found {1}".format(expected_txt, found_txt)
        # FIX: `grammar` and `pos` were documented in the class docstring but
        # never assigned; set them so the public contract matches the docs.
        # `buffer_pos` is kept (as an alias of `pos`) for backward
        # compatibility with existing callers.
        self.grammar = grammar
        self.buffer = buf
        self.buffer_pos = pos
        self.pos = pos
        self.char = char
        self.line = line
        self.col = col
        self.expected = expected
        self.message = message

    def __str__(self):
        lc = []
        if self.line is not None:
            lc.append("line {0}".format(self.line + 1))
        if self.col is not None:
            # BUG FIX: this previously read "column {9}", which raises
            # IndexError under str.format (only one positional arg is given).
            lc.append("column {0}".format(self.col + 1))
        if lc:
            return "[{0}] {1}".format(", ".join(lc), self.message)
        else:
            return "[char {0}] {1}".format(self.char + 1, self.message)
###############################################################################
# Core (internal) Classes #
###############################################################################
class GrammarClass(type):
    "The metaclass for all Grammar classes"

    def __init__(cls, name, bases, classdict):
        # Lazily-computed hash cache; once set by __hash__ it is frozen (see
        # __setattr__ below).
        cls._hash_id = None
        # Default the display name / error-message description to the class
        # name unless the definition supplied its own.
        if "grammar_name" not in classdict:
            cls.grammar_name = cls.__name__
        if "grammar_desc" not in classdict:
            cls.grammar_desc = cls.grammar_name
        classdict["abstract"] = False
        # Normalize the user-supplied `grammar` attribute into tuple form.
        cls.grammar = util.regularize(cls.grammar)
        tags = getattr(cls, "grammar_tags", ())
        if isinstance(tags, str):
            # This is going to be a common slip-up.. might as well handle it
            # gracefully...
            tags = (tags,)
        cls.grammar_tags = tags
        if "grammar_whitespace" not in classdict and cls.grammar_whitespace is None:
            # Inherit the whitespace setting from the defining module, falling
            # back to this module's global `grammar_whitespace` default.
            whitespace = sys.modules[cls.__module__].__dict__.get("grammar_whitespace", grammar_whitespace)
            cls.grammar_whitespace = whitespace
        # Give the class a chance to do its own (per-grammar-type) setup.
        cls.__class_init__(classdict)

    def __reduce__(cls):
        # Note: __reduce__ on metaclasses does not currently work, so this is
        # currently unused. The hope is that someday it will actually work.
        try:
            lookup = sys.modules[cls.__module__].__dict__[cls.__name__]
        except KeyError:
            lookup = None
        if lookup == cls:
            # The class is importable by name; pickle it by reference.
            return cls.__name__
        # Otherwise pickle enough pieces to reconstruct it (dunder entries are
        # stripped since they are re-created by the metaclass).
        cdict = dict(cls.__dict__)
        for key in cls.__dict__.keys():
            if key.startswith('__'):
                del cdict[key]
        return (_gclass_reconstructor, (cls.__name__, cls.__bases__, cdict))

    def __repr__(cls):
        return cls.__class_repr__()

    def __str__(cls):
        return cls.__class_str__()

    # Operator overloads: grammar classes can be combined with +, |, and -
    # to build sequences, alternations, and exception grammars.
    def __add__(cls, other):
        return util.add_grammar(cls, other)

    def __radd__(cls, other):
        return util.add_grammar(other, cls)

    def __or__(cls, other):
        return OR(cls, other)

    def __ror__(cls, other):
        return OR(other, cls)

    def __sub__(cls, other):
        return EXCEPT(cls, other)

    def __rsub__(cls, other):
        return EXCEPT(other, cls)

    def __setattr__(cls, attr, value):
        if attr in cls.grammar_hashattrs and cls._hash_id is not None:
            # Python hashability requires that once something obtains our hash, it
            # should never change, so we just consider these attributes read-only if
            # our hash value has ever been calculated before.
            raise AttributeError(
                "Changing the value of the {0!r} attribute would change the hash value of the object.".format(attr))
        return type.__setattr__(cls, attr, value)

    def __hash__(cls):
        hash_id = cls._hash_id
        if hash_id is None:
            # Compute once and cache; grammar_hashattrs become read-only now.
            hash_id = hash(cls.grammar_hashdata())
            cls._hash_id = hash_id
        return hash_id

    def __eq__(cls, other):
        if not isinstance(other, GrammarClass):
            return NotImplemented
        return cls.grammar_hashdata() == other.grammar_hashdata()

    def __ne__(cls, other):
        if not isinstance(other, GrammarClass):
            return NotImplemented
        return cls.grammar_hashdata() != other.grammar_hashdata()
class Text(object):
    """Text objects are used to hold the current working text being matched against the grammar. They keep track of both the text contents and certain other useful state information such as whether we're at the beginning of a line or the end of a file, etc.

    Do not use this class directly. This is only intended to be used internally by the modgrammar module.
    """

    def __init__(self, string, bol=False, eof=False):
        self.string = ""
        self.append(string, bol=bol, eof=eof)

    def append(self, string, bol=None, eof=None):
        """Add *string* to the buffer, optionally updating the bol/eof flags.

        Passing ``bol=None`` / ``eof=None`` (the defaults) leaves the
        corresponding flag unchanged.
        """
        if bol is not None:
            if not self.string:
                # Buffer is empty, so the flag describes its very start.
                self.bol = bol
            elif bol:
                # Text already buffered: a new beginning-of-line implies the
                # previous chunk ended with a line break.
                self.string += "\n"
        if string:
            self.string += string
        if eof is not None:
            # BUG FIX: the previous code coerced `eof` with bool() *before*
            # this None-check, making the check always true — any append()
            # call that omitted `eof` silently reset self.eof to False.
            # Only update the flag when the caller explicitly supplied it.
            self.eof = bool(eof)
        return self

    def skip(self, count):
        """Discard the first *count* characters, updating the bol flag."""
        if count:
            # We are at the beginning of a line iff the last skipped
            # character was a newline.
            self.bol = (self.string[count - 1] == "\n")
            self.string = self.string[count:]
        return self

    def __str__(self):
        return self.string

    def __repr__(self):
        cls = self.__class__
        return "{0.__module__}.{0.__name__}({1.string!r}, bol={1.bol}, eof={1.eof})".format(cls, self)
class GrammarParser(object):
    """
    Parser objects are the way in which an application can actually make use of a grammar definition. They provide the core interface to take input texts and attempt to match them against an associated grammar definition.

    :class:`GrammarParser` objects are not generally instantiated directly. Instead, to obtain one, call the :meth:`~Grammar.parser` method on the appropriate grammar class.

    Parser objects have the following useful attributes:

    .. attribute:: char

       The number of characters we've successfully parsed since the beginning of parsing (or the last :meth:`reset`).

    .. attribute:: line

       The number of lines we've successfully parsed since the beginning of parsing (or the last :meth:`reset`). This is measured based on the number of line-end sequences we've seen thus far.

    .. attribute:: col

       The position of the current :attr:`line` we're at.
    """

    def __init__(self, grammar, sessiondata, tabs):
        # `tabs` is the tab-stop width used for column calculations; `sessiondata`
        # is the default per-parse data passed to grammar_parse hooks.
        self.grammar = grammar
        self.tabs = tabs
        self.sessiondata = sessiondata
        self.reset()

    def reset(self):
        """
        Reset this parser back to its initial state.

        This will clear any remainder in the buffer and reset all (line, column, etc) counters to zero.
        """
        self.char = 0
        self.line = 0
        self.col = 0
        self.clear_remainder()

    def clear_remainder(self):
        """
        Clear any un-matched text left in the buffer.
        """
        self.text = Text("", bol=True)
        # state = (in-progress grammar_parse generator, matches so far)
        self.state = (None, None)

    def append(self, string, bol=None, eof=None):
        # Add more input to the working buffer without parsing it yet.
        self.text.append(string, bol=bol, eof=eof)

    def _parse(self, pos, data, matchtype):
        # Core match driver: resume (or start) the grammar's parse generator
        # and collect candidate matches according to `matchtype`.
        parsestate, matches = self.state
        while True:
            if not parsestate:
                matches = []
                parsestate = self.grammar.grammar_parse(self.text, pos, data)
                count, obj = next(parsestate)
            else:
                count, obj = parsestate.send(self.text)
            if count is False:
                # We're done
                if matches:
                    break
                # No results. We must have errored out.
                if pos == len(self.text.string):
                    # This happens when we've hit EOF but we want to do one more pass
                    # through in case anything wants to match the EOF itself. If nothing
                    # does, we don't really expect anything else to match on an empty
                    # string, so ignore the error.
                    return (None, None)
                errpos, expected = obj
                if errpos == len(self.text.string) and self.grammar.grammar_whitespace:
                    # If we hit EOF and this grammar is whitespace-consuming, check to
                    # see whether we had only whitespace before the EOF. If so, treat
                    # this like the pos == len(self.text.string) case above.
                    whitespace_re = self.grammar.grammar_whitespace
                    if whitespace_re is True:
                        whitespace_re = util._whitespace_re
                    m = whitespace_re.match(self.text.string, pos)
                    if m and m.end() == len(self.text.string):
                        return (None, None)
                char = self.char + errpos
                line, col = util.calc_line_col(self.text.string, errpos, self.line, self.col, self.tabs)
                raise ParseError(self.grammar, self.text.string, errpos, char, line=line, col=col, expected=expected)
            if count is None:
                # We need more input: save the generator so we can resume it.
                self.state = (parsestate, matches)
                return (None, None)
            matches.append((count, obj))
            if matchtype == 'first':
                # We only need the first one, no need to keep looping
                break
        # At this point we've gotten one or more successful matches
        self.state = (None, None)
        if matchtype == 'first':
            count, obj = matches[0]
        elif matchtype == 'last':
            count, obj = matches[-1]
        elif matchtype == 'longest':
            count, obj = max(matches, key=lambda m: m[0])
        elif matchtype == 'shortest':
            count, obj = min(matches, key=lambda m: m[0])
        elif matchtype == 'all':
            objs = [x[1] for x in matches]
            count = max(x[0] for x in matches)
            pp_objs = []
            for obj in objs:
                result = obj.grammar_postprocess(None, data)
                if len(result) == 1:
                    result = result[0]
                pp_objs.append(result)
            return (count, pp_objs)
        else:
            raise ValueError("Invalid value for 'matchtype' parameter: {0!r}".format(matchtype))
        result = obj.grammar_postprocess(None, data)
        if len(result) == 1:
            result = result[0]
        return (count, result)

    def _parse_string(self, string, bol, eof, data, matchtype):
        # Generator: yield successive matches from the buffer after appending
        # `string`, stopping when more input is needed.
        self.append(string, bol=bol, eof=eof)
        pos = 0
        if data is None:
            data = self.sessiondata
        while True:
            count, obj = self._parse(pos, data, matchtype)
            if count is None:
                # Partial match
                break
            elif matchtype != 'all':
                self.skip(count)
            yield obj
            if not count:
                # We matched a zero-length string. If we keep looping, we'll just loop
                # infinitely doing the same thing. Best to stop now.
                break
            if not self.text.eof and pos == len(self.text.string):
                # We've done all we can for now.
                # Note: if we're at EOF, we loop one more time in case something wants
                # to match the EOF, and then we'll break on either the error-on-EOF
                # case or the count-is-zero case next time through.
                break

    def parse_string(self, string, bol=None, eof=None, reset=False, multi=False, data=None, matchtype='first'):
        """
        Attempt to match *string* against the associated grammar. If successful, returns a corresponding match object. If there is an incomplete match (or it is impossible to determine yet whether the match is complete or not), save the current text in the match buffer and return :const:`None` to indicate more text is required. If the text does not match any valid grammar construction, raise :exc:`ParseError`.

        Optional parameters:

        *reset*
          Call :meth:`reset` before starting to parse the supplied text.
        *multi*
          Instead of returning a single match result, keep matching as many times as possible before returning, and return a list of matches, in sequence.
        *eof*
          Indicates that no more text will be coming, and the parser should return the best match it can with the supplied text instead of asking for more. (If *eof* is set, the parser will never return a :const:`None` result, unless the buffer is completely empty.)
        *data*
          Use the provided data instead of the default *sessiondata* during this parse run.
        *matchtype*
          If a grammar could match multiple ways, determine how the best match is chosen:

          "first" (default)
            The first successful match the grammar comes up with (as determined by normal grammar test ordering).
          "last"
            The last successful match.
          "longest"
            The match which uses up the longest portion of the input text.
          "shortest"
            The match which uses up the shortest portion of the input text.
          "all"
            Return all possible matches, in a list. Note that in this case the buffer position will not be automatically advanced. You must call :func:`~GrammarParser.skip` manually.
        *bol*
          Treat the input text as starting at the beginning of a line (for the purposes of matching the :const:`BOL` grammar element). It is not usually necessary to specify this explicitly.
        """
        if reset:
            self.reset()
        if multi:
            return list(self._parse_string(string, bol, eof, data, matchtype))
        else:
            for result in self._parse_string(string, bol, eof, data, matchtype):
                # This will always just return the first result
                return result
            return None

    def parse_lines(self, lines, bol=False, eof=False, reset=False, data=None, matchtype='first'):
        """
        *(generator method)*

        Attempt to match a list (or actually any iterable) of strings against the associated grammar. This is effectively the same as calling :meth:`parse_string` repeatedly for each string in the list to obtain all matches in sequence.

        Return values, exceptions, and optional parameters are all exactly the same as for :meth:`parse_string`.

        Note: Be careful using ``matchtype="all"`` with parse_lines/parse_file. You must manually call :func:`~GrammarParser.skip` after each yielded match, or you will end up with an infinite loop!
        """
        if reset:
            self.reset()
        for line in lines:
            for result in self._parse_string(line, bol, False, data, matchtype):
                yield result
            # Only the first chunk carries the caller's bol flag.
            bol = None
        if eof:
            for result in self._parse_string("", None, True, data, matchtype):
                yield result

    def parse_file(self, file, bol=False, eof=True, reset=False, data=None, matchtype='first'):
        """
        *(generator method)*

        Open and process the contents of a file using the associated grammar. This is basically the same as opening the specified file, and passing the resulting file object to :meth:`parse_lines`.

        Return values, exceptions, and optional parameters are all exactly the same as for :meth:`parse_string`.

        Note: Be careful using ``matchtype="all"`` with parse_lines/parse_file. You must manually call :func:`~GrammarParser.skip` after each yielded match, or you will end up with an infinite loop!
        """
        if isinstance(file, str):
            with open(file, "r") as f:
                for result in self.parse_lines(f, bol=bol, eof=eof, reset=reset, data=data, matchtype=matchtype):
                    yield result
        else:
            for result in self.parse_lines(file, bol=bol, eof=eof, reset=reset, data=data, matchtype=matchtype):
                yield result

    def skip(self, count):
        """
        Skip forward the specified number of characters in the input buffer (discarding the text skipped over).
        """
        if count:
            if count > len(self.text.string):
                raise ValueError("Attempt to skip past end of available buffer.")
            # The state may contain index values in it, which will become invalid if
            # we change the starting point, so we (unfortunately) need to nuke it.
            self.state = (None, None)
            self.char += count
            self.line, self.col = util.calc_line_col(self.text.string, count, self.line, self.col, self.tabs)
            self.text.skip(count)

    # NOTE(review): this class previously defined remainder() twice with
    # equivalent bodies; the earlier (shadowed, dead) definition was removed.
    def remainder(self):
        """
        Return the remaining contents of the parse buffer. After parsing, this will contain whatever portion of the original text was not used by the parser up to this point. (This does not change the buffer; use :meth:`clear_remainder` to discard it.)
        """
        return self.text.string
###############################################################################
# Base (public) Classes #
###############################################################################
if sys.version_info[0] < 3:
from modgrammar.grammar_py2 import Grammar
else:
from modgrammar.grammar_py3 import Grammar
class AnonGrammar(Grammar):
    """Base class for anonymous grammars created by the GRAMMAR() factory."""

    # Left as None so the whitespace setting is inherited from the defining
    # module at class-initialization time (see GrammarClass.__init__).
    grammar_whitespace = None

    @classmethod
    def grammar_details(cls, depth=-1, visited=None):
        # A single-element anonymous grammar is transparent: describe the
        # child directly instead of wrapping it in parentheses.
        if len(cls.grammar) == 1:
            return cls.grammar[0].grammar_details(depth, visited)
        parts = [sub.grammar_details(depth, visited) for sub in cls.grammar]
        return "({0})".format(", ".join(parts))

    @classmethod
    def grammar_ebnf_lhs(cls, opts):
        if cls.grammar_name != "<GRAMMAR>":
            # A (re)named anonymous grammar acts as its own EBNF nonterminal.
            return (cls.grammar_name, (cls,))
        ebnf_names, nonterminals = util.get_ebnf_names(cls.grammar, opts)
        return (", ".join(ebnf_names), nonterminals)

    @classmethod
    def grammar_ebnf_rhs(cls, opts):
        if cls.grammar_name == "<GRAMMAR>":
            # Truly anonymous grammars have no right-hand-side production.
            return None
        ebnf_names, nonterminals = util.get_ebnf_names(cls.grammar, opts)
        return (", ".join(ebnf_names), nonterminals)
class Terminal(Grammar):
    # Terminals have no sub-grammars, are flagged as terminal for the engine,
    # and never consume leading whitespace themselves.
    grammar = ()
    grammar_terminal = True
    grammar_whitespace = False

    @classmethod
    def grammar_details(cls, depth=-1, visited=None):
        # A terminal is fully described by its name alone.
        return cls.grammar_name
class Literal(Terminal):
    # The exact text this terminal matches (set by the LITERAL() factory).
    string = ""
    grammar_collapse_skip = True
    # The matched string participates in the class's hash identity.
    grammar_hashattrs = ('string',)

    @classmethod
    def __class_init__(cls, attrs):
        # Default display name/description show the literal text itself.
        if "grammar_name" not in attrs:
            cls.grammar_name = "L({0!r})".format(cls.string)
        if "grammar_desc" not in attrs:
            cls.grammar_desc = repr(cls.string)

    @classmethod
    def grammar_parse(cls, text, index, sessiondata):
        # While the buffered text is shorter than the literal but is still a
        # compatible prefix, request more input (unless we are at EOF).
        while (len(cls.string) + index > len(text.string)) and cls.string.startswith(text.string[index:]):
            if text.eof:
                break
            # Partial match. Try again when we have more text.
            text = yield (None, None)
        if text.string.startswith(cls.string, index):
            yield (len(cls.string), cls(cls.string))
        # Terminate with a failure result (also reached after a success yield).
        yield util.error_result(index, cls)

    @classmethod
    def grammar_ebnf_rhs(cls, opts):
        return None

    @classmethod
    def grammar_ebnf_lhs(cls, opts):
        # Literals are rendered inline in EBNF as quoted strings.
        return (repr(cls.string), ())
# NOTE: GRAMMAR and LITERAL must occur before Repetition/ListRepetition because
# they use them in their __class_init__ constructors.
def GRAMMAR(*subgrammars, **kwargs):
    """Construct an "anonymous grammar" from *subgrammars*.

    This creates a grammar without explicitly defining a named class derived
    from :class:`Grammar`, which is convenient for simple grammars.
    *subgrammars* is the same list of component grammars that would otherwise
    be given as the :attr:`~Grammar.grammar` class attribute.
    """
    normalized = util.regularize(subgrammars)
    # A single sub-grammar with no extra options needs no wrapper class.
    if len(normalized) == 1 and not kwargs:
        return normalized[0]
    class_dict = util.make_classdict(AnonGrammar, normalized, kwargs)
    return GrammarClass("<GRAMMAR>", (AnonGrammar,), class_dict)
def LITERAL(string, **kwargs):
    """Build a terminal grammar matching exactly *string* (case-sensitive)."""
    class_dict = util.make_classdict(Literal, (), kwargs, string=string)
    return GrammarClass("<LITERAL>", (Literal,), class_dict)
class ANY(Terminal):
    # Terminal that matches exactly one arbitrary character.
    grammar_name = "ANY"
    grammar_desc = "any character"

    @classmethod
    def grammar_parse(cls, text, index, sessiondata):
        while index == len(text.string):
            # The only case we can't match is if there's no input
            if text.eof:
                yield util.error_result(index, cls)
            text = yield (None, None)
        # Consume a single character at `index`.
        yield (1, cls(text.string, index, index + 1))
        yield util.error_result(index, cls)
class EMPTY(Terminal):
    # Terminal that always matches zero characters.
    grammar_collapse = True
    grammar_collapse_skip = True
    grammar_desc = "(nothing)"

    @classmethod
    def grammar_parse(cls, text, index, sessiondata):
        # This always matches, no matter where it is.
        yield (0, cls(""))
        yield util.error_result(index, cls)

    @classmethod
    def grammar_ebnf_lhs(cls, opts):
        # Rendered as an EBNF comment, since EBNF has no empty terminal.
        return ("(*empty*)", ())

    @classmethod
    def grammar_ebnf_rhs(cls, opts):
        return None
def OR(*grammars, **kwargs):
    """Create an either-or grammar matching any one of its subgrammars.

    Can also be produced by combining grammars with the ``|`` operator.
    Alternatives are attempted in left-to-right order, so if more than one
    could match, the leftmost always matches first.
    """
    alternatives = []
    for alt in grammars:
        merge = getattr(alt, "grammar_OR_merge", None)
        if merge is not None:
            # Nested OR grammars are flattened into one alternative list.
            alternatives.extend(merge())
        else:
            alternatives.append(GRAMMAR(alt))
    class_dict = util.make_classdict(OR_Operator, alternatives, kwargs)
    return GrammarClass("<OR>", (OR_Operator,), class_dict)
class OR_Operator(Grammar):
    # Implementation class behind OR(); tries each alternative in order.
    grammar_whitespace = False

    @classmethod
    def __class_init__(cls, attrs):
        if not "grammar_desc" in attrs and cls.grammar:
            # This is not used directly when constructing ParseExceptions (because we
            # never return ourselves as a failure class, we only return the failures
            # from our subgrammars), but is needed by some other grammars which
            # construct their grammar_desc based on their subgrammar's grammar_descs
            # (i.e. NOT())
            cls.grammar_desc = " or ".join(g.grammar_desc for g in cls.grammar)

    @classmethod
    def grammar_parse(cls, text, index, sessiondata):
        best_error = None
        for g in cls.grammar:
            results = g.grammar_parse(text, index, sessiondata)
            for count, obj in results:
                while count is None:
                    # Sub-grammar needs more input: pass the request upward,
                    # then forward the refreshed text back down to it.
                    if text.eof:
                        # Subgrammars should not be asking for more data after eof.
                        raise InternalError("{0} requested more data when at EOF".format(g))
                    text = yield (None, None)
                    count, obj = results.send(text)
                if count is False:
                    # This alternative failed; remember its best error info.
                    best_error = util.update_best_error(best_error, obj)
                    break
                yield (count, obj)
        # All alternatives exhausted; report the most relevant failure.
        yield util.error_result(*best_error)

    @classmethod
    def grammar_OR_merge(cls):
        # Allows nested ORs to be flattened by the OR() factory.
        return cls.grammar

    @classmethod
    def grammar_details(cls, depth=-1, visited=None):
        if not depth:
            depth += 1
        return "(" + " | ".join((c.grammar_details(depth - 1, visited) for c in cls.grammar)) + ")"

    @classmethod
    def grammar_ebnf_lhs(cls, opts):
        names, nts = util.get_ebnf_names(cls.grammar, opts)
        return ("( " + " | ".join(names) + " )", nts)

    @classmethod
    def grammar_ebnf_rhs(cls, opts):
        return None
def NOT_FOLLOWED_BY(*grammar, **kwargs):
    """Create a negative-lookahead grammar.

    The result matches (consuming no input, producing :const:`None` in the
    parse tree) only when the following text does NOT match *grammar*; when
    the text does match, a :exc:`ParseError` is raised.
    """
    class_dict = util.make_classdict(NotFollowedBy, grammar, kwargs)
    return GrammarClass("<NOT_FOLLOWED_BY>", (NotFollowedBy,), class_dict)
class NotFollowedBy(Grammar):
    # Implementation class behind NOT_FOLLOWED_BY(): negative lookahead.
    grammar_whitespace = False
    grammar_collapse = True

    @classmethod
    def __class_init__(cls, attrs):
        # Normalize to exactly one sub-grammar (EMPTY if none was given).
        if not cls.grammar:
            cls.grammar = (EMPTY,)
        else:
            cls.grammar = (GRAMMAR(cls.grammar),)
        if not "grammar_desc" in attrs and cls.grammar:
            cls.grammar_desc = "anything except {0}".format(cls.grammar[0].grammar_desc)

    @classmethod
    def grammar_parse(cls, text, index, sessiondata):
        best_error = None
        g = cls.grammar[0]
        results = g.grammar_parse(text, index, sessiondata)
        count, obj = next(results)
        while count is None:
            # Sub-grammar wants more input; relay the request upward.
            if text.eof:
                # Subgrammars should not be asking for more data after eof.
                raise InternalError("{0} requested more data when at EOF".format(g))
            text = yield (None, None)
            count, obj = results.send(text)
        if count is not False:
            # The subgrammar matched. That means we should consider this a parse
            # error.
            yield util.error_result(index, cls)
        else:
            # Subgrammar did not match. Return a (successful) None match.
            yield (0, cls(''))

    @classmethod
    def grammar_details(cls, depth=-1, visited=None):
        if not visited:
            visited = (cls,)
        elif cls in visited:
            # Circular reference. Stop here.
            return cls.grammar_name
        else:
            visited = visited + (cls,)
        return "NOT_FOLLOWED_BY({0})".format(cls.grammar[0].grammar_details(depth, visited))

    @classmethod
    def grammar_ebnf_lhs(cls, opts):
        # EBNF has no lookahead construct, so emit a special-sequence note.
        sub_lhs, sub_nts = cls.grammar[0].grammar_ebnf_lhs(opts)
        desc = "not followed by {0}".format(sub_lhs)
        return (util.ebnf_specialseq(cls, opts, desc=desc), (cls.grammar[0],))

    @classmethod
    def grammar_ebnf_rhs(cls, opts):
        return None
def EXCEPT(grammar, exc_grammar, **kwargs):
    """Match *grammar* only where the same text does not match *exc_grammar*.

    This mirrors the EBNF ``-`` (exception) operator, and can also be built by
    subtracting grammars with the ``-`` operator in python expressions. Note
    that many grammars can be expressed more efficiently without this
    construct; it exists mainly for full EBNF compatibility.
    """
    class_dict = util.make_classdict(ExceptionGrammar, (grammar, exc_grammar), kwargs)
    return GrammarClass("<EXCEPT>", (ExceptionGrammar,), class_dict)
class ExceptionGrammar(Grammar):
    # Implementation class behind EXCEPT(): grammar[0] minus grammar[1].
    grammar_whitespace = False

    @classmethod
    def __class_init__(cls, attrs):
        if not "grammar_desc" in attrs and cls.grammar:
            cls.grammar_desc = "{0} except {1}".format(cls.grammar[0].grammar_desc, cls.grammar[1].grammar_desc)

    @classmethod
    def grammar_parse(cls, text, index, sessiondata):
        best_error = None
        g = cls.grammar[0]
        exc = cls.grammar[1]
        results = g.grammar_parse(text, index, sessiondata)
        for count, obj in results:
            while count is None:
                # Relay the sub-grammar's request for more input upward.
                if text.eof:
                    # Subgrammars should not be asking for more data after eof.
                    raise InternalError("{0} requested more data when at EOF".format(g))
                text = yield (None, None)
                count, obj = results.send(text)
            if count is False:
                best_error = util.update_best_error(best_error, obj)
                break
            # We found one, but now we need to check to make sure that the
            # exception-grammar does NOT match the same part of the text string.
            exc_text = Text(text.string[:index + count], bol=text.bol, eof=True)
            found = False
            for e_count, e_obj in exc.grammar_parse(exc_text, index, sessiondata):
                if e_count is None:
                    # Subgrammars should not be asking for more data after eof.
                    raise InternalError("{0} requested more data when at EOF".format(g))
                if e_count is False:
                    break
                found = True
                break
            if not found:
                yield (count, obj)
        # In some cases, our "best error" can lead to really confusing messages,
        # since it may say "expected foo" at a place where foo actually WAS found
        # (because the exclusion grammar prevented it from being returned). If
        # this is the case (we're returning a best error at our own starting
        # position) return ourselves as the error object, so at least it will be
        # obvious there were extra conditions on the match that weren't fulfilled.
        if best_error[0] == index:
            yield util.error_result(index, cls)
        else:
            yield util.error_result(*best_error)

    @classmethod
    def grammar_details(cls, depth=-1, visited=None):
        if not depth:
            depth += 1
        return "(" + " - ".join((c.grammar_details(depth - 1, visited) for c in cls.grammar)) + ")"

    @classmethod
    def grammar_ebnf_lhs(cls, opts):
        names, nts = util.get_ebnf_names(cls.grammar, opts)
        return ("( " + " - ".join(names) + " )", nts)

    @classmethod
    def grammar_ebnf_rhs(cls, opts):
        return None
def REPEAT(*grammar, **kwargs):
    """Match consecutive repetitions of *grammar* (one-or-more by default).

    The *min*/*max* (or *count*) keyword parameters restrict how many
    repetitions are allowed.
    """
    class_dict = util.make_classdict(Repetition, grammar, kwargs)
    return GrammarClass("<REPEAT>", (Repetition,), class_dict)
class Repetition(Grammar):
grammar_count = None
grammar_null_subtoken_ok = False
grammar_min = 1
grammar_max = None
grammar_whitespace = None
@classmethod
def __class_init__(cls, attrs):
if not cls.grammar:
grammar = EMPTY
else:
grammar = GRAMMAR(cls.grammar)
if cls.grammar_count is not None:
cls.grammar_min = cls.grammar_count
cls.grammar_max = cls.grammar_count
elif not cls.grammar_max:
cls.grammar_max = sys.maxsize
cls.grammar = util.RepeatingTuple(grammar, grammar, len=cls.grammar_max)
@classmethod
def grammar_details(cls, depth=-1, visited=None):
if cls.grammar_min == 0 and cls.grammar_max == 1 and cls.grammar_collapse:
return "OPTIONAL({0})".format(cls.grammar[0].grammar_details(depth, visited))
params = ""
if cls.grammar_min == cls.grammar_max:
params += ", count={0}".format(cls.grammar_min)
else:
if cls.grammar_min != 1:
params += ", min={0}".format(cls.grammar_min)
if cls.grammar_max != sys.maxsize:
params += ", max={0}".format(cls.grammar_max)
if cls.grammar_collapse:
params += ", collapse=True"
return "REPEAT({0}{1})".format(cls.grammar[0].grammar_details(depth, visited), params)
@classmethod
def grammar_resolve_refs(cls, refmap={}, recurse=True, follow=False, missing_ok=False, skip=None):
# The default grammar_resolve_refs process will inadvertently replace our
# RepeatingTuple with a simple tuple, so we need to convert it back to
# RepeatingTuple after it's done.
old_grammar = cls.grammar
Grammar.grammar_resolve_refs.__func__(cls, refmap, recurse, follow, missing_ok, skip)
cls.grammar = util.RepeatingTuple(*cls.grammar, len=old_grammar.len)
#TODO: implement strict vs non-strict EBNF
@classmethod
def grammar_ebnf_lhs(cls, opts):
names, nts = util.get_ebnf_names((cls.grammar[0],), opts)
name = names[0]
if "," in name:
ename = "( {0} )".format(name)
else:
ename = name
if cls.grammar_min == 0 and cls.grammar_max == 1:
return ("[{0}]".format(name), nts)
if cls.grammar_min == 0:
descs = []
elif cls.grammar_min == 1:
descs = ["{0}".format(ename)]
else:
descs = ["{0} * {1}".format(cls.grammar_min, ename)]
extra = cls.grammar_max - cls.grammar_min
if cls.grammar_max == sys.maxsize:
descs.append("{{{0}}}".format(name)) # "{%s}"
elif extra == 1:
descs.append("[{0}]".format(name))
elif extra:
descs.append("{0} * [{1}]".format(extra, name))
return (", ".join(descs), nts)
@classmethod
def grammar_ebnf_rhs(cls, opts):
    """Repetitions are rendered inline via the LHS form; no separate RHS."""
    return None
def WORD(startchars, restchars=None, **kwargs):
    """Return a grammar matching a run of characters from the given sets.

    The first character of the run must be in *startchars*; subsequent
    characters must be in *restchars*, which defaults to *startchars*.  Both
    are regular-expression character-class specifications (e.g. ``"A-Za-z"``
    or ``"0123456789"``); a leading ``^`` inverts the set, exactly as in a
    regex ``[]`` construct.  Extra keyword arguments are forwarded to
    ``util.make_classdict`` to customize the generated grammar class.
    """
    attrs = util.make_classdict(Word, (), kwargs, startchars=startchars,
                                restchars=restchars)
    return GrammarClass("<WORD>", (Word,), attrs)
class Word(Terminal):
    """Terminal matching a run of characters drawn from configured sets.

    Normally instantiated via the WORD() factory rather than subclassed
    directly.  Matching is implemented with a compiled regular expression
    built in __class_init__.
    """
    # Character-class specs (regex [] syntax) for the first / following chars.
    startchars = ""
    restchars = None
    # count=N is shorthand for min=N, max=N (applied in __class_init__).
    grammar_count = None
    grammar_min = 1
    grammar_max = None

    @classmethod
    def __class_init__(cls, attrs):
        """Build the matching regexp and display names from class attributes."""
        if cls.grammar_count is not None:
            cls.grammar_min = cls.grammar_count
            cls.grammar_max = cls.grammar_count
        startchars = cls.startchars
        restchars = cls.restchars
        if not restchars:
            restchars = startchars
        # Escape ']' and '\' so the specs can be embedded inside [...].
        startchars = re.sub('([\\]\\\\])', '\\\\\\1', startchars)
        restchars = re.sub('([\\]\\\\])', '\\\\\\1', restchars)
        max = cls.grammar_max
        if not max:
            regexp = "[{0}][{1}]*".format(startchars, restchars)
        else:
            regexp = "[{0}][{1}]{{,{2}}}".format(startchars, restchars, max - 1)
        if cls.grammar_min < 1:
            # Permit a zero-length match when min == 0.
            regexp = "({0})?".format(regexp)
        cls.regexp = re.compile(regexp)
        if "grammar_name" not in attrs:
            # NOTE(review): the name is built from the regex-escaped specs,
            # not the raw originals -- confirm that is intended.
            if cls.restchars is None:
                argspec = repr(startchars)
            else:
                argspec = "{0!r}, {1!r}".format(startchars, restchars)
            cls.grammar_name = "WORD({0})".format(argspec)
        if "grammar_desc" not in attrs:
            cls.grammar_desc = cls.grammar_name

    @classmethod
    def grammar_details(cls, depth=-1, visited=None):
        """Return a WORD(...) pseudo-syntax description of this grammar."""
        startchars = cls.startchars
        restchars = cls.restchars
        if restchars is None:
            argspec = repr(startchars)
        else:
            argspec = "{0!r}, {1!r}".format(startchars, restchars)
        min = cls.grammar_min
        max = cls.grammar_max
        if min != 1:
            argspec += ", min={0}".format(min)
        if max:
            argspec += ", max={0}".format(max)
        return "WORD({0})".format(argspec)

    @classmethod
    def grammar_parse(cls, text, index, sessiondata):
        """(generator) Yield (length, match) results for *text* at *index*.

        Follows the modgrammar parse protocol: yields (None, None) to request
        more input and an error result when no (further) match is possible.
        """
        greedy = cls.grammar_greedy
        returned = cls.grammar_min - 1
        while True:
            string = text.string
            m = cls.regexp.match(string, index)
            if not m:
                # No match at all.  The caller is expected not to resume the
                # generator after an error result is yielded.
                yield util.error_result(index, cls)
            end = m.end()
            matchlen = end - index
            if not greedy:
                # Non-greedy: offer shorter matches first, one char at a time.
                while returned < matchlen:
                    returned += 1
                    yield (returned, cls(string, index, index + returned))
            if end < len(string) or matchlen == cls.grammar_max or text.eof:
                break
            # We need more text before we can be sure we're at the end.
            text = yield (None, None)
        if greedy:
            # Greedy: offer the longest match first, then progressively shorter.
            while matchlen >= cls.grammar_min:
                yield (matchlen, cls(string, index, index + matchlen))
                matchlen -= 1
        yield util.error_result(index, cls)

    @classmethod
    def grammar_ebnf_lhs(cls, opts):
        # Words have no pure-EBNF equivalent; render as a special sequence.
        return (util.ebnf_specialseq(cls, opts), ())

    @classmethod
    def grammar_ebnf_rhs(cls, opts):
        # Rendered inline; no separate RHS production.
        return None

    def __bool__(self):
        # A Word match is truthy only if it matched at least one character.
        return bool(self.string)
    __nonzero__ = __bool__  # Python 2 compatibility alias
def REF(ref_name, module=DEFAULT, default=None):
    """Create a reference to a grammar named *ref_name*, to be resolved later.

    This can either be resolved by calling :meth:`~Grammar.grammar_resolve_refs`
    prior to parsing, or, alternately, :mod:`modgrammar` will automatically
    attempt to resolve any :func:`REF` whenever it is used in parsing, and will
    treat it the same as if it were actually an occurrence of the resolved
    grammar.

    By default, resolving a reference involves searching for a grammar class
    with the same name in the same python module.  The python module is
    determined based on the location where the :func:`REF` call occurred.  If
    you wish to use a different module to look for the grammar this
    :func:`REF` refers to, it can be provided in the *module* parameter.  If
    *module* is given as :const:`None`, then no module will be searched.

    If provided, *default* should contain a grammar which will be used if the
    given reference cannot be resolved.
    """
    if module is DEFAULT:
        # Try to figure out what module we were called from, as that should be
        # what we'll later look things up relative to...
        module = util.get_calling_module()
    return GrammarClass("<REF>", (Reference,), dict(ref_name=ref_name, ref_base=module, ref_default=default))
class Reference(Grammar):
    """Placeholder grammar produced by REF(); resolves to a named grammar.

    Resolution tries session data first, then the configured base (usually
    the module where the REF() call occurred), then *ref_default*.
    """
    ref_name = None      # name of the grammar this reference points at
    ref_base = None      # object (usually a module) searched during resolution
    ref_default = None   # fallback grammar used when the name is unresolvable
    grammar_whitespace = False

    @classmethod
    def __class_init__(cls, attrs):
        cls.grammar_name = "REF({0!r})".format(cls.ref_name)

    @classmethod
    def resolve(cls, sessiondata={}):
        """Return the grammar this reference points to.

        Lookup order: a ``grammar_resolve_ref`` hook on *sessiondata*, then
        *sessiondata* itself (mapping-style), then ``getattr`` on ``ref_base``,
        then ``ref_default``.  Raises UnknownReferenceError when nothing
        matches, or BadReferenceError when the resolved object is not a
        grammar.
        """
        o = None
        if sessiondata:
            f = getattr(sessiondata, "grammar_resolve_ref", None)
            if f is not None:
                o = f(cls.ref_name)
            else:
                # Try using .get() first, because exceptions are expensive and
                # this may be being called many times from inside of
                # grammar_parse..
                f = getattr(sessiondata, "get", None)
                if f is not None:
                    o = f(cls.ref_name, None)
                else:
                    try:
                        o = sessiondata[cls.ref_name]
                    except (KeyError, TypeError):
                        pass
        if o is None:
            o = getattr(cls.ref_base, cls.ref_name, None)
        if o is None:
            o = cls.ref_default
        if o is None:
            raise UnknownReferenceError("Unable to resolve reference to {0.ref_name!r} in {0.ref_base}.".format(cls))
        # Duck-type check: anything with grammar_parse is accepted as a grammar.
        if not hasattr(o, 'grammar_parse'):
            raise BadReferenceError(
                "Resolving reference to {0.ref_name!r}: Object {1!r} does not appear to be a valid grammar.".format(cls,
                    o))
        return o

    @classmethod
    def grammar_parse(cls, text, index, sessiondata):
        # Delegate parsing to the resolved grammar, forwarding the generator
        # protocol (including requests for more text) transparently.
        state = cls.resolve(sessiondata).grammar_parse(text, index, sessiondata)
        text = yield next(state)
        while True:
            text = yield state.send(text)

    @classmethod
    def grammar_ebnf_lhs(cls, opts):
        try:
            o = cls.resolve()
        # NOTE(review): assumes UnknownReferenceError/BadReferenceError derive
        # from this package's ReferenceError -- confirm in the errors module.
        except ReferenceError:
            return (util.ebnf_specialseq(cls, opts), ())
        return o.grammar_ebnf_lhs(opts)

    @classmethod
    def grammar_ebnf_rhs(cls, opts):
        return None
###############################################################################
# Extras                                                                      #
###############################################################################

# Short aliases for the two most commonly used grammar factories.
L = LITERAL
G = GRAMMAR

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def LIST_OF(*grammar, **kwargs):
    """Return a grammar matching a separated list of *grammar* repetitions.

    The separator defaults to a literal ``","`` and may be overridden with
    the *sep* keyword; any (arbitrarily complex) grammar can serve as the
    separator.  As with other repetition grammars, *min* and *max* keywords
    restrict the number of list items.
    """
    attrs = util.make_classdict(ListRepetition, grammar, kwargs)
    return GrammarClass("<LIST>", (ListRepetition,), attrs)
class ListRepetition(Repetition):
    """Repetition of a grammar with a separator grammar between items."""
    sep = LITERAL(",")          # grammar matched between successive items
    grammar_min = 1
    grammar_whitespace = None

    @classmethod
    def __class_init__(cls, attrs):
        # Capture the item grammar before Repetition's init normalizes things.
        grammar = GRAMMAR(cls.grammar)
        Repetition.__class_init__.__func__(cls, attrs)
        cls.sep = GRAMMAR(cls.sep)
        # Items after the first are matched as (sep, item) pairs; the
        # RepeatingTuple presents [item, pair, pair, ...] to the parser.
        succ_grammar = GRAMMAR(cls.sep, grammar, whitespace=cls.grammar_whitespace)
        cls.grammar = util.RepeatingTuple(grammar, succ_grammar, len=cls.grammar_max)

    @classmethod
    def grammar_details(cls, depth=-1, visited=None):
        """Return a LIST_OF(...) pseudo-syntax description of this grammar."""
        params = ""
        if cls.grammar_min == cls.grammar_max:
            params += ", count={0}".format(cls.grammar_min)
        else:
            if cls.grammar_min != 1:
                params += ", min={0}".format(cls.grammar_min)
            if cls.grammar_max != sys.maxsize:
                params += ", max={0}".format(cls.grammar_max)
        if cls.grammar_collapse:
            params += ", collapse=True"
        return "LIST_OF({0}, sep={1}{2})".format(cls.grammar[0].grammar_details(depth, visited),
            cls.sep.grammar_details(depth, visited), params)

    def grammar_postprocess(self, parent, sessiondata):
        # Collapse down the succ_grammar instances for successive matches, so
        # the parse tree reads [item, sep, item, sep, item, ...].
        elems = []
        for e in self.elements:
            if not elems:
                elems = [e]
            else:
                elems.extend(e.elements)
        self.elements = elems
        return Grammar.grammar_postprocess(self, parent, sessiondata)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def OPTIONAL(*grammar, **kwargs):
    """Return a grammar matching *grammar* if present, or the empty string.

    When *grammar* is present in the input, this element of the parse tree
    contains a single match object; when absent, it contains :const:`None`.
    Functionally equivalent to ``OR(grammar, EMPTY)``.
    """
    # These three options are only defaults and may be overridden by the
    # caller; min/max are forced to make this a 0..1 repetition.
    defaults = {"collapse": True, "grammar_name": "<OPTIONAL>", "whitespace": False}
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    kwargs.update(min=0, max=1)
    return REPEAT(*grammar, **kwargs)
def ZERO_OR_MORE(*grammar, **kwargs):
    """Match any number of repetitions of *grammar*, including none.

    Synonym for ``REPEAT(*grammar, min=0)``.
    """
    kwargs["min"] = 0
    kwargs["max"] = None
    return REPEAT(*grammar, **kwargs)
def ONE_OR_MORE(*grammar, **kwargs):
    """Match one or more repetitions of *grammar*.

    Synonym for ``REPEAT(*grammar, min=1)``.
    """
    kwargs["min"] = 1
    kwargs["max"] = None
    return REPEAT(*grammar, **kwargs)
def ANY_EXCEPT(charlist, **kwargs):
    """Match a run of characters, none of which appear in *charlist*.

    Shorthand for ``WORD("^" + charlist)``; keyword arguments (``min``,
    ``max``, etc.) are passed through to :func:`WORD`.
    """
    if "grammar_name" not in kwargs:
        kwargs["grammar_name"] = "ANY_EXCEPT({0!r})".format(charlist)
    return WORD("^{0}".format(charlist), **kwargs)
# FIXME: whitespace at beginning of line
class BOL(Terminal):
    """Zero-width terminal matching only at the beginning of a line."""
    grammar_desc = "beginning of line"

    @classmethod
    def grammar_parse(cls, text, index, sessiondata):
        # We are at BOL either immediately after a newline character, or at
        # the very start of a buffer flagged as a line beginning.
        if index:
            at_bol = text.string[index - 1] in ("\n", "\r")
        else:
            at_bol = text.bol
        if at_bol:
            yield (0, cls(""))
        yield util.error_result(index, cls)
class EOF(Terminal):
    """Zero-width terminal matching only at the very end of the input."""
    grammar_desc = "end of file"

    @classmethod
    def grammar_parse(cls, text, index, sessiondata):
        # Succeed (with a zero-length match) only once every character of a
        # finished input buffer has been consumed.
        at_end = text.eof and index == len(text.string)
        if at_end:
            yield (0, cls(""))
        yield util.error_result(index, cls)
class EOL(Terminal):
    """Terminal matching one line ending (CRLF, LFCR, CR, or LF)."""
    grammar_desc = "end of line"
    grammar_collapse_skip = True
    # Two-character endings are listed before their one-character prefixes;
    # presumably OR prefers earlier alternatives, so the order matters --
    # do not reorder.
    grammar = (L("\n\r") | L("\r\n") | L("\r") | L("\n"))
class SPACE(Word):
    """Terminal matching a run of one or more whitespace characters."""
    grammar_desc = "whitespace"
    # Fix: the original "[\s]+" relied on the invalid escape sequence "\s" in
    # a non-raw string (DeprecationWarning since 3.6, SyntaxWarning from
    # 3.12).  A raw string yields the byte-for-byte identical pattern.
    regexp = re.compile(r"[\s]+")

    @classmethod
    def __class_init__(cls, attrs):
        # Don't do the normal Word __class_init__ stuff; the regexp above is
        # fixed and startchars/restchars are unused here.
        pass

    @classmethod
    def grammar_details(cls, depth=-1, visited=None):
        return cls.grammar_name
# Matches everything (possibly nothing, via min=0) up to the next CR or LF.
REST_OF_LINE = ANY_EXCEPT("\r\n", min=0, grammar_name="REST_OF_LINE", grammar_desc="rest of the line")
###############################################################################
def generate_ebnf(grammar, **opts):
    """*(generator function)*

    Take a given grammar and produce a description of the grammar in Extended
    Backus-Naur Form (EBNF).  This generator produces fully-formatted output
    lines suitable for writing to a file, etc.

    As there are a few different variants of EBNF in common use, as well as
    some aspects which could be considered a matter of preference when
    producing such descriptions, this function also accepts a variety of
    configurable options, specified as keyword parameters:

    *wrap* (default 80)
        Wrap the output text at *wrap* columns.
    *align* (default True)
        Align each entry so that all of the RHSes start on the same column.
    *indent* (default True)
        The number of characters that subsequent (wrapped) lines should be
        indented.  Can be set to either a number or to :const:`True`.  If set
        to :const:`True`, the indent will be auto-calculated to line up with
        the position of the RHS in the first line.
    *expand_terminals* (default False)
        If grammars have subgrammars, show their expansion even if
        :attr:`~Grammar.grammar_terminal` is true.
    *special_style* (default "desc")
        How some grammars (which can't be easily represented as EBNF) should
        be represented inside EBNF "special sequences".  Valid options are
        "desc" (use the (human-readable) :attr:`~Grammar.grammar_desc` text),
        "name" (just use the grammar's name), or "python" (use a repr-like
        syntax similar to the python syntax used to create them).

    Additional options may also be offered by certain individual grammars.
    """
    defaults = dict(expand_terminals=False, special_style="desc", wrap=80, indent=True, align=True)
    defaults.update(opts)
    opts = defaults
    # Breadth-first walk over the grammar graph, collecting one (name, rhs)
    # production for each grammar that provides an EBNF right-hand side.
    todo = [grammar]
    processed = set()
    results = []
    while todo:
        g = todo.pop(0)
        rhs = g.grammar_ebnf_rhs(opts)
        if rhs:
            processed.add(g)
            desc, nonterminals = rhs
            name, lhs_nt = g.grammar_ebnf_lhs(opts)  # NOTE(review): lhs_nt unused
            results.append((name, desc))
            # Queue referenced nonterminals we have not yet seen.
            for nt in nonterminals:
                if nt not in processed and nt not in todo:
                    todo.append(nt)
        elif not processed:
            # We were passed an anonymous grammar of some kind. Wrap it in
            # something that has a name (and thus an EBNF LHS and RHS) and try
            # again.
            todo.append(GRAMMAR(g, grammar_name="grammar"))
        else:
            processed.add(g)
    # Layout phase: optionally align the "=" column and wrap long productions.
    width = opts["wrap"]
    if not width:
        width = sys.maxsize
    align_width = 0
    if opts["align"]:
        # Ignore absurdly long names so a single outlier doesn't push every
        # RHS far to the right.
        max_align = width * 0.75 - 2
        for name, desc in results:
            w = len(name)
            if w <= max_align:
                align_width = max(align_width, w)
    indent = opts["indent"]
    if indent is True:
        # Auto-indent continuation lines to line up under the RHS column.
        if align_width:
            indent = align_width + 3
        else:
            indent = 8
    tw = textwrap.TextWrapper(width=width, subsequent_indent=(" " * indent), break_long_words=False,
        break_on_hyphens=False)
    for name, desc in results:
        yield tw.fill("{0:{1}} = {2};".format(name, align_width, desc)) + "\n"
| apache-2.0 |
dfaruque/Serenity | Tools/Node/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py | 446 | 43487 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
# Expansions for gyp's built-in variables.  Several intentionally expand to
# deferred make variables ($(...)), evaluated when the generated Android.mk
# is processed by the Android build system.
generator_default_variables = {
  'OS': 'android',
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.so',
  'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
  'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
  'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
  'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
  'LIB_DIR': '$(obj).$(TOOLSET)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(RULE_SOURCES)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
  # Boolean to declare that this target does not want its name mangled.
  'android_unmangled_name',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []

# Makefile fragment shared across the generated output.
SHARED_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""

# Banner written at the top of every generated .mk file (see Write()).
header = """\
# This file is generated by gyp; do not edit.
"""

# Include paths the Android build system already provides implicitly.
# NOTE(review): the consumer of this set is not visible in this chunk --
# presumably NormalizeIncludePaths filters these out; confirm.
android_standard_include_paths = set([
    # JNI_H_INCLUDE in build/core/binary.mk
    'dalvik/libnativehelper/include/nativehelper',
    # from SRC_HEADERS in build/core/config.mk
    'system/core/include',
    'hardware/libhardware/include',
    'hardware/libhardware_legacy/include',
    'hardware/ril/include',
    'dalvik/libnativehelper/include',
    'frameworks/native/include',
    'frameworks/native/opengl/include',
    'frameworks/base/include',
    'frameworks/base/opengl/include',
    'frameworks/base/native/include',
    'external/skia/include',
    # TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
    'bionic/libc/arch-arm/include',
    'bionic/libc/include',
    'bionic/libstdc++/include',
    'bionic/libc/kernel/common',
    'bionic/libc/kernel/arch-arm',
    'bionic/libm/include',
    'bionic/libm/include/arm',
    'bionic/libthread_db/include',
])

# Map gyp target types to Android module classes.
MODULE_CLASSES = {
  'static_library': 'STATIC_LIBRARIES',
  'shared_library': 'SHARED_LIBRARIES',
  'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
  """Return True if file extension |ext| compiles as C++ per the make backend."""
  language = make.COMPILABLE_EXTENSIONS.get(ext)
  return language == 'cxx'
def Sourceify(path):
  """Convert a path to its source directory form.

  The Android backend does not support options.generator_output, so this is
  simply the identity function (kept for parity with the make backend).
  """
  return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
# Both maps are populated by AndroidMkWriter.Write().
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
  """AndroidMkWriter packages up the writing of one target-specific Android.mk.

  Its only real entry point is Write(), and is mostly used for namespacing.
  """

  def __init__(self, android_top_dir):
    # android_top_dir: presumably the root of the Android source tree,
    # supplied by the generator -- confirm at the construction site.
    self.android_top_dir = android_top_dir
def Write(self, qualified_target, relative_target, base_path, output_filename,
          spec, configs, part_of_all):
  """The main entry point: writes a .mk file for a single target.

  Arguments:
    qualified_target: target we're generating
    relative_target: qualified target name relative to the root
    base_path: path relative to source root we're building in, used to resolve
               target-relative paths
    output_filename: output .mk file name to write
    spec, configs: gyp info
    part_of_all: flag indicating this target is part of 'all'

  Returns the Android module name chosen for this target.  Also updates the
  module-level target_outputs / target_link_deps maps.
  """
  gyp.common.EnsureDirExists(output_filename)
  self.fp = open(output_filename, 'w')
  self.fp.write(header)
  self.qualified_target = qualified_target
  self.relative_target = relative_target
  self.path = base_path
  self.target = spec['target_name']
  self.type = spec['type']
  self.toolset = spec['toolset']
  deps, link_deps = self.ComputeDeps(spec)
  # Some of the generation below can add extra output, sources, or
  # link dependencies. All of the out params of the functions that
  # follow use names like extra_foo.
  extra_outputs = []
  extra_sources = []
  self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
  self.android_module = self.ComputeAndroidModule(spec)
  (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
  self.output = self.output_binary = self.ComputeOutput(spec)
  # Standard header.
  self.WriteLn('include $(CLEAR_VARS)\n')
  # Module class and name.
  self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
  self.WriteLn('LOCAL_MODULE := ' + self.android_module)
  # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
  # The library module classes fail if the stem is set. ComputeOutputParts
  # makes sure that stem == modulename in these cases.
  if self.android_stem != self.android_module:
    self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
  self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
  self.WriteLn('LOCAL_MODULE_TAGS := optional')
  if self.toolset == 'host':
    self.WriteLn('LOCAL_IS_HOST_MODULE := true')
  # Grab output directories; needed for Actions and Rules.
  self.WriteLn('gyp_intermediate_dir := $(call local-intermediates-dir)')
  self.WriteLn('gyp_shared_intermediate_dir := '
               '$(call intermediates-dir-for,GYP,shared)')
  self.WriteLn()
  # List files this target depends on so that actions/rules/copies/sources
  # can depend on the list.
  # TODO: doesn't pull in things through transitive link deps; needed?
  target_dependencies = [x[1] for x in deps if x[0] == 'path']
  self.WriteLn('# Make sure our deps are built first.')
  self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
                 local_pathify=True)
  # Actions must come first, since they can generate more OBJs for use below.
  if 'actions' in spec:
    self.WriteActions(spec['actions'], extra_sources, extra_outputs)
  # Rules must be early like actions.
  if 'rules' in spec:
    self.WriteRules(spec['rules'], extra_sources, extra_outputs)
  if 'copies' in spec:
    self.WriteCopies(spec['copies'], extra_outputs)
  # GYP generated outputs.
  self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
  # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
  # on both our dependency targets and our generated files.
  self.WriteLn('# Make sure our deps and generated files are built first.')
  self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
               '$(GYP_GENERATED_OUTPUTS)')
  self.WriteLn()
  # Sources.
  if spec.get('sources', []) or extra_sources:
    self.WriteSources(spec, configs, extra_sources)
  self.WriteTarget(spec, configs, deps, link_deps, part_of_all)
  # Update global list of target outputs, used in dependency tracking.
  target_outputs[qualified_target] = ('path', self.output_binary)
  # Update global list of link dependencies.
  if self.type == 'static_library':
    target_link_deps[qualified_target] = ('static', self.android_module)
  elif self.type == 'shared_library':
    target_link_deps[qualified_target] = ('shared', self.android_module)
  self.fp.close()
  return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
  """Write Makefile code for any 'actions' from the gyp input.

  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 actions (used to make other pieces dependent on these
                 actions)
  """
  for action in actions:
    name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                    action['action_name']))
    self.WriteLn('### Rules for action "%s":' % action['action_name'])
    inputs = action['inputs']
    outputs = action['outputs']
    # Build up a list of outputs.
    # Collect the output dirs we'll need.
    dirs = set()
    for out in outputs:
      if not out.startswith('$'):
        print ('WARNING: Action for target "%s" writes output to local path '
               '"%s".' % (self.target, out))
      dir = os.path.split(out)[0]
      if dir:
        dirs.add(dir)
    if int(action.get('process_outputs_as_sources', False)):
      extra_sources += outputs
    # Prepare the actual command.
    command = gyp.common.EncodePOSIXShellList(action['action'])
    if 'message' in action:
      quiet_cmd = 'Gyp action: %s ($@)' % action['message']
    else:
      quiet_cmd = 'Gyp action: %s ($@)' % name
    if len(dirs) > 0:
      command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
    # Actions run from their containing directory, not the make top dir.
    cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
    command = cd_action + command
    # The makefile rules are all relative to the top dir, but the gyp actions
    # are defined relative to their containing dir. This replaces the gyp_*
    # variables for the action rule with an absolute version so that the
    # output goes in the right place.
    # Only write the gyp_* rules for the "primary" output (:1);
    # it's superfluous for the "extra outputs", and this avoids accidentally
    # writing duplicate dummy rules for those outputs.
    main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
    self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
    self.WriteLn('%s: gyp_intermediate_dir := '
                 '$(abspath $(gyp_intermediate_dir))' % main_output)
    self.WriteLn('%s: gyp_shared_intermediate_dir := '
                 '$(abspath $(gyp_shared_intermediate_dir))' % main_output)
    # Android's envsetup.sh adds a number of directories to the path including
    # the built host binary directory. This causes actions/rules invoked by
    # gyp to sometimes use these instead of system versions, e.g. bison.
    # The built host binaries may not be suitable, and can cause errors.
    # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
    # set by envsetup.
    self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
                 % main_output)
    # Spaces would break make's word-splitting of dependency lists.
    for input in inputs:
      assert ' ' not in input, (
          "Spaces in action input filenames not supported (%s)" % input)
    for output in outputs:
      assert ' ' not in output, (
          "Spaces in action output filenames not supported (%s)" % output)
    self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                 (main_output, ' '.join(map(self.LocalPathify, inputs))))
    self.WriteLn('\t@echo "%s"' % quiet_cmd)
    self.WriteLn('\t$(hide)%s\n' % command)
    for output in outputs[1:]:
      # Make each output depend on the main output, with an empty command
      # to force make to notice that the mtime has changed.
      self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
    extra_outputs += outputs
    self.WriteLn()
  self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
  """Write Makefile code for any 'rules' from the gyp input.

  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 rules (used to make other pieces dependent on these rules)
  """
  if len(rules) == 0:
    return
  # Phony target that fires whenever any rule's primary output is rebuilt;
  # appended to extra_sources below to force all rules to run.
  rule_trigger = '%s_rule_trigger' % self.android_module
  did_write_rule = False
  for rule in rules:
    if len(rule.get('rule_sources', [])) == 0:
      continue
    did_write_rule = True
    name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                    rule['rule_name']))
    self.WriteLn('\n### Generated for rule "%s":' % name)
    self.WriteLn('# "%s":' % rule)
    inputs = rule.get('inputs')
    # One set of make rules is emitted per rule source file.
    for rule_source in rule.get('rule_sources', []):
      (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
      (rule_source_root, rule_source_ext) = \
          os.path.splitext(rule_source_basename)
      outputs = [self.ExpandInputRoot(out, rule_source_root,
                                      rule_source_dirname)
                 for out in rule['outputs']]
      dirs = set()
      for out in outputs:
        if not out.startswith('$'):
          print ('WARNING: Rule for target %s writes output to local path %s'
                 % (self.target, out))
        dir = os.path.dirname(out)
        if dir:
          dirs.add(dir)
      extra_outputs += outputs
      if int(rule.get('process_outputs_as_sources', False)):
        extra_sources.extend(outputs)
      # Expand INPUT_ROOT/INPUT_DIRNAME and RULE_SOURCES in the command.
      components = []
      for component in rule['action']:
        component = self.ExpandInputRoot(component, rule_source_root,
                                         rule_source_dirname)
        if '$(RULE_SOURCES)' in component:
          component = component.replace('$(RULE_SOURCES)',
                                        rule_source)
        components.append(component)
      command = gyp.common.EncodePOSIXShellList(components)
      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command
      if dirs:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
      # We set up a rule to build the first output, and then set up
      # a rule for each additional output to depend on the first.
      # NOTE: map() must return a list here (Python 2); wrap in list() if
      # this file is ever ported to Python 3.
      outputs = map(self.LocalPathify, outputs)
      main_output = outputs[0]
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(abspath $(gyp_intermediate_dir))' % main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(abspath $(gyp_shared_intermediate_dir))' % main_output)
      # See explanation in WriteActions.
      self.WriteLn('%s: export PATH := '
                   '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)
      main_output_deps = self.LocalPathify(rule_source)
      if inputs:
        main_output_deps += ' '
        main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, main_output_deps))
      self.WriteLn('\t%s\n' % command)
      for output in outputs[1:]:
        # Make each output depend on the main output, with an empty command
        # to force make to notice that the mtime has changed.
        self.WriteLn('%s: %s ;' % (output, main_output))
      self.WriteLn('.PHONY: %s' % (rule_trigger))
      self.WriteLn('%s: %s' % (rule_trigger, main_output))
      self.WriteLn('')
  if did_write_rule:
    extra_sources.append(rule_trigger)  # Force all rules to run.
    self.WriteLn('### Finished generating for all rules')
    self.WriteLn('')
def WriteCopies(self, copies, extra_outputs):
  """Write Makefile code for any 'copies' from the gyp input.

  extra_outputs: a list that will be filled in with any outputs of this action
                 (used to make other pieces dependent on this action)
  """
  self.WriteLn('### Generated for copy rule.')
  variable = make.StringToMakefileVariable(self.relative_target + '_copies')
  outputs = []
  for copy in copies:
    for path in copy['files']:
      # The Android build system does not allow generation of files into the
      # source tree. The destination should start with a variable, which will
      # typically be $(gyp_intermediate_dir) or
      # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
      # because some of the gyp tests depend on this.
      if not copy['destination'].startswith('$'):
        print ('WARNING: Copy rule for target %s writes output to '
               'local path %s' % (self.target, copy['destination']))
      # LocalPathify() calls normpath, stripping trailing slashes.
      path = Sourceify(self.LocalPathify(path))
      filename = os.path.split(path)[1]
      output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                        filename)))
      # Copy via Android's $(ACP) tool, ordered after target dependencies.
      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                   (output, path))
      self.WriteLn('\t@echo Copying: $@')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
      self.WriteLn()
      outputs.append(output)
  # Collect all copy outputs into one make variable for dependents.
  self.WriteLn('%s = %s' % (variable,
                            ' '.join(map(make.QuoteSpaces, outputs))))
  extra_outputs.append('$(%s)' % variable)
  self.WriteLn()
def WriteSourceFlags(self, spec, configs):
  """Write out the flags and include paths used to compile source files for
  the current target.

  Emits per-configuration MY_CFLAGS_*/MY_DEFS_*/LOCAL_C_INCLUDES_*/
  LOCAL_CPPFLAGS_* variables, then the LOCAL_* assignments that select the
  active configuration via $(GYP_CONFIGURATION).

  Args:
    spec, configs: input from gyp.
  """
  # NOTE: iteritems() is Python 2 only; this file targets Python 2.
  for configname, config in sorted(configs.iteritems()):
    extracted_includes = []
    self.WriteLn('\n# Flags passed to both C and C++ files.')
    # -I flags embedded in cflags are pulled out and emitted as include dirs.
    cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
        config.get('cflags', []) + config.get('cflags_c', []))
    extracted_includes.extend(includes_from_cflags)
    self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)
    self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
                   prefix='-D', quoter=make.EscapeCppDefine)
    self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
    includes = list(config.get('include_dirs', []))
    includes.extend(extracted_includes)
    includes = map(Sourceify, map(self.LocalPathify, includes))
    includes = self.NormalizeIncludePaths(includes)
    self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)
    self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
    self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)
  self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
               '$(MY_DEFS_$(GYP_CONFIGURATION))')
  # Undefine ANDROID for host modules
  # TODO: the source code should not use macro ANDROID to tell if it's host
  # or target module.
  if self.toolset == 'host':
    self.WriteLn('# Undefine ANDROID for host modules')
    self.WriteLn('LOCAL_CFLAGS += -UANDROID')
  self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
               '$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
  self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
def WriteSources(self, spec, configs, extra_sources):
    """Write Makefile code for any 'sources' from the gyp input.
    These are source files necessary to build the current target.
    We need to handle shared_intermediate directory source files as
    a special case by copying them to the intermediate directory and
    treating them as a genereated sources. Otherwise the Android build
    rules won't pick them up.

    Args:
      spec, configs: input from gyp.
      extra_sources: Sources generated from Actions or Rules.
    """
    # NOTE: Python 2 filter() returns a list, which is relied on below when
    # extra_sources is appended to during the loop over `sources`.
    sources = filter(make.Compilable, spec.get('sources', []))
    generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
    extra_sources = filter(make.Compilable, extra_sources)

    # Determine and output the C++ extension used by these sources.
    # We simply find the first C++ file and use that extension.
    all_sources = sources + extra_sources
    local_cpp_extension = '.cpp'
    for source in all_sources:
        (root, ext) = os.path.splitext(source)
        if IsCPPExtension(ext):
            local_cpp_extension = ext
            break
    if local_cpp_extension != '.cpp':
        self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)

    # We need to move any non-generated sources that are coming from the
    # shared intermediate directory out of LOCAL_SRC_FILES and put them
    # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
    # that don't match our local_cpp_extension, since Android will only
    # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
    local_files = []
    for source in sources:
        (root, ext) = os.path.splitext(source)
        if '$(gyp_shared_intermediate_dir)' in source:
            extra_sources.append(source)
        elif '$(gyp_intermediate_dir)' in source:
            extra_sources.append(source)
        elif IsCPPExtension(ext) and ext != local_cpp_extension:
            extra_sources.append(source)
        else:
            local_files.append(os.path.normpath(os.path.join(self.path, source)))

    # For any generated source, if it is coming from the shared intermediate
    # directory then we add a Make rule to copy them to the local intermediate
    # directory first. This is because the Android LOCAL_GENERATED_SOURCES
    # must be in the local module intermediate directory for the compile rules
    # to work properly. If the file has the wrong C++ extension, then we add
    # a rule to copy that to intermediates and use the new version.
    final_generated_sources = []
    # If a source file gets copied, we still need to add the orginal source
    # directory as header search path, for GCC searches headers in the
    # directory that contains the source file by default.
    origin_src_dirs = []
    for source in extra_sources:
        local_file = source
        if not '$(gyp_intermediate_dir)/' in local_file:
            basename = os.path.basename(local_file)
            local_file = '$(gyp_intermediate_dir)/' + basename
        (root, ext) = os.path.splitext(local_file)
        if IsCPPExtension(ext) and ext != local_cpp_extension:
            # Rename to the chosen C++ extension so Android's single
            # LOCAL_CPP_EXTENSION rule applies.
            local_file = root + local_cpp_extension
        if local_file != source:
            # The file moved or was renamed: emit the copy rule.
            self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
            self.WriteLn('\tmkdir -p $(@D); cp $< $@')
            origin_src_dirs.append(os.path.dirname(source))
        final_generated_sources.append(local_file)

    # We add back in all of the non-compilable stuff to make sure that the
    # make rules have dependencies on them.
    final_generated_sources.extend(generated_not_sources)
    self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

    origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
    origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
    self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

    self.WriteList(local_files, 'LOCAL_SRC_FILES')

    # Write out the flags used to compile the source; this must be done last
    # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
    self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
    """Return the Android module name used for a gyp spec.

    The fully qualified target name is used so duplicate targets living in
    different directories cannot collide, and a gyp-specific suffix is
    appended so generated module names are recognizable.
    """
    if int(spec.get('android_unmangled_name', 0)):
        # Caller opted out of mangling; shared libraries must then already
        # carry the mandatory Android 'lib' prefix.
        assert self.type != 'shared_library' or self.target.startswith('lib')
        return self.target

    # By Android convention shared library modules must be named 'libfoo'
    # when -l flags are generated.
    prefix = 'lib_' if self.type == 'shared_library' else ''
    suffix = '_host_gyp' if spec['toolset'] == 'host' else '_gyp'
    if self.path:
        middle = '%s_%s' % (self.path, self.target)
    else:
        middle = self.target
    return make.StringToMakefileVariable(prefix + middle + suffix)
def ComputeOutputParts(self, spec):
    """Return the 'output basename' of a gyp spec, split into (stem, ext).

    Android libraries must be named exactly like their module name or the
    linker cannot find them, so product_name and friends are ignored for
    libraries, and no extra "lib" prefix is prepended here.
    """
    assert self.type != 'loadable_module' # TODO: not supported?

    target = spec['target_name']
    target_prefix = ''
    target_ext = ''
    is_library = self.type in ('static_library', 'shared_library')

    if is_library:
        # Library names are forced to the module name (see docstring).
        target = self.ComputeAndroidModule(spec)
        target_ext = '.a' if self.type == 'static_library' else '.so'
    elif self.type == 'none':
        target_ext = '.stamp'
    elif self.type != 'executable':
        print ("ERROR: What output file should be generated?",
               "type", self.type, "target", target)

    if not is_library:
        # Non-library targets honour the product_* overrides from the spec.
        target_prefix = spec.get('product_prefix', target_prefix)
        target = spec.get('product_name', target)
        product_ext = spec.get('product_extension')
        if product_ext:
            target_ext = '.' + product_ext

    return (target_prefix + target, target_ext)
def ComputeOutputBasename(self, spec):
    """Return the 'output basename' of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
    'libfoobar.so'
    """
    stem, ext = self.ComputeOutputParts(spec)
    return stem + ext
def ComputeOutput(self, spec):
    """Return the 'output' (full output path) of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
    '$(obj)/baz/libfoobar.so'
    """
    if self.type == 'executable' and self.toolset == 'host':
        # Host executables land in shared_intermediate_dir so gyp rules that
        # refer to PRODUCT_DIR can invoke them.
        out_dir = '$(gyp_shared_intermediate_dir)'
    elif self.type == 'shared_library':
        if self.toolset == 'host':
            out_dir = '$(HOST_OUT_INTERMEDIATE_LIBRARIES)'
        else:
            out_dir = '$(TARGET_OUT_INTERMEDIATE_LIBRARIES)'
    else:
        # Other targets just get built into their intermediate dir; host
        # modules pass the extra 'true' argument to intermediates-dir-for.
        if self.toolset == 'host':
            template = '$(call intermediates-dir-for,%s,%s,true)'
        else:
            template = '$(call intermediates-dir-for,%s,%s)'
        out_dir = template % (self.android_class, self.android_module)

    assert spec.get('product_dir') is None # TODO: not supported?
    return os.path.join(out_dir, self.ComputeOutputBasename(spec))
def NormalizeIncludePaths(self, include_paths):
    """ Normalize include_paths.
    Convert absolute paths to relative to the Android top directory;
    filter out include paths that are already brought in by the Android build
    system.

    Args:
      include_paths: A list of unprocessed include paths.
    Returns:
      A list of normalized include paths.
    """
    normalized = []
    for path in include_paths:
        # Use startswith() rather than path[0]: an empty string entry must
        # not raise IndexError; it simply falls through to the filter below.
        if path.startswith('/'):
            path = gyp.common.RelativePath(path, self.android_top_dir)
        # Filter out the Android standard search path.
        if path not in android_standard_include_paths:
            normalized.append(path)
    return normalized
def ExtractIncludesFromCFlags(self, cflags):
    """Extract includes "-I..." out from cflags

    Args:
      cflags: A list of compiler flags, which may be mixed with "-I.."
    Returns:
      A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
    """
    # Partition the flags in one pass each; "-I" is stripped off the paths.
    include_paths = [flag[2:] for flag in cflags if flag.startswith('-I')]
    clean_cflags = [flag for flag in cflags if not flag.startswith('-I')]
    return (clean_cflags, include_paths)
def ComputeAndroidLibraryModuleNames(self, libraries):
    """Compute the Android module names from libraries, ie spec.get('libraries')

    Args:
      libraries: the value of spec.get('libraries')
    Returns:
      A tuple (static_lib_modules, dynamic_lib_modules)
    """
    static_lib_modules = []
    dynamic_lib_modules = []
    # Each entry may contain several whitespace-separated linker words.
    for entry in libraries:
        for word in entry.split():
            # Skip the system libraries that the Android build adds for us.
            if word in ('-lc', '-lstdc++', '-lm') or word.endswith('libgcc.a'):
                continue
            static_match = re.search(r'([^/]+)\.a$', word)
            shared_match = re.search(r'([^/]+)\.so$', word)
            if static_match:
                static_lib_modules.append(static_match.group(1))
            elif shared_match:
                dynamic_lib_modules.append(shared_match.group(1))
            elif word.startswith('-l'):
                # "-lstlport" -> libstlport
                module = 'lib' + word[2:]
                if word.endswith('_static'):
                    static_lib_modules.append(module)
                else:
                    dynamic_lib_modules.append(module)
    return (static_lib_modules, dynamic_lib_modules)
def ComputeDeps(self, spec):
    """Compute the dependencies of a gyp spec.

    Returns a tuple (deps, link_deps), where each is a list of
    filenames that will need to be put in front of make for either
    building (deps) or linking (link_deps).
    """
    deps = []
    link_deps = []
    # target_outputs / target_link_deps are module-level tables filled in
    # while the other targets of this run are generated.
    spec_deps = spec.get('dependencies', [])
    deps.extend(target_outputs[dep] for dep in spec_deps
                if target_outputs[dep])
    link_deps.extend(target_link_deps[dep] for dep in spec_deps
                     if dep in target_link_deps)
    deps.extend(link_deps)
    return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
    """Write Makefile code to specify the link flags and library dependencies.

    spec, configs: input from gyp.
    link_deps: link dependency list; see ComputeDeps()
    """
    # One LOCAL_LDFLAGS_<config> list per configuration; the generated
    # makefile picks one via $(GYP_CONFIGURATION). (Python 2: iteritems.)
    for configname, config in sorted(configs.iteritems()):
        ldflags = list(config.get('ldflags', []))
        self.WriteLn('')
        self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
    self.WriteLn('\nLOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))')

    # Libraries (i.e. -lfoo)
    libraries = gyp.common.uniquer(spec.get('libraries', []))
    static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
        libraries)

    # Link dependencies (i.e. libfoo.a, libfoo.so)
    # Each link_deps element is a (kind, name) pair; see ComputeDeps().
    static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
    shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']

    self.WriteLn('')
    self.WriteList(static_libs + static_link_deps,
                   'LOCAL_STATIC_LIBRARIES')
    self.WriteLn('# Enable grouping to fix circular references')
    self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
    self.WriteLn('')
    self.WriteList(dynamic_libs + shared_link_deps,
                   'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all):
    """Write Makefile code to produce the final target of the gyp spec.

    spec, configs: input from gyp.
    deps, link_deps: dependency lists; see ComputeDeps()
    part_of_all: flag indicating this target is part of 'all'
    """
    self.WriteLn('### Rules for final target.')

    if self.type != 'none':
        self.WriteTargetFlags(spec, configs, link_deps)

    # Add to the set of targets which represent the gyp 'all' target. We use the
    # name 'gyp_all_modules' as the Android build system doesn't allow the use
    # of the Make target 'all' and because 'all_modules' is the equivalent of
    # the Make target 'all' on Android.
    if part_of_all:
        self.WriteLn('# Add target alias to "gyp_all_modules" target.')
        self.WriteLn('.PHONY: gyp_all_modules')
        self.WriteLn('gyp_all_modules: %s' % self.android_module)
        self.WriteLn('')

    # Add an alias from the gyp target name to the Android module name. This
    # simplifies manual builds of the target, and is required by the test
    # framework.
    if self.target != self.android_module:
        self.WriteLn('# Alias gyp target name.')
        self.WriteLn('.PHONY: %s' % self.target)
        self.WriteLn('%s: %s' % (self.target, self.android_module))
        self.WriteLn('')

    # Add the command to trigger build of the target type depending
    # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
    # NOTE: This has to come last!
    modifier = ''
    if self.toolset == 'host':
        modifier = 'HOST_'
    if self.type == 'static_library':
        self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
    elif self.type == 'shared_library':
        self.WriteLn('LOCAL_PRELINK_MODULE := false')
        self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
    elif self.type == 'executable':
        if self.toolset == 'host':
            self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
        else:
            # Don't install target executables for now, as it results in them being
            # included in ROM. This can be revisited if there's a reason to install
            # them later.
            self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
        self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
    else:
        # 'none' (and any unrecognized) targets: emit a timestamp stamp file
        # through base_rules.mk instead of including a real build-type rule.
        self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
        self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
        self.WriteLn()
        self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
        self.WriteLn()
        self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
        self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
        self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
        self.WriteLn('\t$(hide) touch $@')
def WriteList(self, value_list, variable=None, prefix='',
              quoter=make.QuoteIfNecessary, local_pathify=False):
    """Write a variable definition that is a list of values.

    E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
         foo = blaha blahb
    but in a pretty-printed style.
    """
    if value_list:
        items = [quoter(prefix + item) for item in value_list]
        if local_pathify:
            items = [self.LocalPathify(item) for item in items]
        # One value per continuation line, tab-indented.
        values = ' \\\n\t' + ' \\\n\t'.join(items)
    else:
        values = ''
    self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
    """Emit one line to the generated makefile, appending a newline."""
    line = text + '\n'
    self.fp.write(line)
def LocalPathify(self, path):
    """Convert a subdirectory-relative path into a normalized path which starts
    with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
    Absolute paths, or paths that contain variables, are just normalized."""
    if '$(' in path or os.path.isabs(path):
        # Not a file inside the project tree; normpath still matters to trim
        # trailing slashes.
        return os.path.normpath(path)
    local_path = os.path.normpath(
        os.path.join('$(LOCAL_PATH)', self.path, path))
    # Check that ../ components did not escape $(LOCAL_PATH) - i.e. that the
    # result is still inside the project tree.  The path may legitimately be
    # exactly '$(LOCAL_PATH)', so no trailing slash is required.
    assert local_path.startswith('$(LOCAL_PATH)'), (
        'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
    return local_path
def ExpandInputRoot(self, template, expansion, dirname):
    """Substitute %(INPUT_ROOT)s / %(INPUT_DIRNAME)s in a rule template.

    Templates without either placeholder are returned untouched.
    """
    placeholders = ('%(INPUT_ROOT)s', '%(INPUT_DIRNAME)s')
    if not any(p in template for p in placeholders):
        return template
    return template % {'INPUT_ROOT': expansion, 'INPUT_DIRNAME': dirname}
def PerformBuild(data, configurations, params):
    """Run the Android build on everything generated by this backend.

    The android backend only supports the default configuration.
    """
    options = params['options']
    makefile = os.path.abspath(os.path.join(options.toplevel_dir,
                                            'GypAndroid.mk'))
    env = dict(os.environ)
    # ONE_SHOT_MAKEFILE restricts the Android build to just our makefile.
    env['ONE_SHOT_MAKEFILE'] = makefile
    arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'],
                 'gyp_all_modules']
    # Parenthesized single-argument print: identical output on Python 2,
    # and valid syntax on Python 3 (the bare `print x` statement is not).
    print('Building: %s' % arguments)
    subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
    """Generator entry point: write one .mk per gyp target plus the root
    GypAndroid.mk that includes them all."""
    options = params['options']
    generator_flags = params.get('generator_flags', {})
    # NOTE(review): builddir_name, toolsets and srcdir (below) are computed
    # but never read in this function.
    builddir_name = generator_flags.get('output_dir', 'out')
    limit_to_target_all = generator_flags.get('limit_to_target_all', False)
    android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
    assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'

    def CalculateMakefilePath(build_file, base_name):
        """Determine where to write a Makefile for a given gyp file."""
        # Paths in gyp files are relative to the .gyp file, but we want
        # paths relative to the source root for the master makefile. Grab
        # the path of the .gyp file as the base to relativize against.
        # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
        base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                            options.depth)
        # We write the file in the base_path directory.
        output_file = os.path.join(options.depth, base_path, base_name)
        assert not options.generator_output, (
            'The Android backend does not support options.generator_output.')
        base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                            options.toplevel_dir)
        return base_path, output_file

    # TODO: search for the first non-'Default' target. This can go
    # away when we add verification that all targets have the
    # necessary configurations.
    default_configuration = None
    toolsets = set([target_dicts[target]['toolset'] for target in target_list])
    for target in target_list:
        spec = target_dicts[target]
        if spec['default_configuration'] != 'Default':
            default_configuration = spec['default_configuration']
            break
    if not default_configuration:
        default_configuration = 'Default'

    srcdir = '.'
    makefile_name = 'GypAndroid' + options.suffix + '.mk'
    makefile_path = os.path.join(options.toplevel_dir, makefile_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    gyp.common.EnsureDirExists(makefile_path)
    root_makefile = open(makefile_path, 'w')

    root_makefile.write(header)

    # We set LOCAL_PATH just once, here, to the top of the project tree. This
    # allows all the other paths we use to be relative to the Android.mk file,
    # as the Android build system expects.
    root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')

    # Find the list of targets that derive from the gyp file(s) being built.
    needed_targets = set()
    for build_file in params['build_files']:
        for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
            needed_targets.add(target)

    build_files = set()
    include_list = set()
    android_modules = {}
    for qualified_target in target_list:
        build_file, target, toolset = gyp.common.ParseQualifiedTarget(
            qualified_target)
        relative_build_file = gyp.common.RelativePath(build_file,
                                                      options.toplevel_dir)
        build_files.add(relative_build_file)
        included_files = data[build_file]['included_files']
        for included_file in included_files:
            # The included_files entries are relative to the dir of the build file
            # that included them, so we have to undo that and then make them relative
            # to the root dir.
            relative_include_file = gyp.common.RelativePath(
                gyp.common.UnrelativePath(included_file, build_file),
                options.toplevel_dir)
            abs_include_file = os.path.abspath(relative_include_file)
            # If the include file is from the ~/.gyp dir, we should use absolute path
            # so that relocating the src dir doesn't break the path.
            if (params['home_dot_gyp'] and
                    abs_include_file.startswith(params['home_dot_gyp'])):
                build_files.add(abs_include_file)
            else:
                build_files.add(relative_include_file)

        base_path, output_file = CalculateMakefilePath(build_file,
            target + '.' + toolset + options.suffix + '.mk')

        spec = target_dicts[qualified_target]
        configs = spec['configurations']

        part_of_all = (qualified_target in needed_targets and
                       not int(spec.get('suppress_wildcard', False)))
        if limit_to_target_all and not part_of_all:
            continue

        relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
                                                     toolset)
        writer = AndroidMkWriter(android_top_dir)
        android_module = writer.Write(qualified_target, relative_target, base_path,
                                      output_file, spec, configs,
                                      part_of_all=part_of_all)
        # Android module names must be globally unique; bail out of generation
        # entirely (without writing the include list) on a collision.
        if android_module in android_modules:
            print ('ERROR: Android module names must be unique. The following '
                   'targets both generate Android module name %s.\n %s\n %s' %
                   (android_module, android_modules[android_module],
                    qualified_target))
            return
        android_modules[android_module] = qualified_target

        # Our root_makefile lives at the source root. Compute the relative path
        # from there to the output_file for including.
        mkfile_rel_path = gyp.common.RelativePath(output_file,
                                                  os.path.dirname(makefile_path))
        include_list.add(mkfile_rel_path)

    root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)

    # Write out the sorted list of includes.
    root_makefile.write('\n')
    for include_file in sorted(include_list):
        root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
    root_makefile.write('\n')

    root_makefile.write(SHARED_FOOTER)

    root_makefile.close()
| mit |
ar7z1/ansible | lib/ansible/modules/cloud/amazon/redshift_subnet_group.py | 50 | 5877 | #!/usr/bin/python
# Copyright 2014 Jens Carl, Hothead Games Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
module: redshift_subnet_group
version_added: "2.2"
short_description: manage Redshift cluster subnet groups
description:
- Create, modifies, and deletes Redshift cluster subnet groups.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
default: 'present'
choices: ['present', 'absent' ]
group_name:
description:
- Cluster subnet group name.
required: true
aliases: ['name']
group_description:
description:
- Database subnet group description.
aliases: ['description']
group_subnets:
description:
- List of subnet IDs that make up the cluster subnet group.
aliases: ['subnets']
requirements: [ 'boto' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a Redshift subnet group
- local_action:
module: redshift_subnet_group
state: present
group_name: redshift-subnet
group_description: Redshift subnet
group_subnets:
- 'subnet-aaaaa'
- 'subnet-bbbbb'
# Remove subnet group
- redshift_subnet_group:
state: absent
group_name: redshift-subnet
'''
RETURN = '''
group:
description: dictionary containing all Redshift subnet group information
returned: success
type: complex
contains:
name:
description: name of the Redshift subnet group
returned: success
type: string
sample: "redshift_subnet_group_name"
vpc_id:
description: Id of the VPC where the subnet is located
returned: success
type: string
sample: "vpc-aabb1122"
'''
try:
import boto
import boto.redshift
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def main():
    """Entry point: create, modify, or delete a Redshift cluster subnet group."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        group_name=dict(required=True, aliases=['name']),
        group_description=dict(required=False, aliases=['description']),
        group_subnets=dict(required=False, aliases=['subnets'], type='list'),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    # NOTE(review): HAS_BOTO is both set by the try/except import above and
    # imported from ansible.module_utils.ec2; the imported name wins here.
    if not HAS_BOTO:
        module.fail_json(msg='boto v2.9.0+ required for this module')

    state = module.params.get('state')
    group_name = module.params.get('group_name')
    group_description = module.params.get('group_description')
    group_subnets = module.params.get('group_subnets')

    # Cross-parameter validation the argument_spec cannot express directly:
    # 'present' needs the full group definition, 'absent' forbids it.
    if state == 'present':
        for required in ('group_name', 'group_description', 'group_subnets'):
            if not module.params.get(required):
                module.fail_json(msg=str("parameter %s required for state='present'" % required))
    else:
        for not_allowed in ('group_description', 'group_subnets'):
            if module.params.get(not_allowed):
                module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed))

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))

    # Connect to the Redshift endpoint.
    try:
        conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    try:
        changed = False
        exists = False
        group = None

        try:
            # Probe for an existing group; a not-found fault is expected when
            # the group has yet to be created, anything else is fatal.
            matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
            exists = len(matching_groups) > 0
        except boto.exception.JSONResponseError as e:
            if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
                # if e.code != 'ClusterSubnetGroupNotFoundFault':
                module.fail_json(msg=str(e))

        if state == 'absent':
            if exists:
                conn.delete_cluster_subnet_group(group_name)
                changed = True
        else:
            if not exists:
                new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
                group = {
                    'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
                                     ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
                    'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
                                       ['ClusterSubnetGroup']['VpcId'],
                }
            else:
                changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
                group = {
                    'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
                                         ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
                    'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
                                           ['ClusterSubnetGroup']['VpcId'],
                }
            # NOTE(review): 'changed' is reported True for state=present even
            # when modify left the group identical - confirm if that matters.
            changed = True
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed, group=group)
if __name__ == '__main__':
main()
| gpl-3.0 |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/UserString.py | 312 | 9687 | #!/usr/bin/env python
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects
Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
import sys
import collections
__all__ = ["UserString","MutableString"]
class UserString(collections.Sequence):
    """Wrapper class that delegates the whole string API to ``self.data``.

    Subclass this (instead of str) when you want a string-like object whose
    behavior can be customized method by method.  Python 2 only (uses
    basestring, cmp, long, sys.maxint).
    """

    def __init__(self, seq):
        # Accept a real string, another UserString (copy its buffer), or any
        # other object convertible via str().
        if isinstance(seq, basestring):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)

    # --- conversion, hashing and comparison protocols ---------------------
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __long__(self): return long(self.data)
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)

    def __cmp__(self, string):
        # Python 2 three-way comparison; unwrap other UserStrings so two
        # wrappers compare like their contents.
        if isinstance(string, UserString):
            return cmp(self.data, string.data)
        else:
            return cmp(self.data, string)

    def __contains__(self, char):
        return char in self.data

    # --- sequence protocol -------------------------------------------------
    def __len__(self): return len(self.data)
    def __getitem__(self, index): return self.__class__(self.data[index])
    def __getslice__(self, start, end):
        start = max(start, 0); end = max(end, 0)
        return self.__class__(self.data[start:end])

    # --- arithmetic operators (results re-wrapped in self.__class__) -------
    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, basestring):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + str(other))
    def __radd__(self, other):
        if isinstance(other, basestring):
            return self.__class__(other + self.data)
        else:
            return self.__class__(str(other) + self.data)
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __mod__(self, args):
        return self.__class__(self.data % args)

    # the following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))
    def count(self, sub, start=0, end=sys.maxint):
        return self.data.count(sub, start, end)
    def decode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.decode(encoding, errors))
            else:
                return self.__class__(self.data.decode(encoding))
        else:
            return self.__class__(self.data.decode())
    def encode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            else:
                return self.__class__(self.data.encode(encoding))
        else:
            return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=sys.maxint):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=sys.maxint):
        return self.data.find(sub, start, end)
    def index(self, sub, start=0, end=sys.maxint):
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)
    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
    def partition(self, sep):
        return self.data.partition(sep)
    def replace(self, old, new, maxsplit=-1):
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=sys.maxint):
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=sys.maxint):
        return self.data.rindex(sub, start, end)
    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))
    def rpartition(self, sep):
        return self.data.rpartition(sep)
    def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)
    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=sys.maxint):
        return self.data.startswith(prefix, start, end)
    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
class MutableString(UserString, collections.MutableSequence):
"""mutable string objects
Python strings are immutable objects. This has the advantage, that
strings may be used as dictionary keys. If this property isn't needed
and you insist on changing string values in place instead, you may cheat
and use MutableString.
But the purpose of this class is an educational one: to prevent
people from inventing their own mutable string class derived
from UserString and than forget thereby to remove (override) the
__hash__ method inherited from UserString. This would lead to
errors that would be very hard to track down.
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
from warnings import warnpy3k
warnpy3k('the class UserString.MutableString has been removed in '
'Python 3.0', stacklevel=2)
self.data = string
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def __setitem__(self, index, sub):
if isinstance(index, slice):
if isinstance(sub, UserString):
sub = sub.data
elif not isinstance(sub, basestring):
sub = str(sub)
start, stop, step = index.indices(len(self.data))
if step == -1:
start, stop = stop+1, start+1
sub = sub[::-1]
elif step != 1:
# XXX(twouters): I guess we should be reimplementing
# the extended slice assignment/deletion algorithm here...
raise TypeError, "invalid step in slicing assignment"
start = min(start, stop)
self.data = self.data[:start] + sub + self.data[stop:]
else:
if index < 0:
index += len(self.data)
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + sub + self.data[index+1:]
def __delitem__(self, index):
if isinstance(index, slice):
start, stop, step = index.indices(len(self.data))
if step == -1:
start, stop = stop+1, start+1
elif step != 1:
# XXX(twouters): see same block in __setitem__
raise TypeError, "invalid step in slicing deletion"
start = min(start, stop)
self.data = self.data[:start] + self.data[stop:]
else:
if index < 0:
index += len(self.data)
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + self.data[index+1:]
    def __setslice__(self, start, end, sub):
        """Assign to self[start:end] (deprecated Py2 slice protocol).

        Negative bounds are clamped to 0, matching built-in behaviour
        for the old __setslice__ hook.
        """
        start = max(start, 0); end = max(end, 0)
        if isinstance(sub, UserString):
            self.data = self.data[:start]+sub.data+self.data[end:]
        elif isinstance(sub, basestring):
            self.data = self.data[:start]+sub+self.data[end:]
        else:
            # Anything else is stringified before splicing.
            self.data = self.data[:start]+str(sub)+self.data[end:]
def __delslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
self.data = self.data[:start] + self.data[end:]
    def immutable(self):
        """Return an immutable UserString copy of the current value."""
        return UserString(self.data)
    def __iadd__(self, other):
        """In-place concatenation (self += other); returns self as the
        augmented-assignment protocol requires."""
        if isinstance(other, UserString):
            self.data += other.data
        elif isinstance(other, basestring):
            self.data += other
        else:
            # Non-string operands are stringified before appending.
            self.data += str(other)
        return self
def __imul__(self, n):
self.data *= n
return self
    def insert(self, index, value):
        """Insert *value* at *index* via zero-width slice assignment."""
        self[index:index] = value
if __name__ == "__main__":
    # execute the regression test to stdout, if called as a script:
    import os
    called_in_dir, called_as = os.path.split(sys.argv[0])
    called_as, py = os.path.splitext(called_as)
    if '-q' in sys.argv:
        # Quiet mode: silence the Py2 test harness's verbose output.
        from test import test_support
        test_support.verbose = 0
    # Importing test.test_<modulename> runs that module's regression tests.
    __import__('test.test_' + called_as.lower())
| mit |
fr34k8/atomic-reactor | atomic_reactor/plugins/pre_check_rebuild.py | 1 | 2098 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import json
import os
from atomic_reactor.plugin import PreBuildPlugin
def is_rebuild(workflow):
    """Return truthy when the check_rebuild plugin has already run on
    *workflow* and flagged it as an automated rebuild."""
    results = workflow.prebuild_results
    plugin_key = CheckRebuildPlugin.key
    return plugin_key in results and results[plugin_key]
class CheckRebuildPlugin(PreBuildPlugin):
    """
    Determine whether this is an automated rebuild
    If this is the first build, there will be a label set in the
    metadata to say so. The OSBS client sets this label when it
    creates the BuildConfig, but removes it after instantiating a
    Build.
    If that label is not present, this must be an automated rebuild.
    Example configuration:
    {
        "name": "check_rebuild",
        "args": {
            "key": "client",
            "value": "osbs"
        }
    }
    """
    key = "check_rebuild"
    can_fail = False  # We really want to stop the process
    def __init__(self, tasker, workflow, key, value):
        """
        constructor
        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param key: str, key of label used to indicate first build
        :param value: str, value of label used to indicate first build
        """
        # call parent constructor
        super(CheckRebuildPlugin, self).__init__(tasker, workflow)
        self.label_key = key
        self.label_value = value
    def run(self):
        """
        run the plugin

        :return: bool, False when the first-build label is still present
                 (i.e. not a rebuild), True otherwise
        :raises KeyError: when the $BUILD environment variable is unset
        """
        try:
            # The build JSON is injected into the container environment
            # by the build system.
            build_json = json.loads(os.environ["BUILD"])
        except KeyError:
            self.log.error("No $BUILD env variable. Probably not running in build container")
            raise
        metadata = build_json.get("metadata", {})
        # Label still present => OSBS created this build directly.
        if metadata.get(self.label_key) == self.label_value:
            self.log.info("This is not a rebuild")
            return False
        self.log.info("This is a rebuild")
        return True
| bsd-3-clause |
xkmato/youtube-dl | youtube_dl/extractor/sina.py | 107 | 2755 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
compat_urllib_parse,
)
class SinaIE(InfoExtractor):
    """Extractor for video.sina.com.cn pages and Weibo embed URLs."""
    _VALID_URL = r'''(?x)https?://(.*?\.)?video\.sina\.com\.cn/
                        (
                            (.+?/(((?P<pseudo_id>\d+).html)|(.*?(\#|(vid=)|b/)(?P<id>\d+?)($|&|\-))))
                            |
                            # This is used by external sites like Weibo
                            (api/sinawebApi/outplay.php/(?P<token>.+?)\.swf)
                        )
                  '''
    _TESTS = [
        {
            'url': 'http://video.sina.com.cn/news/vlist/zt/chczlj2013/?opsubject_id=top12#110028898',
            'md5': 'd65dd22ddcf44e38ce2bf58a10c3e71f',
            'info_dict': {
                'id': '110028898',
                'ext': 'flv',
                'title': '《中国新闻》 朝鲜要求巴拿马立即释放被扣船员',
            }
        },
        {
            'url': 'http://video.sina.com.cn/v/b/101314253-1290078633.html',
            'info_dict': {
                'id': '101314253',
                'ext': 'flv',
                'title': '军方提高对朝情报监视级别',
            },
        },
    ]
    def _extract_video(self, video_id):
        # Resolve the playable URL from Sina's XML playlist service and
        # the thumbnail from a separate metadata endpoint.
        data = compat_urllib_parse.urlencode({'vid': video_id})
        url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data,
                                     video_id, 'Downloading video url')
        image_page = self._download_webpage(
            'http://interface.video.sina.com.cn/interface/common/getVideoImage.php?%s' % data,
            video_id, 'Downloading thumbnail info')
        # The thumbnail endpoint returns "image=<url>"; keep the value part.
        return {'id': video_id,
                'url': url_doc.find('./durl/url').text,
                'ext': 'flv',
                'title': url_doc.find('./vname').text,
                'thumbnail': image_page.split('=')[1],
                }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        if mobj.group('token') is not None:
            # The video id is in the redirected url
            self.to_screen('Getting video id')
            request = compat_urllib_request.Request(url)
            request.get_method = lambda: 'HEAD'
            (_, urlh) = self._download_webpage_handle(request, 'NA', False)
            return self._real_extract(urlh.geturl())
        elif video_id is None:
            # Only a pseudo id in the URL: scrape the real id from the page.
            pseudo_id = mobj.group('pseudo_id')
            webpage = self._download_webpage(url, pseudo_id)
            video_id = self._search_regex(r'vid:\'(\d+?)\'', webpage, 'video id')
        return self._extract_video(video_id)
| unlicense |
andyfaff/scipy | doc/source/tutorial/examples/optimize_global_1.py | 12 | 1752 | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
def eggholder(x):
    """Eggholder test function of two variables.

    Highly multimodal; its global minimum is approximately -959.6407
    at (512, 404.2319).  Accepts array input and broadcasts.
    """
    shifted = x[1] + 47
    term_a = -shifted * np.sin(np.sqrt(abs(x[0] / 2 + shifted)))
    term_b = -x[0] * np.sin(np.sqrt(abs(x[0] - shifted)))
    return term_a + term_b
# Search domain and a dense grid for visualising the surface.
bounds = [(-512, 512), (-512, 512)]
x = np.arange(-512, 513)
y = np.arange(-512, 513)
xgrid, ygrid = np.meshgrid(x, y)
xy = np.stack([xgrid, ygrid])
# Run each global optimizer on the same problem for comparison.
results = dict()
results['shgo'] = optimize.shgo(eggholder, bounds)
results['DA'] = optimize.dual_annealing(eggholder, bounds)
results['DE'] = optimize.differential_evolution(eggholder, bounds)
# NOTE(review): basinhopping's second argument is the starting point x0,
# not bounds -- here the bounds list is being used as the start; confirm
# this is intended.
results['BH'] = optimize.basinhopping(eggholder, bounds)
results['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=256, iters=5,
                                      sampling_method='sobol')
# Plot the function surface as a grayscale image.
fig = plt.figure(figsize=(4.5, 4.5))
ax = fig.add_subplot(111)
im = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',
               cmap='gray')
ax.set_xlabel('x')
ax.set_ylabel('y')
def plot_point(res, marker='o', color=None):
    # Mark an optimizer result on the image.  Shift by +512 because the
    # image's pixel (0, 0) corresponds to coordinate (-512, -512).
    # Relies on the module-level `ax`.
    ax.plot(512+res.x[0], 512+res.x[1], marker=marker, color=color, ms=10)
plot_point(results['BH'], color='y')  # basinhopping           - yellow
plot_point(results['DE'], color='c')  # differential_evolution - cyan
plot_point(results['DA'], color='w')  # dual_annealing.        - white
# SHGO produces multiple minima, plot them all (with a smaller marker size)
plot_point(results['shgo'], color='r', marker='+')
plot_point(results['shgo_sobol'], color='r', marker='x')
for i in range(results['shgo_sobol'].xl.shape[0]):
    # xl holds all local minima found by shgo; plot each as a red dot.
    ax.plot(512 + results['shgo_sobol'].xl[i, 0],
            512 + results['shgo_sobol'].xl[i, 1],
            'ro', ms=2)
# Widen the axis limits slightly so edge markers remain visible.
ax.set_xlim([-4, 514*2])
ax.set_ylim([-4, 514*2])
fig.tight_layout()
plt.show()
| bsd-3-clause |
neuroidss/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
    """Poll the FLTK event loop forever.

    Guarded by the module-level Fl_running lock so that at most one
    polling thread is ever started.
    """
    global Fl_running
    if Fl_running.acquire(0):
        # Non-blocking acquire succeeded: we own the loop.
        while True:
            Fltk.Fl.check()
            time.sleep(0.005)
    else:
        print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
# Map matplotlib cursor ids to FLTK cursor constants.
cursord= {
    cursors.HAND: Fltk.FL_CURSOR_HAND,
    cursors.POINTER: Fltk.FL_CURSOR_ARROW,
    cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
    cursors.MOVE: Fltk.FL_CURSOR_MOVE
    }
# Map FLTK key codes of modifier keys to matplotlib key names.
# NOTE(review): the FL_Control_R / FL_Control_L pairs appear twice;
# the duplicate dict keys are harmless but redundant.
special_key={
    Fltk.FL_Shift_R:'shift',
    Fltk.FL_Shift_L:'shift',
    Fltk.FL_Control_R:'control',
    Fltk.FL_Control_L:'control',
    Fltk.FL_Control_R:'control',
    Fltk.FL_Control_L:'control',
    65515:'win',
    65516:'win',
    }
def error_msg_fltk(msg, parent=None):
    # Show *msg* in a modal FLTK message box.  *parent* is accepted for
    # API symmetry with other backends but is unused here.
    Fltk.fl_message(msg)
def draw_if_interactive():
    """Redraw the active figure, but only in interactive mode."""
    if not matplotlib.is_interactive():
        return
    manager = Gcf.get_active()
    if manager is not None:
        manager.canvas.draw()
def ishow():
    """
    Show all the figures and enter the fltk mainloop in another thread
    This allows to keep control in an interactive python session
    Warning: does not work under windows
    This should be the last line of your script
    """
    for manager in Gcf.get_all_fig_managers():
        manager.show()
    if show._needmain:
        # Start the background polling thread exactly once; the flag is
        # shared with show().
        thread.start_new_thread(Fltk_run_interactive,())
        show._needmain = False
def show():
    """
    Show all the figures and enter the fltk mainloop
    This should be the last line of your script
    """
    for manager in Gcf.get_all_fig_managers():
        manager.show()
    #mainloop, if an fltk program exist no need to call that
    #threaded (and interractive) version
    if show._needmain:
        Fltk.Fl.run()
        show._needmain = False
# Function attribute acting as a "mainloop not yet entered" flag,
# shared by show() and ishow().
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    FigureClass = kwargs.pop('FigureClass', Figure)
    figure = FigureClass(*args, **kwargs)
    # The window is created tiny; FigureManagerFltkAgg resizes it to the
    # figure's pixel size later.
    window = Fltk.Fl_Double_Window(10,10,30,30)
    canvas = FigureCanvasFltkAgg(figure)
    window.end()
    window.show()
    window.make_current()
    figManager = FigureManagerFltkAgg(canvas, num, window)
    if matplotlib.is_interactive():
        figManager.show()
    return figManager
class FltkCanvas(Fltk.Fl_Widget):
    """FLTK widget that displays an Agg-rendered figure and translates
    FLTK events into matplotlib canvas events."""

    def __init__(self, x, y, w, h, l, source):
        # NOTE(review): x, y and l are accepted but ignored; the widget
        # is always created at (0, 0) with the label "canvas".
        Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
        self._source = source          # the owning FigureCanvasFltkAgg
        self._oldsize = (None, None)   # last rendered size, to detect resizes
        self._draw_overlay = False     # rubber-band rectangle mode (zoom)
        self._button = None            # currently pressed mouse button
        self._key = None               # currently pressed key name

    def draw(self):
        """Re-render the figure (resizing first if the widget size
        changed) and blit the full RGBA buffer to the screen."""
        newsize = (self.w(), self.h())
        if self._oldsize != newsize:
            self._oldsize = newsize
            self._source.resize(newsize)
        self._source.draw()
        t1, t2, w, h = self._source.figure.bbox.bounds
        Fltk.fl_draw_image(self._source.buffer_rgba(0, 0), 0, 0,
                           int(w), int(h), 4, 0)
        self.redraw()

    def blit(self, bbox=None):
        """Blit the whole figure, or only *bbox*, to the screen.

        Bug fix: the original bound ``wo`` (the full-figure width used
        as the scan-line stride) only in the ``bbox is not None``
        branch, so ``blit(None)`` raised NameError.  The stride is now
        defined on both paths.
        """
        if bbox is None:
            t1, t2, w, h = self._source.figure.bbox.bounds
            wo = w  # full-figure blit: stride equals the figure width
        else:
            t1o, t2o, wo, ho = self._source.figure.bbox.bounds
            t1, t2, w, h = bbox.bounds
        x, y = int(t1), int(t2)
        Fltk.fl_draw_image(self._source.buffer_rgba(x, y), x, y,
                           int(w), int(h), 4, int(wo) * 4)
        #self.redraw()

    def handle(self, event):
        """Dispatch one FLTK event; return 1 if consumed, else 0
        (FLTK convention)."""
        x = Fltk.Fl.event_x()
        y = Fltk.Fl.event_y()
        # FLTK's origin is top-left; matplotlib's is bottom-left.
        yf = self._source.figure.bbox.height() - y
        if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
            return 1
        elif event == Fltk.FL_KEYDOWN:
            ikey = Fltk.Fl.event_key()
            if ikey <= 255:
                self._key = chr(ikey)
            else:
                # Non-ASCII codes: only known modifier keys are mapped.
                try:
                    self._key = special_key[ikey]
                except KeyError:
                    self._key = None
            FigureCanvasBase.key_press_event(self._source, self._key)
            return 1
        elif event == Fltk.FL_KEYUP:
            # Note: deliberately falls through to return 0, as before.
            FigureCanvasBase.key_release_event(self._source, self._key)
            self._key = None
        elif event == Fltk.FL_PUSH:
            self.window().make_current()
            if Fltk.Fl.event_button1():
                self._button = 1
            elif Fltk.Fl.event_button2():
                self._button = 2
            elif Fltk.Fl.event_button3():
                self._button = 3
            else:
                self._button = None
            if self._draw_overlay:
                # Remember the anchor point of the rubber-band rectangle.
                self._oldx = x
                self._oldy = y
            # Single and double clicks were reported identically in the
            # original duplicated branches; merged here.
            FigureCanvasBase.button_press_event(self._source, x, yf,
                                                self._button)
            return 1
        elif event == Fltk.FL_ENTER:
            self.take_focus()
            return 1
        elif event == Fltk.FL_LEAVE:
            return 1
        elif event == Fltk.FL_MOVE:
            FigureCanvasBase.motion_notify_event(self._source, x, yf)
            return 1
        elif event == Fltk.FL_DRAG:
            self.window().make_current()
            if self._draw_overlay:
                self._dx = Fltk.Fl.event_x() - self._oldx
                self._dy = Fltk.Fl.event_y() - self._oldy
                Fltk.fl_overlay_rect(self._oldx, self._oldy,
                                     self._dx, self._dy)
            FigureCanvasBase.motion_notify_event(self._source, x, yf)
            return 1
        elif event == Fltk.FL_RELEASE:
            self.window().make_current()
            if self._draw_overlay:
                Fltk.fl_overlay_clear()
            FigureCanvasBase.button_release_event(self._source, x, yf,
                                                  self._button)
            self._button = None
            return 1
        return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
    """Agg-based canvas whose on-screen surface is a FltkCanvas widget."""
    def __init__(self, figure):
        FigureCanvasAgg.__init__(self,figure)
        t1,t2,w,h = self.figure.bbox.bounds
        w, h = int(w), int(h)
        self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
        #self.draw()
    def resize(self,size):
        # Convert the new pixel size to inches at the current dpi so
        # the figure re-lays itself out for the new window size.
        w, h = size
        # compute desired figure size in inches
        dpival = self.figure.dpi.get()
        winch = w/dpival
        hinch = h/dpival
        self.figure.set_size_inches(winch,hinch)
    def draw(self):
        # Render with Agg, then ask the FLTK widget to repaint.
        FigureCanvasAgg.draw(self)
        self.canvas.redraw()
    def blit(self,bbox):
        self.canvas.blit(bbox)
    show = draw  # backend API alias: show() just redraws
    def widget(self):
        # The embeddable FLTK widget backing this canvas.
        return self.canvas
    def start_event_loop(self,timeout):
        FigureCanvasBase.start_event_loop_default(self,timeout)
    start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
    def stop_event_loop(self):
        FigureCanvasBase.stop_event_loop_default(self)
    stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
    # Window close callback: hide the FLTK window and unregister the
    # figure from matplotlib's figure-manager registry.
    figman.window.hide()
    Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
    """
    Public attributes
    canvas : The FigureCanvas instance
    num : The Figure number
    toolbar : The fltk.Toolbar
    window : The fltk.Window
    """
    def __init__(self, canvas, num, window):
        FigureManagerBase.__init__(self, canvas, num)
        #Fltk container window
        t1,t2,w,h = canvas.figure.bbox.bounds
        w, h = int(w), int(h)
        self.window = window
        # Extra 30 px of height leaves room for the toolbar strip.
        self.window.size(w,h+30)
        self.window_title="Figure %d" % num
        self.window.label(self.window_title)
        self.window.size_range(350,200)
        self.window.callback(destroy_figure,self)
        self.canvas = canvas
        self._num = num
        # Toolbar style is chosen by the matplotlib rc setting.
        if matplotlib.rcParams['toolbar']=='classic':
            self.toolbar = NavigationToolbar( canvas, self )
        elif matplotlib.rcParams['toolbar']=='toolbar2':
            self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
        else:
            self.toolbar = None
        self.window.add_resizable(canvas.widget())
        if self.toolbar:
            self.window.add(self.toolbar.widget())
            self.toolbar.update()
        self.window.show()
        def notify_axes_change(fig):
            'this will be called whenever the current axes is changed'
            if self.toolbar != None: self.toolbar.update()
        self.canvas.figure.add_axobserver(notify_axes_change)
    def resize(self, event):
        width, height = event.width, event.height
        self.toolbar.configure(width=width) # , height=height)
    def show(self):
        # Keep focus management out of the draw path via FocusManager.
        _focus = windowing.FocusManager()
        self.canvas.draw()
        self.window.redraw()
    def set_window_title(self, title):
        self.window_title=title
        self.window.label(title)
class AxisMenu:
    """Per-toolbar 'Axes' menu button that lets the user choose which
    axes the pan/zoom toolbar buttons act on."""
    def __init__(self, toolbar):
        self.toolbar=toolbar
        self._naxes = toolbar.naxes
        self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
        self._mbutton.add("Select All",0,select_all,self,0)
        self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
        # One toggle entry per axes, all initially selected.
        self._axis_txt=[]
        self._axis_var=[]
        for i in range(self._naxes):
            self._axis_txt.append("Axis %d" % (i+1))
            self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
        for i in range(self._naxes):
            self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
            self._axis_var[i].set()
    def adjust(self, naxes):
        # Grow or shrink the menu to match the figure's axes count.
        if self._naxes < naxes:
            for i in range(self._naxes, naxes):
                self._axis_txt.append("Axis %d" % (i+1))
                self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
            for i in range(self._naxes, naxes):
                self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
                self._axis_var[i].set()
        elif self._naxes > naxes:
            for i in range(self._naxes-1, naxes-1, -1):
                self._mbutton.remove(i+2)  # +2 skips the two fixed entries
            if(naxes):
                self._axis_var=self._axis_var[:naxes-1]
                self._axis_txt=self._axis_txt[:naxes-1]
            else:
                self._axis_var=[]
                self._axis_txt=[]
        self._naxes = naxes
        set_active(0,self)
    def widget(self):
        return self._mbutton
    def get_indices(self):
        # Indices of the currently checked axes entries.
        a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
        return a
def set_active(ptr,amenu):
    # Menu callback: push the checked-axes selection into the toolbar.
    amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
    # Menu callback: check every currently unchecked axes entry, then
    # refresh the toolbar's active set.
    for a in amenu._axis_var:
        if not a.value(): a.set()
    set_active(ptr,amenu)
def select_all(ptr,amenu):
    # Menu callback: check every axes entry, then refresh the toolbar.
    for a in amenu._axis_var:
        a.set()
    set_active(ptr,amenu)
class FLTKButton:
    """Small wrapper around an FLTK button with an image, tooltip and
    callback.  *type* selects the button flavour: "classic", "repeat",
    "light" or "pushed" (toggle)."""
    def __init__(self, text, file, command,argument,type="classic"):
        file = os.path.join(rcParams['datapath'], 'images', file)
        self.im = Fltk.Fl_PNM_Image(file)
        size=26
        # NOTE(review): an unrecognised *type* leaves self.b unset and
        # the attribute accesses below raise AttributeError.
        if type=="repeat":
            self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
            self.b.box(Fltk.FL_THIN_UP_BOX)
        elif type=="classic":
            self.b = Fltk.Fl_Button(0,0,size,10)
            self.b.box(Fltk.FL_THIN_UP_BOX)
        elif type=="light":
            self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
            self.b.box(Fltk.FL_THIN_UP_BOX)
        elif type=="pushed":
            self.b = Fltk.Fl_Button(0,0,size,10)
            self.b.box(Fltk.FL_UP_BOX)
            self.b.down_box(Fltk.FL_DOWN_BOX)
            self.b.type(Fltk.FL_TOGGLE_BUTTON)
        self.tooltiptext=text+" "
        self.b.tooltip(self.tooltiptext)
        self.b.callback(command,argument)
        self.b.image(self.im)
        self.b.deimage(self.im)
        self.type=type
    def widget(self):
        return self.b
class NavigationToolbar:
    """
    Classic-style toolbar (rcParams['toolbar'] == 'classic').
    Public attributes
      canvas - the FigureCanvas  (FigureCanvasFltkAgg = customised fltk.Widget)
    """
    def __init__(self, canvas, figman):
        #xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
        #height, width = 50, xmax-xmin
        self.canvas = canvas
        self.figman = figman
        Fltk.Fl_File_Icon.load_system_icons()
        # File chooser is created once and reused by save_figure().
        self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
        self._fc.hide()
        t1,t2,w,h = canvas.figure.bbox.bounds
        w, h = int(w), int(h)
        # Horizontal strip placed just below the canvas.
        self._group = Fltk.Fl_Pack(0,h+2,1000,26)
        self._group.type(Fltk.FL_HORIZONTAL)
        self._axes=self.canvas.figure.axes
        self.naxes = len(self._axes)
        self.omenu = AxisMenu( toolbar=self)
        # Repeat-buttons auto-fire while held down.
        self.bLeft = FLTKButton(
            text="Left", file="stock_left.ppm",
            command=pan,argument=(self,1,'x'),type="repeat")
        self.bRight = FLTKButton(
            text="Right", file="stock_right.ppm",
            command=pan,argument=(self,-1,'x'),type="repeat")
        self.bZoomInX = FLTKButton(
            text="ZoomInX",file="stock_zoom-in.ppm",
            command=zoom,argument=(self,1,'x'),type="repeat")
        self.bZoomOutX = FLTKButton(
            text="ZoomOutX", file="stock_zoom-out.ppm",
            command=zoom, argument=(self,-1,'x'),type="repeat")
        self.bUp = FLTKButton(
            text="Up", file="stock_up.ppm",
            command=pan,argument=(self,1,'y'),type="repeat")
        self.bDown = FLTKButton(
            text="Down", file="stock_down.ppm",
            command=pan,argument=(self,-1,'y'),type="repeat")
        self.bZoomInY = FLTKButton(
            text="ZoomInY", file="stock_zoom-in.ppm",
            command=zoom,argument=(self,1,'y'),type="repeat")
        self.bZoomOutY = FLTKButton(
            text="ZoomOutY",file="stock_zoom-out.ppm",
            command=zoom, argument=(self,-1,'y'),type="repeat")
        self.bSave = FLTKButton(
            text="Save", file="stock_save_as.ppm",
            command=save_figure, argument=self)
        self._group.end()
    def widget(self):
        return self._group
    def close(self):
        Gcf.destroy(self.figman._num)
    def set_active(self, ind):
        # Record which axes the pan/zoom buttons should operate on.
        self._ind = ind
        self._active = [ self._axes[i] for i in self._ind ]
    def update(self):
        # Re-sync the axes menu after axes were added/removed.
        self._axes = self.canvas.figure.axes
        naxes = len(self._axes)
        self.omenu.adjust(naxes)
def pan(ptr, arg):
    """Toolbar callback: pan every active axes one step.

    *arg* is a (toolbar, direction, axis) tuple where axis is 'x' or
    anything else for y; *ptr* is the FLTK widget (unused).
    """
    toolbar, direction, axis = arg
    for axes in toolbar._active:
        handler = axes.panx if axis == 'x' else axes.pany
        handler(direction)
    toolbar.figman.show()
def zoom(ptr, arg):
    """Toolbar callback: zoom every active axes one step.

    *arg* is a (toolbar, direction, axis) tuple where axis is 'x' or
    anything else for y; *ptr* is the FLTK widget (unused).
    """
    toolbar, direction, axis = arg
    for axes in toolbar._active:
        handler = axes.zoomx if axis == 'x' else axes.zoomy
        handler(direction)
    toolbar.figman.show()
def save_figure(ptr,base):
    # Save-button callback: run a modal FLTK file chooser preloaded with
    # the canvas's supported file types, then print the figure to the
    # chosen file in the chosen format.
    filetypes = base.canvas.get_supported_filetypes()
    default_filetype = base.canvas.get_default_filetype()
    sorted_filetypes = filetypes.items()
    sorted_filetypes.sort()
    selected_filter = 0
    filters = []
    for i, (ext, name) in enumerate(sorted_filetypes):
        filter = '%s (*.%s)' % (name, ext)
        filters.append(filter)
        if ext == default_filetype:
            selected_filter = i
    filters = '\t'.join(filters)
    file_chooser=base._fc
    file_chooser.filter(filters)
    file_chooser.filter_value(selected_filter)
    file_chooser.show()
    # Manual modal loop: pump FLTK events until the chooser closes.
    while file_chooser.visible() :
        Fltk.Fl.wait()
    fname=None
    if(file_chooser.count() and file_chooser.value(0) != None):
        fname=""
        (status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
    if fname is None: # Cancel
        return
    #start from last directory
    lastDir = os.path.dirname(fname)
    file_chooser.directory(lastDir)
    # The selected filter index maps back into the sorted (ext, name) list.
    format = sorted_filetypes[file_chooser.filter_value()][0]
    try:
        base.canvas.print_figure(fname, format=format)
    except IOError, msg:
        err = '\n'.join(map(str, msg))
        msg = 'Failed to save %s: Error msg was\n\n%s' % (
            fname, err)
        error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
    """
    Toolbar2-style toolbar (rcParams['toolbar'] == 'toolbar2').
    Public attributes
      canvas - the FigureCanvas
      figman - the Figure manager
    """
    def __init__(self, canvas, figman):
        self.canvas = canvas
        self.figman = figman
        NavigationToolbar2.__init__(self, canvas)
        # Mutually exclusive mode flags for the Pan and Zoom toggle buttons.
        self.pan_selected=False
        self.zoom_selected=False
    def set_cursor(self, cursor):
        Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
    def dynamic_update(self):
        self.canvas.draw()
    def pan(self,*args):
        # Toggle pan mode off/on and force zoom mode off; sync both
        # toggle buttons' visual state.
        self.pan_selected=not  self.pan_selected
        self.zoom_selected = False
        self.canvas.canvas._draw_overlay= False
        if self.pan_selected:
            self.bPan.widget().value(1)
        else:
            self.bPan.widget().value(0)
        # NOTE(review): zoom_selected was just forced False, so this
        # branch always clears the Zoom button.
        if self.zoom_selected:
            self.bZoom.widget().value(1)
        else:
            self.bZoom.widget().value(0)
        NavigationToolbar2.pan(self,args)
    def zoom(self,*args):
        # Toggle zoom mode off/on (rubber-band overlay) and force pan
        # mode off; sync both toggle buttons' visual state.
        self.zoom_selected=not  self.zoom_selected
        self.canvas.canvas._draw_overlay=self.zoom_selected
        self.pan_selected = False
        # NOTE(review): pan_selected was just forced False, so this
        # branch always clears the Pan button.
        if self.pan_selected:
            self.bPan.widget().value(1)
        else:
            self.bPan.widget().value(0)
        if self.zoom_selected:
            self.bZoom.widget().value(1)
        else:
            self.bZoom.widget().value(0)
        NavigationToolbar2.zoom(self,args)
    def configure_subplots(self,*args):
        # Open a small auxiliary window hosting the SubplotTool.
        window = Fltk.Fl_Double_Window(100,100,480,240)
        toolfig = Figure(figsize=(6,3))
        canvas = FigureCanvasFltkAgg(toolfig)
        window.end()
        toolfig.subplots_adjust(top=0.9)
        tool = SubplotTool(self.canvas.figure, toolfig)
        window.show()
        canvas.show()
    def _init_toolbar(self):
        # Build the button strip; called by NavigationToolbar2.__init__.
        Fltk.Fl_File_Icon.load_system_icons()
        self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
        self._fc.hide()
        t1,t2,w,h = self.canvas.figure.bbox.bounds
        w, h = int(w), int(h)
        self._group = Fltk.Fl_Pack(0,h+2,1000,26)
        self._group.type(Fltk.FL_HORIZONTAL)
        self._axes=self.canvas.figure.axes
        self.naxes = len(self._axes)
        self.omenu = AxisMenu( toolbar=self)
        self.bHome = FLTKButton(
            text="Home", file="home.ppm",
            command=self.home,argument=self)
        self.bBack = FLTKButton(
            text="Back", file="back.ppm",
            command=self.back,argument=self)
        self.bForward = FLTKButton(
            text="Forward", file="forward.ppm",
            command=self.forward,argument=self)
        self.bPan = FLTKButton(
            text="Pan/Zoom",file="move.ppm",
            command=self.pan,argument=self,type="pushed")
        self.bZoom = FLTKButton(
            text="Zoom to rectangle",file="zoom_to_rect.ppm",
            command=self.zoom,argument=self,type="pushed")
        self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
                                    command = self.configure_subplots,argument=self,type="pushed")
        self.bSave = FLTKButton(
            text="Save", file="filesave.ppm",
            command=save_figure, argument=self)
        self._group.end()
        # Status-line output widget (coordinates, mode messages).
        self.message = Fltk.Fl_Output(0,0,w,8)
        self._group.add_resizable(self.message)
        self.update()
    def widget(self):
        return self._group
    def close(self):
        Gcf.destroy(self.figman._num)
    def set_active(self, ind):
        self._ind = ind
        self._active = [ self._axes[i] for i in self._ind ]
    def update(self):
        # Re-sync the axes menu after axes were added/removed.
        self._axes = self.canvas.figure.axes
        naxes = len(self._axes)
        self.omenu.adjust(naxes)
        NavigationToolbar2.update(self)
    def set_message(self, s):
        self.message.value(s)
FigureManager = FigureManagerFltkAgg
| agpl-3.0 |
broferek/ansible | lib/ansible/modules/storage/netapp/na_ontap_ldap_client.py | 19 | 13037 | #!/usr/bin/python
'''
(c) 2018-2019, NetApp, Inc
GNU General Public License v3.0+
(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_ldap_client
short_description: NetApp ONTAP LDAP client
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.9'
author: Milan Zink (@zeten30) <zeten30@gmail.com>/<mzink@redhat.com>
description:
- Create, modify or delete LDAP client on NetApp ONTAP
options:
state:
description:
- Whether the specified LDAP client configuration exist or not.
choices: ['present', 'absent']
default: 'present'
type: str
vserver:
description:
- vserver/svm that holds LDAP client configuration
required: true
type: str
name:
description:
- The name of LDAP client configuration
required: true
type: str
ldap_servers:
description:
- Comma separated list of LDAP servers. FQDN's or IP addresses
- Required if I(state=present).
type: list
schema:
description:
- LDAP schema
- Required if I(state=present).
choices: ['AD-IDMU', 'AD-SFU', 'MS-AD-BIS', 'RFC-2307']
type: str
base_dn:
description:
- LDAP base DN
type: str
base_scope:
description:
- LDAP search scope
choices: ['subtree', 'onelevel', 'base']
type: str
port:
description:
- LDAP server port
type: int
query_timeout:
description:
- LDAP server query timeout
type: int
min_bind_level:
description:
- Minimal LDAP server bind level.
choices: ['anonymous', 'simple', 'sasl']
type: str
bind_dn:
description:
- LDAP bind user DN
type: str
bind_password:
description:
- LDAP bind user password
type: str
use_start_tls:
description:
- Start TLS on LDAP connection
choices: ['true', 'false']
type: str
referral_enabled:
description:
- LDAP Referral Chasing
choices: ['true', 'false']
type: str
session_security:
description:
- Client Session Security
choices: ['true', 'false']
type: str
'''
EXAMPLES = '''
- name: Create LDAP client
na_ontap_ldap_client:
state: present
name: 'example_ldap'
vserver: 'vserver1'
ldap_servers: 'ldap1.example.company.com,ldap2.example.company.com'
base_dn: 'dc=example,dc=company,dc=com'
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
'''
RETURN = '''
'''
import traceback
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapLDAPClient(object):
    '''
    LDAP Client definition class

    Implements create/modify/delete of an ONTAP LDAP client
    configuration through the ZAPI interface.
    '''
    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        # NOTE(review): `required_if` is not a valid per-argument key in
        # an AnsibleModule argument_spec (it is a module-level option,
        # already passed to AnsibleModule below); the keys on
        # `ldap_servers` and `schema` are silently ignored.
        self.argument_spec.update(dict(
            base_dn=dict(required=False, type='str'),
            base_scope=dict(required=False, default=None, choices=['subtree', 'onelevel', 'base']),
            bind_dn=dict(required=False, default=None, type='str'),
            bind_password=dict(type='str', required=False, default=None, no_log=True),
            name=dict(required=True, type='str'),
            ldap_servers=dict(required_if=[["state", "present"]], type='list'),
            min_bind_level=dict(required=False, default=None, choices=['anonymous', 'simple', 'sasl']),
            port=dict(required=False, default=None, type='int'),
            query_timeout=dict(required=False, default=None, type='int'),
            referral_enabled=dict(required=False, default=None, choices=['true', 'false']),
            schema=dict(required_if=[["state", "present"]], default=None, type='str', choices=['AD-IDMU', 'AD-SFU', 'MS-AD-BIS', 'RFC-2307']),
            session_security=dict(required=False, default=None, choices=['true', 'false']),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            use_start_tls=dict(required=False, default=None, choices=['true', 'false']),
            vserver=dict(required=True, type='str')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            required_if=[('state', 'present', ['ldap_servers', 'schema'])],
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
        # Scalar options that map 1:1 onto ZAPI child elements
        # (snake_case here -> kebab-case in the ZAPI call).
        self.simple_attributes = [
            'base_dn',
            'base_scope',
            'bind_dn',
            'bind_password',
            'min_bind_level',
            'port',
            'query_timeout',
            'referral_enabled',
            'session_security',
            'use_start_tls'
        ]
    def get_ldap_client(self, client_config_name=None, vserver_name=None):
        '''
        Checks if LDAP client config exists.
        :return:
            ldap client config object if found
            None if not found
        :rtype: object/None
        '''
        # Make query
        client_config_info = netapp_utils.zapi.NaElement('ldap-client-get-iter')
        if client_config_name is None:
            client_config_name = self.parameters['name']
        if vserver_name is None:
            # '*' matches any vserver in the ZAPI query.
            vserver_name = '*'
        query_details = netapp_utils.zapi.NaElement.create_node_with_children('ldap-client',
                                                                              **{'ldap-client-config': client_config_name, 'vserver': vserver_name})
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        client_config_info.add_child_elem(query)
        result = self.server.invoke_successfully(client_config_info, enable_tunneling=False)
        # Get LDAP client configuration details
        client_config_details = None
        if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1):
            attributes_list = result.get_child_by_name('attributes-list')
            client_config_info = attributes_list.get_child_by_name('ldap-client')
        # Get LDAP servers list
            ldap_server_list = list()
            get_list = client_config_info.get_child_by_name('ldap-servers')
            if get_list is not None:
                ldap_servers = get_list.get_children()
                for ldap_server in ldap_servers:
                    ldap_server_list.append(ldap_server.get_content())
        # Define config details structure
            client_config_details = {'name': client_config_info.get_child_content('ldap-client-config'),
                                     'ldap_servers': client_config_info.get_child_content('ldap-servers'),
                                     'base_dn': client_config_info.get_child_content('base-dn'),
                                     'base_scope': client_config_info.get_child_content('base-scope'),
                                     'bind_dn': client_config_info.get_child_content('bind-dn'),
                                     'bind_password': client_config_info.get_child_content('bind-password'),
                                     'min_bind_level': client_config_info.get_child_content('min-bind-level'),
                                     'port': client_config_info.get_child_content('port'),
                                     'query_timeout': client_config_info.get_child_content('query-timeout'),
                                     'referral_enabled': client_config_info.get_child_content('referral-enabled'),
                                     'schema': client_config_info.get_child_content('schema'),
                                     'session_security': client_config_info.get_child_content('session-security'),
                                     'use_start_tls': client_config_info.get_child_content('use-start-tls'),
                                     'vserver': client_config_info.get_child_content('vserver')}
        return client_config_details
    def create_ldap_client(self):
        '''
        Create LDAP client configuration
        '''
        # LDAP servers NaElement
        ldap_servers_element = netapp_utils.zapi.NaElement('ldap-servers')
        # Mandatory options
        for ldap_server_name in self.parameters['ldap_servers']:
            ldap_servers_element.add_new_child('string', ldap_server_name)
        options = {
            'ldap-client-config': self.parameters['name'],
            'schema': self.parameters['schema'],
        }
        # Other options/attributes
        for attribute in self.simple_attributes:
            if self.parameters.get(attribute) is not None:
                options[str(attribute).replace('_', '-')] = self.parameters[attribute]
        # Initialize NaElement
        ldap_client_create = netapp_utils.zapi.NaElement.create_node_with_children('ldap-client-create', **options)
        ldap_client_create.add_child_elem(ldap_servers_element)
        # Try to create LDAP configuration
        try:
            self.server.invoke_successfully(ldap_client_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error creating LDAP client %s: %s' % (self.parameters['name'], to_native(errcatch)),
                                  exception=traceback.format_exc())
    def delete_ldap_client(self):
        '''
        Delete LDAP client configuration
        '''
        ldap_client_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'ldap-client-delete', **{'ldap-client-config': self.parameters['name']})
        try:
            self.server.invoke_successfully(ldap_client_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error deleting LDAP client configuration %s: %s' % (
                self.parameters['name'], to_native(errcatch)), exception=traceback.format_exc())
    def modify_ldap_client(self, modify):
        '''
        Modify LDAP client
        :param modify: list of modify attributes
        '''
        ldap_client_modify = netapp_utils.zapi.NaElement('ldap-client-modify')
        ldap_client_modify.add_new_child('ldap-client-config', self.parameters['name'])
        for attribute in modify:
            # LDAP_servers
            if attribute == 'ldap_servers':
                ldap_servers_element = netapp_utils.zapi.NaElement('ldap-servers')
                for ldap_server_name in self.parameters['ldap_servers']:
                    ldap_servers_element.add_new_child('string', ldap_server_name)
                ldap_client_modify.add_child_elem(ldap_servers_element)
            # Simple attributes
            if attribute in self.simple_attributes:
                ldap_client_modify.add_new_child(str(attribute).replace('_', '-'), self.parameters[attribute])
        # Try to modify LDAP client
        try:
            self.server.invoke_successfully(ldap_client_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as errcatch:
            self.module.fail_json(msg='Error modifying LDAP client %s: %s' % (self.parameters['name'], to_native(errcatch)),
                                  exception=traceback.format_exc())
    def apply(self):
        '''Call create/modify/delete operations.'''
        current = self.get_ldap_client()
        # Decide between create/delete (cd_action) and attribute modify.
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        # create an ems log event for users with auto support turned on
        netapp_utils.ems_log_event("na_ontap_ldap_client", self.server)
        if self.na_helper.changed:
            if self.module.check_mode:
                # check mode: report changed without touching the system
                pass
            else:
                if cd_action == 'create':
                    self.create_ldap_client()
                elif cd_action == 'delete':
                    self.delete_ldap_client()
                elif modify:
                    self.modify_ldap_client(modify)
        self.module.exit_json(changed=self.na_helper.changed)
#
# MAIN
#
def main():
    '''ONTAP LDAP client configuration'''
    # Build the module wrapper and run the idempotent apply cycle.
    client = NetAppOntapLDAPClient()
    client.apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
3dfxmadscientist/CBSS | addons/google_base_account/google_base_account.py | 53 | 1297 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_users(osv.osv):
    """Extend res.users with Google (Gmail) account credentials."""
    _inherit = "res.users"
    _columns = {
        # Gmail login name used when connecting to Google services on
        # behalf of this user.
        'gmail_user': fields.char('Username', size=64,),
        # NOTE(review): appears to be stored as plain text by this model —
        # confirm whether any encryption/obfuscation happens elsewhere.
        'gmail_password': fields.char('Password', size=64),
    }
# Instantiating the class registers the model in the ORM (OpenERP idiom).
res_users()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
TalShafir/ansible | lib/ansible/modules/network/cloudengine/ce_bgp.py | 43 | 80680 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata block (consumed by ansible-doc tooling).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_bgp
version_added: "2.4"
short_description: Manages BGP configuration on HUAWEI CloudEngine switches.
description:
- Manages BGP configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent']
as_number:
description:
- Local AS number.
The value is a string of 1 to 11 characters.
graceful_restart:
description:
- Enable GR of the BGP speaker in the specified address family, peer address, or peer group.
default: no_use
choices: ['no_use','true','false']
time_wait_for_rib:
description:
- Period of waiting for the End-Of-RIB flag.
The value is an integer ranging from 3 to 3000. The default value is 600.
as_path_limit:
description:
- Maximum number of AS numbers in the AS_Path attribute. The default value is 255.
check_first_as:
description:
- Check the first AS in the AS_Path of the update messages from EBGP peers.
default: no_use
choices: ['no_use','true','false']
confed_id_number:
description:
- Confederation ID.
The value is a string of 1 to 11 characters.
confed_nonstanded:
description:
- Configure the device to be compatible with devices in a nonstandard confederation.
default: no_use
choices: ['no_use','true','false']
bgp_rid_auto_sel:
description:
- The function to automatically select router IDs for all VPN BGP instances is enabled.
default: no_use
choices: ['no_use','true','false']
keep_all_routes:
description:
- If the value is true, the system stores all route update messages received from all peers (groups) after
BGP connection setup.
If the value is false, the system stores only BGP update messages that are received from peers and pass
the configured import policy.
default: no_use
choices: ['no_use','true','false']
memory_limit:
description:
- Support BGP RIB memory protection.
default: no_use
choices: ['no_use','true','false']
gr_peer_reset:
description:
- Peer disconnection through GR.
default: no_use
choices: ['no_use','true','false']
is_shutdown:
description:
- Interrupt BGP all neighbor.
default: no_use
choices: ['no_use','true','false']
suppress_interval:
description:
- Suppress interval.
hold_interval:
description:
- Hold interval.
clear_interval:
description:
- Clear interval.
confed_peer_as_num:
description:
- Confederation AS number, in two-byte or four-byte format.
The value is a string of 1 to 11 characters.
vrf_name:
description:
- Name of a BGP instance. The name is a case-sensitive string of characters.
vrf_rid_auto_sel:
description:
- If the value is true, VPN BGP instances are enabled to automatically select router IDs.
If the value is false, VPN BGP instances are disabled from automatically selecting router IDs.
default: no_use
choices: ['no_use','true','false']
router_id:
description:
- ID of a router that is in IPv4 address format.
keepalive_time:
description:
- If the value of a timer changes, the BGP peer relationship between the routers is disconnected.
The value is an integer ranging from 0 to 21845. The default value is 60.
hold_time:
description:
- Hold time, in seconds. The value of the hold time can be 0 or range from 3 to 65535.
min_hold_time:
description:
- Min hold time, in seconds. The value of the hold time can be 0 or range from 20 to 65535.
conn_retry_time:
description:
- ConnectRetry interval. The value is an integer, in seconds. The default value is 32s.
ebgp_if_sensitive:
description:
- If the value is true, After the fast EBGP interface awareness function is enabled, EBGP sessions on
an interface are deleted immediately when the interface goes Down.
If the value is false, After the fast EBGP interface awareness function is enabled, EBGP sessions
on an interface are not deleted immediately when the interface goes Down.
default: no_use
choices: ['no_use','true','false']
default_af_type:
description:
- Type of a created address family, which can be IPv4 unicast or IPv6 unicast.
The default type is IPv4 unicast.
choices: ['ipv4uni','ipv6uni']
'''
EXAMPLES = '''
- name: CloudEngine BGP test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Enable BGP"
ce_bgp:
state: present
as_number: 100
confed_id_number: 250
provider: "{{ cli }}"
- name: "Disable BGP"
ce_bgp:
state: absent
as_number: 100
confed_id_number: 250
provider: "{{ cli }}"
- name: "Create confederation peer AS num"
ce_bgp:
state: present
confed_peer_as_num: 260
provider: "{{ cli }}"
'''
# Documentation of the module's return values (YAML consumed by the
# Ansible doc tooling).  The "sample" entries must be valid JSON; the
# proposed sample previously read {"as_number": "100", state": "present"}
# with a missing opening quote on "state".
RETURN = '''
changed:
    description: check to see if a change was made on the device
    returned: always
    type: boolean
    sample: true
proposed:
    description: k/v pairs of parameters passed into module
    returned: always
    type: dict
    sample: {"as_number": "100", "state": "present"}
existing:
    description: k/v pairs of existing aaa server
    returned: always
    type: dict
    sample: {"bgp_enable": [["100"], ["true"]]}
end_state:
    description: k/v pairs of aaa params after module execution
    returned: always
    type: dict
    sample: {"bgp_enable": [["100"], ["true"]]}
updates:
    description: command sent to the device
    returned: always
    type: list
    sample: ["bgp 100"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
# Sentinel return values used by the validation helpers in this module.
SUCCESS = """success"""
FAILED = """failed"""
# get bgp enable
CE_GET_BGP_ENABLE = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpSite>
<bgpEnable></bgpEnable>
<asNumber></asNumber>
</bgpSite>
</bgpcomm>
</bgp>
</filter>
"""
CE_GET_BGP_ENABLE_HEADER = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpSite>
"""
CE_GET_BGP_ENABLE_TAIL = """
</bgpSite>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp enable
CE_MERGE_BGP_ENABLE_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpSite operation="merge">
"""
CE_MERGE_BGP_ENABLE_TAIL = """
</bgpSite>
</bgpcomm>
</bgp>
</config>
"""
# get bgp confederation peer as
CE_GET_BGP_CONFED_PEER_AS = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpConfedPeerAss>
<bgpConfedPeerAs>
<confedPeerAsNum></confedPeerAsNum>
</bgpConfedPeerAs>
</bgpConfedPeerAss>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp confederation peer as
CE_MERGE_BGP_CONFED_PEER_AS = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpConfedPeerAss>
<bgpConfedPeerAs operation="merge">
<confedPeerAsNum>%s</confedPeerAsNum>
</bgpConfedPeerAs>
</bgpConfedPeerAss>
</bgpcomm>
</bgp>
</config>
"""
# create bgp confederation peer as
CE_CREATE_BGP_CONFED_PEER_AS = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpConfedPeerAss>
<bgpConfedPeerAs operation="create">
<confedPeerAsNum>%s</confedPeerAsNum>
</bgpConfedPeerAs>
</bgpConfedPeerAss>
</bgpcomm>
</bgp>
</config>
"""
# delete bgp confederation peer as
CE_DELETE_BGP_CONFED_PEER_AS = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpConfedPeerAss>
<bgpConfedPeerAs operation="delete">
<confedPeerAsNum>%s</confedPeerAsNum>
</bgpConfedPeerAs>
</bgpConfedPeerAss>
</bgpcomm>
</bgp>
</config>
"""
# get bgp instance
CE_GET_BGP_INSTANCE = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName></vrfName>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</filter>
"""
# get bgp instance
CE_GET_BGP_INSTANCE_HEADER = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
"""
CE_GET_BGP_INSTANCE_TAIL = """
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp instance
CE_MERGE_BGP_INSTANCE_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf operation="merge">
"""
CE_MERGE_BGP_INSTANCE_TAIL = """
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# create bgp instance
CE_CREATE_BGP_INSTANCE_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf operation="create">
"""
CE_CREATE_BGP_INSTANCE_TAIL = """
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# delete bgp instance
CE_DELETE_BGP_INSTANCE_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf operation="delete">
"""
CE_DELETE_BGP_INSTANCE_TAIL = """
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
def check_ip_addr(**kwargs):
    """Validate a dotted-quad IPv4 address.

    kwargs:
        ipaddr: the string to validate (surrounding whitespace ignored).

    Returns SUCCESS when *ipaddr* consists of exactly four integer octets
    each in [0, 255], FAILED otherwise.  Non-numeric octets (e.g.
    "a.b.c.d") previously raised ValueError from int(); they now return
    FAILED like any other invalid input.
    """
    ipaddr = kwargs["ipaddr"]
    octets = ipaddr.strip().split('.')
    if len(octets) != 4:
        return FAILED
    for octet in octets:
        try:
            value = int(octet)
        except ValueError:
            # reject non-numeric octets instead of crashing
            return FAILED
        if value < 0 or value > 255:
            return FAILED
    return SUCCESS
def check_bgp_enable_args(**kwargs):
    """Validate as_number; return True when the BGP AS must be configured.

    Aborts via module.fail_json when as_number is present but its length
    falls outside [1, 11].
    """
    module = kwargs["module"]
    as_number = module.params['as_number']
    if not as_number:
        return False
    if 0 < len(as_number) <= 11:
        return True
    module.fail_json(
        msg='Error: The len of as_number %s is out of [1 - 11].' % as_number)
    return False
def check_bgp_confed_args(**kwargs):
    """Validate confed_peer_as_num; return True when it must be configured.

    Aborts via module.fail_json when the value is present but its length
    falls outside [1, 11].
    """
    module = kwargs["module"]
    confed_peer_as_num = module.params['confed_peer_as_num']
    if not confed_peer_as_num:
        return False
    if 0 < len(confed_peer_as_num) <= 11:
        return True
    module.fail_json(
        msg='Error: The len of confed_peer_as_num %s is out of [1 - 11].' % confed_peer_as_num)
    return False
class Bgp(object):
""" Manages BGP configuration """
def netconf_get_config(self, **kwargs):
""" netconf_get_config """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = get_nc_config(module, conf_str)
return xml_str
def netconf_set_config(self, **kwargs):
""" netconf_set_config """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = set_nc_config(module, conf_str)
return xml_str
def check_bgp_enable_other_args(self, **kwargs):
""" check_bgp_enable_other_args """
module = kwargs["module"]
state = module.params['state']
result = dict()
need_cfg = False
graceful_restart = module.params['graceful_restart']
if graceful_restart != 'no_use':
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<gracefulRestart></gracefulRestart>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<gracefulRestart>(.*)</gracefulRestart>.*', recv_xml)
if re_find:
result["graceful_restart"] = re_find
if re_find[0] != graceful_restart:
need_cfg = True
else:
need_cfg = True
time_wait_for_rib = module.params['time_wait_for_rib']
if time_wait_for_rib:
if int(time_wait_for_rib) > 3000 or int(time_wait_for_rib) < 3:
module.fail_json(
msg='Error: The time_wait_for_rib %s is out of [3 - 3000].' % time_wait_for_rib)
else:
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<timeWaitForRib></timeWaitForRib>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(
module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<timeWaitForRib>(.*)</timeWaitForRib>.*', recv_xml)
if re_find:
result["time_wait_for_rib"] = re_find
if re_find[0] != time_wait_for_rib:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<timeWaitForRib>(.*)</timeWaitForRib>.*', recv_xml)
if re_find:
result["time_wait_for_rib"] = re_find
if re_find[0] == time_wait_for_rib:
need_cfg = True
as_path_limit = module.params['as_path_limit']
if as_path_limit:
if int(as_path_limit) > 2000 or int(as_path_limit) < 1:
module.fail_json(
msg='Error: The as_path_limit %s is out of [1 - 2000].' % as_path_limit)
else:
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<asPathLimit></asPathLimit>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(
module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<asPathLimit>(.*)</asPathLimit>.*', recv_xml)
if re_find:
result["as_path_limit"] = re_find
if re_find[0] != as_path_limit:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<asPathLimit>(.*)</asPathLimit>.*', recv_xml)
if re_find:
result["as_path_limit"] = re_find
if re_find[0] == as_path_limit:
need_cfg = True
check_first_as = module.params['check_first_as']
if check_first_as != 'no_use':
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<checkFirstAs></checkFirstAs>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<checkFirstAs>(.*)</checkFirstAs>.*', recv_xml)
if re_find:
result["check_first_as"] = re_find
if re_find[0] != check_first_as:
need_cfg = True
else:
need_cfg = True
confed_id_number = module.params['confed_id_number']
if confed_id_number:
if len(confed_id_number) > 11 or len(confed_id_number) == 0:
module.fail_json(
msg='Error: The len of confed_id_number %s is out of [1 - 11].' % confed_id_number)
else:
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<confedIdNumber></confedIdNumber>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(
module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<confedIdNumber>(.*)</confedIdNumber>.*', recv_xml)
if re_find:
result["confed_id_number"] = re_find
if re_find[0] != confed_id_number:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<confedIdNumber>(.*)</confedIdNumber>.*', recv_xml)
if re_find:
result["confed_id_number"] = re_find
if re_find[0] == confed_id_number:
need_cfg = True
confed_nonstanded = module.params['confed_nonstanded']
if confed_nonstanded != 'no_use':
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<confedNonstanded></confedNonstanded>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<confedNonstanded>(.*)</confedNonstanded>.*', recv_xml)
if re_find:
result["confed_nonstanded"] = re_find
if re_find[0] != confed_nonstanded:
need_cfg = True
else:
need_cfg = True
bgp_rid_auto_sel = module.params['bgp_rid_auto_sel']
if bgp_rid_auto_sel != 'no_use':
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<bgpRidAutoSel></bgpRidAutoSel>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<bgpRidAutoSel>(.*)</bgpRidAutoSel>.*', recv_xml)
if re_find:
result["bgp_rid_auto_sel"] = re_find
if re_find[0] != bgp_rid_auto_sel:
need_cfg = True
else:
need_cfg = True
keep_all_routes = module.params['keep_all_routes']
if keep_all_routes != 'no_use':
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<keepAllRoutes></keepAllRoutes>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<keepAllRoutes>(.*)</keepAllRoutes>.*', recv_xml)
if re_find:
result["keep_all_routes"] = re_find
if re_find[0] != keep_all_routes:
need_cfg = True
else:
need_cfg = True
memory_limit = module.params['memory_limit']
if memory_limit != 'no_use':
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<memoryLimit></memoryLimit>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<memoryLimit>(.*)</memoryLimit>.*', recv_xml)
if re_find:
result["memory_limit"] = re_find
if re_find[0] != memory_limit:
need_cfg = True
else:
need_cfg = True
gr_peer_reset = module.params['gr_peer_reset']
if gr_peer_reset != 'no_use':
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<grPeerReset></grPeerReset>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<grPeerReset>(.*)</grPeerReset>.*', recv_xml)
if re_find:
result["gr_peer_reset"] = re_find
if re_find[0] != gr_peer_reset:
need_cfg = True
else:
need_cfg = True
is_shutdown = module.params['is_shutdown']
if is_shutdown != 'no_use':
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<isShutdown></isShutdown>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<isShutdown>(.*)</isShutdown>.*', recv_xml)
if re_find:
result["is_shutdown"] = re_find
if re_find[0] != is_shutdown:
need_cfg = True
else:
need_cfg = True
suppress_interval = module.params['suppress_interval']
hold_interval = module.params['hold_interval']
clear_interval = module.params['clear_interval']
if suppress_interval:
if not hold_interval or not clear_interval:
module.fail_json(
msg='Error: Please input suppress_interval hold_interval clear_interval at the same time.')
if int(suppress_interval) > 65535 or int(suppress_interval) < 1:
module.fail_json(
msg='Error: The suppress_interval %s is out of [1 - 65535].' % suppress_interval)
else:
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<suppressInterval></suppressInterval>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(
module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<suppressInterval>(.*)</suppressInterval>.*', recv_xml)
if re_find:
result["suppress_interval"] = re_find
if re_find[0] != suppress_interval:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<suppressInterval>(.*)</suppressInterval>.*', recv_xml)
if re_find:
result["suppress_interval"] = re_find
if re_find[0] == suppress_interval:
need_cfg = True
if hold_interval:
if not suppress_interval or not clear_interval:
module.fail_json(
msg='Error: Please input suppress_interval hold_interval clear_interval at the same time.')
if int(hold_interval) > 65535 or int(hold_interval) < 1:
module.fail_json(
msg='Error: The hold_interval %s is out of [1 - 65535].' % hold_interval)
else:
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<holdInterval></holdInterval>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(
module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<holdInterval>(.*)</holdInterval>.*', recv_xml)
if re_find:
result["hold_interval"] = re_find
if re_find[0] != hold_interval:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<holdInterval>(.*)</holdInterval>.*', recv_xml)
if re_find:
result["hold_interval"] = re_find
if re_find[0] == hold_interval:
need_cfg = True
if clear_interval:
if not suppress_interval or not hold_interval:
module.fail_json(
msg='Error: Please input suppress_interval hold_interval clear_interval at the same time.')
if int(clear_interval) > 65535 or int(clear_interval) < 1:
module.fail_json(
msg='Error: The clear_interval %s is out of [1 - 65535].' % clear_interval)
else:
conf_str = CE_GET_BGP_ENABLE_HEADER + \
"<clearInterval></clearInterval>" + CE_GET_BGP_ENABLE_TAIL
recv_xml = self.netconf_get_config(
module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<clearInterval>(.*)</clearInterval>.*', recv_xml)
if re_find:
result["clear_interval"] = re_find
if re_find[0] != clear_interval:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<clearInterval>(.*)</clearInterval>.*', recv_xml)
if re_find:
result["clear_interval"] = re_find
if re_find[0] == clear_interval:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def check_bgp_instance_args(self, **kwargs):
""" check_bgp_instance_args """
module = kwargs["module"]
state = module.params['state']
need_cfg = False
vrf_name = module.params['vrf_name']
if vrf_name:
if len(vrf_name) > 31 or len(vrf_name) == 0:
module.fail_json(
msg='the len of vrf_name %s is out of [1 - 31].' % vrf_name)
conf_str = CE_GET_BGP_INSTANCE_HEADER + \
"<vrfName></vrfName>" + CE_GET_BGP_INSTANCE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
check_vrf_name = (vrf_name)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<vrfName>(.*)</vrfName>.*', recv_xml)
if re_find:
if check_vrf_name not in re_find:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<vrfName>(.*)</vrfName>.*', recv_xml)
if re_find:
if check_vrf_name in re_find:
need_cfg = True
return need_cfg
def check_bgp_instance_other_args(self, **kwargs):
""" check_bgp_instance_other_args """
module = kwargs["module"]
state = module.params['state']
result = dict()
need_cfg = False
vrf_name = module.params['vrf_name']
router_id = module.params['router_id']
if router_id:
if not vrf_name:
module.fail_json(
msg='Error: Please input vrf_name.')
if check_ip_addr(ipaddr=router_id) == FAILED:
module.fail_json(
msg='Error: The router_id %s is invalid.' % router_id)
conf_str = CE_GET_BGP_INSTANCE_HEADER + \
"<routerId></routerId>" + CE_GET_BGP_INSTANCE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routerId>(.*)</routerId>.*', recv_xml)
if re_find:
result["router_id"] = re_find
if re_find[0] != router_id:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<routerId>(.*)</routerId>.*', recv_xml)
if re_find:
result["router_id"] = re_find
if re_find[0] == router_id:
need_cfg = True
vrf_rid_auto_sel = module.params['vrf_rid_auto_sel']
if vrf_rid_auto_sel != 'no_use':
if not vrf_name:
module.fail_json(
msg='Error: Please input vrf_name.')
conf_str = CE_GET_BGP_INSTANCE_HEADER + \
"<vrfRidAutoSel></vrfRidAutoSel>" + CE_GET_BGP_INSTANCE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<vrfRidAutoSel>(.*)</vrfRidAutoSel>.*', recv_xml)
if re_find:
result["vrf_rid_auto_sel"] = re_find
if re_find[0] != vrf_rid_auto_sel:
need_cfg = True
else:
need_cfg = True
keepalive_time = module.params['keepalive_time']
if keepalive_time:
if not vrf_name:
module.fail_json(
msg='Error: Please input vrf_name.')
if int(keepalive_time) > 21845 or int(keepalive_time) < 0:
module.fail_json(
msg='keepalive_time %s is out of [0 - 21845].' % keepalive_time)
else:
conf_str = CE_GET_BGP_INSTANCE_HEADER + \
"<keepaliveTime></keepaliveTime>" + CE_GET_BGP_INSTANCE_TAIL
recv_xml = self.netconf_get_config(
module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<keepaliveTime>(.*)</keepaliveTime>.*', recv_xml)
if re_find:
result["keepalive_time"] = re_find
if re_find[0] != keepalive_time:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<keepaliveTime>(.*)</keepaliveTime>.*', recv_xml)
if re_find:
result["keepalive_time"] = re_find
if re_find[0] == keepalive_time:
need_cfg = True
hold_time = module.params['hold_time']
if hold_time:
if not vrf_name:
module.fail_json(
msg='Error: Please input vrf_name.')
if int(hold_time) > 65535 or int(hold_time) < 3:
module.fail_json(
msg='hold_time %s is out of [3 - 65535].' % hold_time)
else:
conf_str = CE_GET_BGP_INSTANCE_HEADER + \
"<holdTime></holdTime>" + CE_GET_BGP_INSTANCE_TAIL
recv_xml = self.netconf_get_config(
module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<holdTime>(.*)</holdTime>.*', recv_xml)
if re_find:
result["hold_time"] = re_find
if re_find[0] != hold_time:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<holdTime>(.*)</holdTime>.*', recv_xml)
if re_find:
result["hold_time"] = re_find
if re_find[0] == hold_time:
need_cfg = True
min_hold_time = module.params['min_hold_time']
if min_hold_time:
if not vrf_name:
module.fail_json(
msg='Error: Please input vrf_name.')
if int(min_hold_time) != 0 and (int(min_hold_time) > 65535 or int(min_hold_time) < 20):
module.fail_json(
msg='min_hold_time %s is out of [0, or 20 - 65535].' % min_hold_time)
else:
conf_str = CE_GET_BGP_INSTANCE_HEADER + \
"<minHoldTime></minHoldTime>" + CE_GET_BGP_INSTANCE_TAIL
recv_xml = self.netconf_get_config(
module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<minHoldTime>(.*)</minHoldTime>.*', recv_xml)
if re_find:
result["min_hold_time"] = re_find
if re_find[0] != min_hold_time:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<minHoldTime>(.*)</minHoldTime>.*', recv_xml)
if re_find:
result["min_hold_time"] = re_find
if re_find[0] == min_hold_time:
need_cfg = True
conn_retry_time = module.params['conn_retry_time']
if conn_retry_time:
if not vrf_name:
module.fail_json(
msg='Error: Please input vrf_name.')
if int(conn_retry_time) > 65535 or int(conn_retry_time) < 1:
module.fail_json(
msg='conn_retry_time %s is out of [1 - 65535].' % conn_retry_time)
else:
conf_str = CE_GET_BGP_INSTANCE_HEADER + \
"<connRetryTime></connRetryTime>" + CE_GET_BGP_INSTANCE_TAIL
recv_xml = self.netconf_get_config(
module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<connRetryTime>(.*)</connRetryTime>.*', recv_xml)
if re_find:
result["conn_retry_time"] = re_find
if re_find[0] != conn_retry_time:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<connRetryTime>(.*)</connRetryTime>.*', recv_xml)
if re_find:
result["conn_retry_time"] = re_find
if re_find[0] == conn_retry_time:
need_cfg = True
else:
pass
ebgp_if_sensitive = module.params['ebgp_if_sensitive']
if ebgp_if_sensitive != 'no_use':
if not vrf_name:
module.fail_json(
msg='Error: Please input vrf_name.')
conf_str = CE_GET_BGP_INSTANCE_HEADER + \
"<ebgpIfSensitive></ebgpIfSensitive>" + CE_GET_BGP_INSTANCE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<ebgpIfSensitive>(.*)</ebgpIfSensitive>.*', recv_xml)
if re_find:
result["ebgp_if_sensitive"] = re_find
if re_find[0] != ebgp_if_sensitive:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<ebgpIfSensitive>(.*)</ebgpIfSensitive>.*', recv_xml)
if re_find:
result["ebgp_if_sensitive"] = re_find
if re_find[0] == ebgp_if_sensitive:
need_cfg = True
else:
pass
default_af_type = module.params['default_af_type']
if default_af_type:
if not vrf_name:
module.fail_json(
msg='Error: Please input vrf_name.')
conf_str = CE_GET_BGP_INSTANCE_HEADER + \
"<defaultAfType></defaultAfType>" + CE_GET_BGP_INSTANCE_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultAfType>(.*)</defaultAfType>.*', recv_xml)
if re_find:
result["default_af_type"] = re_find
if re_find[0] != default_af_type:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<defaultAfType>(.*)</defaultAfType>.*', recv_xml)
if re_find:
result["default_af_type"] = re_find
if re_find[0] == default_af_type:
need_cfg = True
else:
pass
result["need_cfg"] = need_cfg
return result
def get_bgp_enable(self, **kwargs):
    """Query BGP process state from the device.

    Returns a list of (asNumber, bgpEnable) string tuples, or an empty
    list when the reply carries no data or does not match.
    """
    module = kwargs["module"]
    recv = self.netconf_get_config(module=module,
                                   conf_str=CE_GET_BGP_ENABLE)
    if "<data/>" in recv:
        return []
    # findall already yields [] when nothing matches.
    return re.findall(
        r'.*<asNumber>(.*)</asNumber>.*\s*<bgpEnable>(.*)</bgpEnable>.*',
        recv)
def merge_bgp_enable(self, **kwargs):
    """Enable (state=present) or disable the BGP process.

    Returns the equivalent CLI command list for the module's 'updates'
    output; fails the module when the device does not acknowledge.
    """
    module = kwargs["module"]
    state = module.params['state']
    as_number = module.params['as_number']

    enable_flag = "true" if state == "present" else "false"
    pieces = [CE_MERGE_BGP_ENABLE_HEADER,
              "<bgpEnable>%s</bgpEnable>" % enable_flag]
    if as_number:
        pieces.append("<asNumber>%s</asNumber>" % as_number)
    pieces.append(CE_MERGE_BGP_ENABLE_TAIL)

    recv_xml = self.netconf_set_config(module=module,
                                       conf_str="".join(pieces))
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp enable failed.')

    if state == "present":
        return ["bgp %s" % as_number]
    return ["undo bgp %s" % as_number]
def merge_bgp_enable_other(self, **kwargs):
    """Merge global BGP process options set in the module parameters.

    Pushes one NETCONF merge containing every option the user supplied
    and returns the matching CLI commands for the 'updates' result.
    The original copy-pasted per-option blocks are factored into two
    local helpers; option order and all emitted strings are unchanged.
    """
    module = kwargs["module"]
    params = module.params
    xml_parts = [CE_MERGE_BGP_ENABLE_HEADER]
    cmds = []

    def _flag_option(key, tag, on_cmd, off_cmd):
        # Tri-state string flags: 'no_use' means "not set by the user".
        value = params[key]
        if value != 'no_use':
            xml_parts.append("<%s>%s</%s>" % (tag, value, tag))
            cmds.append(on_cmd if value == "true" else off_cmd)

    def _value_option(key, tag, cli_fmt):
        # Plain value options: any truthy value is pushed verbatim.
        value = params[key]
        if value:
            xml_parts.append("<%s>%s</%s>" % (tag, value, tag))
            cmds.append(cli_fmt % value)

    _flag_option('graceful_restart', 'gracefulRestart',
                 "graceful-restart", "undo graceful-restart")
    _value_option('time_wait_for_rib', 'timeWaitForRib',
                  "graceful-restart timer wait-for-rib %s")
    _value_option('as_path_limit', 'asPathLimit', "as-path-limit %s")
    _flag_option('check_first_as', 'checkFirstAs',
                 "check-first-as", "undo check-first-as")
    _value_option('confed_id_number', 'confedIdNumber',
                  "confederation id %s")
    _flag_option('confed_nonstanded', 'confedNonstanded',
                 "confederation nonstandard",
                 "undo confederation nonstandard")
    _flag_option('bgp_rid_auto_sel', 'bgpRidAutoSel',
                 "router-id vpn-instance auto-select", "undo router-id")
    _flag_option('keep_all_routes', 'keepAllRoutes',
                 "keep-all-routes", "undo keep-all-routes")
    _flag_option('memory_limit', 'memoryLimit',
                 "prefix memory-limit", "undo prefix memory-limit")
    _flag_option('gr_peer_reset', 'grPeerReset',
                 "graceful-restart peer-reset",
                 "undo graceful-restart peer-reset")
    _flag_option('is_shutdown', 'isShutdown', "shutdown", "undo shutdown")

    suppress_interval = params['suppress_interval']
    hold_interval = params['hold_interval']
    clear_interval = params['clear_interval']
    if suppress_interval:
        xml_parts.append(
            "<suppressInterval>%s</suppressInterval>" % suppress_interval)
        cmds.append("nexthop recursive-lookup restrain suppress-interval %s "
                    "hold-interval %s clear-interval %s"
                    % (suppress_interval, hold_interval, clear_interval))
    if hold_interval:
        xml_parts.append("<holdInterval>%s</holdInterval>" % hold_interval)
    if clear_interval:
        xml_parts.append("<clearInterval>%s</clearInterval>" % clear_interval)

    xml_parts.append(CE_MERGE_BGP_ENABLE_TAIL)
    recv_xml = self.netconf_set_config(module=module,
                                       conf_str="".join(xml_parts))
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp enable failed.')
    return cmds
def delete_bgp_enable_other(self, **kwargs):
    """Reset global BGP process options back to device defaults.

    For every option the user supplied, merges either the fixed default
    value (value options) or the echoed value (tri-state flags, matching
    the original behaviour) and returns the 'undo' CLI commands.
    The repetitive per-option blocks are factored into two helpers;
    option order and all emitted strings are unchanged.
    """
    module = kwargs["module"]
    params = module.params
    xml_parts = [CE_MERGE_BGP_ENABLE_HEADER]
    cmds = []

    def _flag_option(key, tag, on_cmd, off_cmd):
        # Tri-state flags: 'no_use' means untouched; otherwise the user
        # value is echoed back as-is (same as the merge path).
        value = params[key]
        if value != 'no_use':
            xml_parts.append("<%s>%s</%s>" % (tag, value, tag))
            cmds.append(on_cmd if value == "true" else off_cmd)

    def _reset_option(key, xml_snippet, undo_cmd):
        # Value options are restored via a fixed device-default snippet.
        if params[key]:
            xml_parts.append(xml_snippet)
            cmds.append(undo_cmd)

    _flag_option('graceful_restart', 'gracefulRestart',
                 "graceful-restart", "undo graceful-restart")
    _reset_option('time_wait_for_rib',
                  "<timeWaitForRib>600</timeWaitForRib>",
                  "undo graceful-restart timer wait-for-rib")
    _reset_option('as_path_limit', "<asPathLimit>255</asPathLimit>",
                  "undo as-path-limit")
    _flag_option('check_first_as', 'checkFirstAs',
                 "check-first-as", "undo check-first-as")
    _reset_option('confed_id_number', "<confedIdNumber></confedIdNumber>",
                  "undo confederation id")
    _flag_option('confed_nonstanded', 'confedNonstanded',
                 "confederation nonstandard",
                 "undo confederation nonstandard")
    _flag_option('bgp_rid_auto_sel', 'bgpRidAutoSel',
                 "router-id vpn-instance auto-select", "undo router-id")
    _flag_option('keep_all_routes', 'keepAllRoutes',
                 "keep-all-routes", "undo keep-all-routes")
    _flag_option('memory_limit', 'memoryLimit',
                 "prefix memory-limit", "undo prefix memory-limit")
    _flag_option('gr_peer_reset', 'grPeerReset',
                 "graceful-restart peer-reset",
                 "undo graceful-restart peer-reset")
    _flag_option('is_shutdown', 'isShutdown', "shutdown", "undo shutdown")

    suppress_interval = params['suppress_interval']
    hold_interval = params['hold_interval']
    clear_interval = params['clear_interval']
    if suppress_interval:
        xml_parts.append("<suppressInterval>60</suppressInterval>")
        # NOTE(review): the CLI echo keeps the user-supplied values even
        # though the device is reset to defaults -- preserved as-is.
        cmds.append("nexthop recursive-lookup restrain suppress-interval %s "
                    "hold-interval %s clear-interval %s"
                    % (suppress_interval, hold_interval, clear_interval))
    if hold_interval:
        xml_parts.append("<holdInterval>120</holdInterval>")
    if clear_interval:
        xml_parts.append("<clearInterval>600</clearInterval>")

    xml_parts.append(CE_MERGE_BGP_ENABLE_TAIL)
    recv_xml = self.netconf_set_config(module=module,
                                       conf_str="".join(xml_parts))
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Delete bgp enable failed.')
    return cmds
def get_bgp_confed_peer_as(self, **kwargs):
    """Return the configured confederation peer AS numbers.

    Yields a (possibly empty) list of AS-number strings.
    """
    module = kwargs["module"]
    recv = self.netconf_get_config(module=module,
                                   conf_str=CE_GET_BGP_CONFED_PEER_AS)
    if "<data/>" in recv:
        return []
    # findall already yields [] when nothing matches.
    return re.findall(
        r'.*<confedPeerAsNum>(.*)</confedPeerAsNum>.*', recv)
def merge_bgp_confed_peer_as(self, **kwargs):
    """Merge a confederation peer AS number; return its CLI command."""
    module = kwargs["module"]
    peer_as = module.params['confed_peer_as_num']
    recv_xml = self.netconf_set_config(
        module=module, conf_str=CE_MERGE_BGP_CONFED_PEER_AS % peer_as)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp confed peer as failed.')
    return ["confederation peer-as %s" % peer_as]
def create_bgp_confed_peer_as(self, **kwargs):
    """Create a confederation peer AS number; return its CLI command."""
    module = kwargs["module"]
    peer_as = module.params['confed_peer_as_num']
    recv_xml = self.netconf_set_config(
        module=module, conf_str=CE_CREATE_BGP_CONFED_PEER_AS % peer_as)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Create bgp confed peer as failed.')
    return ["confederation peer-as %s" % peer_as]
def delete_bgp_confed_peer_as(self, **kwargs):
    """Delete a confederation peer AS number; return the undo command."""
    module = kwargs["module"]
    peer_as = module.params['confed_peer_as_num']
    recv_xml = self.netconf_set_config(
        module=module, conf_str=CE_DELETE_BGP_CONFED_PEER_AS % peer_as)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Delete bgp confed peer as failed.')
    return ["undo confederation peer-as %s" % peer_as]
def get_bgp_instance(self, **kwargs):
    """Return the list of configured BGP instance VRF names."""
    module = kwargs["module"]
    recv = self.netconf_get_config(module=module,
                                   conf_str=CE_GET_BGP_INSTANCE)
    if "<data/>" in recv:
        return []
    # findall already yields [] when nothing matches.
    return re.findall(r'.*<vrfName>(.*)</vrfName>.*', recv)
def merge_bgp_instance(self, **kwargs):
    """Merge a BGP instance for the configured VPN instance name.

    Fails the module when the device does not acknowledge; returns None.
    """
    module = kwargs["module"]
    conf_str = (CE_MERGE_BGP_INSTANCE_HEADER
                + "<vrfName>%s</vrfName>" % module.params['vrf_name']
                + CE_MERGE_BGP_INSTANCE_TAIL)
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp instance failed.')
def create_bgp_instance(self, **kwargs):
    """Create a BGP instance for the given VRF.

    Returns the CLI command list (empty for the default '_public_' VRF).
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']

    body = "<vrfName>%s</vrfName>" % vrf_name if vrf_name else ""
    conf_str = (CE_CREATE_BGP_INSTANCE_HEADER + body
                + CE_CREATE_BGP_INSTANCE_TAIL)
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Create bgp instance failed.')

    if vrf_name != "_public_":
        return ["ipv4-family vpn-instance %s" % vrf_name]
    return []
def delete_bgp_instance(self, **kwargs):
    """Delete a BGP instance for the given VRF.

    Returns the CLI command list (empty for the default '_public_' VRF).
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']

    body = "<vrfName>%s</vrfName>" % vrf_name if vrf_name else ""
    conf_str = (CE_DELETE_BGP_INSTANCE_HEADER + body
                + CE_DELETE_BGP_INSTANCE_TAIL)
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Delete bgp instance failed.')

    if vrf_name != "_public_":
        return ["undo ipv4-family vpn-instance %s" % vrf_name]
    return []
def merge_bgp_instance_other(self, **kwargs):
    """Merge per-VRF BGP instance options supplied in module params.

    Returns the matching CLI commands. The copy-pasted per-option
    blocks are factored into two helpers; option order and all emitted
    strings are unchanged.
    """
    module = kwargs["module"]
    params = module.params
    vrf_name = params['vrf_name']
    xml_parts = [CE_MERGE_BGP_INSTANCE_HEADER,
                 "<vrfName>%s</vrfName>" % vrf_name]
    cmds = []

    def _flag_option(key, tag, on_cmd, off_cmd):
        # Tri-state string flags: 'no_use' means "not set by the user".
        value = params[key]
        if value != 'no_use':
            xml_parts.append("<%s>%s</%s>" % (tag, value, tag))
            cmds.append(on_cmd if value == "true" else off_cmd)

    def _value_option(key, tag, cli_fmt):
        # Plain value options: any truthy value is pushed verbatim.
        value = params[key]
        if value:
            xml_parts.append("<%s>%s</%s>" % (tag, value, tag))
            cmds.append(cli_fmt % value)

    _flag_option('vrf_rid_auto_sel', 'vrfRidAutoSel',
                 "router-id vpn-instance auto-select",
                 "undo router-id vpn-instance auto-select")
    _value_option('router_id', 'routerId', "router-id %s")
    _value_option('keepalive_time', 'keepaliveTime', "timer keepalive %s")
    _value_option('hold_time', 'holdTime', "timer hold %s")
    _value_option('min_hold_time', 'minHoldTime', "timer min-holdtime %s")
    _value_option('conn_retry_time', 'connRetryTime',
                  "timer connect-retry %s")
    _flag_option('ebgp_if_sensitive', 'ebgpIfSensitive',
                 "ebgp-interface-sensitive",
                 "undo ebgp-interface-sensitive")

    default_af_type = params['default_af_type']
    if default_af_type:
        xml_parts.append(
            "<defaultAfType>%s</defaultAfType>" % default_af_type)
        if vrf_name != "_public_":
            # Address family entered depends on the configured type.
            family = ("ipv6-family" if default_af_type == "ipv6uni"
                      else "ipv4-family")
            cmds.append("%s vpn-instance %s" % (family, vrf_name))
    elif vrf_name != "_public_":
        cmds.append("ipv4-family vpn-instance %s" % vrf_name)

    xml_parts.append(CE_MERGE_BGP_INSTANCE_TAIL)
    recv_xml = self.netconf_set_config(module=module,
                                       conf_str="".join(xml_parts))
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp instance other failed.')
    return cmds
def delete_bgp_instance_other_comm(self, **kwargs):
    """Delete per-VRF BGP options of a common (non-default) VPN instance.

    Echoes each supplied option into a NETCONF delete and returns the
    matching 'undo' CLI commands. The repetitive per-option blocks are
    factored into two helpers; order and emitted strings are unchanged.
    """
    module = kwargs["module"]
    params = module.params
    vrf_name = params['vrf_name']
    xml_parts = [CE_DELETE_BGP_INSTANCE_HEADER,
                 "<vrfName>%s</vrfName>" % vrf_name]
    cmds = []

    def _echo_value(key, tag, undo_cmd):
        # Truthy value options: echo the value, emit a fixed undo.
        value = params[key]
        if value:
            xml_parts.append("<%s>%s</%s>" % (tag, value, tag))
            cmds.append(undo_cmd)

    def _echo_flag(key, tag, undo_cmd):
        # Tri-state flags: anything but 'no_use' is echoed back.
        value = params[key]
        if value != 'no_use':
            xml_parts.append("<%s>%s</%s>" % (tag, value, tag))
            cmds.append(undo_cmd)

    _echo_value('router_id', 'routerId', "undo router-id")
    _echo_flag('vrf_rid_auto_sel', 'vrfRidAutoSel',
               "undo router-id vpn-instance auto-select")
    _echo_value('keepalive_time', 'keepaliveTime', "undo timer keepalive")
    _echo_value('hold_time', 'holdTime', "undo timer hold")
    _echo_value('min_hold_time', 'minHoldTime', "undo timer min-holdtime")
    _echo_value('conn_retry_time', 'connRetryTime',
                "undo timer connect-retry")
    _echo_flag('ebgp_if_sensitive', 'ebgpIfSensitive',
               "undo ebgp-interface-sensitive")

    default_af_type = params['default_af_type']
    if default_af_type:
        xml_parts.append(
            "<defaultAfType>%s</defaultAfType>" % default_af_type)
        if vrf_name != "_public_":
            family = ("ipv6-family" if default_af_type == "ipv6uni"
                      else "ipv4-family")
            cmds.append("undo %s vpn-instance %s" % (family, vrf_name))
    elif vrf_name != "_public_":
        cmds.append("undo ipv4-family vpn-instance %s" % vrf_name)

    xml_parts.append(CE_DELETE_BGP_INSTANCE_TAIL)
    recv_xml = self.netconf_set_config(module=module,
                                       conf_str="".join(xml_parts))
    if "<ok/>" not in recv_xml:
        module.fail_json(
            msg='Error: Delete common vpn bgp instance other args failed.')
    return cmds
def delete_instance_other_public(self, **kwargs):
    """Reset per-VRF options of the default (_public_) BGP instance.

    The default instance cannot be deleted, so options are merged back
    to their device defaults. Returns the 'undo' CLI commands. The
    repetitive per-option blocks are factored into helpers; order and
    emitted strings are unchanged.
    """
    module = kwargs["module"]
    params = module.params
    vrf_name = params['vrf_name']
    xml_parts = [CE_MERGE_BGP_INSTANCE_HEADER,
                 "<vrfName>%s</vrfName>" % vrf_name]
    cmds = []

    def _reset_value(key, xml_snippet, undo_cmd):
        # Truthy value options are merged back to a fixed default.
        if params[key]:
            xml_parts.append(xml_snippet)
            cmds.append(undo_cmd)

    _reset_value('router_id', "<routerId></routerId>", "undo router-id")

    # vrf_rid_auto_sel echoes the user value rather than a default,
    # matching the original behaviour.
    vrf_rid_auto_sel = params['vrf_rid_auto_sel']
    if vrf_rid_auto_sel != 'no_use':
        xml_parts.append(
            "<vrfRidAutoSel>%s</vrfRidAutoSel>" % vrf_rid_auto_sel)
        cmds.append("undo router-id vpn-instance auto-select")

    _reset_value('keepalive_time', "<keepaliveTime>60</keepaliveTime>",
                 "undo timer keepalive")
    _reset_value('hold_time', "<holdTime>180</holdTime>",
                 "undo timer hold")
    _reset_value('min_hold_time', "<minHoldTime>0</minHoldTime>",
                 "undo timer min-holdtime")
    _reset_value('conn_retry_time', "<connRetryTime>32</connRetryTime>",
                 "undo timer connect-retry")

    if params['ebgp_if_sensitive'] != 'no_use':
        xml_parts.append("<ebgpIfSensitive>true</ebgpIfSensitive>")
        cmds.append("undo ebgp-interface-sensitive")

    default_af_type = params['default_af_type']
    if default_af_type:
        xml_parts.append("<defaultAfType>ipv4uni</defaultAfType>")
        if vrf_name != "_public_":
            family = ("ipv6-family" if default_af_type == "ipv6uni"
                      else "ipv4-family")
            cmds.append("undo %s vpn-instance %s" % (family, vrf_name))
    elif vrf_name != "_public_":
        cmds.append("undo ipv4-family vpn-instance %s" % vrf_name)

    xml_parts.append(CE_MERGE_BGP_INSTANCE_TAIL)
    recv_xml = self.netconf_set_config(module=module,
                                       conf_str="".join(xml_parts))
    if "<ok/>" not in recv_xml:
        module.fail_json(
            msg='Error: Delete default vpn bgp instance other args failed.')
    return cmds
def main():
    """ main

    Module entry point: build the argument spec, collect proposed and
    existing state, apply BGP enable / confederation / instance changes
    through the Bgp helper object, and report via exit_json.
    """
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        as_number=dict(type='str'),
        graceful_restart=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        time_wait_for_rib=dict(type='str'),
        as_path_limit=dict(type='str'),
        check_first_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        confed_id_number=dict(type='str'),
        confed_nonstanded=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        bgp_rid_auto_sel=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        keep_all_routes=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        memory_limit=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        gr_peer_reset=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        is_shutdown=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        suppress_interval=dict(type='str'),
        hold_interval=dict(type='str'),
        clear_interval=dict(type='str'),
        confed_peer_as_num=dict(type='str'),
        vrf_name=dict(type='str'),
        vrf_rid_auto_sel=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        router_id=dict(type='str'),
        keepalive_time=dict(type='str'),
        hold_time=dict(type='str'),
        min_hold_time=dict(type='str'),
        conn_retry_time=dict(type='str'),
        ebgp_if_sensitive=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        default_af_type=dict(type='str', choices=['ipv4uni', 'ipv6uni'])
    )

    argument_spec.update(ce_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Result accumulators for the standard Ansible module output.
    changed = False
    proposed = dict()
    existing = dict()
    end_state = dict()
    updates = []

    # Unpack every module parameter into a local name.
    state = module.params['state']
    as_number = module.params['as_number']
    graceful_restart = module.params['graceful_restart']
    time_wait_for_rib = module.params['time_wait_for_rib']
    as_path_limit = module.params['as_path_limit']
    check_first_as = module.params['check_first_as']
    confed_id_number = module.params['confed_id_number']
    confed_nonstanded = module.params['confed_nonstanded']
    bgp_rid_auto_sel = module.params['bgp_rid_auto_sel']
    keep_all_routes = module.params['keep_all_routes']
    memory_limit = module.params['memory_limit']
    gr_peer_reset = module.params['gr_peer_reset']
    is_shutdown = module.params['is_shutdown']
    suppress_interval = module.params['suppress_interval']
    hold_interval = module.params['hold_interval']
    clear_interval = module.params['clear_interval']
    confed_peer_as_num = module.params['confed_peer_as_num']
    router_id = module.params['router_id']
    vrf_name = module.params['vrf_name']
    vrf_rid_auto_sel = module.params['vrf_rid_auto_sel']
    keepalive_time = module.params['keepalive_time']
    hold_time = module.params['hold_time']
    min_hold_time = module.params['min_hold_time']
    conn_retry_time = module.params['conn_retry_time']
    ebgp_if_sensitive = module.params['ebgp_if_sensitive']
    default_af_type = module.params['default_af_type']

    ce_bgp_obj = Bgp()

    if not ce_bgp_obj:
        module.fail_json(msg='Error: Init module failed.')

    # get proposed
    # Only user-supplied values go into 'proposed'; 'no_use' marks a
    # tri-state flag the user did not set.
    proposed["state"] = state
    if as_number:
        proposed["as_number"] = as_number
    if graceful_restart != 'no_use':
        proposed["graceful_restart"] = graceful_restart
    if time_wait_for_rib:
        proposed["time_wait_for_rib"] = time_wait_for_rib
    if as_path_limit:
        proposed["as_path_limit"] = as_path_limit
    if check_first_as != 'no_use':
        proposed["check_first_as"] = check_first_as
    if confed_id_number:
        proposed["confed_id_number"] = confed_id_number
    if confed_nonstanded != 'no_use':
        proposed["confed_nonstanded"] = confed_nonstanded
    if bgp_rid_auto_sel != 'no_use':
        proposed["bgp_rid_auto_sel"] = bgp_rid_auto_sel
    if keep_all_routes != 'no_use':
        proposed["keep_all_routes"] = keep_all_routes
    if memory_limit != 'no_use':
        proposed["memory_limit"] = memory_limit
    if gr_peer_reset != 'no_use':
        proposed["gr_peer_reset"] = gr_peer_reset
    if is_shutdown != 'no_use':
        proposed["is_shutdown"] = is_shutdown
    if suppress_interval:
        proposed["suppress_interval"] = suppress_interval
    if hold_interval:
        proposed["hold_interval"] = hold_interval
    if clear_interval:
        proposed["clear_interval"] = clear_interval
    if confed_peer_as_num:
        proposed["confed_peer_as_num"] = confed_peer_as_num
    if router_id:
        proposed["router_id"] = router_id
    if vrf_name:
        proposed["vrf_name"] = vrf_name
    if vrf_rid_auto_sel != 'no_use':
        proposed["vrf_rid_auto_sel"] = vrf_rid_auto_sel
    if keepalive_time:
        proposed["keepalive_time"] = keepalive_time
    if hold_time:
        proposed["hold_time"] = hold_time
    if min_hold_time:
        proposed["min_hold_time"] = min_hold_time
    if conn_retry_time:
        proposed["conn_retry_time"] = conn_retry_time
    if ebgp_if_sensitive != 'no_use':
        proposed["ebgp_if_sensitive"] = ebgp_if_sensitive
    if default_af_type:
        proposed["default_af_type"] = default_af_type

    # Decide which configuration areas need work.
    need_bgp_enable = check_bgp_enable_args(module=module)
    need_bgp_enable_other_rst = ce_bgp_obj.check_bgp_enable_other_args(
        module=module)
    need_bgp_confed = check_bgp_confed_args(module=module)
    need_bgp_instance = ce_bgp_obj.check_bgp_instance_args(module=module)
    need_bgp_instance_other_rst = ce_bgp_obj.check_bgp_instance_other_args(
        module=module)

    # bgp enable/disable
    if need_bgp_enable:
        bgp_enable_exist = ce_bgp_obj.get_bgp_enable(module=module)
        existing["bgp enable"] = bgp_enable_exist

        # NOTE(review): this indexing assumes get_bgp_enable() matched at
        # least one (asNumber, bgpEnable) pair -- an empty reply would
        # raise IndexError here; confirm against the device behaviour.
        asnumber_exist = bgp_enable_exist[0][0]
        bgpenable_exist = bgp_enable_exist[0][1]

        if state == "present":
            bgp_enable_new = (as_number, "true")

            if bgp_enable_new in bgp_enable_exist:
                # Already enabled with this AS: nothing to do.
                pass
            elif bgpenable_exist == "true" and asnumber_exist != as_number:
                module.fail_json(
                    msg='Error: BGP is already running. The AS is %s.' % asnumber_exist)
            else:
                cmd = ce_bgp_obj.merge_bgp_enable(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)
        else:
            # Only disable the process once no other BGP sub-config
            # still needs to be removed.
            if need_bgp_enable_other_rst["need_cfg"] or need_bgp_confed or need_bgp_instance_other_rst["need_cfg"]:
                pass
            elif bgpenable_exist == "false":
                pass
            elif bgpenable_exist == "true" and asnumber_exist == as_number:
                cmd = ce_bgp_obj.merge_bgp_enable(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)
            else:
                module.fail_json(
                    msg='Error: BGP is already running. The AS is %s.' % asnumber_exist)

        bgp_enable_end = ce_bgp_obj.get_bgp_enable(module=module)
        end_state["bgp enable"] = bgp_enable_end

    # bgp enable/disable other args
    exist_tmp = dict()
    for item in need_bgp_enable_other_rst:
        if item != "need_cfg":
            exist_tmp[item] = need_bgp_enable_other_rst[item]
    if exist_tmp:
        existing["bgp enable other"] = exist_tmp

    if need_bgp_enable_other_rst["need_cfg"]:
        if state == "present":
            cmd = ce_bgp_obj.merge_bgp_enable_other(module=module)
            changed = True
            for item in cmd:
                updates.append(item)
        else:
            cmd = ce_bgp_obj.delete_bgp_enable_other(module=module)
            changed = True
            for item in cmd:
                updates.append(item)

    # Re-check so end_state reflects the device after the change.
    need_bgp_enable_other_rst = ce_bgp_obj.check_bgp_enable_other_args(
        module=module)
    end_tmp = dict()
    for item in need_bgp_enable_other_rst:
        if item != "need_cfg":
            end_tmp[item] = need_bgp_enable_other_rst[item]
    if end_tmp:
        end_state["bgp enable other"] = end_tmp

    # bgp confederation peer as
    if need_bgp_confed:
        confed_exist = ce_bgp_obj.get_bgp_confed_peer_as(module=module)
        existing["confederation peer as"] = confed_exist
        # NOTE(review): these parentheses do not create a tuple; this is
        # just the plain string value.
        confed_new = (confed_peer_as_num)

        if state == "present":
            if len(confed_exist) == 0:
                cmd = ce_bgp_obj.create_bgp_confed_peer_as(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)
            elif confed_new not in confed_exist:
                cmd = ce_bgp_obj.merge_bgp_confed_peer_as(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)
        else:
            if len(confed_exist) == 0:
                pass
            elif confed_new not in confed_exist:
                pass
            else:
                cmd = ce_bgp_obj.delete_bgp_confed_peer_as(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)

        confed_end = ce_bgp_obj.get_bgp_confed_peer_as(module=module)
        end_state["confederation peer as"] = confed_end

    # bgp instance
    router_id_exist = ce_bgp_obj.get_bgp_instance(module=module)
    existing["bgp instance"] = router_id_exist
    if need_bgp_instance:
        # NOTE(review): plain string, not a tuple (parens are redundant).
        router_id_new = (vrf_name)

        if state == "present":
            if len(router_id_exist) == 0:
                cmd = ce_bgp_obj.create_bgp_instance(module=module)
                changed = True
                # NOTE(review): appends the whole command list as one
                # element, unlike the per-item appends used everywhere
                # else -- looks unintentional; confirm before changing.
                updates.append(cmd)
            elif router_id_new not in router_id_exist:
                ce_bgp_obj.merge_bgp_instance(module=module)
                changed = True
        else:
            # Instances are only deleted once no per-instance options
            # still need removal, and never for the default VRF.
            if not need_bgp_instance_other_rst["need_cfg"]:
                if vrf_name != "_public_":
                    if len(router_id_exist) == 0:
                        pass
                    elif router_id_new not in router_id_exist:
                        pass
                    else:
                        cmd = ce_bgp_obj.delete_bgp_instance(module=module)
                        changed = True
                        for item in cmd:
                            updates.append(item)

    router_id_end = ce_bgp_obj.get_bgp_instance(module=module)
    end_state["bgp instance"] = router_id_end

    # bgp instance other
    exist_tmp = dict()
    for item in need_bgp_instance_other_rst:
        if item != "need_cfg":
            exist_tmp[item] = need_bgp_instance_other_rst[item]
    if exist_tmp:
        existing["bgp instance other"] = exist_tmp

    if need_bgp_instance_other_rst["need_cfg"]:
        if state == "present":
            cmd = ce_bgp_obj.merge_bgp_instance_other(module=module)
            changed = True
            for item in cmd:
                updates.append(item)
        else:
            # The default (_public_) instance is reset via merge; other
            # VRFs get a real delete.
            if vrf_name == "_public_":
                cmd = ce_bgp_obj.delete_instance_other_public(
                    module=module)
                changed = True
                for item in cmd:
                    updates.append(item)
            else:
                cmd = ce_bgp_obj.delete_bgp_instance_other_comm(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)

    # Re-check so end_state reflects the device after the change.
    need_bgp_instance_other_rst = ce_bgp_obj.check_bgp_instance_other_args(
        module=module)
    end_tmp = dict()
    for item in need_bgp_instance_other_rst:
        if item != "need_cfg":
            end_tmp[item] = need_bgp_instance_other_rst[item]
    if end_tmp:
        end_state["bgp instance other"] = end_tmp

    # Standard Ansible result payload.
    results = dict()
    results['proposed'] = proposed
    results['existing'] = existing
    results['changed'] = changed
    results['end_state'] = end_state
    results['updates'] = updates

    module.exit_json(**results)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| gpl-3.0 |
orlov-vo/mtasa | vendor/google-breakpad/src/tools/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape '$', ' ' and ':' so *word* is safe as a ninja path token."""
    for old, new in (('$ ', '$$ '), (' ', '$ '), (':', '$:')):
        word = word.replace(old, new)
    return word
class Writer(object):
    """Writes well-formed, line-wrapped ninja syntax to *output*.

    *output* is any file-like object with a write() method; *width* is
    the column at which long lines are wrapped with ' $' continuations.
    """

    def __init__(self, output, width=78):
        self.output = output
        self.width = width

    def newline(self):
        """Emit a blank line."""
        self.output.write('\n')

    def comment(self, text):
        """Emit *text* as one or more '# '-prefixed comment lines."""
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')

    def variable(self, key, value, indent=0):
        """Emit 'key = value'; lists are space-joined, None is skipped."""
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)

    def pool(self, name, depth):
        self._line('pool %s' % name)
        self.variable('depth', depth, indent=1)

    def rule(self, name, command, description=None, depfile=None,
             generator=False, pool=None, restat=False, rspfile=None,
             rspfile_content=None, deps=None):
        """Emit a 'rule' declaration with any of the optional bindings."""
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if pool:
            self.variable('pool', pool, indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)
        if deps:
            self.variable('deps', deps, indent=1)

    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        """Emit a 'build' statement; returns the (listified) outputs."""
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))

        if implicit:
            # '|' separates implicit dependencies from explicit inputs.
            implicit = map(escape_path, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            # '||' separates order-only dependencies.
            order_only = map(escape_path, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)

        self._line('build %s: %s' % (' '.join(out_outputs),
                                     ' '.join([rule] + all_inputs)))

        if variables:
            if isinstance(variables, dict):
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)

            for key, val in iterator:
                self.variable(key, val, indent=1)

        return outputs

    def include(self, path):
        self._line('include %s' % path)

    def subninja(self, path):
        self._line('subninja %s' % path)

    def default(self, paths):
        self._line('default %s' % ' '.join(self._as_list(paths)))

    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        # Bug fix: use >= 0 so a '$' at index 0 is counted too. With the
        # previous 'dollar_index > 0' test, an escaped space at the very
        # start of a line ("$ ...") looked unescaped and could be picked
        # as a wrap point, corrupting the output.
        while dollar_index >= 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = '  ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.

            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space.
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break

            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
            if space < 0:
                # Give up on breaking.
                break

            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]

            # Subsequent lines are continuations, so indent them.
            leading_space = '  ' * (indent+2)

        self.output.write(leading_space + text + '\n')

    def _as_list(self, input):
        """Normalize None/str/list into a list."""
        if input is None:
            return []
        if isinstance(input, list):
            return input
        return [input]
def escape(string):
    """Escape a string so it can be embedded verbatim in a Ninja file.

    '$' is Ninja's sole metacharacter; doubling it ('$$') makes it
    literal.  Newlines cannot be represented in Ninja syntax at all.
    """
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # Splitting on '$' and rejoining with '$$' doubles every dollar sign.
    return '$$'.join(string.split('$'))
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from tests.compat import unittest
class TestCloudWatchLogs(unittest.TestCase):
    """Integration tests for the CloudWatch Logs Layer1 connection.

    Requires live AWS credentials; every test talks to the real service.
    """

    def setUp(self):
        # Open a fresh connection to CloudWatch Logs for each test.
        self.logs = boto.connect_logs()

    def test_logs(self):
        conn = self.logs
        # Listing log groups should always yield a list, even when empty.
        groups = conn.describe_log_groups(log_group_name_prefix='test')
        self.assertIsInstance(groups['logGroups'], list)
        # Exactly one of the sample lines has status_code 500, so the
        # metric filter should match exactly once.
        metric_filter = '[ip, id, user, ..., status_code=500, size]'
        log_lines = [
            '127.0.0.1 - frank "GET /apache_pb.gif HTTP/1.0" 200 1534',
            '127.0.0.1 - frank "GET /apache_pb.gif HTTP/1.0" 500 5324',
        ]
        result = conn.test_metric_filter(metric_filter, log_lines)
        self.assertEqual(len(result['matches']), 1)