repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
chengduoZH/Paddle | python/paddle/fluid/tests/unittests/test_squeeze2_op.py | 2 | 2134 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
# Correct: General.
class TestSqueezeOp(OpTest):
    """Base operator test for the "squeeze2" op.

    squeeze2 removes the size-1 dimensions listed in the "axes" attribute
    and additionally emits an "XShape" output that records the input shape.
    Subclasses override init_test_case() to vary shapes and axes.
    """

    def setUp(self):
        # Build inputs/attrs and the expected outputs consumed by OpTest.
        self.op_type = "squeeze2"
        self.init_test_case()
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
        self.init_attrs()
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.new_shape),
            # XShape's content is excluded from comparison (see
            # no_check_set below), so any array of the original shape works.
            "XShape": np.random.random(self.ori_shape).astype("float32")
        }

    def test_check_output(self):
        # XShape is an implementation detail; skip it during comparison.
        self.check_output(no_check_set=['XShape'])

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        # Default case: squeeze both singleton dims of a (1, 3, 1, 5) input.
        self.ori_shape = (1, 3, 1, 5)
        self.axes = (0, 2)
        self.new_shape = (3, 5)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}
# Correct: There is a minus (negative) axis.
class TestSqueezeOp1(TestSqueezeOp):
    """Squeeze with a negative axis index (-2 refers to axis 2 of rank 4)."""

    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 5)
        self.axes = (0, -2)
        self.new_shape = (3, 5)
# Correct: No axes input.
class TestSqueezeOp2(TestSqueezeOp):
    """Empty axes: the op is expected to drop every size-1 dimension."""

    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 5)
        self.axes = ()
        self.new_shape = (3, 5)
# Correct: Just part of axes be squeezed.
class TestSqueezeOp3(TestSqueezeOp):
    """Squeeze only a subset of the size-1 dims: axes (1, -1) of a
    (3, 1, 5, 1, 4, 1) input; the singleton dim at index 3 is kept."""

    def init_test_case(self):
        self.ori_shape = (3, 1, 5, 1, 4, 1)
        self.axes = (1, -1)
        self.new_shape = (3, 5, 1, 4)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_bgp_service_communities_operations.py | 1 | 5134 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BgpServiceCommunitiesOperations(object):
    """BgpServiceCommunitiesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.BgpServiceCommunityListResult"]
        """Gets all the available bgp service communities.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BgpServiceCommunityListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.BgpServiceCommunityListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BgpServiceCommunityListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the HTTP request for the first page (full URL + query
            # parameters) or for a continuation page (next_link as-is).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation links already embed their query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('BgpServiceCommunityListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; non-200 responses raise ARM-formatted errors.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/bgpServiceCommunities'}  # type: ignore
| mit |
Qubad786/pr-code-review | gitcodereview/settings.py | 1 | 3117 | """
Django settings for gitcodereview project.
"""
import os
from os.path import abspath, dirname, join
import dj_database_url
from django.core.urlresolvers import reverse_lazy
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: don't run with debug turned on in production!
# BUG FIX: os.environ values are always strings, so the previous
# `os.environ.get('DEBUG', True)` returned the truthy string 'False' even
# when DEBUG was explicitly disabled in the environment.  Parse the value
# into a real boolean; the default remains enabled for development.
DEBUG = str(os.environ.get('DEBUG', True)).lower() in ('true', '1', 'yes')

ALLOWED_HOSTS = ['*']

# Application definition
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'web',
    'web.pullrequest',
    'web.user',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'gitcodereview.urls'

# Custom user model
AUTH_USER_MODEL = "user.User"

AUTHENTICATION_BACKENDS = [
    'web.user.auth_backend.UserAuthenticationBackend',
]

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'gitcodereview.wsgi.application'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'

# Login URL
LOGIN_URL = reverse_lazy('index')

# Login Redirect URL
LOGIN_REDIRECT_URL = reverse_lazy('dashboard')

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Secret key used in production secret.
SECRET_KEY = os.environ.get('SECRET_KEY', 'secret_key')

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': dj_database_url.config()
}

# Github Oauth settings
OAUTH_SETTINGS = {
    'CLIENT_ID': os.environ.get('CLIENT_ID', None),
    'CLIENT_SECRET': os.environ.get('CLIENT_SECRET', None),
    'BASE_URL': os.environ.get('BASE_URL', None),
    'ACCESS_TOKEN_URL': os.environ.get('ACCESS_TOKEN_URL', None),
    'REDIRECT_URL': os.environ.get('REDIRECT_URL', None),
}

# Use developer's overrides if environment variables are not set.
# NOTE: this is a Python 2 implicit relative import of gitcodereview/private.py.
if os.path.isfile(join(dirname(abspath(__file__)), 'private.py')):
    from private import *
| mit |
gao-feng/net | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Accumulated drop counts keyed by the stringified drop-location address.
drop_log = {}
# Table of {'loc': address, 'name': symbol} entries from /proc/kallsyms.
kallsyms = []

def get_kallsyms_table():
    """Load the kernel symbol table from /proc/kallsyms into `kallsyms`."""
    global kallsyms
    try:
        f = open("/proc/kallsyms", "r")
        # First pass only counts lines so progress can be reported below.
        linecount = 0
        for line in f:
            linecount = linecount+1
        f.seek(0)
    except:
        # Best effort: without /proc/kallsyms the report falls back to raw
        # addresses (see print_drop_table()).
        return
    j = 0
    for line in f:
        loc = int(line.split()[0], 16)
        name = line.split()[2]
        j = j +1
        if ((j % 100) == 0):
            # Progress indicator; the Python 2 trailing comma keeps
            # rewriting the same console line via "\r".
            print "\r" + str(j) + "/" + str(linecount),
        kallsyms.append({ 'loc': loc, 'name' : name})
    print "\r" + str(j) + "/" + str(linecount)
    # NOTE(review): sorting a list of dicts relies on Python 2 dict
    # comparison; entries appear to end up ordered by 'loc' — confirm.
    kallsyms.sort()
    return
def get_sym(sloc):
    """Map an address to a (symbol_name, offset) pair by scanning kallsyms.

    Returns (None, 0) when no symbol is found.
    NOTE(review): this linear scan returns the first symbol whose start
    address is *at or above* loc, and the offset is the distance up to
    that symbol (i['loc'] - loc) — confirm this matches the intended
    "containing symbol" semantics.
    """
    loc = int(sloc)
    for i in kallsyms:
        if (i['loc'] >= loc):
            return (i['name'], i['loc']-loc)
    return (None, 0)
def print_drop_table():
    """Print one line per drop location: symbol, offset and hit count."""
    print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
    for i in drop_log.keys():
        (sym, off) = get_sym(i)
        if sym == None:
            # Fall back to the raw address when symbol resolution failed.
            sym = i
        print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
    """perf callback: invoked once when tracing starts."""
    print "Starting trace (Ctrl-C to dump results)"

def trace_end():
    """perf callback: invoked at shutdown; resolves symbols and prints
    the aggregated drop report."""
    print "Gathering kallsyms data"
    get_kallsyms_table()
    print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    """perf handler for the skb:kfree_skb tracepoint: count drops per
    kernel location (EAFP: first hit initializes the counter)."""
    slocation = str(location)
    try:
        drop_log[slocation] = drop_log[slocation] + 1
    except:
        drop_log[slocation] = 1
| gpl-2.0 |
aabbox/kbengine | kbe/res/scripts/common/Lib/test/test_raise.py | 118 | 11344 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Tests for the raise statement."""
from test import support
import re
import sys
import types
import unittest
def get_tb():
    """Produce a fresh traceback object by raising and catching an OSError."""
    try:
        raise OSError()
    except OSError:
        return sys.exc_info()[2]
class Context:
    """Context manager whose __exit__ swallows any exception from the body."""

    def __enter__(self):
        # Expose the manager itself as the as-target.
        return self

    def __exit__(self, *exc_info):
        # A truthy return value tells the interpreter to suppress the
        # in-flight exception.
        return True
class TestRaise(unittest.TestCase):
    """Behaviour of bare `raise` and re-raise semantics."""

    def test_invalid_reraise(self):
        # A bare `raise` with no active exception is a RuntimeError.
        try:
            raise
        except RuntimeError as e:
            self.assertIn("No active exception", str(e))
        else:
            self.fail("No exception raised")

    def test_reraise(self):
        try:
            try:
                raise IndexError()
            except IndexError as e:
                exc1 = e
                raise
        except IndexError as exc2:
            # The re-raised exception is the very same object.
            self.assertTrue(exc1 is exc2)
        else:
            self.fail("No exception raised")

    def test_except_reraise(self):
        def reraise():
            try:
                raise TypeError("foo")
            except:
                try:
                    raise KeyError("caught")
                except KeyError:
                    pass
                # Re-raises the *outer* TypeError, not the handled KeyError.
                raise
        self.assertRaises(TypeError, reraise)

    def test_finally_reraise(self):
        def reraise():
            try:
                raise TypeError("foo")
            except:
                try:
                    raise KeyError("caught")
                finally:
                    # Inside `finally` the active exception is the KeyError.
                    raise
        self.assertRaises(KeyError, reraise)

    def test_nested_reraise(self):
        def nested_reraise():
            raise
        def reraise():
            try:
                raise TypeError("foo")
            except:
                # A bare raise works from a called function too.
                nested_reraise()
        self.assertRaises(TypeError, reraise)

    def test_raise_from_None(self):
        try:
            try:
                raise TypeError("foo")
            except:
                raise ValueError() from None
        except ValueError as e:
            # `from None` clears __cause__ but __context__ is still recorded.
            self.assertTrue(isinstance(e.__context__, TypeError))
            self.assertIsNone(e.__cause__)

    def test_with_reraise1(self):
        def reraise():
            try:
                raise TypeError("foo")
            except:
                with Context():
                    pass
                raise
        self.assertRaises(TypeError, reraise)

    def test_with_reraise2(self):
        def reraise():
            try:
                raise TypeError("foo")
            except:
                with Context():
                    # Swallowed by Context.__exit__; the bare raise below
                    # must still see the original TypeError.
                    raise KeyError("caught")
                raise
        self.assertRaises(TypeError, reraise)

    def test_yield_reraise(self):
        def reraise():
            try:
                raise TypeError("foo")
            except:
                yield 1
                raise
        g = reraise()
        next(g)
        self.assertRaises(TypeError, lambda: next(g))
        self.assertRaises(StopIteration, lambda: next(g))

    def test_erroneous_exception(self):
        # An exception whose __init__ itself raises propagates that error.
        class MyException(Exception):
            def __init__(self):
                raise RuntimeError()
        try:
            raise MyException
        except RuntimeError:
            pass
        else:
            self.fail("No exception raised")

    def test_new_returns_invalid_instance(self):
        # See issue #11627.
        class MyException(Exception):
            def __new__(cls, *args):
                return object()
        with self.assertRaises(TypeError):
            raise MyException

    def test_assert_with_tuple_arg(self):
        try:
            assert False, (3,)
        except AssertionError as e:
            self.assertEqual(str(e), "(3,)")
class TestCause(unittest.TestCase):
    """Semantics of `raise ... from ...`: __cause__ and __suppress_context__."""

    def testCauseSyntax(self):
        try:
            try:
                try:
                    raise TypeError
                except Exception:
                    raise ValueError from None
            except ValueError as exc:
                self.assertIsNone(exc.__cause__)
                self.assertTrue(exc.__suppress_context__)
                # __suppress_context__ is a writable attribute.
                exc.__suppress_context__ = False
                raise exc
        except ValueError as exc:
            e = exc
        self.assertIsNone(e.__cause__)
        self.assertFalse(e.__suppress_context__)
        self.assertIsInstance(e.__context__, TypeError)

    def test_invalid_cause(self):
        # Only exceptions (or None) are valid causes.
        try:
            raise IndexError from 5
        except TypeError as e:
            self.assertIn("exception cause", str(e))
        else:
            self.fail("No exception raised")

    def test_class_cause(self):
        # An exception *class* given as cause is instantiated.
        try:
            raise IndexError from KeyError
        except IndexError as e:
            self.assertIsInstance(e.__cause__, KeyError)
        else:
            self.fail("No exception raised")

    def test_instance_cause(self):
        # An exception *instance* given as cause is stored as-is.
        cause = KeyError()
        try:
            raise IndexError from cause
        except IndexError as e:
            self.assertTrue(e.__cause__ is cause)
        else:
            self.fail("No exception raised")

    def test_erroneous_cause(self):
        class MyException(Exception):
            def __init__(self):
                raise RuntimeError()
        try:
            raise IndexError from MyException
        except RuntimeError:
            pass
        else:
            self.fail("No exception raised")
class TestTraceback(unittest.TestCase):
    """__traceback__ assignment and Exception.with_traceback()."""

    def test_sets_traceback(self):
        try:
            raise IndexError()
        except IndexError as e:
            self.assertIsInstance(e.__traceback__, types.TracebackType)
        else:
            self.fail("No exception raised")

    def test_accepts_traceback(self):
        tb = get_tb()
        try:
            raise IndexError().with_traceback(tb)
        except IndexError as e:
            # The raise site is prepended, so tb becomes the second frame.
            self.assertNotEqual(e.__traceback__, tb)
            self.assertEqual(e.__traceback__.tb_next, tb)
        else:
            self.fail("No exception raised")
class TestContext(unittest.TestCase):
    """Implicit exception chaining via __context__."""

    def test_instance_context_instance_raise(self):
        context = IndexError()
        try:
            try:
                raise context
            except:
                raise OSError()
        except OSError as e:
            self.assertEqual(e.__context__, context)
        else:
            self.fail("No exception raised")

    def test_class_context_instance_raise(self):
        # Raising a class instantiates it, so __context__ is an instance,
        # not the class itself.
        context = IndexError
        try:
            try:
                raise context
            except:
                raise OSError()
        except OSError as e:
            self.assertNotEqual(e.__context__, context)
            self.assertIsInstance(e.__context__, context)
        else:
            self.fail("No exception raised")

    def test_class_context_class_raise(self):
        context = IndexError
        try:
            try:
                raise context
            except:
                raise OSError
        except OSError as e:
            self.assertNotEqual(e.__context__, context)
            self.assertIsInstance(e.__context__, context)
        else:
            self.fail("No exception raised")

    def test_c_exception_context(self):
        # Exceptions raised from C code (here ZeroDivisionError) chain too.
        try:
            try:
                1/0
            except:
                raise OSError
        except OSError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_c_exception_raise(self):
        # The NameError from the undefined name chains onto the C exception.
        try:
            try:
                1/0
            except:
                xyzzy
        except NameError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_noraise_finally(self):
        # No prior exception: __context__ stays None.
        try:
            try:
                pass
            finally:
                raise OSError
        except OSError as e:
            self.assertTrue(e.__context__ is None)
        else:
            self.fail("No exception raised")

    def test_raise_finally(self):
        # An exception raised in `finally` chains onto the in-flight one.
        try:
            try:
                1/0
            finally:
                raise OSError
        except OSError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_context_manager(self):
        # An exception raised inside __exit__ chains onto the body's one.
        class ContextManager:
            def __enter__(self):
                pass
            def __exit__(self, t, v, tb):
                xyzzy
        try:
            with ContextManager():
                1/0
        except NameError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_cycle_broken(self):
        # Self-cycles (when re-raising a caught exception) are broken
        try:
            try:
                1/0
            except ZeroDivisionError as e:
                raise e
        except ZeroDivisionError as e:
            self.assertTrue(e.__context__ is None, e.__context__)

    def test_reraise_cycle_broken(self):
        # Non-trivial context cycles (through re-raising a previous exception)
        # are broken too.
        try:
            try:
                xyzzy
            except NameError as a:
                try:
                    1/0
                except ZeroDivisionError:
                    raise a
        except NameError as e:
            self.assertTrue(e.__context__.__context__ is None)

    def test_3118(self):
        # deleting the generator caused the __context__ to be cleared
        def gen():
            try:
                yield 1
            finally:
                pass
        def f():
            g = gen()
            next(g)
            try:
                try:
                    raise ValueError
                except:
                    del g
                    raise KeyError
            except Exception as e:
                self.assertIsInstance(e.__context__, ValueError)
        f()

    def test_3611(self):
        # A re-raised exception in a __del__ caused the __context__
        # to be cleared
        class C:
            def __del__(self):
                try:
                    1/0
                except:
                    raise
        def f():
            x = C()
            try:
                try:
                    x.x
                except AttributeError:
                    del x
                    raise TypeError
            except Exception as e:
                self.assertNotEqual(e.__context__, None)
                self.assertIsInstance(e.__context__, AttributeError)
        # Silence the unraisable-error output from C.__del__.
        with support.captured_output("stderr"):
            f()
class TestRemovedFunctionality(unittest.TestCase):
    """Python 2 raise forms (tuples, strings) must fail with TypeError."""

    def test_tuples(self):
        try:
            raise (IndexError, KeyError)  # This should be a tuple!
        except TypeError:
            pass
        else:
            self.fail("No exception raised")

    def test_strings(self):
        try:
            raise "foo"
        except TypeError:
            pass
        else:
            self.fail("No exception raised")
def test_main():
    # Legacy entry point used by the old regrtest harness.
    support.run_unittest(__name__)

if __name__ == "__main__":
    unittest.main()
| lgpl-3.0 |
Lektorium-LLC/edx-platform | common/djangoapps/microsite_configuration/tests/test_logic.py | 24 | 1457 | """
Some additional unit tests for Microsite logic. The LMS covers some of the Microsite testing, this adds
some additional coverage
"""
import ddt
from mock import patch
from microsite_configuration.backends.base import BaseMicrositeBackend
from microsite_configuration.microsite import get_backend, get_value_for_org
from microsite_configuration.tests.tests import MICROSITE_BACKENDS, DatabaseMicrositeTestCase
@ddt.ddt
class TestMicrosites(DatabaseMicrositeTestCase):
    """
    Run through some Microsite logic
    """

    # Each test is run once per configured microsite backend via ddt.
    @ddt.data(*MICROSITE_BACKENDS)
    def test_get_value_for_org_when_microsite_has_no_org(self, site_backend):
        """
        Make sure default value is returned if there's no Microsite ORG match
        """
        with patch('microsite_configuration.microsite.BACKEND',
                   get_backend(site_backend, BaseMicrositeBackend)):
            value = get_value_for_org("BogusX", "university", "default_value")
            self.assertEquals(value, "default_value")

    @ddt.data(*MICROSITE_BACKENDS)
    def test_get_value_for_org(self, site_backend):
        """
        Make sure get_value_for_org return value of org if it present.
        """
        with patch('microsite_configuration.microsite.BACKEND',
                   get_backend(site_backend, BaseMicrositeBackend)):
            value = get_value_for_org("TestSiteX", "university", "default_value")
            self.assertEquals(value, "test_site")
| agpl-3.0 |
openstack/monasca-api | monasca_api/common/repositories/metrics_repository.py | 2 | 2340 | # (C) Copyright 2014,2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AbstractMetricsRepository(object):
    """Abstract interface that every metrics/alarm storage driver implements."""

    # Error text surfaced when a measurement/statistics query matches more
    # than one metric and the caller did not pass merge_metrics=True.
    MULTIPLE_METRICS_MESSAGE = ("Found multiple metrics matching metric name" +
                                " and dimensions. Please refine your search" +
                                " criteria using a unique" +
                                " metric name or additional dimensions." +
                                " Alternatively, you may specify" +
                                " 'merge_metrics=True' as a query" +
                                " parameter to combine all metrics" +
                                " matching search criteria into a single" +
                                " series.")

    @abc.abstractmethod
    def list_metrics(self, tenant_id, region, name, dimensions, offset, limit):
        """Return metric definitions matching the given filters."""
        pass

    @abc.abstractmethod
    def list_metric_names(self, tenant_id, region, dimensions):
        """Return the distinct metric names matching the given dimensions."""
        pass

    @abc.abstractmethod
    def measurement_list(self, tenant_id, region, name, dimensions,
                         start_timestamp, end_timestamp, offset, limit,
                         merge_metrics_flag,
                         group_by):
        """Return raw measurements in the given time window."""
        pass

    @abc.abstractmethod
    def metrics_statistics(self, tenant_id, region, name, dimensions,
                           start_timestamp, end_timestamp, statistics,
                           period, offset, limit, merge_metrics_flag,
                           group_by):
        """Return aggregated statistics over `period`-sized buckets."""
        pass

    @abc.abstractmethod
    def alarm_history(self, tenant_id, alarm_id_list,
                      offset, limit, start_timestamp, end_timestamp):
        """Return state-transition history for the given alarm ids."""
        pass

    @staticmethod
    @abc.abstractmethod
    def check_status():
        """Report backend health; used by the API's healthcheck endpoint."""
        pass
| apache-2.0 |
kawamon/hue | desktop/core/ext-py/Django-1.11.29/tests/urlpatterns_reverse/namespace_urls.py | 64 | 2555 | from django.conf.urls import include, url
from . import views
from .utils import URLObject
# URL trees with app/instance namespaces exercised by the reverse()/resolve()
# test-suite.  URLObject bundles (app_name, namespace) pairs.
testobj1 = URLObject('testapp', 'test-ns1')
testobj2 = URLObject('testapp', 'test-ns2')
default_testobj = URLObject('testapp', 'testapp')

otherobj1 = URLObject('nodefault', 'other-ns1')
otherobj2 = URLObject('nodefault', 'other-ns2')

newappobj1 = URLObject('newapp')

urlpatterns = [
    url(r'^normal/$', views.empty_view, name='normal-view'),
    url(r'^normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='normal-view'),
    url(r'^resolver_match/$', views.pass_resolver_match_view, name='test-resolver-match'),
    # Pattern containing regex metacharacters that must be escaped.
    url(r'^\+\\\$\*/$', views.empty_view, name='special-view'),
    url(r'^mixed_args/([0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='mixed-args'),
    url(r'^no_kwargs/([0-9]+)/([0-9]+)/$', views.empty_view, name='no-kwargs'),
    url(r'^view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.view_class_instance, name='view-class'),
    url(r'^unnamed/normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view),
    url(r'^unnamed/view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.view_class_instance),
    url(r'^test1/', include(testobj1.urls)),
    url(r'^test2/', include(testobj2.urls)),
    url(r'^default/', include(default_testobj.urls)),
    url(r'^other1/', include(otherobj1.urls)),
    url(r'^other[246]/', include(otherobj2.urls)),
    url(r'^newapp1/', include(newappobj1.app_urls, 'new-ns1')),
    url(r'^new-default/', include(newappobj1.app_urls)),
    url(r'^app-included[135]/', include('urlpatterns_reverse.included_app_urls', namespace='app-ns1')),
    url(r'^app-included2/', include('urlpatterns_reverse.included_app_urls', namespace='app-ns2')),
    url(r'^ns-included[135]/', include('urlpatterns_reverse.included_namespace_urls', namespace='inc-ns1')),
    url(r'^ns-included2/', include('urlpatterns_reverse.included_namespace_urls', namespace='inc-ns2')),
    url(r'^app-included/', include('urlpatterns_reverse.included_namespace_urls', 'inc-app', 'inc-app')),
    url(r'^included/', include('urlpatterns_reverse.included_namespace_urls')),
    url(r'^inc(?P<outer>[0-9]+)/', include('urlpatterns_reverse.included_urls', namespace='inc-ns5')),
    url(r'^included/([0-9]+)/', include('urlpatterns_reverse.included_namespace_urls')),
    url(
        r'^ns-outer/(?P<outer>[0-9]+)/',
        include('urlpatterns_reverse.included_namespace_urls', namespace='inc-outer')
    ),
    url(r'^\+\\\$\*/', include('urlpatterns_reverse.namespace_urls', namespace='special')),
]
| apache-2.0 |
larroy/mxnet | ci/docker/qemu/runtime_functions.py | 6 | 4673 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
"""Runtime functions to use in docker / testing"""
__author__ = 'Pedro Larroy'
__version__ = '0.1'
import os
import sys
import subprocess
import argparse
import logging
from subprocess import call, check_call, Popen, DEVNULL, PIPE
import time
import sys
import types
import glob
import vmcontrol
from vmcontrol import qemu_ssh, qemu_provision, qemu_rsync_to_host, VM
def activate_this(base):
    """Activate the virtualenv rooted at *base* inside this interpreter.

    Mirrors virtualenv's ``activate_this.py``: rebinds sys.prefix and moves
    newly-added entries to the front of sys.path.

    NOTE(review): ``site_packages`` is computed but never added to
    ``sys.path`` here — upstream virtualenv calls
    ``site.addsitedir(site_packages)`` at that point; a line may have been
    lost. Confirm against the original script.
    """
    import site
    import os
    import sys
    if sys.platform == 'win32':
        site_packages = os.path.join(base, 'Lib', 'site-packages')
    else:
        site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
    prev_sys_path = list(sys.path)

    sys.real_prefix = sys.prefix
    sys.prefix = base
    # Move the added items to the front of the path:
    new_sys_path = []
    for item in list(sys.path):
        if item not in prev_sys_path:
            new_sys_path.append(item)
            sys.path.remove(item)
    sys.path[:0] = new_sys_path
def run_ut_py3_qemu():
    """Run unit tests in the emulator and copy the results back to the host through the mounted
    volume in /mxnet"""
    from vmcontrol import VM
    with VM() as vm:
        # Copy the wheel/test tree into the VM, then drive the in-VM runner.
        qemu_provision(vm.ssh_port)
        logging.info("execute tests")
        qemu_ssh(vm.ssh_port, "./runtime_functions.py", "run_ut_python3_qemu_internal")
        # Pull the JUnit XML reports back out of the VM.
        qemu_rsync_to_host(vm.ssh_port, "*.xml", "mxnet")
        logging.info("copied to host")
        logging.info("tests finished, vm shutdown.")
        vm.shutdown()
def run_ut_python3_qemu_internal():
    """this runs inside the vm: install the built wheel plus test
    requirements, then run the unit tests with JUnit XML output."""
    pkg = glob.glob('mxnet_dist/*.whl')[0]
    logging.info("=== NOW Running inside QEMU ===")
    logging.info("PIP Installing %s", pkg)
    check_call(['sudo', 'pip3', 'install', pkg])
    logging.info("PIP Installing mxnet/test_requirements.txt")
    check_call(['sudo', 'pip3', 'install', '-r', 'mxnet/test_requirements.txt'])
    logging.info("Running tests in mxnet/tests/python/unittest/")
    check_call(['nosetests', '--with-timer', '--with-xunit', '--xunit-file', 'nosetests_unittest.xml', '--verbose', 'mxnet/tests/python/unittest/test_engine.py'])
    # Example to run a single unit test:
    # check_call(['nosetests', '--with-timer', '--with-xunit', '--xunit-file', 'nosetests_unittest.xml', '--verbose', 'mxnet/tests/python/unittest/test_ndarray.py:test_ndarray_fluent'])
def run_qemu_interactive():
    """Boot the QEMU VM attached to the current console for manual use."""
    vm = VM(interactive=True)
    # Detach so the VM outlives this helper; then block until it exits.
    vm.detach()
    vm.start()
    vm.wait()
    logging.info("QEMU finished")
################################
def parsed_args():
    """Parse the CLI: a function name followed by its positional arguments.

    Returns a (namespace, parser) tuple so callers can print usage help.
    """
    cli = argparse.ArgumentParser(
        description="""python runtime functions""", epilog="")
    cli.add_argument('command', nargs='*',
                     help="Name of the function to run with arguments")
    namespace = cli.parse_args()
    return (namespace, cli)
def script_name() -> str:
    """Return the basename of the script currently being executed."""
    return os.path.basename(sys.argv[0])
def chdir_to_script_directory():
    """chdir into the directory containing this script."""
    # We need to be in the same directory than the script so the commands in the dockerfiles work as
    # expected. But the script can be invoked from a different path
    base = os.path.split(os.path.realpath(__file__))[0]
    os.chdir(base)
def main():
    """Dispatch: run the top-level function named on the command line,
    passing the remaining CLI tokens as its arguments."""
    logging.getLogger().setLevel(logging.INFO)
    logging.basicConfig(format='{}: %(asctime)-15s %(message)s'.format(script_name()))
    chdir_to_script_directory()
    # Run function with name passed as argument
    (args, parser) = parsed_args()
    logging.info("%s", args.command)
    if args.command:
        fargs = args.command[1:]
        # Look up the named function in this module and invoke it.
        globals()[args.command[0]](*fargs)
        return 0
    else:
        parser.print_help()
        fnames = [x for x in globals() if type(globals()[x]) is types.FunctionType]
        print('\nAvailable functions: {}'.format(' '.join(fnames)))
        return 1

if __name__ == '__main__':
    sys.exit(main())
| apache-2.0 |
BadrYoubiIdrissi/TIPE-Algorithme-Genetique | Source/NEAT/test.py | 1 | 2640 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 12 11:36:14 2016
@author: Badr Youbi Idrissi
"""
import pygame
import pygame.gfxdraw
import numpy as np
from pygame.locals import *
from individu import Individu
from phenotype import Phenotype
from population import Population
from datadisplay import DataDisplay
import utilitaires as ut
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pygame.init()
# NOTE(review): `DOUBLEBUF and RESIZABLE` is a boolean short-circuit that
# evaluates to just RESIZABLE — `DOUBLEBUF | RESIZABLE` was probably intended.
screen = pygame.display.set_mode((860, 600), DOUBLEBUF and RESIZABLE)
pygame.display.set_caption("Test")
f = pygame.font.SysFont(pygame.font.get_default_font(), 20)
clock = pygame.time.Clock()

# Population of 10 genomes with nb_e inputs and nb_s outputs.
nb_e = 3
nb_s = 1
pop = Population(10, nb_e, nb_s)
pop.generer()

# On-screen status HUD fed by callbacks evaluated every frame.
status = DataDisplay((0,0), padding = 20)
status.add("FPS", lambda : clock.get_fps())
status.add("Current generation", lambda : pop.generationCount)
status.add("Number of species", lambda : len(pop.especes))
status.add("Best fitness", pop.getBestFitness)
status.add("Best shared fitness", pop.getBestSharedFitness)
status.add("Average fitness", lambda : pop.averageFitness)

evol = False  # toggled with the E key: evolve continuously while True
while True:
    clock.tick()
    screen.fill((255,255,255))
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            exit()
        elif event.type == KEYDOWN and event.key == K_UP:
            # Up arrow: 3D-plot the best network's response over [0,1]^2.
            nbPoints = 100
            X,Y = np.meshgrid(np.linspace(0,1,nbPoints),np.linspace(0,1,nbPoints))
            Z = np.zeros((nbPoints,nbPoints))
            for i in range(nbPoints):
                for j in range(nbPoints):
                    pop.best[-1].phenotype.evaluate(ut.entree('1;'+str(X[i,j])+';'+str(Y[i,j])))
                    Z[i,j] = pop.best[-1].output()
            fig = plt.figure()
            ax = fig.gca(projection='3d')
            surf = ax.plot_surface(X, Y, Z)
            plt.show()
        elif event.type == KEYDOWN and event.key == K_DOWN:
            # Down arrow: plot raw vs. shared fitness per individual.
            l = [pop.contenu[i].fitness for i in range(pop.length)]
            l2 = [pop.contenu[i].sharedFitness for i in range(pop.length)]
            plt.plot(range(pop.length), l)
            plt.plot(range(pop.length), l2)
            plt.show()
        elif event.type == KEYDOWN and event.key == K_e:
            evol = not(evol)
        elif event.type == VIDEORESIZE:
            pygame.display.set_mode((event.w, event.h), DOUBLEBUF and RESIZABLE)
    if evol:
        pop.evoluer()
        if (pop.generationCount % 10 == 0):
            pop.updateBest()
    pop.draw(status.police)
    status.draw()
    pygame.display.flip()
| gpl-3.0 |
mvdbeek/tools-iuc | deprecated/tools/gatk2/gatk2_wrapper.py | 29 | 7403 | #!/usr/bin/env python
# David Hoover, based on gatk by Dan Blankenberg
"""
A wrapper script for running the GenomeAnalysisTK.jar commands.
"""
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
from binascii import unhexlify
# Map Galaxy datatype extensions to the on-disk extensions GATK expects.
GALAXY_EXT_TO_GATK_EXT = { 'gatk_interval': 'intervals', 'bam_index': 'bam.bai', 'gatk_dbsnp': 'dbSNP', 'picard_interval_list': 'interval_list' }  # items not listed here will use the galaxy extension as-is
GALAXY_EXT_TO_GATK_FILE_TYPE = GALAXY_EXT_TO_GATK_EXT  # for now, these are the same, but could be different if needed
DEFAULT_GATK_PREFIX = "gatk_file"  # basename used when the caller gives no prefix
CHUNK_SIZE = 2**20  # 1mb
def cleanup_before_exit(tmp_dir):
    """Delete the temporary working directory, if one was created."""
    if not tmp_dir:
        return
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
def gatk_filename_from_galaxy( galaxy_filename, galaxy_ext, target_dir=None, prefix=None ):
    """
    Symlink *galaxy_filename* into *target_dir* under a GATK-friendly name
    ("<prefix>.<suffix>") and return the new path. Falls back to
    DEFAULT_GATK_PREFIX and the current working directory when prefix or
    target_dir are not given.
    """
    suffix = GALAXY_EXT_TO_GATK_EXT.get( galaxy_ext, galaxy_ext )
    name_prefix = DEFAULT_GATK_PREFIX if prefix is None else prefix
    destination = os.getcwd() if target_dir is None else target_dir
    gatk_filename = os.path.join( destination, "%s.%s" % ( name_prefix, suffix ) )
    os.symlink( galaxy_filename, gatk_filename )
    return gatk_filename
def gatk_filetype_argument_substitution( argument, galaxy_ext ):
    """
    Fill the %(file_type)s placeholder in *argument* with the GATK file type
    mapped from *galaxy_ext* (the extension itself when no mapping exists).
    """
    file_type = GALAXY_EXT_TO_GATK_FILE_TYPE.get( galaxy_ext, galaxy_ext )
    return argument % { 'file_type': file_type }
def open_file_from_option( filename, mode='rb' ):
    """
    Open *filename* with *mode* and return the file object, or None when no
    filename was supplied.
    """
    if not filename:
        return None
    return open( filename, mode=mode )
def html_report_from_directory( html_out, dir ):
    """
    Write a minimal HTML index to *html_out* with one link per file in *dir*,
    listed in sorted order.
    """
    html_out.write( '<html>\n<head>\n<title>Galaxy - GATK Output</title>\n</head>\n<body>\n<p/>\n<ul>\n' )
    entries = ( '<li><a href="%s">%s</a></li>\n' % ( name, name ) for name in sorted( os.listdir( dir ) ) )
    for entry in entries:
        html_out.write( entry )
    html_out.write( '</ul>\n</body>\n</html>\n' )
def index_bam_files( bam_filenames ):
    """
    Ensure every BAM file in *bam_filenames* has a ".bai" index next to it,
    invoking `samtools index` for any that are missing. On failure, samtools'
    stderr is echoed to our stderr and an Exception is raised; the temporary
    stderr capture file is always removed.
    """
    for bam_filename in bam_filenames:
        bam_index_filename = "%s.bai" % bam_filename
        if not os.path.exists( bam_index_filename ):
            # need to index this bam file
            stderr_name = tempfile.NamedTemporaryFile( prefix="bam_index_stderr" ).name
            command = 'samtools index %s %s' % ( bam_filename, bam_index_filename )
            try:
                subprocess.check_call( args=command, shell=True, stderr=open( stderr_name, 'wb' ) )
            except Exception:
                # was a bare `except:` — that would also swallow
                # KeyboardInterrupt/SystemExit, so narrow it to Exception
                for line in open( stderr_name ):
                    print >> sys.stderr, line
                raise Exception( "Error indexing BAM file" )
            finally:
                os.unlink( stderr_name )
def __main__():
    """
    Entry point: assemble a GATK java command line from the wrapper options,
    stage Galaxy datasets as appropriately named symlinks in a scratch
    directory, run the command there, and relay stdout/stderr and HTML
    reports back to Galaxy.
    """
    # Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-p', '--pass_through', dest='pass_through_options', action='append', type="string", help='These options are passed through directly to GATK, without any modification.' )
    parser.add_option( '-o', '--pass_through_options', dest='pass_through_options_encoded', action='append', type="string", help='These options are passed through directly to GATK, with decoding from binascii.unhexlify.' )
    parser.add_option( '-d', '--dataset', dest='datasets', action='append', type="string", nargs=4, help='"-argument" "original_filename" "galaxy_filetype" "name_prefix"' )
    parser.add_option( '', '--max_jvm_heap', dest='max_jvm_heap', action='store', type="string", default=None, help='If specified, the maximum java virtual machine heap size will be set to the provide value.' )
    parser.add_option( '', '--max_jvm_heap_fraction', dest='max_jvm_heap_fraction', action='store', type="int", default=None, help='If specified, the maximum java virtual machine heap size will be set to the provide value as a fraction of total physical memory.' )
    parser.add_option( '', '--stdout', dest='stdout', action='store', type="string", default=None, help='If specified, the output of stdout will be written to this file.' )
    parser.add_option( '', '--stderr', dest='stderr', action='store', type="string", default=None, help='If specified, the output of stderr will be written to this file.' )
    parser.add_option( '', '--html_report_from_directory', dest='html_report_from_directory', action='append', type="string", nargs=2, help='"Target HTML File" "Directory"')
    parser.add_option( '-e', '--phone_home', dest='phone_home', action='store', type="string", default='STANDARD', help='What kind of GATK run report should we generate(NO_ET|STANDARD|STDOUT)' )
    parser.add_option( '-K', '--gatk_key', dest='gatk_key', action='store', type="string", default=None, help='What kind of GATK run report should we generate(NO_ET|STANDARD|STDOUT)' )
    (options, args) = parser.parse_args()
    # base command = plain pass-through options followed by hex-decoded ones
    if options.pass_through_options:
        cmd = ' '.join( options.pass_through_options )
    else:
        cmd = ''
    if options.pass_through_options_encoded:
        cmd = '%s %s' % ( cmd, ' '.join( map( unhexlify, options.pass_through_options_encoded ) ) )
    # inject JVM heap settings right after the first 'java ' token
    if options.max_jvm_heap is not None:
        cmd = cmd.replace( 'java ', 'java -Xmx%s ' % ( options.max_jvm_heap ), 1 )
    elif options.max_jvm_heap_fraction is not None:
        cmd = cmd.replace( 'java ', 'java -XX:DefaultMaxRAMFraction=%s -XX:+UseParallelGC ' % ( options.max_jvm_heap_fraction ), 1 )
    bam_filenames = []
    tmp_dir = tempfile.mkdtemp( prefix='tmp-gatk-' )
    try:
        # symlink each dataset into the scratch dir and extend the command line;
        # fasta inputs additionally get a samtools faidx index and a Picard dict
        if options.datasets:
            for ( dataset_arg, filename, galaxy_ext, prefix ) in options.datasets:
                gatk_filename = gatk_filename_from_galaxy( filename, galaxy_ext, target_dir=tmp_dir, prefix=prefix )
                if dataset_arg:
                    cmd = '%s %s "%s"' % ( cmd, gatk_filetype_argument_substitution( dataset_arg, galaxy_ext ), gatk_filename )
                if galaxy_ext == "bam":
                    bam_filenames.append( gatk_filename )
                if galaxy_ext == 'fasta':
                    subprocess.check_call( 'samtools faidx "%s"' % gatk_filename, shell=True )
                    subprocess.check_call( 'java -jar %s R=%s O=%s QUIET=true' % ( os.path.join(os.environ['JAVA_JAR_PATH'], 'CreateSequenceDictionary.jar'), gatk_filename, os.path.splitext(gatk_filename)[0] + '.dict' ), shell=True )
        index_bam_files( bam_filenames )
        # set up stdout and stderr output options
        stdout = open_file_from_option( options.stdout, mode='wb' )
        stderr = open_file_from_option( options.stderr, mode='wb' )
        # if no stderr file is specified, we'll use our own
        if stderr is None:
            stderr = tempfile.NamedTemporaryFile( prefix="gatk-stderr-", dir=tmp_dir )
        proc = subprocess.Popen( args=cmd, stdout=stdout, stderr=stderr, shell=True, cwd=tmp_dir )
        return_code = proc.wait()
        # echo captured stderr to our stderr on failure, stdout on success
        if return_code:
            stderr_target = sys.stderr
        else:
            stderr_target = sys.stdout
        stderr.flush()
        stderr.seek(0)
        # stream the captured stderr back in 1 MB chunks
        while True:
            chunk = stderr.read( CHUNK_SIZE )
            if chunk:
                stderr_target.write( chunk )
            else:
                break
        stderr.close()
    finally:
        cleanup_before_exit( tmp_dir )
    # generate html reports
    if options.html_report_from_directory:
        for ( html_filename, html_dir ) in options.html_report_from_directory:
            html_report_from_directory( open( html_filename, 'wb' ), html_dir )
if __name__ == "__main__":
    __main__()
| mit |
harisibrahimkv/django | tests/template_tests/syntax_tests/test_width_ratio.py | 56 | 5956 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class WidthRatioTagTests(SimpleTestCase):
    """
    Tests for the ``{% widthratio %}`` template tag: basic scaling and
    rounding, zero/None/non-numeric argument handling, a variable max-width,
    the ``as variable`` assignment form, and syntax-error cases.
    """
    libraries = {'custom': 'template_tests.templatetags.custom'}
    @setup({'widthratio01': '{% widthratio a b 0 %}'})
    def test_widthratio01(self):
        output = self.engine.render_to_string('widthratio01', {'a': 50, 'b': 100})
        self.assertEqual(output, '0')
    @setup({'widthratio02': '{% widthratio a b 100 %}'})
    def test_widthratio02(self):
        output = self.engine.render_to_string('widthratio02', {'a': 0, 'b': 0})
        self.assertEqual(output, '0')
    @setup({'widthratio03': '{% widthratio a b 100 %}'})
    def test_widthratio03(self):
        output = self.engine.render_to_string('widthratio03', {'a': 0, 'b': 100})
        self.assertEqual(output, '0')
    @setup({'widthratio04': '{% widthratio a b 100 %}'})
    def test_widthratio04(self):
        output = self.engine.render_to_string('widthratio04', {'a': 50, 'b': 100})
        self.assertEqual(output, '50')
    @setup({'widthratio05': '{% widthratio a b 100 %}'})
    def test_widthratio05(self):
        output = self.engine.render_to_string('widthratio05', {'a': 100, 'b': 100})
        self.assertEqual(output, '100')
    @setup({'widthratio06': '{% widthratio a b 100 %}'})
    def test_widthratio06(self):
        """
        62.5 should round to 62
        """
        output = self.engine.render_to_string('widthratio06', {'a': 50, 'b': 80})
        self.assertEqual(output, '62')
    @setup({'widthratio07': '{% widthratio a b 100 %}'})
    def test_widthratio07(self):
        """
        71.4 should round to 71
        """
        output = self.engine.render_to_string('widthratio07', {'a': 50, 'b': 70})
        self.assertEqual(output, '71')
    # Raise exception if we don't have 3 args, last one an integer
    @setup({'widthratio08': '{% widthratio %}'})
    def test_widthratio08(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('widthratio08')
    @setup({'widthratio09': '{% widthratio a b %}'})
    def test_widthratio09(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('widthratio09', {'a': 50, 'b': 100})
    @setup({'widthratio10': '{% widthratio a b 100.0 %}'})
    def test_widthratio10(self):
        # a float max_width is accepted
        output = self.engine.render_to_string('widthratio10', {'a': 50, 'b': 100})
        self.assertEqual(output, '50')
    @setup({'widthratio11': '{% widthratio a b c %}'})
    def test_widthratio11(self):
        """
        #10043: widthratio should allow max_width to be a variable
        """
        output = self.engine.render_to_string('widthratio11', {'a': 50, 'c': 100, 'b': 100})
        self.assertEqual(output, '50')
    # #18739: widthratio should handle None args consistently with
    # non-numerics
    @setup({'widthratio12a': '{% widthratio a b c %}'})
    def test_widthratio12a(self):
        output = self.engine.render_to_string('widthratio12a', {'a': 'a', 'c': 100, 'b': 100})
        self.assertEqual(output, '')
    @setup({'widthratio12b': '{% widthratio a b c %}'})
    def test_widthratio12b(self):
        output = self.engine.render_to_string('widthratio12b', {'a': None, 'c': 100, 'b': 100})
        self.assertEqual(output, '')
    @setup({'widthratio13a': '{% widthratio a b c %}'})
    def test_widthratio13a(self):
        output = self.engine.render_to_string('widthratio13a', {'a': 0, 'c': 100, 'b': 'b'})
        self.assertEqual(output, '')
    @setup({'widthratio13b': '{% widthratio a b c %}'})
    def test_widthratio13b(self):
        output = self.engine.render_to_string('widthratio13b', {'a': 0, 'c': 100, 'b': None})
        self.assertEqual(output, '')
    # a non-numeric max_width is an error, unlike non-numeric value/max_value
    @setup({'widthratio14a': '{% widthratio a b c %}'})
    def test_widthratio14a(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('widthratio14a', {'a': 0, 'c': 'c', 'b': 100})
    @setup({'widthratio14b': '{% widthratio a b c %}'})
    def test_widthratio14b(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('widthratio14b', {'a': 0, 'c': None, 'b': 100})
    @setup({'widthratio15': '{% load custom %}{% widthratio a|noop:"x y" b 0 %}'})
    def test_widthratio15(self):
        """
        Test whitespace in filter argument
        """
        output = self.engine.render_to_string('widthratio15', {'a': 50, 'b': 100})
        self.assertEqual(output, '0')
    # Widthratio with variable assignment
    @setup({'widthratio16': '{% widthratio a b 100 as variable %}-{{ variable }}-'})
    def test_widthratio16(self):
        output = self.engine.render_to_string('widthratio16', {'a': 50, 'b': 100})
        self.assertEqual(output, '-50-')
    @setup({'widthratio17': '{% widthratio a b 100 as variable %}-{{ variable }}-'})
    def test_widthratio17(self):
        output = self.engine.render_to_string('widthratio17', {'a': 100, 'b': 100})
        self.assertEqual(output, '-100-')
    @setup({'widthratio18': '{% widthratio a b 100 as %}'})
    def test_widthratio18(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('widthratio18')
    @setup({'widthratio19': '{% widthratio a b 100 not_as variable %}'})
    def test_widthratio19(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('widthratio19')
    # infinite values render as empty output rather than raising
    @setup({'widthratio20': '{% widthratio a b 100 %}'})
    def test_widthratio20(self):
        output = self.engine.render_to_string('widthratio20', {'a': float('inf'), 'b': float('inf')})
        self.assertEqual(output, '')
    @setup({'widthratio21': '{% widthratio a b 100 %}'})
    def test_widthratio21(self):
        output = self.engine.render_to_string('widthratio21', {'a': float('inf'), 'b': 2})
        self.assertEqual(output, '')
| bsd-3-clause |
cnsoft/kbengine-cocos2dx | kbe/res/scripts/common/Lib/sqlite3/test/hooks.py | 4 | 7346 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/test/hooks.py: tests for various SQLite-specific hooks
#
# Copyright (C) 2006-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import unittest
import sqlite3 as sqlite
class CollationTests(unittest.TestCase):
    """
    Checks for Connection.create_collation(): argument validation, use of a
    registered collation in ORDER BY, large return values from the comparator,
    re-registration under the same name, and deregistration via None.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def CheckCreateCollationNotCallable(self):
        # a non-callable comparator must be rejected with TypeError
        con = sqlite.connect(":memory:")
        try:
            con.create_collation("X", 42)
            self.fail("should have raised a TypeError")
        except TypeError as e:
            self.assertEqual(e.args[0], "parameter must be callable")
    def CheckCreateCollationNotAscii(self):
        # non-ASCII collation names are not accepted
        con = sqlite.connect(":memory:")
        try:
            con.create_collation("collä", lambda x, y: (x > y) - (x < y))
            self.fail("should have raised a ProgrammingError")
        except sqlite.ProgrammingError as e:
            pass
    @unittest.skipIf(sqlite.sqlite_version_info < (3, 2, 1),
                     'old SQLite versions crash on this test')
    def CheckCollationIsUsed(self):
        def mycoll(x, y):
            # reverse order
            return -((x > y) - (x < y))
        con = sqlite.connect(":memory:")
        con.create_collation("mycoll", mycoll)
        sql = """
            select x from (
                select 'a' as x
                union
                select 'b' as x
                union
                select 'c' as x
            ) order by x collate mycoll
            """
        result = con.execute(sql).fetchall()
        if result[0][0] != "c" or result[1][0] != "b" or result[2][0] != "a":
            self.fail("the expected order was not returned")
        # passing None deregisters the collation; using it afterwards must fail
        con.create_collation("mycoll", None)
        try:
            result = con.execute(sql).fetchall()
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError as e:
            self.assertEqual(e.args[0].lower(), "no such collation sequence: mycoll")
    def CheckCollationReturnsLargeInteger(self):
        # comparator results outside the C int range must still sort correctly
        def mycoll(x, y):
            # reverse order
            return -((x > y) - (x < y)) * 2**32
        con = sqlite.connect(":memory:")
        con.create_collation("mycoll", mycoll)
        sql = """
            select x from (
                select 'a' as x
                union
                select 'b' as x
                union
                select 'c' as x
            ) order by x collate mycoll
            """
        result = con.execute(sql).fetchall()
        self.assertEqual(result, [('c',), ('b',), ('a',)],
                         msg="the expected order was not returned")
    def CheckCollationRegisterTwice(self):
        """
        Register two different collation functions under the same name.
        Verify that the last one is actually used.
        """
        con = sqlite.connect(":memory:")
        con.create_collation("mycoll", lambda x, y: (x > y) - (x < y))
        con.create_collation("mycoll", lambda x, y: -((x > y) - (x < y)))
        result = con.execute("""
            select x from (select 'a' as x union select 'b' as x) order by x collate mycoll
            """).fetchall()
        if result[0][0] != 'b' or result[1][0] != 'a':
            self.fail("wrong collation function is used")
    def CheckDeregisterCollation(self):
        """
        Register a collation, then deregister it. Make sure an error is raised if we try
        to use it.
        """
        con = sqlite.connect(":memory:")
        con.create_collation("mycoll", lambda x, y: (x > y) - (x < y))
        con.create_collation("mycoll", None)
        try:
            con.execute("select 'a' as x union select 'b' as x order by x collate mycoll")
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError as e:
            if not e.args[0].startswith("no such collation sequence"):
                self.fail("wrong OperationalError raised")
class ProgressTests(unittest.TestCase):
    """
    Checks for Connection.set_progress_handler(): that the handler fires, that
    the opcode interval is respected, that a non-zero return aborts the
    statement, and that passing None clears the handler.
    """
    def CheckProgressHandlerUsed(self):
        """
        Test that the progress handler is invoked once it is set.
        """
        con = sqlite.connect(":memory:")
        progress_calls = []
        def progress():
            progress_calls.append(None)
            return 0
        con.set_progress_handler(progress, 1)
        con.execute("""
            create table foo(a, b)
            """)
        self.assertTrue(progress_calls)
    def CheckOpcodeCount(self):
        """
        Test that the opcode argument is respected.
        """
        con = sqlite.connect(":memory:")
        progress_calls = []
        def progress():
            progress_calls.append(None)
            return 0
        con.set_progress_handler(progress, 1)
        curs = con.cursor()
        curs.execute("""
            create table foo (a, b)
            """)
        first_count = len(progress_calls)
        progress_calls = []
        # doubling the opcode interval should roughly halve the call count
        con.set_progress_handler(progress, 2)
        curs.execute("""
            create table bar (a, b)
            """)
        second_count = len(progress_calls)
        self.assertTrue(first_count > second_count)
    def CheckCancelOperation(self):
        """
        Test that returning a non-zero value stops the operation in progress.
        """
        con = sqlite.connect(":memory:")
        progress_calls = []
        def progress():
            progress_calls.append(None)
            return 1
        con.set_progress_handler(progress, 1)
        curs = con.cursor()
        self.assertRaises(
            sqlite.OperationalError,
            curs.execute,
            "create table bar (a, b)")
    def CheckClearHandler(self):
        """
        Test that setting the progress handler to None clears the previously set handler.
        """
        con = sqlite.connect(":memory:")
        action = 0
        def progress():
            nonlocal action
            action = 1
            return 0
        con.set_progress_handler(progress, 1)
        con.set_progress_handler(None, 1)
        con.execute("select 1 union select 2 union select 3").fetchall()
        self.assertEqual(action, 0, "progress handler was not cleared")
def suite():
    """Collect every Check* method of both hook test classes into one suite."""
    combined = unittest.TestSuite()
    for test_class in (CollationTests, ProgressTests):
        combined.addTest(unittest.makeSuite(test_class, "Check"))
    return combined
def test():
    """Run the combined hook test suite with a console text runner."""
    unittest.TextTestRunner().run(suite())
if __name__ == "__main__":
    test()
| lgpl-3.0 |
tu-darmstadt-ros-pkg/hector_flexbe_behavior | behaviors/behavior_pathdrivemission/src/behavior_pathdrivemission/pathdrivemission_sm.py | 1 | 2883 | #!/usr/bin/env python
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
import roslib; roslib.load_manifest('behavior_pathdrivemission')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from hector_flexbe_states.create_path import CreatePath
from hector_flexbe_states.invert_path import InvertPath
from hector_flexbe_states.move_along_path import MoveAlongPath
from hector_flexbe_states.sparse_path import SparsePath
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
from geometry_msgs.msg import PoseStamped
# [/MANUAL_IMPORT]
'''
Created on Thu Jun 02 2016
@author: Gabriel, Elisa
'''
class PathDriveMissionSM(Behavior):
    '''
    Robot moves along a given path
    '''
    # NOTE: this file is FlexBE-generated; hand edits outside the [MANUAL] tags
    # may be overwritten when the behavior is regenerated.
    def __init__(self):
        super(PathDriveMissionSM, self).__init__()
        self.name = 'PathDriveMission'
        # parameters of this behavior
        # references to used behaviors
        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]
        # [/MANUAL_INIT]
        # Behavior comments:
    def create(self):
        # State flow: Create_Path -> Invert_Path -> Sparse_Path -> Move_Along_Path
        # x:52 y:481, x:179 y:505
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
        # drive speed in m/s — presumably; confirm against MoveAlongPath docs
        _state_machine.userdata.speed = 0.2
        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]
        # [/MANUAL_CREATE]
        with _state_machine:
            # x:169 y:61
            OperatableStateMachine.add('Create_Path',
                                       CreatePath(),
                                       transitions={'succeeded': 'Invert_Path', 'retry': 'Create_Path'},
                                       autonomy={'succeeded': Autonomy.Off, 'retry': Autonomy.Off},
                                       remapping={'path': 'path'})
            # x:309 y:56
            OperatableStateMachine.add('Invert_Path',
                                       InvertPath(),
                                       transitions={'reached': 'Sparse_Path', 'failed': 'failed'},
                                       autonomy={'reached': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'path': 'path'})
            # x:670 y:162
            OperatableStateMachine.add('Move_Along_Path',
                                       MoveAlongPath(),
                                       transitions={'reached': 'finished', 'failed': 'failed'},
                                       autonomy={'reached': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'path': 'path', 'speed': 'speed'})
            # x:482 y:64
            OperatableStateMachine.add('Sparse_Path',
                                       SparsePath(max_dist=.2, max_angle=.2, min_dist=.1),
                                       transitions={'done': 'Move_Along_Path'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'path': 'path'})
        return _state_machine
    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]
    # [/MANUAL_FUNC]
| bsd-3-clause |
borysiasty/QGIS | python/plugins/processing/algs/qgis/KNearestConcaveHull.py | 30 | 21491 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
KNearestConcaveHull.py
----------------------
Date : November 2014
Copyright : (C) 2014 by Detlev Neumann
Dr. Neumann Consulting - Geospatial Services
Email : dneumann@geospatial-services.de
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Detlev Neumann'
__date__ = 'November 2014'
__copyright__ = '(C) 2014, Detlev Neumann'
import os.path
import math
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsApplication,
QgsExpression,
QgsFeature,
QgsFeatureRequest,
QgsFeatureSink,
QgsField,
QgsFields,
QgsGeometry,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsPoint,
QgsPointXY,
QgsWkbTypes)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class KNearestConcaveHull(QgisAlgorithm):
    # Processing parameter names
    KNEIGHBORS = 'KNEIGHBORS'
    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    FIELD = 'FIELD'
    def name(self):
        return 'knearestconcavehull'
    def displayName(self):
        return self.tr('Concave hull (k-nearest neighbor)')
    def shortDescription(self):
        return self.tr('Creates a concave hull using the k-nearest neighbor algorithm.')
    def icon(self):
        return QgsApplication.getThemeIcon("/algorithms/mAlgorithmConcaveHull.svg")
    def svgIconPath(self):
        return QgsApplication.iconPath("/algorithms/mAlgorithmConcaveHull.svg")
    def group(self):
        return self.tr('Vector geometry')
    def groupId(self):
        return 'vectorgeometry'
    def __init__(self):
        super().__init__()
    def initAlgorithm(self, config=None):
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterNumber(self.KNEIGHBORS,
                                                       self.tr('Number of neighboring points to consider (a lower number is more concave, a higher number is smoother)'),
                                                       QgsProcessingParameterNumber.Integer,
                                                       defaultValue=3, minValue=3))
        self.addParameter(QgsProcessingParameterField(self.FIELD,
                                                      self.tr('Field (set if creating concave hulls by class)'),
                                                      parentLayerParameterName=self.INPUT, optional=True))
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Concave hull'),
                                                            QgsProcessing.TypeVectorPolygon))
    def processAlgorithm(self, parameters, context, feedback):
        """
        Build one concave hull polygon per value of the optional grouping
        field (or a single hull for the whole layer when no field is set)
        and write the polygons to the output sink.
        """
        # Get variables from dialog
        source = self.parameterAsSource(parameters, self.INPUT, context)
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
        field_name = self.parameterAsString(parameters, self.FIELD, context)
        kneighbors = self.parameterAsInt(parameters, self.KNEIGHBORS, context)
        use_field = bool(field_name)
        field_index = -1
        fields = QgsFields()
        fields.append(QgsField('id', QVariant.Int, '', 20))
        current = 0
        # Get properties of the field the grouping is based on
        if use_field:
            field_index = source.fields().lookupField(field_name)
            if field_index >= 0:
                fields.append(source.fields()[field_index]) # Add a field with the name of the grouping field
                # Initialize writer
                (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                                       fields, QgsWkbTypes.Polygon, source.sourceCrs())
                if sink is None:
                    raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
                success = False
                fid = 0
                # Get unique values of grouping field
                unique_values = source.uniqueValues(field_index)
                # NOTE(review): unlike the ungrouped branch below, this division
                # is not guarded against a zero product — confirm an empty
                # source cannot reach this point.
                total = 100.0 / float(source.featureCount() * len(unique_values))
                for unique in unique_values:
                    points = []
                    filter = QgsExpression.createFieldEqualityExpression(field_name, unique)
                    request = QgsFeatureRequest().setFilterExpression(filter)
                    request.setSubsetOfAttributes([])
                    # Get features with the grouping attribute equal to the current grouping value
                    features = source.getFeatures(request)
                    for in_feature in features:
                        if feedback.isCanceled():
                            break
                        # Add points or vertices of more complex geometry
                        points.extend(extract_points(in_feature.geometry()))
                        current += 1
                        feedback.setProgress(int(current * total))
                    # A minimum of 3 points is necessary to proceed
                    if len(points) >= 3:
                        out_feature = QgsFeature()
                        the_hull = concave_hull(points, kneighbors)
                        if the_hull:
                            vertex = [QgsPointXY(point[0], point[1]) for point in the_hull]
                            poly = QgsGeometry().fromPolygonXY([vertex])
                            out_feature.setGeometry(poly)
                            # Give the polygon the same attribute as the point grouping attribute
                            out_feature.setAttributes([fid, unique])
                            sink.addFeature(out_feature, QgsFeatureSink.FastInsert)
                            success = True # at least one polygon created
                    fid += 1
                if not success:
                    raise QgsProcessingException('No hulls could be created. Most likely there were not at least three unique points in any of the groups.')
            else:
                # Field parameter provided but can't read from it
                raise QgsProcessingException('Unable to find grouping field')
        else:
            # Not grouped by field
            # Initialize writer
            (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                                   fields, QgsWkbTypes.Polygon, source.sourceCrs())
            if sink is None:
                raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
            points = []
            request = QgsFeatureRequest()
            request.setSubsetOfAttributes([])
            features = source.getFeatures(request) # Get all features
            total = 100.0 / source.featureCount() if source.featureCount() else 0
            for in_feature in features:
                if feedback.isCanceled():
                    break
                # Add points or vertices of more complex geometry
                points.extend(extract_points(in_feature.geometry()))
                current += 1
                feedback.setProgress(int(current * total))
            # A minimum of 3 points is necessary to proceed
            if len(points) >= 3:
                out_feature = QgsFeature()
                the_hull = concave_hull(points, kneighbors)
                if the_hull:
                    vertex = [QgsPointXY(point[0], point[1]) for point in the_hull]
                    poly = QgsGeometry().fromPolygonXY([vertex])
                    out_feature.setGeometry(poly)
                    out_feature.setAttributes([0])
                    sink.addFeature(out_feature, QgsFeatureSink.FastInsert)
                else:
                    # the_hull returns None only when there are less than three points after cleaning
                    raise QgsProcessingException('At least three unique points are required to create a concave hull.')
            else:
                raise QgsProcessingException('At least three points are required to create a concave hull.')
        return {self.OUTPUT: dest_id}
def clean_list(list_of_points):
    """
    Return *list_of_points* with duplicate points removed. The resulting
    order follows set iteration, exactly as before.
    """
    unique_points = set(list_of_points)
    return [point for point in unique_points]
def find_min_y_point(list_of_points):
    """
    Return the point of *list_of_points* with the smallest y-coordinate;
    ties on y are broken by the smaller x-coordinate.
    :param list_of_points: list of tuples
    :return: tuple (x, y)
    """
    return min(list_of_points, key=lambda point: (point[1], point[0]))
def add_point(vector, element):
    """
    Append *element* to *vector* in place and return the same list.
    """
    vector += [element]
    return vector
def remove_point(vector, element):
    """
    Delete the first occurrence of *element* from *vector* in place and
    return the same list. Raises ValueError when *element* is absent.
    """
    vector.remove(element)
    return vector
def euclidean_distance(point1, point2):
    """
    Return the Euclidean distance between the two given points.
    :param point1: tuple (x, y)
    :param point2: tuple (x, y)
    :return: float
    """
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return math.sqrt(dx * dx + dy * dy)
def nearest_points(list_of_points, point, k):
    """
    Return the k points from *list_of_points* closest to *point*, measured by
    Euclidean distance. Note: the points themselves are returned, not their
    indices (the previous docstring incorrectly said "indices"). Internally,
    k becomes the minimum between the given value and the number of points.
    :param list_of_points: list of tuples
    :param point: tuple (x, y)
    :param k: integer
    :return: list of at most k tuples
    """
    # sorted() is stable, so equally distant points keep input order — the
    # same tie-breaking the previous (distance, index) sort produced. The
    # distance formula is kept identical to euclidean_distance().
    ranked = sorted(
        list_of_points,
        key=lambda candidate: math.sqrt(
            math.pow(candidate[0] - point[0], 2) + math.pow(candidate[1] - point[1], 2)
        ),
    )
    return ranked[:min(k, len(list_of_points))]
def angle(from_point, to_point):
    """
    Return the direction of the segment from *from_point* to *to_point* as
    radians in (-pi, pi]: 0 points east, positive values point north, and the
    magnitude grows to pi towards the west.
    :param from_point: tuple (x, y)
    :param to_point: tuple (x, y)
    :return: float
    """
    dy = to_point[1] - from_point[1]
    dx = to_point[0] - from_point[0]
    return math.atan2(dy, dx)
def angle_difference(angle1, angle2):
    """
    Calculates the difference between the given angles in clockwise direction as radians.
    :param angle1: float
    :param angle2: float
    :return: float; between 0 and 2*Pi
    """
    # The branch cascade below is equivalent to normalizing (angle1 - angle2)
    # into [0, 2*pi) for ordinary inputs, but differs at exact boundaries
    # (e.g. angle1 == pi, angle2 == -pi yields 2*pi here, not 0), so the
    # branches are kept exactly as written — the order is load-bearing.
    if (angle1 > 0 and angle2 >= 0) and angle1 > angle2:
        return abs(angle1 - angle2)
    elif (angle1 >= 0 and angle2 > 0) and angle1 < angle2:
        return 2 * math.pi + angle1 - angle2
    elif (angle1 < 0 and angle2 <= 0) and angle1 < angle2:
        return 2 * math.pi + angle1 + abs(angle2)
    elif (angle1 <= 0 and angle2 < 0) and angle1 > angle2:
        return abs(angle1 - angle2)
    elif angle1 <= 0 < angle2:
        return 2 * math.pi + angle1 - angle2
    elif angle1 >= 0 >= angle2:
        return angle1 + abs(angle2)
    else:
        # remaining cases are equal angles: no rotation
        return 0
def intersect(line1, line2):
    """
    Return True if the two given line segments intersect each other, and
    False otherwise (parallel/collinear segments are always reported as
    non-intersecting, as before).
    :param line1: 2-tuple of tuple (x, y)
    :param line2: 2-tuple of tuple (x, y)
    :return: boolean
    """
    (p1, p2), (p3, p4) = line1, line2
    # implicit line equations a*x + b*y = c for both segments
    a1 = p2[1] - p1[1]
    b1 = p1[0] - p2[0]
    c1 = a1 * p1[0] + b1 * p1[1]
    a2 = p4[1] - p3[1]
    b2 = p3[0] - p4[0]
    c2 = a2 * p3[0] + b2 * p3[1]
    denom = a1 * b2 - a2 * b1
    if denom == 0:
        # parallel or collinear: treated as no intersection
        return False
    def outside(value, end_a, end_b, end_c, end_d):
        # True when the intersection coordinate lies strictly beyond both
        # endpoints of either segment along this axis
        return ((value > end_a and value > end_b) or (value > end_c and value > end_d)
                or (value < end_a and value < end_b) or (value < end_c and value < end_d))
    sx = (c1 * b2 - c2 * b1) / denom
    if outside(sx, p1[0], p2[0], p3[0], p4[0]):
        return False
    sy = (a1 * c2 - a2 * c1) / denom
    if outside(sy, p1[1], p2[1], p3[1], p4[1]):
        return False
    return True
def point_in_polygon_q(point, list_of_points):
    """
    Return True if given point *point* is laying in the polygon described by the vertices *list_of_points*,
    otherwise False
    Based on the "Ray Casting Method" described by Joel Lawhead in this blog article:
    http://geospatialpython.com/2011/01/point-in-polygon.html
    """
    x = point[0]
    y = point[1]
    poly = [(pt[0], pt[1]) for pt in list_of_points]
    n = len(poly)
    inside = False
    p1x, p1y = poly[0]
    # walk every edge; the polygon is implicitly closed via the `% n` wrap
    for i in range(n + 1):
        p2x, p2y = poly[i % n]
        if y > min(p1y, p2y):
            if y <= max(p1y, p2y):
                if x <= max(p1x, p2x):
                    if p1y != p2y:
                        # x-coordinate where the horizontal ray crosses this edge
                        xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                    # an odd number of crossings toggles us inside/outside
                    if p1x == p2x or x <= xints:
                        inside = not inside
        p1x, p1y = p2x, p2y
    return inside
def extract_points(geom):
    """
    Generate list of QgsPoints from QgsGeometry *geom* ( can be point, line, or polygon )
    Code taken from fTools plugin
    :param geom: an arbitrary geometry feature
    :return: list of points
    """
    multi_geom = QgsGeometry()
    temp_geom = []
    # point geometry
    if geom.type() == 0:
        if geom.isMultipart():
            temp_geom = geom.asMultiPoint()
        else:
            temp_geom.append(geom.asPoint())
    # line geometry
    # NOTE: the `elif` for polygons below chains off this *line* check, not
    # the point check above — the structure works but is easy to misread.
    if geom.type() == 1:
        # if multipart feature explode to single part
        if geom.isMultipart():
            multi_geom = geom.asMultiPolyline()
            for i in multi_geom:
                temp_geom.extend(i)
        else:
            temp_geom = geom.asPolyline()
    # polygon geometry
    elif geom.type() == 2:
        # if multipart feature explode to single part
        if geom.isMultipart():
            multi_geom = geom.asMultiPolygon()
            # now single part polygons
            for i in multi_geom:
                # explode to line segments
                for j in i:
                    temp_geom.extend(j)
        else:
            multi_geom = geom.asPolygon()
            # explode to line segments
            for i in multi_geom:
                temp_geom.extend(i)
    return temp_geom
def sort_by_angle(list_of_points, last_point, last_angle):
    """
    Return the points of *list_of_points* in descending order of the angle
    they make with the last segment of the envelope, measured clockwise.
    The rightmost neighbouring point therefore comes first; it is the next
    point of the envelope.
    """
    def clockwise_turn(candidate):
        # angular difference between the previous segment's heading and the
        # heading towards the candidate point
        return angle_difference(last_angle, angle(last_point, candidate))

    return sorted(list_of_points, key=clockwise_turn, reverse=True)
def concave_hull(points_list, k):
    """
    Calculates a valid concave hull polygon containing all given points. The algorithm searches for that
    point in the neighborhood of k nearest neighbors which maximizes the rotation angle in clockwise direction
    without intersecting any previous line segments.
    This is an implementation of the algorithm described by Adriano Moreira and Maribel Yasmina Santos:
    CONCAVE HULL: A neighborhood_k-NEAREST NEIGHBORS APPROACH FOR THE COMPUTATION OF THE REGION OCCUPIED BY A SET OF POINTS.
    GRAPP 2007 - International Conference on Computer Graphics Theory and Applications; pp 61-68.
    :param points_list: list of tuples (x, y)
    :param k: integer
    :return: list of tuples (x, y) forming a closed ring, or None if fewer
             than 3 distinct points are available
    """
    # return an empty list if not enough points are given
    if k > len(points_list):
        k = len(points_list)
    # the number of nearest neighbors k must be greater than or equal to 3
    kk = max(k, 3)
    # delete duplicate points
    point_set = clean_list(points_list)
    # if point_set has less then 3 points no polygon can be created and an empty list will be returned
    if len(point_set) < 3:
        return None
    # if point_set has 3 points then these are already vertices of the hull. Append the first point to
    # close the hull polygon
    if len(point_set) == 3:
        return add_point(point_set, point_set[0])
    # make sure that k neighbors can be found
    kk = min(kk, len(point_set))
    # start with the point having the smallest y-coordinate (most southern point)
    first_point = find_min_y_point(point_set)
    # add this points as the first vertex of the hull
    hull = [first_point]
    # make the first vertex of the hull to the current point
    current_point = first_point
    # remove the point from the point_set, to prevent him being among the nearest points
    point_set = remove_point(point_set, first_point)
    # previous_angle seeds the first clockwise sweep (pointing due west)
    previous_angle = math.pi
    # step counts the number of segments
    step = 2
    # as long as point_set is not empty or search is returning to the starting point
    # NOTE(review): Python precedence parses this as
    # A or (B and C), i.e. the len(point_set) > 0 guard only applies to the
    # step == 2 case; the paper's condition reads (A or B) and C — confirm
    # whether point_set can become empty before the hull closes.
    while (current_point != first_point) or (step == 2) and (len(point_set) > 0):
        # after 3 iterations add the first point to point_set again, otherwise a hull cannot be closed
        if step == 5:
            point_set = add_point(point_set, first_point)
        # search the k nearest neighbors of the current point
        k_nearest_points = nearest_points(point_set, current_point, kk)
        # sort the candidates (neighbors) in descending order of right-hand turn. This way the algorithm progresses
        # in clockwise direction through as many points as possible
        c_points = sort_by_angle(k_nearest_points, current_point, previous_angle)
        its = True
        i = -1
        # search for the nearest point to which the connecting line does not intersect any existing segment
        while its is True and (i < len(c_points) - 1):
            i += 1
            # if the candidate closes the hull, the closing segment legitimately
            # touches the very first hull segment, so skip testing against it
            if c_points[i] == first_point:
                last_point = 1
            else:
                last_point = 0
            # j starts at 2: the segment sharing the current vertex can never
            # be a "real" intersection
            j = 2
            its = False
            while its is False and (j < len(hull) - last_point):
                its = intersect((hull[step - 2], c_points[i]), (hull[step - 2 - j], hull[step - 1 - j]))
                j += 1
        # there is no candidate to which the connecting line does not intersect any existing segment, so the
        # for the next candidate fails. The algorithm starts again with an increased number of neighbors
        if its is True:
            return concave_hull(points_list, kk + 1)
        # the first point which complies with the requirements is added to the hull and gets the current point
        current_point = c_points[i]
        hull = add_point(hull, current_point)
        # calculate the angle between the last vertex and his precursor, that is the last segment of the hull
        # in reversed direction
        previous_angle = angle(hull[step - 1], hull[step - 2])
        # remove current_point from point_set
        point_set = remove_point(point_set, current_point)
        # increment counter
        step += 1
    all_inside = True
    i = len(point_set) - 1
    # check if all points are within the created polygon
    while (all_inside is True) and (i >= 0):
        all_inside = point_in_polygon_q(point_set[i], hull)
        i -= 1
    # since at least one point is out of the computed polygon, try again with a higher number of neighbors
    if all_inside is False:
        return concave_hull(points_list, kk + 1)
    # a valid hull has been constructed
    return hull
| gpl-2.0 |
gef756/scipy | scipy/interpolate/interpolate.py | 25 | 80287 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'RegularGridInterpolator',
'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, logical_or, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy._lib.six import xrange, integer_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
def reduce_sometrue(a):
    """Collapse `a` with logical-OR reductions along axis 0 until at most 1-D.

    Repeatedly applies ``np.any(..., axis=0)`` while the argument has more
    than one dimension; a 0-D or 1-D input is returned unchanged.
    """
    result = a
    # np.any is the modern spelling of the deprecated ``sometrue`` alias
    # (removed in NumPy 2.0); behavior is identical.
    while len(np.shape(result)) > 1:
        result = np.any(result, axis=0)
    return result
def prod(x):
    """Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
    # reduce with initializer 1 also covers the empty-sequence case
    return functools.reduce(operator.mul, x, 1)
def lagrange(x, w):
    """
    Return a Lagrange interpolating polynomial.

    Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
    polynomial through the points ``(x, w)``.

    Warning: This implementation is numerically unstable. Do not expect to
    be able to use more than about 20 points even if they are chosen optimally.

    Parameters
    ----------
    x : array_like
        `x` represents the x-coordinates of a set of datapoints.
    w : array_like
        `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).

    Returns
    -------
    lagrange : numpy.poly1d instance
        The Lagrange interpolating polynomial.
    """
    # Uses the builtin range instead of the removed scipy._lib.six.xrange
    # Py2-compat shim; the computation is otherwise unchanged.
    M = len(x)
    p = poly1d(0.0)
    for j in range(M):
        # j-th Lagrange basis polynomial scaled by w[j]:
        #   w[j] * prod_{k != j} (X - x[k]) / (x[j] - x[k])
        pt = poly1d(w[j])
        for k in range(M):
            if k == j:
                continue
            fac = x[j] - x[k]
            pt *= poly1d([1.0, -x[k]]) / fac
        p += pt
    return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
    """
    interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
             fill_value=nan)

    Interpolate over a 2-D grid.

    `x`, `y` and `z` are arrays of values used to approximate some function
    f: ``z = f(x, y)``. This class returns a function whose call method uses
    spline interpolation to find the value of new points.

    If `x` and `y` represent a regular grid, consider using
    RectBivariateSpline.

    Methods
    -------
    __call__

    Parameters
    ----------
    x, y : array_like
        Arrays defining the data point coordinates.

        If the points lie on a regular grid, `x` can specify the column
        coordinates and `y` the row coordinates, for example::

          >>> x = [0,1,2];  y = [0,3]; z = [[1,2,3], [4,5,6]]

        Otherwise, `x` and `y` must specify the full coordinates for each
        point, for example::

          >>> x = [0,1,2,0,1,2];  y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]

        If `x` and `y` are multi-dimensional, they are flattened before use.
    z : array_like
        The values of the function to interpolate at the data points. If
        `z` is a multi-dimensional array, it is flattened before use. The
        length of a flattened `z` array is either
        len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
        or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
        for each point.
    kind : {'linear', 'cubic', 'quintic'}, optional
        The kind of spline interpolation to use. Default is 'linear'.
    copy : bool, optional
        If True, the class makes internal copies of x, y and z.
        If False, references may be used. The default is to copy.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data (x,y), a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If omitted (None), values outside
        the domain are extrapolated.

    Returns
    -------
    values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
        Interpolated values at input coordinates.

    See Also
    --------
    RectBivariateSpline :
        Much faster 2D interpolation if your input data is on a grid
    bisplrep, bisplev :
        Spline interpolation based on FITPACK
    BivariateSpline : a more recent wrapper of the FITPACK routines
    interp1d : one dimension version of this function

    Notes
    -----
    The minimum number of data points required along the interpolation
    axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
    quintic interpolation.

    The interpolator is constructed by `bisplrep`, with a smoothing factor
    of 0. If more control over smoothing is needed, `bisplrep` should be
    used directly.

    Examples
    --------
    Construct a 2-D grid and interpolate on it:

    >>> from scipy import interpolate
    >>> x = np.arange(-5.01, 5.01, 0.25)
    >>> y = np.arange(-5.01, 5.01, 0.25)
    >>> xx, yy = np.meshgrid(x, y)
    >>> z = np.sin(xx**2+yy**2)
    >>> f = interpolate.interp2d(x, y, z, kind='cubic')

    Now use the obtained interpolation function and plot the result:

    >>> import matplotlib.pyplot as plt
    >>> xnew = np.arange(-5.01, 5.01, 1e-2)
    >>> ynew = np.arange(-5.01, 5.01, 1e-2)
    >>> znew = f(xnew, ynew)
    >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
    >>> plt.show()
    """

    def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
                 fill_value=None):
        x = ravel(x)
        y = ravel(y)
        z = asarray(z)

        # A "rectangular grid" is detected purely by size: z has one value
        # per (x, y) pair of the cartesian product.
        rectangular_grid = (z.size == len(x) * len(y))
        if rectangular_grid:
            if z.ndim == 2:
                if z.shape != (len(y), len(x)):
                    raise ValueError("When on a regular grid with x.size = m "
                                     "and y.size = n, if z.ndim == 2, then z "
                                     "must have shape (n, m)")
            # FITPACK's regrid_smth requires strictly sorted axes; sort x/y
            # and permute z's columns/rows to match.
            if not np.all(x[1:] >= x[:-1]):
                j = np.argsort(x)
                x = x[j]
                z = z[:, j]
            if not np.all(y[1:] >= y[:-1]):
                j = np.argsort(y)
                y = y[j]
                z = z[j, :]
            z = ravel(z.T)
        else:
            z = ravel(z)
            if len(x) != len(y):
                raise ValueError(
                    "x and y must have equal lengths for non rectangular grid")
            if len(z) != len(x):
                raise ValueError(
                    "Invalid length for input z for non rectangular grid")

        # Map the interpolation kind to the spline degree used on both axes.
        try:
            kx = ky = {'linear': 1,
                       'cubic': 3,
                       'quintic': 5}[kind]
        except KeyError:
            raise ValueError("Unsupported interpolation type.")

        if not rectangular_grid:
            # TODO: surfit is really not meant for interpolation!
            self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
        else:
            # For gridded data use the dedicated (and much faster) FITPACK
            # routine; trim the returned knot/coefficient arrays to the
            # lengths it reports.
            nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
                x, y, z, None, None, None, None,
                kx=kx, ky=ky, s=0.0)
            self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
                        kx, ky)

        self.bounds_error = bounds_error
        self.fill_value = fill_value
        self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]

        # Cached domain bounds used for out-of-bounds handling in __call__.
        self.x_min, self.x_max = np.amin(x), np.amax(x)
        self.y_min, self.y_max = np.amin(y), np.amax(y)

    def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
        """Interpolate the function.

        Parameters
        ----------
        x : 1D array
            x-coordinates of the mesh on which to interpolate.
        y : 1D array
            y-coordinates of the mesh on which to interpolate.
        dx : int >= 0, < kx
            Order of partial derivatives in x.
        dy : int >= 0, < ky
            Order of partial derivatives in y.
        assume_sorted : bool, optional
            If False, values of `x` and `y` can be in any order and they are
            sorted first.
            If True, `x` and `y` have to be arrays of monotonically
            increasing values.

        Returns
        -------
        z : 2D array with shape (len(y), len(x))
            The interpolated values.
        """

        x = atleast_1d(x)
        y = atleast_1d(y)

        if x.ndim != 1 or y.ndim != 1:
            raise ValueError("x and y should both be 1-D arrays")

        if not assume_sorted:
            x = np.sort(x)
            y = np.sort(y)

        if self.bounds_error or self.fill_value is not None:
            out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
            out_of_bounds_y = (y < self.y_min) | (y > self.y_max)

            any_out_of_bounds_x = np.any(out_of_bounds_x)
            any_out_of_bounds_y = np.any(out_of_bounds_y)

        if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
            raise ValueError("Values out of range; x must be in %r, y in %r"
                             % ((self.x_min, self.x_max),
                                (self.y_min, self.y_max)))

        z = fitpack.bisplev(x, y, self.tck, dx, dy)
        z = atleast_2d(z)
        # bisplev returns shape (len(x), len(y)); transpose to the documented
        # (len(y), len(x)) layout.
        z = transpose(z)

        if self.fill_value is not None:
            if any_out_of_bounds_x:
                z[:, out_of_bounds_x] = self.fill_value
            if any_out_of_bounds_y:
                z[out_of_bounds_y, :] = self.fill_value

        if len(z) == 1:
            z = z[0]
        return array(z)
class interp1d(_Interpolator1D):
    """
    Interpolate a 1-D function.

    `x` and `y` are arrays of values used to approximate some function f:
    ``y = f(x)``.  This class returns a function whose call method uses
    interpolation to find the value of new points.

    Parameters
    ----------
    x : (N,) array_like
        A 1-D array of real values.
    y : (...,N,...) array_like
        A N-D array of real values. The length of `y` along the interpolation
        axis must be equal to the length of `x`.
    kind : str or int, optional
        Specifies the kind of interpolation as a string
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic'
        where 'slinear', 'quadratic' and 'cubic' refer to a spline
        interpolation of first, second or third order) or as an integer
        specifying the order of the spline interpolator to use.
        Default is 'linear'.
    axis : int, optional
        Specifies the axis of `y` along which to interpolate.
        Interpolation defaults to the last axis of `y`.
    copy : bool, optional
        If True, the class makes internal copies of x and y.
        If False, references to `x` and `y` are used. The default is to copy.
    bounds_error : bool, optional
        If True, a ValueError is raised any time interpolation is attempted on
        a value outside of the range of x (where extrapolation is
        necessary). If False, out of bounds values are assigned `fill_value`.
        By default, an error is raised.
    fill_value : float, optional
        If provided, then this value will be used to fill in for requested
        points outside of the data range. If not provided, then the default
        is NaN.
    assume_sorted : bool, optional
        If False, values of `x` can be in any order and they are sorted first.
        If True, `x` has to be an array of monotonically increasing values.

    Methods
    -------
    __call__

    See Also
    --------
    splrep, splev
        Spline interpolation/smoothing based on FITPACK.
    UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
    interp2d : 2-D interpolation

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy import interpolate
    >>> x = np.arange(0, 10)
    >>> y = np.exp(-x/3.0)
    >>> f = interpolate.interp1d(x, y)

    >>> xnew = np.arange(0, 9, 0.1)
    >>> ynew = f(xnew)   # use interpolation function returned by `interp1d`
    >>> plt.plot(x, y, 'o', xnew, ynew, '-')
    >>> plt.show()
    """

    def __init__(self, x, y, kind='linear', axis=-1,
                 copy=True, bounds_error=True, fill_value=np.nan,
                 assume_sorted=False):
        """ Initialize a 1D linear interpolation class."""
        _Interpolator1D.__init__(self, x, y, axis=axis)

        self.copy = copy
        self.bounds_error = bounds_error
        self.fill_value = fill_value

        # Normalize `kind` to either 'linear'/'nearest' or 'spline' plus a
        # spline order.
        if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
            order = {'nearest': 0, 'zero': 0,'slinear': 1,
                     'quadratic': 2, 'cubic': 3}[kind]
            kind = 'spline'
        elif isinstance(kind, int):
            order = kind
            kind = 'spline'
        elif kind not in ('linear', 'nearest'):
            raise NotImplementedError("%s is unsupported: Use fitpack "
                                      "routines for other types." % kind)
        x = array(x, copy=self.copy)
        y = array(y, copy=self.copy)

        if not assume_sorted:
            ind = np.argsort(x)
            x = x[ind]
            y = np.take(y, ind, axis=axis)

        if x.ndim != 1:
            raise ValueError("the x array must have exactly one dimension.")
        if y.ndim == 0:
            raise ValueError("the y array must have at least one dimension.")

        # Force-cast y to a floating-point type, if it's not yet one
        if not issubclass(y.dtype.type, np.inexact):
            y = y.astype(np.float_)

        # Backward compatibility
        self.axis = axis % y.ndim

        # Interpolation goes internally along the first axis
        self.y = y
        y = self._reshape_yi(y)

        # Adjust to interpolation kind; store reference to *unbound*
        # interpolation methods, in order to avoid circular references to self
        # stored in the bound instance methods, and therefore delayed garbage
        # collection.  See: http://docs.python.org/2/reference/datamodel.html
        if kind in ('linear', 'nearest'):
            # Make a "view" of the y array that is rotated to the interpolation
            # axis.
            minval = 2
            if kind == 'nearest':
                # midpoints between samples define the nearest-neighbour bins
                self.x_bds = (x[1:] + x[:-1]) / 2.0
                self._call = self.__class__._call_nearest
            else:
                self._call = self.__class__._call_linear
        else:
            minval = order + 1
            self._spline = splmake(x, y, order=order)
            self._call = self.__class__._call_spline

        if len(x) < minval:
            raise ValueError("x and y arrays must have at "
                             "least %d entries" % minval)

        self._kind = kind
        self.x = x
        self._y = y

    def _call_linear(self, x_new):
        # 2. Find where in the orignal data, the values to interpolate
        #    would be inserted.
        #    Note: If x_new[n] == x[m], then m is returned by searchsorted.
        x_new_indices = searchsorted(self.x, x_new)

        # 3. Clip x_new_indices so that they are within the range of
        #    self.x indices and at least 1.  Removes mis-interpolation
        #    of x_new[n] = x[0]
        x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)

        # 4. Calculate the slope of regions that each x_new value falls in.
        lo = x_new_indices - 1
        hi = x_new_indices

        x_lo = self.x[lo]
        x_hi = self.x[hi]
        y_lo = self._y[lo]
        y_hi = self._y[hi]

        # Note that the following two expressions rely on the specifics of the
        # broadcasting semantics.
        slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]

        # 5. Calculate the actual value for each entry in x_new.
        y_new = slope*(x_new - x_lo)[:, None] + y_lo

        return y_new

    def _call_nearest(self, x_new):
        """ Find nearest neighbour interpolated y_new = f(x_new)."""

        # 2. Find where in the averaged data the values to interpolate
        #    would be inserted.
        #    Note: use side='left' (right) to searchsorted() to define the
        #    halfway point to be nearest to the left (right) neighbour
        x_new_indices = searchsorted(self.x_bds, x_new, side='left')

        # 3. Clip x_new_indices so that they are within the range of x indices.
        x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)

        # 4. Calculate the actual value for each entry in x_new.
        y_new = self._y[x_new_indices]

        return y_new

    def _call_spline(self, x_new):
        # Evaluate the spline representation built in __init__ by splmake.
        return spleval(self._spline, x_new)

    def _evaluate(self, x_new):
        # 1. Handle values in x_new that are outside of x.  Throw error,
        #    or return a list of mask array indicating the outofbounds values.
        #    The behavior is set by the bounds_error variable.
        x_new = asarray(x_new)
        out_of_bounds = self._check_bounds(x_new)
        # _call holds an unbound method, hence the explicit self argument.
        y_new = self._call(self, x_new)
        if len(y_new) > 0:
            y_new[out_of_bounds] = self.fill_value
        return y_new

    def _check_bounds(self, x_new):
        """Check the inputs for being in the bounds of the interpolated data.

        Parameters
        ----------
        x_new : array

        Returns
        -------
        out_of_bounds : bool array
            The mask on x_new of values that are out of the bounds.
        """

        # If self.bounds_error is True, we raise an error if any x_new values
        # fall outside the range of x.  Otherwise, we return an array indicating
        # which values are outside the boundary region.
        below_bounds = x_new < self.x[0]
        above_bounds = x_new > self.x[-1]

        # !! Could provide more information about which values are out of bounds
        if self.bounds_error and below_bounds.any():
            raise ValueError("A value in x_new is below the interpolation "
                             "range.")
        if self.bounds_error and above_bounds.any():
            raise ValueError("A value in x_new is above the interpolation "
                             "range.")

        # !! Should we emit a warning if some values are out of bounds?
        # !! matlab does not.
        out_of_bounds = logical_or(below_bounds, above_bounds)
        return out_of_bounds
class _PPolyBase(object):
    """
    Base class for piecewise polynomials.
    """
    __slots__ = ('c', 'x', 'extrapolate', 'axis')

    def __init__(self, c, x, extrapolate=None, axis=0):
        self.c = np.asarray(c)
        self.x = np.ascontiguousarray(x, dtype=np.float64)

        if extrapolate is None:
            extrapolate = True
        self.extrapolate = bool(extrapolate)

        if not (0 <= axis < self.c.ndim - 1):
            raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))

        self.axis = axis
        if axis != 0:
            # roll the interpolation axis to be the first one in self.c
            # More specifically, the target shape for self.c is (k, m, ...),
            # and axis !=0 means that we have c.shape (..., k, m, ...)
            #                                               ^
            #                                              axis
            # So we roll two of them.
            self.c = np.rollaxis(self.c, axis+1)
            self.c = np.rollaxis(self.c, axis+1)

        if self.x.ndim != 1:
            raise ValueError("x must be 1-dimensional")
        if self.x.size < 2:
            raise ValueError("at least 2 breakpoints are needed")
        if self.c.ndim < 2:
            raise ValueError("c must have at least 2 dimensions")
        if self.c.shape[0] == 0:
            raise ValueError("polynomial must be at least of order 0")
        if self.c.shape[1] != self.x.size-1:
            raise ValueError("number of coefficients != len(x)-1")
        if np.any(self.x[1:] - self.x[:-1] < 0):
            raise ValueError("x-coordinates are not in increasing order")

        # coefficients are stored as float or complex, never integer
        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)

    def _get_dtype(self, dtype):
        # Promote to complex if either the new coefficients or the existing
        # ones are complex; otherwise stay with float.
        if np.issubdtype(dtype, np.complexfloating) \
               or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex_
        else:
            return np.float_

    @classmethod
    def construct_fast(cls, c, x, extrapolate=None, axis=0):
        """
        Construct the piecewise polynomial without making checks.

        Takes the same parameters as the constructor. Input arguments
        `c` and `x` must be arrays of the correct shape and type.  The
        `c` array can only be of dtypes float and complex, and `x`
        array must have dtype float.
        """
        # object.__new__ skips __init__ (and hence all validation/copies)
        self = object.__new__(cls)
        self.c = c
        self.x = x
        self.axis = axis
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self

    def _ensure_c_contiguous(self):
        """
        c and x may be modified by the user. The Cython code expects
        that they are C contiguous.
        """
        if not self.x.flags.c_contiguous:
            self.x = self.x.copy()
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()

    def extend(self, c, x, right=True):
        """
        Add additional breakpoints and coefficients to the polynomial.

        Parameters
        ----------
        c : ndarray, size (k, m, ...)
            Additional coefficients for polynomials in intervals
            ``self.x[-1] <= x < x_right[0]``, ``x_right[0] <= x < x_right[1]``,
            ..., ``x_right[m-2] <= x < x_right[m-1]``
        x : ndarray, size (m,)
            Additional breakpoints. Must be sorted and either to
            the right or to the left of the current breakpoints.
        right : bool, optional
            Whether the new intervals are to the right or to the left
            of the current intervals.
        """
        c = np.asarray(c)
        x = np.asarray(x)

        if c.ndim < 2:
            raise ValueError("invalid dimensions for c")
        if x.ndim != 1:
            raise ValueError("invalid dimensions for x")
        if x.shape[0] != c.shape[1]:
            raise ValueError("x and c have incompatible sizes")
        if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
            raise ValueError("c and self.c have incompatible shapes")
        if right:
            if x[0] < self.x[-1]:
                raise ValueError("new x are not to the right of current ones")
        else:
            if x[-1] > self.x[0]:
                raise ValueError("new x are not to the left of current ones")

        if c.size == 0:
            return

        dtype = self._get_dtype(c.dtype)

        # Coefficients are stored highest-order first, so the two blocks are
        # aligned at the *bottom* of the new array of order k2.
        k2 = max(c.shape[0], self.c.shape[0])
        c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
                      dtype=dtype)

        if right:
            c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
            c2[k2-c.shape[0]:, self.c.shape[1]:] = c
            self.x = np.r_[self.x, x]
        else:
            c2[k2-self.c.shape[0]:, :c.shape[1]] = c
            c2[k2-c.shape[0]:, c.shape[1]:] = self.c
            self.x = np.r_[x, self.x]

        self.c = c2

    def __call__(self, x, nu=0, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative

        Parameters
        ----------
        x : array_like
            Points to evaluate the interpolant at.
        nu : int, optional
            Order of derivative to evaluate. Must be non-negative.
        extrapolate : bool, optional
            Whether to extrapolate to ouf-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        y : array_like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        x = np.asarray(x)
        x_shape, x_ndim = x.shape, x.ndim
        # The Cython kernel works on a flat (npoints, ntrailing) view.
        x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
        out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[2:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation axis
            l = list(range(out.ndim))
            l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
            out = out.transpose(l)
        return out
class PPoly(_PPolyBase):
    """
    Piecewise polynomial in terms of coefficients and breakpoints

    The polynomial in the ith interval is ``x[i] <= xp < x[i+1]``::

        S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))

    where ``k`` is the degree of the polynomial. This representation
    is the local power basis.

    Parameters
    ----------
    c : ndarray, shape (k, m, ...)
        Polynomial coefficients, order `k` and `m` intervals
    x : ndarray, shape (m+1,)
        Polynomial breakpoints. These must be sorted in
        increasing order.
    extrapolate : bool, optional
        Whether to extrapolate to ouf-of-bounds points based on first
        and last intervals, or to return NaNs. Default: True.
    axis : int, optional
        Interpolation axis. Default is zero.

    Attributes
    ----------
    x : ndarray
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials. They are reshaped
        to a 3-dimensional array with the last dimension representing
        the trailing dimensions of the original coefficient array.
    axis : int
        Interpolation axis.

    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    roots
    extend
    from_spline
    from_bernstein_basis
    construct_fast

    See also
    --------
    BPoly : piecewise polynomials in the Bernstein basis

    Notes
    -----
    High-order polynomials in the power basis can be numerically
    unstable.  Precision problems can start to appear for orders
    larger than 20-30.
    """

    def _evaluate(self, x, nu, extrapolate, out):
        # Delegate to the compiled kernel; trailing dims are flattened.
        _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                        self.x, x, nu, bool(extrapolate), out)

    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.

        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. (Default: 1)
            If negative, the antiderivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k - n representing the derivative
            of this polynomial.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if nu < 0:
            return self.antiderivative(-nu)

        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            c2 = self.c[:-nu,:].copy()

        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)

        # multiply by the correct rising factorials
        factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
        c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]

        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)

    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Antiderivativative is also the indefinite integral of the function,
        and derivative is its inverse operation.

        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. (Default: 1)
            If negative, the derivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.

        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.
        """
        if nu <= 0:
            return self.derivative(-nu)

        c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
                     dtype=self.c.dtype)
        c[:-nu] = self.c

        # divide by the correct rising factorials
        factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
        c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]

        # fix continuity of added degrees of freedom
        self._ensure_c_contiguous()
        _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
                              self.x, nu - 1)

        # construct a compatible polynomial
        return self.construct_fast(c, self.x, self.extrapolate, self.axis)

    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : bool, optional
            Whether to extrapolate to ouf-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        # Swap integration bounds if needed
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1

        # Compute the integral
        range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        _ppoly.integrate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                         self.x, a, b, bool(extrapolate),
                         out=range_int)

        # Return
        range_int *= sign
        return range_int.reshape(self.c.shape[2:])

    def roots(self, discontinuity=True, extrapolate=None):
        """
        Find real roots of the piecewise polynomial.

        Parameters
        ----------
        discontinuity : bool, optional
            Whether to report sign changes across discontinuities at
            breakpoints as roots.
        extrapolate : bool, optional
            Whether to return roots from the polynomial extrapolated
            based on first and last intervals.

        Returns
        -------
        roots : ndarray
            Roots of the polynomial(s).

            If the PPoly object describes multiple polynomials, the
            return value is an object array whose each element is an
            ndarray containing the roots.

        Notes
        -----
        This routine works only on real-valued polynomials.

        If the piecewise polynomial contains sections that are
        identically zero, the root list will contain the start point
        of the corresponding interval, followed by a ``nan`` value.

        If the polynomial is discontinuous across a breakpoint, and
        there is a sign change across the breakpoint, this is reported
        if the `discont` parameter is True.

        Examples
        --------
        Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
        ``[-2, 1], [1, 2]``:

        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
        >>> pp.roots()
        array([-1.,  1.])
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        self._ensure_c_contiguous()

        if np.issubdtype(self.c.dtype, np.complexfloating):
            raise ValueError("Root finding is only for "
                             "real-valued polynomials")

        r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                              self.x, bool(discontinuity),
                              bool(extrapolate))
        if self.c.ndim == 2:
            return r[0]
        else:
            r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
            # this for-loop is equivalent to ``r2[...] = r``, but that's broken
            # in numpy 1.6.0
            for ii, root in enumerate(r):
                r2[ii] = root

            return r2.reshape(self.c.shape[2:])

    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        """
        Construct a piecewise polynomial from a spline

        Parameters
        ----------
        tck
            A spline, as returned by `splrep`
        extrapolate : bool, optional
            Whether to extrapolate to ouf-of-bounds points based on first
            and last intervals, or to return NaNs. Default: True.
        """
        t, c, k = tck

        cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
        # Taylor coefficients at each knot: m-th derivative / m!
        for m in xrange(k, -1, -1):
            y = fitpack.splev(t[:-1], tck, der=m)
            cvals[k - m, :] = y/spec.gamma(m+1)

        return cls.construct_fast(cvals, t, extrapolate)

    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        """
        Construct a piecewise polynomial in the power basis
        from a polynomial in Bernstein basis.

        Parameters
        ----------
        bp : BPoly
            A Bernstein basis polynomial, as created by BPoly
        extrapolate : bool, optional
            Whether to extrapolate to ouf-of-bounds points based on first
            and last intervals, or to return NaNs. Default: True.
        """
        dx = np.diff(bp.x)
        k = bp.c.shape[0] - 1  # polynomial order

        rest = (None,)*(bp.c.ndim-2)

        c = np.zeros_like(bp.c)
        # Binomial expansion of the Bernstein basis into local powers of
        # (x - x[i]); dx**s rescales from the unit interval.
        for a in range(k+1):
            factor = (-1)**(a) * comb(k, a) * bp.c[a]
            for s in range(a, k+1):
                val = comb(k-a, s-a) * (-1)**s
                c[k-s] += factor * val / dx[(slice(None),)+rest]**s

        if extrapolate is None:
            extrapolate = bp.extrapolate

        return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ``i``-th interval ``x[i] <= xp < x[i+1]``
is written in the Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1))
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = comb(k, a) * t**k * (1 - t)**(k - a)
with ``t = (x - x[i]) / (x[i+1] - x[i])``.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, doi:10.1155/2011/829543
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
    def _evaluate(self, x, nu, extrapolate, out):
        # Evaluate the piecewise Bernstein polynomial (nu-th derivative)
        # at points ``x``, writing the results into ``out``.  Trailing
        # coefficient dimensions are flattened for the compiled routine.
        _ppoly.evaluate_bernstein(
            self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
            self.x, x, nu, bool(extrapolate), out)
    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.
        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. (Default: 1)
            If negative, the antiderivative is returned.
        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k2 = k - nu representing the derivative
            of this polynomial.
        """
        if nu < 0:
            return self.antiderivative(-nu)
        if nu > 1:
            # take higher-order derivatives one order at a time
            bp = self
            for k in range(nu):
                bp = bp.derivative()
            return bp
        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            # For a polynomial
            # B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
            # we use the fact that
            # b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
            # which leads to
            # B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
            #
            # finally, for an interval [y, y + dy] with dy != 1,
            # we need to correct for an extra power of dy
            rest = (None,)*(self.c.ndim-2)
            k = self.c.shape[0] - 1
            dx = np.diff(self.x)[(None, slice(None))+rest]
            c2 = k * np.diff(self.c, axis=0) / dx
        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.
        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. (Default: 1)
            If negative, the derivative is returned.
        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k2 = k + nu representing the
            antiderivative of this polynomial.
        """
        if nu <= 0:
            return self.derivative(-nu)
        if nu > 1:
            # integrate one order at a time
            bp = self
            for k in range(nu):
                bp = bp.antiderivative()
            return bp
        # Construct the indefinite integrals on individual intervals
        c, x = self.c, self.x
        k = c.shape[0]
        c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
        c2[1:, ...] = np.cumsum(c, axis=0) / k
        delta = x[1:] - x[:-1]
        # rescale by interval widths (broadcast over trailing dims)
        c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
        # Now fix continuity: on the very first interval, take the integration
        # constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
        # the integration constant is then equal to the jump of the `bp` at x_j.
        # The latter is given by the coefficient of B_{n+1, n+1}
        # *on the previous interval* (other B. polynomials are zero at the breakpoint)
        # Finally, use the fact that BPs form a partition of unity.
        c2[:,1:] += np.cumsum(c2[k,:], axis=0)[:-1]
        return self.construct_fast(c2, x, self.extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Defaults to ``self.extrapolate``.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is not None:
ib.extrapolate = extrapolate
return ib(b) - ib(a)
    def extend(self, c, x, right=True):
        # The base-class extend concatenates coefficient arrays, so both
        # pieces must have the same (highest) order first; raise the degree
        # of whichever coefficient set is lower-order.
        k = max(self.c.shape[0], c.shape[0])
        self.c = self._raise_degree(self.c, k - self.c.shape[0])
        c = self._raise_degree(c, k - c.shape[0])
        return _PPolyBase.extend(self, c, x, right)
    extend.__doc__ = _PPolyBase.extend.__doc__
    @classmethod
    def from_power_basis(cls, pp, extrapolate=None):
        """
        Construct a piecewise polynomial in Bernstein basis
        from a power basis polynomial.
        Parameters
        ----------
        pp : PPoly
            A piecewise polynomial in the power basis
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs. Default: True.
        """
        dx = np.diff(pp.x)
        k = pp.c.shape[0] - 1  # polynomial order
        # extra trailing axes so dx broadcasts over the extra dimensions of pp.c
        rest = (None,)*(pp.c.ndim-2)
        c = np.zeros_like(pp.c)
        # expand each power term (x - x[i])**(k-a) into the degree-k
        # Bernstein basis on the interval, accumulating into c
        for a in range(k+1):
            factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
            for j in range(k-a, k+1):
                c[j] += factor * comb(j, k-a)
        if extrapolate is None:
            extrapolate = pp.extrapolate
        return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool, optional
Whether to extrapolate to ouf-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
if np.any(xi[1:] - xi[:1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, integer_types):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
n1 = min(n - n2, len(y2))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" %
(xi[i], len(y1), xi[i+1], len(y2), orders[i]))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
    @staticmethod
    def _construct_from_derivatives(xa, xb, ya, yb):
        """Compute the coefficients of a polynomial in the Bernstein basis
        given the values and derivatives at the edges.
        Return the coefficients of a polynomial in the Bernstein basis
        defined on `[xa, xb]` and having the values and derivatives at the
        endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
        The polynomial constructed is of the minimal possible degree, i.e.,
        if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
        of the polynomial is ``na + nb - 1``.
        Parameters
        ----------
        xa : float
            Left-hand end point of the interval
        xb : float
            Right-hand end point of the interval
        ya : array_like
            Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
            ``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
        yb : array_like
            Derivatives at ``xb``.
        Returns
        -------
        array
            coefficient array of a polynomial having specified derivatives
        Notes
        -----
        This uses several facts from life of Bernstein basis functions.
        First of all,
        .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
        If B(x) is a linear combination of the form
        .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.
        Iterating the latter one, one finds for the q-th derivative
        .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
        with
        .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and
        `c_q` are found one by one by iterating `q = 0, ..., na`.
        At `x = xb` it's the same with `a = n - q`.
        """
        ya, yb = np.asarray(ya), np.asarray(yb)
        if ya.shape[1:] != yb.shape[1:]:
            raise ValueError('ya and yb have incompatible dimensions.')
        dta, dtb = ya.dtype, yb.dtype
        if (np.issubdtype(dta, np.complexfloating)
               or np.issubdtype(dtb, np.complexfloating)):
            dt = np.complex_
        else:
            dt = np.float_
        na, nb = len(ya), len(yb)
        n = na + nb
        c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
        # compute coefficients of a polynomial degree na+nb-1
        # walk left-to-right
        for q in range(0, na):
            # poch(n-q, q) = (n-q)(n-q+1)...(n-1) = n! / (n-q)!
            c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
            for j in range(0, q):
                c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
        # now walk right-to-left
        for q in range(0, nb):
            c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
            for j in range(0, q):
                c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
        return c
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x,y,z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
    def __init__(self, points, values, method="linear", bounds_error=True,
                 fill_value=np.nan):
        # Validate and store the grid, data values and evaluation options;
        # see the class docstring for parameter semantics.
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)
        self.method = method
        self.bounds_error = bounds_error
        if not hasattr(values, 'ndim'):
            # allow reasonable duck-typed values
            values = np.asarray(values)
        if len(points) > values.ndim:
            raise ValueError("There are %d point arrays, but values has %d "
                             "dimensions" % (len(points), values.ndim))
        if hasattr(values, 'dtype') and hasattr(values, 'astype'):
            # interpolation needs floating-point arithmetic
            if not np.issubdtype(values.dtype, np.inexact):
                values = values.astype(float)
        self.fill_value = fill_value
        if fill_value is not None:
            # reject fill values that would silently lose information
            # when written into the result array
            fill_value_dtype = np.asarray(fill_value).dtype
            if (hasattr(values, 'dtype')
                    and not np.can_cast(fill_value_dtype, values.dtype,
                                        casting='same_kind')):
                raise ValueError("fill_value must be either 'None' or "
                                 "of a type compatible with values")
        # each grid axis must be a strictly-ascending 1-D array whose
        # length matches the corresponding values dimension
        for i, p in enumerate(points):
            if not np.all(np.diff(p) > 0.):
                raise ValueError("The points in dimension %d must be strictly "
                                 "ascending" % i)
            if not np.asarray(p).ndim == 1:
                raise ValueError("The points in dimension %d must be "
                                 "1-dimensional" % i)
            if not values.shape[i] == len(p):
                raise ValueError("There are %d points and %d values in "
                                 "dimension %d" % (len(p), values.shape[i], i))
        self.grid = tuple([np.asarray(p) for p in points])
        self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents a edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
            fill_value=np.nan):
    """
    Multidimensional interpolation on regular grids.
    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.
    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.
    xi : ndarray of shape (..., ndim)
        The coordinates to sample the gridded data at
    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest", and "splinef2d". "splinef2d" is only supported for
        2-dimensional data.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated.  Extrapolation is not supported by method
        "splinef2d".
    Returns
    -------
    values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
        Interpolated values at input coordinates.
    Notes
    -----
    .. versionadded:: 0.14
    See also
    --------
    NearestNDInterpolator : Nearest neighbour interpolation on unstructured
                            data in N dimensions
    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions
    RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
                              regular grid in arbitrary dimensions
    RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
    """
    # sanity check 'method' kwarg
    if method not in ["linear", "nearest", "splinef2d"]:
        raise ValueError("interpn only understands the methods 'linear', "
                         "'nearest', and 'splinef2d'. You provided %s." %
                         method)
    if not hasattr(values, 'ndim'):
        values = np.asarray(values)
    ndim = values.ndim
    # BUG FIX: the error messages below previously misspelled the method
    # name as "spline2fd"; the method is called "splinef2d" everywhere else.
    if ndim > 2 and method == "splinef2d":
        raise ValueError("The method splinef2d can only be used for "
                         "2-dimensional input data")
    if not bounds_error and fill_value is None and method == "splinef2d":
        raise ValueError("The method splinef2d does not support extrapolation.")
    # sanity check consistency of input dimensions
    if len(points) > ndim:
        raise ValueError("There are %d point arrays, but values has %d "
                         "dimensions" % (len(points), ndim))
    if len(points) != ndim and method == 'splinef2d':
        raise ValueError("The method splinef2d can only be used for "
                         "scalar data with one point per coordinate")
    # sanity check input grid
    for i, p in enumerate(points):
        if not np.all(np.diff(p) > 0.):
            raise ValueError("The points in dimension %d must be strictly "
                             "ascending" % i)
        if not np.asarray(p).ndim == 1:
            raise ValueError("The points in dimension %d must be "
                             "1-dimensional" % i)
        if not values.shape[i] == len(p):
            raise ValueError("There are %d points and %d values in "
                             "dimension %d" % (len(p), values.shape[i], i))
    grid = tuple([np.asarray(p) for p in points])
    # sanity check requested xi
    xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
    if xi.shape[-1] != len(grid):
        # BUG FIX: report xi.shape[-1] (coordinate axis); xi.shape[1] raised
        # IndexError for 1-D xi and was wrong for higher-rank xi
        raise ValueError("The requested sample points xi have dimension "
                         "%d, but this RegularGridInterpolator has "
                         "dimension %d" % (xi.shape[-1], len(grid)))
    for i, p in enumerate(xi.T):
        if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
                                               np.all(p <= grid[i][-1])):
            raise ValueError("One of the requested xi is out of bounds "
                             "in dimension %d" % i)
    # perform interpolation
    if method == "linear":
        interp = RegularGridInterpolator(points, values, method="linear",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "nearest":
        interp = RegularGridInterpolator(points, values, method="nearest",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "splinef2d":
        xi_shape = xi.shape
        xi = xi.reshape(-1, xi.shape[-1])
        # RectBivariateSpline doesn't support fill_value; we need to wrap here
        idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
                            grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
                           axis=0)
        result = np.empty_like(xi[:, 0])
        # make a copy of values for RectBivariateSpline
        interp = RectBivariateSpline(points[0], points[1], values[:])
        result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
        result[np.logical_not(idx_valid)] = fill_value
        return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
    """
    Deprecated piecewise polynomial class.
    New code should use the `PPoly` class instead.
    """
    def __init__(self, coeffs, breaks, fill=0.0, sort=False):
        # warn on every construction: this class exists only for
        # backward compatibility
        warnings.warn("ppform is deprecated -- use PPoly instead",
                      category=DeprecationWarning)
        if sort:
            breaks = np.sort(breaks)
        else:
            breaks = np.asarray(breaks)
        PPoly.__init__(self, coeffs, breaks)
        # legacy attribute aliases kept for old callers
        self.coeffs = self.c
        self.breaks = self.x
        self.K = self.coeffs.shape[0]
        self.fill = fill
        self.a = self.breaks[0]
        self.b = self.breaks[-1]
    def __call__(self, x):
        # the old ppform never extrapolated; evaluate with extrapolate=False
        # and let _evaluate substitute the fill value outside [a, b]
        return PPoly.__call__(self, x, 0, False)
    def _evaluate(self, x, nu, extrapolate, out):
        PPoly._evaluate(self, x, nu, extrapolate, out)
        # outside the overall interval, overwrite with the fill value
        out[~((x >= self.a) & (x <= self.b))] = self.fill
        return out
    @classmethod
    def fromspline(cls, xk, cvals, order, fill=0.0):
        # Note: this spline representation is incompatible with FITPACK
        N = len(xk)-1
        sivals = np.empty((order+1, N), dtype=float)
        for m in xrange(order, -1, -1):
            # coefficient of x**m on each interval is the m-th
            # derivative at the left knot divided by m!
            fact = spec.gamma(m+1)
            res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
            res /= fact
            sivals[order-m, :] = res
        return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
    # construct Bmatrix, and Jmatrix
    # e = J*c
    # minimize norm(e,2) given B*c=yk
    # if desired B can be given
    # conds is ignored
    N = len(xk)-1
    K = order
    if B is None:
        B = _fitpack._bsplmat(order, xk)
    J = _fitpack._bspldismat(order, xk)
    u, s, vh = scipy.linalg.svd(B)
    ind = K-1
    # split right singular vectors: V2 spans the null space of B,
    # V1 the row space; the free null-space component is chosen to
    # minimize the discontinuity measure ||J c||
    V2 = vh[-ind:,:].T
    V1 = vh[:-ind,:].T
    A = dot(J.T,J)
    tmp = dot(V2.T,A)
    Q = dot(tmp,V2)
    p = scipy.linalg.solve(Q, tmp)
    tmp = dot(V2,p)
    tmp = np.eye(N+K) - tmp
    tmp = dot(tmp,V1)
    # apply the pseudo-inverse of B restricted to its row space
    tmp = dot(tmp,np.diag(1.0/s))
    tmp = dot(tmp,u.T)
    return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
# Return the spline that minimizes the dis-continuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
    # Cubic-spline variant of _find_smoothest: build the pseudo-inverse of
    # the band matrix B directly, then choose the free component of the
    # second-derivative vector mk to minimize the jump measure ||J mk||.
    N = len(xk) - 1
    Np1 = N + 1
    # find pseudo-inverse of B directly.
    Bd = np.empty((Np1, N))
    for k in range(-N,N):
        if (k < 0):
            l = np.arange(-k, Np1)
            v = (l+k+1)
            if ((k+1) % 2):
                v = -v
        else:
            l = np.arange(k,N)
            v = N - l
            if ((k % 2)):
                v = -v
        _setdiag(Bd, k, v)
    Bd /= (Np1)
    # V2 spans the one-dimensional null space of B (alternating signs)
    V2 = np.ones((Np1,))
    V2[1::2] = -1
    V2 /= math.sqrt(Np1)
    dk = np.diff(xk)
    b = 2*np.diff(yk, axis=0)/dk
    # J approximates third-derivative jumps across interior breakpoints
    J = np.zeros((N-1,N+1))
    idk = 1.0/dk
    _setdiag(J,0,idk[:-1])
    _setdiag(J,1,-idk[1:]-idk[:-1])
    _setdiag(J,2,idk[1:])
    A = dot(J.T,J)
    val = dot(V2,dot(A,V2))
    res1 = dot(np.outer(V2,V2)/val,A)
    mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
    return mk
def _get_spline2_Bb(xk, yk, kind, conds):
Np1 = len(xk)
dk = xk[1:]-xk[:-1]
if kind == 'not-a-knot':
# use banded-solver
nlu = (1,1)
B = ones((3,Np1))
alpha = 2*(yk[1:]-yk[:-1])/dk
zrs = np.zeros((1,)+yk.shape[1:])
row = (Np1-1)//2
b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
B[0,row+2:] = 0
B[2,:(row-1)] = 0
B[0,row+1] = dk[row-1]
B[1,row] = -dk[row]-dk[row-1]
B[2,row-1] = dk[row]
return B, b, None, nlu
else:
raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
    # internal function to compute different tri-diagonal system
    # depending on the kind of spline requested.
    # conds is only used for 'second' and 'first'
    Np1 = len(xk)
    if kind in ['natural', 'second']:
        # end second derivatives are known (zero for 'natural'), so the
        # unknowns are only the interior second derivatives
        if kind == 'natural':
            m0, mN = 0.0, 0.0
        else:
            m0, mN = conds
        # the matrix to invert is (N-1,N-1)
        # use banded solver
        beta = 2*(xk[2:]-xk[:-2])
        alpha = xk[1:]-xk[:-1]
        nlu = (1,1)
        B = np.empty((3,Np1-2))
        B[0,1:] = alpha[2:]
        B[1,:] = beta
        B[2,:-1] = alpha[1:-1]
        dyk = yk[1:]-yk[:-1]
        b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
        b *= 6
        b[0] -= m0
        b[-1] -= mN
        def append_func(mk):
            # put m0 and mN into the correct shape for
            # concatenation
            ma = array(m0,copy=0,ndmin=yk.ndim)
            mb = array(mN,copy=0,ndmin=yk.ndim)
            if ma.shape[1:] != yk.shape[1:]:
                ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
            if mb.shape[1:] != yk.shape[1:]:
                mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
            mk = np.concatenate((ma,mk),axis=0)
            mk = np.concatenate((mk,mb),axis=0)
            return mk
        return B, b, append_func, nlu
    elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
                  'parabolic']:
        if kind == 'endslope':
            # match slope of lagrange interpolating polynomial of
            # order 3 at end-points.
            x0,x1,x2,x3 = xk[:4]
            sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
            sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
            sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x3-x2))*yk[2]
            sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
            xN3,xN2,xN1,xN0 = xk[-4:]
            sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
            sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
            sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN3-xN2))*yk[-3]
            sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
        elif kind == 'clamped':
            sl_0, sl_N = 0.0, 0.0
        elif kind == 'first':
            sl_0, sl_N = conds
        # Now set up the (N+1)x(N+1) system of equations
        beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
        alpha = xk[1:]-xk[:-1]
        gamma = np.r_[0,alpha[1:]]
        B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
        d1 = alpha[0]
        dN = alpha[-1]
        # first and last rows encode the boundary condition for each kind
        if kind == 'not-a-knot':
            d2 = alpha[1]
            dN1 = alpha[-2]
            B[0,:3] = [d2,-d1-d2,d1]
            B[-1,-3:] = [dN,-dN1-dN,dN1]
        elif kind == 'runout':
            B[0,:3] = [1,-2,1]
            B[-1,-3:] = [1,-2,1]
        elif kind == 'parabolic':
            B[0,:2] = [1,-1]
            B[-1,-2:] = [-1,1]
        elif kind == 'periodic':
            raise NotImplementedError
        elif kind == 'symmetric':
            raise NotImplementedError
        else:
            B[0,:2] = [2*d1,d1]
            B[-1,-2:] = [dN,2*dN]
        # Set up RHS (b)
        b = np.empty((Np1,)+yk.shape[1:])
        dyk = (yk[1:]-yk[:-1])*1.0
        if kind in ['not-a-knot', 'runout', 'parabolic']:
            b[0] = b[-1] = 0.0
        elif kind == 'periodic':
            raise NotImplementedError
        elif kind == 'symmetric':
            raise NotImplementedError
        else:
            b[0] = (dyk[0]/d1 - sl_0)
            b[-1] = -(dyk[-1]/dN - sl_N)
        b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
        b *= 6.0
        return B, b, None, None
    else:
        raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
    """Placeholder for the 'not-a-knot' end condition (unimplemented)."""
    raise NotImplementedError
    # Unreachable: kept as a sketch of the intended delegation.
    return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
    """Placeholder for the natural (zero second-derivative) end condition (unimplemented)."""
    raise NotImplementedError
    # Unreachable: kept as a sketch of the intended delegation.
    return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
    """Placeholder for the clamped (zero first-derivative) end condition (unimplemented)."""
    raise NotImplementedError
    # Unreachable: kept as a sketch of the intended delegation.
    return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
    """Placeholder for the fixed end condition (unimplemented)."""
    raise NotImplementedError
    # Unreachable: kept as a sketch of the intended delegation.
    return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
    """Placeholder for periodic boundary conditions (unimplemented)."""
    raise NotImplementedError
    # Unreachable: kept as a sketch of the intended delegation.
    return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
    """Placeholder for symmetric boundary conditions (unimplemented; ignores conds)."""
    raise NotImplementedError
    # Unreachable: kept as a sketch of the intended delegation.
    return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
    """Placeholder for mixed boundary conditions (conds is a dict) (unimplemented)."""
    raise NotImplementedError
    # Unreachable: kept as a sketch of the intended delegation.
    return _find_user(xk, yk, order, conds, B)
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
    """
    Return a representation of a spline given data-points at internal knots

    Parameters
    ----------
    xk : array_like
        The input array of x values of rank 1
    yk : array_like
        The input array of y values of rank N. `yk` can be an N-d array to
        represent more than one curve, through the same `xk` points. The first
        dimension is assumed to be the interpolating dimension and is the same
        length of `xk`.
    order : int, optional
        Order of the spline
    kind : str, optional
        Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
        'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Conds

    Returns
    -------
    splmake : tuple
        Return a (`xk`, `cvals`, `k`) representation of a spline given
        data-points where the (internal) knots are at the data-points.

    Raises
    ------
    ValueError
        If `order` is negative.
    NotImplementedError
        If no ``_find_<kind>`` handler exists for the requested `kind`.
    """
    yk = np.asanyarray(yk)

    order = int(order)
    if order < 0:
        raise ValueError("order must not be negative")
    # For orders 0 and 1 the coefficients are (a slice of) the data itself;
    # `kind` and `conds` are irrelevant.
    if order == 0:
        return xk, yk[:-1], order
    elif order == 1:
        return xk, yk, order

    # Look the handler up by name instead of eval(): eval() would execute an
    # arbitrary expression supplied in `kind`, and the former bare `except:`
    # silently converted *every* failure into NotImplementedError.
    func = globals().get('_find_%s' % kind)
    if func is None:
        raise NotImplementedError

    # the constraint matrix
    B = _fitpack._bsplmat(order, xk)
    coefs = func(xk, yk, order, conds, B)
    return xk, coefs, order
def spleval(xck, xnew, deriv=0):
    """
    Evaluate a fixed spline represented by the given tuple at the new x-values

    The `xj` values are the interior knot points. The approximation
    region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
    should have length N+k where `k` is the order of the spline.

    Parameters
    ----------
    xck : tuple
        A ``(xj, cvals, k)`` triple defining the fixed spline: interior
        knots, coefficients, and spline order.
    xnew : array_like
        Locations to calculate spline
    deriv : int
        Derivative order to evaluate (0 evaluates the spline itself).

    Returns
    -------
    spleval : ndarray
        If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
        `xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
        providing the interpolation of multiple curves.

    Notes
    -----
    Internally, an additional `k`-1 knot points are added on either side of
    the spline.
    """
    xj, cvals, k = xck
    new_shape = np.shape(xnew)
    flat_x = np.ravel(xnew)
    extra_dims = cvals.shape[1:]
    # The dtype test is loop-invariant, so decide once up front.
    is_complex = issubclass(cvals.dtype.type, np.complexfloating)
    out = np.empty(flat_x.shape + extra_dims, dtype=cvals.dtype)
    # One _bspleval call per trailing-dimension index (i.e. per curve).
    for idx in np.ndindex(*extra_dims):
        col = (slice(None),) + idx
        if is_complex:
            # The C evaluator works on real arrays, so handle the real and
            # imaginary parts separately.
            out[col].real = _fitpack._bspleval(flat_x, xj, cvals.real[col], k, deriv)
            out[col].imag = _fitpack._bspleval(flat_x, xj, cvals.imag[col], k, deriv)
        else:
            out[col] = _fitpack._bspleval(flat_x, xj, cvals[col], k, deriv)
    # Restore the caller's xnew shape in front of the per-curve dimensions.
    out.shape = new_shape + extra_dims
    return out
def spltopp(xk, cvals, k):
    """Return a piece-wise polynomial object from a fixed-spline tuple.

    Thin wrapper around ``ppform.fromspline``: ``xk`` are the knots,
    ``cvals`` the spline coefficients and ``k`` the spline order.
    """
    return ppform.fromspline(xk, cvals, k)
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
    """
    Interpolate a curve at new points using a spline fit

    Convenience wrapper: fit a spline through ``(xk, yk)`` with
    :func:`splmake`, then evaluate it at ``xnew`` with :func:`spleval`.

    Parameters
    ----------
    xk, yk : array_like
        The x and y values that define the curve.
    xnew : array_like
        The x values where spline should estimate the y values.
    order : int
        Default is 3.
    kind : string
        One of {'smoothest'}
    conds : Don't know
        Don't know

    Returns
    -------
    spline : ndarray
        An array of y values; the spline evaluated at the positions `xnew`.
    """
    spline_rep = splmake(xk, yk, order=order, kind=kind, conds=conds)
    return spleval(spline_rep, xnew)
| bsd-3-clause |
liberorbis/libernext | env/lib/python2.7/site-packages/setuptools/command/install_egg_info.py | 423 | 4001 | from distutils import log, dir_util
import os
from setuptools import Command
from setuptools.archive_util import unpack_archive
import pkg_resources
class install_egg_info(Command):
    """Install an .egg-info directory for the package"""

    description = "Install an .egg-info directory for the package"

    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]

    def initialize_options(self):
        # Required distutils hook; the real value is resolved in
        # finalize_options() from the install_lib command.
        self.install_dir = None

    def finalize_options(self):
        # Inherit the install dir from install_lib, then derive the source
        # (.egg-info built by the egg_info command) and target paths.
        self.set_undefined_options('install_lib',
                                   ('install_dir', 'install_dir'))
        ei_cmd = self.get_finalized_command("egg_info")
        # e.g. "MyPkg-1.0-py2.7.egg-info" placed under the install dir.
        basename = pkg_resources.Distribution(
            None, None, ei_cmd.egg_name, ei_cmd.egg_version
        ).egg_name() + '.egg-info'
        self.source = ei_cmd.egg_info
        self.target = os.path.join(self.install_dir, basename)
        # Record everything we write for get_outputs() / --record.
        self.outputs = [self.target]

    def run(self):
        # Regenerate metadata, remove any stale target (dir, link or file),
        # then copy the tree and install namespace-package support.
        self.run_command('egg_info')
        if os.path.isdir(self.target) and not os.path.islink(self.target):
            dir_util.remove_tree(self.target, dry_run=self.dry_run)
        elif os.path.exists(self.target):
            self.execute(os.unlink, (self.target,), "Removing " + self.target)
        if not self.dry_run:
            pkg_resources.ensure_directory(self.target)
        self.execute(
            self.copytree, (), "Copying %s to %s" % (self.source, self.target)
        )
        self.install_namespaces()

    def get_outputs(self):
        # Paths written by run(), for installer bookkeeping.
        return self.outputs

    def copytree(self):
        # Copy the .egg-info tree to site-packages
        def skimmer(src, dst):
            # filter out source-control directories; note that 'src' is always
            # a '/'-separated path, regardless of platform. 'dst' is a
            # platform-specific path.
            for skip in '.svn/', 'CVS/':
                if src.startswith(skip) or '/' + skip in src:
                    return None
            self.outputs.append(dst)
            log.debug("Copying %s to %s", src, dst)
            return dst
        unpack_archive(self.source, self.target, skimmer)

    def install_namespaces(self):
        # Write a "<target>-nspkg.pth" file so namespace packages resolve
        # at interpreter startup; no-op when there are none.
        nsp = self._get_all_ns_packages()
        if not nsp:
            return
        filename, ext = os.path.splitext(self.target)
        filename += '-nspkg.pth'
        self.outputs.append(filename)
        log.info("Installing %s", filename)
        lines = map(self._gen_nspkg_line, nsp)

        if self.dry_run:
            # always generate the lines, even in dry run
            list(lines)
            return

        with open(filename, 'wt') as f:
            f.writelines(lines)

    # Template statements joined with ';' into one .pth line per namespace
    # package; %(pth)r / %(pkg)r are filled in by _gen_nspkg_line().
    _nspkg_tmpl = (
        "import sys, types, os",
        "p = os.path.join(sys._getframe(1).f_locals['sitedir'], *%(pth)r)",
        "ie = os.path.exists(os.path.join(p,'__init__.py'))",
        "m = not ie and "
        "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
        "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
        "(p not in mp) and mp.append(p)",
    )
    "lines for the namespace installer"

    _nspkg_tmpl_multi = (
        'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
    )
    "additional line(s) when a parent package is indicated"

    @classmethod
    def _gen_nspkg_line(cls, pkg):
        # ensure pkg is not a unicode string under Python 2.7
        pkg = str(pkg)
        pth = tuple(pkg.split('.'))
        tmpl_lines = cls._nspkg_tmpl
        parent, sep, child = pkg.rpartition('.')
        if parent:
            # Dotted package: also attach the child module to its parent.
            tmpl_lines += cls._nspkg_tmpl_multi
        return ';'.join(tmpl_lines) % locals() + '\n'

    def _get_all_ns_packages(self):
        """Return sorted list of all package namespaces"""
        # For "a.b.c" also record "a.b" and "a" so every level is declared.
        nsp = set()
        for pkg in self.distribution.namespace_packages or []:
            pkg = pkg.split('.')
            while pkg:
                nsp.add('.'.join(pkg))
                pkg.pop()
        return sorted(nsp)
| gpl-2.0 |
bobvanderlinden/machinekit | src/emc/usr_intf/axis/scripts/lintini.py | 25 | 2733 | #!/usr/bin/env python
#!/usr/bin/python
# This is a component of AXIS, a front-end for LinuxCNC
# Copyright 2004, 2005, 2006 Jeff Epler <jepler@unpythonic.net> and
# Chris Radek <chris@timeguy.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, os
import linuxcnc
ini = linuxcnc.ini(sys.argv[1])
nproblems = 0
def report_problem(msg, *args):
    # Print a diagnostic and bump the global problem counter.
    # (Python 2 print statements: this script targets Python 2.)
    global nproblems
    nproblems += 1
    if args:
        print msg % args
    else:
        print msg
def get_int(section, key):
    """Return the first whitespace-separated token of an ini value as an int."""
    raw = ini.find(section, key)
    return int(raw.split()[0])
def get_float(section, key):
    """Return the first whitespace-separated token of an ini value as a float."""
    raw = ini.find(section, key)
    return float(raw.split()[0])
# Timing and axis-count parameters from the ini file.
period = get_int("EMCMOT", "BASE_PERIOD")
naxes = get_int("TRAJ", "AXES")
# A stepper config is detected by the presence of STEPGEN_MAXVEL on axis 0.
is_stepper = ini.find("AXIS_0", "STEPGEN_MAXVEL") is not None
if is_stepper: print "Appears to be a stepper configuration"
else: print "Appears to be a servo configuration"

for i in range(naxes):
    axis = "AXIS_%d" % i
    scale = get_float(axis, "INPUT_SCALE")

    if is_stepper:
        vel = get_float(axis, "STEPGEN_MAXVEL")
        cycles_per_step = 2
        # Nanoseconds available per step at max velocity; BASE_PERIOD must
        # stay below this for the step generator to keep up.
        required_period = 1000000000 / vel / scale / cycles_per_step
        if required_period < period:
            report_problem(
                "Max Velocity %g and scale %g require BASE_PERIOD below %d",
                vel, scale, int(required_period))

        # The step generator limits must exceed the planner limits by at
        # least 1% so the position loop can catch up.
        vel = get_float(axis, "MAX_VELOCITY")
        headroom_vel = get_float(axis, "STEPGEN_MAXVEL")
        if headroom_vel < vel * 1.01:
            report_problem(
                "Less than 1%% velocity headroom from %g to %g",
                vel, headroom_vel)

        acc = get_float(axis, "MAX_ACCELERATION")
        headroom_acc = get_float(axis, "STEPGEN_MAXACCEL")
        if headroom_acc < acc * 1.01:
            report_problem(
                "Less than 1%% acceleration headroom from %g to %g",
                acc, headroom_acc)

# Final summary with grammatically-correct pluralization.
if nproblems == 0:
    print "No problems found"
elif nproblems == 1:
    print "One problem found"
else:
    print "%d problems found" % nproblems

# vim:sw=4:sts=4:et
| lgpl-2.1 |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/mpls/signaling_protocols/rsvp_te/global_/hellos/__init__.py | 1 | 18060 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
# NOTE(review): pyangbind-generated class -- regenerate from the YANG model
# rather than editing by hand.
class hellos(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/signaling-protocols/rsvp-te/global/hellos. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Top level container for RSVP hello parameters
    """
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "hellos"

    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False

        self._extmethods = False
        # Child containers backing the 'config' and 'state' properties.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-constructor path: mirror every changed element of the
            # supplied object into this instance.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path of this container within the model tree.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "signaling-protocols",
                "rsvp-te",
                "global",
                "hellos",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/global/hellos/config (container)

        YANG Description: Configuration parameters relating to RSVP
        hellos
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/global/hellos/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters relating to RSVP
        hellos
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Reset 'config' to a fresh, unset container.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/global/hellos/state (container)

        YANG Description: State information associated with RSVP hellos
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/global/hellos/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State information associated with RSVP hellos
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset 'state' to a fresh, unset container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public accessors wired to the private getter/setter pairs above.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
# NOTE(review): second pyangbind-generated definition of `hellos` (for the
# openconfig-network-instance-l2 module); it rebinds the module-level name,
# shadowing the earlier definition. Regenerate rather than hand-edit.
class hellos(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/signaling-protocols/rsvp-te/global/hellos. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Top level container for RSVP hello parameters
    """
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "hellos"

    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False

        self._extmethods = False
        # Child containers backing the 'config' and 'state' properties.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-constructor path: mirror every changed element of the
            # supplied object into this instance.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path of this container within the model tree.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "signaling-protocols",
                "rsvp-te",
                "global",
                "hellos",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/global/hellos/config (container)

        YANG Description: Configuration parameters relating to RSVP
        hellos
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/global/hellos/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters relating to RSVP
        hellos
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Reset 'config' to a fresh, unset container.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/global/hellos/state (container)

        YANG Description: State information associated with RSVP hellos
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/global/hellos/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State information associated with RSVP hellos
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset 'state' to a fresh, unset container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public accessors wired to the private getter/setter pairs above.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
| apache-2.0 |
themurph/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.28/roles/lib_openshift/src/test/unit/test_oc_process.py | 82 | 21084 | '''
Unit tests for oc process
'''
import os
import six
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_process import OCProcess, locate_oc_binary # noqa: E402
# pylint: disable=too-many-public-methods
class OCProcessTest(unittest.TestCase):
'''
Test class for OCProcess
'''
mysql = '''{
"kind": "Template",
"apiVersion": "v1",
"metadata": {
"name": "mysql-ephemeral",
"namespace": "openshift",
"selfLink": "/oapi/v1/namespaces/openshift/templates/mysql-ephemeral",
"uid": "fb8b5f04-e3d3-11e6-a982-0e84250fc302",
"resourceVersion": "480",
"creationTimestamp": "2017-01-26T14:30:27Z",
"annotations": {
"iconClass": "icon-mysql-database",
"openshift.io/display-name": "MySQL (Ephemeral)",
"tags": "database,mysql"
}
},
"objects": [
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"creationTimestamp": null,
"name": "${DATABASE_SERVICE_NAME}"
},
"spec": {
"ports": [
{
"name": "mysql",
"nodePort": 0,
"port": 3306,
"protocol": "TCP",
"targetPort": 3306
}
],
"selector": {
"name": "${DATABASE_SERVICE_NAME}"
},
"sessionAffinity": "None",
"type": "ClusterIP"
},
"status": {
"loadBalancer": {}
}
},
{
"apiVersion": "v1",
"kind": "DeploymentConfig",
"metadata": {
"creationTimestamp": null,
"name": "${DATABASE_SERVICE_NAME}"
},
"spec": {
"replicas": 1,
"selector": {
"name": "${DATABASE_SERVICE_NAME}"
},
"strategy": {
"type": "Recreate"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
},
"spec": {
"containers": [
{
"capabilities": {},
"env": [
{
"name": "MYSQL_USER",
"value": "${MYSQL_USER}"
},
{
"name": "MYSQL_PASSWORD",
"value": "${MYSQL_PASSWORD}"
},
{
"name": "MYSQL_DATABASE",
"value": "${MYSQL_DATABASE}"
}
],
"image": " ",
"imagePullPolicy": "IfNotPresent",
"livenessProbe": {
"initialDelaySeconds": 30,
"tcpSocket": {
"port": 3306
},
"timeoutSeconds": 1
},
"name": "mysql",
"ports": [
{
"containerPort": 3306,
"protocol": "TCP"
}
],
"readinessProbe": {
"exec": {
"command": [
"/bin/sh",
"-i",
"-c",
"MYSQL_PWD=$MYSQL_PASSWORD mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"
]
},
"initialDelaySeconds": 5,
"timeoutSeconds": 1
},
"resources": {
"limits": {
"memory": "${MEMORY_LIMIT}"
}
},
"securityContext": {
"capabilities": {},
"privileged": false
},
"terminationMessagePath": "/dev/termination-log",
"volumeMounts": [
{
"mountPath": "/var/lib/mysql/data",
"name": "${DATABASE_SERVICE_NAME}-data"
}
]
}
],
"dnsPolicy": "ClusterFirst",
"restartPolicy": "Always",
"volumes": [
{
"emptyDir": {
"medium": ""
},
"name": "${DATABASE_SERVICE_NAME}-data"
}
]
}
},
"triggers": [
{
"imageChangeParams": {
"automatic": true,
"containerNames": [
"mysql"
],
"from": {
"kind": "ImageStreamTag",
"name": "mysql:${MYSQL_VERSION}",
"namespace": "${NAMESPACE}"
},
"lastTriggeredImage": ""
},
"type": "ImageChange"
},
{
"type": "ConfigChange"
}
]
},
"status": {}
}
],
"parameters": [
{
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
"value": "512Mi"
},
{
"name": "NAMESPACE",
"displayName": "Namespace",
"description": "The OpenShift Namespace where the ImageStream resides.",
"value": "openshift"
},
{
"name": "DATABASE_SERVICE_NAME",
"displayName": "Database Service Name",
"description": "The name of the OpenShift Service exposed for the database.",
"value": "mysql",
"required": true
},
{
"name": "MYSQL_USER",
"displayName": "MySQL Connection Username",
"description": "Username for MySQL user that will be used for accessing the database.",
"generate": "expression",
"from": "user[A-Z0-9]{3}",
"required": true
},
{
"name": "MYSQL_PASSWORD",
"displayName": "MySQL Connection Password",
"description": "Password for the MySQL connection user.",
"generate": "expression",
"from": "[a-zA-Z0-9]{16}",
"required": true
},
{
"name": "MYSQL_DATABASE",
"displayName": "MySQL Database Name",
"description": "Name of the MySQL database accessed.",
"value": "sampledb",
"required": true
},
{
"name": "MYSQL_VERSION",
"displayName": "Version of MySQL Image",
"description": "Version of MySQL image to be used (5.5, 5.6 or latest).",
"value": "5.6",
"required": true
}
],
"labels": {
"template": "mysql-ephemeral-template"
}
}'''
@mock.patch('oc_process.Utils.create_tmpfile_copy')
@mock.patch('oc_process.OCProcess._run')
def test_state_list(self, mock_cmd, mock_tmpfile_copy):
''' Testing a get '''
params = {'template_name': 'mysql-ephermeral',
'namespace': 'test',
'content': None,
'state': 'list',
'reconcile': False,
'create': False,
'params': {'NAMESPACE': 'test', 'DATABASE_SERVICE_NAME': 'testdb'},
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
mock_cmd.side_effect = [
(0, OCProcessTest.mysql, '')
]
mock_tmpfile_copy.side_effect = [
'/tmp/mock_kubeconfig',
]
results = OCProcess.run_ansible(params, False)
self.assertFalse(results['changed'])
self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mysql-ephemeral')
@mock.patch('oc_process.Utils.create_tmpfile_copy')
@mock.patch('oc_process.OCProcess._run')
def test_process_no_create(self, mock_cmd, mock_tmpfile_copy):
''' Testing a process with no create '''
params = {'template_name': 'mysql-ephermeral',
'namespace': 'test',
'content': None,
'state': 'present',
'reconcile': False,
'create': False,
'params': {'NAMESPACE': 'test', 'DATABASE_SERVICE_NAME': 'testdb'},
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
mysqlproc = '''{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"creationTimestamp": null,
"labels": {
"template": "mysql-ephemeral-template"
},
"name": "testdb"
},
"spec": {
"ports": [
{
"name": "mysql",
"nodePort": 0,
"port": 3306,
"protocol": "TCP",
"targetPort": 3306
}
],
"selector": {
"name": "testdb"
},
"sessionAffinity": "None",
"type": "ClusterIP"
},
"status": {
"loadBalancer": {}
}
},
{
"apiVersion": "v1",
"kind": "DeploymentConfig",
"metadata": {
"creationTimestamp": null,
"labels": {
"template": "mysql-ephemeral-template"
},
"name": "testdb"
},
"spec": {
"replicas": 1,
"selector": {
"name": "testdb"
},
"strategy": {
"type": "Recreate"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"name": "testdb"
}
},
"spec": {
"containers": [
{
"capabilities": {},
"env": [
{
"name": "MYSQL_USER",
"value": "userHJJ"
},
{
"name": "MYSQL_PASSWORD",
"value": "GITOAduAMaV6k688"
},
{
"name": "MYSQL_DATABASE",
"value": "sampledb"
}
],
"image": " ",
"imagePullPolicy": "IfNotPresent",
"livenessProbe": {
"initialDelaySeconds": 30,
"tcpSocket": {
"port": 3306
},
"timeoutSeconds": 1
},
"name": "mysql",
"ports": [
{
"containerPort": 3306,
"protocol": "TCP"
}
],
"readinessProbe": {
"exec": {
"command": [
"/bin/sh",
"-i",
"-c",
"MYSQL_PWD=$MYSQL_PASSWORD mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"
]
},
"initialDelaySeconds": 5,
"timeoutSeconds": 1
},
"resources": {
"limits": {
"memory": "512Mi"
}
},
"securityContext": {
"capabilities": {},
"privileged": false
},
"terminationMessagePath": "/dev/termination-log",
"volumeMounts": [
{
"mountPath": "/var/lib/mysql/data",
"name": "testdb-data"
}
]
}
],
"dnsPolicy": "ClusterFirst",
"restartPolicy": "Always",
"volumes": [
{
"emptyDir": {
"medium": ""
},
"name": "testdb-data"
}
]
}
},
"triggers": [
{
"imageChangeParams": {
"automatic": true,
"containerNames": [
"mysql"
],
"from": {
"kind": "ImageStreamTag",
"name": "mysql:5.6",
"namespace": "test"
},
"lastTriggeredImage": ""
},
"type": "ImageChange"
},
{
"type": "ConfigChange"
}
]
}
}
]
}'''
mock_cmd.side_effect = [
(0, OCProcessTest.mysql, ''),
(0, OCProcessTest.mysql, ''),
(0, mysqlproc, ''),
]
mock_tmpfile_copy.side_effect = [
'/tmp/mock_kubeconfig',
]
results = OCProcess.run_ansible(params, False)
self.assertFalse(results['changed'])
self.assertEqual(results['results']['results']['items'][0]['metadata']['name'], 'testdb')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_path_exists.side_effect = lambda _: False
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_shutil_which.side_effect = lambda _f, path=None: None
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
| apache-2.0 |
sarvex/tensorflow | tensorflow/python/saved_model/builder.py | 37 | 1183 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel builder.
Builds a SavedModel that can be saved to storage, is language neutral, and
enables systems to produce, consume, or transform TensorFlow Models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.saved_model.builder_impl import _SavedModelBuilder
from tensorflow.python.saved_model.builder_impl import SavedModelBuilder
# pylint: enable=unused-import
| apache-2.0 |
stonegithubs/odoo | addons/web/static/lib/py.js/doc/conf.py | 543 | 7829 | # -*- coding: utf-8 -*-
#
# py.js documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 9 19:36:23 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'py.js'
copyright = u'2012, Xavier Morel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
release = '0.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# Default sphinx domain
default_domain = 'js'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# default code-block highlighting
highlight_language = 'javascript'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyjsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyjs.tex', u'py.js Documentation',
u'Xavier Morel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyjs', u'py.js Documentation',
[u'Xavier Morel'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyjs', u'py.js Documentation',
u'Xavier Morel', 'pyjs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| agpl-3.0 |
pilou-/ansible | lib/ansible/modules/network/f5/bigiq_utility_license_assignment.py | 38 | 20784 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigiq_utility_license_assignment
short_description: Manage utility license assignment on BIG-IPs from a BIG-IQ
description:
- Manages the assignment of utility licenses on a BIG-IQ. Assignment means that
the license is assigned to a BIG-IP, or, it needs to be assigned to a BIG-IP.
Additionally, this module supported revoking the assignments from BIG-IP devices.
version_added: 2.7
options:
unit_of_measure:
description:
- Sets the rate at which this license usage is billed.
- Depending on your license, you may have different units of measures
available to you. If a particular unit is not available to you, the module
will notify you at licensing time.
type: str
choices:
- hourly
- daily
- monthly
- yearly
default: hourly
key:
description:
- The registration key that you want choose an offering from.
type: str
required: True
offering:
description:
- Name of the license offering to assign to the device.
type: str
device:
description:
- When C(managed) is C(no), specifies the address, or hostname, where the BIG-IQ
can reach the remote device to register.
- When C(managed) is C(yes), specifies the managed device, or device UUID, that
you want to register.
- If C(managed) is C(yes), it is very important that you do not have more than
one device with the same name. BIG-IQ internally recognizes devices by their ID,
and therefore, this module's cannot guarantee that the correct device will be
registered. The device returned is the device that will be used.
type: str
managed:
description:
- Whether the specified device is a managed or un-managed device.
- When C(state) is C(present), this parameter is required.
type: bool
device_port:
description:
- Specifies the port of the remote device to connect to.
- If this parameter is not specified, the default of C(443) will be used.
type: int
default: 443
device_username:
description:
- The username used to connect to the remote device.
- This username should be one that has sufficient privileges on the remote device
to do licensing. Usually this is the C(Administrator) role.
- When C(managed) is C(no), this parameter is required.
type: str
device_password:
description:
- The password of the C(device_username).
- When C(managed) is C(no), this parameter is required.
type: str
state:
description:
- When C(present), ensures that the device is assigned the specified license.
- When C(absent), ensures the license is revokes from the remote device and freed
on the BIG-IQ.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Register an unmanaged device
bigiq_utility_license_assignment:
key: XXXX-XXXX-XXXX-XXXX-XXXX
offering: F5-BIG-MSP-AFM-10G-LIC
device: 1.1.1.1
managed: no
device_username: admin
device_password: secret
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Register a managed device, by name
bigiq_utility_license_assignment:
key: XXXX-XXXX-XXXX-XXXX-XXXX
offering: F5-BIG-MSP-AFM-10G-LIC
device: bigi1.foo.com
managed: yes
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Register a managed device, by UUID
bigiq_utility_license_assignment:
key: XXXX-XXXX-XXXX-XXXX-XXXX
offering: F5-BIG-MSP-AFM-10G-LIC
device: 7141a063-7cf8-423f-9829-9d40599fa3e0
managed: yes
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import re
import time

from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigiq import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigiq import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
    """Maps BIG-IQ REST API attribute names to/from module parameter names."""

    api_map = {
        'deviceReference': 'device_reference',
        'deviceAddress': 'device_address',
        'httpsPort': 'device_port',
        'unitOfMeasure': 'unit_of_measure'
    }
    api_attributes = [
        'deviceReference', 'deviceAddress', 'httpsPort', 'managed', 'unitOfMeasure'
    ]
    returnables = [
        'device_address', 'device_reference', 'device_username', 'device_password',
        'device_port', 'managed', 'unit_of_measure'
    ]
    updatables = [
        'device_reference', 'device_address', 'device_username', 'device_password',
        'device_port', 'managed', 'unit_of_measure'
    ]

    def to_return(self):
        """Gather all returnable values, filtered of empty entries.

        Any exception raised while gathering is deliberately swallowed;
        whatever was collected up to that point is returned as-is.
        """
        result = {}
        try:
            for name in self.returnables:
                result[name] = getattr(self, name)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IQ API; base mapping suffices."""
    pass
class ModuleParameters(Parameters):
    """User-supplied parameters plus values derived via BIG-IQ REST lookups.

    The derived properties (``device_reference``, ``offering_id``,
    ``member_id``) issue GET requests through ``self.client`` and raise
    ``F5ModuleError`` on lookup failures.
    """

    @property
    def device_password(self):
        if self._values['device_password'] is None:
            return None
        return self._values['device_password']

    @property
    def device_username(self):
        if self._values['device_username'] is None:
            return None
        return self._values['device_username']

    @property
    def device_address(self):
        # Only meaningful when 'device' was supplied as an IP address;
        # implicitly returns None otherwise.
        if self.device_is_address:
            return self._values['device']

    @property
    def device_port(self):
        if self._values['device_port'] is None:
            return None
        return int(self._values['device_port'])

    @property
    def device_is_address(self):
        """True when 'device' parses as an IP address."""
        if is_valid_ip(self.device):
            return True
        return False

    @property
    def device_is_id(self):
        """True when 'device' matches the UUID format used by BIG-IQ."""
        pattern = r'[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}'
        if re.match(pattern, self.device):
            return True
        return False

    @property
    def device_is_name(self):
        """True when 'device' is neither an address nor a UUID (a hostname)."""
        if not self.device_is_address and not self.device_is_id:
            return True
        return False

    @property
    def device_reference(self):
        """Resolve a managed device to its BIG-IQ device-group link.

        Returns None for unmanaged devices; raises F5ModuleError when no
        matching managed device exists.
        """
        if not self.managed:
            return None
        # 'query_filter' (not 'filter') avoids shadowing the builtin.
        if self.device_is_address:
            # This range lookup is how you do lookups for single IP addresses. Weird.
            query_filter = "address+eq+'{0}...{0}'".format(self.device)
        elif self.device_is_name:
            query_filter = "hostname+eq+'{0}'".format(self.device)
        elif self.device_is_id:
            query_filter = "uuid+eq+'{0}'".format(self.device)
        else:
            raise F5ModuleError(
                "Unknown device format '{0}'".format(self.device)
            )
        uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/?$filter={2}&$top=1".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            query_filter
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            raise F5ModuleError(
                "No device with the specified address was found."
            )
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        # 'device_id' (not 'id') avoids shadowing the builtin.
        device_id = response['items'][0]['uuid']
        result = dict(
            link='https://localhost/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/{0}'.format(device_id)
        )
        return result

    @property
    def offering_id(self):
        """Look up the id of the named offering under the registration key."""
        query_filter = "(name+eq+'{0}')".format(self.offering)
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings?$filter={3}&$top=1'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.key,
            query_filter
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            raise F5ModuleError(
                "No offering with the specified name was found."
            )
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        return response['items'][0]['id']

    @property
    def member_id(self):
        """Look up the membership id of this device in the offering.

        Returns None when the device is not currently a member.
        """
        if self.device_is_address:
            # This range lookup is how you do lookups for single IP addresses. Weird.
            query_filter = "deviceAddress+eq+'{0}...{0}'".format(self.device)
        elif self.device_is_name:
            query_filter = "deviceName+eq+'{0}'".format(self.device)
        elif self.device_is_id:
            query_filter = "deviceMachineId+eq+'{0}'".format(self.device)
        else:
            raise F5ModuleError(
                "Unknown device format '{0}'".format(self.device)
            )
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings/{3}/members/?$filter={4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.key,
            self.offering_id,
            query_filter
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            return None
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        result = response['items'][0]['id']
        return result
class Changes(Parameters):
    """Base container for computed parameter changes."""
    pass
class UsableChanges(Changes):
    """Shapes changed values into the form the licensing API accepts.

    Credentials, address and port are only sent for unmanaged devices; a
    device reference is only sent for managed ones. 'managed' itself is
    never sent to the API.
    """

    @property
    def device_port(self):
        return None if self._values['managed'] else self._values['device_port']

    @property
    def device_username(self):
        return None if self._values['managed'] else self._values['device_username']

    @property
    def device_password(self):
        return None if self._values['managed'] else self._values['device_password']

    @property
    def device_reference(self):
        return self._values['device_reference'] if self._values['managed'] else None

    @property
    def device_address(self):
        return None if self._values['managed'] else self._values['device_address']

    @property
    def managed(self):
        # Steers module logic only; suppressed from API payloads.
        return None
class ReportableChanges(Changes):
    """Changes as reported back to the user; base behavior suffices."""
    pass
class Difference(object):
    """Computes per-parameter differences between 'want' and 'have'."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for *param*, or None if unchanged.

        A property named *param* on this class (if any) takes precedence;
        otherwise a plain attribute comparison is performed.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Fall back to comparing the raw attributes; a missing attribute on
        # 'have' means the wanted value is treated as a change.
        want_value = getattr(self.want, param)
        try:
            if want_value != getattr(self.have, param):
                return want_value
        except AttributeError:
            return want_value
class ModuleManager(object):
    """Drives the assign (state=present) / revoke (state=absent) workflow."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params, client=self.client)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # Seed self.changes with every returnable value the user supplied.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Changes(params=changed)

    def _update_changed_options(self):
        # Diff 'want' against 'have'; record anything that differs.
        # Returns True when at least one parameter changed.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = Changes(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point: dispatch on 'state' and report resulting changes."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Surface any deprecation warnings queued during processing.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        if self.exists():
            return False
        return self.create()

    def exists(self):
        """Whether the device is already a member of this license offering."""
        if self.want.member_id is None:
            return False
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings/{3}/members/{4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.key,
            self.want.offering_id,
            self.want.member_id
        )
        resp = self.client.api.get(uri)
        if resp.status == 200:
            return True
        return False

    def remove(self):
        """Revoke the license from the device; honors check mode."""
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        """Assign the license; credentials are required for unmanaged devices."""
        self._set_changed_options()
        if not self.want.managed:
            if self.want.device_username is None:
                raise F5ModuleError(
                    "You must specify a 'device_username' when working with unmanaged devices."
                )
            if self.want.device_password is None:
                raise F5ModuleError(
                    "You must specify a 'device_password' when working with unmanaged devices."
                )
        if self.module.check_mode:
            return True
        self.create_on_device()
        if not self.exists():
            raise F5ModuleError(
                "Failed to license the remote device."
            )
        self.wait_for_device_to_be_licensed()
        return True

    def create_on_device(self):
        """POST the membership record that triggers licensing of the device."""
        params = self.changes.api_params()
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings/{3}/members/'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.key,
            self.want.offering_id,
        )
        if not self.want.managed:
            params['username'] = self.want.device_username
            params['password'] = self.want.device_password
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def wait_for_device_to_be_licensed(self):
        """Poll until three consecutive reads report status LICENSED.

        NOTE(review): if the device never reaches LICENSED this loop does
        not time out; confirm whether an upper bound on attempts is wanted.
        """
        count = 0
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings/{3}/members/{4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.key,
            self.want.offering_id,
            self.want.member_id,
        )
        while count < 3:
            resp = self.client.api.get(uri)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if 'code' in response and response['code'] == 400:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)
            if response['status'] == 'LICENSED':
                count += 1
            else:
                count = 0
            # Pace the polling instead of hammering the REST endpoint in a
            # tight busy-loop.
            time.sleep(1)

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        """DELETE the membership record; unmanaged devices need credentials."""
        uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings/{3}/members/{4}'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.key,
            self.want.offering_id,
            self.want.member_id
        )
        params = {}
        if not self.want.managed:
            params.update(self.changes.api_params())
            params['id'] = self.want.member_id
            params['username'] = self.want.device_username
            params['password'] = self.want.device_password
        self.client.api.delete(uri, json=params)
class ArgumentSpec(object):
    """Builds the Ansible argument spec and conditional requirements."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            offering=dict(required=True),
            unit_of_measure=dict(
                default='hourly',
                choices=[
                    'hourly', 'daily', 'monthly', 'yearly'
                ]
            ),
            key=dict(required=True, no_log=True),
            device=dict(required=True),
            managed=dict(type='bool'),
            device_port=dict(type='int', default=443),
            device_username=dict(no_log=True),
            device_password=dict(no_log=True),
            state=dict(default='present', choices=['absent', 'present'])
        )
        # Start from the shared F5 spec, then layer the module-specific
        # options on top of it.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(argument_spec)
        self.required_if = [
            ['state', 'present', ['key', 'managed']],
            ['managed', False, ['device', 'device_username', 'device_password']],
            ['managed', True, ['device']]
        ]
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        required_if=spec.required_if,
    )
    try:
        results = ModuleManager(module=module).exec_module()
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
    else:
        module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
Metaswitch/calico-nova | nova/api/openstack/compute/plugins/v3/fping.py | 6 | 5144 | # Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
from oslo.config import cfg
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova import utils
ALIAS = "os-fping"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
authorize_all_tenants = extensions.extension_authorizer(
'compute', 'v3:%s:all_tenants' % ALIAS)
CONF = cfg.CONF
CONF.import_opt('fping_path', 'nova.api.openstack.compute.contrib.fping')
class FpingController(wsgi.Controller):
    """API controller exposing fping-based liveness checks for instances."""

    def __init__(self, network_api=None):
        # network_api is accepted for interface compatibility but unused here.
        self.compute_api = compute.API()
        self.last_call = {}

    def check_fping(self):
        """Raise 503 if the configured fping binary is not executable."""
        if not os.access(CONF.fping_path, os.X_OK):
            raise exc.HTTPServiceUnavailable(
                explanation=_("fping utility is not found."))

    @staticmethod
    def fping(ips):
        """Run fping against *ips* and return the set of addresses reported alive."""
        fping_ret = utils.execute(CONF.fping_path, *ips,
                                  check_exit_code=False)
        if not fping_ret:
            return set()
        alive_ips = set()
        for line in fping_ret[0].split("\n"):
            # fping output lines look like "<ip> is alive" / "<ip> is unreachable".
            ip = line.split(" ", 1)[0]
            if "alive" in line:
                alive_ips.add(ip)
        return alive_ips

    @staticmethod
    def _get_instance_ips(context, instance):
        """Collect the fixed and floating IP addresses of *instance*."""
        ret = []
        for network in common.get_networks_for_instance(
                context, instance).values():
            all_ips = itertools.chain(network["ips"], network["floating_ips"])
            ret += [ip["address"] for ip in all_ips]
        return ret

    @extensions.expected_errors(503)
    def index(self, req):
        """Ping all instances (optionally include/exclude filtered) and
        report per-instance liveness.
        """
        context = req.environ["nova.context"]
        search_opts = dict(deleted=False)
        if "all_tenants" in req.GET:
            authorize_all_tenants(context)
        else:
            authorize(context)
            if context.project_id:
                search_opts["project_id"] = context.project_id
            else:
                search_opts["user_id"] = context.user_id
        self.check_fping()
        # 'include' takes precedence over 'exclude' when both are given.
        include = req.GET.get("include", None)
        if include:
            include = set(include.split(","))
            exclude = set()
        else:
            include = None
            exclude = req.GET.get("exclude", None)
            if exclude:
                exclude = set(exclude.split(","))
            else:
                exclude = set()
        instance_list = self.compute_api.get_all(
            context, search_opts=search_opts)
        ip_list = []
        instance_ips = {}
        instance_projects = {}
        for instance in instance_list:
            uuid = instance["uuid"]
            if uuid in exclude or (include is not None and
                                   uuid not in include):
                continue
            ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
            instance_ips[uuid] = ips
            instance_projects[uuid] = instance["project_id"]
            ip_list += ips
        alive_ips = self.fping(ip_list)
        res = []
        # Use items() rather than the Python-2-only iteritems(): identical
        # behavior on py2 and keeps the code importable on py3.
        for instance_uuid, ips in instance_ips.items():
            res.append({
                "id": instance_uuid,
                "project_id": instance_projects[instance_uuid],
                "alive": bool(set(ips) & alive_ips),
            })
        return {"servers": res}

    @extensions.expected_errors((404, 503))
    def show(self, req, id):
        """Ping a single instance and report its liveness."""
        context = req.environ["nova.context"]
        authorize(context)
        self.check_fping()
        instance = common.get_instance(self.compute_api, context, id)
        try:
            ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
            alive_ips = self.fping(ips)
            return {
                "server": {
                    "id": instance["uuid"],
                    "project_id": instance["project_id"],
                    "alive": bool(set(ips) & alive_ips),
                }
            }
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
class Fping(extensions.V3APIExtensionBase):
    """Fping Management Extension."""

    name = "Fping"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose the fping controller as a single API resource."""
        return [extensions.ResourceExtension(ALIAS, FpingController())]

    def get_controller_extensions(self):
        """This extension contributes no controller extensions."""
        return []
| apache-2.0 |
haitdai/qtbase5-inprocess-surface-compositor-remote-display | myqt/qtbase/src/3rdparty/freetype/src/tools/docmaker/formatter.py | 515 | 4962 | # Formatter (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org>
#
from sources import *
from content import *
from utils import *
# This is the base Formatter class. Its purpose is to convert
# a content processor's data into specific documents (i.e., table of
# contents, global index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example,
# the file tohtml.py contains the definition of the HtmlFormatter sub-class
# used to output -- you guessed it -- HTML.
#
class Formatter:
    """Base class for documentation formatters.

    Walks the content processor's sections, builds a global identifier
    index, and drives the three outputs (table of contents, global index,
    per-section pages) through enter/exit hook methods.  Sub-classes
    (e.g. HtmlFormatter) override the hooks to emit a concrete format;
    the base hooks are deliberate no-ops.
    """

    def __init__( self, processor ):
        self.processor = processor
        self.identifiers = {}
        self.chapters = processor.chapters
        self.sections = processor.sections.values()
        self.block_index = []
        # store all blocks in a dictionary
        self.blocks = []
        for section in self.sections:
            for block in section.blocks.values():
                self.add_identifier( block.name, block )
                # add enumeration values to the index, since this is useful
                for markup in block.markups:
                    if markup.tag == 'values':
                        for field in markup.fields:
                            self.add_identifier( field.name, block )
        # Python 2 only: keys() returns a list and list.sort() accepts a
        # cmp function (index_sort comes from the utils module).
        self.block_index = self.identifiers.keys()
        self.block_index.sort( index_sort )

    def add_identifier( self, name, block ):
        """Register *name* -> *block* in the index, warning on duplicates."""
        if self.identifiers.has_key( name ):
            # duplicate name!
            sys.stderr.write( \
                "WARNING: duplicate definition for '" + name + "' in " + \
                block.location() + ", previous definition in " + \
                self.identifiers[name].location() + "\n" )
        else:
            self.identifiers[name] = block

    #
    # Formatting the table of contents
    #
    def toc_enter( self ):
        """Hook: called before the table of contents is emitted."""
        pass

    def toc_chapter_enter( self, chapter ):
        """Hook: called at the start of each chapter in the TOC."""
        pass

    def toc_section_enter( self, section ):
        """Hook: called at the start of each section in the TOC."""
        pass

    def toc_section_exit( self, section ):
        """Hook: called at the end of each section in the TOC."""
        pass

    def toc_chapter_exit( self, chapter ):
        """Hook: called at the end of each chapter in the TOC."""
        pass

    def toc_index( self, index_filename ):
        """Hook: called to link the index page from the TOC."""
        pass

    def toc_exit( self ):
        """Hook: called after the table of contents is emitted."""
        pass

    def toc_dump( self, toc_filename = None, index_filename = None ):
        """Drive TOC generation, optionally redirecting output to a file."""
        output = None
        if toc_filename:
            output = open_output( toc_filename )
        self.toc_enter()
        for chap in self.processor.chapters:
            self.toc_chapter_enter( chap )
            for section in chap.sections:
                self.toc_section_enter( section )
                self.toc_section_exit( section )
            self.toc_chapter_exit( chap )
        self.toc_index( index_filename )
        self.toc_exit()
        if output:
            close_output( output )

    #
    # Formatting the index
    #
    def index_enter( self ):
        """Hook: called before the index is emitted."""
        pass

    def index_name_enter( self, name ):
        """Hook: called before each index entry."""
        pass

    def index_name_exit( self, name ):
        """Hook: called after each index entry."""
        pass

    def index_exit( self ):
        """Hook: called after the index is emitted."""
        pass

    def index_dump( self, index_filename = None ):
        """Drive index generation, optionally redirecting output to a file."""
        output = None
        if index_filename:
            output = open_output( index_filename )
        self.index_enter()
        for name in self.block_index:
            self.index_name_enter( name )
            self.index_name_exit( name )
        self.index_exit()
        if output:
            close_output( output )

    #
    # Formatting a section
    #
    def section_enter( self, section ):
        """Hook: called before a section page is emitted."""
        pass

    def block_enter( self, block ):
        """Hook: called before each documentation block."""
        pass

    def markup_enter( self, markup, block = None ):
        """Hook: called before each markup tag inside a block."""
        pass

    def field_enter( self, field, markup = None, block = None ):
        """Hook: called before each field of a markup tag."""
        pass

    def field_exit( self, field, markup = None, block = None ):
        """Hook: called after each field of a markup tag."""
        pass

    def markup_exit( self, markup, block = None ):
        """Hook: called after each markup tag inside a block."""
        pass

    def block_exit( self, block ):
        """Hook: called after each documentation block."""
        pass

    def section_exit( self, section ):
        """Hook: called after a section page is emitted."""
        pass

    def section_dump( self, section, section_filename = None ):
        """Drive the emission of one section page through the hooks above."""
        output = None
        if section_filename:
            output = open_output( section_filename )
        self.section_enter( section )
        for name in section.block_names:
            block = self.identifiers[name]
            self.block_enter( block )
            for markup in block.markups[1:]: # always ignore first markup!
                self.markup_enter( markup, block )
                for field in markup.fields:
                    self.field_enter( field, markup, block )
                    self.field_exit( field, markup, block )
                self.markup_exit( markup, block )
            self.block_exit( block )
        self.section_exit( section )
        if output:
            close_output( output )

    def section_dump_all( self ):
        """Emit every section page (to default output destinations)."""
        for section in self.sections:
            self.section_dump( section )
# eof
| gpl-3.0 |
woggle/mesos-old | third_party/boto-2.0b2/boto/sdb/db/sequence.py | 8 | 8168 | # Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.exception import SDBResponseError
class SequenceGenerator(object):
    """Generic Sequence Generator object, this takes a single
    string as the "sequence" and uses that to figure out
    what the next value in a string is. For example
    if you give "ABC" and pass in "A" it will give you "B",
    and if you give it "C" it will give you "AA".

    If you set "rollover" to True in the above example, passing
    in "C" would give you "A" again.

    The Sequence string can be a string or any iterable
    that has the "index" function and is indexable.
    """
    __name__ = "SequenceGenerator"

    def __init__(self, sequence_string, rollover=False):
        """Create a new SequenceGenerator using the sequence_string
        as how to generate the next item.

        :param sequence_string: The string or list that explains
            how to generate the next item in the sequence
        :type sequence_string: str,iterable

        :param rollover: Rollover instead of incrementing when
            we hit the end of the sequence
        :type rollover: bool
        """
        self.sequence_string = sequence_string
        # Item width is taken from the first element, so iterables of
        # multi-character items are supported.
        self.sequence_length = len(sequence_string[0])
        self.rollover = rollover
        self.last_item = sequence_string[-1]
        self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string)

    def __call__(self, val, last=None):
        """Get the next value in the sequence."""
        # If they pass us in a string that's not at least
        # the length of our sequence, then return the
        # first element in our sequence
        # (identity test: "val is None" is the idiomatic None check).
        if val is None or len(val) < self.sequence_length:
            return self.sequence_string[0]
        last_value = val[-self.sequence_length:]
        if (not self.rollover) and (last_value == self.last_item):
            # Carry: recursively increment the prefix, like "9" -> "10".
            val = "%s%s" % (self(val[:-self.sequence_length]), self._inc(last_value))
        else:
            val = "%s%s" % (val[:-self.sequence_length], self._inc(last_value))
        return val

    def _inc(self, val):
        """Increment a single value, wrapping around at the end of the sequence."""
        assert(len(val) == self.sequence_length)
        return self.sequence_string[(self.sequence_string.index(val)+1) % len(self.sequence_string)]
#
# Simple Sequence Functions
#
def increment_by_one(cv=None, lv=None):
    """Incrementer: return 0 for a fresh sequence, otherwise cv + 1.

    ``lv`` (the last value) is accepted for interface parity with other
    incrementers but is unused here.
    """
    # "is None" identity test instead of "== None" equality.
    if cv is None:
        return 0
    return cv + 1
def double(cv=None, lv=None):
    """Incrementer: return 1 for a fresh sequence, otherwise cv * 2.

    ``lv`` (the last value) is accepted for interface parity but unused.
    """
    # "is None" identity test instead of "== None" equality.
    if cv is None:
        return 1
    return cv * 2
def fib(cv=1, lv=0):
    """The fibonacci sequence, this incrementer uses the
    last value: returns cv + lv, treating None as the seeds 1 and 0."""
    # "is None" identity tests instead of "== None" equality.
    if cv is None:
        cv = 1
    if lv is None:
        lv = 0
    return cv + lv
# Ready-made uppercase A-Z sequence: "A" -> "B", ..., "Z" -> "AA".
increment_string = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
class Sequence(object):
    """A simple Sequence using the new SDB "Consistent" features
    Based largly off of the "Counter" example from mitch garnaat:
    http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py"""

    def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None):
        """Create a new Sequence, using an optional function to
        increment to the next number, by default we just increment by one.
        Every parameter here is optional, if you don't specify any options
        then you'll get a new SequenceGenerator with a random ID stored in the
        default domain that increments by one and uses the default botoweb
        environment

        :param id: Optional ID (name) for this counter
        :type id: str

        :param domain_name: Optional domain name to use, by default we get this out of the
            environment configuration
        :type domain_name:str

        :param fnc: Optional function to use for the incrementation, by default we just increment by one
            There are several functions defined in this module.
            Your function must accept "None" to get the initial value
        :type fnc: function, str

        :param init_val: Initial value, by default this is the first element in your sequence,
            but you can pass in any value, even a string if you pass in a function that uses
            strings instead of ints to increment
        """
        self._db = None
        self._value = None
        self.last_value = None
        self.domain_name = domain_name
        self.id = id
        if self.id == None:
            import uuid
            self.id = str(uuid.uuid4())
            # A brand-new (random-id) sequence is seeded via the incrementer.
            if init_val == None:
                init_val = fnc(init_val)
            self.val = init_val
        # Remember the python type of the sequence values (e.g. int or str)
        # so values read back from SDB (always strings) can be coerced.
        self.item_type = type(fnc(None))
        self.timestamp = None
        # Allow us to pass in a full name to a function
        if type(fnc) == str:
            from boto.utils import find_class
            fnc = find_class(fnc)
        self.fnc = fnc

    def set(self, val):
        """Set the value"""
        import time
        now = time.time()
        expected_values = []
        new_val = {}
        new_val['timestamp'] = now
        if self._value != None:
            new_val['last_value'] = self._value
            # Optimistic concurrency: the write only succeeds if nobody
            # else changed current_value since we last read it.
            expected_values = ['current_value', str(self._value)]
        new_val['current_value'] = val
        try:
            self.db.put_attributes(self.id, new_val, expected_values=expected_values)
            self.timestamp = new_val['timestamp']
        except SDBResponseError, e:
            # 409 Conflict => another writer advanced the sequence first.
            if e.status == 409:
                raise ValueError, "Sequence out of sync"
            else:
                raise

    def get(self):
        """Get the value"""
        val = self.db.get_attributes(self.id, consistent_read=True)
        if val and val.has_key('timestamp'):
            self.timestamp = val['timestamp']
        if val and val.has_key('current_value'):
            self._value = self.item_type(val['current_value'])
        if val.has_key("last_value") and val['last_value'] != None:
            self.last_value = self.item_type(val['last_value'])
        return self._value

    # val reads/writes go through SDB via get()/set() above.
    val = property(get, set)

    def __repr__(self):
        return "%s('%s', '%s', '%s.%s', '%s')" % (
            self.__class__.__name__,
            self.id,
            self.domain_name,
            self.fnc.__module__, self.fnc.__name__,
            self.val)

    def _connect(self):
        """Connect to our domain"""
        if not self._db:
            if not self.domain_name:
                import boto
                sdb = boto.connect_sdb()
                self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default"))
            try:
                self._db = sdb.get_domain(self.domain_name)
            except SDBResponseError, e:
                # 400 => domain does not exist yet; create it lazily.
                if e.status == 400:
                    self._db = sdb.create_domain(self.domain_name)
                else:
                    raise
        return self._db

    db = property(_connect)

    def next(self):
        self.val = self.fnc(self.val, self.last_value)
        return self.val

    def delete(self):
        """Remove this sequence"""
        self.db.delete_attributes(self.id)

    def __del__(self):
        # WARNING: garbage-collecting a Sequence object deletes its backing
        # SDB item — the persisted counter is destroyed, not just this
        # in-memory handle.
        self.delete()
| apache-2.0 |
psav/cfme_tests | cfme/dashboard.py | 4 | 16505 | # -*- coding: utf-8 -*-
import attr
import re
from cached_property import cached_property
from navmazing import NavigateToAttribute
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.timeutil import parsetime
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from cfme.utils.wait import wait_for
from widgetastic.utils import ParametrizedLocator, ParametrizedString, Parameter
from widgetastic.widget import ParametrizedView, Text, View, Widget, ConditionalSwitchableView
from widgetastic.xpath import quote
from widgetastic_patternfly import Button, Dropdown, Tab
from widgetastic_manageiq import Table
from .base.login import BaseLoggedInPage
# TODO: Move this into widgetastic_patternfly
class Kebab(Widget):
    """The so-called "kebab" widget of Patternfly.

    <http://www.patternfly.org/pattern-library/widgets/#kebabs>

    Args:
        button_id: id of the button tag inside the kebab. If not specified, first kebab available
            will be used
    """
    # ROOT resolves {@locator} against self.locator assigned in __init__.
    ROOT = ParametrizedLocator('{@locator}')
    UL = './ul[contains(@class, "dropdown-menu")]'
    BUTTON = './button'
    ITEM = './ul/li/a[normalize-space(.)={}]'
    ITEMS = './ul/li/a'

    def __init__(self, parent, button_id=None, logger=None):
        """Build the kebab locator from button_id, or pick the first kebab on the page."""
        super(Kebab, self).__init__(parent, logger=logger)
        if button_id is not None:
            self.locator = (
                './/div[contains(@class, "dropdown-kebab-pf") and ./button[@id={}]]'.format(
                    quote(button_id)))
        else:
            self.locator = './/div[contains(@class, "dropdown-kebab-pf") and ./button][1]'

    @property
    def is_opened(self):
        """Returns opened state of the kebab."""
        return self.browser.is_displayed(self.UL)

    @property
    def items(self):
        """Lists all items in the kebab.

        Returns:
            :py:class:`list` of :py:class:`str`
        """
        return [self.browser.text(item) for item in self.browser.elements(self.ITEMS)]

    def open(self):
        """Open the kebab"""
        if not self.is_opened:
            self.browser.click(self.BUTTON)

    def close(self):
        """Close the kebab"""
        if self.is_opened:
            self.browser.click(self.BUTTON)

    def select(self, item, close=True):
        """Select a specific item from the kebab.

        Args:
            item: Item to be selected.
            close: Whether to close the kebab after selection. If the item is a link, you may want
                to set this to ``False``
        """
        try:
            self.open()
            self.browser.click(self.ITEM.format(quote(item)))
        finally:
            # Always attempt to close, even if the click raised.
            if close:
                self.close()
class DashboardView(BaseLoggedInPage):
    """View that represents the Intelligence/Dashboard."""
    reset_button = Button(title="Reset Dashboard Widgets to the defaults")

    def reset_widgets(self, cancel=False):
        """Clicks the reset button to reset widgets and handles the alert."""
        # ignore_ajax because the click triggers a browser-native alert.
        self.browser.click(self.reset_button, ignore_ajax=True)
        self.browser.handle_alert(cancel=cancel, wait=10.0)
        self.browser.plugin.ensure_page_safe()

    add_widget = Dropdown('Add a widget')

    @View.nested
    class zoomed(View): # noqa
        """Represents the zoomed modal panel"""
        title = Text('.//div[@id="lightbox-panel"]//h2[contains(@class, "card-pf-title")]')
        close = Text('.//div[@id="lightbox-panel"]//a[normalize-space(@title)="Close"]')

    def ensure_zoom_closed(self):
        """Close the zoom modal if one is currently open."""
        if self.zoomed.title.is_displayed:
            self.zoomed.close.click()

    @ParametrizedView.nested
    class dashboards(Tab, ParametrizedView): # noqa
        """One dashboard tab, parametrized by the tab title."""
        PARAMETERS = ('title', )
        ALL_LOCATOR = './/ul[contains(@class, "nav-tabs-pf")]/li/a'
        COLUMN_LOCATOR = '//div[@id="col{}"]//h2'
        tab_name = Parameter('title')

        @classmethod
        def all(cls, browser):
            # Yields one (title,) tuple per dashboard tab on the page.
            return [(browser.text(e), ) for e in browser.elements(cls.ALL_LOCATOR)]

        def column_widget_names(self, column_index):
            """Returns names of widgets in column specified.

            Args:
                column_index: Position of the column. Numbered from 1!

            Returns:
                :py:class:`list` of :py:class:`str`
            """
            return [
                self.browser.text(e)
                for e
                in self.browser.elements(self.COLUMN_LOCATOR.format(column_index))]

    @ParametrizedView.nested
    class widgets(ParametrizedView): # noqa
        """One dashboard widget card, parametrized by the widget title."""
        PARAMETERS = ('title', )
        ALL_LOCATOR = '//div[starts-with(@id, "w_")]//h2[contains(@class, "card-pf-title")]'
        BLANK_SLATE = './/div[contains(@class, "blank-slate-pf")]//h1'
        CHART = './div/div/div[starts-with(@id, "miq_widgetchart_")]'
        RSS = './div/div[contains(@class, "rss_widget")]'
        RSS_TABLE = './div[./div[contains(@class, "rss_widget")]]/div/table'
        TABLE = './div/table|./div/div/table'
        MC = (
            './/div[contains(@class, "mc")]/*[1]|.//div[starts-with(@id, "dd_w") '
            'and contains(@id, "_box")]/*[1]')
        ROOT = ParametrizedLocator(
            './/div[starts-with(@id, "w_") and .//h2[contains(@class, "card-pf-title")'
            ' and normalize-space(.)={title|quote}]]')
        title = Text('.//h2[contains(@class, "card-pf-title")]')
        menu = Kebab(button_id=ParametrizedString('btn_{@widget_id}'))
        # Content widget is switched based on the detected content_type.
        contents = ConditionalSwitchableView(reference='content_type')
        # Unsupported reading yet
        contents.register(None, default=True, widget=Widget())
        contents.register('chart', widget=Widget())
        # Reading supported
        contents.register('table', widget=Table(TABLE))
        contents.register('rss', widget=Table(RSS_TABLE))
        footer = Text('.//div[contains(@class, "card-pf-footer")]')

        @property
        def column(self):
            """Returns the column position of this widget. Numbered from 1!"""
            parent = self.browser.element('..')
            try:
                # Parent container ids look like "col1", "col2", ...
                parent_id = self.browser.get_attribute('id', parent).strip()
                return int(re.sub(r'^col(\d+)$', '\\1', parent_id))
            except (ValueError, TypeError, AttributeError):
                raise ValueError('Could not get the column index of widget')

        @property
        def minimized(self):
            # Minimized widgets hide their content container.
            return not self.browser.is_displayed(self.MC)

        @cached_property
        def widget_id(self):
            # Numeric suffix of the "w_<n>" container id.
            id_attr = self.browser.get_attribute('id', self)
            return int(id_attr.rsplit('_', 1)[-1])

        @cached_property
        def content_type(self):
            """Detect what kind of content this widget renders (None if blank/unknown)."""
            if self.browser.elements(self.BLANK_SLATE):
                # No data yet
                return None
            elif self.browser.elements(self.RSS):
                return 'rss'
            elif self.browser.is_displayed(self.CHART):
                return 'chart'
            elif self.browser.is_displayed(self.TABLE):
                return 'table'
            else:
                return None

        @property
        def blank(self):
            # True when the widget shows the "blank slate" placeholder.
            return bool(self.browser.elements(self.BLANK_SLATE))

        @classmethod
        def all(cls, browser):
            # Yields one (title,) tuple per widget on the page.
            return [(browser.text(e), ) for e in browser.elements(cls.ALL_LOCATOR)]

    @property
    def is_displayed(self):
        return (
            self.logged_in_as_current_user and
            self.navigation.currently_selected == ['Cloud Intel', 'Dashboard'])
class ParticularDashboardView(DashboardView):
    """Dashboard view scoped to one specific dashboard tab."""
    @property
    def is_displayed(self):
        # Displayed when the base dashboard page is shown and our tab is active.
        base_shown = super(ParticularDashboardView, self).is_displayed
        return base_shown and self.dashboards(title=self.obj.name).is_active
@attr.s
class DashboardWidget(BaseEntity):
    """Represents a single UI dashboard widget.

    Args:
        name: Name of the widget as displayed in the title.
        widget_collection: The widget collection linked to a dashboard
    """
    name = attr.ib()

    @property
    def dashboard(self):
        """The Dashboard entity this widget lives on."""
        return self.parent.parent

    @property
    def widget_view(self):
        """Returns a view of the particular widget."""
        return self.dashboard.dashboard_view.widgets(title=self.name)

    @property
    def last_in_column(self):
        """Returns whether this widget is the last in its column"""
        try:
            return (
                self.widget_view.parent.column_widget_names(self.widget_view.column)[-1] ==
                self.name)
        except IndexError:
            return False

    @property
    def footer(self):
        """Return parsed footer value (dict of lowercased name -> parsetime or None)."""
        self.close_zoom()
        cleaned = [
            x.strip()
            for x
            in self.widget_view.footer.text.encode("utf-8").strip().split("|")
        ]
        result = {}
        for item in cleaned:
            name, time = item.split(" ", 1)
            time = time.strip()
            if time.lower() == "never":
                result[name.strip().lower()] = None
            else:
                # The footer uses two different time formats depending on age.
                try:
                    result[name.strip().lower()] = parsetime.from_american_minutes(time.strip())
                except ValueError:
                    result[name.strip().lower()] = parsetime.from_long_date_format(time.strip())
        return result

    @property
    def time_updated(self):
        """Returns a datetime when the widget was last updated."""
        return self.footer["updated"]

    @property
    def time_next(self):
        """Returns a datetime when the widget will be updated."""
        return self.footer["next"]

    @property
    def minimized(self):
        """Returns whether the widget is minimized or not."""
        self.close_zoom()
        return self.widget_view.minimized

    @property
    def blank(self):
        """Returns whether the widget has not been generated before."""
        self.close_zoom()
        return self.widget_view.blank

    @property
    def content_type(self):
        """Returns the type of content of this widget"""
        self.close_zoom()
        return self.widget_view.content_type

    @property
    def contents(self):
        """Returns the WT widget with contents of this dashboard widget."""
        self.close_zoom()
        return self.widget_view.contents

    def minimize(self):
        """Minimize this widget."""
        self.close_zoom()
        view = self.widget_view
        if 'Maximize' not in view.menu.items and 'Minimize' not in view.menu.items:
            raise ValueError('The widget {} cannot be maximized or minimized'.format(self.name))
        if 'Minimize' in view.menu.items:
            view.menu.select('Minimize')

    def restore(self):
        """Maximize this widget."""
        self.close_zoom()
        view = self.widget_view
        view.parent.parent.ensure_zoom_closed()
        if 'Maximize' not in view.menu.items and 'Minimize' not in view.menu.items:
            raise ValueError('The widget {} cannot be maximized or minimized'.format(self.name))
        if 'Maximize' in view.menu.items:
            view.menu.select('Maximize')

    def remove(self):
        """Remove this widget."""
        self.close_zoom()
        view = self.widget_view
        view.menu.select('Remove Widget')

    @property
    def is_zoomed(self):
        """Returns whether this widget is zoomed now."""
        view = self.create_view(DashboardView)
        # BUG FIX: compare the zoomed panel's title *text* to the widget
        # name; the Text widget object itself never equals a string
        # (DashboardCollection.zoomed_name uses .text the same way).
        return view.zoomed.title.is_displayed and view.zoomed.title.text == self.name

    def zoom(self):
        """Zoom this widget in."""
        if not self.is_zoomed:
            self.close_zoom()
            view = self.widget_view
            view.menu.select('Zoom in', close=False)
            wait_for(lambda: self.is_zoomed, delay=0.2, timeout=10)

    @property
    def can_zoom(self):
        """Returns whether this widget can be zoomed."""
        self.close_zoom()
        view = self.widget_view
        return 'Zoom in' in view.menu.items

    def close_zoom(self):
        """Close zoom. Works theoretically for any widget, it is just exposed here."""
        view = self.create_view(DashboardView)
        if view.is_displayed:
            view.ensure_zoom_closed()
@attr.s
class DashboardWidgetCollection(BaseCollection):
    """Collection of the widgets living on a single dashboard."""
    ENTITY = DashboardWidget

    @property
    def dashboard_view(self):
        """View of the owning dashboard."""
        return self.parent.dashboard_view

    def all(self, content_type=None):  # widgets
        """Return all widgets, optionally filtered by ``content_type``."""
        view = self.dashboard_view
        result = []
        # TODO: Idiomatize the following line
        for (widget_name, ) in view.widgets.view_class.all(view.browser):
            w = self.instantiate(widget_name)
            if content_type is None or w.content_type == content_type:
                # Reuse the already-created entity instead of instantiating
                # a second identical one.
                result.append(w)
        return result

    def reset(self, cancel=False):
        """Clicks the Reset widgets button.

        Args:
            cancel: Cancel the confirmation alert instead of accepting it.
                (Previously this flag was silently ignored.)
        """
        navigate_to(self.parent, 'Details').reset_widgets(cancel=cancel)
@attr.s
class Dashboard(BaseEntity):
    """A single named dashboard with its widget collection."""
    name = attr.ib()

    _collections = {'widgets': DashboardWidgetCollection}

    @property
    def dashboard_view(self):
        """Returns a view pointed at a particular dashboard."""
        return navigate_to(self, 'Details').dashboards(title=self.name)

    def drag_and_drop(self, dragged_widget_or_name, dropped_widget_or_name):
        """Drags and drops widgets onto each other."""
        if isinstance(dragged_widget_or_name, DashboardWidget):
            dragged_widget_or_name = dragged_widget_or_name.name
        if isinstance(dropped_widget_or_name, DashboardWidget):
            dropped_widget_object = dropped_widget_or_name
            dropped_widget_or_name = dropped_widget_or_name.name
        else:
            dropped_widget_object = self.collections.widgets.instantiate(dropped_widget_or_name)
        view = self.dashboard_view
        first_widget = view.widgets(title=dragged_widget_or_name).title
        if dropped_widget_object.last_in_column:
            # Different behaviour: drop below the last widget in the column.
            dropped_widget = view.widgets(title=dropped_widget_or_name)
            middle = view.browser.middle_of(dropped_widget)
            position = view.browser.location_of(dropped_widget)
            size = view.browser.size_of(dropped_widget)
            drop_x = middle.x
            # BUG FIX: the vertical drop coordinate must come from the
            # widget's y position, not its x position.
            drop_y = position.y + size.height + 10
            view.browser.drag_and_drop_to(first_widget, to_x=drop_x, to_y=drop_y)
        else:
            second_widget = view.widgets(title=dropped_widget_or_name).footer
            view.browser.drag_and_drop(first_widget, second_widget)
        view.browser.plugin.ensure_page_safe()
@attr.s
class DashboardCollection(BaseCollection):
    """Represents the Dashboard page and can jump around various dashboards present."""
    ENTITY = Dashboard

    @property
    def default(self):
        """Returns an instance of the ``Default Dashboard``"""
        return self.instantiate('Default Dashboard')

    def all(self):
        """Return a Dashboard entity for every tab visible on the page."""
        view = navigate_to(self.appliance.server, 'Dashboard')
        result = []
        # TODO: Idiomatize the following line
        for (dashboard_name, ) in view.dashboards.view_class.all(view.browser):
            result.append(self.instantiate(dashboard_name))
        return result

    def refresh(self):
        """Refreshes the dashboard view by forcibly clicking the navigation again."""
        view = navigate_to(self.appliance.server, 'Dashboard')
        view.navigation.select('Cloud Intel', 'Dashboard')

    @property
    def zoomed_name(self):
        """Grabs the name of the currently zoomed widget."""
        view = navigate_to(self.appliance.server, 'Dashboard')
        if not view.zoomed.is_displayed:
            return None
        return view.zoomed.title.text

    def close_zoom(self):
        """Closes any zoomed widget."""
        navigate_to(self.appliance.server, 'Dashboard').ensure_zoom_closed()
@navigator.register(Dashboard, 'Details')
class DashboardDetails(CFMENavigateStep):
    """Navigation step that activates a particular dashboard's tab."""
    VIEW = ParticularDashboardView
    prerequisite = NavigateToAttribute('appliance.server', 'Dashboard')
    def step(self):
        # Select the tab whose title matches the target dashboard's name.
        self.prerequisite_view.dashboards(title=self.obj.name).select()
| gpl-2.0 |
javierTerry/odoo | addons/hw_escpos/escpos/exceptions.py | 170 | 2884 | """ ESC/POS Exceptions classes """
import os
class Error(Exception):
    """ Base class for ESC/POS errors """
    def __init__(self, msg, status=None):
        """Store the message and exit/result code (defaults to 1)."""
        Exception.__init__(self)
        self.msg = msg
        self.resultcode = 1 if status is None else status

    def __str__(self):
        return self.msg
# Result/Exit codes
# 0 = success
# 10 = No Barcode type defined
# 20 = Barcode size values are out of range
# 30 = Barcode text not supplied
# 40 = Image height is too large
# 50 = No string supplied to be printed
# 60 = Invalid pin to send Cash Drawer pulse
class BarcodeTypeError(Error):
    """No barcode type was defined (exit code 10)."""
    def __init__(self, msg=""):
        # Route the result code through the base-class status parameter.
        Error.__init__(self, msg, status=10)

    def __str__(self):
        return "No Barcode type is defined"
class BarcodeSizeError(Error):
    """Barcode size values are out of range (exit code 20)."""
    def __init__(self, msg=""):
        # Route the result code through the base-class status parameter.
        Error.__init__(self, msg, status=20)

    def __str__(self):
        return "Barcode size is out of range"
class BarcodeCodeError(Error):
    """Barcode text was not supplied (exit code 30)."""
    def __init__(self, msg=""):
        # Route the result code through the base-class status parameter.
        Error.__init__(self, msg, status=30)

    def __str__(self):
        return "Code was not supplied"
class ImageSizeError(Error):
    """Image is too tall to print (exit code 40)."""
    def __init__(self, msg=""):
        # Route the result code through the base-class status parameter.
        Error.__init__(self, msg, status=40)

    def __str__(self):
        return "Image height is longer than 255px and can't be printed"
class TextError(Error):
    """No string supplied to be printed (exit code 50)."""
    def __init__(self, msg=""):
        # Route the result code through the base-class status parameter.
        Error.__init__(self, msg, status=50)

    def __str__(self):
        return "Text string must be supplied to the text() method"
class CashDrawerError(Error):
    """Invalid pin for the cash-drawer pulse (exit code 60)."""
    def __init__(self, msg=""):
        # Route the result code through the base-class status parameter.
        Error.__init__(self, msg, status=60)

    def __str__(self):
        return "Valid pin must be set to send pulse"
class NoStatusError(Error):
    """Printer status could not be retrieved (exit code 70)."""
    def __init__(self, msg=""):
        # Route the result code through the base-class status parameter.
        Error.__init__(self, msg, status=70)

    def __str__(self):
        return "Impossible to get status from the printer"
class TicketNotPrinted(Error):
    """Part of the ticket failed to print (exit code 80)."""
    def __init__(self, msg=""):
        # Route the result code through the base-class status parameter.
        Error.__init__(self, msg, status=80)

    def __str__(self):
        return "A part of the ticket was not been printed"
class NoDeviceError(Error):
    """Printer device could not be found (exit code 90)."""
    def __init__(self, msg=""):
        # Route the result code through the base-class status parameter.
        Error.__init__(self, msg, status=90)

    def __str__(self):
        return "Impossible to find the printer Device"
class HandleDeviceError(Error):
    """Printer device could not be handled (exit code 100)."""
    def __init__(self, msg=""):
        # Route the result code through the base-class status parameter.
        Error.__init__(self, msg, status=100)

    def __str__(self):
        return "Impossible to handle device"
| agpl-3.0 |
GodotNativeTools/godot-cpp | binding_generator.py | 1 | 27665 | #!/usr/bin/env python
import json
# comment.
# Convenience function for using template get_node
def correct_method_name(method_list):
    """Rename Node.get_node to get_node_internal (in place) so the C++
    template get_node wrapper can take its name."""
    for entry in method_list:
        if entry["name"] == "get_node":
            entry["name"] = "get_node_internal"
# Parsed api.json class list; filled in by generate_bindings() and read by
# the helper functions below.
classes = []
def generate_bindings(path, use_template_get_node):
    """Generate C++ binding sources for every class described in the
    api.json file at *path*.

    Writes one .hpp/.cpp pair per class under include/gen and src/gen,
    plus the shared __icalls.hpp, __register_types.cpp and
    __init_method_bindings.cpp files.  When *use_template_get_node* is
    set, Node.get_node is renamed so a template wrapper can be emitted.
    """
    global classes
    classes = json.load(open(path))
    icalls = set()
    for c in classes:
        # print c['name']
        used_classes = get_used_classes(c)
        if use_template_get_node and c["name"] == "Node":
            correct_method_name(c["methods"])
        header = generate_class_header(used_classes, c, use_template_get_node)
        impl = generate_class_implementation(icalls, used_classes, c, use_template_get_node)
        header_file = open("include/gen/" + strip_name(c["name"]) + ".hpp", "w+")
        header_file.write(header)
        source_file = open("src/gen/" + strip_name(c["name"]) + ".cpp", "w+")
        source_file.write(impl)
    # icalls is populated as a side effect of generate_class_implementation.
    icall_header_file = open("include/gen/__icalls.hpp", "w+")
    icall_header_file.write(generate_icall_header(icalls))
    register_types_file = open("src/gen/__register_types.cpp", "w+")
    register_types_file.write(generate_type_registry(classes))
    init_method_bindings_file = open("src/gen/__init_method_bindings.cpp", "w+")
    init_method_bindings_file.write(generate_init_method_bindings(classes))
def is_reference_type(t):
    """Return True when the class named *t* is reference-counted
    (its api.json entry has a truthy is_reference flag)."""
    return any(c["is_reference"] for c in classes if c["name"] == t)
def make_gdnative_type(t, ref_allowed):
    """Map an api.json type name to its C++ binding spelling.

    The result ends with a space (or "*" for raw object pointers) so a
    variable name can be appended directly.  Reference-counted classes
    become Ref<T> only when *ref_allowed* is true.
    """
    if is_enum(t):
        return remove_enum_prefix(t) + " "
    elif is_class_type(t):
        if is_reference_type(t) and ref_allowed:
            return "Ref<" + strip_name(t) + "> "
        else:
            return strip_name(t) + " *"
    else:
        # Scalars map onto fixed-width engine types.
        if t == "int":
            return "int64_t "
        if t == "float" or t == "real":
            return "real_t "
        return strip_name(t) + " "
def generate_class_header(used_classes, c, use_template_get_node):
source = []
source.append("#ifndef GODOT_CPP_" + strip_name(c["name"]).upper() + "_HPP")
source.append("#define GODOT_CPP_" + strip_name(c["name"]).upper() + "_HPP")
source.append("")
source.append("")
source.append("#include <gdnative_api_struct.gen.h>")
source.append("#include <stdint.h>")
source.append("")
source.append("#include <core/CoreTypes.hpp>")
class_name = strip_name(c["name"])
# Ref<T> is not included in object.h in Godot either,
# so don't include it here because it's not needed
if class_name != "Object" and class_name != "Reference":
source.append("#include <core/Ref.hpp>")
ref_allowed = True
else:
source.append("#include <core/TagDB.hpp>")
ref_allowed = False
included = []
for used_class in used_classes:
if is_enum(used_class) and is_nested_type(used_class):
used_class_name = remove_enum_prefix(extract_nested_type(used_class))
if used_class_name not in included:
included.append(used_class_name)
source.append("#include \"" + used_class_name + ".hpp\"")
elif is_enum(used_class) and is_nested_type(used_class) and not is_nested_type(used_class, class_name):
used_class_name = remove_enum_prefix(used_class)
if used_class_name not in included:
included.append(used_class_name)
source.append("#include \"" + used_class_name + ".hpp\"")
source.append("")
if c["base_class"] != "":
source.append("#include \"" + strip_name(c["base_class"]) + ".hpp\"")
source.append("namespace godot {")
source.append("")
for used_type in used_classes:
if is_enum(used_type) or is_nested_type(used_type, class_name):
continue
else:
source.append("class " + strip_name(used_type) + ";")
source.append("")
vararg_templates = ""
# generate the class definition here
source.append("class " + class_name + (" : public _Wrapped" if c["base_class"] == "" else (" : public " + strip_name(c["base_class"])) ) + " {")
if c["base_class"] == "":
source.append("public: enum { ___CLASS_IS_SCRIPT = 0, };")
source.append("")
source.append("private:")
if c["singleton"]:
source.append("\tstatic " + class_name + " *_singleton;")
source.append("")
source.append("\t" + class_name + "();")
source.append("")
# Generate method table
source.append("\tstruct ___method_bindings {")
for method in c["methods"]:
source.append("\t\tgodot_method_bind *mb_" + method["name"] + ";")
source.append("\t};")
source.append("\tstatic ___method_bindings ___mb;")
source.append("\tstatic void *_detail_class_tag;")
source.append("")
source.append("public:")
source.append("\tstatic void ___init_method_bindings();")
# class id from core engine for casting
source.append("\tinline static size_t ___get_id() { return (size_t)_detail_class_tag; }")
source.append("")
if c["singleton"]:
source.append("\tstatic inline " + class_name + " *get_singleton()")
source.append("\t{")
source.append("\t\tif (!" + class_name + "::_singleton) {")
source.append("\t\t\t" + class_name + "::_singleton = new " + class_name + ";")
source.append("\t\t}")
source.append("\t\treturn " + class_name + "::_singleton;")
source.append("\t}")
source.append("")
# godot::api->godot_global_get_singleton((char *) \"" + strip_name(c["name"]) + "\");"
# ___get_class_name
source.append("\tstatic inline const char *___get_class_name() { return (const char *) \"" + strip_name(c["name"]) + "\"; }")
source.append("\tstatic inline Object *___get_from_variant(Variant a) { godot_object *o = (godot_object*) a; return (o) ? (Object *) godot::nativescript_1_1_api->godot_nativescript_get_instance_binding_data(godot::_RegisterState::language_index, o) : nullptr; }")
enum_values = []
source.append("\n\t// enums")
for enum in c["enums"]:
source.append("\tenum " + strip_name(enum["name"]) + " {")
for value in enum["values"]:
source.append("\t\t" + remove_nested_type_prefix(value) + " = " + str(enum["values"][value]) + ",")
enum_values.append(value)
source.append("\t};")
source.append("\n\t// constants")
for name in c["constants"]:
if name not in enum_values:
source.append("\tconst static int " + name + " = " + str(c["constants"][name]) + ";")
if c["instanciable"]:
source.append("")
source.append("")
source.append("\tstatic " + class_name + " *_new();")
source.append("\n\t// methods")
if class_name == "Object":
source.append("#ifndef GODOT_CPP_NO_OBJECT_CAST")
source.append("\ttemplate<class T>")
source.append("\tstatic T *cast_to(const Object *obj);")
source.append("#endif")
source.append("")
for method in c["methods"]:
method_signature = ""
# TODO decide what to do about virtual methods
# method_signature += "virtual " if method["is_virtual"] else ""
method_signature += make_gdnative_type(method["return_type"], ref_allowed)
method_name = escape_cpp(method["name"])
method_signature += method_name + "("
has_default_argument = False
method_arguments = ""
for i, argument in enumerate(method["arguments"]):
method_signature += "const " + make_gdnative_type(argument["type"], ref_allowed)
argument_name = escape_cpp(argument["name"])
method_signature += argument_name
method_arguments += argument_name
# default arguments
def escape_default_arg(_type, default_value):
if _type == "Color":
return "Color(" + default_value + ")"
if _type == "bool" or _type == "int":
return default_value.lower()
if _type == "Array":
return "Array()"
if _type in ["PoolVector2Array", "PoolStringArray", "PoolVector3Array", "PoolColorArray", "PoolIntArray", "PoolRealArray"]:
return _type + "()"
if _type == "Vector2":
return "Vector2" + default_value
if _type == "Vector3":
return "Vector3" + default_value
if _type == "Transform":
return "Transform()"
if _type == "Transform2D":
return "Transform2D()"
if _type == "Rect2":
return "Rect2" + default_value
if _type == "Variant":
return "Variant()" if default_value == "Null" else default_value
if _type == "String":
return "\"" + default_value + "\""
if _type == "RID":
return "RID()"
if default_value == "Null" or default_value == "[Object:null]":
return "nullptr"
return default_value
if argument["has_default_value"] or has_default_argument:
method_signature += " = " + escape_default_arg(argument["type"], argument["default_value"])
has_default_argument = True
if i != len(method["arguments"]) - 1:
method_signature += ", "
method_arguments += ","
if method["has_varargs"]:
if len(method["arguments"]) > 0:
method_signature += ", "
method_arguments += ", "
vararg_templates += "\ttemplate <class... Args> " + method_signature + "Args... args){\n\t\treturn " + method_name + "(" + method_arguments + "Array::make(args...));\n\t}\n"""
method_signature += "const Array& __var_args = Array()"
method_signature += ")" + (" const" if method["is_const"] else "")
source.append("\t" + method_signature + ";")
source.append(vararg_templates)
if use_template_get_node and class_name == "Node":
# Extra definition for template get_node that calls the renamed get_node_internal; has a default template parameter for backwards compatibility.
source.append("\ttemplate <class T = Node>")
source.append("\tT *get_node(const NodePath path) const {")
source.append("\t\treturn Object::cast_to<T>(get_node_internal(path));")
source.append("\t}")
source.append("};")
source.append("")
# ...And a specialized version so we don't unnecessarily cast when using the default.
source.append("template <>")
source.append("inline Node *Node::get_node<Node>(const NodePath path) const {")
source.append("\treturn get_node_internal(path);")
source.append("}")
source.append("")
else:
source.append("};")
source.append("")
source.append("}")
source.append("")
source.append("#endif")
return "\n".join(source)
def generate_class_implementation(icalls, used_classes, c, use_template_get_node):
    """Generate the C++ .cpp implementation file for one Godot class binding.

    icalls: set of (return_type, arg_types) tuples; every icall signature
        this class needs is added to it (side effect, consumed later by
        generate_icall_header).
    used_classes: class names referenced by this class' method signatures;
        one #include is emitted per non-enum entry.
    c: parsed api.json dict describing the class.
    use_template_get_node: when True, the binding for Node's
        "get_node_internal" is wired to the engine method "get_node"
        (the template wrapper is declared in the generated header).

    Returns the whole generated file as a single newline-joined string.
    """
    class_name = strip_name(c["name"])
    # Object and Reference themselves must not be wrapped in Ref<T>.
    ref_allowed = class_name != "Object" and class_name != "Reference"

    source = []
    source.append("#include \"" + class_name + ".hpp\"")
    source.append("")
    source.append("")

    source.append("#include <core/GodotGlobal.hpp>")
    source.append("#include <core/CoreTypes.hpp>")
    source.append("#include <core/Ref.hpp>")
    source.append("#include <core/Godot.hpp>")
    source.append("")

    source.append("#include \"__icalls.hpp\"")
    source.append("")
    source.append("")

    # One include per referenced class (enums need no include of their own).
    for used_class in used_classes:
        if is_enum(used_class):
            continue
        else:
            source.append("#include \"" + strip_name(used_class) + ".hpp\"")

    source.append("")
    source.append("")

    source.append("namespace godot {")

    core_object_name = "this"

    source.append("")
    source.append("")

    if c["singleton"]:
        # Singletons get a static instance pointer plus a constructor that
        # fetches the engine-side singleton object.
        source.append("" + class_name + " *" + class_name + "::_singleton = NULL;")
        source.append("")
        source.append("")
        # FIXME Test if inlining has a huge impact on binary size
        source.append(class_name + "::" + class_name + "() {")
        source.append("\t_owner = godot::api->godot_global_get_singleton((char *) \"" + strip_name(c["name"]) + "\");")
        source.append("}")
        source.append("")
        source.append("")

    # Method table initialization
    source.append(class_name + "::___method_bindings " + class_name + "::___mb = {};")
    source.append("")
    source.append("void *" + class_name + "::_detail_class_tag = nullptr;")
    source.append("")
    source.append("void " + class_name + "::___init_method_bindings() {")
    for method in c["methods"]:
        source.append("\t___mb.mb_" + method["name"] + " = godot::api->godot_method_bind_get_method(\"" + c["name"] + "\", \"" + ("get_node" if use_template_get_node and method["name"] == "get_node_internal" else method["name"]) + "\");")
    source.append("\tgodot_string_name class_name;")
    source.append("\tgodot::api->godot_string_name_new_data(&class_name, \"" + c["name"] + "\");")
    source.append("\t_detail_class_tag = godot::core_1_2_api->godot_get_class_tag(&class_name);")
    source.append("\tgodot::api->godot_string_name_destroy(&class_name);")
    source.append("}")
    source.append("")

    if c["instanciable"]:
        # _new(): construct an engine object and return its C++ binding wrapper.
        source.append(class_name + " *" + strip_name(c["name"]) + "::_new()")
        source.append("{")
        source.append("\treturn (" + class_name + " *) godot::nativescript_1_1_api->godot_nativescript_get_instance_binding_data(godot::_RegisterState::language_index, godot::api->godot_get_class_constructor((char *)\"" + c["name"] + "\")());")
        source.append("}")

    for method in c["methods"]:
        # Build the method definition signature.
        method_signature = ""

        method_signature += make_gdnative_type(method["return_type"], ref_allowed)
        method_signature += strip_name(c["name"]) + "::" + escape_cpp(method["name"]) + "("

        for i, argument in enumerate(method["arguments"]):
            method_signature += "const " + make_gdnative_type(argument["type"], ref_allowed)
            method_signature += escape_cpp(argument["name"])
            if i != len(method["arguments"]) - 1:
                method_signature += ", "

        if method["has_varargs"]:
            if len(method["arguments"]) > 0:
                method_signature += ", "
            method_signature += "const Array& __var_args"

        method_signature += ")" + (" const" if method["is_const"] else "")

        source.append(method_signature + " {")

        if method["name"] == "free":
            # dirty hack because Object::free is marked virtual but doesn't actually exist...
            source.append("\tgodot::api->godot_object_destroy(_owner);")
            source.append("}")
            source.append("")
            continue

        return_statement = ""
        return_type_is_ref = is_reference_type(method["return_type"]) and ref_allowed

        # Prefix of the generated "return ..." expression, per return kind.
        if method["return_type"] != "void":
            if is_class_type(method["return_type"]):
                if is_enum(method["return_type"]):
                    return_statement += "return (" + remove_enum_prefix(method["return_type"]) + ") "
                elif return_type_is_ref:
                    return_statement += "return Ref<" + strip_name(method["return_type"]) + ">::__internal_constructor(";
                else:
                    return_statement += "return " + ("(" + strip_name(method["return_type"]) + " *) " if is_class_type(method["return_type"]) else "")
            else:
                return_statement += "return "

        def get_icall_type_name(name):
            # Collapse enums to int and all Object-derived types to Object
            # so equivalent icalls are shared.
            if is_enum(name):
                return "int"
            if is_class_type(name):
                return "Object"
            return name

        if method["has_varargs"]:
            # Varargs path: marshal every argument into a Variant array and
            # go through godot_method_bind_call.
            if len(method["arguments"]) != 0:
                source.append("\tVariant __given_args[" + str(len(method["arguments"])) + "];")

            for i, argument in enumerate(method["arguments"]):
                source.append("\tgodot::api->godot_variant_new_nil((godot_variant *) &__given_args[" + str(i) + "]);")

            source.append("")

            for i, argument in enumerate(method["arguments"]):
                source.append("\t__given_args[" + str(i) + "] = " + escape_cpp(argument["name"]) + ";")

            source.append("")

            size = ""
            if method["has_varargs"]:
                size = "(__var_args.size() + " + str(len(method["arguments"])) + ")"
            else:
                size = "(" + str(len(method["arguments"])) + ")"

            source.append("\tgodot_variant **__args = (godot_variant **) alloca(sizeof(godot_variant *) * " + size + ");")
            source.append("")

            for i, argument in enumerate(method["arguments"]):
                source.append("\t__args[" + str(i) + "] = (godot_variant *) &__given_args[" + str(i) + "];")

            source.append("")

            if method["has_varargs"]:
                source.append("\tfor (int i = 0; i < __var_args.size(); i++) {")
                source.append("\t\t__args[i + " + str(len(method["arguments"])) + "] = (godot_variant *) &((Array &) __var_args)[i];")
                source.append("\t}")

            source.append("")

            source.append("\tVariant __result;")
            source.append("\t*(godot_variant *) &__result = godot::api->godot_method_bind_call(___mb.mb_" + method["name"] + ", ((const Object *) " + core_object_name + ")->_owner, (const godot_variant **) __args, " + size + ", nullptr);")
            source.append("")

            if is_class_type(method["return_type"]):
                # Manually take a reference on returned Reference-derived
                # objects so they survive the Variant destruction below.
                source.append("\tObject *obj = Object::___get_from_variant(__result);")
                source.append("\tif (obj->has_method(\"reference\"))")
                source.append("\t\tobj->callv(\"reference\", Array());")
                source.append("")

            for i, argument in enumerate(method["arguments"]):
                source.append("\tgodot::api->godot_variant_destroy((godot_variant *) &__given_args[" + str(i) + "]);")

            source.append("")

            if method["return_type"] != "void":
                cast = ""
                if is_class_type(method["return_type"]):
                    if return_type_is_ref:
                        cast += "Ref<" + strip_name(method["return_type"]) + ">::__internal_constructor(__result);"
                    else:
                        cast += "(" + strip_name(method["return_type"]) + " *) " + strip_name(method["return_type"] + "::___get_from_variant(") + "__result);"
                else:
                    cast += "__result;"
                source.append("\treturn " + cast)

        else:
            # Fixed-arity path: delegate to a shared ptrcall icall helper,
            # registering its signature for header generation.
            args = []
            for arg in method["arguments"]:
                args.append(get_icall_type_name(arg["type"]))

            icall_ret_type = get_icall_type_name(method["return_type"])

            icall_sig = tuple((icall_ret_type, tuple(args)))

            icalls.add(icall_sig)

            icall_name = get_icall_name(icall_sig)

            return_statement += icall_name + "(___mb.mb_" + method["name"] + ", (const Object *) " + core_object_name

            for arg in method["arguments"]:
                arg_is_ref = is_reference_type(arg["type"]) and ref_allowed
                return_statement += ", " + escape_cpp(arg["name"]) + (".ptr()" if arg_is_ref else "")

            return_statement += ")"

            if return_type_is_ref:
                # closes the Ref<T>::__internal_constructor( opened above
                return_statement += ")"

            source.append("\t" + return_statement + ";")

        source.append("}")
        source.append("")

    source.append("}")

    return "\n".join(source)
def generate_icall_header(icalls):
    """Generate __icalls.hpp: one static inline ptrcall helper per signature.

    icalls: iterable of (return_type, (arg_type, ...)) tuples collected
        while generating the class implementations.

    Returns the header file content as a single string.
    """
    source = []
    source.append("#ifndef GODOT_CPP__ICALLS_HPP")
    source.append("#define GODOT_CPP__ICALLS_HPP")
    source.append("")
    source.append("#include <gdnative_api_struct.gen.h>")
    source.append("#include <stdint.h>")
    source.append("")
    source.append("#include <core/GodotGlobal.hpp>")
    source.append("#include <core/CoreTypes.hpp>")
    source.append("#include \"Object.hpp\"")
    source.append("")
    source.append("")
    source.append("namespace godot {")
    source.append("")

    for icall in icalls:
        ret_type = icall[0]
        args = icall[1]

        # Signature: core types pass by const&, int/float widen to
        # int64_t/double, everything else is an Object pointer.
        method_signature = "static inline "

        method_signature += get_icall_return_type(ret_type) + get_icall_name(icall) + "(godot_method_bind *mb, const Object *inst"

        for i, arg in enumerate(args):
            method_signature += ", const "

            if is_core_type(arg):
                method_signature += arg + "&"
            elif arg == "int":
                method_signature += "int64_t "
            elif arg == "float":
                method_signature += "double "
            elif is_primitive(arg):
                method_signature += arg + " "
            else:
                method_signature += "Object *"

            method_signature += "arg" + str(i)

        method_signature += ")"

        source.append(method_signature + " {")

        if ret_type != "void":
            # Class returns come back as a raw godot_object* that is later
            # converted to the binding wrapper.
            source.append("\t" + ("godot_object *" if is_class_type(ret_type) else get_icall_return_type(ret_type)) + "ret;")
            if is_class_type(ret_type):
                source.append("\tret = nullptr;")

        # C requires a nonzero array size, hence "args[1]" when arg-less.
        source.append("\tconst void *args[" + ("1" if len(args) == 0 else "") + "] = {")

        for i, arg in enumerate(args):
            wrapped_argument = "\t\t"
            if is_primitive(arg) or is_core_type(arg):
                wrapped_argument += "(void *) &arg" + str(i)
            else:
                # Object arguments: pass the engine-side owner pointer.
                wrapped_argument += "(void *) (arg" + str(i) + ") ? arg" + str(i) + "->_owner : nullptr"

            wrapped_argument += ","
            source.append(wrapped_argument)

        source.append("\t};")
        source.append("")

        source.append("\tgodot::api->godot_method_bind_ptrcall(mb, inst->_owner, args, " + ("nullptr" if ret_type == "void" else "&ret") + ");")

        if ret_type != "void":
            if is_class_type(ret_type):
                # Map the returned engine object back to its C++ wrapper.
                source.append("\tif (ret) {")
                source.append("\t\treturn (Object *) godot::nativescript_1_1_api->godot_nativescript_get_instance_binding_data(godot::_RegisterState::language_index, ret);")
                source.append("\t}")
                source.append("")
                source.append("\treturn (Object *) ret;")
            else:
                source.append("\treturn ret;")

        source.append("}")
        source.append("")

    source.append("}")
    source.append("")
    source.append("#endif")

    return "\n".join(source)
def generate_type_registry(classes):
    """Generate the ___register_types() translation unit.

    Registers every class with godot::_TagDB using typeid hash codes so the
    bindings can map engine classes to C++ types at runtime.  A class with
    no base class registers base hash 0.

    Returns the generated .cpp content as a single string.
    """
    source = []

    source.append("#include \"TagDB.hpp\"")
    source.append("#include <typeinfo>")
    source.append("\n")

    for c in classes:
        source.append("#include <" + strip_name(c["name"]) + ".hpp>")

    source.append("")
    source.append("")

    source.append("namespace godot {")

    source.append("void ___register_types()")
    source.append("{")

    for c in classes:
        class_name = strip_name(c["name"])
        base_class_name = strip_name(c["base_class"])

        class_type_hash = "typeid(" + class_name + ").hash_code()"

        base_class_type_hash = "typeid(" + base_class_name + ").hash_code()"

        if base_class_name == "":
            # root classes have no base: sentinel hash 0
            base_class_type_hash = "0"

        source.append("\tgodot::_TagDB::register_global_type(\"" + c["name"] + "\", " + class_type_hash + ", " + base_class_type_hash + ");")

    source.append("}")

    source.append("")
    source.append("}")

    return "\n".join(source)
def generate_init_method_bindings(classes):
    """Generate the ___init_method_bindings() translation unit.

    Emits one include per class plus a single function that calls every
    class' static ___init_method_bindings() (generated by
    generate_class_implementation).

    Returns the generated .cpp content as a single string.

    Fix: removed the unused local ``class_name`` that was computed inside
    the second loop but never read.
    """
    source = []

    for c in classes:
        source.append("#include <" + strip_name(c["name"]) + ".hpp>")

    source.append("")
    source.append("")

    source.append("namespace godot {")

    source.append("void ___init_method_bindings()")
    source.append("{")

    for c in classes:
        source.append("\t" + strip_name(c["name"]) + "::___init_method_bindings();")

    source.append("}")

    source.append("")
    source.append("}")

    return "\n".join(source)
def get_icall_return_type(t):
    """Map an api.json type to the C++ return type of an icall helper.

    Class types collapse to ``Object *``; ``int`` widens to ``int64_t`` and
    ``float``/``real`` to ``double``.  Non-pointer results keep a trailing
    space so the generated signature concatenates cleanly.
    """
    if is_class_type(t):
        return "Object *"
    if t == "int":
        return "int64_t "
    if t in ("float", "real"):
        return "double "
    return t + " "
def get_icall_name(sig):
    """Build a unique C++ identifier for an icall signature.

    ``sig`` is a (return_type, (arg_type, ...)) tuple; the name is
    ``___godot_icall_<ret>_<arg0>_<arg1>...`` with leading underscores of
    type names stripped.
    """
    ret_type, args = sig
    pieces = ["___godot_icall_", strip_name(ret_type)]
    pieces.extend("_" + strip_name(arg) for arg in args)
    return "".join(pieces)
def get_used_classes(c):
    """Collect every class type referenced by *c*'s method signatures.

    Scans return types and argument types of all methods, keeping only
    class types (not core/primitive), deduplicated in first-use order.
    """
    used = []
    for method in c["methods"]:
        ret = method["return_type"]
        if is_class_type(ret) and ret not in used:
            used.append(ret)
        for arg in method["arguments"]:
            arg_type = arg["type"]
            if is_class_type(arg_type) and arg_type not in used:
                used.append(arg_type)
    return used
def strip_name(name):
    """Drop the single leading underscore some Godot class names carry.

    Empty strings and names without a leading underscore are returned
    unchanged; only the first character is ever removed.
    """
    return name[1:] if name.startswith("_") else name
def extract_nested_type(nested_type):
    """Return the stripped outer type of an "Outer::Inner" name.

    Note: like the original, this assumes "::" is present; without it the
    slice ends at index -1 (last character dropped).
    """
    separator_index = nested_type.find("::")
    return strip_name(nested_type[:separator_index])
def remove_nested_type_prefix(name):
    """Return the stripped inner part of "Outer::Inner", or *name* as-is
    when there is no "::" separator."""
    separator_index = name.find("::")
    if separator_index == -1:
        return name
    return strip_name(name[separator_index + 2:])
def remove_enum_prefix(name):
    """Strip the "enum." marker (and a leading underscore) from an api.json
    enum type name, e.g. "enum.Error" -> "Error"."""
    marker_index = name.find("enum.")
    return strip_name(name[marker_index + 5:])
def is_nested_type(name, type=""):
    """True when *name* contains "<type>::" (any "::" when *type* is empty).

    The parameter keeps its original (builtin-shadowing) name so existing
    keyword callers remain valid.
    """
    return (type + "::") in name
def is_enum(name):
    """True when the api.json type name carries the "enum." marker prefix."""
    return name.startswith("enum.")
def is_class_type(name):
    """True for Object-derived (or enum) type names: anything that is
    neither a core value type nor a primitive."""
    return not (is_core_type(name) or is_primitive(name))
# Godot built-in value types that map to godot-cpp core classes rather than
# Object-derived wrappers.  Hoisted to module level (the original rebuilt the
# list on every call) and deduplicated (PoolIntArray / PoolRealArray appeared
# twice).
_CORE_TYPES = frozenset((
    "AABB", "Array", "Basis", "Color", "Dictionary", "Error", "NodePath",
    "Plane", "PoolByteArray", "PoolColorArray", "PoolIntArray",
    "PoolRealArray", "PoolStringArray", "PoolVector2Array",
    "PoolVector3Array", "Quat", "RID", "Rect2", "String", "Transform",
    "Transform2D", "Variant", "Vector2", "Vector3",
))


def is_core_type(name):
    """Return True if *name* is a Godot core (built-in value) type."""
    return name in _CORE_TYPES
def is_primitive(name):
    """True for GDNative primitive type names (as spelled in api.json)."""
    return name in ("int", "bool", "real", "float", "void")
def escape_cpp(name):
    """Rename identifiers that collide with C++ keywords.

    Returns *name* unchanged when it is not a reserved word.  Note the
    historical quirk: most keywords get a leading underscore, but "new"
    maps to "new_" (kept for compatibility with existing bindings).
    """
    reserved = {
        "class": "_class",
        "char": "_char",
        "short": "_short",
        "bool": "_bool",
        "int": "_int",
        "default": "_default",
        "case": "_case",
        "switch": "_switch",
        "export": "_export",
        "template": "_template",
        "new": "new_",
        "operator": "_operator",
        "typename": "_typename",
    }
    return reserved.get(name, name)
| mit |
youprofit/kivy | kivy/modules/console.py | 23 | 34178 | # coding=utf-8
"""
Console
=======
.. versionadded:: 1.9.1
Reboot of the old inspector, designed to be modular and keep concerns separated.
It also has an add-on architecture that allows you to add a button, panel, or
It also have a addons architecture that allow you to add a button, panel, or
more in the Console itself.
.. warning::
This module works, but might fail in some cases. Please contribute!
Usage
-----
For normal module usage, please see the :mod:`~kivy.modules` documentation::
python main.py -m console
Mouse navigation
----------------
When "Select" button is activated, you can:
- tap once on a widget to select it without leaving inspect mode
- double tap on a widget to select and leave inspect mode (then you can
manipulate the widget again)
Keyboard navigation
-------------------
- "Ctrl + e": toggle console
- "Escape": cancel widget lookup, then hide inspector view
- "Top": select the parent widget
- "Down": select the first children of the current selected widget
- "Left": select the previous following sibling
- "Right": select the next following sibling
Additional information
------------------------
Some properties can be edited live. However, due to the delayed usage of
some properties, it might crash if you don't handle all the cases.
Addons
------
Addons must be added to `Console.addons` before the first Clock tick of the
application, or before the create_console is called. You cannot add addons on
the fly currently. Addons are quite cheap until the Console is activated. Panels
are even cheaper: nothing is done until the user selects one.
By default, we provide multiple addons activated by default:
- ConsoleAddonFps: display the FPS at the top-right
- ConsoleAddonSelect: activate the selection mode
- ConsoleAddonBreadcrumb: display the hierarchy of the current widget at the
bottom
- ConsoleAddonWidgetTree: panel to display the widget tree of the application
- ConsoleAddonWidgetPanel: panel to display the properties of the selected
widget
If you need to add custom widget in the Console, please use either
:class:`ConsoleButton`, :class:`ConsoleToggleButton` or :class:`ConsoleLabel`
An addon must inherit from the :class:`ConsoleAddon` class.
For example, here is a simple addon for displaying the FPS at the top/right
of the Console::
from kivy.modules.console import Console, ConsoleAddon
class ConsoleAddonFps(ConsoleAddon):
def init(self):
self.lbl = ConsoleLabel(text="0 Fps")
self.console.add_toolbar_widget(self.lbl, right=True)
def activate(self):
Clock.schedule_interval(self.update_fps, 1 / 2.)
def deactivated(self):
Clock.unschedule(self.update_fps)
def update_fps(self, *args):
fps = Clock.get_fps()
self.lbl.text = "{} Fps".format(int(fps))
Console.register_addon(ConsoleAddonFps)
You can create addon that adds panels. Panel activation/deactivation are not
tied to the addon activation/deactivation, but on some cases, you can use the
same callback for deactivating the addon and the panel. Here is a simple About
panel addon::
from kivy.modules.console import Console, ConsoleAddon, ConsoleLabel
class ConsoleAddonAbout(ConsoleAddon):
def init(self):
self.console.add_panel("About", self.panel_activate,
self.panel_deactivate)
def panel_activate(self):
self.console.bind(widget=self.update_content)
self.update_content()
def panel_deactivate(self):
self.console.unbind(widget=self.update_content)
def deactivate(self):
self.panel_deactivate()
def update_content(self, *args):
widget = self.console.widget
if not widget:
return
text = "Selected widget is: {!r}".format(widget)
lbl = ConsoleLabel(text=text)
self.console.set_content(lbl)
Console.register_addon(ConsoleAddonAbout)
"""
__all__ = ("start", "stop", "create_console", "Console", "ConsoleAddon",
"ConsoleButton", "ConsoleToggleButton", "ConsoleLabel")
import kivy
kivy.require('1.9.0')
import weakref
from functools import partial
from itertools import chain
from kivy.logger import Logger
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.image import Image
from kivy.uix.treeview import TreeViewNode, TreeView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.modalview import ModalView
from kivy.graphics import Color, Rectangle, PushMatrix, PopMatrix
from kivy.graphics.context_instructions import Transform
from kivy.graphics.transformation import Matrix
from kivy.properties import (ObjectProperty, BooleanProperty, ListProperty,
NumericProperty, StringProperty, OptionProperty,
ReferenceListProperty, AliasProperty,
VariableListProperty)
from kivy.graphics.texture import Texture
from kivy.clock import Clock
from kivy.lang import Builder
Builder.load_string("""
<Console>:
size_hint: (1, None) if self.mode == "docked" else (None, None)
height: dp(250)
canvas:
Color:
rgb: .185, .18, .18
Rectangle:
size: self.size
Color:
rgb: .3, .3, .3
Rectangle:
pos: 0, self.height - dp(48)
size: self.width, dp(48)
GridLayout:
cols: 1
id: layout
GridLayout:
id: toolbar
rows: 1
height: "48dp"
size_hint_y: None
padding: "4dp"
spacing: "4dp"
RelativeLayout:
id: content
<ConsoleAddonSeparator>:
size_hint_x: None
width: "10dp"
<ConsoleButton,ConsoleToggleButton,ConsoleLabel>:
size_hint_x: None
width: self.texture_size[0] + dp(20)
<ConsoleAddonBreadcrumbView>:
size_hint_y: None
height: "48dp"
canvas:
Color:
rgb: .3, .3, .3
Rectangle:
size: self.size
ScrollView:
id: sv
do_scroll_y: False
GridLayout:
id: stack
rows: 1
size_hint_x: None
width: self.minimum_width
padding: "4dp"
spacing: "4dp"
<TreeViewProperty>:
height: max(dp(48), max(lkey.texture_size[1], ltext.texture_size[1]))
Label:
id: lkey
text: root.key
text_size: (self.width, None)
width: 150
size_hint_x: None
Label:
id: ltext
text: [repr(getattr(root.widget, root.key, '')), root.refresh][0]\
if root.widget else ''
text_size: (self.width, None)
<ConsoleAddonWidgetTreeView>:
ScrollView:
scroll_type: ['bars', 'content']
bar_width: '10dp'
ConsoleAddonWidgetTreeImpl:
id: widgettree
hide_root: True
size_hint: None, None
height: self.minimum_height
width: max(self.parent.width, self.minimum_width)
selected_widget: root.widget
on_select_widget: root.console.highlight_widget(args[1])
<-TreeViewWidget>:
height: self.texture_size[1] + sp(4)
size_hint_x: None
width: self.texture_size[0] + sp(4)
canvas.before:
Color:
rgba: self.color_selected if self.is_selected else (0, 0, 0, 0)
Rectangle:
pos: self.pos
size: self.size
Color:
rgba: 1, 1, 1, int(not self.is_leaf)
Rectangle:
source:
('atlas://data/images/defaulttheme/tree_%s' %
('opened' if self.is_open else 'closed'))
size: 16, 16
pos: self.x - 20, self.center_y - 8
canvas:
Color:
rgba:
(self.disabled_color if self.disabled else
(self.color if not self.markup else (1, 1, 1, 1)))
Rectangle:
texture: self.texture
size: self.texture_size
pos:
(int(self.center_x - self.texture_size[0] / 2.),
int(self.center_y - self.texture_size[1] / 2.))
""")
def ignore_exception(f):
    """Decorator that swallows errors raised by *f*, returning None instead.

    Used on the live property-editing callbacks: a half-typed or invalid
    value must not crash the console.

    Fix: the original used a bare ``except:``, which also swallowed
    SystemExit and KeyboardInterrupt; this catches only Exception.
    """
    def f2(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            pass
    return f2
class TreeViewProperty(BoxLayout, TreeViewNode):
    """Tree row displaying one (key, value) pair of the inspected widget.

    The value label and row height are driven by the kv rule; the widget is
    held through a weak reference so the inspector never keeps the
    inspected widget alive.
    """

    # name of the property this row displays
    key = ObjectProperty(None, allownone=True)

    # toggled externally to force the kv value label to re-evaluate
    refresh = BooleanProperty(False)

    # weakref.ref to the inspected widget (callable; None when unset)
    widget_ref = ObjectProperty(None, allownone=True)

    def _get_widget(self):
        # Dereference the weakref; clear it once the target is gone so the
        # alias property settles to None.
        wr = self.widget_ref
        if wr is None:
            return None
        wr = wr()
        if wr is None:
            self.widget_ref = None
            return None
        return wr
    widget = AliasProperty(_get_widget, None, bind=('widget_ref', ))
class ConsoleButton(Button):
    """Button specialized for the Console (auto-sized by the kv rule)."""
    pass
class ConsoleToggleButton(ToggleButton):
    """ToggleButton specialized for the Console (auto-sized by the kv rule)."""
    pass
class ConsoleLabel(Label):
    """Label specialized for the Console (auto-sized by the kv rule)."""
    pass
class ConsoleAddonSeparator(Widget):
    """Fixed-width spacer between toolbar widget groups (styled in kv)."""
    pass
class ConsoleAddon(object):
    """Base class for implementing addons"""

    #: Console instance
    console = None

    def __init__(self, console):
        super(ConsoleAddon, self).__init__()
        self.console = console
        self.init()

    def init(self):
        """Method called when the addon is instantiated by the Console
        """
        pass

    def activate(self):
        """Method called when the addon is activated by the console
        (when the console is displayed)"""
        pass

    def deactivate(self):
        """Method called when the addon is deactivated by the console
        (when the console is hidden)
        """
        pass
class ConsoleAddonMode(ConsoleAddon):
    """Addon showing the "Docked" mode toggle in the toolbar.

    The button is currently not wired to anything; it only reflects the
    default docked display mode.
    """

    def init(self):
        toggle = ConsoleToggleButton(text=u"Docked")
        self.console.add_toolbar_widget(toggle)
class ConsoleAddonSelect(ConsoleAddon):
    """Toolbar "Select" toggle, kept in two-way sync with the console's
    ``inspect_enabled`` flag."""

    def init(self):
        self.btn = ConsoleToggleButton(text=u"Select")
        self.btn.bind(state=self.on_button_state)
        self.console.add_toolbar_widget(self.btn)
        self.console.bind(inspect_enabled=self.on_inspect_enabled)

    def on_inspect_enabled(self, instance, value):
        # reflect programmatic changes of the flag back onto the button
        self.btn.state = "down" if value else "normal"

    def on_button_state(self, instance, value):
        self.console.inspect_enabled = (value == "down")
class ConsoleAddonFps(ConsoleAddon):
    """Addon displaying the current FPS on the right of the toolbar.

    Fix: the teardown method was named ``deactivated``, but the
    :class:`ConsoleAddon` API defines ``deactivate``; the misnamed method
    was never called, so the half-second update clock kept running after
    the console was hidden.  Renamed to ``deactivate``.
    """

    def init(self):
        self.lbl = ConsoleLabel(text="0 Fps")
        self.console.add_toolbar_widget(self.lbl, right=True)

    def activate(self):
        # refresh twice a second; cheap enough while the console is open
        Clock.schedule_interval(self.update_fps, 1 / 2.)

    def deactivate(self):
        Clock.unschedule(self.update_fps)

    def update_fps(self, *args):
        fps = Clock.get_fps()
        self.lbl.text = "{} Fps".format(int(fps))
class ConsoleAddonBreadcrumbView(RelativeLayout):
    """Horizontal strip of buttons showing the selected widget's ancestry.

    ``console`` is assigned externally by :class:`ConsoleAddonBreadcrumb`.
    """

    # currently selected widget (drives the breadcrumb content)
    widget = ObjectProperty(None, allownone=True)

    # ConsoleButtons of the current breadcrumb, deepest widget first.
    # NOTE(review): mutable class attribute shared until the first
    # per-instance assignment in on_widget -- harmless here since it is
    # always rebound, never mutated in place.
    parents = []

    def on_widget(self, instance, value):
        stack = self.ids.stack

        # determine if we can just highlight the current one
        # or if we need to rebuild the breadcrumb
        prefs = [btn.widget_ref() for btn in self.parents]
        if value in prefs:
            # ok, so just toggle this one instead.
            index = prefs.index(value)
            for btn in self.parents:
                btn.state = "normal"
            self.parents[index].state = "down"
            return

        # we need to rebuild the breadcrumb.
        stack.clear_widgets()
        if not value:
            return
        widget = value
        parents = []
        while True:
            btn = ConsoleButton(text=widget.__class__.__name__)
            btn.widget_ref = weakref.ref(widget)
            btn.bind(on_release=self.highlight_widget)
            parents.append(btn)
            # the window is its own parent: stop there
            if widget == widget.parent:
                break
            widget = widget.parent
        # add root-first so the selected widget ends up right-most
        for btn in reversed(parents):
            stack.add_widget(btn)
        self.ids.sv.scroll_x = 1
        self.parents = parents
        # after the loop, btn is the deepest (selected) widget's button
        btn.state = "down"

    def highlight_widget(self, instance):
        # select the widget referenced by the tapped breadcrumb button
        self.console.widget = instance.widget_ref()
class ConsoleAddonBreadcrumb(ConsoleAddon):
    """Addon installing the breadcrumb bar at the bottom of the console."""

    def init(self):
        self.view = ConsoleAddonBreadcrumbView()
        self.view.console = self.console
        self.console.ids.layout.add_widget(self.view)

    def activate(self):
        self.console.bind(widget=self.update_content)
        self.update_content()

    def deactivate(self):
        self.console.unbind(widget=self.update_content)

    def update_content(self, *args):
        # forward the console's selection to the breadcrumb view
        self.view.widget = self.console.widget
class ConsoleAddonWidgetPanel(ConsoleAddon):
    """"Properties" panel: lists the selected widget's properties and lets
    some of them be edited live."""

    def init(self):
        self.console.add_panel("Properties", self.panel_activate,
                               self.deactivate)

    def panel_activate(self):
        self.console.bind(widget=self.update_content)
        self.update_content()

    def deactivate(self):
        self.console.unbind(widget=self.update_content)

    def update_content(self, *args):
        """Rebuild the property tree for the currently selected widget."""
        widget = self.console.widget
        if not widget:
            return

        from kivy.uix.scrollview import ScrollView
        self.root = root = BoxLayout()
        self.sv = sv = ScrollView(scroll_type=["bars", "content"],
                                  bar_width='10dp')
        treeview = TreeView(hide_root=True, size_hint_y=None)
        treeview.bind(minimum_height=treeview.setter("height"))
        keys = list(widget.properties().keys())
        keys.sort()
        node = None
        # hold the widget weakly so the panel never keeps it alive
        wk_widget = weakref.ref(widget)
        for key in keys:
            node = TreeViewProperty(key=key, widget_ref=wk_widget)
            node.bind(is_selected=self.show_property)
            try:
                # NOTE(review): ``update_node_content`` is not defined on
                # this class; the attribute lookup raises inside the try
                # and the bind is silently skipped -- rows therefore do
                # not auto-refresh.  TODO confirm against upstream.
                widget.bind(**{
                    key: partial(self.update_node_content, weakref.ref(node))
                })
            except:
                pass
            treeview.add_node(node)

        root.add_widget(sv)
        sv.add_widget(treeview)
        self.console.set_content(root)

    def show_property(self, instance, value, key=None, index=-1, *l):
        """Build an editor widget for one property (or one list item).

        normal call: (tree node, focus, )
        nested call: (widget, prop value, prop key, index in dict/list)
        """
        if value is False:
            return

        console = self.console
        content = None

        if key is None:
            # normal call
            nested = False
            widget = instance.widget
            key = instance.key
            prop = widget.property(key)
            value = getattr(widget, key)
        else:
            # nested call, we might edit subvalue
            nested = True
            widget = instance
            prop = None

        dtype = None

        if isinstance(prop, AliasProperty) or nested:
            # trying to resolve type dynamicly
            # (the duplicated ``str`` is a leftover of the py2
            # unicode/str pair)
            if type(value) in (str, str):
                dtype = 'string'
            elif type(value) in (int, float):
                dtype = 'numeric'
            elif type(value) in (tuple, list):
                dtype = 'list'

        if isinstance(prop, NumericProperty) or dtype == 'numeric':
            content = TextInput(text=str(value) or '', multiline=False)
            content.bind(
                text=partial(self.save_property_numeric, widget, key, index))
        elif isinstance(prop, StringProperty) or dtype == 'string':
            content = TextInput(text=value or '', multiline=True)
            content.bind(
                text=partial(self.save_property_text, widget, key, index))
        elif (isinstance(prop, ListProperty) or
              isinstance(prop, ReferenceListProperty) or
              isinstance(prop, VariableListProperty) or dtype == 'list'):
            # one button per item; items recurse into show_property
            content = GridLayout(cols=1, size_hint_y=None)
            content.bind(minimum_height=content.setter('height'))
            for i, item in enumerate(value):
                button = Button(text=repr(item), size_hint_y=None, height=44)
                if isinstance(item, Widget):
                    button.bind(on_release=partial(console.highlight_widget,
                                                   item, False))
                else:
                    button.bind(on_release=partial(self.show_property, widget,
                                                   item, key, i))
                content.add_widget(button)
        elif isinstance(prop, OptionProperty):
            # radio-style toggle group over the allowed options
            content = GridLayout(cols=1, size_hint_y=None)
            content.bind(minimum_height=content.setter('height'))
            for option in prop.options:
                button = ToggleButton(
                    text=option,
                    state='down' if option == value else 'normal',
                    group=repr(content.uid),
                    size_hint_y=None,
                    height=44)
                button.bind(
                    on_press=partial(self.save_property_option, widget, key))
                content.add_widget(button)
        elif isinstance(prop, ObjectProperty):
            if isinstance(value, Widget):
                content = Button(text=repr(value))
                content.bind(
                    on_release=partial(console.highlight_widget, value))
            elif isinstance(value, Texture):
                content = Image(texture=value)
            else:
                content = Label(text=repr(value))
        elif isinstance(prop, BooleanProperty):
            state = 'down' if value else 'normal'
            content = ToggleButton(text=key, state=state)
            content.bind(on_release=partial(self.save_property_boolean, widget,
                                            key, index))

        # re-show the property list with the editor (if any) beside it
        self.root.clear_widgets()
        self.root.add_widget(self.sv)
        if content:
            self.root.add_widget(content)

    @ignore_exception
    def save_property_numeric(self, widget, key, index, instance, value):
        # index >= 0 means we are editing one element of a list property
        if index >= 0:
            getattr(widget, key)[index] = float(instance.text)
        else:
            setattr(widget, key, float(instance.text))

    @ignore_exception
    def save_property_text(self, widget, key, index, instance, value):
        if index >= 0:
            getattr(widget, key)[index] = instance.text
        else:
            setattr(widget, key, instance.text)

    @ignore_exception
    def save_property_boolean(self, widget, key, index, instance, ):
        value = instance.state == 'down'
        if index >= 0:
            getattr(widget, key)[index] = value
        else:
            setattr(widget, key, value)

    @ignore_exception
    def save_property_option(self, widget, key, instance, *l):
        setattr(widget, key, instance.text)
class TreeViewWidget(Label, TreeViewNode):
    """Tree node representing one live widget in the widget-tree panel."""
    # proxy_ref of the represented widget (see _update_widget_tree_node)
    widget = ObjectProperty(None)
class ConsoleAddonWidgetTreeImpl(TreeView):
    """TreeView subclass backing the "Tree" panel.

    Dispatches ``on_select_widget`` when the user picks a node, and keeps
    the tree selection in sync with ``selected_widget`` set from outside.
    """

    # widget whose node should be selected/revealed in the tree
    selected_widget = ObjectProperty(None, allownone=True)

    __events__ = ('on_select_widget', )

    def __init__(self, **kwargs):
        super(ConsoleAddonWidgetTreeImpl, self).__init__(**kwargs)
        # debounced scroll-to-selection, fired from on_selected_widget
        self.update_scroll = Clock.create_trigger(self._update_scroll)

    def find_node_by_widget(self, widget):
        """Return the tree node whose (possibly dead) widget ref matches."""
        for node in self.iterate_all_nodes():
            if not node.parent_node:
                continue
            try:
                if node.widget == widget:
                    return node
            except ReferenceError:
                # node.widget is a dead proxy_ref; skip it
                pass
        return None

    def update_selected_widget(self, widget):
        # select the matching node and open every ancestor so it is visible
        if widget:
            node = self.find_node_by_widget(widget)
            if node:
                self.select_node(node, False)
                while node and isinstance(node, TreeViewWidget):
                    if not node.is_open:
                        self.toggle_node(node)
                    node = node.parent_node

    def on_selected_widget(self, inst, widget):
        if widget:
            self.update_selected_widget(widget)
            self.update_scroll()

    def select_node(self, node, select_widget=True):
        # select_widget=False avoids re-dispatching when syncing from outside
        super(ConsoleAddonWidgetTreeImpl, self).select_node(node)
        if select_widget:
            try:
                self.dispatch("on_select_widget", node.widget.__self__)
            except ReferenceError:
                pass

    def on_select_widget(self, widget):
        # default event handler (event declared in __events__)
        pass

    def _update_scroll(self, *args):
        node = self._selected_node
        if not node:
            return
        # parent is the ScrollView declared in the kv rule
        self.parent.scroll_to(node)
class ConsoleAddonWidgetTreeView(RelativeLayout):
    """Container of the widget-tree panel; rebuilds the tree from the
    window's widget hierarchy.

    ``console`` is assigned externally by :class:`ConsoleAddonWidgetTree`.
    """

    # widget to highlight after a rebuild
    widget = ObjectProperty(None, allownone=True)

    # root tree node representing the Window (created lazily)
    _window_node = None

    def _update_widget_tree_node(self, node, widget, is_open=False):
        """Re-sync *node*'s children with *widget*'s children.

        Existing nodes are reused when their widget is still alive.
        Returns the list of (child_node, child_widget) pairs still to
        process (breadth-first continuation).
        """
        tree = self.ids.widgettree
        update_nodes = []
        nodes = {}
        for cnode in node.nodes[:]:
            try:
                nodes[cnode.widget] = cnode
            except ReferenceError:
                # widget no longer exists, just remove it
                pass
            tree.remove_node(cnode)
        for child in widget.children:
            # never show the console itself in the tree
            if isinstance(child, Console):
                continue
            if child in nodes:
                cnode = tree.add_node(nodes[child], node)
            else:
                cnode = tree.add_node(
                    TreeViewWidget(text=child.__class__.__name__,
                                   widget=child.proxy_ref,
                                   is_open=is_open), node)
            update_nodes.append((cnode, child))
        return update_nodes

    def update_widget_tree(self, *args):
        """Rebuild the whole tree from the window down, breadth-first."""
        win = self.console.win
        if not self._window_node:
            self._window_node = self.ids.widgettree.add_node(
                TreeViewWidget(text="Window",
                               widget=win,
                               is_open=True))

        nodes = self._update_widget_tree_node(self._window_node, win,
                                              is_open=True)
        while nodes:
            ntmp = nodes[:]
            nodes = []
            for node in ntmp:
                nodes += self._update_widget_tree_node(*node)

        self.ids.widgettree.update_selected_widget(self.widget)
class ConsoleAddonWidgetTree(ConsoleAddon):
    """Console addon providing the "Tree" panel (widget hierarchy view)."""

    def init(self):
        # Content view is created lazily on first activation.
        self.content = None
        self.console.add_panel("Tree", self.panel_activate, self.deactivate,
                               self.panel_refresh)

    def panel_activate(self):
        # Track console selection changes while the panel is visible.
        self.console.bind(widget=self.update_content)
        self.update_content()

    def deactivate(self):
        if self.content:
            # Drop references so the tree does not keep widgets alive.
            self.content.widget = None
            self.content.console = None
            self.console.unbind(widget=self.update_content)

    def update_content(self, *args):
        """(Re)build the tree view for the currently selected widget."""
        widget = self.console.widget
        if not self.content:
            self.content = ConsoleAddonWidgetTreeView()
        self.content.console = self.console
        self.content.widget = widget
        self.content.update_widget_tree()
        self.console.set_content(self.content)

    def panel_refresh(self):
        # Called when the user clicks the already-active "Tree" button.
        if self.content:
            self.content.update_widget_tree()
class Console(RelativeLayout):
    """Console interface

    This widget is created by create_console(), when the module is loaded.
    During that time, you can add addons on the console to extend the
    functionnalities, or add your own application stats / debugging module.
    """

    #: Array of addons that will be created at Console creation
    addons = [  # ConsoleAddonMode,
        ConsoleAddonSelect, ConsoleAddonFps, ConsoleAddonWidgetPanel,
        ConsoleAddonWidgetTree, ConsoleAddonBreadcrumb]

    #: Display mode of the Console, either docked at the bottom, or as a
    #: floating window.
    mode = OptionProperty("docked", options=["docked", "floated"])

    #: Current widget beeing selected
    widget = ObjectProperty(None, allownone=True)

    #: Indicate if the inspector inspection is enabled. If yes, the next
    #: touch down will select a the widget under the touch
    inspect_enabled = BooleanProperty(False)

    #: True if the Console is activated (showed)
    activated = BooleanProperty(False)

    def __init__(self, **kwargs):
        # Target window is supplied by create_console().
        self.win = kwargs.pop('win', None)
        super(Console, self).__init__(**kwargs)
        # Guard flag used by on_window_children to avoid infinite recursion
        # while re-adding ourselves on top of the widget stack.
        self.avoid_bring_to_top = False
        # Red translucent rectangle drawn over the selected widget.
        with self.canvas.before:
            self.gcolor = Color(1, 0, 0, .25)
            PushMatrix()
            self.gtransform = Transform(Matrix())
            self.grect = Rectangle(size=(0, 0))
            PopMatrix()
        # Re-sync the highlight rectangle every frame.
        Clock.schedule_interval(self.update_widget_graphics, 0)

        # instanciate all addons
        self._toolbar = {"left": [], "panels": [], "right": []}
        self._addons = []
        self._panel = None
        for addon in self.addons:
            instance = addon(self)
            self._addons.append(instance)
        self._init_toolbar()
        # select the first panel
        self._panel = self._toolbar["panels"][0]
        self._panel.state = "down"
        self._panel.cb_activate()

    def _init_toolbar(self):
        """Lay out the toolbar: left widgets | panel buttons | right widgets."""
        toolbar = self.ids.toolbar
        for key in ("left", "panels", "right"):
            if key == "right":
                # Spacer pushes the "right" group to the far edge.
                toolbar.add_widget(Widget())
            for el in self._toolbar[key]:
                toolbar.add_widget(el)
            if key != "right":
                toolbar.add_widget(ConsoleAddonSeparator())

    @classmethod
    def register_addon(cls, addon):
        """Register an extra addon class to instantiate at Console creation."""
        cls.addons.append(addon)

    def add_toolbar_widget(self, widget, right=False):
        """Add a widget in the top left toolbar of the Console.

        Use `right=True` if you wanna add the widget at the right instead.
        """
        key = "right" if right else "left"
        self._toolbar[key].append(widget)

    def remove_toolbar_widget(self, widget):
        """Remove a widget from the toolbar
        """
        self.ids.toolbar.remove_widget(widget)

    def add_panel(self, name, cb_activate, cb_deactivate, cb_refresh=None):
        """Add a new panel in the Console.

        - `cb_activate` is a callable that will be called when the panel is
          activated by the user.

        - `cb_deactivate` is a callable that will be called when the panel is
          deactivated or when the console will hide.

        - `cb_refresh` is an optionnal callable that is called if the user
          click again on the button for display the panel

        When activated, it's up to the panel to display a content in the
        Console by using :meth:`set_content`.
        """
        btn = ConsoleToggleButton(text=name)
        btn.cb_activate = cb_activate
        btn.cb_deactivate = cb_deactivate
        btn.cb_refresh = cb_refresh
        btn.bind(on_press=self._activate_panel)
        self._toolbar["panels"].append(btn)

    def _activate_panel(self, instance):
        """Switch the active panel to the one behind *instance*'s button."""
        if self._panel != instance:
            # Deactivate the old panel, then activate the new one.
            self._panel.cb_deactivate()
            self._panel.state = "normal"
            self.ids.content.clear_widgets()
            self._panel = instance
            self._panel.cb_activate()
            self._panel.state = "down"
        else:
            # Clicking the active panel again keeps it down and refreshes it.
            self._panel.state = "down"
            if self._panel.cb_refresh:
                self._panel.cb_refresh()

    def set_content(self, content):
        """Replace the Console content with a new one.
        """
        self.ids.content.clear_widgets()
        self.ids.content.add_widget(content)

    def on_touch_down(self, touch):
        ret = super(Console, self).on_touch_down(touch)
        # Only react to left clicks (or touches without button info).
        if (('button' not in touch.profile or touch.button == 'left') and
                not ret and self.inspect_enabled):
            self.highlight_at(*touch.pos)
            if touch.is_double_tap:
                # Double tap locks the current selection.
                self.inspect_enabled = False
            ret = True
        else:
            ret = self.collide_point(*touch.pos)
        return ret

    def on_touch_move(self, touch):
        ret = super(Console, self).on_touch_move(touch)
        if not ret and self.inspect_enabled:
            # Keep updating the highlight while dragging in inspect mode.
            self.highlight_at(*touch.pos)
            ret = True
        return ret

    def on_touch_up(self, touch):
        ret = super(Console, self).on_touch_up(touch)
        if not ret and self.inspect_enabled:
            # Swallow the touch so it does not reach the app underneath.
            ret = True
        return ret

    def on_window_children(self, win, children):
        # Keep the console above every other widget of the window.
        if self.avoid_bring_to_top:
            return
        self.avoid_bring_to_top = True
        win.remove_widget(self)
        win.add_widget(self)
        self.avoid_bring_to_top = False

    def highlight_at(self, x, y):
        """Select a widget from a x/y window coordinate.
        This is mostly used internally when Select mode is activated
        """
        widget = None
        # reverse the loop - look at children on top first and
        # modalviews before others
        win_children = self.win.children
        children = chain((c for c in reversed(win_children)
                          if isinstance(c, ModalView)),
                         (c for c in reversed(win_children)
                          if not isinstance(c, ModalView)))
        for child in children:
            if child is self:
                continue
            widget = self.pick(child, x, y)
            if widget:
                break
        self.highlight_widget(widget)

    def highlight_widget(self, widget, *largs):
        # no widget to highlight, reduce rectangle to 0, 0
        self.widget = widget
        if not widget:
            self.grect.size = 0, 0

    def update_widget_graphics(self, *l):
        """Per-frame sync of the highlight rectangle with the selection."""
        if not self.activated:
            return
        if self.widget is None:
            self.grect.size = 0, 0
            return
        self.grect.size = self.widget.size
        matrix = self.widget.get_window_matrix()
        # Only touch the transform when it actually changed, to avoid
        # needless canvas updates.
        if self.gtransform.matrix.get() != matrix.get():
            self.gtransform.matrix = matrix

    def pick(self, widget, x, y):
        """Pick a widget at x/y, given a root `widget`
        """
        ret = None
        # try to filter widgets that are not visible (invalid inspect target)
        if (hasattr(widget, 'visible') and not widget.visible):
            return ret
        if widget.collide_point(x, y):
            ret = widget
            x2, y2 = widget.to_local(x, y)
            # reverse the loop - look at children on top first
            for child in reversed(widget.children):
                ret = self.pick(child, x2, y2) or ret
        return ret

    def on_activated(self, instance, activated):
        if activated:
            self._activate_console()
        else:
            self._deactivate_console()

    def _activate_console(self):
        if not self in self.win.children:
            self.win.add_widget(self)
        # Slide the console into view (it sits at -height when hidden).
        self.y = 0
        for addon in self._addons:
            addon.activate()
        Logger.info('Console: console activated')

    def _deactivate_console(self):
        for addon in self._addons:
            addon.deactivate()
        self.grect.size = 0, 0
        # Move out of sight rather than removing from the window.
        self.y = -self.height
        self.widget = None
        self.inspect_enabled = False
        #self.win.remove_widget(self)
        self._window_node = None
        Logger.info('Console: console deactivated')

    def keyboard_shortcut(self, win, scancode, *largs):
        """Handle the global shortcuts bound by create_console().

        Returns True when the key was consumed.
        """
        modifiers = largs[-1]
        # ctrl+e (scancode 101): toggle the console.
        if scancode == 101 and modifiers == ['ctrl']:
            self.activated = not self.activated
            if self.activated:
                self.inspect_enabled = True
            return True

        # escape: leave inspect mode first, then close the console.
        elif scancode == 27:
            if self.inspect_enabled:
                self.inspect_enabled = False
                return True
            if self.activated:
                self.activated = False
                return True

        if not self.activated or not self.widget:
            return

        # Arrow keys navigate the hierarchy around the selected widget.
        if scancode == 273:  # top
            self.widget = self.widget.parent

        elif scancode == 274:  # down
            filtered_children = [c for c in self.widget.children
                                 if not isinstance(c, Console)]
            if filtered_children:
                self.widget = filtered_children[0]

        elif scancode == 276:  # left
            parent = self.widget.parent
            filtered_children = [c for c in parent.children
                                 if not isinstance(c, Console)]
            index = filtered_children.index(self.widget)
            index = max(0, index - 1)
            self.widget = filtered_children[index]

        elif scancode == 275:  # right
            parent = self.widget.parent
            filtered_children = [c for c in parent.children
                                 if not isinstance(c, Console)]
            index = filtered_children.index(self.widget)
            index = min(len(filtered_children) - 1, index + 1)
            self.widget = filtered_children[index]
def create_console(win, ctx, *l):
    """Instantiate the Console for *win* and wire up the window events.

    The console is stored on *ctx* so that stop() can later unbind and
    remove it.
    """
    ctx.console = Console(win=win)
    win.bind(children=ctx.console.on_window_children,
             on_keyboard=ctx.console.keyboard_shortcut)
def start(win, ctx):
    """Create an Console instance attached to the *ctx* and bound to the
    Windows :meth:`~kivy.core.window.WindowBase.on_keyboard` event for capturing
    the keyboard shortcut.

        :Parameters:
            `win`: A :class:`Window <kivy.core.window.WindowBase>`
                The application Window to bind to.
            `ctx`: A :class:`~kivy.uix.widget.Widget` or subclass
                The Widget to be inspected.

    """
    # Defer creation by one frame so the window is fully set up first.
    Clock.schedule_once(partial(create_console, win, ctx))
def stop(win, ctx):
    """Stop and unload any active Inspectors for the given *ctx*."""
    if hasattr(ctx, "console"):
        # Undo exactly what create_console() set up.
        win.unbind(children=ctx.console.on_window_children,
                   on_keyboard=ctx.console.keyboard_shortcut)
        win.remove_widget(ctx.console)
        del ctx.console
| mit |
MostafaGazar/tensorflow | tensorflow/examples/tutorials/mnist/mnist_with_summaries.py | 7 | 8370 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple MNIST classifier which displays summaries in TensorBoard.
This is an unimpressive MNIST model, but it is a good example of using
tf.name_scope to make a graph legible in the TensorBoard graph explorer, and of
naming summary tags so that they are grouped meaningfully in TensorBoard.
It demonstrates the functionality of every TensorBoard dashboard.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
def train():
  """Build a two-layer MNIST classifier, train it and log TensorBoard
  summaries for every dashboard (scalars, histograms, images, graph,
  run metadata)."""
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir,
                                    one_hot=True,
                                    fake_data=FLAGS.fake_data)

  sess = tf.InteractiveSession()

  # Create a multilayer model.

  # Input placeholders
  with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')

  with tf.name_scope('input_reshape'):
    # Reshape flat 784-vectors to 28x28x1 images for the image dashboard.
    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
    tf.image_summary('input', image_shaped_input, 10)

  # We can't initialize these variables to 0 - the network will get stuck.
  def weight_variable(shape):
    """Create a weight variable with appropriate initialization."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

  def bias_variable(shape):
    """Create a bias variable with appropriate initialization."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

  def variable_summaries(var, name):
    """Attach a lot of summaries to a Tensor."""
    with tf.name_scope('summaries'):
      mean = tf.reduce_mean(var)
      tf.scalar_summary('mean/' + name, mean)
      with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
      tf.scalar_summary('stddev/' + name, stddev)
      tf.scalar_summary('max/' + name, tf.reduce_max(var))
      tf.scalar_summary('min/' + name, tf.reduce_min(var))
      tf.histogram_summary(name, var)

  def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    """Reusable code for making a simple neural net layer.

    It does a matrix multiply, bias add, and then uses relu to nonlinearize.
    It also sets up name scoping so that the resultant graph is easy to read,
    and adds a number of summary ops.
    """
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
      # This Variable will hold the state of the weights for the layer
      with tf.name_scope('weights'):
        weights = weight_variable([input_dim, output_dim])
        variable_summaries(weights, layer_name + '/weights')
      with tf.name_scope('biases'):
        biases = bias_variable([output_dim])
        variable_summaries(biases, layer_name + '/biases')
      with tf.name_scope('Wx_plus_b'):
        preactivate = tf.matmul(input_tensor, weights) + biases
        tf.histogram_summary(layer_name + '/pre_activations', preactivate)
      activations = act(preactivate, name='activation')
      tf.histogram_summary(layer_name + '/activations', activations)
      return activations

  hidden1 = nn_layer(x, 784, 500, 'layer1')

  with tf.name_scope('dropout'):
    keep_prob = tf.placeholder(tf.float32)
    tf.scalar_summary('dropout_keep_probability', keep_prob)
    dropped = tf.nn.dropout(hidden1, keep_prob)

  # Do not apply softmax activation yet, see below.
  y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)

  with tf.name_scope('cross_entropy'):
    # The raw formulation of cross-entropy,
    #
    # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)),
    #                               reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.nn.softmax_cross_entropy_with_logits on the
    # raw outputs of the nn_layer above, and then average across
    # the batch.
    diff = tf.nn.softmax_cross_entropy_with_logits(y, y_)
    with tf.name_scope('total'):
      cross_entropy = tf.reduce_mean(diff)
    tf.scalar_summary('cross entropy', cross_entropy)

  with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
        cross_entropy)

  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.scalar_summary('accuracy', accuracy)

  # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
  merged = tf.merge_all_summaries()
  train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',
                                        sess.graph)
  test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
  tf.initialize_all_variables().run()

  # Train the model, and also write summaries.
  # Every 10th step, measure test-set accuracy, and write test summaries
  # All other steps, run train_step on training data, & add training summaries

  def feed_dict(train):
    """Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
    if train or FLAGS.fake_data:
      xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
      k = FLAGS.dropout
    else:
      # Evaluate on the full test set with dropout disabled.
      xs, ys = mnist.test.images, mnist.test.labels
      k = 1.0
    return {x: xs, y_: ys, keep_prob: k}

  for i in range(FLAGS.max_steps):
    if i % 10 == 0:  # Record summaries and test-set accuracy
      summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
      test_writer.add_summary(summary, i)
      print('Accuracy at step %s: %s' % (i, acc))
    else:  # Record train set summaries, and train
      if i % 100 == 99:  # Record execution stats
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        summary, _ = sess.run([merged, train_step],
                              feed_dict=feed_dict(True),
                              options=run_options,
                              run_metadata=run_metadata)
        train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
        train_writer.add_summary(summary, i)
        print('Adding run metadata for', i)
      else:  # Record a summary
        summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))
        train_writer.add_summary(summary, i)
  train_writer.close()
  test_writer.close()
def main(_):
  """Recreate the summaries directory from scratch, then run training."""
  logdir = FLAGS.summaries_dir
  if tf.gfile.Exists(logdir):
    tf.gfile.DeleteRecursively(logdir)
  tf.gfile.MakeDirs(logdir)
  train()
if __name__ == '__main__':
  # Command-line flags; the parsed namespace is stored in the module-level
  # FLAGS used throughout train().
  parser = argparse.ArgumentParser()
  parser.add_argument('--fake_data', nargs='?', const=True, type=bool,
                      default=False,
                      help='If true, uses fake data for unit testing.')
  parser.add_argument('--max_steps', type=int, default=1000,
                      help='Number of steps to run trainer.')
  parser.add_argument('--learning_rate', type=float, default=0.001,
                      help='Initial learning rate')
  parser.add_argument('--dropout', type=float, default=0.9,
                      help='Keep probability for training dropout.')
  parser.add_argument('--data_dir', type=str, default='/tmp/data',
                      help='Directory for storing data')
  parser.add_argument('--summaries_dir', type=str, default='/tmp/mnist_logs',
                      help='Summaries directory')
  FLAGS = parser.parse_args()
  # tf.app.run() parses remaining flags and calls main().
  tf.app.run()
| apache-2.0 |
androidarmv6/android_external_chromium_org | chrome/common/extensions/docs/server2/link_converter.py | 25 | 2973 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script converts old-style <a> links to API docs to the new $ref links.
# See reference_resolver.py for more info on the format of $ref links.
import optparse
import os
import re
from docs_server_utils import SanitizeAPIName
def _ReadFile(filename):
with open(filename) as f:
return f.read()
def _WriteFile(filename, contents):
with open(filename, 'w') as f:
f.write(contents)
def _Replace(matches, filename):
title = matches.group(3)
if matches.group(2).count('#') != 1:
return '<a%shref=%s>%s</a>' % (matches.group(1),
matches.group(2),
title)
clean = (matches.group(2).replace('\\', '')
.replace("'", '')
.replace('"', '')
.replace('/', ''))
page, link = clean.split('#')
if not page:
page = '%s.html' % SanitizeAPIName(filename.rsplit(os.sep, 1)[-1])
if (not link.startswith('property-') and
not link.startswith('type-') and
not link.startswith('method-') and
not link.startswith('event-')):
return '<a%shref=%s>%s</a>' % (matches.group(1),
matches.group(2),
title)
link = re.sub('^(property|type|method|event)-', '', link).replace('-', '.')
page = page.replace('.html', '.').replace('_', '.')
if matches.group(1) == ' ':
padding = ''
else:
padding = matches.group(1)
if link in title:
return '%s$ref:%s%s' % (padding, page, link)
else:
return '%s$ref:[%s%s %s]' % (padding, page, link, title)
def _ConvertFile(filename, use_stdout):
  """Convert every <a> link in |filename| to a $ref link.

  The result is written back to |filename|, or printed to stdout when
  |use_stdout| is true.

  Fix: the bare Python-2 ``print contents`` statement is replaced with the
  ``print(contents)`` form, which is equivalent under Python 2 (parentheses
  around a single expression) and also valid under Python 3.
  """
  regex = re.compile(r'<a(.*?)href=(.*?)>(.*?)</a>', flags=re.DOTALL)
  contents = _ReadFile(filename)
  contents = re.sub(regex,
                    lambda m: _Replace(m, filename),
                    contents)
  # extension.lastError was renamed when the runtime API split out.
  contents = contents.replace('$ref:extension.lastError',
                              '$ref:runtime.lastError')
  if use_stdout:
    print(contents)
  else:
    _WriteFile(filename, contents)
if __name__ == '__main__':
  # Command-line entry point: convert one file (-f) or walk a directory.
  parser = optparse.OptionParser(
      description='Converts <a> links to $ref links.',
      usage='usage: %prog [option] <directory>')
  parser.add_option('-f', '--file', default='',
                    help='Convert links in single file.')
  parser.add_option('-o', '--out', action='store_true', default=False,
                    help='Write to stdout.')
  # Note: a duplicate, unused `regex = re.compile(...)` assignment was
  # removed here; _ConvertFile compiles its own pattern.
  opts, argv = parser.parse_args()

  if opts.file:
    _ConvertFile(opts.file, opts.out)
  else:
    if len(argv) != 1:
      parser.print_usage()
      exit(0)
    for root, dirs, files in os.walk(argv[0]):
      for name in files:
        _ConvertFile(os.path.join(root, name), opts.out)
| bsd-3-clause |
jmesteve/saas3 | openerp/addons_extra/account_financial_report_webkit/wizard/balance_common.py | 7 | 16829 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011 Camptocamp SA (http://www.camptocamp.com)
#
# Author: Guewen Baconnier (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import time
from lxml import etree
from datetime import datetime
from openerp.osv import fields, orm
from openerp.tools.translate import _
def previous_year_date(date, nb_prev=1):
    """Return *date* (a 'YYYY-MM-DD' string) shifted back *nb_prev* years.

    Returns False for an empty/falsy input, otherwise a ``datetime``.

    Fix: the previous implementation passed the original day straight to
    ``datetime()``, which raises ValueError for Feb 29 when the target
    year is not a leap year. The day is now clamped to the last valid day
    of the target month (Feb 29 -> Feb 28).
    """
    if not date:
        return False
    import calendar
    parsed_date = datetime.strptime(date, '%Y-%m-%d')
    target_year = parsed_date.year - nb_prev
    # Clamp the day so leap days map onto the last day of the month.
    last_day = calendar.monthrange(target_year, parsed_date.month)[1]
    return datetime(year=target_year,
                    month=parsed_date.month,
                    day=min(parsed_date.day, last_day))
class AccountBalanceCommonWizard(orm.TransientModel):
    """Will launch trial balance report and pass required args"""

    _inherit = "account.common.account.report"
    _name = "account.common.balance.report"
    _description = "Common Balance Report"

    # an update module should be done if changed
    # in order to create fields in db
    COMPARISON_LEVEL = 3

    COMPARE_SELECTION = [('filter_no', 'No Comparison'),
                         ('filter_year', 'Fiscal Year'),
                         ('filter_date', 'Date'),
                         ('filter_period', 'Periods'),
                         ('filter_opening', 'Opening Only')]

    # Names of the dynamically created comparison fields, one set per
    # comparison level: comp0_fiscalyear_id ... comp2_date_to.
    M2O_DYNAMIC_FIELDS = [f % index for f in ["comp%s_fiscalyear_id",
                                              "comp%s_period_from",
                                              "comp%s_period_to"]
                          for index in range(COMPARISON_LEVEL)]
    SIMPLE_DYNAMIC_FIELDS = [f % index for f in ["comp%s_filter",
                                                 "comp%s_date_from",
                                                 "comp%s_date_to"]
                             for index in range(COMPARISON_LEVEL)]
    DYNAMIC_FIELDS = M2O_DYNAMIC_FIELDS + SIMPLE_DYNAMIC_FIELDS

    def _get_account_ids(self, cr, uid, context=None):
        """Default for account_ids: preselect the accounts the wizard was
        launched on (when opened from the account.account list)."""
        res = False
        if context.get('active_model', False) == 'account.account' and context.get('active_ids', False):
            res = context['active_ids']
        return res

    _columns = {
        'account_ids': fields.many2many('account.account', string='Filter on accounts',
                                        help="Only selected accounts will be printed. Leave empty to print all accounts."),
        'filter': fields.selection([('filter_no', 'No Filters'),
                                    ('filter_date', 'Date'),
                                    ('filter_period', 'Periods'),
                                    ('filter_opening', 'Opening Only')],
                                   "Filter by",
                                   required=True,
                                   help='Filter by date: no opening balance will be displayed. '
                                   '(opening balance can only be computed based on period to be correct).'),
    }
    # Add one group of comparison fields per comparison level.
    for index in range(COMPARISON_LEVEL):
        _columns.update(
            {"comp%s_filter" % index: fields.selection(COMPARE_SELECTION, string='Compare By', required=True),
             "comp%s_fiscalyear_id" % index: fields.many2one('account.fiscalyear', 'Fiscal Year'),
             "comp%s_period_from" % index: fields.many2one('account.period', 'Start Period'),
             "comp%s_period_to" % index: fields.many2one('account.period', 'End Period'),
             "comp%s_date_from" % index: fields.date("Start Date"),
             "comp%s_date_to" % index: fields.date("End Date")})

    _defaults = {
        'account_ids': _get_account_ids,
    }

    def _check_fiscalyear(self, cr, uid, ids, context=None):
        # A fiscal year is mandatory unless the user filters by date/period.
        obj = self.read(cr, uid, ids[0], ['fiscalyear_id', 'filter'], context=context)
        if not obj['fiscalyear_id'] and obj['filter'] == 'filter_no':
            return False
        return True

    _constraints = [
        (_check_fiscalyear, 'When no Fiscal year is selected, you must choose to filter by periods or by date.', ['filter']),
    ]

    def default_get(self, cr, uid, fields, context=None):
        """
        To get default values for the object.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param fields: List of fields for which we want default values
        @param context: A standard dictionary

        @return: A dictionary which of fields with values.
        """
        res = super(AccountBalanceCommonWizard, self).default_get(cr, uid, fields, context=context)
        # Every comparison defaults to "No Comparison".
        for index in range(self.COMPARISON_LEVEL):
            field = "comp%s_filter" % (index,)
            if not res.get(field, False):
                res[field] = 'filter_no'
        return res

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Inject one notebook page per comparison level in place of the
        'placeholder' page of the base view."""
        res = super(AccountBalanceCommonWizard, self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
        # Make the dynamic fields known to the client.
        res['fields'].update(self.fields_get(cr, uid,
                                             allfields=self.DYNAMIC_FIELDS,
                                             context=context, write_access=True))
        eview = etree.fromstring(res['arch'])
        placeholder = eview.xpath("//page[@name='placeholder']")
        if placeholder:
            placeholder = placeholder[0]
            for index in range(self.COMPARISON_LEVEL):
                page = etree.Element(
                    'page',
                    {'name': "comp%s" % index,
                     'string': _("Comparison %s") % (index + 1, )})
                group = etree.Element('group')
                page.append(group)

                def modifiers_and_append(elem):
                    # setup_modifiers translates attrs/states for the client.
                    orm.setup_modifiers(elem)
                    group.append(elem)

                modifiers_and_append(etree.Element(
                    'field',
                    {'name': "comp%s_filter" % index,
                     'on_change': "onchange_comp_filter(%(index)s, filter, comp%(index)s_filter, fiscalyear_id, date_from, date_to)" % {'index': index}}))
                modifiers_and_append(etree.Element(
                    'field',
                    {'name': "comp%s_fiscalyear_id" % index,
                     'attrs':
                     "{'required': [('comp%(index)s_filter','in',('filter_year','filter_opening'))]," \
                     " 'invisible': [('comp%(index)s_filter','not in',('filter_year','filter_opening'))]}" % {'index': index}}))

                dates_attrs = "{'required': [('comp%(index)s_filter','=','filter_date')], " \
                              " 'invisible': [('comp%(index)s_filter','!=','filter_date')]}" % {'index': index}
                modifiers_and_append(etree.Element(
                    'separator',
                    {'string': _('Dates'),
                     'colspan': '4',
                     'attrs': dates_attrs}))
                modifiers_and_append(etree.Element(
                    'field',
                    {'name': "comp%s_date_from" % index,
                     'attrs': dates_attrs}))
                modifiers_and_append(etree.Element(
                    'field',
                    {'name': "comp%s_date_to" % index,
                     'attrs': dates_attrs}))

                periods_attrs = "{'required': [('comp%(index)s_filter','=','filter_period')]," \
                                " 'invisible': [('comp%(index)s_filter','!=','filter_period')]}" % {'index': index}
                # Opening/closing periods are excluded from the selection.
                periods_domain = "[('special', '=', False)]"
                modifiers_and_append(etree.Element(
                    'separator',
                    {'string': _('Periods'),
                     'colspan': '4',
                     'attrs': periods_attrs}))
                modifiers_and_append(etree.Element(
                    'field',
                    {'name': "comp%s_period_from" % index,
                     'attrs': periods_attrs,
                     'domain': periods_domain}))
                modifiers_and_append(etree.Element(
                    'field',
                    {'name': "comp%s_period_to" % index,
                     'attrs': periods_attrs,
                     'domain': periods_domain}))

                placeholder.addprevious(page)
            # The placeholder page is only a marker; drop it.
            placeholder.getparent().remove(placeholder)
            res['arch'] = etree.tostring(eview)
        return res

    def onchange_filter(self, cr, uid, ids, filter='filter_no', fiscalyear_id=False, context=None):
        """Propose sensible date/period defaults when the main filter changes."""
        res = {}
        if filter == 'filter_no':
            res['value'] = {'period_from': False, 'period_to': False, 'date_from': False, 'date_to': False}
        if filter == 'filter_date':
            if fiscalyear_id:
                fyear = self.pool.get('account.fiscalyear').browse(cr, uid, fiscalyear_id, context=context)
                date_from = fyear.date_start
                # End date is capped at today when the fiscal year is open-ended.
                date_to = fyear.date_stop > time.strftime('%Y-%m-%d') and time.strftime('%Y-%m-%d') or fyear.date_stop
            else:
                date_from, date_to = time.strftime('%Y-01-01'), time.strftime('%Y-%m-%d')
            res['value'] = {'period_from': False, 'period_to': False, 'date_from': date_from, 'date_to': date_to}
        if filter == 'filter_period' and fiscalyear_id:
            start_period = end_period = False
            # First non-special period of the year, and the latest
            # non-special period already started.
            cr.execute('''
                SELECT * FROM (SELECT p.id
                               FROM account_period p
                               LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
                               WHERE f.id = %s
                               AND COALESCE(p.special, FALSE) = FALSE
                               ORDER BY p.date_start ASC
                               LIMIT 1) AS period_start
                UNION ALL
                SELECT * FROM (SELECT p.id
                               FROM account_period p
                               LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
                               WHERE f.id = %s
                               AND p.date_start < NOW()
                               AND COALESCE(p.special, FALSE) = FALSE
                               ORDER BY p.date_stop DESC
                               LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
            periods = [i[0] for i in cr.fetchall()]
            if periods:
                start_period = end_period = periods[0]
                if len(periods) > 1:
                    end_period = periods[1]
            res['value'] = {'period_from': start_period, 'period_to': end_period, 'date_from': False, 'date_to': False}
        return res

    def onchange_comp_filter(self, cr, uid, ids, index, main_filter='filter_no', comp_filter='filter_no', fiscalyear_id=False, start_date=False, stop_date=False, context=None):
        """Propose defaults for comparison *index* when its filter changes.

        Comparison N defaults to the fiscal year N+1 years before the
        main fiscal year (or dates shifted back N+1 years).
        """
        res = {}
        fy_obj = self.pool.get('account.fiscalyear')
        last_fiscalyear_id = False
        if fiscalyear_id:
            fiscalyear = fy_obj.browse(cr, uid, fiscalyear_id, context=context)
            last_fiscalyear_ids = fy_obj.search(cr, uid, [('date_stop', '<', fiscalyear.date_start)],
                                                limit=self.COMPARISON_LEVEL, order='date_start desc', context=context)
            if last_fiscalyear_ids:
                if len(last_fiscalyear_ids) > index:
                    last_fiscalyear_id = last_fiscalyear_ids[index]  # first element for the comparison 1, second element for the comparison 2

        fy_id_field = "comp%s_fiscalyear_id" % (index,)
        period_from_field = "comp%s_period_from" % (index,)
        period_to_field = "comp%s_period_to" % (index,)
        date_from_field = "comp%s_date_from" % (index,)
        date_to_field = "comp%s_date_to" % (index,)

        if comp_filter == 'filter_no':
            res['value'] = {
                fy_id_field: False,
                period_from_field: False,
                period_to_field: False,
                date_from_field: False,
                date_to_field: False
            }
        if comp_filter in ('filter_year', 'filter_opening'):
            res['value'] = {
                fy_id_field: last_fiscalyear_id,
                period_from_field: False,
                period_to_field: False,
                date_from_field: False,
                date_to_field: False
            }
        if comp_filter == 'filter_date':
            dates = {}
            if main_filter == 'filter_date':
                # Shift the main date range back (index+1) years.
                dates = {
                    'date_start': previous_year_date(start_date, index + 1).strftime('%Y-%m-%d'),
                    'date_stop': previous_year_date(stop_date, index + 1).strftime('%Y-%m-%d'),
                }
            elif last_fiscalyear_id:
                dates = fy_obj.read(cr, uid, last_fiscalyear_id, ['date_start', 'date_stop'], context=context)
            res['value'] = {fy_id_field: False, period_from_field: False, period_to_field: False, date_from_field: dates.get('date_start', False), date_to_field: dates.get('date_stop', False)}
        if comp_filter == 'filter_period' and last_fiscalyear_id:
            start_period = end_period = False
            # Same first/last non-special period lookup as onchange_filter,
            # applied to the previous fiscal year.
            cr.execute('''
                SELECT * FROM (SELECT p.id
                               FROM account_period p
                               LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
                               WHERE f.id = %(fiscalyear)s
                               AND COALESCE(p.special, FALSE) = FALSE
                               ORDER BY p.date_start ASC
                               LIMIT 1) AS period_start
                UNION ALL
                SELECT * FROM (SELECT p.id
                               FROM account_period p
                               LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
                               WHERE f.id = %(fiscalyear)s
                               AND p.date_start < NOW()
                               AND COALESCE(p.special, FALSE) = FALSE
                               ORDER BY p.date_stop DESC
                               LIMIT 1) AS period_stop''', {'fiscalyear': last_fiscalyear_id})
            periods = [i[0] for i in cr.fetchall()]
            # NOTE(review): unlike onchange_filter above, a single returned
            # period yields no defaults here (the `and len(periods) > 1`
            # guard, which makes the inner check redundant) — possibly
            # intentional, confirm against the report's expectations.
            if periods and len(periods) > 1:
                start_period = end_period = periods[0]
                if len(periods) > 1:
                    end_period = periods[1]
            res['value'] = {fy_id_field: False,
                            period_from_field: start_period,
                            period_to_field: end_period,
                            date_from_field: False,
                            date_to_field: False}
        return res

    def pre_print_report(self, cr, uid, ids, data, context=None):
        """Collect the dynamic comparison values into data['form'] before
        handing them to the report engine."""
        data = super(AccountBalanceCommonWizard, self).pre_print_report(
            cr, uid, ids, data, context)
        if context is None:
            context = {}
        # will be used to attach the report on the main account
        data['ids'] = [data['form']['chart_account_id']]
        fields_to_read = ['account_ids', ]
        fields_to_read += self.DYNAMIC_FIELDS
        vals = self.read(cr, uid, ids, fields_to_read, context=context)[0]

        # extract the id from the m2o tuple (id, name)
        for field in self.M2O_DYNAMIC_FIELDS:
            if isinstance(vals[field], tuple):
                vals[field] = vals[field][0]

        vals['max_comparison'] = self.COMPARISON_LEVEL
        data['form'].update(vals)
        return data
| agpl-3.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/test/test_ossaudiodev.py | 97 | 7216 | from test import support
support.requires('audio')
from test.support import findfile
ossaudiodev = support.import_module('ossaudiodev')
import errno
import sys
import sunau
import time
import audioop
import unittest
# Arggh, AFMT_S16_NE not defined on all platforms -- seems to be a
# fairly recent addition to OSS.
try:
    from ossaudiodev import AFMT_S16_NE
except ImportError:
    # Fall back to the 16-bit signed format constant matching this
    # machine's native byte order.
    if sys.byteorder == "little":
        AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
    else:
        AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
def read_sound_file(path):
    """Load a mu-law .au file and return (pcm_data, rate, 16, nchannels).

    The whole file -- header included -- is converted from 8-bit mu-law
    to 16-bit signed PCM, matching the original helper's behaviour.
    """
    with open(path, 'rb') as stream:
        reader = sunau.open(stream)
        sample_rate = reader.getframerate()
        channel_count = reader.getnchannels()
        file_encoding = reader._encoding
        stream.seek(0)
        raw_bytes = stream.read()

    if file_encoding != sunau.AUDIO_FILE_ENCODING_MULAW_8:
        raise RuntimeError("Expect .au file with 8-bit mu-law samples")

    # Widen the 8-bit mu-law samples to 16-bit signed PCM.
    pcm = audioop.ulaw2lin(raw_bytes, 2)
    return (pcm, sample_rate, 16, channel_count)
class OSSAudioDevTests(unittest.TestCase):
    """Tests for ossaudiodev dsp and mixer objects.

    These tests need real (or emulated) audio hardware and skip when no
    usable device is present.
    """

    def play_sound_file(self, data, rate, ssize, nchannels):
        """Play *data* on /dev/dsp, checking attributes and playback timing."""
        try:
            dsp = ossaudiodev.open('w')
        except OSError as msg:
            # A missing or busy device is an environment problem, not a bug.
            if msg.args[0] in (errno.EACCES, errno.ENOENT,
                               errno.ENODEV, errno.EBUSY):
                raise unittest.SkipTest(msg)
            raise

        # at least check that these methods can be invoked
        dsp.bufsize()
        dsp.obufcount()
        dsp.obuffree()
        dsp.getptr()
        dsp.fileno()

        # Make sure the read-only attributes work.
        self.assertFalse(dsp.closed)
        self.assertEqual(dsp.name, "/dev/dsp")
        self.assertEqual(dsp.mode, "w", "bad dsp.mode: %r" % dsp.mode)

        # And make sure they're really read-only.
        for attr in ('closed', 'name', 'mode'):
            try:
                setattr(dsp, attr, 42)
            except (TypeError, AttributeError):
                pass
            else:
                self.fail("dsp.%s not read-only" % attr)

        # Compute expected running time of sound sample (in seconds).
        expected_time = float(len(data)) / (ssize/8) / nchannels / rate

        # set parameters based on .au file headers
        dsp.setparameters(AFMT_S16_NE, nchannels, rate)
        self.assertTrue(abs(expected_time - 3.51) < 1e-2, expected_time)
        t1 = time.time()
        dsp.write(data)
        dsp.close()
        t2 = time.time()
        elapsed_time = t2 - t1

        percent_diff = (abs(elapsed_time - expected_time) / expected_time) * 100
        self.assertTrue(percent_diff <= 10.0,
                        "elapsed time (%s) > 10%% off of expected time (%s)" %
                        (elapsed_time, expected_time))

    def set_parameters(self, dsp):
        """Find a supported (fmt, channels, rate) configuration and check
        that setparameters() accepts it in both strict and non-strict mode."""
        # Two configurations for testing:
        #   config1 (8-bit, mono, 8 kHz) should work on even the most
        #      ancient and crufty sound card, but maybe not on special-
        #      purpose high-end hardware
        #   config2 (16-bit, stereo, 44.1kHz) should work on all but the
        #      most ancient and crufty hardware
        config1 = (ossaudiodev.AFMT_U8, 1, 8000)
        config2 = (AFMT_S16_NE, 2, 44100)

        for config in [config1, config2]:
            (fmt, channels, rate) = config
            if (dsp.setfmt(fmt) == fmt and
                dsp.channels(channels) == channels and
                dsp.speed(rate) == rate):
                break
        else:
            raise RuntimeError("unable to set audio sampling parameters: "
                               "you must have really weird audio hardware")

        # setparameters() should be able to set this configuration in
        # either strict or non-strict mode.
        result = dsp.setparameters(fmt, channels, rate, False)
        self.assertEqual(result, (fmt, channels, rate),
                         "setparameters%r: returned %r" % (config, result))

        result = dsp.setparameters(fmt, channels, rate, True)
        self.assertEqual(result, (fmt, channels, rate),
                         "setparameters%r: returned %r" % (config, result))

    def set_bad_parameters(self, dsp):
        """Bogus configurations must be adjusted in non-strict mode and
        rejected with OSSAudioError in strict mode."""
        # Now try some configurations that are presumably bogus: eg. 300
        # channels currently exceeds even Hollywood's ambitions, and
        # negative sampling rate is utter nonsense.  setparameters() should
        # accept these in non-strict mode, returning something other than
        # was requested, but should barf in strict mode.
        fmt = AFMT_S16_NE
        rate = 44100
        channels = 2
        for config in [(fmt, 300, rate),       # ridiculous nchannels
                       (fmt, -5, rate),        # impossible nchannels
                       (fmt, channels, -50),   # impossible rate
                      ]:
            (fmt, channels, rate) = config
            result = dsp.setparameters(fmt, channels, rate, False)
            self.assertNotEqual(result, config,
                                "unexpectedly got requested configuration")

            try:
                dsp.setparameters(fmt, channels, rate, True)
            except ossaudiodev.OSSAudioError:
                pass
            else:
                self.fail("expected OSSAudioError")

    def test_playback(self):
        sound_info = read_sound_file(findfile('audiotest.au'))
        self.play_sound_file(*sound_info)

    def test_set_parameters(self):
        dsp = ossaudiodev.open("w")
        try:
            self.set_parameters(dsp)

            # Disabled because it fails under Linux 2.6 with ALSA's OSS
            # emulation layer.
            #self.set_bad_parameters(dsp)
        finally:
            dsp.close()
            self.assertTrue(dsp.closed)

    def test_mixer_methods(self):
        # Issue #8139: ossaudiodev didn't initialize its types properly,
        # therefore some methods were unavailable.
        with ossaudiodev.openmixer() as mixer:
            self.assertGreaterEqual(mixer.fileno(), 0)

    def test_with(self):
        with ossaudiodev.open('w') as dsp:
            pass
        self.assertTrue(dsp.closed)

    def test_on_closed(self):
        dsp = ossaudiodev.open('w')
        dsp.close()
        self.assertRaises(ValueError, dsp.fileno)
        self.assertRaises(ValueError, dsp.read, 1)
        self.assertRaises(ValueError, dsp.write, b'x')
        self.assertRaises(ValueError, dsp.writeall, b'x')
        self.assertRaises(ValueError, dsp.bufsize)
        # obufcount was asserted twice (copy/paste duplicate) -- once is enough
        self.assertRaises(ValueError, dsp.obufcount)
        self.assertRaises(ValueError, dsp.obuffree)
        self.assertRaises(ValueError, dsp.getptr)

        mixer = ossaudiodev.openmixer()
        mixer.close()
        self.assertRaises(ValueError, mixer.fileno)
def test_main():
    """Probe for a writable audio device, then run this module's tests."""
    try:
        probe = ossaudiodev.open('w')
    except (ossaudiodev.error, OSError) as exc:
        recoverable = (errno.EACCES, errno.ENOENT, errno.ENODEV, errno.EBUSY)
        if exc.args[0] in recoverable:
            # No usable sound hardware: skip instead of failing.
            raise unittest.SkipTest(exc)
        raise
    else:
        probe.close()
    support.run_unittest(__name__)


if __name__ == "__main__":
    test_main()
| gpl-2.0 |
keithhamilton/blackmaas | lib/python2.7/site-packages/pip/_vendor/distlib/util.py | 163 | 49824 | #
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)

#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
# Fix: the fragments below are all raw strings now; the previous
# non-raw '\s' escapes produced the same values but trigger
# invalid-escape-sequence warnings (an error in future Pythons).

COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'

RELOP = '([<>=!~]=)|[<>]'

#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
                    RELOP + r')\s*(' + VERSPEC + '))*')

DIRECT_REF = r'(from\s+(?P<diref>.*))'

#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')

EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
               CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)

#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
    """Parse a requirement string like 'foo >= 1.2, < 2.0 [bar, baz]'.

    Returns a Container with attributes name, constraints (list of
    (op, version) tuples or None), extras (list or None), requirement
    (normalised string), source (the input) and url (for direct
    'from <url>' references), or None when *s* doesn't match.
    """

    def get_constraint(m):
        # One '<op> <version>' pair from the constraint list.
        d = m.groupdict()
        return d['op'], d['vn']

    result = None
    m = REQUIREMENT_RE.match(s)
    if m:
        d = m.groupdict()
        name = d['dn']
        cons = d['c1'] or d['c2']
        if not d['diref']:
            url = None
        else:
            # direct reference: 'name (from <url>)' -- no version constraints
            cons = None
            url = d['diref'].strip()
        if not cons:
            cons = None
            constr = ''  # NOTE(review): unused local, kept as-is
            rs = d['dn']
        else:
            # A leading version with no operator means 'compatible release'.
            if cons[0] not in '<>!=':
                cons = '~=' + cons
            iterator = RELOP_IDENT_RE.finditer(cons)
            cons = [get_constraint(m) for m in iterator]
            rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
        if not d['ex']:
            extras = None
        else:
            extras = COMMA_RE.split(d['ex'])
        result = Container(name=name, constraints=cons, extras=extras,
                           requirement=rs, source=s, url=url)
    return result
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files"""

    def relative_to(base, path):
        # Normalise separators and return *path* relative to *base*,
        # without a leading '/'.
        base = base.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(base)
        return path[len(base):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        pattern = os.path.join(resources_root, base)
        for matched_base in iglob(pattern):
            for abs_path in iglob(os.path.join(matched_base, suffix)):
                resource_file = relative_to(resources_root, abs_path)
                if dest is None:
                    # a None destination removes any earlier mapping
                    destinations.pop(resource_file, None)
                else:
                    tail = relative_to(matched_base, abs_path)
                    prefix = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = prefix + '/' + tail
    return destinations
def in_venv():
    """Return True when running inside a virtual environment."""
    # virtualenv sets sys.real_prefix; PEP 405 venvs make sys.prefix
    # differ from sys.base_prefix.
    if hasattr(sys, 'real_prefix'):
        return True
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def get_executable():
    """Return the Python executable path, honouring the OS X venv launcher."""
    # On OS X the venv launcher records the real interpreter in the
    # environment; prefer that over sys.executable when present.
    if sys.platform == 'darwin' and '__VENV_LAUNCHER__' in os.environ:
        return os.environ['__VENV_LAUNCHER__']
    return sys.executable
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt the user until the first character of the reply is in
    *allowed_chars*, and return that (lower-cased) character.

    :param prompt: Text shown to the user on each attempt.
    :param allowed_chars: Characters accepted as an answer.
    :param error_prompt: Optional text prepended to the prompt after an
                         invalid answer.
    :param default: Reply assumed when the user just presses Enter.
    """
    p = prompt
    while True:
        s = raw_input(p)
        p = prompt
        if not s and default:
            s = default
        if s:
            c = s[0].lower()
            if c in allowed_chars:
                break
        # NOTE(review): if the reply is empty, no default is set and
        # error_prompt is given, 'c' is unbound here -- confirm callers
        # always pass a default in that case.
        if error_prompt:
            p = '%c: %s\n%s' % (c, error_prompt, prompt)
    return c
def extract_by_key(d, keys):
    """Return a new dict with only *keys* (a sequence, or a
    whitespace-separated string) that are actually present in *d*."""
    if isinstance(keys, string_types):
        keys = keys.split()
    return {k: d[k] for k in keys if k in d}
def read_exports(stream):
    """Read an exports file from *stream* (a byte stream on Python 3)
    and return a mapping {group: {name: ExportEntry}}.

    The stream is first parsed as JSON; on any failure it falls back to
    the legacy INI format.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        data = json.load(stream)
        result = data['exports']
        # Replace each 'name = value' pair with a parsed ExportEntry.
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # Legacy INI format: one section per export group.
        stream.seek(0, 0)
        cp = configparser.ConfigParser()
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            # read_file() replaced readfp() in newer configparser versions
            cp.readfp(stream)
        result = {}
        for key in cp.sections():
            result[key] = entries = {}
            for name, value in cp.items(key):
                s = '%s = %s' % (name, value)
                entry = get_export_entry(s)
                assert entry is not None
                #entry.dist = self
                entries[name] = entry
        return result
def write_exports(exports, stream):
    """Serialise an exports mapping ({group: {name: entry}}) to *stream*
    in INI format; *stream* must be a byte stream on Python 3."""
    if sys.version_info[0] >= 3:
        # ConfigParser emits text; encode it onto the byte stream as UTF-8.
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for group, entries in exports.items():
        # TODO check group, entries for valid values
        cp.add_section(group)
        for entry in entries.values():
            if entry.suffix is None:
                value = entry.prefix
            else:
                value = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                value = '%s [%s]' % (value, ', '.join(entry.flags))
            cp.set(group, entry.name, value)
    cp.write(stream)
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a fresh temporary directory, removed on exit."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(d):
    """Context manager running its body with *d* as the working directory."""
    saved = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        # Always restore the original working directory.
        os.chdir(saved)
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Temporarily set the global socket default timeout to *seconds*."""
    previous = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(previous)
class cached_property(object):
    """Descriptor computing a value once and caching it on the instance.

    The result is stored under the wrapped function's name, so later
    attribute accesses bypass this descriptor entirely.
    """

    def __init__(self, func):
        self.func = func
        #for attr in ('__name__', '__module__', '__doc__'):
        #    setattr(self, attr, getattr(func, attr, None))

    def __get__(self, obj, cls=None):
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        result = self.func(obj)
        # Bypass any __setattr__ override when caching the value.
        object.__setattr__(obj, self.func.__name__, result)
        return result
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and put back together again using the current
    directory separator.  Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem.  Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)

    # Drop any '.' components before joining with the native separator.
    parts = [p for p in pathname.split('/') if p != os.curdir]
    if not parts:
        return os.curdir
    return os.path.join(*parts)
class FileOperator(object):
    """Filesystem helper centralising copy/write/compile operations.

    Honours a dry-run flag (log but don't act) and can optionally record
    the files and directories it creates so changes can be committed or
    rolled back.
    """

    def __init__(self, dry_run=False):
        # dry_run: when True, operations are logged but not performed.
        self.dry_run = dry_run
        # Directories already known to exist (avoids repeated mkdir work).
        self.ensured = set()
        self._init_record()

    def _init_record(self):
        # Reset change tracking; recording stays off until enabled externally.
        self.record = False
        self.files_written = set()
        self.dirs_created = set()

    def record_as_written(self, path):
        # Track *path* as written, but only while recording is enabled.
        if self.record:
            self.files_written.add(path)

    def newer(self, source, target):
        """Tell if the target is newer than the source.

        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.

        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise PackagingFileError if 'source' does not exist.

        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True

        return os.stat(source).st_mtime > os.stat(target).st_mtime

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                # Refuse to clobber symlinks or non-regular files.
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        """Copy the contents of *instream* to the file *outfile*; when
        *encoding* is given, *instream* is assumed to yield text."""
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        """Write *data* (bytes) to *path*, creating parent dirs as needed."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        """Write text *data* to *path* encoded with *encoding*."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data.encode(encoding))
        self.record_as_written(path)

    def set_mode(self, bits, mask, files):
        """OR *bits* into each file's mode, then AND with *mask* (POSIX only)."""
        if os.name == 'posix':
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)

    # Convenience wrapper: make the given files executable (rwxr-xr-x-ish).
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)

    def ensure_dir(self, path):
        """Create *path* (and missing parents) unless it already exists."""
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            # Recurse so parents are created (and recorded) first.
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None):
        """Byte-compile *path* into its cache location and return that path.

        When *prefix* is given, it is stripped from the path used in
        compile-error diagnostics.
        """
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            # Only recompile when forced or the source is newer.
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
                py_compile.compile(path, dpath, diagpath, True)     # raise error
        self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        """Remove a file, link or directory tree at *path* if it exists,
        keeping the recorded-changes sets in sync."""
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)

    def is_writable(self, path):
        """Return True if *path* (or its nearest existing ancestor) is
        writable by the current process."""
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if parent == path:
                # Reached the filesystem root without finding anything.
                break
            path = parent
        return result

    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result

    def rollback(self):
        """Undo recorded changes: delete written files and created dirs."""
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)     # should fail if non-empty
        self._init_record()
def resolve(module_name, dotted_path):
    """Import *module_name* and return the object named by *dotted_path*
    inside it (or the module itself when *dotted_path* is None).

    Bug fix: ``__import__('a.b')`` returns the top-level package ``a``,
    so for dotted module names not already present in ``sys.modules`` we
    must walk down to the requested submodule explicitly.
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        mod = __import__(module_name)
        # __import__ returns the top-level package for dotted names;
        # descend to the requested submodule.
        for part in module_name.split('.')[1:]:
            mod = getattr(mod, part)
    if dotted_path is None:
        result = mod
    else:
        parts = dotted_path.split('.')
        result = getattr(mod, parts.pop(0))
        for p in parts:
            result = getattr(result, p)
    return result
class ExportEntry(object):
    """A single export (entry point): ``name = prefix:suffix [flags]``."""

    def __init__(self, name, prefix, suffix, flags):
        self.name = name
        self.prefix = prefix
        self.suffix = suffix
        self.flags = flags

    @cached_property
    def value(self):
        # Resolved lazily; cached_property stores the result on the instance.
        return resolve(self.prefix, self.suffix)

    def __repr__(self):
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        if not isinstance(other, ExportEntry):
            return False
        return (self.name == other.name and
                self.prefix == other.prefix and
                self.suffix == other.suffix and
                self.flags == other.flags)

    # Equality is value-based but hashing stays identity-based, as before.
    __hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
    """Parse an export specification ('name = prefix[:suffix] [flags]')
    and return an ExportEntry, or None when it doesn't look like one.

    Raises DistlibException for specifications that are close but
    malformed: stray brackets, or more than one ':' in the callable part.
    """
    m = ENTRY_RE.search(specification)
    if not m:
        result = None
        # Brackets without a matching entry pattern indicate a malformed
        # specification rather than plain non-entry text.
        if '[' in specification or ']' in specification:
            raise DistlibException('Invalid specification '
                                   '%r' % specification)
    else:
        d = m.groupdict()
        name = d['name']
        path = d['callable']
        colons = path.count(':')
        if colons == 0:
            prefix, suffix = path, None
        else:
            if colons != 1:
                raise DistlibException('Invalid specification '
                                       '%r' % specification)
            prefix, suffix = path.split(':')
        flags = d['flags']
        if flags is None:
            # Brackets present but no flags captured -> malformed.
            if '[' in specification or ']' in specification:
                raise DistlibException('Invalid specification '
                                       '%r' % specification)
            flags = []
        else:
            flags = [f.strip() for f in flags.split(',')]
        result = ExportEntry(name, prefix, suffix, flags)
    return result
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        result = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        result = os.path.expanduser('~')
    result = os.path.join(result, suffix)
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    if os.path.isdir(result):
        usable = os.access(result, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', result)
    else:
        try:
            os.makedirs(result)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', result, exc_info=True)
            usable = False
    if not usable:
        # Last resort: a throwaway temp dir, so callers always get a
        # writable location back.
        result = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', result)
    return result
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.

    The algorithm used is:

    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, tail = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    tail = tail.replace(os.sep, '--')
    return '%s%s.cache' % (drive, tail)
def ensure_slash(s):
    """Return *s* with a trailing '/' appended when not already present."""
    return s if s.endswith('/') else s + '/'
def parse_credentials(netloc):
    """Split 'user:password@host' into (username, password, host).

    Missing parts are returned as None; only the first '@' and the first
    ':' are significant.
    """
    username = password = None
    if '@' in netloc:
        credentials, netloc = netloc.split('@', 1)
        if ':' in credentials:
            username, password = credentials.split(':', 1)
        else:
            username = credentials
    return username, password, netloc
def get_process_umask():
    """Return the current process umask without permanently changing it."""
    # The umask can only be read by setting it: write a dummy value,
    # capture the old one, and immediately restore it.
    current = os.umask(0o22)
    os.umask(current)
    return current
def is_string_sequence(seq):
    """Return True if every element of *seq* is a string.

    Robustness fix: the previous implementation raised AssertionError for
    an empty sequence (but silently returned True with assertions
    disabled under -O); an empty sequence now consistently returns True.
    """
    return all(isinstance(s, string_types) for s in seq)
# Matches 'name-version' at the start of a filename: a distribution name
# (alphanumeric components separated by '.' or '-') followed by a version.
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
# Matches a trailing Python version tag such as '-py2.7' or '-py3'.
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)

    Return name, version, pyver or None
    """
    pyver = None
    # Strip a trailing '-pyX.Y' tag first, remembering the version.
    tag = PYTHON_VERSION.search(filename)
    if tag:
        pyver = tag.group(1)
        filename = filename[:tag.start()]
    # Prefer an exact match on the known project name, when supplied.
    if project_name and len(filename) > len(project_name) + 1:
        head = re.match(re.escape(project_name) + r'\b', filename)
        if head:
            cut = head.end()
            return filename[:cut], filename[cut + 1:], pyver
    # Otherwise fall back to the generic name-version pattern.
    generic = PROJECT_NAME_AND_VERSION.match(filename)
    if generic:
        return generic.group(1), generic.group(3), pyver
    return None
# Allow spaces in name because of legacy dists like "Twisted Core"
# Matches strings of the form 'Name (version)' as used in Provides-Dist.
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.
    From e.g. a Provides-Dist value.

    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    """
    match = NAME_VERSION_RE.match(p)
    if match is None:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    groups = match.groupdict()
    return groups['name'].strip().lower(), groups['ver']
def get_extras(requested, available):
    """Resolve a requested set of extras against the declared ones.

    '*' selects every available extra and '-name' removes one.
    Undeclared extras are logged as warnings (removals excepted from the
    result, additions still included).
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.discard('*')
        result |= available
    for item in requested:
        if item == '-':
            # A bare '-' is kept verbatim.
            result.add(item)
        elif item.startswith('-'):
            unwanted = item[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            result.discard(unwanted)
        else:
            if item not in available:
                logger.warning('undeclared extra: %s' % item)
            result.add(item)
    return result
#
# Extended metadata functionality
#
def _get_external_data(url):
    """Fetch *url* and return its JSON payload as a dict.

    Returns an empty dict (after logging) on any failure, or when the
    response's Content-Type is not 'application/json'.
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        if headers.get('Content-Type') != 'application/json':
            logger.debug('Unexpected response for JSON request')
        else:
            # Decode the byte stream as UTF-8 while parsing.
            reader = codecs.getreader('utf-8')(resp)
            #data = reader.read().decode('utf-8')
            #result = json.loads(data)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
def get_project_data(name):
    """Fetch the project metadata JSON for *name* from the red-dove mirror."""
    initial = name[0].upper()
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/project.json' % (initial, name))
    return _get_external_data(url)
def get_package_data(name, version):
    """Fetch the metadata JSON for release *version* of project *name*."""
    initial = name[0].upper()
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/package-%s.json' % (initial, name, version))
    return _get_external_data(url)
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """
    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        listeners = self._subscribers
        if event not in listeners:
            listeners[event] = deque([subscriber])
        elif append:
            listeners[event].append(subscriber)
        else:
            listeners[event].appendleft(subscriber)

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        """
        listeners = self._subscribers
        if event not in listeners:
            raise ValueError('No subscribers: %r' % event)
        listeners[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator for the subscribers for an event.
        :param event: The event to return subscribers for.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish a event and return a list of values returned by its
        subscribers.

        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        results = []
        for subscriber in self.get_subscribers(event):
            try:
                outcome = subscriber(event, *args, **kwargs)
            except Exception:
                # A failing subscriber contributes None instead of
                # aborting publication.
                logger.exception('Exception during event publication')
                outcome = None
            results.append(outcome)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, results)
        return results
#
# Simple sequencing
#
class Sequencer(object):
    """Tracks predecessor/successor constraints between steps.

    Can produce an ordering ending at a given step (get_steps), the
    strongly connected components of the dependency graph
    (strong_connections), or a Graphviz rendering (dot).
    """

    def __init__(self):
        self._preds = {}        # successor -> set of its predecessors
        self._succs = {}        # predecessor -> set of its successors
        self._nodes = set()     # nodes with no preds/succs

    def add_node(self, node):
        # Register an isolated node (no edges).
        self.​_nodes.add(node) if False else self._nodes.add(node)

    def remove_node(self, node, edges=False):
        """Remove *node* from the isolated set; with edges=True also
        delete every edge touching it."""
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]

    def add(self, pred, succ):
        # Record the edge pred -> succ (self-edges are disallowed).
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)

    def remove(self, pred, succ):
        """Remove the edge pred -> succ; raises ValueError when absent."""
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:
            raise ValueError('%r not a successor of %r' % (succ, pred))

    def is_step(self, step):
        # A known step appears in an edge set or the isolated-node set.
        return (step in self._preds or step in self._succs or
                step in self._nodes)

    def get_steps(self, final):
        """Return the steps, in dependency order, ending with *final*."""
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)

    @property
    def strong_connections(self):
        """Strongly connected components of the graph, via Tarjan's
        algorithm (recursive form)."""
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []

        graph = self._succs

        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)

            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])

            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []

                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)

        for node in graph:
            if node not in lowlinks:
                strongconnect(node)

        return result

    @property
    def dot(self):
        """Graphviz 'digraph' source for the current edge/node sets."""
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append('  %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append('  %s;' % node)
        result.append('}')
        return '\n'.join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
# Archive suffixes understood by unarchive() below.
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
                      '.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
    """Extract a zip/whl/tar/tgz/tbz archive into *dest_dir*.

    When *check* is true, every member path is validated to stay inside
    *dest_dir*, guarding against '../' path traversal in hostile
    archives.  *format* is inferred from the filename when None.

    NOTE(review): when *format* is passed explicitly for a tar variant,
    'mode' is never assigned (the inference branch is skipped) -- confirm
    callers always rely on filename-based detection.
    """
    def check_path(path):
        # Normalise to an absolute path and require it to be strictly
        # below dest_dir.
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        # NOTE(review): a member resolving to dest_dir itself makes
        # p[plen] raise IndexError rather than ValueError -- confirm
        # that is acceptable to callers.
        if not p.startswith(dest_dir) or p[plen] != os.sep:
            raise ValueError('path outside destination: %r' % p)

    dest_dir = os.path.abspath(dest_dir)
    plen = len(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
            mode = 'r:gz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
            mode = 'r:bz2'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
            mode = 'r'
        else:
            raise ValueError('Unknown format for %r' % archive_filename)
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            archive = tarfile.open(archive_filename, mode)
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
def zip_dir(directory):
    """Zip a directory tree and return it as an in-memory BytesIO object."""
    buf = io.BytesIO()
    prefix_len = len(directory)
    with ZipFile(buf, "w") as archive:
        for base, _dirs, filenames in os.walk(directory):
            # Archive names are the paths relative to *directory*.
            rel_base = base[prefix_len:]
            for fname in filenames:
                archive.write(os.path.join(base, fname),
                              os.path.join(rel_base, fname))
    return buf
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
    """Simple progress model: tracks a current value between ``min`` and
    ``max`` and derives percentage, ETA and average speed from wall time.
    ``maxval=None`` means the total is unknown.
    """
    # Placeholder reported by `maximum` when maxval is None.
    unknown = 'UNKNOWN'
    def __init__(self, minval=0, maxval=100):
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None  # wall-clock time of the first update()
        self.elapsed = 0     # seconds since started, refreshed on update()
        self.done = False
    def update(self, curval):
        """Record a new current value and refresh the elapsed time."""
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            # First update starts the clock; elapsed stays 0 until the next.
            self.started = now
        else:
            self.elapsed = now - self.started
    def increment(self, incr):
        """Advance the current value by a non-negative amount."""
        assert incr >= 0
        self.update(self.cur + incr)
    def start(self):
        """Reset progress to the minimum; returns self for chaining."""
        self.update(self.min)
        return self
    def stop(self):
        """Force progress to the maximum (when known) and mark as done."""
        if self.max is not None:
            self.update(self.max)
        self.done = True
    @property
    def maximum(self):
        """The maximum value, or the string 'UNKNOWN' when unbounded."""
        return self.unknown if self.max is None else self.max
    @property
    def percentage(self):
        """Completion as a short string; ' ?? %' when the total is unknown."""
        if self.done:
            result = '100 %'
        elif self.max is None:
            result = ' ?? %'
        else:
            v = 100.0 * (self.cur - self.min) / (self.max - self.min)
            result = '%3d %%' % v
        return result
    def format_duration(self, duration):
        # NOTE(review): operator precedence makes this read as
        # "(duration <= 0 and self.max is None) or self.cur == self.min";
        # the '??:??:??' placeholder thus shows before any progress, or
        # when the total is unknown and no estimate exists -- confirm the
        # grouping is intended.
        if (duration <= 0) and self.max is None or self.cur == self.min:
            result = '??:??:??'
        #elif duration < 1:
        #    result = '--:--:--'
        else:
            result = time.strftime('%H:%M:%S', time.gmtime(duration))
        return result
    @property
    def ETA(self):
        """'ETA : HH:MM:SS' extrapolated linearly from progress so far,
        or 'Done: <elapsed>' once stop() has been called."""
        if self.done:
            prefix = 'Done'
            t = self.elapsed
            #import pdb; pdb.set_trace()
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                #import pdb; pdb.set_trace()
                # Remaining time = (total/done - 1) * elapsed.
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))
    @property
    def speed(self):
        """Average rate since start, scaled to B/s, KB/s, MB/s, ..."""
        if self.elapsed == 0:
            result = 0.0
        else:
            result = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if result < 1000:
                break
            result /= 1000.0
        return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')

def iglob(path_glob):
    """Extended globbing supporting '**' recursion and {a,b,c} option sets."""
    # Validate eagerly so the caller sees the error before iterating.
    if _CHECK_RECURSIVE_GLOB.search(path_glob):
        raise ValueError(
            """invalid glob %r: recursive glob "**" must be used alone"""
            % path_glob)
    if _CHECK_MISMATCH_SET.search(path_glob):
        raise ValueError(
            """invalid glob %r: mismatching set marker '{' or '}'"""
            % path_glob)
    return _iglob(path_glob)
def _iglob(path_glob):
    # Recursive worker for iglob(); assumes the pattern was validated.
    # Expands one {a,b,c} alternation per call, then defers to the std
    # glob, or to an os.walk for patterns containing '**'.
    rich_path_glob = RICH_GLOB.split(path_glob, 1)
    if len(rich_path_glob) > 1:
        # split(..., 1) on a match always yields [prefix, set, suffix].
        assert len(rich_path_glob) == 3, rich_path_glob
        prefix, set, suffix = rich_path_glob
        for item in set.split(','):
            for path in _iglob(''.join((prefix, item, suffix))):
                yield path
    else:
        if '**' not in path_glob:
            for item in std_iglob(path_glob):
                yield item
        else:
            prefix, radical = path_glob.split('**', 1)
            if prefix == '':
                prefix = '.'
            if radical == '':
                radical = '*'
            else:
                # we support both '**/tail' and '**tail': strip a leading
                # separator from the remainder before re-globbing.
                radical = radical.lstrip('/')
                radical = radical.lstrip('\\')
            for path, dir, files in os.walk(prefix):
                path = os.path.normpath(path)
                for fn in _iglob(os.path.join(path, radical)):
                    yield fn
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
    """HTTPS connection that can verify the server certificate against a
    CA bundle and match the certificate against the requested host name.
    """
    ca_certs = None # set this to the path to the certs file (.pem)
    check_domain = True # only used if ca_certs is not None
    # noinspection PyPropertyAccess
    def connect(self):
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if getattr(self, '_tunnel_host', False):
            # A proxy tunnel (CONNECT) was requested: establish it over
            # the raw socket before wrapping in TLS.
            self.sock = sock
            self._tunnel()
        if not hasattr(ssl, 'SSLContext'):
            # For 2.x
            if self.ca_certs:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            # NOTE(review): PROTOCOL_SSLv23 negotiates the highest common
            # protocol but still permits legacy versions -- confirm that
            # matches current security requirements.
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        cert_reqs=cert_reqs,
                                        ssl_version=ssl.PROTOCOL_SSLv23,
                                        ca_certs=self.ca_certs)
        else:
            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            context.options |= ssl.OP_NO_SSLv2
            if self.cert_file:
                context.load_cert_chain(self.cert_file, self.key_file)
            kwargs = {}
            if self.ca_certs:
                context.verify_mode = ssl.CERT_REQUIRED
                context.load_verify_locations(cafile=self.ca_certs)
                if getattr(ssl, 'HAS_SNI', False):
                    # Send SNI so virtual-hosted servers present the
                    # right certificate.
                    kwargs['server_hostname'] = self.host
            self.sock = context.wrap_socket(sock, **kwargs)
            if self.ca_certs and self.check_domain:
                try:
                    match_hostname(self.sock.getpeercert(), self.host)
                    logger.debug('Host verified: %s', self.host)
                except CertificateError:
                    # Tear the connection down before propagating.
                    self.sock.shutdown(socket.SHUT_RDWR)
                    self.sock.close()
                    raise
class HTTPSHandler(BaseHTTPSHandler):
    """urllib2-style handler that opens HTTPSConnection instances
    configured with this handler's CA bundle and domain-check flag."""
    def __init__(self, ca_certs, check_domain=True):
        BaseHTTPSHandler.__init__(self)
        self.ca_certs = ca_certs
        self.check_domain = check_domain
    def _conn_maker(self, *args, **kwargs):
        """
        This is called to create a connection instance. Normally you'd
        pass a connection class to do_open, but it doesn't actually check for
        a class, and just expects a callable. As long as we behave just as a
        constructor would have, we should be OK. If it ever changes so that
        we *must* pass a class, we'll create an UnsafeHTTPSConnection class
        which just sets check_domain to False in the class definition, and
        choose which one to pass to do_open.
        """
        result = HTTPSConnection(*args, **kwargs)
        if self.ca_certs:
            result.ca_certs = self.ca_certs
            result.check_domain = self.check_domain
        return result
    def https_open(self, req):
        try:
            return self.do_open(self._conn_maker, req)
        except URLError as e:
            # Re-word certificate failures as a clearer CertificateError;
            # everything else propagates unchanged.
            if 'certificate verify failed' in str(e.reason):
                raise CertificateError('Unable to verify server certificate '
                                       'for %s' % req.host)
            else:
                raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
    """Handler that refuses plain HTTP: inheriting from HTTPHandler keeps
    build_opener from installing a working HTTP handler alongside it."""
    def http_open(self, req):
        raise URLError('Unexpected HTTP request on what should be a secure '
                       'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
    # Thin httplib wrappers used only on Python 2.6, where the Transport
    # classes below construct connections with extra keyword arguments
    # (e.g. timeout) that the stock classes don't accept -- confirm.
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0: # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))
    class HTTPS(httplib.HTTPS):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0: # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
    """XML-RPC transport carrying a connection timeout (see ServerProxy)."""
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)
    def make_connection(self, host):
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            result = HTTP(h, timeout=self.timeout)
        else:
            # NOTE(review): on non-2.6 interpreters self.timeout is not
            # passed to HTTPConnection here -- confirm whether that is
            # intentional.
            if not self._connection or host != self._connection[0]:
                # Cache one connection keyed by host, as the base class does.
                self._extra_headers = eh
                self._connection = host, httplib.HTTPConnection(h)
            result = self._connection[1]
        return result
class SafeTransport(xmlrpclib.SafeTransport):
    """XML-RPC HTTPS transport that injects the requested timeout into the
    connection keyword arguments."""
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.SafeTransport.__init__(self, use_datetime)
    def make_connection(self, host):
        h, eh, kwargs = self.get_host_info(host)
        if not kwargs:
            kwargs = {}
        kwargs['timeout'] = self.timeout
        if _ver_info == (2, 6):
            # NOTE(review): passes the raw 'host' rather than the cleaned
            # 'h' used by Transport.make_connection above -- confirm.
            result = HTTPS(host, None, **kwargs)
        else:
            if not self._connection or host != self._connection[0]:
                # Cache one connection keyed by host, as the base class does.
                self._extra_headers = eh
                self._connection = host, httplib.HTTPSConnection(h, None,
                                                                 **kwargs)
            result = self._connection[1]
        return result
class ServerProxy(xmlrpclib.ServerProxy):
    """ServerProxy accepting a ``timeout`` keyword, wired through the
    timeout-aware Transport/SafeTransport classes above."""
    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The above classes only come into play if a timeout
        # is specified
        if timeout is not None:
            # Pick the transport matching the URI scheme.
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            if scheme == 'https':
                tcls = SafeTransport
            else:
                tcls = Transport
            kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
    """Shared csv dialect settings and context-manager support for the
    CSVReader/CSVWriter wrappers below."""
    defaults = {
        'delimiter': str(','), # The strs are used because we need native
        'quotechar': str('"'), # str in the csv API (2.x won't take
        'lineterminator': str('\n') # Unicode)
    }
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        # Subclasses set self.stream in __init__; always close it on exit.
        self.stream.close()
class CSVReader(CSVBase):
    """Iterate rows from a CSV file or stream, yielding text cells on
    both Python 2 and 3."""
    def __init__(self, **kwargs):
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # needs to be a text stream
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            # No stream supplied: open the file at kwargs['path'].
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)
    def __iter__(self):
        return self
    def next(self):
        result = next(self.reader)
        if sys.version_info[0] < 3:
            # 2.x csv yields bytes; decode each cell to unicode.
            for i, item in enumerate(result):
                if not isinstance(item, text_type):
                    result[i] = item.decode('utf-8')
        return result
    # Same method works as the 3.x iterator protocol entry point.
    __next__ = next
class CSVWriter(CSVBase):
    """Write rows to a CSV file, encoding text cells to UTF-8 bytes on
    Python 2 where the csv module can't handle Unicode."""
    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)
    def writerow(self, row):
        if sys.version_info[0] < 3:
            # 2.x csv wants bytes: encode any unicode cells first.
            r = []
            for item in row:
                if isinstance(item, text_type):
                    item = item.encode('utf-8')
                r.append(item)
            row = r
        self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
    """Configurator with an added ``inc://`` converter that includes other
    JSON files, resolved relative to ``base``."""
    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'
    def __init__(self, config, base=None):
        super(Configurator, self).__init__(config)
        # Directory used to resolve relative inc:// paths.
        self.base = base or os.getcwd()
    def configure_custom(self, config):
        """Instantiate the object described by ``config``: '()' names the
        factory callable (a dotted name is resolved), '[]' supplies
        positional args, '.' supplies post-construction attributes, and
        the remaining keys become keyword args; all values are converted
        recursively."""
        def convert(o):
            # Recurse into containers; scalars go through self.convert,
            # which applies the registered value_converters.
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            # Apply '.'-style attributes after construction.
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result
    def __getitem__(self, key):
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            # Lazily instantiate factory-style entries on first access.
            self.config[key] = result = self.configure_custom(result)
        return result
    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
    """Mixin providing subprocess execution with captured, streamed output."""

    def __init__(self, verbose=False, progress=None):
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Drain lines from a subprocess output *stream*.  Each line is handed
        to the progress callable (if one was configured) together with
        *context*; otherwise a dot (or, in verbose mode, the decoded line)
        is written to sys.stderr.
        """
        callback = self.progress
        chatty = self.verbose
        while True:
            line = stream.readline()
            if not line:
                break
            if callback is not None:
                callback(line, context)
            elif chatty:
                sys.stderr.write(line.decode('utf-8'))
                sys.stderr.flush()
            else:
                sys.stderr.write('.')
                sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        """Run *cmd*, streaming stdout/stderr through reader threads, and
        return the finished Popen object."""
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, **kwargs)
        watchers = [
            threading.Thread(target=self.reader, args=(proc.stdout, 'stdout')),
            threading.Thread(target=self.reader, args=(proc.stderr, 'stderr')),
        ]
        for watcher in watchers:
            watcher.start()
        proc.wait()
        for watcher in watchers:
            watcher.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return proc
| bsd-3-clause |
liorvh/golismero | thirdparty_libs/nltk/corpus/reader/switchboard.py | 17 | 4656 | # Natural Language Toolkit: Switchboard Corpus Reader
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import re
from nltk.tag import str2tuple
from util import *
from api import *
class SwitchboardTurn(list):
    """
    A list subclass encoding one switchboard utterance.  The list elements
    are the utterance's words (or (word, tag) pairs); the ``speaker`` and
    ``id`` attributes give the speaker identifier and the utterance id.
    Utterance ids are only unique within a single discourse.
    """

    def __init__(self, words, speaker, id):
        list.__init__(self, words)
        self.speaker = speaker
        self.id = int(id)

    def __repr__(self):
        if not self:
            text = ''
        elif isinstance(self[0], tuple):
            # Tagged words render as word/tag.
            text = ' '.join('%s/%s' % pair for pair in self)
        else:
            text = ' '.join(self)
        return '<%s.%s: %r>' % (self.speaker, self.id, text)
class SwitchboardCorpusReader(CorpusReader):
    """
    Corpus reader for the Switchboard corpus.  All reading is done from
    the single 'tagged' file (which is tokenized), so the untagged
    accessor methods simply strip the tags.
    """
    _FILES = ['tagged']
    # Use the "tagged" file even for non-tagged data methods, since
    # it's tokenized.

    def __init__(self, root, tag_mapping_function=None):
        CorpusReader.__init__(self, root, self._FILES)
        # Optional mapping applied to each tag when simplify_tags=True.
        self._tag_mapping_function = tag_mapping_function

    def words(self):
        """Return a corpus view of all words, with tags stripped."""
        return StreamBackedCorpusView(self.abspath('tagged'),
                                      self._words_block_reader)

    def tagged_words(self, simplify_tags=False):
        """Return a corpus view of (word, tag) pairs."""
        def tagged_words_block_reader(stream):
            return self._tagged_words_block_reader(stream, simplify_tags)
        return StreamBackedCorpusView(self.abspath('tagged'),
                                      tagged_words_block_reader)

    def turns(self):
        """Return a corpus view of untagged SwitchboardTurn objects."""
        return StreamBackedCorpusView(self.abspath('tagged'),
                                      self._turns_block_reader)

    def tagged_turns(self, simplify_tags=False):
        """Return a corpus view of tagged SwitchboardTurn objects."""
        def tagged_turns_block_reader(stream):
            return self._tagged_turns_block_reader(stream, simplify_tags)
        return StreamBackedCorpusView(self.abspath('tagged'),
                                      tagged_turns_block_reader)

    def discourses(self):
        """Return a corpus view of discourses (lists of untagged turns)."""
        return StreamBackedCorpusView(self.abspath('tagged'),
                                      self._discourses_block_reader)

    def tagged_discourses(self, simplify_tags=False):
        """Return a corpus view of discourses of tagged turns."""
        def tagged_discourses_block_reader(stream):
            return self._tagged_discourses_block_reader(stream, simplify_tags)
        return StreamBackedCorpusView(self.abspath('tagged'),
                                      tagged_discourses_block_reader)

    def _discourses_block_reader(self, stream):
        # returns at most 1 discourse. (The other methods depend on this.)
        return [[self._parse_utterance(u, include_tag=False)
                 for b in read_blankline_block(stream)
                 for u in b.split('\n') if u.strip()]]

    def _tagged_discourses_block_reader(self, stream, simplify_tags=False):
        # returns at most 1 discourse. (The other methods depend on this.)
        return [[self._parse_utterance(u, include_tag=True,
                                       simplify_tags=simplify_tags)
                 for b in read_blankline_block(stream)
                 for u in b.split('\n') if u.strip()]]

    def _turns_block_reader(self, stream):
        return self._discourses_block_reader(stream)[0]

    def _tagged_turns_block_reader(self, stream, simplify_tags=False):
        return self._tagged_discourses_block_reader(stream, simplify_tags)[0]

    def _words_block_reader(self, stream):
        return sum(self._discourses_block_reader(stream)[0], [])

    def _tagged_words_block_reader(self, stream, simplify_tags=False):
        return sum(self._tagged_discourses_block_reader(stream,
                                                        simplify_tags)[0], [])

    # Fix: use a raw string so '\w', '\d' and '\.' are regex escapes, not
    # (invalid) string escapes -- the old non-raw literal triggers
    # DeprecationWarning/SyntaxWarning on modern Pythons.  Matches lines
    # of the form 'speaker.id: text', e.g. 'A.1: hello there'.
    _UTTERANCE_RE = re.compile(r'(\w+)\.(\d+):\s*(.*)')
    _SEP = '/'

    def _parse_utterance(self, utterance, include_tag, simplify_tags=False):
        """Parse one 'speaker.id: word/tag ...' line into a SwitchboardTurn;
        raises ValueError for malformed lines."""
        m = self._UTTERANCE_RE.match(utterance)
        if m is None:
            raise ValueError('Bad utterance %r' % utterance)
        speaker, id, text = m.groups()
        words = [str2tuple(s, self._SEP) for s in text.split()]
        if not include_tag:
            words = [w for (w, t) in words]
        elif simplify_tags:
            words = [(w, self._tag_mapping_function(t)) for (w, t) in words]
        return SwitchboardTurn(words, speaker, id)
| gpl-2.0 |
woodscn/scipy | scipy/linalg/_matfuncs_sqrtm.py | 77 | 5867 | """
Matrix square root for general matrices and for upper triangular matrices.
This module exists to avoid cyclic imports.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['sqrtm']
import numpy as np
from scipy._lib._util import _asarray_validated
# Local imports
from .misc import norm
from .lapack import ztrsyl, dtrsyl
from .decomp_schur import schur, rsf2csf
class SqrtmError(np.linalg.LinAlgError):
    """Raised by _sqrtm_triu when the triangular square-root recurrence
    breaks down (zero denominator), i.e. no square root was found."""
    pass
def _sqrtm_triu(T, blocksize=64):
    """
    Matrix square root of an upper triangular matrix.
    This is a helper function for `sqrtm` and `logm`.
    Parameters
    ----------
    T : (N, N) array_like upper triangular
        Matrix whose square root to evaluate
    blocksize : int, optional
        If the blocksize is not degenerate with respect to the
        size of the input array, then use a blocked algorithm. (Default: 64)
    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `T`
    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
       "Blocked Schur Algorithms for Computing the Matrix Square Root,
       Lecture Notes in Computer Science, 7782. pp. 171-182.
    """
    T_diag = np.diag(T)
    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
    if not keep_it_real:
        # A negative real eigenvalue forces a complex square root.
        T_diag = T_diag.astype(complex)
    # Seed R with the square roots of the diagonal entries; off-diagonals
    # are filled in by the recurrences below.
    R = np.diag(np.sqrt(T_diag))
    # Compute the number of blocks to use; use at least one block.
    n, n = T.shape
    nblocks = max(n // blocksize, 1)
    # Compute the smaller of the two sizes of blocks that
    # we will actually use, and compute the number of large blocks.
    bsmall, nlarge = divmod(n, nblocks)
    blarge = bsmall + 1
    nsmall = nblocks - nlarge
    if nsmall * bsmall + nlarge * blarge != n:
        raise Exception('internal inconsistency')
    # Define the index range covered by each block.
    start_stop_pairs = []
    start = 0
    for count, size in ((nsmall, bsmall), (nlarge, blarge)):
        for i in range(count):
            start_stop_pairs.append((start, start + size))
            start += size
    # Within-block interactions.
    for start, stop in start_stop_pairs:
        for j in range(start, stop):
            for i in range(j-1, start-1, -1):
                s = 0
                if j - i > 1:
                    # Accumulated cross terms R[i, i+1:j] . R[i+1:j, j].
                    s = R[i, i+1:j].dot(R[i+1:j, j])
                denom = R[i, i] + R[j, j]
                if not denom:
                    raise SqrtmError('failed to find the matrix square root')
                # Scalar Sylvester solve: R[i,i]*x + x*R[j,j] = T[i,j] - s.
                R[i, j] = (T[i, j] - s) / denom
    # Between-block interactions.
    for j in range(nblocks):
        jstart, jstop = start_stop_pairs[j]
        for i in range(j-1, -1, -1):
            istart, istop = start_stop_pairs[i]
            S = T[istart:istop, jstart:jstop]
            if j - i > 1:
                S = S - R[istart:istop, istop:jstart].dot(R[istop:jstart,
                                                            jstart:jstop])
            # Invoke LAPACK.
            # For more details, see the solve_sylvester implemention
            # and the fortran dtrsyl and ztrsyl docs.
            Rii = R[istart:istop, istart:istop]
            Rjj = R[jstart:jstop, jstart:jstop]
            if keep_it_real:
                x, scale, info = dtrsyl(Rii, Rjj, S)
            else:
                x, scale, info = ztrsyl(Rii, Rjj, S)
            R[istart:istop, jstart:jstop] = x * scale
    # Return the matrix square root.
    return R
def sqrtm(A, disp=True, blocksize=64):
    """
    Matrix square root.
    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose square root to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)
    blocksize : integer, optional
        If the blocksize is not degenerate with respect to the
        size of the input array, then use a blocked algorithm. (Default: 64)
    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `A`
    errest : float
        (if disp == False)
        Frobenius norm of the estimated error, ||err||_F / ||A||_F
    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
       "Blocked Schur Algorithms for Computing the Matrix Square Root,
       Lecture Notes in Computer Science, 7782. pp. 171-182.
    Examples
    --------
    >>> from scipy.linalg import sqrtm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> r = sqrtm(a)
    >>> r
    array([[ 0.75592895, 1.13389342],
           [ 0.37796447, 1.88982237]])
    >>> r.dot(r)
    array([[ 1., 3.],
           [ 1., 4.]])
    """
    A = _asarray_validated(A, check_finite=True, as_inexact=True)
    if len(A.shape) != 2:
        raise ValueError("Non-matrix input to matrix function.")
    if blocksize < 1:
        raise ValueError("The blocksize should be at least 1.")
    keep_it_real = np.isrealobj(A)
    if keep_it_real:
        # Real Schur form; if T isn't triangular (complex eigenvalues show
        # up as 2x2 real blocks), convert to the complex Schur form.
        T, Z = schur(A)
        if not np.array_equal(T, np.triu(T)):
            T, Z = rsf2csf(T, Z)
    else:
        T, Z = schur(A, output='complex')
    failflag = False
    try:
        # sqrt of the triangular factor, then undo the similarity
        # transform: A = Z T Z^H  =>  sqrt(A) = Z sqrt(T) Z^H.
        R = _sqrtm_triu(T, blocksize=blocksize)
        ZH = np.conjugate(Z).T
        X = Z.dot(R).dot(ZH)
    except SqrtmError:
        # Recurrence broke down: report an all-NaN result.
        failflag = True
        X = np.empty_like(A)
        X.fill(np.nan)
    if disp:
        nzeig = np.any(np.diag(T) == 0)
        if nzeig:
            print("Matrix is singular and may not have a square root.")
        elif failflag:
            print("Failed to find a square root.")
        return X
    else:
        try:
            # Relative residual in the Frobenius norm.
            arg2 = norm(X.dot(X) - A, 'fro')**2 / norm(A, 'fro')
        except ValueError:
            # NaNs in matrix
            arg2 = np.inf
        return X, arg2
| bsd-3-clause |
Dhivyap/ansible | test/units/modules/network/fortios/test_fortios_application_rule_settings.py | 21 | 7349 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_application_rule_settings
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Auto-applied fixture patching the module's Connection class so no
    real device connection is attempted; returns the mocked class."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_application_rule_settings.Connection')
    return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_application_rule_settings_creation(mocker):
    """state=present should POST the object and report changed on success."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'application_rule_settings': {
            'id': '3'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_application_rule_settings.fortios_application(input_data, fos_instance)
    expected_data = {
        'id': '3'
    }
    set_method_mock.assert_called_with('application', 'rule-settings', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_application_rule_settings_creation_fails(mocker):
    """A 500 from the device should surface as an error, not a change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'application_rule_settings': {
            'id': '3'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_application_rule_settings.fortios_application(input_data, fos_instance)
    expected_data = {
        'id': '3'
    }
    set_method_mock.assert_called_with('application', 'rule-settings', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_application_rule_settings_removal(mocker):
    """state=absent should DELETE the object and report changed on success."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'application_rule_settings': {
            'id': '3'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_application_rule_settings.fortios_application(input_data, fos_instance)
    delete_method_mock.assert_called_with('application', 'rule-settings', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_application_rule_settings_deletion_fails(mocker):
    """A 500 during delete should surface as an error, not a change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'application_rule_settings': {
            'id': '3'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_application_rule_settings.fortios_application(input_data, fos_instance)
    delete_method_mock.assert_called_with('application', 'rule-settings', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_application_rule_settings_idempotent(mocker):
    """A 404 on set (object already in desired state) should report neither
    error nor change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'application_rule_settings': {
            'id': '3'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_application_rule_settings.fortios_application(input_data, fos_instance)
    expected_data = {
        'id': '3'
    }
    set_method_mock.assert_called_with('application', 'rule-settings', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_application_rule_settings_filter_foreign_attributes(mocker):
    """Attributes not in the module schema must be stripped from the
    request payload."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'application_rule_settings': {
            'random_attribute_not_valid': 'tag',
            'id': '3'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_application_rule_settings.fortios_application(input_data, fos_instance)
    expected_data = {
        'id': '3'
    }
    set_method_mock.assert_called_with('application', 'rule-settings', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/boto/s3/resumable_download_handler.py | 17 | 15584 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import errno
import httplib
import os
import re
import socket
import time
import boto
from boto import config, storage_uri_for_key
from boto.connection import AWSAuthConnection
from boto.exception import ResumableDownloadException
from boto.exception import ResumableTransferDisposition
from boto.s3.keyfile import KeyFile
from boto.gs.key import Key as GSKey
"""
Resumable download handler.
Resumable downloads will retry failed downloads, resuming at the byte count
completed by the last download attempt. If too many retries happen with no
progress (per configurable num_retries param), the download will be aborted.
The caller can optionally specify a tracker_file_name param in the
ResumableDownloadHandler constructor. If you do this, that file will
save the state needed to allow retrying later, in a separate process
(e.g., in a later run of gsutil).
Note that resumable downloads work across providers (they depend only
on support Range GETs), but this code is in the boto.s3 package
because it is the wrong abstraction level to go in the top-level boto
package.
TODO: At some point we should refactor the code to have a storage_service
package where all these provider-independent files go.
"""
class ByteTranslatingCallbackHandler(object):
    """
    Proxy that adjusts progress callbacks made by boto.s3.Key.get_file()
    to account for the byte offset at which a resumed download started.
    """
    def __init__(self, proxied_cb, download_start_point):
        self.proxied_cb = proxied_cb
        self.download_start_point = download_start_point

    def call(self, total_bytes_uploaded, total_size):
        # Shift the reported byte count by the resume offset before
        # forwarding to the wrapped callback.
        already_done = self.download_start_point
        self.proxied_cb(already_done + total_bytes_uploaded, total_size)
def get_cur_file_size(fp, position_to_eof=False):
    """
    Return the size of the file underlying *fp*; unless position_to_eof
    is True, the file position is restored afterwards.
    """
    if isinstance(fp, KeyFile) and not position_to_eof:
        # Avoid the EOF seek for KeyFile, where seeking is very expensive.
        return fp.getkey().size
    restore_pos = None if position_to_eof else fp.tell()
    fp.seek(0, os.SEEK_END)
    size = fp.tell()
    if restore_pos is not None:
        fp.seek(restore_pos, os.SEEK_SET)
    return size
class ResumableDownloadHandler(object):
"""
Handler for resumable downloads.
"""
MIN_ETAG_LEN = 5
RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
socket.gaierror)
    def __init__(self, tracker_file_name=None, num_retries=None):
        """
        Constructor. Instantiate once for each downloaded file.
        :type tracker_file_name: string
        :param tracker_file_name: optional file name to save tracking info
            about this download. If supplied and the current process fails
            the download, it can be retried in a new process. If called
            with an existing file containing an unexpired timestamp,
            we'll resume the transfer for this file; else we'll start a
            new resumable download.
        :type num_retries: int
        :param num_retries: the number of times we'll re-try a resumable
            download making no progress. (Count resets every time we get
            progress, so download can span many more than this number of
            retries.)
        """
        self.tracker_file_name = tracker_file_name
        self.num_retries = num_retries
        # Etag of the object version being resumed; None forces a restart.
        self.etag_value_for_current_download = None
        if tracker_file_name:
            self._load_tracker_file_etag()
        # Save download_start_point in instance state so caller can
        # find how much was transferred by this ResumableDownloadHandler
        # (across retries).
        self.download_start_point = None
def _load_tracker_file_etag(self):
f = None
try:
f = open(self.tracker_file_name, 'r')
self.etag_value_for_current_download = f.readline().rstrip('\n')
# We used to match an MD5-based regex to ensure that the etag was
# read correctly. Since ETags need not be MD5s, we now do a simple
# length sanity check instead.
if len(self.etag_value_for_current_download) < self.MIN_ETAG_LEN:
print('Couldn\'t read etag in tracker file (%s). Restarting '
'download from scratch.' % self.tracker_file_name)
except IOError, e:
# Ignore non-existent file (happens first time a download
# is attempted on an object), but warn user for other errors.
if e.errno != errno.ENOENT:
# Will restart because
# self.etag_value_for_current_download is None.
print('Couldn\'t read URI tracker file (%s): %s. Restarting '
'download from scratch.' %
(self.tracker_file_name, e.strerror))
finally:
if f:
f.close()
def _save_tracker_info(self, key):
self.etag_value_for_current_download = key.etag.strip('"\'')
if not self.tracker_file_name:
return
f = None
try:
f = open(self.tracker_file_name, 'w')
f.write('%s\n' % self.etag_value_for_current_download)
except IOError, e:
raise ResumableDownloadException(
'Couldn\'t write tracker file (%s): %s.\nThis can happen'
'if you\'re using an incorrectly configured download tool\n'
'(e.g., gsutil configured to save tracker files to an '
'unwritable directory)' %
(self.tracker_file_name, e.strerror),
ResumableTransferDisposition.ABORT)
finally:
if f:
f.close()
def _remove_tracker_file(self):
if (self.tracker_file_name and
os.path.exists(self.tracker_file_name)):
os.unlink(self.tracker_file_name)
def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
torrent, version_id, hash_algs):
"""
Attempts a resumable download.
Raises ResumableDownloadException if any problems occur.
"""
cur_file_size = get_cur_file_size(fp, position_to_eof=True)
if (cur_file_size and
self.etag_value_for_current_download and
self.etag_value_for_current_download == key.etag.strip('"\'')):
# Try to resume existing transfer.
if cur_file_size > key.size:
raise ResumableDownloadException(
'%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
'if you re-try this download it will start from scratch' %
(fp.name, cur_file_size, str(storage_uri_for_key(key)),
key.size), ResumableTransferDisposition.ABORT)
elif cur_file_size == key.size:
if key.bucket.connection.debug >= 1:
print 'Download complete.'
return
if key.bucket.connection.debug >= 1:
print 'Resuming download.'
headers = headers.copy()
headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
cb = ByteTranslatingCallbackHandler(cb, cur_file_size).call
self.download_start_point = cur_file_size
else:
if key.bucket.connection.debug >= 1:
print 'Starting new resumable download.'
self._save_tracker_info(key)
self.download_start_point = 0
# Truncate the file, in case a new resumable download is being
# started atop an existing file.
fp.truncate(0)
# Disable AWSAuthConnection-level retry behavior, since that would
# cause downloads to restart from scratch.
if isinstance(key, GSKey):
key.get_file(fp, headers, cb, num_cb, torrent, version_id,
override_num_retries=0, hash_algs=hash_algs)
else:
key.get_file(fp, headers, cb, num_cb, torrent, version_id,
override_num_retries=0)
fp.flush()
def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
version_id=None, hash_algs=None):
"""
Retrieves a file from a Key
:type key: :class:`boto.s3.key.Key` or subclass
:param key: The Key object from which upload is to be downloaded
:type fp: file
:param fp: File pointer into which data should be downloaded
:type headers: string
:param: headers to send when retrieving the files
:type cb: function
:param cb: (optional) a callback function that will be called to report
progress on the download. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted from the storage service and
the second representing the total number of bytes that need
to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be
called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type version_id: string
:param version_id: The version ID (optional)
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary of hash algorithms and
corresponding hashing class that implements update() and digest().
Defaults to {'md5': hashlib/md5.md5}.
Raises ResumableDownloadException if a problem occurs during
the transfer.
"""
debug = key.bucket.connection.debug
if not headers:
headers = {}
# Use num-retries from constructor if one was provided; else check
# for a value specified in the boto config file; else default to 6.
if self.num_retries is None:
self.num_retries = config.getint('Boto', 'num_retries', 6)
progress_less_iterations = 0
while True: # Retry as long as we're making progress.
had_file_bytes_before_attempt = get_cur_file_size(fp)
try:
self._attempt_resumable_download(key, fp, headers, cb, num_cb,
torrent, version_id, hash_algs)
# Download succceded, so remove the tracker file (if have one).
self._remove_tracker_file()
# Previously, check_final_md5() was called here to validate
# downloaded file's checksum, however, to be consistent with
# non-resumable downloads, this call was removed. Checksum
# validation of file contents should be done by the caller.
if debug >= 1:
print 'Resumable download complete.'
return
except self.RETRYABLE_EXCEPTIONS, e:
if debug >= 1:
print('Caught exception (%s)' % e.__repr__())
if isinstance(e, IOError) and e.errno == errno.EPIPE:
# Broken pipe error causes httplib to immediately
# close the socket (http://bugs.python.org/issue5542),
# so we need to close and reopen the key before resuming
# the download.
if isinstance(key, GSKey):
key.get_file(fp, headers, cb, num_cb, torrent, version_id,
override_num_retries=0, hash_algs=hash_algs)
else:
key.get_file(fp, headers, cb, num_cb, torrent, version_id,
override_num_retries=0)
except ResumableDownloadException, e:
if (e.disposition ==
ResumableTransferDisposition.ABORT_CUR_PROCESS):
if debug >= 1:
print('Caught non-retryable ResumableDownloadException '
'(%s)' % e.message)
raise
elif (e.disposition ==
ResumableTransferDisposition.ABORT):
if debug >= 1:
print('Caught non-retryable ResumableDownloadException '
'(%s); aborting and removing tracker file' %
e.message)
self._remove_tracker_file()
raise
else:
if debug >= 1:
print('Caught ResumableDownloadException (%s) - will '
'retry' % e.message)
# At this point we had a re-tryable failure; see if made progress.
if get_cur_file_size(fp) > had_file_bytes_before_attempt:
progress_less_iterations = 0
else:
progress_less_iterations += 1
if progress_less_iterations > self.num_retries:
# Don't retry any longer in the current process.
raise ResumableDownloadException(
'Too many resumable download attempts failed without '
'progress. You might try this download again later',
ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Close the key, in case a previous download died partway
# through and left data in the underlying key HTTP buffer.
# Do this within a try/except block in case the connection is
# closed (since key.close() attempts to do a final read, in which
# case this read attempt would get an IncompleteRead exception,
# which we can safely ignore.
try:
key.close()
except httplib.IncompleteRead:
pass
sleep_time_secs = 2**progress_less_iterations
if debug >= 1:
print('Got retryable failure (%d progress-less in a row).\n'
'Sleeping %d seconds before re-trying' %
(progress_less_iterations, sleep_time_secs))
time.sleep(sleep_time_secs)
| gpl-2.0 |
kenshay/ImageScript | Script_Runner/PYTHON/Lib/site-packages/pyasn1/type/univ.py | 5 | 100158 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2018, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import math
import sys
from pyasn1 import error
from pyasn1.codec.ber import eoo
from pyasn1.compat import binary
from pyasn1.compat import integer
from pyasn1.compat import octets
from pyasn1.type import base
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import tagmap
NoValue = base.NoValue
noValue = NoValue()
__all__ = ['Integer', 'Boolean', 'BitString', 'OctetString', 'Null',
'ObjectIdentifier', 'Real', 'Enumerated',
'SequenceOfAndSetOfBase', 'SequenceOf', 'SetOf',
'SequenceAndSetBase', 'Sequence', 'Set', 'Choice', 'Any',
'NoValue', 'noValue']
# "Simple" ASN.1 types (yet incomplete)
class Integer(base.AbstractSimpleAsn1Item):
"""Create |ASN.1| type or object.
|ASN.1| objects are immutable and duck-type Python :class:`int` objects.
Keyword Args
------------
value: :class:`int`, :class:`str` or |ASN.1| object
Python integer or string literal or |ASN.1| class instance.
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
Object representing non-default symbolic aliases for numbers
Raises
------
:py:class:`~pyasn1.error.PyAsn1Error`
On constraint violation or bad initializer.
Examples
--------
.. code-block:: python
class ErrorCode(Integer):
'''
ASN.1 specification:
ErrorCode ::=
INTEGER { disk-full(1), no-disk(-1),
disk-not-formatted(2) }
error ErrorCode ::= disk-full
'''
namedValues = NamedValues(
('disk-full', 1), ('no-disk', -1),
('disk-not-formatted', 2)
)
error = ErrorCode('disk-full')
"""
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
)
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
#: imposing constraints on |ASN.1| type initialization values.
subtypeSpec = constraint.ConstraintsIntersection()
#: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
#: representing symbolic aliases for numbers
namedValues = namedval.NamedValues()
# Optimization for faster codec lookup
typeId = base.AbstractSimpleAsn1Item.getTypeId()
def __init__(self, value=noValue, **kwargs):
if 'namedValues' not in kwargs:
kwargs['namedValues'] = self.namedValues
base.AbstractSimpleAsn1Item.__init__(self, value, **kwargs)
def __and__(self, value):
return self.clone(self._value & value)
def __rand__(self, value):
return self.clone(value & self._value)
def __or__(self, value):
return self.clone(self._value | value)
def __ror__(self, value):
return self.clone(value | self._value)
def __xor__(self, value):
return self.clone(self._value ^ value)
def __rxor__(self, value):
return self.clone(value ^ self._value)
def __lshift__(self, value):
return self.clone(self._value << value)
def __rshift__(self, value):
return self.clone(self._value >> value)
def __add__(self, value):
return self.clone(self._value + value)
def __radd__(self, value):
return self.clone(value + self._value)
def __sub__(self, value):
return self.clone(self._value - value)
def __rsub__(self, value):
return self.clone(value - self._value)
def __mul__(self, value):
return self.clone(self._value * value)
def __rmul__(self, value):
return self.clone(value * self._value)
def __mod__(self, value):
return self.clone(self._value % value)
def __rmod__(self, value):
return self.clone(value % self._value)
def __pow__(self, value, modulo=None):
return self.clone(pow(self._value, value, modulo))
def __rpow__(self, value):
return self.clone(pow(value, self._value))
def __floordiv__(self, value):
return self.clone(self._value // value)
def __rfloordiv__(self, value):
return self.clone(value // self._value)
if sys.version_info[0] <= 2:
def __div__(self, value):
if isinstance(value, float):
return Real(self._value / value)
else:
return self.clone(self._value / value)
def __rdiv__(self, value):
if isinstance(value, float):
return Real(value / self._value)
else:
return self.clone(value / self._value)
else:
def __truediv__(self, value):
return Real(self._value / value)
def __rtruediv__(self, value):
return Real(value / self._value)
def __divmod__(self, value):
return self.clone(divmod(self._value, value))
def __rdivmod__(self, value):
return self.clone(divmod(value, self._value))
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __int__(self):
return int(self._value)
if sys.version_info[0] <= 2:
def __long__(self):
return long(self._value)
def __float__(self):
return float(self._value)
def __abs__(self):
return self.clone(abs(self._value))
def __index__(self):
return int(self._value)
def __pos__(self):
return self.clone(+self._value)
def __neg__(self):
return self.clone(-self._value)
def __invert__(self):
return self.clone(~self._value)
def __round__(self, n=0):
r = round(self._value, n)
if n:
return self.clone(r)
else:
return r
def __floor__(self):
return math.floor(self._value)
def __ceil__(self):
return math.ceil(self._value)
if sys.version_info[0:2] > (2, 5):
def __trunc__(self):
return self.clone(math.trunc(self._value))
def __lt__(self, value):
return self._value < value
def __le__(self, value):
return self._value <= value
def __eq__(self, value):
return self._value == value
def __ne__(self, value):
return self._value != value
def __gt__(self, value):
return self._value > value
def __ge__(self, value):
return self._value >= value
def prettyIn(self, value):
try:
return int(value)
except ValueError:
try:
return self.namedValues[value]
except KeyError:
raise error.PyAsn1Error(
'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
)
def prettyOut(self, value):
try:
return str(self.namedValues[value])
except KeyError:
return str(value)
# backward compatibility
def getNamedValues(self):
return self.namedValues
class Boolean(Integer):
"""Create |ASN.1| type or object.
|ASN.1| objects are immutable and duck-type Python :class:`int` objects.
Keyword Args
------------
value: :class:`int`, :class:`str` or |ASN.1| object
Python integer or boolean or string literal or |ASN.1| class instance.
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
Object representing non-default symbolic aliases for numbers
Raises
------
:py:class:`~pyasn1.error.PyAsn1Error`
On constraint violation or bad initializer.
Examples
--------
.. code-block:: python
class RoundResult(Boolean):
'''
ASN.1 specification:
RoundResult ::= BOOLEAN
ok RoundResult ::= TRUE
ko RoundResult ::= FALSE
'''
ok = RoundResult(True)
ko = RoundResult(False)
"""
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
)
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
#: imposing constraints on |ASN.1| type initialization values.
subtypeSpec = Integer.subtypeSpec + constraint.SingleValueConstraint(0, 1)
#: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
#: representing symbolic aliases for numbers
namedValues = namedval.NamedValues(('False', 0), ('True', 1))
# Optimization for faster codec lookup
typeId = Integer.getTypeId()
if sys.version_info[0] < 3:
SizedIntegerBase = long
else:
SizedIntegerBase = int
class SizedInteger(SizedIntegerBase):
bitLength = leadingZeroBits = None
def setBitLength(self, bitLength):
self.bitLength = bitLength
self.leadingZeroBits = max(bitLength - integer.bitLength(self), 0)
return self
def __len__(self):
if self.bitLength is None:
self.setBitLength(integer.bitLength(self))
return self.bitLength
class BitString(base.AbstractSimpleAsn1Item):
"""Create |ASN.1| schema or value object.
|ASN.1| objects are immutable and duck-type both Python :class:`tuple` (as a tuple
of bits) and :class:`int` objects.
Keyword Args
------------
value: :class:`int`, :class:`str` or |ASN.1| object
Python integer or string literal representing binary or hexadecimal
number or sequence of integer bits or |ASN.1| object.
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
Object representing non-default symbolic aliases for numbers
binValue: :py:class:`str`
Binary string initializer to use instead of the *value*.
Example: '10110011'.
hexValue: :py:class:`str`
Hexadecimal string initializer to use instead of the *value*.
Example: 'DEADBEEF'.
Raises
------
:py:class:`~pyasn1.error.PyAsn1Error`
On constraint violation or bad initializer.
Examples
--------
.. code-block:: python
class Rights(BitString):
'''
ASN.1 specification:
Rights ::= BIT STRING { user-read(0), user-write(1),
group-read(2), group-write(3),
other-read(4), other-write(5) }
group1 Rights ::= { group-read, group-write }
group2 Rights ::= '0011'B
group3 Rights ::= '3'H
'''
namedValues = NamedValues(
('user-read', 0), ('user-write', 1),
('group-read', 2), ('group-write', 3),
('other-read', 4), ('other-write', 5)
)
group1 = Rights(('group-read', 'group-write'))
group2 = Rights('0011')
group3 = Rights(0x3)
"""
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
)
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
#: imposing constraints on |ASN.1| type initialization values.
subtypeSpec = constraint.ConstraintsIntersection()
#: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
#: representing symbolic aliases for numbers
namedValues = namedval.NamedValues()
# Optimization for faster codec lookup
typeId = base.AbstractSimpleAsn1Item.getTypeId()
defaultBinValue = defaultHexValue = noValue
def __init__(self, value=noValue, **kwargs):
if value is noValue:
if kwargs:
try:
value = self.fromBinaryString(kwargs.pop('binValue'), internalFormat=True)
except KeyError:
pass
try:
value = self.fromHexString(kwargs.pop('hexValue'), internalFormat=True)
except KeyError:
pass
if value is noValue:
if self.defaultBinValue is not noValue:
value = self.fromBinaryString(self.defaultBinValue, internalFormat=True)
elif self.defaultHexValue is not noValue:
value = self.fromHexString(self.defaultHexValue, internalFormat=True)
if 'namedValues' not in kwargs:
kwargs['namedValues'] = self.namedValues
base.AbstractSimpleAsn1Item.__init__(self, value, **kwargs)
def __str__(self):
return self.asBinary()
def __eq__(self, other):
other = self.prettyIn(other)
return self is other or self._value == other and len(self._value) == len(other)
def __ne__(self, other):
other = self.prettyIn(other)
return self._value != other or len(self._value) != len(other)
def __lt__(self, other):
other = self.prettyIn(other)
return len(self._value) < len(other) or len(self._value) == len(other) and self._value < other
def __le__(self, other):
other = self.prettyIn(other)
return len(self._value) <= len(other) or len(self._value) == len(other) and self._value <= other
def __gt__(self, other):
other = self.prettyIn(other)
return len(self._value) > len(other) or len(self._value) == len(other) and self._value > other
def __ge__(self, other):
other = self.prettyIn(other)
return len(self._value) >= len(other) or len(self._value) == len(other) and self._value >= other
# Immutable sequence object protocol
def __len__(self):
return len(self._value)
def __getitem__(self, i):
if i.__class__ is slice:
return self.clone([self[x] for x in range(*i.indices(len(self)))])
else:
length = len(self._value) - 1
if i > length or i < 0:
raise IndexError('bit index out of range')
return (self._value >> (length - i)) & 1
def __iter__(self):
length = len(self._value)
while length:
length -= 1
yield (self._value >> length) & 1
def __reversed__(self):
return reversed(tuple(self))
# arithmetic operators
def __add__(self, value):
value = self.prettyIn(value)
return self.clone(SizedInteger(self._value << len(value) | value).setBitLength(len(self._value) + len(value)))
def __radd__(self, value):
value = self.prettyIn(value)
return self.clone(SizedInteger(value << len(self._value) | self._value).setBitLength(len(self._value) + len(value)))
def __mul__(self, value):
bitString = self._value
while value > 1:
bitString <<= len(self._value)
bitString |= self._value
value -= 1
return self.clone(bitString)
def __rmul__(self, value):
return self * value
def __lshift__(self, count):
return self.clone(SizedInteger(self._value << count).setBitLength(len(self._value) + count))
def __rshift__(self, count):
return self.clone(SizedInteger(self._value >> count).setBitLength(max(0, len(self._value) - count)))
def __int__(self):
return self._value
def __float__(self):
return float(self._value)
if sys.version_info[0] < 3:
def __long__(self):
return self._value
def asNumbers(self):
"""Get |ASN.1| value as a sequence of 8-bit integers.
If |ASN.1| object length is not a multiple of 8, result
will be left-padded with zeros.
"""
return tuple(octets.octs2ints(self.asOctets()))
def asOctets(self):
"""Get |ASN.1| value as a sequence of octets.
If |ASN.1| object length is not a multiple of 8, result
will be left-padded with zeros.
"""
return integer.to_bytes(self._value, length=len(self))
def asInteger(self):
"""Get |ASN.1| value as a single integer value.
"""
return self._value
def asBinary(self):
"""Get |ASN.1| value as a text string of bits.
"""
binString = binary.bin(self._value)[2:]
return '0' * (len(self._value) - len(binString)) + binString
@classmethod
def fromHexString(cls, value, internalFormat=False, prepend=None):
"""Create a |ASN.1| object initialized from the hex string.
Parameters
----------
value: :class:`str`
Text string like 'DEADBEEF'
"""
try:
value = SizedInteger(value, 16).setBitLength(len(value) * 4)
except ValueError:
raise error.PyAsn1Error('%s.fromHexString() error: %s' % (cls.__name__, sys.exc_info()[1]))
if prepend is not None:
value = SizedInteger(
(SizedInteger(prepend) << len(value)) | value
).setBitLength(len(prepend) + len(value))
if not internalFormat:
value = cls(value)
return value
@classmethod
def fromBinaryString(cls, value, internalFormat=False, prepend=None):
"""Create a |ASN.1| object initialized from a string of '0' and '1'.
Parameters
----------
value: :class:`str`
Text string like '1010111'
"""
try:
value = SizedInteger(value or '0', 2).setBitLength(len(value))
except ValueError:
raise error.PyAsn1Error('%s.fromBinaryString() error: %s' % (cls.__name__, sys.exc_info()[1]))
if prepend is not None:
value = SizedInteger(
(SizedInteger(prepend) << len(value)) | value
).setBitLength(len(prepend) + len(value))
if not internalFormat:
value = cls(value)
return value
@classmethod
def fromOctetString(cls, value, internalFormat=False, prepend=None, padding=0):
"""Create a |ASN.1| object initialized from a string.
Parameters
----------
value: :class:`str` (Py2) or :class:`bytes` (Py3)
Text string like '\\\\x01\\\\xff' (Py2) or b'\\\\x01\\\\xff' (Py3)
"""
value = SizedInteger(integer.from_bytes(value) >> padding).setBitLength(len(value) * 8 - padding)
if prepend is not None:
value = SizedInteger(
(SizedInteger(prepend) << len(value)) | value
).setBitLength(len(prepend) + len(value))
if not internalFormat:
value = cls(value)
return value
def prettyIn(self, value):
if isinstance(value, SizedInteger):
return value
elif octets.isStringType(value):
if not value:
return SizedInteger(0).setBitLength(0)
elif value[0] == '\'': # "'1011'B" -- ASN.1 schema representation (deprecated)
if value[-2:] == '\'B':
return self.fromBinaryString(value[1:-2], internalFormat=True)
elif value[-2:] == '\'H':
return self.fromHexString(value[1:-2], internalFormat=True)
else:
raise error.PyAsn1Error(
'Bad BIT STRING value notation %s' % (value,)
)
elif self.namedValues and not value.isdigit(): # named bits like 'Urgent, Active'
names = [x.strip() for x in value.split(',')]
try:
bitPositions = [self.namedValues[name] for name in names]
except KeyError:
raise error.PyAsn1Error('unknown bit name(s) in %r' % (names,))
rightmostPosition = max(bitPositions)
number = 0
for bitPosition in bitPositions:
number |= 1 << (rightmostPosition - bitPosition)
return SizedInteger(number).setBitLength(rightmostPosition + 1)
elif value.startswith('0x'):
return self.fromHexString(value[2:], internalFormat=True)
elif value.startswith('0b'):
return self.fromBinaryString(value[2:], internalFormat=True)
else: # assume plain binary string like '1011'
return self.fromBinaryString(value, internalFormat=True)
elif isinstance(value, (tuple, list)):
return self.fromBinaryString(''.join([b and '1' or '0' for b in value]), internalFormat=True)
elif isinstance(value, BitString):
return SizedInteger(value).setBitLength(len(value))
elif isinstance(value, intTypes):
return SizedInteger(value)
else:
raise error.PyAsn1Error(
'Bad BitString initializer type \'%s\'' % (value,)
)
try:
# noinspection PyStatementEffect
all
except NameError: # Python 2.4
# noinspection PyShadowingBuiltins
def all(iterable):
for element in iterable:
if not element:
return False
return True
class OctetString(base.AbstractSimpleAsn1Item):
"""Create |ASN.1| schema or value object.
|ASN.1| objects are immutable and duck-type Python 2 :class:`str` or Python 3 :class:`bytes`.
When used in Unicode context, |ASN.1| type assumes "|encoding|" serialisation.
Keyword Args
------------
value: :class:`str`, :class:`bytes` or |ASN.1| object
string (Python 2) or bytes (Python 3), alternatively unicode object
(Python 2) or string (Python 3) representing character string to be
serialised into octets (note `encoding` parameter) or |ASN.1| object.
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
encoding: :py:class:`str`
Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
:class:`str` (Python 3) the payload when |ASN.1| object is used
in text string context.
binValue: :py:class:`str`
Binary string initializer to use instead of the *value*.
Example: '10110011'.
hexValue: :py:class:`str`
Hexadecimal string initializer to use instead of the *value*.
Example: 'DEADBEEF'.
Raises
------
:py:class:`~pyasn1.error.PyAsn1Error`
On constraint violation or bad initializer.
Examples
--------
.. code-block:: python
class Icon(OctetString):
'''
ASN.1 specification:
Icon ::= OCTET STRING
icon1 Icon ::= '001100010011001000110011'B
icon2 Icon ::= '313233'H
'''
icon1 = Icon.fromBinaryString('001100010011001000110011')
icon2 = Icon.fromHexString('313233')
"""
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
)
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
#: imposing constraints on |ASN.1| type initialization values.
subtypeSpec = constraint.ConstraintsIntersection()
# Optimization for faster codec lookup
typeId = base.AbstractSimpleAsn1Item.getTypeId()
defaultBinValue = defaultHexValue = noValue
encoding = 'iso-8859-1'
def __init__(self, value=noValue, **kwargs):
if kwargs:
if value is noValue:
try:
value = self.fromBinaryString(kwargs.pop('binValue'))
except KeyError:
pass
try:
value = self.fromHexString(kwargs.pop('hexValue'))
except KeyError:
pass
if value is noValue:
if self.defaultBinValue is not noValue:
value = self.fromBinaryString(self.defaultBinValue)
elif self.defaultHexValue is not noValue:
value = self.fromHexString(self.defaultHexValue)
if 'encoding' not in kwargs:
kwargs['encoding'] = self.encoding
base.AbstractSimpleAsn1Item.__init__(self, value, **kwargs)
if sys.version_info[0] <= 2:
def prettyIn(self, value):
if isinstance(value, str):
return value
elif isinstance(value, unicode):
try:
return value.encode(self.encoding)
except (LookupError, UnicodeEncodeError):
raise error.PyAsn1Error(
"Can't encode string '%s' with codec %s" % (value, self.encoding)
)
elif isinstance(value, (tuple, list)):
try:
return ''.join([chr(x) for x in value])
except ValueError:
raise error.PyAsn1Error(
"Bad %s initializer '%s'" % (self.__class__.__name__, value)
)
else:
return str(value)
def __str__(self):
return str(self._value)
def __unicode__(self):
try:
return self._value.decode(self.encoding)
except UnicodeDecodeError:
raise error.PyAsn1Error(
"Can't decode string '%s' with codec %s" % (self._value, self.encoding)
)
def asOctets(self):
return str(self._value)
def asNumbers(self):
return tuple([ord(x) for x in self._value])
else:
def prettyIn(self, value):
if isinstance(value, bytes):
return value
elif isinstance(value, str):
try:
return value.encode(self.encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
"Can't encode string '%s' with '%s' codec" % (value, self.encoding)
)
elif isinstance(value, OctetString): # a shortcut, bytes() would work the same way
return value.asOctets()
elif isinstance(value, base.AbstractSimpleAsn1Item): # this mostly targets Integer objects
return self.prettyIn(str(value))
elif isinstance(value, (tuple, list)):
return self.prettyIn(bytes(value))
else:
return bytes(value)
def __str__(self):
try:
return self._value.decode(self.encoding)
except UnicodeDecodeError:
raise error.PyAsn1Error(
"Can't decode string '%s' with '%s' codec at '%s'" % (self._value, self.encoding, self.__class__.__name__)
)
def __bytes__(self):
return bytes(self._value)
def asOctets(self):
return bytes(self._value)
def asNumbers(self):
return tuple(self._value)
#
# Normally, `.prettyPrint()` is called from `__str__()`. Historically,
# OctetString.prettyPrint() used to return hexified payload
# representation in cases when non-printable content is present. At the
# same time `str()` used to produce either octet-stream (Py2) or
# text (Py3) representations.
#
# Therefore `OctetString.__str__()` -> `.prettyPrint()` call chain is
# reversed to preserve the original behaviour.
#
# Eventually we should deprecate `.prettyPrint()` / `.prettyOut()` harness
# and end up with just `__str__()` producing hexified representation while
# both text and octet-stream representation should only be requested via
# the `.asOctets()` method.
#
# Note: ASN.1 OCTET STRING is never mean to contain text!
#
def prettyOut(self, value):
return value
def prettyPrint(self, scope=0):
# first see if subclass has its own .prettyOut()
value = self.prettyOut(self._value)
if value is not self._value:
return value
numbers = self.asNumbers()
for x in numbers:
# hexify if needed
if x < 32 or x > 126:
return '0x' + ''.join(('%.2x' % x for x in numbers))
else:
# this prevents infinite recursion
return OctetString.__str__(self)
@staticmethod
def fromBinaryString(value):
"""Create a |ASN.1| object initialized from a string of '0' and '1'.
Parameters
----------
value: :class:`str`
Text string like '1010111'
"""
bitNo = 8
byte = 0
r = []
for v in value:
if bitNo:
bitNo -= 1
else:
bitNo = 7
r.append(byte)
byte = 0
if v in ('0', '1'):
v = int(v)
else:
raise error.PyAsn1Error(
'Non-binary OCTET STRING initializer %s' % (v,)
)
byte |= v << bitNo
r.append(byte)
return octets.ints2octs(r)
@staticmethod
def fromHexString(value):
"""Create a |ASN.1| object initialized from the hex string.
Parameters
----------
value: :class:`str`
Text string like 'DEADBEEF'
"""
r = []
p = []
for v in value:
if p:
r.append(int(p + v, 16))
p = None
else:
p = v
if p:
r.append(int(p + '0', 16))
return octets.ints2octs(r)
    # Immutable sequence object protocol
    def __len__(self):
        return len(self._value)
    def __getitem__(self, i):
        # slicing returns a clone of the same ASN.1 type; indexing returns
        # the raw octet value
        if i.__class__ is slice:
            return self.clone(self._value[i])
        else:
            return self._value[i]
    def __iter__(self):
        return iter(self._value)
    def __contains__(self, value):
        return value in self._value
    def __add__(self, value):
        # right-hand operand is normalized through prettyIn() first
        return self.clone(self._value + self.prettyIn(value))
    def __radd__(self, value):
        return self.clone(self.prettyIn(value) + self._value)
    def __mul__(self, value):
        return self.clone(self._value * value)
    def __rmul__(self, value):
        return self * value
    def __int__(self):
        return int(self._value)
    def __float__(self):
        return float(self._value)
    def __reversed__(self):
        return reversed(self._value)
class Null(OctetString):
    """Create |ASN.1| schema or value object.
    |ASN.1| objects are immutable and duck-type Python :class:`str` objects (always empty).
    Keyword Args
    ------------
    value: :class:`str` or :py:class:`~pyasn1.type.univ.Null` object
        Python empty string literal or any object that evaluates to `False`
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    Examples
    --------
    .. code-block:: python
        class Ack(Null):
            '''
            ASN.1 specification:
            Ack ::= NULL
            '''
        ack = Ack('')
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
    )
    # NULL carries no payload: only the empty octet-string value is accepted
    subtypeSpec = OctetString.subtypeSpec + constraint.SingleValueConstraint(octets.str2octs(''))
    # Optimization for faster codec lookup
    typeId = OctetString.getTypeId()
    def prettyIn(self, value):
        # truthy initializers pass through so that the subtype constraint can
        # reject them later; any falsy initializer normalizes to empty octets
        if value:
            return value
        return octets.str2octs('')
# Integer types differ between Python 2 (int and long) and Python 3 (int only)
if sys.version_info[0] <= 2:
    intTypes = (int, long)
else:
    intTypes = (int,)
# Acceptable numeric types for REAL mantissa validation below
numericTypes = intTypes + (float,)
class ObjectIdentifier(base.AbstractSimpleAsn1Item):
    """Create |ASN.1| schema or value object.
    |ASN.1| objects are immutable and duck-type Python :class:`tuple` objects (tuple of non-negative integers).
    Keyword Args
    ------------
    value: :class:`tuple`, :class:`str` or |ASN.1| object
        Python sequence of :class:`int` or string literal or |ASN.1| object.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    Examples
    --------
    .. code-block:: python

        class ID(ObjectIdentifier):
            '''
            ASN.1 specification:
            ID ::= OBJECT IDENTIFIER
            id-edims ID ::= { joint-iso-itu-t mhs-motif(6) edims(7) }
            id-bp ID ::= { id-edims 11 }
            '''
        id_edims = ID('2.6.7')
        id_bp = id_edims + (11,)
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
    )

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Optimization for faster codec lookup
    typeId = base.AbstractSimpleAsn1Item.getTypeId()

    def __add__(self, other):
        # appending a tuple of sub-OIDs yields a longer OID
        return self.clone(self._value + other)

    def __radd__(self, other):
        return self.clone(other + self._value)

    def asTuple(self):
        # native representation: tuple of non-negative integers
        return self._value

    # Sequence object protocol

    def __len__(self):
        return len(self._value)

    def __getitem__(self, i):
        # slicing returns a new OID object, indexing a plain integer
        if i.__class__ is slice:
            return self.clone(self._value[i])
        else:
            return self._value[i]

    def __iter__(self):
        return iter(self._value)

    def __contains__(self, value):
        return value in self._value

    def index(self, suboid):
        return self._value.index(suboid)

    def isPrefixOf(self, other):
        """Indicate if this |ASN.1| object is a prefix of other |ASN.1| object.
        Parameters
        ----------
        other: |ASN.1| object
            |ASN.1| object
        Returns
        -------
        : :class:`bool`
            :class:`True` if this |ASN.1| object is a parent (e.g. prefix) of the other |ASN.1| object
            or :class:`False` otherwise.
        """
        prefixLen = len(self)
        if prefixLen <= len(other) and self._value[:prefixLen] == other[:prefixLen]:
            return True
        return False

    def prettyIn(self, value):
        """Normalize any accepted initializer into a tuple of non-negative ints."""
        if isinstance(value, ObjectIdentifier):
            return tuple(value)
        elif octets.isStringType(value):
            if '-' in value:
                # Fix: this branch does not run inside an exception handler, so
                # the former sys.exc_info()[1] interpolation was stale/None
                raise error.PyAsn1Error(
                    'Malformed Object ID %s at %s' % (value, self.__class__.__name__)
                )
            try:
                return tuple([int(subOid) for subOid in value.split('.') if subOid])
            except ValueError:
                raise error.PyAsn1Error(
                    'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
                )
        try:
            tupleOfInts = tuple([int(subOid) for subOid in value if subOid >= 0])
        except (ValueError, TypeError):
            raise error.PyAsn1Error(
                'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
            )
        # negative sub-OIDs were filtered out above; a length mismatch means
        # the initializer contained at least one negative entry
        if len(tupleOfInts) == len(value):
            return tupleOfInts
        raise error.PyAsn1Error('Malformed Object ID %s at %s' % (value, self.__class__.__name__))

    def prettyOut(self, value):
        # dotted-decimal text form, e.g. '1.3.6.1'
        return '.'.join([str(x) for x in value])
class Real(base.AbstractSimpleAsn1Item):
    """Create |ASN.1| schema or value object.
    |ASN.1| objects are immutable and duck-type Python :class:`float` objects.
    Additionally, |ASN.1| objects behave like a :class:`tuple` in which case its
    elements are mantissa, base and exponent.
    Keyword Args
    ------------
    value: :class:`tuple`, :class:`float` or |ASN.1| object
        Python sequence of :class:`int` (representing mantissa, base and
        exponent) or float instance or *Real* class instance.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    Examples
    --------
    .. code-block:: python

        class Pi(Real):
            '''
            ASN.1 specification:
            Pi ::= REAL
            pi Pi ::= { mantissa 314159, base 10, exponent -5 }
            '''
        pi = Pi((314159, 10, -5))
    """
    binEncBase = None  # binEncBase = 16 is recommended for large numbers

    try:
        _plusInf = float('inf')
        _minusInf = float('-inf')
        _inf = _plusInf, _minusInf
    except ValueError:
        # Infinity support is platform and Python dependent
        _plusInf = _minusInf = None
        _inf = ()

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
    )

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Optimization for faster codec lookup
    typeId = base.AbstractSimpleAsn1Item.getTypeId()

    @staticmethod
    def __normalizeBase10(value):
        # Strip trailing decimal zeroes off the mantissa while bumping the
        # exponent, e.g. (3140, 10, -3) -> (314, 10, -2).
        m, b, e = value
        while m and m % 10 == 0:
            # Fix: floor division keeps an integer mantissa exact; plain '/'
            # silently turned it into a float on Python 3. The loop guard
            # (m % 10 == 0) guarantees the division is exact.
            m //= 10
            e += 1
        return m, b, e

    def prettyIn(self, value):
        # Accept (mantissa, base, exponent) tuples, ints, floats, numeric
        # strings and other Real instances; normalize to a canonical
        # (mantissa, base, exponent) tuple or a float infinity marker.
        if isinstance(value, tuple) and len(value) == 3:
            if (not isinstance(value[0], numericTypes) or
                    not isinstance(value[1], intTypes) or
                    not isinstance(value[2], intTypes)):
                raise error.PyAsn1Error('Lame Real value syntax: %s' % (value,))
            if (isinstance(value[0], float) and
                    self._inf and value[0] in self._inf):
                return value[0]
            if value[1] not in (2, 10):
                raise error.PyAsn1Error(
                    'Prohibited base for Real value: %s' % (value[1],)
                )
            if value[1] == 10:
                value = self.__normalizeBase10(value)
            return value
        elif isinstance(value, intTypes):
            return self.__normalizeBase10((value, 10, 0))
        elif isinstance(value, float) or octets.isStringType(value):
            if octets.isStringType(value):
                try:
                    value = float(value)
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad real value syntax: %s' % (value,)
                    )
            if self._inf and value in self._inf:
                return value
            else:
                e = 0
                # scale the float up until it is integral, tracking the
                # decimal exponent shift
                while int(value) != value:
                    value *= 10
                    e -= 1
                return self.__normalizeBase10((int(value), 10, e))
        elif isinstance(value, Real):
            return tuple(value)
        raise error.PyAsn1Error(
            'Bad real value syntax: %s' % (value,)
        )

    def prettyPrint(self, scope=0):
        try:
            return self.prettyOut(float(self))
        except OverflowError:
            return '<overflow>'

    @property
    def isPlusInf(self):
        """Indicate PLUS-INFINITY object value
        Returns
        -------
        : :class:`bool`
            :class:`True` if calling object represents plus infinity
            or :class:`False` otherwise.
        """
        return self._value == self._plusInf

    @property
    def isMinusInf(self):
        """Indicate MINUS-INFINITY object value
        Returns
        -------
        : :class:`bool`
            :class:`True` if calling object represents minus infinity
            or :class:`False` otherwise.
        """
        return self._value == self._minusInf

    @property
    def isInf(self):
        return self._value in self._inf

    def __add__(self, value):
        return self.clone(float(self) + value)

    def __radd__(self, value):
        return self + value

    def __mul__(self, value):
        return self.clone(float(self) * value)

    def __rmul__(self, value):
        return self * value

    def __sub__(self, value):
        return self.clone(float(self) - value)

    def __rsub__(self, value):
        return self.clone(value - float(self))

    def __mod__(self, value):
        return self.clone(float(self) % value)

    def __rmod__(self, value):
        return self.clone(value % float(self))

    def __pow__(self, value, modulo=None):
        return self.clone(pow(float(self), value, modulo))

    def __rpow__(self, value):
        return self.clone(pow(value, float(self)))

    if sys.version_info[0] <= 2:
        def __div__(self, value):
            return self.clone(float(self) / value)

        def __rdiv__(self, value):
            return self.clone(value / float(self))
    else:
        def __truediv__(self, value):
            return self.clone(float(self) / value)

        def __rtruediv__(self, value):
            return self.clone(value / float(self))

    def __divmod__(self, value):
        # NOTE(review): returns only the floor quotient rather than a
        # (quotient, remainder) pair -- preserved as-is for compatibility
        return self.clone(float(self) // value)

    def __rdivmod__(self, value):
        return self.clone(value // float(self))

    def __int__(self):
        return int(float(self))

    if sys.version_info[0] <= 2:
        def __long__(self):
            return long(float(self))

    def __float__(self):
        # infinity is stored as the float itself; finite values are stored
        # as (mantissa, base, exponent) tuples
        if self._value in self._inf:
            return self._value
        else:
            return float(
                self._value[0] * pow(self._value[1], self._value[2])
            )

    def __abs__(self):
        return self.clone(abs(float(self)))

    def __pos__(self):
        return self.clone(+float(self))

    def __neg__(self):
        return self.clone(-float(self))

    def __round__(self, n=0):
        r = round(float(self), n)
        if n:
            return self.clone(r)
        else:
            return r

    def __floor__(self):
        return self.clone(math.floor(float(self)))

    def __ceil__(self):
        return self.clone(math.ceil(float(self)))

    if sys.version_info[0:2] > (2, 5):
        def __trunc__(self):
            return self.clone(math.trunc(float(self)))

    def __lt__(self, value):
        return float(self) < value

    def __le__(self, value):
        return float(self) <= value

    def __eq__(self, value):
        return float(self) == value

    def __ne__(self, value):
        return float(self) != value

    def __gt__(self, value):
        return float(self) > value

    def __ge__(self, value):
        return float(self) >= value

    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return bool(float(self))
    else:
        def __bool__(self):
            return bool(float(self))

        # defining __eq__ on Python 3 clears inherited __hash__; restore it
        __hash__ = base.AbstractSimpleAsn1Item.__hash__

    def __getitem__(self, idx):
        # tuple-style access to (mantissa, base, exponent); meaningless for
        # an infinity marker
        if self._value in self._inf:
            raise error.PyAsn1Error('Invalid infinite value operation')
        else:
            return self._value[idx]

    # compatibility stubs

    def isPlusInfinity(self):
        return self.isPlusInf

    def isMinusInfinity(self):
        return self.isMinusInf

    def isInfinity(self):
        return self.isInf
class Enumerated(Integer):
    """Create |ASN.1| type or object.
    |ASN.1| objects are immutable and duck-type Python :class:`int` objects.
    Keyword Args
    ------------
    value: :class:`int`, :class:`str` or |ASN.1| object
        Python integer or string literal or |ASN.1| class instance.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
        Object representing non-default symbolic aliases for numbers
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    Examples
    --------
    .. code-block:: python
        class RadioButton(Enumerated):
            '''
            ASN.1 specification:
            RadioButton ::= ENUMERATED { button1(0), button2(1),
                                         button3(2) }
            selected-by-default RadioButton ::= button1
            '''
            namedValues = NamedValues(
                ('button1', 0), ('button2', 1),
                ('button3', 2)
            )
        selected_by_default = RadioButton('button1')
    """
    # ENUMERATED behaves exactly like INTEGER; only the universal tag differs
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
    )
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    # Optimization for faster codec lookup
    typeId = Integer.getTypeId()
    #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
    #: representing symbolic aliases for numbers
    namedValues = namedval.NamedValues()
# "Structured" ASN.1 types
class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item):
    """Create |ASN.1| type.
    |ASN.1| objects are mutable and duck-type Python :class:`list` objects.
    Keyword Args
    ------------
    componentType : :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
        A pyasn1 object representing ASN.1 type allowed within |ASN.1| type
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing collection size constraint
    Examples
    --------
    .. code-block:: python

        class LotteryDraw(SequenceOf):  # SetOf is similar
            '''
            ASN.1 specification:
            LotteryDraw ::= SEQUENCE OF INTEGER
            '''
            componentType = Integer()
        lotteryDraw = LotteryDraw()
        lotteryDraw.extend([123, 456, 789])
    """
    def __init__(self, *args, **kwargs):
        # support positional params for backward compatibility
        if args:
            for key, value in zip(('componentType', 'tagSet',
                                   'subtypeSpec', 'sizeSpec'), args):
                if key in kwargs:
                    raise error.PyAsn1Error('Conflicting positional and keyword params!')
                # Fix: map each positional arg onto its OWN keyword slot.
                # Previously every positional was assigned to 'componentType',
                # so positional tagSet/subtypeSpec/sizeSpec silently clobbered
                # the component type and were otherwise lost.
                kwargs[key] = value
        base.AbstractConstructedAsn1Item.__init__(self, **kwargs)

    # Python list protocol

    def __getitem__(self, idx):
        try:
            return self.getComponentByPosition(idx)
        except error.PyAsn1Error:
            # duck-typing list
            raise IndexError(sys.exc_info()[1])

    def __setitem__(self, idx, value):
        try:
            self.setComponentByPosition(idx, value)
        except error.PyAsn1Error:
            # duck-typing list
            raise IndexError(sys.exc_info()[1])

    def clear(self):
        # drop all accumulated component values
        self._componentValues = []

    def append(self, value):
        # assigning at index len(self) extends the sequence by one
        self[len(self)] = value

    def count(self, value):
        return self._componentValues.count(value)

    def extend(self, values):
        for value in values:
            self.append(value)

    def index(self, value, start=0, stop=None):
        if stop is None:
            stop = len(self)
        try:
            return self._componentValues.index(value, start, stop)
        except error.PyAsn1Error:
            # mimic list.index() failure mode for ASN.1 comparison errors
            raise ValueError(sys.exc_info()[1])

    def reverse(self):
        self._componentValues.reverse()

    def sort(self, key=None, reverse=False):
        self._componentValues.sort(key=key, reverse=reverse)

    def __iter__(self):
        return iter(self._componentValues)

    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # copy components into myClone; constructed components honour
        # cloneValueFlag so contained values can optionally be carried over
        for idx, componentValue in enumerate(self._componentValues):
            if componentValue is not noValue:
                if isinstance(componentValue, base.AbstractConstructedAsn1Item):
                    myClone.setComponentByPosition(
                        idx, componentValue.clone(cloneValueFlag=cloneValueFlag)
                    )
                else:
                    myClone.setComponentByPosition(idx, componentValue.clone())

    def getComponentByPosition(self, idx, default=noValue, instantiate=True):
        """Return |ASN.1| type component value by position.
        Equivalent to Python sequence subscription operation (e.g. `[]`).
        Parameters
        ----------
        idx : :class:`int`
            Component index (zero-based). Must either refer to an existing
            component or to N+1 component (if *componentType* is set). In the latter
            case a new component type gets instantiated and appended to the |ASN.1|
            sequence.
        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.
        instantiate: :class:`bool`
            If `True` (default), inner component will be automatically instantiated.
            If 'False' either existing component or the `noValue` object will be
            returned.
        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            Instantiate |ASN.1| component type or return existing component value
        Examples
        --------
        .. code-block:: python
            # can also be SetOf
            class MySequenceOf(SequenceOf):
                componentType = OctetString()
            s = MySequenceOf()
            # returns component #0 with `.isValue` property False
            s.getComponentByPosition(0)
            # returns None
            s.getComponentByPosition(0, default=None)
            s.clear()
            # returns noValue
            s.getComponentByPosition(0, instantiate=False)
            # sets component #0 to OctetString() ASN.1 schema
            # object and returns it
            s.getComponentByPosition(0, instantiate=True)
            # sets component #0 to ASN.1 value object
            s.setComponentByPosition(0, 'ABCD')
            # returns OctetString('ABCD') value object
            s.getComponentByPosition(0, instantiate=False)
            s.clear()
            # returns noValue
            s.getComponentByPosition(0, instantiate=False)
        """
        try:
            componentValue = self._componentValues[idx]
        except IndexError:
            if not instantiate:
                return default
            # instantiate a schema object for the missing component on demand
            self.setComponentByPosition(idx)
            componentValue = self._componentValues[idx]
        if default is noValue or componentValue.isValue:
            return componentValue
        else:
            return default

    def setComponentByPosition(self, idx, value=noValue,
                               verifyConstraints=True,
                               matchTags=True,
                               matchConstraints=True):
        """Assign |ASN.1| type component by position.
        Equivalent to Python sequence item assignment operation (e.g. `[]`)
        or list.append() (when idx == len(self)).
        Parameters
        ----------
        idx: :class:`int`
            Component index (zero-based). Must either refer to existing
            component or to N+1 component. In the latter case a new component
            type gets instantiated (if *componentType* is set, or given ASN.1
            object is taken otherwise) and appended to the |ASN.1| sequence.
        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component.
        verifyConstraints: :class:`bool`
            If `False`, skip constraints validation
        matchTags: :class:`bool`
            If `False`, skip component tags matching
        matchConstraints: :class:`bool`
            If `False`, skip component constraints matching
        Returns
        -------
        self
        Raises
        ------
        IndexError:
            When idx > len(self)
        """
        componentType = self.componentType

        try:
            currentValue = self._componentValues[idx]
        except IndexError:
            currentValue = noValue
            # only appending at exactly len(self) is allowed; anything beyond
            # would leave holes in the sequence
            if len(self._componentValues) < idx:
                raise error.PyAsn1Error('Component index out of range')

        if value is noValue:
            if componentType is not None:
                value = componentType.clone()
            elif currentValue is noValue:
                raise error.PyAsn1Error('Component type not defined')
        elif not isinstance(value, base.Asn1Item):
            # plain Python value: coerce through the schema (or the existing
            # component) so it becomes a proper ASN.1 object
            if componentType is not None and isinstance(componentType, base.AbstractSimpleAsn1Item):
                value = componentType.clone(value=value)
            elif currentValue is not noValue and isinstance(currentValue, base.AbstractSimpleAsn1Item):
                value = currentValue.clone(value=value)
            else:
                raise error.PyAsn1Error('Non-ASN.1 value %r and undefined component type at %r' % (value, self))
        elif componentType is not None:
            if self.strictConstraints:
                if not componentType.isSameTypeWith(value, matchTags, matchConstraints):
                    raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))
            else:
                if not componentType.isSuperTypeOf(value, matchTags, matchConstraints):
                    raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))

        if verifyConstraints and value.isValue:
            try:
                self.subtypeSpec(value, idx)
            except error.PyAsn1Error:
                exType, exValue, exTb = sys.exc_info()
                raise exType('%s at %s' % (exValue, self.__class__.__name__))

        if currentValue is noValue:
            self._componentValues.append(value)
        else:
            self._componentValues[idx] = value

        return self

    @property
    def componentTagMap(self):
        # None when no componentType is set (any ASN.1 type accepted)
        if self.componentType is not None:
            return self.componentType.tagMap

    def prettyPrint(self, scope=0):
        scope += 1
        representation = self.__class__.__name__ + ':\n'
        for idx, componentValue in enumerate(self._componentValues):
            representation += ' ' * scope
            if (componentValue is noValue and
                    self.componentType is not None):
                representation += '<empty>'
            else:
                representation += componentValue.prettyPrint(scope)
        return representation

    def prettyPrintType(self, scope=0):
        scope += 1
        representation = '%s -> %s {\n' % (self.tagSet, self.__class__.__name__)
        if self.componentType is not None:
            representation += ' ' * scope
            representation += self.componentType.prettyPrintType(scope)
        return representation + '\n' + ' ' * (scope - 1) + '}'

    @property
    def isValue(self):
        """Indicate that |ASN.1| object represents ASN.1 value.
        If *isValue* is `False` then this object represents just ASN.1 schema.
        If *isValue* is `True` then, in addition to its ASN.1 schema features,
        this object can also be used like a Python built-in object (e.g. `int`,
        `str`, `dict` etc.).
        Returns
        -------
        : :class:`bool`
            :class:`False` if object represents just ASN.1 schema.
            :class:`True` if object represents ASN.1 schema and can be used as a normal value.
        Note
        ----
        There is an important distinction between PyASN1 schema and value objects.
        The PyASN1 schema objects can only participate in ASN.1 schema-related
        operations (e.g. defining or testing the structure of the data). Most
        obvious uses of ASN.1 schema is to guide serialisation codecs whilst
        encoding/decoding serialised ASN.1 contents.
        The PyASN1 value objects can **additionally** participate in many operations
        involving regular Python objects (e.g. arithmetic, comprehension etc).
        """
        for componentValue in self._componentValues:
            if componentValue is noValue or not componentValue.isValue:
                return False
        return True
class SequenceOf(SequenceOfAndSetOfBase):
    __doc__ = SequenceOfAndSetOfBase.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
    )
    #: Default :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
    #: object representing ASN.1 type allowed within |ASN.1| type
    #: (`None` means any ASN.1 type is acceptable)
    componentType = None
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing size constraint on |ASN.1| objects
    sizeSpec = constraint.ConstraintsIntersection()
    # Disambiguation ASN.1 types identification
    typeId = SequenceOfAndSetOfBase.getTypeId()
class SetOf(SequenceOfAndSetOfBase):
    __doc__ = SequenceOfAndSetOfBase.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
    )
    #: Default :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
    #: object representing ASN.1 type allowed within |ASN.1| type
    #: (`None` means any ASN.1 type is acceptable)
    componentType = None
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing size constraint on |ASN.1| objects
    sizeSpec = constraint.ConstraintsIntersection()
    # Disambiguation ASN.1 types identification
    typeId = SequenceOfAndSetOfBase.getTypeId()
class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
"""Create |ASN.1| type.
|ASN.1| objects are mutable and duck-type Python :class:`dict` objects.
Keyword Args
------------
componentType: :py:class:`~pyasn1.type.namedtype.NamedType`
Object holding named ASN.1 types allowed within this collection
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing collection size constraint
Examples
--------
.. code-block:: python
class Description(Sequence): # Set is similar
'''
ASN.1 specification:
Description ::= SEQUENCE {
surname IA5String,
first-name IA5String OPTIONAL,
age INTEGER DEFAULT 40
}
'''
componentType = NamedTypes(
NamedType('surname', IA5String()),
OptionalNamedType('first-name', IA5String()),
DefaultedNamedType('age', Integer(40))
)
descr = Description()
descr['surname'] = 'Smith'
descr['first-name'] = 'John'
"""
#: Default :py:class:`~pyasn1.type.namedtype.NamedTypes`
#: object representing named ASN.1 types allowed within |ASN.1| type
componentType = namedtype.NamedTypes()
class DynamicNames(object):
"""Fields names/positions mapping for component-less objects"""
def __init__(self):
self._keyToIdxMap = {}
self._idxToKeyMap = {}
def __len__(self):
return len(self._keyToIdxMap)
def __contains__(self, item):
return item in self._keyToIdxMap or item in self._idxToKeyMap
def __iter__(self):
return (self._idxToKeyMap[idx] for idx in range(len(self._idxToKeyMap)))
def __getitem__(self, item):
try:
return self._keyToIdxMap[item]
except KeyError:
return self._idxToKeyMap[item]
def getNameByPosition(self, idx):
try:
return self._idxToKeyMap[idx]
except KeyError:
raise error.PyAsn1Error('Type position out of range')
def getPositionByName(self, name):
try:
return self._keyToIdxMap[name]
except KeyError:
raise error.PyAsn1Error('Name %s not found' % (name,))
def addField(self, idx):
self._keyToIdxMap['field-%d' % idx] = idx
self._idxToKeyMap[idx] = 'field-%d' % idx
    def __init__(self, **kwargs):
        base.AbstractConstructedAsn1Item.__init__(self, **kwargs)
        self._componentTypeLen = len(self.componentType)
        # A non-zero length means field names come from the schema; otherwise
        # a DynamicNames registry accumulates them at runtime. Note the
        # int-or-DynamicNames duality: truthiness of _componentTypeLen is what
        # downstream code keys on.
        self._dynamicNames = self._componentTypeLen or self.DynamicNames()
def __getitem__(self, idx):
if octets.isStringType(idx):
try:
return self.getComponentByName(idx)
except error.PyAsn1Error:
# duck-typing dict
raise KeyError(sys.exc_info()[1])
else:
try:
return self.getComponentByPosition(idx)
except error.PyAsn1Error:
# duck-typing list
raise IndexError(sys.exc_info()[1])
def __setitem__(self, idx, value):
if octets.isStringType(idx):
try:
self.setComponentByName(idx, value)
except error.PyAsn1Error:
# duck-typing dict
raise KeyError(sys.exc_info()[1])
else:
try:
self.setComponentByPosition(idx, value)
except error.PyAsn1Error:
# duck-typing list
raise IndexError(sys.exc_info()[1])
def __contains__(self, key):
if self._componentTypeLen:
return key in self.componentType
else:
return key in self._dynamicNames
    def __iter__(self):
        # iterate field names: schema-defined ones if present, else dynamic
        return iter(self.componentType or self._dynamicNames)
    # Python dict protocol
    def values(self):
        # yield component values in positional order
        for idx in range(self._componentTypeLen or len(self._dynamicNames)):
            yield self[idx]
    def keys(self):
        # field names, in the same order values() yields components
        return iter(self)
    def items(self):
        # yield (field name, component value) pairs
        for idx in range(self._componentTypeLen or len(self._dynamicNames)):
            if self._componentTypeLen:
                yield self.componentType[idx].name, self[idx]
            else:
                yield self._dynamicNames[idx], self[idx]
def update(self, *iterValue, **mappingValue):
for k, v in iterValue:
self[k] = v
for k in mappingValue:
self[k] = mappingValue[k]
    def clear(self):
        # reset both component storage and the runtime field-name registry
        self._componentValues = []
        self._dynamicNames = self.DynamicNames()
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # copy components into myClone; constructed components honour
        # cloneValueFlag so contained values can optionally be carried over
        for idx, componentValue in enumerate(self._componentValues):
            if componentValue is not noValue:
                if isinstance(componentValue, base.AbstractConstructedAsn1Item):
                    myClone.setComponentByPosition(
                        idx, componentValue.clone(cloneValueFlag=cloneValueFlag)
                    )
                else:
                    myClone.setComponentByPosition(idx, componentValue.clone())
    def getComponentByName(self, name, default=noValue, instantiate=True):
        """Returns |ASN.1| type component by name.
        Equivalent to Python :class:`dict` subscription operation (e.g. `[]`).
        Parameters
        ----------
        name: :class:`str`
            |ASN.1| type component name
        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.
        instantiate: :class:`bool`
            If `True` (default), inner component will be automatically instantiated.
            If 'False' either existing component or the `noValue` object will be
            returned.
        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            Instantiate |ASN.1| component type or return existing component value
        """
        if self._componentTypeLen:
            idx = self.componentType.getPositionByName(name)
        else:
            try:
                idx = self._dynamicNames.getPositionByName(name)
            except KeyError:
                # NOTE(review): DynamicNames.getPositionByName raises
                # PyAsn1Error, not KeyError -- confirm which is intended
                raise error.PyAsn1Error('Name %s not found' % (name,))
        return self.getComponentByPosition(idx, default=default, instantiate=instantiate)
    def setComponentByName(self, name, value=noValue,
                           verifyConstraints=True,
                           matchTags=True,
                           matchConstraints=True):
        """Assign |ASN.1| type component by name.
        Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`).
        Parameters
        ----------
        name: :class:`str`
            |ASN.1| type component name
        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component.
        verifyConstraints: :class:`bool`
            If `False`, skip constraints validation
        matchTags: :class:`bool`
            If `False`, skip component tags matching
        matchConstraints: :class:`bool`
            If `False`, skip component constraints matching
        Returns
        -------
        self
        """
        if self._componentTypeLen:
            idx = self.componentType.getPositionByName(name)
        else:
            try:
                idx = self._dynamicNames.getPositionByName(name)
            except KeyError:
                # NOTE(review): see getComponentByName -- same KeyError vs
                # PyAsn1Error mismatch applies here
                raise error.PyAsn1Error('Name %s not found' % (name,))
        return self.setComponentByPosition(
            idx, value, verifyConstraints, matchTags, matchConstraints
        )
    def getComponentByPosition(self, idx, default=noValue, instantiate=True):
        """Returns |ASN.1| type component by index.
        Equivalent to Python sequence subscription operation (e.g. `[]`).
        Parameters
        ----------
        idx: :class:`int`
            Component index (zero-based). Must either refer to an existing
            component or (if *componentType* is set) new ASN.1 schema object gets
            instantiated.
        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.
        instantiate: :class:`bool`
            If `True` (default), inner component will be automatically instantiated.
            If 'False' either existing component or the `noValue` object will be
            returned.
        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            a PyASN1 object
        Examples
        --------
        .. code-block:: python
            # can also be Set
            class MySequence(Sequence):
                componentType = NamedTypes(
                    NamedType('id', OctetString())
                )
            s = MySequence()
            # returns component #0 with `.isValue` property False
            s.getComponentByPosition(0)
            # returns None
            s.getComponentByPosition(0, default=None)
            s.clear()
            # returns noValue
            s.getComponentByPosition(0, instantiate=False)
            # sets component #0 to OctetString() ASN.1 schema
            # object and returns it
            s.getComponentByPosition(0, instantiate=True)
            # sets component #0 to ASN.1 value object
            s.setComponentByPosition(0, 'ABCD')
            # returns OctetString('ABCD') value object
            s.getComponentByPosition(0, instantiate=False)
            s.clear()
            # returns noValue
            s.getComponentByPosition(0, instantiate=False)
        """
        try:
            componentValue = self._componentValues[idx]
        except IndexError:
            componentValue = noValue
        # without instantiation, only hand back a real (non-schema) value
        if not instantiate:
            if componentValue is noValue or not componentValue.isValue:
                return default
            else:
                return componentValue
        # instantiate a schema object for the missing component on demand
        if componentValue is noValue:
            self.setComponentByPosition(idx)
            componentValue = self._componentValues[idx]
        if default is noValue or componentValue.isValue:
            return componentValue
        else:
            return default
def setComponentByPosition(self, idx, value=noValue,
verifyConstraints=True,
matchTags=True,
matchConstraints=True):
"""Assign |ASN.1| type component by position.
Equivalent to Python sequence item assignment operation (e.g. `[]`).
Parameters
----------
idx : :class:`int`
Component index (zero-based). Must either refer to existing
component (if *componentType* is set) or to N+1 component
otherwise. In the latter case a new component of given ASN.1
type gets instantiated and appended to |ASN.1| sequence.
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints : :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
"""
componentType = self.componentType
componentTypeLen = self._componentTypeLen
try:
currentValue = self._componentValues[idx]
except IndexError:
currentValue = noValue
if componentTypeLen:
if componentTypeLen < idx:
raise error.PyAsn1Error('component index out of range')
self._componentValues = [noValue] * componentTypeLen
if value is noValue:
if componentTypeLen:
value = componentType.getTypeByPosition(idx).clone()
elif currentValue is noValue:
raise error.PyAsn1Error('Component type not defined')
elif not isinstance(value, base.Asn1Item):
if componentTypeLen:
subComponentType = componentType.getTypeByPosition(idx)
if isinstance(subComponentType, base.AbstractSimpleAsn1Item):
value = subComponentType.clone(value=value)
else:
raise error.PyAsn1Error('%s can cast only scalar values' % componentType.__class__.__name__)
elif currentValue is not noValue and isinstance(currentValue, base.AbstractSimpleAsn1Item):
value = currentValue.clone(value=value)
else:
raise error.PyAsn1Error('%s undefined component type' % componentType.__class__.__name__)
elif (matchTags or matchConstraints) and componentTypeLen:
subComponentType = componentType.getTypeByPosition(idx)
if subComponentType is not noValue:
subtypeChecker = (self.strictConstraints and
subComponentType.isSameTypeWith or
subComponentType.isSuperTypeOf)
if not subtypeChecker(value, matchTags, matchConstraints):
if not componentType[idx].openType:
raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))
if verifyConstraints and value.isValue:
try:
self.subtypeSpec(value, idx)
except error.PyAsn1Error:
exType, exValue, exTb = sys.exc_info()
raise exType('%s at %s' % (exValue, self.__class__.__name__))
if componentTypeLen or idx in self._dynamicNames:
self._componentValues[idx] = value
elif len(self._componentValues) == idx:
self._componentValues.append(value)
self._dynamicNames.addField(idx)
else:
raise error.PyAsn1Error('Component index out of range')
return self
@property
def isValue(self):
"""Indicate that |ASN.1| object represents ASN.1 value.
If *isValue* is `False` then this object represents just ASN.1 schema.
If *isValue* is `True` then, in addition to its ASN.1 schema features,
this object can also be used like a Python built-in object (e.g. `int`,
`str`, `dict` etc.).
Returns
-------
: :class:`bool`
:class:`False` if object represents just ASN.1 schema.
:class:`True` if object represents ASN.1 schema and can be used as a normal value.
Note
----
There is an important distinction between PyASN1 schema and value objects.
The PyASN1 schema objects can only participate in ASN.1 schema-related
operations (e.g. defining or testing the structure of the data). Most
obvious uses of ASN.1 schema is to guide serialisation codecs whilst
encoding/decoding serialised ASN.1 contents.
The PyASN1 value objects can **additionally** participate in many operations
involving regular Python objects (e.g. arithmetic, comprehension etc).
"""
componentType = self.componentType
if componentType:
for idx, subComponentType in enumerate(componentType.namedTypes):
if subComponentType.isDefaulted or subComponentType.isOptional:
continue
if not self._componentValues:
return False
componentValue = self._componentValues[idx]
if componentValue is noValue or not componentValue.isValue:
return False
else:
for componentValue in self._componentValues:
if componentValue is noValue or not componentValue.isValue:
return False
return True
def prettyPrint(self, scope=0):
"""Return an object representation string.
Returns
-------
: :class:`str`
Human-friendly object representation.
"""
scope += 1
representation = self.__class__.__name__ + ':\n'
for idx, componentValue in enumerate(self._componentValues):
if componentValue is not noValue and componentValue.isValue:
representation += ' ' * scope
if self.componentType:
representation += self.componentType.getNameByPosition(idx)
else:
representation += self._dynamicNames.getNameByPosition(idx)
representation = '%s=%s\n' % (
representation, componentValue.prettyPrint(scope)
)
return representation
def prettyPrintType(self, scope=0):
scope += 1
representation = '%s -> %s {\n' % (self.tagSet, self.__class__.__name__)
for idx, componentType in enumerate(self.componentType.values() or self._componentValues):
representation += ' ' * scope
if self.componentType:
representation += '"%s"' % self.componentType.getNameByPosition(idx)
else:
representation += '"%s"' % self._dynamicNames.getNameByPosition(idx)
representation = '%s = %s\n' % (
representation, componentType.prettyPrintType(scope)
)
return representation + '\n' + ' ' * (scope - 1) + '}'
    # backward compatibility
    def setDefaultComponents(self):
        # Historical no-op retained so code written against older pyasn1
        # releases keeps working; it simply returns self unchanged.
        return self
def getComponentType(self):
if self._componentTypeLen:
return self.componentType
def getNameByPosition(self, idx):
if self._componentTypeLen:
return self.componentType[idx].name
class Sequence(SequenceAndSetBase):
    __doc__ = SequenceAndSetBase.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type (UNIVERSAL 16, constructed).
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
    )
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing size constraint on |ASN.1| objects
    sizeSpec = constraint.ConstraintsIntersection()
    #: Default collection of ASN.1 types of component (e.g.
    #: :py:class:`~pyasn1.type.namedtype.NamedType`) objects
    #: representing the ASN.1 types allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()
    # Disambiguation ASN.1 types identification
    typeId = SequenceAndSetBase.getTypeId()
    # backward compatibility
    def getComponentTagMapNearPosition(self, idx):
        # Delegate to the schema's positional tag map; None when untyped.
        if self.componentType:
            return self.componentType.getTagMapNearPosition(idx)
    def getComponentPositionNearType(self, tagSet, idx):
        # Untyped sequences are positional only, so the index stands as-is.
        if self.componentType:
            return self.componentType.getPositionNearType(tagSet, idx)
        else:
            return idx
class Set(SequenceAndSetBase):
    __doc__ = SequenceAndSetBase.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type (UNIVERSAL 17, constructed).
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
    )
    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing size constraint on |ASN.1| objects
    sizeSpec = constraint.ConstraintsIntersection()
    # Disambiguation ASN.1 types identification
    typeId = SequenceAndSetBase.getTypeId()
    def getComponent(self, innerFlag=False):
        # A Set is its own component container; *innerFlag* is accepted
        # for API symmetry with Choice.getComponent().
        return self
    def getComponentByType(self, tagSet, default=noValue,
                           instantiate=True, innerFlag=False):
        """Returns |ASN.1| type component by ASN.1 tag.
        Parameters
        ----------
        tagSet : :py:class:`~pyasn1.type.tag.TagSet`
            Object representing ASN.1 tags to identify one of
            |ASN.1| object component
        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.
        instantiate: :class:`bool`
            If `True` (default), inner component will be automatically instantiated.
            If 'False' either existing component or the `noValue` object will be
            returned.
        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            a pyasn1 object
        """
        # Resolve the component's position from its ASN.1 tag via the schema.
        componentValue = self.getComponentByPosition(
            self.componentType.getPositionByType(tagSet),
            default=default, instantiate=instantiate
        )
        if innerFlag and isinstance(componentValue, Set):
            # get inner component by inner tagSet
            return componentValue.getComponent(innerFlag=True)
        else:
            # get outer component by inner tagSet
            return componentValue
    def setComponentByType(self, tagSet, value=noValue,
                           verifyConstraints=True,
                           matchTags=True,
                           matchConstraints=True,
                           innerFlag=False):
        """Assign |ASN.1| type component by ASN.1 tag.
        Parameters
        ----------
        tagSet : :py:class:`~pyasn1.type.tag.TagSet`
            Object representing ASN.1 tags to identify one of
            |ASN.1| object component
        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component.
        verifyConstraints : :class:`bool`
            If `False`, skip constraints validation
        matchTags: :class:`bool`
            If `False`, skip component tags matching
        matchConstraints: :class:`bool`
            If `False`, skip component constraints matching
        innerFlag: :class:`bool`
            If `True`, search for matching *tagSet* recursively.
        Returns
        -------
        self
        """
        idx = self.componentType.getPositionByType(tagSet)
        if innerFlag: # set inner component by inner tagSet
            componentType = self.componentType.getTypeByPosition(idx)
            if componentType.tagSet:
                return self.setComponentByPosition(
                    idx, value, verifyConstraints, matchTags, matchConstraints
                )
            else:
                # Untagged member (e.g. an inner CHOICE): recurse into it to
                # locate the component actually matching *tagSet*.
                componentType = self.getComponentByPosition(idx)
                return componentType.setComponentByType(
                    tagSet, value, verifyConstraints, matchTags, matchConstraints, innerFlag=innerFlag
                )
        else: # set outer component by inner tagSet
            return self.setComponentByPosition(
                idx, value, verifyConstraints, matchTags, matchConstraints
            )
    @property
    def componentTagMap(self):
        # Unique tag -> component-type map; None when the Set is untyped.
        if self.componentType:
            return self.componentType.tagMapUnique
class Choice(Set):
    """Create |ASN.1| type.

    |ASN.1| objects are mutable and duck-type Python :class:`dict` objects.
    At most one alternative (component) may carry a value at any time.

    Keyword Args
    ------------
    componentType: :py:class:`~pyasn1.type.namedtype.NamedType`
        Object holding named ASN.1 types allowed within this collection
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing collection size constraint

    Examples
    --------
    .. code-block:: python

        class Afters(Choice):
            '''
            ASN.1 specification:

            Afters ::= CHOICE {
                cheese  [0] IA5String,
                dessert [1] IA5String
            }
            '''
            componentType = NamedTypes(
                NamedType('cheese', IA5String().subtype(
                    implicitTag=Tag(tagClassContext, tagFormatSimple, 0)
                )),
                NamedType('dessert', IA5String().subtype(
                    implicitTag=Tag(tagClassContext, tagFormatSimple, 1)
                ))
            )

        afters = Afters()
        afters['cheese'] = 'Mascarpone'
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.TagSet()  # untagged

    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing size constraint on |ASN.1| objects -- a CHOICE holds
    #: exactly one component at a time.
    sizeSpec = constraint.ConstraintsIntersection(
        constraint.ValueSizeConstraint(1, 1)
    )

    # Disambiguation ASN.1 types identification
    typeId = Set.getTypeId()

    # Index of the currently chosen alternative; None when none chosen yet.
    _currentIdx = None

    def __eq__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] == other
        return NotImplemented

    def __ne__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] != other
        return NotImplemented

    def __lt__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] < other
        return NotImplemented

    def __le__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] <= other
        return NotImplemented

    def __gt__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] > other
        return NotImplemented

    def __ge__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] >= other
        return NotImplemented

    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return self._componentValues and True or False
    else:
        def __bool__(self):
            return self._componentValues and True or False

    def __len__(self):
        return self._currentIdx is not None and 1 or 0

    def __contains__(self, key):
        if self._currentIdx is None:
            return False
        return key == self.componentType[self._currentIdx].getName()

    def __iter__(self):
        # BUG FIX: this is a generator function, so `raise StopIteration`
        # here becomes RuntimeError on Python 3.7+ (PEP 479). A bare
        # `return` terminates iteration with identical semantics on all
        # supported Python versions.
        if self._currentIdx is None:
            return
        yield self.componentType[self._currentIdx].getName()

    # Python dict protocol

    def values(self):
        if self._currentIdx is not None:
            yield self._componentValues[self._currentIdx]

    def keys(self):
        if self._currentIdx is not None:
            yield self.componentType[self._currentIdx].getName()

    def items(self):
        if self._currentIdx is not None:
            yield self.componentType[self._currentIdx].getName(), self[self._currentIdx]

    def verifySizeSpec(self):
        # A CHOICE is well-formed only once an alternative has been chosen.
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')

    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # Copy the chosen component (if any) into *myClone*, recursing into
        # constructed types when *cloneValueFlag* requests value cloning.
        try:
            component = self.getComponent()
        except error.PyAsn1Error:
            pass
        else:
            if isinstance(component, Choice):
                tagSet = component.effectiveTagSet
            else:
                tagSet = component.tagSet
            if isinstance(component, base.AbstractConstructedAsn1Item):
                myClone.setComponentByType(
                    tagSet, component.clone(cloneValueFlag=cloneValueFlag)
                )
            else:
                myClone.setComponentByType(tagSet, component.clone())

    def getComponentByPosition(self, idx, default=noValue, instantiate=True):
        """Return the component at *idx*.

        The currently chosen component is returned directly; any other
        index is delegated to :meth:`Set.getComponentByPosition`.
        """
        if self._currentIdx is None or self._currentIdx != idx:
            return Set.getComponentByPosition(self, idx, default=default,
                                              instantiate=instantiate)
        return self._componentValues[idx]

    def setComponentByPosition(self, idx, value=noValue,
                               verifyConstraints=True,
                               matchTags=True,
                               matchConstraints=True):
        """Assign |ASN.1| type component by position.

        Equivalent to Python sequence item assignment operation (e.g. `[]`).
        Choosing a new alternative drops the previously chosen one.

        Parameters
        ----------
        idx: :class:`int`
            Component index (zero-based). Must either refer to existing
            component or to N+1 component. In the latter case a new component
            type gets instantiated (if *componentType* is set, or given ASN.1
            object is taken otherwise) and appended to the |ASN.1| sequence.

        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType*
            is set) or ASN.1 value object to assign to |ASN.1| component. Once a
            new value is set to *idx* component, previous value is dropped.
        verifyConstraints : :class:`bool`
            If `False`, skip constraints validation
        matchTags: :class:`bool`
            If `False`, skip component tags matching
        matchConstraints: :class:`bool`
            If `False`, skip component constraints matching

        Returns
        -------
        self
        """
        oldIdx = self._currentIdx
        Set.setComponentByPosition(self, idx, value, verifyConstraints, matchTags, matchConstraints)
        self._currentIdx = idx
        if oldIdx is not None and oldIdx != idx:
            # Only one alternative may carry a value at a time.
            self._componentValues[oldIdx] = noValue
        return self

    @property
    def effectiveTagSet(self):
        """Return a :class:`~pyasn1.type.tag.TagSet` object of the currently initialized component or self (if |ASN.1| is tagged)."""
        if self.tagSet:
            return self.tagSet
        else:
            component = self.getComponent()
            return component.effectiveTagSet

    @property
    def tagMap(self):
        """Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
        ASN.1 tags to ASN.1 objects contained within callee.
        """
        if self.tagSet:
            return Set.tagMap.fget(self)
        else:
            return self.componentType.tagMapUnique

    def getComponent(self, innerFlag=False):
        """Return currently assigned component of the |ASN.1| object.

        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            a PyASN1 object

        Raises
        ------
        :py:class:`~pyasn1.error.PyAsn1Error`
            If no alternative has been chosen yet.
        """
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            c = self._componentValues[self._currentIdx]
            if innerFlag and isinstance(c, Choice):
                return c.getComponent(innerFlag)
            else:
                return c

    def getName(self, innerFlag=False):
        """Return the name of currently assigned component of the |ASN.1| object.

        Returns
        -------
        : :py:class:`str`
            |ASN.1| component name

        Raises
        ------
        :py:class:`~pyasn1.error.PyAsn1Error`
            If no alternative has been chosen yet.
        """
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            if innerFlag:
                c = self._componentValues[self._currentIdx]
                if isinstance(c, Choice):
                    return c.getName(innerFlag)
            return self.componentType.getNameByPosition(self._currentIdx)

    @property
    def isValue(self):
        """Indicate that |ASN.1| object represents ASN.1 value.

        Returns
        -------
        : :class:`bool`
            :class:`True` only when an alternative has been chosen and the
            chosen component itself holds an actual value (not just schema).
        """
        if self._currentIdx is None:
            return False

        componentValue = self._componentValues[self._currentIdx]

        return componentValue is not noValue and componentValue.isValue

    def clear(self):
        self._currentIdx = None
        Set.clear(self)

    # compatibility stubs

    def getMinTagSet(self):
        return self.minTagSet
class Any(OctetString):
    """Create |ASN.1| schema or value object.
    |ASN.1| objects are immutable and duck-type Python 2 :class:`str` or Python 3
    :class:`bytes`. When used in Unicode context, |ASN.1| type assumes "|encoding|"
    serialisation.
    Keyword Args
    ------------
    value: :class:`str`, :class:`bytes` or |ASN.1| object
        string (Python 2) or bytes (Python 3), alternatively unicode object
        (Python 2) or string (Python 3) representing character string to be
        serialised into octets (note `encoding` parameter) or |ASN.1| object.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    encoding: :py:class:`str`
        Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
        :class:`str` (Python 3) the payload when |ASN.1| object is used
        in text string context.
    binValue: :py:class:`str`
        Binary string initializer to use instead of the *value*.
        Example: '10110011'.
    hexValue: :py:class:`str`
        Hexadecimal string initializer to use instead of the *value*.
        Example: 'DEADBEEF'.
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    Examples
    --------
    .. code-block:: python
        class Error(Sequence):
            '''
            ASN.1 specification:
            Error ::= SEQUENCE {
                code      INTEGER,
                parameter ANY DEFINED BY code  -- Either INTEGER or REAL
            }
            '''
            componentType=NamedTypes(
                NamedType('code', Integer()),
                NamedType('parameter', Any(),
                          openType=OpenType('code', {1: Integer(),
                                                     2: Real()}))
            )
        error = Error()
        error['code'] = 1
        error['parameter'] = Integer(1234)
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.TagSet() # untagged
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    # Disambiguation ASN.1 types identification
    typeId = OctetString.getTypeId()
    @property
    def tagMap(self):
        """Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
        ASN.1 tags to ASN.1 objects contained within callee.
        """
        # Lazily build the tag map on first access and cache it on the
        # instance for subsequent lookups.
        try:
            return self._tagMap
        except AttributeError:
            self._tagMap = tagmap.TagMap(
                {self.tagSet: self},
                {eoo.endOfOctets.tagSet: eoo.endOfOctets},
                self
            )
            return self._tagMap
# XXX
# coercion rules?
| gpl-3.0 |
scvalencia/ROBOCOL_desastres | Galileo/Python/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    # Multi-byte charset prober specialised for the Korean EUC-KR encoding.
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # State machine validating EUC-KR byte sequences...
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        # ...plus a character-frequency analyser scoring EUC-KR likelihood.
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        # Canonical encoding name reported to chardet callers.
        return "EUC-KR"
| mit |
oliverodaa/cs184-final-proj | dwinelle/video/gen_3d.py | 1 | 1238 | #!/usr/bin/env python3
# This file is part of dwinelle-tools.
# dwinelle-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# dwinelle-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with dwinelle-tools. If not, see <http://www.gnu.org/licenses/>.
# This can be used to generate data3d.js for the web frontend.
# Loads graph measurements through the project-local `utils` module and prints
# JavaScript variable declarations (captured as data3d.js by the caller).
import utils
# Edge lengths keyed by (u, v) node-id pairs.
edge_lengths = utils.load_edge_lengths()
# Emit edge lengths as an object keyed by "u v".
print('var el = {{{}}};'.format(','.join('"{} {}":{}'.format(k[0], k[1], v) for k, v in edge_lengths.items())))
# Emit per-node coordinates as {x, y, z} objects.
print('var coords = {{{}}};'.format(','.join('{}:{{x:{},y:{},z:{}}}'.format(k, v[0], v[1], v[2]) for k, v in utils.get_node_coords().items())))
# Emit per-edge height data ({bot, top, l}) keyed by "u v".
print('var eh = {{{}}};'.format(','.join('"{} {}":{{bot:{},top:{},l:{}}}'.format(k[0], k[1], v[0], v[1], v[2]) for k, v in utils.load_edge_heights().items())))
| mit |
gchaimovitz/CouchPotatoServer | libs/suds/mx/literal.py | 178 | 9517 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides literal I{marshaller} classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.mx.core import Core
from suds.mx.typer import Typer
from suds.resolver import GraphResolver, Frame
from suds.sax.element import Element
from suds.sudsobject import Factory
# Module-level logger used by the marshaller classes below.
log = getLogger(__name__)
#
# Register typed extension attributes on marshalled Content objects:
# type = The expected xsd type
# real = The 'true' XSD type
# ancestry = The 'type' ancestry
#
Content.extensions.append('type')
Content.extensions.append('real')
Content.extensions.append('ancestry')
class Typed(Core):
    """
    A I{typed} marshaller.
    This marshaller is semi-typed as needed to support both
    I{document/literal} and I{rpc/literal} soap message styles.
    @ivar schema: An xsd schema.
    @type schema: L{xsd.schema.Schema}
    @ivar resolver: A schema type resolver.
    @type resolver: L{GraphResolver}
    """
    # NOTE(review): this module uses Python 2-only syntax (see the
    # `raise Exception, ...` statement in end()); it predates Python 3.
    def __init__(self, schema, xstq=True):
        """
        @param schema: A schema object
        @type schema: L{xsd.schema.Schema}
        @param xstq: The B{x}ml B{s}chema B{t}ype B{q}ualified flag indicates
        that the I{xsi:type} attribute values should be qualified by namespace.
        @type xstq: bool
        """
        Core.__init__(self)
        self.schema = schema
        self.xstq = xstq
        self.resolver = GraphResolver(self.schema)
    def reset(self):
        # Clear the resolver's stack ahead of marshalling a new document.
        self.resolver.reset()
    def start(self, content):
        #
        # Start marshalling the 'content' by ensuring that both the
        # 'content' _and_ the resolver are primed with the XSD type
        # information. The 'content' value is both translated and
        # sorted based on the XSD type. Only values that are objects
        # have their attributes sorted.
        #
        log.debug('starting content:\n%s', content)
        if content.type is None:
            # No type attached yet: resolve it from the tag name
            # ('_name' is the convention for attribute '@name').
            name = content.tag
            if name.startswith('_'):
                name = '@'+name[1:]
            content.type = self.resolver.find(name, content.value)
            if content.type is None:
                raise TypeNotFound(content.tag)
        else:
            known = None
            if isinstance(content.value, Object):
                known = self.resolver.known(content.value)
            if known is None:
                log.debug('object has no type information', content.value)
                known = content.type
            frame = Frame(content.type, resolved=known)
            self.resolver.push(frame)
        frame = self.resolver.top()
        content.real = frame.resolved
        content.ancestry = frame.ancestry
        self.translate(content)
        self.sort(content)
        if self.skip(content):
            log.debug('skipping (optional) content:\n%s', content)
            self.resolver.pop()
            return False
        else:
            return True
    def suspend(self, content):
        #
        # Suspend to process a list content. Primarily, this
        # involves popping the 'list' content off the resolver's
        # stack so the list items can be marshalled.
        #
        self.resolver.pop()
    def resume(self, content):
        #
        # Resume processing a list content. To do this, we
        # really need to simply push the 'list' content
        # back onto the resolver stack.
        #
        self.resolver.push(Frame(content.type))
    def end(self, parent, content):
        #
        # End processing the content. Make sure the content
        # ending matches the top of the resolver stack since for
        # list processing we play games with the resolver stack.
        #
        log.debug('ending content:\n%s', content)
        current = self.resolver.top().type
        if current == content.type:
            self.resolver.pop()
        else:
            # NOTE(review): Python 2-only raise syntax below.
            raise Exception, \
                'content (end) mismatch: top=(%s) cont=(%s)' % \
                (current, content)
    def node(self, content):
        #
        # Create an XML node and namespace qualify as defined
        # by the schema (elementFormDefault).
        #
        ns = content.type.namespace()
        if content.type.form_qualified:
            node = Element(content.tag, ns=ns)
            node.addPrefix(ns[0], ns[1])
        else:
            node = Element(content.tag)
        self.encode(node, content)
        log.debug('created - node:\n%s', node)
        return node
    def setnil(self, node, content):
        #
        # Set the 'node' nil only if the XSD type
        # specifies that it is permitted.
        #
        if content.type.nillable:
            node.setnil()
    def setdefault(self, node, content):
        #
        # Set the node to the default value specified
        # by the XSD type.
        #
        default = content.type.default
        if default is None:
            pass
        else:
            node.setText(default)
        return default
    def optional(self, content):
        # True when the content's type, or any type in its ancestry,
        # is declared optional in the schema.
        if content.type.optional():
            return True
        for a in content.ancestry:
            if a.optional():
                return True
        return False
    def encode(self, node, content):
        # Add (soap) encoding information only if the resolved
        # type is derived by extension. Further, the xsi:type values
        # is qualified by namespace only if the content (tag) and
        # referenced type are in different namespaces.
        if content.type.any():
            return
        if not content.real.extension():
            return
        if content.type.resolve() == content.real:
            return
        ns = None
        name = content.real.name
        if self.xstq:
            ns = content.real.namespace('ns1')
        Typer.manual(node, name, ns)
    def skip(self, content):
        """
        Get whether to skip this I{content}.
        Should be skipped when the content is optional
        and either the value=None or the value is an empty list.
        @param content: The content to skip.
        @type content: L{Object}
        @return: True if content is to be skipped.
        @rtype: bool
        """
        if self.optional(content):
            v = content.value
            if v is None:
                return True
            if isinstance(v, (list,tuple)) and len(v) == 0:
                return True
        return False
    # NOTE(review): duplicate of optional() defined above; this second,
    # identical definition silently overrides the first one.
    def optional(self, content):
        if content.type.optional():
            return True
        for a in content.ancestry:
            if a.optional():
                return True
        return False
    def translate(self, content):
        """
        Translate using the XSD type information.
        Python I{dict} is translated to a suds object. Most
        importantly, primative values are translated from python
        types to XML types using the XSD type.
        @param content: The content to translate.
        @type content: L{Object}
        @return: self
        @rtype: L{Typed}
        """
        v = content.value
        if v is None:
            return
        if isinstance(v, dict):
            # Wrap plain dicts in a suds object named after the XSD type.
            cls = content.real.name
            content.value = Factory.object(cls, v)
            md = content.value.__metadata__
            md.sxtype = content.type
            return
        v = content.real.translate(v, False)
        content.value = v
        return self
    def sort(self, content):
        """
        Sort suds object attributes based on ordering defined
        in the XSD type information.
        @param content: The content to sort.
        @type content: L{Object}
        @return: self
        @rtype: L{Typed}
        """
        v = content.value
        if isinstance(v, Object):
            md = v.__metadata__
            md.ordering = self.ordering(content.real)
        return self
    def ordering(self, type):
        """
        Get the attribute ordering defined in the specified
        XSD type information.
        @param type: An XSD type object.
        @type type: SchemaObject
        @return: An ordered list of attribute names.
        @rtype: list
        """
        result = []
        for child, ancestry in type.resolve():
            name = child.name
            if child.name is None:
                continue
            if child.isattr():
                # XML attributes are exposed with a leading underscore.
                name = '_%s' % child.name
            result.append(name)
        return result
class Literal(Typed):
    """
    A I{literal} marshaller.

    Semi-typed as needed to support both the I{document/literal}
    and I{rpc/literal} soap message styles; all behavior comes
    from L{Typed}.
    """
    pass
jakobworldpeace/scikit-learn | sklearn/utils/sparsefuncs.py | 28 | 13531 | # Authors: Manoj Kumar
# Thomas Unterthiner
# Giorgio Patrini
#
# License: BSD 3 clause
import scipy.sparse as sp
import numpy as np
from .fixes import sparse_min_max, bincount
from .sparsefuncs_fast import (
csr_mean_variance_axis0 as _csr_mean_var_axis0,
csc_mean_variance_axis0 as _csc_mean_var_axis0,
incr_mean_variance_axis0 as _incr_mean_var_axis0)
def _raise_typeerror(X):
    """Raises a TypeError if X is not a CSR or CSC matrix"""
    if sp.issparse(X):
        input_type = X.format
    else:
        input_type = type(X)
    raise TypeError("Expected a CSR or CSC sparse matrix, got %s." % input_type)
def _raise_error_wrong_axis(axis):
    """Reject any axis value other than 0 (rows) or 1 (columns)."""
    if axis in (0, 1):
        return
    raise ValueError(
        "Unknown axis value: %d. Use 0 for rows, or 1 for columns" % axis)
def inplace_csr_column_scale(X, scale):
    """Inplace column scaling of a CSR matrix.

    Each feature (column) of X is multiplied by the corresponding entry
    of ``scale``, assuming a (n_samples, n_features) shape.

    Parameters
    ----------
    X : CSR matrix with shape (n_samples, n_features)
        Matrix to normalize using the variance of the features.

    scale : float array with shape (n_features,)
        Array of precomputed feature-wise values to use for scaling.
    """
    assert scale.shape[0] == X.shape[1]
    # Every stored value is multiplied by the scale of its own column;
    # X.indices holds exactly that column index for each stored value.
    per_value_scale = scale.take(X.indices, mode='clip')
    X.data *= per_value_scale
def inplace_csr_row_scale(X, scale):
    """ Inplace row scaling of a CSR matrix.

    Each sample (row) of X is multiplied by the corresponding entry of
    ``scale``, assuming a (n_samples, n_features) shape.

    Parameters
    ----------
    X : CSR sparse matrix, shape (n_samples, n_features)
        Matrix to be scaled.

    scale : float array with shape (n_samples,)
        Array of precomputed sample-wise values to use for scaling.
    """
    assert scale.shape[0] == X.shape[0]
    # np.diff(X.indptr) is the number of stored values per row, so
    # repeating the row scales by those counts lines every stored value
    # up with the scale of its own row.
    per_value_scale = np.repeat(scale, np.diff(X.indptr))
    X.data *= per_value_scale
def mean_variance_axis(X, axis):
    """Compute mean and variance along an axis on a CSR or CSC matrix.

    Parameters
    ----------
    X : CSR or CSC sparse matrix, shape (n_samples, n_features)
        Input data.

    axis : int (either 0 or 1)
        Axis along which the statistics are computed.

    Returns
    -------
    means : float array with shape (n_features,)
        Feature-wise means

    variances : float array with shape (n_features,)
        Feature-wise variances
    """
    _raise_error_wrong_axis(axis)
    # Axis 1 statistics of X are the axis 0 statistics of X.T, whose
    # format is the opposite of X's (CSR <-> CSC).
    if isinstance(X, sp.csr_matrix):
        if axis == 0:
            return _csr_mean_var_axis0(X)
        return _csc_mean_var_axis0(X.T)
    if isinstance(X, sp.csc_matrix):
        if axis == 0:
            return _csc_mean_var_axis0(X)
        return _csr_mean_var_axis0(X.T)
    _raise_typeerror(X)
def incr_mean_variance_axis(X, axis, last_mean, last_var, last_n):
    """Compute incremental mean and variance along an axis on a CSR or
    CSC matrix.

    last_mean, last_var are the statistics computed at the last step by this
    function. Both must be initialized to 0-arrays of the proper size, i.e.
    the number of features in X. last_n is the number of samples encountered
    until now.

    Parameters
    ----------
    X : CSR or CSC sparse matrix, shape (n_samples, n_features)
        Input data.

    axis : int (either 0 or 1)
        Axis along which the statistics are computed.

    last_mean : float array with shape (n_features,)
        Array of feature-wise means to update with the new data X.

    last_var : float array with shape (n_features,)
        Array of feature-wise var to update with the new data X.

    last_n : int
        Number of samples seen so far, excluded X.

    Returns
    -------
    means : float array with shape (n_features,)
        Updated feature-wise means.

    variances : float array with shape (n_features,)
        Updated feature-wise variances.

    n : int
        Updated number of seen samples.
    """
    _raise_error_wrong_axis(axis)
    if not isinstance(X, (sp.csr_matrix, sp.csc_matrix)):
        _raise_typeerror(X)
    # The compiled helper handles both formats; axis 1 statistics are
    # just axis 0 statistics of the transpose.
    target = X if axis == 0 else X.T
    return _incr_mean_var_axis0(target, last_mean=last_mean,
                                last_var=last_var, last_n=last_n)
def inplace_column_scale(X, scale):
    """Inplace column scaling of a CSC/CSR matrix.

    Each feature of X is multiplied by the corresponding entry of
    ``scale``, assuming a (n_samples, n_features) shape.

    Parameters
    ----------
    X : CSC or CSR matrix with shape (n_samples, n_features)
        Matrix to normalize using the variance of the features.

    scale : float array with shape (n_features,)
        Array of precomputed feature-wise values to use for scaling.
    """
    if isinstance(X, sp.csr_matrix):
        inplace_csr_column_scale(X, scale)
    elif isinstance(X, sp.csc_matrix):
        # Columns of a CSC matrix are the rows of its (data-sharing) CSR
        # transpose, so delegate to the row-scaling helper.
        inplace_csr_row_scale(X.T, scale)
    else:
        _raise_typeerror(X)
def inplace_row_scale(X, scale):
    """ Inplace row scaling of a CSR or CSC matrix.

    Each sample of X is multiplied by the corresponding entry of
    ``scale``, assuming a (n_samples, n_features) shape.

    Parameters
    ----------
    X : CSR or CSC sparse matrix, shape (n_samples, n_features)
        Matrix to be scaled.

    scale : float array with shape (n_samples,)
        Array of precomputed sample-wise values to use for scaling.
    """
    if isinstance(X, sp.csr_matrix):
        inplace_csr_row_scale(X, scale)
    elif isinstance(X, sp.csc_matrix):
        # Rows of a CSC matrix are the columns of its (data-sharing) CSR
        # transpose, so delegate to the column-scaling helper.
        inplace_csr_column_scale(X.T, scale)
    else:
        _raise_typeerror(X)
def inplace_swap_row_csc(X, m, n):
    """
    Swaps two rows of a CSC matrix in-place.

    Parameters
    ----------
    X : scipy.sparse.csc_matrix, shape=(n_samples, n_features)
        Matrix whose two rows are to be swapped.

    m : int
        Index of the row of X to be swapped.

    n : int
        Index of the row of X to be swapped.
    """
    if isinstance(m, np.ndarray) or isinstance(n, np.ndarray):
        raise TypeError("m and n should be valid integers")
    n_rows = X.shape[0]
    if m < 0:
        m += n_rows
    if n < 0:
        n += n_rows
    # In CSC format, X.indices holds the row index of every stored value,
    # so swapping two rows is just relabelling those indices.
    in_row_m = X.indices == m
    X.indices[X.indices == n] = m
    X.indices[in_row_m] = n
def inplace_swap_row_csr(X, m, n):
    """
    Swaps two rows of a CSR matrix in-place.

    Parameters
    ----------
    X : scipy.sparse.csr_matrix, shape=(n_samples, n_features)
        Matrix whose two rows are to be swapped.

    m : int
        Index of the row of X to be swapped.

    n : int
        Index of the row of X to be swapped.
    """
    for t in [m, n]:
        if isinstance(t, np.ndarray):
            raise TypeError("m and n should be valid integers")
    # Negative indices address rows from the end, as in regular indexing.
    if m < 0:
        m += X.shape[0]
    if n < 0:
        n += X.shape[0]
    # The following swapping makes life easier since m is assumed to be the
    # smaller integer below.
    if m > n:
        m, n = n, m
    # Row i of a CSR matrix owns the half-open slice
    # [indptr[i], indptr[i + 1]) of the indices/data arrays.
    indptr = X.indptr
    m_start = indptr[m]
    m_stop = indptr[m + 1]
    n_start = indptr[n]
    n_stop = indptr[n + 1]
    nz_m = m_stop - m_start
    nz_n = n_stop - n_start
    if nz_m != nz_n:
        # Modify indptr first: rows strictly between m and n shift by the
        # difference in nonzero counts, then the boundaries of m and n are
        # patched to reflect their swapped sizes.
        X.indptr[m + 2:n] += nz_n - nz_m
        X.indptr[m + 1] = m_start + nz_n
        X.indptr[n] = n_stop - nz_m
    # Rebuild indices/data with row n's slice where row m's was (and vice
    # versa); the segment between the two rows is carried over unchanged.
    X.indices = np.concatenate([X.indices[:m_start],
                                X.indices[n_start:n_stop],
                                X.indices[m_stop:n_start],
                                X.indices[m_start:m_stop],
                                X.indices[n_stop:]])
    X.data = np.concatenate([X.data[:m_start],
                             X.data[n_start:n_stop],
                             X.data[m_stop:n_start],
                             X.data[m_start:m_stop],
                             X.data[n_stop:]])
def inplace_swap_row(X, m, n):
    """
    Swaps two rows of a CSC/CSR matrix in-place.

    Parameters
    ----------
    X : CSR or CSC sparse matrix, shape=(n_samples, n_features)
        Matrix whose two rows are to be swapped.

    m : int
        Index of the row of X to be swapped.

    n : int
        Index of the row of X to be swapped.
    """
    if isinstance(X, sp.csc_matrix):
        return inplace_swap_row_csc(X, m, n)
    if isinstance(X, sp.csr_matrix):
        return inplace_swap_row_csr(X, m, n)
    _raise_typeerror(X)
def inplace_swap_column(X, m, n):
    """
    Swaps two columns of a CSC/CSR matrix in-place.

    Parameters
    ----------
    X : CSR or CSC sparse matrix, shape=(n_samples, n_features)
        Matrix whose two columns are to be swapped.

    m : int
        Index of the column of X to be swapped.

    n : int
        Index of the column of X to be swapped.
    """
    n_cols = X.shape[1]
    if m < 0:
        m += n_cols
    if n < 0:
        n += n_cols
    # Columns of a CSC matrix are structured like rows of a CSR matrix
    # (and vice versa), hence the crossed dispatch below.
    if isinstance(X, sp.csc_matrix):
        return inplace_swap_row_csr(X, m, n)
    if isinstance(X, sp.csr_matrix):
        return inplace_swap_row_csc(X, m, n)
    _raise_typeerror(X)
def min_max_axis(X, axis):
    """Compute minimum and maximum along an axis on a CSR or CSC matrix.

    Parameters
    ----------
    X : CSR or CSC sparse matrix, shape (n_samples, n_features)
        Input data.

    axis : int (either 0 or 1)
        Axis along which the extrema are computed.

    Returns
    -------
    mins : float array with shape (n_features,)
        Feature-wise minima

    maxs : float array with shape (n_features,)
        Feature-wise maxima
    """
    if not isinstance(X, (sp.csr_matrix, sp.csc_matrix)):
        _raise_typeerror(X)
    return sparse_min_max(X, axis=axis)
def count_nonzero(X, axis=None, sample_weight=None):
    """A variant of X.getnnz() with extension to weighting on axis 0

    Useful in efficiently calculating multilabel metrics.

    Parameters
    ----------
    X : CSR sparse matrix, shape = (n_samples, n_labels)
        Input data.

    axis : None, 0 or 1
        The axis on which the data is aggregated.

    sample_weight : array, shape = (n_samples,), optional
        Weight for each row of X.
    """
    if axis == -1:
        axis = 1
    elif axis == -2:
        axis = 0
    # BUGFIX: the format check used to sit in the elif chain above, so it
    # was silently skipped whenever a negative axis was supplied and a
    # non-CSR matrix produced wrong results.  Check it unconditionally.
    if X.format != 'csr':
        raise TypeError('Expected CSR sparse format, got {0}'.format(X.format))

    # We rely here on the fact that np.diff(Y.indptr) for a CSR
    # will return the number of nonzero entries in each row.
    # A bincount over Y.indices will return the number of nonzeros
    # in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
    if axis is None:
        if sample_weight is None:
            return X.nnz
        return np.dot(np.diff(X.indptr), sample_weight)
    elif axis == 1:
        out = np.diff(X.indptr)
        if sample_weight is None:
            return out
        return out * sample_weight
    elif axis == 0:
        if sample_weight is None:
            return bincount(X.indices, minlength=X.shape[1])
        weights = np.repeat(sample_weight, np.diff(X.indptr))
        return bincount(X.indices, minlength=X.shape[1],
                        weights=weights)
    else:
        raise ValueError('Unsupported axis: {0}'.format(axis))
def _get_median(data, n_zeros):
    """Compute the median of data with n_zeros additional zeros.

    This function is used to support sparse matrices; it modifies data
    in-place (the array is sorted).
    """
    n_elems = len(data) + n_zeros
    if not n_elems:
        return np.nan
    n_negative = np.count_nonzero(data < 0)
    middle, is_odd = divmod(n_elems, 2)
    data.sort()
    if is_odd:
        return _get_elem_at_rank(middle, data, n_negative, n_zeros)
    # Even count: average the two central ranks.
    lower = _get_elem_at_rank(middle - 1, data, n_negative, n_zeros)
    upper = _get_elem_at_rank(middle, data, n_negative, n_zeros)
    return (lower + upper) / 2.
def _get_elem_at_rank(rank, data, n_negative, n_zeros):
    """Find the value in data augmented with n_zeros for the given rank"""
    # Conceptually the sorted sequence is:
    #   data[:n_negative] ++ [0] * n_zeros ++ data[n_negative:]
    # (data is sorted, so negatives come first); the branches below pick
    # the element at `rank` without materializing the zeros.
    if rank < n_negative:
        return data[rank]
    if rank - n_negative < n_zeros:
        return 0
    return data[rank - n_zeros]
def csc_median_axis_0(X):
    """Find the median across axis 0 of a CSC matrix.
    It is equivalent to doing np.median(X, axis=0).

    Parameters
    ----------
    X : CSC sparse matrix, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    median : ndarray, shape (n_features,)
        Median.
    """
    if not isinstance(X, sp.csc_matrix):
        raise TypeError("Expected matrix of CSC format, got %s" % X.format)

    n_samples, n_features = X.shape
    indptr = X.indptr
    medians = np.zeros(n_features)
    for col, (begin, end) in enumerate(zip(indptr[:-1], indptr[1:])):
        # Copy so the caller's data is not reordered by the in-place sort
        # performed inside _get_median.
        stored = np.copy(X.data[begin: end])
        n_implicit_zeros = n_samples - stored.size
        medians[col] = _get_median(stored, n_implicit_zeros)
    return medians
| bsd-3-clause |
Kiiv/Sick-Beard | lib/requests/packages/chardet/euckrprober.py | 236 | 1672 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import EUCKRDistributionAnalysis
from mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    """Charset prober for EUC-KR (Korean).

    Wires the generic multi-byte prober machinery to the EUC-KR coding
    state machine and character-distribution analyser defined elsewhere
    in this package; all probing logic lives in the base class.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        # Canonical encoding name reported when this prober wins.
        return "EUC-KR"
| gpl-3.0 |
MTASZTAKI/ApertusVR | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/deps/v8/tools/gc-nvp-trace-processor.py | 31 | 11135 | #!/usr/bin/env python
#
# Copyright 2010 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# This is an utility for plotting charts based on GC traces produced by V8 when
# run with flags --trace-gc --trace-gc-nvp. Relies on gnuplot for actual
# plotting.
#
# Usage: gc-nvp-trace-processor.py <GC-trace-filename>
#
from __future__ import with_statement
import sys, types, subprocess, math
import gc_nvp_common
def flatten(l):
  """Concatenate an iterable of iterables into a single flat list."""
  return [element for sublist in l for element in sublist]
def gnuplot(script):
  # Pipe a complete gnuplot script into a freshly spawned gnuplot process
  # and block until it exits.  NOTE(review): `script` is written as-is;
  # under Python 2 (this module's target, see the print statements below)
  # a plain str works, but a Python 3 port would need bytes here.
  gnuplot = subprocess.Popen(["gnuplot"], stdin=subprocess.PIPE)
  gnuplot.stdin.write(script)
  gnuplot.stdin.close()
  gnuplot.wait()
# Axis-pair tags understood by gnuplot's `axis` keyword: x1 is the bottom
# x axis, x2 the top; y1 the left y axis, y2 the right (secondary) one.
x1y1 = 'x1y1'
x1y2 = 'x1y2'
x2y1 = 'x2y1'
x2y2 = 'x2y2'
class Item(object):
  """One plotted data series plus its gnuplot presentation options."""

  def __init__(self, title, field, axis = 'x1y1', **keywords):
    # `axis` defaults to the value of the module-level x1y1 constant.
    self.title = title
    self.axis = axis
    self.props = keywords
    # A field may be a single field or a list of fields (e.g.
    # ['i', 'pause'] for an explicit x:y pair).  isinstance() replaces
    # the Python-2-only `type(field) is types.ListType` check and also
    # works under Python 3.
    if isinstance(field, list):
      self.field = field
    else:
      self.field = [field]

  def fieldrefs(self):
    """Return the trace fields this series needs, as a list."""
    return self.field

  def to_gnuplot(self, context):
    """Render this series as one argument of a gnuplot `plot` command."""
    args = ['"%s"' % context.datafile,
            'using %s' % context.format_fieldref(self.field),
            'title "%s"' % self.title,
            'axis %s' % self.axis]
    if 'style' in self.props:
      args.append('with %s' % self.props['style'])
    if 'lc' in self.props:
      args.append('lc rgb "%s"' % self.props['lc'])
    if 'fs' in self.props:
      args.append('fs %s' % self.props['fs'])
    return ' '.join(args)
class Plot(object):
  """A single chart: a `plot` command drawing one or more Items."""

  def __init__(self, *items):
    self.items = items

  def fieldrefs(self):
    """All trace fields referenced by any item of this plot."""
    refs = []
    for item in self.items:
      refs.extend(item.fieldrefs())
    return refs

  def to_gnuplot(self, ctx):
    return 'plot ' + ', '.join([item.to_gnuplot(ctx) for item in self.items])
class Set(object):
  """A bare gnuplot `set ...` statement emitted before the plot command."""

  def __init__(self, value):
    self.value = value

  def to_gnuplot(self, ctx):
    return 'set ' + self.value

  def fieldrefs(self):
    """A Set statement references no trace fields."""
    return []
class Context(object):
  """Carries the datafile name and the field -> column-number mapping."""

  def __init__(self, datafile, field_to_index):
    self.datafile = datafile
    self.field_to_index = field_to_index

  def format_fieldref(self, fieldref):
    """Render a field list as gnuplot's colon-separated column spec."""
    columns = [str(self.field_to_index[field]) for field in fieldref]
    return ':'.join(columns)
def collect_fields(plot):
  """Assign 1-based datafile columns to every distinct field used by `plot`.

  Returns a (fields, field_to_index) pair where `fields` preserves
  first-use order and `field_to_index` maps a field to its column number.
  """
  field_to_index = {}
  fields = []
  for subplot in plot:
    for field in subplot.fieldrefs():
      if field not in field_to_index:
        fields.append(field)
        field_to_index[field] = len(fields)
  return (fields, field_to_index)
def is_y2_used(plot):
  """Return True when any plotted item targets the secondary y axis."""
  for subplot in plot:
    if not isinstance(subplot, Plot):
      continue
    for item in subplot.items:
      if item.axis in (x1y2, x2y2):
        return True
  return False
def get_field(trace_line, field):
  """Fetch a value from one parsed trace row.

  A string field is a direct key lookup; a callable field is a computed
  (synthesized) value evaluated against the row.  isinstance()/callable()
  replace the Python-2-only types.StringType/types.FunctionType checks
  and, as a backward-compatible generalization, also accept bound methods
  and other callables.  Any other field type yields None, as before.
  """
  if isinstance(field, str):
    return trace_line[field]
  if callable(field):
    return field(trace_line)
def generate_datafile(datafile_name, trace, fields):
  """Write one tab-separated line per trace row, one column per field."""
  with open(datafile_name, 'w') as datafile:
    for line in trace:
      row = [str(get_field(line, field)) for field in fields]
      datafile.write('\t'.join(row))
      datafile.write('\n')
def generate_script_and_datafile(plot, trace, datafile, output):
  # Emit the datafile covering all fields used by `plot`, then build the
  # matching gnuplot script (PNG terminal) that reads it back.
  (fields, field_to_index) = collect_fields(plot)
  generate_datafile(datafile, trace, fields)
  script = [
      'set terminal png',
      'set output "%s"' % output,
      'set autoscale',
      'set ytics nomirror',
      'set xtics nomirror',
      'set key below'
  ]
  if is_y2_used(plot):
    # Only configure the secondary y axis when something draws on it.
    script.append('set autoscale y2')
    script.append('set y2tics')
  context = Context(datafile, field_to_index)
  for item in plot:
    script.append(item.to_gnuplot(context))
  return '\n'.join(script)
def plot_all(plots, trace, prefix):
  """Render every plot description to <prefix>_<i>.png; return the names."""
  charts = []
  for plot in plots:
    outfilename = "%s_%d.png" % (prefix, len(charts))
    charts.append(outfilename)
    script = generate_script_and_datafile(plot, trace, '~datafile', outfilename)
    # Parenthesized form prints identically on Python 2 (single value) and
    # is required syntax on Python 3; the bare `print x` statement used
    # before is a SyntaxError there.
    print('Plotting %s...' % outfilename)
    gnuplot(script)
  return charts
def reclaimed_bytes(row):
  """Bytes freed by this GC: heap size before minus heap size after."""
  before, after = row['total_size_before'], row['total_size_after']
  return before - after
def other_scope(r):
  """Pause time not attributed to the mark, sweep or external phases."""
  if r['gc'] == 's':
    # there is no 'other' scope for scavenging collections.
    return 0
  accounted = r['mark'] + r['sweep'] + r['external']
  return r['pause'] - accounted
def scavenge_scope(r):
  """Scavenge pause time minus external work; 0 for non-scavenge GCs."""
  if r['gc'] != 's':
    return 0
  return r['pause'] - r['external']
def real_mutator(r):
  """Mutator time with incremental-marking steps factored out."""
  mutator, steps = r['mutator'], r['steps_took']
  return mutator - steps
# Chart descriptions consumed by plot_all(): each inner list is one PNG,
# made of gnuplot `set` statements followed by a single Plot.
plots = [
  # 1. Per-GC pause breakdown (stacked): scavenge/mark/sweep/external/other
  #    plus incremental-marking steps.
  [
    Set('style fill solid 0.5 noborder'),
    Set('style histogram rowstacked'),
    Set('style data histograms'),
    Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
         Item('Marking', 'mark', lc = 'purple'),
         Item('Sweep', 'sweep', lc = 'blue'),
         Item('External', 'external', lc = '#489D43'),
         Item('Other', other_scope, lc = 'grey'),
         Item('IGC Steps', 'steps_took', lc = '#FF6347'))
  ],
  # 2. Same breakdown with alternate colors.
  # NOTE(review): 'External' appears twice in this chart while the
  # 'IGC Steps' series present in chart 1 is absent - this looks like a
  # copy/paste slip; confirm the intended series before changing.
  [
    Set('style fill solid 0.5 noborder'),
    Set('style histogram rowstacked'),
    Set('style data histograms'),
    Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
         Item('Marking', 'mark', lc = 'purple'),
         Item('Sweep', 'sweep', lc = 'blue'),
         Item('External', 'external', lc = '#489D43'),
         Item('Other', other_scope, lc = '#ADD8E6'),
         Item('External', 'external', lc = '#D3D3D3'))
  ],
  # 3. Mutator time between GCs.
  [
    Plot(Item('Mutator', real_mutator, lc = 'black', style = 'lines'))
  ],
  # 4. Heap size / holes before GC (right axis) against GC pause time.
  [
    Set('style histogram rowstacked'),
    Set('style data histograms'),
    Plot(Item('Heap Size (before GC)', 'total_size_before', x1y2,
              fs = 'solid 0.4 noborder',
              lc = 'green'),
         Item('Total holes (after GC)', 'holes_size_before', x1y2,
              fs = 'solid 0.4 noborder',
              lc = 'red'),
         Item('GC Time', ['i', 'pause'], style = 'lines', lc = 'red'))
  ],
  # 5. Heap size / holes after GC (right axis) against GC pause time.
  [
    Set('style histogram rowstacked'),
    Set('style data histograms'),
    Plot(Item('Heap Size (after GC)', 'total_size_after', x1y2,
              fs = 'solid 0.4 noborder',
              lc = 'green'),
         Item('Total holes (after GC)', 'holes_size_after', x1y2,
              fs = 'solid 0.4 noborder',
              lc = 'red'),
         Item('GC Time', ['i', 'pause'],
              style = 'lines',
              lc = 'red'))
  ],
  # 6. Allocation vs. reclamation, with promoted bytes overlaid.
  [
    Set('style fill solid 0.5 noborder'),
    Set('style data histograms'),
    Plot(Item('Allocated', 'allocated'),
         Item('Reclaimed', reclaimed_bytes),
         Item('Promoted', 'promoted', style = 'lines', lc = 'black'))
  ],
]
def freduce(f, field, trace, init):
  """Left-fold `f` over `field` of every trace row, starting from `init`."""
  # `reduce` was a builtin on Python 2 only; functools.reduce exists on
  # both Python 2.6+ and Python 3, so import it locally to keep this
  # helper working under either interpreter.
  from functools import reduce
  return reduce(lambda t, r: f(t, r[field]), trace, init)
def calc_total(trace, field):
  """Sum `field` over all trace rows as an (arbitrary precision) integer.

  Rewritten with sum()/int(): the previous version relied on the
  Python-2-only long() builtin (a NameError on Python 3).  int() accepts
  the same numeric strings and auto-promotes on Python 2, so the result
  is numerically identical.
  """
  return sum(int(r[field]) for r in trace)
def calc_max(trace, field):
  """Largest value of `field` seen in the trace (0 for an empty trace)."""
  best = 0
  for row in trace:
    best = max(best, row[field])
  return best
def count_nonzero(trace, field):
  """Number of trace rows whose `field` is nonzero."""
  total = 0
  for row in trace:
    if row[field] != 0:
      total += 1
  return total
def process_trace(filename):
  # Parse a --trace-gc-nvp log, render every chart in `plots`, and write an
  # HTML summary (<filename>.html) with per-phase statistics and GC
  # throughput figures.
  trace = gc_nvp_common.parse_gc_trace(filename)
  # Partition GC events by kind: 'ms' mark-sweep, 's' scavenge; everything
  # that is not a scavenge counts as a global GC for throughput purposes.
  marksweeps = filter(lambda r: r['gc'] == 'ms', trace)
  scavenges = filter(lambda r: r['gc'] == 's', trace)
  globalgcs = filter(lambda r: r['gc'] != 's', trace)
  charts = plot_all(plots, trace, filename)
  def stats(out, prefix, trace, field):
    # Emit one HTML table row: count, total, max and avg [stddev] of
    # `field`.  NOTE(review): '/' on ints truncates under Python 2, so
    # `avg` (and the throughput figures below) are integer-divided -
    # confirm that is intended before porting to Python 3.
    n = len(trace)
    total = calc_total(trace, field)
    max = calc_max(trace, field)
    if n > 0:
      avg = total / n
    else:
      avg = 0
    if n > 1:
      dev = math.sqrt(freduce(lambda t,r: t + (r - avg) ** 2, field, trace, 0) /
                      (n - 1))
    else:
      dev = 0
    out.write('<tr><td>%s</td><td>%d</td><td>%d</td>'
              '<td>%d</td><td>%d [dev %f]</td></tr>' %
              (prefix, n, total, max, avg, dev))
  def HumanReadable(size):
    # Scale a byte count into the largest unit below 1024 of it.
    # NOTE(review): sizes >= 1024 GB fall off the end and return None.
    suffixes = ['bytes', 'kB', 'MB', 'GB']
    power = 1
    for i in range(len(suffixes)):
      if size < power*1024:
        return "%.1f" % (float(size) / power) + " " + suffixes[i]
      power *= 1024
  def throughput(name, trace):
    # Live bytes processed per millisecond of GC pause, reported both
    # against heap size after and before collection.
    total_live_after = calc_total(trace, 'total_size_after')
    total_live_before = calc_total(trace, 'total_size_before')
    total_gc = calc_total(trace, 'pause')
    if total_gc == 0:
      return
    out.write('GC %s Throughput (after): %s / %s ms = %s/ms<br/>' %
              (name,
               HumanReadable(total_live_after),
               total_gc,
               HumanReadable(total_live_after / total_gc)))
    out.write('GC %s Throughput (before): %s / %s ms = %s/ms<br/>' %
              (name,
               HumanReadable(total_live_before),
               total_gc,
               HumanReadable(total_live_before / total_gc)))
  with open(filename + '.html', 'w') as out:
    out.write('<html><body>')
    out.write('<table>')
    out.write('<tr><td>Phase</td><td>Count</td><td>Time (ms)</td>')
    out.write('<td>Max</td><td>Avg</td></tr>')
    stats(out, 'Total in GC', trace, 'pause')
    stats(out, 'Scavenge', scavenges, 'pause')
    stats(out, 'MarkSweep', marksweeps, 'pause')
    stats(out, 'Mark', filter(lambda r: r['mark'] != 0, trace), 'mark')
    stats(out, 'Sweep', filter(lambda r: r['sweep'] != 0, trace), 'sweep')
    stats(out,
          'External',
          filter(lambda r: r['external'] != 0, trace),
          'external')
    out.write('</table>')
    throughput('TOTAL', trace)
    throughput('MS', marksweeps)
    throughput('OLDSPACE', globalgcs)
    out.write('<br/>')
    for chart in charts:
      out.write('<img src="%s">' % chart)
    out.write('</body></html>')
  print "%s generated." % (filename + '.html')
if len(sys.argv) != 2:
  # Parenthesized print is valid (and identical) on Python 2 and required
  # on Python 3; the bare print statement used before is a py3 SyntaxError.
  print("Usage: %s <GC-trace-filename>" % sys.argv[0])
  sys.exit(1)
process_trace(sys.argv[1])
| mit |
apllicationCOM/youtube-dl-api-server | youtube_dl_server/youtube_dl/extractor/zingmp3.py | 94 | 3672 | # coding=utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class ZingMp3BaseInfoExtractor(InfoExtractor):
    # Shared logic for mp3.zing.vn: the site serves a "player XML" document
    # describing either a single track or a playlist of tracks.
    def _extract_item(self, item):
        # NOTE(review): assumes every <item> carries an <errormessage>
        # child; a missing element would raise AttributeError on `.text` -
        # confirm against live responses.
        error_message = item.find('./errormessage').text
        if error_message:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error_message),
                expected=True)
        title = item.find('./title').text.strip()
        source = item.find('./source').text          # direct media URL
        extension = item.attrib['type']              # container/codec, e.g. 'mp3'
        thumbnail = item.find('./backimage').text
        return {
            'title': title,
            'url': source,
            'ext': extension,
            'thumbnail': thumbnail,
        }
    def _extract_player_xml(self, player_xml_url, id, playlist_title=None):
        # Download the player XML; a single <item> yields one info dict,
        # several yield a playlist whose entries are numbered <id>-1, <id>-2...
        player_xml = self._download_xml(player_xml_url, id, 'Downloading Player XML')
        items = player_xml.findall('./item')
        if len(items) == 1:
            # one single song
            data = self._extract_item(items[0])
            data['id'] = id
            return data
        else:
            # playlist of songs
            entries = []
            for i, item in enumerate(items, 1):
                entry = self._extract_item(item)
                entry['id'] = '%s-%d' % (id, i)
                entries.append(entry)
            return {
                '_type': 'playlist',
                'id': id,
                'title': playlist_title,
                'entries': entries,
            }
class ZingMp3SongIE(ZingMp3BaseInfoExtractor):
    # Single tracks: http://mp3.zing.vn/bai-hat/<slug>/<song_id>.html
    _VALID_URL = r'https?://mp3\.zing\.vn/bai-hat/(?P<slug>[^/]+)/(?P<song_id>\w+)\.html'
    _TESTS = [{
        'url': 'http://mp3.zing.vn/bai-hat/Xa-Mai-Xa-Bao-Thy/ZWZB9WAB.html',
        'md5': 'ead7ae13693b3205cbc89536a077daed',
        'info_dict': {
            'id': 'ZWZB9WAB',
            'title': 'Xa Mãi Xa',
            'ext': 'mp3',
            'thumbnail': 're:^https?://.*\.jpg$',
        },
    }]
    IE_NAME = 'zingmp3:song'
    IE_DESC = 'mp3.zing.vn songs'
    def _real_extract(self, url):
        matched = re.match(self._VALID_URL, url)
        slug = matched.group('slug')
        song_id = matched.group('song_id')
        # Re-fetch the canonical page URL rebuilt from the matched pieces.
        webpage = self._download_webpage(
            'http://mp3.zing.vn/bai-hat/%s/%s.html' % (slug, song_id), song_id)
        # The page embeds a flash player config whose xmlURL parameter
        # points at the XML document carrying the real media URL.
        player_xml_url = self._search_regex(
            r'&xmlURL=(?P<xml_url>[^&]+)&', webpage, 'player xml url')
        return self._extract_player_xml(player_xml_url, song_id)
class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor):
    # Albums/playlists: http://mp3.zing.vn/album/<slug>/<album_id>.html
    _VALID_URL = r'https?://mp3\.zing\.vn/album/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html'
    _TESTS = [{
        'url': 'http://mp3.zing.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html',
        'info_dict': {
            '_type': 'playlist',
            'id': 'ZWZBWDAF',
            'title': 'Lâu Đài Tình Ái - Bằng Kiều ft. Minh Tuyết | Album 320 lossless',
        },
        'playlist_count': 10,
    }]
    IE_NAME = 'zingmp3:album'
    IE_DESC = 'mp3.zing.vn albums'
    def _real_extract(self, url):
        matched = re.match(self._VALID_URL, url)
        slug = matched.group('slug')
        album_id = matched.group('album_id')
        webpage = self._download_webpage(
            'http://mp3.zing.vn/album/%s/%s.html' % (slug, album_id), album_id)
        # Same player-XML indirection as for single songs; the page title
        # (og:title) becomes the playlist title.
        player_xml_url = self._search_regex(
            r'&xmlURL=(?P<xml_url>[^&]+)&', webpage, 'player xml url')
        return self._extract_player_xml(
            player_xml_url, album_id,
            playlist_title=self._og_search_title(webpage))
| unlicense |
dosiecki/NewsBlur | utils/memcached_status.py | 19 | 1172 | import memcache
import re
import sys
from settings import CACHE_BACKEND
#gfranxman
# Ad-hoc (Python 2) status report for the memcached instance named in
# Django's CACHE_BACKEND setting: issues the raw 'stats' command over the
# memcache client's private _Host API and prints a short usage summary.
verbose = False
if not CACHE_BACKEND.startswith( 'memcached://' ):
    # NOTE(review): "memcched" typo in this user-visible message; left
    # untouched here since it is runtime output, not a comment.
    print "you are not configured to use memcched as your django cache backend"
else:
    # Extract "host:port" from e.g. memcached://127.0.0.1:11211/.
    m = re.search( r'//(.+:\d+)', CACHE_BACKEND )
    cache_host = m.group(1)
    # NOTE(review): memcache._Host is a private API of python-memcached;
    # this can break across library versions.
    h = memcache._Host( cache_host )
    h.connect()
    h.send_cmd( 'stats' )
    stats = {}
    # NOTE(review): (\w+) does not match stat values containing dots or
    # colons (e.g. rusage_user "0.12"), so such stats are silently dropped.
    pat = re.compile( r'STAT (\w+) (\w+)' )
    l = '' ;
    # Read until the server's terminating END line.
    while l.find( 'END' ) < 0 :
        l = h.readline()
        if verbose:
            print l
        m = pat.match( l )
        if m :
            stats[ m.group(1) ] = m.group(2)
    h.close_socket()
    if verbose:
        print stats
    items = int( stats[ 'curr_items' ] )
    bytes = int( stats[ 'bytes' ] )
    # Fall back to the current byte count if the server reports no limit.
    limit_maxbytes = int( stats[ 'limit_maxbytes' ] ) or bytes
    current_conns = int( stats[ 'curr_connections' ] )
    print "MemCache status for %s" % ( CACHE_BACKEND )
    print "%d items using %d of %d" % ( items, bytes, limit_maxbytes )
    print "%5.2f%% full" % ( 100.0 * bytes / limit_maxbytes )
    print "%d connections being handled" % ( current_conns )
misty-/plugin.video.youtube | resources/lib/youtube/helper/tv.py | 26 | 1416 | __author__ = 'bromix'
from resources.lib import kodion
from resources.lib.youtube.helper import utils
from resources.lib.kodion.items.video_item import VideoItem
def my_subscriptions_to_items(provider, context, json_data):
    """Convert a 'my subscriptions' JSON payload into playable list items.

    Builds one VideoItem per entry, enriches them with video info and
    channel fanart, and appends a NextPageItem when the payload indicates
    more results are available.
    """
    result = []
    video_id_dict = {}
    for item in json_data.get('items', []):
        video_id = item['id']
        video_item = VideoItem(item['title'],
                               uri=context.create_uri(['play'],
                                                      {'video_id': video_id}))
        result.append(video_item)
        video_id_dict[video_id] = video_item

    channel_item_dict = {}
    utils.update_video_infos(provider, context, video_id_dict,
                             channel_items_dict=channel_item_dict)
    utils.update_fanarts(provider, context, channel_item_dict)

    # Pagination: either an explicit token or a 'continue' flag signals
    # that another page can be fetched.
    next_page_token = json_data.get('next_page_token', '')
    if next_page_token or json_data.get('continue', False):
        new_params = {}
        new_params.update(context.get_params())
        new_params['next_page_token'] = next_page_token
        new_params['offset'] = int(json_data.get('offset', 0))
        new_context = context.clone(new_params=new_params)
        current_page = int(new_context.get_param('page', 1))
        result.append(kodion.items.NextPageItem(
            new_context, current_page,
            fanart=provider.get_fanart(new_context)))
    return result
| gpl-2.0 |
fitzgen/servo | tests/wpt/web-platform-tests/html/rendering/replaced-elements/tools/gen-svgsizing-tests.py | 266 | 1985 | from string import Template
import os
import sys
# HTML skeleton for one generated WPT test; $placeholder,
# $placeholderHeightAttr and $generator are filled in via
# string.Template.substitute() in the loop below.
template = Template("""<!DOCTYPE html>
<!-- This file is generated by $generator -->
<html>
<head>
<title>SVG sizing: <$placeholder></title>
<meta name=timeout content=long>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="../resources/svg-sizing.js"></script>
<style>
#testContainer {
position: absolute;
left: 0;
top: 0;
width: 800px;
height: 600px
}
iframe { border: 0 }
</style>
<link rel="help" href="http://www.w3.org/TR/CSS2/visudet.html#inline-replaced-width">
<link rel="help" href="http://www.w3.org/TR/CSS2/visudet.html#inline-replaced-height">
<link rel="help" href="https://html.spec.whatwg.org/multipage/#replaced-elements">
<link rel="help" href="https://html.spec.whatwg.org/multipage/#attr-dim-width">
<link rel="help" href="http://www.w3.org/TR/SVG/coords.html#ViewportSpace">
</head>
<body>
<div id="log"></div>
<div id="testContainer"></div>
<div id="demo"></div>
<script src="svg-embedded-sizing.js"></script>
<script>testPlaceholderWithHeight("$placeholder", $placeholderHeightAttr)</script>
</body>
</html>
""")
# Embedding elements under test, the height-attribute variants applied to
# each (JS literals), and the matching filename suffixes (kept in lockstep
# by index with placeholderHeightAttrs).
placeholders = [ "object", "iframe", "img" ]
placeholderHeightAttrs = [ "null", "'100px'", "'100%'" ]
placeholderHeightAttrsDescriptions = [ "auto", "fixed", "percentage" ]
try:
    os.makedirs("../svg-embedded-sizing")
except OSError:
    # Directory already exists (or cannot be created); proceed either way,
    # matching the original best-effort behavior.
    pass

for placeholder in placeholders:
    for i, placeholderHeightAttr in enumerate(placeholderHeightAttrs):
        testContent = template.substitute(placeholder=placeholder, placeholderHeightAttr=placeholderHeightAttr, generator=sys.argv[0])
        filename = "../svg-embedded-sizing/svg-in-%s-%s.html" % (placeholder, placeholderHeightAttrsDescriptions[i])
        # `with` guarantees the handle is closed even if write() fails.
        with open(filename, "w") as outfile:
            outfile.write(testContent)
| mpl-2.0 |
roadmapper/ansible | test/units/modules/network/routeros/routeros_module.py | 52 | 2521 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Return the contents of fixture *name*, memoized per path.

    The raw text is parsed as JSON when possible; otherwise the plain
    string is returned.  Results are cached in the module-level
    ``fixture_data`` dict, keyed by the fixture's full path.
    """
    fixture_file = os.path.join(fixture_path, name)
    if fixture_file in fixture_data:
        return fixture_data[fixture_file]
    with open(fixture_file) as handle:
        raw = handle.read()
    try:
        parsed = json.loads(raw)
    except Exception:
        parsed = raw  # not JSON; keep the raw text
    fixture_data[fixture_file] = parsed
    return parsed
class TestRouterosModule(ModuleTestCase):
    """Common harness for RouterOS module unit tests.

    Concrete test cases provide ``self.module`` (the module under test) and
    typically override :meth:`load_fixtures`; the helpers below run the
    module and assert on the JSON payload it exits with.
    """

    def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
        """Run the module and verify its outcome.

        :param failed: expect the module run to fail.
        :param changed: expected value of the ``changed`` result flag.
        :param commands: if given, the exact commands the module should report.
        :param sort: compare ``commands`` order-insensitively.
        :param defaults: accepted for signature compatibility; not used here.
        :returns: the result dict the module exited with.
        """
        self.load_fixtures(commands)
        if failed:
            result = self.failed()
            self.assertTrue(result['failed'], result)
        else:
            result = self.changed(changed)
            self.assertEqual(result['changed'], changed, result)
        if commands is not None:
            if sort:
                self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
            else:
                self.assertEqual(commands, result['commands'], result['commands'])
        return result

    def failed(self):
        """Run the module expecting failure; return the fail-json payload."""
        with self.assertRaises(AnsibleFailJson) as exc:
            self.module.main()
        result = exc.exception.args[0]
        self.assertTrue(result['failed'], result)
        return result

    def changed(self, changed=False):
        """Run the module expecting success; return the exit-json payload."""
        with self.assertRaises(AnsibleExitJson) as exc:
            self.module.main()
        result = exc.exception.args[0]
        self.assertEqual(result['changed'], changed, result)
        return result

    def load_fixtures(self, commands=None):
        # Hook for subclasses to install device-output fixtures; no-op here.
        pass
| gpl-3.0 |
dukhlov/oslo.messaging | oslo_messaging/notify/logger.py | 4 | 2706 | # Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for the Python logging package that sends log records as a notification.
"""
import logging
from oslo_config import cfg
from oslo_messaging.notify import notifier
class LoggingNotificationHandler(logging.Handler):
    """Handler for logging to the messaging notification system.

    Each time the application logs a message using the :py:mod:`logging`
    module, it will be sent as a notification. The severity used for the
    notification will be the same as the one used for the log record.

    This can be used into a Python logging configuration this way::

      [handler_notifier]
      class=oslo_messaging.LoggingNotificationHandler
      level=ERROR
      args=('rabbit:///')
    """

    CONF = cfg.CONF
    """Default configuration object used, subclass this class if you want to
    use another one.
    """

    def __init__(self, url, publisher_id=None, driver=None,
                 topic=None, serializer=None):
        # Build the notifier up front; `url` selects the transport (e.g.
        # 'rabbit:///'); the remaining arguments are passed through to
        # oslo_messaging.notify.Notifier unchanged.
        self.notifier = notifier.Notifier(
            notifier.get_notification_transport(self.CONF, url),
            publisher_id, driver,
            topic,
            serializer() if serializer else None)  # serializer is a class, instantiated here
        logging.Handler.__init__(self)

    def emit(self, record):
        """Emit the log record to the messaging notification system.

        :param record: A log record to emit.
        """
        # Map the record level to the notifier method of the same name
        # (e.g. ERROR -> notifier.error); records at levels with no matching
        # notifier method are silently dropped.
        method = getattr(self.notifier, record.levelname.lower(), None)
        if not method:
            return
        method(
            {},
            'logrecord',
            {
                'name': record.name,
                'levelno': record.levelno,
                'levelname': record.levelname,
                'exc_info': record.exc_info,
                'pathname': record.pathname,
                'lineno': record.lineno,
                'msg': record.getMessage(),
                'funcName': record.funcName,
                'thread': record.thread,
                'processName': record.processName,
                'process': record.process,
                'extra': getattr(record, 'extra', None),
            }
        )
| apache-2.0 |
Tatsh-ansible/ansible | lib/ansible/module_utils/facts/system/dns.py | 232 | 2678 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
class DnsFactCollector(BaseFactCollector):
    """Collect resolver configuration facts from /etc/resolv.conf."""

    name = 'dns'
    _fact_ids = set()

    def collect(self, module=None, collected_facts=None):
        """Parse /etc/resolv.conf and return its settings under the 'dns' key.

        Comment lines (starting with '#' or ';'), blank lines and unknown
        keywords are ignored.  If the file is missing or unreadable, an
        empty 'dns' dict is returned.
        """
        # TODO: flatten
        dns = {}
        for raw_line in get_file_content('/etc/resolv.conf', '').splitlines():
            if raw_line.startswith(('#', ';')) or not raw_line.strip():
                continue
            fields = raw_line.split()
            if not fields:
                continue
            keyword, values = fields[0], fields[1:]
            if keyword == 'nameserver':
                # Accumulate across multiple 'nameserver' lines.
                dns.setdefault('nameservers', []).extend(values)
            elif keyword == 'domain':
                if values:
                    dns['domain'] = values[0]
            elif keyword == 'search':
                # A later 'search' line replaces an earlier one.
                dns['search'] = list(values)
            elif keyword == 'sortlist':
                dns['sortlist'] = list(values)
            elif keyword == 'options':
                dns['options'] = {}
                for option in values:
                    opt_name, _, opt_value = option.partition(':')
                    # A bare option (or one with an empty value) is True.
                    dns['options'][opt_name] = opt_value if opt_value else True
        return {'dns': dns}
| gpl-3.0 |
iwhiz/Algorist | fibonacci_dp.py | 1 | 1163 | # This function uses the dynamic programming concept: it stores previously computed values rather
# than recomputing them every time, as plain recursion would.
import time
# This version of fibonacci uses dynamic programming concept.
# First check the normal fibonacci.py for better understanding
def fibo_dp(n: int) -> int:
    """Return the n-th Fibonacci number via memoized ("top-down" DP) recursion.

    Bug fix: the original created a fresh, empty ``memo`` dict on every
    call -- including every recursive call -- so ``n < len(memo)`` was never
    true, nothing was ever cached, and the function ran in exponential time
    exactly like the naive version.  The cache now lives in a single dict
    shared by the whole recursion, giving O(n) time.

    Note: as in the original, any ``n <= 2`` (including 0 and negative
    values) yields 1.
    """
    memo = {}  # maps k -> fib(k); shared by every recursive call below

    def fib(k: int) -> int:
        if k in memo:
            # Already computed: reuse instead of recursing again.
            return memo[k]
        if k <= 2:
            result = 1
        else:
            result = fib(k - 1) + fib(k - 2)
        memo[k] = result  # store it for future use
        return result

    return fib(n)
if __name__ == "__main__":
    # time.clock() was deprecated in Python 3.3 and removed in 3.8;
    # time.perf_counter() is the portable high-resolution replacement.
    start_time = time.perf_counter()
    print(fibo_dp(15))  # 15th Fibonacci number (610); original comment wrongly said "700th"
    print(time.perf_counter() - start_time)  # elapsed seconds
| mit |
highweb-project/highweb-webcl-html5spec | chrome/common/extensions/docs/server2/redirector_test.py | 44 | 7508 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from compiled_file_system import CompiledFileSystem
from object_store_creator import ObjectStoreCreator
from redirector import Redirector
from test_file_system import TestFileSystem
from third_party.json_schema_compiler.json_parse import Parse
HOST = 'localhost/'
file_system = TestFileSystem({
'redirects.json': json.dumps({
'foo/...': 'apps/...',
'': '/index.html',
'home': 'index.html',
'index.html': 'http://something.absolute.com/'
}),
'apps': {
'redirects.json': json.dumps({
'': '../index.html',
'index.html': 'about_apps.html',
'foo.html': '/bar.html',
})
},
'extensions': {
'redirects.json': json.dumps({
'manifest': 'manifest.html',
'tabs': 'tabs.html',
'dev/...': '...',
'a/very/long/dir/chain/...': 'short/...',
'_short/...': 'another/long/chain/...',
'r1/...': 'r2/r1/...',
'r2/r1/...': 'r3/...',
'r3/...': 'r4/...',
'r5/...': 'r6/...',
'nofile1/...': 'nofile2/...',
'noredirects1/...': 'noredirects2/...'
}),
'manifest': {
'redirects.json': json.dumps({
'': '../manifest.html',
'more-info': 'http://lmgtfy.com'
}),
},
'stable': {
'redirects.json': json.dumps({
'tabs': 'tabs.html'
}),
'manifest': {
'redirects.json': json.dumps({
'storage': 'storage.html'
})
},
},
'dev': {
'redirects.json': json.dumps({
'tabs': 'tabs.html',
'manifest': 'manifest.html'
}),
'manifest': {
'redirects.json': json.dumps({
'storage': 'storage.html'
})
}
},
'r4': {
'redirects.json': json.dumps({
'manifest': 'manifest.html'
})
},
'r6': {
'redirects.json': json.dumps({
'...': 'directory/...'
}),
'directory': {
'redirects.json': json.dumps({
'manifest': 'manifest.html'
}),
'manifest': 'manifest.html'
}
},
'short': {
'redirects.json': json.dumps({
'index': 'index.html'
})
},
'another': {
'long': {
'chain': {
'redirects.json': json.dumps({
'index': 'index.html'
})
}
}
},
'nofile': {
'redirects.json': json.dumps({
})
}
},
'priority': {
'redirects.json': json.dumps({
'directory/...': 'GOOD/...'
}),
'directory': {
'redirects.json': json.dumps({
'...': '../BAD/...'
}),
}
},
'relative_directory': {
'redirects.json': json.dumps({
'...': '../...'
})
},
'infinite_redirect': {
'redirects.json': json.dumps({
'...': 'loop/...'
}),
'loop': {
'redirects.json': json.dumps({
'...': './...'
})
}
},
'parent_redirect': {
'redirects.json': json.dumps({
'a/...': 'b/...'
})
}
})
class RedirectorTest(unittest.TestCase):
def setUp(self):
self._redirector = Redirector(
CompiledFileSystem.Factory(ObjectStoreCreator.ForTest()),
file_system)
def testExternalRedirection(self):
self.assertEqual(
'http://something.absolute.com/',
self._redirector.Redirect(HOST, 'index.html'))
self.assertEqual(
'http://lmgtfy.com',
self._redirector.Redirect(HOST, 'extensions/manifest/more-info'))
def testAbsoluteRedirection(self):
self.assertEqual(
'/index.html', self._redirector.Redirect(HOST, ''))
self.assertEqual(
'/bar.html', self._redirector.Redirect(HOST, 'apps/foo.html'))
def testRelativeRedirection(self):
self.assertEqual(
'apps/about_apps.html',
self._redirector.Redirect(HOST, 'apps/index.html'))
self.assertEqual(
'extensions/manifest.html',
self._redirector.Redirect(HOST, 'extensions/manifest/'))
self.assertEqual(
'extensions/manifest.html',
self._redirector.Redirect(HOST, 'extensions/manifest'))
self.assertEqual(
'index.html', self._redirector.Redirect(HOST, 'apps/'))
self.assertEqual(
'index.html', self._redirector.Redirect(HOST, 'home'))
def testNotFound(self):
self.assertEqual(
None, self._redirector.Redirect(HOST, 'not/a/real/path'))
self.assertEqual(
None, self._redirector.Redirect(HOST, 'public/apps/okay.html'))
def testOldHosts(self):
self.assertEqual(
'https://developer.chrome.com/',
self._redirector.Redirect('code.google.com', ''))
def testRefresh(self):
self._redirector.Refresh().Get()
expected_paths = set([
'redirects.json',
'apps/redirects.json',
'extensions/redirects.json',
'extensions/manifest/redirects.json'
])
for path in expected_paths:
self.assertEqual(
Parse(file_system.ReadSingle(path).Get()),
# Access the cache's object store to see what files were hit during
# the cron run. Returns strings parsed as JSON.
# TODO(jshumway): Make a non hack version of this check.
self._redirector._cache._file_object_store.Get(
path).Get().cache_data)
def testDirectoryRedirection(self):
# Simple redirect.
self.assertEqual(
'extensions/manifest.html',
self._redirector.Redirect(HOST, 'extensions/dev/manifest'))
# Multiple hops with one file.
self.assertEqual(
'extensions/r4/manifest.html',
self._redirector.Redirect(HOST, 'extensions/r1/manifest'))
# Multiple hops w/ multiple redirection files.
self.assertEqual(
'extensions/r6/directory/manifest.html',
self._redirector.Redirect(HOST, 'extensions/r5/manifest'))
# Redirection from root directory redirector.
self.assertEqual(
'apps/about_apps.html',
self._redirector.Redirect(HOST, 'foo/index.html'))
# Short to long.
self.assertEqual(
'extensions/short/index.html',
self._redirector.Redirect(HOST, 'extensions/a/very/long/dir/chain/index'))
# Long to short.
self.assertEqual(
'extensions/another/long/chain/index.html',
self._redirector.Redirect(HOST, 'extensions/_short/index'))
# Directory redirection without a redirects.json in final directory.
self.assertEqual(
'extensions/noredirects2/file',
self._redirector.Redirect(HOST, 'extensions/noredirects1/file'))
# Directory redirection with redirects.json without rule for the filename.
self.assertEqual(
'extensions/nofile2/file',
self._redirector.Redirect(HOST, 'extensions/nofile1/file'))
# Relative directory path.
self.assertEqual(
'index.html',
self._redirector.Redirect(HOST, 'relative_directory/home'))
# Shallower directory redirects have priority.
self.assertEqual(
'priority/GOOD/index',
self._redirector.Redirect(HOST, 'priority/directory/index'))
# Don't infinitely redirect.
self.assertEqual('infinite_redirect/loop/index',
self._redirector.Redirect(HOST, 'infinite_redirect/index'))
# If a parent directory is redirected, redirect children properly.
self.assertEqual('parent_redirect/b/c/index',
self._redirector.Redirect(HOST, 'parent_redirect/a/c/index'))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
harisibrahimkv/django | tests/serializers/test_yaml.py | 74 | 5567 | import importlib
import unittest
from io import StringIO
from django.core import management, serializers
from django.core.serializers.base import DeserializationError
from django.test import SimpleTestCase, TestCase, TransactionTestCase
from .models import Author
from .tests import SerializersTestBase, SerializersTransactionTestBase
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
YAML_IMPORT_ERROR_MESSAGE = r'No module named yaml'
class YamlImportModuleMock:
    """Provides a wrapped import_module function to simulate yaml ImportError.

    In order to run tests that verify the behavior of the YAML serializer
    when run on a system that has yaml installed (like the django CI server),
    mock import_module, so that it raises an ImportError when the yaml
    serializer is being imported. The importlib.import_module() call is
    being made in the serializers.register_serializer().

    Refs: #12756
    """

    def __init__(self):
        # Keep a handle on the real importer so non-yaml imports still work
        # and so the test case's tearDownClass can restore it afterwards.
        self._import_module = importlib.import_module

    def import_module(self, module_path):
        """Delegate to the real import_module, failing only for the yaml serializer."""
        if module_path == serializers.BUILTIN_SERIALIZERS['yaml']:
            raise ImportError(YAML_IMPORT_ERROR_MESSAGE)
        return self._import_module(module_path)
class NoYamlSerializerTestCase(SimpleTestCase):
"""Not having pyyaml installed provides a misleading error
Refs: #12756
"""
@classmethod
def setUpClass(cls):
"""Removes imported yaml and stubs importlib.import_module"""
super().setUpClass()
cls._import_module_mock = YamlImportModuleMock()
importlib.import_module = cls._import_module_mock.import_module
# clear out cached serializers to emulate yaml missing
serializers._serializers = {}
@classmethod
def tearDownClass(cls):
"""Puts yaml back if necessary"""
super().tearDownClass()
importlib.import_module = cls._import_module_mock._import_module
# clear out cached serializers to clean out BadSerializer instances
serializers._serializers = {}
def test_serializer_pyyaml_error_message(self):
"""Using yaml serializer without pyyaml raises ImportError"""
jane = Author(name="Jane")
with self.assertRaises(ImportError):
serializers.serialize("yaml", [jane])
def test_deserializer_pyyaml_error_message(self):
"""Using yaml deserializer without pyyaml raises ImportError"""
with self.assertRaises(ImportError):
serializers.deserialize("yaml", "")
def test_dumpdata_pyyaml_error_message(self):
"""Calling dumpdata produces an error when yaml package missing"""
with self.assertRaisesMessage(management.CommandError, YAML_IMPORT_ERROR_MESSAGE):
management.call_command('dumpdata', format='yaml')
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
pkless_str = """- fields:
name: Reference
pk: null
model: serializers.category
- fields:
name: Non-fiction
model: serializers.category"""
mapping_ordering_str = """- model: serializers.article
pk: %(article_pk)s
fields:
author: %(author_pk)s
headline: Poker has no place on ESPN
pub_date: 2006-06-16 11:00:00
categories: [%(first_category_pk)s, %(second_category_pk)s]
meta_data: []
"""
@staticmethod
def _validate_output(serial_str):
try:
yaml.safe_load(StringIO(serial_str))
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
if "fields" in obj_dict and field_name in obj_dict["fields"]:
field_value = obj_dict["fields"][field_name]
# yaml.safe_load will return non-string objects for some
# of the fields we are interested in, this ensures that
# everything comes back as a string
if isinstance(field_value, str):
ret_list.append(field_value)
else:
ret_list.append(str(field_value))
return ret_list
def test_yaml_deserializer_exception(self):
with self.assertRaises(DeserializationError):
for obj in serializers.deserialize("yaml", "{"):
pass
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
| bsd-3-clause |
openweave/openweave-core | src/test-apps/happy/tests/standalone/wdmNext/test_weave_wdm_next_one_way_subscribe_05.py | 1 | 3250 | #!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Calls Weave WDM one way subscribe between nodes.
# C03: One way Subscribe: Root path. Null Version. Mutate data in Publisher. Client cancels
# L05: Stress One way Subscribe: Root path. Null Version. Mutate data in Publisher. Client cancels
#
from __future__ import absolute_import
from __future__ import print_function
import unittest
import set_test_path
from weave_wdm_next_test_base import weave_wdm_next_test_base
import WeaveUtilities
class test_weave_wdm_next_one_way_subscribe_05(weave_wdm_next_test_base):
    """Cases C03/L05: one-way subscribe, root path, null version; the
    publisher mutates its data and the client cancels the subscription."""

    def test_weave_wdm_next_one_way_subscribe_05(self):
        wdm_next_args = {}

        wdm_next_args['wdm_option'] = "one_way_subscribe"

        # Client-side knobs: two client traits, expected final client status
        # code 0, a single iteration with a 2 s inter-iteration delay, and
        # no data flipping on the client side.
        wdm_next_args['total_client_count'] = 2
        wdm_next_args['final_client_status'] = 0
        wdm_next_args['timer_client_period'] = 5000
        wdm_next_args['test_client_iterations'] = 1
        wdm_next_args['test_client_delay'] = 2000
        wdm_next_args['enable_client_flip'] = 0

        # Publisher-side knobs: data flipping enabled, i.e. the publisher
        # mutates its data during the test (per the case description above).
        wdm_next_args['total_server_count'] = 2
        wdm_next_args['final_server_status'] = 4
        wdm_next_args['timer_server_period'] = 4000
        wdm_next_args['enable_server_flip'] = 1

        # Reset both sides between iterations so runs are independent.
        wdm_next_args['client_clear_state_between_iterations'] = True
        wdm_next_args['server_clear_state_between_iterations'] = True

        # Expected log output: list of (regex, expected occurrence count).
        wdm_next_args['client_log_check'] = [('Client\[0\] \[(ALIVE|CONFM)\] EndSubscription Ref\(\d+\)', wdm_next_args['test_client_iterations'] * 1),
                                             ('Client->kEvent_OnNotificationProcessed', wdm_next_args['test_client_iterations'] * (wdm_next_args['total_server_count'] + 1)),
                                             ('Client\[0\] moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'] * 1)]
        wdm_next_args['server_log_check'] = [('Handler\[0\] \[(ALIVE|CONFM)\] CancelRequestHandler Ref\(\d+\)', wdm_next_args['test_client_iterations'] * 1),
                                             ('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'] * 1)]

        # Tag derived by stripping the 19-char 'test_weave_wdm_next' prefix
        # from the class name.
        wdm_next_args['test_tag'] = self.__class__.__name__[19:].upper()
        wdm_next_args['test_case_name'] = ['L05: Stress One way Subscribe: Root path, Null Version. Mutate data in Publisher. Client cancels']

        print('test file: ' + self.__class__.__name__)
        print("weave-wdm-next test C03 and L05")

        super(test_weave_wdm_next_one_way_subscribe_05, self).weave_wdm_next_test_base(wdm_next_args)
if __name__ == "__main__":
WeaveUtilities.run_unittest()
| apache-2.0 |
tedye/leetcode | Python/leetcode.037.sudoku-solver.py | 1 | 2045 | class Solution(object):
def solveSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
hset = [{'1','2','3','4','5','6','7','8','9'} for _ in range(9)]
vset = [{'1','2','3','4','5','6','7','8','9'} for _ in range(9)]
boxset = [{'1','2','3','4','5','6','7','8','9'}for _ in range(9)]
temp = self.solver(board,hset,vset,boxset)
board[:] = temp[:]
def solver(self, board, h, v, b):
q = []
for i in range(9):
for j in range(9):
if board[i][j] == '.':
q.append([(i,j), h[i], v[j], b[(i//3) * 3 + j // 3]])
else:
num = board[i][j]
h[i] -= {num}
v[j] -= {num}
b[(i//3) * 3 + j // 3] -= {num}
while q:
q.sort(key = lambda x: len(x[1] & x[2] & x[3]))
cur = q.pop(0)
avail = cur[1]&cur[2]&cur[3]
i = cur[0][0]
j = cur[0][1]
if len(avail) == 0:
return []
elif len(avail) == 1:
num = avail.pop()
h[i] -= {num}
v[j] -= {num}
b[(i//3) * 3 + j // 3] -= {num}
board[i][j] = num
else:
l = len(avail)
for k in range(l):
num = avail.pop()
h[i] -= {num}
v[j] -= {num}
b[(i//3) * 3 + j // 3] -= {num}
board[i][j] = num
temp = self.solver([x[:] for x in board], [set(a) for a in h], [set(a) for a in v], [set(a) for a in b])
if temp:
return temp
board[i][j] = '.'
h[i].add(num)
v[j].add(num)
b[(i//3) * 3 + j // 3].add(num)
return []
return board | mit |
bymerej/mwphb | maintenance/cssjanus/csslex.py | 172 | 3719 | #!/usr/bin/python
#
# Copyright 2007 Google Inc. All Rights Reserved.
"""CSS Lexical Grammar rules.
CSS lexical grammar from http://www.w3.org/TR/CSS21/grammar.html
"""
__author__ = ['elsigh@google.com (Lindsey Simon)',
'msamuel@google.com (Mike Samuel)']
# public symbols
__all__ = [ "NEWLINE", "HEX", "NON_ASCII", "UNICODE", "ESCAPE", "NMSTART", "NMCHAR", "STRING1", "STRING2", "IDENT", "NAME", "HASH", "NUM", "STRING", "URL", "SPACE", "WHITESPACE", "COMMENT", "QUANTITY", "PUNC" ]
# The comments below are mostly copied verbatim from the grammar.
# "@import" {return IMPORT_SYM;}
# "@page" {return PAGE_SYM;}
# "@media" {return MEDIA_SYM;}
# "@charset" {return CHARSET_SYM;}
KEYWORD = r'(?:\@(?:import|page|media|charset))'
# nl \n|\r\n|\r|\f ; a newline
NEWLINE = r'\n|\r\n|\r|\f'
# h [0-9a-f] ; a hexadecimal digit
HEX = r'[0-9a-f]'
# nonascii [\200-\377]
NON_ASCII = r'[\200-\377]'
# unicode \\{h}{1,6}(\r\n|[ \t\r\n\f])?
UNICODE = r'(?:(?:\\' + HEX + r'{1,6})(?:\r\n|[ \t\r\n\f])?)'
# escape {unicode}|\\[^\r\n\f0-9a-f]
ESCAPE = r'(?:' + UNICODE + r'|\\[^\r\n\f0-9a-f])'
# nmstart [_a-z]|{nonascii}|{escape}
NMSTART = r'(?:[_a-z]|' + NON_ASCII + r'|' + ESCAPE + r')'
# nmchar [_a-z0-9-]|{nonascii}|{escape}
NMCHAR = r'(?:[_a-z0-9-]|' + NON_ASCII + r'|' + ESCAPE + r')'
# ident -?{nmstart}{nmchar}*
IDENT = r'-?' + NMSTART + NMCHAR + '*'
# name {nmchar}+
NAME = NMCHAR + r'+'
# hash
HASH = r'#' + NAME
# string1 \"([^\n\r\f\\"]|\\{nl}|{escape})*\" ; "string"
STRING1 = r'"(?:[^\"\\]|\\.)*"'
# string2 \'([^\n\r\f\\']|\\{nl}|{escape})*\' ; 'string'
STRING2 = r"'(?:[^\'\\]|\\.)*'"
# string {string1}|{string2}
STRING = '(?:' + STRING1 + r'|' + STRING2 + ')'
# num [0-9]+|[0-9]*"."[0-9]+
NUM = r'(?:[0-9]*\.[0-9]+|[0-9]+)'
# s [ \t\r\n\f]
SPACE = r'[ \t\r\n\f]'
# w {s}*
WHITESPACE = '(?:' + SPACE + r'*)'
# url special chars
URL_SPECIAL_CHARS = r'[!#$%&*-~]'
# url chars ({url_special_chars}|{nonascii}|{escape})*
URL_CHARS = r'(?:%s|%s|%s)*' % (URL_SPECIAL_CHARS, NON_ASCII, ESCAPE)
# url
URL = r'url\(%s(%s|%s)%s\)' % (WHITESPACE, STRING, URL_CHARS, WHITESPACE)
# comments
# see http://www.w3.org/TR/CSS21/grammar.html
COMMENT = r'/\*[^*]*\*+([^/*][^*]*\*+)*/'
# {E}{M} {return EMS;}
# {E}{X} {return EXS;}
# {P}{X} {return LENGTH;}
# {C}{M} {return LENGTH;}
# {M}{M} {return LENGTH;}
# {I}{N} {return LENGTH;}
# {P}{T} {return LENGTH;}
# {P}{C} {return LENGTH;}
# {D}{E}{G} {return ANGLE;}
# {R}{A}{D} {return ANGLE;}
# {G}{R}{A}{D} {return ANGLE;}
# {M}{S} {return TIME;}
# {S} {return TIME;}
# {H}{Z} {return FREQ;}
# {K}{H}{Z} {return FREQ;}
# % {return PERCENTAGE;}
UNIT = r'(?:em|ex|px|cm|mm|in|pt|pc|deg|rad|grad|ms|s|hz|khz|%)'
# {num}{UNIT|IDENT} {return NUMBER;}
QUANTITY = '%s(?:%s%s|%s)?' % (NUM, WHITESPACE, UNIT, IDENT)
# "<!--" {return CDO;}
# "-->" {return CDC;}
# "~=" {return INCLUDES;}
# "|=" {return DASHMATCH;}
# {w}"{" {return LBRACE;}
# {w}"+" {return PLUS;}
# {w}">" {return GREATER;}
# {w}"," {return COMMA;}
PUNC = r'<!--|-->|~=|\|=|[\{\+>,:;]'
| gpl-2.0 |
prark/bitcoinxt | qa/rpc-tests/receivedby.py | 140 | 7345 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_sub_array_from_array(object_array, to_match):
    '''
    Find and return the first entry of *object_array* (a list of dicts)
    whose items include every key/value pair in *to_match*.

    *to_match* should be a unique identifier of a sub array.  Returns []
    when no entry matches (callers test the result with len()).  May raise
    KeyError if an entry lacks one of the matched keys.
    '''
    # Fixes over the original: drops the unused `num_matched` counter and
    # short-circuits via all() instead of checking every key after a
    # mismatch has already been found.
    for item in object_array:
        if all(item[key] == value for key, value in to_match.items()):
            return item
    return []
def check_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Pass in an array of JSON objects, a dictionary with key/value pairs to
    match against, and another dictionary with expected key/value pairs.
    If the should_not_find flag is true, to_match should not be found in
    object_array.
    """
    if should_not_find:
        # Nothing specific is expected of a match we hope not to see.
        expected = {}
    matched = 0
    for item in object_array:
        # Evaluate every pair (no early break) so a missing key always
        # raises KeyError, exactly like the original loop did.
        mismatches = [key for key, value in to_match.items() if item[key] != value]
        if mismatches:
            continue
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        matched += 1
    if matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if matched > 0 and should_not_find:
        raise AssertionError("Objects was matched %s" % (str(to_match)))
class ReceivedByTest(BitcoinTestFramework):
def run_test(self):
'''
listreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
#Check not listed in listreceivedbyaddress because has 0 confirmations
check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{ },
True)
#Bury Tx under 10 block so it will be returned by listreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
#With min confidence < 10
check_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
#With min confidence > 10, should not find Tx
check_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
#Empty Tx
addr = self.nodes[1].getnewaddress()
check_array_result(self.nodes[1].listreceivedbyaddress(0,True),
{"address":addr},
{"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
'''
getreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
#Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr,0)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
'''
listreceivedbyaccount + getreceivedbyaccount Test
'''
#set pre-state
addrArr = self.nodes[1].getnewaddress()
account = self.nodes[1].getaccount(addrArr)
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbyaccount should return received_by_account_json because of 0 confirmations
check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
received_by_account_json)
# getreceivedbyaddress should return same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account:
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
self.nodes[1].generate(10)
self.sync_all()
# listreceivedbyaccount should return updated account balance
check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
{"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
# getreceivedbyaddress should return updates balance
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account + Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
#Create a new account named "mynewaccount" that has a 0 balance
self.nodes[1].getaccountaddress("mynewaccount")
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
# Test includeempty of listreceivedbyaccount
if received_by_account_json["amount"] != Decimal("0.0"):
raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
# Test getreceivedbyaccount for 0 amount accounts
balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
if __name__ == '__main__':
ReceivedByTest().main()
| mit |
scripteed/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/variables/filelist/gyptest-filelist-golden.py | 228 | 1584 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Test variable expansion of '<|(list.txt ...)' syntax commands.

Runs gyp on src/filelist.gyp with --debug variables, compares the debug
transcript against a checked-in copy, then compares the generated .gypd
and names.txt outputs against golden files.
"""

import os
import sys

import TestGyp

test = TestGyp.TestGyp(format='gypd')

expect = test.read('filelist.gyp.stdout')
if sys.platform == 'win32':
  # The transcript was recorded on a POSIX host; adapt path separators and
  # line endings so the comparison also works on Windows.
  expect = expect.replace('/', r'\\').replace('\r\n', '\n')

test.run_gyp('src/filelist.gyp',
             '--debug', 'variables',
             stdout=expect, ignore_line_numbers=True)

# Verify the filelist.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('src/filelist.gypd').replace(
    '\r', '').replace('\\\\', '/')
expect = test.read('filelist.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
  print "Unexpected contents of `src/filelist.gypd'"
  test.diff(expect, contents, 'src/filelist.gypd ')
  test.fail_test()

contents = test.read('src/names.txt')
expect = 'John\nJacob\nJingleheimer\nSchmidt\n'
if not test.match(contents, expect):
  print "Unexpected contents of `src/names.txt'"
  test.diff(expect, contents, 'src/names.txt ')
  test.fail_test()

test.pass_test()
| gpl-3.0 |
W-M-D/ergotelescope.org | node_modules/grunt-sass/node_modules/node-sass/node_modules/node-gyp/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalize and sort an xml. It also fetches all the properties
inside linked vsprops and include them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
  """Three-way comparator ordering (name, value) tuples by their first item.

  The sign expression below is the documented replacement for the
  Python-2-only builtin cmp(), so this comparator now works under both
  Python 2 and Python 3 (it is still usable as a py2 sort cmp function).
  """

  def __call__(self, x, y):
    lhs, rhs = x[0], y[0]
    # Equivalent to cmp(lhs, rhs): -1, 0 or 1.
    return (lhs > rhs) - (lhs < rhs)
class CmpNode(object):
  """Three-way comparator ordering XML nodes by a canonical string key.

  The key concatenates the node name, the node value (if any) and, for
  nodes with attributes, the "Name" attribute followed by every attribute
  pair sorted by attribute name.  Replaces the Python-2-only cmp() and
  list.sort(cmpfunc) idioms with portable equivalents.
  """

  def __call__(self, x, y):
    def get_string(node):
      # Build the canonical sort key for one node.
      parts = ["node", node.nodeName]
      if node.nodeValue:
        parts.append(node.nodeValue)

      if node.attributes:
        # We first sort by name, if present.
        parts.append(node.getAttribute("Name"))
        # Stable sort by attribute name matches the old CmpTuple ordering.
        for name, value in sorted(node.attributes.items(),
                                  key=lambda pair: pair[0]):
          parts.append(name)
          parts.append(value)
      return "".join(parts)

    lhs, rhs = get_string(x), get_string(y)
    # Equivalent to cmp(lhs, rhs): -1, 0 or 1.
    return (lhs > rhs) - (lhs < rhs)
def PrettyPrintNode(node, indent=0):
  """Recursively print `node` as indented XML to stdout.

  Attributes are emitted one per line, sorted by name, so the output is
  stable and diff-friendly.  `indent` is the current indentation in spaces.
  """
  if node.nodeType == Node.TEXT_NODE:
    if node.data.strip():
      print '%s%s' % (' '*indent, node.data.strip())
    return

  if node.childNodes:
    # Merge adjacent text nodes before printing.
    node.normalize()

  # Get the number of attributes
  attr_count = 0
  if node.attributes:
    attr_count = node.attributes.length

  # Print the main tag
  if attr_count == 0:
    print '%s<%s>' % (' '*indent, node.nodeName)
  else:
    print '%s<%s' % (' '*indent, node.nodeName)

    all_attributes = []
    for (name, value) in node.attributes.items():
      all_attributes.append((name, value))
    all_attributes.sort(CmpTuple())
    for (name, value) in all_attributes:
      print '%s %s="%s"' % (' '*indent, name, value)
    print '%s>' % (' '*indent)

  if node.nodeValue:
    print '%s %s' % (' '*indent, node.nodeValue)

  for sub_node in node.childNodes:
    PrettyPrintNode(sub_node, indent=indent+2)
  print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
  """Return the node's children as a flat list, expanding nested Filters.

  A filter named "_excluded_files" is dropped entirely: it contributes no
  nodes at all.
  """
  if node.attributes and node.getAttribute('Name') == '_excluded_files':
    # We don't add the "_excluded_files" filter.
    return []

  flattened = []
  for child in node.childNodes:
    if child.nodeName == 'Filter':
      # Nested filters are dissolved into the parent's node list.
      flattened.extend(FlattenFilter(child))
    else:
      flattened.append(child)
  return flattened
def FixFilenames(filenames, current_directory):
  """Normalize a list of path strings to absolute paths.

  Args:
    filenames: iterable of path strings; empty entries are dropped.
    current_directory: directory that relative paths are resolved against.

  Returns:
    A new list with REPLACEMENTS applied, surrounding quotes/spaces
    stripped, and every entry made absolute -- except entries starting
    with '$' (unexpanded MSVS macros), which are kept verbatim.

  Unlike the previous implementation this no longer calls os.chdir(), so
  the process-wide working directory is left untouched (the old code
  changed it on every call and never restored it).
  """
  new_list = []
  for filename in filenames:
    if not filename:
      continue
    for key in REPLACEMENTS:
      filename = filename.replace(key, REPLACEMENTS[key])
    filename = filename.strip('"\' ')
    if filename.startswith('$'):
      new_list.append(filename)
    else:
      # os.path.abspath(p) with cwd == current_directory is exactly
      # normpath(join(current_directory, p)); do that without mutating
      # the real CWD.
      new_list.append(
          os.path.normpath(os.path.join(current_directory, filename)))
  return new_list
def AbsoluteNode(node):
  """Makes all the properties we know about in this node absolute."""
  if node.attributes:
    for (name, value) in node.attributes.items():
      if name in ['InheritedPropertySheets', 'RelativePath',
                  'AdditionalIncludeDirectories',
                  'IntermediateDirectory', 'OutputDirectory',
                  'AdditionalLibraryDirectories']:
        # We want to fix up these paths
        path_list = value.split(';')
        # ARGUMENTS[1] is the path of the .vcproj being processed (set in
        # main), so relative paths resolve against its directory.
        new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
        node.setAttribute(name, ';'.join(new_list))
      if not value:
        # An attribute whose original value was empty carries no
        # information; drop it to keep the output clean.
        node.removeAttribute(name)
def CleanupVcproj(node):
  """Recursively normalize `node` in place.

  Absolutizes known path attributes, strips whitespace from text nodes,
  sorts and dedupes ';'-separated attribute values, flattens Filter
  elements, and re-inserts children in sorted order, dropping empty Tool
  nodes and all UserMacro nodes.
  """
  # For each sub node, we call recursively this function.
  for sub_node in node.childNodes:
    AbsoluteNode(sub_node)
    CleanupVcproj(sub_node)

  # Normalize the node, and remove all extraneous whitespaces.
  for sub_node in node.childNodes:
    if sub_node.nodeType == Node.TEXT_NODE:
      sub_node.data = sub_node.data.replace("\r", "")
      sub_node.data = sub_node.data.replace("\n", "")
      sub_node.data = sub_node.data.rstrip()

  # Fix all the semicolon separated attributes to be sorted, and we also
  # remove the dups.
  if node.attributes:
    for (name, value) in node.attributes.items():
      sorted_list = sorted(value.split(';'))
      unique_list = []
      for i in sorted_list:
        if not unique_list.count(i):
          unique_list.append(i)
      node.setAttribute(name, ';'.join(unique_list))
      if not value:
        node.removeAttribute(name)

  if node.childNodes:
    node.normalize()

  # For each node, take a copy, and remove it from the list.
  node_array = []
  while node.childNodes and node.childNodes[0]:
    # Take a copy of the node and remove it from the list.
    current = node.childNodes[0]
    node.removeChild(current)

    # If the child is a filter, we want to append all its children
    # to this same list.
    if current.nodeName == 'Filter':
      node_array.extend(FlattenFilter(current))
    else:
      node_array.append(current)

  # Sort the list.
  node_array.sort(CmpNode())

  # Insert the nodes in the correct order.
  for new_node in node_array:
    # But don't append empty tool node.
    if new_node.nodeName == 'Tool':
      # A Tool with a single attribute has only its Name; it is empty.
      if new_node.attributes and new_node.attributes.length == 1:
        # This one was empty.
        continue
    if new_node.nodeName == 'UserMacro':
      continue
    node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
  """Return every <Configuration> element under <Configurations>.

  (The function name keeps its historical typo because callers use it.)
  """
  #TODO(nsylvain): Find a better way to navigate the xml.
  return [config
          for top in vcproj.childNodes if top.nodeName == "Configurations"
          for config in top.childNodes if config.nodeName == "Configuration"]
def GetChildrenVsprops(filename):
  """Return absolute paths of the property sheets inherited by *filename*."""
  dom = parse(filename)
  root = dom.documentElement
  if not root.attributes:
    return []
  vsprops = root.getAttribute('InheritedPropertySheets')
  # FixFilenames drops empty entries, so a missing attribute yields [].
  return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
def SeekToNode(node1, child2):
  """Find the child of node1 matching child2's tag name and Name attribute.

  Returns None when child2 is a text node, when child2 has no Name
  attribute (nothing to merge on), or when node1 has no matching child.
  """
  # A text node does not have properties.
  if child2.nodeType == Node.TEXT_NODE:
    return None

  wanted_name = child2.getAttribute("Name")
  if not wanted_name:
    # There is no name. We don't know how to merge.
    return None

  # Scan node1's children for a node with the same tag and Name.
  for candidate in node1.childNodes:
    if (candidate.nodeName == child2.nodeName and
        candidate.getAttribute("Name") == wanted_name):
      return candidate

  # No match. We give up.
  return None
def MergeAttributes(node1, node2):
  """Copy node2's attributes onto node1, concatenating conflicting values.

  The 'Name' attribute is never merged, and any 'InheritedPropertySheets'
  attribute is removed from node1 afterwards since it is useless once the
  property sheets have been merged in.
  """
  # No attributes to merge?
  if not node2.attributes:
    return

  for name, incoming in node2.attributes.items():
    # Don't merge the 'Name' attribute.
    if name == 'Name':
      continue

    existing = node1.getAttribute(name)
    if not existing:
      # node1 does not have this attribute yet: take node2's value.
      node1.setAttribute(name, incoming)
    elif existing != incoming:
      # Both nodes define it with different values: keep both, joined.
      node1.setAttribute(name, ';'.join([existing, incoming]))
    # (equal values: leave node1 untouched)

    # Property sheet references are useless after merging; drop them.
    if name == 'InheritedPropertySheets':
      node1.removeAttribute(name)
def MergeProperties(node1, node2):
  """Recursively merge node2 (a vsprops tree) into node1 (the vcproj)."""
  MergeAttributes(node1, node2)
  for incoming in node2.childNodes:
    target = SeekToNode(node1, incoming)
    if not target:
      # No counterpart in node1: graft a deep copy of the whole subtree.
      node1.appendChild(incoming.cloneNode(True))
    else:
      MergeProperties(target, incoming)
def main(argv):
  """Main function of this vcproj prettifier.

  argv[1] is the .vcproj path; remaining arguments are key=value pairs
  added to the global REPLACEMENTS table used by FixFilenames.
  """
  global ARGUMENTS
  ARGUMENTS = argv

  # Make sure we have at least one parameter (the vcproj path).
  if len(argv) < 2:
    print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
           '[key2=value2]' % argv[0])
    return 1

  # Parse the keys
  for i in range(2, len(argv)):
    (key, value) = argv[i].split('=')
    REPLACEMENTS[key] = value

  # Open the vcproj and parse the xml.
  dom = parse(argv[1])

  # First thing we need to do is find the Configuration Node and merge them
  # with the vsprops they include.
  for configuration_node in GetConfiguationNodes(dom.documentElement):
    # Get the property sheets associated with this configuration.
    vsprops = configuration_node.getAttribute('InheritedPropertySheets')

    # Fix the filenames to be absolute.
    vsprops_list = FixFilenames(vsprops.strip().split(';'),
                                os.path.dirname(argv[1]))

    # Extend the list of vsprops with all vsprops contained in the current
    # vsprops.  (Appending while iterating is deliberate: it turns the loop
    # into a simple worklist traversal of transitively included sheets.)
    for current_vsprops in vsprops_list:
      vsprops_list.extend(GetChildrenVsprops(current_vsprops))

    # Now that we have all the vsprops, we need to merge them.
    for current_vsprops in vsprops_list:
      MergeProperties(configuration_node,
                      parse(current_vsprops).documentElement)

  # Now that everything is merged, we need to cleanup the xml.
  CleanupVcproj(dom.documentElement)

  # Finally, we use the pretty xml function to print the vcproj back to the
  # user.
  #print dom.toprettyxml(newl="\n")
  PrettyPrintNode(dom.documentElement)
  return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
TheTypoMaster/calligra | 3rdparty/google-breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/message_test.py | 253 | 15707 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import copy
import math
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import test_util
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
  """Return True iff val is a floating point NaN (pre-2.6 fallback)."""
  # NaN is the only value that compares unequal to itself.
  return not (val == val)
def isinf(val):
  """Return True iff val is positive or negative infinity (pre-2.6 fallback)."""
  # Infinity times zero equals NaN, while any finite value times zero is 0.0.
  # NaN itself is excluded first (it also fails val == val), since NaN * 0
  # is NaN as well.
  product = val * 0
  return val == val and product != product
def IsPosInf(val):
  """Return True iff val is positive infinity."""
  if not isinf(val):
    return False
  return val > 0
def IsNegInf(val):
  """Return True iff val is negative infinity."""
  if not isinf(val):
    return False
  return val < 0
class MessageTest(unittest.TestCase):
def testGoldenMessage(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
test_util.ExpectAllFieldsSet(self, golden_message)
self.assertTrue(golden_message.SerializeToString() == golden_data)
golden_copy = copy.deepcopy(golden_message)
self.assertTrue(golden_copy.SerializeToString() == golden_data)
def testGoldenExtensions(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertTrue(golden_message.SerializeToString() == golden_data)
golden_copy = copy.deepcopy(golden_message)
self.assertTrue(golden_copy.SerializeToString() == golden_data)
def testGoldenPackedMessage(self):
golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(all_set)
self.assertEquals(all_set, golden_message)
self.assertTrue(all_set.SerializeToString() == golden_data)
golden_copy = copy.deepcopy(golden_message)
self.assertTrue(golden_copy.SerializeToString() == golden_data)
def testGoldenPackedExtensions(self):
golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
golden_message = unittest_pb2.TestPackedExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertTrue(all_set.SerializeToString() == golden_data)
golden_copy = copy.deepcopy(golden_message)
self.assertTrue(golden_copy.SerializeToString() == golden_data)
def testPositiveInfinity(self):
golden_data = ('\x5D\x00\x00\x80\x7F'
'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
'\xCD\x02\x00\x00\x80\x7F'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.optional_float))
self.assertTrue(IsPosInf(golden_message.optional_double))
self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testNegativeInfinity(self):
golden_data = ('\x5D\x00\x00\x80\xFF'
'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
'\xCD\x02\x00\x00\x80\xFF'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.optional_float))
self.assertTrue(IsNegInf(golden_message.optional_double))
self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testNotANumber(self):
golden_data = ('\x5D\x00\x00\xC0\x7F'
'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
'\xCD\x02\x00\x00\xC0\x7F'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.optional_float))
self.assertTrue(isnan(golden_message.optional_double))
self.assertTrue(isnan(golden_message.repeated_float[0]))
self.assertTrue(isnan(golden_message.repeated_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testPositiveInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.packed_float[0]))
self.assertTrue(IsPosInf(golden_message.packed_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testNegativeInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\xFF'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.packed_float[0]))
self.assertTrue(IsNegInf(golden_message.packed_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testNotANumberPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\xC0\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.packed_float[0]))
self.assertTrue(isnan(golden_message.packed_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testExtremeFloatValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 127)
message.optional_float = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
message.optional_float = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_float = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)
message.optional_float = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -127)
message.optional_float = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
message.optional_float = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_float = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)
message.optional_float = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)
def testExtremeFloatValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 1023)
message.optional_double = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
message.optional_double = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_double = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)
message.optional_double = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -1023)
message.optional_double = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
message.optional_double = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_double = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)
message.optional_double = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testSortingRepeatedScalarFieldsDefaultComparator(self):
"""Check some different types with the default comparator."""
message = unittest_pb2.TestAllTypes()
# TODO(mattp): would testing more scalar types strengthen test?
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_int32.append(2)
message.repeated_int32.sort()
self.assertEqual(message.repeated_int32[0], 1)
self.assertEqual(message.repeated_int32[1], 2)
self.assertEqual(message.repeated_int32[2], 3)
message.repeated_float.append(1.1)
message.repeated_float.append(1.3)
message.repeated_float.append(1.2)
message.repeated_float.sort()
self.assertAlmostEqual(message.repeated_float[0], 1.1)
self.assertAlmostEqual(message.repeated_float[1], 1.2)
self.assertAlmostEqual(message.repeated_float[2], 1.3)
message.repeated_string.append('a')
message.repeated_string.append('c')
message.repeated_string.append('b')
message.repeated_string.sort()
self.assertEqual(message.repeated_string[0], 'a')
self.assertEqual(message.repeated_string[1], 'b')
self.assertEqual(message.repeated_string[2], 'c')
message.repeated_bytes.append('a')
message.repeated_bytes.append('c')
message.repeated_bytes.append('b')
message.repeated_bytes.sort()
self.assertEqual(message.repeated_bytes[0], 'a')
self.assertEqual(message.repeated_bytes[1], 'b')
self.assertEqual(message.repeated_bytes[2], 'c')
def testSortingRepeatedScalarFieldsCustomComparator(self):
"""Check some different types with custom comparator."""
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(lambda x,y: cmp(abs(x), abs(y)))
self.assertEqual(message.repeated_int32[0], -1)
self.assertEqual(message.repeated_int32[1], -2)
self.assertEqual(message.repeated_int32[2], -3)
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(lambda x,y: cmp(len(x), len(y)))
self.assertEqual(message.repeated_string[0], 'c')
self.assertEqual(message.repeated_string[1], 'bb')
self.assertEqual(message.repeated_string[2], 'aaa')
def testSortingRepeatedCompositeFieldsCustomComparator(self):
"""Check passing a custom comparator to sort a repeated composite field."""
message = unittest_pb2.TestAllTypes()
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(lambda x,y: cmp(x.bb, y.bb))
self.assertEqual(message.repeated_nested_message[0].bb, 1)
self.assertEqual(message.repeated_nested_message[1].bb, 2)
self.assertEqual(message.repeated_nested_message[2].bb, 3)
self.assertEqual(message.repeated_nested_message[3].bb, 4)
self.assertEqual(message.repeated_nested_message[4].bb, 5)
self.assertEqual(message.repeated_nested_message[5].bb, 6)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
endlessm/chromium-browser | third_party/catapult/third_party/Paste/tests/test_fileapp.py | 47 | 9766 | # (c) 2005 Ian Bicking, Clark C. Evans and contributors
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import time
import random
import os
import tempfile
try:
# Python 3
from email.utils import parsedate_tz, mktime_tz
except ImportError:
# Python 2
from rfc822 import parsedate_tz, mktime_tz
import six
from paste import fileapp
from paste.fileapp import *
from paste.fixture import *
# NOTE(haypo): don't use string.letters because the order of lower and upper
# case letters changes when locale.setlocale() is called for the first time
LETTERS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
def test_data():
    """DataApp serves its bytes as octet-stream with the right length."""
    harness = TestApp(DataApp(b'mycontent'))
    res = harness.get("/")
    assert res.header('content-type') == 'application/octet-stream'
    assert res.header('content-length') == '9'
    assert repr(res) == "<Response 200 OK 'mycontent'>"
    # The served content can be swapped after construction.
    harness.app.set_content(b"bingles")
    assert repr(harness.get("/")) == "<Response 200 OK 'bingles'>"
def test_cache():
    """Exercise DataApp.cache_control(): Cache-Control and Expires headers."""
    def build(*args, **kwargs):
        # Build a one-shot app with the given cache settings and return
        # the response for "/".
        app = DataApp(b"SomeContent")
        app.cache_control(*args, **kwargs)
        return TestApp(app).get("/")
    res = build()
    assert 'public' == res.header('cache-control')
    assert not res.header('expires', None)

    res = build(private=True)
    assert 'private' == res.header('cache-control')
    # Private/no-cache responses must be pre-expired.
    assert mktime_tz(parsedate_tz(res.header('expires'))) < time.time()

    res = build(no_cache=True)
    assert 'no-cache' == res.header('cache-control')
    assert mktime_tz(parsedate_tz(res.header('expires'))) < time.time()

    res = build(max_age=60, s_maxage=30)
    assert 'public, max-age=60, s-maxage=30' == res.header('cache-control')
    expires = mktime_tz(parsedate_tz(res.header('expires')))
    assert expires > time.time() + 58 and expires < time.time() + 61

    res = build(private=True, max_age=60, no_transform=True, no_store=True)
    assert 'private, no-store, no-transform, max-age=60' == \
           res.header('cache-control')
    # Fix: the parsed value was assigned but the old assertion re-parsed the
    # header and ignored it; assert on the stored value instead.
    expires = mktime_tz(parsedate_tz(res.header('expires')))
    assert expires < time.time()
def test_disposition():
def build(*args,**kwargs):
app = DataApp(b"SomeContent")
app.content_disposition(*args,**kwargs)
return TestApp(app).get("/")
res = build()
assert 'attachment' == res.header('content-disposition')
assert 'application/octet-stream' == res.header('content-type')
res = build(filename="bing.txt")
assert 'attachment; filename="bing.txt"' == \
res.header('content-disposition')
assert 'text/plain' == res.header('content-type')
res = build(inline=True)
assert 'inline' == res.header('content-disposition')
assert 'application/octet-stream' == res.header('content-type')
res = build(inline=True, filename="/some/path/bing.txt")
assert 'inline; filename="bing.txt"' == \
res.header('content-disposition')
assert 'text/plain' == res.header('content-type')
try:
res = build(inline=True,attachment=True)
except AssertionError:
pass
else:
assert False, "should be an exception"
def test_modified():
harness = TestApp(DataApp(b'mycontent'))
res = harness.get("/")
assert "<Response 200 OK 'mycontent'>" == repr(res)
last_modified = res.header('last-modified')
res = harness.get("/",headers={'if-modified-since': last_modified})
assert "<Response 304 Not Modified ''>" == repr(res)
res = harness.get("/",headers={'if-modified-since': last_modified + \
'; length=1506'})
assert "<Response 304 Not Modified ''>" == repr(res)
res = harness.get("/",status=400,
headers={'if-modified-since': 'garbage'})
assert 400 == res.status and b"ill-formed timestamp" in res.body
res = harness.get("/",status=400,
headers={'if-modified-since':
'Thu, 22 Dec 2030 01:01:01 GMT'})
assert 400 == res.status and b"check your system clock" in res.body
def test_file():
    """FileApp serves a file, tracks appends, and drops its cache when the
    file grows past CACHE_SIZE."""
    # Use a distinct local name: the old code bound the name 'tempfile' to a
    # string, shadowing the imported tempfile module inside this function.
    temp_path = "test_fileapp.%s.txt" % (random.random())
    content = LETTERS * 20
    if six.PY3:
        content = content.encode('utf8')
    with open(temp_path, "wb") as fp:
        fp.write(content)
    try:
        app = fileapp.FileApp(temp_path)
        res = TestApp(app).get("/")
        assert len(content) == int(res.header('content-length'))
        assert 'text/plain' == res.header('content-type')
        assert content == res.body
        assert content == app.content  # this is cached
        print("updating", temp_path)
        # Append a little data; the app must notice on a fresh request.
        with open(temp_path, "a+") as fp:
            fp.write("0123456789")
        res = TestApp(app).get("/", headers={'Cache-Control': 'max-age=0'})
        assert len(content) + 10 == int(res.header('content-length'))
        assert 'text/plain' == res.header('content-type')
        assert content + b"0123456789" == res.body
        assert app.content  # we are still cached
        # Grow the file past the cache limit; the in-memory copy must go.
        with open(temp_path, "a+") as fp:
            fp.write("X" * fileapp.CACHE_SIZE)  # exceed the cache size
            fp.write("YZ")
        res = TestApp(app).get("/", headers={'Cache-Control': 'max-age=0'})
        newsize = fileapp.CACHE_SIZE + len(content) + 12
        assert newsize == int(res.header('content-length'))
        assert newsize == len(res.body)
        assert res.body.startswith(content) and res.body.endswith(b'XYZ')
        assert not app.content  # we are no longer cached
    finally:
        os.unlink(temp_path)
def test_dir():
tmpdir = tempfile.mkdtemp()
try:
tmpfile = os.path.join(tmpdir, 'file')
tmpsubdir = os.path.join(tmpdir, 'dir')
fp = open(tmpfile, 'w')
fp.write('abcd')
fp.close()
os.mkdir(tmpsubdir)
try:
app = fileapp.DirectoryApp(tmpdir)
for path in ['/', '', '//', '/..', '/.', '/../..']:
assert TestApp(app).get(path, status=403).status == 403, ValueError(path)
for path in ['/~', '/foo', '/dir', '/dir/']:
assert TestApp(app).get(path, status=404).status == 404, ValueError(path)
assert TestApp(app).get('/file').body == b'abcd'
finally:
os.remove(tmpfile)
os.rmdir(tmpsubdir)
finally:
os.rmdir(tmpdir)
def _excercize_range(build,content):
# full content request, but using ranges'
res = build("bytes=0-%d" % (len(content)-1))
assert res.header('accept-ranges') == 'bytes'
assert res.body == content
assert res.header('content-length') == str(len(content))
res = build("bytes=-%d" % (len(content)-1))
assert res.body == content
assert res.header('content-length') == str(len(content))
res = build("bytes=0-")
assert res.body == content
assert res.header('content-length') == str(len(content))
# partial content requests
res = build("bytes=0-9", status=206)
assert res.body == content[:10]
assert res.header('content-length') == '10'
res = build("bytes=%d-" % (len(content)-1), status=206)
assert res.body == b'Z'
assert res.header('content-length') == '1'
res = build("bytes=%d-%d" % (3,17), status=206)
assert res.body == content[3:18]
assert res.header('content-length') == '15'
def test_range():
content = LETTERS * 5
if six.PY3:
content = content.encode('utf8')
def build(range, status=206):
app = DataApp(content)
return TestApp(app).get("/",headers={'Range': range}, status=status)
_excercize_range(build,content)
build('bytes=0-%d' % (len(content)+1), 416)
def test_file_range():
tempfile = "test_fileapp.%s.txt" % (random.random())
content = LETTERS * (1+(fileapp.CACHE_SIZE // len(LETTERS)))
if six.PY3:
content = content.encode('utf8')
assert len(content) > fileapp.CACHE_SIZE
with open(tempfile, "wb") as fp:
fp.write(content)
try:
def build(range, status=206):
app = fileapp.FileApp(tempfile)
return TestApp(app).get("/",headers={'Range': range},
status=status)
_excercize_range(build,content)
for size in (13,len(LETTERS), len(LETTERS)-1):
fileapp.BLOCK_SIZE = size
_excercize_range(build,content)
finally:
os.unlink(tempfile)
def test_file_cache():
    """Conditional-GET caching headers (ETag / Last-Modified) served by FileApp."""
    filename = os.path.join(os.path.dirname(__file__),
                            'urlparser_data', 'secured.txt')
    app = TestApp(fileapp.FileApp(filename))
    first = app.get('/')
    etag = first.header('ETag')
    last_mod = first.header('Last-Modified')
    conditional_requests = [
        ({'If-Modified-Since': last_mod}, 304),
        ({'If-None-Match': etag}, 304),
        ({'If-None-Match': 'asdf'}, 200),
        ({'If-Modified-Since': 'Sat, 1 Jan 2005 12:00:00 GMT'}, 200),
        # Some clients append a "; length=" hint; it must still match.
        ({'If-Modified-Since': last_mod + '; length=100'}, 304),
        # A malformed date is rejected outright.
        ({'If-Modified-Since': 'invalid date'}, 400),
    ]
    for headers, expected in conditional_requests:
        app.get('/', headers=headers, status=expected)
def test_methods():
    """GET/HEAD parity and rejection of unsupported methods."""
    filename = os.path.join(os.path.dirname(__file__),
                            'urlparser_data', 'secured.txt')
    app = TestApp(fileapp.FileApp(filename))
    get_res = app.get('')
    head_res = app.get('', extra_environ={'REQUEST_METHOD': 'HEAD'})
    # HEAD must mirror GET's headers while omitting the body.
    assert head_res.headers == get_res.headers
    assert not head_res.body
    app.post('', status=405)  # Method Not Allowed
| bsd-3-clause |
thnkloud9/Airtime | python_apps/media-monitor/mm2/media/metadata/definitions.py | 10 | 4773 | # -*- coding: utf-8 -*-
import process as md
import re
from os.path import normpath
from ..monitor.pure import format_length, file_md5, is_airtime_recorded, \
no_extension_basename
# Module-level flag flipped once load_definitions() has registered the
# metadata definitions below.
defs_loaded = False

# Largest value that fits in a signed 32-bit integer; used to cap numeric
# metadata fields via t.max_value() before they reach the database.
MAX_SIGNED_INT = 2**31-1
def is_defs_loaded():
    """Return whether load_definitions() has already run (module flag)."""
    # Plain read of the module-level flag; 'global' is only needed for writes.
    return defs_loaded
def load_definitions():
    """Register every Airtime MDATA_KEY_* metadata definition with ``md``.

    Each ``with md.metadata(...)`` block declares one field: the raw tag(s)
    it depends on, its default value, optional length/value caps, and a
    translate function mapping raw tag values to what Airtime stores.
    """
    # --- timing fields, normalized through format_length() ---
    with md.metadata('MDATA_KEY_DURATION') as t:
        t.default(u'0.0')
        t.depends('length')
        t.translate(lambda k: format_length(k['length']))

    with md.metadata('MDATA_KEY_CUE_IN') as t:
        t.default(u'0.0')
        t.depends('cuein')
        t.translate(lambda k: format_length(k['cuein']))

    with md.metadata('MDATA_KEY_CUE_OUT') as t:
        t.default(u'0.0')
        t.depends('cueout')
        t.translate(lambda k: format_length(k['cueout']))

    with md.metadata('MDATA_KEY_MIME') as t:
        t.default(u'')
        t.depends('mime')
        # Is this necessary?
        t.translate(lambda k: k['mime'].replace('audio/vorbis','audio/ogg'))

    # --- numeric fields capped at 32-bit signed range ---
    with md.metadata('MDATA_KEY_BITRATE') as t:
        t.default(u'')
        t.depends('bitrate')
        t.translate(lambda k: k['bitrate'])
        t.max_value(MAX_SIGNED_INT)

    with md.metadata('MDATA_KEY_SAMPLERATE') as t:
        t.default(u'0')
        t.depends('sample_rate')
        t.translate(lambda k: k['sample_rate'])
        t.max_value(MAX_SIGNED_INT)

    with md.metadata('MDATA_KEY_FTYPE') as t:
        t.depends('ftype') # i don't think this field even exists
        t.default(u'audioclip')
        t.translate(lambda k: k['ftype']) # but just in case

    with md.metadata("MDATA_KEY_CREATOR") as t:
        t.depends("artist")
        # A little kludge to make sure that we have some value for when we parse
        # MDATA_KEY_TITLE
        t.default(u"")
        t.max_length(512)

    with md.metadata("MDATA_KEY_SOURCE") as t:
        t.depends("album")
        t.max_length(512)

    with md.metadata("MDATA_KEY_GENRE") as t:
        t.depends("genre")
        t.max_length(64)

    with md.metadata("MDATA_KEY_MOOD") as t:
        t.depends("mood")
        t.max_length(64)

    with md.metadata("MDATA_KEY_TRACKNUMBER") as t:
        t.depends("tracknumber")
        t.max_value(MAX_SIGNED_INT)

    with md.metadata("MDATA_KEY_BPM") as t:
        t.depends("bpm")
        t.max_value(MAX_SIGNED_INT)

    with md.metadata("MDATA_KEY_LABEL") as t:
        t.depends("organization")
        t.max_length(512)

    with md.metadata("MDATA_KEY_COMPOSER") as t:
        t.depends("composer")
        t.max_length(512)

    with md.metadata("MDATA_KEY_ENCODER") as t:
        t.depends("encodedby")
        t.max_length(512)

    with md.metadata("MDATA_KEY_CONDUCTOR") as t:
        t.depends("conductor")
        t.max_length(512)

    with md.metadata("MDATA_KEY_YEAR") as t:
        t.depends("date")
        t.max_length(16)

    with md.metadata("MDATA_KEY_URL") as t:
        t.depends("website")

    with md.metadata("MDATA_KEY_ISRC") as t:
        t.depends("isrc")
        t.max_length(512)

    with md.metadata("MDATA_KEY_COPYRIGHT") as t:
        t.depends("copyright")
        t.max_length(512)

    # --- fields derived from the file path itself ---
    with md.metadata("MDATA_KEY_ORIGINAL_PATH") as t:
        t.depends('path')
        t.translate(lambda k: unicode(normpath(k['path'])))

    with md.metadata("MDATA_KEY_MD5") as t:
        t.depends('path')
        t.optional(False)
        # Hash only a prefix of the file for speed.
        t.translate(lambda k: file_md5(k['path'], max_length=100))

    # owner is handled differently by (by events.py)

    # MDATA_KEY_TITLE is the annoying special case b/c we sometimes read it
    # from file name

    # must handle 3 cases:
    # 1. regular case (not recorded + title is present)
    # 2. title is absent (read from file)
    # 3. recorded file
    def tr_title(k):
        #unicode_unknown = u"unknown"
        new_title = u""
        if is_airtime_recorded(k) or k['title'] != u"":
            new_title = k['title']
        else:
            # No usable tag: derive the title from the file name instead.
            default_title = no_extension_basename(k['path'])
            default_title = re.sub(r'__\d+\.',u'.', default_title)

            # format is: track_number-title-123kbps.mp3
            m = re.match(".+?-(?P<title>.+)-(\d+kbps|unknown)$", default_title)
            if m: new_title = m.group('title')
            else: new_title = re.sub(r'-\d+kbps$', u'', default_title)

        return new_title

    with md.metadata('MDATA_KEY_TITLE') as t:
        # Need to know MDATA_KEY_CREATOR to know if show was recorded. Value is
        # defaulted to "" from definitions above
        t.depends('title','MDATA_KEY_CREATOR','path')
        t.optional(False)
        t.translate(tr_title)
        t.max_length(512)

    # NOTE(review): MDATA_KEY_LABEL is registered twice (earlier from
    # 'organization', here from 'label') -- confirm which registration the
    # md registry keeps.
    with md.metadata('MDATA_KEY_LABEL') as t:
        t.depends('label')
        t.max_length(512)
| agpl-3.0 |
CopeX/odoo | addons/sale/sale.py | 4 | 72408 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
from openerp import workflow
class res_company(osv.Model):
    """Extend res.company with default terms and conditions for quotations."""
    _inherit = "res.company"
    _columns = {
        # Used as the default 'note' on new sale orders (see sale_order._defaults).
        'sale_note': fields.text('Default Terms and Conditions', translate=True, help="Default terms and conditions for quotations."),
    }
class sale_order(osv.osv):
_name = "sale.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Order"
_track = {
'state': {
'sale.mt_order_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state in ['manual', 'progress'],
'sale.mt_order_sent': lambda self, cr, uid, obj, ctx=None: obj.state in ['sent']
},
}
def _amount_line_tax(self, cr, uid, line, context=None):
val = 0.0
for c in self.pool.get('account.tax').compute_all(cr, uid, line.tax_id, line.price_unit * (1-(line.discount or 0.0)/100.0), line.product_uom_qty, line.product_id, line.order_id.partner_id)['taxes']:
val += c.get('amount', 0.0)
return val
    def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
        """ Wrapper because of direct method passing as parameter for function fields """
        # Lets _columns reference a stable callable while _amount_all stays overridable.
        return self._amount_all(cr, uid, ids, field_name, arg, context=context)
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
cur_obj = self.pool.get('res.currency')
res = {}
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
val += self._amount_line_tax(cr, uid, line, context=context)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
if sale.invoiced:
res[sale.id] = 100.0
continue
tot = 0.0
for invoice in sale.invoice_ids:
if invoice.state not in ('draft', 'cancel'):
tot += invoice.amount_untaxed
if tot:
res[sale.id] = min(100.0, tot * 100.0 / (sale.amount_untaxed or 1.00))
else:
res[sale.id] = 0.0
return res
def _invoice_exists(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = False
if sale.invoice_ids:
res[sale.id] = True
return res
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = True
invoice_existence = False
for invoice in sale.invoice_ids:
if invoice.state!='cancel':
invoice_existence = True
if invoice.state != 'paid':
res[sale.id] = False
break
if not invoice_existence or sale.state == 'manual':
res[sale.id] = False
return res
    def _invoiced_search(self, cursor, user, obj, name, args, context=None):
        """Search implementation backing the 'invoiced'/'invoice_exists' function fields.

        Builds raw SQL over sale_order_invoice_rel/account_invoice: a truthy
        criterion keeps orders with paid invoices; a falsy one also pulls in
        orders that have no invoice at all.
        """
        if not len(args):
            return []
        clause = ''
        sale_clause = ''
        no_invoiced = False
        for arg in args:
            # ('invoiced', '=', True) or ('invoiced', '!=', False): paid only.
            if (arg[1] == '=' and arg[2]) or (arg[1] == '!=' and not arg[2]):
                clause += 'AND inv.state = \'paid\''
            else:
                # Negative search: live invoices that are not paid.
                clause += 'AND inv.state != \'cancel\' AND sale.state != \'cancel\' AND inv.state <> \'paid\' AND rel.order_id = sale.id '
                sale_clause = ', sale_order AS sale '
                no_invoiced = True

        cursor.execute('SELECT rel.order_id ' \
                'FROM sale_order_invoice_rel AS rel, account_invoice AS inv '+ sale_clause + \
                'WHERE rel.invoice_id = inv.id ' + clause)
        res = cursor.fetchall()
        if no_invoiced:
            # Orders without any invoice also count as "not invoiced".
            cursor.execute('SELECT sale.id ' \
                    'FROM sale_order AS sale ' \
                    'WHERE sale.id NOT IN ' \
                    '(SELECT rel.order_id ' \
                    'FROM sale_order_invoice_rel AS rel) and sale.state != \'cancel\'')
            res.extend(cursor.fetchall())
        if not res:
            # Empty result set expressed as an impossible domain.
            return [('id', '=', 0)]
        return [('id', 'in', [x[0] for x in res])]
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
_columns = {
'name': fields.char('Order Reference', required=True, copy=False,
readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True),
'origin': fields.char('Source Document', help="Reference of the document that generated this sales order request."),
'client_order_ref': fields.char('Reference/Description', copy=False),
'state': fields.selection([
('draft', 'Draft Quotation'),
('sent', 'Quotation Sent'),
('cancel', 'Cancelled'),
('waiting_date', 'Waiting Schedule'),
('progress', 'Sales Order'),
('manual', 'Sale to Invoice'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
], 'Status', readonly=True, copy=False, help="Gives the status of the quotation or sales order.\
\nThe exception status is automatically set when a cancel operation occurs \
in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\
but waiting for the scheduler to run on the order date.", select=True),
'date_order': fields.datetime('Date', required=True, readonly=True, select=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=False),
'create_date': fields.datetime('Creation Date', readonly=True, select=True, help="Date on which sales order is created."),
'date_confirm': fields.date('Confirmation Date', readonly=True, select=True, help="Date on which sales order is confirmed.", copy=False),
'user_id': fields.many2one('res.users', 'Salesperson', states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True, track_visibility='onchange'),
'partner_id': fields.many2one('res.partner', 'Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, select=True, track_visibility='always'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Invoice address for current sales order."),
'partner_shipping_id': fields.many2one('res.partner', 'Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Delivery address for current sales order."),
'order_policy': fields.selection([
('manual', 'On Demand'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""This field controls how invoice and delivery operations are synchronized."""),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency", readonly=True, required=True),
'project_id': fields.many2one('account.analytic.account', 'Contract / Analytic', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="The analytic account related to a sales order."),
'order_line': fields.one2many('sale.order.line', 'order_id', 'Order Lines', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=True),
'invoice_ids': fields.many2many('account.invoice', 'sale_order_invoice_rel', 'order_id', 'invoice_id', 'Invoices', readonly=True, copy=False, help="This is the list of invoices that have been generated for this sales order. The same sales order may have been invoiced in several times (by line for example)."),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Paid',
fnct_search=_invoiced_search, type='boolean', help="It indicates that an invoice has been paid."),
'invoice_exists': fields.function(_invoice_exists, string='Invoiced',
fnct_search=_invoiced_search, type='boolean', help="It indicates that sales order has at least one invoice."),
'note': fields.text('Terms and conditions'),
'amount_untaxed': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The amount without tax.", track_visibility='always'),
'amount_tax': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Taxes',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The tax amount."),
'amount_total': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Total',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The total amount."),
'payment_term': fields.many2one('account.payment.term', 'Payment Term'),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'company_id': fields.many2one('res.company', 'Company'),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'procurement_group_id': fields.many2one('procurement.group', 'Procurement group', copy=False),
'product_id': fields.related('order_line', 'product_id', type='many2one', relation='product.product', string='Product'),
}
_defaults = {
'date_order': fields.datetime.now,
'order_policy': 'manual',
'company_id': _get_default_company,
'state': 'draft',
'user_id': lambda obj, cr, uid, context: uid,
'name': lambda obj, cr, uid, context: '/',
'partner_invoice_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['invoice'])['invoice'],
'partner_shipping_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['delivery'])['delivery'],
'note': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.sale_note,
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_order = 'date_order desc, id desc'
# Form filling
def unlink(self, cr, uid, ids, context=None):
sale_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in sale_orders:
if s['state'] in ['draft', 'cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a confirmed sales order, you must cancel it before!'))
return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def copy_quotation(self, cr, uid, ids, context=None):
id = self.copy(cr, uid, ids[0], context=context)
view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
view_id = view_ref and view_ref[1] or False,
return {
'type': 'ir.actions.act_window',
'name': _('Sales Order'),
'res_model': 'sale.order',
'res_id': id,
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'target': 'current',
'nodestroy': True,
}
def onchange_pricelist_id(self, cr, uid, ids, pricelist_id, order_lines, context=None):
context = context or {}
if not pricelist_id:
return {}
value = {
'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id
}
if not order_lines or order_lines == [(6, 0, [])]:
return {'value': value}
warning = {
'title': _('Pricelist Warning!'),
'message' : _('If you change the pricelist of this order (and eventually the currency), prices of existing order lines will not be updated.')
}
return {'warning': warning, 'value': value}
def get_salenote(self, cr, uid, ids, partner_id, context=None):
context_lang = context.copy()
if partner_id:
partner_lang = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).lang
context_lang.update({'lang': partner_lang})
return self.pool.get('res.users').browse(cr, uid, uid, context=context_lang).company_id.sale_note
def onchange_delivery_id(self, cr, uid, ids, company_id, partner_id, delivery_id, fiscal_position, context=None):
r = {'value': {}}
if not company_id:
company_id = self._get_default_company(cr, uid, context=context)
fiscal_position = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, delivery_id, context=context)
if fiscal_position:
r['value']['fiscal_position'] = fiscal_position
return r
    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        """Refresh addresses, payment term, salesman, pricelist, team and
        sale note when the customer changes. *part* is a partner id (or falsy)."""
        if not part:
            return {'value': {'partner_invoice_id': False, 'partner_shipping_id': False, 'payment_term': False, 'fiscal_position': False}}

        part = self.pool.get('res.partner').browse(cr, uid, part, context=context)
        addr = self.pool.get('res.partner').address_get(cr, uid, [part.id], ['delivery', 'invoice', 'contact'])
        pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False
        # Payment term comes from the invoice address, not the main partner.
        invoice_part = self.pool.get('res.partner').browse(cr, uid, addr['invoice'], context=context)
        payment_term = invoice_part.property_payment_term and invoice_part.property_payment_term.id or False
        dedicated_salesman = part.user_id and part.user_id.id or uid
        val = {
            'partner_invoice_id': addr['invoice'],
            'partner_shipping_id': addr['delivery'],
            'payment_term': payment_term,
            'user_id': dedicated_salesman,
        }
        # The delivery address drives the fiscal position.
        delivery_onchange = self.onchange_delivery_id(cr, uid, ids, False, part.id, addr['delivery'], False, context=context)
        val.update(delivery_onchange['value'])
        if pricelist:
            val['pricelist_id'] = pricelist
        # Use the partner's sales team only when no default team applies.
        if not self._get_default_section_id(cr, uid, context=context) and part.section_id:
            val['section_id'] = part.section_id.id
        sale_note = self.get_salenote(cr, uid, ids, part.id, context=context)
        if sale_note: val.update({'note': sale_note})
        return {'value': val}
    def create(self, cr, uid, vals, context=None):
        """Create a sale order, filling partner-derived defaults and a sequence name."""
        if context is None:
            context = {}
        if vals.get('name', '/') == '/':
            # Assign the next number from the 'sale.order' sequence.
            vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'sale.order', context=context) or '/'
        if vals.get('partner_id') and any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id', 'fiscal_position']):
            defaults = self.onchange_partner_id(cr, uid, [], vals['partner_id'], context=context)['value']
            if not vals.get('fiscal_position') and vals.get('partner_shipping_id'):
                delivery_onchange = self.onchange_delivery_id(cr, uid, [], vals.get('company_id'), None, vals['partner_id'], vals.get('partner_shipping_id'), context=context)
                defaults.update(delivery_onchange['value'])
            # Explicit values in vals win over the onchange-derived defaults.
            vals = dict(defaults, **vals)
        # Suppress the automatic creation log message; post a custom one below.
        ctx = dict(context or {}, mail_create_nolog=True)
        new_id = super(sale_order, self).create(cr, uid, vals, context=ctx)
        self.message_post(cr, uid, [new_id], body=_("Quotation created"), context=ctx)
        return new_id
    def button_dummy(self, cr, uid, ids, context=None):
        # No-op: bound to form buttons that only need to trigger a client refresh.
        return True
    # FIXME: deprecated method, overriders should be using _prepare_invoice() instead.
    # can be removed after 6.1.
    def _inv_get(self, cr, uid, order, context=None):
        """Legacy hook: extra values merged into the invoice dict by _prepare_invoice()."""
        return {}
    def _prepare_invoice(self, cr, uid, order, lines, context=None):
        """Prepare the dict of values to create the new invoice for a
           sales order. This method may be overridden to implement custom
           invoice generation (making sure to call super() to establish
           a clean extension chain).

           :param browse_record order: sale.order record to invoice
           :param list(int) line: list of invoice line IDs that must be
                                  attached to the invoice
           :return: dict of value to create() the invoice
        """
        if context is None:
            context = {}
        # The invoice must be booked on the sale journal of the order's company.
        journal_ids = self.pool.get('account.journal').search(cr, uid,
            [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],
            limit=1)
        if not journal_ids:
            raise osv.except_osv(_('Error!'),
                _('Please define sales journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
        invoice_vals = {
            'name': order.client_order_ref or '',
            'origin': order.name,
            'type': 'out_invoice',
            'reference': order.client_order_ref or order.name,
            'account_id': order.partner_invoice_id.property_account_receivable.id,
            'partner_id': order.partner_invoice_id.id,
            'journal_id': journal_ids[0],
            # (6, 0, ids) replaces the invoice's line set with *lines*.
            'invoice_line': [(6, 0, lines)],
            'currency_id': order.pricelist_id.currency_id.id,
            'comment': order.note,
            'payment_term': order.payment_term and order.payment_term.id or False,
            'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,
            'date_invoice': context.get('date_invoice', False),
            'company_id': order.company_id.id,
            'user_id': order.user_id and order.user_id.id or False,
            'section_id' : order.section_id.id
        }

        # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1
        invoice_vals.update(self._inv_get(cr, uid, order, context=context))
        return invoice_vals
    def _make_invoice(self, cr, uid, order, lines, context=None):
        """Create one invoice for *order* from the invoice-line ids *lines*
        and return the new invoice id.

        Lines of earlier non-cancelled invoices that were NOT generated from
        order lines are copied onto this invoice with negated unit prices,
        deducting them from the total.
        """
        inv_obj = self.pool.get('account.invoice')
        obj_invoice_line = self.pool.get('account.invoice.line')
        if context is None:
            context = {}
        invoiced_sale_line_ids = self.pool.get('sale.order.line').search(cr, uid, [('order_id', '=', order.id), ('invoiced', '=', True)], context=context)
        from_line_invoice_ids = []
        # Collect invoices that originate from already-invoiced order lines.
        for invoiced_sale_line_id in self.pool.get('sale.order.line').browse(cr, uid, invoiced_sale_line_ids, context=context):
            for invoice_line_id in invoiced_sale_line_id.invoice_lines:
                if invoice_line_id.invoice_id.id not in from_line_invoice_ids:
                    from_line_invoice_ids.append(invoice_line_id.invoice_id.id)
        for preinv in order.invoice_ids:
            if preinv.state not in ('cancel',) and preinv.id not in from_line_invoice_ids:
                for preline in preinv.invoice_line:
                    # Negate the price so the earlier invoice is deducted.
                    inv_line_id = obj_invoice_line.copy(cr, uid, preline.id, {'invoice_id': False, 'price_unit': -preline.price_unit})
                    lines.append(inv_line_id)
        inv = self._prepare_invoice(cr, uid, order, lines, context=context)
        inv_id = inv_obj.create(cr, uid, inv, context=context)
        # Recompute due dates and taxes from the payment term.
        data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv['payment_term'], time.strftime(DEFAULT_SERVER_DATE_FORMAT))
        if data.get('value', False):
            inv_obj.write(cr, uid, [inv_id], data['value'], context=context)
        inv_obj.button_compute(cr, uid, [inv_id])
        return inv_id
    def print_quotation(self, cr, uid, ids, context=None):
        '''
        This function prints the sales order and mark it as sent, so that we can see more easily the next step of the workflow
        '''
        assert len(ids) == 1, 'This option should only be used for a single id at a time'
        # Printing implies the quotation was communicated: advance the workflow.
        self.signal_workflow(cr, uid, ids, 'quotation_sent')
        return self.pool['report'].get_action(cr, uid, ids, 'sale.report_saleorder', context=context)
def manual_invoice(self, cr, uid, ids, context=None):
""" create invoices for the given sales orders (ids), and open the form
view of one of the newly created invoices
"""
mod_obj = self.pool.get('ir.model.data')
# create invoices through the sales orders' workflow
inv_ids0 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
self.signal_workflow(cr, uid, ids, 'manual_invoice')
inv_ids1 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
# determine newly created invoices
new_inv_ids = list(inv_ids1 - inv_ids0)
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
res_id = res and res[1] or False,
return {
'name': _('Customer Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': new_inv_ids and new_inv_ids[0] or False,
}
    def action_view_invoice(self, cr, uid, ids, context=None):
        '''
        This function returns an action that display existing invoices of given sales order ids. It can either be a in a list or in a form view, if there is only one invoice to show.
        '''
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')

        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        #compute the number of invoices to display
        inv_ids = []
        for so in self.browse(cr, uid, ids, context=context):
            inv_ids += [invoice.id for invoice in so.invoice_ids]
        #choose the view_mode accordingly
        if len(inv_ids)>1:
            # Several invoices: keep the list view, restricted by domain.
            result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
        else:
            # Single (or no) invoice: open it directly in form view.
            res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
            result['views'] = [(res and res[1] or False, 'form')]
            result['res_id'] = inv_ids and inv_ids[0] or False
        return result
def test_no_product(self, cr, uid, order, context):
for line in order.order_line:
if line.state == 'cancel':
continue
if line.product_id and (line.product_id.type<>'service'):
return False
return True
    def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice = False, context=None):
        """Create invoices for the given orders and return the last created id.

        With ``grouped=True`` all orders sharing an invoice partner are merged
        into a single invoice; otherwise one invoice is created per order.
        Only lines whose state is in *states* (default: confirmed/done/exception)
        and not yet invoiced are taken.
        """
        if states is None:
            states = ['confirmed', 'done', 'exception']
        res = False
        invoices = {}
        invoice_ids = []
        invoice = self.pool.get('account.invoice')
        obj_sale_order_line = self.pool.get('sale.order.line')
        partner_currency = {}
        # If date was specified, use it as date invoiced, usefull when invoices are generated this month and put the
        # last day of the last month as invoice date
        if date_invoice:
            context = dict(context or {}, date_invoice=date_invoice)
        for o in self.browse(cr, uid, ids, context=context):
            currency_id = o.pricelist_id.currency_id.id
            # Grouping across currencies for one partner is not allowed.
            if (o.partner_id.id in partner_currency) and (partner_currency[o.partner_id.id] <> currency_id):
                raise osv.except_osv(
                    _('Error!'),
                    _('You cannot group sales having different currencies for the same partner.'))

            partner_currency[o.partner_id.id] = currency_id
            lines = []
            for line in o.order_line:
                if line.invoiced:
                    continue
                elif (line.state in states):
                    lines.append(line.id)
            created_lines = obj_sale_order_line.invoice_line_create(cr, uid, lines)
            if created_lines:
                # Group candidate invoices by invoice partner.
                invoices.setdefault(o.partner_invoice_id.id or o.partner_id.id, []).append((o, created_lines))
        if not invoices:
            # Nothing new to invoice: return an existing draft invoice if any.
            for o in self.browse(cr, uid, ids, context=context):
                for i in o.invoice_ids:
                    if i.state == 'draft':
                        return i.id
        for val in invoices.values():
            if grouped:
                # One invoice for all orders of this partner; concatenate refs.
                res = self._make_invoice(cr, uid, val[0][0], reduce(lambda x, y: x + y, [l for o, l in val], []), context=context)
                invoice_ref = ''
                origin_ref = ''
                for o, l in val:
                    invoice_ref += (o.client_order_ref or o.name) + '|'
                    origin_ref += (o.origin or o.name) + '|'
                    self.write(cr, uid, [o.id], {'state': 'progress'})
                    cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (o.id, res))
                    self.invalidate_cache(cr, uid, ['invoice_ids'], [o.id], context=context)
                #remove last '|' in invoice_ref
                if len(invoice_ref) >= 1:
                    invoice_ref = invoice_ref[:-1]
                if len(origin_ref) >= 1:
                    origin_ref = origin_ref[:-1]
                invoice.write(cr, uid, [res], {'origin': origin_ref, 'name': invoice_ref})
            else:
                for order, il in val:
                    res = self._make_invoice(cr, uid, order, il, context=context)
                    invoice_ids.append(res)
                    self.write(cr, uid, [order.id], {'state': 'progress'})
                    cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (order.id, res))
                    self.invalidate_cache(cr, uid, ['invoice_ids'], [order.id], context=context)
        return res
    def action_invoice_cancel(self, cr, uid, ids, context=None):
        """Workflow hook: an invoice was cancelled, put the order in invoice exception."""
        self.write(cr, uid, ids, {'state': 'invoice_except'}, context=context)
        return True
def action_invoice_end(self, cr, uid, ids, context=None):
for this in self.browse(cr, uid, ids, context=context):
for line in this.order_line:
if line.state == 'exception':
line.write({'state': 'confirmed'})
if this.state == 'invoice_except':
this.write({'state': 'progress'})
return True
    def action_cancel(self, cr, uid, ids, context=None):
        """Cancel the orders; refuse if any linked invoice is already open/paid."""
        if context is None:
            context = {}
        sale_order_line_obj = self.pool.get('sale.order.line')
        account_invoice_obj = self.pool.get('account.invoice')
        for sale in self.browse(cr, uid, ids, context=context):
            for inv in sale.invoice_ids:
                if inv.state not in ('draft', 'cancel'):
                    raise osv.except_osv(
                        _('Cannot cancel this sales order!'),
                        _('First cancel all invoices attached to this sales order.'))
                # Draft invoices are cancelled through their own workflow.
                inv.signal_workflow('invoice_cancel')
            line_ids = [l.id for l in sale.order_line if l.state != 'cancel']
            sale_order_line_obj.button_cancel(cr, uid, line_ids, context=context)
        self.write(cr, uid, ids, {'state': 'cancel'})
        return True
def action_button_confirm(self, cr, uid, ids, context=None):
    """Confirm a single quotation through the workflow.

    When the context carries ``send_email``, also email the quotation to
    the customer using the composer pre-filled from the EDI sale template.
    """
    if not context:
        context = {}
    assert len(ids) == 1, 'This option should only be used for a single id at a time.'
    self.signal_workflow(cr, uid, ids, 'order_confirm')
    if context.get('send_email'):
        order_id = ids[0]
        # Reuse the "send by email" action to obtain the template-driven
        # composer context, then create and send the mail silently.
        email_act = self.action_quotation_send(cr, uid, [order_id], context=context)
        if email_act and email_act.get('context'):
            composer_obj = self.pool['mail.compose.message']
            composer_values = {}
            email_ctx = email_act['context']
            template_values = [
                email_ctx.get('default_template_id'),
                email_ctx.get('default_composition_mode'),
                email_ctx.get('default_model'),
                email_ctx.get('default_res_id'),
            ]
            # Let the composer onchange pre-fill subject/body/recipients
            # from the email template.
            composer_values.update(composer_obj.onchange_template_id(cr, uid, None, *template_values, context=context).get('value', {}))
            if not composer_values.get('email_from'):
                # Fall back to the order's company email as sender.
                composer_values['email_from'] = self.browse(cr, uid, order_id, context=context).company_id.email
            # Relational values must be passed as (6, 0, ids) commands.
            for key in ['attachment_ids', 'partner_ids']:
                if composer_values.get(key):
                    composer_values[key] = [(6, 0, composer_values[key])]
            composer_id = composer_obj.create(cr, uid, composer_values, context=email_ctx)
            composer_obj.send_mail(cr, uid, [composer_id], context=email_ctx)
    return True
def action_wait(self, cr, uid, ids, context=None):
    """Confirm the quotations.

    Each order moves to 'manual' (when its policy is manual, or it has no
    stockable product) or to 'progress', gets its confirmation date set,
    and its non-cancelled lines are confirmed.

    :raises osv.except_osv: when an order has no non-cancelled line.
    """
    context = context or {}
    for o in self.browse(cr, uid, ids):
        if not any(line.state != 'cancel' for line in o.order_line):
            raise osv.except_osv(_('Error!'), _('You cannot confirm a sales order which has no line.'))
        noprod = self.test_no_product(cr, uid, o, context)
        if (o.order_policy == 'manual') or noprod:
            self.write(cr, uid, [o.id], {'state': 'manual', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
        else:
            self.write(cr, uid, [o.id], {'state': 'progress', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
        # Confirm the lines that are not cancelled.
        self.pool.get('sale.order.line').button_confirm(cr, uid, [x.id for x in o.order_line if x.state != 'cancel'])
    return True
def action_quotation_send(self, cr, uid, ids, context=None):
    '''
    This function opens a window to compose an email, with the edi sale template message loaded by default
    '''
    assert len(ids) == 1, 'This option should only be used for a single id at a time.'
    ir_model_data = self.pool.get('ir.model.data')

    def _ref(module, name):
        # Resolve an xmlid to a database id, or False when missing.
        try:
            return ir_model_data.get_object_reference(cr, uid, module, name)[1]
        except ValueError:
            return False

    template_id = _ref('sale', 'email_template_edi_sale')
    compose_form_id = _ref('mail', 'email_compose_message_wizard_form')
    ctx = {
        'default_model': 'sale.order',
        'default_res_id': ids[0],
        'default_use_template': bool(template_id),
        'default_template_id': template_id,
        'default_composition_mode': 'comment',
        'mark_so_as_sent': True,
    }
    return {
        'type': 'ir.actions.act_window',
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'mail.compose.message',
        'views': [(compose_form_id, 'form')],
        'view_id': compose_form_id,
        'target': 'new',
        'context': ctx,
    }
def action_done(self, cr, uid, ids, context=None):
    """Mark the orders, and all their non-cancelled lines, as done."""
    line_obj = self.pool.get('sale.order.line')
    for order in self.browse(cr, uid, ids, context=context):
        line_ids = [line.id for line in order.order_line if line.state != 'cancel']
        line_obj.write(cr, uid, line_ids, {'state': 'done'}, context=context)
    return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
    """Build the values used to create the procurement of *line*.

    :param browse_record order: sale.order of the line
    :param browse_record line: sale.order.line to procure
    :param int group_id: optional procurement.group id
    :return: dict of values for procurement.order.create()
    """
    date_planned = self._get_date_planned(cr, uid, order, line, order.date_order, context=context)
    # Fall back to the UoM quantity/unit when no UoS is set (or its
    # quantity is zero, which the `and/or` chain also covers).
    uos_qty = (line.product_uos and line.product_uos_qty) or line.product_uom_qty
    uos = (line.product_uos and line.product_uos.id) or line.product_uom.id
    invoice_state = '2binvoiced' if order.order_policy == 'picking' else 'none'
    return {
        'name': line.name,
        'origin': order.name,
        'date_planned': date_planned,
        'product_id': line.product_id.id,
        'product_qty': line.product_uom_qty,
        'product_uom': line.product_uom.id,
        'product_uos_qty': uos_qty,
        'product_uos': uos,
        'company_id': order.company_id.id,
        'group_id': group_id,
        'invoice_state': invoice_state,
        'sale_line_id': line.id,
    }
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
    """Planned procurement date: *start_date* pushed forward by the
    line's delivery lead time (``delay``, in days)."""
    start = datetime.strptime(start_date, DEFAULT_SERVER_DATETIME_FORMAT)
    return start + timedelta(days=line.delay or 0.0)
def _prepare_procurement_group(self, cr, uid, order, context=None):
    """Values used to create the procurement group of *order*."""
    return {
        'name': order.name,
        'partner_id': order.partner_shipping_id.id,
    }
def procurement_needed(self, cr, uid, ids, context=None):
    """Return True if at least one of the orders requires procurements.

    When only the base `sale` module is installed there is no need to
    create procurements; further installed modules (sale_service,
    sale_stock) override the line-level check to change this.
    """
    sale_line_obj = self.pool.get('sale.order.line')
    # any() over a generator short-circuits: the original built a full
    # list of booleans for every order before testing it.
    return any(
        sale_line_obj.need_procurement(
            cr, uid,
            [line.id for line in order.order_line if line.state != 'cancel'],
            context=context)
        for order in self.browse(cr, uid, ids, context=context))
def action_ignore_delivery_exception(self, cr, uid, ids, context=None):
    """Leave the delivery-exception state: orders that already have an
    invoice go back to 'progress', the others to 'manual'.

    Fix: the original wrote ALL `ids` on every loop iteration, so with
    multiple records every order ended up with the state computed from
    the last record browsed; write each record individually instead.
    """
    for sale_order in self.browse(cr, uid, ids, context=context):
        new_state = 'progress' if sale_order.invoice_exists else 'manual'
        self.write(cr, uid, [sale_order.id], {'state': new_state}, context=context)
    return True
def action_ship_create(self, cr, uid, ids, context=None):
    """Create the required procurements to supply sales order lines, also connecting
    the procurements to appropriate stock moves in order to bring the goods to the
    sales order's requested location.
    :return: True
    """
    context = context or {}
    # Force the current user's language into the context so the
    # procurements are created with translated values.
    context['lang'] = self.pool['res.users'].browse(cr, uid, uid).lang
    procurement_obj = self.pool.get('procurement.order')
    sale_line_obj = self.pool.get('sale.order.line')
    for order in self.browse(cr, uid, ids, context=context):
        proc_ids = []
        vals = self._prepare_procurement_group(cr, uid, order, context=context)
        if not order.procurement_group_id:
            group_id = self.pool.get("procurement.group").create(cr, uid, vals, context=context)
            order.write({'procurement_group_id': group_id})
        for line in order.order_line:
            if line.state == 'cancel':
                continue
            #Try to fix exception procurement (possible when after a shipping exception the user choose to recreate)
            if line.procurement_ids:
                #first check them to see if they are in exception or not (one of the related moves is cancelled)
                procurement_obj.check(cr, uid, [x.id for x in line.procurement_ids if x.state not in ['cancel', 'done']])
                line.refresh()
                #run again procurement that are in exception in order to trigger another move
                except_proc_ids = [x.id for x in line.procurement_ids if x.state in ('exception', 'cancel')]
                procurement_obj.reset_to_confirmed(cr, uid, except_proc_ids, context=context)
                proc_ids += except_proc_ids
            elif sale_line_obj.need_procurement(cr, uid, [line.id], context=context):
                if (line.state == 'done') or not line.product_id:
                    continue
                vals = self._prepare_order_line_procurement(cr, uid, order, line, group_id=order.procurement_group_id.id, context=context)
                # Defer the automatic run: all procurements of the order
                # are run together after the loop.
                ctx = context.copy()
                ctx['procurement_autorun_defer'] = True
                proc_id = procurement_obj.create(cr, uid, vals, context=ctx)
                proc_ids.append(proc_id)
        #Confirm procurement order such that rules will be applied on it
        #note that the workflow normally ensure proc_ids isn't an empty list
        procurement_obj.run(cr, uid, proc_ids, context=context)
        #if shipping was in exception and the user choose to recreate the delivery order, write the new status of SO
        if order.state == 'shipping_except':
            val = {'state': 'progress', 'shipped': False}
            if (order.order_policy == 'manual'):
                # Any uninvoiced active line keeps the order in 'manual'.
                for line in order.order_line:
                    if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
                        val['state'] = 'manual'
                        break
            order.write(val)
    return True
def onchange_fiscal_position(self, cr, uid, ids, fiscal_position, order_lines, context=None):
    '''Update taxes of order lines for each line where a product is defined
    :param list ids: not used
    :param int fiscal_position: sale order fiscal position
    :param list order_lines: command list for one2many write method
    '''
    order_line = []
    fiscal_obj = self.pool.get('account.fiscal.position')
    product_obj = self.pool.get('product.product')
    line_obj = self.pool.get('sale.order.line')
    fpos = False
    if fiscal_position:
        fpos = fiscal_obj.browse(cr, uid, fiscal_position, context=context)
    # order_lines is a list of one2many commands; remap the taxes of each
    # line depending on the command type.
    for line in order_lines:
        # create (0, 0, { fields })
        # update (1, ID, { fields })
        if line[0] in [0, 1]:
            prod = None
            if line[2].get('product_id'):
                prod = product_obj.browse(cr, uid, line[2]['product_id'], context=context)
            elif line[1]:
                # No product in the values: read it from the stored line.
                prod = line_obj.browse(cr, uid, line[1], context=context).product_id
            if prod and prod.taxes_id:
                line[2]['tax_id'] = [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]
            order_line.append(line)
        # link (4, ID)
        # link all (6, 0, IDS)
        elif line[0] in [4, 6]:
            line_ids = line[0] == 4 and [line[1]] or line[2]
            for line_id in line_ids:
                prod = line_obj.browse(cr, uid, line_id, context=context).product_id
                if prod and prod.taxes_id:
                    # Turn the link command into an update carrying the
                    # remapped taxes.
                    order_line.append([1, line_id, {'tax_id': [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]}])
                else:
                    order_line.append([4, line_id])
        else:
            # Other commands (delete/unlink/...) are kept untouched.
            order_line.append(line)
    # Reset the totals so the client recomputes them.
    return {'value': {'order_line': order_line, 'amount_untaxed': False, 'amount_tax': False, 'amount_total': False}}
def test_procurements_done(self, cr, uid, ids, context=None):
    """True when every procurement of every non-cancelled line is done."""
    for sale in self.browse(cr, uid, ids, context=context):
        for line in sale.order_line:
            if line.state == 'cancel':
                continue
            if any(proc.state != 'done' for proc in line.procurement_ids):
                return False
    return True
def test_procurements_except(self, cr, uid, ids, context=None):
    """True when at least one procurement of a non-cancelled line is cancelled."""
    for sale in self.browse(cr, uid, ids, context=context):
        for line in sale.order_line:
            if line.state == 'cancel':
                continue
            for proc in line.procurement_ids:
                if proc.state == 'cancel':
                    return True
    return False
# TODO add a field price_unit_uos
# - update it on change product and unit price
# - use it in report if there is a uos
class sale_order_line(osv.osv):
    """A line of a sales order: product, quantities, price, taxes, and the
    links to its invoice lines and procurements."""

    def need_procurement(self, cr, uid, ids, context=None):
        """Return True if any line's product requires a procurement.

        When only `sale` is installed there is no need to create
        procurements; further installed modules (sale_service,
        sale_stock) change the product-level check.
        """
        prod_obj = self.pool.get('product.product')
        for line in self.browse(cr, uid, ids, context=context):
            if prod_obj.need_procurement(cr, uid, [line.product_id.id], context=context):
                return True
        return False

    def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
        """Compute the taxed subtotal of each line, rounded in the
        pricelist currency (function field `price_subtotal`)."""
        tax_obj = self.pool.get('account.tax')
        cur_obj = self.pool.get('res.currency')
        res = {}
        if context is None:
            context = {}
        for line in self.browse(cr, uid, ids, context=context):
            price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
            taxes = tax_obj.compute_all(cr, uid, line.tax_id, price, line.product_uom_qty, line.product_id, line.order_id.partner_id)
            cur = line.order_id.pricelist_id.currency_id
            res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
        return res

    def _get_uom_id(self, cr, uid, *args):
        """Default unit of measure: `product.product_uom_unit`, or False
        when the reference is missing."""
        try:
            proxy = self.pool.get('ir.model.data')
            result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
            return result[1]
        except Exception:
            # Fix: the original bound the exception to an unused name
            # (`except Exception, ex`); silently fall back to no default.
            return False

    def _fnct_line_invoiced(self, cr, uid, ids, field_name, args, context=None):
        """A line is invoiced when it has invoice lines and none of their
        invoices is cancelled (function field `invoiced`)."""
        res = dict.fromkeys(ids, False)
        for this in self.browse(cr, uid, ids, context=context):
            res[this.id] = this.invoice_lines and \
                all(iline.invoice_id.state != 'cancel' for iline in this.invoice_lines)
        return res

    def _order_lines_from_invoice(self, cr, uid, ids, context=None):
        """Return the ids of the sale order lines whose order is linked to
        the given invoices; used as a store trigger for `invoiced`."""
        # direct access to the m2m table is the less convoluted way to achieve this (and is ok ACL-wise)
        cr.execute("""SELECT DISTINCT sol.id FROM sale_order_invoice_rel rel JOIN
                sale_order_line sol ON (sol.order_id = rel.order_id)
                WHERE rel.invoice_id = ANY(%s)""", (list(ids),))
        return [i[0] for i in cr.fetchall()]

    def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
        """Unit price after discount: subtotal divided by quantity."""
        res = dict.fromkeys(ids, 0.0)
        for line in self.browse(cr, uid, ids, context=context):
            # Fix: guard against a zero quantity; the original raised
            # ZeroDivisionError when product_uom_qty was 0.
            if line.product_uom_qty:
                res[line.id] = line.price_subtotal / line.product_uom_qty
        return res

    _name = 'sale.order.line'
    _description = 'Sales Order Line'
    _columns = {
        'order_id': fields.many2one('sale.order', 'Order Reference', required=True, ondelete='cascade', select=True, readonly=True, states={'draft': [('readonly', False)]}),
        'name': fields.text('Description', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of sales order lines."),
        'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], change_default=True, readonly=True, states={'draft': [('readonly', False)]}, ondelete='restrict'),
        'invoice_lines': fields.many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True, copy=False),
        'invoiced': fields.function(_fnct_line_invoiced, string='Invoiced', type='boolean',
            store={
                'account.invoice': (_order_lines_from_invoice, ['state'], 10),
                'sale.order.line': (lambda self, cr, uid, ids, ctx=None: ids, ['invoice_lines'], 10)
            }),
        'price_unit': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price'), readonly=True, states={'draft': [('readonly', False)]}),
        'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),
        'price_reduce': fields.function(_get_price_reduce, type='float', string='Price Reduce', digits_compute=dp.get_precision('Product Price')),
        'tax_id': fields.many2many('account.tax', 'sale_order_tax', 'order_line_id', 'tax_id', 'Taxes', readonly=True, states={'draft': [('readonly', False)]}),
        'address_allotment_id': fields.many2one('res.partner', 'Allotment Partner', help="A partner to whom the particular product needs to be allotted."),
        'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product UoS'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'product_uom': fields.many2one('product.uom', 'Unit of Measure ', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'product_uos_qty': fields.float('Quantity (UoS)', digits_compute=dp.get_precision('Product UoS'), readonly=True, states={'draft': [('readonly', False)]}),
        'product_uos': fields.many2one('product.uom', 'Product UoS'),
        'discount': fields.float('Discount (%)', digits_compute=dp.get_precision('Discount'), readonly=True, states={'draft': [('readonly', False)]}),
        'th_weight': fields.float('Weight', readonly=True, states={'draft': [('readonly', False)]}),
        'state': fields.selection(
            [('cancel', 'Cancelled'), ('draft', 'Draft'), ('confirmed', 'Confirmed'), ('exception', 'Exception'), ('done', 'Done')],
            'Status', required=True, readonly=True, copy=False,
            help='* The \'Draft\' status is set when the related sales order in draft status. \
                \n* The \'Confirmed\' status is set when the related sales order is confirmed. \
                \n* The \'Exception\' status is set when the related sales order is set as exception. \
                \n* The \'Done\' status is set when the sales order line has been picked. \
                \n* The \'Cancelled\' status is set when a user cancel the sales order related.'),
        'order_partner_id': fields.related('order_id', 'partner_id', type='many2one', relation='res.partner', store=True, string='Customer'),
        'salesman_id': fields.related('order_id', 'user_id', type='many2one', relation='res.users', store=True, string='Salesperson'),
        'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
        'delay': fields.float('Delivery Lead Time', required=True, help="Number of days between the order confirmation and the shipping of the products to the customer", readonly=True, states={'draft': [('readonly', False)]}),
        'procurement_ids': fields.one2many('procurement.order', 'sale_line_id', 'Procurements'),
    }
    _order = 'order_id desc, sequence, id'
    _defaults = {
        'product_uom': _get_uom_id,
        'discount': 0.0,
        'product_uom_qty': 1,
        'product_uos_qty': 1,
        'sequence': 10,
        'state': 'draft',
        'price_unit': 0.0,
        'delay': 0.0,
    }

    def _get_line_qty(self, cr, uid, line, context=None):
        """Quantity to invoice: UoS quantity when a UoS is set, else the UoM quantity."""
        if line.product_uos:
            return line.product_uos_qty or 0.0
        return line.product_uom_qty

    def _get_line_uom(self, cr, uid, line, context=None):
        """Unit to invoice in: the UoS when set, else the UoM."""
        if line.product_uos:
            return line.product_uos.id
        return line.product_uom.id

    def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
        """Prepare the dict of values to create the new invoice line for a
        sales order line. This method may be overridden to implement custom
        invoice generation (making sure to call super() to establish
        a clean extension chain).

        :param browse_record line: sale.order.line record to invoice
        :param int account_id: optional ID of a G/L account to force
            (this is used for returning products including service)
        :return: dict of values to create() the invoice line
        :raises osv.except_osv: when no income account can be determined
        """
        res = {}
        if not line.invoiced:
            if not account_id:
                if line.product_id:
                    # Product income account, falling back to its category's.
                    account_id = line.product_id.property_account_income.id
                    if not account_id:
                        account_id = line.product_id.categ_id.property_account_income_categ.id
                    if not account_id:
                        raise osv.except_osv(_('Error!'),
                                _('Please define income account for this product: "%s" (id:%d).') % \
                                    (line.product_id.name, line.product_id.id,))
                else:
                    # No product: use the default category income property.
                    prop = self.pool.get('ir.property').get(cr, uid,
                            'property_account_income_categ', 'product.category',
                            context=context)
                    account_id = prop and prop.id or False
            uosqty = self._get_line_qty(cr, uid, line, context=context)
            uos_id = self._get_line_uom(cr, uid, line, context=context)
            # Convert the unit price to the invoiced unit (UoS).
            pu = 0.0
            if uosqty:
                pu = round(line.price_unit * line.product_uom_qty / uosqty,
                        self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Price'))
            fpos = line.order_id.fiscal_position or False
            account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, account_id)
            if not account_id:
                raise osv.except_osv(_('Error!'),
                            _('There is no Fiscal Position defined or Income category account defined for default properties of Product categories.'))
            res = {
                'name': line.name,
                'sequence': line.sequence,
                'origin': line.order_id.name,
                'account_id': account_id,
                'price_unit': pu,
                'quantity': uosqty,
                'discount': line.discount,
                'uos_id': uos_id,
                'product_id': line.product_id.id or False,
                'invoice_line_tax_id': [(6, 0, [x.id for x in line.tax_id])],
                'account_analytic_id': line.order_id.project_id and line.order_id.project_id.id or False,
            }
        return res

    def invoice_line_create(self, cr, uid, ids, context=None):
        """Create an invoice line for each not-yet-invoiced order line,
        link them, and re-trigger the orders' workflows.

        :return: list of created account.invoice.line ids
        """
        if context is None:
            context = {}
        create_ids = []
        sales = set()
        for line in self.browse(cr, uid, ids, context=context):
            vals = self._prepare_order_line_invoice_line(cr, uid, line, False, context)
            if vals:
                inv_id = self.pool.get('account.invoice.line').create(cr, uid, vals, context=context)
                self.write(cr, uid, [line.id], {'invoice_lines': [(4, inv_id)]}, context=context)
                sales.add(line.order_id.id)
                create_ids.append(inv_id)
        # Trigger workflow events
        for sale_id in sales:
            workflow.trg_write(uid, 'sale.order', sale_id, cr)
        return create_ids

    def button_cancel(self, cr, uid, ids, context=None):
        """Cancel the lines and their procurements; invoiced lines cannot
        be cancelled.

        :raises osv.except_osv: when a line is already invoiced.
        """
        lines = self.browse(cr, uid, ids, context=context)
        for line in lines:
            if line.invoiced:
                raise osv.except_osv(_('Invalid Action!'), _('You cannot cancel a sales order line that has already been invoiced.'))
        procurement_obj = self.pool['procurement.order']
        procurement_obj.cancel(cr, uid, sum([l.procurement_ids.ids for l in lines], []), context=context)
        return self.write(cr, uid, ids, {'state': 'cancel'})

    def button_confirm(self, cr, uid, ids, context=None):
        """Confirm the lines."""
        return self.write(cr, uid, ids, {'state': 'confirmed'})

    def button_done(self, cr, uid, ids, context=None):
        """Mark the lines as done and re-trigger their orders' workflows."""
        res = self.write(cr, uid, ids, {'state': 'done'})
        for line in self.browse(cr, uid, ids, context=context):
            workflow.trg_write(uid, 'sale.order', line.order_id.id, cr)
        return res

    def uos_change(self, cr, uid, ids, product_uos, product_uos_qty=0, product_id=None):
        """Onchange of the UoS: recompute the UoM quantity and weight from
        the product's uos_coeff."""
        product_obj = self.pool.get('product.product')
        if not product_id:
            return {'value': {'product_uom': product_uos,
                'product_uom_qty': product_uos_qty}, 'domain': {}}

        product = product_obj.browse(cr, uid, product_id)
        value = {
            'product_uom': product.uom_id.id,
        }
        # FIXME must depend on uos/uom of the product and not only of the coeff.
        try:
            value.update({
                'product_uom_qty': product_uos_qty / product.uos_coeff,
                'th_weight': product_uos_qty / product.uos_coeff * product.weight
            })
        except ZeroDivisionError:
            pass
        return {'value': value}

    def create(self, cr, uid, values, context=None):
        """Complete missing line values from the product onchange before
        creating, so lines created from code or imports get sane defaults."""
        if values.get('order_id') and values.get('product_id') and any(f not in values for f in ['name', 'price_unit', 'type', 'product_uom_qty', 'product_uom']):
            order = self.pool['sale.order'].read(cr, uid, values['order_id'], ['pricelist_id', 'partner_id', 'date_order', 'fiscal_position'], context=context)
            defaults = self.product_id_change(cr, uid, [], order['pricelist_id'][0], values['product_id'],
                qty=float(values.get('product_uom_qty', False)),
                uom=values.get('product_uom', False),
                qty_uos=float(values.get('product_uos_qty', False)),
                uos=values.get('product_uos', False),
                name=values.get('name', False),
                partner_id=order['partner_id'][0],
                date_order=order['date_order'],
                fiscal_position=order['fiscal_position'][0] if order['fiscal_position'] else False,
                flag=False,  # Force name update
                context=context
            )['value']
            if defaults.get('tax_id'):
                defaults['tax_id'] = [[6, 0, defaults['tax_id']]]
            # Explicit values always win over the onchange defaults.
            values = dict(defaults, **values)
        return super(sale_order_line, self).create(cr, uid, values, context=context)

    def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
            uom=False, qty_uos=0, uos=False, name='', partner_id=False,
            lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
        """Onchange of the product: recompute description, units, UoS
        quantity, weight, taxes and unit price of the line.

        :raises osv.except_osv: when no customer has been selected yet.
        """
        context = context or {}
        lang = lang or context.get('lang', False)
        if not partner_id:
            raise osv.except_osv(_('No Customer Defined!'), _('Before choosing a product,\n select a customer in the sales form.'))
        warning = False
        product_uom_obj = self.pool.get('product.uom')
        partner_obj = self.pool.get('res.partner')
        product_obj = self.pool.get('product.product')
        partner = partner_obj.browse(cr, uid, partner_id)
        # Work in the customer's language so name/description are translated.
        lang = partner.lang
        context_partner = context.copy()
        context_partner.update({'lang': lang, 'partner_id': partner_id})
        if not product:
            return {'value': {'th_weight': 0,
                'product_uos_qty': qty}, 'domain': {'product_uom': [],
                   'product_uos': []}}
        if not date_order:
            date_order = time.strftime(DEFAULT_SERVER_DATE_FORMAT)

        result = {}
        warning_msgs = ''
        product_obj = product_obj.browse(cr, uid, product, context=context_partner)

        # Drop the given UoM/UoS when incompatible with the product's.
        uom2 = False
        if uom:
            uom2 = product_uom_obj.browse(cr, uid, uom)
            if product_obj.uom_id.category_id.id != uom2.category_id.id:
                uom = False
        if uos:
            if product_obj.uos_id:
                uos2 = product_uom_obj.browse(cr, uid, uos)
                if product_obj.uos_id.category_id.id != uos2.category_id.id:
                    uos = False
            else:
                uos = False

        fpos = False
        if not fiscal_position:
            fpos = partner.property_account_position or False
        else:
            fpos = self.pool.get('account.fiscal.position').browse(cr, uid, fiscal_position)
        if update_tax:  # The quantity only have changed
            result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, product_obj.taxes_id)

        if not flag:
            result['name'] = self.pool.get('product.product').name_get(cr, uid, [product_obj.id], context=context_partner)[0][1]
            if product_obj.description_sale:
                result['name'] += '\n' + product_obj.description_sale
        domain = {}
        if (not uom) and (not uos):
            result['product_uom'] = product_obj.uom_id.id
            if product_obj.uos_id:
                result['product_uos'] = product_obj.uos_id.id
                result['product_uos_qty'] = qty * product_obj.uos_coeff
                uos_category_id = product_obj.uos_id.category_id.id
            else:
                result['product_uos'] = False
                result['product_uos_qty'] = qty
                uos_category_id = False
            result['th_weight'] = qty * product_obj.weight
            domain = {'product_uom':
                        [('category_id', '=', product_obj.uom_id.category_id.id)],
                        'product_uos':
                        [('category_id', '=', uos_category_id)]}
        elif uos and not uom:  # only happens if uom is False
            result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id
            result['product_uom_qty'] = qty_uos / product_obj.uos_coeff
            result['th_weight'] = result['product_uom_qty'] * product_obj.weight
        elif uom:  # whether uos is set or not
            default_uom = product_obj.uom_id and product_obj.uom_id.id
            q = product_uom_obj._compute_qty(cr, uid, uom, qty, default_uom)
            if product_obj.uos_id:
                result['product_uos'] = product_obj.uos_id.id
                result['product_uos_qty'] = qty * product_obj.uos_coeff
            else:
                result['product_uos'] = False
                result['product_uos_qty'] = qty
            result['th_weight'] = q * product_obj.weight  # Round the quantity up

        if not uom2:
            uom2 = product_obj.uom_id
        # get unit price
        if not pricelist:
            warn_msg = _('You have to select a pricelist or a customer in the sales form !\n'
                    'Please set one before choosing a product.')
            warning_msgs += _("No Pricelist ! : ") + warn_msg + "\n\n"
        else:
            price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
                    product, qty or 1.0, partner_id, {
                        'uom': uom or result.get('product_uom'),
                        'date': date_order,
                    })[pricelist]
            if price is False:
                warn_msg = _("Cannot find a pricelist line matching this product and quantity.\n"
                        "You have to change either the product, the quantity or the pricelist.")
                warning_msgs += _("No valid pricelist line found ! :") + warn_msg + "\n\n"
            else:
                result.update({'price_unit': price})
        if warning_msgs:
            warning = {
                'title': _('Configuration Error!'),
                'message': warning_msgs
            }
        return {'value': result, 'domain': domain, 'warning': warning}

    def product_uom_change(self, cursor, user, ids, pricelist, product, qty=0,
            uom=False, qty_uos=0, uos=False, name='', partner_id=False,
            lang=False, update_tax=True, date_order=False, context=None):
        """Onchange of the UoM: reset the price when cleared, otherwise
        delegate to the full product onchange."""
        context = context or {}
        lang = lang or ('lang' in context and context['lang'])
        if not uom:
            return {'value': {'price_unit': 0.0, 'product_uom': uom or False}}
        return self.product_id_change(cursor, user, ids, pricelist, product,
                qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name,
                partner_id=partner_id, lang=lang, update_tax=update_tax,
                date_order=date_order, context=context)

    def unlink(self, cr, uid, ids, context=None):
        """Allow deleting sales order lines only in draft or cancel state.

        Fix: the original had this docstring placed after the first
        statements, where it was a no-op string expression.

        :raises osv.except_osv: when a line is in any other state.
        """
        if context is None:
            context = {}
        for rec in self.browse(cr, uid, ids, context=context):
            if rec.state not in ['draft', 'cancel']:
                raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a sales order line which is in state \'%s\'.') % (rec.state,))
        return super(sale_order_line, self).unlink(cr, uid, ids, context=context)
class mail_compose_message(osv.Model):
    _inherit = 'mail.compose.message'

    def send_mail(self, cr, uid, ids, context=None):
        """Override: when the composer was opened from a sales order with
        the `mark_so_as_sent` context key, fire the `quotation_sent`
        workflow signal on that order before actually sending the mail.
        """
        context = context or {}
        if context.get('default_model') == 'sale.order' and context.get('default_res_id') and context.get('mark_so_as_sent'):
            # mail_post_autofollow presumably subscribes the recipients to
            # the order's thread -- TODO confirm against mail module.
            context = dict(context, mail_post_autofollow=True)
            self.pool.get('sale.order').signal_workflow(cr, uid, [context['default_res_id']], 'quotation_sent')
        return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
    _inherit = 'account.invoice'

    def _get_default_section_id(self, cr, uid, context=None):
        """ Gives default section by checking if present in the context,
        falling back to the current user's default sales team. """
        section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
        if not section_id:
            section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
        return section_id

    def _resolve_section_id_from_context(self, cr, uid, context=None):
        """ Returns ID of section based on the value of 'section_id'
            context key, or None if it cannot be resolved to a single
            Sales Team.
        """
        if context is None:
            context = {}
        if type(context.get('default_section_id')) in (int, long):
            return context.get('default_section_id')
        if isinstance(context.get('default_section_id'), basestring):
            # The key may hold a team name; resolve it only when the name
            # matches exactly one team.
            section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
            if len(section_ids) == 1:
                return int(section_ids[0][0])
        return None

    _columns = {
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
    _defaults = {
        'section_id': lambda self, cr, uid, c=None: self._get_default_section_id(cr, uid, context=c)
    }

    def confirm_paid(self, cr, uid, ids, context=None):
        """Override: log an "Invoice paid" note on the related sales orders."""
        sale_order_obj = self.pool.get('sale.order')
        res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
        so_ids = sale_order_obj.search(cr, uid, [('invoice_ids', 'in', ids)], context=context)
        for so_id in so_ids:
            sale_order_obj.message_post(cr, uid, so_id, body=_("Invoice paid"), context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        """ Overwrite unlink method of account invoice to send a trigger to the sale workflow upon invoice deletion """
        invoice_ids = self.search(cr, uid, [('id', 'in', ids), ('state', 'in', ['draft', 'cancel'])], context=context)
        #if we can't cancel all invoices, do nothing
        # NOTE(review): in that case the method implicitly returns None.
        if len(invoice_ids) == len(ids):
            #Cancel invoice(s) first before deleting them so that if any sale order is associated with them
            #it will trigger the workflow to put the sale order in an 'invoice exception' state
            for id in ids:
                workflow.trg_validate(uid, 'account.invoice', id, 'invoice_cancel', cr)
            return super(account_invoice, self).unlink(cr, uid, ids, context=context)
class procurement_order(osv.osv):
    _inherit = 'procurement.order'
    _columns = {
        # Back-link to the sale order line that created this procurement.
        'sale_line_id': fields.many2one('sale.order.line', string='Sale Order Line'),
    }

    def write(self, cr, uid, ids, vals, context=None):
        """Override: when a procurement reaches a final state, re-evaluate
        the related sale order and fire its shipping workflow signals."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = super(procurement_order, self).write(cr, uid, ids, vals, context=context)
        # NOTE(review): local import, presumably to avoid an import cycle
        # at module load -- TODO confirm.
        from openerp import workflow
        if vals.get('state') in ['done', 'cancel', 'exception']:
            for proc in self.browse(cr, uid, ids, context=context):
                if proc.sale_line_id and proc.sale_line_id.order_id:
                    order_id = proc.sale_line_id.order_id.id
                    # All procurements done -> end the shipping step.
                    if self.pool.get('sale.order').test_procurements_done(cr, uid, [order_id], context=context):
                        workflow.trg_validate(uid, 'sale.order', order_id, 'ship_end', cr)
                    # Any cancelled procurement -> shipping exception.
                    if self.pool.get('sale.order').test_procurements_except(cr, uid, [order_id], context=context):
                        workflow.trg_validate(uid, 'sale.order', order_id, 'ship_except', cr)
        return res
class product_product(osv.Model):
    _inherit = 'product.product'

    def _sales_count(self, cr, uid, ids, field_name, arg, context=None):
        """Units sold per product, aggregated from sale.report over orders
        in an active or done state."""
        counts = dict.fromkeys(ids, 0)
        states = ['waiting_date', 'progress', 'manual', 'shipping_except', 'invoice_except', 'done']
        domain = [
            ('state', 'in', states),
            ('product_id', 'in', ids),
        ]
        groups = self.pool['sale.report'].read_group(
            cr, uid, domain, ['product_id', 'product_uom_qty'], ['product_id'], context=context)
        for group in groups:
            counts[group['product_id'][0]] = group['product_uom_qty']
        return counts

    def action_view_sales(self, cr, uid, ids, context=None):
        """Window action listing the sale order lines of these products."""
        action_id = self.pool['ir.model.data'].xmlid_to_res_id(
            cr, uid, 'sale.action_order_line_product_tree', raise_if_not_found=True)
        action = self.pool['ir.actions.act_window'].read(cr, uid, [action_id], context=context)[0]
        action['domain'] = "[('product_id','in',[%s])]" % ','.join(str(product_id) for product_id in ids)
        return action

    _columns = {
        'sales_count': fields.function(_sales_count, string='# Sales', type='integer'),
    }
class product_template(osv.Model):
    _inherit = 'product.template'

    def _sales_count(self, cr, uid, ids, field_name, arg, context=None):
        """Units sold per template: the sum of its variants' counters."""
        res = dict.fromkeys(ids, 0)
        for template in self.browse(cr, uid, ids, context=context):
            res[template.id] = sum(variant.sales_count for variant in template.product_variant_ids)
        return res

    def action_view_sales(self, cr, uid, ids, context=None):
        """Window action listing the sale order lines of every variant of
        these templates."""
        act_obj = self.pool.get('ir.actions.act_window')
        mod_obj = self.pool.get('ir.model.data')
        variant_ids = []
        for template in self.browse(cr, uid, ids, context=context):
            variant_ids.extend(variant.id for variant in template.product_variant_ids)
        action_id = mod_obj.xmlid_to_res_id(cr, uid, 'sale.action_order_line_product_tree', raise_if_not_found=True)
        action = act_obj.read(cr, uid, [action_id], context=context)[0]
        action['domain'] = "[('product_id','in',[%s])]" % ','.join(str(i) for i in variant_ids)
        return action

    _columns = {
        'sales_count': fields.function(_sales_count, string='# Sales', type='integer'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
benthomasson/ansible | lib/ansible/modules/packaging/os/pkg5.py | 7 | 4800 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkg5
author: "Peter Oliver (@mavit)"
short_description: Manages packages with the Solaris 11 Image Packaging System
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
notes:
- The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
options:
name:
description:
- An FRMI of the package(s) to be installed/removed/updated.
- Multiple packages may be specified, separated by C(,).
required: true
state:
description:
- Whether to install (I(present), I(latest)), or remove (I(absent)) a
package.
required: false
default: present
choices: [ present, latest, absent ]
accept_licenses:
description:
- Accept any licences.
required: false
default: false
choices: [ true, false ]
aliases: [ accept_licences, accept ]
'''
EXAMPLES = '''
# Install Vim:
- pkg5:
name: editor/vim
# Remove finger daemon:
- pkg5:
name: service/network/finger
state: absent
# Install several packages at once:
- pkg5:
name:
- /file/gnu-findutils
- /text/gnu-grep
'''
def main():
    """Entry point: parse module arguments and converge pkg(5) package state.

    Re-joins package FMRIs that AnsibleModule's comma-splitting broke
    apart, then delegates to ensure() for the requested state.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='list'),
            state=dict(
                default='present',
                choices=[
                    'present',
                    'installed',
                    'latest',
                    'absent',
                    'uninstalled',
                    'removed',
                ]
            ),
            accept_licenses=dict(
                type='bool',
                default=False,
                aliases=['accept_licences', 'accept'],
            ),
        ),
        supports_check_mode=True,
    )
    params = module.params
    packages = []
    # pkg(5) FRMIs include a comma before the release number, but
    # AnsibleModule will have split this into multiple items for us.
    # Try to spot where this has happened and fix it.
    # NOTE(review): ``re`` is only in scope here via the wildcard import of
    # ansible.module_utils.basic at the bottom of this file — confirm, or
    # import it explicitly at the top.
    for fragment in params['name']:
        if (
            # Raw strings so the regex backslashes are not treated as
            # (invalid) Python string escapes.
            re.search(r'^\d+(?:\.\d+)*', fragment)
            and packages and re.search(r'@[^,]*$', packages[-1])
        ):
            packages[-1] += ',' + fragment
        else:
            packages.append(fragment)
    if params['state'] in ['present', 'installed']:
        ensure(module, 'present', packages, params)
    elif params['state'] in ['latest']:
        ensure(module, 'latest', packages, params)
    elif params['state'] in ['absent', 'uninstalled', 'removed']:
        ensure(module, 'absent', packages, params)
def ensure(module, state, packages, params):
    """Install, update or uninstall *packages* so they match *state*.

    Runs ``pkg install``/``pkg uninstall`` only for packages whose current
    state differs from the requested one, then exits the module with the
    accumulated results via ``module.exit_json`` (or ``fail_json`` on a
    non-zero pkg exit code).

    :param module: AnsibleModule (run_command/exit_json/fail_json used).
    :param state: one of 'present', 'latest', 'absent'.
    :param packages: list of pkg(5) FMRIs.
    :param params: module parameters; only 'accept_licenses' is read.
    """
    response = {
        'results': [],
        'msg': '',
    }
    # Map each target state to a predicate that selects packages still
    # needing work, plus the pkg(1) subcommand that performs it.
    behaviour = {
        'present': {
            'filter': lambda p: not is_installed(module, p),
            'subcommand': 'install',
        },
        'latest': {
            'filter': lambda p: (
                not is_installed(module, p) or not is_latest(module, p)
            ),
            'subcommand': 'install',
        },
        'absent': {
            'filter': lambda p: is_installed(module, p),
            'subcommand': 'uninstall',
        },
    }
    if module.check_mode:
        dry_run = ['-n']
    else:
        dry_run = []
    if params['accept_licenses']:
        accept_licenses = ['--accept']
    else:
        accept_licenses = []
    # Use a list comprehension instead of filter(): on Python 3 filter()
    # returns a lazy iterator that is always truthy (so the command would
    # run even with nothing to do) and cannot be concatenated to a list
    # when building the command line below.
    to_modify = [p for p in packages if behaviour[state]['filter'](p)]
    if to_modify:
        rc, out, err = module.run_command(
            [
                'pkg', behaviour[state]['subcommand']
            ]
            + dry_run
            + accept_licenses
            + [
                '-q', '--'
            ] + to_modify
        )
        response['rc'] = rc
        response['results'].append(out)
        response['msg'] += err
        response['changed'] = True
        if rc != 0:
            module.fail_json(**response)
    module.exit_json(**response)
def is_installed(module, package):
    """Return True if *package* is currently installed.

    ``pkg list`` exits with status 0 exactly when the package is present.
    """
    rc, _out, _err = module.run_command(['pkg', 'list', '--', package])
    return rc == 0
def is_latest(module, package):
    """Return True if no newer version of *package* is available.

    ``pkg list -u`` lists packages with pending updates and exits non-zero
    when there is nothing to update.
    """
    rc, _out, _err = module.run_command(['pkg', 'list', '-u', '--', package])
    return rc != 0
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
gauribhoite/personfinder | env/google_appengine/lib/django-1.5/django/db/models/options.py | 99 | 23148 | from __future__ import unicode_literals
import re
from bisect import bisect
from django.conf import settings
from django.db.models.related import RelatedObject
from django.db.models.fields.related import ManyToManyRel
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.proxy import OrderWrt
from django.db.models.loading import get_models, app_cache_ready
from django.utils import six
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible
from django.utils.translation import activate, deactivate_all, get_language, string_concat
# Calculate the verbose_name by converting from InitialCaps to "lowercase
# with spaces". The pattern matches each uppercase letter that starts a new
# word (either preceded by a lowercase letter, or followed by a lowercase
# letter) so a space can be inserted before it.
_camel_boundary_re = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')


def get_verbose_name(class_name):
    """Turn an InitialCaps class name into a lowercase, space-separated name."""
    return _camel_boundary_re.sub(' \\1', class_name).lower().strip()
# Option names a model's inner ``Meta`` class may define; any other
# attribute found on Meta is rejected in Options.contribute_to_class.
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
                 'unique_together', 'permissions', 'get_latest_by',
                 'order_with_respect_to', 'app_label', 'db_tablespace',
                 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
                 'index_together')
@python_2_unicode_compatible
class Options(object):
    """Model metadata container, exposed on every model as ``_meta``.

    Collects the options declared on a model's inner ``Meta`` class,
    tracks the model's fields (local, inherited and many-to-many) and
    lazily builds/caches the forward and reverse relation maps used by
    query construction and the admin.
    """
    def __init__(self, meta, app_label=None):
        # ``meta`` is the model's inner Meta class (or None); its attributes
        # are applied over these defaults in contribute_to_class().
        self.local_fields, self.local_many_to_many = [], []
        self.virtual_fields = []
        self.module_name, self.verbose_name = None, None
        self.verbose_name_plural = None
        self.db_table = ''
        self.ordering = []
        self.unique_together = []
        self.index_together = []
        self.permissions = []
        self.object_name, self.app_label = None, app_label
        self.get_latest_by = None
        self.order_with_respect_to = None
        self.db_tablespace = settings.DEFAULT_TABLESPACE
        self.admin = None
        self.meta = meta
        self.pk = None
        self.has_auto_field, self.auto_field = False, None
        self.abstract = False
        self.managed = True
        self.proxy = False
        # For any class that is a proxy (including automatically created
        # classes for deferred object loading), proxy_for_model tells us
        # which class this model is proxying. Note that proxy_for_model
        # can create a chain of proxy models. For non-proxy models, the
        # variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = SortedDict()
        self.duplicate_targets = {}
        self.auto_created = False
        # To handle various inheritance situations, we need to track where
        # managers came from (concrete or abstract base classes).
        self.abstract_managers = []
        self.concrete_managers = []
        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []
    def contribute_to_class(self, cls, name):
        """Attach this Options instance to *cls* and apply Meta overrides."""
        from django.db import connection
        from django.db.backends.util import truncate_name
        cls._meta = self
        self.installed = re.sub('\.models$', '', cls.__module__) in settings.INSTALLED_APPS
        # First, construct the default values for these options.
        self.object_name = cls.__name__
        self.module_name = self.object_name.lower()
        self.verbose_name = get_verbose_name(self.object_name)
        # Next, apply any overridden values from 'class Meta'.
        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            for name in self.meta.__dict__:
                # Ignore any private attributes that Django doesn't care about.
                # NOTE: We can't modify a dictionary's contents while looping
                # over it, so we loop over the *original* dictionary instead.
                if name.startswith('_'):
                    del meta_attrs[name]
            for attr_name in DEFAULT_NAMES:
                if attr_name in meta_attrs:
                    setattr(self, attr_name, meta_attrs.pop(attr_name))
                elif hasattr(self.meta, attr_name):
                    setattr(self, attr_name, getattr(self.meta, attr_name))
            # unique_together can be either a tuple of tuples, or a single
            # tuple of two strings. Normalize it to a tuple of tuples, so that
            # calling code can uniformly expect that.
            ut = meta_attrs.pop('unique_together', self.unique_together)
            if ut and not isinstance(ut[0], (tuple, list)):
                ut = (ut,)
            self.unique_together = ut
            # verbose_name_plural is a special case because it uses a 's'
            # by default.
            if self.verbose_name_plural is None:
                self.verbose_name_plural = string_concat(self.verbose_name, 's')
            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
        else:
            self.verbose_name_plural = string_concat(self.verbose_name, 's')
        del self.meta
        # If the db_table wasn't provided, use the app_label + module_name.
        if not self.db_table:
            self.db_table = "%s_%s" % (self.app_label, self.module_name)
            self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
    def _prepare(self, model):
        """Finalize options once all fields are known: ordering, primary key
        promotion for inherited models, and duplicate-target bookkeeping."""
        if self.order_with_respect_to:
            self.order_with_respect_to = self.get_field(self.order_with_respect_to)
            self.ordering = ('_order',)
            model.add_to_class('_order', OrderWrt())
        else:
            self.order_with_respect_to = None
        if self.pk is None:
            if self.parents:
                # Promote the first parent link in lieu of adding yet another
                # field.
                field = next(six.itervalues(self.parents))
                # Look for a local field with the same name as the
                # first parent link. If a local field has already been
                # created, use it instead of promoting the parent
                already_created = [fld for fld in self.local_fields if fld.name == field.name]
                if already_created:
                    field = already_created[0]
                field.primary_key = True
                self.setup_pk(field)
            else:
                auto = AutoField(verbose_name='ID', primary_key=True,
                        auto_created=True)
                model.add_to_class('id', auto)
        # Determine any sets of fields that are pointing to the same targets
        # (e.g. two ForeignKeys to the same remote model). The query
        # construction code needs to know this. At the end of this,
        # self.duplicate_targets will map each duplicate field column to the
        # columns it duplicates.
        collections = {}
        for column, target in six.iteritems(self.duplicate_targets):
            try:
                collections[target].add(column)
            except KeyError:
                collections[target] = set([column])
        self.duplicate_targets = {}
        for elt in six.itervalues(collections):
            if len(elt) == 1:
                continue
            for column in elt:
                self.duplicate_targets[column] = elt.difference(set([column]))
    def add_field(self, field):
        """Insert *field* in creation order, invalidating affected caches."""
        # Insert the given field in the order in which it was created, using
        # the "creation_counter" attribute of the field.
        # Move many-to-many related fields from self.fields into
        # self.many_to_many.
        if field.rel and isinstance(field.rel, ManyToManyRel):
            self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
            if hasattr(self, '_m2m_cache'):
                del self._m2m_cache
        else:
            self.local_fields.insert(bisect(self.local_fields, field), field)
            self.setup_pk(field)
            if hasattr(self, '_field_cache'):
                del self._field_cache
                del self._field_name_cache
        if hasattr(self, '_name_map'):
            del self._name_map
    def add_virtual_field(self, field):
        """Register a virtual (non-column) field, e.g. a generic relation."""
        self.virtual_fields.append(field)
    def setup_pk(self, field):
        """Adopt *field* as the primary key if none has been set yet."""
        if not self.pk and field.primary_key:
            self.pk = field
            field.serialize = False
    def pk_index(self):
        """
        Returns the index of the primary key field in the self.fields list.
        """
        return self.fields.index(self.pk)
    def setup_proxy(self, target):
        """
        Does the internal setup so that the current model is a proxy for
        "target".
        """
        self.pk = target._meta.pk
        self.proxy_for_model = target
        self.db_table = target._meta.db_table
    def __repr__(self):
        return '<Options for %s>' % self.object_name
    def __str__(self):
        return "%s.%s" % (smart_text(self.app_label), smart_text(self.module_name))
    def verbose_name_raw(self):
        """
        There are a few places where the untranslated verbose name is needed
        (so that we get the same value regardless of currently active
        locale).
        """
        lang = get_language()
        deactivate_all()
        raw = force_text(self.verbose_name)
        activate(lang)
        return raw
    verbose_name_raw = property(verbose_name_raw)
    def _swapped(self):
        """
        Has this model been swapped out for another? If so, return the model
        name of the replacement; otherwise, return None.
        For historical reasons, model name lookups using get_model() are
        case insensitive, so we make sure we are case insensitive here.
        """
        if self.swappable:
            model_label = '%s.%s' % (self.app_label, self.object_name.lower())
            swapped_for = getattr(settings, self.swappable, None)
            if swapped_for:
                try:
                    swapped_label, swapped_object = swapped_for.split('.')
                except ValueError:
                    # setting not in the format app_label.model_name
                    # raising ImproperlyConfigured here causes problems with
                    # test cleanup code - instead it is raised in get_user_model
                    # or as part of validation.
                    return swapped_for
                if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
                    return swapped_for
        return None
    swapped = property(_swapped)
    def _fields(self):
        """
        The getter for self.fields. This returns the list of field objects
        available to this model (including through parent models).
        Callers are not permitted to modify this list, since it's a reference
        to this instance (not a copy).
        """
        try:
            self._field_name_cache
        except AttributeError:
            self._fill_fields_cache()
        return self._field_name_cache
    fields = property(_fields)
    def get_fields_with_model(self):
        """
        Returns a sequence of (field, model) pairs for all fields. The "model"
        element is None for fields on the current model. Mostly of use when
        constructing queries so that we know which model a field belongs to.
        """
        try:
            self._field_cache
        except AttributeError:
            self._fill_fields_cache()
        return self._field_cache
    def _fill_fields_cache(self):
        """Build the (field, model) cache, parents first, then local fields."""
        cache = []
        for parent in self.parents:
            for field, model in parent._meta.get_fields_with_model():
                if model:
                    cache.append((field, model))
                else:
                    cache.append((field, parent))
        cache.extend([(f, None) for f in self.local_fields])
        self._field_cache = tuple(cache)
        self._field_name_cache = [x for x, _ in cache]
    def _many_to_many(self):
        """Getter for self.many_to_many: all m2m fields, inherited included."""
        try:
            self._m2m_cache
        except AttributeError:
            self._fill_m2m_cache()
        return list(self._m2m_cache)
    many_to_many = property(_many_to_many)
    def get_m2m_with_model(self):
        """
        The many-to-many version of get_fields_with_model().
        """
        try:
            self._m2m_cache
        except AttributeError:
            self._fill_m2m_cache()
        return list(six.iteritems(self._m2m_cache))
    def _fill_m2m_cache(self):
        """Build the m2m (field -> defining model) cache, parents first."""
        cache = SortedDict()
        for parent in self.parents:
            for field, model in parent._meta.get_m2m_with_model():
                if model:
                    cache[field] = model
                else:
                    cache[field] = parent
        for field in self.local_many_to_many:
            cache[field] = None
        self._m2m_cache = cache
    def get_field(self, name, many_to_many=True):
        """
        Returns the requested field by name. Raises FieldDoesNotExist on error.
        """
        to_search = many_to_many and (self.fields + self.many_to_many) or self.fields
        for f in to_search:
            if f.name == name:
                return f
        raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))
    def get_field_by_name(self, name):
        """
        Returns the (field_object, model, direct, m2m), where field_object is
        the Field instance for the given name, model is the model containing
        this field (None for local fields), direct is True if the field exists
        on this model, and m2m is True for many-to-many relations. When
        'direct' is False, 'field_object' is the corresponding RelatedObject
        for this field (since the field doesn't have an instance associated
        with it).
        Uses a cache internally, so after the first access, this is very fast.
        """
        try:
            try:
                return self._name_map[name]
            except AttributeError:
                cache = self.init_name_map()
                return cache[name]
        except KeyError:
            raise FieldDoesNotExist('%s has no field named %r'
                    % (self.object_name, name))
    def get_all_field_names(self):
        """
        Returns a list of all field names that are possible for this model
        (including reverse relation names). This is used for pretty printing
        debugging output (a list of choices), so any internal-only field names
        are not included.
        """
        try:
            cache = self._name_map
        except AttributeError:
            cache = self.init_name_map()
        names = sorted(cache.keys())
        # Internal-only names end with "+" (symmetrical m2m related names being
        # the main example). Trim them.
        return [val for val in names if not val.endswith('+')]
    def init_name_map(self):
        """
        Initialises the field name -> field object mapping.
        """
        cache = {}
        # We intentionally handle related m2m objects first so that symmetrical
        # m2m accessor names can be overridden, if necessary.
        for f, model in self.get_all_related_m2m_objects_with_model():
            cache[f.field.related_query_name()] = (f, model, False, True)
        for f, model in self.get_all_related_objects_with_model():
            cache[f.field.related_query_name()] = (f, model, False, False)
        for f, model in self.get_m2m_with_model():
            cache[f.name] = (f, model, True, True)
        for f, model in self.get_fields_with_model():
            cache[f.name] = (f, model, True, False)
        if app_cache_ready():
            self._name_map = cache
        return cache
    def get_add_permission(self):
        """Codename of the 'add' permission for this model."""
        return 'add_%s' % self.object_name.lower()
    def get_change_permission(self):
        """Codename of the 'change' permission for this model."""
        return 'change_%s' % self.object_name.lower()
    def get_delete_permission(self):
        """Codename of the 'delete' permission for this model."""
        return 'delete_%s' % self.object_name.lower()
    def get_all_related_objects(self, local_only=False, include_hidden=False,
                                include_proxy_eq=False):
        """Like get_all_related_objects_with_model(), but drops the model."""
        return [k for k, v in self.get_all_related_objects_with_model(
                local_only=local_only, include_hidden=include_hidden,
                include_proxy_eq=include_proxy_eq)]
    def get_all_related_objects_with_model(self, local_only=False,
                                           include_hidden=False,
                                           include_proxy_eq=False):
        """
        Returns a list of (related-object, model) pairs. Similar to
        get_fields_with_model().
        """
        try:
            self._related_objects_cache
        except AttributeError:
            self._fill_related_objects_cache()
        predicates = []
        if local_only:
            predicates.append(lambda k, v: not v)
        if not include_hidden:
            predicates.append(lambda k, v: not k.field.rel.is_hidden())
        cache = (self._related_objects_proxy_cache if include_proxy_eq
                 else self._related_objects_cache)
        return [t for t in cache.items() if all(p(*t) for p in predicates)]
    def _fill_related_objects_cache(self):
        """Build the reverse-relation caches (plain and proxy-aware)."""
        cache = SortedDict()
        parent_list = self.get_parent_list()
        for parent in self.parents:
            for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):
                if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
                    continue
                if not model:
                    cache[obj] = parent
                else:
                    cache[obj] = model
        # Collect also objects which are in relation to some proxy child/parent of self.
        proxy_cache = cache.copy()
        for klass in get_models(include_auto_created=True, only_installed=False):
            if not klass._meta.swapped:
                for f in klass._meta.local_fields:
                    if f.rel and not isinstance(f.rel.to, six.string_types):
                        if self == f.rel.to._meta:
                            cache[RelatedObject(f.rel.to, klass, f)] = None
                            proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
                        elif self.concrete_model == f.rel.to._meta.concrete_model:
                            proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
        self._related_objects_cache = cache
        self._related_objects_proxy_cache = proxy_cache
    def get_all_related_many_to_many_objects(self, local_only=False):
        """Like get_all_related_m2m_objects_with_model(), keys only."""
        try:
            cache = self._related_many_to_many_cache
        except AttributeError:
            cache = self._fill_related_many_to_many_cache()
        if local_only:
            return [k for k, v in cache.items() if not v]
        return list(cache)
    def get_all_related_m2m_objects_with_model(self):
        """
        Returns a list of (related-m2m-object, model) pairs. Similar to
        get_fields_with_model().
        """
        try:
            cache = self._related_many_to_many_cache
        except AttributeError:
            cache = self._fill_related_many_to_many_cache()
        return list(six.iteritems(cache))
    def _fill_related_many_to_many_cache(self):
        """Build the reverse m2m cache; only stored once the app cache is ready."""
        cache = SortedDict()
        parent_list = self.get_parent_list()
        for parent in self.parents:
            for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
                if obj.field.creation_counter < 0 and obj.model not in parent_list:
                    continue
                if not model:
                    cache[obj] = parent
                else:
                    cache[obj] = model
        for klass in get_models(only_installed=False):
            if not klass._meta.swapped:
                for f in klass._meta.local_many_to_many:
                    if (f.rel
                            and not isinstance(f.rel.to, six.string_types)
                            and self == f.rel.to._meta):
                        cache[RelatedObject(f.rel.to, klass, f)] = None
        if app_cache_ready():
            self._related_many_to_many_cache = cache
        return cache
    def get_base_chain(self, model):
        """
        Returns a list of parent classes leading to 'model' (order from closest
        to most distant ancestor). This has to handle the case where 'model' is
        a grandparent or even more distant relation.
        """
        if not self.parents:
            return
        if model in self.parents:
            return [model]
        for parent in self.parents:
            res = parent._meta.get_base_chain(model)
            if res:
                res.insert(0, parent)
                return res
        raise TypeError('%r is not an ancestor of this model'
                % model._meta.module_name)
    def get_parent_list(self):
        """
        Returns a list of all the ancestors of this model. Useful for
        determining if something is an ancestor, regardless of lineage.
        """
        result = set()
        for parent in self.parents:
            result.add(parent)
            result.update(parent._meta.get_parent_list())
        return result
    def get_ancestor_link(self, ancestor):
        """
        Returns the field on the current model which points to the given
        "ancestor". This is possibly an indirect link (a pointer to a parent
        model, which points, eventually, to the ancestor). Used when
        constructing table joins for model inheritance.
        Returns None if the model isn't an ancestor of this one.
        """
        if ancestor in self.parents:
            return self.parents[ancestor]
        for parent in self.parents:
            # Tries to get a link field from the immediate parent
            parent_link = parent._meta.get_ancestor_link(ancestor)
            if parent_link:
                # In case of a proxied model, the first link
                # of the chain to the ancestor is that parent
                # links
                return self.parents[parent] or parent_link
    def get_ordered_objects(self):
        "Returns a list of Options objects that are ordered with respect to this object."
        if not hasattr(self, '_ordered_objects'):
            objects = []
            # TODO
            #for klass in get_models(get_app(self.app_label)):
            #    opts = klass._meta
            #    if opts.order_with_respect_to and opts.order_with_respect_to.rel \
            #        and self == opts.order_with_respect_to.rel.to._meta:
            #        objects.append(opts)
            self._ordered_objects = objects
        return self._ordered_objects
| apache-2.0 |
andmos/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_wait.py | 14 | 4063 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_wait
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: Wait for Ansible Tower job to finish.
description:
- Wait for Ansible Tower job to finish and report success or failure. See
U(https://www.ansible.com/tower) for an overview.
options:
job_id:
description:
- ID of the job to monitor.
required: True
min_interval:
description:
- Minimum interval in seconds, to request an update from Tower.
default: 1
max_interval:
description:
- Maximum interval in seconds, to request an update from Tower.
default: 30
timeout:
description:
- Maximum time in seconds to wait for a job to finish.
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Launch a job
tower_job_launch:
job_template: "My Job Template"
register: job
- name: Wait for job max 120s
tower_job_wait:
job_id: job.id
timeout: 120
'''
RETURN = '''
id:
description: job id that is being waited on
returned: success
type: int
sample: 99
elapsed:
description: total time in seconds the job took to run
returned: success
type: float
sample: 10.879
started:
description: timestamp of when the job started running
returned: success
type: str
sample: "2017-03-01T17:03:53.200234Z"
finished:
description: timestamp of when the job finished running
returned: success
type: str
sample: "2017-03-01T17:04:04.078782Z"
status:
description: current status of job
returned: success
type: str
sample: successful
'''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
from ansible.module_utils.six.moves import cStringIO as StringIO
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
    """Wait for an Ansible Tower job to finish and report its final status."""
    argument_spec = dict(
        job_id=dict(type='int', required=True),
        timeout=dict(type='int'),
        min_interval=dict(type='float', default=1),
        max_interval=dict(type='float', default=30),
    )
    module = TowerModule(
        argument_spec,
        supports_check_mode=True
    )
    json_output = {}
    fail_json = None
    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        job = tower_cli.get_resource('job')
        params = module.params.copy()
        # tower-cli gets very noisy when monitoring.
        # We pass in our outfile to suppress the output during our monitor call.
        outfile = StringIO()
        params['outfile'] = outfile
        job_id = params.get('job_id')
        try:
            result = job.monitor(job_id, **params)
        except exc.Timeout as excinfo:
            # On timeout, report the job's current status instead of failing.
            result = job.status(job_id)
            result['id'] = job_id
            json_output['msg'] = 'Timeout waiting for job to finish.'
            json_output['timeout'] = True
        except exc.NotFound as excinfo:
            fail_json = dict(msg='Unable to wait, no job_id {0} found: {1}'.format(job_id, excinfo), changed=False)
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            fail_json = dict(msg='Unable to wait for job: {0}'.format(excinfo), changed=False)
    if fail_json is not None:
        module.fail_json(**fail_json)
    # NOTE(review): a timeout still exits with success=True (only the
    # 'timeout' key is set) — callers must check 'timeout' explicitly.
    json_output['success'] = True
    for k in ('id', 'status', 'elapsed', 'started', 'finished'):
        json_output[k] = result.get(k)
    module.exit_json(**json_output)
if __name__ == '__main__':
main()
| gpl-3.0 |
pernici/sympy | sympy/thirdparty/pyglet/pyglet/image/codecs/gdiplus.py | 7 | 8870 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: pil.py 163 2006-11-13 04:15:46Z Alex.Holkner $'
from ctypes import *
from pyglet.gl import *
from pyglet.image import *
from pyglet.image.codecs import *
from pyglet.window.win32.constants import *
from pyglet.window.win32.types import *
ole32 = windll.ole32
kernel32 = windll.kernel32
gdiplus = windll.gdiplus
LPSTREAM = c_void_p
REAL = c_float
PixelFormat1bppIndexed = 196865
PixelFormat4bppIndexed = 197634
PixelFormat8bppIndexed = 198659
PixelFormat16bppGrayScale = 1052676
PixelFormat16bppRGB555 = 135173
PixelFormat16bppRGB565 = 135174
PixelFormat16bppARGB1555 = 397319
PixelFormat24bppRGB = 137224
PixelFormat32bppRGB = 139273
PixelFormat32bppARGB = 2498570
PixelFormat32bppPARGB = 925707
PixelFormat48bppRGB = 1060876
PixelFormat64bppARGB = 3424269
PixelFormat64bppPARGB = 29622286
PixelFormatMax = 15
ImageLockModeRead = 1
ImageLockModeWrite = 2
ImageLockModeUserInputBuf = 4
class GdiplusStartupInput(Structure):
    # Input argument to GdiplusStartup(); GdiplusVersion must be 1.
    _fields_ = [
        ('GdiplusVersion', c_uint32),
        ('DebugEventCallback', c_void_p),
        ('SuppressBackgroundThread', BOOL),
        ('SuppressExternalCodecs', BOOL)
    ]
class GdiplusStartupOutput(Structure):
    # Output argument of GdiplusStartup(); both members are function
    # pointers used only when SuppressBackgroundThread is set.
    # NOTE: this attribute was previously spelled ``_fields`` (missing the
    # trailing underscore), which ctypes silently ignores — the structure
    # had no members at all and was passed to GDI+ with zero size.
    _fields_ = [
        ('NotificationHookProc', c_void_p),
        ('NotificationUnhookProc', c_void_p)
    ]
class BitmapData(Structure):
    # Mirrors the GDI+ BitmapData structure filled in by
    # GdipBitmapLockBits; Scan0 points at the first scanline and Stride is
    # the (possibly negative) byte distance between scanlines.
    _fields_ = [
        ('Width', c_uint),
        ('Height', c_uint),
        ('Stride', c_int),
        ('PixelFormat', c_int),
        ('Scan0', POINTER(c_byte)),
        ('Reserved', POINTER(c_uint))
    ]
class Rect(Structure):
    # GDI+ integer rectangle (used to describe the region to lock).
    _fields_ = [
        ('X', c_int),
        ('Y', c_int),
        ('Width', c_int),
        ('Height', c_int)
    ]
# GlobalAlloc/GlobalLock return pointer-sized values; declare the restypes
# so ctypes does not truncate them to 32-bit ints on 64-bit Windows.
kernel32.GlobalAlloc.restype = HGLOBAL
kernel32.GlobalLock.restype = c_void_p
# GDI+ property tag holding the per-frame delays of an animated GIF.
PropertyTagFrameDelay = 0x5100
class PropertyItem(Structure):
    # Mirrors the GDI+ PropertyItem structure returned by
    # GdipGetPropertyItem; ``value`` points at ``length`` bytes of data of
    # the given ``type``.
    _fields_ = [
        ('id', c_uint),
        ('length', c_ulong),
        ('type', c_short),
        ('value', c_void_p)
    ]
class GDIPlusDecoder(ImageDecoder):
    """Image decoder backed by Windows GDI+ (flat API via ctypes)."""

    def get_file_extensions(self):
        """Extensions of the still-image formats GDI+ can decode."""
        return ['.bmp', '.gif', '.jpg', '.jpeg', '.exif', '.png', '.tif',
                '.tiff']

    def get_animation_file_extensions(self):
        # TIFF also supported as a multi-page image; but that's not really an
        # animation, is it?
        return ['.gif']

    def _load_bitmap(self, file, filename):
        """Read *file* fully and return a GDI+ bitmap handle for its data."""
        data = file.read()

        # Create a HGLOBAL with image data
        hglob = kernel32.GlobalAlloc(GMEM_MOVEABLE, len(data))
        ptr = kernel32.GlobalLock(hglob)
        memmove(ptr, data, len(data))
        kernel32.GlobalUnlock(hglob)

        # Create IStream for the HGLOBAL
        stream = LPSTREAM()
        ole32.CreateStreamOnHGlobal(hglob, True, byref(stream))

        # Load image from stream
        bitmap = c_void_p()
        status = gdiplus.GdipCreateBitmapFromStream(stream, byref(bitmap))
        if status != 0:
            # TODO release stream
            raise ImageDecodeException(
                'GDI+ cannot load %r' % (filename or file))
        return bitmap

    def _get_image(self, bitmap):
        """Copy the pixels of *bitmap* into an ImageData instance."""
        # Get size of image (Bitmap subclasses Image)
        width = REAL()
        height = REAL()
        gdiplus.GdipGetImageDimension(bitmap, byref(width), byref(height))
        width = int(width.value)
        height = int(height.value)

        # Get image pixel format
        pf = c_int()
        gdiplus.GdipGetImagePixelFormat(bitmap, byref(pf))
        pf = pf.value

        # Reverse from what's documented because of Intel little-endianness.
        format = 'BGRA'
        if pf == PixelFormat24bppRGB:
            format = 'BGR'
        elif pf == PixelFormat32bppRGB:
            pass
        elif pf == PixelFormat32bppARGB:
            pass
        elif pf in (PixelFormat16bppARGB1555, PixelFormat32bppPARGB,
                    PixelFormat64bppARGB, PixelFormat64bppPARGB):
            pf = PixelFormat32bppARGB
        else:
            format = 'BGR'
            pf = PixelFormat24bppRGB

        # Lock pixel data in best format
        rect = Rect()
        rect.X = 0
        rect.Y = 0
        rect.Width = width
        rect.Height = height
        bitmap_data = BitmapData()
        gdiplus.GdipBitmapLockBits(bitmap,
            byref(rect), ImageLockModeRead, pf, byref(bitmap_data))

        # Create buffer for RawImage
        buffer = create_string_buffer(bitmap_data.Stride * height)
        memmove(buffer, bitmap_data.Scan0, len(buffer))

        # Unlock data
        gdiplus.GdipBitmapUnlockBits(bitmap, byref(bitmap_data))

        # Negative pitch: GDI+ rows are top-to-bottom, pyglet's are bottom-up.
        return ImageData(width, height, format, buffer, -bitmap_data.Stride)

    def _delete_bitmap(self, bitmap):
        # Release image and stream
        gdiplus.GdipDisposeImage(bitmap)
        # TODO: How to call IUnknown::Release on stream?

    def decode(self, file, filename):
        """Decode a still image from *file* and return its ImageData."""
        bitmap = self._load_bitmap(file, filename)
        image = self._get_image(bitmap)
        self._delete_bitmap(bitmap)
        return image

    def decode_animation(self, file, filename):
        """Decode an animated image (GIF) into an Animation of frames."""
        bitmap = self._load_bitmap(file, filename)

        dimension_count = c_uint()
        gdiplus.GdipImageGetFrameDimensionsCount(bitmap, byref(dimension_count))
        if dimension_count.value < 1:
            self._delete_bitmap(bitmap)
            raise ImageDecodeException('Image has no frame dimensions')

        # XXX Make sure this dimension is time?
        dimensions = (c_void_p * dimension_count.value)()
        gdiplus.GdipImageGetFrameDimensionsList(bitmap, dimensions,
                                                dimension_count.value)
        frame_count = c_uint()
        gdiplus.GdipImageGetFrameCount(bitmap, dimensions, byref(frame_count))

        prop_id = PropertyTagFrameDelay
        prop_size = c_uint()
        gdiplus.GdipGetPropertyItemSize(bitmap, prop_id, byref(prop_size))
        prop_buffer = c_buffer(prop_size.value)
        prop_item = cast(prop_buffer, POINTER(PropertyItem)).contents
        gdiplus.GdipGetPropertyItem(bitmap, prop_id, prop_size.value,
                                    prop_buffer)

        # XXX Sure it's long?
        # Floor division: plain '/' yields a float under Python 3 (or with
        # true division enabled), which is not a valid ctypes array length
        # for the cast below.
        n_delays = prop_item.length // sizeof(c_long)
        delays = cast(prop_item.value, POINTER(c_long * n_delays)).contents

        frames = []

        for i in range(frame_count.value):
            gdiplus.GdipImageSelectActiveFrame(bitmap, dimensions, i)
            image = self._get_image(bitmap)

            # Delays are in hundredths of a second; clamp tiny/zero delays
            # to the conventional 10 (0.1 s).
            delay = delays[i]
            if delay <= 1:
                delay = 10
            frames.append(AnimationFrame(image, delay/100.))

        self._delete_bitmap(bitmap)

        return Animation(frames)
def get_decoders():
    """Entry point for pyglet's codec registry: the decoders of this module."""
    return [GDIPlusDecoder()]
def get_encoders():
    """Entry point for pyglet's codec registry; GDI+ support is decode-only."""
    encoders = []
    return encoders
def init():
    """Start GDI+; must be called once before any Gdip* function is used."""
    token = c_ulong()
    startup_in = GdiplusStartupInput()
    startup_in.GdiplusVersion = 1
    startup_out = GdiplusStartupOutput()
    gdiplus.GdiplusStartup(byref(token), byref(startup_in), byref(startup_out))

    # Shutdown later?
    # gdiplus.GdiplusShutdown(token)
    # NOTE(review): the startup token is dropped, so GdiplusShutdown can never
    # be called -- acceptable for a process-lifetime library, but confirm.
init()
| bsd-3-clause |
kaiweifan/vse-lbaas-plugin-poc | quantum/agent/linux/daemon.py | 2 | 4218 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import atexit
import fcntl
import os
import sys
from quantum.agent.linux import utils
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Pidfile(object):
    """A pid file guarded by an exclusive flock().

    The file is created if needed and locked for the lifetime of this
    object; the lock (not the pid content) is what actually prevents two
    daemons from running concurrently.
    """

    def __init__(self, pidfile, procname, root_helper='sudo'):
        try:
            self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
        except (IOError, OSError):
            # os.open raises OSError; IOError is kept for compatibility
            # (they are distinct types on Python 2).
            LOG.exception(_("Failed to open pidfile: %s"), pidfile)
            sys.exit(1)
        self.pidfile = pidfile
        self.procname = procname
        self.root_helper = root_helper
        # flock() returns None on success and raises IOError on failure,
        # so errors propagate by themselves (the former "if not not ..."
        # guard around it could never fire).
        fcntl.flock(self.fd, fcntl.LOCK_EX)

    def __str__(self):
        return self.pidfile

    def unlock(self):
        """Release the advisory lock; raises IOError on failure."""
        fcntl.flock(self.fd, fcntl.LOCK_UN)

    def write(self, pid):
        """Make *pid* the file's sole content and flush it to disk."""
        os.ftruncate(self.fd, 0)
        # ftruncate() does not move the file offset; rewind explicitly so
        # repeated write()/read() cycles behave.
        os.lseek(self.fd, 0, os.SEEK_SET)
        os.write(self.fd, ('%d' % pid).encode('ascii'))
        os.fsync(self.fd)

    def read(self):
        """Return the recorded pid as an int, or None if unreadable."""
        try:
            # Always read from the start: the offset may be anywhere after
            # a previous write() (the original read from the stale offset
            # and returned None after any write()).
            os.lseek(self.fd, 0, os.SEEK_SET)
            return int(os.read(self.fd, 128))
        except ValueError:
            return

    def is_running(self):
        """True if a process with the recorded pid runs our procname."""
        pid = self.read()
        if not pid:
            return False

        cmd = ['cat', '/proc/%s/cmdline' % pid]
        try:
            return self.procname in utils.execute(cmd, self.root_helper)
        except RuntimeError:
            return False
class Daemon(object):
    """A generic UNIX daemon.

    Usage: subclass Daemon and override the run() method; start() performs
    the Stevens double fork and then calls run().
    """

    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null',
                 stderr='/dev/null', procname='python', root_helper='sudo'):
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.procname = procname
        self.pidfile = Pidfile(pidfile, procname, root_helper)

    def _fork(self):
        """Fork once; the parent exits, the child continues."""
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)
        except OSError:
            LOG.exception(_('Fork failed'))
            sys.exit(1)

    def daemonize(self):
        """Daemonize process by doing Stevens double fork."""
        # fork first time
        self._fork()

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # fork second time: the session leader exits so the daemon can
        # never reacquire a controlling terminal.
        self._fork()

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        stdin = open(self.stdin, 'r')
        stdout = open(self.stdout, 'a+')
        # stderr is unbuffered so tracebacks land on disk immediately
        stderr = open(self.stderr, 'a+', 0)
        os.dup2(stdin.fileno(), sys.stdin.fileno())
        os.dup2(stdout.fileno(), sys.stdout.fileno())
        os.dup2(stderr.fileno(), sys.stderr.fileno())

        # write pidfile
        atexit.register(self.delete_pid)
        self.pidfile.write(os.getpid())

    def delete_pid(self):
        """atexit hook: remove the pid file on clean shutdown."""
        os.remove(str(self.pidfile))

    def start(self):
        """Start the daemon, refusing to run if one is already alive."""
        if self.pidfile.is_running():
            self.pidfile.unlock()
            message = _('Pidfile %s already exist. Daemon already running?')
            LOG.error(message, self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run()

    def run(self):
        """Override this method when subclassing Daemon.

        start() will call this method after the process has daemonized.
        """
        pass
| apache-2.0 |
opennode/nodeconductor-paas-oracle | src/nodeconductor_paas_oracle/extension.py | 1 | 3675 | from nodeconductor.core import NodeConductorExtension
class OracleExtension(NodeConductorExtension):
    """NodeConductor extension providing the Oracle PaaS application.

    ``Settings`` carries the helpdesk ticket templates used when
    provisioning, resizing, undeploying or supporting Oracle instances.
    Placeholders are filled with str.format(); keep their names in sync
    with the objects supplied by the callers (customer, project,
    deployment, ssh_key, message) -- the rendering code is not visible
    in this chunk.
    """

    class Settings:
        # Templates keyed by operation name; each has a 'summary' (ticket
        # title) and a 'details' body.
        ORACLE_TICKET_TEMPLATES = {
            'provision': {
                'summary': "Request for a new Oracle instance",
                'details': """
Oracle DB purchase details
Customer name: {customer.name}
Project name: {project.project_group.name}
Environment name: {project.name}
Customer UUID: {customer.uuid.hex}
Project UUID: {project.project_group.uuid.hex}
Environment UUID: {project.uuid.hex}
OpenStack tenant id: {deployment.tenant.backend_id}
Hardware Configuration:
Name: {deployment.name}
Flavor: {deployment.flavor_info}
SSH key: {ssh_key.name}
SSH key UUID: {ssh_key.uuid.hex}
Oracle DB Configuration:
Name: {deployment.db_name}
Size: {deployment.db_size} GB / {deployment.db_arch_size} GB
Version: {deployment.db_version_type}
Database type: {deployment.db_template}
Character set: {deployment.db_charset}
Additional data: {deployment.user_data}
""",
            },
            'undeploy': {
                'summary': "Request for removing Oracle DB PaaS instance",
                'details': """
Customer name: {customer.name}
Project name: {project.project_group.name}
Environment name: {project.name}
Customer UUID: {customer.uuid.hex}
Project UUID: {project.project_group.uuid.hex}
Environment UUID: {project.uuid.hex}
Oracle DB details:
Name: {deployment.name}
UUID: {deployment.uuid.hex}
""",
            },
            'resize': {
                'summary': "Request for resizing Oracle DB PaaS instance",
                'details': """
Customer name: {customer.name}
Project name: {project.project_group.name}
Environment name: {project.name}
Customer UUID: {customer.uuid.hex}
Project UUID: {project.project_group.uuid.hex}
Environment UUID: {project.uuid.hex}
Oracle DB details:
Name: {deployment.name}
UUID: {deployment.uuid.hex}
Hardware Configuration:
Flavor: {deployment.flavor_info}
""",
            },
            'support': {
                'summary': "Custom support request",
                'details': """
Customer name: {customer.name}
Project name: {project.project_group.name}
Environment name: {project.name}
Customer UUID: {customer.uuid.hex}
Project UUID: {project.project_group.uuid.hex}
Environment UUID: {project.uuid.hex}
Oracle DB details:
Name: {deployment.name}
UUID: {deployment.uuid.hex}
{message}
""",
            },
        }

    @staticmethod
    def django_app():
        """Name of the Django app this extension installs."""
        return 'nodeconductor_paas_oracle'

    @staticmethod
    def rest_urls():
        """REST URL registration callback (imported lazily to avoid cycles)."""
        from .urls import register_in
        return register_in
| mit |
ingadhoc/odoo | addons/l10n_si/__openerp__.py | 430 | 1826 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright: (C) 2012 - Mentis d.o.o., Dravograd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest: the server eval()s this file, which must
# therefore contain exactly one dict literal.
{
    "name": "Slovenian - Accounting",
    "version": "1.2",
    "author": "Mentis d.o.o.",
    "website": "http://www.mentis.si",
    "category": "Localization/Account Charts",
    # The original manifest listed "description" twice (" " then the real
    # text); a dict literal keeps only the last value, so the placeholder
    # entry was dead and has been removed.
    "description": "Kontni načrt za gospodarske družbe",
    "depends": ["account", "base_iban", "base_vat", "account_chart",
                "account_cancel"],
    "data": [
        "data/account.account.type.csv",
        "data/account.account.template.csv",
        "data/account.tax.code.template.csv",
        "data/account.chart.template.csv",
        "data/account.tax.template.csv",
        "data/account.fiscal.position.template.csv",
        "data/account.fiscal.position.account.template.csv",
        "data/account.fiscal.position.tax.template.csv",
        "l10n_si_wizard.xml"
    ],
    'auto_install': False,
    "installable": True,
}
| agpl-3.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/numpy/distutils/environment.py | 248 | 2346 | from __future__ import division, absolute_import, print_function
import os
from distutils.dist import Distribution
__metaclass__ = type
class EnvironmentConfig(object):
    """Layered lookup of build-configuration variables.

    Each known variable maps to a tuple ``(hook, envvar, confvar,
    convert)``; resolution consults the hook first, then the process
    environment, then the distutils configuration section, applying
    *convert* to whatever wins.
    """

    def __init__(self, distutils_section='ALL', **kw):
        self._distutils_section = distutils_section
        self._conf_keys = kw
        self._conf = None
        self._hook_handler = None

    def dump_variable(self, name):
        """Print every layer's value for the single variable *name*."""
        hook, envvar, confvar, convert = self._conf_keys[name]
        if not convert:
            convert = lambda x: x
        print('%s.%s:' % (self._distutils_section, name))
        hooked = self._hook_handler(name, hook)
        print(' hook : %s' % (convert(hooked),))
        if envvar:
            from_env = os.environ.get(envvar, None)
            print(' environ: %s' % (convert(from_env),))
        if confvar and self._conf:
            from_conf = self._conf.get(confvar, (None, None))[1]
            print(' config : %s' % (convert(from_conf),))

    def dump_variables(self):
        """Print every known variable (see dump_variable)."""
        for key in self._conf_keys:
            self.dump_variable(key)

    def __getattr__(self, name):
        try:
            desc = self._conf_keys[name]
        except KeyError:
            raise AttributeError(name)
        return self._get_var(name, desc)

    def get(self, name, default=None):
        """Dict-style access: resolve *name*, or *default* if unknown/unset."""
        try:
            desc = self._conf_keys[name]
        except KeyError:
            return default
        value = self._get_var(name, desc)
        return default if value is None else value

    def _get_var(self, name, conf_desc):
        """Resolve one variable: hook, then environment, then config."""
        hook, envvar, confvar, convert = conf_desc
        value = self._hook_handler(name, hook)
        if envvar is not None:
            value = os.environ.get(envvar, value)
        if confvar is not None and self._conf:
            value = self._conf.get(confvar, (None, value))[1]
        if convert is not None:
            value = convert(value)
        return value

    def clone(self, hook_handler):
        """Return a copy of this config wired to a different hook handler."""
        duplicate = self.__class__(distutils_section=self._distutils_section,
                                   **self._conf_keys)
        duplicate._hook_handler = hook_handler
        return duplicate

    def use_distribution(self, dist):
        """Attach the distutils option dict from *dist* (or a raw mapping)."""
        if isinstance(dist, Distribution):
            self._conf = dist.get_option_dict(self._distutils_section)
        else:
            self._conf = dist
| mit |
pekeler/arangodb | 3rdParty/V8-4.3.61/third_party/icu/source/test/depstest/dependencies.py | 198 | 7330 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2014, International Business Machines
# Corporation and others. All Rights Reserved.
#
# file name: dependencies.py
#
# created on: 2011may26
"""Reader module for dependency data for the ICU dependency tester.
Reads dependencies.txt and makes the data available.
Attributes:
files: Set of "library/filename.o" files mentioned in the dependencies file.
items: Map from library or group names to item maps.
Each item has a "type" ("library" or "group" or "system_symbols").
A library or group item can have an optional set of "files" (as in the files attribute).
Each item can have an optional set of "deps" (libraries & groups).
A group item also has a "library" name unless it is a group of system symbols.
The one "system_symbols" item and its groups have sets of "system_symbols"
with standard-library system symbol names.
libraries: Set of library names mentioned in the dependencies file.
file_to_item: Map from a symbol (ushoe.o) to library or group (shoesize)
"""
__author__ = "Markus W. Scherer"
# TODO: Support binary items.
# .txt syntax: binary: tools/genrb
# item contents: {"type": "binary"} with optional files & deps
# A binary must not be used as a dependency for anything else.
import sys
files = set()
items = {}
libraries = set()
file_to_item = {}
_line_number = 0
_groups_to_be_defined = set()
def _CheckLibraryName(name):
  """Abort the program if *name* is not a valid library name."""
  global _line_number
  if not name:
    sys.exit('Error:%d: "library: " without name' % _line_number)
  elif name.endswith(".o"):
    sys.exit("Error:%d: invalid library name %s" % (_line_number, name))
def _CheckGroupName(name):
  """Abort the program if *name* is not a valid group name."""
  global _line_number
  if not name:
    sys.exit('Error:%d: "group: " without name' % _line_number)
  elif "/" in name or name.endswith(".o"):
    sys.exit("Error:%d: invalid group name %s" % (_line_number, name))
def _CheckFileName(name):
  """Abort the program unless *name* looks like a bare object file ("foo.o")."""
  global _line_number
  is_valid = name.endswith(".o") and "/" not in name
  if not is_valid:
    sys.exit("Error:%d: invalid file name %s" % (_line_number, name))
def _RemoveComment(line):
  """Strip a trailing '#' comment and whitespace, counting the line read."""
  global _line_number
  _line_number += 1
  hash_pos = line.find("#")
  if hash_pos != -1:
    line = line[:hash_pos]
  return line.rstrip()
def _ReadLine(f):
  """Return the next non-empty, comment-stripped line from iterator *f*.

  Propagates StopIteration at end of input; Load() relies on that to
  terminate its parse loop.
  """
  while True:
    # next(f) instead of f.next(): works on Python 2.6+ and Python 3,
    # and on any iterator rather than only file objects.
    line = _RemoveComment(next(f))
    if line: return line
def _ReadFiles(deps_file, item, library_name):
  """Read indented file lines into item["files"].

  Returns the first non-indented line, i.e. the next section header.
  Each file is registered globally so duplicates across groups abort.
  """
  global files
  item_files = item.get("files")
  while True:
    line = _ReadLine(deps_file)
    # _ReadLine never returns an empty string (it loops); defensive only.
    if not line: continue
    if not line.startswith(" "): return line
    if item_files == None: item_files = item["files"] = set()
    for file_name in line.split():
      _CheckFileName(file_name)
      # Qualify with the owning library so names are globally unique.
      file_name = library_name + "/" + file_name
      if file_name in files:
        sys.exit("Error:%d: file %s listed in multiple groups" % (_line_number, file_name))
      files.add(file_name)
      item_files.add(file_name)
      file_to_item[file_name] = item["name"]
def _IsLibrary(item): return item and item["type"] == "library"
def _IsLibraryGroup(item): return item and "library" in item
def _ReadDeps(deps_file, item, library_name):
  """Read indented dependency lines into item["deps"].

  Returns the next section-header line. Dependencies that have not been
  seen yet are forward-declared as groups in _groups_to_be_defined and
  must be defined later (Load() checks this at the end).
  """
  global items, _line_number, _groups_to_be_defined
  item_deps = item.get("deps")
  while True:
    line = _ReadLine(deps_file)
    if not line: continue
    if not line.startswith(" "): return line
    if item_deps == None: item_deps = item["deps"] = set()
    for dep in line.split():
      _CheckGroupName(dep)
      dep_item = items.get(dep)
      # system_symbols may only depend on other symbol groups, never on
      # real libraries or library groups.
      if item["type"] == "system_symbols" and (_IsLibraryGroup(dep_item) or _IsLibrary(dep_item)):
        sys.exit(("Error:%d: system_symbols depend on previously defined " +
                  "library or library group %s") % (_line_number, dep))
      if dep_item == None:
        # Add this dependency as a new group.
        items[dep] = {"type": "group"}
        if library_name: items[dep]["library"] = library_name
        _groups_to_be_defined.add(dep)
      item_deps.add(dep)
def _AddSystemSymbol(item, symbol):
  """Record *symbol* in item's "system_symbols" set, creating it on demand."""
  exports = item.get("system_symbols")
  if exports is None:
    exports = item["system_symbols"] = set()
  exports.add(symbol)
def _ReadSystemSymbols(deps_file, item):
  """Read indented system-symbol lines into item["system_symbols"].

  Returns the next section-header line. A double-quoted line holds one
  symbol (which may contain spaces); otherwise symbols are
  whitespace-separated.
  """
  global _line_number
  while True:
    line = _ReadLine(deps_file)
    if not line: continue
    if not line.startswith(" "): return line
    line = line.lstrip()
    if '"' in line:
      # One double-quote-enclosed symbol on the line, allows spaces in a symbol name.
      symbol = line[1:-1]
      if line.startswith('"') and line.endswith('"') and '"' not in symbol:
        _AddSystemSymbol(item, symbol)
      else:
        sys.exit("Error:%d: invalid quoted symbol name %s" % (_line_number, line))
    else:
      # One or more space-separate symbols.
      for symbol in line.split(): _AddSystemSymbol(item, symbol)
def Load():
  """Reads "dependencies.txt" and populates the module attributes.

  The file is a sequence of sections ("library:", "group:", "deps",
  "system_symbols:"); each _Read* helper consumes a section body and
  hands back the header line of the following section.
  NOTE(review): deps_file is never closed; harmless for a one-shot tool.
  """
  global items, libraries, _line_number, _groups_to_be_defined
  deps_file = open("dependencies.txt")
  try:
    line = None
    current_type = None
    while True:
      while not line: line = _RemoveComment(deps_file.next())

      if line.startswith("library: "):
        current_type = "library"
        name = line[9:].lstrip()
        _CheckLibraryName(name)
        if name in items:
          sys.exit("Error:%d: library definition using duplicate name %s" % (_line_number, name))
        libraries.add(name)
        item = items[name] = {"type": "library", "name": name}
        line = _ReadFiles(deps_file, item, name)
      elif line.startswith("group: "):
        current_type = "group"
        name = line[7:].lstrip()
        _CheckGroupName(name)
        # Groups must be forward-declared as a dependency before they are
        # defined, and defined exactly once.
        if name not in items:
          sys.exit("Error:%d: group %s defined before mentioned as a dependency" %
                   (_line_number, name))
        if name not in _groups_to_be_defined:
          sys.exit("Error:%d: group definition using duplicate name %s" % (_line_number, name))
        _groups_to_be_defined.remove(name)
        item = items[name]
        item["name"] = name
        library_name = item.get("library")
        if library_name:
          line = _ReadFiles(deps_file, item, library_name)
        else:
          # A group without a library is a system-symbol group.
          line = _ReadSystemSymbols(deps_file, item)
      elif line == " deps":
        if current_type == "library":
          line = _ReadDeps(deps_file, items[name], name)
        elif current_type == "group":
          item = items[name]
          line = _ReadDeps(deps_file, item, item.get("library"))
        elif current_type == "system_symbols":
          item = items[current_type]
          line = _ReadDeps(deps_file, item, None)
        else:
          sys.exit("Error:%d: deps before any library or group" % _line_number)
      elif line == "system_symbols:":
        current_type = "system_symbols"
        if current_type in items:
          sys.exit("Error:%d: duplicate entry for system_symbols" % _line_number)
        item = items[current_type] = {"type": current_type, "name": current_type}
        line = _ReadSystemSymbols(deps_file, item)
      else:
        sys.exit("Syntax error:%d: %s" % (_line_number, line))
  except StopIteration:
    # End of file simply terminates the parse loop.
    pass
  if _groups_to_be_defined:
    sys.exit("Error: some groups mentioned in dependencies are undefined: %s" % _groups_to_be_defined)
| apache-2.0 |
daavery/audacity | lib-src/lv2/serd/waflib/Tools/c_osx.py | 329 | 4274 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shutil,sys,platform
from waflib import TaskGen,Task,Build,Options,Utils,Errors
from waflib.TaskGen import taskgen_method,feature,after_method,before_method
app_info='''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>%s</string>
</dict>
</plist>
'''
@feature('c', 'cxx')
def set_macosx_deployment_target(self):
    """Export MACOSX_DEPLOYMENT_TARGET so compiler and linker see it.

    Priority: the task-gen env setting, then a pre-existing environment
    variable, then (on darwin only) the running OS release.
    """
    if self.env['MACOSX_DEPLOYMENT_TARGET']:
        os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env['MACOSX_DEPLOYMENT_TARGET']
    elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
        if Utils.unversioned_sys_platform() == 'darwin':
            # e.g. "10.9" from a full "10.9.5" release string.
            os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])
@taskgen_method
def create_bundle_dirs(self, name, out):
    """Create the <name>/Contents/MacOS skeleton beside *out*.

    Returns the bundle's root node.
    """
    bld = self.bld  # NOTE(review): unused; kept for behavior/byte compatibility
    dir = out.parent.find_or_declare(name)
    dir.mkdir()
    macos = dir.find_or_declare(['Contents', 'MacOS'])
    macos.mkdir()
    return dir
def bundle_name_for_output(out):
    """Derive the .app bundle name from an output node's file name.

    The last extension (if any) is replaced by ".app"; otherwise ".app"
    is appended.
    """
    stem, dot, _tail = out.name.rpartition('.')
    if dot:
        return stem + '.app'
    return out.name + '.app'
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macapp(self):
    """Wrap the linked binary into a .app bundle when MACAPP is enabled.

    Schedules copy tasks for the executable and any ``mac_resources``,
    plus the corresponding install entries.
    """
    if self.env['MACAPP'] or getattr(self, 'mac_app', False):
        out = self.link_task.outputs[0]
        name = bundle_name_for_output(out)
        dir = self.create_bundle_dirs(name, out)
        n1 = dir.find_or_declare(['Contents', 'MacOS', out.name])
        self.apptask = self.create_task('macapp', self.link_task.outputs, n1)
        inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name
        self.bld.install_files(inst_to, n1, chmod=Utils.O755)
        if getattr(self, 'mac_resources', None):
            res_dir = n1.parent.parent.make_node('Resources')
            inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name
            for x in self.to_list(self.mac_resources):
                node = self.path.find_node(x)
                if not node:
                    raise Errors.WafError('Missing mac_resource %r in %r' % (x, self))
                parent = node.parent
                # A directory resource is copied recursively.
                if os.path.isdir(node.abspath()):
                    nodes = node.ant_glob('**')
                else:
                    nodes = [node]
                for node in nodes:
                    rel = node.path_from(parent)
                    tsk = self.create_task('macapp', node, res_dir.make_node(rel))
                    self.bld.install_as(inst_to + '/%s' % rel, node)
        if getattr(self.bld, 'is_install', None):
            # The bundle task replaces the normal binary install.
            self.install_task.hasrun = Task.SKIP_ME
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macplist(self):
    """Generate Contents/Info.plist for the .app bundle when MACAPP is on."""
    if self.env['MACAPP'] or getattr(self, 'mac_app', False):
        out = self.link_task.outputs[0]
        name = bundle_name_for_output(out)
        dir = self.create_bundle_dirs(name, out)
        n1 = dir.find_or_declare(['Contents', 'Info.plist'])
        self.plisttask = plisttask = self.create_task('macplist', [], n1)
        if getattr(self, 'mac_plist', False):
            # mac_plist may be either a file path or the literal plist text.
            node = self.path.find_resource(self.mac_plist)
            if node:
                plisttask.inputs.append(node)
            else:
                plisttask.code = self.mac_plist
        else:
            # Fall back to the built-in minimal template.
            plisttask.code = app_info % self.link_task.outputs[0].name
        inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name
        self.bld.install_files(inst_to, n1)
@feature('cshlib', 'cxxshlib')
@before_method('apply_link', 'propagate_uselib_vars')
def apply_bundle(self):
    """Switch a shared-library task generator to build a macOS bundle."""
    if self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False):
        # Bundles must not use the regular shlib link flags or name pattern.
        self.env['LINKFLAGS_cshlib'] = self.env['LINKFLAGS_cxxshlib'] = []
        self.env['cshlib_PATTERN'] = self.env['cxxshlib_PATTERN'] = self.env['macbundle_PATTERN']
        use = self.use = self.to_list(getattr(self, 'use', []))
        if not 'MACBUNDLE' in use:
            use.append('MACBUNDLE')
app_dirs=['Contents','Contents/MacOS','Contents/Resources']
class macapp(Task.Task):
    """Copy a file into the application bundle, preserving metadata."""
    color = 'PINK'

    def run(self):
        # Ensure the destination directory exists, then copy with metadata.
        self.outputs[0].parent.mkdir()
        shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath())
class macplist(Task.Task):
    """Write Info.plist, either from a template file or inline text."""
    color = 'PINK'
    ext_in = ['.bin']

    def run(self):
        # Inline text (set by create_task_macplist) wins over an input file.
        if getattr(self, 'code', None):
            txt = self.code
        else:
            txt = self.inputs[0].read()
        self.outputs[0].write(txt)
| gpl-2.0 |
tpratama/warkop-TC | assets/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/tools/pretty_gyp.py | 2618 | 4756 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceeded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
  """Blank out the comment body (group 3) with '#' padding of equal length."""
  prefix, hash_char, body = matchobj.group(1, 2, 3)
  return prefix + hash_char + '#' * len(body)
def mask_comments(input):
  """Mask comment bodies so brace counting ignores braces inside them."""
  search_re = re.compile(r'(.*?)(#)(.*)')
  masked = []
  for line in input:
    masked.append(search_re.sub(comment_replace, line))
  return masked
def quote_replace(matchobj):
  """Blank out a quoted string's contents (group 3) with 'x' padding."""
  prefix, quote, body = matchobj.group(1, 2, 3)
  return prefix + quote + 'x' * len(body) + quote
def mask_quotes(input):
  """Mask quoted-string contents so brace counting ignores braces in them."""
  search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
  masked = []
  for line in input:
    masked.append(search_re.sub(quote_replace, line))
  return masked
def do_split(input, masked_input, search_re):
  """Insert literal '\\n' markers where search_re matches, then split on them.

  Match positions are found on the masked text (quotes/comments blanked),
  but the identical splits are applied to the real text so both sequences
  stay in lockstep.
  """
  marker = r'\n'  # two characters (backslash, 'n'), not a newline
  output = []
  mask_output = []
  for line, masked_line in zip(input, masked_input):
    match = search_re.match(masked_line)
    while match:
      cut = len(match.group(1))
      line = line[:cut] + marker + line[cut:]
      masked_line = masked_line[:cut] + marker + masked_line[cut:]
      match = search_re.match(masked_line)
    output.extend(line.split(marker))
    mask_output.extend(masked_line.split(marker))
  return (output, mask_output)
def split_double_braces(input):
  """Masks out the quotes and comments, and then splits appropriate
  lines (lines that matche the double_*_brace re's above) before
  indenting them below.

  These are used to split lines which have multiple braces on them, so
  that the indentation looks prettier when all laid out (e.g. closing
  braces make a nice diagonal line).
  """
  double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
  double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')

  # Masking prevents braces inside strings/comments from triggering splits.
  masked_input = mask_quotes(input)
  masked_input = mask_comments(masked_input)

  (output, mask_output) = do_split(input, masked_input, double_open_brace_re)
  (output, mask_output) = do_split(output, mask_output, double_close_brace_re)

  return output
def count_braces(line):
  """Return (net brace count, indent-after flag) for one line.

  Starts at zero, adding for open braces and subtracting for closed
  braces; quotes and comments are masked out first so their contents
  are ignored.
  """
  open_braces = ['[', '(', '{']
  close_braces = [']', ')', '}']
  closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
  cnt = 0
  stripline = COMMENT_RE.sub(r'', line)
  stripline = QUOTE_RE.sub(r"''", stripline)
  for char in stripline:
    for brace in open_braces:
      if char == brace:
        cnt += 1
    for brace in close_braces:
      if char == brace:
        cnt -= 1

  after = False
  if cnt > 0:
    after = True

  # This catches the special case of a closing brace having something
  # other than just whitespace ahead of it -- we don't want to
  # unindent that until after this line is printed so it stays with
  # the previous indentation level.
  if cnt < 0 and closing_prefix_re.match(stripline):
    after = True
  return (cnt, after)
def prettyprint_input(lines):
  """Does the main work of indenting the input based on the brace counts.

  Uses print(...) call form, which is valid as a parenthesized expression
  on Python 2 and as a function call on Python 3 (the old statement form
  was Python-2-only). The unused last_line variable was removed.
  """
  indent = 0
  basic_offset = 2
  for line in lines:
    if COMMENT_RE.match(line):
      print(line)
    else:
      line = line.strip('\r\n\t ')  # Otherwise doesn't strip \r on Unix.
      if len(line) > 0:
        (brace_diff, after) = count_braces(line)
        if brace_diff != 0:
          if after:
            # Indent change applies starting with the NEXT line.
            print(" " * (basic_offset * indent) + line)
            indent += brace_diff
          else:
            # Indent change applies to this line already.
            indent += brace_diff
            print(" " * (basic_offset * indent) + line)
        else:
          print(" " * (basic_offset * indent) + line)
      else:
        print("")
def main():
  """Pretty-print GYP text from argv[1] (or stdin) to stdout; return 0."""
  if len(sys.argv) > 1:
    # Close the input file instead of leaking the handle.
    with open(sys.argv[1]) as infile:
      data = infile.read().splitlines()
  else:
    data = sys.stdin.read().splitlines()
  # Split up the double braces.
  lines = split_double_braces(data)

  # Indent and print the output.
  prettyprint_input(lines)
  return 0
| mit |
dset0x/invenio | invenio/ext/restful/pagination.py | 17 | 4908 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Restful pagination."""
from flask import request, url_for
from invenio.utils.pagination import Pagination as InvenioPagination
from .errors import InvalidPageError
class RestfulPaginationMixIn(object):
    """Provide implementation methods :meth:`link_header` and :meth:`links`.

    Host classes must expose ``page``, ``pages``, ``per_page``,
    ``has_prev`` and ``has_next`` attributes or properties.
    """

    def link_header(self, **kwargs):
        """Return a ``('Link', value)`` tuple usable as an RFC 5988 header."""
        links = self.links(**kwargs)
        keys = ("first", "prev", "next", "last")
        links_string = ",".join([links[key] for key in keys if key in links])
        return ('Link', links_string)

    def links(self, endpoint=None, args=None):
        """Generate pagination links for the headers.

        :param endpoint: the URL endpoint (defaults to the current request's)
        :param args: the request arguments (defaults to current request args)
            NOTE(review): ``args`` is read but never merged into the
            generated URLs -- presumably a leftover; confirm.
        """
        if not endpoint:
            endpoint = request.endpoint
        if not args:
            args = request.args
        links = {}
        link_template = '<{}>; rel="{}"'
        # arguments to stick to the URL
        url_args = {}
        # url_args['page'] will be updated for every link
        url_args['page'] = 1
        url_args['per_page'] = self.per_page
        # generate link for rel first
        links['first'] = link_template.format(
            url_for(endpoint, **url_args), "first"
        )
        # generate link for prev if it exists
        if self.has_prev:
            url_args['page'] = self.page - 1
            links['prev'] = link_template.format(
                url_for(endpoint, **url_args), "prev"
            )
        # generate link for next if it exists
        if self.has_next:
            url_args['page'] = self.page + 1
            links['next'] = link_template.format(
                url_for(endpoint, **url_args), "next"
            )
        # generate link for last
        url_args['page'] = self.pages
        links['last'] = link_template.format(
            url_for(endpoint, **url_args), "last"
        )
        return links
class SQLAlchemyPaginationHelper(object):
    """Thin adapter around SQLAlchemy's query ``paginate`` method."""

    def __init__(self, query, page, per_page):
        """Run the query's pagination immediately and keep the result.

        :param query: query object from SQLAlchemy
        :param page: 1-based page number
        :param per_page: number of items per page
        """
        self.query = query
        self.page = page
        self.per_page = per_page
        # Third argument (error_out) is False: out-of-range pages return
        # empty results instead of aborting with a 404.
        self.pagination = query.paginate(page, per_page, False)
class RestfulSQLAlchemyPagination(SQLAlchemyPaginationHelper,
                                  RestfulPaginationMixIn):
    """Implement Restful pagination for SQLAlchemy model.

    The properties below simply forward to the SQLAlchemy pagination
    object created by the helper base class.
    """

    @property
    def items(self):
        """Return found items."""
        return self.pagination.items

    @property
    def pages(self):
        """Return number of pages."""
        return self.pagination.pages

    @property
    def has_next(self):
        """Return `True` if has next page otherwise return `False`."""
        return self.pagination.has_next

    @property
    def has_prev(self):
        """Return `True` if has previous page otherwise return `False`."""
        return self.pagination.has_prev
class RestfulPagination(InvenioPagination, RestfulPaginationMixIn):
    """Implement Restful pagination for list of data."""

    def __init__(self, page, per_page, total_count, validate=True):
        # validate=True raises InvalidPageError for out-of-range arguments.
        super(RestfulPagination, self).__init__(page, per_page, total_count)
        if validate:
            self.validate()

    def validate(self):
        """Validate the range of page and per_page.

        :raises InvalidPageError: if per_page is negative or page is out
            of range.
        """
        if self.per_page < 0:
            error_msg = (
                "Invalid per_page argument ('{0}'). Number of items "
                "per pages must be positive integer.".format(self.per_page)
            )
            raise InvalidPageError(error_msg)
        # NOTE(review): page 0 passes this check; presumably
        # InvenioPagination treats pages as 1-based -- confirm.
        if self.page < 0 or self.page > self.pages:
            error_msg = "Invalid page number ('{0}').".format(self.page)
            raise InvalidPageError(error_msg)

    def slice(self, items):
        """Return the sub-list of *items* that falls on the current page."""
        return items[(self.page - 1) * self.per_page:self.page * self.per_page]
| gpl-2.0 |
Titan-C/sympy | sympy/utilities/tests/test_module_imports.py | 120 | 1500 | """
Checks that SymPy does not contain indirect imports.
An indirect import is importing a symbol from a module that itself imported the
symbol from elsewhere. Such a constellation makes it harder to diagnose
inter-module dependencies and import order problems, and is therefore strongly
discouraged.
(Indirect imports from end-user code is fine and in fact a best practice.)
Implementation note: Forcing Python into actually unloading already-imported
submodules is a tricky and partly undocumented process. To avoid these issues,
the actual diagnostic code is in bin/diagnose_imports, which is run as a
separate, pristine Python process.
"""
from __future__ import print_function
import subprocess
import sys
from os.path import abspath, dirname, join, normpath
import inspect
from sympy.utilities.pytest import XFAIL
@XFAIL
def test_module_imports_are_direct():
    """Run bin/diagnose_imports in a pristine interpreter; require silence.

    The diagnostic must run in a separate process because unloading
    already-imported submodules in-process is unreliable (see the module
    docstring).
    """
    my_filename = abspath(inspect.getfile(inspect.currentframe()))
    my_dirname = dirname(my_filename)
    diagnose_imports_filename = join(my_dirname, 'diagnose_imports.py')
    diagnose_imports_filename = normpath(diagnose_imports_filename)

    process = subprocess.Popen(
        [
            sys.executable,
            normpath(diagnose_imports_filename),
            '--problems',
            '--by-importer'
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        bufsize=-1)
    output, _ = process.communicate()
    # NOTE(review): on Python 3, communicate() yields bytes, so comparing
    # with '' can never succeed -- presumably related to the @XFAIL
    # marking; confirm before "fixing".
    assert output == '', "There are import problems:\n" + output.decode()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/django/core/files/locks.py | 725 | 3516 | """
Portable file locking utilities.
Based partially on an example by Jonathan Feignberg in the Python
Cookbook [1] (licensed under the Python Software License) and a ctypes port by
Anatoly Techtonik for Roundup [2] (license [3]).
[1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
[2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py
[3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
... f.write('Django')
"""
import os
__all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock')
def _fd(f):
    """Return the OS-level descriptor for *f*, a file object or a raw fd."""
    if hasattr(f, 'fileno'):
        return f.fileno()
    return f
# Platform dispatch: Windows uses LockFileEx/UnlockFileEx via ctypes;
# POSIX uses fcntl.flock(); anything else degrades to no-op stubs.
if os.name == 'nt':
    import msvcrt
    from ctypes import (sizeof, c_ulong, c_void_p, c_int64,
                        Structure, Union, POINTER, windll, byref)
    from ctypes.wintypes import BOOL, DWORD, HANDLE

    LOCK_SH = 0  # the default
    LOCK_NB = 0x1  # LOCKFILE_FAIL_IMMEDIATELY
    LOCK_EX = 0x2  # LOCKFILE_EXCLUSIVE_LOCK

    # --- Adapted from the pyserial project ---
    # detect size of ULONG_PTR (pointer-sized unsigned integer)
    if sizeof(c_ulong) != sizeof(c_void_p):
        ULONG_PTR = c_int64
    else:
        ULONG_PTR = c_ulong
    PVOID = c_void_p

    # --- Union inside Structure by stackoverflow:3480240 ---
    class _OFFSET(Structure):
        _fields_ = [
            ('Offset', DWORD),
            ('OffsetHigh', DWORD)]

    class _OFFSET_UNION(Union):
        _anonymous_ = ['_offset']
        _fields_ = [
            ('_offset', _OFFSET),
            ('Pointer', PVOID)]

    class OVERLAPPED(Structure):
        _anonymous_ = ['_offset_union']
        _fields_ = [
            ('Internal', ULONG_PTR),
            ('InternalHigh', ULONG_PTR),
            ('_offset_union', _OFFSET_UNION),
            ('hEvent', HANDLE)]

    LPOVERLAPPED = POINTER(OVERLAPPED)

    # --- Define function prototypes for extra safety ---
    LockFileEx = windll.kernel32.LockFileEx
    LockFileEx.restype = BOOL
    LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED]
    UnlockFileEx = windll.kernel32.UnlockFileEx
    UnlockFileEx.restype = BOOL
    UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED]

    def lock(f, flags):
        """Lock the whole file; returns True on success."""
        hfile = msvcrt.get_osfhandle(_fd(f))
        overlapped = OVERLAPPED()
        # 0xFFFF0000 bytes: effectively lock the entire file region.
        ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped))
        return bool(ret)

    def unlock(f):
        """Unlock the whole file; returns True on success."""
        hfile = msvcrt.get_osfhandle(_fd(f))
        overlapped = OVERLAPPED()
        ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped))
        return bool(ret)
else:
    try:
        import fcntl
        LOCK_SH = fcntl.LOCK_SH  # shared lock
        LOCK_NB = fcntl.LOCK_NB  # non-blocking
        LOCK_EX = fcntl.LOCK_EX
    except (ImportError, AttributeError):
        # File locking is not supported.
        LOCK_EX = LOCK_SH = LOCK_NB = 0

        # Dummy functions that don't do anything.
        def lock(f, flags):
            # File is not locked
            return False

        def unlock(f):
            # File is unlocked
            return True
    else:
        def lock(f, flags):
            """flock() the file; returns True on success (flock returns None)."""
            ret = fcntl.flock(_fd(f), flags)
            return (ret == 0)

        def unlock(f):
            """Release the flock(); returns True on success."""
            ret = fcntl.flock(_fd(f), fcntl.LOCK_UN)
            return (ret == 0)
| mit |
bhilburn/gnuradio | gr-qtgui/examples/pyqt_histogram_f.py | 47 | 6683 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt4 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class dialog_box(QtGui.QWidget):
    """Top-level window placing the plot display beside the control panel."""

    def __init__(self, display, control):
        QtGui.QWidget.__init__(self, None)
        self.setWindowTitle('PyQt Test GUI')

        layout = QtGui.QBoxLayout(QtGui.QBoxLayout.LeftToRight, self)
        layout.addWidget(display, 1)
        layout.addWidget(control)
        self.boxlayout = layout

        self.resize(800, 500)
class control_box(QtGui.QWidget):
    """Control panel driving the two signal sources and the histogram sink.

    Line edits update the attached sources/sink on editingFinished; a
    button triggers X-axis autoscaling of the histogram.
    """

    def __init__(self, snk, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setWindowTitle('Control Panel')

        # the qtgui.histogram_sink_f instance being controlled
        self.snk = snk

        self.setToolTip('Control the signals')
        QtGui.QToolTip.setFont(QtGui.QFont('OldEnglish', 10))

        self.layout = QtGui.QFormLayout(self)

        # Control the first signal
        self.freq1Edit = QtGui.QLineEdit(self)
        self.freq1Edit.setMinimumWidth(100)
        self.layout.addRow("Sine Frequency:", self.freq1Edit)
        self.connect(self.freq1Edit, QtCore.SIGNAL("editingFinished()"),
                     self.freq1EditText)

        self.amp1Edit = QtGui.QLineEdit(self)
        self.amp1Edit.setMinimumWidth(100)
        self.layout.addRow("Sine Amplitude:", self.amp1Edit)
        self.connect(self.amp1Edit, QtCore.SIGNAL("editingFinished()"),
                     self.amp1EditText)

        # Control the second signal
        self.amp2Edit = QtGui.QLineEdit(self)
        self.amp2Edit.setMinimumWidth(100)
        self.layout.addRow("Noise Amplitude:", self.amp2Edit)
        self.connect(self.amp2Edit, QtCore.SIGNAL("editingFinished()"),
                     self.amp2EditText)

        # Control the histogram
        self.hist_npts = QtGui.QLineEdit(self)
        self.hist_npts.setMinimumWidth(100)
        self.hist_npts.setValidator(QtGui.QIntValidator(0, 8191))
        self.hist_npts.setText("{0}".format(self.snk.nsamps()))
        self.layout.addRow("Number of Points:", self.hist_npts)
        self.connect(self.hist_npts, QtCore.SIGNAL("editingFinished()"),
                     self.set_nsamps)

        self.hist_bins = QtGui.QLineEdit(self)
        self.hist_bins.setMinimumWidth(100)
        self.hist_bins.setValidator(QtGui.QIntValidator(0, 1000))
        self.hist_bins.setText("{0}".format(self.snk.bins()))
        self.layout.addRow("Number of Bins:", self.hist_bins)
        self.connect(self.hist_bins, QtCore.SIGNAL("editingFinished()"),
                     self.set_bins)

        self.hist_auto = QtGui.QPushButton("scale", self)
        self.layout.addRow("Autoscale X:", self.hist_auto)
        self.connect(self.hist_auto, QtCore.SIGNAL("pressed()"),
                     self.autoscalex)

        self.quit = QtGui.QPushButton('Close', self)
        self.quit.setMinimumWidth(100)
        self.layout.addWidget(self.quit)
        self.connect(self.quit, QtCore.SIGNAL('clicked()'),
                     QtGui.qApp, QtCore.SLOT('quit()'))

    def attach_signal1(self, signal):
        # Mirror the sine source's current settings into the edit boxes.
        self.signal1 = signal
        self.freq1Edit.setText(QtCore.QString("%1").arg(self.signal1.frequency()))
        self.amp1Edit.setText(QtCore.QString("%1").arg(self.signal1.amplitude()))

    def attach_signal2(self, signal):
        # Mirror the noise source's amplitude into its edit box.
        self.signal2 = signal
        self.amp2Edit.setText(QtCore.QString("%1").arg(self.signal2.amplitude()))

    def freq1EditText(self):
        try:
            newfreq = float(self.freq1Edit.text())
            self.signal1.set_frequency(newfreq)
        except ValueError:
            print "Bad frequency value entered"

    def amp1EditText(self):
        try:
            newamp = float(self.amp1Edit.text())
            self.signal1.set_amplitude(newamp)
        except ValueError:
            print "Bad amplitude value entered"

    def amp2EditText(self):
        try:
            newamp = float(self.amp2Edit.text())
            self.signal2.set_amplitude(newamp)
        except ValueError:
            print "Bad amplitude value entered"

    def set_nsamps(self):
        # QString.toInt() returns (value, ok); only apply on successful parse.
        res = self.hist_npts.text().toInt()
        if(res[1]):
            self.snk.set_nsamps(res[0])

    def set_bins(self):
        res = self.hist_bins.text().toInt()
        if(res[1]):
            self.snk.set_bins(res[0])

    def autoscalex(self):
        self.snk.autoscalex()
class my_top_block(gr.top_block):
    """Flowgraph: sine + Gaussian noise -> adder -> throttle -> histogram
    sink, displayed beside a control panel in a single dialog."""

    def __init__(self):
        gr.top_block.__init__(self)

        Rs = 8000     # sample rate in samples/sec
        f1 = 100      # sine frequency in Hz
        npts = 2048   # samples per histogram frame

        self.qapp = QtGui.QApplication(sys.argv)

        src1 = analog.sig_source_f(Rs, analog.GR_SIN_WAVE, f1, 0, 0)
        src2 = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
        src = blocks.add_ff()
        # throttle paces the flowgraph since there is no hardware clock
        thr = blocks.throttle(gr.sizeof_float, 100*npts)
        self.snk1 = qtgui.histogram_sink_f(npts, 200, -5, 5,
                                           "Histogram")

        self.connect(src1, (src,0))
        self.connect(src2, (src,1))
        self.connect(src, thr, self.snk1)

        self.ctrl_win = control_box(self.snk1)
        self.ctrl_win.attach_signal1(src1)
        self.ctrl_win.attach_signal2(src2)

        # Get the reference pointer to the SpectrumDisplayForm QWidget
        pyQt = self.snk1.pyqwidget()

        # Wrap the pointer as a PyQt SIP object
        # This can now be manipulated as a PyQt4.QtGui.QWidget
        pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)
        #pyWin.show()

        self.main_box = dialog_box(pyWin, self.ctrl_win)
        self.main_box.show()
if __name__ == "__main__":
    # Build the flowgraph, run it, and hand control to the Qt event loop;
    # stop the flowgraph when the GUI exits.
    top = my_top_block()
    tb = top
    tb.start()
    tb.qapp.exec_()
    tb.stop()
| gpl-3.0 |
edlabh/SickRage | lib/sqlalchemy/util/langhelpers.py | 75 | 37513 | # util/langhelpers.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
import itertools
import inspect
import operator
import re
import sys
import types
import warnings
from functools import update_wrapper
from .. import exc
import hashlib
from . import compat
from . import _collections
def md5_hex(x):
    """Return the hex MD5 digest of *x*, encoding str to UTF-8 on Py3 first."""
    if compat.py3k:
        x = x.encode('utf-8')
    digest = hashlib.md5()
    digest.update(x)
    return digest.hexdigest()
class safe_reraise(object):
    """Reraise an exception after invoking some
    handler code.

    Stores the existing exception info before
    invoking so that it is maintained across a potential
    coroutine context switch.

    e.g.::

        try:
            sess.commit()
        except:
            with safe_reraise():
                sess.rollback()

    """

    def __enter__(self):
        # capture the in-flight exception now, before the with-block body
        # runs, in case a context switch clears sys.exc_info()
        self._exc_info = sys.exc_info()

    def __exit__(self, type_, value, traceback):
        # see #2703 for notes
        if type_ is None:
            # with-block body succeeded: re-raise the originally captured one
            exc_type, exc_value, exc_tb = self._exc_info
            self._exc_info = None  # remove potential circular references
            compat.reraise(exc_type, exc_value, exc_tb)
        else:
            # with-block body itself raised: propagate that newer exception
            self._exc_info = None  # remove potential circular references
            compat.reraise(type_, value, traceback)
def decode_slice(slc):
    """decode a slice object as sent to __getitem__.

    takes into account the 2.5 __index__() method, basically.
    """
    decoded = []
    for component in (slc.start, slc.stop, slc.step):
        index = getattr(component, '__index__', None)
        decoded.append(component if index is None else index())
    return tuple(decoded)
def _unique_symbols(used, *bases):
used = set(used)
for base in bases:
pool = itertools.chain((base,),
compat.itertools_imap(lambda i: base + str(i),
range(1000)))
for sym in pool:
if sym not in used:
used.add(sym)
yield sym
break
else:
raise NameError("exhausted namespace for symbol base %s" % base)
def decorator(target):
    """A signature-matching decorator factory."""

    def decorate(fn):
        if not inspect.isfunction(fn):
            raise Exception("not a decoratable function")
        # build a wrapper with fn's exact argument signature that forwards
        # to target(fn, <original args>)
        spec = compat.inspect_getfullargspec(fn)
        names = tuple(spec[0]) + spec[1:3] + (fn.__name__,)
        # pick symbol names for 'target' and 'fn' that cannot collide with
        # any of fn's own argument names
        targ_name, fn_name = _unique_symbols(names, 'target', 'fn')

        metadata = dict(target=targ_name, fn=fn_name)
        metadata.update(format_argspec_plus(spec, grouped=False))
        metadata['name'] = fn.__name__
        code = """\
def %(name)s(%(args)s):
    return %(target)s(%(fn)s, %(apply_kw)s)
""" % metadata
        decorated = _exec_code_in_env(code,
                                      {targ_name: target, fn_name: fn},
                                      fn.__name__)
        # carry over defaults and expose the wrapped function for
        # introspection tools
        decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
        decorated.__wrapped__ = fn
        return update_wrapper(decorated, fn)
    return update_wrapper(decorate, target)
def _exec_code_in_env(code, env, fn_name):
exec(code, env)
return env[fn_name]
def public_factory(target, location):
    """Produce a wrapping function for the given cls or classmethod.

    Rationale here is so that the __init__ method of the
    class can serve as documentation for the function.

    """
    if isinstance(target, type):
        fn = target.__init__
        callable_ = target
        doc = "Construct a new :class:`.%s` object. \n\n"\
            "This constructor is mirrored as a public API function; see :func:`~%s` "\
            "for a full usage and argument description." % (
                target.__name__, location, )
    else:
        fn = callable_ = target
        doc = "This function is mirrored; see :func:`~%s` "\
            "for a description of arguments." % location

    location_name = location.split(".")[-1]
    spec = compat.inspect_getfullargspec(fn)
    del spec[0][0]  # drop 'self' from the generated signature
    metadata = format_argspec_plus(spec, grouped=False)
    metadata['name'] = location_name
    code = """\
def %(name)s(%(args)s):
    return cls(%(apply_kw)s)
""" % metadata
    # NOTE(review): 'symbol' here is a module-level name defined elsewhere in
    # this module; it is injected so generated defaults referencing it resolve.
    env = {'cls': callable_, 'symbol': symbol}
    exec(code, env)
    decorated = env[location_name]
    decorated.__doc__ = fn.__doc__
    if compat.py2k or hasattr(fn, '__func__'):
        fn.__func__.__doc__ = doc
    else:
        fn.__doc__ = doc
    return decorated
class PluginLoader(object):
    """Lazily resolve named plugin implementations, registered directly or
    discovered via setuptools entry points in *group*."""

    def __init__(self, group, auto_fn=None):
        self.group = group
        # name -> zero-argument loader callable
        self.impls = {}
        # optional fallback: name -> loader (or None) tried before entry points
        self.auto_fn = auto_fn

    def load(self, name):
        # 1. previously registered/cached loader
        if name in self.impls:
            return self.impls[name]()
        # 2. auto_fn hook
        if self.auto_fn:
            loader = self.auto_fn(name)
            if loader:
                self.impls[name] = loader
                return loader()
        # 3. setuptools entry points, if pkg_resources is available
        try:
            import pkg_resources
        except ImportError:
            pass
        else:
            for impl in pkg_resources.iter_entry_points(
                    self.group, name):
                self.impls[name] = impl.load
                return impl.load()
        raise exc.NoSuchModuleError(
            "Can't load plugin: %s:%s" %
            (self.group, name))

    def register(self, name, modulepath, objname):
        # defer the module import until the plugin is actually requested
        def load():
            mod = compat.import_(modulepath)
            for token in modulepath.split(".")[1:]:
                mod = getattr(mod, token)
            return getattr(mod, objname)
        self.impls[name] = load
def get_cls_kwargs(cls, _set=None):
    """Return the full set of inherited kwargs for the given `cls`.

    Probes a class's __init__ method, collecting all named arguments. If the
    __init__ defines a \**kwargs catch-all, then the constructor is presumed to
    pass along unrecognized keywords to its base classes, and the collection
    process is repeated recursively on each of the bases.

    Uses a subset of inspect.getargspec() to cut down on method overhead.
    No anonymous tuple arguments please !

    """
    # identity test rather than `== None`, which would invoke __eq__
    toplevel = _set is None
    if toplevel:
        _set = set()

    ctr = cls.__dict__.get('__init__', False)

    # only plain Python functions can be introspected via their code object
    has_init = ctr and isinstance(ctr, types.FunctionType) and \
        isinstance(ctr.__code__, types.CodeType)

    if has_init:
        names, has_kw = inspect_func_args(ctr)
        _set.update(names)

        # no **kwargs and not the starting class: nothing propagates upward
        if not has_kw and not toplevel:
            return None

    if not has_init or has_kw:
        # recurse into bases; a None result means that base absorbed the rest
        for c in cls.__bases__:
            if get_cls_kwargs(c, _set) is None:
                break

    _set.discard('self')
    return _set
try:
    # TODO: who doesn't have this constant?
    from inspect import CO_VARKEYWORDS

    def inspect_func_args(fn):
        # fast path: read (arg names, has **kw) straight off the code object
        # instead of paying for a full inspect.getargspec() call
        co = fn.__code__
        nargs = co.co_argcount
        names = co.co_varnames
        args = list(names[:nargs])
        has_kw = bool(co.co_flags & CO_VARKEYWORDS)
        return args, has_kw
except ImportError:
    def inspect_func_args(fn):
        # fallback: derive the same (args, has_kw) pair via inspect
        names, _, has_kw, _ = inspect.getargspec(fn)
        return names, bool(has_kw)
def get_func_kwargs(func):
    """Return the set of legal kwargs for the given `func`.

    Uses getargspec so is safe to call for methods, functions,
    etc.
    """
    args, _, _, _ = compat.inspect_getargspec(func)
    return args
def get_callable_argspec(fn, no_self=False, _is_init=False):
    """Return the argument signature for any callable.

    All pure-Python callables are accepted, including
    functions, methods, classes, objects with __call__;
    builtins and other edge cases like functools.partial() objects
    raise a TypeError.

    """
    if inspect.isbuiltin(fn):
        raise TypeError("Can't inspect builtin: %s" % fn)
    elif inspect.isfunction(fn):
        if _is_init and no_self:
            # drop the leading 'self' from an __init__ reached via a class
            spec = compat.inspect_getargspec(fn)
            return compat.ArgSpec(spec.args[1:], spec.varargs,
                                  spec.keywords, spec.defaults)
        else:
            return compat.inspect_getargspec(fn)
    elif inspect.ismethod(fn):
        if no_self and (_is_init or fn.__self__):
            spec = compat.inspect_getargspec(fn.__func__)
            return compat.ArgSpec(spec.args[1:], spec.varargs,
                                  spec.keywords, spec.defaults)
        else:
            return compat.inspect_getargspec(fn.__func__)
    elif inspect.isclass(fn):
        # inspect the constructor, flagging _is_init so 'self' handling applies
        return get_callable_argspec(fn.__init__, no_self=no_self, _is_init=True)
    elif hasattr(fn, '__func__'):
        return compat.inspect_getargspec(fn.__func__)
    elif hasattr(fn, '__call__'):
        if inspect.ismethod(fn.__call__):
            return get_callable_argspec(fn.__call__, no_self=no_self)
        else:
            raise TypeError("Can't inspect callable: %s" % fn)
    else:
        raise TypeError("Can't inspect callable: %s" % fn)
def format_argspec_plus(fn, grouped=True):
    """Returns a dictionary of formatted, introspected function arguments.

    A enhanced variant of inspect.formatargspec to support code generation.

    fn
       An inspectable callable or tuple of inspect getargspec() results.
    grouped
      Defaults to True; include (parens, around, argument) lists

    Returns:

    args
      Full inspect.formatargspec for fn
    self_arg
      The name of the first positional argument, varargs[0], or None
      if the function defines no positional arguments.
    apply_pos
      args, re-written in calling rather than receiving syntax.  Arguments are
      passed positionally.
    apply_kw
      Like apply_pos, except keyword-ish args are passed as keywords.

    Example::

      >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
      {'args': '(self, a, b, c=3, **d)',
       'self_arg': 'self',
       'apply_kw': '(self, a, b, c=c, **d)',
       'apply_pos': '(self, a, b, c, **d)'}

    """
    if compat.callable(fn):
        spec = compat.inspect_getfullargspec(fn)
    else:
        # we accept an existing argspec...
        spec = fn
    args = inspect.formatargspec(*spec)
    if spec[0]:
        self_arg = spec[0][0]
    elif spec[1]:
        # no named positionals; refer to the first vararg element
        self_arg = '%s[0]' % spec[1]
    else:
        self_arg = None

    if compat.py3k:
        # spec[4] is kwonlyargs on py3; they count as defaulted names
        apply_pos = inspect.formatargspec(spec[0], spec[1],
                                          spec[2], None, spec[4])
        num_defaults = 0
        if spec[3]:
            num_defaults += len(spec[3])
        if spec[4]:
            num_defaults += len(spec[4])
        name_args = spec[0] + spec[4]
    else:
        apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
        num_defaults = 0
        if spec[3]:
            num_defaults += len(spec[3])
        name_args = spec[0]

    if num_defaults:
        defaulted_vals = name_args[0 - num_defaults:]
    else:
        defaulted_vals = ()

    # render defaults as name=name so the generated call forwards keywords
    apply_kw = inspect.formatargspec(name_args, spec[1], spec[2],
                                     defaulted_vals,
                                     formatvalue=lambda x: '=' + x)
    if grouped:
        return dict(args=args, self_arg=self_arg,
                    apply_pos=apply_pos, apply_kw=apply_kw)
    else:
        # strip the surrounding parens when ungrouped output is requested
        return dict(args=args[1:-1], self_arg=self_arg,
                    apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
def format_argspec_init(method, grouped=True):
    """format_argspec_plus with considerations for typical __init__ methods

    Wraps format_argspec_plus with error handling strategies for typical
    __init__ cases::

      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)

    """
    if method is object.__init__:
        args = '(self)' if grouped else 'self'
    else:
        try:
            return format_argspec_plus(method, grouped=grouped)
        except TypeError:
            if grouped:
                args = '(self, *args, **kwargs)'
            else:
                args = 'self, *args, **kwargs'
    return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
    """inspect.getargspec with considerations for typical __init__ methods

    Wraps inspect.getargspec with error handling for typical __init__ cases::

      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)

    """
    try:
        # NOTE(review): inspect.getargspec was removed in Python 3.11; this
        # presumes the py2/early-py3 versions this release supported.
        return inspect.getargspec(method)
    except TypeError:
        if method is object.__init__:
            return (['self'], None, None, None)
        else:
            return (['self'], 'args', 'kwargs', None)
def unbound_method_to_callable(func_or_cls):
    """Adjust the incoming callable such that a 'self' argument is not
    required.
    """
    is_unbound = (isinstance(func_or_cls, types.MethodType)
                  and not func_or_cls.__self__)
    return func_or_cls.__func__ if is_unbound else func_or_cls
def generic_repr(obj, additional_kw=(), to_inspect=None):
    """Produce a __repr__() based on direct association of the __init__()
    specification vs. same-named attributes present.

    """
    if to_inspect is None:
        to_inspect = [obj]
    else:
        to_inspect = _collections.to_list(to_inspect)

    # sentinel distinguishing "attribute absent" from a real None value
    missing = object()

    pos_args = []
    kw_args = _collections.OrderedDict()
    vargs = None
    for i, insp in enumerate(to_inspect):
        try:
            (_args, _vargs, vkw, defaults) = \
                inspect.getargspec(insp.__init__)
        except TypeError:
            # C-level or otherwise unreflectable __init__: skip this class
            continue
        else:
            default_len = defaults and len(defaults) or 0
            if i == 0:
                # first class: its non-defaulted args render positionally
                if _vargs:
                    vargs = _vargs
                if default_len:
                    pos_args.extend(_args[1:-default_len])
                else:
                    pos_args.extend(_args[1:])
            else:
                # later classes: everything renders as keywords
                kw_args.update([
                    (arg, missing) for arg in _args[1:-default_len]
                ])

            if default_len:
                kw_args.update([
                    (arg, default)
                    for arg, default
                    in zip(_args[-default_len:], defaults)
                ])
    output = []

    output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)

    if vargs is not None and hasattr(obj, vargs):
        output.extend([repr(val) for val in getattr(obj, vargs)])

    # keyword args are shown only when they differ from their defaults
    for arg, defval in kw_args.items():
        try:
            val = getattr(obj, arg, missing)
            if val is not missing and val != defval:
                output.append('%s=%r' % (arg, val))
        except:
            pass

    if additional_kw:
        for arg, defval in additional_kw:
            try:
                val = getattr(obj, arg, missing)
                if val is not missing and val != defval:
                    output.append('%s=%r' % (arg, val))
            except:
                pass

    return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
class portable_instancemethod(object):
    """Turn an instancemethod into a (parent, name) pair
    to produce a serializable callable.
    """

    def __init__(self, meth):
        self.target = meth.__self__
        self.name = meth.__name__

    def __call__(self, *arg, **kw):
        # re-resolve the bound method by name at call time
        bound = getattr(self.target, self.name)
        return bound(*arg, **kw)
def class_hierarchy(cls):
    """Return an unordered sequence of all classes related to cls.

    Traverses diamond hierarchies.

    Fibs slightly: subclasses of builtin types are not returned.  Thus
    class_hierarchy(class A(object)) returns (A, object), not A plus every
    class systemwide that derives from object.

    Old-style classes are discarded and hierarchies rooted on them
    will not be descended.

    """
    if compat.py2k:
        if isinstance(cls, types.ClassType):
            return list()

    hier = set([cls])
    process = list(cls.__mro__)
    while process:
        c = process.pop()
        if compat.py2k:
            if isinstance(c, types.ClassType):
                continue
            # old-style bases are excluded from the walk on py2
            bases = (_ for _ in c.__bases__
                     if _ not in hier and not isinstance(_, types.ClassType))
        else:
            bases = (_ for _ in c.__bases__ if _ not in hier)

        for b in bases:
            process.append(b)
            hier.add(b)

        # do not descend into subclasses of builtins (see docstring)
        if compat.py3k:
            if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'):
                continue
        else:
            if c.__module__ == '__builtin__' or not hasattr(c, '__subclasses__'):
                continue

        for s in [_ for _ in c.__subclasses__() if _ not in hier]:
            process.append(s)
            hier.add(s)
    return list(hier)
def iterate_attributes(cls):
    """iterate all the keys and attributes associated
    with a class, without using getattr().

    Does not use getattr() so that class-sensitive
    descriptors (i.e. property.__get__()) are not called.
    """
    for key in dir(cls):
        # resolve each name against the MRO by hand, stopping at the
        # first class whose __dict__ defines it
        for klass in cls.__mro__:
            if key in klass.__dict__:
                yield (key, klass.__dict__[key])
                break
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
                                 name='self.proxy', from_instance=None):
    """Automates delegation of __specials__ for a proxying type."""

    if only:
        dunders = only
    else:
        if skip is None:
            skip = ('__slots__', '__del__', '__getattribute__',
                    '__metaclass__', '__getstate__', '__setstate__')
        # dunder methods present on from_cls but absent on into_cls
        dunders = [m for m in dir(from_cls)
                   if (m.startswith('__') and m.endswith('__') and
                       not hasattr(into_cls, m) and m not in skip)]

    for method in dunders:
        try:
            fn = getattr(from_cls, method)
            if not hasattr(fn, '__call__'):
                continue
            fn = getattr(fn, 'im_func', fn)
        except AttributeError:
            continue
        try:
            spec = inspect.getargspec(fn)
            fn_args = inspect.formatargspec(spec[0])
            d_args = inspect.formatargspec(spec[0][1:])
        except TypeError:
            # C-level method: fall back to a generic signature
            fn_args = '(self, *args, **kw)'
            d_args = '(*args, **kw)'

        # generate e.g. "def __len__(self): return self.proxy.__len__()"
        py = ("def %(method)s%(fn_args)s: "
              "return %(name)s.%(method)s%(d_args)s" % locals())

        env = from_instance is not None and {name: from_instance} or {}
        compat.exec_(py, env)
        try:
            env[method].__defaults__ = fn.__defaults__
        except AttributeError:
            pass
        setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
    """Return True if the two methods are the same implementation."""
    impl1 = getattr(meth1, '__func__', meth1)
    impl2 = getattr(meth2, '__func__', meth2)
    return impl1 is impl2
def as_interface(obj, cls=None, methods=None, required=None):
    """Ensure basic interface compliance for an instance or dict of callables.

    Checks that ``obj`` implements public methods of ``cls`` or has members
    listed in ``methods``. If ``required`` is not supplied, implementing at
    least one interface method is sufficient. Methods present on ``obj`` that
    are not in the interface are ignored.

    If ``obj`` is a dict and ``dict`` does not meet the interface
    requirements, the keys of the dictionary are inspected. Keys present in
    ``obj`` that are not in the interface will raise TypeErrors.

    Raises TypeError if ``obj`` does not meet the interface criteria.

    In all passing cases, an object with callable members is returned.  In the
    simple case, ``obj`` is returned as-is; if dict processing kicks in then
    an anonymous class is returned.

    obj
      A type, instance, or dictionary of callables.
    cls
      Optional, a type.  All public methods of cls are considered the
      interface.  An ``obj`` instance of cls will always pass, ignoring
      ``required``.
    methods
      Optional, a sequence of method names to consider as the interface.
    required
      Optional, a sequence of mandatory implementations. If omitted, an
      ``obj`` that provides at least one interface method is considered
      sufficient.  As a convenience, required may be a type, in which case
      all public methods of the type are required.

    """
    if not cls and not methods:
        raise TypeError('a class or collection of method names are required')

    if isinstance(cls, type) and isinstance(obj, cls):
        return obj

    interface = set(methods or [m for m in dir(cls) if not m.startswith('_')])
    implemented = set(dir(obj))

    # ge = "all required implemented"; gt = "at least one beyond empty set"
    complies = operator.ge
    if isinstance(required, type):
        required = interface
    elif not required:
        required = set()
        complies = operator.gt
    else:
        required = set(required)

    if complies(implemented.intersection(interface), required):
        return obj

    # No dict duck typing here.
    if not type(obj) is dict:
        qualifier = complies is operator.gt and 'any of' or 'all of'
        raise TypeError("%r does not implement %s: %s" % (
            obj, qualifier, ', '.join(interface)))

    class AnonymousInterface(object):
        """A callable-holding shell."""

    if cls:
        AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
    found = set()

    # install each dict entry as a staticmethod on the anonymous class
    for method, impl in dictlike_iteritems(obj):
        if method not in interface:
            raise TypeError("%r: unknown in this interface" % method)
        if not compat.callable(impl):
            raise TypeError("%r=%r is not callable" % (method, impl))
        setattr(AnonymousInterface, method, staticmethod(impl))
        found.add(method)

    if complies(found, required):
        return AnonymousInterface

    raise TypeError("dictionary does not contain required keys %s" %
                    ', '.join(required - found))
class memoized_property(object):
    """A read-only @property that is only evaluated once.

    The computed value is stored in the instance __dict__ under the
    property's name, which shadows this descriptor on later access.
    """

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            return self
        result = self.fget(obj)
        obj.__dict__[self.__name__] = result
        return result

    def _reset(self, obj):
        memoized_property.reset(obj, self.__name__)

    @classmethod
    def reset(cls, obj, name):
        obj.__dict__.pop(name, None)
class memoized_instancemethod(object):
    """Decorate a method memoize its return value.

    Best applied to no-arg methods: memoization is not sensitive to
    argument values, and will always return the same value even when
    called with different arguments.
    """

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            return self

        def oneshot(*args, **kw):
            # first call: compute, then replace ourselves on the instance
            # with a stub that returns the cached result forever after
            result = self.fget(obj, *args, **kw)

            def memo(*a, **kw):
                return result

            memo.__name__ = self.__name__
            memo.__doc__ = self.__doc__
            obj.__dict__[self.__name__] = memo
            return result

        oneshot.__name__ = self.__name__
        oneshot.__doc__ = self.__doc__
        return oneshot
class group_expirable_memoized_property(object):
    """A family of @memoized_properties that can be expired in tandem."""

    def __init__(self, attributes=()):
        # names of all memoized attributes registered with this group
        self.attributes = list(attributes)

    def expire_instance(self, instance):
        """Expire all memoized properties for *instance*."""
        stash = instance.__dict__
        for name in self.attributes:
            stash.pop(name, None)

    def __call__(self, fn):
        self.attributes.append(fn.__name__)
        return memoized_property(fn)

    def method(self, fn):
        self.attributes.append(fn.__name__)
        return memoized_instancemethod(fn)
def dependency_for(modulename):
    """Class decorator installing the decorated object as an attribute of
    *modulename*, returning the object unchanged."""
    def decorate(obj):
        # TODO: would be nice to improve on this import silliness,
        # unfortunately importlib doesn't work that great either
        tokens = modulename.split(".")
        mod = compat.import_(
            ".".join(tokens[0:-1]), globals(), locals(), tokens[-1])
        mod = getattr(mod, tokens[-1])
        setattr(mod, obj.__name__, obj)
        return obj
    return decorate
class dependencies(object):
    """Apply imported dependencies as arguments to a function.

    E.g.::

        @util.dependencies(
            "sqlalchemy.sql.widget",
            "sqlalchemy.engine.default"
        )
        def some_func(self, widget, default, arg1, arg2, **kw):
            # ...

    Rationale is so that the impact of a dependency cycle can be
    associated directly with the few functions that cause the cycle,
    and not pollute the module-level namespace.

    """

    def __init__(self, *deps):
        self.import_deps = []
        for dep in deps:
            tokens = dep.split(".")
            self.import_deps.append(
                dependencies._importlater(
                    ".".join(tokens[0:-1]),
                    tokens[-1]
                )
            )

    def __call__(self, fn):
        import_deps = self.import_deps
        spec = compat.inspect_getfullargspec(fn)

        spec_zero = list(spec[0])
        hasself = spec_zero[0] in ('self', 'cls')

        # rewrite the leading dependency parameters so the generated inner
        # call pulls them from the import_deps list instead of the caller
        for i in range(len(import_deps)):
            spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i

        inner_spec = format_argspec_plus(spec, grouped=False)

        # remove the dependency parameters from the public-facing signature
        for impname in import_deps:
            del spec_zero[1 if hasself else 0]
        spec[0][:] = spec_zero

        outer_spec = format_argspec_plus(spec, grouped=False)

        code = 'lambda %(args)s: fn(%(apply_kw)s)' % {
            "args": outer_spec['args'],
            "apply_kw": inner_spec['apply_kw']
        }

        decorated = eval(code, locals())
        decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
        return update_wrapper(decorated, fn)

    @classmethod
    def resolve_all(cls, path):
        # resolve every pending _importlater whose module path starts with
        # *path*; called once the package is fully importable
        for m in list(dependencies._unresolved):
            if m._full_path.startswith(path):
                m._resolve()

    # registry shared by all _importlater instances
    _unresolved = set()
    _by_key = {}

    class _importlater(object):
        """Deferred attribute-of-module import, interned per dotted path."""

        _unresolved = set()
        _by_key = {}

        def __new__(cls, path, addtl):
            # intern: one _importlater per "path.addtl" key
            key = path + "." + addtl
            if key in dependencies._by_key:
                return dependencies._by_key[key]
            else:
                dependencies._by_key[key] = imp = object.__new__(cls)
                return imp

        def __init__(self, path, addtl):
            self._il_path = path
            self._il_addtl = addtl
            dependencies._unresolved.add(self)

        @property
        def _full_path(self):
            return self._il_path + "." + self._il_addtl

        @memoized_property
        def module(self):
            if self in dependencies._unresolved:
                raise ImportError(
                    "importlater.resolve_all() hasn't "
                    "been called (this is %s %s)"
                    % (self._il_path, self._il_addtl))

            return getattr(self._initial_import, self._il_addtl)

        def _resolve(self):
            dependencies._unresolved.discard(self)
            self._initial_import = compat.import_(
                self._il_path, globals(), locals(),
                [self._il_addtl])

        def __getattr__(self, key):
            if key == 'module':
                raise ImportError("Could not resolve module %s"
                                  % self._full_path)
            try:
                attr = getattr(self.module, key)
            except AttributeError:
                raise AttributeError(
                    "Module %s has no attribute '%s'" %
                    (self._full_path, key)
                )
            # cache the resolved attribute so __getattr__ is bypassed next time
            self.__dict__[key] = attr
            return attr
# from paste.deploy.converters
def asbool(obj):
    """Coerce *obj* to a bool, accepting the usual true/false strings."""
    if isinstance(obj, compat.string_types):
        normalized = obj.strip().lower()
        if normalized in ['true', 'yes', 'on', 'y', 't', '1']:
            return True
        if normalized in ['false', 'no', 'off', 'n', 'f', '0']:
            return False
        raise ValueError("String is not true/false: %r" % normalized)
    return bool(obj)
def bool_or_str(*text):
    """Return a callable that will evaluate a string as
    boolean, or one of a set of "alternate" string values.
    """
    def bool_or_value(obj):
        return obj if obj in text else asbool(obj)
    return bool_or_value
def asint(value):
    """Coerce to integer, passing None through unchanged."""
    return value if value is None else int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True):
    """If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
    necessary.  If 'flexi_bool' is True, the string '0' is considered false
    when coercing to boolean.
    """
    if key not in kw:
        return
    value = kw[key]
    # None and values already of the exact type are left untouched
    if value is None or type(value) is type_:
        return
    if type_ is bool and flexi_bool:
        kw[key] = asbool(value)
    else:
        kw[key] = type_(value)
def constructor_copy(obj, cls, **kw):
    """Instantiate cls using the __dict__ of obj as constructor arguments.

    Uses inspect to match the named arguments of ``cls``.
    """
    for name in get_cls_kwargs(cls):
        if name in obj.__dict__:
            kw[name] = obj.__dict__[name]
    return cls(**kw)
def counter():
    """Return a threadsafe counter function."""
    lock = compat.threading.Lock()
    seq = itertools.count(1)

    # avoid the 2to3 "next" transformation...
    def _next():
        with lock:
            return next(seq)

    return _next
def duck_type_collection(specimen, default=None):
    """Given an instance or class, guess if it is or is acting as one of
    the basic collection types: list, set and dict.  If the __emulates__
    property is present, return that preferentially.
    """
    if hasattr(specimen, '__emulates__'):
        emulated = specimen.__emulates__
        # canonicalize set vs sets.Set to a standard: the builtin set
        if emulated is not None and issubclass(emulated, set):
            return set
        return emulated

    # Classes are tested with issubclass, instances with isinstance.
    isa = issubclass if isinstance(specimen, type) else isinstance
    for collection_type in (list, set, dict):
        if isa(specimen, collection_type):
            return collection_type

    # Fall back to duck-typing on the characteristic mutator method.
    if hasattr(specimen, 'append'):
        return list
    if hasattr(specimen, 'add'):
        return set
    if hasattr(specimen, 'set'):
        return dict
    return default
def assert_arg_type(arg, argtype, name):
    """Return *arg* when it is an instance of *argtype*, else raise.

    *argtype* may be a single type or a tuple of acceptable types; the
    failure message lists all of them.  Raises :class:`exc.ArgumentError`
    on mismatch.
    """
    if isinstance(arg, argtype):
        return arg
    if isinstance(argtype, tuple):
        expected = ' or '.join("'%s'" % a for a in argtype)
        raise exc.ArgumentError(
            "Argument '%s' is expected to be one of type %s, got '%s'" %
            (name, expected, type(arg)))
    raise exc.ArgumentError(
        "Argument '%s' is expected to be of type '%s', got '%s'" %
        (name, argtype, type(arg)))
def dictlike_iteritems(dictlike):
    """Return a (key, value) iterator for almost any dict-like object."""
    # Real mappings: use their native items()/iteritems() directly.
    if compat.py3k:
        if hasattr(dictlike, 'items'):
            return list(dictlike.items())
    else:
        if hasattr(dictlike, 'iteritems'):
            return dictlike.iteritems()
        if hasattr(dictlike, 'items'):
            return iter(dictlike.items())

    # Otherwise synthesize pairs from a key iterator plus item access.
    getter = getattr(
        dictlike, '__getitem__', getattr(dictlike, 'get', None))
    if getter is None:
        raise TypeError(
            "Object '%r' is not dict-like" % dictlike)

    if hasattr(dictlike, 'iterkeys'):
        def iterator():
            for key in dictlike.iterkeys():
                yield key, getter(key)
        return iterator()
    if hasattr(dictlike, 'keys'):
        return iter((key, getter(key)) for key in dictlike.keys())
    raise TypeError(
        "Object '%r' is not dict-like" % dictlike)
class classproperty(property):
    """A decorator that behaves like @property except that operates
    on classes rather than instances.

    The decorator is currently special when using the declarative
    module, but note that the
    :class:`~.sqlalchemy.ext.declarative.declared_attr`
    decorator should be used for this purpose with declarative.
    """

    def __init__(self, fget, *arg, **kw):
        super(classproperty, self).__init__(fget, *arg, **kw)
        self.__doc__ = fget.__doc__

    def __get__(self, obj, owner):
        # Always invoke the getter with the class, never the instance.
        return self.fget(owner)
class hybridmethod(object):
    """Decorate a function as cls- or instance- level."""

    def __init__(self, func, expr=None):
        self.func = func

    def __get__(self, instance, owner):
        if instance is None:
            # Class-level access: bind the function to the class itself.
            return self.func.__get__(owner, owner.__class__)
        # Instance-level access: an ordinary bound method.
        return self.func.__get__(instance, owner)
class _symbol(int):
    # Private ``int`` subclass backing the public ``symbol`` factory below;
    # instances carry a ``name`` (and optional ``__doc__``) on top of an
    # integer "canonical" value.

    def __new__(self, name, doc=None, canonical=None):
        """Construct a new named symbol."""
        # assumes compat.string_types covers str (and unicode on py2)
        assert isinstance(name, compat.string_types)
        if canonical is None:
            # Default the integer value to the hash of the name.
            canonical = hash(name)
        v = int.__new__(_symbol, canonical)
        v.name = name
        if doc:
            v.__doc__ = doc
        return v

    def __reduce__(self):
        # Pickle via the interning ``symbol`` factory so unpickling yields
        # the canonical instance; the "x" doc placeholder is ignored when
        # the symbol already exists in the registry.
        return symbol, (self.name, "x", int(self))

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "symbol(%r)" % self.name

# Present the private class under the public name in reprs and tracebacks.
_symbol.__name__ = 'symbol'
class symbol(object):
    """A constant symbol.

    >>> symbol('foo') is symbol('foo')
    True
    >>> symbol('foo')
    <symbol 'foo>

    A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
    advantage of symbol() is its repr().  They are also singletons.

    Repeated calls of symbol('name') will all return the same instance.

    The optional ``doc`` argument assigns to ``__doc__``.  This
    is strictly so that Sphinx autoattr picks up the docstring we want
    (it doesn't appear to pick up the in-module docstring if the datamember
    is in a different module - autoattribute also blows up completely).
    If Sphinx fixes/improves this then we would no longer need
    ``doc`` here.
    """
    # Process-wide registry of interned symbols, keyed by name.
    symbols = {}
    _lock = compat.threading.Lock()

    def __new__(cls, name, doc=None, canonical=None):
        # Guard the registry with the lock as a context manager; the
        # original acquired via ``cls._lock`` but released via
        # ``symbol._lock``, an inconsistency that ``with`` eliminates.
        with cls._lock:
            sym = cls.symbols.get(name)
            if sym is None:
                cls.symbols[name] = sym = _symbol(name, doc, canonical)
            return sym
# Monotonically increasing sequence shared by set_creation_order().
_creation_order = 1


def set_creation_order(instance):
    """Assign a '_creation_order' sequence to the given instance.

    This allows multiple instances to be sorted in order of creation
    (typically within a single thread; the counter is not particularly
    threadsafe).
    """
    global _creation_order
    order, _creation_order = _creation_order, _creation_order + 1
    instance._creation_order = order
def warn_exception(func, *args, **kwargs):
    """executes the given function, catches all exceptions and converts to
    a warning.

    Returns the function's result on success, ``None`` when an exception
    was converted to a warning.
    """
    try:
        return func(*args, **kwargs)
    except Exception:
        # Catch Exception rather than a bare ``except:`` so that
        # system-exiting exceptions (KeyboardInterrupt, SystemExit,
        # GeneratorExit) still propagate instead of being turned into
        # warnings.
        warn("%s('%s') ignored" % sys.exc_info()[0:2])
def warn(msg, stacklevel=3):
    """Issue a warning.

    If msg is a string, :class:`.exc.SAWarning` is used as
    the category.

    .. note::

       This function is swapped out when the test suite
       runs, with a compatible version that uses
       warnings.warn_explicit, so that the warnings registry can
       be controlled.
    """
    if not isinstance(msg, compat.string_types):
        # A Warning instance (or subclass) carries its own category.
        warnings.warn(msg, stacklevel=stacklevel)
        return
    warnings.warn(msg, exc.SAWarning, stacklevel=stacklevel)
def only_once(fn):
    """Decorate the given function to be a no-op after it is called exactly
    once."""
    remaining = [fn]

    def go(*arg, **kw):
        # pop() empties the one-element list, so every later call falls
        # through and returns None.
        if remaining:
            return remaining.pop()(*arg, **kw)

    return go
# Default trim patterns: test-runner frames at the head, SQLAlchemy's own
# frames at the tail.
_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py')
_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)')


def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
    """Chop extraneous lines off beginning and end of a traceback.

    :param tb:
      a list of traceback lines as returned by ``traceback.format_stack()``
    :param exclude_prefix:
      a regular expression object matching lines to skip at beginning of ``tb``
    :param exclude_suffix:
      a regular expression object matching lines to skip at end of ``tb``
    """
    first = 0
    last = len(tb) - 1
    while first <= last and exclude_prefix.search(tb[first]):
        first += 1
    while first <= last and exclude_suffix.search(tb[last]):
        last -= 1
    return tb[first:last + 1]

NoneType = type(None)
| gpl-3.0 |
maxalbert/Pytess | __private__/old_drafts/voronoi_test.py | 2 | 1200 | # voronoi test
# taken from http://rosettacode.org/wiki/Voronoi_diagram#Python
# but only puts pixels instead of storing the polygons
# here is another implementation: http://svn.osgeo.org/qgis/trunk/qgis/python/plugins/fTools/tools/voronoi.py
from PIL import Image
import random
import math
def generate_voronoi_diagram(width, height, num_cells):
    """Render a random Voronoi diagram to VoronoiDiagram.png and display it.

    Places ``num_cells`` random seed points, each with a random RGB colour,
    then colours every pixel with the colour of its nearest seed
    (brute-force nearest-seed search, O(width * height * num_cells)).
    """
    image = Image.new("RGB", (width, height))
    putpixel = image.putpixel
    imgx, imgy = image.size
    nx = []
    ny = []
    nr = []
    ng = []
    nb = []
    for i in range(num_cells):
        nx.append(random.randrange(imgx))
        ny.append(random.randrange(imgy))
        nr.append(random.randrange(256))
        ng.append(random.randrange(256))
        nb.append(random.randrange(256))
    # Hoisted out of the pixel loop: the starting "minimum" distance (the
    # image diagonal) is loop-invariant, so compute it once instead of
    # once per pixel.
    max_dist = math.hypot(imgx - 1, imgy - 1)
    for y in range(imgy):
        for x in range(imgx):
            dmin = max_dist
            j = -1
            for i in range(num_cells):
                d = math.hypot(nx[i] - x, ny[i] - y)
                if d < dmin:
                    dmin = d
                    j = i
            putpixel((x, y), (nr[j], ng[j], nb[j]))
    image.save("VoronoiDiagram.png", "PNG")
    image.show()


if __name__ == "__main__":
    # Guarded so importing this module no longer renders a 500x500 image
    # as a side effect.
    generate_voronoi_diagram(500, 500, 25)
| mit |
SheffieldML/TVB | likelihoods.py | 1 | 1637 | # Copyright (c) 2014, James Hensman, Max Zwiessele
# Distributed under the terms of the GNU General public License, see LICENSE.txt
import numpy as np
from scipy.special import gamma, digamma
from scipy import stats
class student_t():
    """Student-t likelihood parameterized by nu (degrees of freedom) and
    lambda (precision), with gradients of the log-pdf w.r.t. both."""

    def __init__(self):
        # Default to nu = 1, lambda = 1.
        self._set_params(np.ones(2))

    def _set_params(self, p):
        """Set (nu, lambda) from the length-2 array *p* and refresh
        cached constants."""
        self.nu, self.lamb = p
        # compute some constants so that they don't appear in a loop
        self._pdf_const = gamma((self.nu + 1) / 2.) / gamma(self.nu / 2.) * np.sqrt(self.lamb / (self.nu * np.pi))
        self._dnu_const = 0.5 * digamma((self.nu + 1.) / 2.) - 0.5 * digamma(self.nu / 2.) - 0.5 / self.nu

    def _get_params(self):
        return np.array([self.nu, self.lamb])

    def _get_param_names(self):
        return ['nu', 'lambda']

    def pdf(self, x, Y):
        """Density of x under a Student-t centred at Y."""
        x2 = np.square(x - Y)
        return self._pdf_const * np.power(1 + self.lamb * x2 / self.nu, -(self.nu + 1.) / 2.)

    def dlnpdf_dtheta(self, x, Y):
        """Gradient of log pdf(x; Y) w.r.t. (nu, lambda), stacked rowwise."""
        x2 = np.square(x - Y)
        dnu = self._dnu_const - 0.5 * np.log(1. + self.lamb * x2 / self.nu) + 0.5 * (self.nu + 1.) * (self.lamb * x2 / self.nu ** 2) / (1. + self.lamb * x2 / self.nu)
        dlamb = 0.5 / self.lamb - 0.5 * (self.nu + 1.) * (x2 / self.nu / (1. + self.lamb * x2 / self.nu))
        return np.vstack((dnu, dlamb))

    def predictive_values(self, mu, var, percentiles):
        """Monte-Carlo predictive mean and the requested percentiles."""
        if len(percentiles) == 0:
            return mu, []
        # BUG FIX: the sample count must be an int -- np.random.randn
        # raises TypeError for float dimensions such as 40e3.
        samples = (np.random.randn(40000, *mu.shape) + mu) * np.sqrt(var)
        samples = stats.t.rvs(self.nu, loc=samples, scale=np.array(self.lamb).reshape(1, 1))
        qs = [stats.scoreatpercentile(samples, q, axis=0) for q in percentiles]
        return samples.mean(0), qs
| gpl-3.0 |
sahiljain/catapult | third_party/gsutil/third_party/oauth2client/docs/conf.py | 17 | 1973 | # -*- coding: utf-8 -*-
#
# oauth2client documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 17 23:13:19 2014.
#
import sys
import os
# -- General configuration ------------------------------------------------

# Sphinx extensions: API docs from docstrings, doc-coverage checks,
# Google/NumPy-style docstring support, and highlighted source links.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
]

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

# General information about the project.
project = u'oauth2client'
copyright = u'2014, Google, Inc'

# Version info
# Both are taken straight from the package so docs can never drift.
import oauth2client
version = oauth2client.__version__
release = oauth2client.__version__

exclude_patterns = ['_build']

# In order to load django before 1.7, we need to create a faux
# settings module and load it.
# NOTE(review): only the minor version is compared, which assumes
# Django 1.x -- confirm before upgrading Django.
import django
if django.VERSION[1] < 7:
    sys.path.insert(0, '.')
    os.environ['DJANGO_SETTINGS_MODULE'] = 'django_settings'

# -- Options for HTML output ----------------------------------------------

# We want to set the RTD theme, but not if we're on RTD.
if os.environ.get('READTHEDOCS', '') != 'True':
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

html_static_path = ['_static']
html_logo = '_static/google_logo.png'
htmlhelp_basename = 'oauth2clientdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {}
latex_documents = [
    ('index', 'oauth2client.tex', u'oauth2client Documentation',
     u'Google, Inc.', 'manual'),
]

# -- Options for manual page output ---------------------------------------

man_pages = [
    ('index', 'oauth2client', u'oauth2client Documentation',
     [u'Google, Inc.'], 1)
]

# -- Options for Texinfo output -------------------------------------------

texinfo_documents = [
    ('index', 'oauth2client', u'oauth2client Documentation',
     u'Google, Inc.', 'oauth2client', 'One line description of project.',
     'Miscellaneous'),
]
| bsd-3-clause |
gunan/tensorflow | tensorflow/python/summary/plugin_asset_test.py | 152 | 2859 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.summary import plugin_asset
class _UnnamedPluginAsset(plugin_asset.PluginAsset):
  """An example asset with a dummy serialize method provided, but no name."""

  def assets(self):
    # No files to serialize; an empty mapping satisfies the interface.
    return {}
class _ExamplePluginAsset(_UnnamedPluginAsset):
  """Simple example asset."""
  # Registered under its own class name.
  plugin_name = "_ExamplePluginAsset"
class _OtherExampleAsset(_UnnamedPluginAsset):
  """Simple example asset."""
  # A second, distinct plugin name.
  plugin_name = "_OtherExampleAsset"
class _ExamplePluginThatWillCauseCollision(_UnnamedPluginAsset):
  # Deliberately reuses _ExamplePluginAsset's plugin_name so tests can
  # provoke the name-collision ValueError.
  plugin_name = "_ExamplePluginAsset"
class PluginAssetTest(test_util.TensorFlowTestCase):
  """Tests for plugin_asset registration, interning, and graph scoping."""

  def testGetPluginAsset(self):
    # Repeated requests for the same class return the same (interned)
    # instance; different classes get distinct instances.
    epa = plugin_asset.get_plugin_asset(_ExamplePluginAsset)
    self.assertIsInstance(epa, _ExamplePluginAsset)
    epa2 = plugin_asset.get_plugin_asset(_ExamplePluginAsset)
    self.assertIs(epa, epa2)
    opa = plugin_asset.get_plugin_asset(_OtherExampleAsset)
    self.assertIsNot(epa, opa)

  def testUnnamedPluginFails(self):
    # An asset class without a plugin_name must be rejected.
    with self.assertRaises(ValueError):
      plugin_asset.get_plugin_asset(_UnnamedPluginAsset)

  def testPluginCollisionDetected(self):
    # Two different classes sharing one plugin_name must be rejected.
    plugin_asset.get_plugin_asset(_ExamplePluginAsset)
    with self.assertRaises(ValueError):
      plugin_asset.get_plugin_asset(_ExamplePluginThatWillCauseCollision)

  def testGetAllPluginAssets(self):
    epa = plugin_asset.get_plugin_asset(_ExamplePluginAsset)
    opa = plugin_asset.get_plugin_asset(_OtherExampleAsset)
    self.assertItemsEqual(plugin_asset.get_all_plugin_assets(), [epa, opa])

  def testRespectsGraphArgument(self):
    # Assets are interned per graph, not globally.
    g1 = ops.Graph()
    g2 = ops.Graph()
    e1 = plugin_asset.get_plugin_asset(_ExamplePluginAsset, g1)
    e2 = plugin_asset.get_plugin_asset(_ExamplePluginAsset, g2)
    self.assertEqual(e1, plugin_asset.get_all_plugin_assets(g1)[0])
    self.assertEqual(e2, plugin_asset.get_all_plugin_assets(g2)[0])
# Standalone execution: delegate to the googletest runner.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
nirmeshk/oh-mainline | vendor/packages/html5lib/setup.py | 18 | 1301 | from setuptools import setup, find_packages
import os
# Long description shown on the package index page.
long_description = """HTML parser designed to follow the HTML5
specification. The parser is designed to handle all flavours of HTML and
parses invalid documents using well-defined error handling rules compatible
with the behaviour of major desktop web browsers.

Output is to a tree structure; the current release supports output to
DOM, ElementTree, lxml and BeautifulSoup tree formats as well as a
simple custom format"""

# Trove classifiers describing the package for PyPI.
classifiers = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Text Processing :: Markup :: HTML'
]

setup(name='html5lib',
      version='0.95',
      url='http://code.google.com/p/html5lib/',
      license="MIT License",
      # Fixed: "specifcation" typo and the missing space at the implicit
      # string-literal concatenation ("...1.0(\"HTML5\")...").
      description='HTML parser based on the WHAT-WG Web Applications 1.0 '
                  '("HTML5") specification',
      long_description=long_description,
      classifiers=classifiers,
      maintainer='James Graham',
      maintainer_email='james@hoppipolla.co.uk',
      packages=find_packages(),
      include_package_data=True
      )
| agpl-3.0 |
drawks/ansible | lib/ansible/modules/cloud/azure/azure_rm_storageblob.py | 19 | 21434 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_storageblob
short_description: Manage blob containers and blob objects.
version_added: "2.1"
description:
- Create, update and delete blob containers and blob objects. Use to upload a file and store it as a blob object,
or download a blob object to a file.
options:
storage_account_name:
description:
- Name of the storage account to use.
required: true
aliases:
- account_name
- storage_account
blob:
description:
- Name of a blob object within the container.
aliases:
- blob_name
blob_type:
description:
- Type of Blob Object.
default: block
choices:
- block
- page
version_added: "2.5"
container:
description:
- Name of a blob container within the storage account.
required: true
aliases:
- container_name
content_type:
description:
- Set the blob content-type header. For example, 'image/png'.
cache_control:
description:
- Set the blob cache-control header.
content_disposition:
description:
- Set the blob content-disposition header.
content_encoding:
description:
- Set the blob encoding header.
content_language:
description:
- Set the blob content-language header.
content_md5:
description:
- Set the blob md5 hash value.
dest:
description:
- Destination file path. Use with state C(present) to download a blob.
aliases:
- destination
force:
description:
- Overwrite existing blob or file when uploading or downloading. Force deletion of a container
that contains blobs.
type: bool
default: no
resource_group:
description:
- Name of the resource group to use.
required: true
aliases:
- resource_group_name
src:
description:
- Source file path. Use with state C(present) to upload a blob.
aliases:
- source
state:
description:
- Assert the state of a container or blob.
- Use state C(absent) with a container value only to delete a container. Include a blob value to remove
a specific blob. A container will not be deleted, if it contains blobs. Use the force option to override,
deleting the container and all associated blobs.
- Use state C(present) to create or update a container and upload or download a blob. If the container
does not exist, it will be created. If it exists, it will be updated with configuration options. Provide
a blob name and either src or dest to upload or download. Provide a src path to upload and a dest path
to download. If a blob (uploading) or a file (downloading) already exists, it will not be overwritten
unless the force parameter is true.
default: present
choices:
- absent
- present
public_access:
description:
- Determine a container's level of public access. By default containers are private. Can only be set at
time of container creation.
choices:
- container
- blob
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Remove container foo
azure_rm_storageblob:
resource_group: myResourceGroup
storage_account_name: clh0002
container: foo
state: absent
- name: Create container foo and upload a file
azure_rm_storageblob:
resource_group: myResourceGroup
storage_account_name: clh0002
container: foo
blob: graylog.png
src: ./files/graylog.png
public_access: container
content_type: 'application/image'
- name: Download the file
azure_rm_storageblob:
resource_group: myResourceGroup
storage_account_name: clh0002
container: foo
blob: graylog.png
dest: ~/tmp/images/graylog.png
'''
RETURN = '''
blob:
description: Facts about the current state of the blob.
returned: when a blob is operated on
type: dict
sample: {
"content_length": 136532,
"content_settings": {
"cache_control": null,
"content_disposition": null,
"content_encoding": null,
"content_language": null,
"content_md5": null,
"content_type": "application/image"
},
"last_modified": "09-Mar-2016 22:08:25 +0000",
"name": "graylog.png",
"tags": {},
"type": "BlockBlob"
}
container:
description: Facts about the current state of the selected container.
returned: always
type: dict
sample: {
"last_mdoified": "09-Mar-2016 19:28:26 +0000",
"name": "foo",
"tags": {}
}
'''
import os
try:
from azure.storage.blob.models import ContentSettings
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
class AzureRMStorageBlob(AzureRMModuleBase):
    """Ansible module: manage Azure blob containers and blob objects.

    Implements create/update/delete of containers, upload/download of
    blobs, and tag/content-settings reconciliation, honouring check mode.
    """

    def __init__(self):

        # Argument spec mirrors the DOCUMENTATION block above.
        self.module_arg_spec = dict(
            storage_account_name=dict(required=True, type='str', aliases=['account_name', 'storage_account']),
            blob=dict(type='str', aliases=['blob_name']),
            blob_type=dict(type='str', default='block', choices=['block', 'page']),
            container=dict(required=True, type='str', aliases=['container_name']),
            dest=dict(type='path', aliases=['destination']),
            force=dict(type='bool', default=False),
            resource_group=dict(required=True, type='str', aliases=['resource_group_name']),
            src=dict(type='str', aliases=['source']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            public_access=dict(type='str', choices=['container', 'blob']),
            content_type=dict(type='str'),
            content_encoding=dict(type='str'),
            content_language=dict(type='str'),
            content_disposition=dict(type='str'),
            cache_control=dict(type='str'),
            content_md5=dict(type='str'),
        )

        # A single task may upload (src) or download (dest), never both.
        mutually_exclusive = [('src', 'dest')]

        # Placeholders; actual values are injected from kwargs in
        # exec_module() by the base class machinery.
        self.blob_client = None
        self.blob_details = None
        self.storage_account_name = None
        self.blob = None
        self.blob_obj = None
        self.blob_type = None
        self.container = None
        self.container_obj = None
        self.dest = None
        self.force = None
        self.resource_group = None
        self.src = None
        self.state = None
        self.tags = None
        self.public_access = None

        self.results = dict(
            changed=False,
            actions=[],
            container=dict(),
            blob=dict()
        )

        super(AzureRMStorageBlob, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                 supports_check_mode=True,
                                                 mutually_exclusive=mutually_exclusive,
                                                 supports_tags=True)

    def exec_module(self, **kwargs):
        """Reconcile the requested state with Azure and return results."""
        # Copy validated parameters (plus tags) onto the instance.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        self.results['check_mode'] = self.check_mode

        # add file path validation

        self.blob_client = self.get_blob_client(self.resource_group, self.storage_account_name, self.blob_type)
        self.container_obj = self.get_container()

        if self.blob is not None:
            self.blob_obj = self.get_blob()

        if self.state == 'present':
            if not self.container_obj:
                # create the container
                self.create_container()
            elif self.container_obj and not self.blob:
                # update container attributes
                update_tags, self.container_obj['tags'] = self.update_tags(self.container_obj.get('tags'))
                if update_tags:
                    self.update_container_tags(self.container_obj['tags'])

            if self.blob:
                # create, update or download blob
                if self.src and self.src_is_valid():
                    if self.blob_obj and not self.force:
                        self.log("Cannot upload to {0}. Blob with that name already exists. "
                                 "Use the force option".format(self.blob))
                    else:
                        self.upload_blob()
                elif self.dest and self.dest_is_valid():
                    self.download_blob()

                update_tags, self.blob_obj['tags'] = self.update_tags(self.blob_obj.get('tags'))
                if update_tags:
                    self.update_blob_tags(self.blob_obj['tags'])

                if self.blob_content_settings_differ():
                    self.update_blob_content_settings()

        elif self.state == 'absent':
            if self.container_obj and not self.blob:
                # Delete container
                if self.container_has_blobs():
                    if self.force:
                        self.delete_container()
                    else:
                        self.log("Cannot delete container {0}. It contains blobs. Use the force option.".format(
                            self.container))
                else:
                    self.delete_container()
            elif self.container_obj and self.blob_obj:
                # Delete blob
                self.delete_blob()

        # until we sort out how we want to do this globally
        del self.results['actions']
        return self.results

    def get_container(self):
        """Return a facts dict for the container, or {} if it is absent."""
        result = {}
        container = None
        if self.container:
            try:
                container = self.blob_client.get_container_properties(self.container)
            except AzureMissingResourceHttpError:
                # Missing container is not an error: an empty dict signals
                # "does not exist" to exec_module().
                pass
        if container:
            result = dict(
                name=container.name,
                tags=container.metadata,
                # NOTE(review): "last_mdoified" is a typo, but it is also
                # documented in the RETURN sample above; renaming the key
                # would break consumers -- leave as-is.
                last_mdoified=container.properties.last_modified.strftime('%d-%b-%Y %H:%M:%S %z'),
            )
        return result

    def get_blob(self):
        """Return a facts dict for the blob, or {} if it is absent."""
        result = dict()
        blob = None
        if self.blob:
            try:
                blob = self.blob_client.get_blob_properties(self.container, self.blob)
            except AzureMissingResourceHttpError:
                pass
        if blob:
            result = dict(
                name=blob.name,
                tags=blob.metadata,
                last_modified=blob.properties.last_modified.strftime('%d-%b-%Y %H:%M:%S %z'),
                type=blob.properties.blob_type,
                content_length=blob.properties.content_length,
                content_settings=dict(
                    content_type=blob.properties.content_settings.content_type,
                    content_encoding=blob.properties.content_settings.content_encoding,
                    content_language=blob.properties.content_settings.content_language,
                    content_disposition=blob.properties.content_settings.content_disposition,
                    cache_control=blob.properties.content_settings.cache_control,
                    content_md5=blob.properties.content_settings.content_md5
                )
            )
        return result

    def create_container(self):
        """Create the container (no-op in check mode) and record the action."""
        self.log('Create container %s' % self.container)

        tags = None
        if not self.blob and self.tags:
            # when a blob is present, then tags are assigned at the blob level
            tags = self.tags

        if not self.check_mode:
            try:
                self.blob_client.create_container(self.container, metadata=tags, public_access=self.public_access)
            except AzureHttpError as exc:
                self.fail("Error creating container {0} - {1}".format(self.container, str(exc)))
        self.container_obj = self.get_container()
        self.results['changed'] = True
        self.results['actions'].append('created container {0}'.format(self.container))
        self.results['container'] = self.container_obj

    def upload_blob(self):
        """Upload self.src to the blob, applying any content settings."""
        # Only build a ContentSettings object when at least one header
        # option was supplied.
        content_settings = None
        if self.content_type or self.content_encoding or self.content_language or self.content_disposition or \
                self.cache_control or self.content_md5:
            content_settings = ContentSettings(
                content_type=self.content_type,
                content_encoding=self.content_encoding,
                content_language=self.content_language,
                content_disposition=self.content_disposition,
                cache_control=self.cache_control,
                content_md5=self.content_md5
            )
        if not self.check_mode:
            try:
                self.blob_client.create_blob_from_path(self.container, self.blob, self.src,
                                                       metadata=self.tags, content_settings=content_settings)
            except AzureHttpError as exc:
                self.fail("Error creating blob {0} - {1}".format(self.blob, str(exc)))

        self.blob_obj = self.get_blob()
        self.results['changed'] = True
        self.results['actions'].append('created blob {0} from {1}'.format(self.blob, self.src))
        self.results['container'] = self.container_obj
        self.results['blob'] = self.blob_obj

    def download_blob(self):
        """Download the blob to self.dest (validated by dest_is_valid())."""
        if not self.check_mode:
            try:
                self.blob_client.get_blob_to_path(self.container, self.blob, self.dest)
            except Exception as exc:
                self.fail("Failed to download blob {0}:{1} to {2} - {3}".format(self.container,
                                                                                self.blob,
                                                                                self.dest,
                                                                                exc))
        self.results['changed'] = True
        self.results['actions'].append('downloaded blob {0}:{1} to {2}'.format(self.container,
                                                                               self.blob,
                                                                               self.dest))
        self.results['container'] = self.container_obj
        self.results['blob'] = self.blob_obj

    def src_is_valid(self):
        """Fail the module unless self.src is a readable file; else True."""
        if not os.path.isfile(self.src):
            self.fail("The source path must be a file.")
        try:
            fp = open(self.src, 'r')
            fp.close()
        except IOError:
            self.fail("Failed to access {0}. Make sure the file exists and that you have "
                      "read access.".format(self.src))
        return True

    def dest_is_valid(self):
        """Normalize self.dest (creating directories as needed) and decide
        whether a download may proceed (False when dest exists, no force)."""
        if not self.check_mode:
            if not os.path.basename(self.dest):
                # dest is a directory
                if os.path.isdir(self.dest):
                    self.log("Path is dir. Appending blob name.")
                    self.dest += self.blob
                else:
                    try:
                        self.log('Attempting to makedirs {0}'.format(self.dest))
                        os.makedirs(self.dest)
                    # NOTE(review): os.makedirs raises OSError; on Python 2
                    # IOError is a different class, so this handler may not
                    # fire there -- confirm the intended behaviour.
                    except IOError as exc:
                        self.fail("Failed to create directory {0} - {1}".format(self.dest, str(exc)))
                    self.dest += self.blob
            else:
                # does path exist without basename
                file_name = os.path.basename(self.dest)
                path = self.dest.replace(file_name, '')
                self.log('Checking path {0}'.format(path))
                if not os.path.isdir(path):
                    try:
                        self.log('Attempting to makedirs {0}'.format(path))
                        os.makedirs(path)
                    except IOError as exc:
                        self.fail("Failed to create directory {0} - {1}".format(path, str(exc)))
            self.log('Checking final path {0}'.format(self.dest))
            if os.path.isfile(self.dest) and not self.force:
                # dest already exists and we're not forcing
                self.log("Dest {0} already exists. Cannot download. Use the force option.".format(self.dest))
                return False
        return True

    def delete_container(self):
        """Delete the container (no-op in check mode) and record the action."""
        if not self.check_mode:
            try:
                self.blob_client.delete_container(self.container)
            except AzureHttpError as exc:
                self.fail("Error deleting container {0} - {1}".format(self.container, str(exc)))

        self.results['changed'] = True
        self.results['actions'].append('deleted container {0}'.format(self.container))

    def container_has_blobs(self):
        """Return True when the container holds at least one blob."""
        try:
            list_generator = self.blob_client.list_blobs(self.container)
        except AzureHttpError as exc:
            self.fail("Error list blobs in {0} - {1}".format(self.container, str(exc)))
        if len(list_generator.items) > 0:
            return True
        return False

    def delete_blob(self):
        """Delete the blob (no-op in check mode) and record the action."""
        if not self.check_mode:
            try:
                self.blob_client.delete_blob(self.container, self.blob)
            except AzureHttpError as exc:
                self.fail("Error deleting blob {0}:{1} - {2}".format(self.container, self.blob, str(exc)))

        self.results['changed'] = True
        self.results['actions'].append('deleted blob {0}:{1}'.format(self.container, self.blob))
        self.results['container'] = self.container_obj

    def update_container_tags(self, tags):
        """Replace the container's metadata tags with *tags*."""
        if not self.check_mode:
            try:
                self.blob_client.set_container_metadata(self.container, metadata=tags)
            except AzureHttpError as exc:
                self.fail("Error updating container tags {0} - {1}".format(self.container, str(exc)))
        self.container_obj = self.get_container()
        self.results['changed'] = True
        self.results['actions'].append("updated container {0} tags.".format(self.container))
        self.results['container'] = self.container_obj

    def update_blob_tags(self, tags):
        """Replace the blob's metadata tags with *tags*."""
        if not self.check_mode:
            try:
                self.blob_client.set_blob_metadata(self.container, self.blob, metadata=tags)
            except AzureHttpError as exc:
                self.fail("Update blob tags {0}:{1} - {2}".format(self.container, self.blob, str(exc)))
        self.blob_obj = self.get_blob()
        self.results['changed'] = True
        self.results['actions'].append("updated blob {0}:{1} tags.".format(self.container, self.blob))
        self.results['container'] = self.container_obj
        self.results['blob'] = self.blob_obj

    def blob_content_settings_differ(self):
        """Return True when requested content settings differ from the
        blob's current settings (only if any option was supplied)."""
        if self.content_type or self.content_encoding or self.content_language or self.content_disposition or \
                self.cache_control or self.content_md5:
            settings = dict(
                content_type=self.content_type,
                content_encoding=self.content_encoding,
                content_language=self.content_language,
                content_disposition=self.content_disposition,
                cache_control=self.cache_control,
                content_md5=self.content_md5
            )
            if self.blob_obj['content_settings'] != settings:
                return True

        return False

    def update_blob_content_settings(self):
        """Push the requested content settings onto the blob."""
        content_settings = ContentSettings(
            content_type=self.content_type,
            content_encoding=self.content_encoding,
            content_language=self.content_language,
            content_disposition=self.content_disposition,
            cache_control=self.cache_control,
            content_md5=self.content_md5
        )
        if not self.check_mode:
            try:
                self.blob_client.set_blob_properties(self.container, self.blob, content_settings=content_settings)
            except AzureHttpError as exc:
                self.fail("Update blob content settings {0}:{1} - {2}".format(self.container, self.blob, str(exc)))

        self.blob_obj = self.get_blob()
        self.results['changed'] = True
        self.results['actions'].append("updated blob {0}:{1} content settings.".format(self.container, self.blob))
        self.results['container'] = self.container_obj
        self.results['blob'] = self.blob_obj
def main():
    """Module entry point: instantiating the class runs the module."""
    AzureRMStorageBlob()


if __name__ == '__main__':
    main()
| gpl-3.0 |
krux/graphite-web | webapp/tests/test_whitelist.py | 1 | 3982 | import errno
import mock
import os
import pickle
from . import DATA_DIR
from django.conf import settings
from django.core.urlresolvers import reverse
from .base import TestCase
from graphite.whitelist.views import load_whitelist, save_whitelist
class WhitelistTester(TestCase):
    """Exercise the whitelist show/add/remove views and the
    save/load persistence helpers."""

    # NOTE: this assignment mutates the shared Django settings as a side
    # effect of class creation; all tests in the process share this path.
    settings.WHITELIST_FILE = os.path.join(DATA_DIR, 'lists/whitelist')

    def wipe_whitelist(self):
        """Remove the whitelist file, ignoring the case where it is absent."""
        try:
            os.remove(settings.WHITELIST_FILE)
        except OSError:
            pass

    def create_whitelist(self):
        """Write a two-entry whitelist fixture to disk."""
        try:
            os.makedirs(settings.WHITELIST_FILE.replace('whitelist', ''))
        except OSError:
            pass
        # Context manager guarantees the handle is closed even if
        # pickle.dump raises (the original leaked it on failure).
        with open(settings.WHITELIST_FILE, 'wb') as fh:
            pickle.dump({'a.b.c.d', 'e.f.g.h'}, fh)

    def test_whitelist_show_no_whitelist(self):
        url = reverse('whitelist_show')
        # The view propagates the missing-file error; no response to bind.
        with self.assertRaises(IOError):
            self.client.get(url)

    def test_whitelist_show(self):
        url = reverse('whitelist_show')
        self.create_whitelist()
        self.addCleanup(self.wipe_whitelist)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "a.b.c.d\ne.f.g.h")

    def test_whitelist_add(self):
        self.create_whitelist()
        self.addCleanup(self.wipe_whitelist)
        url = reverse('whitelist_add')
        response = self.client.post(url, {'metrics': ['i.j.k.l']})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "OK")

        url = reverse('whitelist_show')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "a.b.c.d\ne.f.g.h\ni.j.k.l")

    def test_whitelist_add_existing(self):
        # Adding a metric already present must not duplicate it.
        self.create_whitelist()
        self.addCleanup(self.wipe_whitelist)
        url = reverse('whitelist_add')
        response = self.client.post(url, {'metrics': ['a.b.c.d']})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "OK")

        url = reverse('whitelist_show')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "a.b.c.d\ne.f.g.h")

    def test_whitelist_remove(self):
        self.create_whitelist()
        self.addCleanup(self.wipe_whitelist)
        url = reverse('whitelist_remove')
        response = self.client.post(url, {'metrics': ['a.b.c.d']})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "OK")

        url = reverse('whitelist_show')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "e.f.g.h")

    def test_whitelist_remove_missing(self):
        # Removing an absent metric succeeds and leaves the list unchanged.
        self.create_whitelist()
        self.addCleanup(self.wipe_whitelist)
        url = reverse('whitelist_remove')
        response = self.client.post(url, {'metrics': ['i.j.k.l']})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "OK")

        url = reverse('whitelist_show')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "a.b.c.d\ne.f.g.h")

    def test_save_whitelist(self):
        try:
            os.makedirs(settings.WHITELIST_FILE.replace('whitelist', ''))
        except OSError:
            pass
        self.addCleanup(self.wipe_whitelist)
        self.assertEqual(save_whitelist({'a.b.c.d', 'e.f.g.h'}), None)
        self.assertEqual(load_whitelist(), {'a.b.c.d', 'e.f.g.h'})

    @mock.patch('os.rename')
    def test_save_whitelist_rename_failure(self, rename):
        # save_whitelist writes to a temp file then renames; a failed
        # rename must propagate as OSError.
        self.addCleanup(self.wipe_whitelist)
        rename.side_effect = OSError(errno.EPERM, 'Operation not permitted')
        with self.assertRaises(OSError):
            save_whitelist({'a.b.c.d', 'e.f.g.h'})
| apache-2.0 |
Runbook/runbook | src/web/monitorforms/http-post/__init__.py | 3 | 4753 | """HTTP Post Form"""
from wtforms import SelectMultipleField, TextAreaField, TextField
from wtforms.validators import DataRequired, Optional, URL, ValidationError
from ..datacenter import DatacenterCheckForm
class HeaderList(object):
    """Custom wtforms validator for multi-line "key:value" header fields.

    Every non-empty line of the field must look like an HTTP header,
    i.e. a non-blank key and a non-blank value separated by ':'.
    Raises ValidationError otherwise.
    """

    def __call__(self, form, field):
        for header in str(field.data).splitlines():
            header = header.strip()
            # Ignore empty lines
            if not header:
                continue
            # partition() splits on the *first* ':' only, so header values
            # that legitimately contain ':' (e.g. "Host: example.com:8080")
            # are accepted; the previous split(':') rejected them.
            key, sep, value = header.partition(':')
            # Explicit checks instead of `assert`, which is silently
            # stripped when Python runs with -O and let blank keys/values
            # pass validation.
            if not sep or not key.strip() or not value.strip():
                raise ValidationError('Invalid headers. Use key:value format.')
class CheckForm(DatacenterCheckForm):
    ''' Form for creating the http post monitor '''

    # Display metadata used by the monitor catalogue UI.
    title = "HTTP: Post"
    description = """
    This monitor will perform an HTTP POST request and either validate the status code, headers or response content. This Monitor is a useful for monitoring other services and web applications.
    """

    # NOTE(review): this binds the *parent's* placeholder dict and mutates it
    # in place, so the update is shared with DatacenterCheckForm and any
    # sibling form — confirm that is intentional.
    placeholders = DatacenterCheckForm.placeholders
    placeholders.update({
        'payload' : 'POST Data',
        'response_regex' : '.*[s|S]uccess.*',
        'response_headers' : 'header:value',
    })

    # (code, label) pairs for the HTTP status-code multi-select below.
    choices = [
        ("100", '100 - Continue'),
        ("101", '101 - Switching protocols'),
        ("200", '200 - Successful'),
        ("201", '201 - Created'),
        ("202", '202 - Accepted'),
        ("203", '203 - Non-authoritative information'),
        ("204", '204 - No content'),
        ("205", '205 - Reset content'),
        ("206", '206 - Partial content'),
        ("300", '300 - Multiple choices'),
        ("301", '301 - Move permanently'),
        ("302", '302 - Moved temporarily'),
        ("303", '303 - See other location'),
        ("304", '304 - Not Modified'),
        ("305", '305 - Use proxy'),
        ("307", '307 - Temporary redirect'),
        ("400", '400 - Bad request'),
        ("401", '401 - Not authorized'),
        ("403", '403 - Forbidden'),
        ("404", '404 - Not found'),
        ("405", '405 - Method not allowed'),
        ("406", '406 - Not acceptable'),
        ("407", '407 - Proxy authentication required'),
        ("408", '408 - Request timeout'),
        ("409", '409 - Conflict'),
        ("410", '410 - Gone'),
        ("411", '411 - Length required'),
        ("412", '412 - Precondition failed'),
        ("413", '413 - Request entity too large'),
        ("414", '414 - Requested URI is too long'),
        ("415", '415 - Unsupported media type'),
        ("416", '416 - Requested range not satisfiable'),
        ("417", '417 - Expectation failed'),
        ("500", '500 - Internal server error'),
        ("501", '501 - Not implemented'),
        ("502", '502 - Bad gateway'),
        ("503", '503 - Service unavailable'),
        ("504", '504 - Gateway timeout'),
        ("505", '505 - HTTP version not supported')
    ]

    # Target URL of the POST request (required, must parse as a URL).
    url = TextField(
        'URL',
        description="""
        The web address you wish to send the POST request to
        """,
        validators=[DataRequired(message='URL is a required field.'),
                    URL(message='Must be a url such as "https://127.0.0.1"')])

    # Host header, allowing the check to address a named vhost.
    host = TextField(
        'Host Header',
        description="""
        The Host header used to address a specific domain even when the URL is to another domain or IP address
        """,
        validators=[DataRequired(message='Host header is a required field')])

    # Optional request body sent with the POST.
    payload = TextAreaField(
        'POST Data',
        description="""
        This section contains the POST data to be used when the monitor makes a request
        """,
        validators=[Optional()])

    # Extra request headers, one "key:value" per line (see HeaderList).
    extra_headers = TextAreaField(
        'Additional Headers',
        description="""
        Use this field to add additional HTTP headers. Values are in a : seperated Key:Value format.
        """,
        validators=[HeaderList()])

    # Acceptable response status codes; empty means "don't check".
    status_codes = SelectMultipleField(
        'HTTP Status Codes',
        description="""
        Select the desired HTTP Status Codes
        """,
        choices=choices,
        validators=[Optional()])

    # Optional keyword/regex that must match the response body.
    response_regex = TextField(
        'Keyword',
        description="""
        If defined this field will search for the specified keyword or regular expression
        """,
        validators=[Optional()])

    # Headers that must be present in the response, "key:value" per line.
    response_headers = TextAreaField(
        'Response Headers',
        description="""
        Validate the specified headers are provided in the HTTP response
        """,
        validators=[HeaderList()])
if __name__ == '__main__':
    # Import-only module: nothing to run directly.
    pass
| apache-2.0 |
Skycker/watermarker | setup.py | 2 | 1345 | import os
from distutils.core import setup
import watermarker
with open('pypi_doc.txt') as file:
long_description = file.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-watermarker',
version=watermarker.__version__,
packages=[
'watermarker',
'watermarker.templatetags',
'watermarker.locale',
'watermarker.locale.ru',
'watermarker.locale.ru.LC_MESSAGES',
],
url='https://github.com/Skycker/watermarker',
license='BSD License',
author='Kirill Kostyukhin',
description='A tool for easy working with watermarks in django projects',
long_description=long_description,
keywords="django, watermark, watermarks, watermarker, image",
install_requires=['pillow'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Multimedia :: Graphics",
"Topic :: Internet :: WWW/HTTP",
],
)
| bsd-3-clause |
GenericStudent/home-assistant | homeassistant/components/climate/device_condition.py | 9 | 3833 | """Provide the device automations for Climate."""
from typing import Dict, List
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import DOMAIN, const
# Condition types this integration offers for climate entities.
CONDITION_TYPES = {"is_hvac_mode", "is_preset_mode"}

# Config schema for the "is_hvac_mode" device condition.
HVAC_MODE_CONDITION = DEVICE_CONDITION_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Required(CONF_TYPE): "is_hvac_mode",
        vol.Required(const.ATTR_HVAC_MODE): vol.In(const.HVAC_MODES),
    }
)

# Config schema for the "is_preset_mode" device condition.
PRESET_MODE_CONDITION = DEVICE_CONDITION_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Required(CONF_TYPE): "is_preset_mode",
        vol.Required(const.ATTR_PRESET_MODE): str,
    }
)

# A condition config must match exactly one of the two schemas above.
CONDITION_SCHEMA = vol.Any(HVAC_MODE_CONDITION, PRESET_MODE_CONDITION)
async def async_get_conditions(
    hass: HomeAssistant, device_id: str
) -> List[Dict[str, str]]:
    """List device conditions for Climate devices."""
    registry = await entity_registry.async_get_registry(hass)
    conditions = []

    # Every climate entity on the device gets the hvac-mode condition; the
    # preset-mode condition is only offered when the entity advertises
    # SUPPORT_PRESET_MODE in its supported_features attribute.
    for entry in entity_registry.async_entries_for_device(registry, device_id):
        if entry.domain != DOMAIN:
            continue

        def _make(condition_type):
            return {
                CONF_CONDITION: "device",
                CONF_DEVICE_ID: device_id,
                CONF_DOMAIN: DOMAIN,
                CONF_ENTITY_ID: entry.entity_id,
                CONF_TYPE: condition_type,
            }

        state = hass.states.get(entry.entity_id)
        conditions.append(_make("is_hvac_mode"))
        if state and state.attributes[ATTR_SUPPORTED_FEATURES] & const.SUPPORT_PRESET_MODE:
            conditions.append(_make("is_preset_mode"))

    return conditions
@callback
def async_condition_from_config(
    config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
    """Create a function to test a device condition."""
    if config_validation:
        config = CONDITION_SCHEMA(config)
    if config[CONF_TYPE] == "is_hvac_mode":
        attribute = const.ATTR_HVAC_MODE
    else:
        attribute = const.ATTR_PRESET_MODE

    def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
        """Test if the entity's attribute matches the configured value."""
        state = hass.states.get(config[ATTR_ENTITY_ID])
        # Fix: the original returned `state and ...`, which yields None (not
        # the annotated bool) when the entity does not exist.
        return state is not None and state.attributes.get(attribute) == config[attribute]

    return test_is_state
async def async_get_condition_capabilities(hass, config):
    """List condition capabilities (extra form fields for the frontend)."""
    state = hass.states.get(config[CONF_ENTITY_ID])
    condition_type = config[CONF_TYPE]

    fields = {}

    if condition_type == "is_hvac_mode":
        hvac_modes = state.attributes[const.ATTR_HVAC_MODES] if state else []
        fields[vol.Required(const.ATTR_HVAC_MODE)] = vol.In(hvac_modes)
    elif condition_type == "is_preset_mode":
        if state:
            preset_modes = state.attributes.get(const.ATTR_PRESET_MODES, [])
        else:
            preset_modes = []
        # Fix: the extra-fields key must match the schema key used by
        # PRESET_MODE_CONDITION (singular ATTR_PRESET_MODE). The original
        # advertised the plural ATTR_PRESET_MODES, so the frontend produced
        # a field the condition schema then rejected.
        fields[vol.Required(const.ATTR_PRESET_MODE)] = vol.In(preset_modes)

    return {"extra_fields": vol.Schema(fields)}
| apache-2.0 |
AlexMooney/python-pptx | pptx/parts/presentation.py | 4 | 6168 | # encoding: utf-8
"""
Presentation part, the main part in a .pptx package.
"""
from __future__ import absolute_import
from warnings import warn
from ..opc.constants import RELATIONSHIP_TYPE as RT
from ..opc.package import XmlPart
from ..opc.packuri import PackURI
from .slide import Slide
from ..util import lazyproperty
class PresentationPart(XmlPart):
    """
    Top level class in the object model; corresponds to the contents of the
    /ppt directory of a .pptx package.
    """
    @property
    def sldMasterIdLst(self):
        """
        The ``<p:sldMasterIdLst>`` child element specifying the slide masters
        of this presentation in the XML.
        """
        return self._element.get_or_add_sldMasterIdLst()

    @lazyproperty
    def slide_masters(self):
        """
        Sequence of |SlideMaster| objects belonging to this presentation.
        """
        return _SlideMasters(self)

    @property
    def slidemasters(self):
        """
        Deprecated. Use ``.slide_masters`` property instead.
        """
        warn(
            'Presentation.slidemasters property is deprecated. Use .slide_ma'
            'sters instead.',
            UserWarning,
            stacklevel=2,
        )
        return self.slide_masters

    @property
    def slide_height(self):
        """
        Height of slides in this presentation, in English Metric Units (EMU).
        """
        return self._element.sldSz.cy

    @slide_height.setter
    def slide_height(self, height):
        self._element.sldSz.cy = height

    @property
    def slide_width(self):
        """
        Width of slides in this presentation, in English Metric Units (EMU).
        """
        return self._element.sldSz.cx

    @slide_width.setter
    def slide_width(self, width):
        self._element.sldSz.cx = width

    @lazyproperty
    def slides(self):
        """
        |_Slides| object containing the slides in this presentation.
        """
        collection = _Slides(self._element.get_or_add_sldIdLst(), self)
        collection.rename_slides()  # start from known state
        return collection
class _Slides(object):
    """
    Ordered collection of the |Slide| parts in a presentation, with list
    semantics: indexed access, len(), and iteration.
    """
    def __init__(self, sldIdLst, prs):
        super(_Slides, self).__init__()
        self._sldIdLst = sldIdLst
        self._prs = prs

    def __getitem__(self, idx):
        """Return the slide part at *idx*, e.g. ``slides[0]``."""
        if idx >= len(self._sldIdLst):
            raise IndexError('slide index out of range')
        return self._prs.related_parts[self._sldIdLst[idx].rId]

    def __iter__(self):
        """Generate each slide part, in document order."""
        related = self._prs.related_parts
        for sldId in self._sldIdLst:
            yield related[sldId.rId]

    def __len__(self):
        """Return the number of slides, supporting ``len(slides)``."""
        return len(self._sldIdLst)

    def add_slide(self, slidelayout):
        """
        Return a newly added slide that inherits layout from *slidelayout*.
        """
        slide = Slide.new(
            slidelayout, self._next_partname, self._prs.package
        )
        self._sldIdLst.add_sldId(self._prs.relate_to(slide, RT.SLIDE))
        return slide

    def rename_slides(self):
        """
        Assign sequential partnames (``/ppt/slides/slideN.xml``, N starting
        at 1, extension always ``.xml``) to all slides in the collection.
        """
        for seq, slide in enumerate(self, start=1):
            slide.partname = PackURI('/ppt/slides/slide%d.xml' % seq)

    @property
    def _next_partname(self):
        """
        |PackURI| partname for the next slide to be appended, e.g.
        ``/ppt/slides/slide9.xml`` when the collection holds 8 slides.
        """
        return PackURI('/ppt/slides/slide%d.xml' % (len(self) + 1))
class _SlideMasters(object):
    """
    Collection of the |SlideMaster| parts in a presentation, with list
    semantics: indexed access, len(), and iteration.
    """
    def __init__(self, presentation):
        super(_SlideMasters, self).__init__()
        self._presentation = presentation

    def __getitem__(self, idx):
        """Return the slide master part at *idx*, e.g. ``slide_masters[2]``."""
        sldMasterIds = self._sldMasterIdLst.sldMasterId_lst
        if idx >= len(sldMasterIds):
            raise IndexError('slide master index out of range')
        return self._presentation.related_parts[sldMasterIds[idx].rId]

    def __iter__(self):
        """Generate each |SlideMaster| part, in document order."""
        parts = self._presentation.related_parts
        for rId in self._iter_rIds():
            yield parts[rId]

    def __len__(self):
        """Return the number of slide masters, supporting ``len()``."""
        return len(self._sldMasterIdLst)

    def _iter_rIds(self):
        """Generate the rId of each slide master, in sequence."""
        for sldMasterId in self._sldMasterIdLst.sldMasterId_lst:
            yield sldMasterId.rId

    @property
    def _sldMasterIdLst(self):
        """
        The ``<p:sldMasterIdLst>`` element listing the slide masters; a
        child of the ``<p:presentation>`` root element.
        """
        return self._presentation.sldMasterIdLst
| mit |
bulax41/Commands | scripts/mcast_listen.py | 1 | 2340 | #!/bin/python
import socket
import struct
import sys
import signal
import time
import datetime
import argparse
import threading
class McastSocket(socket.socket):
    """UDP socket pre-configured for receiving multicast traffic.

    Python 2 module (print statements elsewhere in the file).
    """

    def __init__(self, local_port='', reuse=False):
        socket.socket.__init__(self, socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        if(reuse):
            # Allow several listeners on the same port (SO_REUSEPORT where
            # the platform provides it).
            self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            if hasattr(socket, "SO_REUSEPORT"):
                self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        # 8 MiB receive buffer to absorb market-data bursts.
        self.setsockopt(socket.SOL_SOCKET,socket.SO_RCVBUF,8388608)
        self.bind(('', local_port))

    def mcast_add(self, addr, iface):
        """Join multicast group *addr* on the interface whose IP is *iface*."""
        self.setsockopt(
            socket.IPPROTO_IP,
            socket.IP_ADD_MEMBERSHIP,
            socket.inet_aton(addr) + socket.inet_aton(iface))
def signal_handler(signal, frame):
    """SIGINT handler: tell worker threads to stop, then exit."""
    global estop
    estop.set()
    sys.exit(0)
def join_group(group,args,event):
    """Worker thread: join one "IP:Port" multicast group and count packets.

    Increments the shared global ``count[group]`` for every datagram
    received until *event* is set.
    """
    global count
    (mcast_group,mcast_port) = group.split(":")
    sock = McastSocket(local_port=int(mcast_port),reuse=1)
    sock.mcast_add(mcast_group, args.interface)
    stime= datetime.datetime.now()
    print "Joining %s:%s at %s" % (mcast_group,mcast_port,stime.strftime("%b %d %Y %X.%f"))
    while not event.isSet():
        # NOTE(review): recvfrom() blocks, so the event is only re-checked
        # after the next packet arrives — shutdown can hang on a quiet group.
        msg,source = sock.recvfrom(1500)
        count[group] += 1
    print "Exiting Group %s... %s" % (group,datetime.datetime.now().strftime("%b %d %Y %X.%f"))
def main():
    """Parse arguments, spawn one listener thread per group, report counts."""
    parser = argparse.ArgumentParser(description='Subscribe and decode multicast for CME or LMAX')
    parser.add_argument('-g', '--group',action="append",required=True,help="Group to join in IP:Port format, may be used more than once")
    parser.add_argument('-i','--interface',required=True,help="IP address of the Interface to join on")
    parser.add_argument('-q','--quiet',action="count",help="Do not print packet count")
    args = parser.parse_args()
    # Shared state used by the worker threads and the SIGINT handler.
    global estop, count
    count = {}
    signal.signal(signal.SIGINT, signal_handler)
    estop = threading.Event()
    threads = []
    for group in args.group:
        count[group] = 0
        t = threading.Thread(target=join_group, args=(group,args,estop))
        threads.append(t)
        t.start()
    # Re-print the per-group packet counters on one status line each second.
    while True:
        time.sleep(1)
        for c,v in count.items():
            print "%s: %s" % (c,v),
        print "\r",


if __name__ == '__main__':
    main()
| gpl-3.0 |
asanka-code/RIOT | tests/lwip/tests/01-run.py | 24 | 9890 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Martine Lenders <mail@martine-lenders.eu>
#
# Distributed under terms of the MIT license.
from __future__ import print_function
import argparse
import os, sys
import random
import pexpect
import subprocess
import time
import types
DEFAULT_TIMEOUT = 5
class Strategy(object):
    """Base class for pluggable actions; an optional *func* overrides
    execute()."""

    def __init__(self, func=None):
        if func != None:
            # NOTE(review): this rebinds execute on the *class*, not the
            # instance, so it affects every instance of the same strategy
            # class — confirm that is intended.
            if sys.version_info < (3,):
                # Python 2 MethodType needs the class for binding.
                self.__class__.execute = types.MethodType(func, self, self.__class__)
            else:
                self.__class__.execute = types.MethodType(func, self)

    def execute(self, *args, **kwargs):
        """Perform the action; subclasses or *func* must override."""
        raise NotImplementedError()
class ApplicationStrategy(Strategy):
    """Strategy bound to a RIOT application directory."""

    def __init__(self, app_dir=os.getcwd(), func=None):
        # NOTE(review): os.getcwd() is evaluated once at class-definition
        # time, so the default is the import-time working directory.
        super(ApplicationStrategy, self).__init__(func)
        self.app_dir = app_dir
class BoardStrategy(Strategy):
    """Strategy that runs a `make` target for a specific board."""

    def __init__(self, board, func=None):
        super(BoardStrategy, self).__init__(func)
        self.board = board

    def _run_make(self, application, make_targets, env=None):
        """Run ``make -C <application> <targets>`` with a merged environment.

        Merge order (later wins): process env < caller-supplied *env* <
        board-specific variables (BOARD/PORT/SERIAL).

        Renamed from the double-underscore ``__run_make``: subclasses called
        ``super(Sub, self).__run_make(...)``, which name-mangles to
        ``_Sub__run_make`` and raised AttributeError at runtime.
        """
        cmd_env = os.environ.copy()
        if env is not None:
            # Fix: the original rebound the `env` parameter to the copy and
            # then updated the copy with itself, silently discarding the
            # caller-supplied environment.
            cmd_env.update(env)
        cmd_env.update(self.board.to_env())
        cmd = ("make", "-C", application) + make_targets
        print(' '.join(cmd))
        print(subprocess.check_output(cmd, env=cmd_env))

    def execute(self, application):
        super(BoardStrategy, self).execute(application)


class CleanStrategy(BoardStrategy):
    """`make -B clean` for the board."""

    def execute(self, application, env=None):
        self._run_make(application, ("-B", "clean"), env)


class BuildStrategy(BoardStrategy):
    """`make all` for the board."""

    def execute(self, application, env=None):
        self._run_make(application, ("all",), env)


class FlashStrategy(BoardStrategy):
    """Flash the board.

    NOTE(review): runs the `all` target, same as BuildStrategy — confirm
    `flash` was not intended here.
    """

    def execute(self, application, env=None):
        self._run_make(application, ("all",), env)


class ResetStrategy(BoardStrategy):
    """`make reset` for the board."""

    def execute(self, application, env=None):
        self._run_make(application, ("reset",), env)
class Board(object):
    """A RIOT board (physical or native) plus its make-strategy objects."""

    def __init__(self, name, port=None, serial=None, clean=None,
                 build=None, flash=None,
                 reset=None, term=None):
        def _reset_native_execute(obj, application, env=None, *args, **kwargs):
            # Resetting is a no-op for the native board.
            pass

        if (name == "native") and (reset is None):
            reset = _reset_native_execute

        self.name = name
        self.port = port
        self.serial = serial
        self.clean_strategy = CleanStrategy(self, clean)
        self.build_strategy = BuildStrategy(self, build)
        self.flash_strategy = FlashStrategy(self, flash)
        self.reset_strategy = ResetStrategy(self, reset)

    def __len__(self):
        return 1

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; a Board iterates as an empty sequence
        # beyond itself. NOTE(review): Python 3 would need __next__ —
        # confirm Boards are actually iterated anywhere.
        raise StopIteration()

    def __repr__(self):
        return ("<Board %s,port=%s,serial=%s>" %
                (repr(self.name), repr(self.port), repr(self.serial)))

    def to_env(self):
        """Return the BOARD/PORT/SERIAL environment variables for make."""
        env = {}
        if self.name:
            env['BOARD'] = self.name
        if self.port:
            env['PORT'] = self.port
        if self.serial:
            env['SERIAL'] = self.serial
        return env

    # NOTE(review): os.getcwd() defaults below are evaluated at import time.
    def clean(self, application=os.getcwd(), env=None):
        # Fix: this previously delegated to build_strategy, so clean()
        # actually *built* the application instead of cleaning it.
        self.clean_strategy.execute(application, env)

    def build(self, application=os.getcwd(), env=None):
        self.build_strategy.execute(application, env)

    def flash(self, application=os.getcwd(), env=None):
        self.flash_strategy.execute(application, env)

    def reset(self, application=os.getcwd(), env=None):
        self.reset_strategy.execute(application, env)
class BoardGroup(object):
    """A fixed set of boards that are cleaned/built/flashed/reset together."""

    def __init__(self, boards):
        self.boards = boards

    def __len__(self):
        return len(self.boards)

    def __iter__(self):
        return iter(self.boards)

    def __repr__(self):
        return str(self.boards)

    def clean(self, application=os.getcwd(), env=None):
        for member in self.boards:
            member.clean(application, env)

    def build(self, application=os.getcwd(), env=None):
        for member in self.boards:
            member.build(application, env)

    def flash(self, application=os.getcwd(), env=None):
        for member in self.boards:
            member.flash(application, env)

    def reset(self, application=os.getcwd(), env=None):
        for member in self.boards:
            member.reset(application, env)
def default_test_case(board_group, application, env=None):
    """Spawn `make term` for each board and wait for "TEST: SUCCESS".

    Fix: the original rebound the `env` parameter to os.environ.copy() and
    then updated that copy with itself, silently discarding any environment
    the caller passed in.
    """
    for board in board_group:
        spawn_env = os.environ.copy()
        if env is not None:
            spawn_env.update(env)
        spawn_env.update(board.to_env())
        with pexpect.spawn("make", ["-C", application, "term"], env=spawn_env,
                           timeout=DEFAULT_TIMEOUT,
                           logfile=sys.stdout) as spawn:
            spawn.expect("TEST: SUCCESS")
class TestStrategy(ApplicationStrategy):
    """Run every test case against every board group of the application."""

    # Default is a tuple: a mutable default list is a classic pitfall.
    def execute(self, board_groups, test_cases=(default_test_case,),
                timeout=DEFAULT_TIMEOUT, env=None):
        # NOTE(review): `timeout` is currently unused — confirm whether it
        # should be forwarded to the test cases.
        for board_group in board_groups:
            print("Testing for %s: " % board_group)
            for test_case in test_cases:
                board_group.reset()
                # Fix: the original passed env=None here, discarding the
                # `env` argument given to execute().
                test_case(board_group, self.app_dir, env=env)
                sys.stdout.write('.')
                sys.stdout.flush()
            print()
def get_ipv6_address(spawn):
    """Return the link-local IPv6 address of the node behind *spawn*."""
    spawn.sendline(u"ifconfig")
    # Matches e.g. "7: inet6 fe80::..." in the shell's ifconfig output.
    spawn.expect(u"[A-Za-z0-9]{2}[0-9]+: inet6 (fe80::[0-9a-f:]+)")
    return spawn.match.group(1)
def test_ipv6_send(board_group, application, env=None):
    """Send a raw IPv6 packet from board 0 to board 1 and verify the dump."""
    env_sender = os.environ.copy()
    if env != None:
        env_sender.update(env)
    env_sender.update(board_group.boards[0].to_env())
    env_receiver = os.environ.copy()
    if env != None:
        env_receiver.update(env)
    env_receiver.update(board_group.boards[1].to_env())
    with pexpect.spawn("make", ["-C", application, "term"], env=env_sender,
                       timeout=DEFAULT_TIMEOUT) as sender, \
            pexpect.spawn("make", ["-C", application, "term"], env=env_receiver,
                          timeout=DEFAULT_TIMEOUT) as receiver:
        # Random next-header value so the test exercises arbitrary protocols.
        ipprot = random.randint(0x00, 0xff)
        receiver_ip = get_ipv6_address(receiver)
        receiver.sendline(u"ip server start %d" % ipprot)
        # wait for neighbor discovery to be done
        time.sleep(5)
        sender.sendline(u"ip send %s %d 01:23:45:67:89:ab:cd:ef" % (receiver_ip, ipprot))
        sender.expect_exact(u"Success: send 8 byte to %s (next header: %d)" %
                            (receiver_ip, ipprot))
        # Hexdump of the received IPv6 header (version, length, next header)
        # followed by source/destination addresses and the 8-byte payload.
        receiver.expect(u"000000 60 00 00 00 00 08 %s ff fe 80 00 00 00 00 00 00" % hex(ipprot)[2:])
        receiver.expect(u"000010( [0-9a-f]{2}){8} fe 80 00 00 00 00 00 00")
        receiver.expect(u"000020( [0-9a-f]{2}){8} 01 23 45 67 89 ab cd ef")
def test_udpv6_send(board_group, application, env=None):
    """Send a UDP/IPv6 datagram from board 0 to board 1 and verify receipt."""
    env_sender = os.environ.copy()
    if env != None:
        env_sender.update(env)
    env_sender.update(board_group.boards[0].to_env())
    env_receiver = os.environ.copy()
    if env != None:
        env_receiver.update(env)
    env_receiver.update(board_group.boards[1].to_env())
    with pexpect.spawn("make", ["-C", application, "term"], env=env_sender,
                       timeout=DEFAULT_TIMEOUT) as sender, \
            pexpect.spawn("make", ["-C", application, "term"], env=env_receiver,
                          timeout=DEFAULT_TIMEOUT) as receiver:
        # Random destination port for each run.
        port = random.randint(0x0000, 0xffff)
        receiver_ip = get_ipv6_address(receiver)
        receiver.sendline(u"udp server start %d" % port)
        # wait for neighbor discovery to be done
        time.sleep(5)
        sender.sendline(u"udp send %s %d ab:cd:ef" % (receiver_ip, port))
        sender.expect_exact(u"Success: send 3 byte to [%s]:%d" %
                            (receiver_ip, port))
        receiver.expect(u"000000 ab cd ef")
def test_dual_send(board_group, application, env=None):
    """Exercise UDP and raw-IP servers on the same receiver concurrently."""
    env_sender = os.environ.copy()
    if env != None:
        env_sender.update(env)
    env_sender.update(board_group.boards[0].to_env())
    env_receiver = os.environ.copy()
    if env != None:
        env_receiver.update(env)
    env_receiver.update(board_group.boards[1].to_env())
    with pexpect.spawn("make", ["-C", application, "term"], env=env_sender,
                       timeout=DEFAULT_TIMEOUT) as sender, \
            pexpect.spawn("make", ["-C", application, "term"], env=env_receiver,
                          timeout=DEFAULT_TIMEOUT) as receiver:
        port = random.randint(0x0000, 0xffff)
        ipprot = random.randint(0x00, 0xff)
        receiver_ip = get_ipv6_address(receiver)
        # Start both servers before sending anything.
        receiver.sendline(u"ip server start %d" % ipprot)
        receiver.sendline(u"udp server start %d" % port)
        # wait for neighbor discovery to be done
        time.sleep(5)
        sender.sendline(u"udp send %s %d 01:23" % (receiver_ip, port))
        sender.expect_exact(u"Success: send 2 byte to [%s]:%d" %
                            (receiver_ip, port))
        receiver.expect(u"000000 01 23")
        sender.sendline(u"ip send %s %d 01:02:03:04" % (receiver_ip, ipprot))
        sender.expect_exact(u"Success: send 4 byte to %s (next header: %d)" %
                            (receiver_ip, ipprot))
        receiver.expect(u"000000 60 00 00 00 00 04 %s ff fe 80 00 00 00 00 00 00" % hex(ipprot)[2:])
        receiver.expect(u"000010( [0-9a-f]{2}){8} fe 80 00 00 00 00 00 00")
        receiver.expect(u"000020( [0-9a-f]{2}){8} 01 02 03 04")
if __name__ == "__main__":
    # TERMFLAGS may not be set when this script is invoked outside the RIOT
    # build system; pop() avoids the KeyError that `del` raised in that case.
    os.environ.pop('TERMFLAGS', None)
    TestStrategy().execute([BoardGroup((Board("native", "tap0"),
                                        Board("native", "tap1")))],
                           [test_ipv6_send, test_udpv6_send, test_dual_send])
| lgpl-2.1 |
PatrikValkovic/grammpy | tests/transformations_test/RemoveEpsilonRules/Reverse/SimpleTest.py | 1 | 1825 | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 16:01
:Licence MIT
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
from grammpy.parsers import cyk
from grammpy.transforms import ContextFree, InverseContextFree
# Nonterminals of the test grammar.
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass


# Production rules (terminals are the integers 0-3).
class RuleS0B(Rule): rule = ([S], [0, B])   # S -> 0 B
class RuleA1B(Rule): rule = ([A], [1, B])   # A -> 1 B
class RuleAEps(Rule): rule = ([A], [EPS])   # A -> epsilon
class RuleBEps(Rule): rule = ([B], [EPS])   # B -> epsilon
class RuleB1C(Rule): rule = ([B], [2, C])   # B -> 2 C
class RuleC11(Rule): rule = ([C], [3, 3])   # C -> 3 3
"""
S->0B A->1B A->eps B->eps B->2C C->33
ToEpsilon: A,B
S->0B A->1B A->eps B->eps B->2C C->33 S->0 A->1
            ------ ------             ++++ ++++
"""
class SimpleTest(TestCase):
    """End-to-end check of epsilon-rule removal and its inverse.

    Transforms the grammar (epsilon removal -> unit-rule removal -> CNF),
    parses with CYK, inverts all three transforms, and inspects the tree.
    """

    def test_simpleTest(self):
        g = Grammar(terminals=[0, 1, 2, 3],
                    nonterminals=[S, A, B, C],
                    rules=[RuleS0B, RuleA1B, RuleAEps, RuleBEps, RuleB1C, RuleC11],
                    start_symbol=S)
        gr = ContextFree.transform_to_chomsky_normal_form(ContextFree.remove_unit_rules(ContextFree.remove_rules_with_epsilon(g)))
        # Parse the single-terminal input [0]: derivation S -> 0 B, B -> eps.
        pars = cyk(gr, [0])
        s = InverseContextFree.epsilon_rules_restore(InverseContextFree.unit_rules_restore(InverseContextFree.transform_from_chomsky_normal_form(pars)))
        self.assertIsInstance(s, S)
        self.assertIsInstance(s.to_rule, RuleS0B)
        self.assertIsInstance(s.to_rule.to_symbols[0], Terminal)
        self.assertEqual(s.to_rule.to_symbols[0].s, 0)
        # The restored B must derive epsilon via the original RuleBEps.
        b = s.to_rule.to_symbols[1]
        self.assertIsInstance(b, B)
        self.assertIsInstance(b.to_rule, RuleBEps)
        self.assertIs(b.to_rule.to_symbols[0].s, EPS)
if __name__ == '__main__':
    main()  # unittest.main — run this module's tests
| mit |
McIntyre-Lab/papers | newman_events_2017/python_workflow/programs/build_intron2border_junction_index.py | 1 | 5945 | #!/usr/bin/env python3
#######################################################################################################################
#
# DATE: 2017-12-15
# NAME: build_Event2Transcript_index.py
# AUTHOR: Jeremy R. B. Newman (jrbnewman@ufl.edu)
#
# DESCRIPTION: This script creates an intron-to-border junction index file used by Event Analysis to report
# the read coverage of introns, their associated border junctions and flanking exonic regions (fusions), to aid
# the user in deciding whether there is evidence on intron retention, alternative/novel splice usage, etc.
# It takes the annotation CSVs for junctions, exonic regions and introns to assemble a complete intron/border index,
# where each border junction and intron are assigned to a single intron event, flanked by its neighboring
# exonic regions. Where the exonic regions of intron events can be assigned to multiple genes, then the output of this
# intron event is suppressed, to avoid instances of overlapping intron events.
#
# REQUIRED PACKAGES: pandas (tested with v0.19.2)
# argparse (tested with v1.1)
# logging (tested with v0.5.1.2)
#
#######################################################################################################################
# Import required packages
import pandas as pd
import logging
import argparse
import sqlite3
def getOptions():
    """Parse the command-line arguments and return the populated namespace."""
    arg_parser = argparse.ArgumentParser(
        description="Generate an intron-to-border-junction index file for"
                    "interpreting read coverage of intronic regions")
    arg_parser.add_argument(
        '--intron-annotation-file', dest="inIntrons", required=True,
        help="Input intron annotation CSV")
    arg_parser.add_argument(
        "--junction-annotation-file", dest="inJunctions", required=True,
        help="Input junction annotation CSV")
    arg_parser.add_argument(
        "--output-intron-index", dest="outCSV", required=True,
        help="Output event index CSV")
    return arg_parser.parse_args()
def main():
    """Build the intron-to-border-junction index and write it as CSV.

    Reads the intron and junction annotation CSVs named by the module-global
    ``args`` (set in the ``__main__`` block), maps border junctions onto the
    5' and 3' ends of each intron within the same chromosome and gene, and
    writes one row per intron (with both flanking exonic regions and border
    junctions) to ``args.outCSV``.
    """
    # Use a named logger so main() also works when the module is imported:
    # the script-level ``logger`` only exists when run as __main__ (the
    # original raised NameError here if main() was called after import).
    logger = logging.getLogger(__name__)
    # Scratch in-memory SQLite database used for the joins below.
    con = sqlite3.connect(":memory:")
    try:
        cur = con.cursor()
        # Import intron and junction annotations
        logger.info("Importing intron and junction annotations")
        intronDF = pd.read_csv(args.inIntrons, usecols=('intron_id','chr','intron_start','intron_stop','gene_id',
                                                        'exonic_region_id_5prime','exonic_region_id_3prime'))
        juncDF = pd.read_csv(args.inJunctions, usecols=('junction_id','chr','donor_stop','acceptor_start','transcript_id',
                                                        'gene_id','flag_border_junction'))
        # Convert to SQL tables
        intronDF.to_sql("intronInfo", con, if_exists="replace")
        juncDF.to_sql("juncInfo", con, if_exists="replace")
        # So border junctions and introns can be merged, donor_stop and
        # acceptor_start need to be renamed to intron_start and intron_stop
        # respectively:
        #   - when the "donor exon" is an intron, donor_stop = intron_stop
        #   - when the "acceptor exon" is an intron, acceptor_start = intron_start
        # 5' border junctions are mapped to the 5' end of introns first, then
        # 3' border junctions to the 3' end.
        # Expand concatenated ("|"-delimited) gene IDs. Junctions with multiple
        # gene IDs shouldn't be retained in the final output, but iterate over
        # these for completeness.
        cur.execute("""Select junction_id, chr , donor_stop , acceptor_start , gene_id from juncInfo WHERE flag_border_junction = 1""")
        allBorders = cur.fetchall()
        cur.execute("""CREATE TABLE IF NOT EXISTS borderInfo
                       (junction_id TEXT, chr TEXT, donor_stop INT, acceptor_start INT, gene_id TEXT)""")
        for border in allBorders:
            # border columns: 0=junction_id, 1=chr, 2=donor_stop,
            # 3=acceptor_start, 4=gene_id ("|"-delimited list)
            genes = border[4].split("|")
            for gn in genes:
                cur.execute("INSERT INTO borderInfo VALUES(:junction_id, :chr, :donor_stop, :acceptor_start, :gene_id)",
                            {"junction_id": border[0], "chr": border[1], "donor_stop": border[2],
                             "acceptor_start": border[3], "gene_id": gn})
        # Merge INNER with intron table on chromosome, gene, and acceptor_start (as intron_start)
        cur.execute("CREATE TABLE intronWstart AS SELECT in1.intron_id, in1.chr, in1.intron_start, in1.intron_stop, "
                    "in1.gene_id, in1.exonic_region_id_5prime, in2.junction_id AS border_junction_id_5prime "
                    "FROM intronInfo in1 INNER JOIN borderInfo in2 "
                    "ON in1.chr = in2.chr AND in1.gene_id = in2.gene_id AND in1.intron_start = in2.acceptor_start ;")
        # Merge INNER with intron table on chromosome, gene, and donor_stop (as intron_stop)
        cur.execute("CREATE TABLE intronWstop AS SELECT in1.intron_id, in1.chr, in1.gene_id, "
                    "in1.exonic_region_id_3prime, in2.junction_id AS border_junction_id_3prime "
                    "FROM intronInfo in1 INNER JOIN borderInfo in2 "
                    "ON in1.chr = in2.chr AND in1.gene_id = in2.gene_id AND in1.intron_stop = in2.donor_stop ;")
        # Combine the 5' and 3' halves into one row per intron (joined on
        # gene_id and intron_id so both ends belong to the same intron).
        cur.execute("CREATE TABLE intronBorderIndex AS SELECT in1.*, in2.exonic_region_id_3prime,"
                    "in2.border_junction_id_3prime FROM intronWstart in1 "
                    "INNER JOIN intronWstop in2 ON in1.gene_id = in2.gene_id AND in1.intron_id = in2.intron_id ;")
        intronBorderIndexDF = pd.read_sql("SELECT * FROM intronBorderIndex ORDER BY chr, intron_start, intron_stop ;", con)
    finally:
        # Always release the in-memory database (the original leaked the
        # connection handle).
        con.close()
    # Write output index
    with open(args.outCSV, 'w') as outIndex:
        intronBorderIndexDF.to_csv(outIndex, encoding='utf-8', index=False)
if __name__ == '__main__':
    # Parse command line arguments; module-global so main() can read them.
    # (The original had a no-op ``global args`` statement here: ``global``
    # at module scope does nothing.)
    args = getOptions()
    # Configure logging: without a handler/level, the root logger defaults
    # to WARNING and every logger.info() call below is silently dropped.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    logger.info('Starting script')
    # Calling main script
    main()
logger.info('Script complete: index created!') | lgpl-3.0 |
jguyomard/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/generator/gypd.py | 151 | 3320 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# Generator variables that should round-trip unchanged: each one is emitted
# back out as a gyp variable reference.
_generator_identity_variables = [
    'EXECUTABLE_PREFIX',
    'EXECUTABLE_SUFFIX',
    'INTERMEDIATE_DIR',
    'PRODUCT_DIR',
    'RULE_INPUT_ROOT',
    'RULE_INPUT_EXT',
    'RULE_INPUT_NAME',
    'RULE_INPUT_PATH',
    'SHARED_INTERMEDIATE_DIR',
]

# gypd deliberately supplies no default for OS (unlike many generator
# modules); pass "-D OS=whatever" on the command line to provide one.
#
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
generator_default_variables = {
    identity_var: '<(%s)' % identity_var
    for identity_var in _generator_identity_variables
}

# gypd supports multiple toolsets.
generator_supports_multiple_toolsets = True
def GenerateOutput(target_list, target_dicts, data, params):
  """Write one pretty-printed .gypd file per input .gyp file.

  Arguments:
    target_list: qualified target names to process.
    target_dicts: unused here; part of the generator interface.
    data: mapping of input .gyp path to the fully-merged gyp data to dump.
    params: generator params; params['options'].suffix is inserted before
        the '.gypd' extension of each output file.
  """
  # Map each output file to the input .gyp file it is generated from.
  # Several targets may share one input file; record it only once.
  output_files = {}
  for qualified_target in target_list:
    [input_file, target] = \
        gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
    if not input_file.endswith('.gyp'):
      continue
    input_file_stem = input_file[:-4]
    output_file = input_file_stem + params['options'].suffix + '.gypd'
    if output_file not in output_files:
      output_files[output_file] = input_file
  for output_file, input_file in output_files.items():
    # 'with' guarantees the file is closed even if pprint raises
    # (the original leaked the handle on error). items() replaces the
    # Python-2-only iteritems().
    with open(output_file, 'w') as output:
      pprint.pprint(data[input_file], output)
| bsd-3-clause |
tangfeixiong/nova | nova/db/base.py | 64 | 1342 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for classes that need modular database access."""
from oslo_config import cfg
from oslo_utils import importutils
# Configuration option naming the database driver module to load; registered
# on the global CONF below so deployments can override it (Base defaults to
# it when no explicit driver is injected).
db_driver_opt = cfg.StrOpt('db_driver',
                           default='nova.db',
                           help='The driver to use for database access')
CONF = cfg.CONF
CONF.register_opt(db_driver_opt)
class Base(object):
    """DB driver is injected in the init method."""

    def __init__(self, db_driver=None):
        """Import the DB driver module, falling back to CONF.db_driver.

        :param db_driver: optional dotted module path of the driver to use;
            when empty/None, the configured ``db_driver`` option is used.
        """
        super(Base, self).__init__()
        driver_name = db_driver or CONF.db_driver
        self.db = importutils.import_module(driver_name)
| apache-2.0 |
amarant/servo | tests/wpt/css-tests/tools/webdriver/webdriver/command.py | 258 | 3985 | """Dispatches requests to remote WebDriver endpoint."""
import exceptions
import httplib
import json
import urlparse
import webelement
class CommandExecutor(object):
    """Dispatches requests to remote WebDriver endpoint."""
    # Fixed headers sent with every request.  Note "Connection: close":
    # each request/response pair is expected to tear down the connection.
    _HEADERS = {
        "User-Agent": "Python WebDriver Local End",
        "Content-Type": "application/json;charset=\"UTF-8\"",
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "Accept-Encoding": "identity",
        "Connection": "close",
    }
    def __init__(self, url, mode='strict'):
        """Create an executor for the WebDriver endpoint at *url*.

        mode -- 'strict' or 'compatibility'; selects which wire format
            execute() uses (see _execute_strict / _execute_compatibility).
        """
        self._parsed_url = urlparse.urlparse(url)
        self._conn = httplib.HTTPConnection(self._parsed_url.hostname,
                                            self._parsed_url.port)
        self._mode = mode
    def execute(self,
                method,
                path,
                session_id,
                name,
                parameters=None,
                object_hook=None):
        """Execute a command against the WebDriver endpoint.

        Arguments:
        method -- one of GET, POST, DELETE
        path -- the path of the url endpoint (needs to include
            session/<sessionId> if needed)
        session_id -- the sessionId to include in the JSON body
        name -- name of the command that is being executed to include in
            the JSON body
        parameters -- the JSON body to send with the command. Only used if
            method is POST
        object_hook -- function used by json.loads to properly deserialize
            objects in the request
        """
        # Dispatch on the mode chosen at construction time.
        if self._mode == 'strict':
            return self._execute_strict(
                method, path, session_id, name, parameters, object_hook)
        elif self._mode == 'compatibility':
            return self._execute_compatibility(
                method, path, session_id, name, parameters, object_hook)
        else:
            raise Exception("Unknown mode: " + self._mode)
    def _execute_compatibility(self,
                               method,
                               path,
                               session_id,
                               name,
                               parameters,
                               object_hook):
        """Send a command using the legacy (JSON wire protocol) format.

        The request body is flat: sessionId/name plus the parameters merged
        in at the top level.  A numeric 'status' of 0 in the response means
        success; any other value is converted to a WebDriver exception.
        """
        body = {'sessionId': session_id, 'name': name }
        if parameters:
            body.update(parameters)
        self._conn.request(
            method,
            self._parsed_url.path + path,
            json.dumps(body, default = self._json_encode).encode('utf-8'),
            self._HEADERS)
        resp = self._conn.getresponse()
        data = resp.read().decode('utf-8')
        if data:
            data = json.loads(data, object_hook = object_hook)
            if data['status'] != 0:
                raise exceptions.create_webdriver_exception_compatibility(
                    data['status'], data['value']['message'])
            return data
        # Empty body: fall through to the HTTP status code.  NOTE(review):
        # a 2xx response with an empty body returns None implicitly.
        if resp.status < 200 or resp.status > 299:
            raise exceptions.create_webdriver_exception_compatibility(
                resp.status, resp.reason)
    def _execute_strict(self,
                        method,
                        path,
                        session_id,
                        name,
                        parameters,
                        object_hook):
        """Send a command using the strict format.

        Parameters are nested under a 'parameters' key rather than merged
        into the top-level body, and success is signalled by the string
        status 'success' instead of the numeric 0.
        """
        body = {
            'sessionId': session_id,
            'name': name,
            'parameters': parameters }
        self._conn.request(
            method,
            self._parsed_url.path + path,
            json.dumps(body, default = self._json_encode).encode('utf-8'),
            self._HEADERS)
        resp = self._conn.getresponse()
        data = json.loads(
            resp.read().decode('utf-8'), object_hook = object_hook)
        if data['status'] != 'success':
            raise exceptions.create_webdriver_exception_strict(
                data['status'], data['value'])
        return data
    def _json_encode(self, obj):
        # json.dumps 'default' hook: delegate serialization of non-JSON
        # types to the object's own to_json() method.
        return obj.to_json()
| mpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.