repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/django/utils/cache.py | 105 | 11097 | """
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
from __future__ import unicode_literals
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import get_cache
from django.utils.encoding import iri_to_uri, force_bytes, force_text
from django.utils.http import http_date
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
This function patches the Cache-Control header by adding all
keyword arguments to it. The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return '%s=%s' % (t[0], t[1])
if response.has_header('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict([dictitem(el) for el in cc])
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join([dictvalue(el) for el in cc.items()])
response['Cache-Control'] = cc
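# Illustrative sketch (not part of the original module): given a response
# carrying "Cache-Control: max-age=3600, private", the call
#   patch_cache_control(response, max_age=600, stale_while_revalidate=30)
# would clamp max-age to the smaller value and emit True-valued directives
# bare, e.g. "max-age=600, private, stale-while-revalidate=30" (directive
# order may vary, since the header is rebuilt from a dict).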
def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
(or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict([_to_tuple(el) for el in
cc_delim_re.split(response['Cache-Control'])])
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (ValueError, TypeError):
pass
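# Illustrative sketch: for a response whose Cache-Control header is
# "max-age=600, public", get_max_age(response) returns 600; if the header
# is absent or max-age isn't an integer, the function falls through and
# implicitly returns None.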
def _set_response_etag(response):
if not response.streaming:
response['ETag'] = '"%s"' % hashlib.md5(response.content).hexdigest()
return response
def patch_response_headers(response, cache_timeout=None):
"""
Adds some useful headers to the given HttpResponse object:
ETag, Last-Modified, Expires and Cache-Control
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if settings.USE_ETAGS and not response.has_header('ETag'):
if hasattr(response, 'render') and callable(response.render):
response.add_post_render_callback(_set_response_etag)
else:
response = _set_response_etag(response)
if not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date()
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
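# Illustrative sketch: after patch_response_headers(response, cache_timeout=300)
# a response lacking these headers would gain, e.g.:
#   Last-Modified: <now, in RFC 1123 format>
#   Expires: <now + 300 seconds, in RFC 1123 format>
#   Cache-Control: max-age=300
# (an ETag is only added when settings.USE_ETAGS is enabled).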
def add_never_cache_headers(response):
"""
Adds headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set([header.lower() for header in vary_headers])
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
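# Illustrative sketch: with an existing "Vary: Accept-Encoding" header,
#   patch_vary_headers(response, ['Cookie', 'accept-encoding'])
# leaves response['Vary'] == 'Accept-Encoding, Cookie' -- the duplicate is
# dropped case-insensitively while the original casing and ordering of
# existing entries are preserved.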
def has_vary_header(response, header_query):
"""
Checks to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = set([header.lower() for header in vary_headers])
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If necessary, adds the current locale or time zone to the cache key."""
if settings.USE_I18N or settings.USE_L10N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
if settings.USE_TZ:
# The datetime module doesn't restrict the output of tzname().
# Windows is known to use non-standard, locale-dependent names.
# User-defined tzinfo classes may return absolutely anything.
# Hence this paranoid conversion to create a valid cache key.
tz_name = force_text(get_current_timezone_name(), errors='ignore')
cache_key += '.%s' % tz_name.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = hashlib.md5()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(force_bytes(value))
path = hashlib.md5(force_bytes(iri_to_uri(request.get_full_path())))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, method, path.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
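# Illustrative sketch (hypothetical digests): with key_prefix='', method='GET'
# and headerlist=['HTTP_COOKIE'], the generated key looks like
#   'views.decorators.cache.cache_page..GET.<md5 of path>.<md5 of header values>'
# plus any locale/time-zone suffix appended by _i18n_cache_key_suffix().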
def _generate_cache_header_key(key_prefix, request):
"""Returns a cache key for the header cache."""
path = hashlib.md5(force_bytes(iri_to_uri(request.get_full_path())))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, path.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Returns a cache key based on the request path and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global path registry and uses those to build a cache key
to check against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learns what headers to take into account for some request path from the
response object. It stores those headers in a global path registry so that
later access to that path will know what headers to take into account
without having to build the response object itself. The headers are named in
the Vary header of the response, but we want to avoid generating the response
just to read them.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
if response.has_header('Vary'):
is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
# If i18n or l10n are used, the generated cache key will be suffixed
# with the current locale. Adding the raw value of Accept-Language is
# redundant in that case and would result in storing the same content
# under multiple keys in the cache. See #18191 for details.
headerlist = []
for header in cc_delim_re.split(response['Vary']):
header = header.upper().replace('-', '_')
if header == 'ACCEPT_LANGUAGE' and is_accept_language_redundant:
continue
headerlist.append('HTTP_' + header)
headerlist.sort()
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# If there is no Vary header, we still need a cache key
# for the request.get_full_path()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
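# Illustrative sketch: _to_tuple('max-age=600') returns ('max-age', '600'),
# while a bare directive such as _to_tuple('no-cache') returns
# ('no-cache', True), mirroring the dictitem() helper in patch_cache_control().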
| gpl-3.0 |
appsembler/edx-platform | openedx/core/lib/cache_utils.py | 20 | 3273 | """
Utilities related to caching.
"""
import collections
import cPickle as pickle
import functools
import zlib
from xblock.core import XBlock
def memoize_in_request_cache(request_cache_attr_name=None):
"""
Memoize a method call's results in the request_cache if there is one. The
cache key is created by joining the unicode of all the args with '&', so an
arg whose string form itself contains '&' may produce false cache hits.
Arguments:
request_cache_attr_name - The name of the field or property in this method's containing
class that stores the request_cache.
"""
def _decorator(func):
"""Outer method decorator."""
@functools.wraps(func)
def _wrapper(self, *args, **kwargs):
"""
Wraps a method to memoize results.
"""
request_cache = getattr(self, request_cache_attr_name, None)
if request_cache:
cache_key = '&'.join([hashvalue(arg) for arg in args])
if cache_key in request_cache.data.setdefault(func.__name__, {}):
return request_cache.data[func.__name__][cache_key]
result = func(self, *args, **kwargs)
request_cache.data[func.__name__][cache_key] = result
return result
else:
return func(self, *args, **kwargs)
return _wrapper
return _decorator
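# Illustrative sketch (hypothetical class, not part of this module): the
# decorated method's containing class must expose the named request cache:
#
#   class CourseThing(object):
#       @property
#       def request_cache(self):
#           return self._request_cache  # assumed to be supplied elsewhere
#
#       @memoize_in_request_cache('request_cache')
#       def expensive_lookup(self, block_id):
#           ...
#
# Repeated calls with the same args within one request then hit the cache,
# keyed by '&'.join(hashvalue(arg) for arg in args).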
class memoized(object): # pylint: disable=invalid-name
"""
Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
WARNING: Only use this memoized decorator for caching data that
is constant throughout the lifetime of a gunicorn worker process,
is costly to compute, and is required often. Otherwise, it can lead to
unwanted memory leakage.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
hash(args)
except TypeError:
# Uncacheable arguments (a list, for instance). Note that
# isinstance(args, collections.Hashable) is True even for a tuple
# containing unhashable items, so probe with hash() directly;
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
"""
Return the function's docstring.
"""
return self.func.__doc__
def __get__(self, obj, objtype):
"""
Support instance methods.
"""
return functools.partial(self.__call__, obj)
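# Illustrative sketch: because memoized caches for the life of the process,
# it suits pure, frequently-called functions, e.g.:
#
#   @memoized
#   def fibonacci(n):
#       return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)
#
# Unhashable arguments (a list, for instance) bypass the cache entirely.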
def hashvalue(arg):
"""
If arg is an XBlock, use its location. Otherwise, just turn it into a string.
"""
if isinstance(arg, XBlock):
return unicode(arg.location)
else:
return unicode(arg)
def zpickle(data):
"""Given any data structure, returns a zlib compressed pickled serialization."""
return zlib.compress(pickle.dumps(data, pickle.HIGHEST_PROTOCOL))
def zunpickle(zdata):
"""Given a zlib compressed pickled serialization, returns the deserialized data."""
return pickle.loads(zlib.decompress(zdata))
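# Illustrative sketch: zpickle/zunpickle round-trip any picklable value:
#   zunpickle(zpickle({'a': [1, 2, 3]})) == {'a': [1, 2, 3]}
# As with any pickle-based format, only unpickle trusted data.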
| agpl-3.0 |
nickmuir/ZenPacks.community.PanduitMonitoring | ZenPacks/community/PanduitMonitoring/modeler/plugins/community/snmp/PanduitTemperatureSensorMap.py | 1 | 1330 | """PanduitTemperatureSensorMap
Gathers Temperature Sensors from Panduit Environmental Monitoring Devices
"""
from Products.DataCollector.plugins.CollectorPlugin import (SnmpPlugin, GetTableMap)
class PanduitTemperatureSensorMap(SnmpPlugin):
relname = 'panduitTemperatureSensors'
modname = 'ZenPacks.community.PanduitMonitoring.PanduitTemperatureSensor'
snmpGetTableMaps = (
GetTableMap(
'ipTHAEntry', '.1.3.6.1.4.1.3711.24.1.1.1.2.2.1', {
'.3':'ipTHAName',
'.1':'ipTHAChan',
'.6':'ipTHAType',
}
),
)
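# Illustrative sketch (hypothetical values): a row fetched from the table
# above might arrive keyed by its SNMP index as
#   {'ipTHAChan': 1, 'ipTHAName': 'Rack 1 Temp', 'ipTHAType': 2}
# and process() below keeps only rows whose ipTHAType is 2, which this
# plugin treats as temperature sensors.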
def process(self, device, results, log):
""" Process results and return a relationship map"""
log.info('processing %s for device %s', self.name(), device.id)
sensorinfo = results[1].get('ipTHAEntry', {})
relmap = self.relMap()
for snmpindex, row in sensorinfo.items():
# TODO: should we check for rows with no data?
name = row.get('ipTHAName')
if row.get('ipTHAType') == 2:
relmap.append(self.objectMap({
'id': self.prepId(name),
'title': name,
'snmpindex': snmpindex.strip('.'),
'channel': row.get('ipTHAChan'),
}))
return relmap
| lgpl-3.0 |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model_test.py | 17 | 31749 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state space model infrastructure."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy
from tensorflow.contrib import layers
from tensorflow.contrib.timeseries.python.timeseries import estimators
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries import saved_model_utils
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.contrib.timeseries.python.timeseries import test_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import queue_runner_impl
class RandomStateSpaceModel(state_space_model.StateSpaceModel):
def __init__(self,
state_dimension,
state_noise_dimension,
configuration=state_space_model.StateSpaceModelConfiguration()):
self.transition = numpy.random.normal(
size=[state_dimension, state_dimension]).astype(
configuration.dtype.as_numpy_dtype)
self.noise_transform = numpy.random.normal(
size=(state_dimension, state_noise_dimension)).astype(
configuration.dtype.as_numpy_dtype)
# Test batch broadcasting
self.observation_model = numpy.random.normal(
size=(configuration.num_features, state_dimension)).astype(
configuration.dtype.as_numpy_dtype)
super(RandomStateSpaceModel, self).__init__(
configuration=configuration._replace(
covariance_prior_fn=lambda _: 0.))
def get_state_transition(self):
return self.transition
def get_noise_transform(self):
return self.noise_transform
def get_observation_model(self, times):
return self.observation_model
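# Illustrative note: the tests below construct this helper as, e.g.,
#   RandomStateSpaceModel(
#       state_dimension=5, state_noise_dimension=4,
#       configuration=state_space_model.StateSpaceModelConfiguration(
#           dtype=dtypes.float64, num_features=1))
# yielding random (but fixed per instance) transition, noise transform and
# observation model matrices of mutually compatible shapes.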
class ConstructionTests(test.TestCase):
def test_initialize_graph_error(self):
with self.assertRaisesRegexp(ValueError, "initialize_graph"):
model = RandomStateSpaceModel(2, 2)
outputs = model.define_loss(
features={
feature_keys.TrainEvalFeatures.TIMES:
constant_op.constant([[1, 2]]),
feature_keys.TrainEvalFeatures.VALUES:
constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run([initializer])
outputs.loss.eval()
def test_initialize_graph_state_manager_error(self):
with self.assertRaisesRegexp(ValueError, "initialize_graph"):
model = RandomStateSpaceModel(2, 2)
state_manager = state_management.ChainingStateManager()
outputs = state_manager.define_loss(
model=model,
features={
feature_keys.TrainEvalFeatures.TIMES:
constant_op.constant([[1, 2]]),
feature_keys.TrainEvalFeatures.VALUES:
constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run([initializer])
outputs.loss.eval()
class GapTests(test.TestCase):
def _gap_test_template(self, times, values):
random_model = RandomStateSpaceModel(
state_dimension=1, state_noise_dimension=1,
configuration=state_space_model.StateSpaceModelConfiguration(
num_features=1))
random_model.initialize_graph()
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader({
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}))
features, _ = input_fn()
times = features[feature_keys.TrainEvalFeatures.TIMES]
values = features[feature_keys.TrainEvalFeatures.VALUES]
model_outputs = random_model.get_batch_loss(
features={
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
},
mode=None,
state=math_utils.replicate_state(
start_state=random_model.get_start_state(),
batch_size=array_ops.shape(times)[0]))
with self.test_session() as session:
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
model_outputs.loss.eval()
coordinator.request_stop()
coordinator.join()
def test_start_gap(self):
self._gap_test_template(times=[20, 21, 22], values=numpy.arange(3))
def test_mid_gap(self):
self._gap_test_template(times=[2, 60, 61], values=numpy.arange(3))
def test_end_gap(self):
self._gap_test_template(times=[2, 3, 73], values=numpy.arange(3))
def test_all_gaps(self):
self._gap_test_template(times=[2, 4, 8, 16, 32, 64, 128],
values=numpy.arange(7))
class StateSpaceEquivalenceTests(test.TestCase):
def test_savedmodel_state_override(self):
random_model = RandomStateSpaceModel(
state_dimension=5,
state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
exogenous_feature_columns=[layers.real_valued_column("exogenous")],
dtype=dtypes.float64, num_features=1))
estimator = estimators.StateSpaceRegressor(
model=random_model,
optimizer=gradient_descent.GradientDescentOptimizer(0.1))
combined_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader({
feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
"exogenous": [-1., -2., -3., -4.]
}))
estimator.train(combined_input_fn, steps=1)
export_location = estimator.export_savedmodel(
self.get_temp_dir(),
estimator.build_raw_serving_input_receiver_fn())
with ops.Graph().as_default() as graph:
random_model.initialize_graph()
with self.test_session(graph=graph) as session:
variables.global_variables_initializer().run()
evaled_start_state = session.run(random_model.get_start_state())
evaled_start_state = [
state_element[None, ...] for state_element in evaled_start_state]
with ops.Graph().as_default() as graph:
with self.test_session(graph=graph) as session:
signatures = loader.load(
session, [tag_constants.SERVING], export_location)
first_split_filtering = saved_model_utils.filter_continuation(
continue_from={
feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state},
signatures=signatures,
session=session,
features={
feature_keys.FilteringFeatures.TIMES: [1, 2],
feature_keys.FilteringFeatures.VALUES: [1., 2.],
"exogenous": [[-1.], [-2.]]})
second_split_filtering = saved_model_utils.filter_continuation(
continue_from=first_split_filtering,
signatures=signatures,
session=session,
features={
feature_keys.FilteringFeatures.TIMES: [3, 4],
feature_keys.FilteringFeatures.VALUES: [3., 4.],
"exogenous": [[-3.], [-4.]]
})
combined_filtering = saved_model_utils.filter_continuation(
continue_from={
feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state},
signatures=signatures,
session=session,
features={
feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
"exogenous": [[-1.], [-2.], [-3.], [-4.]]
})
split_predict = saved_model_utils.predict_continuation(
continue_from=second_split_filtering,
signatures=signatures,
session=session,
steps=1,
exogenous_features={
"exogenous": [[[-5.]]]})
combined_predict = saved_model_utils.predict_continuation(
continue_from=combined_filtering,
signatures=signatures,
session=session,
steps=1,
exogenous_features={
"exogenous": [[[-5.]]]})
for state_key, combined_state_value in combined_filtering.items():
if state_key == feature_keys.FilteringResults.TIMES:
continue
self.assertAllClose(
combined_state_value, second_split_filtering[state_key])
for prediction_key, combined_value in combined_predict.items():
self.assertAllClose(combined_value, split_predict[prediction_key])
def _equivalent_to_single_model_test_template(self, model_generator):
with self.test_session() as session:
random_model = RandomStateSpaceModel(
state_dimension=5,
state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtypes.float64, num_features=1))
random_model.initialize_graph()
series_length = 10
model_data = random_model.generate(
number_of_series=1, series_length=series_length,
model_parameters=random_model.random_model_parameters())
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(model_data))
features, _ = input_fn()
model_outputs = random_model.get_batch_loss(
features=features,
mode=None,
state=math_utils.replicate_state(
start_state=random_model.get_start_state(),
batch_size=array_ops.shape(
features[feature_keys.TrainEvalFeatures.TIMES])[0]))
variables.global_variables_initializer().run()
compare_outputs_evaled_fn = model_generator(
random_model, model_data)
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
compare_outputs_evaled = compare_outputs_evaled_fn(session)
model_outputs_evaled = session.run(
(model_outputs.end_state, model_outputs.predictions))
coordinator.request_stop()
coordinator.join()
model_posteriors, model_predictions = model_outputs_evaled
(_, compare_posteriors,
compare_predictions) = compare_outputs_evaled
(model_posterior_mean, model_posterior_var,
model_from_time) = model_posteriors
(compare_posterior_mean, compare_posterior_var,
compare_from_time) = compare_posteriors
self.assertAllClose(model_posterior_mean, compare_posterior_mean[0])
self.assertAllClose(model_posterior_var, compare_posterior_var[0])
self.assertAllClose(model_from_time, compare_from_time)
self.assertEqual(sorted(model_predictions.keys()),
sorted(compare_predictions.keys()))
for prediction_name in model_predictions:
if prediction_name == "loss":
# Chunking means that losses will be different; skip testing them.
continue
# Compare the last chunk to their corresponding un-chunked model
# predictions
last_prediction_chunk = compare_predictions[prediction_name][-1]
comparison_values = last_prediction_chunk.shape[0]
model_prediction = (
model_predictions[prediction_name][0, -comparison_values:])
self.assertAllClose(model_prediction,
last_prediction_chunk)
def _model_equivalent_to_chained_model_test_template(self, chunk_size):
def chained_model_outputs(original_model, data):
input_fn = test_utils.AllWindowInputFn(
input_pipeline.NumpyReader(data), window_size=chunk_size)
state_manager = state_management.ChainingStateManager(
state_saving_interval=1)
features, _ = input_fn()
state_manager.initialize_graph(original_model)
model_outputs = state_manager.define_loss(
model=original_model,
features=features,
mode=estimator_lib.ModeKeys.TRAIN)
def _eval_outputs(session):
for _ in range(50):
# Warm up saved state
model_outputs.loss.eval()
(posterior_mean, posterior_var,
priors_from_time) = model_outputs.end_state
posteriors = ((posterior_mean,), (posterior_var,), priors_from_time)
outputs = (model_outputs.loss, posteriors,
model_outputs.predictions)
chunked_outputs_evaled = session.run(outputs)
return chunked_outputs_evaled
return _eval_outputs
self._equivalent_to_single_model_test_template(chained_model_outputs)
def test_model_equivalent_to_chained_model_chunk_size_one(self):
numpy.random.seed(2)
random_seed.set_random_seed(3)
self._model_equivalent_to_chained_model_test_template(1)
def test_model_equivalent_to_chained_model_chunk_size_five(self):
numpy.random.seed(4)
random_seed.set_random_seed(5)
self._model_equivalent_to_chained_model_test_template(5)
class PredictionTests(test.TestCase):
def _check_predictions(
self, predicted_mean, predicted_covariance, window_size):
self.assertAllEqual(predicted_covariance.shape,
[1, # batch
window_size,
1, # num features
1]) # num features
self.assertAllEqual(predicted_mean.shape,
[1, # batch
window_size,
1]) # num features
for position in range(window_size - 2):
self.assertGreater(predicted_covariance[0, position + 2, 0, 0],
predicted_covariance[0, position, 0, 0])
def test_predictions_direct(self):
dtype = dtypes.float64
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
random_model.initialize_graph()
prediction_dict = random_model.predict(features={
feature_keys.PredictionFeatures.TIMES: [[1, 3, 5, 6]],
feature_keys.PredictionFeatures.STATE_TUPLE:
math_utils.replicate_state(
start_state=random_model.get_start_state(), batch_size=1)
})
with self.test_session():
variables.global_variables_initializer().run()
predicted_mean = prediction_dict["mean"].eval()
predicted_covariance = prediction_dict["covariance"].eval()
self._check_predictions(predicted_mean, predicted_covariance,
window_size=4)
def test_predictions_after_loss(self):
dtype = dtypes.float32
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
features = {
feature_keys.TrainEvalFeatures.TIMES: [[1, 2, 3, 4]],
feature_keys.TrainEvalFeatures.VALUES:
array_ops.ones([1, 4, 1], dtype=dtype)
}
passthrough = state_management.PassthroughStateManager()
random_model.initialize_graph()
passthrough.initialize_graph(random_model)
model_outputs = passthrough.define_loss(
model=random_model,
features=features,
mode=estimator_lib.ModeKeys.EVAL)
predictions = random_model.predict({
feature_keys.PredictionFeatures.TIMES: [[5, 7, 8]],
feature_keys.PredictionFeatures.STATE_TUPLE: model_outputs.end_state
})
with self.test_session():
variables.global_variables_initializer().run()
predicted_mean = predictions["mean"].eval()
predicted_covariance = predictions["covariance"].eval()
self._check_predictions(predicted_mean, predicted_covariance,
window_size=3)
class ExogenousTests(test.TestCase):
def test_noise_increasing(self):
for dtype in [dtypes.float32, dtypes.float64]:
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
original_covariance = array_ops.diag(array_ops.ones(shape=[5]))
_, new_covariance, _ = random_model._exogenous_noise_increasing(
current_times=[[1]],
exogenous_values=[[5.]],
state=[
array_ops.ones(shape=[1, 5]), original_covariance[None], [0]
])
with self.test_session() as session:
variables.global_variables_initializer().run()
evaled_new_covariance, evaled_original_covariance = session.run(
[new_covariance[0], original_covariance])
new_variances = numpy.diag(evaled_new_covariance)
original_variances = numpy.diag(evaled_original_covariance)
for i in range(5):
self.assertGreater(new_variances[i], original_variances[i])
def test_noise_decreasing(self):
for dtype in [dtypes.float32, dtypes.float64]:
with variable_scope.variable_scope(dtype.name):
random_model = RandomStateSpaceModel(
state_dimension=5, state_noise_dimension=4,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype, num_features=1))
random_model.initialize_graph()
original_covariance = array_ops.diag(
array_ops.ones(shape=[5], dtype=dtype))
_, new_covariance, _ = random_model._exogenous_noise_decreasing(
current_times=[[1]],
exogenous_values=constant_op.constant([[-2.]], dtype=dtype),
state=[
-array_ops.ones(shape=[1, 5], dtype=dtype),
original_covariance[None], [0]
])
with self.test_session() as session:
variables.global_variables_initializer().run()
evaled_new_covariance, evaled_original_covariance = session.run(
[new_covariance[0], original_covariance])
new_variances = numpy.diag(evaled_new_covariance)
original_variances = numpy.diag(evaled_original_covariance)
for i in range(5):
self.assertLess(new_variances[i], original_variances[i])
class StubStateSpaceModel(state_space_model.StateSpaceModel):
def __init__(self,
transition,
state_noise_dimension,
configuration=state_space_model.StateSpaceModelConfiguration()):
self.transition = transition
self.noise_transform = numpy.random.normal(
size=(transition.shape[0], state_noise_dimension)).astype(numpy.float32)
# Test feature + batch broadcasting
self.observation_model = numpy.random.normal(
size=(transition.shape[0])).astype(numpy.float32)
super(StubStateSpaceModel, self).__init__(
configuration=configuration)
def get_state_transition(self):
return self.transition
def get_noise_transform(self):
return self.noise_transform
def get_observation_model(self, times):
return self.observation_model
GeneratedModel = collections.namedtuple(
"GeneratedModel", ["model", "data", "true_parameters"])
class PosteriorTests(test.TestCase):
def _get_cycle_transition(self, period):
cycle_transition = numpy.zeros([period - 1, period - 1],
dtype=numpy.float32)
cycle_transition[0, :] = -1
cycle_transition[1:, :-1] = numpy.identity(period - 2)
return cycle_transition
_adder_transition = numpy.array([[1, 1],
[0, 1]], dtype=numpy.float32)
def _get_single_model(self):
numpy.random.seed(8)
stub_model = StubStateSpaceModel(
transition=self._get_cycle_transition(5), state_noise_dimension=0)
series_length = 1000
stub_model.initialize_graph()
true_params = stub_model.random_model_parameters()
data = stub_model.generate(
number_of_series=1, series_length=series_length,
model_parameters=true_params)
return GeneratedModel(
model=stub_model, data=data, true_parameters=true_params)
def test_exact_posterior_recovery_no_transition_noise(self):
with self.test_session() as session:
stub_model, data, true_params = self._get_single_model()
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(data))
features, _ = input_fn()
model_outputs = stub_model.get_batch_loss(
features=features,
mode=None,
state=math_utils.replicate_state(
start_state=stub_model.get_start_state(),
batch_size=array_ops.shape(
features[feature_keys.TrainEvalFeatures.TIMES])[0]))
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
posterior_mean, posterior_var, posterior_times = session.run(
# Feed the true model parameters so that this test doesn't depend on
# the generated parameters being close to the variable initializations
# (an alternative would be training steps to fit the noise values,
# which would be slow).
model_outputs.end_state, feed_dict=true_params)
coordinator.request_stop()
coordinator.join()
self.assertAllClose(numpy.zeros([1, 4, 4]), posterior_var,
atol=1e-2)
self.assertAllClose(
numpy.dot(
numpy.linalg.matrix_power(
stub_model.transition,
data[feature_keys.TrainEvalFeatures.TIMES].shape[1]),
true_params[stub_model.prior_state_mean]),
posterior_mean[0],
rtol=1e-1)
self.assertAllClose(
math_utils.batch_end_time(
features[feature_keys.TrainEvalFeatures.TIMES]).eval(),
posterior_times)
def test_chained_exact_posterior_recovery_no_transition_noise(self):
with self.test_session() as session:
stub_model, data, true_params = self._get_single_model()
chunk_size = 10
input_fn = test_utils.AllWindowInputFn(
input_pipeline.NumpyReader(data), window_size=chunk_size)
features, _ = input_fn()
state_manager = state_management.ChainingStateManager(
state_saving_interval=1)
state_manager.initialize_graph(stub_model)
model_outputs = state_manager.define_loss(
model=stub_model,
features=features,
mode=estimator_lib.ModeKeys.TRAIN)
variables.global_variables_initializer().run()
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coordinator)
for _ in range(
data[feature_keys.TrainEvalFeatures.TIMES].shape[1] // chunk_size):
model_outputs.loss.eval()
posterior_mean, posterior_var, posterior_times = session.run(
model_outputs.end_state, feed_dict=true_params)
coordinator.request_stop()
coordinator.join()
self.assertAllClose(numpy.zeros([1, 4, 4]), posterior_var,
atol=1e-2)
self.assertAllClose(
numpy.dot(
numpy.linalg.matrix_power(
stub_model.transition,
data[feature_keys.TrainEvalFeatures.TIMES].shape[1]),
true_params[stub_model.prior_state_mean]),
posterior_mean[0],
rtol=1e-1)
self.assertAllClose(data[feature_keys.TrainEvalFeatures.TIMES][:, -1],
posterior_times)
class TimeDependentStateSpaceModel(state_space_model.StateSpaceModel):
"""A mostly trivial model which predicts values = times + 1."""
def __init__(self, static_unrolling_window_size_threshold=None):
super(TimeDependentStateSpaceModel, self).__init__(
configuration=state_space_model.StateSpaceModelConfiguration(
use_observation_noise=False,
transition_covariance_initial_log_scale_bias=5.,
static_unrolling_window_size_threshold=
static_unrolling_window_size_threshold))
def get_state_transition(self):
return array_ops.ones(shape=[1, 1])
def get_noise_transform(self):
return array_ops.ones(shape=[1, 1])
def get_observation_model(self, times):
return array_ops.reshape(
tensor=math_ops.cast(times + 1, dtypes.float32), shape=[-1, 1, 1])
def make_priors(self):
return (ops.convert_to_tensor([1.]), ops.convert_to_tensor([[0.]]))
class UnknownShapeModel(TimeDependentStateSpaceModel):
def get_observation_model(self, times):
parent_model = super(UnknownShapeModel, self).get_observation_model(times)
return array_ops.placeholder_with_default(
input=parent_model, shape=tensor_shape.unknown_shape())
class TimeDependentTests(test.TestCase):
def _time_dependency_test_template(self, model_type):
"""Test that a time-dependent observation model influences predictions."""
model = model_type()
estimator = estimators.StateSpaceRegressor(
model=model, optimizer=gradient_descent.GradientDescentOptimizer(0.1))
values = numpy.reshape([1., 2., 3., 4.],
newshape=[1, 4, 1])
input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader({
feature_keys.TrainEvalFeatures.TIMES: [[0, 1, 2, 3]],
feature_keys.TrainEvalFeatures.VALUES: values
}))
estimator.train(input_fn=input_fn, max_steps=1)
predicted_values = estimator.evaluate(input_fn=input_fn, steps=1)["mean"]
# Throw out the first value so we don't test the prior
self.assertAllEqual(values[1:], predicted_values[1:])
def test_undefined_shape_time_dependency(self):
self._time_dependency_test_template(UnknownShapeModel)
def test_loop_unrolling(self):
"""Tests running/restoring from a checkpoint with static unrolling."""
model = TimeDependentStateSpaceModel(
# Unroll during training, but not evaluation
static_unrolling_window_size_threshold=2)
estimator = estimators.StateSpaceRegressor(model=model)
times = numpy.arange(100)
values = numpy.arange(100)
dataset = {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(dataset), batch_size=16, window_size=2)
eval_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(dataset))
estimator.train(input_fn=train_input_fn, max_steps=1)
estimator.evaluate(input_fn=eval_input_fn, steps=1)
class LevelOnlyModel(state_space_model.StateSpaceModel):
def get_state_transition(self):
return linalg_ops.eye(1, dtype=self.dtype)
def get_noise_transform(self):
return linalg_ops.eye(1, dtype=self.dtype)
def get_observation_model(self, times):
return [1]
class MultivariateLevelModel(
state_space_model.StateSpaceCorrelatedFeaturesEnsemble):
def __init__(self, configuration):
univariate_component_configuration = configuration._replace(
num_features=1)
components = []
for feature in range(configuration.num_features):
with variable_scope.variable_scope("feature{}".format(feature)):
components.append(
LevelOnlyModel(configuration=univariate_component_configuration))
super(MultivariateLevelModel, self).__init__(
ensemble_members=components, configuration=configuration)
class MultivariateTests(test.TestCase):
def test_multivariate(self):
dtype = dtypes.float32
num_features = 3
covariance = numpy.eye(num_features)
# A single off-diagonal has a non-zero value in the true transition
# noise covariance.
covariance[-1, 0] = 1.
covariance[0, -1] = 1.
dataset_size = 100
values = numpy.cumsum(
numpy.random.multivariate_normal(
mean=numpy.zeros(num_features),
cov=covariance,
size=dataset_size),
axis=0)
times = numpy.arange(dataset_size)
model = MultivariateLevelModel(
configuration=state_space_model.StateSpaceModelConfiguration(
num_features=num_features,
dtype=dtype,
use_observation_noise=False,
transition_covariance_initial_log_scale_bias=5.))
estimator = estimators.StateSpaceRegressor(
model=model, optimizer=gradient_descent.GradientDescentOptimizer(0.1))
data = {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(data), batch_size=16, window_size=16)
estimator.train(input_fn=train_input_fn, steps=1)
for component in model._ensemble_members:
# Check that input statistics propagated to component models
self.assertTrue(component._input_statistics)
def test_ensemble_observation_noise(self):
model = MultivariateLevelModel(
configuration=state_space_model.StateSpaceModelConfiguration())
model.initialize_graph()
outputs = model.define_loss(
features={
feature_keys.TrainEvalFeatures.TIMES:
constant_op.constant([[1, 2]]),
feature_keys.TrainEvalFeatures.VALUES:
constant_op.constant([[[1.], [2.]]])
},
mode=estimator_lib.ModeKeys.TRAIN)
initializer = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run([initializer])
outputs.loss.eval()
if __name__ == "__main__":
test.main()
| apache-2.0 |
ycsoft/FatCat-Server | LIBS/boost_1_58_0/tools/build/test/dll_path.py | 6 | 4116 | #!/usr/bin/python
# Copyright (C) 2003. Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test that the <dll-path> property is correctly set when using
# <hardcode-dll-paths>true.
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# The point of this test is to have exe "main" which uses library "b", which
# uses library "a". When "main" is built with <hardcode-dll-paths>true, paths
# to both libraries should be present as values of the <dll-path> feature. We
# create a special target type which reports <dll-path> values on its sources
# and compare the list of found values with our expectations.
t.write("jamroot.jam", "using dll_paths ;")
t.write("jamfile.jam", """\
exe main : main.cpp b//b ;
explicit main ;
path-list mp : main ;
""")
t.write("main.cpp", "int main() {}\n")
t.write("dll_paths.jam", """\
import "class" : new ;
import feature ;
import generators ;
import print ;
import sequence ;
import type ;
rule init ( )
{
type.register PATH_LIST : pathlist ;
class dll-paths-list-generator : generator
{
rule __init__ ( )
{
generator.__init__ dll_paths.list : EXE : PATH_LIST ;
}
rule generated-targets ( sources + : property-set : project name ? )
{
local dll-paths ;
for local s in $(sources)
{
local a = [ $(s).action ] ;
if $(a)
{
local p = [ $(a).properties ] ;
dll-paths += [ $(p).get <dll-path> ] ;
}
}
return [ generator.generated-targets $(sources) :
[ $(property-set).add-raw $(dll-paths:G=<dll-path>) ] :
$(project) $(name) ] ;
}
}
generators.register [ new dll-paths-list-generator ] ;
}
rule list ( target : sources * : properties * )
{
local paths = [ feature.get-values <dll-path> : $(properties) ] ;
paths = [ sequence.insertion-sort $(paths) ] ;
print.output $(target) ;
print.text $(paths) ;
}
""")
t.write("dll_paths.py", """\
import bjam
import b2.build.type as type
import b2.build.generators as generators
from b2.manager import get_manager
def init():
type.register("PATH_LIST", ["pathlist"])
class DllPathsListGenerator(generators.Generator):
def __init__(self):
generators.Generator.__init__(self, "dll_paths.list", False,
["EXE"], ["PATH_LIST"])
def generated_targets(self, sources, ps, project, name):
dll_paths = []
for s in sources:
a = s.action()
if a:
p = a.properties()
dll_paths += p.get('dll-path')
dll_paths.sort()
return generators.Generator.generated_targets(self, sources,
ps.add_raw(["<dll-path>" + p for p in dll_paths]), project,
name)
generators.register(DllPathsListGenerator())
command = \"\"\"
echo $(PATHS) > $(<[1])
\"\"\"
def function(target, sources, ps):
bjam.call('set-target-variable', target, "PATHS", ps.get('dll-path'))
get_manager().engine().register_action("dll_paths.list", command,
function=function)
""")
t.write("a/jamfile.jam", "lib a : a.cpp ;")
t.write("a/a.cpp", """\
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
foo() {}
""")
t.write("b/jamfile.jam", "lib b : b.cpp ../a//a ;")
t.write("b/b.cpp", """\
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
bar() {}
""")
t.run_build_system(["hardcode-dll-paths=true"])
t.expect_addition("bin/$toolset/debug/mp.pathlist")
es1 = t.adjust_names("a/bin/$toolset/debug")[0]
es2 = t.adjust_names("b/bin/$toolset/debug")[0]
t.expect_content_lines("bin/$toolset/debug/mp.pathlist", "*" + es1);
t.expect_content_lines("bin/$toolset/debug/mp.pathlist", "*" + es2);
t.cleanup()
| mit |
petrjasek/superdesk-core | superdesk/data_updates/00019_20190924-104943_roles.py | 2 | 1142 | # -*- coding: utf-8; -*-
# This file is part of Superdesk.
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
#
# Author : petr
# Creation: 2019-09-24 10:49
from superdesk.commands.data_updates import BaseDataUpdate
class DataUpdate(BaseDataUpdate):
resource = "roles"
privileges = [
"publisher_dashboard",
"planning_assignments_view",
"monitoring_view",
"spike_read",
"highlights_read",
"use_global_saved_searches",
"dashboard",
"ansa_metasearch",
"ansa_live_assistant",
"ansa_ai_news",
]
def forwards(self, mongodb_collection, mongodb_database):
updates = {}
for privilege in self.privileges:
updates["privileges.{}".format(privilege)] = 1
result = mongodb_collection.update_many({}, {"$set": updates})
print("updated {}/{} roles".format(result.modified_count, result.matched_count))
def backwards(self, mongodb_collection, mongodb_database):
pass
| agpl-3.0 |
defance/edx-platform | lms/djangoapps/courseware/tests/test_entrance_exam.py | 11 | 26647 | """
Tests use cases related to LMS Entrance Exam behavior, such as gated content access (TOC)
"""
from mock import patch, Mock
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from courseware.model_data import FieldDataCache
from courseware.module_render import toc_for_course, get_module, handle_xblock_callback
from courseware.tests.factories import UserFactory, InstructorFactory, StaffFactory
from courseware.tests.helpers import (
LoginEnrollmentTestCase,
get_request_for_user
)
from courseware.entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_can_skip_entrance_exam,
user_has_passed_entrance_exam,
)
from student.models import CourseEnrollment
from student.tests.factories import CourseEnrollmentFactory, AnonymousUserFactory
from util.milestones_helpers import (
add_milestone,
add_course_milestone,
get_namespace_choices,
generate_milestone_namespace,
add_course_content_milestone,
get_milestone_relationship_types,
)
from milestones.tests.utils import MilestonesTestCaseMixin
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@attr('shard_1')
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
class EntranceExamTestCases(LoginEnrollmentTestCase, ModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Check that content is properly gated.
Creates a test course from scratch. The tests below are designed to execute
workflows regardless of the feature flag settings.
"""
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
def setUp(self):
"""
Test case scaffolding
"""
super(EntranceExamTestCases, self).setUp()
self.course = CourseFactory.create(
metadata={
'entrance_exam_enabled': True,
}
)
self.chapter = ItemFactory.create(
parent=self.course,
display_name='Overview'
)
ItemFactory.create(
parent=self.chapter,
display_name='Welcome'
)
ItemFactory.create(
parent=self.course,
category='chapter',
display_name="Week 1"
)
self.chapter_subsection = ItemFactory.create(
parent=self.chapter,
category='sequential',
display_name="Lesson 1"
)
chapter_vertical = ItemFactory.create(
parent=self.chapter_subsection,
category='vertical',
display_name='Lesson 1 Vertical - Unit 1'
)
ItemFactory.create(
parent=chapter_vertical,
category="problem",
display_name="Problem - Unit 1 Problem 1"
)
ItemFactory.create(
parent=chapter_vertical,
category="problem",
display_name="Problem - Unit 1 Problem 2"
)
ItemFactory.create(
category="instructor",
parent=self.course,
data="Instructor Tab",
display_name="Instructor"
)
self.entrance_exam = ItemFactory.create(
parent=self.course,
category="chapter",
display_name="Entrance Exam Section - Chapter 1",
is_entrance_exam=True,
in_entrance_exam=True
)
self.exam_1 = ItemFactory.create(
parent=self.entrance_exam,
category='sequential',
display_name="Exam Sequential - Subsection 1",
graded=True,
in_entrance_exam=True
)
subsection = ItemFactory.create(
parent=self.exam_1,
category='vertical',
display_name='Exam Vertical - Unit 1'
)
problem_xml = MultipleChoiceResponseXMLFactory().build_xml(
question_text='The correct answer is Choice 3',
choices=[False, False, True, False],
choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
)
self.problem_1 = ItemFactory.create(
parent=subsection,
category="problem",
display_name="Exam Problem - Problem 1",
data=problem_xml
)
self.problem_2 = ItemFactory.create(
parent=subsection,
category="problem",
display_name="Exam Problem - Problem 2"
)
add_entrance_exam_milestone(self.course, self.entrance_exam)
self.course.entrance_exam_enabled = True
self.course.entrance_exam_minimum_score_pct = 0.50
self.course.entrance_exam_id = unicode(self.entrance_exam.scope_ids.usage_id)
self.anonymous_user = AnonymousUserFactory()
self.request = get_request_for_user(UserFactory())
modulestore().update_item(self.course, self.request.user.id) # pylint: disable=no-member
self.client.login(username=self.request.user.username, password="test")
CourseEnrollment.enroll(self.request.user, self.course.id)
self.expected_locked_toc = (
[
{
'active': True,
'sections': [
{
'url_name': u'Exam_Sequential_-_Subsection_1',
'display_name': u'Exam Sequential - Subsection 1',
'graded': True,
'format': '',
'due': None,
'active': True
}
],
'url_name': u'Entrance_Exam_Section_-_Chapter_1',
'display_name': u'Entrance Exam Section - Chapter 1',
'display_id': u'entrance-exam-section-chapter-1',
}
]
)
self.expected_unlocked_toc = (
[
{
'active': False,
'sections': [
{
'url_name': u'Welcome',
'display_name': u'Welcome',
'graded': False,
'format': '',
'due': None,
'active': False
},
{
'url_name': u'Lesson_1',
'display_name': u'Lesson 1',
'graded': False,
'format': '',
'due': None,
'active': False
}
],
'url_name': u'Overview',
'display_name': u'Overview',
'display_id': u'overview'
},
{
'active': False,
'sections': [],
'url_name': u'Week_1',
'display_name': u'Week 1',
'display_id': u'week-1'
},
{
'active': False,
'sections': [],
'url_name': u'Instructor',
'display_name': u'Instructor',
'display_id': u'instructor'
},
{
'active': True,
'sections': [
{
'url_name': u'Exam_Sequential_-_Subsection_1',
'display_name': u'Exam Sequential - Subsection 1',
'graded': True,
'format': '',
'due': None,
'active': True
}
],
'url_name': u'Entrance_Exam_Section_-_Chapter_1',
'display_name': u'Entrance Exam Section - Chapter 1',
'display_id': u'entrance-exam-section-chapter-1'
}
]
)
def test_view_redirect_if_entrance_exam_required(self):
"""
Unit Test: if entrance exam is required. Should return a redirect.
"""
url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
expected_url = reverse('courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
})
resp = self.client.get(url)
self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': False})
def test_entrance_exam_content_absence(self):
"""
Unit Test: If entrance exam is not enabled then page should be redirected with chapter contents.
"""
url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
expected_url = reverse('courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.name,
'section': self.chapter_subsection.location.name
})
resp = self.client.get(url)
self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
resp = self.client.get(expected_url)
self.assertNotIn('Exam Problem - Problem 1', resp.content)
self.assertNotIn('Exam Problem - Problem 2', resp.content)
def test_entrance_exam_content_presence(self):
"""
Unit Test: If entrance exam is enabled then its content e.g. problems should be loaded and redirection will
occur with entrance exam contents.
"""
url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
expected_url = reverse('courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
})
resp = self.client.get(url)
self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
resp = self.client.get(expected_url)
self.assertIn('Exam Problem - Problem 1', resp.content)
self.assertIn('Exam Problem - Problem 2', resp.content)
def test_get_entrance_exam_content(self):
"""
test get entrance exam content method
"""
exam_chapter = get_entrance_exam_content(self.request, self.course)
self.assertEqual(exam_chapter.url_name, self.entrance_exam.url_name)
self.assertFalse(user_has_passed_entrance_exam(self.request, self.course))
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
exam_chapter = get_entrance_exam_content(self.request, self.course)
self.assertEqual(exam_chapter, None)
self.assertTrue(user_has_passed_entrance_exam(self.request, self.course))
def test_entrance_exam_score(self):
"""
test entrance exam score. we will hit the method get_entrance_exam_score to verify exam score.
"""
with self.assertNumQueries(1):
exam_score = get_entrance_exam_score(self.request, self.course)
self.assertEqual(exam_score, 0)
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
with self.assertNumQueries(1):
exam_score = get_entrance_exam_score(self.request, self.course)
# 50 percent exam score should be achieved.
self.assertGreater(exam_score * 100, 50)
def test_entrance_exam_requirement_message(self):
"""
Unit Test: entrance exam requirement message should be present in response
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
}
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn('To access course materials, you must score', resp.content)
def test_entrance_exam_requirement_message_with_correct_percentage(self):
"""
Unit Test: entrance exam requirement message should be present in response
and percentage of required score should be rounded as expected
"""
minimum_score_pct = 29
self.course.entrance_exam_minimum_score_pct = float(minimum_score_pct) / 100
modulestore().update_item(self.course, self.request.user.id) # pylint: disable=no-member
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
}
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn('To access course materials, you must score {required_score}% or higher'.format(
required_score=minimum_score_pct
), resp.content)
def test_entrance_exam_requirement_message_hidden(self):
"""
Unit Test: entrance exam message should not be present outside the context of entrance exam subsection.
"""
# Login as staff to avoid redirect to entrance exam
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='test')
CourseEnrollment.enroll(staff_user, self.course.id)
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.name,
'section': self.chapter_subsection.location.name
}
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('To access course materials, you must score', resp.content)
self.assertNotIn('You have passed the entrance exam.', resp.content)
def test_entrance_exam_passed_message_and_course_content(self):
"""
Unit Test: exam passing message and rest of the course section should be present
when user achieves the entrance exam milestone/pass the exam.
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
}
)
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
resp = self.client.get(url)
self.assertNotIn('To access course materials, you must score', resp.content)
self.assertIn('You have passed the entrance exam.', resp.content)
self.assertIn('Lesson 1', resp.content)
def test_entrance_exam_gating(self):
"""
Unit Test: test_entrance_exam_gating
"""
# This user helps to cover a discovered bug in the milestone fulfillment logic
chaos_user = UserFactory()
locked_toc = self._return_table_of_contents()
for toc_section in self.expected_locked_toc:
self.assertIn(toc_section, locked_toc)
# Set up the chaos user
answer_entrance_exam_problem(self.course, self.request, self.problem_1, chaos_user)
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
unlocked_toc = self._return_table_of_contents()
for toc_section in self.expected_unlocked_toc:
self.assertIn(toc_section, unlocked_toc)
def test_skip_entrance_exam_gating(self):
"""
        Tests that gating is disabled when skip-entrance-exam is set for a user.
"""
# make sure toc is locked before allowing user to skip entrance exam
locked_toc = self._return_table_of_contents()
for toc_section in self.expected_locked_toc:
self.assertIn(toc_section, locked_toc)
# hit skip entrance exam api in instructor app
instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=instructor.username, password='test')
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.request.user.email,
})
self.assertEqual(response.status_code, 200)
unlocked_toc = self._return_table_of_contents()
for toc_section in self.expected_unlocked_toc:
self.assertIn(toc_section, unlocked_toc)
def test_entrance_exam_gating_for_staff(self):
"""
        Tests that gating is disabled if the user is a member of staff.
"""
# Login as member of staff
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
staff_user.is_staff = True
self.client.login(username=staff_user.username, password='test')
# assert staff has access to all toc
self.request.user = staff_user
unlocked_toc = self._return_table_of_contents()
for toc_section in self.expected_unlocked_toc:
self.assertIn(toc_section, unlocked_toc)
@patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=False))
def test_courseware_page_access_without_passing_entrance_exam(self):
"""
        Test courseware page access without passing the entrance exam
"""
url = reverse(
'courseware_chapter',
kwargs={'course_id': unicode(self.course.id), 'chapter': self.chapter.url_name}
)
response = self.client.get(url)
redirect_url = reverse('courseware', args=[unicode(self.course.id)])
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=302)
response = self.client.get(redirect_url)
exam_url = response.get('Location')
self.assertRedirects(response, exam_url)
@patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=False))
def test_courseinfo_page_access_without_passing_entrance_exam(self):
"""
        Test course info page access without passing the entrance exam
"""
url = reverse('info', args=[unicode(self.course.id)])
response = self.client.get(url)
redirect_url = reverse('courseware', args=[unicode(self.course.id)])
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=302)
response = self.client.get(redirect_url)
exam_url = response.get('Location')
self.assertRedirects(response, exam_url)
@patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=True))
def test_courseware_page_access_after_passing_entrance_exam(self):
"""
        Test courseware page access after passing the entrance exam
"""
        # user_has_passed_entrance_exam is mocked to return True, so the chapter should load
self._assert_chapter_loaded(self.course, self.chapter)
@patch('util.milestones_helpers.get_required_content', Mock(return_value=['a value']))
def test_courseware_page_access_with_staff_user_without_passing_entrance_exam(self):
"""
        Test courseware page access by a staff user without passing the entrance exam
"""
self.logout()
staff_user = StaffFactory.create(course_key=self.course.id)
self.login(staff_user.email, 'test')
CourseEnrollmentFactory(user=staff_user, course_id=self.course.id)
self._assert_chapter_loaded(self.course, self.chapter)
def test_courseware_page_access_with_staff_user_after_passing_entrance_exam(self):
"""
        Test courseware page access by a staff user after passing the entrance exam
"""
self.logout()
staff_user = StaffFactory.create(course_key=self.course.id)
self.login(staff_user.email, 'test')
CourseEnrollmentFactory(user=staff_user, course_id=self.course.id)
self._assert_chapter_loaded(self.course, self.chapter)
@patch.dict("django.conf.settings.FEATURES", {'ENTRANCE_EXAMS': False})
def test_courseware_page_access_when_entrance_exams_disabled(self):
"""
Test courseware page access when ENTRANCE_EXAMS feature is disabled
"""
self._assert_chapter_loaded(self.course, self.chapter)
def test_can_skip_entrance_exam_with_anonymous_user(self):
"""
Test can_skip_entrance_exam method with anonymous user
"""
self.assertFalse(user_can_skip_entrance_exam(self.request, self.anonymous_user, self.course))
def test_has_passed_entrance_exam_with_anonymous_user(self):
"""
Test has_passed_entrance_exam method with anonymous user
"""
self.request.user = self.anonymous_user
self.assertFalse(user_has_passed_entrance_exam(self.request, self.course))
def test_course_has_entrance_exam_missing_exam_id(self):
course = CourseFactory.create(
metadata={
'entrance_exam_enabled': True,
}
)
self.assertFalse(course_has_entrance_exam(course))
def test_user_has_passed_entrance_exam_short_circuit_missing_exam(self):
course = CourseFactory.create(
)
self.assertTrue(user_has_passed_entrance_exam(self.request, course))
@patch.dict("django.conf.settings.FEATURES", {'ENABLE_MASQUERADE': False})
def test_entrance_exam_xblock_response(self):
"""
        Tests that the entrance exam xblock JSON response contains the `entrance_exam_passed` key.
"""
request_factory = RequestFactory()
data = {'input_{}_2_1'.format(unicode(self.problem_1.location.html_id())): 'choice_2'}
request = request_factory.post(
'problem_check',
data=data
)
request.user = self.user
response = handle_xblock_callback(
request,
unicode(self.course.id),
unicode(self.problem_1.location),
'xmodule_handler',
'problem_check',
)
self.assertEqual(response.status_code, 200)
self.assertIn('entrance_exam_passed', response.content)
def _assert_chapter_loaded(self, course, chapter):
"""
        Asserts that the courseware chapter loads successfully.
"""
url = reverse(
'courseware_chapter',
kwargs={'course_id': unicode(course.id), 'chapter': chapter.url_name}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def _return_table_of_contents(self):
"""
        Returns the table of contents specific to this test: for course
        self.course, chapter self.entrance_exam, and section self.exam_1
"""
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents( # pylint: disable=attribute-defined-outside-init
self.course.id,
self.request.user,
self.entrance_exam
)
return toc_for_course(
self.request.user,
self.request,
self.course,
self.entrance_exam.url_name,
self.exam_1.url_name,
self.field_data_cache
)
def answer_entrance_exam_problem(course, request, problem, user=None):
"""
Takes a required milestone `problem` in a `course` and fulfills it.
Args:
course (Course): Course object, the course the required problem is in
        request (Request): Request object
problem (xblock): xblock object, the problem to be fulfilled
user (User): User object in case it is different from request.user
"""
if not user:
user = request.user
grade_dict = {'value': 1, 'max_value': 1, 'user_id': user.id}
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id,
user,
course,
depth=2
)
# pylint: disable=protected-access
module = get_module(
user,
request,
problem.scope_ids.usage_id,
field_data_cache,
)._xmodule
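    # Publishing a 'grade' event with value equal to max_value records a
    # perfect score for the problem, which fulfills the milestone.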
module.system.publish(problem, 'grade', grade_dict)
def add_entrance_exam_milestone(course, entrance_exam):
"""
    Adds the milestone for the given `entrance_exam` in `course`
    Args:
        course (Course): Course object in which the entrance_exam is located
entrance_exam (xblock): the entrance exam to be added as a milestone
"""
namespace_choices = get_namespace_choices()
milestone_relationship_types = get_milestone_relationship_types()
milestone_namespace = generate_milestone_namespace(
namespace_choices.get('ENTRANCE_EXAM'),
course.id
)
milestone = add_milestone(
{
'name': 'Test Milestone',
'namespace': milestone_namespace,
'description': 'Testing Courseware Entrance Exam Chapter',
}
)
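    # The course REQUIRES the milestone, and the entrance exam content FULFILLS it.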
add_course_milestone(
unicode(course.id),
milestone_relationship_types['REQUIRES'],
milestone
)
add_course_content_milestone(
unicode(course.id),
unicode(entrance_exam.location),
milestone_relationship_types['FULFILLS'],
milestone
)
| agpl-3.0 |
dcosentino/edx-platform | common/djangoapps/course_about/views.py | 28 | 2124 | """
Implementation of the RESTful endpoints for the Course About API.
"""
from rest_framework.throttling import UserRateThrottle
from rest_framework.views import APIView
from course_about import api
from rest_framework import status
from rest_framework.response import Response
from course_about.errors import CourseNotFoundError, CourseAboutError
class CourseAboutThrottle(UserRateThrottle):
"""Limit the number of requests users can make to the Course About API."""
# TODO Limit based on expected throughput # pylint: disable=fixme
rate = '50/second'
class CourseAboutView(APIView):
""" RESTful Course About API view.
Used to retrieve JSON serialized Course About information.
"""
authentication_classes = []
permission_classes = []
    throttle_classes = (CourseAboutThrottle,)
def get(self, request, course_id=None): # pylint: disable=unused-argument
"""Read course information.
HTTP Endpoint for course info api.
Args:
Course Id = URI element specifying the course location. Course information will be
returned for this particular course.
Return:
A JSON serialized representation of the course information
"""
try:
return Response(api.get_course_about_details(course_id))
except CourseNotFoundError:
return Response(
status=status.HTTP_404_NOT_FOUND,
data={
"message": (
u"An error occurred while retrieving course information"
u" for course '{course_id}' no course found"
).format(course_id=course_id)
}
)
except CourseAboutError:
return Response(
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
data={
"message": (
u"An error occurred while retrieving course information"
u" for course '{course_id}'"
).format(course_id=course_id)
}
)
| agpl-3.0 |
dacb/viscount | tests/__init__.py | 1 | 1091 | """
viscount unit tests
contains basic test case classes that can be used by all tests
"""
from unittest import TestCase
from viscount.core import db
from .factories import UserFactory
from .utils import FlaskTestCaseMixin
class ViscountTestCase(TestCase):
pass
class ViscountAppTestCase(FlaskTestCaseMixin, ViscountTestCase):
def _create_app(self):
raise NotImplementedError
def _create_fixtures(self):
        # TODO: expand fixtures as needed
self.user = UserFactory()
def setUp(self):
super(ViscountAppTestCase, self).setUp()
self.app = self._create_app()
self.client = self.app.test_client()
self.app_context = self.app.app_context()
self.app_context.push()
db.drop_all()
db.create_all()
self._create_fixtures()
self._create_csrf_token()
def tearDown(self):
super(ViscountAppTestCase, self).tearDown()
db.drop_all()
self.app_context.pop()
def _login(self, email=None, password=None):
email = email or self.user.email
password = password or 'password'
ret = self.post('/login', data={'email': email, 'password': password}, follow_redirects=False)
return ret
| bsd-2-clause |
highweb-project/highweb-webcl-html5spec | tools/perf/page_sets/key_desktop_sites.py | 22 | 27382 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class KeyDesktopSitesPage(page_module.Page):
def __init__(self, url, page_set):
super(KeyDesktopSitesPage, self).__init__(
url=url, page_set=page_set, credentials_path = 'data/credentials.json')
self.archive_data_file = 'data/key_desktop_sites.json'
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
class FacebookPage(KeyDesktopSitesPage):
def __init__(self, page_set):
super(FacebookPage, self).__init__(
url='http://facebook.com',
page_set=page_set)
self.credentials = 'facebook'
class GmailPage(KeyDesktopSitesPage):
def __init__(self, page_set):
super(GmailPage, self).__init__(
url='https://mail.google.com/mail/',
page_set=page_set)
self.scrollable_element_function = '''
function(callback) {
gmonkey.load('2.0', function(api) {
callback(api.getScrollableElement());
});
}'''
self.credentials = 'google'
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
action_runner.WaitForJavaScriptCondition(
'window.gmonkey !== undefined && '
'document.getElementById("gb") !== null')
class GoogleCalendarPage(KeyDesktopSitesPage):
def __init__(self, page_set):
super(GoogleCalendarPage, self).__init__(
url='https://www.google.com/calendar/',
page_set=page_set)
self.scrollable_element_function = '''
function(callback) {
callback(document.getElementById('scrolltimedeventswk'));
}'''
self.credentials = 'google'
class GoogleDrivePage(KeyDesktopSitesPage):
def __init__(self, page_set):
super(GoogleDrivePage, self).__init__(
url='https://drive.google.com',
page_set=page_set)
self.scrollable_element_function = '''
function(callback) {
callback(document.getElementsByClassName('doclistview-list')[0]);
}'''
self.credentials = 'google'
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
action_runner.WaitForJavaScriptCondition(
'document.getElementsByClassName("doclistview-list").length')
class GoogleDocPage(KeyDesktopSitesPage):
def __init__(self, page_set):
super(GoogleDocPage, self).__init__(
# pylint: disable=line-too-long
url='https://docs.google.com/a/google.com/document/d/1XMAtPiVFZfItsMUOYl39v5YA8bcSPe4LDrVO25OdsCU/edit',
page_set=page_set)
self.scrollable_element_function = '''
function(callback) {
callback(document.getElementsByClassName('kix-appview-editor')[0]);
}'''
self.credentials = 'google'
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
action_runner.WaitForJavaScriptCondition(
'document.getElementsByClassName("kix-appview-editor").length')
class KeyDesktopSitesPageSet(story.StorySet):
""" Sites of Interest """
def __init__(self):
super(KeyDesktopSitesPageSet, self).__init__(
archive_data_file='data/key_desktop_sites.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
self.AddStory(FacebookPage(self))
self.AddStory(GmailPage(self))
self.AddStory(GoogleCalendarPage(self))
self.AddStory(GoogleDrivePage(self))
self.AddStory(GoogleDocPage(self))
urls_list = [
'http://www.google.com/nexus/5/#/',
'http://youtube.com',
'http://twitter.com/nbc',
'http://bbc.co.uk',
'http://imdb.com',
'http://espn.go.com',
'http://cnn.com',
'http://bbc.co.uk/news/',
'http://weather.com',
'http://livejournal.com',
'http://deviantart.com',
'http://foxnews.com',
'http://nbcnews.com',
'http://scribd.com',
'http://movies.yahoo.com',
'http://tv.yahoo.com',
'http://pandora.com',
'http://tmz.com',
'http://hulu.com',
'http://abcnews.go.com',
'http://youtube.com/videos',
'http://ndtv.com',
'http://money.cnn.com',
'http://msn.foxsports.com',
'http://cbsnews.com',
'http://wired.com',
'http://cnbc.com',
'http://sportsillustrated.cnn.com',
'http://home.disney.go.com',
'http://urbandictionary.com',
'http://rottentomatoes.com',
'http://foodnetwork.com',
'http://npr.org',
'http://gawker.com',
'http://last.fm',
'http://sky.com',
'http://eonline.com',
'http://egotastic.com',
'http://copyscape.com',
'http://mtv.com',
'http://ultimate-guitar.com',
'http://comcast.com',
'http://cbc.ca',
'http://fanfiction.net',
'http://discovery.com',
'http://deezer.com',
'http://metrolyrics.com',
'http://foxnews.com/entertainment/',
'http://cartoonnetwork.com',
'http://paypal.com',
'http://finance.yahoo.com',
'http://alibaba.com',
'http://bankofamerica.com',
'http://www.chase.com/',
'http://wellsfargo.com',
'http://skype.com',
'http://online.wsj.com',
'http://indeed.com',
'http://samsung.com',
'http://reuters.com',
'http://ups.com',
'http://forbes.com',
'http://clickbank.com',
'http://target.com',
'http://att.com',
'http://cj.com',
'http://constantcontact.com',
'http://ezinearticles.com',
'http://shutterstock.com',
'http://americanexpress.com',
'http://freelancer.com',
'http://istockphoto.com',
'http://fedex.com',
'http://verizonwireless.com',
'http://capitalone.com',
'http://bloomberg.com',
'http://monster.com',
'http://hdfcbank.com',
'http://fotolia.com',
'http://thesun.co.uk',
'http://zillow.com',
'http://nokia.com',
'http://tradedoubler.com',
'http://icicibank.com',
'http://123rf.com',
'http://elance.com',
'http://icbc.com.cn',
'http://news.cnet.com',
'http://verizon.com',
'http://careerbuilder.com',
'http://sears.com',
'http://getresponse.com',
'http://sitesell.com',
'http://manta.com',
'http://www.blogger.com/',
'http://avg.com',
'http://google.com/analytics/',
'http://go.com',
'http://flickr.com',
'http://aol.com',
'http://thepiratebay.se',
'http://zedo.com',
'http://about.com',
'http://stackoverflow.com',
'http://godaddy.com',
'http://mediafire.com',
'http://wordpress.org',
'http://adwords.google.com',
'http://imgur.com',
'http://4shared.com',
'http://vimeo.com',
'http://play.google.com/',
'http://badoo.com',
'http://aweber.com',
'http://mozilla.org',
'http://www.stumbleupon.com/stumbler/chimacintosh',
'http://www.google.com/adsense/',
'http://my.yahoo.com',
'http://sourceforge.net',
'http://answers.com',
'http://wordpress.org/extend/plugins/',
'http://photobucket.com',
'http://clicksor.com',
'http://google.com/reader/',
'http://store.apple.com',
'http://wikia.com',
'http://statcounter.com',
'http://fiverr.com',
'http://slideshare.net',
'http://salesforce.com',
'http://myspace.com',
'http://hootsuite.com',
'http://domaintools.com',
'http://rediff.com',
'http://soundcloud.com',
'http://download.cnet.com',
'http://archive.org',
'http://filestube.com',
'http://developers.facebook.com',
'http://hostgator.com',
'http://battle.net',
'http://pch.com',
'http://ign.com',
'http://pogo.com',
'http://miniclip.com',
'http://888.com',
'http://gamespot.com',
'http://steampowered.com',
'http://gamefaqs.com',
'http://xbox.com',
'http://games.yahoo.com',
'http://betfair.com',
'http://kongregate.com',
'http://ea.com',
'http://leagueoflegends.com',
'http://roblox.com',
'http://williamhill.com',
'http://playstation.com',
'http://wowhead.com',
'http://king.com',
'http://minecraft.net',
'http://chess.com',
'http://minecraftwiki.net',
'http://addictinggames.com',
'http://mmo-champion.com',
'http://runescape.com',
'http://travian.com',
'http://zone.msn.com',
'http://ubi.com',
'http://calottery.com',
'http://freeonlinegames.com',
'http://games.com',
'http://n4g.com',
'http://nvidia.com',
'http://callofduty.com',
'http://us.playstation.com',
'http://bet-at-home.com',
'http://gametrailers.com',
'http://teamliquid.net',
'http://nick.com/games/',
'http://planetminecraft.com',
'http://nintendo.com',
'http://popcap.com',
'http://gamehouse.com',
'http://curse.com',
'http://bulbagarden.net',
'http://rockstargames.com',
'http://partycasino.com',
'http://square-enix.com',
'http://perfectworld.com',
'http://nih.gov',
'http://webmd.com',
'http://ncbi.nlm.nih.gov/pubmed/',
'http://focusoncrohnsdisease.com',
'http://mayoclinic.com',
'http://mercola.com',
'http://drugs.com',
'http://menshealth.com',
'http://nlm.nih.gov/medlineplus/',
'http://weightwatchers.com',
'http://cdc.gov',
'http://caloriecount.about.com',
'http://patents.uspto.gov',
'http://psychologytoday.com',
'http://nhs.uk',
'http://medscape.com',
'http://foxnews.com/health/',
'http://who.int',
'http://healthboards.com',
'http://self.com',
'http://health.com',
'http://kidshealth.org',
'http://fda.gov',
'http://netdoctor.co.uk',
'http://prevention.com',
'http://makeupalley.com',
'http://stevepavlina.com',
'http://realage.com',
'http://fitnessmagazine.com',
'http://healthcentral.com',
'http://rxlist.com',
'http://vitals.com',
'http://totalbeauty.com',
'http://nuance.com',
'http://telegraph.co.uk/health/',
'http://drbatras.com',
'http://emedtv.com',
'http://bmj.com',
'http://medcohealth.com',
'http://webmd.com/skin-problems-and-treatments/default.htm',
'http://tums.ac.ir',
'http://apa.org',
'http://cancer.org',
'http://healthguru.com',
'http://earthclinic.com',
'http://curezone.com',
'http://beauty.about.com',
'http://www.kaiserpermanente.org/',
'http://drweil.com',
'http://24hourfitness.com',
'http://ehow.com',
'http://yelp.com',
'http://groupon.com/san-francisco',
'http://engadget.com',
'http://gsmarena.com',
'http://reviews.cnet.com',
'http://allrecipes.com',
'http://autos.yahoo.com',
'http://shopping.yahoo.com',
'http://gizmodo.com',
'http://marketwatch.com',
'http://babycenter.com',
'http://nextag.com',
'http://fixya.com',
'http://dpreview.com',
'http://tomshardware.com',
'http://theverge.com',
'http://instructables.com',
'http://cafemom.com',
'http://google.com/products',
'http://bbb.org',
'http://shopping.com',
'http://irs.gov',
'http://kbb.com',
'http://retailmenot.com',
'http://edmunds.com',
'http://mobile9.com',
'http://bankrate.com',
'http://fatwallet.com',
'http://fool.com',
'http://hgtv.com',
'http://coupons.com',
'http://apartmenttherapy.com',
'http://phonearena.com',
'http://shopzilla.com',
'http://marthastewart.com',
'http://consumerreports.org',
'http://pricegrabber.com',
'http://epinions.com',
'http://cooks.com',
'http://bhg.com',
'http://mouthshut.com',
'http://travel.state.gov',
'http://realsimple.com',
'http://opendns.com',
'http://gardenweb.com',
'http://blu-ray.com',
'http://thesaurus.com',
'http://espncricinfo.com',
'http://weebly.com',
'http://bbc.co.uk/sport/0/football/',
'http://y8.com',
'http://xe.com/ucc/',
'http://timeanddate.com',
'http://soccernet.espn.go.com',
'http://howstuffworks.com',
'http://en.wikipedia.org/wiki/Main_Page',
'http://reverso.net',
'http://timeanddate.com/worldclock/',
'http://sitepoint.com',
'http://usopen.org',
'http://stardoll.com',
'http://london2012.com',
'http://lego.com',
'http://000webhost.com',
'http://fifa.com',
'http://uefa.com',
'http://nick.com',
'http://girlsgogames.com',
'http://pbskids.org',
'http://thestar.com',
'http://dynamicdrive.com',
'http://nickjr.com',
'http://manutd.com',
'http://earthquake.usgs.gov',
'http://khanacademy.org',
'http://barbie.com',
'http://sciencedaily.com',
'http://gocomics.com',
'http://webdeveloper.com',
'http://www2.warnerbros.com',
'http://jpl.nasa.gov',
'http://yola.com',
'http://bom.gov.au',
'http://nationalpost.com',
'http://booking.com',
'http://tripadvisor.com',
'http://agoda.com',
'http://xe.com',
'http://expedia.com',
'http://metacafe.com',
'http://priceline.com',
'http://southwest.com',
'http://cracked.com',
'http://kayak.com',
'http://travelocity.com',
'http://united.com',
'http://delta.com',
'http://ryanair.com',
'http://lonelyplanet.com',
'http://orbitz.com',
'http://aa.com',
'http://easyjet.com',
'http://hilton.com',
'http://travel.yahoo.com',
'http://marriott.com',
'http://couchsurfing.org',
'http://hotwire.com',
'http://autoblog.com',
'http://lufthansa.com',
'http://theonion.com',
'http://britishairways.com',
'http://travelzoo.com',
'http://ebaumsworld.com',
'http://emirates.com',
'http://venere.com',
'http://wikitravel.org',
'http://jal.co.jp',
'http://collegehumor.com',
'http://ford.com',
'http://vrbo.com',
'http://opentable.com',
'http://hyatt.com',
'http://klm.com',
'http://airberlin.com',
'http://usairways.com',
'http://skyscanner.net',
'http://timeout.com',
'http://homeaway.com',
'http://lonelyplanet.com/thorntree/',
'http://virgin-atlantic.com',
'http://news.yahoo.com',
'http://huffingtonpost.com',
'http://news.google.com',
'http://reddit.com',
'http://guardian.co.uk',
'http://timesofindia.indiatimes.com',
'http://washingtonpost.com',
'http://usatoday.com',
'http://drudgereport.com',
'http://latimes.com',
'http://wunderground.com',
'http://accuweather.com',
'http://examiner.com',
'http://news.com.au',
'http://time.com',
'http://alarabiya.net',
'http://businessweek.com',
'http://smh.com.au',
'http://weather.yahoo.com',
'http://foxnews.com/politics/',
'http://economictimes.indiatimes.com',
'http://nationalgeographic.com',
'http://ft.com',
'http://nypost.com',
'http://sfgate.com',
'http://topix.com',
'http://hindustantimes.com',
'http://chicagotribune.com',
'http://newsmax.com',
'http://breitbart.com',
'http://economist.com',
'http://theatlantic.com',
'http://prweb.com',
'http://theglobeandmail.com',
'http://answers.yahoo.com',
'http://wiki.answers.com',
'http://wordreference.com',
'http://thefreedictionary.com',
'http://dict.leo.org',
'http://w3.org',
'http://nlm.nih.gov',
'http://goodreads.com',
'http://mapquest.com',
'http://yellowpages.com',
'http://wiktionary.org',
'http://dict.cc',
'http://bing.com/maps/',
'http://whitepages.com',
'http://m-w.com',
'http://classmates.com',
'http://blackboard.com',
'http://justanswer.com',
'http://mit.edu',
'http://medterms.com',
'http://stanford.edu',
'http://brainyquote.com',
'http://harvard.edu',
'http://superpages.com',
'http://mylife.com',
'http://en.wiktionary.org',
'http://investopedia.com',
'http://lumosity.com',
'http://phoenix.edu',
'http://berkeley.edu',
'http://ecollege.com',
'http://ed.gov',
'http://yellowpages.sulekha.com',
'http://wisegeek.com',
'http://utexas.edu',
'http://wwp.greenwichmeantime.com',
'http://cornell.edu',
'http://psu.edu',
'http://maps.yahoo.com',
'http://linkedin.com/answers',
'http://yahoo.co.jp',
'http://translate.google.com',
'http://noaa.gov',
'http://ncbi.nlm.nih.gov',
'http://nhc.noaa.gov',
'http://ted.com',
'http://jma.go.jp',
'http://usgs.gov',
'http://care2.com',
'http://sciencedirect.com',
'http://intellicast.com',
'http://guardian.co.uk/technology',
'http://nature.com',
'http://wunderground.com/tropical/',
'http://ieee.org',
'http://elsevier.com',
'http://usda.gov',
'http://redorbit.com',
'http://scientificamerican.com',
'http://nps.gov',
'http://metoffice.gov.uk',
'http://space.com',
'http://foreignpolicy.com',
'http://bbc.co.uk/news/technology/',
'http://newscientist.com',
'http://livescience.com',
'http://jstor.org',
'http://mnn.com',
'http://foxnews.com/scitech/',
'http://census.gov',
'http://epa.gov',
'http://bls.gov',
'http://metric-conversions.org',
'http://news.nationalgeographic.com/index.rss',
'http://bbc.co.uk/news/science_and_environment/',
'http://colorado.edu',
'http://popsci.com',
'http://amazon.com',
'http://ebay.com',
'http://netflix.com',
'http://amazon.co.uk',
'http://walmart.com',
'http://ikea.com',
'http://bestbuy.com',
'http://multiply.com',
'http://newegg.com',
'http://homedepot.com',
'http://macys.com',
'http://livingsocial.com',
'http://gap.com',
'http://bodybuilding.com',
'http://kohls.com',
'http://barnesandnoble.com',
'http://lowes.com',
'http://zappos.com',
'http://overstock.com',
'http://legacy.com',
'http://staples.com',
'http://shutterfly.com',
'http://nike.com',
'http://nordstrom.com',
'http://pixmania.com',
'http://costco.com',
'http://bhphotovideo.com',
'http://hm.com',
'http://ticketmaster.com',
'http://jcpenney.com',
'http://walgreens.com',
'http://qvc.com',
'http://autotrader.com',
'http://tigerdirect.com',
'http://trademe.co.nz',
'http://sony.com',
'http://directv.com',
'http://buy.com',
'http://victoriassecret.com',
'http://cars.com',
'http://gamestop.com',
'http://cvs.com',
'http://dealextreme.com',
'http://cafepress.com',
'http://6pm.com',
'http://facebook.com/home.php#!/OccupyAirForce',
'http://deviantart.com/#catpath=anthro',
'http://shine.yahoo.com',
'http://match.com',
'http://siteadvisor.com',
'http://digg.com',
'http://hi5.com',
'http://ancestry.com',
'http://sulekha.com',
'http://europa.eu',
'http://biblegateway.com',
'http://slate.com',
'http://correios.com.br',
'http://wonderwall.msn.com',
'http://change.org',
'http://state.gov',
'http://salon.com',
'http://askmen.com',
'http://infowars.com',
'http://wnd.com',
'http://ec.europa.eu',
'http://justjared.com',
'http://sheknows.com',
'http://slashdot.org',
'http://newgrounds.com',
'http://weeklystandard.com',
'http://royalmail.com',
'http://snopes.com',
'http://lds.org',
'http://dailykos.com',
'http://complex.com',
'http://avaaz.org',
'http://aarp.org',
'http://theregister.co.uk',
'http://creativecommons.org',
'http://jw.org',
'http://peoplesmart.com',
'http://uspto.gov',
'http://uscis.gov',
'http://whitehouse.gov',
'http://townhall.com',
'http://sec.gov',
'http://sports.yahoo.com',
'http://nfl.com',
'http://mlb.mlb.com',
'http://cbssports.com',
'http://bleacherreport.com',
'http://livescore.com',
'http://espn.go.com/nfl/',
'http://sports.yahoo.com/nfl',
'http://espn.go.com/mlb/',
'http://premierleague.com',
'http://skysports.com',
'http://sports.yahoo.com/mlb',
'http://games.espn.go.com/frontpage',
'http://uk.eurosport.yahoo.com',
'http://baseball.fantasysports.yahoo.com',
'http://baseball.fantasysports.yahoo.com/b1',
'http://skysports.com/football/',
'http://nba.com',
'http://hattrick.org',
'http://wwe.com',
'http://telegraph.co.uk/sport/',
'http://rivals.com',
'http://sports.yahoo.com/fantasy',
'http://espn.go.com/nba/',
'http://scout.com',
'http://msn.foxsports.com/nfl',
'http://sports.yahoo.com/nfl/players/',
'http://guardian.co.uk/football',
'http://rotoworld.com',
'http://nascar.com',
'http://arsenal.com',
'http://formula1.com',
'http://yardbarker.com',
'http://pgatour.com',
'http://rei.com',
'http://liverpoolfc.tv',
'http://deadspin.com',
'http://sbnation.com',
'https://www.google.com',
'https://www.google.com/search?q=barack%20obama',
'https://maps.google.com',
'http://reader.google.com',
'https://plus.google.com/110031535020051778989/posts/2wP4KPPBMG8',
'https://plus.google.com/110031535020051778989/photos',
'http://googleblog.blogspot.com/',
'https://chrome.google.com/webstore/category/home',
'http://staff.tumblr.com/',
'http://mashable.com/',
'http://www.buzzfeed.com/celebrity',
'http://www.thedailybeast.com/',
'http://www.theverge.com/',
'http://techcrunch.com/',
'http://www.engadget.com/',
'http://gizmodo.com/',
'http://thinkprogress.org/?mobile=nc',
'http://gawker.com/',
'http://arstechnica.com/',
'http://boingboing.net/category/featured/',
'http://thenextweb.com/',
'http://politicalticker.blogs.cnn.com/',
'http://deadspin.com/',
'http://news.yahoo.com/',
'http://www.cnn.com/',
'http://www.nbcnews.com/',
'http://www.bbc.co.uk/news/',
'http://www.reddit.com/',
'http://my.yahoo.com/',
'http://www.foxnews.com/',
'http://www.guardiannews.com/uk-home',
'http://timesofindia.indiatimes.com/',
'http://online.wsj.com/home-page',
'http://www.forbes.com/home_usa/',
'http://www.washingtonpost.com/',
'http://www.usatoday.com/',
'http://drudgereport.com/',
'http://abcnews.go.com/',
'http://www.latimes.com/',
'http://www.bloomberg.com/',
'http://money.cnn.com/',
'http://www.news.com.au/',
'http://www.cbsnews.com/',
'http://www.examiner.com/',
'http://www.cnbc.com/',
'http://www.alarabiya.net/default.html',
'http://www.time.com/time/',
'http://www.foxnews.com/politics/index.html',
'http://www.smh.com.au/',
'http://www.businessweek.com/',
'http://www.nationalgeographic.com/',
# pylint: disable=line-too-long
'http://www.wunderground.com/cgi-bin/findweather/getForecast?query=94035&sp=KCAMOUNT24',
# pylint: disable=line-too-long
'http://www.accuweather.com/en/search-locations?zipcode=mountain%20view,%20ca',
'http://www.weather.com/weather/right-now/Mountain+View+CA+94043',
# pylint: disable=line-too-long
'http://weather.yahoo.com/united-states/california/mountain-view-12797130/',
'http://www.yr.no/place/Norway/Oslo/Oslo/Oslo/',
'http://www.metoffice.gov.uk/',
'http://www.intellicast.com/Local/Weather.aspx?location=USCA0746',
# pylint: disable=line-too-long
'http://www.shutterstock.com/cat.mhtml?searchterm=google&search_group=&lang=en&search_source=search_form',
'http://www.flickr.com/search/?q=monkeys&f=hp',
# pylint: disable=line-too-long
'http://www.flickr.com/photos/davidgutierrez/sets/72157604615916402/?page=3',
# pylint: disable=line-too-long
'http://www.flickr.com/photos/davidgutierrez/sets/72157604615916402/show/with/4403158307/',
'http://www.apple.com/iphone/',
'http://www.taobao.com/index_global.php',
'http://hootsuite.com/',
'http://www.android.com/',
'https://www.change.org/',
'http://www.nytimes.com/skimmer/#/Technology',
'http://www.glennbeck.com/',
'http://www.pengyou.com/mobile?from=loginAndroid',
'http://en.wikipedia.org/wiki/Cat',
'http://en.wikipedia.org/wiki/British_Royal_Family',
'http://9gag.com/gag/5202885',
'http://www.wowwiki.com/World_of_Warcraft:_Mists_of_Pandaria',
'http://twitter.github.com/bootstrap/',
# pylint: disable=line-too-long
'http://reviews.cnet.com/8301-13727_7-57431192-263/disable-elastic-scrolling-in-os-x/',
'http://mlb.com',
'http://thenounproject.com/zh-cn/',
'http://allrecipes.com/recipe/chicken-pot-pie-ix/',
'http://www.gamespot.com/',
'http://valleywag.com/',
# pylint: disable=line-too-long
'http://gawker.com/5939683/based-on-a-true-story-is-a-rotten-lie-i-hope-you-never-believe',
'http://www.imdb.com/title/tt0910970/',
'http://www.html5rocks.com/en/',
# pylint: disable=line-too-long
'http://athome.kimvallee.com/2010/03/why-to-splurge-on-a-high-end-dishwasher/',
('http://mlb.mlb.com/mlb/gameday/index.jsp?gid=2012_08_31_sfnmlb_chnmlb_1'
'&mode=wrap#gid=2012_08_31_sfnmlb_chnmlb_1&mode=box'),
'http://nytimes.com',
'http://arstechnica.com',
'http://pinterest.com',
'http://www.theregister.co.uk/',
'http://forum.xda-developers.com/',
'http://maps.google.com',
'https://www.google.com/search?num=10&hl=en&site=&tbm=isch&q=cats',
'http://code.google.com/p/chromium/issues/list',
('http://code.google.com/p/chromium/issues/detail?id=142038'
'&q=black%20screen%20amd&colspec=ID%20Pri%20Mstone%20ReleaseBlock%20OS'
'%20Area%20Feature%20Status%20Owner%20Summary'),
'http://mlb.mlb.com/index.jsp',
'http://www.nfl.com/',
'http://airbnb.github.com/infinity/demo-on.html',
'http://habrahabr.ru/post/149892/#habracut'
]
for url in urls_list:
self.AddStory(KeyDesktopSitesPage(url, self))
| bsd-3-clause |
adamfisk/littleshoot-client | server/common/appengine/patch/common/appenginepatch/ragendja/pyutils.py | 11 | 2758 | # -*- coding: utf-8 -*-
from django.utils._threading_local import local
def make_tls_property(default=None):
"""Creates a class-wide instance property with a thread-specific value."""
class TLSProperty(object):
def __init__(self):
self.local = local()
def __get__(self, instance, cls):
if not instance:
return self
return self.value
def __set__(self, instance, value):
self.value = value
def _get_value(self):
return getattr(self.local, 'value', default)
def _set_value(self, value):
self.local.value = value
value = property(_get_value, _set_value)
return TLSProperty()
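# A minimal usage sketch (the Settings class here is hypothetical, not part of
# this module): the property is shared class-wide, but every thread reads and
# writes its own value.
#
#     class Settings(object):
#         language = make_tls_property(default='en')
#
#     Settings().language = 'de'  # visible only in the current thread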
def getattr_by_path(obj, attr, *default):
"""Like getattr(), but can go down a hierarchy like 'attr.subattr'"""
value = obj
for part in attr.split('.'):
if not hasattr(value, part) and len(default):
return default[0]
value = getattr(value, part)
if callable(value):
value = value()
return value
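# For example (attribute chain assumed, not part of this module):
# getattr_by_path(request, 'user.profile.name', '') walks
# request.user.profile.name, calls any intermediate value that is callable,
# and returns '' instead of raising if a part is missing.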
def subdict(data, *attrs):
"""Returns a subset of the keys of a dictionary."""
result = {}
result.update([(key, data[key]) for key in attrs])
return result
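# For example: subdict({'a': 1, 'b': 2, 'c': 3}, 'a', 'c') == {'a': 1, 'c': 3}.
# Note that a missing key raises KeyError, since data[key] is accessed directly.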
def equal_lists(left, right):
"""
    Compares two lists and returns True if they contain the same elements,
    without requiring them to be in the same order.
"""
right = list(right)
if len(left) != len(right):
return False
for item in left:
if item in right:
del right[right.index(item)]
else:
return False
return True
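# For example: equal_lists([1, 2, 2], [2, 1, 2]) is True, while
# equal_lists([1, 2], [2, 2]) is False; multiplicity matters because each
# match is removed from the right-hand list.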
def object_list_to_table(headings, dict_list):
"""
    Converts a list of objects to a table-style list of rows with a heading row:
Example:
x.a = 1
x.b = 2
x.c = 3
y.a = 11
y.b = 12
y.c = 13
object_list_to_table(('a', 'b', 'c'), [x, y])
results in the following (dict keys reordered for better readability):
[
('a', 'b', 'c'),
(1, 2, 3),
(11, 12, 13),
]
"""
return [headings] + [tuple([getattr_by_path(row, heading, None)
for heading in headings])
for row in dict_list]
def dict_list_to_table(headings, dict_list):
"""
    Converts a list of dicts to a table-style list of rows with a heading row:
Example:
dict_list_to_table(('a', 'b', 'c'),
[{'a': 1, 'b': 2, 'c': 3}, {'a': 11, 'b': 12, 'c': 13}])
results in the following (dict keys reordered for better readability):
[
('a', 'b', 'c'),
(1, 2, 3),
(11, 12, 13),
]
"""
return [headings] + [tuple([row[heading] for heading in headings])
for row in dict_list]
| gpl-2.0 |
HyperBaton/ansible | lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py | 13 | 4706 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: elasticache_subnet_group
version_added: "2.0"
short_description: manage ElastiCache subnet groups
description:
- Creates, modifies, and deletes ElastiCache subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
required: true
choices: [ 'present' , 'absent' ]
type: str
name:
description:
- Database subnet group identifier.
required: true
type: str
description:
description:
- ElastiCache subnet group description. Only set when a new group is added.
type: str
subnets:
description:
- List of subnet IDs that make up the ElastiCache subnet group.
type: list
elements: str
author: "Tim Mahoney (@timmahoney)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Add or change a subnet group
- elasticache_subnet_group:
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a subnet group
- elasticache_subnet_group:
state: absent
name: norwegian-blue
'''
try:
import boto
from boto.elasticache import connect_to_region
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, get_aws_connection_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True),
description=dict(required=False),
subnets=dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or {}
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
module.fail_json(msg=str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
"""Get an elasticache connection"""
try:
conn = connect_to_region(region_name=region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=e.message)
try:
changed = False
exists = False
try:
matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except BotoServerError as e:
if e.error_code != 'CacheSubnetGroupNotFoundFault':
module.fail_json(msg=e.error_message)
if state == 'absent':
if exists:
conn.delete_cache_subnet_group(group_name)
changed = True
else:
if not exists:
new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
changed = True
else:
changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError as e:
if e.error_message != 'No modifications were requested.':
module.fail_json(msg=e.error_message)
else:
changed = False
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| gpl-3.0 |
tobegit3hub/deep_cnn | java_predict_client/src/main/proto/tensorflow/python/training/moving_averages_test.py | 2 | 12596 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for moving_averages.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.training import moving_averages
class MovingAveragesTest(tf.test.TestCase):
def testAssignMovingAverage(self):
with self.test_session():
var = tf.Variable([10.0, 11.0])
val = tf.constant([1.0, 2.0], tf.float32)
decay = 0.25
assign = moving_averages.assign_moving_average(var, val, decay)
tf.global_variables_initializer().run()
self.assertAllClose([10.0, 11.0], var.eval())
assign.op.run()
self.assertAllClose([10.0 * 0.25 + 1.0 * (1.0 - 0.25),
11.0 * 0.25 + 2.0 * (1.0 - 0.25)],
var.eval())
def testAssignMovingAverageWithZeroDebias(self):
with self.test_session():
var = tf.Variable([0.0, 0.0])
val = tf.constant([1.0, 2.0], tf.float32)
decay = 0.25
assign = moving_averages.assign_moving_average(
var, val, decay, zero_debias=True)
tf.global_variables_initializer().run()
self.assertAllClose([0.0, 0.0], var.eval())
assign.op.run()
self.assertAllClose([1.0 * (1.0 - 0.25) / (1 - 0.25 ** 2),
2.0 * (1.0 - 0.25) / (1 - 0.25 ** 2)],
var.eval())
def testWeightedMovingAverage(self):
with self.test_session() as sess:
decay = 0.5
weight = tf.placeholder(tf.float32, [])
val = tf.placeholder(tf.float32, [])
wma = moving_averages.weighted_moving_average(val, decay, weight)
tf.global_variables_initializer().run()
# Get the first weighted moving average.
val_1 = 3.0
weight_1 = 4.0
wma_array = sess.run(
wma, feed_dict={val: val_1, weight: weight_1})
numerator_1 = val_1 * weight_1 * (1.0 - decay)
denominator_1 = weight_1 * (1.0 - decay)
self.assertAllClose(numerator_1 / denominator_1, wma_array)
# Get the second weighted moving average.
val_2 = 11.0
weight_2 = 22.0
wma_array = sess.run(
wma, feed_dict={val: val_2, weight: weight_2})
numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay)
denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay)
self.assertAllClose(numerator_2 / denominator_2, wma_array)
def _Repeat(value, dim):
if dim == 1:
return value
return [value] * dim
class ExponentialMovingAverageTest(tf.test.TestCase):
def _CheckDecay(self, ema, actual_decay, dim):
tens = _Repeat(10.0, dim)
thirties = _Repeat(30.0, dim)
var0 = tf.Variable(tens, name="v0")
var1 = tf.Variable(thirties, name="v1")
tf.global_variables_initializer().run()
# Note that tensor2 is not a Variable but just a plain Tensor resulting
# from the sum operation.
tensor2 = var0 + var1
update = ema.apply([var0, var1, tensor2])
avg0 = ema.average(var0)
avg1 = ema.average(var1)
avg2 = ema.average(tensor2)
self.assertItemsEqual([var0, var1], tf.moving_average_variables())
self.assertFalse(avg0 in tf.trainable_variables())
self.assertFalse(avg1 in tf.trainable_variables())
self.assertFalse(avg2 in tf.trainable_variables())
tf.global_variables_initializer().run()
self.assertEqual("v0/ExponentialMovingAverage:0", avg0.name)
self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name)
self.assertEqual("add/ExponentialMovingAverage:0", avg2.name)
# Check initial values.
self.assertAllClose(tens, var0.eval())
self.assertAllClose(thirties, var1.eval())
self.assertAllClose(_Repeat(10.0 + 30.0, dim), tensor2.eval())
# Check that averages are initialized correctly.
self.assertAllClose(tens, avg0.eval())
self.assertAllClose(thirties, avg1.eval())
# Note that averages of Tensor's initialize to zeros_like since no value
# of the Tensor is known because the Op has not been run (yet).
self.assertAllClose(_Repeat(0.0, dim), avg2.eval())
# Update the averages and check.
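    # Each update applies avg = decay * avg + (1 - decay) * value, which is
    # what the expectations below compute by hand.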
update.run()
dk = actual_decay
expected = _Repeat(10.0 * dk + 10.0 * (1 - dk), dim)
self.assertAllClose(expected, avg0.eval())
expected = _Repeat(30.0 * dk + 30.0 * (1 - dk), dim)
self.assertAllClose(expected, avg1.eval())
expected = _Repeat(0.0 * dk + (10.0 + 30.0) * (1 - dk), dim)
self.assertAllClose(expected, avg2.eval())
# Again, update the averages and check.
update.run()
expected = _Repeat((10.0 * dk + 10.0 * (1 - dk)) * dk + 10.0 * (1 - dk),
dim)
self.assertAllClose(expected, avg0.eval())
expected = _Repeat((30.0 * dk + 30.0 * (1 - dk)) * dk + 30.0 * (1 - dk),
dim)
self.assertAllClose(expected, avg1.eval())
expected = _Repeat(((0.0 * dk + (10.0 + 30.0) * (1 - dk)) * dk +
(10.0 + 30.0) * (1 - dk)),
dim)
self.assertAllClose(expected, avg2.eval())
def testAverageVariablesNoNumUpdates_Scalar(self):
with self.test_session():
ema = tf.train.ExponentialMovingAverage(0.25)
self._CheckDecay(ema, actual_decay=0.25, dim=1)
def testAverageVariablesNoNumUpdates_Vector(self):
with self.test_session():
ema = tf.train.ExponentialMovingAverage(0.25)
self._CheckDecay(ema, actual_decay=0.25, dim=5)
def testAverageVariablesNumUpdates_Scalar(self):
with self.test_session():
# With num_updates 1, the decay applied is 0.1818
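      # TensorFlow caps the effective decay at
      # min(decay, (1 + num_updates) / (10 + num_updates)); with num_updates=1
      # that is min(0.25, 2 / 11) = 2 / 11 ~= 0.181818.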
ema = tf.train.ExponentialMovingAverage(0.25, num_updates=1)
self._CheckDecay(ema, actual_decay=0.181818, dim=1)
def testAverageVariablesNumUpdates_Vector(self):
with self.test_session():
# With num_updates 1, the decay applied is 0.1818
ema = tf.train.ExponentialMovingAverage(0.25, num_updates=1)
self._CheckDecay(ema, actual_decay=0.181818, dim=5)
def testAverageVariablesWithControlDeps(self):
with self.test_session() as sess:
v0 = tf.Variable(0, name="v0")
add_to_v0 = v0.assign_add(1)
v1 = tf.Variable([10.0], name="v1")
assign_to_v1 = v1.assign([20.0])
ema = tf.train.ExponentialMovingAverage(0.25)
with tf.control_dependencies([add_to_v0]):
ema_op = ema.apply([v1])
# the moving average of v1 should not have any control inputs
v1_avg = ema.average(v1)
self.assertEqual([], v1_avg.initializer.control_inputs)
self.assertEqual([], v1_avg.value().op.control_inputs)
self.assertEqual([], v1_avg.ref().op.control_inputs)
# We should be able to initialize v1_avg before v0.
sess.run(v1_avg.initializer)
sess.run(v0.initializer)
self.assertEqual([10.0], sess.run(v1_avg))
# running ema_op should add to v0 (in addition to updating v1_avg)
sess.run(assign_to_v1)
sess.run(ema_op)
self.assertEqual(1, sess.run(v0))
self.assertEqual([17.5], sess.run(v1_avg))
def testAverageVariablesNames(self):
with self.test_session():
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(30.0, name="v1")
# Add a non-trainable variable.
v2 = tf.Variable(20.0, name="v2", trainable=False)
tensor2 = v0 + v1
ema = tf.train.ExponentialMovingAverage(0.25, name="foo_avg")
self.assertEqual("v0/foo_avg", ema.average_name(v0))
self.assertEqual("v1/foo_avg", ema.average_name(v1))
self.assertEqual("add/foo_avg", ema.average_name(tensor2))
ema.apply([v0, v1, tensor2])
vars_to_restore = ema.variables_to_restore()
# vars_to_restore should contain the following:
# {v0/foo_avg : v0,
# v1/foo_avg : v1,
# add/foo_avg : add/foo_avg
# v2 : v2}
self.assertEqual(sorted(vars_to_restore.keys()),
sorted([ema.average_name(v0),
ema.average_name(v1),
ema.average_name(tensor2),
v2.op.name]))
self.assertEqual(ema.average_name(v0), ema.average(v0).op.name)
self.assertEqual(ema.average_name(v1), ema.average(v1).op.name)
self.assertEqual(ema.average_name(tensor2), ema.average(tensor2).op.name)
def testAverageVariablesNamesRespectScope(self):
# See discussion on #2740.
with self.test_session():
with tf.variable_scope("scope1"):
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(30.0, name="v1")
# Add a non-trainable variable.
v2 = tf.Variable(20.0, name="v2", trainable=False)
tensor2 = v0 + v1
with tf.variable_scope("scope2"):
ema = tf.train.ExponentialMovingAverage(0.25, name="foo_avg")
self.assertEqual("scope2/scope1/v0/foo_avg", ema.average_name(v0))
self.assertEqual("scope2/scope1/v1/foo_avg", ema.average_name(v1))
self.assertEqual("scope2/scope1/add/foo_avg", ema.average_name(tensor2))
ema.apply([v0, v1, tensor2])
vars_to_restore = ema.variables_to_restore()
# vars_to_restore should contain the following:
# {scope2/scope1/v0/foo_avg : v0,
# scope2/scope1/v1/foo_avg : v1,
# scope2/scope1/add/foo_avg : add/foo_avg
# scope1/v2 : v2}
self.assertEqual(sorted(vars_to_restore.keys()),
sorted([ema.average_name(v0),
ema.average_name(v1),
ema.average_name(tensor2),
v2.op.name]))
self.assertEqual(ema.average_name(v0), ema.average(v0).op.name)
self.assertEqual(ema.average_name(v1), ema.average(v1).op.name)
self.assertEqual(ema.average_name(tensor2),
ema.average(tensor2).op.name)
def testSubsetAverageVariablesNames(self):
with self.test_session():
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(30.0, name="v1")
# Add a non-trainable variable.
v2 = tf.Variable(20.0, name="v2", trainable=False)
tensor2 = v0 + v1
ema = tf.train.ExponentialMovingAverage(0.25, name="foo_avg")
self.assertEqual("v0/foo_avg", ema.average_name(v0))
self.assertEqual("v1/foo_avg", ema.average_name(v1))
self.assertEqual("add/foo_avg", ema.average_name(tensor2))
vars_to_restore = ema.variables_to_restore([v0, tensor2])
# vars_to_restore should contain the following:
# {v0/foo_avg : v0,
# add/foo_avg : add
# v1 : v1,
# v2 : v2}
self.assertEqual(sorted(vars_to_restore.keys()),
sorted([ema.average_name(v0),
ema.average_name(tensor2),
v1.op.name,
v2.op.name]))
ema.apply([v0, v1, tensor2])
self.assertEqual(ema.average_name(v0), ema.average(v0).op.name)
self.assertEqual(ema.average_name(v1), ema.average(v1).op.name)
self.assertEqual(ema.average_name(tensor2), ema.average(tensor2).op.name)
def testAverageVariablesDeviceAssignment(self):
with tf.device("/job:dev_v0"):
v0 = tf.Variable(10.0, name="v0")
with tf.device("/job:dev_v1"):
v1 = gen_state_ops._variable(shape=[1], dtype=tf.float32,
name="v1", container="", shared_name="")
v1.set_shape([1])
tensor2 = v0 + v1
ema = tf.train.ExponentialMovingAverage(0.25, name="foo_avg")
with tf.device("/job:default"):
ema.apply([v0, v1, tensor2])
self.assertDeviceEqual("/job:dev_v0", ema.average(v0).device)
self.assertDeviceEqual("/job:dev_v1", ema.average(v1).device)
# However, the colocation property is maintained.
self.assertEqual([b"loc:@v1"],
ema.average(v1).op.colocation_groups())
self.assertDeviceEqual("/job:default", ema.average(tensor2).device)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
bjwbell/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/response.py | 114 | 14756 | from collections import OrderedDict
from datetime import datetime, timedelta
import Cookie
import json
import types
import uuid
import socket
from constants import response_codes
from logger import get_logger
missing = object()
class Response(object):
"""Object representing the response to a HTTP request
:param handler: RequestHandler being used for this response
:param request: Request that this is the response for
.. attribute:: request
Request associated with this Response.
.. attribute:: encoding
The encoding to use when converting unicode to strings for output.
.. attribute:: add_required_headers
Boolean indicating whether mandatory headers should be added to the
response.
.. attribute:: send_body_for_head_request
Boolean, default False, indicating whether the body content should be
sent when the request method is HEAD.
.. attribute:: explicit_flush
Boolean indicating whether output should be flushed automatically or only
when requested.
.. attribute:: writer
The ResponseWriter for this response
.. attribute:: status
Status tuple (code, message). Can be set to an integer, in which case the
message part is filled in automatically, or a tuple.
.. attribute:: headers
List of HTTP headers to send with the response. Each item in the list is a
tuple of (name, value).
.. attribute:: content
The body of the response. This can either be a string or a iterable of response
parts. If it is an iterable, any item may be a string or a function of zero
parameters which, when called, returns a string."""
def __init__(self, handler, request):
self.request = request
self.encoding = "utf8"
self.add_required_headers = True
self.send_body_for_head_request = False
self.explicit_flush = False
self.close_connection = False
self.writer = ResponseWriter(handler, self)
self._status = (200, None)
self.headers = ResponseHeaders()
self.content = []
self.logger = get_logger()
@property
def status(self):
return self._status
@status.setter
def status(self, value):
if hasattr(value, "__len__"):
if len(value) != 2:
raise ValueError
else:
self._status = (int(value[0]), str(value[1]))
else:
self._status = (int(value), None)
def set_cookie(self, name, value, path="/", domain=None, max_age=None,
expires=None, secure=False, httponly=False, comment=None):
"""Set a cookie to be sent with a Set-Cookie header in the
response
:param name: String name of the cookie
:param value: String value of the cookie
        :param max_age: datetime.timedelta or int representing the time (in seconds)
until the cookie expires
:param path: String path to which the cookie applies
:param domain: String domain to which the cookie applies
:param secure: Boolean indicating whether the cookie is marked as secure
:param httponly: Boolean indicating whether the cookie is marked as
HTTP Only
:param comment: String comment
:param expires: datetime.datetime or datetime.timedelta indicating a
time or interval from now when the cookie expires
"""
days = dict((i+1, name) for i, name in enumerate(["jan", "feb", "mar",
"apr", "may", "jun",
"jul", "aug", "sep",
"oct", "nov", "dec"]))
if value is None:
value = ''
max_age = 0
expires = timedelta(days=-1)
if isinstance(expires, timedelta):
expires = datetime.utcnow() + expires
if expires is not None:
expires_str = expires.strftime("%d %%s %Y %H:%M:%S GMT")
expires_str = expires_str % days[expires.month]
expires = expires_str
if max_age is not None:
if hasattr(max_age, "total_seconds"):
max_age = int(max_age.total_seconds())
max_age = "%.0d" % max_age
m = Cookie.Morsel()
def maybe_set(key, value):
if value is not None and value is not False:
m[key] = value
m.set(name, value, value)
maybe_set("path", path)
maybe_set("domain", domain)
maybe_set("comment", comment)
maybe_set("expires", expires)
maybe_set("max-age", max_age)
maybe_set("secure", secure)
maybe_set("httponly", httponly)
self.headers.append("Set-Cookie", m.OutputString())
def unset_cookie(self, name):
"""Remove a cookie from those that are being sent with the response"""
cookies = self.headers.get("Set-Cookie")
parser = Cookie.BaseCookie()
for cookie in cookies:
parser.load(cookie)
if name in parser.keys():
del self.headers["Set-Cookie"]
for m in parser.values():
if m.key != name:
self.headers.append(("Set-Cookie", m.OutputString()))
def delete_cookie(self, name, path="/", domain=None):
"""Delete a cookie on the client by setting it to the empty string
and to expire in the past"""
self.set_cookie(name, None, path=path, domain=domain, max_age=0,
expires=timedelta(days=-1))
def iter_content(self):
"""Iterator returning chunks of response body content.
If any part of the content is a function, this will be called
and the resulting value (if any) returned."""
if type(self.content) in types.StringTypes:
yield self.content
else:
for item in self.content:
if hasattr(item, "__call__"):
value = item()
else:
value = item
if value:
yield value
def write_status_headers(self):
"""Write out the status line and headers for the response"""
self.writer.write_status(*self.status)
for item in self.headers:
self.writer.write_header(*item)
self.writer.end_headers()
def write_content(self):
"""Write out the response content"""
if self.request.method != "HEAD" or self.send_body_for_head_request:
for item in self.iter_content():
self.writer.write_content(item)
def write(self):
"""Write the whole response"""
self.write_status_headers()
self.write_content()
def set_error(self, code, message=""):
"""Set the response status headers and body to indicate an
error"""
err = {"code": code,
"message": message}
data = json.dumps({"error": err})
self.status = code
self.headers = [("Content-Type", "application/json"),
("Content-Length", len(data))]
self.content = data
if code == 500:
self.logger.error(message)
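# Illustrative sketch of the content convention documented above: items in
# Response.content may be plain strings or zero-argument callables, which
# iter_content() invokes lazily. The stand-in data here is hypothetical.
def _example_content_parts():
    parts = ["static part ", lambda: "generated part ", "final part"]
    rendered = []
    for item in parts:
        # Mirror iter_content(): call callables, pass strings through.
        rendered.append(item() if hasattr(item, "__call__") else item)
    return "".join(rendered)  # "static part generated part final part"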
class MultipartContent(object):
def __init__(self, boundary=None, default_content_type=None):
self.items = []
if boundary is None:
boundary = str(uuid.uuid4())
self.boundary = boundary
self.default_content_type = default_content_type
def __call__(self):
boundary = "--" + self.boundary
rv = ["", boundary]
for item in self.items:
rv.append(str(item))
rv.append(boundary)
rv[-1] += "--"
return "\r\n".join(rv)
def append_part(self, data, content_type=None, headers=None):
if content_type is None:
content_type = self.default_content_type
self.items.append(MultipartPart(data, content_type, headers))
def __iter__(self):
#This is hackish; when writing the response we need an iterable
#or a string. For a multipart/byterange response we want an
#iterable that contains a single callable; the MultipartContent
#object itself
yield self
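# Illustrative sketch: rendering a multipart body. The boundary is pinned
# here for a predictable result; callers normally let __init__ pick a uuid4.
def _example_multipart_body():
    content = MultipartContent(boundary="BOUNDARY")
    content.append_part("part one", content_type="text/plain")
    content.append_part("part two", content_type="text/plain")
    # Calling the object joins the rendered parts with CRLF around
    # "--BOUNDARY" separators, terminating with "--BOUNDARY--".
    return content()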
class MultipartPart(object):
def __init__(self, data, content_type=None, headers=None):
self.headers = ResponseHeaders()
if content_type is not None:
self.headers.set("Content-Type", content_type)
if headers is not None:
for name, value in headers:
if name.lower() == "content-type":
func = self.headers.set
else:
func = self.headers.append
func(name, value)
self.data = data
def __str__(self):
rv = []
for item in self.headers:
rv.append("%s: %s" % item)
rv.append("")
rv.append(self.data)
return "\r\n".join(rv)
class ResponseHeaders(object):
"""Dictionary-like object holding the headers for the response"""
def __init__(self):
self.data = OrderedDict()
def set(self, key, value):
"""Set a header to a specific value, overwriting any previous header
with the same name
:param key: Name of the header to set
:param value: Value to set the header to
"""
self.data[key.lower()] = (key, [value])
def append(self, key, value):
"""Add a new header with a given name, not overwriting any existing
headers with the same name
:param key: Name of the header to add
:param value: Value to set for the header
"""
if key.lower() in self.data:
self.data[key.lower()][1].append(value)
else:
self.set(key, value)
def get(self, key, default=missing):
"""Get the set values for a particular header."""
try:
return self[key]
except KeyError:
if default is missing:
return []
return default
def __getitem__(self, key):
"""Get a list of values for a particular header
"""
return self.data[key.lower()][1]
def __delitem__(self, key):
del self.data[key.lower()]
def __contains__(self, key):
return key.lower() in self.data
def __setitem__(self, key, value):
self.set(key, value)
def __iter__(self):
for key, values in self.data.itervalues():
for value in values:
yield key, value
def items(self):
return list(self)
def update(self, items_iter):
for name, value in items_iter:
self.set(name, value)
def __repr__(self):
return repr(self.data)
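# Illustrative sketch of the case-insensitive set/append semantics above:
# set() replaces every value stored under a name, append() accumulates them.
def _example_response_headers():
    headers = ResponseHeaders()
    headers.set("Content-Type", "text/html")
    headers.append("Set-Cookie", "a=1")
    headers.append("Set-Cookie", "b=2")
    assert headers["content-type"] == ["text/html"]
    assert headers.get("Set-Cookie") == ["a=1", "b=2"]
    return headers.items()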
class ResponseWriter(object):
"""Object providing an API to write out a HTTP response.
:param handler: The RequestHandler being used.
:param response: The Response associated with this writer.
After each part of the response is written, the output is
flushed unless response.explicit_flush is False, in which case
the user must call .flush() explicitly."""
def __init__(self, handler, response):
self._wfile = handler.wfile
self._response = response
self._handler = handler
self._headers_seen = set()
self._headers_complete = False
self.content_written = False
self.request = response.request
def write_status(self, code, message=None):
"""Write out the status line of a response.
:param code: The integer status code of the response.
:param message: The message of the response. Defaults to the message commonly used
with the status code."""
if message is None:
if code in response_codes:
message = response_codes[code][0]
else:
message = ''
self.write("%s %d %s\r\n" %
(self._response.request.protocol_version, code, message))
def write_header(self, name, value):
"""Write out a single header for the response.
:param name: Name of the header field
:param value: Value of the header field
"""
self._headers_seen.add(name.lower())
self.write("%s: %s\r\n" % (name, value))
if not self._response.explicit_flush:
self.flush()
def write_default_headers(self):
for name, f in [("Server", self._handler.version_string),
("Date", self._handler.date_time_string)]:
if name.lower() not in self._headers_seen:
self.write_header(name, f())
if (type(self._response.content) in (str, unicode) and
"content-length" not in self._headers_seen):
#Would be nice to avoid double-encoding here
self.write_header("Content-Length", len(self.encode(self._response.content)))
def end_headers(self):
"""Finish writing headers and write the separator.
Unless add_required_headers on the response is False,
this will also add HTTP-mandated headers that have not yet been supplied
to the response headers"""
if self._response.add_required_headers:
self.write_default_headers()
self.write("\r\n")
if "content-length" not in self._headers_seen:
self._response.close_connection = True
if not self._response.explicit_flush:
self.flush()
self._headers_complete = True
def write_content(self, data):
"""Write the body of the response."""
self.write(self.encode(data))
if not self._response.explicit_flush:
self.flush()
def write(self, data):
"""Write directly to the response, converting unicode to bytes
according to response.encoding. Does not flush."""
self.content_written = True
try:
self._wfile.write(self.encode(data))
except socket.error:
# This can happen if the socket got closed by the remote end
pass
def encode(self, data):
"""Convert unicode to bytes according to response.encoding."""
if isinstance(data, str):
return data
elif isinstance(data, unicode):
return data.encode(self._response.encoding)
else:
raise ValueError
def flush(self):
"""Flush the output."""
try:
self._wfile.flush()
except socket.error:
# This can happen if the socket got closed by the remote end
pass
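# Illustrative sketch (assumptions: minimal stand-in handler/request/response
# objects, which are not part of wptserve's API, and response_codes/socket
# already imported earlier in this module) showing the call order that
# Response.write() drives: status line, headers, separator, then body.
def _example_writer_usage():
    import StringIO

    class _FakeRequest(object):
        protocol_version = "HTTP/1.1"
        method = "GET"

    class _FakeResponse(object):
        encoding = "utf8"
        explicit_flush = False
        add_required_headers = False
        close_connection = False
        content = "body"
        request = _FakeRequest()

    class _FakeHandler(object):
        wfile = StringIO.StringIO()

    handler = _FakeHandler()
    writer = ResponseWriter(handler, _FakeResponse())
    writer.write_status(200)                    # "HTTP/1.1 200 OK"
    writer.write_header("Content-Length", "4")
    writer.end_headers()
    writer.write_content("body")
    return handler.wfile.getvalue()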
| mpl-2.0 |
ForNeVeR/styx-miranda | styx-client/protobuf-2.5.0/python/google/protobuf/internal/message_factory_test.py | 213 | 4869 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.message_factory."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
import unittest
from google.protobuf import descriptor_pb2
from google.protobuf.internal import factory_test1_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
class MessageFactoryTest(unittest.TestCase):
def setUp(self):
self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test1_pb2.DESCRIPTOR.serialized_pb)
self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test2_pb2.DESCRIPTOR.serialized_pb)
def _ExerciseDynamicClass(self, cls):
msg = cls()
msg.mandatory = 42
msg.nested_factory_2_enum = 0
msg.nested_factory_2_message.value = 'nested message value'
msg.factory_1_message.factory_1_enum = 1
msg.factory_1_message.nested_factory_1_enum = 0
msg.factory_1_message.nested_factory_1_message.value = (
'nested message value')
msg.factory_1_message.scalar_value = 22
msg.factory_1_message.list_value.extend(['one', 'two', 'three'])
msg.factory_1_message.list_value.append('four')
msg.factory_1_enum = 1
msg.nested_factory_1_enum = 0
msg.nested_factory_1_message.value = 'nested message value'
msg.circular_message.mandatory = 1
msg.circular_message.circular_message.mandatory = 2
msg.circular_message.scalar_value = 'one deep'
msg.scalar_value = 'zero deep'
msg.list_value.extend(['four', 'three', 'two'])
msg.list_value.append('one')
msg.grouped.add()
msg.grouped[0].part_1 = 'hello'
msg.grouped[0].part_2 = 'world'
msg.grouped.add(part_1='testing', part_2='123')
msg.loop.loop.mandatory = 2
msg.loop.loop.loop.loop.mandatory = 4
serialized = msg.SerializeToString()
converted = factory_test2_pb2.Factory2Message.FromString(serialized)
reserialized = converted.SerializeToString()
self.assertEquals(serialized, reserialized)
result = cls.FromString(reserialized)
self.assertEquals(msg, result)
def testGetPrototype(self):
db = descriptor_database.DescriptorDatabase()
pool = descriptor_pool.DescriptorPool(db)
db.Add(self.factory_test1_fd)
db.Add(self.factory_test2_fd)
factory = message_factory.MessageFactory()
cls = factory.GetPrototype(pool.FindMessageTypeByName(
'net.proto2.python.internal.Factory2Message'))
self.assertIsNot(cls, factory_test2_pb2.Factory2Message)
self._ExerciseDynamicClass(cls)
cls2 = factory.GetPrototype(pool.FindMessageTypeByName(
'net.proto2.python.internal.Factory2Message'))
self.assertIs(cls, cls2)
def testGetMessages(self):
messages = message_factory.GetMessages([self.factory_test2_fd,
self.factory_test1_fd])
self.assertContainsSubset(
['net.proto2.python.internal.Factory2Message',
'net.proto2.python.internal.Factory1Message'],
messages.keys())
self._ExerciseDynamicClass(
messages['net.proto2.python.internal.Factory2Message'])
if __name__ == '__main__':
unittest.main()
| mit |
DCSaunders/tensorflow | tensorflow/contrib/session_bundle/gc_test.py | 24 | 4527 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for session_bundle.gc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
def tearDownModule():
gfile.DeleteRecursively(tf.test.get_temp_dir())
class GcTest(test_util.TensorFlowTestCase):
def testLargestExportVersions(self):
paths = [gc.Path("/foo", 8), gc.Path("/foo", 9), gc.Path("/foo", 10)]
newest = gc.largest_export_versions(2)
n = newest(paths)
self.assertEquals(n, [gc.Path("/foo", 9), gc.Path("/foo", 10)])
def testLargestExportVersionsDoesNotDeleteZeroFolder(self):
paths = [gc.Path("/foo", 0), gc.Path("/foo", 3)]
newest = gc.largest_export_versions(2)
n = newest(paths)
self.assertEquals(n, [gc.Path("/foo", 0), gc.Path("/foo", 3)])
def testModExportVersion(self):
paths = [gc.Path("/foo", 4), gc.Path("/foo", 5), gc.Path("/foo", 6),
gc.Path("/foo", 9)]
mod = gc.mod_export_version(2)
self.assertEquals(mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 6)])
mod = gc.mod_export_version(3)
self.assertEquals(mod(paths), [gc.Path("/foo", 6), gc.Path("/foo", 9)])
def testOneOfEveryNExportVersions(self):
paths = [gc.Path("/foo", 0), gc.Path("/foo", 1), gc.Path("/foo", 3),
gc.Path("/foo", 5), gc.Path("/foo", 6), gc.Path("/foo", 7),
gc.Path("/foo", 8), gc.Path("/foo", 33)]
one_of = gc.one_of_every_n_export_versions(3)
self.assertEquals(one_of(paths),
[gc.Path("/foo", 3), gc.Path("/foo", 6),
gc.Path("/foo", 8), gc.Path("/foo", 33)])
def testOneOfEveryNExportVersionsZero(self):
# Zero is a special case since it gets rolled into the first interval.
# Test that here.
paths = [gc.Path("/foo", 0), gc.Path("/foo", 4), gc.Path("/foo", 5)]
one_of = gc.one_of_every_n_export_versions(3)
self.assertEquals(one_of(paths),
[gc.Path("/foo", 0), gc.Path("/foo", 5)])
def testUnion(self):
paths = []
for i in xrange(10):
paths.append(gc.Path("/foo", i))
f = gc.union(gc.largest_export_versions(3), gc.mod_export_version(3))
self.assertEquals(
f(paths), [gc.Path("/foo", 0), gc.Path("/foo", 3),
gc.Path("/foo", 6), gc.Path("/foo", 7),
gc.Path("/foo", 8), gc.Path("/foo", 9)])
def testNegation(self):
paths = [gc.Path("/foo", 4), gc.Path("/foo", 5), gc.Path("/foo", 6),
gc.Path("/foo", 9)]
mod = gc.negation(gc.mod_export_version(2))
self.assertEquals(
mod(paths), [gc.Path("/foo", 5), gc.Path("/foo", 9)])
mod = gc.negation(gc.mod_export_version(3))
self.assertEquals(
mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 5)])
def testPathsWithParse(self):
base_dir = os.path.join(tf.test.get_temp_dir(), "paths_parse")
self.assertFalse(gfile.Exists(base_dir))
for p in xrange(3):
gfile.MakeDirs(os.path.join(base_dir, "%d" % p))
# add a base_directory to ignore
gfile.MakeDirs(os.path.join(base_dir, "ignore"))
# create a simple parser that pulls the export_version from the directory.
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
self.assertEquals(
gc.get_paths(base_dir, parser=parser),
[gc.Path(os.path.join(base_dir, "0"), 0),
gc.Path(os.path.join(base_dir, "1"), 1),
gc.Path(os.path.join(base_dir, "2"), 2)])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
TNick/pyl2extra | pyl2extra/datasets/img_dataset/tests/test_generators.py | 1 | 2602 | """
Tests for generators.
"""
__authors__ = "Nicu Tofan"
__copyright__ = "Copyright 2015, Nicu Tofan"
__credits__ = ["Nicu Tofan"]
__license__ = "3-clause BSD"
__maintainer__ = "Nicu Tofan"
__email__ = "nicu.tofan@gmail.com"
import functools
import unittest
from pyl2extra.datasets.img_dataset.generators import (InlineGen,
ThreadedGen,
ProcessGen,
gen_from_string)
class TestInlineGen(unittest.TestCase):
"""
Tests for InlineGen.
"""
@functools.wraps(unittest.TestCase.setUp)
def setUp(self):
self.testee = InlineGen()
@functools.wraps(unittest.TestCase.tearDown)
def tearDown(self):
del self.testee
def test_is_inline(self):
"""
Check is_inline() for InlineGen
"""
instloc = InlineGen()
self.assertTrue(instloc.is_inline())
class TestThreadedGen(unittest.TestCase):
"""
Tests for ThreadedGen.
"""
@functools.wraps(unittest.TestCase.setUp)
def setUp(self):
self.testee = ThreadedGen()
@functools.wraps(unittest.TestCase.tearDown)
def tearDown(self):
del self.testee
def test_is_inline(self):
"""
Check is_inline() for ThreadedGen
"""
instloc = ThreadedGen()
self.assertFalse(instloc.is_inline())
class TestProcessGen(unittest.TestCase):
"""
Tests for ProcessGen.
"""
@functools.wraps(unittest.TestCase.setUp)
def setUp(self):
self.testee = ProcessGen()
@functools.wraps(unittest.TestCase.tearDown)
def tearDown(self):
del self.testee
def test_is_inline(self):
"""
Check is_inline() for ProcessGen
"""
instloc = ProcessGen()
self.assertFalse(instloc.is_inline())
class TestGenFromString(unittest.TestCase):
"""
Tests for gen_from_string().
"""
@functools.wraps(unittest.TestCase.setUp)
def setUp(self):
pass
@functools.wraps(unittest.TestCase.tearDown)
def tearDown(self):
pass
def test_simple(self):
"""
Create generators via gen_from_string().
"""
adj = gen_from_string('inline')
self.assertIsInstance(adj, InlineGen)
adj = gen_from_string('threads')
self.assertIsInstance(adj, ThreadedGen)
adj = gen_from_string('process')
self.assertIsInstance(adj, ProcessGen)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
massot/odoo | setup/win32/win32_service.py | 362 | 1825 | # -*- coding: utf-8 -*-
import servicemanager
import win32api
import win32process
import win32service
import win32serviceutil
import subprocess
import sys
from os.path import dirname, join, split
execfile(join(dirname(__file__), '..', 'server', 'openerp', 'release.py'))
class OdooService(win32serviceutil.ServiceFramework):
_svc_name_ = nt_service_name
_svc_display_name_ = "%s %s" % (nt_service_name, serie)
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.odooprocess = None # Reference to the server's process
def SvcStop(self):
# Before we do anything, tell the SCM we are starting the stop process.
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
# Stop the running Odoo: say it's a normal exit
win32api.TerminateProcess(int(self.odooprocess._handle), 0)
servicemanager.LogInfoMsg("Odoo stopped correctly")
def SvcDoRun(self):
# We start Odoo as an independent process, but we keep its handle
service_dir = dirname(sys.argv[0])
server_dir = split(service_dir)[0]
server_path = join(server_dir, 'server', 'openerp-server.exe')
self.odooprocess = subprocess.Popen(
[server_path], cwd=server_dir, creationflags=win32process.CREATE_NO_WINDOW
)
servicemanager.LogInfoMsg('Odoo up and running')
# exit with same exit code as Odoo process
sys.exit(self.odooprocess.wait())
def option_handler(opts):
# configure the service to auto restart on failures...
subprocess.call([
'sc', 'failure', nt_service_name, 'reset=', '0', 'actions=', 'restart/0/restart/0/restart/0'
])
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(OdooService, customOptionHandler=option_handler)
| agpl-3.0 |
hyperized/ansible | lib/ansible/modules/network/fortios/fortios_router_access_list.py | 13 | 12427 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_access_list
short_description: Configure access lists in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify the router feature and access_list category.
Examples include all parameters; values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was already present in previous versions at a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
router_access_list:
description:
- Configure access lists.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
comments:
description:
- Comment.
type: str
name:
description:
- Name.
required: true
type: str
rule:
description:
- Rule.
type: list
suboptions:
action:
description:
- Permit or deny this IP address and netmask prefix.
type: str
choices:
- permit
- deny
exact_match:
description:
- Enable/disable exact match.
type: str
choices:
- enable
- disable
flags:
description:
- Flags.
type: int
id:
description:
- Rule ID.
required: true
type: int
prefix:
description:
- IPv4 prefix to define regular filter criteria, such as "any" or subnets.
type: str
wildcard:
description:
- Wildcard to define Cisco-style wildcard filter criteria.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure access lists.
fortios_router_access_list:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
router_access_list:
comments: "<your_own_value>"
name: "default_name_4"
rule:
-
action: "permit"
exact_match: "enable"
flags: "8"
id: "9"
prefix: "<your_own_value>"
wildcard: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_router_access_list_data(json):
option_list = ['comments', 'name', 'rule']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    if isinstance(data, list):
        # Rebuild the list so converted copies of nested dicts are kept;
        # rebinding the loop variable alone would discard them.
        data = [underscore_to_hyphen(elem) for elem in data]
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
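# Illustrative sketch (hypothetical data): underscore_to_hyphen rewrites
# dict keys recursively, including dicts nested inside lists.
def _example_underscore_to_hyphen():
    data = {"exact_match": "enable", "rule": [{"max_age": 1}]}
    return underscore_to_hyphen(data)
    # -> {"exact-match": "enable", "rule": [{"max-age": 1}]}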
def router_access_list(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['router_access_list'] and data['router_access_list']:
state = data['router_access_list']['state']
else:
state = True
router_access_list_data = data['router_access_list']
filtered_data = underscore_to_hyphen(filter_router_access_list_data(router_access_list_data))
if state == "present":
return fos.set('router',
'access-list',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('router',
'access-list',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_router(data, fos):
if data['router_access_list']:
resp = router_access_list(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"router_access_list": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"comments": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"rule": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["permit", "deny"]},
"exact_match": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"flags": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"prefix": {"required": False, "type": "str"},
"wildcard": {"required": False, "type": "str"}
}}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_router(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_router(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/guppy-0.1.10/guppy/heapy/View.py | 37 | 15803 | #._cv_part guppy.heapy.View
class Horizon:
def __init__(self, mod):
self.mod = mod
self._hiding_tag_ = mod._hiding_tag_
# Make preallocations of things that will be needed for news()
self.retset = self.mod.retset
self.hv = mod.hv
self.exc_info = self.mod._root.sys.exc_info
self.iso = self.mod.iso
str(self.retset(self.iso(1,[],(),{}, self.__dict__)) -
self.iso(()))
mod.hv.heap
mod.enter
mod.gc.collect()
self.hv_horizon = mod.heapyc.Horizon(self.hv)
def news(self):
r = self.retset(self.hv_horizon.news(self.mod.enter(self.hv.heap)))
return r
class ClearCallback(object):
__slots__ = 'callback',
def __init__(self, callback):
self.callback = callback
def __call__(self, wr):
if self.callback is not None:
self.callback(wr)
else:
print 'No callback'
class Gchook_type(object):
__slots__ = 'x', '__weakref__', 'cb'
def __init__(g):
g.x = g
class ObservationList(list):
__slots__ = '_hiding_tag_',
def __init__(self, iterable, hiding_tag):
list.__init__(self, iterable)
self._hiding_tag_ = hiding_tag
class _GLUECLAMP_:
_imports_ = (
'_parent.ImpSet:immnodeset',
'_parent.ImpSet:immnodeset_union',
'_parent.ImpSet:mutnodeset',
'_parent.ImpSet:NodeSet',
'_parent.UniSet:nodeset_adapt',
'_parent.UniSet:retset',
'_parent.Use:idset',
'_parent.Use:iso',
'_parent.Use:Type',
'_root:gc',
'_root:types',
)
_chgable_ = ('is_rg_update_all', 'referrers_lock', '_is_clear_drg_enabled')
_setable_ = ('_hiding_tag_','target', 'is_hiding_calling_interpreter',
)
is_hiding_calling_interpreter = False
is_rg_update_all = False
_is_clear_drg_enabled = 1 # Flag mainly for test, Note Apr 19 2005
_hiding_tag_ = []
#opt_rg_update_all = True
_uniset_exports = (
# 'dominos',
# 'domisize',
'imdom',
# 'indisize',
# 'referents',
# 'referrers',
'referrers_gc',
)
def _get__clear_hook(self):
return self.mutnodeset()
def clear_check(self):
ch = self._clear_hook
try:
wr = list(ch)[0]
except IndexError:
self.clear_setup()
else:
c = wr()
if c is None:
self.clear_setup()
elif self._root.sys.getrefcount(c) > 3:
print 'GC hook object was referenced by something else!'
self.clear_callback(wr)
c.cb.callback = None
def clear_callback(self, wr):
# print 'clear callback'
self._clear_hook.clear()
for m in self.clear_methods:
m()
self.clear_setup()
def clear_setup(self):
ch = self._clear_hook
ch.clear()
c=self.gchook_type()
cb = self.ClearCallback(self.clear_callback)
c.cb = cb
ch.add(self._root.weakref.ref(c, cb))
def _get_clear_methods(self):
return []
def clear_register_method(self, m):
self.clear_methods.append(m)
self.clear_check()
def _get_dict_ownership(self):
drg = self.nodegraph()
def clear_drg():
# print 'clear_drg?'
if drg.is_sorted and self._is_clear_drg_enabled:
# print 'yes'
drg.clear()
else:
# print 'no, enabled = ', self.is_clear_drg_enabled
pass
self.clear_register_method(clear_drg)
return drg
def _get_gchook_type(self):
return Gchook_type
def _get_heapdef_modules(self):
# We touch self.heapyc to import it & its dependent guppy.sets;
# this is kinda specialcase-hacky but see Notes Apr 8 2005.
self.heapyc
return self.target.sys.modules.items()
def _get_heapdefs(self):
heapdefs = []
for n, m in self.heapdef_modules:
try:
hd = getattr(m, '_NyHeapDefs_')
except:
continue
heapdefs.append(hd)
return tuple(heapdefs)
def _get_heapyc(self): return self._parent.heapyc
def _get_hv(self):
hv = self.new_hv(_hiding_tag_=self._hiding_tag_,
is_hiding_calling_interpreter = self.is_hiding_calling_interpreter)
return hv
def _get_norefer(self): return self.mutnodeset()
def _get_referrers_targets(self): return []
def _get_rg(self):
rg = self.nodegraph()
self.clear_register_method(self._clear_rg)
return rg
def _clear_rg(self):
if self.referrers_lock:
return
rg = self.rg
if rg.is_sorted:
#print 'clearing', rg
rg.clear()
self.norefer.clear()
else:
#print 'no clear', rg, len(rg), len(self.norefer)
pass
def _get_referrers_lock(self) : return 0
def _get_root(self): return self.heapyc.RootState
def _get_target(self): return self._parent.Target.Target()
def _set_root(self, root):
self.clear_retainers()
self.hv.root = root
def call_with_referrers(self, X, f):
self.referrers_lock += 1
try:
self.update_referrers(X)
return f(X)
finally:
self.referrers_lock -= 1
def clear_retainers(self):
"""G.clear_retainers()
Clear the retainer graph V.rg.
"""
self.rg.clear()
self.norefer.clear()
def dominos(self, X):
"""dominos(X) -> idset
Return the dominos of a set of objects X. The dominos of X is the set
of objects that are dominated by X, which is the objects that will become
deallocated, directly or indirectly, when the objects in X are deallocated."""
return self.dominos_tuple((X,))[0]
def dominos_tuple(self, X):
"""V.dominos_tuple(X) -> tuple of idsets
Return a tuple of dominos for the tuple of sets of objects X."""
D_ = [self.nodeset_adapt(x) for x in X] # Convert to naming like in the appendix
T = self.hv.reachable
S = self.immnodeset([self.root])
D = self.immnodeset_union(D_)
W = T(S, D)
return tuple([self.retset(T(Di, W) - T(D, W | Di)) for Di in D_])
def domisize(self, X):
"""domisize(X) -> int
Return the dominated size of a set of objects X. The dominated size of X
is the total size of memory that will become deallocated, directly or
indirectly, when the objects in X are deallocated. See also: indisize."""
return self.domisize_tuple((X,))[0]
def domisize_tuple(self, X):
""""V.domisize_tuple(X) -> tuple of ints
Return a tuple of dominated sizes for the tuple of sets of objects X."""
return tuple([self.indisize(dominos_i)
for dominos_i in self.dominos_tuple(X)])
def enter(self, func):
if self.hv.is_hiding_calling_interpreter:
self.hv.limitframe = None
elif self.hv.limitframe is not None:
return func()
else:
import sys
try:
1/0
except:
type, value, traceback = sys.exc_info()
limitframe = traceback.tb_frame.f_back.f_back
sys.last_traceback=None
sys.exc_clear()
del type,value,traceback
self.hv.limitframe = limitframe
try:
retval = func()
finally:
self.hv.limitframe = None
return retval
def gchook(self, func):
c=self.gchook_type()
ho = self.mutnodeset()
def cb(wr):
func()
ho.clear()
c=self.gchook_type()
ho.add(self._root.weakref.ref(c, cb))
ho.add(self._root.weakref.ref(c, cb))
return self.mutnodeset([ho])
def heapg(self, rma=1):
# Almost the same as gc.get_objects(),
# except:
# 1. calls gc.collect() first (twice)
# 2. removes objects of type gchook
# 3. removes objects of type ClearCallback
# 4. removes all objects of type types.FrameType
# 5. removes all objects of weakref type
# 6. If rma = 1,
# removes all that is in the reachable heap
# except what is in the set itself.
# 7. wraps the result in an IdSet
self.gc.collect()
self.gc.collect()
objs = self.gc.get_objects()
cli = self.hv.cli_type()
objs = cli.select(objs, self.gchook_type, '!=')
objs = cli.select(objs, ClearCallback, '!=')
objs = cli.select(objs, self._root.types.FrameType, '!=')
objs = cli.select(objs, self._root.weakref.ReferenceType, '!=')
r = self.retset(objs)
del cli, objs
if rma:
r = (r - self.idset(self.heapyc.HeapView(
self.heapyc.RootState,
self.heapdefs
).reachable_x(
self.immnodeset([self.heapyc.RootState]),
self.observation_containers()
))
)
return r
def heapu(self, rma=1):
self.gc.collect()
self.gc.collect()
r = self.gc.get_objects()
exclude = (self.Type(self.gchook_type) |
self.Type(ClearCallback)
)
if rma:
exclude |= self.idset(self.heapyc.HeapView(
self.heapyc.RootState,
self.heapdefs
).reachable_x(
self.immnodeset([self.heapyc.RootState]),
self.immnodeset([r])
))
r = self.retset(r) - exclude
ref = r.referents - exclude
while not ref <= r:
r |= ref
ref = ref.referents - exclude
del ref, exclude
r = r.byclass # Avoid memoizing for complicated classification
return r
def heap(self):
"""V.heap() -> idset
Return the set of objects in the visible heap.
"""
global heap_one_time_initialized
# This is to make sure that the first time called
# the heap will contain things that may likely be loaded later
# because of common operations.
if not heap_one_time_initialized:
heap_one_time_initialized = 1
repr(self.idset(self.hv.heap()))
x=[]
repr(self.iso(x).shpaths)
repr(self.iso(x).rp)
self.gc.collect() # Sealing a leak at particular usage ; Notes Apr 13 2005
# Exclude current frame by encapsulting in enter(). Note Apr 20 2005
return self.enter(lambda:
self.idset(self.hv.heap()))
def horizon(self):
return self.Horizon(self)
def imdom(self, X):
"""imdom(X) -> idset
Return the immediate dominators of a set of objects X. The immediate
dominators is a subset of the referrers. It includes only those
referrers that are reachable directly, avoiding any other referrer."""
pred = self.nodeset_adapt(self.referrers(X))
visit = self.hv.reachable_x(self.immnodeset([self.root]), pred)
return self.retset(pred & visit)
def indisize(self, X):
"""indisize(X) -> int
Return the sum of the individual sizes of the set of objects X.
The individual size of an object is the size of memory that is
allocated directly in the object, not including any externally
visible subobjects. See also: domisize."""
return self.hv.indisize_sum(self.nodeset_adapt(X))
def new_hv(self, _hiding_tag_=None, is_hiding_calling_interpreter=False,
heapdefs=None, root=None, gchook_type=None):
if heapdefs is None:
heapdefs = self.heapdefs
if root is None:
root = self.root
if gchook_type is None:
gchook_type = self.gchook_type
hv = self.heapyc.HeapView(root, heapdefs)
hv._hiding_tag_ = _hiding_tag_
hv.is_hiding_calling_interpreter = is_hiding_calling_interpreter
hv.register_hidden_exact_type(gchook_type)
#hv.register__hiding_tag__type(self._parent.UniSet.UniSet)
hv.register__hiding_tag__type(self._parent.UniSet.Kind)
hv.register__hiding_tag__type(self._parent.UniSet.IdentitySetMulti)
hv.register__hiding_tag__type(self._parent.UniSet.IdentitySetSingleton)
return hv
def nodegraph(self, iterable = None, is_mapping = False):
ng = self.heapyc.NodeGraph(iterable, is_mapping)
ng._hiding_tag_ = self._hiding_tag_
return ng
def obj_at(self, addr):
try:
return self.immnodeset(self.hv.static_types).obj_at(addr)
except ValueError:
pass
try:
return self.immnodeset(self.gc.get_objects()).obj_at(addr)
except ValueError:
pass
try:
return self.immnodeset(self.hv.heap()).obj_at(addr)
except ValueError:
raise ValueError, 'No object found at address %s'%hex(addr)
def observation_containers(self):
# Return the current set of 'observation containers'
# as discussed in Notes Oct 27 2005.
# returns a nodeset, not an idset, to avoid recursive references
objs = self.gc.get_objects()
cli = self.hv.cli_type()
objs = (cli.select(objs, self.NodeSet, '<=') +
cli.select(objs, ObservationList, '<=') +
cli.select(objs, self._parent.UniSet.IdentitySetSingleton, '<=')
)
r = self.immnodeset([x for x in objs if getattr(x, '_hiding_tag_', None) is self._hiding_tag_])
del x, cli, objs
return r
def observation_list(self, iterable=()):
# Return an ObservationList object with our _hiding_tag_
return ObservationList(iterable, self._hiding_tag_)
def referents(self, X):
"""V.referents(X) -> idset
Return the set of objects that are directly referred to by
any of the objects in the set X."""
return self.retset(self.hv.relimg(self.nodeset_adapt(X)))
def referrers(self, X):
"""V.referrers(X) -> idset
Return the set of objects that directly refer to
any of the objects in the set X."""
X = self.nodeset_adapt(X)
if self.is_rg_update_all and self.root is self.heapyc.RootState:
if not (self.rg.domain_covers(X) or
self.rg.domain_covers(X - self.norefer)):
# print 'new update old len = %d'%len(self.rg)
# print self.idset(X-self.rg.get_domain())
self.rg.clear()
import gc
gc.collect()
self.hv.update_referrers_completely(self.rg)
addnoref = X - self.rg.get_domain()
#print 'done 1', len(X), len(addnoref)
self.norefer |= addnoref
#print 'done 1a', len(self.rg)
else:
# print 'X', X, len(X)
# print self.idset(X)
Y = self.mutnodeset(X)
Y -= self.norefer
if not self.rg.domain_covers(Y):
for wt in self.referrers_targets:
t = wt()
if t is not None:
Y |= t.set.nodes
if 0:
print 'old update'
print self.idset(Y - self.rg.get_domain())
Y |= self.rg.get_domain()
self.rg.clear()
self.hv.update_referrers(self.rg, Y)
self.norefer.clear()
self.norefer |= (X | Y | self.rg.get_range())
self.norefer -= self.rg.get_domain()
Y = self.mutnodeset(X) - self.norefer
if not self.rg.domain_covers(Y):
print 'update_referrers failed'
print 'Y - domain of rg:'
print self.idset(Y - self.rg.get_domain())
from pdb import pm, set_trace
set_trace()
Y = None
X = self.rg.relimg(X)
X = self.immnodeset(X) - [None]
X = self.retset(X)
return X
def referrers_gc(self, X):
"""V.referrers_gc(X) -> idset
Return the set of objects that directly refer to
any of the objects in the set X.
This differs from referrers in that it uses the
gc module's view of the referrers. This is more or less
valid depending on viewpoint.
"""
X = tuple(self.nodeset_adapt(X))
return self.idset(self.gc.get_referrers(*X)) - self.iso(X)
def referrers_add_target(self, t):
def remove(wr):
self.referrers_targets.remove(wr)
wr = self._root.weakref.ref(t, remove)
self.referrers_targets.append(wr)
def update_referrers(self, X):
"""V.update_referrers(X)
Update the view V from the set X. X must be adaptable to NodeSet. V.rg is
updated so that in addition to its previos mapping, it will also contain
mappings for the elements of X to their referrers, from them to their
referrers and so on.
"""
self.referrers(X)
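# Illustrative sketch (assumptions: guppy is installed and this is run from
# a live session; the attribute names on identity sets follow heapy's public
# interface, which this View backs -- see _uniset_exports above).
def _example_view_usage():
    from guppy import hpy
    h = hpy()
    x = [[]]
    inner = h.iso(x[0])   # identity set holding just the inner list
    # referrers: objects directly referring to members of the set; the
    # outer list x should be among them. indisize: summed individual sizes.
    return inner.referrers, inner.indisize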
def prime_builtin_types():
# Make sure builtin types have been completely allocated
# with all method descriptors etc.
# so subsequent events will not give spurios confusing allocations.
# This should need to be done only once.
# (Or whenever a new (extension) module is imported??)
# The problem & solution is further discussed in Notes Nov 9 2005.
import types
import guppy.heapy.heapyc
import guppy.sets.setsc
import sys
import weakref
for mod in sys.modules.values():
if mod is None:
continue
for t in mod.__dict__.values():
if isinstance(t, type):
dir(t)
# Other type(s)
for t in [type(iter([])), type(iter(())),
]:
dir(t)
prime_builtin_types()
# The following global variable is used by heap()
# to do extra initializations the first time it is called:
# we want to import and initialize some things,
# but only if heap() is actually called.
heap_one_time_initialized = 0
| apache-2.0 |
eayunstack/neutron | neutron/db/migration/alembic_migrations/versions/mitaka/expand/3894bccad37f_add_timestamp_to_base_resources.py | 9 | 1093 | # Copyright 2015 HuaWei Technologies.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add_timestamp_to_base_resources
Revision ID: 3894bccad37f
Revises: 2f9e956e7532
Create Date: 2016-03-01 04:19:58.852612
"""
# revision identifiers, used by Alembic.
revision = '3894bccad37f'
down_revision = '2f9e956e7532'
from alembic import op
import sqlalchemy as sa
def upgrade():
for column_name in ['created_at', 'updated_at']:
op.add_column(
'standardattributes',
sa.Column(column_name, sa.DateTime(), nullable=True)
)
| apache-2.0 |
BizzCloud/PosBox | addons/account/wizard/__init__.py | 362 | 2345 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_report_common
import account_report_common_partner
import account_report_common_journal
import account_report_common_account
import account_automatic_reconcile
import account_move_line_reconcile_select
import account_move_line_unreconcile_select
import account_reconcile_partner_process
import account_reconcile
import account_unreconcile
import account_invoice_refund
import account_journal_select
import account_move_bank_reconcile
import account_subscription_generate
import account_report_aged_partner_balance
import account_report_partner_ledger
import account_report_partner_balance
import account_period_close
import account_fiscalyear_close
import account_fiscalyear_close_state
import account_vat
import account_open_closed_fiscalyear
import account_invoice_state
import account_chart
import account_tax_chart
import account_financial_report
#TODO: remove this file no moe used
# also remove related view fiel
import account_validate_account_move
import account_use_model
import account_state_open
import account_report_print_journal
import account_report_central_journal
import account_report_general_journal
import account_report_general_ledger
import account_report_account_balance
import account_change_currency
import pos_box
import account_statement_from_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
roadmapper/ansible | lib/ansible/utils/plugin_docs.py | 28 | 6674 | # Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.release import __version__ as ansible_version
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
from ansible.module_utils.common._collections_compat import MutableMapping, MutableSet, MutableSequence
from ansible.parsing.plugin_docs import read_docstring
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.utils.display import Display
display = Display()
# modules that are ok that they do not have documentation strings
BLACKLIST = {
'MODULE': frozenset(('async_wrapper',)),
'CACHE': frozenset(('base',)),
}
def merge_fragment(target, source):
for key, value in source.items():
if key in target:
# assumes both structures have same type
if isinstance(target[key], MutableMapping):
value.update(target[key])
elif isinstance(target[key], MutableSet):
value.add(target[key])
elif isinstance(target[key], MutableSequence):
value = sorted(frozenset(value + target[key]))
else:
raise Exception("Attempt to extend a documentation fragement, invalid type for %s" % key)
target[key] = value
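# Illustrative sketch (hypothetical data) of merge_fragment's semantics:
# mapping values are merged with the target's entries winning on collision,
# and sequence values are de-duplicated and sorted.
def _example_merge_fragment():
    target = {'options': {'name': {'type': 'str'}}, 'notes': ['b']}
    source = {'options': {'state': {'type': 'str'}}, 'notes': ['a', 'b']}
    merge_fragment(target, source)
    # target['options'] now holds both 'name' and 'state';
    # target['notes'] is ['a', 'b'].
    return target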
def add_fragments(doc, filename, fragment_loader):
fragments = doc.pop('extends_documentation_fragment', [])
if isinstance(fragments, string_types):
fragments = [fragments]
unknown_fragments = []
# doc_fragments are allowed to specify a fragment var other than DOCUMENTATION
# with a . separator; this is complicated by collections-hosted doc_fragments that
# use the same separator. Assume it's collection-hosted normally first, try to load
# as-specified. If failure, assume the right-most component is a var, split it off,
# and retry the load.
for fragment_slug in fragments:
fragment_name = fragment_slug
fragment_var = 'DOCUMENTATION'
fragment_class = fragment_loader.get(fragment_name)
if fragment_class is None and '.' in fragment_slug:
splitname = fragment_slug.rsplit('.', 1)
fragment_name = splitname[0]
fragment_var = splitname[1].upper()
fragment_class = fragment_loader.get(fragment_name)
if fragment_class is None:
unknown_fragments.append(fragment_slug)
continue
fragment_yaml = getattr(fragment_class, fragment_var, None)
if fragment_yaml is None:
if fragment_var != 'DOCUMENTATION':
# if it's asking for something specific that's missing, that's an error
unknown_fragments.append(fragment_slug)
continue
else:
fragment_yaml = '{}' # TODO: this is still an error later since we require 'options' below...
fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data()
if 'notes' in fragment:
notes = fragment.pop('notes')
if notes:
if 'notes' not in doc:
doc['notes'] = []
doc['notes'].extend(notes)
if 'seealso' in fragment:
seealso = fragment.pop('seealso')
if seealso:
if 'seealso' not in doc:
doc['seealso'] = []
doc['seealso'].extend(seealso)
if 'options' not in fragment:
raise Exception("missing options in fragment (%s), possibly misformatted?: %s" % (fragment_name, filename))
# ensure options themselves are directly merged
if 'options' in doc:
try:
merge_fragment(doc['options'], fragment.pop('options'))
except Exception as e:
raise AnsibleError("%s options (%s) of unknown type: %s" % (to_native(e), fragment_name, filename))
else:
doc['options'] = fragment.pop('options')
# merge rest of the sections
try:
merge_fragment(doc, fragment)
except Exception as e:
raise AnsibleError("%s (%s) of unknown type: %s" % (to_native(e), fragment_name, filename))
if unknown_fragments:
raise AnsibleError('unknown doc_fragment(s) in file {0}: {1}'.format(filename, to_native(', '.join(unknown_fragments))))
def get_docstring(filename, fragment_loader, verbose=False, ignore_errors=False):
"""
DOCUMENTATION can be extended using documentation fragments loaded by the PluginLoader from the doc_fragments plugins.
"""
data = read_docstring(filename, verbose=verbose, ignore_errors=ignore_errors)
# add fragments to documentation
if data.get('doc', False):
add_fragments(data['doc'], filename, fragment_loader=fragment_loader)
return data['doc'], data['plainexamples'], data['returndocs'], data['metadata']
def get_versioned_doclink(path):
"""
returns a versioned documentation link for the current Ansible major.minor version; used to generate
in-product warning/error links to the configured DOCSITE_ROOT_URL
(eg, https://docs.ansible.com/ansible/2.8/somepath/doc.html)
:param path: relative path to a document under docs/docsite/rst;
:return: absolute URL to the specified doc for the current version of Ansible
"""
path = to_native(path)
try:
base_url = C.config.get_config_value('DOCSITE_ROOT_URL')
if not base_url.endswith('/'):
base_url += '/'
if path.startswith('/'):
path = path[1:]
split_ver = ansible_version.split('.')
if len(split_ver) < 3:
raise RuntimeError('invalid version ({0})'.format(ansible_version))
doc_version = '{0}.{1}'.format(split_ver[0], split_ver[1])
# check to see if it's a X.Y.0 non-rc prerelease or dev release, if so, assume devel (since the X.Y doctree
# isn't published until beta-ish)
if split_ver[2].startswith('0'):
# exclude rc; we should have the X.Y doctree live by rc1
if any((pre in split_ver[2]) for pre in ['a', 'b']) or len(split_ver) > 3 and 'dev' in split_ver[3]:
doc_version = 'devel'
return '{0}{1}/{2}'.format(base_url, doc_version, path)
except Exception as ex:
return '(unable to create versioned doc link for path {0}: {1})'.format(path, to_native(ex))
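# Illustrative sketch (the resolved URL depends on the configured
# DOCSITE_ROOT_URL and the running ansible version; the docs path below is
# only an example).
def _example_versioned_doclink():
    # For ansible_version "2.9.1" this yields something like
    # "https://docs.ansible.com/ansible/2.9/reference_appendices/config.html";
    # an a/b/dev prerelease of an X.Y.0 resolves to the "devel" doctree.
    return get_versioned_doclink("reference_appendices/config.html")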
| gpl-3.0 |
andreparrish/python-for-android | python3-alpha/python3-src/Lib/encodings/cp437.py | 272 | 34564 | """ Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp437',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
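# Illustrative sketch: once the encodings package registers this module, the
# codec is reachable through the normal str/bytes interfaces.
def _example_roundtrip():
    text = "H\u00e9llo"             # U+00E9 maps to 0x82 in cp437
    raw = text.encode("cp437")      # b"H\x82llo"
    return raw.decode("cp437") == text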
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
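# Note: decoding_map (byte value -> code point dict) and decoding_table
# (a 256-character string indexed by byte value) express the same mapping;
# charmap codecs consume the table form directly via codecs.charmap_decode.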
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xa5' # 0x009d -> YEN SIGN
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| apache-2.0 |
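The tables above follow the layout emitted by Python's gencodec tool, and the shade, box-drawing, and Greek ranges are those of IBM code page 437. Below is a minimal sketch of how a charmap codec consumes such tables; the Codec class and the sample bytes are illustrative, not part of the dumped file:

import codecs

class Codec(codecs.Codec):
    # Both directions delegate to the C-level charmap helpers, driven by
    # the decoding_table and encoding_map defined above.
    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_map)

# Illustrative round trip: bytes 0xb0-0xb2 map to the shade characters.
text, _ = Codec().decode(b'\xb0\xb1\xb2')
assert text == u'\u2591\u2592\u2593'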
rkube/blob_tracking | analysis/correlate_test.py | 1 | 7170 | #!/opt/local/bin/python
#-*- Encoding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from misc.correlate import correlate
from sys import exit
"""
Correlate timeseries of two pixels. Trigger on large amplitudes.
Try estimating radial blob velocity from this.
"""
shotnr = 1120711010
frame0 = 20000
nframes = 25000
wlen = 10 # Window length for correlation analysis
gpi_fps = 390.8e3 # Phantom camera frame rate
tau = 1. / gpi_fps
dx = 0.061 / 64.
tau_max = 10
num_blobs = 100
blob_vel = np.zeros(num_blobs)
frames_file = np.load('%d/%d_frames.npz' % ( shotnr, shotnr) )
frames = frames_file['frames_normalized_mean']
ts_pixel1 = frames[ :, 48, 53 ]
ts_pixel2 = frames[ :, 48, 55 ]
ts_pixel3 = frames[ :, 48, 57 ]
ts_pixel4 = frames[ :, 48, 59 ]
ts_pixel5 = frames[ :, 48, 61 ]
#np.savez('corr_ts.npz', ts_pixel1 = ts_pixel1, ts_pixel2 = ts_pixel2, ts_pixel3 = ts_pixel3, ts_pixel4 = ts_pixel4, ts_pixel5 = ts_pixel5 )
#df = np.load('test/corr_ts.npz')
#ts_pixel1 = df['ts_pixel1']
#ts_pixel2 = df['ts_pixel2']
#ts_pixel3 = df['ts_pixel3']
#ts_pixel4 = df['ts_pixel4']
#ts_pixel5 = df['ts_pixel5']
plt.figure()
plt.plot( np.arange( frame0 ), ts_pixel1[:frame0], 'k' )
plt.plot( np.arange( frame0, frame0 + nframes ), ts_pixel1[frame0:frame0 + nframes], 'r' )
plt.plot( np.arange( frame0 + nframes, np.size(ts_pixel1)), ts_pixel1[frame0 + nframes:], 'k' )
plt.plot( np.arange( frame0 ), ts_pixel2[:frame0] + 3.0, 'k' )
plt.plot( np.arange( frame0, frame0 + nframes ), ts_pixel2[frame0:frame0 + nframes] + 3.0, 'r')
plt.plot( np.arange( frame0 + nframes, np.size(ts_pixel1)), ts_pixel2[frame0 + nframes:] + 3.0, 'k' )
plt.plot( np.arange( frame0 ), ts_pixel3[:frame0] + 6.0, 'k' )
plt.plot( np.arange( frame0, frame0 + nframes ), ts_pixel3[frame0:frame0 + nframes] + 6.0, 'r' )
plt.plot( np.arange( frame0 + nframes, np.size(ts_pixel1)), ts_pixel3[frame0 + nframes:] + 6.0, 'k' )
plt.plot( np.arange( frame0 ), ts_pixel4[:frame0] + 9.0, 'k' )
plt.plot( np.arange( frame0, frame0 + nframes ), ts_pixel4[frame0:frame0 + nframes] + 9.0, 'r' )
plt.plot( np.arange( frame0 + nframes, np.size(ts_pixel1)), ts_pixel4[frame0 + nframes:] + 9.0, 'k' )
plt.plot( np.arange( frame0 ), ts_pixel5[:frame0] + 12.0, 'k' )
plt.plot( np.arange( frame0, frame0 + nframes ), ts_pixel5[frame0:frame0 + nframes] + 12.0, 'r' )
plt.plot( np.arange( frame0 + nframes, np.size(ts_pixel1)), ts_pixel5[frame0 + nframes:] + 12.0, 'k' )
#plt.show()
ts_pixel1 = ts_pixel1[frame0 : frame0 + nframes]
ts_pixel2 = ts_pixel2[frame0 : frame0 + nframes]
ts_pixel3 = ts_pixel3[frame0 : frame0 + nframes]
ts_pixel4 = ts_pixel4[frame0 : frame0 + nframes]
ts_pixel5 = ts_pixel5[frame0 : frame0 + nframes]
# Take the 100 largest blobs and estimate their velocity
ts1_sortidx = ts_pixel1.argsort()[-num_blobs:]
plt.figure()
plt.plot(ts_pixel1[ts1_sortidx])
for idx, max_idx in enumerate(ts1_sortidx):
if ( max_idx == -1 ):
print 'Index was blanked out previously, skipping to next index'
continue
elif ( max_idx < wlen ):
print 'Too close to boundaries for full correlation, skipping to next index'
continue
# Blank out all other peaks occurring within +- 10 frames
print 'before:', max_idx, ts1_sortidx
close_peak_indices = np.squeeze(np.argwhere( np.abs(ts1_sortidx - max_idx) < 10 ))
print 'close_peak_indices:', close_peak_indices, ' entries:', ts1_sortidx[ close_peak_indices ]
ts1_sortidx[ close_peak_indices ] = -1
print 'after:', max_idx, ts1_sortidx
print max_idx
fig = plt.figure()
plt.subplot(211)
plt.title('max_idx = %d' % max_idx)
plt.xlabel('frame no.')
plt.ylabel('I tilde')
plt.plot( np.arange( frame0 + max_idx - wlen, frame0 + max_idx + wlen), ts_pixel1[ max_idx - wlen : max_idx + wlen ] )
plt.plot( np.arange( frame0 + max_idx - wlen, frame0 + max_idx + wlen), ts_pixel2[ max_idx - wlen : max_idx + wlen ] )
plt.plot( np.arange( frame0 + max_idx - wlen, frame0 + max_idx + wlen), ts_pixel3[ max_idx - wlen : max_idx + wlen ] )
plt.plot( np.arange( frame0 + max_idx - wlen, frame0 + max_idx + wlen), ts_pixel4[ max_idx - wlen : max_idx + wlen ] )
plt.plot( np.arange( frame0 + max_idx - wlen, frame0 + max_idx + wlen), ts_pixel5[ max_idx - wlen : max_idx + wlen ] )
plt.subplot(212)
plt.xlabel('Time lag tau')
plt.ylabel('Correlation amplitude')
tau_range = np.arange( -tau_max, tau_max )
# Compute the correlation between the timeseries of neighbouring pixels. The maximum of
# the correlation amplitude is used to compute the radial blob velocity.
# Limit the neighbourhood in which the peak correlation amplitude may be to +- 10 frames.
c11 = correlate( ts_pixel1[ max_idx - wlen - 1: max_idx + wlen + 1], ts_pixel1[ max_idx - wlen - 1 : max_idx + wlen + 1], 2*wlen )
c11 = c11[ 2*wlen - tau_max : 2*wlen + tau_max]
plt.plot(tau_range, c11)
plt.plot(tau_range[c11.argmax()], c11.max(), 'ko')
c12 = correlate( ts_pixel1[ max_idx - wlen - 1: max_idx + wlen + 1], ts_pixel2[ max_idx - wlen - 1 : max_idx + wlen + 1], 2*wlen )
c12 = c12[ 2*wlen - tau_max : 2*wlen + tau_max]
max_c12 = c12.argmax()
plt.plot(tau_range, c12)
plt.plot(tau_range[c12.argmax()], c12.max(), 'ko')
c13 = correlate( ts_pixel1[ max_idx - wlen - 1: max_idx + wlen + 1], ts_pixel3[ max_idx - wlen - 1: max_idx + wlen + 1], 2*wlen )
c13 = c13[ 2*wlen - tau_max : 2*wlen + tau_max]
max_c13 = c13.argmax()
plt.plot(tau_range, c13)
plt.plot(tau_range[c13.argmax()], c13.max(), 'ko')
c14 = correlate( ts_pixel1[ max_idx - wlen - 1: max_idx + wlen + 1], ts_pixel4[ max_idx - wlen - 1: max_idx + wlen + 1], 2*wlen )
c14 = c14[ 2*wlen - tau_max : 2*wlen + tau_max]
max_c14 = c14.argmax()
plt.plot(tau_range, c14)
plt.plot(tau_range[c14.argmax()], c14.max(), 'ko')
c15 = correlate( ts_pixel1[ max_idx - wlen - 1: max_idx + wlen + 1], ts_pixel5[ max_idx - wlen - 1: max_idx + wlen + 1], 2*wlen )
c15 = c15[ 2*wlen - tau_max : 2*wlen + tau_max]
max_c15 = c15.argmax()
plt.plot(tau_range, c15)
plt.plot(tau_range[c15.argmax()], c15.max(), 'ko')
fig.savefig('%d/vrad_correlation/%d_frame%05d.png' % ( shotnr, shotnr, max_idx ) )
plt.close()
# Estimate radial blob velocity by propagation of correlation amplitude
v_c12 = gpi_fps * 2.0*dx / (2*wlen - max_c12)
v_c13 = gpi_fps * 4.0*dx / (2*wlen - max_c13)
v_c14 = gpi_fps * 6.0*dx / (2*wlen - max_c14)
v_c15 = gpi_fps * 8.0*dx / (2*wlen - max_c15)
print 'Blob velocities from correlation method:'
print 'px1 - px2: %f, px1 - px3: %f, px1 - px4: %f, px1 - px5: %f' % (v_c12, v_c13, v_c14, v_c15 )
print 'mean: %f' % np.mean( np.array([v_c12, v_c13, v_c14, v_c15]) )
blob_vel[idx] = np.mean( np.array([v_c12, v_c13, v_c14, v_c15]) )
blob_vel = blob_vel[blob_vel != 0]
plt.figure()
plt.plot(blob_vel, '.')
plt.xlabel('Blob event no.')
plt.ylabel('Radial velocity m/s')
print '================================================================================='
print 'mean over all blobs: %f' % blob_vel.mean()
plt.show()
| mit |
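The script above turns the cross-correlation peak between two pixels a known distance apart into a radial velocity. A stripped-down sketch of that lag-to-velocity conversion with plain numpy; the helper name and the sample lag are illustrative, not taken from the script:

import numpy as np

def peak_lag_frames(ts_ref, ts_shifted):
    # Full cross-correlation of the mean-subtracted signals; the peak's
    # offset from the centre bin is the delay of ts_shifted vs. ts_ref.
    a = ts_ref - ts_ref.mean()
    b = ts_shifted - ts_shifted.mean()
    return np.correlate(b, a, 'full').argmax() - (len(a) - 1)

fps = 390.8e3      # Phantom camera frame rate, as in the script
dx = 0.061 / 64.   # radial pixel size in metres, as in the script
lag = 3            # e.g. the peak appears 3 frames after zero lag
v_rad = 2 * dx * fps / lag   # probe pixels sit two columns apart
print 'v_rad = %.1f m/s' % v_rad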
richardfergie/googleads-python-lib | examples/dfp/v201411/inventory_service/get_top_level_ad_units.py | 4 | 2260 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all child ad units of the effective root ad unit.
To create ad units, run create_ad_units.py
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
inventory_service = client.GetService('InventoryService', version='v201411')
network_service = client.GetService('NetworkService', version='v201411')
root_id = network_service.getCurrentNetwork()['effectiveRootAdUnitId']
# Create a statement to select the children of the effective root ad unit.
values = [{
'key': 'id',
'value': {
'xsi_type': 'TextValue',
'value': root_id
}
}]
query = 'WHERE parentId = :id'
statement = dfp.FilterStatement(query, values)
# Get ad units by statement.
while True:
response = inventory_service.getAdUnitsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for ad_unit in response['results']:
print ('Ad unit with ID \'%s\' and name \'%s\' was found.'
% (ad_unit['id'], ad_unit['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 |
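LoadFromStorage can also be pointed at a credentials file outside the home directory; a minimal sketch, assuming the optional path argument accepted by the googleads client classes, with the path itself illustrative:

from googleads import dfp

# Same bootstrap as above, but reading a non-default googleads.yaml.
dfp_client = dfp.DfpClient.LoadFromStorage('/etc/adops/googleads.yaml')
main(dfp_client)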
js0701/chromium-crosswalk | tools/clang/blink_gc_plugin/tests/test.py | 40 | 4935 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import glob
import os
import subprocess
import sys
def run_test(test_base_name, cmd, reset_results):
"""Run a test case.
Args:
test_base_name: The name for the test C++ source file without the extension.
cmd: The actual command to run for the test.
reset_results: True if the results should be overwritten in place.
Returns:
None on pass, or a str with the description of the failure.
"""
try:
actual = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# Some of the Blink GC plugin tests intentionally trigger compile errors, so
# just ignore an exit code that indicates failure.
actual = e.output
except Exception as e:
return 'could not execute %s (%s)' % (cmd, e)
# Some Blink GC plugins dump a JSON representation of the object graph, and
# use the processed results as the actual results of the test.
if os.path.exists('%s.graph.json' % test_base_name):
try:
actual = subprocess.check_output(
['python', '../process-graph.py', '-c',
'%s.graph.json' % test_base_name],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# The graph processing script returns a failure exit code if the graph is
# 'bad' (e.g. it has a cycle). The output still needs to be captured in
# that case, since the expected results capture the errors.
actual = e.output
finally:
# Clean up the .graph.json file to prevent false passes from stale results
# from a previous run.
os.remove('%s.graph.json' % test_base_name)
# On Windows, clang emits CRLF as the end of line marker. Normalize it to LF
# to match posix systems.
actual = actual.replace('\r\n', '\n')
result_file = '%s.txt%s' % (
test_base_name, '' if reset_results else '.actual')
try:
expected = open('%s.txt' % test_base_name).read()
except IOError:
open(result_file, 'w').write(actual)
return 'no expected file found'
if expected != actual:
open(result_file, 'w').write(actual)
error = 'expected and actual differed\n'
error += 'Actual:\n' + actual
error += 'Expected:\n' + expected
return error
def run_tests(clang_path, plugin_path, reset_results):
"""Runs the tests.
Args:
clang_path: The path to the clang binary to be tested.
plugin_path: An optional path to the plugin to test. This may be None, if
plugin is built directly into clang, like on Windows.
reset_results: True if the results should be overwritten in place.
Returns:
(passing, failing): Two lists containing the base names of the passing and
failing tests respectively.
"""
passing = []
failing = []
# The plugin option to dump the object graph is incompatible with
# -fsyntax-only. It generates the .graph.json file based on the name of the
# output file, but there is no output filename with -fsyntax-only.
base_cmd = [clang_path, '-c', '-std=c++11']
base_cmd.extend(['-Wno-inaccessible-base'])
if plugin_path:
base_cmd.extend(['-Xclang', '-load', '-Xclang', plugin_path])
base_cmd.extend(['-Xclang', '-add-plugin', '-Xclang', 'blink-gc-plugin'])
tests = glob.glob('*.cpp')
for test in tests:
sys.stdout.write('Testing %s... ' % test)
test_base_name, _ = os.path.splitext(test)
cmd = base_cmd[:]
try:
cmd.extend(file('%s.flags' % test_base_name).read().split())
except IOError:
pass
cmd.append(test)
failure_message = run_test(test_base_name, cmd, reset_results)
if failure_message:
print 'failed: %s' % failure_message
failing.append(test_base_name)
else:
print 'passed!'
passing.append(test_base_name)
return passing, failing
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--reset-results', action='store_true',
help='If specified, overwrites the expected results in place.')
parser.add_argument('clang_path', help='The path to the clang binary.')
parser.add_argument('plugin_path', nargs='?',
help='The path to the plugin library, if any.')
args = parser.parse_args()
os.chdir(os.path.dirname(os.path.realpath(__file__)))
print 'Using clang %s...' % args.clang_path
print 'Using plugin %s...' % args.plugin_path
passing, failing = run_tests(args.clang_path,
args.plugin_path,
args.reset_results)
print 'Ran %d tests: %d succeeded, %d failed' % (
len(passing) + len(failing), len(passing), len(failing))
for test in failing:
print ' %s' % test
return len(failing)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
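For reference, the harness above can also be driven without the argparse wrapper; a hypothetical invocation, with both paths illustrative:

# Hypothetical direct use of run_tests(); paths are illustrative.
# Note run_tests() globs *.cpp in the current working directory.
passing, failing = run_tests(
    '/usr/local/bin/clang',             # clang binary under test
    'out/Release/libBlinkGCPlugin.so',  # plugin library, or None on Windows
    False)                              # keep expected results in place
print 'pass: %d, fail: %d' % (len(passing), len(failing))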
Ernesto99/odoo | addons/account_analytic_plans/report/__init__.py | 445 | 1084 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
michael-dev2rights/ansible | lib/ansible/modules/network/netscaler/netscaler_lb_monitor.py | 22 | 47419 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netscaler_lb_monitor
short_description: Manage load balancing monitors
description:
- Manage load balancing monitors.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
version_added: "2.4"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
monitorname:
description:
- >-
Name for the monitor. Must begin with an ASCII alphanumeric or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore, hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters.
- "Minimum length = 1"
type:
choices:
- 'PING'
- 'TCP'
- 'HTTP'
- 'TCP-ECV'
- 'HTTP-ECV'
- 'UDP-ECV'
- 'DNS'
- 'FTP'
- 'LDNS-PING'
- 'LDNS-TCP'
- 'LDNS-DNS'
- 'RADIUS'
- 'USER'
- 'HTTP-INLINE'
- 'SIP-UDP'
- 'SIP-TCP'
- 'LOAD'
- 'FTP-EXTENDED'
- 'SMTP'
- 'SNMP'
- 'NNTP'
- 'MYSQL'
- 'MYSQL-ECV'
- 'MSSQL-ECV'
- 'ORACLE-ECV'
- 'LDAP'
- 'POP3'
- 'CITRIX-XML-SERVICE'
- 'CITRIX-WEB-INTERFACE'
- 'DNS-TCP'
- 'RTSP'
- 'ARP'
- 'CITRIX-AG'
- 'CITRIX-AAC-LOGINPAGE'
- 'CITRIX-AAC-LAS'
- 'CITRIX-XD-DDC'
- 'ND6'
- 'CITRIX-WI-EXTENDED'
- 'DIAMETER'
- 'RADIUS_ACCOUNTING'
- 'STOREFRONT'
- 'APPC'
- 'SMPP'
- 'CITRIX-XNC-ECV'
- 'CITRIX-XDM'
- 'CITRIX-STA-SERVICE'
- 'CITRIX-STA-SERVICE-NHOP'
description:
- "Type of monitor that you want to create."
action:
choices:
- 'NONE'
- 'LOG'
- 'DOWN'
description:
- >-
Action to perform when the response to an inline monitor (a monitor of type C(HTTP-INLINE)) indicates
that the service is down. A service monitored by an inline monitor is considered C(DOWN) if the response
code is not one of the codes that have been specified for the Response Code parameter.
- "Available settings function as follows:"
- >-
* C(NONE) - Do not take any action. However, the show service command and the show lb monitor command
indicate the total number of responses that were checked and the number of consecutive error
responses received after the last successful probe.
- "* C(LOG) - Log the event in NSLOG or SYSLOG."
- >-
* C(DOWN) - Mark the service as being down, and then do not direct any traffic to the service until the
configured down time has expired. Persistent connections to the service are terminated as soon as the
service is marked as C(DOWN). Also, log the event in NSLOG or SYSLOG.
respcode:
description:
- >-
Response codes for which to mark the service as UP. For any other response code, the action performed
depends on the monitor type. C(HTTP) monitors and C(RADIUS) monitors mark the service as C(DOWN), while
C(HTTP-INLINE) monitors perform the action indicated by the Action parameter.
httprequest:
description:
- "HTTP request to send to the server (for example, C(\\"HEAD /file.html\\"))."
rtsprequest:
description:
- "RTSP request to send to the server (for example, C(\\"OPTIONS *\\"))."
customheaders:
description:
- "Custom header string to include in the monitoring probes."
maxforwards:
description:
- >-
Maximum number of hops that the SIP request used for monitoring can traverse to reach the server.
Applicable only to monitors of type C(SIP-UDP).
- "Minimum value = C(0)"
- "Maximum value = C(255)"
sipmethod:
choices:
- 'OPTIONS'
- 'INVITE'
- 'REGISTER'
description:
- "SIP method to use for the query. Applicable only to monitors of type C(SIP-UDP)."
sipuri:
description:
- >-
SIP URI string to send to the service (for example, C(sip:sip.test)). Applicable only to monitors of
type C(SIP-UDP).
- "Minimum length = 1"
sipreguri:
description:
- >-
SIP user to be registered. Applicable only if the monitor is of type C(SIP-UDP) and the SIP Method
parameter is set to C(REGISTER).
- "Minimum length = 1"
send:
description:
- "String to send to the service. Applicable to C(TCP-ECV), C(HTTP-ECV), and C(UDP-ECV) monitors."
recv:
description:
- >-
String expected from the server for the service to be marked as UP. Applicable to C(TCP-ECV), C(HTTP-ECV),
and C(UDP-ECV) monitors.
query:
description:
- "Domain name to resolve as part of monitoring the DNS service (for example, C(example.com))."
querytype:
choices:
- 'Address'
- 'Zone'
- 'AAAA'
description:
- >-
Type of DNS record for which to send monitoring queries. Set to C(Address) for querying A records, C(AAAA)
for querying AAAA records, and C(Zone) for querying the SOA record.
scriptname:
description:
- >-
Path and name of the script to execute. The script must be available on the NetScaler appliance, in
the /nsconfig/monitors/ directory.
- "Minimum length = 1"
scriptargs:
description:
- "String of arguments for the script. The string is copied verbatim into the request."
dispatcherip:
description:
- "IP address of the dispatcher to which to send the probe."
dispatcherport:
description:
- "Port number on which the dispatcher listens for the monitoring probe."
username:
description:
- >-
User name with which to probe the C(RADIUS), C(NNTP), C(FTP), C(FTP-EXTENDED), C(MYSQL), C(MSSQL), C(POP3), C(CITRIX-AG),
C(CITRIX-XD-DDC), C(CITRIX-WI-EXTENDED), C(CITRIX-XNC) or C(CITRIX-XDM) server.
- "Minimum length = 1"
password:
description:
- >-
Password that is required for logging on to the C(RADIUS), C(NNTP), C(FTP), C(FTP-EXTENDED), C(MYSQL), C(MSSQL), C(POP3),
C(CITRIX-AG), C(CITRIX-XD-DDC), C(CITRIX-WI-EXTENDED), C(CITRIX-XNC-ECV) or C(CITRIX-XDM) server. Used in
conjunction with the user name specified for the C(username) parameter.
- "Minimum length = 1"
secondarypassword:
description:
- >-
Secondary password that users might have to provide to log on to the Access Gateway server.
Applicable to C(CITRIX-AG) monitors.
logonpointname:
description:
- >-
Name of the logon point that is configured for the Citrix Access Gateway Advanced Access Control
software. Required if you want to monitor the associated login page or Logon Agent. Applicable to
C(CITRIX-AAC-LAS) and C(CITRIX-AAC-LOGINPAGE) monitors.
lasversion:
description:
- >-
Version number of the Citrix Advanced Access Control Logon Agent. Required by the C(CITRIX-AAC-LAS)
monitor.
radkey:
description:
- >-
Authentication key (shared secret text string) for RADIUS clients and servers to exchange. Applicable
to monitors of type C(RADIUS) and C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
radnasid:
description:
- "NAS-Identifier to send in the Access-Request packet. Applicable to monitors of type C(RADIUS)."
- "Minimum length = 1"
radnasip:
description:
- >-
Network Access Server (NAS) IP address to use as the source IP address when monitoring a RADIUS
server. Applicable to monitors of type C(RADIUS) and C(RADIUS_ACCOUNTING).
radaccounttype:
description:
- "Account Type to be used in Account Request Packet. Applicable to monitors of type C(RADIUS_ACCOUNTING)."
- "Minimum value = 0"
- "Maximum value = 15"
radframedip:
description:
- "Source ip with which the packet will go out . Applicable to monitors of type C(RADIUS_ACCOUNTING)."
radapn:
description:
- >-
Called Station Id to be used in Account Request Packet. Applicable to monitors of type
C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
radmsisdn:
description:
- >-
Calling Station Id to be used in Account Request Packet. Applicable to monitors of type
C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
radaccountsession:
description:
- >-
Account Session ID to be used in Account Request Packet. Applicable to monitors of type
C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
lrtm:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Calculate the least response times for bound services. If this parameter is not enabled, the
appliance does not learn the response times of the bound services. Also used for LRTM load balancing.
deviation:
description:
- >-
Time value added to the learned average response time in dynamic response time monitoring (DRTM).
When a deviation is specified, the appliance learns the average response time of bound services and
adds the deviation to the average. The final value is then continually adjusted to accommodate
response time variations over time. Specified in milliseconds, seconds, or minutes.
- "Minimum value = C(0)"
- "Maximum value = C(20939)"
units1:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "Unit of measurement for the Deviation parameter. Cannot be changed after the monitor is created."
interval:
description:
- "Time interval between two successive probes. Must be greater than the value of Response Time-out."
- "Minimum value = C(1)"
- "Maximum value = C(20940)"
units3:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "monitor interval units."
resptimeout:
description:
- >-
Amount of time for which the appliance must wait before it marks a probe as FAILED. Must be less than
the value specified for the Interval parameter.
- >-
Note: For C(UDP-ECV) monitors for which a receive string is not configured, response timeout does not
apply. For C(UDP-ECV) monitors with no receive string, probe failure is indicated by an ICMP port
unreachable error received from the service.
- "Minimum value = C(1)"
- "Maximum value = C(20939)"
units4:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "monitor response timeout units."
resptimeoutthresh:
description:
- >-
Response time threshold, specified as a percentage of the Response Time-out parameter. If the
response to a monitor probe has not arrived when the threshold is reached, the appliance generates an
SNMP trap called monRespTimeoutAboveThresh. After the response time returns to a value below the
threshold, the appliance generates a monRespTimeoutBelowThresh SNMP trap. For the traps to be
generated, the "MONITOR-RTO-THRESHOLD" alarm must also be enabled.
- "Minimum value = C(0)"
- "Maximum value = C(100)"
retries:
description:
- >-
Maximum number of probes to send to establish the state of a service for which a monitoring probe
failed.
- "Minimum value = C(1)"
- "Maximum value = C(127)"
failureretries:
description:
- >-
Number of retries that must fail, out of the number specified for the Retries parameter, for a
service to be marked as DOWN. For example, if the Retries parameter is set to 10 and the Failure
Retries parameter is set to 6, out of the ten probes sent, at least six probes must fail if the
service is to be marked as DOWN. The default value of 0 means that all the retries must fail if the
service is to be marked as DOWN.
- "Minimum value = C(0)"
- "Maximum value = C(32)"
alertretries:
description:
- >-
Number of consecutive probe failures after which the appliance generates an SNMP trap called
monProbeFailed.
- "Minimum value = C(0)"
- "Maximum value = C(32)"
successretries:
description:
- "Number of consecutive successful probes required to transition a service's state from DOWN to UP."
- "Minimum value = C(1)"
- "Maximum value = C(32)"
downtime:
description:
- >-
Time duration for which to wait before probing a service that has been marked as DOWN. Expressed in
milliseconds, seconds, or minutes.
- "Minimum value = C(1)"
- "Maximum value = C(20939)"
units2:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "Unit of measurement for the Down Time parameter. Cannot be changed after the monitor is created."
destip:
description:
- >-
IP address of the service to which to send probes. If the parameter is set to 0, the IP address of
the server to which the monitor is bound is considered the destination IP address.
destport:
description:
- >-
TCP or UDP port to which to send the probe. If the parameter is set to 0, the port number of the
service to which the monitor is bound is considered the destination port. For a monitor of type C(USER),
however, the destination port is the port number that is included in the HTTP request sent to the
dispatcher. Does not apply to monitors of type C(PING).
state:
choices:
- 'enabled'
- 'disabled'
description:
- >-
State of the monitor. The C(disabled) setting disables not only the monitor being configured, but all
monitors of the same type, until the parameter is set to C(enabled). If the monitor is bound to a
service, the state of the monitor is not taken into account when the state of the service is
determined.
reverse:
description:
- >-
Mark a service as DOWN, instead of UP, when probe criteria are satisfied, and as UP instead of DOWN
when probe criteria are not satisfied.
type: bool
transparent:
description:
- >-
The monitor is bound to a transparent device such as a firewall or router. The state of a transparent
device depends on the responsiveness of the services behind it. If a transparent device is being
monitored, a destination IP address must be specified. The probe is sent to the specified IP address
by using the MAC address of the transparent device.
type: bool
iptunnel:
description:
- >-
Send the monitoring probe to the service through an IP tunnel. A destination IP address must be
specified.
type: bool
tos:
description:
- "Probe the service by encoding the destination IP address in the IP TOS (6) bits."
type: bool
tosid:
description:
- "The TOS ID of the specified destination IP. Applicable only when the TOS parameter is set."
- "Minimum value = C(1)"
- "Maximum value = C(63)"
secure:
description:
- >-
Use a secure SSL connection when monitoring a service. Applicable only to TCP based monitors. The
secure option cannot be used with a C(CITRIX-AG) monitor, because a CITRIX-AG monitor uses a secure
connection by default.
type: bool
validatecred:
description:
- >-
Validate the credentials of the Xen Desktop DDC server user. Applicable to monitors of type
C(CITRIX-XD-DDC).
type: bool
domain:
description:
- >-
Domain in which the XenDesktop Desktop Delivery Controller (DDC) servers or Web Interface servers are
present. Required by C(CITRIX-XD-DDC) and C(CITRIX-WI-EXTENDED) monitors for logging on to the DDC servers
and Web Interface servers, respectively.
ipaddress:
description:
- >-
Set of IP addresses expected in the monitoring response from the DNS server, if the record type is A
or AAAA. Applicable to C(DNS) monitors.
- "Minimum length = 1"
group:
description:
- >-
Name of a newsgroup available on the NNTP service that is to be monitored. The appliance periodically
generates an NNTP query for the name of the newsgroup and evaluates the response. If the newsgroup is
found on the server, the service is marked as UP. If the newsgroup does not exist or if the search
fails, the service is marked as DOWN. Applicable to NNTP monitors.
- "Minimum length = 1"
filename:
description:
- >-
Name of a file on the FTP server. The appliance monitors the FTP service by periodically checking the
existence of the file on the server. Applicable to C(FTP-EXTENDED) monitors.
- "Minimum length = 1"
basedn:
description:
- >-
The base distinguished name of the LDAP service, from where the LDAP server can begin the search for
the attributes in the monitoring query. Required for C(LDAP) service monitoring.
- "Minimum length = 1"
binddn:
description:
- >-
The distinguished name with which an LDAP monitor can perform the Bind operation on the LDAP server.
Optional. Applicable to C(LDAP) monitors.
- "Minimum length = 1"
filter:
description:
- "Filter criteria for the LDAP query. Optional."
- "Minimum length = 1"
attribute:
description:
- >-
Attribute to evaluate when the LDAP server responds to the query. Success or failure of the
monitoring probe depends on whether the attribute exists in the response. Optional.
- "Minimum length = 1"
database:
description:
- "Name of the database to connect to during authentication."
- "Minimum length = 1"
oraclesid:
description:
- "Name of the service identifier that is used to connect to the Oracle database during authentication."
- "Minimum length = 1"
sqlquery:
description:
- >-
SQL query for a C(MYSQL-ECV) or C(MSSQL-ECV) monitor. Sent to the database server after the server
authenticates the connection.
- "Minimum length = 1"
evalrule:
description:
- >-
Default syntax expression that evaluates the database server's response to a MYSQL-ECV or MSSQL-ECV
monitoring query. Must produce a Boolean result. The result determines the state of the server. If
the expression returns TRUE, the probe succeeds.
- >-
For example, if you want the appliance to evaluate the error message to determine the state of the
server, use the rule C(MYSQL.RES.ROW(10) .TEXT_ELEM(2).EQ("MySQL")).
mssqlprotocolversion:
choices:
- '70'
- '2000'
- '2000SP1'
- '2005'
- '2008'
- '2008R2'
- '2012'
- '2014'
description:
- "Version of MSSQL server that is to be monitored."
Snmpoid:
description:
- "SNMP OID for C(SNMP) monitors."
- "Minimum length = 1"
snmpcommunity:
description:
- "Community name for C(SNMP) monitors."
- "Minimum length = 1"
snmpthreshold:
description:
- "Threshold for C(SNMP) monitors."
- "Minimum length = 1"
snmpversion:
choices:
- 'V1'
- 'V2'
description:
- "SNMP version to be used for C(SNMP) monitors."
metrictable:
description:
- "Metric table to which to bind metrics."
- "Minimum length = 1"
- "Maximum length = 99"
application:
description:
- >-
Name of the application used to determine the state of the service. Applicable to monitors of type
C(CITRIX-XML-SERVICE).
- "Minimum length = 1"
sitepath:
description:
- >-
URL of the logon page. For monitors of type C(CITRIX-WEB-INTERFACE), to monitor a dynamic page under the
site path, terminate the site path with a slash C(/). Applicable to C(CITRIX-WEB-INTERFACE),
C(CITRIX-WI-EXTENDED) and C(CITRIX-XDM) monitors.
- "Minimum length = 1"
storename:
description:
- >-
Store Name. For monitors of type C(STOREFRONT), C(storename) is an optional argument defining storefront
service store name. Applicable to C(STOREFRONT) monitors.
- "Minimum length = 1"
storefrontacctservice:
description:
- >-
Enable/Disable probing for Account Service. Applicable only to Store Front monitors. For
multi-tenancy configuration users may skip account service.
type: bool
hostname:
description:
- "Hostname in the FQDN format (Example: C(porche.cars.org)). Applicable to C(STOREFRONT) monitors."
- "Minimum length = 1"
netprofile:
description:
- "Name of the network profile."
- "Minimum length = 1"
- "Maximum length = 127"
originhost:
description:
- >-
Origin-Host value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
- "Minimum length = 1"
originrealm:
description:
- >-
Origin-Realm value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
- "Minimum length = 1"
hostipaddress:
description:
- >-
Host-IP-Address value for the Capabilities-Exchange-Request (CER) message to use for monitoring
Diameter servers. If Host-IP-Address is not specified, the appliance inserts the mapped IP (MIP)
address or subnet IP (SNIP) address from which the CER request (the monitoring probe) is sent.
- "Minimum length = 1"
vendorid:
description:
- >-
Vendor-Id value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
productname:
description:
- >-
Product-Name value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
- "Minimum length = 1"
firmwarerevision:
description:
- >-
Firmware-Revision value for the Capabilities-Exchange-Request (CER) message to use for monitoring
Diameter servers.
authapplicationid:
description:
- >-
List of Auth-Application-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER)
message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported in a
monitoring CER message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
acctapplicationid:
description:
- >-
List of Acct-Application-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER)
message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported in a
monitoring message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
inbandsecurityid:
choices:
- 'NO_INBAND_SECURITY'
- 'TLS'
description:
- >-
Inband-Security-Id for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
supportedvendorids:
description:
- >-
List of Supported-Vendor-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER)
message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported in a
monitoring message.
- "Minimum value = C(1)"
- "Maximum value = C(4294967295)"
vendorspecificvendorid:
description:
- >-
Vendor-Id to use in the Vendor-Specific-Application-Id grouped attribute-value pair (AVP) in the
monitoring CER message. To specify Auth-Application-Id or Acct-Application-Id in
Vendor-Specific-Application-Id, use vendorSpecificAuthApplicationIds or
vendorSpecificAcctApplicationIds, respectively. Only one Vendor-Id is supported for all the
Vendor-Specific-Application-Id AVPs in a CER monitoring message.
- "Minimum value = 1"
vendorspecificauthapplicationids:
description:
- >-
List of Vendor-Specific-Auth-Application-Id attribute value pairs (AVPs) for the
Capabilities-Exchange-Request (CER) message to use for monitoring Diameter servers. A maximum of
eight of these AVPs are supported in a monitoring message. The specified value is combined with the
value of vendorSpecificVendorId to obtain the Vendor-Specific-Application-Id AVP in the CER
monitoring message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
vendorspecificacctapplicationids:
description:
- >-
List of Vendor-Specific-Acct-Application-Id attribute value pairs (AVPs) to use for monitoring
Diameter servers. A maximum of eight of these AVPs are supported in a monitoring message. The
specified value is combined with the value of vendorSpecificVendorId to obtain the
Vendor-Specific-Application-Id AVP in the CER monitoring message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
kcdaccount:
description:
- "KCD Account used by C(MSSQL) monitor."
- "Minimum length = 1"
- "Maximum length = 32"
storedb:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Store the database list populated with the responses to monitor probes. Used in database-specific
load balancing if a C(MSSQL-ECV)/C(MYSQL-ECV) monitor is configured.
storefrontcheckbackendservices:
description:
- >-
This option enables monitoring of services running on the storefront server. Storefront services are
monitored by probing a Windows service that runs on the Storefront server and exposes details of
which storefront services are running.
type: bool
trofscode:
description:
- "Code expected when the server is under maintenance."
trofsstring:
description:
- >-
String expected from the server for the service to be marked as trofs. Applicable to C(HTTP-ECV)/C(TCP-ECV)
monitors.
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Set lb monitor
local_action:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
validate_certs: no
module: netscaler_lb_monitor
state: present
monitorname: monitor_1
type: HTTP-INLINE
action: DOWN
respcode: ['400']
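# A second illustrative task (a sketch; the attribute values below are
# assumptions, not defaults taken from the module):
- name: Remove lb monitor
  local_action:
    nsip: 172.18.0.2
    nitro_user: nsroot
    nitro_pass: nsroot
    validate_certs: no
    module: netscaler_lb_monitor
    state: absent
    monitorname: monitor_1
    type: HTTP-INLINE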
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: { 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log,
loglines,
ensure_feature_is_enabled,
get_immutables_intersection
)
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor import lbmonitor
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
def lbmonitor_exists(client, module):
log('Checking if monitor exists')
if lbmonitor.count_filtered(client, 'monitorname:%s' % module.params['monitorname']) > 0:
return True
else:
return False
def lbmonitor_identical(client, module, lbmonitor_proxy):
log('Checking if monitor is identical')
count = lbmonitor.count_filtered(client, 'monitorname:%s' % module.params['monitorname'])
if count == 0:
return False
lbmonitor_list = lbmonitor.get_filtered(client, 'monitorname:%s' % module.params['monitorname'])
diff_dict = lbmonitor_proxy.diff_object(lbmonitor_list[0])
# Skip hashed fields since they cannot be compared directly
hashed_fields = [
'password',
'secondarypassword',
'radkey',
]
for key in hashed_fields:
if key in diff_dict:
del diff_dict[key]
if diff_dict == {}:
return True
else:
return False
def diff_list(client, module, lbmonitor_proxy):
monitor_list = lbmonitor.get_filtered(client, 'monitorname:%s' % module.params['monitorname'])
return lbmonitor_proxy.diff_object(monitor_list[0])
def main():
module_specific_arguments = dict(
monitorname=dict(type='str'),
type=dict(
type='str',
choices=[
'PING',
'TCP',
'HTTP',
'TCP-ECV',
'HTTP-ECV',
'UDP-ECV',
'DNS',
'FTP',
'LDNS-PING',
'LDNS-TCP',
'LDNS-DNS',
'RADIUS',
'USER',
'HTTP-INLINE',
'SIP-UDP',
'SIP-TCP',
'LOAD',
'FTP-EXTENDED',
'SMTP',
'SNMP',
'NNTP',
'MYSQL',
'MYSQL-ECV',
'MSSQL-ECV',
'ORACLE-ECV',
'LDAP',
'POP3',
'CITRIX-XML-SERVICE',
'CITRIX-WEB-INTERFACE',
'DNS-TCP',
'RTSP',
'ARP',
'CITRIX-AG',
'CITRIX-AAC-LOGINPAGE',
'CITRIX-AAC-LAS',
'CITRIX-XD-DDC',
'ND6',
'CITRIX-WI-EXTENDED',
'DIAMETER',
'RADIUS_ACCOUNTING',
'STOREFRONT',
'APPC',
'SMPP',
'CITRIX-XNC-ECV',
'CITRIX-XDM',
'CITRIX-STA-SERVICE',
'CITRIX-STA-SERVICE-NHOP',
]
),
action=dict(
type='str',
choices=[
'NONE',
'LOG',
'DOWN',
]
),
respcode=dict(type='list'),
httprequest=dict(type='str'),
rtsprequest=dict(type='str'),
customheaders=dict(type='str'),
maxforwards=dict(type='float'),
sipmethod=dict(
type='str',
choices=[
'OPTIONS',
'INVITE',
'REGISTER',
]
),
sipuri=dict(type='str'),
sipreguri=dict(type='str'),
send=dict(type='str'),
recv=dict(type='str'),
query=dict(type='str'),
querytype=dict(
type='str',
choices=[
'Address',
'Zone',
'AAAA',
]
),
scriptname=dict(type='str'),
scriptargs=dict(type='str'),
dispatcherip=dict(type='str'),
dispatcherport=dict(type='int'),
username=dict(type='str'),
password=dict(type='str'),
secondarypassword=dict(type='str'),
logonpointname=dict(type='str'),
lasversion=dict(type='str'),
radkey=dict(type='str'),
radnasid=dict(type='str'),
radnasip=dict(type='str'),
radaccounttype=dict(type='float'),
radframedip=dict(type='str'),
radapn=dict(type='str'),
radmsisdn=dict(type='str'),
radaccountsession=dict(type='str'),
lrtm=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
deviation=dict(type='float'),
units1=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
interval=dict(type='int'),
units3=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
resptimeout=dict(type='int'),
units4=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
resptimeoutthresh=dict(type='float'),
retries=dict(type='int'),
failureretries=dict(type='int'),
alertretries=dict(type='int'),
successretries=dict(type='int'),
downtime=dict(type='int'),
units2=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
destip=dict(type='str'),
destport=dict(type='int'),
reverse=dict(type='bool'),
transparent=dict(type='bool'),
iptunnel=dict(type='bool'),
tos=dict(type='bool'),
tosid=dict(type='float'),
secure=dict(type='bool'),
validatecred=dict(type='bool'),
domain=dict(type='str'),
ipaddress=dict(type='list'),
group=dict(type='str'),
filename=dict(type='str'),
basedn=dict(type='str'),
binddn=dict(type='str'),
filter=dict(type='str'),
attribute=dict(type='str'),
database=dict(type='str'),
oraclesid=dict(type='str'),
sqlquery=dict(type='str'),
evalrule=dict(type='str'),
mssqlprotocolversion=dict(
type='str',
choices=[
'70',
'2000',
'2000SP1',
'2005',
'2008',
'2008R2',
'2012',
'2014',
]
),
Snmpoid=dict(type='str'),
snmpcommunity=dict(type='str'),
snmpthreshold=dict(type='str'),
snmpversion=dict(
type='str',
choices=[
'V1',
'V2',
]
),
application=dict(type='str'),
sitepath=dict(type='str'),
storename=dict(type='str'),
storefrontacctservice=dict(type='bool'),
hostname=dict(type='str'),
netprofile=dict(type='str'),
originhost=dict(type='str'),
originrealm=dict(type='str'),
hostipaddress=dict(type='str'),
vendorid=dict(type='float'),
productname=dict(type='str'),
firmwarerevision=dict(type='float'),
authapplicationid=dict(type='list'),
acctapplicationid=dict(type='list'),
inbandsecurityid=dict(
type='str',
choices=[
'NO_INBAND_SECURITY',
'TLS',
]
),
supportedvendorids=dict(type='list'),
vendorspecificvendorid=dict(type='float'),
vendorspecificauthapplicationids=dict(type='list'),
vendorspecificacctapplicationids=dict(type='list'),
storedb=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
storefrontcheckbackendservices=dict(type='bool'),
trofscode=dict(type='float'),
trofsstring=dict(type='str'),
)
hand_inserted_arguments = dict()
argument_spec = dict()
argument_spec.update(module_specific_arguments)
argument_spec.update(netscaler_common_arguments)
argument_spec.update(hand_inserted_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk', **module_result)
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
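# The exception type is compared as a string, which avoids importing
# requests in this module.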
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
# Instantiate lb monitor object
readwrite_attrs = [
'monitorname',
'type',
'action',
'respcode',
'httprequest',
'rtsprequest',
'customheaders',
'maxforwards',
'sipmethod',
'sipuri',
'sipreguri',
'send',
'recv',
'query',
'querytype',
'scriptname',
'scriptargs',
'dispatcherip',
'dispatcherport',
'username',
'password',
'secondarypassword',
'logonpointname',
'lasversion',
'radkey',
'radnasid',
'radnasip',
'radaccounttype',
'radframedip',
'radapn',
'radmsisdn',
'radaccountsession',
'lrtm',
'deviation',
'units1',
'interval',
'units3',
'resptimeout',
'units4',
'resptimeoutthresh',
'retries',
'failureretries',
'alertretries',
'successretries',
'downtime',
'units2',
'destip',
'destport',
'reverse',
'transparent',
'iptunnel',
'tos',
'tosid',
'secure',
'validatecred',
'domain',
'ipaddress',
'group',
'filename',
'basedn',
'binddn',
'filter',
'attribute',
'database',
'oraclesid',
'sqlquery',
'evalrule',
'mssqlprotocolversion',
'Snmpoid',
'snmpcommunity',
'snmpthreshold',
'snmpversion',
'application',
'sitepath',
'storename',
'storefrontacctservice',
'netprofile',
'originhost',
'originrealm',
'hostipaddress',
'vendorid',
'productname',
'firmwarerevision',
'authapplicationid',
'acctapplicationid',
'inbandsecurityid',
'supportedvendorids',
'vendorspecificvendorid',
'vendorspecificauthapplicationids',
'vendorspecificacctapplicationids',
'storedb',
'storefrontcheckbackendservices',
'trofscode',
'trofsstring',
]
readonly_attrs = [
'lrtmconf',
'lrtmconfstr',
'dynamicresponsetimeout',
'dynamicinterval',
'multimetrictable',
'dup_state',
'dup_weight',
'weight',
]
immutable_attrs = [
'monitorname',
'type',
'units1',
'units3',
'units4',
'units2',
'Snmpoid',
'hostname',
'servicename',
'servicegroupname',
]
transforms = {
'storefrontcheckbackendservices': ['bool_yes_no'],
'secure': ['bool_yes_no'],
'tos': ['bool_yes_no'],
'validatecred': ['bool_yes_no'],
'storefrontacctservice': ['bool_yes_no'],
'iptunnel': ['bool_yes_no'],
'transparent': ['bool_yes_no'],
'reverse': ['bool_yes_no'],
'lrtm': [lambda v: v.upper()],
'storedb': [lambda v: v.upper()],
}
lbmonitor_proxy = ConfigProxy(
actual=lbmonitor(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
ensure_feature_is_enabled(client, 'LB')
if module.params['state'] == 'present':
log('Applying actions for state present')
if not lbmonitor_exists(client, module):
if not module.check_mode:
log('Adding monitor')
lbmonitor_proxy.add()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not lbmonitor_identical(client, module, lbmonitor_proxy):
# Check whether we are trying to change values of immutable attributes
immutables_changed = get_immutables_intersection(lbmonitor_proxy, diff_list(client, module, lbmonitor_proxy).keys())
if immutables_changed != []:
diff = diff_list(client, module, lbmonitor_proxy)
msg = 'Cannot update immutable attributes %s' % (immutables_changed,)
module.fail_json(msg=msg, diff=diff, **module_result)
if not module.check_mode:
log('Updating monitor')
lbmonitor_proxy.update()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
log('Doing nothing for monitor')
module_result['changed'] = False
# Sanity check for result
log('Sanity checks for state present')
if not module.check_mode:
if not lbmonitor_exists(client, module):
module.fail_json(msg='lb monitor does not exist', **module_result)
if not lbmonitor_identical(client, module, lbmonitor_proxy):
module.fail_json(
msg='lb monitor is not configured correctly',
diff=diff_list(client, module, lbmonitor_proxy),
**module_result
)
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if lbmonitor_exists(client, module):
if not module.check_mode:
lbmonitor_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for result
log('Sanity checks for state absent')
if not module.check_mode:
if lbmonitor_exists(client, module):
module.fail_json(msg='lb monitor still exists', **module_result)
module_result['actual_attributes'] = lbmonitor_proxy.get_actual_rw_attributes(filter='monitorname')
except nitro_exception as e:
msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
| gpl-3.0 |
petrvanblokland/Xierpa3 | xierpa3/adapters/dynamodbadapter.py | 1 | 1331 | # -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ buro@petr.com, www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# dynamodbadapter.py
#
from xierpa3.adapters.adapter import Adapter
#from xierpa3.toolbox.database.dynamodb.dynamodbconnector import Connector
class Connector():
# @@@ Under development
pass
class DynamoDBAdapter(Adapter):
u"""
Wrapper around the DynamoDB Connector, using:
- Connector.getItem(id)
- Connector.saveItem(item)
- Connector.newItem(d)
"""
# @@@ Under development
def __init__(self):
Adapter.__init__(self)
def getItem(self, id):
return Connector.getItem(id)
def newItem(self, d=None):
return Connector.newItem(d)
def saveItem(self, item):
Connector.saveItem(item)
def getMessage(self, count):
return self.newArticle(text=u'English is not native. For corrections on disaster misspellings please contact buro (at) petr.com')
def getLogo(self, count):
return self.newArticle(url='http://petr.com/_images/contact.png')
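# Minimal usage sketch (hypothetical; assumes a finished DynamoDB Connector,
# which is still under development above, and an `id` attribute on items):
#
#   adapter = DynamoDBAdapter()
#   item = adapter.newItem(dict(text=u'Hello'))
#   adapter.saveItem(item)
#   fetched = adapter.getItem(item.id)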
if __name__ == "__main__":
pass
| mit |
HurryNwait/kernel-crespo-jellybean | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
espressopp/espressopp | src/FixedTripleAngleList.py | 1 | 4092 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*******************************
espressopp.FixedTripleAngleList
*******************************
.. function:: espressopp.FixedTripleAngleList(storage)
:param storage:
:type storage:
.. function:: espressopp.FixedTripleAngleList.add(pid1, pid2, pid3)
:param pid1:
:param pid2:
:param pid3:
:type pid1:
:type pid2:
:type pid3:
:rtype:
.. function:: espressopp.FixedTripleAngleList.addTriples(triplelist)
:param triplelist:
:type triplelist:
:rtype:
.. function:: espressopp.FixedTripleAngleList.getAngle(pid1, pid2, pid3)
:param pid1:
:param pid2:
:param pid3:
:type pid1:
:type pid2:
:type pid3:
:rtype:
.. function:: espressopp.FixedTripleAngleList.getTriples()
:rtype:
.. function:: espressopp.FixedTripleAngleList.getTriplesAngles()
:rtype:
.. function:: espressopp.FixedTripleAngleList.size()
:rtype:
"""
from espressopp import pmi
import _espressopp
#import espressopp
from espressopp.esutil import cxxinit
class FixedTripleAngleListLocal(_espressopp.FixedTripleAngleList):
def __init__(self, storage):
#if pmi.workerIsActive():
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, _espressopp.FixedTripleAngleList, storage)
def add(self, pid1, pid2, pid3):
if pmi.workerIsActive():
return self.cxxclass.add(self, pid1, pid2, pid3)
def addTriples(self, triplelist):
"""
Each processor takes the broadcasted triplelist and
adds those triples whose first particle is owned by
this processor.
"""
if pmi.workerIsActive():
for triple in triplelist:
pid1, pid2, pid3 = triple
self.cxxclass.add(self, pid1, pid2, pid3)
def size(self):
if pmi.workerIsActive():
return self.cxxclass.size(self)
def getTriples(self):
if pmi.workerIsActive():
triples = self.cxxclass.getTriples(self)
return triples
def getTriplesAngles(self):
'returns the list of (pid1, pid2, pid3, angle(123))'
if pmi.workerIsActive():
triples_angles = self.cxxclass.getTriplesAngles(self)
return triples_angles
def getAngle(self, pid1, pid2, pid3):
if pmi.workerIsActive():
return self.cxxclass.getAngle(self, pid1, pid2, pid3)
if pmi.isController:
class FixedTripleAngleList(metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.FixedTripleAngleListLocal',
localcall = [ "add" ],
pmicall = [ "addTriples" ],
pmiinvoke = ["getTriples", "getTriplesAngles", "size"]
)
def getAngle(self, pid1, pid2, pid3 ):
angles = pmi.invoke(self.pmiobject, 'getAngle', pid1, pid2, pid3 )
for i in angles:
if( i != -1 ):
return i
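# Minimal usage sketch (names such as `system.storage` are assumptions from
# typical ESPResSo++ scripts, not defined in this file):
#
#   ftal = espressopp.FixedTripleAngleList(system.storage)
#   ftal.addTriples([(1, 2, 3), (2, 3, 4)])
#   ftal.size()             # -> 2
#   ftal.getAngle(1, 2, 3)  # angle formed by particles 1-2-3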
| gpl-3.0 |
silverlogic/djangorestframework-timed-auth-token | tests/test_models.py | 1 | 1333 | from datetime import timedelta
import pytest
from django.utils import timezone
from timed_auth_token.models import TimedAuthToken
from users.models import CustomUser
pytestmark = pytest.mark.django_db
@pytest.fixture
def token():
return TimedAuthToken(user=CustomUser.objects.create(identifier='blah'))
def test_calculate_new_expiration_uses_30_day_default(token):
token.calculate_new_expiration()
expected = timezone.now().date() + timedelta(days=30)
actual = token.expires.date()
assert expected == actual
def test_calculate_new_expiration_duration_can_be_set_in_settings(token, settings):
settings.TIMED_AUTH_TOKEN = {'DEFAULT_VALIDITY_DURATION': timedelta(days=5)}
token.calculate_new_expiration()
expected = timezone.now().date() + timedelta(days=5)
actual = token.expires.date()
assert expected == actual
def test_calculate_new_expiration_can_be_overridden_on_model(token, settings):
settings.TIMED_AUTH_TOKEN = {'DEFAULT_VALIDITY_DURATION': timedelta(days=5)}
CustomUser.token_validity_duration = timedelta(days=10)
token.calculate_new_expiration()
expected = timezone.now().date() + timedelta(days=10)
actual = token.expires.date()
assert expected == actual
def test_str_representation_has_username_field(token):
assert 'blah' in str(token)
| mit |
elijah513/tornado | tornado/test/log_test.py | 111 | 8368 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import contextlib
import glob
import logging
import os
import re
import subprocess
import sys
import tempfile
import warnings
from tornado.escape import utf8
from tornado.log import LogFormatter, define_logging_options, enable_pretty_logging
from tornado.options import OptionParser
from tornado.test.util import unittest
from tornado.util import u, basestring_type
@contextlib.contextmanager
def ignore_bytes_warning():
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=BytesWarning)
yield
class LogFormatterTest(unittest.TestCase):
# Matches the output of a single logging call (which may be multiple lines
# if a traceback was included, so we use the DOTALL option)
LINE_RE = re.compile(b"(?s)\x01\\[E [0-9]{6} [0-9]{2}:[0-9]{2}:[0-9]{2} log_test:[0-9]+\\]\x02 (.*)")
def setUp(self):
self.formatter = LogFormatter(color=False)
# Fake color support. We can't guarantee anything about the $TERM
# variable when the tests are run, so just patch in some values
# for testing. (testing with color off fails to expose some potential
# encoding issues from the control characters)
self.formatter._colors = {
logging.ERROR: u("\u0001"),
}
self.formatter._normal = u("\u0002")
# construct a Logger directly to bypass getLogger's caching
self.logger = logging.Logger('LogFormatterTest')
self.logger.propagate = False
self.tempdir = tempfile.mkdtemp()
self.filename = os.path.join(self.tempdir, 'log.out')
self.handler = self.make_handler(self.filename)
self.handler.setFormatter(self.formatter)
self.logger.addHandler(self.handler)
def tearDown(self):
self.handler.close()
os.unlink(self.filename)
os.rmdir(self.tempdir)
def make_handler(self, filename):
# Base case: default setup without explicit encoding.
# In python 2, supports arbitrary byte strings and unicode objects
# that contain only ascii. In python 3, supports ascii-only unicode
# strings (but byte strings will be repr'd automatically).
return logging.FileHandler(filename)
def get_output(self):
with open(self.filename, "rb") as f:
line = f.read().strip()
m = LogFormatterTest.LINE_RE.match(line)
if m:
return m.group(1)
else:
raise Exception("output didn't match regex: %r" % line)
def test_basic_logging(self):
self.logger.error("foo")
self.assertEqual(self.get_output(), b"foo")
def test_bytes_logging(self):
with ignore_bytes_warning():
# This will be "\xe9" on python 2 or "b'\xe9'" on python 3
self.logger.error(b"\xe9")
self.assertEqual(self.get_output(), utf8(repr(b"\xe9")))
def test_utf8_logging(self):
with ignore_bytes_warning():
self.logger.error(u("\u00e9").encode("utf8"))
if issubclass(bytes, basestring_type):
# on python 2, utf8 byte strings (and by extension ascii byte
# strings) are passed through as-is.
self.assertEqual(self.get_output(), utf8(u("\u00e9")))
else:
# on python 3, byte strings always get repr'd even if
# they're ascii-only, so this degenerates into another
# copy of test_bytes_logging.
self.assertEqual(self.get_output(), utf8(repr(utf8(u("\u00e9")))))
def test_bytes_exception_logging(self):
try:
raise Exception(b'\xe9')
except Exception:
self.logger.exception('caught exception')
# This will be "Exception: \xe9" on python 2 or
# "Exception: b'\xe9'" on python 3.
output = self.get_output()
self.assertRegexpMatches(output, br'Exception.*\\xe9')
# The traceback contains newlines, which should not have been escaped.
self.assertNotIn(br'\n', output)
class UnicodeLogFormatterTest(LogFormatterTest):
def make_handler(self, filename):
# Adding an explicit encoding configuration allows non-ascii unicode
# strings in both python 2 and 3, without changing the behavior
# for byte strings.
return logging.FileHandler(filename, encoding="utf8")
def test_unicode_logging(self):
self.logger.error(u("\u00e9"))
self.assertEqual(self.get_output(), utf8(u("\u00e9")))
class EnablePrettyLoggingTest(unittest.TestCase):
def setUp(self):
super(EnablePrettyLoggingTest, self).setUp()
self.options = OptionParser()
define_logging_options(self.options)
self.logger = logging.Logger('tornado.test.log_test.EnablePrettyLoggingTest')
self.logger.propagate = False
def test_log_file(self):
tmpdir = tempfile.mkdtemp()
try:
self.options.log_file_prefix = tmpdir + '/test_log'
enable_pretty_logging(options=self.options, logger=self.logger)
self.assertEqual(1, len(self.logger.handlers))
self.logger.error('hello')
self.logger.handlers[0].flush()
filenames = glob.glob(tmpdir + '/test_log*')
self.assertEqual(1, len(filenames))
with open(filenames[0]) as f:
self.assertRegexpMatches(f.read(), r'^\[E [^]]*\] hello$')
finally:
for handler in self.logger.handlers:
handler.flush()
handler.close()
for filename in glob.glob(tmpdir + '/test_log*'):
os.unlink(filename)
os.rmdir(tmpdir)
class LoggingOptionTest(unittest.TestCase):
"""Test the ability to enable and disable Tornado's logging hooks."""
def logs_present(self, statement, args=None):
# Each test may manipulate and/or parse the options and then logs
# a line at the 'info' level. This level is ignored in the
# logging module by default, but Tornado turns it on by default
# so it is the easiest way to tell whether tornado's logging hooks
# ran.
IMPORT = 'from tornado.options import options, parse_command_line'
LOG_INFO = 'import logging; logging.info("hello")'
program = ';'.join([IMPORT, statement, LOG_INFO])
proc = subprocess.Popen(
[sys.executable, '-c', program] + (args or []),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = proc.communicate()
self.assertEqual(proc.returncode, 0, 'process failed: %r' % stdout)
return b'hello' in stdout
def test_default(self):
self.assertFalse(self.logs_present('pass'))
def test_tornado_default(self):
self.assertTrue(self.logs_present('parse_command_line()'))
def test_disable_command_line(self):
self.assertFalse(self.logs_present('parse_command_line()',
['--logging=none']))
def test_disable_command_line_case_insensitive(self):
self.assertFalse(self.logs_present('parse_command_line()',
['--logging=None']))
def test_disable_code_string(self):
self.assertFalse(self.logs_present(
'options.logging = "none"; parse_command_line()'))
def test_disable_code_none(self):
self.assertFalse(self.logs_present(
'options.logging = None; parse_command_line()'))
def test_disable_override(self):
# command line trumps code defaults
self.assertTrue(self.logs_present(
'options.logging = None; parse_command_line()',
['--logging=info']))
| apache-2.0 |
40223232/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/sre_parse.py | 630 | 29657 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def __iter__(self):
return iter(self.data)
def dump(self, level=0):
nl = 1
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + op, end=' '); nl = 0
if op == "in":
# member sublanguage
print(); nl = 1
for op, a in av:
print((level+1)*" " + op, a)
elif op == "branch":
print(); nl = 1
i = 0
for a in av[1]:
if i > 0:
print(level*" " + "or")
a.dump(level+1); nl = 1
i = i + 1
elif isinstance(av, seqtypes):
for a in av:
if isinstance(a, SubPattern):
if not nl: print()
a.dump(level+1); nl = 1
else:
print(a, end=' ') ; nl = 0
else:
print(av, end=' ') ; nl = 0
if not nl: print()
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxsize
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + int(i) * av[0]
hi = hi + int(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
return self.width
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index:self.index+1]
# Special case for the str8, since indexing returns a integer
# XXX This is only needed for test_bug_926075 in test_re.py
if char and not self.istext:
char = chr(char[0])
if char == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error("bogus escape (end of line)")
if not self.istext:
c = chr(c)
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error("cannot refer to open group")
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error("pattern not properly closed")
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error("conditional backref with more than two branches")
else:
item_no = None
if source.next and not source.match(")", 0):
raise error("pattern not properly closed")
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error("unexpected end of regular expression")
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error("bad character range")
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error("bad character range")
setappend((RANGE, (lo, hi)))
else:
raise error("unexpected end of regular expression")
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error("not supported")
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error("nothing to repeat")
if item[0][0] in REPEATCODES:
raise error("multiple repeat")
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
gid = state.groupdict.get(name)
if gid is None:
raise error("unknown group name")
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
raise error("unknown specifier: ?P%s" % char)
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error("unbalanced parenthesis")
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error("syntax error")
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error("unknown group name")
else:
try:
condgroup = int(condname)
except ValueError:
raise error("bad character in group name")
else:
# flags
if not source.next in FLAGS:
raise error("unexpected end of pattern")
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
if char == ")":
break
raise error("unknown extension")
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error("parser error")
return subpattern
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("can't use UNICODE flag with a bytes pattern")
return flags
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
p.pattern.flags = fix_flags(str, p.pattern.flags)
tail = source.get()
if tail == ")":
raise error("unbalanced parenthesis")
elif tail:
raise error("bogus characters at end of regular expression")
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
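# Usage sketch: parse() turns a pattern string into a SubPattern of
# (opcode, argument) tuples (shown schematically, not verbatim):
#
#   p = parse(r"a(b|c)+")
#   # p.data is roughly:
#   #   [(LITERAL, 97),
#   #    (MAX_REPEAT, (1, MAXREPEAT, [(SUBPATTERN, (1, <b|c branch>))]))]
#   p.getwidth()  # (min, max) match width, here (2, <capped maximum>)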
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
sep = source[:0]
# Both branches resolve to chr() under Python 3.
makechar = chr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error("unterminated group name")
if char == ">":
break
name = name + char
if not name:
raise error("missing group name")
try:
index = int(name)
if index < 0:
raise error("negative group number")
except ValueError:
if not isname(name):
raise error("bad character in group name")
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError("unknown group name")
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
if isinstance(source, str):
encode = lambda x: x
else:
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
encode = lambda x: x.encode('latin-1')
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = encode(s)
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error("unmatched group")
except IndexError:
raise error("invalid group reference")
return sep.join(literals)
| gpl-3.0 |
matiasherranz/keyczar | cpp/src/tools/swtoolkit/site_scons/site_tools/seven_zip.py | 15 | 5446 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""SCons tool for 7zip."""
import os
import shutil
import subprocess
import tempfile
import SCons.Script
def SevenZipGetFiles(env, source):
"""SCons emitter for 7zip extract.
Examines the source 7z archive to determine the list of files which will be
created by extract/unzip operation.
Args:
env: The SCons environment to get the 7zip command line from.
source: The 7zip archive to examine.
Returns:
The list of filenames in the archive.
"""
# Expand the command to list archive contents.
cmd = env.subst('$SEVEN_ZIP l "%s"' % source)
# Run it and capture output.
output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
# Strip off 7-line header and 3-line trailer from 7zip output.
lines = output.split('\r\n')[7:-3]
# Trim out just the files and their names.
files = [i[53:] for i in lines if i[20] != 'D']
return files
def SevenZipEmitter(target, source, env):
"""An emitter that decides what nodes are vented from a 7zip archive.
Args:
target: The target directory node.
source: The source archive node.
env: The environment in which the emit takes place.
Returns:
The pair (target, source) which lists the emitted targets and sources.
"""
# Remember out dir for later.
env['SEVEN_ZIP_OUT_DIR'] = target[0].dir
# Get out contents
files = SevenZipGetFiles(env, env.subst('$SOURCE', source=source))
# Extract a layer deeper if there is only one file and its extension is .7z.
if env.get('SEVEN_ZIP_PEEL_LAYERS', False):
assert len(files) == 1 and os.path.splitext(files[0])[1] == '.7z'
# Create a temporary directory.
tmp_dir = tempfile.mkdtemp()
# Expand the command to extract the archive to a temporary location.
cmd = env.subst('$SEVEN_ZIP x $SOURCE -o"%s"' % tmp_dir, source=source[0])
# Run it and swallow output.
subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()
# Get contents.
inner_files = SevenZipGetFiles(env, os.path.join(tmp_dir, files[0]))
# Make into file nodes.
inner_files = [target[0].dir.File(i) for i in inner_files]
# Delete temp_dir.
shutil.rmtree(tmp_dir)
# Decide where to extract the working file to.
working_file = env.Dir(target[0].dir.abspath +
'.7zip_extract').File(files[0])
# Combine everything.
files = [working_file] + inner_files
else:
# Make into file nodes.
files = [target[0].dir.File(i) for i in files]
# Return files as actual target.
return (files, source)
def SevenZipGenerator(source, target, env, for_signature):
"""The generator function which decides how to extract a file."""
# Silence lint.
source = source
target = target
for_signature = for_signature
if env.get('SEVEN_ZIP_PEEL_LAYERS', False):
return [SCons.Script.Delete('$SEVEN_ZIP_OUT_DIR'),
'$SEVEN_ZIP x $SOURCE -o"${TARGET.dir}"',
'$SEVEN_ZIP x $TARGET -o"$SEVEN_ZIP_OUT_DIR"']
else:
return [SCons.Script.Delete('$SEVEN_ZIP_OUT_DIR'),
'$SEVEN_ZIP x $SOURCE -o"$SEVEN_ZIP_OUT_DIR"']
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
env.Replace(
SEVEN_ZIP='$SEVEN_ZIP_DIR/7za.exe',
SEVEN_ZIP_ARCHIVE_OPTIONS = ['-t7z', '-mx0'],
SEVEN_ZIP_COMPRESS_OPTIONS = ['-t7z', '-mx9'],
)
b = SCons.Script.Builder(generator=SevenZipGenerator,
emitter=SevenZipEmitter)
env['BUILDERS']['Extract7zip'] = b
b = SCons.Script.Builder(
action=('cd $SOURCE && '
'$SEVEN_ZIP a $SEVEN_ZIP_ARCHIVE_OPTIONS ${TARGET.abspath} ./'))
env['BUILDERS']['Archive7zip'] = b
b = SCons.Script.Builder(
action=('cd ${SOURCE.dir} && '
'$SEVEN_ZIP a $SEVEN_ZIP_COMPRESS_OPTIONS '
'${TARGET.abspath} ${SOURCE.file}'))
env['BUILDERS']['Compress7zip'] = b
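# Illustrative SConscript usage (tool path and file names are assumptions):
#
#   env = Environment(tools=['seven_zip'], SEVEN_ZIP_DIR='third_party/7zip')
#   env.Extract7zip('unpacked/placeholder', 'archive.7z')
#   env.Archive7zip('bundle.7z', env.Dir('payload_dir'))
#   env.Compress7zip('small.7z', 'payload_dir/big_file.bin')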
| apache-2.0 |
Endika/account-invoice-reporting | invoice_print_report_balance_payment/__init__.py | 11 | 1056 | # -*- encoding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import account_invoice
from . import partner
from . import report
| agpl-3.0 |
AdamIsrael/PerfKitBenchmarker | perfkitbenchmarker/providers/alicloud/ali_virtual_machine.py | 2 | 13868 | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent an Ali Virtual Machine object.
All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""
import json
import threading
import logging
from perfkitbenchmarker import flags
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import windows_virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import disk
from perfkitbenchmarker.providers.alicloud import ali_disk
from perfkitbenchmarker.providers.alicloud import ali_network
from perfkitbenchmarker.providers.alicloud import util
from perfkitbenchmarker import providers
FLAGS = flags.FLAGS
NON_HVM_PREFIXES = ['t1', 's1', 's2', 's3', 'm1']
DRIVE_START_LETTER = 'b'
DEFAULT_DISK_SIZE = 500
INSTANCE = 'instance'
IMAGE = 'image'
SNAPSHOT = 'snapshot'
DISK = 'disk'
NONE = 'none'
IO_OPTIMIZED = 'io_optimized'
RESOURCE_TYPE = {
INSTANCE: 'instance',
IMAGE: 'image',
SNAPSHOT: 'snapshot',
DISK: 'disk',
}
SSH_PORT = 22
NUM_LOCAL_VOLUMES = {
'ecs.t1.small': 4,
'ecs.s1.small': 4,
'ecs.s1.medium': 4,
'ecs.s2.small': 4,
'ecs.s2.large': 4,
'ecs.s2.xlarge': 4,
'ecs.s3.medium': 4,
'ecs.s3.large': 4,
'ecs.m1.medium': 4,
}
INSTANCE_EXISTS_STATUSES = frozenset(
['Starting', 'Running', 'Stopping', 'Stopped'])
INSTANCE_DELETED_STATUSES = frozenset([])
INSTANCE_KNOWN_STATUSES = INSTANCE_EXISTS_STATUSES | INSTANCE_DELETED_STATUSES
DEFAULT_IMAGE = "ubuntu1404_64_20G_aliaegis_20150325.vhd",
class AliVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing an AliCloud Virtual Machine."""
CLOUD = providers.ALICLOUD
DEFAULT_ZONE = 'cn-hangzhou-d'
DEFAULT_MACHINE_TYPE = 'ecs.s3.large'
IMAGE_NAME_FILTER = 'ubuntu1404_64_20G_aliaegis*'
_lock = threading.Lock()
imported_keyfile_set = set()
deleted_keyfile_set = set()
def __init__(self, vm_spec):
"""Initialize a AliCloud virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the VM.
"""
super(AliVirtualMachine, self).__init__(vm_spec)
self.image = self.image or DEFAULT_IMAGE
self.user_name = FLAGS.ali_user_name
self.region = util.GetRegionByZone(self.zone)
self.bandwidth_in = FLAGS.ali_bandwidth_in
self.bandwidth_out = FLAGS.ali_bandwidth_out
self.scratch_disk_size = FLAGS.scratch_disk_size or DEFAULT_DISK_SIZE
self.system_disk_type = FLAGS.ali_system_disk_type
self.network = ali_network.AliNetwork.GetNetwork(self)
self.firewall = ali_network.AliFirewall.GetFirewall()
@vm_util.Retry(poll_interval=1, log_errors=False)
def _WaitForInstanceStatus(self, status_list):
"""Waits until the instance's status is in status_list"""
logging.info('Waiting until the instance\'s status is one of: %s',
status_list)
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeInstances',
'--RegionId %s' % self.region,
'--InstanceIds \'["%s"]\'' % self.id]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instances = response['Instances']['Instance']
assert len(instances) == 1
status = instances[0]['Status']
assert status in status_list
@vm_util.Retry(poll_interval=5, max_retries=30, log_errors=False)
def _WaitForEipStatus(self, status_list):
"""Waits until the instance's status is in status_list"""
logging.info('Waiting until the EIP\'s status is one of: %s',
status_list)
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeEipAddresses',
'--RegionId %s' % self.region,
'--AllocationId %s' % self.eip_id]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
EipAddresses = response['EipAddresses']['EipAddress']
assert len(EipAddresses) == 1
status = EipAddresses[0]['Status']
assert status in status_list
def _AllocatePubIp(self, region, instance_id):
"""Allocate a public ip address and associate it to the instance"""
if FLAGS.ali_use_vpc:
allocatip_cmd = util.ALI_PREFIX + [
'ecs',
'AllocateEipAddress',
'--RegionId %s' % region,
'--InternetChargeType PayByTraffic']
allocatip_cmd = util.GetEncodedCmd(allocatip_cmd)
stdout, _ = vm_util.IssueRetryableCommand(allocatip_cmd)
response = json.loads(stdout)
self.ip_address = response['EipAddress']
self.eip_id = response['AllocationId']
self._WaitForInstanceStatus(['Stopped', 'Running'])
associate_cmd = util.ALI_PREFIX + [
'ecs',
'AssociateEipAddress',
'--RegionId %s' % region,
'--AllocationId %s' % self.eip_id,
'--InstanceId %s' % instance_id,
'--InstanceType EcsInstance']
associate_cmd = util.GetEncodedCmd(associate_cmd)
vm_util.IssueRetryableCommand(associate_cmd)
else:
allocatip_cmd = util.ALI_PREFIX + [
'ecs',
'AllocatePublicIpAddress',
'--RegionId %s' % region,
'--InstanceId %s' % instance_id]
allocatip_cmd = util.GetEncodedCmd(allocatip_cmd)
stdout, _ = vm_util.IssueRetryableCommand(allocatip_cmd)
response = json.loads(stdout)
self.ip_address = response['IpAddress']
@classmethod
def _GetDefaultImage(cls, region):
"""Returns the default image given the machine type and region.
If no default is configured, this will return None.
"""
if cls.IMAGE_NAME_FILTER is None:
return None
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeImage',
'--RegionId %s' % region,
'--ImageName \'%s\'' % cls.IMAGE_NAME_FILTER]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
if not stdout:
return None
images = json.loads(stdout)['Images']['Image']
# We want to return the latest version of the image, and since the wildcard
# portion of the image name is the image's creation date, we can just take
# the image with the 'largest' name.
return max(images, key=lambda image: image['ImageName'])['ImageId']
@vm_util.Retry()
def _PostCreate(self):
"""Get the instance's data and tag it."""
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeInstances',
'--RegionId %s' % self.region,
'--InstanceIds \'["%s"]\'' % self.id]
logging.info('Getting instance %s public IP. This will fail until '
'a public IP is available, but will be retried.', self.id)
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instance = response['Instances']['Instance'][0]
if self.network.use_vpc:
pub_ip_address = instance['EipAddress']['IpAddress']
self.internal_ip = \
instance['VpcAttributes']['PrivateIpAddress']['IpAddress'][0]
else:
pub_ip_address = instance['PublicIpAddress']['IpAddress'][0]
self.internal_ip = instance['InnerIpAddress']['IpAddress'][0]
assert self.ip_address == pub_ip_address
self.group_id = instance['SecurityGroupIds']['SecurityGroupId'][0]
self._WaitForInstanceStatus(['Running'])
self.firewall.AllowPort(self, SSH_PORT)
key_file = vm_util.GetPublicKeyPath()
util.AddPubKeyToHost(self.ip_address,
self.password,
key_file,
self.user_name)
util.AddDefaultTags(self.id, RESOURCE_TYPE[INSTANCE], self.region)
def _CreateDependencies(self):
"""Create VM dependencies."""
pass
def _DeleteDependencies(self):
"""Delete VM dependencies."""
pass
def _Create(self):
"""Create a VM instance."""
if self.image is None:
# This is here and not in the __init__ method because _GetDefaultImage
# does a nontrivial amount of work (it calls the aliyuncli).
self.image = self._GetDefaultImage(self.region)
self.password = util.GeneratePassword()
create_cmd = util.ALI_PREFIX + [
'ecs',
'CreateInstance',
'--InstanceName perfkit-%s' % FLAGS.run_uri,
'--RegionId %s' % self.region,
'--ZoneId %s' % self.zone,
'--ImageId %s' % self.image,
'--InstanceType %s' % self.machine_type,
'--SecurityGroupId %s' % self.network.security_group.group_id,
'--Password %s' % self.password]
if FLAGS.scratch_disk_type == disk.LOCAL:
disk_cmd = [
'--SystemDiskCategory ephemeral_ssd',
'--DataDisk1Category ephemeral_ssd',
'--DataDisk1Size %s' % self.scratch_disk_size,
'--DataDisk1Device %s%s' % (util.GetDrivePathPrefix(),
DRIVE_START_LETTER)]
create_cmd.extend(disk_cmd)
if FLAGS.ali_io_optimized is not None:
create_cmd.extend(['--IoOptimized optimized',
'--SystemDiskCategory %s' % self.system_disk_type])
if FLAGS.ali_use_vpc:
create_cmd.extend(['--VpcId %s' % self.network.vpc.id,
'--VSwitchId %s' % self.network.vswitch.id])
else:
create_cmd.extend([
'--InternetChargeType PayByTraffic',
'--InternetMaxBandwidthIn %s' % self.bandwidth_in,
'--InternetMaxBandwidthOut %s' % self.bandwidth_out])
create_cmd = util.GetEncodedCmd(create_cmd)
stdout, _ = vm_util.IssueRetryableCommand(create_cmd)
response = json.loads(stdout)
self.id = response['InstanceId']
self._AllocatePubIp(self.region, self.id)
start_cmd = util.ALI_PREFIX + [
'ecs',
'StartInstance',
'--RegionId %s' % self.region,
'--InstanceId %s' % self.id]
start_cmd = util.GetEncodedCmd(start_cmd)
vm_util.IssueRetryableCommand(start_cmd)
def _Delete(self):
"""Delete a VM instance."""
stop_cmd = util.ALI_PREFIX + [
'ecs',
'StopInstance',
'--RegionId %s' % self.region,
'--InstanceId %s' % self.id]
stop_cmd = util.GetEncodedCmd(stop_cmd)
vm_util.IssueRetryableCommand(stop_cmd)
self._WaitForInstanceStatus(['Stopped'])
delete_cmd = util.ALI_PREFIX + [
'ecs',
'DeleteInstance',
'--RegionId %s' % self.region,
'--InstanceId %s' % self.id]
delete_cmd = util.GetEncodedCmd(delete_cmd)
vm_util.IssueRetryableCommand(delete_cmd)
if FLAGS.ali_use_vpc:
self._WaitForEipStatus(['Available'])
release_eip_cmd = util.ALI_PREFIX + [
'ecs',
'ReleaseEipAddress',
'--AllocationId %s' % self.eip_id]
release_eip_cmd = util.GetEncodedCmd(release_eip_cmd)
vm_util.IssueRetryableCommand(release_eip_cmd)
def _Exists(self):
"""Returns true if the VM exists."""
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeInstances',
'--RegionId %s' % self.region,
'--InstanceIds \'["%s"]\'' % str(self.id)]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instances = response['Instances']['Instance']
assert len(instances) < 2, 'Too many instances.'
if not instances:
return False
assert len(instances) == 1, 'Wrong number of instances.'
status = instances[0]['Status']
assert status in INSTANCE_KNOWN_STATUSES, status
return status in INSTANCE_EXISTS_STATUSES
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
data_disk = ali_disk.AliDisk(disk_spec, self.zone)
self.scratch_disks.append(data_disk)
if disk_spec.disk_type != disk.LOCAL:
data_disk.Create()
data_disk.Attach(self)
else:
data_disk.device_letter = DRIVE_START_LETTER
data_disk.WaitForDiskStatus(['In_use'])
self.FormatDisk(data_disk.GetDevicePath())
self.MountDisk(data_disk.GetDevicePath(), disk_spec.mount_point)
def GetLocalDisks(self):
"""Returns a list of local disks on the VM.
Returns:
A list of strings, where each string is the absolute path to the local
disks on the VM (e.g. '/dev/xvdb').
"""
return ['%s%s' % (util.GetDrivePathPrefix(),
chr(ord(DRIVE_START_LETTER) + i))
for i in xrange(NUM_LOCAL_VOLUMES[self.machine_type])]
def AddMetadata(self, **kwargs):
"""Adds metadata to the VM."""
util.AddTags(self.id, RESOURCE_TYPE[INSTANCE], self.region, **kwargs)
class DebianBasedAliVirtualMachine(AliVirtualMachine,
linux_virtual_machine.DebianMixin):
IMAGE_NAME_FILTER = 'ubuntu1404_64*aliaegis*.vhd'
class RhelBasedAliVirtualMachine(AliVirtualMachine,
linux_virtual_machine.RhelMixin):
pass
class WindowsAliVirtualMachine(AliVirtualMachine,
windows_virtual_machine.WindowsMixin):
pass
| apache-2.0 |
eulo/miff-project | bower_components/jasmine/setup.py | 191 | 1983 | from setuptools import setup, find_packages, os
import json
with open('package.json') as packageFile:
version = json.load(packageFile)['version']
setup(
name="jasmine-core",
version=version,
url="http://pivotal.github.io/jasmine/",
author="Pivotal Labs",
author_email="jasmine-js@googlegroups.com",
description=('Jasmine is a Behavior Driven Development testing framework for JavaScript. It does not rely on '+
'browsers, DOM, or any JavaScript framework. Thus it\'s suited for websites, '+
'Node.js (http://nodejs.org) projects, or anywhere that JavaScript can run.'),
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
packages=['jasmine_core', 'jasmine_core.images'],
package_dir={'jasmine_core': 'lib/jasmine-core', 'jasmine_core.images': 'images'},
package_data={'jasmine_core': ['*.js', '*.css'], 'jasmine_core.images': ['*.png']},
include_package_data=True,
install_requires=['glob2>=0.4.1', 'ordereddict==1.1']
)
| mit |
lfaller/name-that-microbe | annotate.py | 1 | 4385 | #!/usr/bin/env
#
# Lina Faller, April 2016
#
# Usage: python annotate.py -r REF_FILE -i FASTA -o OUTFILE -l LOG
#
# This script reads in a reference file with two columns. The first column contains an ID
# and the second column contains a short fragment of nucleotide sequence that uniquely
# identifies this specific reference sequence.
#
# The script outputs a list of all the reference IDs and the counts indicating how many
# sequences in the input matched this reference. There will be one additional ID, labeled
# "unmatched" which indicates the number of reads that could not be matched.
#
import sys
import os
import argparse
import logging
from Bio import SeqIO
########################################################################
# set up command line argument parsing
def parse_args():
parser = argparse.ArgumentParser(description="This script reads in a reference file with two columns. The first column contains an ID and the second column contains a short fragment of nucleotide sequence that uniquely identifies this specific reference sequence.\nThe script outputs a list of all the reference IDs and the counts indicating how many sequences in the input matched this reference. There will be one additional ID, labeled 'unmatched' which indicates the number of reads that could not be matched.")
parser.add_argument('-i', '--input', action="store", help='File containing the input.', type = str)
parser.add_argument('-o', '--output', action="store", help='File containing the output.', type = str)
parser.add_argument('-r', '--reference', action="store", help='File containing the pairs of reference id and unique probe.', type = str)
parser.add_argument('-l', '--log', action="store", help='File containing logging information.', type = str )
return parser.parse_args()
########################################################################
# find_probe
#
# Return a string indicating the reference_id for this sequence.
# Return "unmatched" if no reference_id was found.
def find_probe( sequence, reference_ids ):
logging.info( "Looking at sequence: {0}".format( sequence ) )
for ref_id in reference_ids:
current_probe = reference_ids[ ref_id ]
if current_probe in sequence:
logging.info( "Identified reference id ({0}) with probe ({1})".format( ref_id, current_probe ) )
return ref_id
logging.info( "No reference id was identified. Probe is considered unmatched." )
return 'unmatched'
########################################################################
# process_reference_file
#
# Return a dict where:
# key = reference_id
# value = unique probe sequence
def process_reference_file( REFERENCE ):
reference_ids = dict()
with open( REFERENCE ) as IN:
for line in IN:
line = line.strip()
items = line.split( "\t" )
reference_ids[ items[0] ] = items[1]
return reference_ids
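# Illustrative reference file contents (tab-separated; IDs and probe
# sequences are made up for this sketch):
#   ecoli_k12<TAB>ACGTACGTAA
#   b_subtilis<TAB>TTGACCGGTA
# process_reference_file() would then return
#   {'ecoli_k12': 'ACGTACGTAA', 'b_subtilis': 'TTGACCGGTA'}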
########################################################################
# Main
def main():
# parse command line arguments
arguments = parse_args()
# variables used
all_probes = dict()
# do you want to debug things?
logging.basicConfig( filename = arguments.log, level = logging.INFO, format = '%(asctime)s %(message)s' )
logging.info( "####################################" )
logging.info( "Program started" )
logging.info( "Call: " + str(sys.argv) )
########################################################################
# process reference file
ref_ids = process_reference_file( arguments.reference )
########################################################################
# process fasta file
for record in SeqIO.parse(arguments.input, "fasta"):
header = record.description
seq = str(record.seq)
seq = seq.strip()
current_ref_id = find_probe( seq, ref_ids )
if current_ref_id in all_probes:
all_probes[ current_ref_id ] += 1
else:
all_probes[ current_ref_id ] = 1
########################################################################
# write output
logging.info( "Write output file: {0}".format( arguments.output ) )
OUT = open( arguments.output, 'w' ) or die( "Cannot open {0} for output!\n\n".format( arguments.output ) )
for probe in all_probes:
# write output
OUT.write( "{0}\t{1}\n".format( probe, all_probes[ probe ] ) )
OUT.close()
logging.info( "Program finished running." )
if __name__ == '__main__':
main()
| mit |
DirectXMan12/nova-hacking | nova/cmd/spicehtml5proxy.py | 4 | 2893 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Websocket proxy that is compatible with OpenStack Nova
SPICE HTML5 consoles. Leverages websockify.py by Joel Martin
"""
import os
import sys
from oslo.config import cfg
from nova import config
from nova.console import websocketproxy
opts = [
cfg.StrOpt('spicehtml5proxy_host',
default='0.0.0.0',
help='Host on which to listen for incoming requests'),
cfg.IntOpt('spicehtml5proxy_port',
default=6082,
help='Port on which to listen for incoming requests'),
]
CONF = cfg.CONF
CONF.register_cli_opts(opts)
CONF.import_opt('record', 'nova.cmd.novnc')
CONF.import_opt('daemon', 'nova.cmd.novnc')
CONF.import_opt('ssl_only', 'nova.cmd.novnc')
CONF.import_opt('source_is_ipv6', 'nova.cmd.novnc')
CONF.import_opt('cert', 'nova.cmd.novnc')
CONF.import_opt('key', 'nova.cmd.novnc')
CONF.import_opt('web', 'nova.cmd.novnc')
def main():
# Setup flags
config.parse_args(sys.argv)
if CONF.ssl_only and not os.path.exists(CONF.cert):
print "SSL only and %s not found." % CONF.cert
return(-1)
# Check to see if spice html/js/css files are present
if not os.path.exists(CONF.web):
print "Can not find spice html/js/css files at %s." % CONF.web
return(-1)
# Create and start the NovaWebSockets proxy
server = websocketproxy.NovaWebSocketProxy(
listen_host=CONF.spicehtml5proxy_host,
listen_port=CONF.spicehtml5proxy_port,
source_is_ipv6=CONF.source_is_ipv6,
verbose=CONF.verbose,
cert=CONF.cert,
key=CONF.key,
ssl_only=CONF.ssl_only,
daemon=CONF.daemon,
record=CONF.record,
web=CONF.web,
target_host='ignore',
target_port='ignore',
wrap_mode='exit',
wrap_cmd=None)
server.start_server()
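# Example invocation sketch (flag values are placeholders; the console
# script name is assumed from the module's location in nova.cmd):
#   nova-spicehtml5proxy --spicehtml5proxy_host=0.0.0.0 \
#       --spicehtml5proxy_port=6082 --web=/usr/share/spice-html5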
| apache-2.0 |
linktlh/Toontown-journey | toontown/nametag/NametagGlobals.py | 2 | 9188 | from pandac.PandaModules import VBase4
CCNormal = 0
CCNoChat = 1
CCNonPlayer = 2
CCSuit = 3
CCToonBuilding = 4
CCSuitBuilding = 5
CCHouseBuilding = 6
CCSpeedChat = 7
CCFreeChat = 8
CHAT = 0
SPEEDCHAT = 1
CHAT_BALLOON = 0
THOUGHT_BALLOON = 1
cardModel = None
arrowModel = None
chatBalloon3dModel = None
chatBalloon3dWidth = 0
chatBalloon3dHeight = 0
chatBalloon2dModel = None
chatBalloon2dWidth = 0
chatBalloon2dHeight = 0
thoughtBalloonModel = None
thoughtBalloonWidth = 0
thoughtBalloonHeight = 0
noButton = (None, None, None, None)
pageButton = (None, None, None, None)
quitButton = (None, None, None, None)
quitButtonWidth = 0
quitButtonHeight = 0
rolloverSound = None
clickSound = None
me = None
want2dNametags = True
forceOnscreenChat = False
force2dNametags = False
wantActiveNametags = True
def setCardModel(model):
global cardModel
cardModel = loader.loadModel(model)
def setArrowModel(model):
global arrowModel
arrowModel = loader.loadModel(model)
def setChatBalloon3dModel(model):
global chatBalloon3dModel
global chatBalloon3dWidth
global chatBalloon3dHeight
chatBalloon3dModel = loader.loadModel(model)
chatBalloon3dWidth, chatBalloon3dHeight = getModelWidthHeight(chatBalloon3dModel)
def setChatBalloon2dModel(model):
global chatBalloon2dModel
global chatBalloon2dWidth
global chatBalloon2dHeight
chatBalloon2dModel = loader.loadModel(model)
chatBalloon2dWidth, chatBalloon2dHeight = getModelWidthHeight(chatBalloon2dModel)
def setThoughtBalloonModel(model):
global thoughtBalloonModel
global thoughtBalloonWidth
global thoughtBalloonHeight
thoughtBalloonModel = loader.loadModel(model)
thoughtBalloonWidth, thoughtBalloonHeight = getModelWidthHeight(thoughtBalloonModel)
def setPageButton(normal, down, rollover, disabled):
global pageButton
pageButton = (normal, down, rollover, disabled)
def setQuitButton(normal, down, rollover, disabled):
global quitButton
global quitButtonWidth
global quitButtonHeight
quitButton = (normal, down, rollover, disabled)
quitButtonWidth, quitButtonHeight = getModelWidthHeight(normal)
def setRolloverSound(sound):
global rolloverSound
rolloverSound = sound
def setClickSound(sound):
global clickSound
clickSound = sound
def setMe(nodePath):
global me
me = nodePath
def setWant2dNametags(value):
global want2dNametags
want2dNametags = value
def setForceOnscreenChat(value):
global forceOnscreenChat
forceOnscreenChat = value
def setForce2dNametags(value):
global force2dNametags
force2dNametags = value
def setWantActiveNametags(value):
global wantActiveNametags
wantActiveNametags = value
def getModelWidthHeight(model):
tightBounds = model.getTightBounds()
if tightBounds is None:
return (0, 0)
minPoint, maxPoint = tightBounds
width = maxPoint.getX() - minPoint.getX()
height = maxPoint.getZ() - minPoint.getZ()
return (width, height)
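# Typical usage sketch (the asset path is illustrative, not prescribed
# by this module): setChatBalloon3dModel('phase_3/models/props/chatbox')
# loads the model once and caches its dimensions via getModelWidthHeight().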
# Foreground, background:
NametagColors = {
CCNormal: (
(VBase4(0.3, 0.3, 0.7, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)), # Normal
(VBase4(0.3, 0.3, 0.7, 1.0), VBase4(0.2, 0.2, 0.2, 0.1875)), # Down
(VBase4(0.5, 0.5, 1.0, 1.0), VBase4(1.0, 1.0, 1.0, 0.5625)), # Rollover
(VBase4(0.3, 0.3, 0.7, 1.0), VBase4(1.0, 1.0, 1.0, 0.375)) # Disabled
),
CCNoChat: (
(VBase4(0.8, 0.4, 0.0, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)), # Normal
(VBase4(1.0, 0.5, 0.5, 1.0), VBase4(0.2, 0.2, 0.2, 0.1875)), # Click
(VBase4(1.0, 0.5, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 0.5625)), # Rollover
(VBase4(0.8, 0.4, 0.0, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)) # Disabled
),
CCNonPlayer: (
(VBase4(0.8, 0.4, 0.0, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)), # Normal
(VBase4(0.8, 0.4, 0.0, 1.0), VBase4(0.8, 0.8, 0.8, 0.1875)), # Down
(VBase4(0.8, 0.4, 0.0, 1.0), VBase4(0.8, 0.8, 0.8, 0.5625)), # Rollover
(VBase4(0.8, 0.4, 0.0, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)) # Disabled
),
CCSuit: (
(VBase4(0.2, 0.2, 0.2, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)), # Normal
(VBase4(0.2, 0.2, 0.2, 1.0), VBase4(0.2, 0.2, 0.2, 0.1875)), # Down
(VBase4(0.4, 0.4, 0.4, 1.0), VBase4(1.0, 1.0, 1.0, 0.5625)), # Rollover
(VBase4(0.2, 0.2, 0.2, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)) # Disabled
),
CCSuitBuilding: (
(VBase4(0.5, 0.5, 0.5, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)), # Normal
(VBase4(0.5, 0.5, 0.5, 1.0), VBase4(0.8, 0.8, 0.8, 0.1875)), # Down
(VBase4(0.5, 0.5, 0.5, 1.0), VBase4(0.8, 0.8, 0.8, 0.5625)), # Rollover
(VBase4(0.5, 0.5, 0.5, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)) # Disabled
),
CCToonBuilding: (
(VBase4(0.2, 0.6, 0.9, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)), # Normal
(VBase4(0.2, 0.6, 0.9, 1.0), VBase4(0.8, 0.8, 0.8, 0.1875)), # Down
(VBase4(0.2, 0.6, 0.9, 1.0), VBase4(0.8, 0.8, 0.8, 0.5625)), # Rollover
(VBase4(0.2, 0.6, 0.9, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)) # Disabled
),
CCHouseBuilding: (
(VBase4(0.2, 0.6, 0.9, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)), # Normal
(VBase4(0.2, 0.2, 0.5, 1.0), VBase4(0.2, 0.2, 0.2, 0.1875)), # Down
(VBase4(0.5, 0.5, 1.0, 1.0), VBase4(1.0, 1.0, 1.0, 0.5625)), # Rollover
(VBase4(0.0, 0.6, 0.2, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)) # Disabled
),
CCSpeedChat: (
(VBase4(0.0, 0.6, 0.2, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)), # Normal
(VBase4(0.0, 0.5, 0.0, 1.0), VBase4(0.5, 0.5, 0.5, 0.1875)), # Down
(VBase4(0.0, 0.7, 0.2, 1.0), VBase4(1.0, 1.0, 1.0, 0.5625)), # Rollover
(VBase4(0.0, 0.6, 0.2, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)) # Disabled
),
CCFreeChat: (
(VBase4(0.3, 0.3, 0.7, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)), # Normal
(VBase4(0.2, 0.2, 0.5, 1.0), VBase4(0.2, 0.2, 0.2, 0.1875)), # Down
(VBase4(0.5, 0.5, 1.0, 1.0), VBase4(1.0, 1.0, 1.0, 0.5625)), # Rollover
(VBase4(0.3, 0.3, 0.7, 1.0), VBase4(0.8, 0.8, 0.8, 0.375)) # Disabled
)
}
# Foreground, background:
ChatColors = {
CCNormal: (
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Normal
(VBase4(1.0, 0.5, 0.5, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Down
(VBase4(0.0, 0.6, 0.6, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Rollover
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)) # Disabled
),
CCNoChat: (
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Normal
(VBase4(1.0, 0.5, 0.5, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Click
(VBase4(0.0, 0.6, 0.6, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Rollover
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)) # Disabled
),
CCNonPlayer: (
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Normal
(VBase4(1.0, 0.5, 0.5, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Click
(VBase4(0.0, 0.6, 0.6, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Rollover
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)) # Disabled
),
CCSuit: (
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Normal
(VBase4(1.0, 0.5, 0.5, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Down
(VBase4(0.0, 0.6, 0.6, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Rollover
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)) # Disabled
),
CCSuitBuilding: (
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Normal
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Down
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Rollover
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)) # Disabled
),
CCToonBuilding: (
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Normal
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Down
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Rollover
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)) # Disabled
),
CCHouseBuilding: (
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Normal
(VBase4(1.0, 0.5, 0.5, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Down
(VBase4(0.0, 0.6, 0.6, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Rollover
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)) # Disabled
),
CCSpeedChat: (
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Normal
(VBase4(1.0, 0.5, 0.5, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Down
(VBase4(0.0, 0.6, 0.6, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Rollover
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)) # Disabled
),
CCFreeChat: (
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Normal
(VBase4(1.0, 0.5, 0.5, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Down
(VBase4(0.0, 0.6, 0.6, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)), # Rollover
(VBase4(0.0, 0.0, 0.0, 1.0), VBase4(1.0, 1.0, 1.0, 1.0)) # Disabled
)
}
| apache-2.0 |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/models/bart/model.py | 1 | 13648 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BART: Denoising Sequence-to-Sequence Pre-training for
Natural Language Generation, Translation, and Comprehension
"""
import logging
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import BARTHubInterface
logger = logging.getLogger(__name__)
@register_model('bart')
class BARTModel(TransformerModel):
@classmethod
def hub_models(cls):
return {
'bart.base': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz',
'bart.large': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz',
'bart.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz',
'bart.large.cnn': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz',
'bart.large.xsum': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz',
}
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
super(BARTModel, BARTModel).add_args(parser)
parser.add_argument(
'--pooler-dropout', type=float, metavar='D',
help='dropout probability in the masked_lm pooler layers'
)
parser.add_argument(
'--pooler-activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use for pooler layer'
)
@property
def supported_targets(self):
return {'self'}
def forward(
self, src_tokens, src_lengths, prev_output_tokens,
features_only=False, classification_head_name=None, **kwargs
):
if classification_head_name is not None:
features_only = True
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
**kwargs,
)
x, extra = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
**kwargs,
)
if classification_head_name is not None:
sentence_representation = x[
src_tokens.eq(self.encoder.dictionary.eos()), :
].view(x.size(0), -1, x.size(-1))[:, -1, :]
x = self.classification_heads[classification_head_name](
sentence_representation
)
return x, extra
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file='model.pt',
data_name_or_path='.',
bpe='gpt2',
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return BARTHubInterface(x['args'], x['task'], x['models'][0])
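# A minimal usage sketch ('bart.large' is one of the hub_models() keys
# above; sample() is assumed to be provided by BARTHubInterface):
#   bart = BARTModel.from_pretrained('bart.large', checkpoint_file='model.pt')
#   bart.eval()
#   print(bart.sample(['BART is a seq2seq model.'], beam=4))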
def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):
"""Register a classification head."""
logger.info("Registering classification head: {0}".format(name))
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
'and inner_dim {} (prev: {})'.format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = BARTClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + '.' if name != '' else ''
current_head_names = [] if not hasattr(self, 'classification_heads') else \
self.classification_heads.keys()
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + 'classification_heads.'):
continue
head_name = k[len(prefix + 'classification_heads.'):].split('.')[0]
num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0)
inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0)
if getattr(self.args, 'load_checkpoint_heads', False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
'deleting classification head ({}) from checkpoint '
'not present in current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes != self.classification_heads[head_name].out_proj.out_features
or inner_dim != self.classification_heads[head_name].dense.out_features
):
logger.warning(
'deleting classification head ({}) from checkpoint '
'with different dimensions than current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
def truncate_emb(key):
if key in state_dict:
state_dict[key] = state_dict[key][:-1, :]
# When finetuning on translation task, remove last row of
# embedding matrix that corresponds to mask_idx token.
loaded_dict_size = state_dict['encoder.embed_tokens.weight'].size(0)
if loaded_dict_size == len(self.encoder.dictionary) + 1 and '<mask>' not in self.encoder.dictionary:
truncate_emb('encoder.embed_tokens.weight')
truncate_emb('decoder.embed_tokens.weight')
truncate_emb('encoder.output_projection.weight')
truncate_emb('decoder.output_projection.weight')
# When continued pretraining on new set of languages for mbart,
# add extra lang embeddings at the end of embed_tokens.
# Note: newly added languages are assumed to have been added at the end.
if self.args.task == 'multilingual_denoising' and loaded_dict_size < len(self.encoder.dictionary):
logger.info(
"Adding extra language embeddings not found in pretrained model for "\
"continued pretraining of MBART on new set of languages."
)
loaded_mask_token_embedding = state_dict['encoder.embed_tokens.weight'][-1, :]
num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size
embed_dim = state_dict['encoder.embed_tokens.weight'].size(1)
new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim)
nn.init.normal_(
new_lang_embed_to_add,
mean=0,
std=embed_dim ** -0.5
)
new_lang_embed_to_add = new_lang_embed_to_add.to(
dtype=state_dict['encoder.embed_tokens.weight'].dtype,
)
state_dict['encoder.embed_tokens.weight'] = torch.cat([
state_dict['encoder.embed_tokens.weight'][:loaded_dict_size-1, :],
new_lang_embed_to_add,
loaded_mask_token_embedding.unsqueeze(0)]
)
state_dict['decoder.embed_tokens.weight'] = torch.cat([
state_dict['decoder.embed_tokens.weight'][:loaded_dict_size-1, :],
new_lang_embed_to_add,
loaded_mask_token_embedding.unsqueeze(0)]
)
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, 'classification_heads'):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + 'classification_heads.' + k not in state_dict:
logger.info('Overwriting %s', prefix + 'classification_heads.' + k)
state_dict[prefix + 'classification_heads.' + k] = v
class BARTClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@register_model_architecture('bart', 'bart_large')
def bart_large_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4*1024)
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 12)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True)
args.attention_dropout = getattr(args, 'attention_dropout', 0.)
args.relu_dropout = getattr(args, 'relu_dropout', 0.)
args.dropout = getattr(args, 'dropout', 0.1)
args.max_target_positions = getattr(args, 'max_target_positions', 1024)
args.max_source_positions = getattr(args, 'max_source_positions', 1024)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', True)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, 'no_scale_embedding', True)
args.layernorm_embedding = getattr(args, 'layernorm_embedding', True)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
@register_model_architecture('bart', 'bart_base')
def bart_base_architecture(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4*768)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 12)
bart_large_architecture(args)
@register_model_architecture('bart', 'mbart_large')
def mbart_large_architecture(args):
args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)
bart_large_architecture(args)
@register_model_architecture('bart', 'mbart_base')
def mbart_base_architecture(args):
args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)
bart_base_architecture(args)
@register_model_architecture('bart', 'mbart_base_wmt20')
def mbart_base_wmt20_architecture(args):
args.layernorm_embedding = getattr(args, 'layernorm_embedding', False)
mbart_base_architecture(args)
| bsd-3-clause |
aaronfang/personal_scripts | af_scripts/tmp/twigGen.py | 2 | 1158 | # generate twigs based on selected curves
# automatically layout uvs
# rebuild curve to the certain length of one segment
# set root/tip size
# children twigs root/tip scale down relatively
# UI
# | [get curves] [rebuild] <unit> |
# | -------------------------------------- |
# | root size:<2.5> tip size:<0.5> |
# | relative size: <0.5>-------||--------- |
# | *random scale <0.1> |
# | *nurbs *polygon |
# | [apply] |
import maya.cmds as cmds
#vars
unit = 2
root_d = 1.5
tip_d = 0.5
# get curves
all_curves = cmds.ls(sl=True,fl=True,type='transform')
# rebuild curves based on length of one segment
for curve in all_curves:
# get curve length
length = cmds.arclen(curve)
segment = int(length/unit)
cmds.rebuildCurve(curve,s=segment,d=3,ch=0)
# create root/tip circles based on parameters
root_circle = cmds.circle(n='root_circle',c=(0,0,0),nr=(0,1,0),r=(root_d/2),d=3,ch=1,s=6)
# extrude -ch true -rn false -po 0 -et 2 -ucp 0 -fpt 1 -upn 1 -rotation 0 -scale 1 -rsp 1 "root_circle" "curve1" ;
# setAttr "extrude1.scale" 0.1;
| mit |
odoousers2014/odoo | addons/mass_mailing/wizard/test_mailing.py | 10 | 1886 | # -*- coding: utf-8 -*-
from openerp import tools
from openerp.osv import osv, fields
class TestMassMailing(osv.TransientModel):
_name = 'mail.mass_mailing.test'
_description = 'Sample Mail Wizard'
_columns = {
'email_to': fields.char('Recipients', required=True,
help='Comma-separated list of email addresses.'),
'mass_mailing_id': fields.many2one('mail.mass_mailing', 'Mailing', required=True),
}
_defaults = {
'email_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
}
def send_mail_test(self, cr, uid, ids, context=None):
Mail = self.pool['mail.mail']
for wizard in self.browse(cr, uid, ids, context=context):
mailing = wizard.mass_mailing_id
test_emails = tools.email_split(wizard.email_to)
mail_ids = []
for test_mail in test_emails:
mail_values = {
'email_from': mailing.email_from,
'reply_to': mailing.reply_to,
'email_to': test_mail,
'subject': mailing.name,
'body_html': '',
'notification': True,
'mailing_id': mailing.id,
}
mail_mail_obj = Mail.browse(cr, uid, Mail.create(cr, uid, mail_values, context=context), context=context)
unsubscribe_url = Mail._get_unsubscribe_url(cr, uid, mail_mail_obj, test_mail, context=context)
body = tools.append_content_to_html(mailing.body_html, unsubscribe_url, plaintext=False, container_tag='p')
Mail.write(cr, uid, mail_mail_obj.id, {'body_html': body}, context=context)
mail_ids.append(mail_mail_obj.id)
Mail.send(cr, uid, mail_ids, context=context)
return True
| agpl-3.0 |
Juniper/contrail-dev-neutron | neutron/plugins/ml2/drivers/mechanism_odl.py | 4 | 15181 | # Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Kyle Mestery, Cisco Systems, Inc.
# @author: Dave Tucker, Hewlett-Packard Development Company L.P.
import time
from oslo.config import cfg
import requests
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log
from neutron.plugins.common import constants
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
ODL_NETWORK = 'network'
ODL_NETWORKS = 'networks'
ODL_SUBNET = 'subnet'
ODL_SUBNETS = 'subnets'
ODL_PORT = 'port'
ODL_PORTS = 'ports'
not_found_exception_map = {ODL_NETWORKS: n_exc.NetworkNotFound,
ODL_SUBNETS: n_exc.SubnetNotFound,
ODL_PORTS: n_exc.PortNotFound}
odl_opts = [
cfg.StrOpt('url',
help=_("HTTP URL of OpenDaylight REST interface.")),
cfg.StrOpt('username',
help=_("HTTP username for authentication")),
cfg.StrOpt('password', secret=True,
help=_("HTTP password for authentication")),
cfg.IntOpt('timeout', default=10,
help=_("HTTP timeout in seconds.")),
cfg.IntOpt('session_timeout', default=30,
help=_("Tomcat session timeout in minutes.")),
]
cfg.CONF.register_opts(odl_opts, "ml2_odl")
def try_del(d, keys):
"""Ignore key errors when deleting from a dictionary."""
for key in keys:
try:
del d[key]
except KeyError:
pass
class JsessionId(requests.auth.AuthBase):
"""Attaches the JSESSIONID and JSESSIONIDSSO cookies to an HTTP Request.
If the cookies are not available or when the session expires, a new
set of cookies are obtained.
"""
def __init__(self, url, username, password):
"""Initialization function for JsessionId."""
# NOTE(kmestery) The 'limit' parameter is intended to limit how much
# data is returned from ODL. This is not implemented in the Hydrogen
# release of OpenDaylight, but will be implemented in the Helium
# timeframe. Hydrogen will silently ignore this value.
self.url = str(url) + '/' + ODL_NETWORKS + '?limit=1'
self.username = username
self.password = password
self.auth_cookies = None
self.last_request = None
self.expired = None
self.session_timeout = cfg.CONF.ml2_odl.session_timeout * 60
self.session_deadline = 0
def obtain_auth_cookies(self):
"""Make a REST call to obtain cookies for ODL authenticiation."""
r = requests.get(self.url, auth=(self.username, self.password))
r.raise_for_status()
jsessionid = r.cookies.get('JSESSIONID')
jsessionidsso = r.cookies.get('JSESSIONIDSSO')
if jsessionid and jsessionidsso:
self.auth_cookies = dict(JSESSIONID=jsessionid,
JSESSIONIDSSO=jsessionidsso)
def __call__(self, r):
"""Verify timestamp for Tomcat session timeout."""
if time.time() > self.session_deadline:
self.obtain_auth_cookies()
self.session_deadline = time.time() + self.session_timeout
r.prepare_cookies(self.auth_cookies)
return r
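# A minimal sketch of wiring this auth class into requests (URL and
# credentials are placeholders, not a real controller):
#   auth = JsessionId('http://odl:8080/controller/nb/v2/neutron',
#                     'admin', 'admin')
#   requests.get('http://odl:8080/controller/nb/v2/neutron/' + ODL_NETWORKS,
#                auth=auth)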
class OpenDaylightMechanismDriver(api.MechanismDriver):
"""Mechanism Driver for OpenDaylight.
This driver was a port from the Tail-F NCS MechanismDriver. The API
exposed by ODL is slightly different from the API exposed by NCS,
but the general concepts are the same.
"""
auth = None
out_of_sync = True
def initialize(self):
self.url = cfg.CONF.ml2_odl.url
self.timeout = cfg.CONF.ml2_odl.timeout
self.username = cfg.CONF.ml2_odl.username
self.password = cfg.CONF.ml2_odl.password
self.auth = JsessionId(self.url, self.username, self.password)
self.vif_type = portbindings.VIF_TYPE_OVS
self.vif_details = {portbindings.CAP_PORT_FILTER: True}
# Postcommit hooks are used to trigger synchronization.
def create_network_postcommit(self, context):
self.synchronize('create', ODL_NETWORKS, context)
def update_network_postcommit(self, context):
self.synchronize('update', ODL_NETWORKS, context)
def delete_network_postcommit(self, context):
self.synchronize('delete', ODL_NETWORKS, context)
def create_subnet_postcommit(self, context):
self.synchronize('create', ODL_SUBNETS, context)
def update_subnet_postcommit(self, context):
self.synchronize('update', ODL_SUBNETS, context)
def delete_subnet_postcommit(self, context):
self.synchronize('delete', ODL_SUBNETS, context)
def create_port_postcommit(self, context):
self.synchronize('create', ODL_PORTS, context)
def update_port_postcommit(self, context):
self.synchronize('update', ODL_PORTS, context)
def delete_port_postcommit(self, context):
self.synchronize('delete', ODL_PORTS, context)
def synchronize(self, operation, object_type, context):
"""Synchronize ODL with Neutron following a configuration change."""
if self.out_of_sync:
self.sync_full(context)
else:
self.sync_object(operation, object_type, context)
def filter_create_network_attributes(self, network, context, dbcontext):
"""Filter out network attributes not required for a create."""
try_del(network, ['status', 'subnets'])
def filter_create_subnet_attributes(self, subnet, context, dbcontext):
"""Filter out subnet attributes not required for a create."""
pass
def filter_create_port_attributes(self, port, context, dbcontext):
"""Filter out port attributes not required for a create."""
self.add_security_groups(context, dbcontext, port)
# TODO(kmestery): Converting to uppercase due to ODL bug
# https://bugs.opendaylight.org/show_bug.cgi?id=477
port['mac_address'] = port['mac_address'].upper()
try_del(port, ['status'])
def sync_resources(self, resource_name, collection_name, resources,
context, dbcontext, attr_filter):
"""Sync objects from Neutron over to OpenDaylight.
This will handle syncing networks, subnets, and ports from Neutron to
OpenDaylight. It also filters out the requisite items which are not
valid for create API operations.
"""
to_be_synced = []
for resource in resources:
try:
urlpath = collection_name + '/' + resource['id']
self.sendjson('get', urlpath, None)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
attr_filter(resource, context, dbcontext)
to_be_synced.append(resource)
key = resource_name if len(to_be_synced) == 1 else collection_name
# 400 errors are returned if an object exists, which we ignore.
self.sendjson('post', collection_name, {key: to_be_synced}, [400])
@utils.synchronized('odl-sync-full')
def sync_full(self, context):
"""Resync the entire database to ODL.
Transition to the in-sync state on success.
Note: we only allow a single thread in here at a time.
"""
if not self.out_of_sync:
return
dbcontext = context._plugin_context
networks = context._plugin.get_networks(dbcontext)
subnets = context._plugin.get_subnets(dbcontext)
ports = context._plugin.get_ports(dbcontext)
self.sync_resources(ODL_NETWORK, ODL_NETWORKS, networks,
context, dbcontext,
self.filter_create_network_attributes)
self.sync_resources(ODL_SUBNET, ODL_SUBNETS, subnets,
context, dbcontext,
self.filter_create_subnet_attributes)
self.sync_resources(ODL_PORT, ODL_PORTS, ports,
context, dbcontext,
self.filter_create_port_attributes)
self.out_of_sync = False
def filter_update_network_attributes(self, network, context, dbcontext):
"""Filter out network attributes for an update operation."""
try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
def filter_update_subnet_attributes(self, subnet, context, dbcontext):
"""Filter out subnet attributes for an update operation."""
try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
'allocation_pools', 'tenant_id'])
def filter_update_port_attributes(self, port, context, dbcontext):
"""Filter out port attributes for an update operation."""
self.add_security_groups(context, dbcontext, port)
try_del(port, ['network_id', 'id', 'status', 'mac_address',
'tenant_id', 'fixed_ips'])
create_object_map = {ODL_NETWORKS: filter_create_network_attributes,
ODL_SUBNETS: filter_create_subnet_attributes,
ODL_PORTS: filter_create_port_attributes}
update_object_map = {ODL_NETWORKS: filter_update_network_attributes,
ODL_SUBNETS: filter_update_subnet_attributes,
ODL_PORTS: filter_update_port_attributes}
def sync_single_resource(self, operation, object_type, obj_id,
context, attr_filter_create, attr_filter_update):
"""Sync over a single resource from Neutron to OpenDaylight.
Handle syncing a single operation over to OpenDaylight, and correctly
filter attributes out which are not required for the requisite
operation (create or update) being handled.
"""
dbcontext = context._plugin_context
if operation == 'create':
urlpath = object_type
method = 'post'
else:
urlpath = object_type + '/' + obj_id
method = 'put'
try:
obj_getter = getattr(context._plugin, 'get_%s' % object_type[:-1])
resource = obj_getter(dbcontext, obj_id)
except not_found_exception_map[object_type]:
LOG.debug(_('%(object_type)s not found (%(obj_id)s)'),
{'object_type': object_type.capitalize(),
'obj_id': obj_id})
else:
if operation == 'create':
attr_filter_create(self, resource, context, dbcontext)
elif operation == 'update':
attr_filter_update(self, resource, context, dbcontext)
try:
# 400 errors are returned if an object exists, which we ignore.
self.sendjson(method, urlpath, {object_type[:-1]: resource},
[400])
except Exception:
with excutils.save_and_reraise_exception():
self.out_of_sync = True
def sync_object(self, operation, object_type, context):
"""Synchronize the single modified record to ODL."""
obj_id = context.current['id']
self.sync_single_resource(operation, object_type, obj_id, context,
self.create_object_map[object_type],
self.update_object_map[object_type])
def add_security_groups(self, context, dbcontext, port):
"""Populate the 'security_groups' field with entire records."""
groups = [context._plugin.get_security_group(dbcontext, sg)
for sg in port['security_groups']]
port['security_groups'] = groups
def sendjson(self, method, urlpath, obj, ignorecodes=[]):
"""Send json to the OpenDaylight controller."""
headers = {'Content-Type': 'application/json'}
data = jsonutils.dumps(obj, indent=2) if obj else None
if self.url:
url = '/'.join([self.url, urlpath])
LOG.debug(_('ODL-----> sending URL (%s) <-----ODL') % url)
LOG.debug(_('ODL-----> sending JSON (%s) <-----ODL') % obj)
r = requests.request(method, url=url,
headers=headers, data=data,
auth=self.auth, timeout=self.timeout)
# ignorecodes contains a list of HTTP error codes to ignore.
if r.status_code in ignorecodes:
return
r.raise_for_status()
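# For example (illustrative payload only): a full sync POSTs to
# <url>/networks with {"networks": [{...}, ...]} (see sync_resources
# above), while a single create POSTs {"network": {...}}, matching the
# key selection in sync_single_resource.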
def bind_port(self, context):
LOG.debug(_("Attempting to bind port %(port)s on "
"network %(network)s"),
{'port': context.current['id'],
'network': context.network.current['id']})
for segment in context.network.network_segments:
if self.check_segment(segment):
context.set_binding(segment[api.ID],
self.vif_type,
self.vif_details)
LOG.debug(_("Bound using segment: %s"), segment)
return
else:
LOG.debug(_("Refusing to bind port for segment ID %(id)s, "
"segment %(seg)s, phys net %(physnet)s, and "
"network type %(nettype)s"),
{'id': segment[api.ID],
'seg': segment[api.SEGMENTATION_ID],
'physnet': segment[api.PHYSICAL_NETWORK],
'nettype': segment[api.NETWORK_TYPE]})
def validate_port_binding(self, context):
if self.check_segment(context.bound_segment):
LOG.debug(_('Binding valid.'))
return True
LOG.warning(_("Binding invalid for port: %s"), context.current)
def unbind_port(self, context):
LOG.debug(_("Unbinding port %(port)s on "
"network %(network)s"),
{'port': context.current['id'],
'network': context.network.current['id']})
def check_segment(self, segment):
"""Verify a segment is valid for the OpenDaylight MechanismDriver.
Verify the requested segment is supported by ODL and return True or
False to indicate this to callers.
"""
network_type = segment[api.NETWORK_TYPE]
return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,
constants.TYPE_VXLAN]
| apache-2.0 |
WilsonWangTHU/fast-rcnn | lib/fast_rcnn/test.py | 43 | 11975 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from utils.cython_nms import nms
import cPickle
import heapq
from utils.blob import im_list_to_blob
import os
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
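# Editor's sketch (not in the original file): a tiny worked example of the
# level-selection rule above. Each RoI is assigned the pyramid scale whose
# scaled area is closest to 224*224, the canonical network input area. The
# boxes and scales below are made up for illustration.
def _example_project_im_rois():
    im_rois = np.array([[0, 0, 99, 99],      # 100x100 box
                        [0, 0, 599, 599]])   # 600x600 box
    scales = np.array([0.5, 1.0, 2.0])
    rois, levels = _project_im_rois(im_rois, scales)
    # 100x100: scaled areas (50^2, 100^2, 200^2) -> 200^2 is nearest to
    # 224^2, so level 2; 600x600: (300^2, 600^2, 1200^2) -> level 0.
    return rois, levels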
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def _bbox_pred(boxes, box_deltas):
"""Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
boxes = boxes.astype(np.float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + cfg.EPS
heights = boxes[:, 3] - boxes[:, 1] + cfg.EPS
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = box_deltas[:, 0::4]
dy = box_deltas[:, 1::4]
dw = box_deltas[:, 2::4]
dh = box_deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
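# Editor's sketch (not in the original file): _bbox_pred implements the usual
# R-CNN box-regression parameterisation. For a box with centre (cx, cy) and
# size (w, h), deltas (dx, dy, dw, dh) yield cx' = dx*w + cx, cy' = dy*h + cy,
# w' = w*exp(dw), h' = h*exp(dh), so all-zero deltas reproduce the input box
# (up to the tiny cfg.EPS fudge). Assumes cfg has been initialised.
def _example_bbox_pred():
    boxes = np.array([[10., 10., 20., 20.]])
    deltas = np.zeros((1, 4))
    return _bbox_pred(boxes, deltas)   # ~= [[10., 10., 20., 20.]]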
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def im_detect(net, im, boxes):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, unused_im_scale_factors = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['rois'].reshape(*(blobs['rois'].shape))
blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False),
rois=blobs['rois'].astype(np.float32, copy=False))
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = _bbox_pred(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes
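# Editor's sketch (not in the original file): the cfg.DEDUP_BOXES trick in
# im_detect() folds each RoI's five rounded coordinates into a single scalar
# key via a dot product with v = [1, 1e3, 1e6, 1e9, 1e12], effectively
# treating the coordinates as digits in base 1000; np.unique() then drops
# duplicate feature-map RoIs and inv_index scatters the results back to the
# original ordering. The same pattern with made-up data:
def _example_dedup_boxes():
    rois = np.array([[0., 1., 2., 3., 4.],
                     [0., 1., 2., 3., 4.],    # exact duplicate
                     [0., 5., 6., 7., 8.]])
    hashes = rois.dot(np.array([1, 1e3, 1e6, 1e9, 1e12]))
    _, index, inv_index = np.unique(hashes, return_index=True,
                                    return_inverse=True)
    return rois[index, :][inv_index, :]       # == rois, rebuilt from uniques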
def vis_detections(im, class_name, dets, thresh=0.3):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.title('{} {:.3f}'.format(class_name, score))
plt.show()
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
for cls_ind in xrange(num_classes):
for im_ind in xrange(num_images):
dets = all_boxes[cls_ind][im_ind]
            if len(dets) == 0:  # dets may be the initial [] or an ndarray
continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
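# Editor's note (illustrative): nms() greedily keeps the highest-scoring box
# and discards any remaining box whose overlap (IoU) with an already-kept box
# exceeds `thresh`; each dets row is (x1, y1, x2, y2, score), so with
# thresh=0.3 two near-duplicate detections of one object collapse into the
# better-scored one.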
def test_net(net, imdb):
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
    # heuristic: keep an average of 40 detections per class per image prior
    # to NMS
    max_per_set = 40 * num_images
    # heuristic: keep at most 100 detections per class per image prior to NMS
    max_per_image = 100
    # detection threshold for each class (this is adaptively set based on the
    # max_per_set constraint)
thresh = -np.inf * np.ones(imdb.num_classes)
# top_scores will hold one minheap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(imdb.num_classes)]
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, net)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
roidb = imdb.roidb
for i in xrange(num_images):
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(net, im, roidb[i]['boxes'])
_t['im_detect'].toc()
_t['misc'].tic()
for j in xrange(1, imdb.num_classes):
inds = np.where((scores[:, j] > thresh[j]) &
(roidb[i]['gt_classes'] == 0))[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
top_inds = np.argsort(-cls_scores)[:max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds, :]
# push new scores onto the minheap
for val in cls_scores:
heapq.heappush(top_scores[j], val)
            # if we've collected more than the max number of detections,
# then pop items off the minheap and update the class threshold
if len(top_scores[j]) > max_per_set:
while len(top_scores[j]) > max_per_set:
heapq.heappop(top_scores[j])
thresh[j] = top_scores[j][0]
all_boxes[j][i] = \
np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
if 0:
keep = nms(all_boxes[j][i], 0.3)
vis_detections(im, imdb.classes[j], all_boxes[j][i][keep, :])
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time)
for j in xrange(1, imdb.num_classes):
for i in xrange(num_images):
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Applying NMS to all detections'
nms_dets = apply_nms(all_boxes, cfg.TEST.NMS)
print 'Evaluating detections'
imdb.evaluate_detections(nms_dets, output_dir)
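# Editor's sketch (not in the original file): the top_scores minheap above is
# the adaptive-threshold heuristic. Once a class has collected more than
# max_per_set scores, the heap is popped down to size max_per_set and
# thresh[j] becomes the smallest retained score (heap[0]), so later images
# only keep detections at least that good. The same idea in isolation:
def _example_adaptive_threshold():
    heap, max_per_set = [], 3
    for val in [0.9, 0.1, 0.5, 0.7, 0.3]:
        heapq.heappush(heap, val)
        while len(heap) > max_per_set:
            heapq.heappop(heap)
    return heap[0]    # 0.5, the weakest of the three best scores seen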
| mit |
myusernamejeep/AutobahnPython | examples/wamp/serial2ws/serial2ws.py | 26 | 4486 | ###############################################################################
##
## Copyright 2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys, time
if sys.platform == 'win32':
## on windows, we need to use the following reactor for serial support
## http://twistedmatrix.com/trac/ticket/3802
##
from twisted.internet import win32eventreactor
win32eventreactor.install()
from twisted.internet import reactor
print "Using Twisted reactor", reactor.__class__
print
from twisted.python import usage, log
from twisted.protocols.basic import LineReceiver
from twisted.internet.serialport import SerialPort
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import listenWS
from autobahn.wamp import WampServerFactory, WampServerProtocol, exportRpc
class Serial2WsOptions(usage.Options):
optParameters = [
['baudrate', 'b', 9600, 'Serial baudrate'],
['port', 'p', 3, 'Serial port to use'],
['webport', 'w', 8080, 'Web port to use for embedded Web server'],
['wsurl', 's', "ws://localhost:9000", 'WebSocket port to use for embedded WebSocket server']
]
## MCU protocol
##
class McuProtocol(LineReceiver):
## need a reference to our WS-MCU gateway factory to dispatch PubSub events
##
def __init__(self, wsMcuFactory):
self.wsMcuFactory = wsMcuFactory
## this method is exported as RPC and can be called by connected clients
##
@exportRpc("control-led")
def controlLed(self, status):
if status:
print "turn on LED"
self.transport.write('1')
else:
print "turn off LED"
self.transport.write('0')
def connectionMade(self):
log.msg('Serial port connected.')
def lineReceived(self, line):
try:
## parse data received from MCU
##
data = [int(x) for x in line.split()]
## construct PubSub event from raw data
##
evt = {'id': data[0], 'value': data[1]}
## publish event to all clients subscribed to topic
##
self.wsMcuFactory.dispatch("http://example.com/mcu#analog-value", evt)
log.msg("Analog value: %s" % str(evt));
except ValueError:
log.err('Unable to parse value %s' % line)
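## Editor's note (illustrative, not part of the original example): the MCU is
## expected to print whitespace-separated integer pairs, one per line, e.g.
## "0 512" for sensor id 0 reading 512; lineReceived() above turns that into
## the PubSub event {'id': 0, 'value': 512} published on the
## http://example.com/mcu#analog-value topic.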
## WS-MCU protocol
##
class WsMcuProtocol(WampServerProtocol):
def onSessionOpen(self):
## register topic prefix under which we will publish MCU measurements
##
self.registerForPubSub("http://example.com/mcu#", True)
## register methods for RPC
##
self.registerForRpc(self.factory.mcuProtocol, "http://example.com/mcu-control#")
## WS-MCU factory
##
class WsMcuFactory(WampServerFactory):
protocol = WsMcuProtocol
def __init__(self, url):
WampServerFactory.__init__(self, url)
self.mcuProtocol = McuProtocol(self)
if __name__ == '__main__':
## parse options
##
o = Serial2WsOptions()
try:
o.parseOptions()
except usage.UsageError, errortext:
print '%s %s' % (sys.argv[0], errortext)
print 'Try %s --help for usage details' % sys.argv[0]
sys.exit(1)
baudrate = int(o.opts['baudrate'])
port = int(o.opts['port'])
webport = int(o.opts['webport'])
wsurl = o.opts['wsurl']
## start Twisted log system
##
log.startLogging(sys.stdout)
## create Serial2Ws gateway factory
##
wsMcuFactory = WsMcuFactory(wsurl)
listenWS(wsMcuFactory)
## create serial port and serial port protocol
##
log.msg('About to open serial port %d [%d baud] ..' % (port, baudrate))
serialPort = SerialPort(wsMcuFactory.mcuProtocol, port, reactor, baudrate = baudrate)
## create embedded web server for static files
##
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(webport, web)
## start Twisted reactor ..
##
reactor.run()
| apache-2.0 |
alobbs/autome | plugin.py | 1 | 1419 | import imp
import logging
import os
_shared_objs = {}
def _get_one(name):
global _shared_objs
# Build path
here = os.path.dirname(os.path.abspath(__file__))
plugins_dir = os.path.join(here, "plugins")
filepath = os.path.join(plugins_dir, '%s.py' % name)
# Load plug-in
with open(filepath, 'r') as f:
p = imp.load_module(name, f, filepath, ('py', 'r', imp.PY_SOURCE))
# Cached obj?
if name in _shared_objs:
return _shared_objs[name]
obj = None
for n in dir(p):
if n.lower() == name.lower():
obj = getattr(p, n)()
assert obj, "Class not found"
# Cache obj?
is_shared = getattr(obj, "SHARED_OBJ", False)
if is_shared:
_shared_objs[name] = obj
return obj
def get(*args):
if len(args) == 1:
return _get_one(*args)
else:
return tuple([_get_one(s) for s in args])
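# Editor's sketch (hypothetical plug-in, not shipped with this module): a
# file plugins/greeter.py containing
#
#     class Greeter(object):
#         SHARED_OBJ = True      # opt in to instance caching across get()s
#
#         def hello(self):
#             return "hi"
#
# is loaded with get("greeter"); _get_one() matches the class name against
# the module name case-insensitively, instantiates it, and caches the
# instance because SHARED_OBJ is set.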
def logger(script_name):
# Logging file
log_fname = "{}.log".format(script_name)
dirfp = os.path.expanduser("~/.autome/logs")
log_fp = os.path.join(dirfp, log_fname)
# Logger
fstring = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(fstring)
handler = logging.FileHandler(log_fp)
handler.setFormatter(formatter)
logger = logging.getLogger(script_name)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
return logger
| mit |
jelly/calibre | src/calibre/ebooks/rtf/rtfml.py | 2 | 11681 | # -*- coding: utf-8 -*-
__license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Transform OEB content into RTF markup
'''
import os
import re
import cStringIO
from lxml import etree
from calibre.ebooks.metadata import authors_to_string
from calibre.utils.img import save_cover_data_to
from calibre.utils.imghdr import identify
TAGS = {
'b': '\\b',
'del': '\\deleted',
'h1': '\\s1 \\afs32',
'h2': '\\s2 \\afs28',
'h3': '\\s3 \\afs28',
'h4': '\\s4 \\afs23',
'h5': '\\s5 \\afs23',
'h6': '\\s6 \\afs21',
'i': '\\i',
'li': '\t',
'p': '\t',
'sub': '\\sub',
'sup': '\\super',
'u': '\\ul',
}
SINGLE_TAGS = {
'br': '\n{\\line }\n',
}
STYLES = [
('font-weight', {'bold': '\\b', 'bolder': '\\b'}),
('font-style', {'italic': '\\i'}),
('text-align', {'center': '\\qc', 'left': '\\ql', 'right': '\\qr'}),
('text-decoration', {'line-through': '\\strike', 'underline': '\\ul'}),
]
BLOCK_TAGS = [
'div',
'p',
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'li',
]
BLOCK_STYLES = [
'block'
]
'''
TODO:
* Tables
* Fonts
'''
def txt2rtf(text):
    # Escape RTF control characters. The backslash must be escaped before the
    # braces, otherwise the escapes just introduced for { and } would get
    # escaped a second time.
    text = text.replace('\\', r'\'5c')
    text = text.replace('{', r'\'7b')
    text = text.replace('}', r'\'7d')
if not isinstance(text, unicode):
return text
buf = cStringIO.StringIO()
for x in text:
val = ord(x)
if val == 160:
buf.write('\\~')
elif val <= 127:
buf.write(x)
else:
c = r'\u{0:d}?'.format(val)
buf.write(c)
return buf.getvalue()
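# Editor's note (illustrative, assuming the reordered escaping above): RTF
# delimiters become hex escapes and non-ASCII characters become \uN? escapes,
# e.g. txt2rtf(u'a{b}\xe9') -> u"a\'7bb\'7d\u233?", and a non-breaking space
# (U+00A0) becomes the RTF token \~.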
class RTFMLizer(object):
def __init__(self, log):
self.log = log
def extract_content(self, oeb_book, opts):
self.log.info('Converting XHTML to RTF markup...')
self.oeb_book = oeb_book
self.opts = opts
return self.mlize_spine()
def mlize_spine(self):
from calibre.ebooks.oeb.base import XHTML
from calibre.ebooks.oeb.stylizer import Stylizer
output = self.header()
if 'titlepage' in self.oeb_book.guide:
href = self.oeb_book.guide['titlepage'].href
item = self.oeb_book.manifest.hrefs[href]
if item.spine_position is None:
stylizer = Stylizer(item.data, item.href, self.oeb_book,
self.opts, self.opts.output_profile)
self.currently_dumping_item = item
output += self.dump_text(item.data.find(XHTML('body')), stylizer)
output += '{\\page }'
for item in self.oeb_book.spine:
self.log.debug('Converting %s to RTF markup...' % item.href)
# Removing comments is needed as comments with -- inside them can
# cause fromstring() to fail
content = re.sub(ur'<!--.*?-->', u'', etree.tostring(item.data, encoding=unicode), flags=re.DOTALL)
content = self.remove_newlines(content)
content = self.remove_tabs(content)
content = etree.fromstring(content)
stylizer = Stylizer(content, item.href, self.oeb_book, self.opts, self.opts.output_profile)
self.currently_dumping_item = item
output += self.dump_text(content.find(XHTML('body')), stylizer)
output += '{\\page }'
output += self.footer()
output = self.insert_images(output)
output = self.clean_text(output)
return output
def remove_newlines(self, text):
self.log.debug('\tRemove newlines for processing...')
text = text.replace('\r\n', ' ')
text = text.replace('\n', ' ')
text = text.replace('\r', ' ')
return text
def remove_tabs(self, text):
        self.log.debug('\tReplace tabs with space for processing...')
text = text.replace('\t', ' ')
return text
def header(self):
header = u'{\\rtf1{\\info{\\title %s}{\\author %s}}\\ansi\\ansicpg1252\\deff0\\deflang1033\n' % (
self.oeb_book.metadata.title[0].value, authors_to_string([x.value for x in self.oeb_book.metadata.creator]))
return header + (
'{\\fonttbl{\\f0\\froman\\fprq2\\fcharset128 Times New Roman;}{\\f1\\froman\\fprq2\\fcharset128 Times New Roman;}{\\f2\\fswiss\\fprq2\\fcharset128 Arial;}{\\f3\\fnil\\fprq2\\fcharset128 Arial;}{\\f4\\fnil\\fprq2\\fcharset128 MS Mincho;}{\\f5\\fnil\\fprq2\\fcharset128 Tahoma;}{\\f6\\fnil\\fprq0\\fcharset128 Tahoma;}}\n' # noqa
'{\\stylesheet{\\ql \\li0\\ri0\\nowidctlpar\\wrapdefault\\faauto\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\af25\\afs24\\alang1033 \\ltrch\\fcs0 \\fs24\\lang1033\\langfe255\\cgrid\\langnp1033\\langfenp255 \\snext0 Normal;}\n' # noqa
'{\\s1\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel0\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs32\\alang1033 \\ltrch\\fcs0 \\b\\fs32\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink21 heading 1;}\n' # noqa
'{\\s2\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel1\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\ai\\af0\\afs28\\alang1033 \\ltrch\\fcs0 \\b\\i\\fs28\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink22 heading 2;}\n' # noqa
'{\\s3\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel2\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs28\\alang1033 \\ltrch\\fcs0 \\b\\fs28\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink23 heading 3;}\n' # noqa
'{\\s4\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel3\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\ai\\af0\\afs23\\alang1033 \\ltrch\\fcs0\\b\\i\\fs23\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink24 heading 4;}\n' # noqa
'{\\s5\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel4\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs23\\alang1033 \\ltrch\\fcs0 \\b\\fs23\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink25 heading 5;}\n' # noqa
'{\\s6\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel5\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs21\\alang1033 \\ltrch\\fcs0 \\b\\fs21\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink26 heading 6;}}\n' # noqa
)
def footer(self):
return ' }'
def insert_images(self, text):
from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
for item in self.oeb_book.manifest:
if item.media_type in OEB_RASTER_IMAGES:
src = item.href
try:
data, width, height = self.image_to_hexstring(item.data)
except:
self.log.warn('Image %s is corrupted, ignoring'%item.href)
repl = '\n\n'
else:
repl = '\n\n{\\*\\shppict{\\pict\\jpegblip\\picw%i\\pich%i \n%s\n}}\n\n' % (width, height, data)
text = text.replace('SPECIAL_IMAGE-%s-REPLACE_ME' % src, repl)
return text
def image_to_hexstring(self, data):
data = save_cover_data_to(data)
width, height = identify(data)[1:]
raw_hex = ''
for char in data:
raw_hex += hex(ord(char)).replace('0x', '').rjust(2, '0')
        # Images must be broken up so that they are no longer than 128 chars
        # per line
hex_string = ''
col = 1
for char in raw_hex:
if col == 129:
hex_string += '\n'
col = 1
col += 1
hex_string += char
return (hex_string, width, height)
def clean_text(self, text):
# Remove excessive newlines
text = re.sub('%s{3,}' % os.linesep, '%s%s' % (os.linesep, os.linesep), text)
# Remove excessive spaces
text = re.sub('[ ]{2,}', ' ', text)
text = re.sub('\t{2,}', '\t', text)
text = re.sub('\t ', '\t', text)
# Remove excessive line breaks
text = re.sub(r'(\{\\line \}\s*){3,}', r'{\\line }{\\line }', text)
# Remove non-breaking spaces
text = text.replace(u'\xa0', ' ')
text = text.replace('\n\r', '\n')
return text
def dump_text(self, elem, stylizer, tag_stack=[]):
from calibre.ebooks.oeb.base import (XHTML_NS, namespace, barename,
urlnormalize)
if not isinstance(elem.tag, basestring) \
or namespace(elem.tag) != XHTML_NS:
p = elem.getparent()
if p is not None and isinstance(p.tag, basestring) and namespace(p.tag) == XHTML_NS \
and elem.tail:
return elem.tail
return u''
text = u''
style = stylizer.style(elem)
if style['display'] in ('none', 'oeb-page-head', 'oeb-page-foot') \
or style['visibility'] == 'hidden':
if hasattr(elem, 'tail') and elem.tail:
return elem.tail
return u''
tag = barename(elem.tag)
tag_count = 0
# Are we in a paragraph block?
if tag in BLOCK_TAGS or style['display'] in BLOCK_STYLES:
if 'block' not in tag_stack:
tag_count += 1
tag_stack.append('block')
# Process tags that need special processing and that do not have inner
# text. Usually these require an argument
if tag == 'img':
src = elem.get('src')
if src:
src = urlnormalize(self.currently_dumping_item.abshref(src))
block_start = ''
block_end = ''
if 'block' not in tag_stack:
block_start = '{\\par\\pard\\hyphpar '
block_end = '}'
text += '%s SPECIAL_IMAGE-%s-REPLACE_ME %s' % (block_start, src, block_end)
single_tag = SINGLE_TAGS.get(tag, None)
if single_tag:
text += single_tag
rtf_tag = TAGS.get(tag, None)
if rtf_tag and rtf_tag not in tag_stack:
tag_count += 1
text += '{%s\n' % rtf_tag
tag_stack.append(rtf_tag)
# Processes style information
for s in STYLES:
style_tag = s[1].get(style[s[0]], None)
if style_tag and style_tag not in tag_stack:
tag_count += 1
text += '{%s\n' % style_tag
tag_stack.append(style_tag)
# Proccess tags that contain text.
if hasattr(elem, 'text') and elem.text:
text += txt2rtf(elem.text)
for item in elem:
text += self.dump_text(item, stylizer, tag_stack)
for i in range(0, tag_count):
end_tag = tag_stack.pop()
if end_tag != 'block':
if tag in BLOCK_TAGS:
text += u'\\par\\pard\\plain\\hyphpar}'
else:
text += u'}'
if hasattr(elem, 'tail') and elem.tail:
if 'block' in tag_stack:
text += '%s' % txt2rtf(elem.tail)
else:
text += '{\\par\\pard\\hyphpar %s}' % txt2rtf(elem.tail)
return text
| gpl-3.0 |
oihane/odoomrp-utils | purchase_order_line_form_button/models/purchase_order.py | 2 | 1142 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, api, _
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
@api.multi
def button_save_data(self):
return True
@api.multi
def button_details(self):
context = self.env.context.copy()
view_id = self.env.ref(
'purchase_order_line_form_button.'
'purchase_order_line_button_form_view').id
context['view_buttons'] = True
context['parent'] = self.order_id.id
view = {
'name': _('Details'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'purchase.order.line',
'view_id': view_id,
'type': 'ir.actions.act_window',
'target': 'new',
'readonly': True,
'res_id': self.id,
'context': context
}
return view
| agpl-3.0 |
NeovaHealth/odoo | addons/l10n_nl/__init__.py | 424 | 1413 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 Veritos - Jan Verlaan - www.veritos.nl
#
# WARNING: This program as such is intended to be used by professional
#    programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
#    guarantees and support are strongly advised to contract a Free Software
# Service Company like Veritos.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
faun/django_test | build/lib/django/db/models/options.py | 4 | 19324 | import re
from bisect import bisect
from django.conf import settings
from django.db.models.related import RelatedObject
from django.db.models.fields.related import ManyToManyRel
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.proxy import OrderWrt
from django.db.models.loading import get_models, app_cache_ready
from django.utils.translation import activate, deactivate_all, get_language, string_concat
from django.utils.encoding import force_unicode, smart_str
from django.utils.datastructures import SortedDict
# Calculate the verbose_name by converting from InitialCaps to "lowercase with spaces".
get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
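# Editor's note (illustrative): the regex splits on case boundaries while
# keeping acronym runs intact, e.g. get_verbose_name('PersonProfile') ==
# 'person profile' and get_verbose_name('HTTPResponse') == 'http response'.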
DEFAULT_NAMES = ('verbose_name', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'auto_created')
class Options(object):
def __init__(self, meta, app_label=None):
self.local_fields, self.local_many_to_many = [], []
self.virtual_fields = []
self.module_name, self.verbose_name = None, None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self.unique_together = []
self.permissions = []
self.object_name, self.app_label = None, app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.admin = None
self.meta = meta
self.pk = None
self.has_auto_field, self.auto_field = False, None
self.abstract = False
self.managed = True
self.proxy = False
self.proxy_for_model = None
self.parents = SortedDict()
self.duplicate_targets = {}
self.auto_created = False
# To handle various inheritance situations, we need to track where
# managers came from (concrete or abstract base classes).
self.abstract_managers = []
self.concrete_managers = []
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.util import truncate_name
cls._meta = self
self.installed = re.sub('\.models$', '', cls.__module__) in settings.INSTALLED_APPS
# First, construct the default values for these options.
self.object_name = cls.__name__
self.module_name = self.object_name.lower()
self.verbose_name = get_verbose_name(self.object_name)
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
# unique_together can be either a tuple of tuples, or a single
# tuple of two strings. Normalize it to a tuple of tuples, so that
# calling code can uniformly expect that.
ut = meta_attrs.pop('unique_together', self.unique_together)
if ut and not isinstance(ut[0], (tuple, list)):
ut = (ut,)
self.unique_together = ut
# verbose_name_plural is a special case because it uses a 's'
# by default.
self.verbose_name_plural = meta_attrs.pop('verbose_name_plural', string_concat(self.verbose_name, 's'))
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + module_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.module_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
self.order_with_respect_to = self.get_field(self.order_with_respect_to)
self.ordering = ('_order',)
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = self.parents.value_for_index(0)
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
# Determine any sets of fields that are pointing to the same targets
# (e.g. two ForeignKeys to the same remote model). The query
# construction code needs to know this. At the end of this,
# self.duplicate_targets will map each duplicate field column to the
# columns it duplicates.
collections = {}
for column, target in self.duplicate_targets.iteritems():
try:
collections[target].add(column)
except KeyError:
collections[target] = set([column])
self.duplicate_targets = {}
for elt in collections.itervalues():
if len(elt) == 1:
continue
for column in elt:
self.duplicate_targets[column] = elt.difference(set([column]))
def add_field(self, field):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if field.rel and isinstance(field.rel, ManyToManyRel):
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
if hasattr(self, '_m2m_cache'):
del self._m2m_cache
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
if hasattr(self, '_field_cache'):
del self._field_cache
del self._field_name_cache
if hasattr(self, '_name_map'):
del self._name_map
def add_virtual_field(self, field):
self.virtual_fields.append(field)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_str(self.app_label), smart_str(self.module_name))
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
lang = get_language()
deactivate_all()
raw = force_unicode(self.verbose_name)
activate(lang)
return raw
verbose_name_raw = property(verbose_name_raw)
def _fields(self):
"""
The getter for self.fields. This returns the list of field objects
available to this model (including through parent models).
Callers are not permitted to modify this list, since it's a reference
to this instance (not a copy).
"""
try:
self._field_name_cache
except AttributeError:
self._fill_fields_cache()
return self._field_name_cache
fields = property(_fields)
def get_fields_with_model(self):
"""
Returns a sequence of (field, model) pairs for all fields. The "model"
element is None for fields on the current model. Mostly of use when
constructing queries so that we know which model a field belongs to.
"""
try:
self._field_cache
except AttributeError:
self._fill_fields_cache()
return self._field_cache
def _fill_fields_cache(self):
cache = []
for parent in self.parents:
for field, model in parent._meta.get_fields_with_model():
if model:
cache.append((field, model))
else:
cache.append((field, parent))
cache.extend([(f, None) for f in self.local_fields])
self._field_cache = tuple(cache)
self._field_name_cache = [x for x, _ in cache]
def _many_to_many(self):
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return self._m2m_cache.keys()
many_to_many = property(_many_to_many)
def get_m2m_with_model(self):
"""
The many-to-many version of get_fields_with_model().
"""
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return self._m2m_cache.items()
def _fill_m2m_cache(self):
cache = SortedDict()
for parent in self.parents:
for field, model in parent._meta.get_m2m_with_model():
if model:
cache[field] = model
else:
cache[field] = parent
for field in self.local_many_to_many:
cache[field] = None
self._m2m_cache = cache
def get_field(self, name, many_to_many=True):
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
to_search = many_to_many and (self.fields + self.many_to_many) or self.fields
for f in to_search:
if f.name == name:
return f
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))
def get_field_by_name(self, name):
"""
Returns the (field_object, model, direct, m2m), where field_object is
the Field instance for the given name, model is the model containing
this field (None for local fields), direct is True if the field exists
on this model, and m2m is True for many-to-many relations. When
'direct' is False, 'field_object' is the corresponding RelatedObject
for this field (since the field doesn't have an instance associated
with it).
Uses a cache internally, so after the first access, this is very fast.
"""
try:
try:
return self._name_map[name]
except AttributeError:
cache = self.init_name_map()
return cache[name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r'
% (self.object_name, name))
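    # Editor's note (illustrative): the returned 4-tuple distinguishes the
    # cases, e.g. a local CharField comes back as (field, None, True, False),
    # a local ManyToManyField as (field, None, True, True), and a reverse
    # relation as (RelatedObject, model_or_None, False, m2m).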
def get_all_field_names(self):
"""
Returns a list of all field names that are possible for this model
(including reverse relation names). This is used for pretty printing
debugging output (a list of choices), so any internal-only field names
are not included.
"""
try:
cache = self._name_map
except AttributeError:
cache = self.init_name_map()
names = cache.keys()
names.sort()
# Internal-only names end with "+" (symmetrical m2m related names being
# the main example). Trim them.
return [val for val in names if not val.endswith('+')]
def init_name_map(self):
"""
Initialises the field name -> field object mapping.
"""
cache = {}
# We intentionally handle related m2m objects first so that symmetrical
# m2m accessor names can be overridden, if necessary.
for f, model in self.get_all_related_m2m_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, True)
for f, model in self.get_all_related_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, False)
for f, model in self.get_m2m_with_model():
cache[f.name] = (f, model, True, True)
for f, model in self.get_fields_with_model():
cache[f.name] = (f, model, True, False)
if app_cache_ready():
self._name_map = cache
return cache
def get_add_permission(self):
return 'add_%s' % self.object_name.lower()
def get_change_permission(self):
return 'change_%s' % self.object_name.lower()
def get_delete_permission(self):
return 'delete_%s' % self.object_name.lower()
def get_all_related_objects(self, local_only=False):
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
if local_only:
return [k for k, v in self._related_objects_cache.items() if not v]
return self._related_objects_cache.keys()
def get_all_related_objects_with_model(self):
"""
Returns a list of (related-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
return self._related_objects_cache.items()
def _fill_related_objects_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_objects_with_model():
if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models():
for f in klass._meta.local_fields:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
self._related_objects_cache = cache
def get_all_related_many_to_many_objects(self, local_only=False):
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
if local_only:
return [k for k, v in cache.items() if not v]
return cache.keys()
def get_all_related_m2m_objects_with_model(self):
"""
Returns a list of (related-m2m-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
return cache.items()
def _fill_related_many_to_many_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
if obj.field.creation_counter < 0 and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models():
for f in klass._meta.local_many_to_many:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
if app_cache_ready():
self._related_many_to_many_cache = cache
return cache
def get_base_chain(self, model):
"""
        Returns a list of parent classes leading to 'model' (ordered from the
        closest to the most distant ancestor). This has to handle the case
        where 'model' is a grandparent or an even more distant ancestor.
"""
if not self.parents:
return
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
raise TypeError('%r is not an ancestor of this model'
% model._meta.module_name)
def get_parent_list(self):
"""
        Returns a set of all the ancestors of this model. Useful for
determining if something is an ancestor, regardless of lineage.
"""
result = set()
for parent in self.parents:
result.add(parent)
result.update(parent._meta.get_parent_list())
return result
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In case of a proxied model, the first link of the chain to
                # the ancestor is the parent link itself
return self.parents[parent] or parent_link
def get_ordered_objects(self):
"Returns a list of Options objects that are ordered with respect to this object."
if not hasattr(self, '_ordered_objects'):
objects = []
# TODO
#for klass in get_models(get_app(self.app_label)):
# opts = klass._meta
# if opts.order_with_respect_to and opts.order_with_respect_to.rel \
# and self == opts.order_with_respect_to.rel.to._meta:
# objects.append(opts)
self._ordered_objects = objects
return self._ordered_objects
def pk_index(self):
"""
Returns the index of the primary key field in the self.fields list.
"""
return self.fields.index(self.pk)
| bsd-3-clause |
Vutshi/qutip | qutip/examples/ex_43.py | 1 | 2912 | #
# Using the propagator to find the steady state of a driven system.
#
from qutip import *
from pylab import *
def run():
#
# configure the parameters
#
delta = 0.075 * 2 * pi # qubit sigma_x coefficient
eps0 = 0.0 * 2 * pi # qubit sigma_z coefficient
A = 2.0 * 2 * pi # sweep rate
gamma1 = 0.0001 # relaxation rate
gamma2 = 0.005 # dephasing rate
psi0 = basis(2, 0) # initial state
omega = 0.05 * 2 * pi # driving frequency
T = (2 * pi) / omega # driving period
#
# Hamiltonian
#
sx = sigmax()
sz = sigmaz()
sm = destroy(2)
H0 = - delta / 2.0 * sx - eps0 / 2.0 * sz
H1 = - A / 2.0 * sz
# alternative 1: using function callback format (H_func_t)
# args = [H0, H1, omega]
# def hamiltonian_t(t, args):
# H0 = args[0]
# H1 = args[1]
# w = args[2]
# return H0 + cos(w * t) * H1
# alternative 2: using list-callback format
args = {'w': omega}
def H1_coeff_t(t, args):
return cos(args['w'] * t)
hamiltonian_t = [H0, [H1, H1_coeff_t]]
# alternative 3: using list-string format
# args = {'w': omega}
# hamiltonian_t = [H0, [H1, 'cos(w * t)']]
#
# collapse operators
#
c_ops = []
n_th = 0.0 # temperature in terms of the bath excitation number
rate = gamma1 * (1 + n_th)
if rate > 0.0:
c_ops.append(sqrt(rate) * sm) # relaxation
rate = gamma1 * n_th
if rate > 0.0:
c_ops.append(sqrt(rate) * sm.dag()) # excitation
rate = gamma2
if rate > 0.0:
c_ops.append(sqrt(rate) * sz) # dephasing
#
# evolve for five driving periods
#
tlist = linspace(0.0, 5 * T, 1500)
output = mesolve(hamiltonian_t, psi0, tlist, c_ops, [sm.dag() * sm], args)
#
# find the propagator for one driving period
#
T = 2 * pi / omega
U = propagator(hamiltonian_t, T, c_ops, args)
#
# find the steady state of repeated applications of the propagator
# (i.e., t -> inf)
#
rho_ss = propagator_steadystate(U)
p_ex_ss = expect(sm.dag() * sm, rho_ss)
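    # Editor's note (illustrative): U is the one-period (Floquet) propagator,
    # mapping rho(t) to rho(t+T); propagator_steadystate(U) returns its fixed
    # point U rho_ss = rho_ss, i.e. the t -> infinity stroboscopic state that
    # the horizontal black line in the plot below compares against the
    # transient evolution.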
#
# plot the results
#
figure(1)
subplot(211)
plot(tlist, output.expect[0], 'b')
plot(tlist, 1 - output.expect[0], 'r')
plot(tlist, ones(shape(tlist)) * p_ex_ss, 'k', linewidth=2)
xlabel('Time')
ylabel('Probability')
title('Occupation probabilty of qubit [NEW]')
legend((r"$\left|1\right>$", r"$\left|0\right>$",
r"$\left|1\right>$ steady state"), loc=0)
subplot(212)
plot(tlist, -delta / 2.0 * ones(shape(tlist)), 'r')
plot(tlist, -(eps0 / 2.0 + A / 2.0 * cos(omega * tlist)), 'b')
legend(("$\sigma_x$ coefficient", "$\sigma_z$ coefficient"))
xlabel('Time')
ylabel('Coefficients in the Hamiltonian')
show()
close()
if __name__ == '__main__':
run()
| gpl-3.0 |
adybbroe/pyspectral | bin/download_rsr.py | 2 | 1863 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, 2019 Pytroll developers
#
# Author(s):
#
# Adam.Dybbroe <adam.dybbroe@smhi.se>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Script to download the RSR files from internet
"""
import logging
import argparse
from pyspectral.utils import logging_on, logging_off
from pyspectral.rsr_reader import check_and_download
LOG = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Download relative spectral response data in hdf5')
parser.add_argument("-o", "--destination", help=("Destination path where to store the files"),
default=None, type=str)
parser.add_argument(
"-d", '--dry_run', help=("Dry run - no action"), action='store_true',
default=False)
parser.add_argument(
"-v", '--verbose', help=("Turn logging on"), action='store_true')
args = parser.parse_args()
dest_dir = args.destination
verbose = args.verbose
dry_run = args.dry_run
if verbose:
logging_on(logging.DEBUG)
else:
logging_off()
if dest_dir:
check_and_download(dest_dir=dest_dir, dry_run=dry_run)
else:
check_and_download(dry_run=dry_run)
| gpl-3.0 |
prutseltje/ansible | lib/ansible/modules/windows/win_domain.py | 15 | 2170 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
module: win_domain
short_description: Ensures the existence of a Windows domain
version_added: 2.3
description:
- Ensure that the domain named by C(dns_domain_name) exists and is reachable.
- If the domain is not reachable, the domain is created in a new forest on the target Windows Server 2012R2+ host.
- This module may require subsequent use of the M(win_reboot) action if changes are made.
options:
dns_domain_name:
description:
- The DNS name of the domain which should exist and be reachable or reside on the target Windows host.
required: yes
domain_netbios_name:
description:
- The netbios name of the domain.
- If not set, then the default netbios name will be the first section of dns_domain_name, up to, but not including the first period.
version_added: '2.6'
safe_mode_password:
description:
- Safe mode password for the domain controller.
required: yes
database_path:
description:
- The path to a directory on a fixed disk of the Windows host where the
domain database will be created.
- If not set then the default path is C(%SYSTEMROOT%\NTDS).
version_added: '2.5'
sysvol_path:
description:
- The path to a directory on a fixed disk of the Windows host where the
Sysvol file will be created.
- If not set then the default path is C(%SYSTEMROOT%\SYSVOL).
version_added: '2.5'
author:
- Matt Davis (@nitzmahone)
'''
RETURN = '''
reboot_required:
description: True if changes were made that require a reboot.
returned: always
type: boolean
sample: true
'''
EXAMPLES = r'''
- name: Ensure the named domain is reachable from the target host; if not, create the domain in a new forest residing on the target host
win_domain:
dns_domain_name: ansible.vagrant
safe_mode_password: password123!
'''
| gpl-3.0 |
Pirata-Repository/Pirata | script.navi-x/src/CDialogBrowse.py | 6 | 5956 | #############################################################################
#
# Copyright (C) 2013 Navi-X
#
# This file is part of Navi-X.
#
# Navi-X is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Navi-X is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Navi-X. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#############################################################################
#
# CDialogBrowse:
# This class is a non-standard dialog window which is used for downloading
# and file selection.
#@todo: Use WindowXMLDialog instead of the WindowDialog for better customization
#@todo: fix layout issues for non-XBOX platforms.
#############################################################################
from string import *
import sys, os.path
import urllib
import urllib2
import re, random, string
import xbmc, xbmcgui
import re, os, time, datetime, traceback
import shutil
import zipfile
from settings import *
try: Emulating = xbmcgui.Emulating
except: Emulating = False
LABEL_TITLE = 141
TEXT_PATH = 142
BUTTON_BROWSE = 143
BUTTON_EDIT = 144
BUTTON_OK = 145
BUTTON_CANCEL = 146
######################################################################
# Description: Browse dialog class
######################################################################
class CDialogBrowse(xbmcgui.WindowXMLDialog):
def __init__(self, strXMLname, strFallbackPath):
self.setCoordinateResolution(PAL_4x3)
self.filename=''
self.dir=''
def onInit( self ):
control=self.getControl(LABEL_TITLE)
control.setLabel(self.label)
self.SetLabel(self.dir + self.filename)
control=self.getControl(BUTTON_OK)
self.setFocus(control)
def onAction(self, action):
#select item is handled via other onClick().
if not action.getId() == ACTION_SELECT_ITEM:
self.onAction1(action)
def onAction1(self, action):
if (action==ACTION_PREVIOUS_MENU) or (action==ACTION_PARENT_DIR) or (action==ACTION_PREVIOUS_MENU2):
self.state= -1 #success
self.close() #exit
if action==ACTION_SELECT_ITEM:
if self.getFocus()==self.getControl(BUTTON_OK):
if (self.dir.lower().startswith('http://')==False) and (self.dir.lower().startswith('ftp://')==False):
self.state=0 #success
self.close() #exit
elif os.path.exists(self.dir)==False:
dialog=xbmcgui.Dialog()
dialog.ok("Error", "Destination directory does not exist")
else:
self.state=0 #success
self.close() #exit
if self.getFocus()==self.getControl(BUTTON_CANCEL):
self.state= -1 #success
self.close() #exit
if self.getFocus()==self.getControl(BUTTON_EDIT):
keyboard=xbmc.Keyboard(self.dir+self.filename)
keyboard.doModal()
if (keyboard.isConfirmed()==True):
fn=keyboard.getText()
pos=fn.rfind(SEPARATOR) #find last '\' in the string
if fn.lower().startswith('http://') or fn.lower().startswith('ftp://'):
filename=fn
self.filename=fn
elif pos != -1:
self.dir=fn[:pos+1]
filename=fn[pos+1:]
if len(filename) > 42:
dialog=xbmcgui.Dialog()
dialog.ok("Error","Filename exceeds 42 characters.")
self.filename=filename
else:
self.filename=filename
self.SetLabel(self.dir+self.filename)
if self.getFocus() == self.getControl(BUTTON_BROWSE):
dialog = xbmcgui.Dialog()
fn = dialog.browse(self.type,'Xbox Media Center', 'files', '', False, False)
if fn:
if self.type == 3:
if fn[-1] != SEPARATOR:
fn = fn + SEPARATOR
self.dir = fn
else:
pos = fn.rfind(SEPARATOR) #find last '\' in the string
if pos != -1:
self.dir = fn[:pos+1]
filename = fn[pos+1:]
self.filename = filename
self.SetLabel(self.dir + self.filename)
def onFocus( self, controlId ):
pass
def onClick( self, controlId ):
if controlId == BUTTON_CANCEL:
self.onAction1(ACTION_PREVIOUS_MENU)
else:
self.onAction1(ACTION_SELECT_ITEM)
def onControl(self, control):
pass
def SetFile(self, dir, filename, type, heading=""):
self.dir = dir
self.filename = filename
self.type = type
self.label = heading
def SetLabel(self, filename):
control = self.getControl(TEXT_PATH)
#control.setLabel(filename[-60:])
control.setText(filename)
| gpl-2.0 |
xuechong87/Moegirlwiki | extensions/Widgets/googlecode/googlecode_distutils_upload.py | 7 | 3784 | # Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
'''distutils command class for uploading to Google Code
Add this command to your setup.py script for automatic uploading of
source and Windows binary distributions. For example:
try:
from googlecode_distutils_upload import upload
except ImportError:
class upload(distutils.core.Command):
user_options = []
def __init__(self, *args, **kwargs):
sys.stderr.write("""\
error: Install this module in site-packages to upload:
http://support.googlecode.com/svn/trunk/scripts/googlecode_distutils_upload.py
""")
sys.exit(3)
setup(...,
cmdclass={'upload': upload},
)
'''
import distutils
import distutils.command.bdist_wininst
import os
import sys
import googlecode_upload
SOURCE_LABELS = ['Type-Source']
WINDOWS_LABELS = ['OpSys-Windows', 'Type-Installer']
class upload(distutils.core.Command):
description = 'upload source or Windows distribution to Google Code'
user_options = [('src', None,
'upload source distribution'),
('windows', None,
'upload Windows distribution'),
('dist-dir=', 'd',
'directory to find distribution archive in'
' [default: dist]'),
('config-dir=', None,
'read svn auth data from DIR'
' ("none" means not to use svn auth data)'),
('user=', 'u',
'Google Code username'),
]
boolean_options = ['src', 'windows']
def initialize_options(self):
self.src = False
self.windows = False
self.dist_dir = None
self.config_dir = None
self.user = None
def finalize_options(self):
# Validate src and windows options.
if (not self.src and not self.windows) or (self.src and self.windows):
sys.stderr.write('error: Use exactly one of --src or --windows\n')
sys.exit(2)
# Get dist-dir default from sdist or bdist_wininst.
if self.src:
self.set_undefined_options('sdist', ('dist_dir', 'dist_dir'))
else:
self.set_undefined_options('bdist_wininst', ('dist_dir', 'dist_dir'))
# Do nothing for config-dir and user; upload_find_auth does the
# right thing when they're None.
def run(self):
name = self.distribution.get_name()
version = self.distribution.get_version()
if self.src:
# TODO(epg): sdist is more flexible with formats...
fn = os.path.join(self.dist_dir, self.distribution.get_fullname())
if sys.platform == 'win32':
fn += '.zip'
else:
fn += '.tar.gz'
summary = ' '.join([name, version, 'source distribution'])
labels = SOURCE_LABELS
else:
# Get filename from bdist_wininst.
bd = distutils.command.bdist_wininst.bdist_wininst(self.distribution)
bd.initialize_options()
bd.dist_dir = self.dist_dir
bd.finalize_options()
fn = bd.get_installer_filename(self.distribution.get_fullname())
summary = ' '.join([name, version, 'for Windows'])
labels = WINDOWS_LABELS
(status, reason,
file_url) = googlecode_upload.upload_find_auth(fn, name, summary,
labels, self.config_dir,
self.user)
if file_url is None:
sys.stderr.write('error: %s (%d)\n' % (reason, status))
sys.exit(2)
sys.stdout.write('Uploaded %s\n' % (file_url,))
| gpl-2.0 |
FreeAgent/djangoappengine-starter | django/forms/fields.py | 103 | 38484 | """
Field classes.
"""
import datetime
import os
import re
import time
import urlparse
import warnings
from decimal import Decimal, DecimalException
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core.exceptions import ValidationError
from django.core import validators
import django.utils.copycompat as copy
from django.utils import formats
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, smart_str
from django.utils.functional import lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES
from util import ErrorList
from widgets import TextInput, PasswordInput, HiddenInput, MultipleHiddenInput, \
ClearableFileInput, CheckboxInput, Select, NullBooleanSelect, SelectMultiple, \
DateInput, DateTimeInput, TimeInput, SplitDateTimeWidget, SplitHiddenDateTimeWidget, \
FILE_INPUT_CONTRADICTION
__all__ = (
'Field', 'CharField', 'IntegerField',
'DEFAULT_DATE_INPUT_FORMATS', 'DateField',
'DEFAULT_TIME_INPUT_FORMATS', 'TimeField',
'DEFAULT_DATETIME_INPUT_FORMATS', 'DateTimeField', 'TimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'FilePathField', 'SlugField',
'TypedChoiceField', 'TypedMultipleChoiceField'
)
def en_format(name):
"""
Helper function to stay backward compatible.
"""
from django.conf.locale.en import formats
warnings.warn(
"`django.forms.fields.DEFAULT_%s` is deprecated; use `django.utils.formats.get_format('%s')` instead." % (name, name),
DeprecationWarning
)
return getattr(formats, name)
DEFAULT_DATE_INPUT_FORMATS = lazy(lambda: en_format('DATE_INPUT_FORMATS'), tuple, list)()
DEFAULT_TIME_INPUT_FORMATS = lazy(lambda: en_format('TIME_INPUT_FORMATS'), tuple, list)()
DEFAULT_DATETIME_INPUT_FORMATS = lazy(lambda: en_format('DATETIME_INPUT_FORMATS'), tuple, list)()
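# Note: the DEFAULT_* names above resolve lazily; en_format() only runs (and
# only emits its DeprecationWarning) once the value is actually used, e.g.:
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter('always')
#       list(DEFAULT_DATE_INPUT_FORMATS)  # forces evaluation, warns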
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
default_error_messages = {
'required': _(u'This field is required.'),
'invalid': _(u'Enter a valid value.'),
}
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, error_messages=None, show_hidden_initial=False,
validators=[], localize=False):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies whether a hidden widget
# with the initial value should be rendered after the visible widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
if label is not None:
label = smart_unicode(label)
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
if help_text is None:
self.help_text = u''
else:
self.help_text = smart_unicode(help_text)
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in validators.EMPTY_VALUES and self.required:
raise ValidationError(self.error_messages['required'])
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
return result
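# Illustrative walk through the clean() pipeline defined above -- to_python(),
# then validate(), then run_validators(). (Sketch only; assumes Django
# settings are configured so lazy translations can resolve.)
#   f = Field(required=True)
#   f.clean(u'hello')  # -> u'hello' (to_python() is the identity on Field)
#   f.clean(u'')       # raises ValidationError: u'This field is required.'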
class CharField(Field):
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(min_length))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(max_length))
def to_python(self, value):
"Returns a Unicode object."
if value in validators.EMPTY_VALUES:
return u''
return smart_unicode(value)
def widget_attrs(self, widget):
if self.max_length is not None and isinstance(widget, (TextInput, PasswordInput)):
# The HTML attribute is maxlength, not max_length.
return {'maxlength': str(self.max_length)}
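# Sketch: max_length both validates and feeds the widget's HTML attribute.
#   f = CharField(max_length=5)
#   f.widget.attrs      # -> {'maxlength': '5'} on the default TextInput
#   f.clean(u'abcdef')  # raises ValidationError via MaxLengthValidator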
class IntegerField(Field):
default_error_messages = {
'invalid': _(u'Enter a whole number.'),
'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = int(str(value))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
return value
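# Sketch:
#   f = IntegerField(min_value=0, max_value=100)
#   f.clean(u'42')   # -> 42
#   f.clean(u'-1')   # raises 'min_value'
#   f.clean(u'4.2')  # raises 'invalid' (int(str(u'4.2')) fails)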
class FloatField(IntegerField):
default_error_messages = {
'invalid': _(u'Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
# Calling super(IntegerField, ...) here is deliberate: it invokes
# Field.to_python() directly, skipping IntegerField's int() coercion.
value = super(IntegerField, self).to_python(value)
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
return value
class DecimalField(Field):
default_error_messages = {
'invalid': _(u'Enter a number.'),
'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
'max_digits': _('Ensure that there are no more than %s digits in total.'),
'max_decimal_places': _('Ensure that there are no more than %s decimal places.'),
'max_whole_digits': _('Ensure that there are no more than %s digits before the decimal point.')
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, *args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_str(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'])
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in validators.EMPTY_VALUES:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# but since NaN is the only value that isn't equal to itself, a
# self-inequality test identifies it.
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'])
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(self.error_messages['max_digits'] % self.max_digits)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(self.error_messages['max_decimal_places'] % self.decimal_places)
if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):
raise ValidationError(self.error_messages['max_whole_digits'] % (self.max_digits - self.decimal_places))
return value
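# Sketch of the digit bookkeeping in validate():
#   f = DecimalField(max_digits=4, decimal_places=2)
#   f.clean(u'12.34')   # -> Decimal('12.34')
#   f.clean(u'123.45')  # raises 'max_digits' (5 digits in total)
#   f.clean(u'1.234')   # raises 'max_decimal_places'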
class DateField(Field):
widget = DateInput
default_error_messages = {
'invalid': _(u'Enter a valid date.'),
}
def __init__(self, input_formats=None, *args, **kwargs):
super(DateField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
for format in self.input_formats or formats.get_format('DATE_INPUT_FORMATS'):
try:
return datetime.date(*time.strptime(value, format)[:3])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
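# Sketch:
#   f = DateField(input_formats=['%Y-%m-%d'])
#   f.clean(u'2010-12-21')              # -> datetime.date(2010, 12, 21)
#   f.clean(datetime.date(2010, 1, 1))  # passed through unchanged
#   f.clean(u'21/12/2010')              # raises 'invalid'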
class TimeField(Field):
widget = TimeInput
default_error_messages = {
'invalid': _(u'Enter a valid time.')
}
def __init__(self, input_formats=None, *args, **kwargs):
super(TimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.time):
return value
for format in self.input_formats or formats.get_format('TIME_INPUT_FORMATS'):
try:
return datetime.time(*time.strptime(value, format)[3:6])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class DateTimeField(Field):
widget = DateTimeInput
default_error_messages = {
'invalid': _(u'Enter a valid date/time.'),
}
def __init__(self, input_formats=None, *args, **kwargs):
super(DateTimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
if isinstance(value, list):
# Input comes from a SplitDateTimeWidget, for example. So, it's two
# components: date and time.
if len(value) != 2:
raise ValidationError(self.error_messages['invalid'])
if value[0] in validators.EMPTY_VALUES and value[1] in validators.EMPTY_VALUES:
return None
value = '%s %s' % tuple(value)
for format in self.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'):
try:
return datetime.datetime(*time.strptime(value, format)[:6])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
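# Sketch -- a two-element list (e.g. from a SplitDateTimeWidget) is joined
# before parsing:
#   f = DateTimeField(input_formats=['%Y-%m-%d %H:%M'])
#   f.clean([u'2010-12-21', u'14:30'])
#   # -> datetime.datetime(2010, 12, 21, 14, 30)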
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
# error_message is just kept for backwards compatibility:
if error_message:
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
if isinstance(regex, basestring):
regex = re.compile(regex)
self.regex = regex
self.validators.append(validators.RegexValidator(regex=regex))
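# Sketch:
#   f = RegexField(regex=r'^\d{5}$', error_message=u'Enter a 5-digit code.')
#   f.clean(u'12345')  # -> u'12345'
#   f.clean(u'1234')   # raises ValidationError with the custom message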
class EmailField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid e-mail address.'),
}
default_validators = [validators.validate_email]
def clean(self, value):
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _(u"No file was submitted. Check the encoding type on the form."),
'missing': _(u"No file was submitted."),
'empty': _(u"The submitted file is empty."),
'max_length': _(u'Ensure this filename has at most %(max)d characters (it has %(length)d).'),
'contradiction': _(u'Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in validators.EMPTY_VALUES:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'])
if self.max_length is not None and len(file_name) > self.max_length:
error_values = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'] % error_values)
if not file_name:
raise ValidationError(self.error_messages['invalid'])
if not file_size:
raise ValidationError(self.error_messages['empty'])
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'])
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
# in validators.EMPTY_VALUES; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
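# Sketch of the clearing protocol in clean():
#   f = FileField(required=False)
#   f.clean(False)                    # -> False (clear the stored file)
#   f.clean(None, initial=old_file)   # falls back to the initial file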
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(u"Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
import Image
# We need to get a file object for PIL. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = StringIO(data.read())
else:
file = StringIO(data['content'])
try:
# load() is the only method that can spot a truncated JPEG,
# but it cannot be called sanely after verify()
trial_image = Image.open(file)
trial_image.load()
# Since we're about to use the file again we have to reset the
# file object if possible.
if hasattr(file, 'reset'):
file.reset()
# verify() is the only method that can spot a corrupt PNG,
# but it must be called immediately after the constructor
trial_image = Image.open(file)
trial_image.verify()
except ImportError:
# Under PyPy, it is possible to import PIL. However, the underlying
# _imaging C module isn't available, so an ImportError will be
# raised. Catch and re-raise.
raise
except Exception: # Python Imaging Library doesn't recognize it as an image
raise ValidationError(self.error_messages['invalid_image'])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid URL.'),
'invalid_link': _(u'This URL appears to be a broken link.'),
}
def __init__(self, max_length=None, min_length=None, verify_exists=False,
validator_user_agent=validators.URL_VALIDATOR_USER_AGENT, *args, **kwargs):
super(URLField, self).__init__(max_length, min_length, *args,
**kwargs)
self.validators.append(validators.URLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent))
def to_python(self, value):
if value:
url_fields = list(urlparse.urlsplit(value))
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
value = urlparse.urlunsplit(url_fields)
url_fields = list(urlparse.urlsplit(value))
if not url_fields[2]:
# the path portion may need to be added before query params
url_fields[2] = '/'
value = urlparse.urlunsplit(url_fields)
return super(URLField, self).to_python(value)
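# Sketch of the normalization above:
#   f = URLField()
#   f.clean(u'example.com')  # -> u'http://example.com/' (scheme and path added)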
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if value in ('False', '0'):
value = False
else:
value = bool(value)
value = super(BooleanField, self).to_python(value)
if not value and self.required:
raise ValidationError(self.error_messages['required'])
return value
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
Explicitly checks for the strings 'True' and 'False', which is what a
hidden field will submit for True and False, and for '1' and '0', which
is what a RadioField will submit. Unlike BooleanField, we need to check
for True explicitly because we are not using the bool() function.
"""
if value in (True, 'True', '1'):
return True
elif value in (False, 'False', '0'):
return False
else:
return None
def validate(self, value):
pass
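# Sketch:
#   f = NullBooleanField()
#   f.clean(u'1')      # -> True
#   f.clean(u'False')  # -> False
#   f.clean(u'maybe')  # -> None (anything unrecognized cleans to None)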
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text=None, *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
self._choices = self.widget.choices = list(value)
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in validators.EMPTY_VALUES:
return u''
return smart_unicode(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == smart_unicode(k2):
return True
else:
if value == smart_unicode(k):
return True
return False
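# Sketch -- optgroup choices (nested lists) are searched too:
#   f = ChoiceField(choices=[('a', 'A'), ('Group', [('b', 'B'), ('c', 'C')])])
#   f.clean(u'c')  # -> u'c'
#   f.clean(u'z')  # raises 'invalid_choice'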
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def to_python(self, value):
"""
Validates that the value is in self.choices and can be coerced to the
right type.
"""
value = super(TypedChoiceField, self).to_python(value)
super(TypedChoiceField, self).validate(value)
if value == self.empty_value or value in validators.EMPTY_VALUES:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
return value
def validate(self, value):
pass
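# Sketch -- coerce converts the validated string to the desired type:
#   f = TypedChoiceField(choices=[(1, '+1'), (-1, '-1')], coerce=int)
#   f.clean(u'1')  # -> 1 (an int, not u'1')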
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _(u'Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'])
return [smart_unicode(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'])
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': val})
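# Sketch:
#   f = MultipleChoiceField(choices=[('r', 'Red'), ('g', 'Green')])
#   f.clean([u'r', u'g'])  # -> [u'r', u'g']
#   f.clean(u'r')          # raises 'invalid_list' (a list or tuple is required)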
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def to_python(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
value = super(TypedMultipleChoiceField, self).to_python(value)
super(TypedMultipleChoiceField, self).validate(value)
if value == self.empty_value or value in validators.EMPTY_VALUES:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': choice})
return new_value
def validate(self, value):
pass
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
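# Sketch -- each field's clean() runs in order on the same value:
#   f = ComboField(fields=[CharField(max_length=20), EmailField()])
#   f.clean(u'a@b.com')  # validated as a short string, then as an email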
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _(u'Enter a list of values.'),
}
def __init__(self, fields=(), *args, **kwargs):
super(MultiValueField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = ErrorList()
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in validators.EMPTY_VALUES]:
if self.required:
raise ValidationError(self.error_messages['required'])
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'])
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if self.required and field_value in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['required'])
try:
clean_data.append(field.clean(field_value))
except ValidationError, e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter.
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, required=True,
widget=None, label=None, initial=None, help_text=None,
*args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
full_file = os.path.join(self.path, f)
if os.path.isfile(full_file) and (self.match is None or self.match_re.search(f)):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
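# Sketch (the path and pattern here are placeholders):
#   f = FilePathField(path='/etc', match=r'.*\.conf$')
#   # f.choices is now a list of (full_path, basename) pairs for matching files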
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _(u'Enter a valid date.'),
'invalid_time': _(u'Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['invalid_date'])
if data_list[1] in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['invalid_time'])
return datetime.datetime.combine(*data_list)
return None
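# Sketch -- a concrete MultiValueField whose compress() combines the parts:
#   f = SplitDateTimeField(input_date_formats=['%Y-%m-%d'],
#                          input_time_formats=['%H:%M'])
#   f.clean([u'2010-12-21', u'14:30'])
#   # -> datetime.datetime(2010, 12, 21, 14, 30)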
class IPAddressField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid IPv4 address.'),
}
default_validators = [validators.validate_ipv4_address]
class SlugField(CharField):
default_error_messages = {
'invalid': _(u"Enter a valid 'slug' consisting of letters, numbers,"
u" underscores or hyphens."),
}
default_validators = [validators.validate_slug]
| bsd-3-clause |
leapcode/leap_mx | src/leap/mx/tcp_map.py | 1 | 2207 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# tcp_map.py
# Copyright (C) 2015 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from abc import ABCMeta
from abc import abstractproperty
from twisted.internet.protocol import ServerFactory
from twisted.python import log
# For info on codes, see: http://www.postfix.org/tcp_table.5.html
TCP_MAP_CODE_SUCCESS = 200
TCP_MAP_CODE_TEMPORARY_FAILURE = 400
TCP_MAP_CODE_PERMANENT_FAILURE = 500
# we have to also extend from object here to make the class a new-style class.
# If we don't, we get a TypeError because "new-style classes can't have only
# classic bases". This has to do with the way abc.ABCMeta works and the old
# and new style of python classes.
class LEAPPostfixTCPMapServerFactory(ServerFactory, object):
"""
A factory for postfix tcp map servers.
"""
__metaclass__ = ABCMeta
def __init__(self, couchdb):
"""
Initialize the factory.
:param couchdb: A CouchDB client.
:type couchdb: leap.mx.couchdbhelper.ConnectedCouchDB
"""
self._cdb = couchdb
@abstractproperty
def _query_message(self):
pass
def get(self, lookup_key):
"""
Look up user based on lookup_key.
:param lookup_key: The lookup key.
:type lookup_key: str
:return: A deferred that will be fired with the user's address, uuid
and pgp key.
:rtype: Deferred
"""
log.msg("%s: %s" % (self._query_message, lookup_key,))
d = self._cdb.getUuidAndPubkey(lookup_key)
d.addErrback(log.err)
return d
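# Hypothetical concrete factory, for illustration only -- a subclass just
# supplies the abstract _query_message property:
#   class AliasResolverFactory(LEAPPostfixTCPMapServerFactory):
#       @property
#       def _query_message(self):
#           return "Resolving alias"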
| agpl-3.0 |
leighpauls/k2cro4 | net/tools/testserver/testserver_base.py | 1 | 4300 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import optparse
import os
import struct
import sys
import warnings
# Ignore deprecation warnings; they make our output more cluttered.
warnings.filterwarnings("ignore", category=DeprecationWarning)
if sys.platform == 'win32':
import msvcrt
class Error(Exception):
"""Error class for this module."""
class OptionError(Error):
"""Error for bad command line options."""
class FileMultiplexer(object):
def __init__(self, fd1, fd2) :
self.__fd1 = fd1
self.__fd2 = fd2
def __del__(self) :
if self.__fd1 != sys.stdout and self.__fd1 != sys.stderr:
self.__fd1.close()
if self.__fd2 != sys.stdout and self.__fd2 != sys.stderr:
self.__fd2.close()
def write(self, text) :
self.__fd1.write(text)
self.__fd2.write(text)
def flush(self) :
self.__fd1.flush()
self.__fd2.flush()
class TestServerRunner(object):
"""Runs a test server and communicates with the controlling C++ test code.
Subclasses should override the create_server method to create their server
object, and the add_options method to add their own options.
"""
def __init__(self):
self.option_parser = optparse.OptionParser()
self.add_options()
def main(self):
self.options, self.args = self.option_parser.parse_args()
logfile = open('testserver.log', 'w')
sys.stderr = FileMultiplexer(sys.stderr, logfile)
if self.options.log_to_console:
sys.stdout = FileMultiplexer(sys.stdout, logfile)
else:
sys.stdout = logfile
server_data = {
'host': self.options.host,
}
self.server = self.create_server(server_data)
self._notify_startup_complete(server_data)
self.run_server()
def create_server(self, server_data):
"""Creates a server object and returns it.
Must populate server_data['port'], and can set additional server_data
elements if desired."""
raise NotImplementedError()
def run_server(self):
try:
self.server.serve_forever()
except KeyboardInterrupt:
print 'shutting down server'
self.server.stop = True
def add_options(self):
self.option_parser.add_option('--startup-pipe', type='int',
dest='startup_pipe',
help='File handle of pipe to parent process')
self.option_parser.add_option('--log-to-console', action='store_const',
const=True, default=False,
dest='log_to_console',
help='Enables or disables sys.stdout logging '
'to the console.')
self.option_parser.add_option('--port', default=0, type='int',
help='Port used by the server. If '
'unspecified, the server will listen on an '
'ephemeral port.')
self.option_parser.add_option('--host', default='127.0.0.1',
dest='host',
help='Hostname or IP upon which the server '
'will listen. Client connections will also '
'only be allowed from this address.')
def _notify_startup_complete(self, server_data):
# Notify the parent that we've started. (BaseServer subclasses
# bind their sockets on construction.)
if self.options.startup_pipe is not None:
server_data_json = json.dumps(server_data)
server_data_len = len(server_data_json)
print 'sending server_data: %s (%d bytes)' % (
server_data_json, server_data_len)
if sys.platform == 'win32':
fd = msvcrt.open_osfhandle(self.options.startup_pipe, 0)
else:
fd = self.options.startup_pipe
startup_pipe = os.fdopen(fd, "w")
# First write the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the other end of the
# pipe is on the same machine.
startup_pipe.write(struct.pack('=L', server_data_len))
startup_pipe.write(server_data_json)
startup_pipe.close()
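# Hypothetical minimal subclass, for illustration (create_my_server is an
# assumed helper, not part of this module):
#   class MyServerRunner(TestServerRunner):
#       def create_server(self, server_data):
#           server = create_my_server(self.options.host, self.options.port)
#           server_data['port'] = server.server_port
#           return server
#   if __name__ == '__main__':
#       MyServerRunner().main()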
| bsd-3-clause |
frreiss/tensorflow-fred | tensorflow/python/framework/device_test.py | 8 | 4043 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import device
from tensorflow.python.framework import device_spec
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
TEST_V1_AND_V2 = (("v1", device_spec.DeviceSpecV1),
("v2", device_spec.DeviceSpecV2))
class DeviceTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(*TEST_V1_AND_V2)
def testMerge(self, DeviceSpec): # pylint: disable=invalid-name
d = DeviceSpec.from_string("/job:muu/task:1/device:MyFunnyDevice:2")
self.assertEqual("/job:muu/task:1/device:MyFunnyDevice:2", d.to_string())
if not context.executing_eagerly():
with ops.device(device.merge_device("/device:GPU:0")):
var1 = variables.Variable(1.0)
self.assertEqual("/device:GPU:0", var1.device)
with ops.device(device.merge_device("/job:worker")):
var2 = variables.Variable(1.0)
self.assertEqual("/job:worker/device:GPU:0", var2.device)
with ops.device(device.merge_device("/device:CPU:0")):
var3 = variables.Variable(1.0)
self.assertEqual("/job:worker/device:CPU:0", var3.device)
with ops.device(device.merge_device("/job:ps")):
var4 = variables.Variable(1.0)
self.assertEqual("/job:ps/device:CPU:0", var4.device)
def testCanonicalName(self):
self.assertEqual("/job:foo/replica:0",
device.canonical_name("/job:foo/replica:0"))
self.assertEqual("/job:foo/replica:0",
device.canonical_name("/replica:0/job:foo"))
self.assertEqual("/job:foo/replica:0/task:0",
device.canonical_name("/job:foo/replica:0/task:0"))
self.assertEqual("/job:foo/replica:0/task:0",
device.canonical_name("/job:foo/task:0/replica:0"))
self.assertEqual("/device:CPU:0",
device.canonical_name("/device:CPU:0"))
self.assertEqual("/device:GPU:2",
device.canonical_name("/device:GPU:2"))
self.assertEqual("/job:foo/replica:0/task:0/device:GPU:0",
device.canonical_name(
"/job:foo/replica:0/task:0/device:GPU:0"))
self.assertEqual("/job:foo/replica:0/task:0/device:GPU:0",
device.canonical_name(
"/device:GPU:0/task:0/replica:0/job:foo"))
def testCheckValid(self):
device.check_valid("/job:foo/replica:0")
with self.assertRaisesRegex(ValueError, "invalid literal for int"):
device.check_valid("/job:j/replica:foo")
with self.assertRaisesRegex(ValueError, "invalid literal for int"):
device.check_valid("/job:j/task:bar")
with self.assertRaisesRegex(ValueError, "Unknown attribute: 'bar'"):
device.check_valid("/bar:muu/baz:2")
with self.assertRaisesRegex(ValueError, "Cannot specify multiple device"):
device.check_valid("/cpu:0/device:GPU:2")
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
taknevski/tensorflow-xsmm | tensorflow/contrib/cmake/tools/create_def_file.py | 4 | 4580 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""create_def_file.py - tool to create a windows def file.
The def file can be used to export symbols from the tensorflow dll to enable
tf.load_library().
Because the linker allows only 64K symbols to be exported per dll,
we filter the symbols down to the essentials. The regular expressions
we use for this are specific to tensorflow.
TODO: this works fine but there is an issue with exporting
'const char * const' and importing it from a user_ops. The problem is
on the importing end and using __declspec(dllimport) works around it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import io
import os
import re
import subprocess
import sys
import tempfile
# External tools we use that come with visual studio sdk and
# we assume that the caller has the correct PATH to the sdk
UNDNAME = "undname.exe"
DUMPBIN = "dumpbin.exe"
# Exclude if matched
EXCLUDE_RE = re.compile(r"deleting destructor|::internal::")
# Include if matched before exclude
INCLUDEPRE_RE = re.compile(r"tensorflow::internal::LogMessage|"
r"tensorflow::internal::CheckOpMessageBuilder")
# Include if matched after exclude
INCLUDE_RE = re.compile(r"^(TF_\w*)$|"
r"tensorflow::|"
r"functor::|"
r"perftools::gputools")
def get_args():
"""Parse command line."""
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="input library", required=True)
parser.add_argument("--output", help="output deffile", required=True)
args = parser.parse_args()
return args
def main():
"""main."""
args = get_args()
# Pipe dumpbin to extract all linkable symbols from a lib.
# Good symbols are collected in candidates and also written to
# a temp file.
candidates = []
tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
proc = subprocess.Popen([DUMPBIN, "/nologo", "/linkermember:1", args.input],
stdout=subprocess.PIPE)
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
cols = line.split()
if len(cols) < 2:
continue
sym = cols[1]
tmpfile.file.write(sym + "\n")
candidates.append(sym)
tmpfile.file.close()
exit_code = proc.wait()
if exit_code != 0:
print("{} failed, exit={}".format(DUMPBIN, exit_code))
return exit_code
# Run the symbols through undname to get their undecorated name
# so we can filter on something readable.
with open(args.output, "w") as def_fp:
# track dupes
taken = set()
# Header for the def file. Since the tensorflow.dll is actually called
# _pywrap_tensorflow.pyd in the python wheel, hint that in the def file.
def_fp.write("LIBRARY _pywrap_tensorflow_internal.pyd\n")
def_fp.write("EXPORTS\n")
def_fp.write("\t ??1OpDef@tensorflow@@UEAA@XZ\n")
# Each symbols returned by undname matches the same position in candidates.
# We compare on undname but use the decorated name from candidates.
dupes = 0
proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
for idx, line in enumerate(io.TextIOWrapper(proc.stdout, encoding="utf-8")):
decorated = candidates[idx]
if decorated in taken:
# Symbol is already in output, done.
dupes += 1
continue
if not INCLUDEPRE_RE.search(line):
if EXCLUDE_RE.search(line):
continue
if not INCLUDE_RE.search(line):
continue
def_fp.write("\t" + decorated + "\n")
taken.add(decorated)
exit_code = proc.wait()
if exit_code != 0:
print("{} failed, exit={}".format(UNDNAME, exit_code))
return exit_code
os.unlink(tmpfile.name)
print("symbols={}, taken={}, dupes={}"
.format(len(candidates), len(taken), dupes))
return 0
if __name__ == "__main__":
sys.exit(main())
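# Usage sketch (file names are hypothetical; run from a Visual Studio command
# prompt so dumpbin.exe and undname.exe are on PATH):
#   python create_def_file.py --input tensorflow.lib --output tensorflow.def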
| apache-2.0 |
Endika/odoomrp-wip | mrp_stock_location_route_fix/models/stock_warehouse.py | 28 | 1158 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, api
class StockWarehouse(models.Model):
_inherit = 'stock.warehouse'
@api.model
def _get_manufacture_pull_rule(self, warehouse):
return super(StockWarehouse, self.with_context(
lang='en_US'))._get_manufacture_pull_rule(warehouse)
| agpl-3.0 |
osm-fr/osmose-backend | plugins/TagFix_MultipleTag_Lang_fr.py | 4 | 5678 | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2011 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from plugins.Plugin import Plugin
class TagFix_MultipleTag_Lang_fr(Plugin):
only_for = ["fr"]
def init(self, logger):
Plugin.init(self, logger)
self.errors[3032] = self.def_class(item = 3032, level = 1, tags = ['tag', 'fix:chair'],
title = T_('Watch multiple tags'))
import re
self.Eglise = re.compile(u"(.glise|chapelle|basilique|cath.drale) de .*", re.IGNORECASE)
self.EgliseNot1 = re.compile(u"(.glise|chapelle|basilique|cath.drale) de la .*", re.IGNORECASE)
self.EgliseNot2 = re.compile(u"(.glise|chapelle|basilique|cath.drale) de l'.*", re.IGNORECASE)
self.MonumentAuxMorts = re.compile(u"monument aux morts.*", re.IGNORECASE)
self.SalleDesFetes = re.compile(u".*salle des f.tes.*", re.IGNORECASE)
self.MaisonDeQuartier = re.compile(u".*maison de quartier.*", re.IGNORECASE)
self.Marche = re.compile(u"marché( .+)?", re.IGNORECASE)
def node(self, data, tags):
err = []
if not "name" in tags:
return err
if "amenity" in tags:
if tags["amenity"] == "place_of_worship":
if self.Eglise.match(tags["name"]) and not self.EgliseNot1.match(tags["name"]) and not self.EgliseNot2.match(tags["name"]):
err.append({"class": 3032, "subclass": 1, "text": T_("\"name={0}\" is the localisation but not the name", tags["name"])})
else:
if "shop" not in tags and "public_transport" not in tags and self.Marche.match(tags["name"]):
err.append({"class": 3032, "subclass": 5, "fix": {"amenity": "marketplace"}})
if "historic" in tags:
if tags["historic"] == "monument":
if self.MonumentAuxMorts.match(tags["name"]):
err.append({"class": 3032, "subclass": 2, "text": T_(u"A war memorial is not a historic=monument"),
"fix": {"historic": "memorial"} })
if (not "highway" in tags) and (self.SalleDesFetes.match(tags["name"]) or self.MaisonDeQuartier.match(tags["name"])) and not ("amenity" in tags and tags["amenity"] == "community_centre"):
err.append({"class": 3032, "subclass": 3, "text": T_(u"Put a tag for a village hall or a community center"),
"fix": {"+": {"amenity": "community_centre"}} })
return err
def way(self, data, tags, nds):
return self.node(data, tags)
def relation(self, data, tags, members):
return self.node(data, tags)
###########################################################################
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
def test(self):
a = TagFix_MultipleTag_Lang_fr(None)
class _config:
options = {"language": "fr"}
class father:
config = _config()
a.father = father()
a.init(None)
for t in [{"amenity": "place_of_worship", "name": u"Église de Paris"},
{"amenity": "place_of_worship", "name": u"Cathédrale de Notre-Dame"},
{"name": u"Marché des Capucines"},
{"historic": "monument", "name": u"Monument aux morts du quartier"},
{"name": u"Salle des fêtes"},
{"name": u"Maison de quartier"},
]:
self.check_err(a.node(None, t), t)
self.check_err(a.way(None, t, None), t)
self.check_err(a.relation(None, t, None), t)
for t in [{"amenity": "place_of_worship", "name": u"Église de l'endroit"},
{"shop": "yes", "name": u"Marché des Capucines"},
{"amenity":"place_of_worship"},
{"historic": "yes", "name": u"Monument aux morts du quartier"},
{"historic": "monument", "name": u"Monument typique du quartier"},
{"highway": "primary", "name": u"Salle des fêtes"},
{"highway": "residential", "name": u"Maison de quartier"},
{"amenity": "community_centre", "name": u"Salle des fêtes"},
{"amenity": "community_centre", "name": u"Maison de quartier"},
]:
assert not a.way(None, t, None), t
| gpl-3.0 |
mattclark/osf.io | addons/onedrive/migrations/0002_auto_20171121_1426.py | 22 | 1426 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 20:26
from __future__ import unicode_literals
import datetime
import pytz
from django.db import migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('addons_onedrive', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='nodesettings',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, default=datetime.datetime(1970, 1, 1, 0, 0, tzinfo=pytz.utc), verbose_name='created'),
preserve_default=False,
),
migrations.AddField(
model_name='nodesettings',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
migrations.AddField(
model_name='usersettings',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, default=datetime.datetime(1970, 1, 1, 0, 0, tzinfo=pytz.utc), verbose_name='created'),
preserve_default=False,
),
migrations.AddField(
model_name='usersettings',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
]
| apache-2.0 |
ganyuling/nvm | npm/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
"""Object to manage temporary patching of xml.dom.minidom."""
def __init__(self):
# Preserve current xml.dom.minidom functions.
self.write_data = xml.dom.minidom._write_data
self.writexml = xml.dom.minidom.Element.writexml
# Inject replacement versions of a function and a method.
xml.dom.minidom._write_data = _Replacement_write_data
xml.dom.minidom.Element.writexml = _Replacement_writexml
def Cleanup(self):
if self.write_data:
xml.dom.minidom._write_data = self.write_data
xml.dom.minidom.Element.writexml = self.writexml
self.write_data = None
def __del__(self):
self.Cleanup()
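# Usage sketch -- patch, write, then restore the original minidom behavior
# (doc and writer are assumed to be an xml.dom.minidom Document and a
# file-like object):
#   fix = XmlFix()
#   try:
#       doc.writexml(writer)  # CR/LF/TAB in attribute values are now escaped
#   finally:
#       fix.Cleanup()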
| mit |
kromain/chromium-tools | tests/rietveld_test.py | 17 | 15115 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for rietveld.py."""
import logging
import os
import ssl
import sys
import time
import traceback
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.patches_data import GIT, RAW
from testing_support import auto_stub
import patch
import rietveld
def _api(files):
"""Mock a rietveld api request."""
return rietveld.json.dumps({'files': files})
def _file(
status, is_binary=False, num_chunks=1, chunk_id=789, property_changes=''):
"""Mock a file in a rietveld api request."""
return {
'status': status,
'is_binary': is_binary,
'num_chunks': num_chunks,
'id': chunk_id,
'property_changes': property_changes,
}
class BaseFixture(unittest.TestCase):
# Override.
TESTED_CLASS = Exception
def setUp(self):
super(BaseFixture, self).setUp()
# Access to a protected member XX of a client class
# pylint: disable=W0212
self.rietveld = self.TESTED_CLASS('url', 'email', 'password')
self.rietveld._send = self._rietveld_send
self.requests = []
def tearDown(self):
self.assertEqual([], self.requests)
super(BaseFixture, self).tearDown()
def _rietveld_send(self, url, *args, **kwargs):
self.assertTrue(self.requests, url)
request = self.requests.pop(0)
self.assertEqual(2, len(request))
self.assertEqual(url, request[0])
return request[1]
def _check_patch(self,
p,
filename,
diff,
source_filename=None,
is_binary=False,
is_delete=False,
is_git_diff=False,
is_new=False,
patchlevel=0,
svn_properties=None):
svn_properties = svn_properties or []
self.assertEqual(p.filename, filename)
self.assertEqual(p.source_filename, source_filename)
self.assertEqual(p.is_binary, is_binary)
self.assertEqual(p.is_delete, is_delete)
if hasattr(p, 'is_git_diff'):
self.assertEqual(p.is_git_diff, is_git_diff)
self.assertEqual(p.is_new, is_new)
if hasattr(p, 'patchlevel'):
self.assertEqual(p.patchlevel, patchlevel)
if diff:
self.assertEqual(p.get(True), diff)
if hasattr(p, 'svn_properties'):
self.assertEqual(p.svn_properties, svn_properties)
class RietveldTest(BaseFixture):
TESTED_CLASS = rietveld.Rietveld
def test_get_patch_empty(self):
self.requests = [('/api/123/456', '{}')]
patches = self.rietveld.get_patch(123, 456)
self.assertTrue(isinstance(patches, patch.PatchSet))
self.assertEqual([], patches.patches)
def test_get_patch_no_status(self):
self.requests = [
( '/api/123/456',
_api(
{
'tools/clang_check/README.chromium': {
'status': None,
'id': 789,
}})),
('/download/issue123_456_789.diff', RAW.DELETE),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'tools/clang_check/README.chromium',
RAW.DELETE,
is_delete=True)
def test_get_patch_2_files(self):
self.requests = [
('/api/123/456',
_api({'foo': _file('A'), 'file_a': _file('M', chunk_id=790)})),
('/download/issue123_456_789.diff', RAW.NEW),
('/download/issue123_456_790.diff', RAW.NEW_NOT_NULL),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(2, len(patches.patches))
self._check_patch(
patches.patches[0], 'file_a', RAW.NEW_NOT_NULL, is_new=True)
self._check_patch(patches.patches[1], 'foo', RAW.NEW, is_new=True)
def test_get_patch_add(self):
self.requests = [
('/api/123/456', _api({'foo': _file('A')})),
('/download/issue123_456_789.diff', RAW.NEW),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(patches.patches[0], 'foo', RAW.NEW, is_new=True)
def test_invalid_status(self):
self.requests = [
('/api/123/456', _api({'file_a': _file('B')})),
]
try:
self.rietveld.get_patch(123, 456)
self.fail()
except patch.UnsupportedPatchFormat, e:
self.assertEqual('file_a', e.filename)
def test_add_plus_merge(self):
# svn:mergeinfo is dropped.
properties = (
'\nAdded: svn:mergeinfo\n'
' Merged /branches/funky/file_b:r69-2775\n')
self.requests = [
('/api/123/456',
_api({'pp': _file('A+', property_changes=properties)})),
('/download/issue123_456_789.diff', GIT.COPY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'pp',
GIT.COPY,
is_git_diff=True,
is_new=True,
patchlevel=1,
source_filename='PRESUBMIT.py')
def test_add_plus_eol_style(self):
properties = '\nAdded: svn:eol-style\n + LF\n'
self.requests = [
('/api/123/456',
_api({'pp': _file('A+', property_changes=properties)})),
('/download/issue123_456_789.diff', GIT.COPY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'pp',
GIT.COPY,
is_git_diff=True,
is_new=True,
patchlevel=1,
source_filename='PRESUBMIT.py',
svn_properties=[('svn:eol-style', 'LF')])
def test_add_empty(self):
self.requests = [
('/api/123/456', _api({'__init__.py': _file('A ', num_chunks=0)})),
('/download/issue123_456_789.diff', RAW.CRAP_ONLY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'__init__.py',
RAW.CRAP_ONLY,
is_new=True)
def test_delete(self):
name = 'tools/clang_check/README.chromium'
self.requests = [
('/api/123/456', _api({name: _file('D')})),
('/download/issue123_456_789.diff', RAW.DELETE),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(patches.patches[0], name, RAW.DELETE, is_delete=True)
def test_delete_empty(self):
name = 'tests/__init__.py'
self.requests = [
('/api/123/456', _api({name: _file('D')})),
('/download/issue123_456_789.diff', GIT.DELETE_EMPTY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
name,
GIT.DELETE_EMPTY,
is_delete=True,
is_git_diff=True,
patchlevel=1)
def test_m_plus(self):
properties = '\nAdded: svn:eol-style\n + LF\n'
self.requests = [
('/api/123/456',
_api({'chrome/file.cc': _file('M+', property_changes=properties)})),
('/download/issue123_456_789.diff', RAW.PATCH),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'chrome/file.cc',
RAW.PATCH,
svn_properties=[('svn:eol-style', 'LF')])
def test_m_plus_unknown_prop(self):
properties = '\nAdded: svn:foobar\n + stuff\n'
self.requests = [
('/api/123/456',
_api({'file_a': _file('M+', property_changes=properties)})),
]
try:
self.rietveld.get_patch(123, 456)
self.fail()
except patch.UnsupportedPatchFormat, e:
self.assertEqual('file_a', e.filename)
def test_get_patch_moved(self):
self.requests = [
('/api/123/456', _api({'file_b': _file('A+')})),
('/download/issue123_456_789.diff', RAW.MINIMAL_RENAME),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'file_b',
RAW.MINIMAL_RENAME,
source_filename='file_a',
is_new=True)
def test_svn_properties(self):
# Line too long (N/80)
# pylint: disable=C0301
# To test one of these, run something like
# import json, pprint, urllib
# url = 'http://codereview.chromium.org/api/202046/1'
# pprint.pprint(json.load(urllib.urlopen(url))['files'])
# svn:mergeinfo across branches:
# http://codereview.chromium.org/202046/diff/1/third_party/libxml/xmlcatalog_dummy.cc
self.assertEqual(
[('svn:eol-style', 'LF')],
rietveld.Rietveld.parse_svn_properties(
u'\nAdded: svn:eol-style\n + LF\n', 'foo'))
# svn:eol-style property that is lost in the diff
# http://codereview.chromium.org/202046/diff/1/third_party/libxml/xmllint_dummy.cc
self.assertEqual(
[],
rietveld.Rietveld.parse_svn_properties(
u'\nAdded: svn:mergeinfo\n'
' Merged /branches/chrome_webkit_merge_branch/third_party/'
'libxml/xmldummy_mac.cc:r69-2775\n',
'foo'))
self.assertEqual(
[],
rietveld.Rietveld.parse_svn_properties(u'', 'foo'))
# http://codereview.chromium.org/api/7834045/15001
self.assertEqual(
[('svn:executable', '*'), ('svn:eol-style', 'LF')],
rietveld.Rietveld.parse_svn_properties(
'\n'
'Added: svn:executable\n'
' + *\n'
'Added: svn:eol-style\n'
' + LF\n',
'foo'))
# http://codereview.chromium.org/api/9139006/7001
self.assertEqual(
[('svn:mime-type', 'image/png')],
rietveld.Rietveld.parse_svn_properties(
'\n'
'Added: svn:mime-type\n'
' + image/png\n',
'foo'))
def test_bad_svn_properties(self):
try:
rietveld.Rietveld.parse_svn_properties(u'\n', 'foo')
self.fail()
except rietveld.patch.UnsupportedPatchFormat, e:
self.assertEqual('foo', e.filename)
# TODO(maruel): Change with no diff, only svn property change:
# http://codereview.chromium.org/6462019/
def test_search_all_empty(self):
url = (
'/search?format=json'
'&base=base'
'&created_after=2010-01-02'
'&created_before=2010-01-01'
'&modified_after=2010-02-02'
'&modified_before=2010-02-01'
'&owner=owner%40example.com'
'&reviewer=reviewer%40example.com'
'&closed=2'
'&commit=2'
'&private=2'
'&keys_only=True'
'&with_messages=True'
'&limit=23')
self.requests = [
(url, '{}'),
]
results = list(self.rietveld.search(
'owner@example.com',
'reviewer@example.com',
'base',
True,
True,
True,
'2010-01-01',
'2010-01-02',
'2010-02-01',
'2010-02-02',
23,
True,
True,
))
self.assertEqual([], results)
def test_results_cursor(self):
# Verify cursor iteration is transparent.
self.requests = [
('/search?format=json&base=base',
rietveld.json.dumps({
'cursor': 'MY_CURSOR',
'results': [{'foo': 'bar'}, {'foo': 'baz'}],
})),
('/search?format=json&base=base&cursor=MY_CURSOR',
rietveld.json.dumps({
'cursor': 'NEXT',
'results': [{'foo': 'prout'}],
})),
('/search?format=json&base=base&cursor=NEXT',
rietveld.json.dumps({
'cursor': 'VOID',
'results': [],
})),
]
expected = [
{'foo': 'bar'},
{'foo': 'baz'},
{'foo': 'prout'},
]
for i in self.rietveld.search(base='base'):
self.assertEqual(expected.pop(0), i)
self.assertEqual([], expected)
class CachingRietveldTest(BaseFixture):
  # Tests that only one request is made.
TESTED_CLASS = rietveld.CachingRietveld
def test_get_description(self):
self.requests = [
('/1/description', 'Blah blah blah'),
]
expected = 'Blah blah blah'
self.assertEqual(expected, self.rietveld.get_description(1))
self.assertEqual(expected, self.rietveld.get_description(1))
def test_get_issue_properties(self):
data = {'description': 'wow\r\nno CR!', 'messages': 'foo'}
self.requests = [
('/api/1?messages=true', rietveld.json.dumps(data)),
]
expected = {u'description': u'wow\nno CR!'}
expected_msg = {u'description': u'wow\nno CR!', u'messages': u'foo'}
self.assertEqual(expected, self.rietveld.get_issue_properties(1, False))
self.assertEqual(expected_msg, self.rietveld.get_issue_properties(1, True))
def test_get_patchset_properties(self):
self.requests = [
('/api/1/2', '{}'),
]
expected = {}
self.assertEqual(expected, self.rietveld.get_patchset_properties(1, 2))
self.assertEqual(expected, self.rietveld.get_patchset_properties(1, 2))
class ProbeException(Exception):
"""Deep-probe a value."""
value = None
def __init__(self, value):
super(ProbeException, self).__init__()
self.value = value
def MockSend(request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Mock upload.py's Send() to probe the timeout value"""
raise ProbeException(timeout)
def MockSendTimeout(request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Mock upload.py's Send() to raise SSLError"""
raise ssl.SSLError('The read operation timed out')
class DefaultTimeoutTest(auto_stub.TestCase):
TESTED_CLASS = rietveld.Rietveld
def setUp(self):
super(DefaultTimeoutTest, self).setUp()
self.rietveld = self.TESTED_CLASS('url', 'email', 'password')
self.mock(self.rietveld.rpc_server, 'Send', MockSend)
self.sleep_time = 0
def test_timeout_get(self):
with self.assertRaises(ProbeException) as cm:
self.rietveld.get('/api/1234')
self.assertIsNotNone(cm.exception.value, 'Rietveld timeout was not set: %s'
% traceback.format_exc())
def test_timeout_post(self):
with self.assertRaises(ProbeException) as cm:
self.rietveld.post('/api/1234', [('key', 'data')])
self.assertIsNotNone(cm.exception.value, 'Rietveld timeout was not set: %s'
% traceback.format_exc())
def MockSleep(self, t):
self.sleep_time = t
def test_ssl_timeout_post(self):
self.mock(self.rietveld.rpc_server, 'Send', MockSendTimeout)
self.mock(time, 'sleep', self.MockSleep)
self.sleep_time = 0
with self.assertRaises(ssl.SSLError):
self.rietveld.post('/api/1234', [('key', 'data')])
self.assertNotEqual(self.sleep_time, 0)
if __name__ == '__main__':
logging.basicConfig(level=[
logging.ERROR, logging.INFO, logging.DEBUG][min(2, sys.argv.count('-v'))])
unittest.main()
| bsd-3-clause |
LucHermitte/ITK | Modules/ThirdParty/pygccxml/src/pygccxml/parser/project_reader.py | 12 | 25417 | # Copyright 2014 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import os
import time
import types
from . import source_reader
from . import declarations_cache
import pygccxml.declarations
from .. import utils
class COMPILATION_MODE:
ALL_AT_ONCE = 'all at once'
FILE_BY_FILE = 'file by file'
class file_configuration_t(object):
"""
source code location configuration.
The class instance uses "variant" interface to represent the following
data:
1) path to a C++ source file
2) path to GCC-XML generated XML file
3) path to a C++ source file and path to GCC-XML generated file
       In this case, if the XML file does not exist, it will be created. The
       next time you ask to parse the source file, the XML file will be used
       instead.
       Small tip: you can set up your makefile to delete the XML files every
       time the relevant source file changes.
4) Python string, that contains valid C++ code
    There are a few functions that will help you construct a
    :class:`file_configuration_t` object:
* :func:`create_source_fc`
* :func:`create_gccxml_fc`
* :func:`create_cached_source_fc`
* :func:`create_text_fc`
"""
class CONTENT_TYPE:
STANDARD_SOURCE_FILE = 'standard source file'
CACHED_SOURCE_FILE = 'cached source file'
GCCXML_GENERATED_FILE = 'gccxml generated file'
TEXT = 'text'
def __init__(
self,
data,
start_with_declarations=None,
content_type=CONTENT_TYPE.STANDARD_SOURCE_FILE,
cached_source_file=None):
object.__init__(self)
self.__data = data
if not start_with_declarations:
start_with_declarations = []
self.__start_with_declarations = start_with_declarations
self.__content_type = content_type
self.__cached_source_file = cached_source_file
if not self.__cached_source_file \
and self.__content_type == self.CONTENT_TYPE.CACHED_SOURCE_FILE:
self.__cached_source_file = self.__data + '.xml'
@property
def data(self):
return self.__data
@property
def start_with_declarations(self):
return self.__start_with_declarations
@property
def content_type(self):
return self.__content_type
@property
def cached_source_file(self):
return self.__cached_source_file
def create_text_fc(text):
"""
    Creates a :class:`parser.file_configuration_t` instance, configured to
    hold a Python string containing valid C++ code
:param text: C++ code
:type text: str
:rtype: :class:`parser.file_configuration_t`
"""
return file_configuration_t(
data=text,
content_type=file_configuration_t.CONTENT_TYPE.TEXT)
def create_source_fc(header):
"""
Creates :class:`parser.file_configuration_t` instance, configured to
contain path to C++ source file
:param header: path to C++ source file
:type header: str
:rtype: :class:`parser.file_configuration_t`
"""
return file_configuration_t(
data=header,
content_type=file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE)
def create_gccxml_fc(xml_file):
"""
Creates :class:`parser.file_configuration_t` instance, configured to
contain path to GCC-XML generated XML file.
:param xml_file: path to GCC-XML generated XML file
:type xml_file: str
:rtype: :class:`parser.file_configuration_t`
"""
return file_configuration_t(
data=xml_file,
content_type=file_configuration_t.CONTENT_TYPE.GCCXML_GENERATED_FILE)
def create_cached_source_fc(header, cached_source_file):
"""
    Creates a :class:`parser.file_configuration_t` instance, configured to
    hold the path to a GCC-XML generated XML file and a C++ source file. If
    the XML file does not exist, it will be created and used for parsing. If
    it already exists, it will be used directly.
:param header: path to C++ source file
:type header: str
:param cached_source_file: path to GCC-XML generated XML file
:type cached_source_file: str
:rtype: :class:`parser.file_configuration_t`
"""
return file_configuration_t(
data=header,
cached_source_file=cached_source_file,
content_type=file_configuration_t.CONTENT_TYPE.CACHED_SOURCE_FILE)
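# Hypothetical usage sketch (not part of pygccxml): combining the helper
# constructors above with project_reader_t (defined below); the config object
# and the header/XML paths are placeholders.
def _example_read_project(config):
    files = [
        create_source_fc('a.hpp'),
        create_cached_source_fc('b.hpp', 'b.hpp.xml'),
    ]
    reader = project_reader_t(config)
    return reader.read_files(files, COMPILATION_MODE.FILE_BY_FILE)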
class project_reader_t:
"""parses header files and returns the contained declarations"""
def __init__(self, config, cache=None, decl_factory=None):
"""
:param config: GCCXML configuration
:type config: :class:gccxml_configuration_t
:param cache: declaration cache, by default a cache functionality will
not be used
:type cache: :class:`cache_base_t` instance or `str`
:param decl_factory: declaration factory
:type decl_factory: :class:`decl_factory_t`
"""
self.__config = config
self.__dcache = None
if isinstance(cache, declarations_cache.cache_base_t):
self.__dcache = cache
elif utils.is_str(cache):
self.__dcache = declarations_cache.file_cache_t(cache)
else:
self.__dcache = declarations_cache.dummy_cache_t()
self.__decl_factory = decl_factory
if not decl_factory:
self.__decl_factory = pygccxml.declarations.decl_factory_t()
self.logger = utils.loggers.gccxml
@staticmethod
def get_os_file_names(files):
"""
returns file names
:param files: list of strings and\\or :class:`file_configuration_t`
instances.
:type files: list
"""
fnames = []
for f in files:
if utils.is_str(f):
fnames.append(f)
elif isinstance(f, file_configuration_t):
if f.content_type in (
file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE,
file_configuration_t.CONTENT_TYPE.CACHED_SOURCE_FILE):
fnames.append(f.data)
else:
pass
return fnames
def read_files(
self,
files,
compilation_mode=COMPILATION_MODE.FILE_BY_FILE):
"""
parses a set of files
:param files: list of strings and\\or :class:`file_configuration_t`
instances.
:type files: list
:param compilation_mode: determines whether the files are parsed
individually or as one single chunk
:type compilation_mode: :class:`COMPILATION_MODE`
:rtype: [:class:`declaration_t`]
"""
if compilation_mode == COMPILATION_MODE.ALL_AT_ONCE \
and len(files) == len(self.get_os_file_names(files)):
return self.__parse_all_at_once(files)
else:
if compilation_mode == COMPILATION_MODE.ALL_AT_ONCE:
msg = ''.join([
"Unable to parse files using ALL_AT_ONCE mode. ",
"There is some file configuration that is not file. ",
"pygccxml.parser.project_reader_t switches to ",
"FILE_BY_FILE mode."])
self.logger.warning(msg)
return self.__parse_file_by_file(files)
def __parse_file_by_file(self, files):
namespaces = []
config = self.__config.clone()
self.logger.debug("Reading project files: file by file")
for prj_file in files:
reader = None
header = None
content_type = None
if isinstance(prj_file, file_configuration_t):
del config.start_with_declarations[:]
config.start_with_declarations.extend(
prj_file.start_with_declarations)
header = prj_file.data
content_type = prj_file.content_type
else:
config = self.__config
header = prj_file
content_type = \
file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE
reader = source_reader.source_reader_t(
config,
self.__dcache,
self.__decl_factory)
decls = None
if content_type == \
file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE:
self.logger.info('Parsing source file "%s" ... ' % header)
decls = reader.read_file(header)
elif content_type == \
file_configuration_t.CONTENT_TYPE.GCCXML_GENERATED_FILE:
self.logger.info('Parsing xml file "%s" ... ' % header)
decls = reader.read_xml_file(header)
elif content_type == \
file_configuration_t.CONTENT_TYPE.CACHED_SOURCE_FILE:
# TODO: raise error when header file does not exist
if not os.path.exists(prj_file.cached_source_file):
dir_ = os.path.split(prj_file.cached_source_file)[0]
if dir_ and not os.path.exists(dir_):
os.makedirs(dir_)
self.logger.info(
'Creating xml file "%s" from source file "%s" ... ' %
(prj_file.cached_source_file, header))
reader.create_xml_file(header, prj_file.cached_source_file)
self.logger.info(
'Parsing xml file "%s" ... ' %
prj_file.cached_source_file)
decls = reader.read_xml_file(prj_file.cached_source_file)
else:
decls = reader.read_string(header)
namespaces.append(decls)
self.logger.debug("Flushing cache... ")
start_time = time.clock()
self.__dcache.flush()
self.logger.debug(
"Cache has been flushed in %.1f secs" %
(time.clock() - start_time))
answer = []
self.logger.debug("Joining namespaces ...")
for file_nss in namespaces:
answer = self._join_top_namespaces(answer, file_nss)
self.logger.debug("Joining declarations ...")
for ns in answer:
if isinstance(ns, pygccxml.declarations.namespace_t):
self._join_declarations(ns)
leaved_classes = self._join_class_hierarchy(answer)
types = self.__declarated_types(answer)
self.logger.debug("Relinking declared types ...")
self._relink_declarated_types(leaved_classes, types)
source_reader.bind_aliases(pygccxml.declarations.make_flatten(answer))
return answer
def __parse_all_at_once(self, files):
config = self.__config.clone()
self.logger.debug("Reading project files: all at once")
header_content = []
for header in files:
if isinstance(header, file_configuration_t):
del config.start_with_declarations[:]
config.start_with_declarations.extend(
header.start_with_declarations)
header_content.append(
'#include "%s" %s' %
(header.data, os.linesep))
else:
header_content.append(
'#include "%s" %s' %
(header, os.linesep))
return self.read_string(''.join(header_content))
def read_string(self, content):
"""Parse a string containing C/C++ source code.
:param content: C/C++ source code.
:type content: str
:rtype: Declarations
"""
reader = source_reader.source_reader_t(
self.__config,
None,
self.__decl_factory)
return reader.read_string(content)
def read_xml(self, file_configuration):
"""parses C++ code, defined on the file_configurations and returns
GCCXML generated file content"""
xml_file_path = None
delete_xml_file = True
fc = file_configuration
reader = source_reader.source_reader_t(
self.__config,
None,
self.__decl_factory)
try:
if fc.content_type == fc.CONTENT_TYPE.STANDARD_SOURCE_FILE:
self.logger.info('Parsing source file "%s" ... ' % fc.data)
xml_file_path = reader.create_xml_file(fc.data)
elif fc.content_type == \
file_configuration_t.CONTENT_TYPE.GCCXML_GENERATED_FILE:
self.logger.info('Parsing xml file "%s" ... ' % fc.data)
xml_file_path = fc.data
delete_xml_file = False
elif fc.content_type == fc.CONTENT_TYPE.CACHED_SOURCE_FILE:
# TODO: raise error when header file does not exist
if not os.path.exists(fc.cached_source_file):
dir_ = os.path.split(fc.cached_source_file)[0]
if dir_ and not os.path.exists(dir_):
os.makedirs(dir_)
self.logger.info(
'Creating xml file "%s" from source file "%s" ... ' %
(fc.cached_source_file, fc.data))
xml_file_path = reader.create_xml_file(
fc.data,
fc.cached_source_file)
else:
xml_file_path = fc.cached_source_file
else:
xml_file_path = reader.create_xml_file_from_string(fc.data)
xml_file = open(xml_file_path, 'r')
xml = xml_file.read()
xml_file.close()
utils.remove_file_no_raise(xml_file_path)
return xml
finally:
if xml_file_path and delete_xml_file:
utils.remove_file_no_raise(xml_file_path)
def _join_top_namespaces(self, main_ns_list, other_ns_list):
answer = main_ns_list[:]
for other_ns in other_ns_list:
main_ns = pygccxml.declarations.find_declaration(
answer,
type=pygccxml.declarations.namespace_t,
name=other_ns._name,
recursive=False)
if main_ns:
main_ns.take_parenting(other_ns)
else:
answer.append(other_ns)
return answer
def _join_namespaces(self, nsref):
assert isinstance(nsref, pygccxml.declarations.namespace_t)
# decl.__class__ : { decl.name : [decls] } double declaration hash
ddhash = {}
decls = []
for decl in nsref.declarations:
if decl.__class__ not in ddhash:
ddhash[decl.__class__] = {decl._name: [decl]}
decls.append(decl)
else:
joined_decls = ddhash[decl.__class__]
if decl._name not in joined_decls:
decls.append(decl)
joined_decls[decl._name] = [decl]
else:
if isinstance(decl, pygccxml.declarations.calldef_t):
if decl not in joined_decls[decl._name]:
# functions has overloading
decls.append(decl)
joined_decls[decl._name].append(decl)
elif isinstance(decl, pygccxml.declarations.enumeration_t):
# unnamed enums
if not decl.name and decl not in \
joined_decls[decl._name]:
decls.append(decl)
joined_decls[decl._name].append(decl)
elif isinstance(decl, pygccxml.declarations.class_t):
# unnamed classes
if not decl.name and decl not in \
joined_decls[decl._name]:
decls.append(decl)
joined_decls[decl._name].append(decl)
else:
assert 1 == len(joined_decls[decl._name])
if isinstance(decl, pygccxml.declarations.namespace_t):
joined_decls[decl._name][0].take_parenting(decl)
class_t = pygccxml.declarations.class_t
class_declaration_t = pygccxml.declarations.class_declaration_t
if class_t in ddhash and class_declaration_t in ddhash:
# if there is a class and its forward declaration - get rid of the
# second one.
class_names = set()
for name, same_name_classes in ddhash[class_t].items():
if not name:
continue
class_names.add(same_name_classes[0].mangled)
class_declarations = ddhash[class_declaration_t]
for name, same_name_class_declarations in \
class_declarations.items():
if not name:
continue
for class_declaration in same_name_class_declarations:
if class_declaration.mangled and \
class_declaration.mangled in class_names:
decls.remove(class_declaration)
nsref.declarations = decls
def _join_class_hierarchy(self, namespaces):
create_key = lambda decl: (
decl.location.as_tuple(),
tuple(pygccxml.declarations.declaration_path(decl)))
classes = [
decl for decl in pygccxml.declarations.make_flatten(namespaces)
if isinstance(decl, pygccxml.declarations.class_t)]
leaved_classes = {}
# selecting classes to leave
for class_ in classes:
key = create_key(class_)
if key not in leaved_classes:
leaved_classes[key] = class_
# replacing base and derived classes with those that should be leave
# also this loop will add missing derived classes to the base
for class_ in classes:
leaved_class = leaved_classes[create_key(class_)]
for base_info in class_.bases:
leaved_base = leaved_classes[
create_key(
base_info.related_class)]
# treating base class hierarchy of leaved_class
leaved_base_info = pygccxml.declarations.hierarchy_info_t(
related_class=leaved_base, access=base_info.access)
if leaved_base_info not in leaved_class.bases:
leaved_class.bases.append(leaved_base_info)
else:
index = leaved_class.bases.index(leaved_base_info)
leaved_class.bases[
index].related_class = leaved_base_info.related_class
# treating derived class hierarchy of leaved_base
leaved_derived_for_base_info = \
pygccxml.declarations.hierarchy_info_t(
related_class=leaved_class,
access=base_info.access)
if leaved_derived_for_base_info not in leaved_base.derived:
leaved_base.derived.append(leaved_derived_for_base_info)
else:
index = leaved_base.derived.index(
leaved_derived_for_base_info)
leaved_base.derived[index].related_class = \
leaved_derived_for_base_info.related_class
for derived_info in class_.derived:
leaved_derived = leaved_classes[
create_key(
derived_info.related_class)]
# treating derived class hierarchy of leaved_class
leaved_derived_info = pygccxml.declarations.hierarchy_info_t(
related_class=leaved_derived, access=derived_info.access)
if leaved_derived_info not in leaved_class.derived:
leaved_class.derived.append(leaved_derived_info)
# treating base class hierarchy of leaved_derived
leaved_base_for_derived_info = \
pygccxml.declarations.hierarchy_info_t(
related_class=leaved_class,
access=derived_info.access)
if leaved_base_for_derived_info not in leaved_derived.bases:
leaved_derived.bases.append(leaved_base_for_derived_info)
        # this loop removes duplicate class instances from parent.declarations
for class_ in classes:
key = create_key(class_)
if id(leaved_classes[key]) == id(class_):
continue
else:
declarations = None
if class_.parent:
declarations = class_.parent.declarations
                else:
                    # yes, we are talking about a global class that doesn't
                    # belong to any namespace; usually these are
                    # compiler-generated top-level classes
                    declarations = namespaces
declarations_ids = [id(decl) for decl in declarations]
del declarations[declarations_ids.index(id(class_))]
return leaved_classes
def _relink_declarated_types(self, leaved_classes, declarated_types):
create_key = lambda decl: (
decl.location.as_tuple(),
tuple(
pygccxml.declarations.declaration_path(decl)))
create_mangled_key = lambda decl: (
decl.location.as_tuple(),
decl.mangled)
mangled_leaved_classes = {}
for cls in leaved_classes.values():
mangled_leaved_classes[create_mangled_key(cls)] = cls
for decl_wrapper_type in declarated_types:
            # it is possible that the cache contains a reference to a dropped
            # class; we need to clear it
decl_wrapper_type.cache.reset()
if isinstance(
decl_wrapper_type.declaration,
pygccxml.declarations.class_t):
key = create_key(decl_wrapper_type.declaration)
if key in leaved_classes:
decl_wrapper_type.declaration = leaved_classes[key]
else:
if decl_wrapper_type.declaration._name.startswith(
'__vmi_class_type_info_pseudo'):
continue
msg = []
msg.append(
"Unable to find out actual class definition: '%s'." %
decl_wrapper_type.declaration._name)
msg.append((
"Class definition has been changed from one " +
"compilation to an other."))
msg.append((
"Why did it happen to me? Here is a short list " +
"of reasons: "))
msg.append((
" 1. There are different preprocessor " +
"definitions applied on same file during compilation"))
msg.append(" 2. Bug in pygccxml.")
self.logger.error(os.linesep.join(msg))
elif isinstance(
decl_wrapper_type.declaration,
pygccxml.declarations.class_declaration_t):
key = create_mangled_key(decl_wrapper_type.declaration)
if key in mangled_leaved_classes:
decl_wrapper_type.declaration = mangled_leaved_classes[key]
def _join_declarations(self, declref):
self._join_namespaces(declref)
for ns in declref.declarations:
if isinstance(ns, pygccxml.declarations.namespace_t):
self._join_declarations(ns)
def __declarated_types(self, namespaces):
def get_from_type(cpptype):
if not cpptype:
return []
elif isinstance(cpptype, pygccxml.declarations.fundamental_t):
return []
elif isinstance(cpptype, pygccxml.declarations.declarated_t):
return [cpptype]
elif isinstance(cpptype, pygccxml.declarations.compound_t):
return get_from_type(cpptype.base)
elif isinstance(cpptype, pygccxml.declarations.calldef_type_t):
types = get_from_type(cpptype.return_type)
for arg in cpptype.arguments_types:
types.extend(get_from_type(arg))
return types
else:
assert isinstance(
cpptype,
(pygccxml.declarations.unknown_t,
pygccxml.declarations.ellipsis_t))
return []
types = []
for decl in pygccxml.declarations.make_flatten(namespaces):
if isinstance(decl, pygccxml.declarations.calldef_t):
types.extend(get_from_type(decl.function_type()))
elif isinstance(
decl,
(pygccxml.declarations.typedef_t,
pygccxml.declarations.variable_t)):
types.extend(get_from_type(decl.type))
return types
| apache-2.0 |
PYPIT/PYPIT | pypeit/data/arc_lines/generate.py | 1 | 5420 | """
Module for generating Arc Line lists
Should be run where it is located (for now)
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, imp, glob, pdb, gzip
import subprocess
from astropy import units as u
from astropy.units.quantity import Quantity
from astropy import constants as const
from astropy.io import fits, ascii
from astropy.table import QTable, Column, Table
# def line_data
# def mk_neon
# TODO
#
def line_data(nrows=1):
    ''' Defines the dict for arc line data
Parameters:
----------
nrows: int, optional
Number of rows in Table [default = 1]
'''
aldict = {
'name': ' '*20, # Name
'wave': 0.*u.AA, # Wavelength (Quantity) :: NIST convention (air for >200nm)
'f': 0., # Oscillator strength
# 'gk': 0., # Degeneracy of the upper level
# 'Ej': 0./u.cm, # Energy of lower level (relative to ground state)
# 'Ek': 0./u.cm, # Energy of upper level (relative to ground state)
# 'Ex': 0./u.cm, # Excitation energy (cm^-1)
# 'A': 0./u.s, # Einstein coefficient
# 'gj': 0, # Lower statistical weight (2J+1)
# 'gk': 0, # Upper statistical weight (2J+1)
# 'gamma': 0./u.s, # Sum of A
# 'nj': 0, # Orbital level of lower state (or vibrational level)
# 'nk': 0, # Orbital level of upper state (or vibrational level)
# 'Jj': 0., # Tot ang mom (z projection) of lower state (or rotation level)
# 'Jk': 0., # Tot ang mom (z projection) of upper state (or rotation level)
# 'el': 0, # Electronic transition (2=Lyman (B-X), 3=Werner (C-X))
'Z': 0, # Atomic number (for atoms)
'Am': 0, # Mass number (often written as "A"; only used for D)
'ion': 0, # Ionic state (1=Neutral)
# 'mol': ' '*10, # Molecular name (H2, HD, CO, C13O)
'intensity': 0., # Intensity -- Usually taken from NIST
'f_an': 0, # Flag for analysis (-1=NG, 0=OK, 1=GOOD)
'f_bl': 0, # Flag for blending (0=UNKNOWN,1=GOOD,2=SELF,4=OTHER)
'f_st': 0, # Flag for brightness (0=UNKNOWN,1=POOR,2=WEAK,3=FAIR,4=GOOD)
'Ref': ' '*50, # References
'Comment': ' '*50, # Comment
'group': 0 # Flag for grouping
}
# Table
clms = []
for key in aldict.keys():
if type(aldict[key]) is Quantity:
clm = Column( ([aldict[key].value]*nrows)*aldict[key].unit, name=key)
else:
clm = Column( [aldict[key]]*nrows, name=key)
# Append
clms.append(clm)
tbl = Table(clms)
tbl = tbl[('name','wave','f','Z','Am','ion','intensity','f_an','f_bl',
'f_st','Ref','Comment','group')]
return aldict, tbl
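# Hypothetical usage (not part of the module): build a small table with
# line_data() and fill the wavelength column; the three wavelengths are
# example Ne I air values, and the units come from the astropy import above.
def _example_table():
    _, tbl = line_data(nrows=3)
    tbl['wave'] = [5852.49, 6143.06, 6402.25] * u.AA
    return tbl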
#
def mk_neon():
'''Generate Ne line list from spec2d (DEIMOS) NIST list
Restricting to >5500A (for now)
Those values are in Air, converted from Vacuum as given by NIST
Note that these differ (slightly) from the values in the spec2d NIST blue list
'''
outfil = 'Ne_air_linelist.dat'
# Read spec2d file
f = open('spec2d_lamp_NIST.dat','r')
lines = f.readlines()
f.close()
# Generate table to fill
_, ne_table = line_data(500)
# Loop
cnt = 0
for line in lines:
# Comment
if line[0] == '#':
continue
# Search for Ne
if 'Ne' not in line[22:28]:
continue
wave = float(line[0:11])
if wave < 5500.:
continue
# Fill
ne_table['wave'][cnt] = wave
ne_table['intensity'][cnt] = float(line[11:17])
# Quality
qual = line[17:23].strip()
if qual == 'BLEND':
ne_table['f_bl'][cnt] = 2
elif qual == 'GOOD':
ne_table['f_bl'][cnt] = 1
ne_table['f_st'][cnt] = 4
ne_table['f_an'][cnt] = 1
elif qual == 'FAIR':
ne_table['f_bl'][cnt] = 1
ne_table['f_st'][cnt] = 3
elif qual in ['JUNK','BAD']:
ne_table['f_an'][cnt] = -1
else:
pdb.set_trace()
# Comment
if len(line) > 26:
ne_table['Comment'][cnt] = line[27:].strip()
else:
ne_table['Comment'][cnt] = ' '
# Last bits
ne_table['Z'][cnt] = 10
ne_table['Ref'][cnt] = 'NIST,spec2d'
ne_table['name'][cnt] = 'Ne {:.2f}'.format(wave)
# Increment
cnt += 1
# Cut
ne_table = ne_table[0:cnt]
# Write to file
subt = ne_table[['name','wave','f_an','f_bl','f_st','intensity','Ref','Comment']]
subt.write(outfil, delimiter='|',format='ascii.fixed_width')
print('Wrote Ne line list: {:s}'.format(outfil))
#
def roman_to_number(val):
'''Convert simple Roman numerals to Arabic
Parameters:
-------------
val: str or unicoce
Roman numeral for conversion
Returns:
------------
Number
'''
r_to_n = dict(I=1, II=2, III=3, IV=4, V=5, VI=6,
VII=7, VIII=8, IX=9, X=10)
try:
num = r_to_n[val.strip()]
except KeyError:
print(val)
pdb.set_trace()
return num
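# Illustrative check (not part of the module): the helper above handles only
# the numerals I through X.
def _example_roman():
    return [roman_to_number(v) for v in ('I', 'IV', 'X')]  # -> [1, 4, 10]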
###########################################
| gpl-3.0 |
kitsunde/ansible | lib/ansible/template/vars.py | 45 | 3455 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.utils import missing
__all__ = ['AnsibleJ2Vars']
class AnsibleJ2Vars:
'''
Helper class to template all variable content before jinja2 sees it. This is
done by hijacking the variable storage that jinja2 uses, and overriding __contains__
    and __getitem__ to look like a dict. An added bonus is avoiding duplication
    of the large hashes that inject tends to produce.
To facilitate using builtin jinja2 things like range, globals are also handled here.
'''
def __init__(self, templar, globals, locals=None, *extras):
'''
Initializes this object with a valid Templar() object, as
well as several dictionaries of variables representing
different scopes (in jinja2 terminology).
'''
self._templar = templar
self._globals = globals
self._extras = extras
self._locals = dict()
if isinstance(locals, dict):
for key, val in locals.iteritems():
if key[:2] == 'l_' and val is not missing:
self._locals[key[2:]] = val
def __contains__(self, k):
if k in self._templar._available_variables:
return True
if k in self._locals:
return True
for i in self._extras:
if k in i:
return True
if k in self._globals:
return True
return False
def __getitem__(self, varname):
if varname not in self._templar._available_variables:
if varname in self._locals:
return self._locals[varname]
for i in self._extras:
if varname in i:
return i[varname]
if varname in self._globals:
return self._globals[varname]
else:
raise KeyError("undefined variable: %s" % varname)
variable = self._templar._available_variables[varname]
# HostVars is special, return it as-is, as is the special variable
# 'vars', which contains the vars structure
from ansible.vars.hostvars import HostVars
if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars):
return variable
else:
return self._templar.template(variable)
def add_locals(self, locals):
'''
If locals are provided, create a copy of self containing those
locals in addition to what is already in this variable proxy.
'''
if locals is None:
return self
return AnsibleJ2Vars(self._templar, self._globals, locals=locals, *self._extras)
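# Hypothetical illustration (not part of Ansible): __getitem__ above checks
# locals first, then the extra scopes, then globals. A dummy templar object
# with no available variables demonstrates that order.
def _example_lookup_order():
    class _DummyTemplar(object):
        _available_variables = {}
        def template(self, variable):
            return variable
    jvars = AnsibleJ2Vars(_DummyTemplar(), {'x': 'from globals'},
                          locals={'l_x': 'from locals'})
    return jvars['x']  # returns 'from locals', shadowing the global value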
| gpl-3.0 |
maxdeliso/elevatorSim | Lib/sunau.py | 55 | 16822 | """Stuff to parse Sun and NeXT audio files.
An audio file consists of a header followed by the data. The structure
of the header is as follows.
+---------------+
| magic word |
+---------------+
| header size |
+---------------+
| data size |
+---------------+
| encoding |
+---------------+
| sample rate |
+---------------+
| # of channels |
+---------------+
| info |
| |
+---------------+
The magic word consists of the 4 characters '.snd'. Apart from the
info field, all header fields are 4 bytes in size. They are all
32-bit unsigned integers encoded in big-endian byte order.
The header size really gives the start of the data.
The data size is the physical size of the data. From the other
parameters the number of frames can be calculated.
The encoding gives the way in which audio samples are encoded.
Possible values are listed below.
The info field currently consists of an ASCII string giving a
human-readable description of the audio file. The info field is
padded with NUL bytes to the header size.
Usage.
Reading audio files:
f = sunau.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' or 'ULAW')
getcompname() -- returns human-readable version of
compression type ('not compressed' matches 'NONE')
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing audio files:
f = sunau.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
-- write audio frames without pathing up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
# from <multimedia/audio_filehdr.h>
AUDIO_FILE_MAGIC = 0x2e736e64
AUDIO_FILE_ENCODING_MULAW_8 = 1
AUDIO_FILE_ENCODING_LINEAR_8 = 2
AUDIO_FILE_ENCODING_LINEAR_16 = 3
AUDIO_FILE_ENCODING_LINEAR_24 = 4
AUDIO_FILE_ENCODING_LINEAR_32 = 5
AUDIO_FILE_ENCODING_FLOAT = 6
AUDIO_FILE_ENCODING_DOUBLE = 7
AUDIO_FILE_ENCODING_ADPCM_G721 = 23
AUDIO_FILE_ENCODING_ADPCM_G722 = 24
AUDIO_FILE_ENCODING_ADPCM_G723_3 = 25
AUDIO_FILE_ENCODING_ADPCM_G723_5 = 26
AUDIO_FILE_ENCODING_ALAW_8 = 27
# from <multimedia/audio_hdr.h>
AUDIO_UNKNOWN_SIZE = 0xFFFFFFFF # ((unsigned)(~0))
_simple_encodings = [AUDIO_FILE_ENCODING_MULAW_8,
AUDIO_FILE_ENCODING_LINEAR_8,
AUDIO_FILE_ENCODING_LINEAR_16,
AUDIO_FILE_ENCODING_LINEAR_24,
AUDIO_FILE_ENCODING_LINEAR_32,
AUDIO_FILE_ENCODING_ALAW_8]
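# Minimal sketch (not part of this module): reading the six fixed 32-bit
# big-endian header fields described in the docstring above, using struct.
# The path argument is a placeholder; builtins.open is used because this
# module shadows open() with its own helper further down.
def _example_read_header(path):
    import builtins
    import struct
    with builtins.open(path, 'rb') as f:
        fields = struct.unpack('>6L', f.read(24))
    # (magic, header size, data size, encoding, sample rate, # of channels)
    return fields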
class Error(Exception):
pass
def _read_u32(file):
x = 0
for i in range(4):
byte = file.read(1)
if not byte:
raise EOFError
x = x*256 + ord(byte)
return x
def _write_u32(file, x):
data = []
for i in range(4):
d, m = divmod(x, 256)
data.insert(0, int(m))
x = d
file.write(bytes(data))
class Au_read:
def __init__(self, f):
if type(f) == type(''):
import builtins
f = builtins.open(f, 'rb')
self._opened = True
else:
self._opened = False
self.initfp(f)
def __del__(self):
if self._file:
self.close()
def initfp(self, file):
self._file = file
self._soundpos = 0
magic = int(_read_u32(file))
if magic != AUDIO_FILE_MAGIC:
raise Error('bad magic number')
self._hdr_size = int(_read_u32(file))
if self._hdr_size < 24:
raise Error('header size too small')
if self._hdr_size > 100:
raise Error('header size ridiculously large')
self._data_size = _read_u32(file)
if self._data_size != AUDIO_UNKNOWN_SIZE:
self._data_size = int(self._data_size)
self._encoding = int(_read_u32(file))
if self._encoding not in _simple_encodings:
raise Error('encoding not (yet) supported')
if self._encoding in (AUDIO_FILE_ENCODING_MULAW_8,
AUDIO_FILE_ENCODING_ALAW_8):
self._sampwidth = 2
self._framesize = 1
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_8:
self._framesize = self._sampwidth = 1
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_16:
self._framesize = self._sampwidth = 2
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_24:
self._framesize = self._sampwidth = 3
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_32:
self._framesize = self._sampwidth = 4
else:
raise Error('unknown encoding')
self._framerate = int(_read_u32(file))
self._nchannels = int(_read_u32(file))
self._framesize = self._framesize * self._nchannels
if self._hdr_size > 24:
self._info = file.read(self._hdr_size - 24)
for i in range(len(self._info)):
if self._info[i] == b'\0':
self._info = self._info[:i]
break
else:
self._info = ''
def getfp(self):
return self._file
def getnchannels(self):
return self._nchannels
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getnframes(self):
if self._data_size == AUDIO_UNKNOWN_SIZE:
return AUDIO_UNKNOWN_SIZE
if self._encoding in _simple_encodings:
            return self._data_size // self._framesize
return 0 # XXX--must do some arithmetic here
def getcomptype(self):
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
return 'ULAW'
elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
return 'ALAW'
else:
return 'NONE'
def getcompname(self):
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
return 'CCITT G.711 u-law'
elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
return 'CCITT G.711 A-law'
else:
return 'not compressed'
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
return None
def getmark(self, id):
raise Error('no marks')
def readframes(self, nframes):
if self._encoding in _simple_encodings:
if nframes == AUDIO_UNKNOWN_SIZE:
data = self._file.read()
else:
                # _framesize already includes the channel count (see initfp)
                data = self._file.read(nframes * self._framesize)
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
import audioop
data = audioop.ulaw2lin(data, self._sampwidth)
return data
return None # XXX--not implemented yet
def rewind(self):
self._soundpos = 0
self._file.seek(self._hdr_size)
def tell(self):
return self._soundpos
def setpos(self, pos):
if pos < 0 or pos > self.getnframes():
raise Error('position not in range')
self._file.seek(pos * self._framesize + self._hdr_size)
self._soundpos = pos
def close(self):
if self._opened and self._file:
self._file.close()
self._file = None
class Au_write:
def __init__(self, f):
if type(f) == type(''):
import builtins
f = builtins.open(f, 'wb')
self._opened = True
else:
self._opened = False
self.initfp(f)
def __del__(self):
if self._file:
self.close()
self._file = None
def initfp(self, file):
self._file = file
self._framerate = 0
self._nchannels = 0
self._sampwidth = 0
self._framesize = 0
self._nframes = AUDIO_UNKNOWN_SIZE
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._info = b''
self._comptype = 'ULAW' # default is U-law
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if nchannels not in (1, 2, 4):
raise Error('only 1, 2, or 4 channels supported')
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error('number of channels not set')
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if sampwidth not in (1, 2, 4):
raise Error('bad sample width')
self._sampwidth = sampwidth
def getsampwidth(self):
        if not self._sampwidth:
            raise Error('sample width not specified')
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error('frame rate not set')
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if nframes < 0:
raise Error('# of frames cannot be negative')
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, type, name):
if type in ('NONE', 'ULAW'):
self._comptype = type
else:
raise Error('unknown compression type')
def getcomptype(self):
return self._comptype
def getcompname(self):
if self._comptype == 'ULAW':
return 'CCITT G.711 u-law'
elif self._comptype == 'ALAW':
return 'CCITT G.711 A-law'
else:
return 'not compressed'
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written()
        nframes = len(data) // self._framesize
if self._comptype == 'ULAW':
import audioop
data = audioop.lin2ulaw(data, self._sampwidth)
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
self._ensure_header_written()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
if self._opened and self._file:
self._file.close()
self._file = None
#
# private methods
#
def _ensure_header_written(self):
if not self._nframeswritten:
if not self._nchannels:
raise Error('# of channels not specified')
if not self._sampwidth:
raise Error('sample width not specified')
if not self._framerate:
raise Error('frame rate not specified')
self._write_header()
def _write_header(self):
if self._comptype == 'NONE':
if self._sampwidth == 1:
encoding = AUDIO_FILE_ENCODING_LINEAR_8
self._framesize = 1
elif self._sampwidth == 2:
encoding = AUDIO_FILE_ENCODING_LINEAR_16
self._framesize = 2
elif self._sampwidth == 4:
encoding = AUDIO_FILE_ENCODING_LINEAR_32
self._framesize = 4
else:
raise Error('internal error')
elif self._comptype == 'ULAW':
encoding = AUDIO_FILE_ENCODING_MULAW_8
self._framesize = 1
else:
raise Error('internal error')
self._framesize = self._framesize * self._nchannels
_write_u32(self._file, AUDIO_FILE_MAGIC)
header_size = 25 + len(self._info)
        header_size = (header_size + 7) & ~7   # round up to a multiple of 8
_write_u32(self._file, header_size)
if self._nframes == AUDIO_UNKNOWN_SIZE:
length = AUDIO_UNKNOWN_SIZE
else:
length = self._nframes * self._framesize
_write_u32(self._file, length)
self._datalength = length
_write_u32(self._file, encoding)
_write_u32(self._file, self._framerate)
_write_u32(self._file, self._nchannels)
self._file.write(self._info)
self._file.write(b'\0'*(header_size - len(self._info) - 24))
def _patchheader(self):
self._file.seek(8)
_write_u32(self._file, self._datawritten)
self._datalength = self._datawritten
        self._file.seek(0, 2)   # seek back to the end of the file
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Au_read(f)
elif mode in ('w', 'wb'):
return Au_write(f)
else:
raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
openfp = open
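# Hedged usage sketch (not part of the module): writing one second of 16-bit
# mono silence through the open() helper above; the output path is a
# placeholder.
def _example_write_silence(path):
    w = open(path, 'w')
    w.setnchannels(1)
    w.setsampwidth(2)
    w.setframerate(8000)
    w.setcomptype('NONE', 'not compressed')
    w.writeframes(b'\x00\x00' * 8000)   # 8000 frames of 16-bit silence
    w.close()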
| bsd-2-clause |
kevthehermit/VolUtility | volgui/settings.py | 1 | 4765 | """
Django settings for volgui project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from django.utils.crypto import get_random_string
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Generate a new key if none exists
# https://stackoverflow.com/questions/4664724/distributing-django-projects-with-unique-secret-keys
try:
from secret_key import *
except ImportError:
with open('secret_key.py', 'w') as out:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
out.write("SECRET_KEY = '{0}'".format(get_random_string(50, chars)))
from secret_key import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'web'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'volgui.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web/templates'),
os.path.join(BASE_DIR, 'web/templates/modals'),
os.path.join(BASE_DIR, 'web/templates/sections'),
os.path.join(BASE_DIR, 'extensions/')
]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'volgui.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'voladmin',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'VolUtility.log',
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django': {
'handlers': ['file', 'console'],
'propagate': True,
'level': 'ERROR',
},
'web': {
'handlers': ['file', 'console'],
'level': 'DEBUG',
},
}
} | gpl-3.0 |
Anlim/decode-Django | Django-1.5.1/django/utils/unittest/runner.py | 571 | 6761 | """Running tests"""
import sys
import time
import unittest
from django.utils.unittest import result
try:
from django.utils.unittest.signals import registerResult
except ImportError:
def registerResult(_):
pass
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(TextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln("skipped %r" % (reason,))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
def stopTestRun(self):
super(TextTestResult, self).stopTestRun()
self.printErrors()
class TextTestRunner(unittest.TextTestRunner):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
resultclass = TextTestResult
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None):
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
if resultclass is not None:
self.resultclass = resultclass
def _makeResult(self):
return self.resultclass(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
result.failfast = self.failfast
result.buffer = self.buffer
registerResult(result)
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
else:
result.printErrors()
stopTime = time.time()
timeTaken = stopTime - startTime
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
expectedFails, unexpectedSuccesses, skipped = results
except AttributeError:
pass
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
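# Illustrative usage (not part of Django): running a trivial test case
# through the TextTestRunner defined above.
def _example_run():
    class _Passing(unittest.TestCase):
        def test_ok(self):
            self.assertTrue(True)
    suite = unittest.TestLoader().loadTestsFromTestCase(_Passing)
    return TextTestRunner(verbosity=2).run(suite)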
| gpl-2.0 |
youprofit/servo | tests/wpt/css-tests/css-fonts-3_dev/xhtml1print/reference/support/fonts/makegsubfonts.py | 820 | 14309 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# --------
# Features
# --------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag)
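# Illustrative gsubtest-features.txt content (inferred from the parsing
# above, not copied from the real file): one feature per line, fields
# separated by tabs with the tag first; any further fields are ignored
# here, and blank lines and lines starting with "#" are skipped.
#
#   # OpenType features under test
#   calt
#   liga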
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
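# Net effect of the mapping above (a reading of this script, not a spec
# quote): enabling the feature swaps each tag's ".pass" and ".fail"
# glyphs, so a codepoint that renders the PASS outline by default shows
# FAIL once the lookup fires, and vice versa.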
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
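# Shape of the generated gsubtest-features.js (illustrative; the actual
# tags come from gsubtest-features.txt, and each tag's base codepoint
# advances by 4, matching the cp += 4 above):
#
#   var gFeatures = {
#     "calt": 0xe000, "liga": 0xe004
#   };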
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData() | mpl-2.0 |
jeroendierckx/Camelot | camelot/admin/object_admin.py | 1 | 35970 | # ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Admin class for Plain Old Python Object"""
import logging
logger = logging.getLogger('camelot.view.object_admin')
from camelot.admin.action.form_action import CloseForm
from camelot.view.model_thread import model_function
from camelot.view.controls.tableview import TableView
from camelot.view.utils import to_string
from camelot.core.utils import ugettext_lazy, ugettext as _
from camelot.view.proxy.collection_proxy import CollectionProxy
from validator.object_validator import ObjectValidator
from PyQt4 import QtCore
class FieldAttributesList(list):
"""A list with field attributes that documents them for
sphinx"""
def __init__(self, original_list):
""":param original_list: the list with field attributes
to document"""
super(FieldAttributesList, self).__init__(original_list)
template = "\n * :ref:`%s <field-attribute-%s>`"
doc = '\n'.join([template%(name, name) for name in original_list])
self.__doc__ = doc
DYNAMIC_FIELD_ATTRIBUTES = FieldAttributesList(['tooltip', 'color', 'background_color',
'editable', 'choices',
'prefix', 'suffix', 'arrow',
'new_message',
'precision'])
class ObjectAdmin(object):
"""The ObjectAdmin class describes the interface that will be used
to interact with objects of a certain class. The behaviour of this class
and the resulting interface can be tuned by specifying specific class
attributes:
**The name used in the GUI**
The name used in the GUI for things like window titles and such can
be specified using the verbose_name attribute.
.. attribute:: verbose_name
A human-readable name for the object, singular ::
verbose_name = _('movie')
If this isn't given, the class name will be used
.. attribute:: verbose_name_plural
A human-readable name for the object, plural ::
verbose_name_plural = _('movies')
If this isn't given, Camelot will use verbose_name + "s"
**Fields displayed**
.. attribute:: list_display
a list with the fields that should be displayed in a table view
.. attribute:: lines_per_row
An integer number specifying the height of a row in the table view, expressed
as the number of lines of text it should be able to display. Defaults to 1.
.. attribute:: form_display
a list with the fields that should be displayed in a form view, defaults to
the same fields as those specified in list_display ::
class Admin(EntityAdmin):
form_display = ['title', 'rating', 'cover']
instead of telling which fields to display. It is also possible to define
the form itself ::
from camelot.view.forms import Form, TabForm, WidgetOnlyForm, HBoxForm
class Admin(EntityAdmin):
form_display = TabForm([
('Movie', Form([
HBoxForm([['title', 'rating'], WidgetOnlyForm('cover')]),
'short_description',
'releasedate',
'director',
'script',
'genre',
'description', 'tags'], scrollbars=True)),
('Cast', WidgetOnlyForm('cast'))
])
**Behaviour**
.. attribute:: form_close_action
The action triggered when the form window is closed by the operating system or the window manager. By default this is the
:class:`camelot.admin.action.form_action.CloseForm` action, which validates
the form and allows the user to discard the changes when the form is invalid. To change the form close action in the
toolbar, the :meth:`camelot.admin.object_admin.ObjectAdmin.get_form_actions` method should be overwritten.
.. attribute:: save_mode
Specifies when the data should be send from the view to the model and flushed
to the database. The default mode is 'on_change', meaning that every change
in the view will be send immediately to the database. Other possibilities are :
* 'on_leave' : the data will be send from the view to the model when the view
is closed, eg. : the form is closed.
.. attribute:: delete_mode
Indicates if the deletion of an object should be confirmed by the user, defaults
to 'on_request', indicating the object should be deleted when the user hits the trash
button. Other possibilities are :
* 'on_confirm' : the user will be asked for confirmation before the delete
takes place.
.. attribute:: form_size
a tuple indicating the size of a form view, defaults to (700,500)
.. attribute:: form_actions
Actions to be accessible by pushbuttons on the side of a form, a list of :class:`camelot.admin.action.base.Action` objects. ::
class Admin( EntityAdmin ):
form_actions = [CloseForm()]
These actions will be triggered with a :class:`camelot.admin.action.form_action.FormActionModelContext` as the `model_context` parameter
in the :meth:`camelot.admin.action.base.Action.model_run` method.
.. attribute:: related_toolbar_actions
list of actions that appear in the toolbar of a `OneToMany` editor.
.. attribute:: drop_action
the action that is triggered when a drag and drop occured on the table
view
**Field attributes**
.. attribute:: field_attributes
A dictionary specifying for each field of the model some additional
attributes on how they should be displayed. All of these attributes
are propagated to the constructor of the delegate of this field::
class Movie( Entity ):
title = Column( Unicode(50) )
class Admin( EntityAdmin ):
list_display = ['title']
field_attributes = { 'title' : {'editable':False} }
The :ref:`doc-admin-field_attributes` documentation describes the various keys
that can be used in the field attributes class attribute of an ObjectAdmin
or EntityAdmin.
**Window state**
.. attribute:: form_state
Set this attribute to `maximized` or `minimized` for respective behaviour ::
class Movie( Entity ):
title = Column( Unicode(50) )
class Admin( EntityAdmin ):
list_display = ['title']
form_state = 'maximized'
**Varia**
.. attribute:: name
The name of the group in settings in which user dependent settings will
be stored, defaults to the class name for which this Admin class is used.
.. attribute:: model
The QAbstractItemModel class to be used to display collections of this object,
defaults to a CollectionProxy
.. attribute:: TableView
The QWidget class to be used when a table view is needed
"""
name = None
verbose_name = None
verbose_name_plural = None
list_display = []
lines_per_row = 1
validator = ObjectValidator
model = CollectionProxy
fields = []
form_display = []
form_close_action = CloseForm()
list_filter = []
list_actions = []
list_size = (600, 600)
form_size = (700, 500)
form_actions = []
related_toolbar_actions = []
field_attributes = {}
form_state = None
icon = None # Default
#
# Behavioral attributes
#
drop_action = None
save_mode = 'on_edit'
delete_mode = 'on_request'
TableView = TableView
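# A minimal subclass sketch (hedged illustration; ``Movie`` and its fields
# are hypothetical, and only class attributes documented above are used):
#
#     class MovieAdmin( ObjectAdmin ):
#         verbose_name = _('movie')
#         list_display = ['title', 'rating']
#         field_attributes = { 'rating': {'editable': True} }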
def __init__( self, app_admin, entity ):
"""
:param app_admin: the application admin object for this application,
if None, then the default application_admin is taken
:param entity: the entity class for which this admin instance is to be
used
"""
if not app_admin:
from camelot.admin.application_admin import get_application_admin
self.app_admin = get_application_admin()
else:
self.app_admin = app_admin
if entity:
self.entity = entity
#
# caches to prevent recalculation of things
#
self._field_attributes = dict()
self._subclasses = None
def __str__(self):
return 'Admin %s' % str(self.entity.__name__)
def __repr__(self):
return 'ObjectAdmin(%s)' % str(self.entity.__name__)
def get_name(self):
""" The name of the group in settings in which user dependent settings
will be stored, this is either the `name` attribute of this class or,
the class name of the class for which this Admin class is used.
:return: a string with the name of the settings group
"""
return self.name or self.entity.__name__
def get_verbose_name(self):
# def uncamelize(text):
# def downcase(matchobj):
# return "_" + matchobj.group(0).lower()
# if text:
# text = text[0].lower() + re.sub(r'([A-Z])', downcase, text[1:])
# return text
return unicode(
self.verbose_name or _(self.entity.__name__.capitalize())
)
def get_verbose_name_plural(self):
return unicode(
self.verbose_name_plural
or (self.get_verbose_name() + 's')
)
def get_icon(self):
return self.icon
@model_function
def get_verbose_identifier(self, obj):
"""Create an identifier for an object that is interpretable
for the user, eg : the primary key of an object. This verbose identifier can
be used to generate a title for a form view of an object.
"""
return u'%s : %s' % (self.get_verbose_name(), unicode(obj))
def get_entity_admin(self, entity):
return self.app_admin.get_entity_admin(entity)
def get_save_mode(self):
return self.save_mode
def get_settings( self ):
"""A settings object in which settings related to this admin can be
stored.
:return: a :class:`QtCore.QSettings` object
"""
settings = self.app_admin.get_settings()
settings.beginGroup( self.get_name()[:255] )
return settings
def get_memento( self ):
return self.app_admin.get_memento()
def get_delete_mode(self):
return self.delete_mode
def get_delete_message(self, obj):
return _('Are you sure you want to delete this')
@model_function
def get_form_actions( self, obj ):
"""Specify the list of action buttons that should appear on the side
of the form view.
:param obj: the object displayed in the form
:return: a list of :class:`camelot.admin.action.base.Action` objects
"""
app_admin = self.get_application_admin()
from camelot.admin.action.form_action import structure_to_form_actions
return app_admin.get_form_actions() + structure_to_form_actions( self.form_actions )
@model_function
def get_form_toolbar_actions( self, toolbar_area ):
"""
By default this function will return the same as :meth:`camelot.admin.application_admin.ApplicationAdmin.get_form_toolbar_actions`
:param toolbar_area: an instance of :class:`Qt.ToolBarArea` indicating
where the toolbar actions will be positioned
:return: a list of :class:`camelot.admin.action.base.Action` objects
that should be displayed on the toolbar of a form view. return
None if no toolbar should be created.
"""
app_admin = self.get_application_admin()
return app_admin.get_form_toolbar_actions( toolbar_area )
def get_related_toolbar_actions( self, toolbar_area, direction ):
"""Specify the toolbar actions that should appear in a OneToMany editor.
:param toolbar_area: the position of the toolbar
:param direction: the direction of the relation : 'onetomany' or
'manytomany'
:return: a list of :class:`camelot.admin.action.base.Action` objects
"""
app_admin = self.get_application_admin()
return self.related_toolbar_actions or \
app_admin.get_related_toolbar_actions( toolbar_area, direction )
@model_function
def get_list_actions(self):
return self.list_actions
@model_function
def get_depending_objects(self, obj):
"""Overwrite this function to generate a list of objects that depend on a given
object. When obj is modified by the user, this function will be called to determine
which other objects need their views updated.
:param obj: an object of the type that is managed by this admin class
:return: an iterator over objects that depend on obj
"""
return []
@model_function
def get_compounding_objects(self, obj):
"""Overwrite this function to generate a list of objects out of which
`obj` is built. These objects will be validated if `obj` is
validated. The effect of returning compounding objects will be :
* `obj` will only be valid if all its compounding object
are valid as well.
* default values will be set for the attributes of the compounding
objects
* when an object is expired or refreshed, all its compounding objects
will be expired and refreshed as well
"""
return []
@model_function
def get_subclass_tree( self ):
"""Get a tree of admin classes representing the subclasses of the class
represented by this admin class
:return: [(subclass_admin, [(subsubclass_admin, [...]),...]),...]
"""
subclasses = []
for subclass in self.entity.__subclasses__():
subclass_admin = self.get_related_admin( subclass )
if subclass_admin!=self:
subclasses.append((
subclass_admin,
subclass_admin.get_subclass_tree()
))
def sort_admins(a1, a2):
return cmp(a1[0].get_verbose_name_plural(), a2[0].get_verbose_name_plural())
subclasses.sort(cmp=sort_admins)
return subclasses
def get_related_admin(self, cls):
"""Get an admin object for another object class. Taking into account
preferences of this admin object or for those of admin object higher up
the chain such as the application admin object.
:param cls: the class for which an admin object is requested
"""
if cls == self.entity:
return self
related_admin = self.app_admin.get_entity_admin(cls)
if not related_admin:
logger.warn('no related admin found for %s' % (cls.__name__))
return related_admin
def get_static_field_attributes(self, field_names):
"""
Convenience function to get all the field attributes
that are static (don't depend on the object being visualized). This
method is only called once for a table or form view, independent of
the number of objects/records being visualized.
:param field_names: a list of field names
:return: [{field_attribute_name:field_attribute_value, ...}, {}, ...]
The returned list has the same order as the requested
field_names.
"""
for field_name in field_names:
field_attributes = self.get_field_attributes(field_name)
static_field_attributes = {}
for name, value in field_attributes.items():
if name not in DYNAMIC_FIELD_ATTRIBUTES or not callable(value):
static_field_attributes[name] = value
yield static_field_attributes
def get_dynamic_field_attributes(self, obj, field_names):
"""
Convenience function to get all the field attributes
that are dynamic (depend on the object being visualized). This method
is called once for each object/row in a table view and once for
each object being visualized in a form view.
:param field_names: a list of field names
:param obj: the object at the row for which to get the values of the dynamic field attributes
:return: [{field_attribute_name:field_attribute_value, ...}, {}, ...]
The returned list has the same order as the requested
field_names. A reimplementation of this method can look like::
def get_dynamic_field_attributes(self, obj, field_names):
for field_attributes in super( MyAdmin, self ).get_dynamic_field_attributes(obj, field_names):
if obj.status == 'finished':
field_attributes['editable'] = True
else:
field_attributes['editable'] = False
yield field_attributes
"""
for field_name in field_names:
field_attributes = self.get_field_attributes(field_name)
dynamic_field_attributes = {}
for name, value in field_attributes.items():
if name not in DYNAMIC_FIELD_ATTRIBUTES:
continue
if name in ('default',):
# the default value of a field is not needed in the GUI,
# and the continuous evaluation of it might be expensive,
# as it might be the max of a column
continue
if callable(value):
return_value = None
try:
return_value = value(obj)
except Exception, exc:
logger.error(u'error in field_attribute function of %s'%name, exc_info=exc)
finally:
dynamic_field_attributes[name] = return_value
yield dynamic_field_attributes
def get_field_attributes(self, field_name):
"""
Get the attributes needed to visualize the field field_name. This
function is called by get_static_field_attributes and
get_dynamic_field_attributes.
This function first tries to fill the dictionary with field
attributes for a field with those gathered through introspection,
and then updates them with those found in the field_attributes
class attribute.
:param field_name: the name of the field
:return: a dictionary of attributes needed to visualize the field
The values of the returned dictionary either contain the value
of the field attribute, or in the case of dynamic field attributes,
a function that returns the value of the field attribute.
"""
#
# @todo : this function should return a frozen dictionary, so no
# other parts of the application can modify the cached field
# attributes
#
try:
return self._field_attributes[field_name]
except KeyError:
def create_default_getter(field_name):
return lambda o:getattr(o, field_name)
from camelot.view.controls import delegates
#
# Default attributes for all fields
#
attributes = dict(
getter=create_default_getter(field_name),
to_string = to_string,
field_name=field_name,
python_type=str,
length=None,
tooltip=None,
background_color=None,
#minimal_column_width=12,
editable=False,
nullable=True,
widget='str',
blank=True,
delegate=delegates.PlainTextDelegate,
validator_list=[],
name=ugettext_lazy(field_name.replace( '_', ' ' ).capitalize())
)
#
# Field attributes forced by the field_attributes property
#
forced_attributes = {}
try:
forced_attributes = self.field_attributes[field_name]
except KeyError:
pass
#
# TODO : move part of logic from entity admin class over here
#
#
# Overrule introspected field_attributes with those defined
#
attributes.update(forced_attributes)
#
# In case of a 'target' field attribute, instantiate an appropriate
# 'admin' attribute
#
def get_entity_admin(target):
"""Helper function that instantiated an Admin object for a
target entity class
:param target: an entity class for which an Admin object is
needed
"""
try:
fa = self.field_attributes[field_name]
target = fa.get('target', target)
admin_class = fa['admin']
return admin_class(self.app_admin, target)
except KeyError:
return self.get_related_admin(target)
if 'target' in attributes:
attributes['admin'] = get_entity_admin(attributes['target'])
self._field_attributes[field_name] = attributes
return attributes
def get_table( self ):
"""The definition of the table to be used in a list view
:return: a `camelot.admin.table.Table` object
"""
from camelot.admin.table import structure_to_table
table = structure_to_table( self.list_display )
return table
@model_function
def get_columns(self):
"""
The columns to be displayed in the list view, returns a list of pairs
of the name of the field and its attributes needed to display it
properly
@return: [(field_name,
{'widget': widget_type,
'editable': True or False,
'blank': True or False,
'validator_list':[...],
'name':'Field name'}),
...]
"""
table = self.get_table()
return [(field, self.get_field_attributes(field))
for field in table.get_fields() ]
def get_validator( self, model = None, initial_validation = False ):
"""Get a validator object
:return: a :class:`camelot.admin.validator.object_validator.Validator`
"""
return self.validator( self,
model = model,
initial_validation = initial_validation )
@model_function
def get_fields(self):
fields = self.get_form_display().get_fields()
fields_and_attributes = [
(field, self.get_field_attributes(field))
for field in fields
]
return fields_and_attributes
def get_application_admin( self ):
"""Provide access to the :class:`ApplicationAdmin`
:return: the :class:`camelot.admin.application_admin.ApplicationAdmin`
object for the application.
"""
return self.app_admin.get_application_admin()
@model_function
def get_all_fields_and_attributes(self):
"""A dictionary of (field_name:field_attributes) for all fields that can
possibly appear in a list or a form or for which field attributes have
been defined
"""
fields = dict(self.get_columns())
fields.update(dict(self.get_fields()))
return fields
@model_function
def get_form_display(self):
from camelot.view.forms import Form, structure_to_form
if self.form_display:
return structure_to_form(self.form_display)
if self.list_display:
return Form( self.get_table().get_fields() )
return Form([])
def _apply_form_state(self, widget):
"""apply the consequences of the form_state class attribute
to a widget"""
if hasattr(self, 'form_state'):
from camelot.core import constants
if self.form_state == constants.MAXIMIZED:
widget.setWindowState(QtCore.Qt.WindowMaximized)
if self.form_state == constants.MINIMIZED:
widget.setWindowState(QtCore.Qt.WindowMinimized)
def create_form_view(self, title, model, index, parent=None):
"""Creates a Qt widget containing a form view, for a specific index in
a model. Use this method to create a form view for a collection of objects,
the user will be able to use :kbd:`PgUp`/:kbd:`PgDown` to move to
the next object.
:param title: the title of the form view
:param model: the data model to be used to fill the form view
:param index: which row in the data model to display
:param parent: the parent widget for the form
"""
logger.debug('creating form view for index %s' % index)
from camelot.view.controls.formview import FormView
form = FormView(title, self, model, index)
self._apply_form_state( form )
return form
def set_defaults(self, object_instance, include_nullable_fields=True):
"""Set the defaults of an object
:param include_nullable_fields: also set defaults for nullable fields,
depending on the context, this should be set to False to allow the user
to set the field to None
"""
from sqlalchemy.schema import ColumnDefault
if self.is_deleted( object_instance ):
return False
for field, attributes in self.get_fields():
has_default = False
try:
default = attributes['default']
has_default = True
except KeyError:
pass
if has_default:
#
# prevent the setting of a default value when one has been
# set already
#
value = attributes['getter'](object_instance)
if value not in (None, []):
# False is a legitimate value for Booleans, but a
# one-to-many field might have a default value as well
continue
if isinstance(default, ColumnDefault):
if default.is_scalar:
# avoid trip to database
default_value = default.arg
else:
default_value = default.execute()
elif callable(default):
import inspect
args, _varargs, _kwargs, _defs = \
inspect.getargspec(default)
if len(args):
default_value = default(object_instance)
else:
default_value = default()
else:
default_value = default
logger.debug(
'set default for %s to %s' % (
field,
unicode(default_value)
)
)
try:
setattr(object_instance, field, default_value)
except AttributeError, exc:
logger.error(
'Programming Error : could not set'
' attribute %s to %s on %s' % (
field,
default_value,
object_instance.__class__.__name__
),
exc_info=exc
)
for compounding_object in self.get_compounding_objects( object_instance ):
self.get_related_admin( type( compounding_object ) ).set_defaults( compounding_object )
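# Illustration of the default kinds resolved above (hedged sketch; the
# field names and the datetime import are hypothetical): a scalar, a
# zero-argument callable, and a callable receiving the instance itself.
#
#     field_attributes = {
#         'status':  {'default': 'draft'},
#         'created': {'default': datetime.datetime.now},
#         'code':    {'default': lambda obj: obj.suggest_code()},
#     }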
def create_object_form_view(self, title, object_getter, parent=None):
"""Create a form view for a single object, :kbd:`PgUp`/:kbd:`PgDown`
will do nothing.
:param title: the title of the form view
:param object_getter: a function taking no arguments, and returning the object
:param parent: the parent widget for the form
"""
def create_collection_getter( object_getter, object_cache ):
"""Transform an object_getter into a collection_getter which
returns a collection with only the object returned by object
getter.
:param object_getter: a function that returns the object that
should be in the collection
:param object_cache: a list that will be used to store the result
of object_getter, to prevent multiple calls of object_getter
"""
def collection_getter():
if not object_cache:
object_cache.append( object_getter() )
return object_cache
return collection_getter
model = self.model( self,
create_collection_getter( object_getter, [] ),
self.get_fields )
return self.create_form_view(title, model, 0, parent)
def create_new_view(admin, related_collection_proxy=None, parent=None):
"""Create a Qt widget containing a form to create a new instance of the
entity related to this admin class
The returned class has an 'entity_created_signal' that will be fired
when a valid new entity was created by the form
:param collection_proxy: if specified, the object will be appended to
its underlying collection upon creation and removed from it upon
discarding.
"""
from PyQt4 import QtCore
from camelot.view.controls.formview import FormView
from camelot.view.model_thread import post
class NewObjectCollectionProxy( CollectionProxy ):
"""A CollectionProxy for creating new objects, the underlying collection
will always be filled with a single object."""
def __init__(self, related_collection_proxy, *args, **kwargs):
# set attributes before initializing NewObjectCollectionProxy,
# because this one contains posts that need these attributes
self._new_object = None
self._related_collection_proxy = related_collection_proxy
super(NewObjectCollectionProxy, self).__init__(*args, **kwargs)
@property
def max_number_of_rows(self):
return 1
def get_new_object(self):
if not self._new_object:
self._new_object = admin.entity()
# Give the default fields their value
admin.add( self._new_object )
admin.set_defaults(self._new_object)
if self._related_collection_proxy:
self._related_collection_proxy.append_object( self._new_object )
return self._new_object
def get_collection(self):
return [self.get_new_object()]
model = NewObjectCollectionProxy( related_collection_proxy,
admin,
None,
admin.get_fields,
max_number_of_rows=1 )
validator = admin.get_validator(model)
class NewView( FormView ):
entity_created_signal = QtCore.pyqtSignal(object)
def __init__(self, parent):
super( NewView, self).__init__( title = _('New'),
admin = admin,
model = model,
index = 0)
#
# every time data has been changed, it could become valid,
# when this is the case, it should be propagated
#
model.dataChanged.connect( self.dataChanged )
def emit_if_valid(self, valid):
if valid:
def create_instance_getter(new_object):
return lambda:new_object[0]
self.entity_created_signal.emit( model.get_new_object )
@QtCore.pyqtSlot( QtCore.QModelIndex, QtCore.QModelIndex )
def dataChanged(self, _index1, _index2):
def validate():
return validator.isValid(0)
post(validate, self.emit_if_valid)
form = NewView( parent )
admin._apply_form_state( form )
if hasattr(admin, 'form_size'):
form.setMinimumSize(admin.form_size[0], admin.form_size[1])
return form
def primary_key( self, obj ):
"""Get the primary key of an object
:param obj: the object to get the primary key from
:return: a tuple with the components of the primary key, or an
empty list if the object has no primary key yet or no longer has one.
"""
return []
def get_modifications( self, obj ):
"""Get the modifications on an object since the last flush.
:param obj: the object for which to get the modifications
:return: a dictionary with the changed attributes and their old
value
"""
return dict()
def delete(self, entity_instance):
"""Delete an entity instance"""
del entity_instance
def flush(self, entity_instance):
"""Flush the pending changes of this entity instance to the backend"""
pass
def expunge(self, entity_instance):
"""Remove this object from the objects being managed"""
pass
def refresh(self, entity_instance):
"""Undo the pending changes to the backend and restore the original
state"""
pass
def add(self, entity_instance):
"""Add an entity instance as a managed entity instance"""
pass
def is_deleted(self, _obj):
"""
:return: True if the object has been deleted from the persistent
state, False otherwise"""
return False
def is_persistent(self, _obj):
""":return: True if the object has a persisted state, False otherwise"""
return False
@model_function
def copy(self, entity_instance):
"""Duplicate this entity instance"""
new_entity_instance = entity_instance.__class__()
return new_entity_instance
| gpl-2.0 |
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/test/test_undocumented_details.py | 136 | 1169 | from test.test_support import run_unittest, check_py3k_warnings
import unittest
class TestImplementationComparisons(unittest.TestCase):
def test_type_comparisons(self):
self.assertTrue(str < int or str > int)
self.assertTrue(int <= str or int >= str)
self.assertTrue(cmp(int, str) != 0)
self.assertTrue(int is int)
self.assertTrue(str == str)
self.assertTrue(int != str)
def test_cell_comparisons(self):
def f(x):
if x:
y = 1
def g():
return x
def h():
return y
return g, h
g, h = f(0)
g_cell, = g.func_closure
h_cell, = h.func_closure
self.assertTrue(h_cell < g_cell)
self.assertTrue(g_cell >= h_cell)
self.assertEqual(cmp(g_cell, h_cell), 1)
self.assertTrue(g_cell is g_cell)
self.assertTrue(g_cell == g_cell)
self.assertTrue(h_cell == h_cell)
self.assertTrue(g_cell != h_cell)
def test_main():
with check_py3k_warnings():
run_unittest(TestImplementationComparisons)
if __name__ == '__main__':
test_main()
| mit |
jendap/tensorflow | tensorflow/contrib/eager/python/examples/revnet/revnet.py | 11 | 7742 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reversible residual network compatible with eager execution.
Code for main model.
Reference [The Reversible Residual Network: Backpropagation
Without Storing Activations](https://arxiv.org/pdf/1707.04585.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import blocks
class RevNet(tf.keras.Model):
"""RevNet that depends on all the blocks."""
def __init__(self, config):
"""Initialize RevNet with building blocks.
Args:
config: tf.contrib.training.HParams object; specifies hyperparameters
"""
super(RevNet, self).__init__()
self.axis = 1 if config.data_format == "channels_first" else 3
self.config = config
self._init_block = blocks.InitBlock(config=self.config)
self._final_block = blocks.FinalBlock(config=self.config)
self._block_list = self._construct_intermediate_blocks()
self._moving_average_variables = []
def _construct_intermediate_blocks(self):
# Precompute input shape after initial block
stride = self.config.init_stride
if self.config.init_max_pool:
stride *= 2
if self.config.data_format == "channels_first":
w, h = self.config.input_shape[1], self.config.input_shape[2]
input_shape = (self.config.init_filters, w // stride, h // stride)
else:
w, h = self.config.input_shape[0], self.config.input_shape[1]
input_shape = (w // stride, h // stride, self.config.init_filters)
# Aggregate intermediate blocks
block_list = tf.contrib.checkpoint.List()
for i in range(self.config.n_rev_blocks):
# RevBlock configurations
n_res = self.config.n_res[i]
filters = self.config.filters[i]
if filters % 2 != 0:
raise ValueError("Number of output filters must be even to ensure"
"correct partitioning of channels")
stride = self.config.strides[i]
strides = (self.config.strides[i], self.config.strides[i])
# Add block
rev_block = blocks.RevBlock(
n_res,
filters,
strides,
input_shape,
batch_norm_first=(i != 0), # Only skip on first block
data_format=self.config.data_format,
bottleneck=self.config.bottleneck,
fused=self.config.fused,
dtype=self.config.dtype)
block_list.append(rev_block)
# Precompute input shape for the next block
if self.config.data_format == "channels_first":
w, h = input_shape[1], input_shape[2]
input_shape = (filters, w // stride, h // stride)
else:
w, h = input_shape[0], input_shape[1]
input_shape = (w // stride, h // stride, filters)
return block_list
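# A hedged configuration sketch (field names inferred from the accesses in
# this class; the values are illustrative, not the reference settings):
#
#   config = tf.contrib.training.HParams(
#       data_format="channels_first", input_shape=(3, 32, 32),
#       init_filters=32, init_stride=1, init_max_pool=False,
#       n_rev_blocks=3, n_res=[3, 3, 3], filters=[32, 64, 112],
#       strides=[1, 2, 2], bottleneck=False, fused=True,
#       dtype=tf.float32, n_classes=10, weight_decay=2e-4)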
def call(self, inputs, training=True):
"""Forward pass."""
saved_hidden = None
if training:
saved_hidden = [inputs]
h = self._init_block(inputs, training=training)
if training:
saved_hidden.append(h)
for block in self._block_list:
h = block(h, training=training)
if training:
saved_hidden.append(h)
logits = self._final_block(h, training=training)
return (logits, saved_hidden) if training else (logits, None)
def compute_loss(self, logits, labels):
"""Compute cross entropy loss."""
if self.config.dtype == tf.float32 or self.config.dtype == tf.float16:
cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
else:
# `sparse_softmax_cross_entropy_with_logits` does not have a GPU kernel
# for float64, int32 pairs
labels = tf.one_hot(
labels, depth=self.config.n_classes, axis=1, dtype=self.config.dtype)
cross_ent = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
return tf.reduce_mean(cross_ent)
def compute_gradients(self, saved_hidden, labels, training=True, l2_reg=True):
"""Manually computes gradients.
This method silently updates the running averages of batch normalization.
Args:
saved_hidden: List of hidden states Tensors
labels: One-hot labels for classification
training: Use the mini-batch stats in batch norm if set to True
l2_reg: Apply l2 regularization
Returns:
A tuple with the first entry being a list of all gradients and the second
being the loss
"""
def _defunable_pop(l):
"""Functional style list pop that works with `tfe.defun`."""
t, l = l[-1], l[:-1]
return t, l
# Backprop through last block
x = saved_hidden[-1]
with tf.GradientTape() as tape:
tape.watch(x)
logits = self._final_block(x, training=training)
loss = self.compute_loss(logits, labels)
grads_combined = tape.gradient(loss,
[x] + self._final_block.trainable_variables)
dy, final_grads = grads_combined[0], grads_combined[1:]
# Backprop through intermediate blocks
intermediate_grads = []
for block in reversed(self._block_list):
y, saved_hidden = _defunable_pop(saved_hidden)
x = saved_hidden[-1]
dy, grads = block.backward_grads(x, y, dy, training=training)
intermediate_grads = grads + intermediate_grads
# Backprop through first block
_, saved_hidden = _defunable_pop(saved_hidden)
x, saved_hidden = _defunable_pop(saved_hidden)
assert not saved_hidden
with tf.GradientTape() as tape:
y = self._init_block(x, training=training)
init_grads = tape.gradient(
y, self._init_block.trainable_variables, output_gradients=dy)
# Ordering match up with `model.trainable_variables`
grads_all = init_grads + final_grads + intermediate_grads
if l2_reg:
grads_all = self._apply_weight_decay(grads_all)
return grads_all, loss
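# Sketch of one manual training step (assumption: `model`, `images`,
# `labels` and `optimizer` exist; this is not part of the original file):
#
#   logits, saved_hidden = model(images, training=True)
#   grads, loss = model.compute_gradients(saved_hidden, labels)
#   optimizer.apply_gradients(zip(grads, model.trainable_variables))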
def _apply_weight_decay(self, grads):
"""Update gradients to reflect weight decay."""
return [
g + self.config.weight_decay * v if v.name.endswith("kernel:0") else g
for g, v in zip(grads, self.trainable_variables)
]
def get_moving_stats(self):
"""Get moving averages of batch normalization."""
device = "/gpu:0" if tf.test.is_gpu_available() else "/cpu:0"
with tf.device(device):
return [v.read_value() for v in self.moving_average_variables]
def restore_moving_stats(self, values):
"""Restore moving averages of batch normalization."""
device = "/gpu:0" if tf.test.is_gpu_available() else "/cpu:0"
with tf.device(device):
for var_, val in zip(self.moving_average_variables, values):
var_.assign(val)
@property
def moving_average_variables(self):
"""Get all variables that are batch norm moving averages."""
def _is_moving_avg(v):
n = v.name
return n.endswith("moving_mean:0") or n.endswith("moving_variance:0")
if not self._moving_average_variables:
self._moving_average_variables = filter(_is_moving_avg, self.variables)
return self._moving_average_variables
| apache-2.0 |
alex-justes/en_scripts | level_upload_task.py | 1 | 2646 | #!/usr/bin/env python3
#TODO: Check if cookie-file exists
#TODO: Somehow check correctness
import pycurl
import sys
from io import BytesIO
from optparse import OptionParser
from urllib.parse import urlencode
import time
import random
parser = OptionParser()
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="verbose output")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False,
help="tssss...")
parser.add_option("-d", "--domain", action="store", type="string", dest="domain", default="demo.en.cx",
help="set domain [demo.en.cx]")
parser.add_option("-c", "--cookies", action="store", type="string", dest="cookies", default="cookies.txt",
help="set file with auth-cookies [cookies.txt]")
parser.add_option("-l", "--level", action="store", type="string", dest="level", default="",
help="set level")
parser.add_option("-g", "--gid", action="store", type="string", dest="gid", default="",
help="set gid")
parser.add_option("-t", "--time", action="store", type="int", dest="time", default="0",
help="specify random delay in seconds between queries [0]")
parser.add_option("-f", "--file", action="store", type="string", dest="file", default="",
help="specify input file")
(options, args) = parser.parse_args(sys.argv)
error = False
if (options.level == ""):
print("Error: You must specify level!")
error = True
if (options.gid == ""):
print("Error: You must specify gid!")
error = True
if (options.file == ""):
print("Error: You must specify input file!")
if (error):
sys.exit()
if (options.verbose):
print("Domain: ", options.domain)
print("Cookie file: ", options.cookies)
print("Level: ", options.level)
print("GID: ", options.gid)
print("Input file: ", options.file)
random.seed()
content = ""
with open(options.file, 'r') as content_file:
content = content_file.read()
url = "http://"+options.domain+"/Administration/Games/TaskEdit.aspx?gid="+options.gid+"&level="+options.level
post_data = {"ForMemberID" : "0", "inputTask" : content}
postfields = urlencode(post_data)
buffer = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.POSTFIELDS, postfields)
c.setopt(c.COOKIEFILE, options.cookies)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
t = random.randint(0, options.time)
if (not options.quiet):
print("Upload task " +options.file+" to level "+options.level)
print("Sleep for: ",t, " seconds")
time.sleep(t)
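# Example invocation (hedged; the domain, gid, level and file below are
# placeholders, not real game data):
#
#   ./level_upload_task.py -d demo.en.cx -c cookies.txt \
#       -g 12345 -l 1 -f task.html -t 30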
| bsd-3-clause |
rahul-c1/scrapy | scrapy/xlib/tx/endpoints.py | 164 | 41184 | # -*- test-case-name: twisted.internet.test.test_endpoints -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementations of L{IStreamServerEndpoint} and L{IStreamClientEndpoint} that
wrap the L{IReactorTCP}, L{IReactorSSL}, and L{IReactorUNIX} interfaces.
This also implements an extensible mini-language for describing endpoints,
parsed by the L{clientFromString} and L{serverFromString} functions.
@since: 10.1
"""
from __future__ import division, absolute_import
import os
import socket
from zope.interface import implementer, directlyProvides
import warnings
from twisted.internet import interfaces, defer, error, fdesc, threads
from twisted.internet.protocol import (
ClientFactory, Protocol, ProcessProtocol, Factory)
from twisted.internet.interfaces import IStreamServerEndpointStringParser
from twisted.internet.interfaces import IStreamClientEndpointStringParser
from twisted.python.filepath import FilePath
from twisted.python.failure import Failure
from twisted.python import log
from twisted.python.components import proxyForInterface
from twisted.plugin import IPlugin, getPlugins
from twisted.internet import stdio
from .interfaces import IFileDescriptorReceiver
__all__ = ["TCP4ClientEndpoint", "SSL4ServerEndpoint"]
class _WrappingProtocol(Protocol):
"""
Wrap another protocol in order to notify my user when a connection has
been made.
"""
def __init__(self, connectedDeferred, wrappedProtocol):
"""
@param connectedDeferred: The L{Deferred} that will callback
with the C{wrappedProtocol} when it is connected.
@param wrappedProtocol: An L{IProtocol} provider that will be
connected.
"""
self._connectedDeferred = connectedDeferred
self._wrappedProtocol = wrappedProtocol
for iface in [interfaces.IHalfCloseableProtocol,
IFileDescriptorReceiver]:
if iface.providedBy(self._wrappedProtocol):
directlyProvides(self, iface)
def logPrefix(self):
"""
Transparently pass through the wrapped protocol's log prefix.
"""
if interfaces.ILoggingContext.providedBy(self._wrappedProtocol):
return self._wrappedProtocol.logPrefix()
return self._wrappedProtocol.__class__.__name__
def connectionMade(self):
"""
Connect the C{self._wrappedProtocol} to our C{self.transport} and
callback C{self._connectedDeferred} with the C{self._wrappedProtocol}
"""
self._wrappedProtocol.makeConnection(self.transport)
self._connectedDeferred.callback(self._wrappedProtocol)
def dataReceived(self, data):
"""
Proxy C{dataReceived} calls to our C{self._wrappedProtocol}
"""
return self._wrappedProtocol.dataReceived(data)
def fileDescriptorReceived(self, descriptor):
"""
Proxy C{fileDescriptorReceived} calls to our C{self._wrappedProtocol}
"""
return self._wrappedProtocol.fileDescriptorReceived(descriptor)
def connectionLost(self, reason):
"""
Proxy C{connectionLost} calls to our C{self._wrappedProtocol}
"""
return self._wrappedProtocol.connectionLost(reason)
def readConnectionLost(self):
"""
Proxy L{IHalfCloseableProtocol.readConnectionLost} to our
C{self._wrappedProtocol}
"""
self._wrappedProtocol.readConnectionLost()
def writeConnectionLost(self):
"""
Proxy L{IHalfCloseableProtocol.writeConnectionLost} to our
C{self._wrappedProtocol}
"""
self._wrappedProtocol.writeConnectionLost()
class _WrappingFactory(ClientFactory):
"""
Wrap a factory in order to wrap the protocols it builds.
@ivar _wrappedFactory: A provider of I{IProtocolFactory} whose buildProtocol
method will be called and whose resulting protocol will be wrapped.
@ivar _onConnection: A L{Deferred} that fires when the protocol is
connected
@ivar _connector: A L{connector <twisted.internet.interfaces.IConnector>}
that is managing the current or previous connection attempt.
"""
protocol = _WrappingProtocol
def __init__(self, wrappedFactory):
"""
@param wrappedFactory: A provider of I{IProtocolFactory} whose
buildProtocol method will be called and whose resulting protocol
will be wrapped.
"""
self._wrappedFactory = wrappedFactory
self._onConnection = defer.Deferred(canceller=self._canceller)
def startedConnecting(self, connector):
"""
A connection attempt was started. Remember the connector which started
said attempt, for use later.
"""
self._connector = connector
def _canceller(self, deferred):
"""
The outgoing connection attempt was cancelled. Fail that L{Deferred}
with an L{error.ConnectingCancelledError}.
@param deferred: The L{Deferred <defer.Deferred>} that was cancelled;
should be the same as C{self._onConnection}.
@type deferred: L{Deferred <defer.Deferred>}
@note: This relies on startedConnecting having been called, so it may
seem as though there's a race condition where C{_connector} may not
have been set. However, using public APIs, this condition is
impossible to catch, because a connection API
(C{connectTCP}/C{SSL}/C{UNIX}) is always invoked before a
L{_WrappingFactory}'s L{Deferred <defer.Deferred>} is returned to
C{connect()}'s caller.
@return: C{None}
"""
deferred.errback(
error.ConnectingCancelledError(
self._connector.getDestination()))
self._connector.stopConnecting()
def doStart(self):
"""
Start notifications are passed straight through to the wrapped factory.
"""
self._wrappedFactory.doStart()
def doStop(self):
"""
Stop notifications are passed straight through to the wrapped factory.
"""
self._wrappedFactory.doStop()
def buildProtocol(self, addr):
"""
Proxy C{buildProtocol} to our C{self._wrappedFactory} or errback
the C{self._onConnection} L{Deferred}.
@return: An instance of L{_WrappingProtocol} or C{None}
"""
try:
proto = self._wrappedFactory.buildProtocol(addr)
except:
self._onConnection.errback()
else:
return self.protocol(self._onConnection, proto)
def clientConnectionFailed(self, connector, reason):
"""
Errback the C{self._onConnection} L{Deferred} when the
client connection fails.
"""
if not self._onConnection.called:
self._onConnection.errback(reason)
@implementer(interfaces.ITransport)
class _ProcessEndpointTransport(proxyForInterface(
interfaces.IProcessTransport, '_process')):
"""
An L{ITransport} provider for the L{IProtocol} instance passed to the
process endpoint.
@ivar _process: An active process transport which will be used by write
methods on this object to write data to a child process.
@type _process: L{interfaces.IProcessTransport} provider
"""
def write(self, data):
"""
Write to the child process's standard input.
@param data: The data to write on stdin.
"""
self._process.writeToChild(0, data)
def writeSequence(self, data):
"""
Write a list of strings to child process's stdin.
@param data: The list of chunks to write on stdin.
"""
for chunk in data:
self._process.writeToChild(0, chunk)
@implementer(interfaces.IStreamServerEndpoint)
class _TCPServerEndpoint(object):
"""
A TCP server endpoint interface
"""
def __init__(self, reactor, port, backlog, interface):
"""
@param reactor: An L{IReactorTCP} provider.
@param port: The port number used for listening
@type port: int
@param backlog: Size of the listen queue
@type backlog: int
@param interface: The hostname to bind to
@type interface: str
"""
self._reactor = reactor
self._port = port
self._backlog = backlog
self._interface = interface
def listen(self, protocolFactory):
"""
Implement L{IStreamServerEndpoint.listen} to listen on a TCP
socket
"""
return defer.execute(self._reactor.listenTCP,
self._port,
protocolFactory,
backlog=self._backlog,
interface=self._interface)
class TCP4ServerEndpoint(_TCPServerEndpoint):
"""
Implements TCP server endpoint with an IPv4 configuration
"""
def __init__(self, reactor, port, backlog=50, interface=''):
"""
@param reactor: An L{IReactorTCP} provider.
@param port: The port number used for listening
@type port: int
@param backlog: Size of the listen queue
@type backlog: int
@param interface: The hostname to bind to, defaults to '' (all)
@type interface: str
"""
_TCPServerEndpoint.__init__(self, reactor, port, backlog, interface)
class TCP6ServerEndpoint(_TCPServerEndpoint):
"""
Implements TCP server endpoint with an IPv6 configuration
"""
def __init__(self, reactor, port, backlog=50, interface='::'):
"""
@param reactor: An L{IReactorTCP} provider.
@param port: The port number used for listening
@type port: int
@param backlog: Size of the listen queue
@type backlog: int
@param interface: The hostname to bind to, defaults to '::' (all)
@type interface: str
"""
_TCPServerEndpoint.__init__(self, reactor, port, backlog, interface)
@implementer(interfaces.IStreamClientEndpoint)
class TCP4ClientEndpoint(object):
"""
TCP client endpoint with an IPv4 configuration.
"""
def __init__(self, reactor, host, port, timeout=30, bindAddress=None):
"""
@param reactor: An L{IReactorTCP} provider
@param host: A hostname, used when connecting
@type host: str
@param port: The port number, used when connecting
@type port: int
@param timeout: The number of seconds to wait before assuming the
connection has failed.
@type timeout: int
@param bindAddress: A (host, port) tuple of local address to bind to,
or None.
@type bindAddress: tuple
"""
self._reactor = reactor
self._host = host
self._port = port
self._timeout = timeout
self._bindAddress = bindAddress
def connect(self, protocolFactory):
"""
Implement L{IStreamClientEndpoint.connect} to connect via TCP.
"""
try:
wf = _WrappingFactory(protocolFactory)
self._reactor.connectTCP(
self._host, self._port, wf,
timeout=self._timeout, bindAddress=self._bindAddress)
return wf._onConnection
except:
return defer.fail()
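# Illustrative usage sketch (not part of the original module); `myFactory` is an
# assumed, hypothetical IProtocolFactory provider:
#
#     from twisted.internet import reactor
#     endpoint = TCP4ClientEndpoint(reactor, 'example.com', 80)
#     d = endpoint.connect(myFactory)
#     d.addCallback(lambda protocol: protocol.transport.loseConnection())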
@implementer(interfaces.IStreamServerEndpoint)
class SSL4ServerEndpoint(object):
"""
SSL secured TCP server endpoint with an IPv4 configuration.
"""
def __init__(self, reactor, port, sslContextFactory,
backlog=50, interface=''):
"""
@param reactor: An L{IReactorSSL} provider.
@param port: The port number used for listening
@type port: int
@param sslContextFactory: An instance of
L{twisted.internet.ssl.ContextFactory}.
@param backlog: Size of the listen queue
@type backlog: int
@param interface: The hostname to bind to, defaults to '' (all)
@type interface: str
"""
self._reactor = reactor
self._port = port
self._sslContextFactory = sslContextFactory
self._backlog = backlog
self._interface = interface
def listen(self, protocolFactory):
"""
Implement L{IStreamServerEndpoint.listen} to listen for SSL on a
TCP socket.
"""
return defer.execute(self._reactor.listenSSL, self._port,
protocolFactory,
contextFactory=self._sslContextFactory,
backlog=self._backlog,
interface=self._interface)
@implementer(interfaces.IStreamClientEndpoint)
class SSL4ClientEndpoint(object):
"""
SSL secured TCP client endpoint with an IPv4 configuration
"""
def __init__(self, reactor, host, port, sslContextFactory,
timeout=30, bindAddress=None):
"""
@param reactor: An L{IReactorSSL} provider.
@param host: A hostname, used when connecting
@type host: str
@param port: The port number, used when connecting
@type port: int
@param sslContextFactory: SSL Configuration information as an instance
of L{twisted.internet.ssl.ContextFactory}.
@param timeout: Number of seconds to wait before assuming the
connection has failed.
@type timeout: int
@param bindAddress: A (host, port) tuple of local address to bind to,
or None.
@type bindAddress: tuple
"""
self._reactor = reactor
self._host = host
self._port = port
self._sslContextFactory = sslContextFactory
self._timeout = timeout
self._bindAddress = bindAddress
def connect(self, protocolFactory):
"""
Implement L{IStreamClientEndpoint.connect} to connect with SSL over
TCP.
"""
try:
wf = _WrappingFactory(protocolFactory)
self._reactor.connectSSL(
self._host, self._port, wf, self._sslContextFactory,
timeout=self._timeout, bindAddress=self._bindAddress)
return wf._onConnection
except:
return defer.fail()
@implementer(interfaces.IStreamServerEndpoint)
class UNIXServerEndpoint(object):
"""
UnixSocket server endpoint.
"""
def __init__(self, reactor, address, backlog=50, mode=0o666, wantPID=0):
"""
@param reactor: An L{IReactorUNIX} provider.
@param address: The path to the Unix socket file, used when listening
@param backlog: number of connections to allow in backlog.
@param mode: mode to set on the unix socket. This parameter is
deprecated. Permissions should be set on the directory which
contains the UNIX socket.
@param wantPID: If True, create a pidfile for the socket.
"""
self._reactor = reactor
self._address = address
self._backlog = backlog
self._mode = mode
self._wantPID = wantPID
def listen(self, protocolFactory):
"""
Implement L{IStreamServerEndpoint.listen} to listen on a UNIX socket.
"""
return defer.execute(self._reactor.listenUNIX, self._address,
protocolFactory,
backlog=self._backlog,
mode=self._mode,
wantPID=self._wantPID)
@implementer(interfaces.IStreamClientEndpoint)
class UNIXClientEndpoint(object):
"""
UnixSocket client endpoint.
"""
def __init__(self, reactor, path, timeout=30, checkPID=0):
"""
@param reactor: An L{IReactorUNIX} provider.
@param path: The path to the Unix socket file, used when connecting
@type path: str
@param timeout: Number of seconds to wait before assuming the
connection has failed.
@type timeout: int
@param checkPID: If True, check for a pid file to verify that a server
is listening.
@type checkPID: bool
"""
self._reactor = reactor
self._path = path
self._timeout = timeout
self._checkPID = checkPID
def connect(self, protocolFactory):
"""
Implement L{IStreamClientEndpoint.connect} to connect via a
UNIX Socket
"""
try:
wf = _WrappingFactory(protocolFactory)
self._reactor.connectUNIX(
self._path, wf,
timeout=self._timeout,
checkPID=self._checkPID)
return wf._onConnection
except:
return defer.fail()
@implementer(interfaces.IStreamServerEndpoint)
class AdoptedStreamServerEndpoint(object):
"""
An endpoint for listening on a file descriptor initialized outside of
Twisted.
@ivar _used: A C{bool} indicating whether this endpoint has been used to
listen with a factory yet. C{True} if so.
"""
_close = os.close
_setNonBlocking = staticmethod(fdesc.setNonBlocking)
def __init__(self, reactor, fileno, addressFamily):
"""
@param reactor: An L{IReactorSocket} provider.
@param fileno: An integer file descriptor corresponding to a listening
I{SOCK_STREAM} socket.
@param addressFamily: The address family of the socket given by
C{fileno}.
"""
self.reactor = reactor
self.fileno = fileno
self.addressFamily = addressFamily
self._used = False
def listen(self, factory):
"""
Implement L{IStreamServerEndpoint.listen} to start listening on, and
then close, C{self._fileno}.
"""
if self._used:
return defer.fail(error.AlreadyListened())
self._used = True
try:
self._setNonBlocking(self.fileno)
port = self.reactor.adoptStreamPort(
self.fileno, self.addressFamily, factory)
self._close(self.fileno)
except:
return defer.fail()
return defer.succeed(port)
def _parseTCP(factory, port, interface="", backlog=50):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a TCP(IPv4) stream endpoint into the structured arguments.
@param factory: the protocol factory being parsed, or C{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or C{NoneType}
@param port: the integer port number to bind
@type port: C{str}
@param interface: the interface IP to listen on
@param backlog: the length of the listen queue
@type backlog: C{str}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{IReactorTCP.listenTCP} (or, modulo argument 2, the factory, arguments
to L{TCP4ServerEndpoint}).
"""
return (int(port), factory), {'interface': interface,
'backlog': int(backlog)}
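# Illustrative example (added for clarity, not in the original source): the
# string port and backlog are coerced to integers, e.g.
#     _parseTCP(None, '8080', interface='127.0.0.1', backlog='10')
# returns ((8080, None), {'interface': '127.0.0.1', 'backlog': 10}).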
def _parseUNIX(factory, address, mode='666', backlog=50, lockfile=True):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a UNIX (AF_UNIX/SOCK_STREAM) stream endpoint into the
structured arguments.
@param factory: the protocol factory being parsed, or C{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or C{NoneType}
@param address: the pathname of the unix socket
@type address: C{str}
@param backlog: the length of the listen queue
@type backlog: C{str}
@param lockfile: A string '0' or '1', mapping to True and False
respectively. See the C{wantPID} argument to C{listenUNIX}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{IReactorUNIX.listenUNIX} (or, modulo argument 2, the factory,
arguments to L{UNIXServerEndpoint}).
"""
return (
(address, factory),
{'mode': int(mode, 8), 'backlog': int(backlog),
'wantPID': bool(int(lockfile))})
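# Illustrative example (added for clarity, not in the original source): the
# mode string is parsed as octal and lockfile maps to the wantPID boolean, so
#     _parseUNIX(None, '/var/run/finger', mode='660', lockfile='1')
# returns (('/var/run/finger', None),
#          {'mode': 0o660, 'backlog': 50, 'wantPID': True}).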
def _parseSSL(factory, port, privateKey="server.pem", certKey=None,
sslmethod=None, interface='', backlog=50):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for an SSL (over TCP/IPv4) stream endpoint into the structured
arguments.
@param factory: the protocol factory being parsed, or C{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or C{NoneType}
@param port: the integer port number to bind
@type port: C{str}
@param interface: the interface IP to listen on
@param backlog: the length of the listen queue
@type backlog: C{str}
@param privateKey: The file name of a PEM format private key file.
@type privateKey: C{str}
@param certKey: The file name of a PEM format certificate file.
@type certKey: C{str}
@param sslmethod: The string name of an SSL method, based on the name of a
constant in C{OpenSSL.SSL}. Must be one of: "SSLv23_METHOD",
"SSLv2_METHOD", "SSLv3_METHOD", "TLSv1_METHOD".
@type sslmethod: C{str}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{IReactorSSL.listenSSL} (or, modulo argument 2, the factory, arguments
to L{SSL4ServerEndpoint}).
"""
from twisted.internet import ssl
if certKey is None:
certKey = privateKey
kw = {}
if sslmethod is not None:
kw['method'] = getattr(ssl.SSL, sslmethod)
else:
kw['method'] = ssl.SSL.SSLv23_METHOD
certPEM = FilePath(certKey).getContent()
keyPEM = FilePath(privateKey).getContent()
privateCertificate = ssl.PrivateCertificate.loadPEM(certPEM + keyPEM)
cf = ssl.CertificateOptions(
privateKey=privateCertificate.privateKey.original,
certificate=privateCertificate.original,
**kw
)
return ((int(port), factory, cf),
{'interface': interface, 'backlog': int(backlog)})
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _StandardIOParser(object):
"""
Stream server endpoint string parser for the Standard I/O type.
@ivar prefix: See L{IStreamClientEndpointStringParser.prefix}.
"""
prefix = "stdio"
def _parseServer(self, reactor):
"""
Internal parser function for L{_parseServer} to convert the string
arguments into structured arguments for the L{StandardIOEndpoint}
@param reactor: Reactor for the endpoint
"""
return StandardIOEndpoint(reactor)
def parseStreamServer(self, reactor, *args, **kwargs):
# Redirect to another function (self._parseServer); this tricks
# zope.interface into believing the interface is correctly implemented.
return self._parseServer(reactor)
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _TCP6ServerParser(object):
"""
Stream server endpoint string parser for the TCP6ServerEndpoint type.
@ivar prefix: See L{IStreamClientEndpointStringParser.prefix}.
"""
prefix = "tcp6" # Used in _parseServer to identify the plugin with the endpoint type
def _parseServer(self, reactor, port, backlog=50, interface='::'):
"""
Internal parser function for L{_parseServer} to convert the string
arguments into structured arguments for the L{TCP6ServerEndpoint}
@param reactor: An L{IReactorTCP} provider.
@param port: The port number used for listening
@type port: int
@param backlog: Size of the listen queue
@type backlog: int
@param interface: The hostname to bind to
@type interface: str
"""
port = int(port)
backlog = int(backlog)
return TCP6ServerEndpoint(reactor, port, backlog, interface)
def parseStreamServer(self, reactor, *args, **kwargs):
# Redirect to another function (self._parseServer); this tricks
# zope.interface into believing the interface is correctly implemented.
return self._parseServer(reactor, *args, **kwargs)
_serverParsers = {"tcp": _parseTCP,
"unix": _parseUNIX,
"ssl": _parseSSL,
}
_OP, _STRING = range(2)
def _tokenize(description):
"""
Tokenize a strports string and yield each token.
@param description: a string as described by L{serverFromString} or
L{clientFromString}.
@return: an iterable of 2-tuples of (L{_OP} or L{_STRING}, string). Tuples
starting with L{_OP} will contain a second element of either ':' (i.e.
'next parameter') or '=' (i.e. 'assign parameter value'). For example,
the string 'hello:greet\=ing=world' would result in a generator
yielding these values::
_STRING, 'hello'
_OP, ':'
_STRING, 'greet=ing'
_OP, '='
_STRING, 'world'
"""
current = ''
ops = ':='
nextOps = {':': ':=', '=': ':'}
description = iter(description)
for n in description:
if n in ops:
yield _STRING, current
yield _OP, n
current = ''
ops = nextOps[n]
elif n == '\\':
current += next(description)
else:
current += n
yield _STRING, current
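# Illustrative example (added for clarity, not in the original source); the
# token types are shown symbolically (_OP == 0, _STRING == 1):
#     list(_tokenize('hello:greet\\=ing=world'))
# yields [(_STRING, 'hello'), (_OP, ':'), (_STRING, 'greet=ing'),
#         (_OP, '='), (_STRING, 'world')].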
def _parse(description):
"""
Convert a description string into a list of positional and keyword
parameters, using logic vaguely like what Python does.
@param description: a string as described by L{serverFromString} or
L{clientFromString}.
@return: a 2-tuple of C{(args, kwargs)}, where 'args' is a list of all
':'-separated C{str}s not containing an '=' and 'kwargs' is a map of
all C{str}s which do contain an '='. For example, the result of
C{_parse('a:b:d=1:c')} would be C{(['a', 'b', 'c'], {'d': '1'})}.
"""
args, kw = [], {}
def add(sofar):
if len(sofar) == 1:
args.append(sofar[0])
else:
kw[sofar[0]] = sofar[1]
sofar = ()
for (type, value) in _tokenize(description):
if type is _STRING:
sofar += (value,)
elif value == ':':
add(sofar)
sofar = ()
add(sofar)
return args, kw
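# Illustrative example (added for clarity, not in the original source),
# matching the docstring above:
#     >>> _parse('a:b:d=1:c')
#     (['a', 'b', 'c'], {'d': '1'})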
# Mappings from description "names" to endpoint constructors.
_endpointServerFactories = {
'TCP': TCP4ServerEndpoint,
'SSL': SSL4ServerEndpoint,
'UNIX': UNIXServerEndpoint,
}
_endpointClientFactories = {
'TCP': TCP4ClientEndpoint,
'SSL': SSL4ClientEndpoint,
'UNIX': UNIXClientEndpoint,
}
_NO_DEFAULT = object()
def _parseServer(description, factory, default=None):
"""
Parse a strports description into a 3-tuple of (name or plugin, arguments, keyword arguments).
@param description: A description in the format explained by
L{serverFromString}.
@type description: C{str}
@param factory: A 'factory' argument; this is left-over from
twisted.application.strports, it's not really used.
@type factory: L{IProtocolFactory} or L{None}
@param default: Deprecated argument, specifying the default parser mode to
use for unqualified description strings (those which do not have a ':'
and prefix).
@type default: C{str} or C{NoneType}
@return: a 3-tuple of (plugin or name, arguments, keyword arguments)
"""
args, kw = _parse(description)
if not args or (len(args) == 1 and not kw):
deprecationMessage = (
"Unqualified strport description passed to 'service'."
"Use qualified endpoint descriptions; for example, 'tcp:%s'."
% (description,))
if default is None:
default = 'tcp'
warnings.warn(
deprecationMessage, category=DeprecationWarning, stacklevel=4)
elif default is _NO_DEFAULT:
raise ValueError(deprecationMessage)
# If the default has been otherwise specified, the user has already
# been warned.
args[0:0] = [default]
endpointType = args[0]
parser = _serverParsers.get(endpointType)
if parser is None:
# If the required parser is not found in _serverParsers, check if
# a plugin exists for the endpointType
for plugin in getPlugins(IStreamServerEndpointStringParser):
if plugin.prefix == endpointType:
return (plugin, args[1:], kw)
raise ValueError("Unknown endpoint type: '%s'" % (endpointType,))
return (endpointType.upper(),) + parser(factory, *args[1:], **kw)
def _serverFromStringLegacy(reactor, description, default):
"""
Underlying implementation of L{serverFromString} which avoids exposing the
deprecated 'default' argument to anything but L{strports.service}.
"""
nameOrPlugin, args, kw = _parseServer(description, None, default)
if type(nameOrPlugin) is not str:
plugin = nameOrPlugin
return plugin.parseStreamServer(reactor, *args, **kw)
else:
name = nameOrPlugin
# Chop out the factory.
args = args[:1] + args[2:]
return _endpointServerFactories[name](reactor, *args, **kw)
def serverFromString(reactor, description):
"""
Construct a stream server endpoint from an endpoint description string.
The format for server endpoint descriptions is a simple string. It is a
prefix naming the type of endpoint, then a colon, then the arguments for
that endpoint.
For example, you can call it like this to create an endpoint that will
listen on TCP port 80::
serverFromString(reactor, "tcp:80")
Additional arguments may be specified as keywords, separated with colons.
For example, you can specify the interface for a TCP server endpoint to
bind to like this::
serverFromString(reactor, "tcp:80:interface=127.0.0.1")
SSL server endpoints may be specified with the 'ssl' prefix, and the
private key and certificate files may be specified by the C{privateKey} and
C{certKey} arguments::
serverFromString(reactor, "ssl:443:privateKey=key.pem:certKey=crt.pem")
If a private key file name (C{privateKey}) isn't provided, a "server.pem"
file is assumed to exist which contains the private key. If the certificate
file name (C{certKey}) isn't provided, the private key file is assumed to
contain the certificate as well.
You may escape colons in arguments with a backslash, which you will need to
use if you want to specify a full pathname argument on Windows::
serverFromString(reactor,
"ssl:443:privateKey=C\\:/key.pem:certKey=C\\:/cert.pem")
Finally, the 'unix' prefix may be used to specify a filesystem UNIX socket,
optionally with a 'mode' argument to specify the mode of the socket file
created by C{listen}::
serverFromString(reactor, "unix:/var/run/finger")
serverFromString(reactor, "unix:/var/run/finger:mode=660")
This function is also extensible; new endpoint types may be registered as
L{IStreamServerEndpointStringParser} plugins. See that interface for more
information.
@param reactor: The server endpoint will be constructed with this reactor.
@param description: The strports description to parse.
@return: A new endpoint which can be used to listen with the parameters
given by C{description}.
@rtype: L{IStreamServerEndpoint<twisted.internet.interfaces.IStreamServerEndpoint>}
@raise ValueError: when the 'description' string cannot be parsed.
@since: 10.2
"""
return _serverFromStringLegacy(reactor, description, _NO_DEFAULT)
def quoteStringArgument(argument):
"""
Quote an argument to L{serverFromString} and L{clientFromString}. Since
arguments are separated with colons and colons are escaped with
backslashes, some care is necessary. If, for example, you have a pathname,
you may be tempted to interpolate it into a string like this::
serverFromString("ssl:443:privateKey=%s" % (myPathName,))
This may appear to work, but will have portability issues (Windows
pathnames, for example). Usually you should just construct the appropriate
endpoint type rather than interpolating strings, which in this case would
be L{SSL4ServerEndpoint}. There are some use-cases where you may need to
generate such a string, though; for example, a tool to manipulate a
configuration file which has strports descriptions in it. To be correct in
those cases, do this instead::
serverFromString("ssl:443:privateKey=%s" %
(quoteStringArgument(myPathName),))
@param argument: The part of the endpoint description string you want to
pass through.
@type argument: C{str}
@return: The quoted argument.
@rtype: C{str}
"""
return argument.replace('\\', '\\\\').replace(':', '\\:')
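# Illustrative example (added for clarity, not in the original source):
# backslashes are doubled first, then colons are escaped.
#     >>> quoteStringArgument('C:\\key.pem')
#     'C\\:\\\\key.pem'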
def _parseClientTCP(*args, **kwargs):
"""
Perform any argument value coercion necessary for TCP client parameters.
Valid positional arguments to this function are host and port.
Valid keyword arguments to this function are all L{IReactorTCP.connectTCP}
arguments.
@return: The coerced values as a C{dict}.
"""
if len(args) == 2:
kwargs['port'] = int(args[1])
kwargs['host'] = args[0]
elif len(args) == 1:
if 'host' in kwargs:
kwargs['port'] = int(args[0])
else:
kwargs['host'] = args[0]
try:
kwargs['port'] = int(kwargs['port'])
except KeyError:
pass
try:
kwargs['timeout'] = int(kwargs['timeout'])
except KeyError:
pass
return kwargs
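# Illustrative example (added for clarity, not in the original source):
#     _parseClientTCP('www.example.com', '80', timeout='5')
# returns {'host': 'www.example.com', 'port': 80, 'timeout': 5}
# (dict key order may vary).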
def _loadCAsFromDir(directoryPath):
"""
Load certificate-authority certificate objects in a given directory.
@param directoryPath: a L{FilePath} pointing at a directory to load .pem
files from.
@return: a C{list} of L{OpenSSL.crypto.X509} objects.
"""
from twisted.internet import ssl
caCerts = {}
for child in directoryPath.children():
if not child.basename().split('.')[-1].lower() == 'pem':
continue
try:
data = child.getContent()
except IOError:
# Permission denied, corrupt disk, we don't care.
continue
try:
theCert = ssl.Certificate.loadPEM(data)
except ssl.SSL.Error:
# Duplicate certificate, invalid certificate, etc. We don't care.
pass
else:
caCerts[theCert.digest()] = theCert.original
return caCerts.values()
def _parseClientSSL(*args, **kwargs):
"""
Perform any argument value coercion necessary for SSL client parameters.
Valid keyword arguments to this function are all L{IReactorSSL.connectSSL}
arguments except for C{contextFactory}. Instead, C{certKey} (the path name
of the certificate file) and C{privateKey} (the path name of the private key
associated with the certificate) are accepted and used to construct a
context factory.
Valid positional arguments to this function are host and port.
@param caCertsDir: The one parameter which is not part of
L{IReactorSSL.connectSSL}'s signature, this is a path name used to
construct a list of certificate authority certificates. The directory
will be scanned for files ending in C{.pem}, all of which will be
considered valid certificate authorities for this connection.
@type caCertsDir: C{str}
@return: The coerced values as a C{dict}.
"""
from twisted.internet import ssl
kwargs = _parseClientTCP(*args, **kwargs)
certKey = kwargs.pop('certKey', None)
privateKey = kwargs.pop('privateKey', None)
caCertsDir = kwargs.pop('caCertsDir', None)
if certKey is not None:
certx509 = ssl.Certificate.loadPEM(
FilePath(certKey).getContent()).original
else:
certx509 = None
if privateKey is not None:
privateKey = ssl.PrivateCertificate.loadPEM(
FilePath(privateKey).getContent()).privateKey.original
else:
privateKey = None
if caCertsDir is not None:
verify = True
caCerts = _loadCAsFromDir(FilePath(caCertsDir))
else:
verify = False
caCerts = None
kwargs['sslContextFactory'] = ssl.CertificateOptions(
method=ssl.SSL.SSLv23_METHOD,
certificate=certx509,
privateKey=privateKey,
verify=verify,
caCerts=caCerts
)
return kwargs
def _parseClientUNIX(*args, **kwargs):
"""
Perform any argument value coercion necessary for UNIX client parameters.
Valid keyword arguments to this function are all L{IReactorUNIX.connectUNIX}
keyword arguments except for C{checkPID}. Instead, C{lockfile} is accepted
and has the same meaning. Also C{path} is used instead of C{address}.
Valid positional arguments to this function are C{path}.
@return: The coerced values as a C{dict}.
"""
if len(args) == 1:
kwargs['path'] = args[0]
try:
kwargs['checkPID'] = bool(int(kwargs.pop('lockfile')))
except KeyError:
pass
try:
kwargs['timeout'] = int(kwargs['timeout'])
except KeyError:
pass
return kwargs
_clientParsers = {
'TCP': _parseClientTCP,
'SSL': _parseClientSSL,
'UNIX': _parseClientUNIX,
}
def clientFromString(reactor, description):
"""
Construct a client endpoint from a description string.
Client description strings are much like server description strings,
although they take all of their arguments as keywords, aside from host and
port.
You can create a TCP client endpoint with the 'host' and 'port' arguments,
like so::
clientFromString(reactor, "tcp:host=www.example.com:port=80")
or, without specifying host and port keywords::
clientFromString(reactor, "tcp:www.example.com:80")
Or you can specify only one or the other, as in the following 2 examples::
clientFromString(reactor, "tcp:host=www.example.com:80")
clientFromString(reactor, "tcp:www.example.com:port=80")
or an SSL client endpoint with those arguments, plus the arguments used by
the server SSL, for a client certificate::
clientFromString(reactor, "ssl:web.example.com:443:"
"privateKey=foo.pem:certKey=foo.pem")
to specify your certificate trust roots, you can identify a directory with
PEM files in it with the C{caCertsDir} argument::
clientFromString(reactor, "ssl:host=web.example.com:port=443:"
"caCertsDir=/etc/ssl/certs")
You can create a UNIX client endpoint with the 'path' argument and optional
'lockfile' and 'timeout' arguments::
clientFromString(reactor, "unix:path=/var/foo/bar:lockfile=1:timeout=9")
or, with the path as a positional argument with or without optional
arguments as in the following 2 examples::
clientFromString(reactor, "unix:/var/foo/bar")
clientFromString(reactor, "unix:/var/foo/bar:lockfile=1:timeout=9")
This function is also extensible; new endpoint types may be registered as
L{IStreamClientEndpointStringParser} plugins. See that interface for more
information.
@param reactor: The client endpoint will be constructed with this reactor.
@param description: The strports description to parse.
@return: A new endpoint which can be used to connect with the parameters
given by C{description}.
@rtype: L{IStreamClientEndpoint<twisted.internet.interfaces.IStreamClientEndpoint>}
@since: 10.2
"""
args, kwargs = _parse(description)
aname = args.pop(0)
name = aname.upper()
for plugin in getPlugins(IStreamClientEndpointStringParser):
if plugin.prefix.upper() == name:
return plugin.parseStreamClient(*args, **kwargs)
if name not in _clientParsers:
raise ValueError("Unknown endpoint type: %r" % (aname,))
kwargs = _clientParsers[name](*args, **kwargs)
return _endpointClientFactories[name](reactor, **kwargs)
def connectProtocol(endpoint, protocol):
"""
Connect a protocol instance to an endpoint.
This allows using a client endpoint without having to create a factory.
@param endpoint: A client endpoint to connect to.
@param protocol: A protocol instance.
@return: The result of calling C{connect} on the endpoint, i.e. a
L{Deferred} that will fire with the protocol when connected, or an
appropriate error.
"""
class OneShotFactory(Factory):
def buildProtocol(self, addr):
return protocol
return endpoint.connect(OneShotFactory())
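# Illustrative usage sketch (not part of the original module); `EchoClient` is
# an assumed, hypothetical Protocol subclass:
#
#     from twisted.internet import reactor
#     endpoint = TCP4ClientEndpoint(reactor, 'example.com', 80)
#     d = connectProtocol(endpoint, EchoClient())
#     d.addCallback(lambda p: p.transport.write(b'hello'))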
| bsd-3-clause |
frmichel/vo-support-tools | SE/cleanup-se/dump-se-files.py | 1 | 9938 | #!/usr/bin/python
# This script lists the files of an SE using the gfal2 python api.
# The SE whose files are listed is the one corresponding to the url given as argument.
# This script writes its output to the file whose name is given as argument, or to stdout if no filename is specified.
# The output dump file provides the following information for each file:
# - the type: file or directory
# - the creation date
# - the last modification date
# - the size
# - the full file path
#
# Fault tolerance mechanisms:
# - in case a query to the SE fails, a maximum of 4 retries is performed, with 30 seconds wait in between.
# Only after the 5th failed attempt shall we report the error.
# - in case more than 100 failures are reported, we give up the process with an error status.
import sys
import os
import commands
import csv
import re
import gfal2
import stat
import datetime
import time
from operator import itemgetter, attrgetter
from optparse import OptionParser
optParser = OptionParser(version = "%prog 1.0", description = """This script lists the files of an SE using the gfal2 python api.
The SE whose files are listed is the one corresponding to the url given as argument.
This script writes its output to the file whose name is given as argument, or to stdout if no filename is specified.
The output dump file provides the following information for each file:
the type: file or directory, the creation date, the last modification date,
the size, the full file path.""")
optParser.add_option("--url", action = "store", dest = "url", default = '',
help = "The url of the SE to analyse. Mandatory.")
optParser.add_option("--output-file", action = "store", dest = "output_file", default = '',
help = "output file to write results. Defaults to stdout")
# -------------------------------------------------------------------------
# Parameters check
# -------------------------------------------------------------------------
(options, args) = optParser.parse_args()
# Check options validity
if options.url == '':
optParser.error("Option --url is mandatory.")
sys.exit(1)
outputToFile = options.output_file != ''
# Gfal2 context global variable
global context
context = gfal2.creat_context()
# Max number of retries when an exception is raised
global MAX_GFAL2_REQUEST_TRY
MAX_GFAL2_REQUEST_TRY = 5
# Waiting time in seconds when retry
global MAX_RETRY_WAITING_TIME
MAX_RETRY_WAITING_TIME = 30
# Number of errors caught during the process, and max number of errors caught overall
global MAX_ERRORS, errorCount
MAX_ERRORS = 100
errorCount = 0
# ---------------------------------------------------------------
# Method that formats a stat.st_mode item into `ls -l`-like permissions
# Parameters:
# @param st_mode: permissions to format
# ---------------------------------------------------------------
def mode_to_rights(st_mode) :
# Variable containing the result permission string
permstr = ''
# Set the file type:
# d for directory
# l for symbolic link
# - for files
if stat.S_ISDIR(st_mode):
permstr += 'd'
else:
if stat.S_ISLNK(st_mode):
permstr += 'l'
else:
permstr += '-'
# Loop over the S_IRUSR, S_IWUSR, etc. attributes of the st_mode item
# to assign the r, w or x permission for user, group and other
usertypes = ['USR', 'GRP', 'OTH']
for usertype in usertypes:
perm_types = ['R', 'W', 'X']
for permtype in perm_types:
perm = getattr(stat, 'S_I%s%s' % (permtype, usertype))
if st_mode & perm:
permstr += permtype.lower()
else:
permstr += '-'
# Return the permissions string
return permstr
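# Illustrative example (added for clarity, not in the original script): a
# directory with 0o755 permissions renders like `ls -l`:
#     >>> mode_to_rights(stat.S_IFDIR | 0o755)
#     'drwxr-xr-x'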
# ------------------------------------------------------------------------------------
# Recursive method that goes through the file tree of the given url.
# Parameters:
# @param url: the current url, this must be a directory, not a simple file
# @param f: the output file descriptor
# if f == '' then output is written to stdout
# The recursion algorithm is:
# 1. list the entries of the url directory
# 2. build the map (directory name => stat object)
# 3. for each entry of the map whose type is file: print the file
# 4. for each entry of the map whose type is directory:
# 4.1. print the directory
# 4.2. recursively call the algorithm on url: {current url}/{current entry directory}
# N.B. The recursion stops when the url directory contains no directory
# i.e. contains only files.
#
# The output generated contains lines with the following structure:
# %file permissions% %creation date% %last modification date% %file size% %file full url%
#
# date format is YYYY-MM-DD for all dates
# ------------------------------------------------------------------------------------
def ls_rec(url,f) :
global errorCount
# List the content of the current directory
dir = ''
isOpSuccess = False
nbAttempts = 1
while not isOpSuccess:
try:
dir = context.opendir(url)
isOpSuccess = True
except Exception, e:
nbAttempts += 1
if nbAttempts > MAX_GFAL2_REQUEST_TRY:
print 'Exception caught when calling opendir on url: ' + url + '. Message: ', e
errorCount += 1
if errorCount >= MAX_ERRORS:
print 'Too many errors (' + str(MAX_ERRORS) + ') caught. Giving up process.'
sys.exit(1)
return
else:
time.sleep(MAX_RETRY_WAITING_TIME)
# Build a map (directory name => stat object)
entries_map = {}
# Check each entry of the current directory
while True:
isOpSuccess = False
nbAttempts = 1
dirent = st = ''
while not isOpSuccess:
try:
(dirent, st) = dir.readpp()
isOpSuccess = True
except Exception, e:
nbAttempts += 1
if nbAttempts > MAX_GFAL2_REQUEST_TRY:
print 'Exception caught when calling readpp on url: ' + url + '. Message: ', e
# We stop looking for this entry but will continue to check other entries of the directory
errorCount += 1
if errorCount >= MAX_ERRORS:
print 'Too many errors (' + str(MAX_ERRORS) + ') caught. Giving up process.'
sys.exit(1)
break
else:
time.sleep(MAX_RETRY_WAITING_TIME)
if isOpSuccess:
# Stop if we reached the last entry
if dirent is None:
break
# Current entry is valid, get its stat item and continue
entries_map[dirent.d_name] = st
# End of the while loop to read each entry of the current directory
# Look for file entries and print them
for (entry_key, entry_st) in entries_map.iteritems():
# Check if entry is a file
if not stat.S_ISDIR(entry_st.st_mode):
f.write( mode_to_rights(entry_st.st_mode) + ' ' +
str(datetime.datetime.fromtimestamp(int(entry_st.st_ctime)).strftime('%Y-%m-%d')) + ' ' +
str(datetime.datetime.fromtimestamp(int(entry_st.st_mtime)).strftime('%Y-%m-%d')) + ' ' +
str(entry_st.st_size) + ' ' + url + '/' + entry_key + '\n')
# Look for directory entries, for each print it then recursively call the function on this directory
for (entry_key,entry_st) in entries_map.iteritems():
# check entry is a directory
if stat.S_ISDIR(entry_st.st_mode):
# print the directory line
f.write( mode_to_rights(entry_st.st_mode) + ' ' +
str(datetime.datetime.fromtimestamp(int(entry_st.st_ctime)).strftime('%Y-%m-%d')) + ' ' +
str(datetime.datetime.fromtimestamp(int(entry_st.st_mtime)).strftime('%Y-%m-%d')) + ' ' +
str(entry_st.st_size) + ' ' + url +
'/' + entry_key + '\n')
# Recursively call the method on current entry directory
ls_rec(url + '/' + entry_key, f)
# End of function ls_rec
# ---------------------------------------------------------------------------------------
# Main block:
# ---------------------------------------------------------------------------------------
try:
# Build the file descriptor if specified in argument
f = ''
if outputToFile:
try:
f = open(options.output_file, 'w')
except Exception,e:
print 'Exception when opening output file: ' + options.output_file + ' Message: ', e
sys.exit(1)
else:
f = sys.stdout
# Get stat item of the url given as argument
st = ''
isLStatSuccess = False
attemptLStat = 1
while not isLStatSuccess:
try:
st = context.lstat(options.url)
isLStatSuccess = True
except Exception, e:
attemptLStat += 1
if attemptLStat > MAX_GFAL2_REQUEST_TRY:
print 'Exception caught in lstat on url: ' + options.url + '. Message: ', e
sys.exit(1)
else:
time.sleep(MAX_RETRY_WAITING_TIME)
# Print the url
f.write( mode_to_rights(st.st_mode) + ' ' +
str(datetime.datetime.fromtimestamp(int(st.st_ctime)).strftime('%Y-%m-%d')) + ' ' +
str(datetime.datetime.fromtimestamp(int(st.st_mtime)).strftime('%Y-%m-%d')) + ' ' +
str(st.st_size) + ' ' +
options.url + '\n')
# Start the recursive process
ls_rec(options.url,f)
# Final cleanup
if outputToFile:
f.close()
except Exception,e:
print 'Unexpected exception caught when computing url: ' + options.url + '. Message: ', e
sys.exit(1)
sys.exit(0)
| mit |
CapOM/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/boto/boto/route53/healthcheck.py | 151 | 5824 | # Copyright (c) 2014 Tellybug, Matt Millar
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
From http://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHealthCheck.html
POST /2013-04-01/healthcheck HTTP/1.1
<?xml version="1.0" encoding="UTF-8"?>
<CreateHealthCheckRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<CallerReference>unique description</CallerReference>
<HealthCheckConfig>
<IPAddress>IP address of the endpoint to check</IPAddress>
<Port>port on the endpoint to check</Port>
<Type>HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP</Type>
<ResourcePath>path of the file that
you want Amazon Route 53 to request</ResourcePath>
<FullyQualifiedDomainName>domain name of the
endpoint to check</FullyQualifiedDomainName>
<SearchString>if Type is HTTP_STR_MATCH or HTTPS_STR_MATCH,
the string to search for in the response body
from the specified resource</SearchString>
<RequestInterval>10 | 30</RequestInterval>
<FailureThreshold>integer between 1 and 10</FailureThreshold>
</HealthCheckConfig>
</CreateHealthCheckRequest>
"""
class HealthCheck(object):
"""An individual health check"""
POSTXMLBody = """
<HealthCheckConfig>
%(ip_addr_part)s
<Port>%(port)s</Port>
<Type>%(type)s</Type>
<ResourcePath>%(resource_path)s</ResourcePath>
%(fqdn_part)s
%(string_match_part)s
%(request_interval)s
<FailureThreshold>%(failure_threshold)s</FailureThreshold>
</HealthCheckConfig>
"""
XMLIpAddrPart = """<IPAddress>%(ip_addr)s</IPAddress>"""
XMLFQDNPart = """<FullyQualifiedDomainName>%(fqdn)s</FullyQualifiedDomainName>"""
XMLStringMatchPart = """<SearchString>%(string_match)s</SearchString>"""
XMLRequestIntervalPart = """<RequestInterval>%(request_interval)d</RequestInterval>"""
valid_request_intervals = (10, 30)
def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30, failure_threshold=3):
"""
HealthCheck object
:type ip_addr: str
:param ip_addr: Optional IP Address
:type port: int
:param port: Port to check
:type hc_type: str
:param hc_type: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
:type resource_path: str
:param resource_path: Path to check
:type fqdn: str
:param fqdn: domain name of the endpoint to check
:type string_match: str
:param string_match: if hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the response body from the specified resource
:type request_interval: int
:param request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
:type failure_threshold: int
:param failure_threshold: The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa.
"""
self.ip_addr = ip_addr
self.port = port
self.hc_type = hc_type
self.resource_path = resource_path
self.fqdn = fqdn
self.string_match = string_match
self.failure_threshold = failure_threshold
if request_interval in self.valid_request_intervals:
self.request_interval = request_interval
else:
raise AttributeError(
"Valid values for request_interval are: %s" %
",".join(str(i) for i in self.valid_request_intervals))
if failure_threshold < 1 or failure_threshold > 10:
raise AttributeError(
'Valid values for failure_threshold are 1 - 10.')
def to_xml(self):
params = {
'ip_addr_part': '',
'port': self.port,
'type': self.hc_type,
'resource_path': self.resource_path,
'fqdn_part': "",
'string_match_part': "",
'request_interval': (self.XMLRequestIntervalPart %
{'request_interval': self.request_interval}),
'failure_threshold': self.failure_threshold,
}
if self.fqdn is not None:
params['fqdn_part'] = self.XMLFQDNPart % {'fqdn': self.fqdn}
if self.ip_addr:
params['ip_addr_part'] = self.XMLIpAddrPart % {'ip_addr': self.ip_addr}
if self.string_match is not None:
params['string_match_part'] = self.XMLStringMatchPart % {'string_match': self.string_match}
return self.POSTXMLBody % params
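# Illustrative usage sketch (added for clarity, not in the original module);
# the IP address, port and path below are made-up example values:
#
#     hc = HealthCheck(ip_addr='192.0.2.1', port=80, hc_type='HTTP',
#                      resource_path='/health')
#     xml_body = hc.to_xml()  # XML fragment for a CreateHealthCheck request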
| bsd-3-clause |
telefonicaid/fiware-livedemoapp | package/location2cb/stop_vans.py | 1 | 1067 | #!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2013 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE LiveDemo App
#
# FI-WARE LiveDemo App is free software: you can redistribute it and/or modify it under the terms
# of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# FI-WARE LiveDemo App is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License along with FI-WARE LiveDemo App. If not,
# see http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License please contact with fermin at tid dot es
__author__ = 'sergg'
import locs_sim as locs
def main():
locs.stop_scenario()
if __name__ == '__main__':
main()
| agpl-3.0 |
global-humanitarians-unite/ghu | ghu_web/ghu_main/forms.py | 1 | 1204 | from django import forms
from django.core.exceptions import ValidationError
from ghu_global.models import User
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.contrib.auth.password_validation import validate_password
def unused_username(username):
if User.objects.filter(username=username).count() > 0:
raise ValidationError('Username {} is already in use.'.format(username))
class RegisterForm(forms.Form):
username = forms.CharField(max_length=User._meta.get_field('username').max_length,
validators=(UnicodeUsernameValidator(), unused_username))
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput, validators=(validate_password,))
def save(self):
user = User.objects.create_user(username=self.cleaned_data['username'],
email=self.cleaned_data['email'],
password=self.cleaned_data['password'],
is_active=False)
return user
class SearchForm(forms.Form):
search_terms = forms.CharField(label = 'Search', max_length = 100, required = False)
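# Illustrative usage sketch (added for clarity, not in the original module),
# e.g. from a hypothetical registration view:
#
#     form = RegisterForm(data={'username': 'alice',
#                               'email': 'alice@example.com',
#                               'password': 'a-sufficiently-strong-pass'})
#     if form.is_valid():
#         user = form.save()  # created inactive pending activation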
| apache-2.0 |
okwow123/djangol2 | example/env/lib/python2.7/site-packages/django/conf/locale/ml/formats.py | 1007 | 1815 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| mit |
peguin40/zulip | zerver/views/pointer.py | 11 | 1988 | from __future__ import absolute_import
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from six import text_type
from zerver.decorator import to_non_negative_int
from zerver.lib.actions import do_update_pointer
from zerver.lib.request import has_request_variables, JsonableError, REQ
from zerver.lib.response import json_success
from zerver.lib.utils import statsd, generate_random_token
from zerver.models import UserProfile, Message, UserMessage
def get_pointer_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request, user_profile,
pointer=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
if pointer <= user_profile.pointer:
return json_success()
try:
UserMessage.objects.get(
user_profile=user_profile,
message__id=pointer
)
except UserMessage.DoesNotExist:
raise JsonableError(_("Invalid message ID"))
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, pointer, update_flags=update_flags)
return json_success()
def generate_client_id():
# type: () -> text_type
return generate_random_token(32)
def get_profile_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1)
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
| apache-2.0 |
js0701/chromium-crosswalk | tools/telemetry/third_party/modulegraph/modulegraph_tests/test_util.py | 23 | 1801 | import unittest
import encodings
import encodings.aliases
from modulegraph import util
import sys
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
class TestUtil (unittest.TestCase):
def test_imp_find_module(self):
fn = util.imp_find_module('encodings.aliases')[1]
self.assertTrue(encodings.aliases.__file__.startswith(fn))
def test_imp_walk(self):
imps = list(util.imp_walk('encodings.aliases'))
self.assertEqual(len(imps), 2)
self.assertEqual(imps[0][0], 'encodings')
self.assertTrue(encodings.__file__.startswith(imps[0][1][1]))
self.assertEqual(imps[1][0], 'aliases')
self.assertTrue(encodings.aliases.__file__.startswith(imps[1][1][1]))
# Close all files, avoid warning by unittest
for i in imps:
if i[1][0] is not None:
i[1][0].close()
def test_guess_encoding(self):
fp = BytesIO(b"# coding: utf-8")
self.assertEqual(util.guess_encoding(fp), "utf-8")
fp = BytesIO(b"\n# coding: utf-8")
self.assertEqual(util.guess_encoding(fp), "utf-8")
fp = BytesIO(b"# coding: latin-1")
self.assertEqual(util.guess_encoding(fp), "latin-1")
fp = BytesIO(b"\n# coding: latin-1")
self.assertEqual(util.guess_encoding(fp), "latin-1")
fp = BytesIO(b"#!/usr/bin/env/python\n# vim: set fileencoding=latin-1 :")
self.assertEqual(util.guess_encoding(fp), "latin-1")
fp = BytesIO(b"\n\n\n# coding: latin-1")
if sys.version_info[0] == 2:
self.assertEqual(util.guess_encoding(fp), "ascii")
else:
self.assertEqual(util.guess_encoding(fp), "utf-8")
del fp
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
ramr/kubernetes | cluster/juju/charms/trusty/kubernetes/hooks/kubernetes_installer.py | 213 | 2518 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from path import path
class KubernetesInstaller():
"""
This class contains the logic needed to install kubernetes binary files.
"""
def __init__(self, arch, version, master, output_dir):
""" Gather the required variables for the install. """
# The kubernetes charm needs certain commands to be aliased.
self.aliases = {'kube-proxy': 'proxy',
'kubelet': 'kubelet'}
self.arch = arch
self.version = version
self.master = master
self.output_dir = output_dir
def download(self):
""" Download the kuberentes binaries from the kubernetes master. """
url = 'http://{0}/kubernetes/{1}/local/bin/linux/{2}'.format(
self.master, self.version, self.arch)
if not self.output_dir.isdir():
self.output_dir.makedirs_p()
for key in self.aliases:
uri = '{0}/{1}'.format(url, key)
destination = self.output_dir / key
wget = 'wget -nv {0} -O {1}'.format(uri, destination)
print(wget)
output = subprocess.check_output(wget.split())
print(output)
destination.chmod(0o755)
def install(self, install_dir=path('/usr/local/bin')):
""" Create links to the binary files to the install directory. """
if not install_dir.isdir():
install_dir.makedirs_p()
# Create the symbolic links to the real kubernetes binaries.
for key, value in self.aliases.iteritems():
target = self.output_dir / key
if target.exists():
link = install_dir / value
if link.exists():
link.remove()
target.symlink(link)
else:
print('Error target file {0} does not exist.'.format(target))
exit(1)
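# Illustrative usage sketch (added for clarity, not in the original charm);
# the master address and version below are made-up example values:
#
#     installer = KubernetesInstaller('amd64', 'v1.0.6', '10.1.2.3:8080',
#                                     path('/opt/kubernetes/bin'))
#     installer.download()  # fetch kubelet and kube-proxy from the master
#     installer.install()   # symlink them into /usr/local/bin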
| apache-2.0 |
MihaiMoldovanu/ansible | test/integration/targets/aws_lambda/files/mini_lambda.py | 139 | 1237 | from __future__ import print_function
import json
import os
def handler(event, context):
"""
The handler function is the function which gets called each time
the lambda is run.
"""
# Printing goes to the CloudWatch log, allowing us to debug the lambda simply
# by finding the log entry.
print("got event:\n" + json.dumps(event))
# if the name parameter isn't present this can throw an exception
# which will result in an amazon chosen failure from the lambda
# which can be completely fine.
name = event["name"]
# we can use environment variables as part of the configuration of the lambda
# which can change the behaviour of the lambda without needing a new upload
extra = os.environ.get("EXTRA_MESSAGE")
if extra is not None and len(extra) > 0:
greeting = "hello {0}. {1}".format(name, extra)
else:
greeting = "hello " + name
return {"message": greeting}
def main():
"""
This main function will normally never be called during normal
lambda use. It is here for testing the lambda program only.
"""
event = {"name": "james"}
context = None
print(handler(event, context))
if __name__ == '__main__':
main()
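# Illustrative example (added for clarity, not in the original file): with
# EXTRA_MESSAGE unset, handler({"name": "world"}, None) returns
# {"message": "hello world"}.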
| gpl-3.0 |
duncanmmacleod/gwpy | gwpy/segments/tests/test_segments.py | 3 | 5648 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for :mod:`gwpy.segments.segments`
"""
import pytest
import h5py
from astropy.table import Table
from ...testing.utils import (
assert_segmentlist_equal, assert_table_equal, TemporaryFilename)
from ...time import LIGOTimeGPS
from .. import (Segment, SegmentList)
# -- Segment ------------------------------------------------------------------
class TestSegment(object):
TEST_CLASS = Segment
@classmethod
@pytest.fixture()
def segment(cls):
return cls.TEST_CLASS(1, 2)
def test_start_end(self, segment):
assert segment.start == 1.
assert segment.end == 2.
def test_repr(self, segment):
assert repr(segment) == 'Segment(1, 2)'
def test_str(self, segment):
assert str(segment) == '[1 ... 2)'
# -- SegmentList --------------------------------------------------------------
class TestSegmentList(object):
TEST_CLASS = SegmentList
ENTRY_CLASS = Segment
@classmethod
def create(cls, *segments):
return cls.TEST_CLASS([cls.ENTRY_CLASS(a, b) for a, b in segments])
@classmethod
@pytest.fixture()
def segmentlist(cls):
return cls.create((1, 2), (3, 4), (4, 6), (8, 10))
# -- test methods ---------------------------
def test_extent(self, segmentlist):
"""Test `gwpy.segments.SegmentList.extent returns the right type
"""
extent = segmentlist.extent()
assert isinstance(extent, self.ENTRY_CLASS)
assert extent == Segment(1, 10)
def test_coalesce(self):
segmentlist = self.create((1, 2), (3, 4), (4, 5))
c = segmentlist.coalesce()
assert c is segmentlist
assert_segmentlist_equal(c, [(1, 2), (3, 5)])
assert isinstance(c[0], self.ENTRY_CLASS)
def test_to_table(self, segmentlist):
segtable = segmentlist.to_table()
assert_table_equal(
segtable,
Table(
rows=[
(0, 1, 2, 1),
(1, 3, 4, 1),
(2, 4, 6, 2),
(3, 8, 10, 2),
],
names=('index', 'start', 'end', 'duration'),
),
)
# -- test I/O -------------------------------
def test_read_write_segwizard(self, segmentlist):
with TemporaryFilename(suffix='.txt') as tmp:
# check write/read returns the same list
segmentlist.write(tmp)
sl2 = self.TEST_CLASS.read(tmp, coalesce=False)
assert_segmentlist_equal(sl2, segmentlist)
assert isinstance(sl2[0][0], LIGOTimeGPS)
# check that coalescing does what it's supposed to
c = type(segmentlist)(segmentlist).coalesce()
sl2 = self.TEST_CLASS.read(tmp, coalesce=True)
assert_segmentlist_equal(sl2, c)
# check gpstype kwarg
sl2 = self.TEST_CLASS.read(tmp, gpstype=float)
assert isinstance(sl2[0][0], float)
def test_read_write_segwizard_strict(self):
with TemporaryFilename(suffix='.txt') as tmp:
with open(tmp, "w") as tmpf:
print("0 0 1 .5", file=tmpf)
with pytest.raises(ValueError):
self.TEST_CLASS.read(tmp, strict=True, format='segwizard')
sl = self.TEST_CLASS.read(tmp, strict=False, format='segwizard')
assert_segmentlist_equal(sl, [(0, 1)])
def test_read_write_segwizard_twocol(self):
with TemporaryFilename(suffix='.txt') as tmp:
with open(tmp, "w") as tmpf:
print("0 1", file=tmpf)
print("2 3", file=tmpf)
sl = self.TEST_CLASS.read(tmp, format='segwizard')
assert_segmentlist_equal(sl, [(0, 1), (2, 3)])
@pytest.mark.parametrize('ext', ('.hdf5', '.h5'))
def test_read_write_hdf5(self, segmentlist, ext):
with TemporaryFilename(suffix=ext) as fp:
# check basic write/read with auto-path discovery
segmentlist.write(fp, 'test-segmentlist')
sl2 = self.TEST_CLASS.read(fp)
assert_segmentlist_equal(sl2, segmentlist)
assert isinstance(sl2[0][0], LIGOTimeGPS)
sl2 = self.TEST_CLASS.read(fp, path='test-segmentlist')
assert_segmentlist_equal(sl2, segmentlist)
# check we can read directly from the h5 object
with h5py.File(fp, "r") as h5f:
sl2 = self.TEST_CLASS.read(h5f["test-segmentlist"])
assert_segmentlist_equal(sl2, segmentlist)
# check overwrite kwarg
with pytest.raises(IOError):
segmentlist.write(fp, 'test-segmentlist')
segmentlist.write(fp, 'test-segmentlist', overwrite=True)
# check gpstype kwarg
sl2 = self.TEST_CLASS.read(fp, gpstype=float)
assert_segmentlist_equal(sl2, segmentlist)
assert isinstance(sl2[0][0], float)
| gpl-3.0 |
BehavioralInsightsTeam/edx-platform | lms/djangoapps/certificates/migrations/0002_data__certificatehtmlviewconfiguration_data.py | 61 | 2057 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
# Converted from the original South migration 0020_certificatehtmlviewconfiguration_data.py
from django.db import migrations, models
def forwards(apps, schema_editor):
"""
Bootstraps the HTML view template with some default configuration parameters
"""
config = {
"default": {
"accomplishment_class_append": "accomplishment-certificate",
"platform_name": "Your Platform Name Here",
"company_about_url": "http://www.example.com/about-us",
"company_privacy_url": "http://www.example.com/privacy-policy",
"company_tos_url": "http://www.example.com/terms-service",
"company_verified_certificate_url": "http://www.example.com/verified-certificate",
"logo_src": "/static/certificates/images/logo.png",
"logo_url": "http://www.example.com"
},
"honor": {
"certificate_type": "Honor Code",
"certificate_title": "Certificate of Achievement",
},
"verified": {
"certificate_type": "Verified",
"certificate_title": "Verified Certificate of Achievement",
}
}
certificate_html_view_configuration_model = apps.get_model("certificates", "CertificateHtmlViewConfiguration")
objects = certificate_html_view_configuration_model.objects
if not objects.exists():
objects.create(
configuration=json.dumps(config),
enabled=False,
)
def backwards(apps, schema_editor):
"""
Rolling back to zero-state, so remove all currently-defined configurations
"""
certificate_html_view_configuration_model = apps.get_model("certificates", "CertificateHtmlViewConfiguration")
certificate_html_view_configuration_model.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('certificates', '0001_initial'),
]
operations = [
migrations.RunPython(forwards, backwards)
]
| agpl-3.0 |
ClockworkNet/OpScripts | opscripts/config/v5.py | 1 | 4898 | # vim: set fileencoding=utf-8 :
"""OpScripts configuration library (thin wrapper over ConfigArgParse)
"""
# Standard library
from __future__ import absolute_import, division, print_function
import os.path
import sys
# Third-party
import configargparse
# Local/library specific
from opscripts.notify.email import v3 as ops_notify_email
def OpsConfigArgParse(**kwargs):
"""Wrap configargparse.ArgumentParser so that:
- default_config_path includes /etc/opscripts/SCRIPTNAME.yaml.
- add_config_file_help defaults to False
- ignore_unknown_config_file_keys defaults to True
- provide shorthand for adding common arguments
Returns the ArgumentParser object
"""
add_args = dict()
prog = os.path.basename(sys.argv[0])
default_conf_name = "%s.%s" % (os.path.splitext(prog)[0], "yaml")
default_conf_path = os.path.join("/etc/opscripts", default_conf_name)
add_program_name = False
if "add_args" in kwargs:
add_args = kwargs["add_args"]
del kwargs["add_args"]
if "add_config_file_help" not in kwargs:
kwargs["add_config_file_help"] = False
if "default_config_files" not in kwargs:
kwargs["default_config_files"] = [default_conf_path]
else:
kwargs["default_config_files"] += [default_conf_path]
default_conf_path = kwargs["default_config_files"]
if "ignore_unknown_config_file_keys" not in kwargs:
kwargs["ignore_unknown_config_file_keys"] = True
if "program_name" not in kwargs:
add_program_name = True
cap = configargparse.ArgumentParser(**kwargs)
# add_args
if "EMAIL" in add_args and add_args["EMAIL"] is True:
add_args["email-from"] = True
add_args["email-host"] = True
add_args["email-port"] = True
add_args["email-to"] = True
if "config" in add_args and add_args["config"] is True:
cap.add_argument("-c", "--config", is_config_file=True,
help="Config file path. Default: {0}"
.format(default_conf_path))
if "dryrun" in add_args and add_args["dryrun"] is True:
cap.add_argument("-n", "--dryrun", action="store_true",
help="Dry run: do not make any changes.")
if "email-from" in add_args and add_args["email-from"] is True:
cap.add_argument("--email-from", metavar="EMAIL",
help="Email sender.")
if "email-host" in add_args and add_args["email-host"] is True:
cap.add_argument("--email-host", metavar="HOST",
default=ops_notify_email.DEFAULT_HOST,
help="Email host used to send messages.")
if "email-port" in add_args and add_args["email-port"] is True:
cap.add_argument("--email-port", metavar="PORT", type=int,
default=ops_notify_email.DEFAULT_PORT,
help="Email port used to send messages.")
if "email-to" in add_args and add_args["email-to"] is True:
cap.add_argument("--email-to", metavar="EMAILS", action="append",
help="Email recipients. May be specified multiple"
" times.")
if "quiet" in add_args and add_args["quiet"] is True:
raise Exception("quiet is no longer supported. Use verbosity"
" instead.")
if "verbose" in add_args and add_args["verbose"] is True:
raise Exception("verbose is no longer supported. Use verbosity"
" instead.")
if "verbosity" in add_args and add_args["verbosity"] is True:
cap.add_argument("-q", "--quiet", action="append_const", const=10,
dest="verbosity",
help="Decrease verbosity. Can be specified multiple"
" times.")
cap.add_argument("-v", "--verbose", action="append_const", const=-10,
dest="verbosity",
help="Increase verbosity. Can be specified multiple"
" times.")
# miscellaneous
if add_program_name:
cap.set_defaults(program_name=prog)
return cap
def parse_args(cap, args=None, namespace=None):
"""Wrap parse_args to allow additional logic:
- Only require --email-from/--email-to when not doing a dryrun
"""
args = cap.parse_args(args=args, namespace=namespace)
if "dryrun" in args and "email_from" in args:
if not args.dryrun:
if not args.email_from:
cap.error("error: argument --email-from is required")
if not args.email_to:
cap.error("error: argument --email-to is required")
else:
if not args.email_from:
args.email_from = "<-dryrun->"
if not args.email_to:
args.email_to = ["<-dryrun->"]
return args
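# A minimal usage sketch (hypothetical script; the add_args keys are the ones
# handled above, and the default config path comes from /etc/opscripts):
#
#     cap = OpsConfigArgParse(description="example",
#                             add_args={"config": True, "dryrun": True,
#                                       "EMAIL": True, "verbosity": True})
#     args = parse_args(cap)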
| mit |
ccxt/ccxt | examples/py/tickers.py | 1 | 3385 | # -*- coding: utf-8 -*-
import os
import sys
import time
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
def style(s, style):
return style + s + '\033[0m'
def green(s):
return style(s, '\033[92m')
def blue(s):
return style(s, '\033[94m')
def yellow(s):
return style(s, '\033[93m')
def red(s):
return style(s, '\033[91m')
def pink(s):
return style(s, '\033[95m')
def bold(s):
return style(s, '\033[1m')
def underline(s):
return style(s, '\033[4m')
def dump(*args):
print(' '.join([str(arg) for arg in args]))
def print_exchanges():
dump('Supported exchanges:', ', '.join(ccxt.exchanges))
def print_usage():
dump("Usage: python " + sys.argv[0], green('id'), yellow('[symbol]'))
dump("Symbol is optional, for example:")
dump("python " + sys.argv[0], green('kraken'))
dump("python " + sys.argv[0], green('coinbasepro'), yellow('BTC/USD'))
print_exchanges()
def print_ticker(exchange, symbol):
ticker = exchange.fetch_ticker(symbol.upper())
print(ticker)
dump(
green(exchange.id),
yellow(symbol),
'ticker',
ticker['datetime'],
'high: ' + str(ticker['high']),
'low: ' + str(ticker['low']),
'bid: ' + str(ticker['bid']),
'ask: ' + str(ticker['ask']),
'volume: ' + str(ticker['quoteVolume']))
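# example output line (illustrative values; the real output is colorized):
#   kraken BTC/USD ticker 2021-01-01T00:00:00.000Z high: 30000.0 low: 28000.0 bid: 29500.0 ask: 29500.5 volume: 1234.5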
try:
id = sys.argv[1] # get exchange id from command line arguments
# check if the exchange is supported by ccxt
exchange_found = id in ccxt.exchanges
if exchange_found:
dump('Instantiating', green(id))
# instantiate the exchange by id
exchange = getattr(ccxt, id)()
# load all markets from the exchange
markets = exchange.load_markets()
# output all symbols
dump(green(id), 'has', len(exchange.symbols), 'symbols:', yellow(', '.join(exchange.symbols)))
try:
if len(sys.argv) > 2: # if symbol is present, get that symbol only
symbol = sys.argv[2]
print_ticker(exchange, symbol)
else: # run through all symbols one by one
delay = int(exchange.rateLimit / 1000) # delay in between requests
for symbol in exchange.symbols:
# suffix '.d' means 'darkpool' on some exchanges
if symbol.find('.d') < 0:
# sleep to remain under the rateLimit
time.sleep(delay)
# fetch and print ticker
print_ticker(exchange, symbol)
except ccxt.DDoSProtection as e:
print(type(e).__name__, e.args, 'DDoS Protection (ignoring)')
except ccxt.RequestTimeout as e:
print(type(e).__name__, e.args, 'Request Timeout (ignoring)')
except ccxt.ExchangeNotAvailable as e:
print(type(e).__name__, e.args, 'Exchange Not Available due to downtime or maintenance (ignoring)')
except ccxt.AuthenticationError as e:
print(type(e).__name__, e.args, 'Authentication Error (missing API keys, ignoring)')
else:
dump('Exchange ' + red(id) + ' not found')
print_usage()
except Exception as e:
print(type(e).__name__, e.args, str(e))
print_usage()
| mit |
e110c0/unisono | src/unisono/mmplugins/pathmtu.py | 1 | 1898 | '''
pathmtu.py
Created on: May 14, 2009
Authors: dh
$LastChangedBy$
$LastChangedDate$
$Revision$
(C) 2008-2009 by Computer Networks and Internet, University of Tuebingen
This file is part of UNISONO Unified Information Service for Overlay
Network Optimization.
UNISONO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
UNISONO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with UNISONO. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
from unisono.mmplugins import mmtemplates
from ctypes import *
from os import path
from unisono.dataitem import DataItem
class PathMTUResult(Structure):
'''
Result structure for the PathMTU module
'''
_fields_ = [('PATHMTU', c_int),
('HOPCOUNT', c_int),
('error', c_int),
('errortext', c_char_p)]
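# NOTE: _fields_ gives the name, order and C type of every member, so this
# layout must mirror the result struct produced by libPathMTU.so for ctypes
# to unpack PATHMTU, HOPCOUNT and the error information correctly.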
class PathMTU(mmtemplates.MMcTemplate):
'''
Wrapper class for the PathMTU module
'''
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def __init__(self, *args):
'''
Constructor
'''
super().__init__(*args)
self.libfile = path.join(path.dirname(__file__), 'libPathMTU.so')
self.cresstruct = PathMTUResult()
self.cost = 10000
self.dataitems = [DataItem('PATHMTU',2,600,1200),
DataItem('HOPCOUNT',2,600,1200)]
self.load_library()
def checkrequest(self, request):
return True
| gpl-2.0 |
jamiefolsom/edx-platform | common/djangoapps/track/shim.py | 40 | 6431 | """Map new event context values to old top-level field values. Ensures events can be parsed by legacy parsers."""
import json
import logging
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
log = logging.getLogger(__name__)
CONTEXT_FIELDS_TO_INCLUDE = [
'username',
'session',
'ip',
'agent',
'host',
'referer',
'accept_language'
]
class LegacyFieldMappingProcessor(object):
"""Ensures all required fields are included in emitted events"""
def __call__(self, event):
context = event.get('context', {})
if 'context' in event:
for field in CONTEXT_FIELDS_TO_INCLUDE:
self.move_from_context(field, event)
remove_shim_context(event)
if 'data' in event:
if context.get('event_source', '') == 'browser' and isinstance(event['data'], dict):
event['event'] = json.dumps(event['data'])
else:
event['event'] = event['data']
del event['data']
else:
event['event'] = {}
if 'timestamp' in context:
event['time'] = context['timestamp']
del context['timestamp']
elif 'timestamp' in event:
event['time'] = event['timestamp']
if 'timestamp' in event:
del event['timestamp']
self.move_from_context('event_type', event, event.get('name', ''))
self.move_from_context('event_source', event, 'server')
self.move_from_context('page', event, None)
def move_from_context(self, field, event, default_value=''):
"""Move a field from the context to the top level of the event."""
context = event.get('context', {})
if field in context:
event[field] = context[field]
del context[field]
else:
event[field] = default_value
def remove_shim_context(event):
if 'context' in event:
context = event['context']
# These fields are present elsewhere in the event at this point
context_fields_to_remove = set(CONTEXT_FIELDS_TO_INCLUDE)
# This field is only used for Segment web analytics and does not concern researchers
context_fields_to_remove.add('client_id')
for field in context_fields_to_remove:
if field in context:
del context[field]
NAME_TO_EVENT_TYPE_MAP = {
'edx.video.played': 'play_video',
'edx.video.paused': 'pause_video',
'edx.video.stopped': 'stop_video',
'edx.video.loaded': 'load_video',
'edx.video.position.changed': 'seek_video',
'edx.video.seeked': 'seek_video',
'edx.video.transcript.shown': 'show_transcript',
'edx.video.transcript.hidden': 'hide_transcript',
}
class VideoEventProcessor(object):
"""
Converts new format video events into the legacy video event format.
Mobile devices cannot actually emit events that exactly match their counterparts emitted by the LMS javascript
video player. Instead of attempting to get them to do that, we insert a shim here that takes the events
they *can* easily emit and converts them into the legacy format.
TODO: Remove this shim and perform the conversion as part of some batch canonicalization process.
"""
def __call__(self, event):
name = event.get('name')
if not name:
return
if name not in NAME_TO_EVENT_TYPE_MAP:
return
# Convert edx.video.seeked to edx.video.position.changed because edx.video.seeked was not intended to actually
# ever be emitted.
if name == "edx.video.seeked":
event['name'] = "edx.video.position.changed"
event['event_type'] = NAME_TO_EVENT_TYPE_MAP[name]
if 'event' not in event:
return
payload = event['event']
if 'module_id' in payload:
module_id = payload['module_id']
try:
usage_key = UsageKey.from_string(module_id)
except InvalidKeyError:
log.warning('Unable to parse module_id "%s"', module_id, exc_info=True)
else:
payload['id'] = usage_key.html_id()
del payload['module_id']
if 'current_time' in payload:
payload['currentTime'] = payload.pop('current_time')
if 'context' in event:
context = event['context']
# Converts seek_type to seek and skip|slide to onSlideSeek|onSkipSeek
if 'seek_type' in payload:
seek_type = payload['seek_type']
if seek_type == 'slide':
payload['type'] = "onSlideSeek"
elif seek_type == 'skip':
payload['type'] = "onSkipSeek"
del payload['seek_type']
# For the iOS build that was returning +30 for a 30-second backward skip
if (
context['application']['version'] == "1.0.02" and
context['application']['name'] == "edx.mobileapp.iOS"
):
if 'requested_skip_interval' in payload and 'type' in payload:
if (
payload['requested_skip_interval'] == 30 and
payload['type'] == "onSkipSeek"
):
payload['requested_skip_interval'] = -30
# For the Android build that isn't distinguishing between skip/seek
if 'requested_skip_interval' in payload:
if abs(payload['requested_skip_interval']) != 30:
if 'type' in payload:
payload['type'] = 'onSlideSeek'
if 'open_in_browser_url' in context:
page, _sep, _tail = context.pop('open_in_browser_url').rpartition('/')
event['page'] = page
event['event'] = json.dumps(payload)
class GoogleAnalyticsProcessor(object):
"""Adds course_id as label, and sets nonInteraction property"""
# documentation of fields here: https://segment.com/docs/integrations/google-analytics/
# this should *only* be used on events destined for segment.com and eventually google analytics
def __call__(self, event):
context = event.get('context', {})
course_id = context.get('course_id')
if course_id is not None:
event['label'] = course_id
event['nonInteraction'] = 1
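# Example (illustrative course id): for an event whose context contains
# {'course_id': 'course-v1:edX+DemoX+Demo_2015'} this processor sets
#     event['label'] = 'course-v1:edX+DemoX+Demo_2015'
# while event['nonInteraction'] = 1 is applied to every event regardless.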
| agpl-3.0 |
cnc-club/linuxcnc | src/hal/user_comps/vismach/scaragui.py | 23 | 5700 | #!/usr/bin/python2.4
# Copyright 2007 John Kasunich and Jeff Epler
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from vismach import *
import hal
import math
import sys
c = hal.component("scaragui")
c.newpin("joint0", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint1", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint2", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint3", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint4", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint5", hal.HAL_FLOAT, hal.HAL_IN)
c.ready()
# parameters that define the geometry; see scarakins.c for definitions. These
# numbers match the defaults there, and will need to be changed or specified on
# the command line if you are not using the defaults.
d1 = 490.0
d2 = 340.0
d3 = 50.0
d4 = 250.0
d5 = 50.0
d6 = 50.0
j3min = 40.0
j3max = 270.0
for setting in sys.argv[1:]: exec setting
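# e.g. a hypothetical invocation overriding two of the defaults above:
#   scaragui.py d1=500.0 j3max=280.0
# (each command-line token is exec'd as a Python assignment)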
# calculate a bunch of other dimensions that are used
# to scale the model of the machine
# most of these scale factors are arbitrary, to give
# a nicely proportioned machine. If you know specifics
# for the machine you are modeling, feel free to change
# these numbers
tool_len = math.sqrt(d5*d5+d6*d6) # don't change
tool_dia = tool_len / 6.0
# diameters of the arms
l1_dia = d2 / 5.0
l2_dia = d4 / 5.0
l3_dia = l2_dia * 0.8
# diameters of the "lumps" at the joints
j0_dia = l1_dia * 1.5
j1_dia = max(l1_dia * 1.25, l2_dia * 1.5)
j2_dia = l2_dia * 1.25
# other dims
j0_hi = l1_dia * 1.2
j1_hi1 = l1_dia * 1.1
j1_hi2 = l2_dia * 1.2
j2_hi = l2_dia * 1.3
# don't change these
tool_angle = math.degrees(math.atan2(d6,d5))
tool_radius = tool_dia / 2.0
l1_rad = l1_dia / 2.0
l2_rad = l2_dia / 2.0
l3_len = j3max + j2_hi * 0.7
l3_rad = l3_dia / 2.0
j0_hi = j0_hi / 2.0
j0_rad = j0_dia / 2.0
j1_hi1 = j1_hi1 / 2.0
j1_hi2 = j1_hi2 / 2.0
j1_rad = j1_dia / 2.0
j2_hi = j2_hi / 2.0
j2_rad = j2_dia / 2.0
size = max(d1+d3+l3_len,d2+d4+d6)
# tool - cylinder with a point, and a ball to hide the blunt back end
# the origin starts out at the tool tip, and we want to capture this
# "tooltip" coordinate system
tooltip = Capture()
tool = Collection([
tooltip,
Sphere(0.0, 0.0, tool_len, tool_dia),
CylinderZ(tool_len, tool_radius, tool_dia, tool_radius),
CylinderZ(tool_dia, tool_radius, 0.0, 0.0)])
# translate so origin is at base of tool, not the tip
tool = Translate([tool],0.0,0.0,-tool_len)
# the tool might not be pointing straight down
tool = Rotate([tool],tool_angle,0.0,-1.0,0.0)
# make joint 3 rotate
tool = HalRotate([tool],c,"joint3",1,0,0,1)
link3 = CylinderZ(0.0, l3_rad, l3_len, l3_rad)
# attach tool to end
link3 = Collection([tool,link3])
# make joint 2 go up and down
link3 = HalTranslate([link3],c,"joint2",0,0,-1)
# outer arm
# start with link3 and the cylinder it slides in
link2 = Collection([
link3,
CylinderZ(-j2_hi, j2_rad, j2_hi, j2_rad)])
# move to end of arm
link2 = Translate([link2], d4, 0.0, 0.0)
# add the arm itself
link2 = Collection([
link2,
CylinderX(d4, l2_rad, 1.5*j1_rad, l2_rad)])
# the joint gets interesting, because link2 can be above or below link1
if d3 > 0:
flip = 1
else:
flip = -1
# add the joint
link2 = Collection([
link2,
Box(1.5*j1_rad, -0.9*j1_rad, -j1_hi2, 1.15*j1_rad, 0.9*j1_rad, j1_hi2),
Box(1.15*j1_rad, -0.9*j1_rad, -0.4*d3, 0.0, 0.9*j1_rad, flip*j1_hi2),
CylinderZ(-0.4*d3, j1_rad, flip*1.2*j1_hi2, j1_rad)])
# make the joint work
link2 = HalRotate([link2],c,"joint1",1,0,0,1)
# inner arm
# the outer arm and the joint
link1 = Collection([
Translate([link2],0.0,0.0,d3),
Box(-1.5*j1_rad, -0.9*j1_rad, -j1_hi1, -1.15*j1_rad, 0.9*j1_rad, j1_hi1),
Box(-1.15*j1_rad, -0.9*j1_rad, 0.4*d3, 0.0, 0.9*j1_rad, -flip*j1_hi1),
CylinderZ(0.4*d3, j1_rad, flip*-1.2*j1_hi1, j1_rad),
CylinderZ(0.6*d3, 0.8*j1_rad, 0.4*d3, 0.8*j1_rad)])
# move to end of arm
link1 = Translate([link1], d2, 0.0, 0.0)
# add the arm itself, and the inner joint
link1 = Collection([
link1,
CylinderX(d2-1.5*j1_rad, l1_rad, 1.5*j0_rad, l1_rad),
Box(1.5*j0_rad, -0.9*j0_rad, -j0_hi, 0.0, 0.9*j0_rad, j0_hi),
CylinderZ(-1.2*j0_hi, j0_rad, 1.2*j0_hi, j0_rad)])
# make the joint work
link1 = HalRotate([link1],c,"joint0",1,0,0,1)
#stationary base
link0 = Collection([
CylinderZ(d1-j0_hi, 0.8*j0_rad, d1-1.5*j0_hi, 0.8*j0_rad),
CylinderZ(d1-1.5*j0_hi, 0.8*j0_rad, 0.07*d1, 1.3*j0_rad),
CylinderZ(0.07*d1, 2.0*j0_rad, 0.0, 2.0*j0_rad)])
# slap the arm on top
link0 = Collection([
link0,
Translate([link1],0,0,d1)])
# add a floor
floor = Box(-0.5*size,-0.5*size,-0.02*size,0.5*size,0.5*size,0.0)
# and a table for the workpiece - define in workpiece coords
reach = d2+d4-d6
table_height = d1+d3-j3max-d5
work = Capture()
table = Collection([
work,
Box(-0.35*reach,-0.5*reach, -0.1*d1, 0.35*reach, 0.5*reach, 0.0)])
# make the table moveable (tilting)
table = HalRotate([table],c,"joint4",1,0,1,0)
table = HalRotate([table],c,"joint5",1,1,0,0)
# put the table into its proper place
table = Translate([table],0.5*reach,0.0,table_height)
model = Collection([link0, floor, table])
main(model, tooltip, work, size)
| lgpl-2.1 |
hitszxp/scikit-learn | sklearn/neighbors/kde.py | 32 | 7925 | """
Kernel Density Estimation
-------------------------
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
import numpy as np
from scipy.special import gammainc
from ..base import BaseEstimator
from ..utils import check_array, check_random_state
from ..utils.extmath import row_norms
from .ball_tree import BallTree, DTYPE
from .kd_tree import KDTree
VALID_KERNELS = ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear',
'cosine']
TREE_DICT = {'ball_tree': BallTree, 'kd_tree': KDTree}
# TODO: implement a brute force version for testing purposes
# TODO: bandwidth estimation
# TODO: create a density estimation base class?
class KernelDensity(BaseEstimator):
"""Kernel Density Estimation
Parameters
----------
bandwidth : float
The bandwidth of the kernel.
algorithm : string
The tree algorithm to use. Valid options are
['kd_tree'|'ball_tree'|'auto']. Default is 'auto'.
kernel : string
The kernel to use. Valid kernels are
['gaussian'|'tophat'|'epanechnikov'|'exponential'|'linear'|'cosine']
Default is 'gaussian'.
metric : string
The distance metric to use. Note that not all metrics are
valid with all algorithms. Refer to the documentation of
:class:`BallTree` and :class:`KDTree` for a description of
available algorithms. Note that the normalization of the density
output is correct only for the Euclidean distance metric. Default
is 'euclidean'.
atol : float
The desired absolute tolerance of the result. A larger tolerance will
generally lead to faster execution. Default is 0.
rtol : float
The desired relative tolerance of the result. A larger tolerance will
generally lead to faster execution. Default is 0.
breadth_first : boolean
If true (default), use a breadth-first approach to the problem.
Otherwise use a depth-first approach.
leaf_size : int
Specify the leaf size of the underlying tree. See :class:`BallTree`
or :class:`KDTree` for details. Default is 40.
metric_params : dict
Additional parameters to be passed to the tree for use with the
metric. For more information, see the documentation of
:class:`BallTree` or :class:`KDTree`.
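Examples
--------
A minimal sketch of the estimator API defined below (random data purely
for illustration)::

    import numpy as np
    X = np.random.RandomState(0).randn(100, 2)
    kde = KernelDensity(bandwidth=0.5, kernel='gaussian').fit(X)
    log_density = kde.score_samples(X[:5])   # log-density at five points
    samples = kde.sample(3, random_state=0)  # draws from the fitted model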
"""
def __init__(self, bandwidth=1.0, algorithm='auto',
kernel='gaussian', metric="euclidean", atol=0, rtol=0,
breadth_first=True, leaf_size=40, metric_params=None):
self.algorithm = algorithm
self.bandwidth = bandwidth
self.kernel = kernel
self.metric = metric
self.atol = atol
self.rtol = rtol
self.breadth_first = breadth_first
self.leaf_size = leaf_size
self.metric_params = metric_params
# run the choose algorithm code so that exceptions will happen here
# we're using clone() in the GenerativeBayes classifier,
# so we can't do this kind of logic in __init__
self._choose_algorithm(self.algorithm, self.metric)
if bandwidth <= 0:
raise ValueError("bandwidth must be positive")
if kernel not in VALID_KERNELS:
raise ValueError("invalid kernel: '{0}'".format(kernel))
def _choose_algorithm(self, algorithm, metric):
# given the algorithm string + metric string, choose the optimal
# algorithm to compute the result.
if algorithm == 'auto':
# use KD Tree if possible
if metric in KDTree.valid_metrics:
return 'kd_tree'
elif metric in BallTree.valid_metrics:
return 'ball_tree'
else:
raise ValueError("invalid metric: '{0}'".format(metric))
elif algorithm in TREE_DICT:
if metric not in TREE_DICT[algorithm].valid_metrics:
raise ValueError("invalid metric for {0}: "
"'{1}'".format(TREE_DICT[algorithm],
metric))
return algorithm
else:
raise ValueError("invalid algorithm: '{0}'".format(algorithm))
def fit(self, X, y=None):
"""Fit the Kernel Density model on the data.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
algorithm = self._choose_algorithm(self.algorithm, self.metric)
X = check_array(X, order='C', dtype=DTYPE)
kwargs = self.metric_params
if kwargs is None:
kwargs = {}
self.tree_ = TREE_DICT[algorithm](X, metric=self.metric,
leaf_size=self.leaf_size,
**kwargs)
return self
def score_samples(self, X):
"""Evaluate the density model on the data.
Parameters
----------
X : array_like, shape (n_samples, n_features)
An array of points to query. Last dimension should match dimension
of training data (n_features).
Returns
-------
density : ndarray, shape (n_samples,)
The array of log(density) evaluations.
"""
# The returned density is normalized to the number of points.
# For it to be a probability, we must scale it. For this reason
# we'll also scale atol.
X = check_array(X, order='C', dtype=DTYPE)
N = self.tree_.data.shape[0]
atol_N = self.atol * N
log_density = self.tree_.kernel_density(
X, h=self.bandwidth, kernel=self.kernel, atol=atol_N,
rtol=self.rtol, breadth_first=self.breadth_first, return_log=True)
log_density -= np.log(N)
return log_density
def score(self, X, y=None):
"""Compute the total log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Total log-likelihood of the data in X.
"""
return np.sum(self.score_samples(X))
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Currently, this is implemented only for gaussian and tophat kernels.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
random_state : RandomState or an int seed (0 by default)
A random number generator instance.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples.
"""
# TODO: implement sampling for other valid kernel shapes
if self.kernel not in ['gaussian', 'tophat']:
raise NotImplementedError()
data = np.asarray(self.tree_.data)
rng = check_random_state(random_state)
i = rng.randint(data.shape[0], size=n_samples)
if self.kernel == 'gaussian':
return np.atleast_2d(rng.normal(data[i], self.bandwidth))
elif self.kernel == 'tophat':
# we first draw points from a d-dimensional normal distribution,
# then use an incomplete gamma function to map them to a uniform
# d-dimensional tophat distribution.
dim = data.shape[1]
X = rng.normal(size=(n_samples, dim))
s_sq = row_norms(X, squared=True)
correction = (gammainc(0.5 * dim, 0.5 * s_sq) ** (1. / dim)
* self.bandwidth / np.sqrt(s_sq))
return data[i] + X * correction[:, np.newaxis]
| bsd-3-clause |
tjhei/burnman_old2 | burnman/minerals/Matas_etal_2007.py | 2 | 3277 | # BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
"""
Matas_etal_2007
^^^^^^^^^^^^^^^
Minerals from Matas et al. 2007 and references therein
"""
from burnman.minerals_base import *
class mg_perovskite(material): # Matas et al 2007 Tables 1&2
"""
Matas et al. 2007 and references therein
"""
def __init__(self):
self.params = {
'equation_of_state':'mgd2',
'V_0': 24.43e-6,
'K_0': 250.0e9,
'Kprime_0': 4.0,
'G_0': 175.0e9,
'Gprime_0': 1.8,
'molar_mass': .1020,
'n': 5,
'Debye_0': 1070.,
'grueneisen_0': 1.48,
'q_0': 1.4}
class fe_perovskite(material): # Matas et al 2007 Tables 1&2
"""
Matas et al. 2007 and references therein
"""
def __init__(self):
self.params = {
'equation_of_state':'mgd2',
'V_0': 25.34e-6,
'K_0': 250.0e9,
'Kprime_0': 4.0,
'G_0': 135.0e9,
'Gprime_0': 1.3,
'molar_mass': .1319,
'n': 5,
'Debye_0': 841.,
'grueneisen_0': 1.48,
'q_0': 1.4}
class al_perovskite(material): # Matas et al 2007 Tables 1&2
"""
Matas et al. 2007 and references therein
"""
def __init__(self):
self.params = {
'equation_of_state':'mgd2',
'V_0': 24.58e-6,
'K_0': 249.0e9,
'Kprime_0': 4.0,
'G_0': 165.0e9,
'Gprime_0': 1.8,
'molar_mass': .1005,
'n': 5,
'Debye_0': 1021.,
'grueneisen_0': 1.48,
'q_0': 1.4}
class ca_perovskite(material): # Matas et al 2007 Tables 1&2
"""
Matas et al. 2007 and references therein
"""
def __init__(self):
self.params = {
'equation_of_state':'mgd2',
'V_0': 27.45e-6,
'K_0': 236.0e9,
'Kprime_0': 3.9,
'G_0': 165.0e9,
'Gprime_0': 2.46,
'molar_mass': .11616,
'n': 5,
'Debye_0': 984.,
'grueneisen_0': 1.53,
'q_0': 1.6}
class periclase (material): # Matas et al 2007 Tables 1&2
"""
Matas et al. 2007 and references therein
"""
def __init__(self):
self.params = {
'equation_of_state':'mgd2',
'V_0': 11.25e-6,
'K_0': 160.1e9,
'Kprime_0': 3.83,
'G_0': 130.0e9,
'Gprime_0': 2.2,
'molar_mass': .0403,
'n': 2,
'Debye_0': 673.,
'grueneisen_0': 1.41,
'q_0': 1.3 }
class wuestite (material): # Matas et al 2007 Tables 1&2
"""
Matas et al. 2007 and references therein
"""
def __init__(self):
self.params = {
'equation_of_state':'mgd2',
'V_0': 12.26e-6,
'K_0': 160.1e9,
'Kprime_0': 3.83,
'G_0': 46.0e9,
'Gprime_0': 0.6,
'molar_mass': .0718,
'n': 2,
'Debye_0': 673.,
'grueneisen_0': 1.41,
'q_0': 1.3 }
| gpl-2.0 |
jrmsdev/bottled-md | setup.py | 1 | 1534 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import sys
from os import path
from setuptools import setup
def cat(fpath):
with open(fpath, 'r') as fh:
blob = fh.read()
fh.close()
return blob
desc = 'generate static html5 files from markdown sources (or serve dynamically)'
install_requires = [
'bottle>=0.12.9',
'Markdown<3.0',
]
setup(
name = 'bottled-md',
version = cat('VERSION').strip(),
description = desc,
long_description = desc,
license = 'BSD',
url = 'https://github.com/jrmsdev/bottled-md',
author = 'Jeremías Casteglione',
author_email = 'git@jrms.com.ar',
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
],
install_requires = install_requires,
py_modules = ['bmd', 'mdx', 'utils'],
data_files = [
('', ['VERSION', 'LICENSE', 'README.md', 'bmd.sh']),
('templates', [
'templates/htdoc_head.html',
'templates/htdoc_tail.html',
]),
('static', ['static/bmd.css']),
],
zip_safe = False,
entry_points = {
'console_scripts': [
'bmd=bmd:cmd',
],
},
test_suite = 'test.suite',
)
| bsd-3-clause |
Diptanshu8/zulip | tools/test_user_agent_parsing.py | 29 | 1026 | #!/usr/bin/env python
from __future__ import print_function
import re
from collections import defaultdict
import os
import sys
from typing import Dict
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from zerver.lib.user_agent import parse_user_agent
user_agents_parsed = defaultdict(int) # type: Dict[str, int]
user_agents_path = os.path.join(os.path.dirname(__file__), "user_agents_unique")
parse_errors = 0
for line in open(user_agents_path).readlines():
line = line.strip()
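# each input line is expected to look like (illustrative user-agent):
#   1234 "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"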
match = re.match('^(?P<count>[0-9]+) "(?P<user_agent>.*)"$', line)
if match is None:
print(line)
continue
groupdict = match.groupdict()
count = groupdict["count"]
user_agent = groupdict["user_agent"]
ret = parse_user_agent(user_agent)
if ret is None:
print("parse error", line)
parse_errors += 1
continue
user_agents_parsed[ret["name"]] += int(count)
for key in user_agents_parsed:
print(" ", key, user_agents_parsed[key])
print("%s parse errors!" % (parse_errors,))
| apache-2.0 |